banger/internal/daemon/fastpath_test.go
Thales Maciel 362009d747
daemon split (1/5): extract *HostNetwork service
First phase of splitting the daemon god-struct into focused services
with explicit ownership.

HostNetwork now owns everything host-networking: the TAP interface
pool (initializeTapPool / ensureTapPool / acquireTap / releaseTap /
createTap), bridge + socket dir setup, firecracker process primitives
(find/resolve/kill/wait/ensureSocketAccess/sendCtrlAltDel), DM
snapshot lifecycle, NAT rule enforcement, guest DNS server lifecycle
+ routing setup, and the vsock-agent readiness probe. That's 7 files
whose receivers flipped from *Daemon to *HostNetwork, plus a new
host_network.go that declares the struct, its hostNetworkDeps, and
the factored firecracker + DNS helpers that used to live in vm.go.

Daemon gives up the tapPool and vmDNS fields entirely; they're now
HostNetwork's business. Construction goes through newHostNetwork in
Daemon.Open with an explicit dependency bag (runner, logger, config,
layout, closing). A lazy-init hostNet() helper on Daemon supports
test literals that don't wire net explicitly — production always
populates it eagerly.

Signature tightenings where the old receiver reached into VM-service
state:
 - ensureNAT(ctx, vm, enable) → ensureNAT(ctx, guestIP, tap, enable).
   Callers resolve tap from the handle cache themselves.
 - initializeTapPool(ctx) → initializeTapPool(usedTaps []string).
   Daemon.Open enumerates VMs, collects taps from handles, hands the
   slice in.

rebuildDNS stays on *Daemon as the orchestrator — it filters by
vm-alive (a VMService concern that handles will move to in phase 4) then
calls HostNetwork.replaceDNS with the already-filtered map.

Capability hooks continue to take *Daemon; they now use it as a
facade to reach services (d.net.ensureNAT, d.hostNet().*). Planned
CapabilityHost interface extraction is orthogonal, left for later.

Tests: dns_routing_test.go + fastpath_test.go + nat_test.go +
snapshot_test.go + open_close_test.go were touched to construct
HostNetwork literals where they exercise its methods directly, or
route through d.hostNet() where they exercise the Daemon entry
points.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-20 20:11:46 -03:00

132 lines
3.9 KiB
Go

package daemon
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"os"
"path/filepath"
"strconv"
"testing"
"banger/internal/guest"
"banger/internal/model"
)
func TestEnsureWorkDiskClonesSeedImageAndResizes(t *testing.T) {
t.Parallel()
vmDir := t.TempDir()
seedPath := filepath.Join(t.TempDir(), "root.work-seed.ext4")
if err := os.WriteFile(seedPath, []byte("seed-data"), 0o644); err != nil {
t.Fatalf("WriteFile(seed): %v", err)
}
workDiskPath := filepath.Join(vmDir, "root.ext4")
runner := &scriptedRunner{
t: t,
steps: []runnerStep{
{call: runnerCall{name: "e2fsck", args: []string{"-p", "-f", workDiskPath}}},
{call: runnerCall{name: "resize2fs", args: []string{workDiskPath}}},
},
}
d := &Daemon{runner: runner}
vm := testVM("seeded", "image-seeded", "172.16.0.60")
vm.Runtime.WorkDiskPath = workDiskPath
vm.Spec.WorkDiskSizeBytes = 2 * 1024 * 1024
image := testImage("image-seeded")
image.WorkSeedPath = seedPath
if _, err := d.ensureWorkDisk(context.Background(), &vm, image); err != nil {
t.Fatalf("ensureWorkDisk: %v", err)
}
runner.assertExhausted()
info, err := os.Stat(workDiskPath)
if err != nil {
t.Fatalf("Stat(work disk): %v", err)
}
if info.Size() != vm.Spec.WorkDiskSizeBytes {
t.Fatalf("work disk size = %d, want %d", info.Size(), vm.Spec.WorkDiskSizeBytes)
}
}
// TestTapPoolWarmsAndReusesIdleTap checks that warming the TAP pool creates
// the pool device exactly once (tuntap add, enslave to bridge, link up), that
// acquireTap hands out the warmed tap instead of the caller's fallback name,
// and that a released tap is reused on the next acquire without re-running
// any creation commands.
func TestTapPoolWarmsAndReusesIdleTap(t *testing.T) {
	t.Parallel()

	uid := strconv.Itoa(os.Getuid())
	gid := strconv.Itoa(os.Getgid())

	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			// tap-pool-0 does not exist yet, so warming must create it.
			{call: runnerCall{name: "ip", args: []string{"link", "show", "tap-pool-0"}}, err: errors.New("exit status 1")},
			sudoStep("", nil, "ip", "tuntap", "add", "dev", "tap-pool-0", "mode", "tap", "user", uid, "group", gid),
			sudoStep("", nil, "ip", "link", "set", "tap-pool-0", "master", model.DefaultBridgeName),
			sudoStep("", nil, "ip", "link", "set", "tap-pool-0", "up"),
			sudoStep("", nil, "ip", "link", "set", model.DefaultBridgeName, "up"),
		},
	}
	d := &Daemon{
		runner: runner,
		config: model.DaemonConfig{
			BridgeName:  model.DefaultBridgeName,
			TapPoolSize: 1,
		},
		closing: make(chan struct{}),
	}

	ctx := context.Background()
	hn := d.hostNet()
	hn.ensureTapPool(ctx)

	// First acquire should serve the warmed pool tap, not the fallback.
	got, err := hn.acquireTap(ctx, "tap-fallback")
	if err != nil {
		t.Fatalf("acquireTap: %v", err)
	}
	if got != "tap-pool-0" {
		t.Fatalf("tapName = %q, want tap-pool-0", got)
	}

	if err := hn.releaseTap(ctx, got); err != nil {
		t.Fatalf("releaseTap: %v", err)
	}

	// No further runner steps are scripted, so reuse must not touch `ip`.
	got, err = hn.acquireTap(ctx, "tap-fallback")
	if err != nil {
		t.Fatalf("acquireTap second time: %v", err)
	}
	if got != "tap-pool-0" {
		t.Fatalf("tapName second = %q, want tap-pool-0", got)
	}
	runner.assertExhausted()
}
// TestEnsureAuthorizedKeyOnWorkDiskSkipsRepairForMatchingSeededFingerprint
// exercises the fast path: when the work disk was just cloned from seed and
// the image's seeded key fingerprint matches the daemon's configured SSH key,
// ensureAuthorizedKeyOnWorkDisk must run no repair commands at all. The
// scripted runner has zero steps, so any command execution fails the test.
func TestEnsureAuthorizedKeyOnWorkDiskSkipsRepairForMatchingSeededFingerprint(t *testing.T) {
	t.Parallel()

	// Generate a throwaway RSA key and put it where the daemon's config
	// points. 1024 bits keeps the test fast; key strength is irrelevant.
	key, err := rsa.GenerateKey(rand.Reader, 1024)
	if err != nil {
		t.Fatalf("GenerateKey: %v", err)
	}
	keyPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	sshKeyPath := filepath.Join(t.TempDir(), "id_rsa")
	if err := os.WriteFile(sshKeyPath, keyPEM, 0o600); err != nil {
		t.Fatalf("WriteFile(private key): %v", err)
	}

	// Fingerprint of the key we just wrote; seeding the image with the
	// same value is what should trigger the skip.
	fingerprint, err := guest.AuthorizedPublicKeyFingerprint(sshKeyPath)
	if err != nil {
		t.Fatalf("AuthorizedPublicKeyFingerprint: %v", err)
	}

	runner := &scriptedRunner{t: t}
	d := &Daemon{
		runner: runner,
		config: model.DaemonConfig{SSHKeyPath: sshKeyPath},
	}

	vm := testVM("seeded-fastpath", "image-seeded-fastpath", "172.16.0.62")
	vm.Runtime.WorkDiskPath = filepath.Join(t.TempDir(), "root.ext4")
	image := model.Image{SeededSSHPublicKeyFingerprint: fingerprint}

	if err := d.ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm, image, workDiskPreparation{ClonedFromSeed: true}); err != nil {
		t.Fatalf("ensureAuthorizedKeyOnWorkDisk: %v", err)
	}
	runner.assertExhausted()
}