Phase 4 of the daemon god-struct refactor. VM lifecycle, create-op
registry, handle cache, disk provisioning, stats polling, ports
query, and the per-VM lock set all move off *Daemon onto *VMService.
Daemon keeps thin forwarders only for FindVM / TouchVM (its dispatch
surface) and is otherwise out of the VM lifecycle. Lazy init via
d.vmSvc() mirrors the earlier services so test literals like
`&Daemon{store: db, runner: r}` still get a functional service
without spelling one out.
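
A minimal sketch of that accessor, assuming the same sync.Once shape
as the earlier services (the dep types and field names below are
placeholders, not the real ones):

```go
package daemon

import "sync"

// Trimmed shapes; the real dep set is wider (store, runner, net,
// peer services).
type Store interface{}
type Runner interface{}

type VMService struct {
	store  Store
	runner Runner
}

type Daemon struct {
	store  Store
	runner Runner

	vmOnce sync.Once
	vm     *VMService
}

// vmSvc builds the service on first use, so a bare test literal like
// &Daemon{store: db, runner: r} gets a working service for free.
func (d *Daemon) vmSvc() *VMService {
	d.vmOnce.Do(func() {
		d.vm = &VMService{store: d.store, runner: d.runner}
	})
	return d.vm
}
```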
Three small cleanups along the way:
* preflight helpers (validateStartPrereqs / addBaseStartPrereqs
/ addBaseStartCommandPrereqs / validateWorkDiskResizePrereqs)
move with the VM methods that call them.
* cleanupRuntime / rebuildDNS move to *VMService, with
HostNetwork primitives (findFirecrackerPID, cleanupDMSnapshot,
killVMProcess, releaseTap, waitForExit, sendCtrlAltDel)
reached through s.net instead of the hostNet() facade.
* vsockAgentBinary becomes a package-level function so both
*Daemon (doctor) and *VMService (preflight) call one entry
point instead of each owning a forwarder method (sketched below).
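
Schematically, that last change (the body and the binary name below
are stand-ins; only the ownership move is real):

```go
package daemon

import (
	"fmt"
	"os/exec"
)

// vsockAgentBinary is the single package-level entry point; *Daemon
// (doctor) and *VMService (preflight) both call it directly. The
// LookPath body and the "banger-agent" name are stand-ins here.
func vsockAgentBinary() (string, error) {
	path, err := exec.LookPath("banger-agent")
	if err != nil {
		return "", fmt.Errorf("vsock agent binary: %w", err)
	}
	return path, nil
}
```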
WorkspaceService's peer deps switch from eager method values to
closures — vmSvc() constructs VMService with WorkspaceService as a
peer, so resolving d.vmSvc().FindVM at construction time recursed
through workspaceSvc() → vmSvc(). Closures defer the lookup to call
time.
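
Concretely: a Go method value evaluates its receiver expression when
the value is built, while a closure defers it to call time. A trimmed
sketch of the fixed wiring (type shapes are illustrative):

```go
package daemon

import (
	"context"
	"sync"
)

// Trimmed shapes for illustration; the real types carry more state.
type VM struct{ Name string }

type VMService struct{ ws *WorkspaceService }

func (s *VMService) FindVM(ctx context.Context, name string) (*VM, error) {
	return &VM{Name: name}, nil
}

type WorkspaceService struct {
	// Peer dep held as a closure rather than a bound method value.
	findVM func(ctx context.Context, name string) (*VM, error)
}

type Daemon struct {
	vmOnce sync.Once
	vm     *VMService
	wsOnce sync.Once
	ws     *WorkspaceService
}

func (d *Daemon) vmSvc() *VMService {
	d.vmOnce.Do(func() {
		d.vm = &VMService{ws: d.workspaceSvc()}
	})
	return d.vm
}

func (d *Daemon) workspaceSvc() *WorkspaceService {
	d.wsOnce.Do(func() {
		d.ws = &WorkspaceService{
			// findVM: d.vmSvc().FindVM would run d.vmSvc() right
			// here, mid-construction, re-entering workspaceSvc().
			// The closure defers that lookup to the first call.
			findVM: func(ctx context.Context, name string) (*VM, error) {
				return d.vmSvc().FindVM(ctx, name)
			},
		}
	})
	return d.ws
}
```

By the time findVM first runs, both sync.Once inits have completed, so
the cycle never materializes at construction.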
Pure code motion: build + unit tests green, lint clean. No RPC
surface or lock-ordering changes.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
package daemon

import (
	"context"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"os"
	"path/filepath"
	"strconv"
	"testing"

	"banger/internal/guest"
	"banger/internal/model"
)

// TestEnsureWorkDiskClonesSeedImageAndResizes covers the seeded-image path:
// ensureWorkDisk clones the image's work-seed file into the VM dir, runs
// e2fsck then resize2fs on the clone, and leaves the disk at the spec'd size.
func TestEnsureWorkDiskClonesSeedImageAndResizes(t *testing.T) {
	t.Parallel()

	vmDir := t.TempDir()
	seedPath := filepath.Join(t.TempDir(), "root.work-seed.ext4")
	if err := os.WriteFile(seedPath, []byte("seed-data"), 0o644); err != nil {
		t.Fatalf("WriteFile(seed): %v", err)
	}
	workDiskPath := filepath.Join(vmDir, "root.ext4")
	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			{call: runnerCall{name: "e2fsck", args: []string{"-p", "-f", workDiskPath}}},
			{call: runnerCall{name: "resize2fs", args: []string{workDiskPath}}},
		},
	}
	d := &Daemon{runner: runner}
	vm := testVM("seeded", "image-seeded", "172.16.0.60")
	vm.Runtime.WorkDiskPath = workDiskPath
	vm.Spec.WorkDiskSizeBytes = 2 * 1024 * 1024
	image := testImage("image-seeded")
	image.WorkSeedPath = seedPath

	if _, err := d.vmSvc().ensureWorkDisk(context.Background(), &vm, image); err != nil {
		t.Fatalf("ensureWorkDisk: %v", err)
	}
	runner.assertExhausted()

	info, err := os.Stat(workDiskPath)
	if err != nil {
		t.Fatalf("Stat(work disk): %v", err)
	}
	if info.Size() != vm.Spec.WorkDiskSizeBytes {
		t.Fatalf("work disk size = %d, want %d", info.Size(), vm.Spec.WorkDiskSizeBytes)
	}
}

// TestTapPoolWarmsAndReusesIdleTap covers the warm-pool path: ensureTapPool
// creates tap-pool-0 and attaches it to the bridge, acquireTap hands out the
// pooled tap instead of the fallback name, and a released tap is reused on
// the next acquire with no further runner calls.
func TestTapPoolWarmsAndReusesIdleTap(t *testing.T) {
	t.Parallel()

	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			{call: runnerCall{name: "ip", args: []string{"link", "show", "tap-pool-0"}}, err: errors.New("exit status 1")},
			sudoStep("", nil, "ip", "tuntap", "add", "dev", "tap-pool-0", "mode", "tap", "user", strconv.Itoa(os.Getuid()), "group", strconv.Itoa(os.Getgid())),
			sudoStep("", nil, "ip", "link", "set", "tap-pool-0", "master", model.DefaultBridgeName),
			sudoStep("", nil, "ip", "link", "set", "tap-pool-0", "up"),
			sudoStep("", nil, "ip", "link", "set", model.DefaultBridgeName, "up"),
		},
	}
	d := &Daemon{
		runner: runner,
		config: model.DaemonConfig{
			BridgeName:  model.DefaultBridgeName,
			TapPoolSize: 1,
		},
		closing: make(chan struct{}),
	}

	d.hostNet().ensureTapPool(context.Background())
	tapName, err := d.hostNet().acquireTap(context.Background(), "tap-fallback")
	if err != nil {
		t.Fatalf("acquireTap: %v", err)
	}
	if tapName != "tap-pool-0" {
		t.Fatalf("tapName = %q, want tap-pool-0", tapName)
	}
	if err := d.hostNet().releaseTap(context.Background(), tapName); err != nil {
		t.Fatalf("releaseTap: %v", err)
	}
	tapName, err = d.hostNet().acquireTap(context.Background(), "tap-fallback")
	if err != nil {
		t.Fatalf("acquireTap second time: %v", err)
	}
	if tapName != "tap-pool-0" {
		t.Fatalf("tapName second = %q, want tap-pool-0", tapName)
	}
	runner.assertExhausted()
}

// TestEnsureAuthorizedKeyOnWorkDiskSkipsRepairForMatchingSeededFingerprint
// covers the fast path: when the image's seeded public-key fingerprint
// matches the daemon's SSH key, ensureAuthorizedKeyOnWorkDisk makes no
// runner calls at all (the scripted runner has no steps).
func TestEnsureAuthorizedKeyOnWorkDiskSkipsRepairForMatchingSeededFingerprint(t *testing.T) {
	t.Parallel()

	privateKey, err := rsa.GenerateKey(rand.Reader, 1024)
	if err != nil {
		t.Fatalf("GenerateKey: %v", err)
	}
	privateKeyPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
	})
	sshKeyPath := filepath.Join(t.TempDir(), "id_rsa")
	if err := os.WriteFile(sshKeyPath, privateKeyPEM, 0o600); err != nil {
		t.Fatalf("WriteFile(private key): %v", err)
	}
	fingerprint, err := guest.AuthorizedPublicKeyFingerprint(sshKeyPath)
	if err != nil {
		t.Fatalf("AuthorizedPublicKeyFingerprint: %v", err)
	}

	runner := &scriptedRunner{t: t}
	d := &Daemon{
		runner: runner,
		config: model.DaemonConfig{SSHKeyPath: sshKeyPath},
	}
	vm := testVM("seeded-fastpath", "image-seeded-fastpath", "172.16.0.62")
	vm.Runtime.WorkDiskPath = filepath.Join(t.TempDir(), "root.ext4")
	image := model.Image{SeededSSHPublicKeyFingerprint: fingerprint}

	if err := d.workspaceSvc().ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm, image, workDiskPreparation{ClonedFromSeed: true}); err != nil {
		t.Fatalf("ensureAuthorizedKeyOnWorkDisk: %v", err)
	}
	runner.assertExhausted()
}