daemon split (6/n): extract wireServices + drop lazy service getters

Factor the service + capability wiring out of Daemon.Open() into
wireServices(d), an idempotent helper that constructs HostNetwork,
ImageService, WorkspaceService, and VMService from whatever
infrastructure (runner, store, config, layout, logger, closing) is
already set on d. Open() calls it once after filling the composition
root; tests that build &Daemon{...} literals call it to get a working
service graph after preinstalling stubs on any fields they want to
fake.
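
Rough shape of the helper (a sketch only — the per-service struct
fields below are illustrative, not the real constructor arguments; the
point is the fill-only-nil behaviour):

```go
// Fill only the service fields that are still nil, reusing whatever the
// caller (Open or a test) installed beforehand. Field names on the
// service literals are illustrative.
func wireServices(d *Daemon) {
	if d.net == nil {
		d.net = &HostNetwork{runner: d.runner, config: d.config, logger: d.logger}
	}
	if d.img == nil {
		d.img = &ImageService{store: d.store, layout: d.layout, runner: d.runner}
	}
	if d.ws == nil {
		d.ws = &WorkspaceService{runner: d.runner, config: d.config, logger: d.logger}
	}
	if d.vm == nil {
		// VMService holds peer pointers, so a stub set on d.img before
		// this call flows into the VM path too.
		d.vm = &VMService{store: d.store, runner: d.runner, net: d.net, img: d.img, ws: d.ws}
	}
}
```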

Drop the four lazy-init getters on *Daemon — d.hostNet(),
d.imageSvc(), d.workspaceSvc(), d.vmSvc() — whose sole purpose was
keeping test literals working. Every production call site now reads
d.net / d.img / d.ws / d.vm directly; the services are guaranteed
non-nil once Open returns. No behavior change.

Mechanical: all existing `d.xxxSvc()` calls (production + tests)
rewritten to field access; each `d := &Daemon{...}` in tests gets a
trailing wireServices(d) so the literal + wiring are side-by-side.
Tests that override a pre-built service (e.g. d.img = &ImageService{
bundleFetch: stub}) now set the override before wireServices so the
replacement propagates into VMService's peer pointer.
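
In sketch form (stub stands for whatever fake the test supplies), the
override pattern reads:

```go
d := &Daemon{store: db, runner: runner}
d.img = &ImageService{bundleFetch: stub} // install the fake first
wireServices(d)                          // wires the rest around the preset fake
// d.vm's image peer is now the stubbed service, not a freshly built one.
```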

Also nil-guard HostNetwork.stopVMDNS and d.store in Close() so
partially-initialised daemons (pre-reconcile open failure) still
tear down cleanly — same contract the old lazy getters provided.
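
Roughly (a sketch, assuming stopVMDNS is a stored stop func; the real
Close likely does more than this):

```go
func (d *Daemon) Close() error {
	// Guard the pieces a partially-opened daemon may never have set.
	if d.net != nil && d.net.stopVMDNS != nil {
		d.net.stopVMDNS()
	}
	if d.store != nil {
		return d.store.Close()
	}
	return nil
}
```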

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>

Thales Maciel 2026-04-21 15:55:28 -03:00
parent 0cfd8a5451
commit 16702bd5e1
22 changed files with 353 additions and 293 deletions

@ -35,6 +35,7 @@ func TestFindVMPrefixResolution(t *testing.T) {
ctx := context.Background()
db := openDaemonStore(t)
d := &Daemon{store: db}
wireServices(d)
for _, vm := range []model.VMRecord{
testVM("alpha", "image-alpha", "172.16.0.2"),
@ -71,6 +72,7 @@ func TestFindImagePrefixResolution(t *testing.T) {
ctx := context.Background()
db := openDaemonStore(t)
d := &Daemon{store: db}
wireServices(d)
for _, image := range []model.Image{
testImage("base"),
@ -149,6 +151,7 @@ func TestReconcileStopsStaleRunningVMAndClearsRuntimeHandles(t *testing.T) {
},
}
d := &Daemon{store: db, runner: runner}
wireServices(d)
if err := d.reconcile(ctx); err != nil {
t.Fatalf("reconcile: %v", err)
@ -167,7 +170,7 @@ func TestReconcileStopsStaleRunningVMAndClearsRuntimeHandles(t *testing.T) {
t.Fatalf("handles.json still present after reconcile: %v", err)
}
// And the in-memory cache must be empty.
if h, ok := d.vmSvc().handles.get(vm.ID); ok && !h.IsZero() {
if h, ok := d.vm.handles.get(vm.ID); ok && !h.IsZero() {
t.Fatalf("handle cache not cleared after reconcile: %+v", h)
}
}
@ -213,12 +216,13 @@ func TestRebuildDNSIncludesOnlyLiveRunningVMs(t *testing.T) {
})
d := &Daemon{store: db, net: &HostNetwork{vmDNS: server}}
wireServices(d)
// rebuildDNS reads the alive check from the handle cache. Seed
// the live VM with its real PID; leave the stale entry with a PID
// that definitely isn't running (999999 ≫ max PID on most hosts).
d.vmSvc().setVMHandlesInMemory(live.ID, model.VMHandles{PID: liveCmd.Process.Pid})
d.vmSvc().setVMHandlesInMemory(stale.ID, model.VMHandles{PID: 999999})
if err := d.vmSvc().rebuildDNS(ctx); err != nil {
d.vm.setVMHandlesInMemory(live.ID, model.VMHandles{PID: liveCmd.Process.Pid})
d.vm.setVMHandlesInMemory(stale.ID, model.VMHandles{PID: 999999})
if err := d.vm.rebuildDNS(ctx); err != nil {
t.Fatalf("rebuildDNS: %v", err)
}
@ -252,7 +256,8 @@ func TestSetVMRejectsStoppedOnlyChangesForRunningVM(t *testing.T) {
upsertDaemonVM(t, ctx, db, vm)
d := &Daemon{store: db}
d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: cmd.Process.Pid})
wireServices(d)
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: cmd.Process.Pid})
tests := []struct {
name string
params api.VMSetParams
@ -277,7 +282,7 @@ func TestSetVMRejectsStoppedOnlyChangesForRunningVM(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := d.vmSvc().SetVM(ctx, tt.params)
_, err := d.vm.SetVM(ctx, tt.params)
if err == nil || !strings.Contains(err.Error(), tt.want) {
t.Fatalf("SetVM(%s) error = %v, want %q", tt.name, err, tt.want)
}
@ -367,8 +372,9 @@ func TestHealthVMReturnsHealthyForRunningGuest(t *testing.T) {
},
}
d := &Daemon{store: db, runner: runner}
d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: handlePID})
result, err := d.vmSvc().HealthVM(ctx, vm.Name)
wireServices(d)
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: handlePID})
result, err := d.vm.HealthVM(ctx, vm.Name)
if err != nil {
t.Fatalf("HealthVM: %v", err)
}
@ -430,8 +436,9 @@ func TestPingVMAliasReturnsAliveForHealthyVM(t *testing.T) {
},
}
d := &Daemon{store: db, runner: runner}
d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})
result, err := d.vmSvc().PingVM(ctx, vm.Name)
wireServices(d)
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})
result, err := d.vm.PingVM(ctx, vm.Name)
if err != nil {
t.Fatalf("PingVM: %v", err)
}
@ -530,7 +537,8 @@ func TestHealthVMReturnsFalseForStoppedVM(t *testing.T) {
upsertDaemonVM(t, ctx, db, vm)
d := &Daemon{store: db}
result, err := d.vmSvc().HealthVM(ctx, vm.Name)
wireServices(d)
result, err := d.vm.HealthVM(ctx, vm.Name)
if err != nil {
t.Fatalf("HealthVM: %v", err)
}
@ -628,9 +636,10 @@ func TestPortsVMReturnsEnrichedPortsAndWebSchemes(t *testing.T) {
},
}
d := &Daemon{store: db, runner: runner}
d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})
wireServices(d)
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})
result, err := d.vmSvc().PortsVM(ctx, vm.Name)
result, err := d.vm.PortsVM(ctx, vm.Name)
if err != nil {
t.Fatalf("PortsVM: %v", err)
}
@ -677,7 +686,8 @@ func TestPortsVMReturnsErrorForStoppedVM(t *testing.T) {
upsertDaemonVM(t, ctx, db, vm)
d := &Daemon{store: db}
_, err := d.vmSvc().PortsVM(ctx, vm.Name)
wireServices(d)
_, err := d.vm.PortsVM(ctx, vm.Name)
if err == nil || !strings.Contains(err.Error(), "is not running") {
t.Fatalf("PortsVM error = %v, want not running", err)
}
@ -740,7 +750,8 @@ func TestSetVMDiskResizeFailsPreflightWhenToolsMissing(t *testing.T) {
t.Setenv("PATH", t.TempDir())
d := &Daemon{store: db}
_, err := d.vmSvc().SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, WorkDiskSize: "16G"})
wireServices(d)
_, err := d.vm.SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, WorkDiskSize: "16G"})
if err == nil || !strings.Contains(err.Error(), "work disk resize preflight failed") {
t.Fatalf("SetVM() error = %v, want preflight failure", err)
}
@ -768,6 +779,7 @@ func TestFlattenNestedWorkHomeCopiesEntriesIndividually(t *testing.T) {
},
}
d := &Daemon{runner: runner}
wireServices(d)
if err := flattenNestedWorkHome(context.Background(), d.runner, workMount); err != nil {
t.Fatalf("flattenNestedWorkHome: %v", err)
@ -808,10 +820,11 @@ func TestEnsureAuthorizedKeyOnWorkDiskRepairsNestedRootLayout(t *testing.T) {
runner: &filesystemRunner{t: t},
config: model.DaemonConfig{SSHKeyPath: sshKeyPath},
}
wireServices(d)
vm := testVM("seed-repair", "image-seed-repair", "172.16.0.61")
vm.Runtime.WorkDiskPath = workDiskDir
if err := d.workspaceSvc().ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm, model.Image{}, workDiskPreparation{}); err != nil {
if err := d.ws.ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm, model.Image{}, workDiskPreparation{}); err != nil {
t.Fatalf("ensureAuthorizedKeyOnWorkDisk: %v", err)
}
if _, err := os.Stat(filepath.Join(workDiskDir, "root")); !os.IsNotExist(err) {
@ -845,10 +858,11 @@ func TestEnsureGitIdentityOnWorkDiskCopiesHostGlobalIdentity(t *testing.T) {
workDiskDir := t.TempDir()
d := &Daemon{runner: &filesystemRunner{t: t}}
wireServices(d)
vm := testVM("git-identity", "image-git-identity", "172.16.0.67")
vm.Runtime.WorkDiskPath = workDiskDir
if err := d.workspaceSvc().ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
if err := d.ws.ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
t.Fatalf("ensureGitIdentityOnWorkDisk: %v", err)
}
@ -878,10 +892,11 @@ func TestEnsureGitIdentityOnWorkDiskPreservesExistingGuestConfig(t *testing.T) {
}
d := &Daemon{runner: &filesystemRunner{t: t}}
wireServices(d)
vm := testVM("git-identity-preserve", "image-git-identity", "172.16.0.68")
vm.Runtime.WorkDiskPath = workDiskDir
if err := d.workspaceSvc().ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
if err := d.ws.ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
t.Fatalf("ensureGitIdentityOnWorkDisk: %v", err)
}
@ -922,10 +937,11 @@ func TestEnsureGitIdentityOnWorkDiskWarnsAndSkipsWhenHostIdentityIncomplete(t *t
runner: &filesystemRunner{t: t},
logger: logger,
}
wireServices(d)
vm := testVM("git-identity-missing", "image-git-identity", "172.16.0.69")
vm.Runtime.WorkDiskPath = workDiskDir
if err := d.workspaceSvc().ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
if err := d.ws.ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
t.Fatalf("ensureGitIdentityOnWorkDisk: %v", err)
}
@ -950,8 +966,9 @@ func TestEnsureGitIdentityOnWorkDiskWarnsAndSkipsWhenHostIdentityIncomplete(t *t
func TestRunFileSyncNoOpWhenConfigEmpty(t *testing.T) {
d := &Daemon{runner: &filesystemRunner{t: t}}
wireServices(d)
vm := testVM("no-sync", "image", "172.16.0.70")
if err := d.workspaceSvc().runFileSync(context.Background(), &vm); err != nil {
if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
t.Fatalf("runFileSync: %v", err)
}
}
@ -977,9 +994,10 @@ func TestRunFileSyncCopiesFile(t *testing.T) {
},
},
}
wireServices(d)
vm := testVM("sync-file", "image", "172.16.0.71")
vm.Runtime.WorkDiskPath = workDisk
if err := d.workspaceSvc().runFileSync(context.Background(), &vm); err != nil {
if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
t.Fatalf("runFileSync: %v", err)
}
@ -1017,9 +1035,10 @@ func TestRunFileSyncRespectsCustomMode(t *testing.T) {
},
},
}
wireServices(d)
vm := testVM("sync-mode", "image", "172.16.0.72")
vm.Runtime.WorkDiskPath = workDisk
if err := d.workspaceSvc().runFileSync(context.Background(), &vm); err != nil {
if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
t.Fatalf("runFileSync: %v", err)
}
@ -1052,9 +1071,10 @@ func TestRunFileSyncSkipsMissingHostPath(t *testing.T) {
},
},
}
wireServices(d)
vm := testVM("sync-missing", "image", "172.16.0.73")
vm.Runtime.WorkDiskPath = workDisk
if err := d.workspaceSvc().runFileSync(context.Background(), &vm); err != nil {
if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
t.Fatalf("runFileSync: %v", err)
}
@ -1091,9 +1111,10 @@ func TestRunFileSyncOverwritesExistingGuestFile(t *testing.T) {
},
},
}
wireServices(d)
vm := testVM("sync-overwrite", "image", "172.16.0.74")
vm.Runtime.WorkDiskPath = workDisk
if err := d.workspaceSvc().runFileSync(context.Background(), &vm); err != nil {
if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
t.Fatalf("runFileSync: %v", err)
}
@ -1133,9 +1154,10 @@ func TestRunFileSyncCopiesDirectoryRecursively(t *testing.T) {
},
},
}
wireServices(d)
vm := testVM("sync-dir", "image", "172.16.0.75")
vm.Runtime.WorkDiskPath = workDisk
if err := d.workspaceSvc().runFileSync(context.Background(), &vm); err != nil {
if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
t.Fatalf("runFileSync: %v", err)
}
@ -1157,10 +1179,11 @@ func TestRunFileSyncCopiesDirectoryRecursively(t *testing.T) {
func TestCreateVMRejectsNonPositiveCPUAndMemory(t *testing.T) {
d := &Daemon{}
if _, err := d.vmSvc().CreateVM(context.Background(), api.VMCreateParams{VCPUCount: ptr(0)}); err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
wireServices(d)
if _, err := d.vm.CreateVM(context.Background(), api.VMCreateParams{VCPUCount: ptr(0)}); err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
t.Fatalf("CreateVM(vcpu=0) error = %v", err)
}
if _, err := d.vmSvc().CreateVM(context.Background(), api.VMCreateParams{MemoryMiB: ptr(-1)}); err == nil || !strings.Contains(err.Error(), "memory must be a positive integer") {
if _, err := d.vm.CreateVM(context.Background(), api.VMCreateParams{MemoryMiB: ptr(-1)}); err == nil || !strings.Contains(err.Error(), "memory must be a positive integer") {
t.Fatalf("CreateVM(memory=-1) error = %v", err)
}
}
@ -1187,8 +1210,9 @@ func TestBeginVMCreateCompletesAndReturnsStatus(t *testing.T) {
BridgeIP: model.DefaultBridgeIP,
},
}
wireServices(d)
op, err := d.vmSvc().BeginVMCreate(ctx, api.VMCreateParams{Name: "queued", NoStart: true})
op, err := d.vm.BeginVMCreate(ctx, api.VMCreateParams{Name: "queued", NoStart: true})
if err != nil {
t.Fatalf("BeginVMCreate: %v", err)
}
@ -1198,7 +1222,7 @@ func TestBeginVMCreateCompletesAndReturnsStatus(t *testing.T) {
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
status, err := d.vmSvc().VMCreateStatus(ctx, op.ID)
status, err := d.vm.VMCreateStatus(ctx, op.ID)
if err != nil {
t.Fatalf("VMCreateStatus: %v", err)
}
@ -1237,8 +1261,9 @@ func TestCreateVMUsesDefaultsWhenCPUAndMemoryOmitted(t *testing.T) {
BridgeIP: model.DefaultBridgeIP,
},
}
wireServices(d)
vm, err := d.vmSvc().CreateVM(ctx, api.VMCreateParams{Name: "defaults", ImageName: image.Name, NoStart: true})
vm, err := d.vm.CreateVM(ctx, api.VMCreateParams{Name: "defaults", ImageName: image.Name, NoStart: true})
if err != nil {
t.Fatalf("CreateVM: %v", err)
}
@ -1256,11 +1281,12 @@ func TestSetVMRejectsNonPositiveCPUAndMemory(t *testing.T) {
vm := testVM("validate", "image-validate", "172.16.0.13")
upsertDaemonVM(t, ctx, db, vm)
d := &Daemon{store: db}
wireServices(d)
if _, err := d.vmSvc().SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, VCPUCount: ptr(0)}); err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
if _, err := d.vm.SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, VCPUCount: ptr(0)}); err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
t.Fatalf("SetVM(vcpu=0) error = %v", err)
}
if _, err := d.vmSvc().SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, MemoryMiB: ptr(0)}); err == nil || !strings.Contains(err.Error(), "memory must be a positive integer") {
if _, err := d.vm.SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, MemoryMiB: ptr(0)}); err == nil || !strings.Contains(err.Error(), "memory must be a positive integer") {
t.Fatalf("SetVM(memory=0) error = %v", err)
}
}
@ -1281,7 +1307,8 @@ func TestCollectStatsIgnoresMalformedMetricsFile(t *testing.T) {
}
d := &Daemon{}
stats, err := d.vmSvc().collectStats(context.Background(), model.VMRecord{
wireServices(d)
stats, err := d.vm.collectStats(context.Background(), model.VMRecord{
Runtime: model.VMRuntime{
SystemOverlay: overlay,
WorkDiskPath: workDisk,
@ -1330,6 +1357,7 @@ func TestValidateStartPrereqsReportsNATUplinkFailure(t *testing.T) {
FirecrackerBin: firecrackerBin,
},
}
wireServices(d)
vm := testVM("nat", "image-nat", "172.16.0.12")
vm.Spec.NATEnabled = true
vm.Runtime.WorkDiskPath = filepath.Join(t.TempDir(), "missing-root.ext4")
@ -1337,7 +1365,7 @@ func TestValidateStartPrereqsReportsNATUplinkFailure(t *testing.T) {
image.RootfsPath = rootfsPath
image.KernelPath = kernelPath
err := d.vmSvc().validateStartPrereqs(ctx, vm, image)
err := d.vm.validateStartPrereqs(ctx, vm, image)
if err == nil || !strings.Contains(err.Error(), "uplink interface for NAT") {
t.Fatalf("validateStartPrereqs() error = %v, want NAT uplink failure", err)
}
@ -1365,13 +1393,14 @@ func TestCleanupRuntimeRediscoversLiveFirecrackerPID(t *testing.T) {
proc: fake,
}
d := &Daemon{runner: runner}
wireServices(d)
vm := testVM("cleanup", "image-cleanup", "172.16.0.22")
vm.Runtime.APISockPath = apiSock
// Seed a stale PID so cleanupRuntime's findFirecrackerPID pgrep
// fallback wins — it rediscovers fake.Process.Pid from apiSock.
d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid + 999})
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid + 999})
if err := d.vmSvc().cleanupRuntime(context.Background(), vm, true); err != nil {
if err := d.vm.cleanupRuntime(context.Background(), vm, true); err != nil {
t.Fatalf("cleanupRuntime returned error: %v", err)
}
runner.assertExhausted()
@ -1398,7 +1427,8 @@ func TestDeleteStoppedNATVMDoesNotFailWithoutTapDevice(t *testing.T) {
upsertDaemonVM(t, ctx, db, vm)
d := &Daemon{store: db}
deleted, err := d.vmSvc().DeleteVM(ctx, vm.Name)
wireServices(d)
deleted, err := d.vm.DeleteVM(ctx, vm.Name)
if err != nil {
t.Fatalf("DeleteVM: %v", err)
}
@ -1452,9 +1482,10 @@ func TestStopVMFallsBackToForcedCleanupAfterGracefulTimeout(t *testing.T) {
proc: fake,
}
d := &Daemon{store: db, runner: runner}
d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})
wireServices(d)
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})
got, err := d.vmSvc().StopVM(ctx, vm.ID)
got, err := d.vm.StopVM(ctx, vm.ID)
if err != nil {
t.Fatalf("StopVM returned error: %v", err)
}
@ -1465,7 +1496,7 @@ func TestStopVMFallsBackToForcedCleanupAfterGracefulTimeout(t *testing.T) {
// APISockPath + VSock paths are deterministic — they stay on the
// record for debugging and next-start reuse even after stop. The
// post-stop invariant is that the in-memory cache is empty.
if h, ok := d.vmSvc().handles.get(vm.ID); ok && !h.IsZero() {
if h, ok := d.vm.handles.get(vm.ID); ok && !h.IsZero() {
t.Fatalf("handle cache not cleared: %+v", h)
}
}
@ -1476,6 +1507,7 @@ func TestWithVMLockByIDSerializesSameVM(t *testing.T) {
vm := testVM("serial", "image-serial", "172.16.0.30")
upsertDaemonVM(t, ctx, db, vm)
d := &Daemon{store: db}
wireServices(d)
firstEntered := make(chan struct{})
releaseFirst := make(chan struct{})
@ -1483,7 +1515,7 @@ func TestWithVMLockByIDSerializesSameVM(t *testing.T) {
errCh := make(chan error, 2)
go func() {
_, err := d.vmSvc().withVMLockByID(ctx, vm.ID, func(vm model.VMRecord) (model.VMRecord, error) {
_, err := d.vm.withVMLockByID(ctx, vm.ID, func(vm model.VMRecord) (model.VMRecord, error) {
close(firstEntered)
<-releaseFirst
return vm, nil
@ -1498,7 +1530,7 @@ func TestWithVMLockByIDSerializesSameVM(t *testing.T) {
}
go func() {
_, err := d.vmSvc().withVMLockByID(ctx, vm.ID, func(vm model.VMRecord) (model.VMRecord, error) {
_, err := d.vm.withVMLockByID(ctx, vm.ID, func(vm model.VMRecord) (model.VMRecord, error) {
close(secondEntered)
return vm, nil
})
@ -1535,12 +1567,13 @@ func TestWithVMLockByIDAllowsDifferentVMsConcurrently(t *testing.T) {
upsertDaemonVM(t, ctx, db, vm)
}
d := &Daemon{store: db}
wireServices(d)
started := make(chan string, 2)
release := make(chan struct{})
errCh := make(chan error, 2)
run := func(id string) {
_, err := d.vmSvc().withVMLockByID(ctx, id, func(vm model.VMRecord) (model.VMRecord, error) {
_, err := d.vm.withVMLockByID(ctx, id, func(vm model.VMRecord) (model.VMRecord, error) {
started <- vm.ID
<-release
return vm, nil