daemon split (6/n): extract wireServices + drop lazy service getters

Factor the service + capability wiring out of Daemon.Open() into
wireServices(d), an idempotent helper that constructs HostNetwork,
ImageService, WorkspaceService, and VMService from whatever
infrastructure (runner, store, config, layout, logger, closing) is
already set on d. Open() calls it once after filling the composition
root; tests that build &Daemon{...} literals call it to get a working
service graph, preinstalling stubs on the fields they want to fake.
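For orientation, a minimal sketch of the helper's shape. The nil-guards are what make it idempotent and let tests preinstall stubs; the constructor names and argument lists below are illustrative, not the real signatures:

    func wireServices(d *Daemon) {
        // Fill only fields that are still nil, so repeat calls are
        // no-ops and a preinstalled test stub survives.
        if d.net == nil {
            d.net = newHostNetwork(d.config, d.logger) // illustrative ctor
        }
        if d.img == nil {
            d.img = newImageService(d.store, d.layout, d.logger) // illustrative
        }
        if d.ws == nil {
            d.ws = newWorkspaceService(d.store, d.config, d.logger) // illustrative
        }
        if d.vm == nil {
            // Wired last: VMService holds peer pointers to the others.
            d.vm = newVMService(d.runner, d.net, d.img, d.ws, d.logger) // illustrative
        }
    }
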
Drops the four lazy-init getters on *Daemon — d.hostNet(),
d.imageSvc(), d.workspaceSvc(), d.vmSvc() — whose sole purpose was
keeping test literals working. Every production call site now reads
d.net / d.img / d.ws / d.vm directly; the services are guaranteed
non-nil once Open returns. No behavior change.
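Each removed getter had the usual lazy-init shape, roughly (reconstructed for illustration, not the verbatim code):

    func (d *Daemon) vmSvc() *VMService {
        // Construct on first touch so bare &Daemon{...} literals in
        // tests got a working service without explicit wiring.
        if d.vm == nil {
            d.vm = newVMService(d.runner, d.logger) // illustrative ctor
        }
        return d.vm
    }

With explicit wiring the nil check at every call site buys nothing, hence the direct field reads.
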
Mechanical: all existing `d.xxxSvc()` calls (production + tests)
rewritten to field access; each `d := &Daemon{...}` in tests gets a
trailing wireServices(d) so the literal + wiring are side-by-side.
Tests that override a pre-built service (e.g. d.img = &ImageService{
bundleFetch: stub}) now set the override before wireServices so the
replacement propagates into VMService's peer pointer.
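Schematically, the resulting setup reads as follows (the ImageService stub field is the one from the example above; the other fixture values stand in for whatever the test already built):

    d := &Daemon{
        store:  st,
        config: cfg,
        logger: logger,
        // Override installed before wiring: wireServices fills only
        // nil fields, so this stub survives and is the ImageService
        // that VMService receives as its peer.
        img: &ImageService{bundleFetch: stubFetch},
    }
    wireServices(d)
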
Also nil-guards HostNetwork.stopVMDNS and d.store in Close() so
partially-initialised daemons (pre-reconcile open failure) still
tear down cleanly — same contract the old lazy getters provided.
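Sketch of the Close-side guards, assuming a teardown of roughly this shape (surrounding cleanup steps elided):

    func (d *Daemon) Close() error {
        // A pre-reconcile Open failure can leave the DNS stopper
        // and the store unset; guard both before touching them.
        if d.net != nil && d.net.stopVMDNS != nil {
            d.net.stopVMDNS()
        }
        if d.store != nil {
            return d.store.Close()
        }
        return nil
    }
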
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
parent 0cfd8a5451
commit 16702bd5e1

22 changed files with 353 additions and 293 deletions

@@ -65,6 +65,7 @@ func newExportTestDaemonStore(t *testing.T, fake *exportGuestClient) *Daemon {
 		config: model.DaemonConfig{SSHKeyPath: filepath.Join(t.TempDir(), "id_ed25519")},
 		logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
 	}
+	wireServices(d)
 	d.guestDial = func(_ context.Context, _ string, _ string) (guestSSHClient, error) {
 		return fake, nil
 	}
@@ -94,9 +95,9 @@ func TestExportVMWorkspace_HappyPath(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
 
-	result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName:  vm.Name,
 		GuestPath: "/root/repo",
 	})
@@ -155,10 +156,10 @@ func TestExportVMWorkspace_WithBaseCommit(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
 
 	const prepareCommit = "abc1234deadbeef"
-	result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName:   vm.Name,
 		BaseCommit: prepareCommit,
 	})
@@ -202,9 +203,9 @@ func TestExportVMWorkspace_BaseCommitFallsBackToHEAD(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
 
-	result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName:   vm.Name,
 		BaseCommit: "", // omitted
 	})
@@ -242,9 +243,9 @@ func TestExportVMWorkspace_NoChanges(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
 
-	result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName: vm.Name,
 	})
 	if err != nil {
@@ -281,10 +282,10 @@ func TestExportVMWorkspace_DefaultGuestPath(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
 
 	// GuestPath omitted — should default to /root/repo.
-	result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName: vm.Name,
 	})
 	if err != nil {
@@ -307,7 +308,7 @@ func TestExportVMWorkspace_VMNotRunning(t *testing.T) {
 	upsertDaemonVM(t, ctx, d.store, vm)
 	// VM is stopped — no handle seed; vmAlive must return false.
 
-	_, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	_, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName: vm.Name,
 	})
 	if err == nil || !strings.Contains(err.Error(), "not running") {
@@ -341,9 +342,9 @@ func TestExportVMWorkspace_MultipleChangedFiles(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
 
-	result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName: vm.Name,
 	})
 	if err != nil {
@@ -386,22 +387,23 @@ func TestPrepareVMWorkspace_ReleasesVMLockDuringGuestIO(t *testing.T) {
 		config: model.DaemonConfig{SSHKeyPath: filepath.Join(t.TempDir(), "id_ed25519")},
 		logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
 	}
+	wireServices(d)
 	d.guestWaitForSSH = func(_ context.Context, _, _ string, _ time.Duration) error { return nil }
 	d.guestDial = func(_ context.Context, _, _ string) (guestSSHClient, error) {
 		return &exportGuestClient{}, nil
 	}
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
 
 	// Install the workspace seams on this daemon instance. InspectRepo
 	// returns a trivial spec so the real filesystem isn't touched;
 	// Import blocks until we say go.
 	importStarted := make(chan struct{})
 	releaseImport := make(chan struct{})
-	d.workspaceSvc().workspaceInspectRepo = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
+	d.ws.workspaceInspectRepo = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
 		return workspace.RepoSpec{RepoName: "fake", RepoRoot: "/tmp/fake"}, nil
 	}
-	d.workspaceSvc().workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
+	d.ws.workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
 		close(importStarted)
 		<-releaseImport
 		return nil
@@ -410,7 +412,7 @@ func TestPrepareVMWorkspace_ReleasesVMLockDuringGuestIO(t *testing.T) {
 	// Kick off prepare in a goroutine. It will block inside the import.
 	prepareDone := make(chan error, 1)
 	go func() {
-		_, err := d.workspaceSvc().PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
+		_, err := d.ws.PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
 			IDOrName:   vm.Name,
 			SourcePath: "/tmp/fake",
 		})
@@ -429,7 +431,7 @@ func TestPrepareVMWorkspace_ReleasesVMLockDuringGuestIO(t *testing.T) {
 	// import is in flight. Acquiring it must not wait.
 	acquired := make(chan struct{})
 	go func() {
-		unlock := d.vmSvc().lockVMID(vm.ID)
+		unlock := d.vm.lockVMID(vm.ID)
 		close(acquired)
 		unlock()
 	}()
@@ -473,14 +475,15 @@ func TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM(t *testing.T) {
 		config: model.DaemonConfig{SSHKeyPath: filepath.Join(t.TempDir(), "id_ed25519")},
 		logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
 	}
+	wireServices(d)
 	d.guestWaitForSSH = func(_ context.Context, _, _ string, _ time.Duration) error { return nil }
 	d.guestDial = func(_ context.Context, _, _ string) (guestSSHClient, error) {
 		return &exportGuestClient{}, nil
 	}
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
 
-	d.workspaceSvc().workspaceInspectRepo = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
+	d.ws.workspaceInspectRepo = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
 		return workspace.RepoSpec{RepoName: "fake", RepoRoot: "/tmp/fake"}, nil
 	}
 
@@ -488,7 +491,7 @@ func TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM(t *testing.T) {
 	var active int32
 	var maxObserved int32
 	release := make(chan struct{})
-	d.workspaceSvc().workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
+	d.ws.workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
 		n := atomic.AddInt32(&active, 1)
 		for {
 			prev := atomic.LoadInt32(&maxObserved)
@@ -505,7 +508,7 @@ func TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM(t *testing.T) {
 	done := make(chan error, n)
 	for i := 0; i < n; i++ {
 		go func() {
-			_, err := d.workspaceSvc().PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
+			_, err := d.ws.PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
				IDOrName:   vm.Name,
				SourcePath: "/tmp/fake",
			})
@@ -565,9 +568,9 @@ func TestExportVMWorkspace_DoesNotMutateRealIndex(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
 
-	if _, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{IDOrName: vm.Name}); err != nil {
+	if _, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{IDOrName: vm.Name}); err != nil {
 		t.Fatalf("ExportVMWorkspace: %v", err)
 	}
 