daemon split (3/5): extract *WorkspaceService service

Third phase of splitting the daemon god-struct. WorkspaceService now
owns workspace.prepare / workspace.export plus the ssh-key +
git-identity + arbitrary-file sync that runs as part of VM start's
prepare_work_disk capability hook. workspaceLocks (the per-VM tar
serialisation set) lives on the service.

workspace.go and vm_authsync.go had their receivers flipped from
*Daemon to *WorkspaceService. The workspaceInspectRepo /
workspaceImport test seams moved onto the service as fields.

Peer-service dependencies go through narrow function-typed fields:
vmResolver, aliveChecker, waitGuestSSH, dialGuest, imageResolver,
imageWorkSeed, withVMLockByRef, beginOperation. WorkspaceService
never touches VMService / HostNetwork / ImageService directly —
only the exact operations the Daemon hands it at construction.

Daemon lazy-init helper workspaceSvc() mirrors the Phase 1/2
pattern. Test literals still write `&Daemon{store: db, runner: r}`
and get a wired workspace service for free. Tests that override the
inspect/import seams (workspace_test.go, ~4 sites) assign them on
d.workspaceSvc() instead of on the daemon literal.

Dispatch in daemon.go: vm.workspace.prepare and vm.workspace.export
are now one-line forwards to d.workspaceSvc().

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
Thales Maciel 2026-04-20 20:42:31 -03:00
parent d7614a3b2b
commit c0d456e734
No known key found for this signature in database
GPG key ID: 33112E6833C34679
8 changed files with 202 additions and 94 deletions

View file

@ -96,7 +96,7 @@ func TestExportVMWorkspace_HappyPath(t *testing.T) {
upsertDaemonVM(t, ctx, d.store, vm)
d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
result, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
IDOrName: vm.Name,
GuestPath: "/root/repo",
})
@ -158,7 +158,7 @@ func TestExportVMWorkspace_WithBaseCommit(t *testing.T) {
d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
const prepareCommit = "abc1234deadbeef"
result, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
IDOrName: vm.Name,
BaseCommit: prepareCommit,
})
@ -204,7 +204,7 @@ func TestExportVMWorkspace_BaseCommitFallsBackToHEAD(t *testing.T) {
upsertDaemonVM(t, ctx, d.store, vm)
d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
result, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
IDOrName: vm.Name,
BaseCommit: "", // omitted
})
@ -244,7 +244,7 @@ func TestExportVMWorkspace_NoChanges(t *testing.T) {
upsertDaemonVM(t, ctx, d.store, vm)
d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
result, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
IDOrName: vm.Name,
})
if err != nil {
@ -284,7 +284,7 @@ func TestExportVMWorkspace_DefaultGuestPath(t *testing.T) {
d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
// GuestPath omitted — should default to /root/repo.
result, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
IDOrName: vm.Name,
})
if err != nil {
@ -307,7 +307,7 @@ func TestExportVMWorkspace_VMNotRunning(t *testing.T) {
upsertDaemonVM(t, ctx, d.store, vm)
// VM is stopped — no handle seed; vmAlive must return false.
_, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
_, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
IDOrName: vm.Name,
})
if err == nil || !strings.Contains(err.Error(), "not running") {
@ -343,7 +343,7 @@ func TestExportVMWorkspace_MultipleChangedFiles(t *testing.T) {
upsertDaemonVM(t, ctx, d.store, vm)
d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
result, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
IDOrName: vm.Name,
})
if err != nil {
@ -398,10 +398,10 @@ func TestPrepareVMWorkspace_ReleasesVMLockDuringGuestIO(t *testing.T) {
// Import blocks until we say go.
importStarted := make(chan struct{})
releaseImport := make(chan struct{})
d.workspaceInspectRepo = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
d.workspaceSvc().workspaceInspectRepo = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
return workspace.RepoSpec{RepoName: "fake", RepoRoot: "/tmp/fake"}, nil
}
d.workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
d.workspaceSvc().workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
close(importStarted)
<-releaseImport
return nil
@ -410,7 +410,7 @@ func TestPrepareVMWorkspace_ReleasesVMLockDuringGuestIO(t *testing.T) {
// Kick off prepare in a goroutine. It will block inside the import.
prepareDone := make(chan error, 1)
go func() {
_, err := d.PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
_, err := d.workspaceSvc().PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
IDOrName: vm.Name,
SourcePath: "/tmp/fake",
})
@ -480,7 +480,7 @@ func TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM(t *testing.T) {
upsertDaemonVM(t, ctx, d.store, vm)
d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
d.workspaceInspectRepo = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
d.workspaceSvc().workspaceInspectRepo = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
return workspace.RepoSpec{RepoName: "fake", RepoRoot: "/tmp/fake"}, nil
}
@ -488,7 +488,7 @@ func TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM(t *testing.T) {
var active int32
var maxObserved int32
release := make(chan struct{})
d.workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
d.workspaceSvc().workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
n := atomic.AddInt32(&active, 1)
for {
prev := atomic.LoadInt32(&maxObserved)
@ -505,7 +505,7 @@ func TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM(t *testing.T) {
done := make(chan error, n)
for i := 0; i < n; i++ {
go func() {
_, err := d.PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
_, err := d.workspaceSvc().PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
IDOrName: vm.Name,
SourcePath: "/tmp/fake",
})
@ -567,7 +567,7 @@ func TestExportVMWorkspace_DoesNotMutateRealIndex(t *testing.T) {
upsertDaemonVM(t, ctx, d.store, vm)
d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
if _, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{IDOrName: vm.Name}); err != nil {
if _, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{IDOrName: vm.Name}); err != nil {
t.Fatalf("ExportVMWorkspace: %v", err)
}