banger/internal/daemon/workspace_service.go
Thales Maciel 466a7c30c4
daemon split (4/5): extract *VMService service
Phase 4 of the daemon god-struct refactor. VM lifecycle, create-op
registry, handle cache, disk provisioning, stats polling, ports
query, and the per-VM lock set all move off *Daemon onto *VMService.

Daemon keeps thin forwarders only for FindVM / TouchVM (dispatch
surface) and is otherwise out of VM lifecycle. Lazy-init via
d.vmSvc() mirrors the earlier services so test literals like
`&Daemon{store: db, runner: r}` still get a functional service
without spelling one out.
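
A minimal sketch of that getter shape, assuming vmSvc() mirrors the
workspaceSvc() getter in this file (the constructor and deps-struct
names below are by analogy with newWorkspaceService; they are not
shown in this commit):

    // Hypothetical sketch; only the d.vm field is confirmed by the
    // comments in this file.
    func (d *Daemon) vmSvc() *VMService {
        if d.vm != nil {
            return d.vm
        }
        d.vm = newVMService(vmServiceDeps{ /* wire deps from d */ })
        return d.vm
    }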

Three small cleanups along the way:

  * preflight helpers (validateStartPrereqs / addBaseStartPrereqs
    / addBaseStartCommandPrereqs / validateWorkDiskResizePrereqs)
    move with the VM methods that call them.
  * cleanupRuntime / rebuildDNS move to *VMService, with
    HostNetwork primitives (findFirecrackerPID, cleanupDMSnapshot,
    killVMProcess, releaseTap, waitForExit, sendCtrlAltDel)
    reached through s.net instead of the hostNet() facade.
  * vsockAgentBinary becomes a package-level function so both
    *Daemon (doctor) and *VMService (preflight) call one entry
    point instead of each owning a forwarder method (see the
    sketch after this list).
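
A rough sketch of that third cleanup (the signature is hypothetical;
it is not shown in this commit):

    // Before: a forwarder method each on *Daemon and *VMService.
    func (d *Daemon) vsockAgentBinary() string { /* ... */ }

    // After: one package-level entry point both services call.
    func vsockAgentBinary() string { /* ... */ }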

WorkspaceService's peer deps switch from eager method values to
closures — vmSvc() constructs VMService with WorkspaceService as a
peer, so resolving d.vmSvc().FindVM at construction time recursed
through workspaceSvc() → vmSvc(). Closures defer the lookup to call
time.
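
Concretely, for the vmResolver seam (the closure form is the one the
getter below now uses):

    // Eager method value: evaluates d.vmSvc() while WorkspaceService
    // is still being constructed, so workspaceSvc() re-enters itself.
    vmResolver: d.vmSvc().FindVM,

    // Closure: d.vmSvc() only runs at call time, after d.vm is populated.
    vmResolver: func(ctx context.Context, idOrName string) (model.VMRecord, error) {
        return d.vmSvc().FindVM(ctx, idOrName)
    },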

Pure code motion: build + unit tests green, lint clean. No RPC
surface or lock-ordering changes.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-20 20:57:05 -03:00

package daemon

import (
	"context"
	"log/slog"
	"time"

	ws "banger/internal/daemon/workspace"
	"banger/internal/model"
	"banger/internal/paths"
	"banger/internal/store"
	"banger/internal/system"
)

// WorkspaceService owns workspace.prepare / workspace.export plus the
// ssh-key + git-identity sync that runs as part of VM start's
// prepare_work_disk capability hook. The workspaceLocks set lives here
// so its scope (serialise concurrent tar imports on the same VM) is
// obvious at the field definition.
//
// The inspect/import test seams are per-service fields so tests inject
// fakes without mutating package-level state.
type WorkspaceService struct {
	runner system.CommandRunner
	logger *slog.Logger
	config model.DaemonConfig
	layout paths.Layout
	store  *store.Store

	// workspaceLocks serialises concurrent workspace.prepare /
	// workspace.export on the same VM. Separate from vmLocks so slow
	// guest I/O doesn't block lifecycle ops.
	workspaceLocks vmLockSet

	// Peer-service access via narrow function-typed dependencies.
	// WorkspaceService doesn't hold pointers to the full VMService or
	// HostNetwork; it only sees the exact operations it needs.
	vmResolver      func(ctx context.Context, idOrName string) (model.VMRecord, error)
	aliveChecker    func(vm model.VMRecord) bool
	waitGuestSSH    func(ctx context.Context, address string, interval time.Duration) error
	dialGuest       func(ctx context.Context, address string) (guestSSHClient, error)
	imageResolver   func(ctx context.Context, idOrName string) (model.Image, error)
	imageWorkSeed   func(ctx context.Context, image model.Image, fingerprint string) error
	withVMLockByRef func(ctx context.Context, idOrName string, fn func(model.VMRecord) (model.VMRecord, error)) (model.VMRecord, error)
	beginOperation  func(name string, attrs ...any) *operationLog

	// Test seams.
	workspaceInspectRepo func(ctx context.Context, sourcePath, branchName, fromRef string) (ws.RepoSpec, error)
	workspaceImport      func(ctx context.Context, client ws.GuestClient, spec ws.RepoSpec, guestPath string, mode model.WorkspacePrepareMode) error
}

type workspaceServiceDeps struct {
	runner system.CommandRunner
	logger *slog.Logger
	config model.DaemonConfig
	layout paths.Layout
	store  *store.Store

	vmResolver      func(ctx context.Context, idOrName string) (model.VMRecord, error)
	aliveChecker    func(vm model.VMRecord) bool
	waitGuestSSH    func(ctx context.Context, address string, interval time.Duration) error
	dialGuest       func(ctx context.Context, address string) (guestSSHClient, error)
	imageResolver   func(ctx context.Context, idOrName string) (model.Image, error)
	imageWorkSeed   func(ctx context.Context, image model.Image, fingerprint string) error
	withVMLockByRef func(ctx context.Context, idOrName string, fn func(model.VMRecord) (model.VMRecord, error)) (model.VMRecord, error)
	beginOperation  func(name string, attrs ...any) *operationLog
}

func newWorkspaceService(deps workspaceServiceDeps) *WorkspaceService {
	return &WorkspaceService{
		runner:          deps.runner,
		logger:          deps.logger,
		config:          deps.config,
		layout:          deps.layout,
		store:           deps.store,
		vmResolver:      deps.vmResolver,
		aliveChecker:    deps.aliveChecker,
		waitGuestSSH:    deps.waitGuestSSH,
		dialGuest:       deps.dialGuest,
		imageResolver:   deps.imageResolver,
		imageWorkSeed:   deps.imageWorkSeed,
		withVMLockByRef: deps.withVMLockByRef,
		beginOperation:  deps.beginOperation,
	}
}

// workspaceSvc is Daemon's lazy-init getter. Mirrors hostNet() /
// imageSvc() so test literals like &Daemon{store: db, runner: r, ...}
// still get a functional WorkspaceService without spelling one out.
func (d *Daemon) workspaceSvc() *WorkspaceService {
	if d.ws != nil {
		return d.ws
	}
	// Peer seams capture d by closure instead of pointing to
	// d.vmSvc() / d.imageSvc() directly. vmSvc() constructs VMService
	// with WorkspaceService as a peer, so resolving the peer service
	// eagerly here would recurse. Closures defer the lookup to call
	// time, by which point the cycle is broken because d.vm / d.img
	// are already populated.
	d.ws = newWorkspaceService(workspaceServiceDeps{
		runner: d.runner,
		logger: d.logger,
		config: d.config,
		layout: d.layout,
		store:  d.store,
		vmResolver: func(ctx context.Context, idOrName string) (model.VMRecord, error) {
			return d.vmSvc().FindVM(ctx, idOrName)
		},
		aliveChecker: func(vm model.VMRecord) bool {
			return d.vmSvc().vmAlive(vm)
		},
		waitGuestSSH: d.waitForGuestSSH,
		dialGuest:    d.dialGuest,
		imageResolver: func(ctx context.Context, idOrName string) (model.Image, error) {
			return d.FindImage(ctx, idOrName)
		},
		imageWorkSeed: func(ctx context.Context, image model.Image, fingerprint string) error {
			return d.imageSvc().refreshManagedWorkSeedFingerprint(ctx, image, fingerprint)
		},
		withVMLockByRef: func(ctx context.Context, idOrName string, fn func(model.VMRecord) (model.VMRecord, error)) (model.VMRecord, error) {
			return d.vmSvc().withVMLockByRef(ctx, idOrName, fn)
		},
		beginOperation: d.beginOperation,
	})
	return d.ws
}
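
For illustration, the test-literal pattern the getter above is designed
around (hypothetical test code, not part of this file):

    d := &Daemon{store: db, runner: r}
    svc := d.workspaceSvc() // built lazily; peer closures resolve at call time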