daemon split (6/n): extract wireServices + drop lazy service getters

Factor the service + capability wiring out of Daemon.Open() into
wireServices(d), an idempotent helper that constructs HostNetwork,
ImageService, WorkspaceService, and VMService from whatever
infrastructure (runner, store, config, layout, logger, closing) is
already set on d. Open() calls it once after filling the composition
root; tests that build &Daemon{...} literals call it to get a working
service graph, preinstalling stubs on the fields they want to fake.
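
A minimal sketch of that test-side pattern (the package name and the
fixture helpers newTestStore, testConfig, and testLayout are
illustrative stand-ins, not part of this change):

```go
package daemon

import "testing"

// Sketch only: build the literal with whatever infrastructure the test
// cares about, then let wireServices construct the service graph.
func TestWireServicesSketch(t *testing.T) {
	d := &Daemon{
		store:   newTestStore(t), // hypothetical fixture helpers
		config:  testConfig(),
		layout:  testLayout(t),
		closing: make(chan struct{}),
	}
	wireServices(d) // fills d.net, d.img, d.ws, d.vm from the fields above

	if d.net == nil || d.img == nil || d.ws == nil || d.vm == nil {
		t.Fatal("wireServices left a service nil")
	}
}
```
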
Drops the four lazy-init getters on *Daemon — d.hostNet(),
d.imageSvc(), d.workspaceSvc(), d.vmSvc() — whose sole purpose was
keeping test literals working. Every production call site now reads
d.net / d.img / d.ws / d.vm directly; the services are guaranteed
non-nil once Open returns. No behavior change.

Mechanical: all existing `d.xxxSvc()` calls (production + tests)
rewritten to field access; each `d := &Daemon{...}` in tests gets a
trailing wireServices(d) so the literal + wiring are side-by-side.
Tests that override a pre-built service (e.g. d.img = &ImageService{
bundleFetch: stub}) now set the override before wireServices so the
replacement propagates into VMService's peer pointer.
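
Building on the sketch above, the override-then-wire ordering looks
like this (bundleFetch comes from the commit; stubFetch is an
illustrative stand-in for whatever fake the test installs):

```go
d := &Daemon{store: newTestStore(t), config: testConfig(), layout: testLayout(t), closing: make(chan struct{})}
d.img = &ImageService{bundleFetch: stubFetch} // preinstall the fake before wiring
wireServices(d)                               // d.img stays as the stub; d.vm is built with it as its img peer
```
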
Also nil-guards HostNetwork.stopVMDNS and d.store in Close() so
partially-initialised daemons (pre-reconcile open failure) still
tear down cleanly — same contract the old lazy getters provided.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
parent 0cfd8a5451, commit 16702bd5e1
22 changed files with 353 additions and 293 deletions
@@ -81,14 +81,8 @@ func Open(ctx context.Context) (d *Daemon, err error) {
 		logger:  logger,
 		closing: closing,
 		pid:     os.Getpid(),
-		net: newHostNetwork(hostNetworkDeps{
-			runner:  runner,
-			logger:  logger,
-			config:  cfg,
-			layout:  layout,
-			closing: closing,
-		}),
 	}
+	wireServices(d)
 	// From here on, every failure path must run Close() so the host
 	// state we touched (DNS listener goroutine, resolvectl routing,
 	// SQLite handle, future side effects) gets unwound. Close is
@@ -103,7 +97,7 @@ func Open(ctx context.Context) (d *Daemon, err error) {
 
 	d.ensureVMSSHClientConfig()
 	d.logger.Info("daemon opened", "socket", layout.SocketPath, "state_dir", layout.StateDir, "log_level", cfg.LogLevel)
-	if err = d.hostNet().startVMDNS(vmdns.DefaultListenAddr); err != nil {
+	if err = d.net.startVMDNS(vmdns.DefaultListenAddr); err != nil {
 		d.logger.Error("daemon open failed", "stage", "start_vm_dns", "error", err.Error())
 		return nil, err
 	}
@@ -111,7 +105,7 @@
 		d.logger.Error("daemon open failed", "stage", "reconcile", "error", err.Error())
 		return nil, err
 	}
-	d.hostNet().ensureVMDNSResolverRouting(ctx)
+	d.net.ensureVMDNSResolverRouting(ctx)
 	// Seed HostNetwork's pool index from taps already claimed by VMs
 	// on disk so newly warmed pool entries don't collide with them.
 	if d.config.TapPoolSize > 0 && d.store != nil {
@@ -122,13 +116,13 @@
 		}
 		used := make([]string, 0, len(vms))
 		for _, vm := range vms {
-			if tap := d.vmSvc().vmHandles(vm.ID).TapDevice; tap != "" {
+			if tap := d.vm.vmHandles(vm.ID).TapDevice; tap != "" {
 				used = append(used, tap)
 			}
 		}
-		d.hostNet().initializeTapPool(used)
+		d.net.initializeTapPool(used)
 	}
-	go d.hostNet().ensureTapPool(context.Background())
+	go d.net.ensureTapPool(context.Background())
 	return d, nil
 }
 
@@ -142,7 +136,11 @@ func (d *Daemon) Close() error {
 		if d.listener != nil {
 			_ = d.listener.Close()
 		}
-		err = errors.Join(d.hostNet().clearVMDNSResolverRouting(context.Background()), d.hostNet().stopVMDNS(), d.store.Close())
+		var closeErr error
+		if d.store != nil {
+			closeErr = d.store.Close()
+		}
+		err = errors.Join(d.net.clearVMDNSResolverRouting(context.Background()), d.net.stopVMDNS(), closeErr)
 	})
 	return err
 }
@@ -282,28 +280,28 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		vm, err := d.vmSvc().CreateVM(ctx, params)
+		vm, err := d.vm.CreateVM(ctx, params)
 		return marshalResultOrError(api.VMShowResult{VM: vm}, err)
 	case "vm.create.begin":
 		params, err := rpc.DecodeParams[api.VMCreateParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		op, err := d.vmSvc().BeginVMCreate(ctx, params)
+		op, err := d.vm.BeginVMCreate(ctx, params)
 		return marshalResultOrError(api.VMCreateBeginResult{Operation: op}, err)
 	case "vm.create.status":
 		params, err := rpc.DecodeParams[api.VMCreateStatusParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		op, err := d.vmSvc().VMCreateStatus(ctx, params.ID)
+		op, err := d.vm.VMCreateStatus(ctx, params.ID)
 		return marshalResultOrError(api.VMCreateStatusResult{Operation: op}, err)
 	case "vm.create.cancel":
 		params, err := rpc.DecodeParams[api.VMCreateStatusParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		err = d.vmSvc().CancelVMCreate(ctx, params.ID)
+		err = d.vm.CancelVMCreate(ctx, params.ID)
 		return marshalResultOrError(api.Empty{}, err)
 	case "vm.list":
 		vms, err := d.store.ListVMs(ctx)
@@ -313,63 +311,63 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		vm, err := d.vmSvc().FindVM(ctx, params.IDOrName)
+		vm, err := d.vm.FindVM(ctx, params.IDOrName)
 		return marshalResultOrError(api.VMShowResult{VM: vm}, err)
 	case "vm.start":
 		params, err := rpc.DecodeParams[api.VMRefParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		vm, err := d.vmSvc().StartVM(ctx, params.IDOrName)
+		vm, err := d.vm.StartVM(ctx, params.IDOrName)
 		return marshalResultOrError(api.VMShowResult{VM: vm}, err)
 	case "vm.stop":
 		params, err := rpc.DecodeParams[api.VMRefParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		vm, err := d.vmSvc().StopVM(ctx, params.IDOrName)
+		vm, err := d.vm.StopVM(ctx, params.IDOrName)
 		return marshalResultOrError(api.VMShowResult{VM: vm}, err)
 	case "vm.kill":
 		params, err := rpc.DecodeParams[api.VMKillParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		vm, err := d.vmSvc().KillVM(ctx, params)
+		vm, err := d.vm.KillVM(ctx, params)
 		return marshalResultOrError(api.VMShowResult{VM: vm}, err)
 	case "vm.restart":
 		params, err := rpc.DecodeParams[api.VMRefParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		vm, err := d.vmSvc().RestartVM(ctx, params.IDOrName)
+		vm, err := d.vm.RestartVM(ctx, params.IDOrName)
 		return marshalResultOrError(api.VMShowResult{VM: vm}, err)
 	case "vm.delete":
 		params, err := rpc.DecodeParams[api.VMRefParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		vm, err := d.vmSvc().DeleteVM(ctx, params.IDOrName)
+		vm, err := d.vm.DeleteVM(ctx, params.IDOrName)
 		return marshalResultOrError(api.VMShowResult{VM: vm}, err)
 	case "vm.set":
 		params, err := rpc.DecodeParams[api.VMSetParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		vm, err := d.vmSvc().SetVM(ctx, params)
+		vm, err := d.vm.SetVM(ctx, params)
 		return marshalResultOrError(api.VMShowResult{VM: vm}, err)
 	case "vm.stats":
 		params, err := rpc.DecodeParams[api.VMRefParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		vm, stats, err := d.vmSvc().GetVMStats(ctx, params.IDOrName)
+		vm, stats, err := d.vm.GetVMStats(ctx, params.IDOrName)
 		return marshalResultOrError(api.VMStatsResult{VM: vm, Stats: stats}, err)
 	case "vm.logs":
 		params, err := rpc.DecodeParams[api.VMRefParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		vm, err := d.vmSvc().FindVM(ctx, params.IDOrName)
+		vm, err := d.vm.FindVM(ctx, params.IDOrName)
 		if err != nil {
 			return rpc.NewError("not_found", err.Error())
 		}
@@ -379,11 +377,11 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		vm, err := d.vmSvc().TouchVM(ctx, params.IDOrName)
+		vm, err := d.vm.TouchVM(ctx, params.IDOrName)
 		if err != nil {
 			return rpc.NewError("not_found", err.Error())
 		}
-		if !d.vmSvc().vmAlive(vm) {
+		if !d.vm.vmAlive(vm) {
 			return rpc.NewError("not_running", fmt.Sprintf("vm %s is not running", vm.Name))
 		}
 		return marshalResultOrError(api.VMSSHResult{Name: vm.Name, GuestIP: vm.Runtime.GuestIP}, nil)
@@ -392,35 +390,35 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		result, err := d.vmSvc().HealthVM(ctx, params.IDOrName)
+		result, err := d.vm.HealthVM(ctx, params.IDOrName)
 		return marshalResultOrError(result, err)
 	case "vm.ping":
 		params, err := rpc.DecodeParams[api.VMRefParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		result, err := d.vmSvc().PingVM(ctx, params.IDOrName)
+		result, err := d.vm.PingVM(ctx, params.IDOrName)
 		return marshalResultOrError(result, err)
 	case "vm.ports":
 		params, err := rpc.DecodeParams[api.VMRefParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		result, err := d.vmSvc().PortsVM(ctx, params.IDOrName)
+		result, err := d.vm.PortsVM(ctx, params.IDOrName)
 		return marshalResultOrError(result, err)
 	case "vm.workspace.prepare":
 		params, err := rpc.DecodeParams[api.VMWorkspacePrepareParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		workspace, err := d.workspaceSvc().PrepareVMWorkspace(ctx, params)
+		workspace, err := d.ws.PrepareVMWorkspace(ctx, params)
 		return marshalResultOrError(api.VMWorkspacePrepareResult{Workspace: workspace}, err)
 	case "vm.workspace.export":
 		params, err := rpc.DecodeParams[api.WorkspaceExportParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		result, err := d.workspaceSvc().ExportVMWorkspace(ctx, params)
+		result, err := d.ws.ExportVMWorkspace(ctx, params)
 		return marshalResultOrError(result, err)
 	case "image.list":
 		images, err := d.store.ListImages(ctx)
@@ -430,68 +428,68 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		image, err := d.imageSvc().FindImage(ctx, params.IDOrName)
+		image, err := d.img.FindImage(ctx, params.IDOrName)
 		return marshalResultOrError(api.ImageShowResult{Image: image}, err)
 	case "image.register":
 		params, err := rpc.DecodeParams[api.ImageRegisterParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		image, err := d.imageSvc().RegisterImage(ctx, params)
+		image, err := d.img.RegisterImage(ctx, params)
 		return marshalResultOrError(api.ImageShowResult{Image: image}, err)
 	case "image.promote":
 		params, err := rpc.DecodeParams[api.ImageRefParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		image, err := d.imageSvc().PromoteImage(ctx, params.IDOrName)
+		image, err := d.img.PromoteImage(ctx, params.IDOrName)
 		return marshalResultOrError(api.ImageShowResult{Image: image}, err)
 	case "image.delete":
 		params, err := rpc.DecodeParams[api.ImageRefParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		image, err := d.imageSvc().DeleteImage(ctx, params.IDOrName)
+		image, err := d.img.DeleteImage(ctx, params.IDOrName)
 		return marshalResultOrError(api.ImageShowResult{Image: image}, err)
 	case "image.pull":
 		params, err := rpc.DecodeParams[api.ImagePullParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		image, err := d.imageSvc().PullImage(ctx, params)
+		image, err := d.img.PullImage(ctx, params)
 		return marshalResultOrError(api.ImageShowResult{Image: image}, err)
 	case "kernel.list":
-		return marshalResultOrError(d.imageSvc().KernelList(ctx))
+		return marshalResultOrError(d.img.KernelList(ctx))
 	case "kernel.show":
 		params, err := rpc.DecodeParams[api.KernelRefParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		entry, err := d.imageSvc().KernelShow(ctx, params.Name)
+		entry, err := d.img.KernelShow(ctx, params.Name)
 		return marshalResultOrError(api.KernelShowResult{Entry: entry}, err)
 	case "kernel.delete":
 		params, err := rpc.DecodeParams[api.KernelRefParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		err = d.imageSvc().KernelDelete(ctx, params.Name)
+		err = d.img.KernelDelete(ctx, params.Name)
 		return marshalResultOrError(api.Empty{}, err)
 	case "kernel.import":
 		params, err := rpc.DecodeParams[api.KernelImportParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		entry, err := d.imageSvc().KernelImport(ctx, params)
+		entry, err := d.img.KernelImport(ctx, params)
 		return marshalResultOrError(api.KernelShowResult{Entry: entry}, err)
 	case "kernel.pull":
 		params, err := rpc.DecodeParams[api.KernelPullParams](req)
 		if err != nil {
 			return rpc.NewError("bad_request", err.Error())
 		}
-		entry, err := d.imageSvc().KernelPull(ctx, params)
+		entry, err := d.img.KernelPull(ctx, params)
 		return marshalResultOrError(api.KernelShowResult{Entry: entry}, err)
 	case "kernel.catalog":
-		return marshalResultOrError(d.imageSvc().KernelCatalog(ctx))
+		return marshalResultOrError(d.img.KernelCatalog(ctx))
 	default:
 		return rpc.NewError("unknown_method", req.Method)
 	}
@@ -507,14 +505,14 @@ func (d *Daemon) backgroundLoop() {
 		case <-d.closing:
 			return
 		case <-statsTicker.C:
-			if err := d.vmSvc().pollStats(context.Background()); err != nil && d.logger != nil {
+			if err := d.vm.pollStats(context.Background()); err != nil && d.logger != nil {
 				d.logger.Error("background stats poll failed", "error", err.Error())
 			}
 		case <-staleTicker.C:
-			if err := d.vmSvc().stopStaleVMs(context.Background()); err != nil && d.logger != nil {
+			if err := d.vm.stopStaleVMs(context.Background()); err != nil && d.logger != nil {
 				d.logger.Error("background stale sweep failed", "error", err.Error())
 			}
-			d.vmSvc().pruneVMCreateOperations(time.Now().Add(-10 * time.Minute))
+			d.vm.pruneVMCreateOperations(time.Now().Add(-10 * time.Minute))
 		}
 	}
 }
@@ -531,18 +529,18 @@ func (d *Daemon) reconcile(ctx context.Context) error {
 		return op.fail(err)
 	}
 	for _, vm := range vms {
-		if err := d.vmSvc().withVMLockByIDErr(ctx, vm.ID, func(vm model.VMRecord) error {
+		if err := d.vm.withVMLockByIDErr(ctx, vm.ID, func(vm model.VMRecord) error {
 			if vm.State != model.VMStateRunning {
 				// Belt-and-braces: a stopped VM should never have a
 				// scratch file or a cache entry. Clean up anything
 				// left by an ungraceful previous daemon crash.
-				d.vmSvc().clearVMHandles(vm)
+				d.vm.clearVMHandles(vm)
 				return nil
 			}
 			// Rebuild the in-memory handle cache by loading the per-VM
 			// scratch file and verifying the firecracker process is
 			// still alive.
-			h, alive, err := d.vmSvc().rediscoverHandles(ctx, vm)
+			h, alive, err := d.vm.rediscoverHandles(ctx, vm)
 			if err != nil && d.logger != nil {
 				d.logger.Warn("rediscover handles failed", "vm_id", vm.ID, "error", err.Error())
 			}
@@ -550,22 +548,22 @@ func (d *Daemon) reconcile(ctx context.Context) error {
 			// claimed. If alive, subsequent vmAlive() calls pass; if
 			// not, cleanupRuntime needs these handles to know which
 			// kernel resources (DM / loops / tap) to tear down.
-			d.vmSvc().setVMHandlesInMemory(vm.ID, h)
+			d.vm.setVMHandlesInMemory(vm.ID, h)
 			if alive {
 				return nil
 			}
 			op.stage("stale_vm", vmLogAttrs(vm)...)
-			_ = d.vmSvc().cleanupRuntime(ctx, vm, true)
+			_ = d.vm.cleanupRuntime(ctx, vm, true)
 			vm.State = model.VMStateStopped
 			vm.Runtime.State = model.VMStateStopped
-			d.vmSvc().clearVMHandles(vm)
+			d.vm.clearVMHandles(vm)
 			vm.UpdatedAt = model.Now()
 			return d.store.UpsertVM(ctx, vm)
 		}); err != nil {
 			return op.fail(err, "vm_id", vm.ID)
 		}
 	}
-	if err := d.vmSvc().rebuildDNS(ctx); err != nil {
+	if err := d.vm.rebuildDNS(ctx); err != nil {
 		return op.fail(err)
 	}
 	op.done()
@@ -576,18 +574,94 @@ func (d *Daemon) reconcile(ctx context.Context) error {
 // Dispatch code reads the facade directly; tests that pre-date the
 // service split keep compiling.
 func (d *Daemon) FindVM(ctx context.Context, idOrName string) (model.VMRecord, error) {
-	return d.vmSvc().FindVM(ctx, idOrName)
+	return d.vm.FindVM(ctx, idOrName)
 }
 
 // FindImage stays on Daemon as a thin forwarder to the image service
 // lookup so callers reading dispatch code see the obvious facade, and
 // tests that pre-date the service split still compile.
 func (d *Daemon) FindImage(ctx context.Context, idOrName string) (model.Image, error) {
-	return d.imageSvc().FindImage(ctx, idOrName)
+	return d.img.FindImage(ctx, idOrName)
 }
 
 func (d *Daemon) TouchVM(ctx context.Context, idOrName string) (model.VMRecord, error) {
-	return d.vmSvc().TouchVM(ctx, idOrName)
+	return d.vm.TouchVM(ctx, idOrName)
 }
 
+// wireServices populates the four focused services and their peer
+// references from the infrastructure already on d (runner, logger,
+// config, layout, store, closing, plus the SSH-client test seams).
+// Idempotent: each service is skipped if the field is already non-nil,
+// so tests can preinstall stubs for the services they want to fake and
+// let wireServices fill the rest. The peer-service closures on
+// WorkspaceService capture d rather than a direct *VMService pointer so
+// the ws↔vm construction order doesn't recurse: the closures read d.vm
+// at call time, by which point it is populated.
+func wireServices(d *Daemon) {
+	if d.net == nil {
+		d.net = newHostNetwork(hostNetworkDeps{
+			runner:  d.runner,
+			logger:  d.logger,
+			config:  d.config,
+			layout:  d.layout,
+			closing: d.closing,
+		})
+	}
+	if d.img == nil {
+		d.img = newImageService(imageServiceDeps{
+			runner: d.runner,
+			logger: d.logger,
+			config: d.config,
+			layout: d.layout,
+			store:  d.store,
+			beginOperation: func(name string, attrs ...any) *operationLog {
+				return d.beginOperation(name, attrs...)
+			},
+		})
+	}
+	if d.ws == nil {
+		d.ws = newWorkspaceService(workspaceServiceDeps{
+			runner: d.runner,
+			logger: d.logger,
+			config: d.config,
+			layout: d.layout,
+			store:  d.store,
+			vmResolver: func(ctx context.Context, idOrName string) (model.VMRecord, error) {
+				return d.vm.FindVM(ctx, idOrName)
+			},
+			aliveChecker: func(vm model.VMRecord) bool {
+				return d.vm.vmAlive(vm)
+			},
+			waitGuestSSH: d.waitForGuestSSH,
+			dialGuest:    d.dialGuest,
+			imageResolver: func(ctx context.Context, idOrName string) (model.Image, error) {
+				return d.FindImage(ctx, idOrName)
+			},
+			imageWorkSeed: func(ctx context.Context, image model.Image, fingerprint string) error {
+				return d.img.refreshManagedWorkSeedFingerprint(ctx, image, fingerprint)
+			},
+			withVMLockByRef: func(ctx context.Context, idOrName string, fn func(model.VMRecord) (model.VMRecord, error)) (model.VMRecord, error) {
+				return d.vm.withVMLockByRef(ctx, idOrName, fn)
+			},
+			beginOperation: d.beginOperation,
+		})
+	}
+	if d.vm == nil {
+		d.vm = newVMService(vmServiceDeps{
+			runner:          d.runner,
+			logger:          d.logger,
+			config:          d.config,
+			layout:          d.layout,
+			store:           d.store,
+			net:             d.net,
+			img:             d.img,
+			ws:              d.ws,
+			guestWaitForSSH: d.guestWaitForSSH,
+			guestDial:       d.guestDial,
+			capHooks:        d.buildCapabilityHooks(),
+			beginOperation:  d.beginOperation,
+		})
+	}
+}
+
 func marshalResultOrError(v any, err error) rpc.Response {