daemon split (4/5): extract *VMService
Phase 4 of the daemon god-struct refactor. VM lifecycle, create-op
registry, handle cache, disk provisioning, stats polling, ports
query, and the per-VM lock set all move off *Daemon onto *VMService.
Daemon keeps thin forwarders only for FindVM / TouchVM (dispatch
surface) and is otherwise out of the VM lifecycle. Lazy init via
d.vmSvc() mirrors the earlier services, so test literals like
`&Daemon{store: db, runner: r}` still get a functional service
without spelling one out.
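The lazy-init shape, roughly (a minimal sketch assuming a sync.Once
guard; the stub types and any field names beyond store/runner are
illustrative, not the real struct):

```go
package daemon

import "sync"

// Stub types for illustration; the real structs carry many more fields.
type Store struct{}
type Runner struct{}

type VMService struct {
	store  *Store
	runner *Runner
}

type Daemon struct {
	store  *Store
	runner *Runner

	vmOnce sync.Once
	vm     *VMService
}

// vmSvc builds the service on first use. Because sync.Once's zero value
// is ready to go, a bare literal like &Daemon{store: db, runner: r}
// gets a working *VMService without any explicit wiring.
func (d *Daemon) vmSvc() *VMService {
	d.vmOnce.Do(func() {
		d.vm = &VMService{store: d.store, runner: d.runner}
	})
	return d.vm
}
```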
Three small cleanups along the way:
* preflight helpers (validateStartPrereqs / addBaseStartPrereqs
/ addBaseStartCommandPrereqs / validateWorkDiskResizePrereqs)
move with the VM methods that call them.
* cleanupRuntime / rebuildDNS move to *VMService, with
HostNetwork primitives (findFirecrackerPID, cleanupDMSnapshot,
killVMProcess, releaseTap, waitForExit, sendCtrlAltDel)
reached through s.net instead of the hostNet() facade.
* vsockAgentBinary becomes a package-level function so both
*Daemon (doctor) and *VMService (preflight) call one entry
point instead of each owning a forwarder method.
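The forwarder-to-package-function move, sketched; the body below is a
hypothetical stand-in for illustration, not the real lookup:

```go
package daemon

import (
	"fmt"
	"os"
	"path/filepath"
)

// Previously both *Daemon and *VMService owned a one-line forwarder
// method over logic like this. As a package-level function there is a
// single entry point that doctor and preflight call directly. The path
// resolution here is a placeholder, not what the real code does.
func vsockAgentBinary() (string, error) {
	path := filepath.Join("/usr/libexec", "vsock-agent") // hypothetical location
	if _, err := os.Stat(path); err != nil {
		return "", fmt.Errorf("vsock agent binary: %w", err)
	}
	return path, nil
}
```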
WorkspaceService's peer deps switch from eager method values to
closures: vmSvc() constructs VMService with WorkspaceService as a
peer, so resolving d.vmSvc().FindVM at construction time would
recurse through workspaceSvc() → vmSvc(). Closures defer the lookup
to call time.
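A sketch of that fix, assuming FindVM's signature and stubbing
model.VMRecord as a local type:

```go
package daemon

import "context"

type VMRecord struct{ Name string } // stands in for model.VMRecord

type VMService struct{}

// FindVM's signature here is an assumption for illustration.
func (s *VMService) FindVM(ctx context.Context, idOrName string) (VMRecord, error) {
	return VMRecord{Name: idOrName}, nil
}

type Daemon struct{ vm *VMService }

func (d *Daemon) vmSvc() *VMService { return d.vm }

type WorkspaceService struct {
	findVM func(ctx context.Context, idOrName string) (VMRecord, error)
}

func (d *Daemon) workspaceSvc() *WorkspaceService {
	return &WorkspaceService{
		// The eager form (findVM: d.vmSvc().FindVM) would evaluate
		// d.vmSvc() right now, recursing back into this constructor.
		// The closure evaluates it only when findVM is called.
		findVM: func(ctx context.Context, idOrName string) (VMRecord, error) {
			return d.vmSvc().FindVM(ctx, idOrName)
		},
	}
}
```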
Pure code motion: build + unit tests green, lint clean. No RPC
surface or lock-ordering changes.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
parent c0d456e734
commit 466a7c30c4
23 changed files with 655 additions and 463 deletions
@@ -21,14 +21,14 @@ import (
 
 const httpProbeTimeout = 750 * time.Millisecond
 
-func (d *Daemon) PortsVM(ctx context.Context, idOrName string) (result api.VMPortsResult, err error) {
-	_, err = d.withVMLockByRef(ctx, idOrName, func(vm model.VMRecord) (model.VMRecord, error) {
+func (s *VMService) PortsVM(ctx context.Context, idOrName string) (result api.VMPortsResult, err error) {
+	_, err = s.withVMLockByRef(ctx, idOrName, func(vm model.VMRecord) (model.VMRecord, error) {
 		result.Name = vm.Name
 		result.DNSName = strings.TrimSpace(vm.Runtime.DNSName)
 		if result.DNSName == "" && strings.TrimSpace(vm.Name) != "" {
 			result.DNSName = vmdns.RecordName(vm.Name)
 		}
-		if !d.vmAlive(vm) {
+		if !s.vmAlive(vm) {
 			return model.VMRecord{}, fmt.Errorf("vm %s is not running", vm.Name)
 		}
 		if strings.TrimSpace(vm.Runtime.GuestIP) == "" {
@@ -40,12 +40,12 @@ func (d *Daemon) PortsVM(ctx context.Context, idOrName string) (result api.VMPor
 		if vm.Runtime.VSockCID == 0 {
 			return model.VMRecord{}, errors.New("vm has no vsock cid")
 		}
-		if err := d.hostNet().ensureSocketAccess(ctx, vm.Runtime.VSockPath, "firecracker vsock socket"); err != nil {
+		if err := s.net.ensureSocketAccess(ctx, vm.Runtime.VSockPath, "firecracker vsock socket"); err != nil {
 			return model.VMRecord{}, err
 		}
 		portsCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
 		defer cancel()
-		listeners, err := vsockagent.Ports(portsCtx, d.logger, vm.Runtime.VSockPath)
+		listeners, err := vsockagent.Ports(portsCtx, s.logger, vm.Runtime.VSockPath)
 		if err != nil {
 			return model.VMRecord{}, err
 		}