daemon split (4/5): extract *VMService service
Phase 4 of the daemon god-struct refactor. VM lifecycle, create-op
registry, handle cache, disk provisioning, stats polling, ports
query, and the per-VM lock set all move off *Daemon onto *VMService.
Daemon keeps thin forwarders only for FindVM / TouchVM (dispatch
surface) and is otherwise out of VM lifecycle. Lazy-init via
d.vmSvc() mirrors the earlier services so test literals like
`&Daemon{store: db, runner: r}` still get a functional service
without spelling one out.
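
For reference, the accessor is shaped roughly like the sketch below. Only
vmSvc, store, and runner come from this commit; the stand-in types, the
other field names, and the nil-check guard are illustrative, not the real
code.

    package daemon

    import "sync"

    // Stand-ins so the sketch compiles; the real types live elsewhere.
    type (
        Store  struct{}
        Runner struct{}
    )

    // VMService carries the VM-lifecycle state that moved off *Daemon in
    // this phase: per-VM lock set, handle cache, create-op registry, etc.
    type VMService struct {
        store  *Store
        runner *Runner
        locks  sync.Map // per-VM lock set, keyed by VM ID (illustrative)
    }

    type Daemon struct {
        store  *Store
        runner *Runner
        vm     *VMService
    }

    // vmSvc lazily constructs the service, so a bare test literal like
    // &Daemon{store: db, runner: r} still yields a working *VMService.
    // (Guarding against concurrent first use is omitted for brevity.)
    func (d *Daemon) vmSvc() *VMService {
        if d.vm == nil {
            d.vm = &VMService{store: d.store, runner: d.runner}
        }
        return d.vm
    }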
Three small cleanups along the way:
* preflight helpers (validateStartPrereqs / addBaseStartPrereqs
/ addBaseStartCommandPrereqs / validateWorkDiskResizePrereqs)
move with the VM methods that call them.
* cleanupRuntime / rebuildDNS move to *VMService, with
HostNetwork primitives (findFirecrackerPID, cleanupDMSnapshot,
killVMProcess, releaseTap, waitForExit, sendCtrlAltDel)
reached through s.net instead of the hostNet() facade.
* vsockAgentBinary becomes a package-level function so both
*Daemon (doctor) and *VMService (preflight) call one entry
point instead of each owning a forwarder method.
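
For that last point, the resulting shape is roughly the sketch below; the
path lookup and error handling are placeholders, and only the function name
and its two callers come from this commit.

    package daemon

    import (
        "fmt"
        "os"
    )

    // vsockAgentBinary is a plain package-level function, so *Daemon
    // (doctor) and *VMService (preflight) call the same entry point
    // instead of each owning a forwarder method. The lookup below is a
    // placeholder, not the daemon's real resolution logic.
    func vsockAgentBinary() (string, error) {
        const path = "/usr/libexec/vsock-agent" // illustrative location only
        if _, err := os.Stat(path); err != nil {
            return "", fmt.Errorf("vsock agent binary: %w", err)
        }
        return path, nil
    }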
WorkspaceService's peer deps switch from eager method values to
closures — vmSvc() constructs VMService with WorkspaceService as a
peer, so resolving d.vmSvc().FindVM at construction time recursed
through workspaceSvc() → vmSvc(). Closures defer the lookup to call
time.
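
A minimal sketch of the difference; beyond vmSvc, workspaceSvc, and FindVM,
the names here are made up for illustration.

    package daemon

    import "context"

    type VM struct{ Name string }

    // VMService needs WorkspaceService as a peer when it is constructed.
    type VMService struct {
        ws *WorkspaceService
    }

    func (s *VMService) FindVM(ctx context.Context, name string) (*VM, error) {
        return &VM{Name: name}, nil
    }

    // WorkspaceService reaches VMs only through this lookup function.
    type WorkspaceService struct {
        findVM func(ctx context.Context, name string) (*VM, error)
    }

    type Daemon struct {
        vm *VMService
        ws *WorkspaceService
    }

    func (d *Daemon) vmSvc() *VMService {
        if d.vm == nil {
            d.vm = &VMService{ws: d.workspaceSvc()}
        }
        return d.vm
    }

    func (d *Daemon) workspaceSvc() *WorkspaceService {
        if d.ws == nil {
            d.ws = &WorkspaceService{
                // Eager method value recurses while d.ws is still nil:
                //   findVM: d.vmSvc().FindVM
                //   (workspaceSvc -> vmSvc -> workspaceSvc -> ...)
                // A closure defers the vmSvc() lookup to call time:
                findVM: func(ctx context.Context, name string) (*VM, error) {
                    return d.vmSvc().FindVM(ctx, name)
                },
            }
        }
        return d.ws
    }

With the closure, constructing either service no longer drags the other
through construction first; the cycle is only walked at call time.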
Pure code motion: build + unit tests green, lint clean. No RPC
surface or lock-ordering changes.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
parent c0d456e734
commit 466a7c30c4
23 changed files with 655 additions and 463 deletions
@@ -26,21 +26,21 @@ var (
 )
 
 // rebuildDNS enumerates live VMs and republishes the DNS record set.
-// Lives on *Daemon (not HostNetwork) because "alive" is a VMService
-// concern that HostNetwork shouldn't need to reach into. Daemon
-// orchestrates: VM list from the store, alive filter, hand the
-// resulting map to HostNetwork.replaceDNS.
-func (d *Daemon) rebuildDNS(ctx context.Context) error {
-	if d.net == nil {
+// Lives on VMService because "alive" is a VM-state concern that
+// HostNetwork shouldn't need to reach into. VMService orchestrates:
+// VM list from the store, alive filter, hand the resulting map to
+// HostNetwork.replaceDNS.
+func (s *VMService) rebuildDNS(ctx context.Context) error {
+	if s.net == nil {
 		return nil
 	}
-	vms, err := d.store.ListVMs(ctx)
+	vms, err := s.store.ListVMs(ctx)
 	if err != nil {
 		return err
 	}
 	records := make(map[string]string)
 	for _, vm := range vms {
-		if !d.vmAlive(vm) {
+		if !s.vmAlive(vm) {
 			continue
 		}
 		if strings.TrimSpace(vm.Runtime.GuestIP) == "" {
@@ -48,7 +48,7 @@ func (d *Daemon) rebuildDNS(ctx context.Context) error {
 		}
 		records[vmDNSRecordName(vm.Name)] = vm.Runtime.GuestIP
 	}
-	return d.hostNet().replaceDNS(records)
+	return s.net.replaceDNS(records)
 }
 
 // vmDNSRecordName is a small indirection so the dns-record-name
@@ -59,36 +59,37 @@ func vmDNSRecordName(name string) string {
 }
 
 // cleanupRuntime tears down the host-side state for a VM: firecracker
-// process, DM snapshot, capabilities, tap, sockets. Stays on *Daemon
-// for now because it reaches into handles (VMService-owned) and
-// capabilities (still on Daemon). Phase 4 will move it to VMService.
-func (d *Daemon) cleanupRuntime(ctx context.Context, vm model.VMRecord, preserveDisks bool) error {
-	if d.logger != nil {
-		d.logger.Debug("cleanup runtime", append(vmLogAttrs(vm), "preserve_disks", preserveDisks)...)
+// process, DM snapshot, capabilities, tap, sockets. Lives on VMService
+// because it reaches into handles (VMService-owned); the capability
+// teardown goes through the capHooks seam to keep Daemon out of the
+// dependency chain.
+func (s *VMService) cleanupRuntime(ctx context.Context, vm model.VMRecord, preserveDisks bool) error {
+	if s.logger != nil {
+		s.logger.Debug("cleanup runtime", append(vmLogAttrs(vm), "preserve_disks", preserveDisks)...)
 	}
-	h := d.vmHandles(vm.ID)
+	h := s.vmHandles(vm.ID)
 	cleanupPID := h.PID
 	if vm.Runtime.APISockPath != "" {
-		if pid, err := d.hostNet().findFirecrackerPID(ctx, vm.Runtime.APISockPath); err == nil && pid > 0 {
+		if pid, err := s.net.findFirecrackerPID(ctx, vm.Runtime.APISockPath); err == nil && pid > 0 {
			cleanupPID = pid
 		}
 	}
 	if cleanupPID > 0 && system.ProcessRunning(cleanupPID, vm.Runtime.APISockPath) {
-		_ = d.hostNet().killVMProcess(ctx, cleanupPID)
-		if err := d.hostNet().waitForExit(ctx, cleanupPID, vm.Runtime.APISockPath, 30*time.Second); err != nil {
+		_ = s.net.killVMProcess(ctx, cleanupPID)
+		if err := s.net.waitForExit(ctx, cleanupPID, vm.Runtime.APISockPath, 30*time.Second); err != nil {
 			return err
 		}
 	}
-	snapshotErr := d.hostNet().cleanupDMSnapshot(ctx, dmSnapshotHandles{
+	snapshotErr := s.net.cleanupDMSnapshot(ctx, dmSnapshotHandles{
 		BaseLoop: h.BaseLoop,
 		COWLoop:  h.COWLoop,
 		DMName:   h.DMName,
 		DMDev:    h.DMDev,
 	})
-	featureErr := d.cleanupCapabilityState(ctx, vm)
+	featureErr := s.capHooks.cleanupState(ctx, vm)
 	var tapErr error
 	if h.TapDevice != "" {
-		tapErr = d.hostNet().releaseTap(ctx, h.TapDevice)
+		tapErr = s.net.releaseTap(ctx, h.TapDevice)
 	}
 	if vm.Runtime.APISockPath != "" {
 		_ = os.Remove(vm.Runtime.APISockPath)
@@ -99,14 +100,14 @@ func (d *Daemon) cleanupRuntime(ctx context.Context, vm model.VMRecord, preserve
 	// The handles are only meaningful while the kernel objects exist;
 	// dropping them here keeps the cache in sync with reality even
 	// when the caller forgets to call clearVMHandles explicitly.
-	d.clearVMHandles(vm)
+	s.clearVMHandles(vm)
 	if !preserveDisks && vm.Runtime.VMDir != "" {
 		return errors.Join(snapshotErr, featureErr, tapErr, os.RemoveAll(vm.Runtime.VMDir))
 	}
 	return errors.Join(snapshotErr, featureErr, tapErr)
 }
 
-func (d *Daemon) generateName(ctx context.Context) (string, error) {
+func (s *VMService) generateName(ctx context.Context) (string, error) {
 	_ = ctx
 	if name := strings.TrimSpace(namegen.Generate()); name != "" {
 		return name, nil