package daemon

import (
	"context"
	"errors"
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"

	"banger/internal/daemon/fcproc"
	"banger/internal/model"
	"banger/internal/namegen"
	"banger/internal/system"
)

// Cross-service constants. Kept in vm.go because both lifecycle
// (VMService) and networking (HostNetwork) reference them; moving
// them to either owner would read as a layering violation.
var (
	errWaitForExitTimeout = fcproc.ErrWaitForExitTimeout
	gracefulShutdownWait  = 10 * time.Second
	vsockReadyWait        = 30 * time.Second
	vsockReadyPoll        = 200 * time.Millisecond
)

// rebuildDNS enumerates live VMs and republishes the DNS record set.
// Lives on VMService because "alive" is a VM-state concern that
// HostNetwork shouldn't need to reach into. VMService orchestrates:
// VM list from the store, alive filter, hand the resulting map to
// HostNetwork.replaceDNS. Returns the store or replaceDNS error;
// a nil network is treated as "nothing to publish".
func (s *VMService) rebuildDNS(ctx context.Context) error {
	if s.net == nil {
		return nil
	}
	vms, err := s.store.ListVMs(ctx)
	if err != nil {
		return err
	}
	// Pre-size for the common case where most VMs are alive.
	records := make(map[string]string, len(vms))
	for _, vm := range vms {
		if !s.vmAlive(vm) {
			continue
		}
		ip := strings.TrimSpace(vm.Runtime.GuestIP)
		if ip == "" {
			continue
		}
		// Publish the trimmed IP. Previously the raw GuestIP was stored
		// while only the emptiness test trimmed it, so stray whitespace
		// could leak into the DNS record value.
		records[vmDNSRecordName(vm.Name)] = ip
	}
	return s.net.replaceDNS(records)
}

// vmDNSRecordName is a small indirection so the dns-record-name
// helper is not directly pulled into every file that used to import
// vmdns for this one call. Equivalent to vmdns.RecordName: the name
// is trimmed, lowercased, and suffixed with ".vm".
func vmDNSRecordName(name string) string {
	return strings.ToLower(strings.TrimSpace(name)) + ".vm"
}

// cleanupRuntime tears down the host-side state for a VM: firecracker
// process, DM snapshot, capabilities, tap, sockets. Lives on VMService
// because it reaches into handles (VMService-owned); the capability
// teardown goes through the capHooks seam to keep Daemon out of the
// dependency chain.
func (s *VMService) cleanupRuntime(ctx context.Context, vm model.VMRecord, preserveDisks bool) error { if s.logger != nil { s.logger.Debug("cleanup runtime", append(vmLogAttrs(vm), "preserve_disks", preserveDisks)...) } h := s.vmHandles(vm.ID) cleanupPID := h.PID if vm.Runtime.APISockPath != "" { if pid, err := s.net.findFirecrackerPID(ctx, vm.Runtime.APISockPath); err == nil && pid > 0 { cleanupPID = pid } } if cleanupPID > 0 && system.ProcessRunning(cleanupPID, vm.Runtime.APISockPath) { _ = s.net.killVMProcess(ctx, cleanupPID) if err := s.net.waitForExit(ctx, cleanupPID, vm.Runtime.APISockPath, 30*time.Second); err != nil { return err } } snapshotErr := s.net.cleanupDMSnapshot(ctx, dmSnapshotHandles{ BaseLoop: h.BaseLoop, COWLoop: h.COWLoop, DMName: h.DMName, DMDev: h.DMDev, }) featureErr := s.capHooks.cleanupState(ctx, vm) var tapErr error if h.TapDevice != "" { tapErr = s.net.releaseTap(ctx, h.TapDevice) } if vm.Runtime.APISockPath != "" { _ = os.Remove(vm.Runtime.APISockPath) } if vm.Runtime.VSockPath != "" { _ = os.Remove(vm.Runtime.VSockPath) } // The handles are only meaningful while the kernel objects exist; // dropping them here keeps the cache in sync with reality even // when the caller forgets to call clearVMHandles explicitly. 
s.clearVMHandles(vm) if !preserveDisks && vm.Runtime.VMDir != "" { return errors.Join(snapshotErr, featureErr, tapErr, os.RemoveAll(vm.Runtime.VMDir)) } return errors.Join(snapshotErr, featureErr, tapErr) } func (s *VMService) generateName(ctx context.Context) (string, error) { _ = ctx if name := strings.TrimSpace(namegen.Generate()); name != "" { return name, nil } return "vm-" + strconv.FormatInt(time.Now().Unix(), 10), nil } func bridgePrefix(bridgeIP string) string { parts := strings.Split(bridgeIP, ".") if len(parts) < 3 { return bridgeIP } return strings.Join(parts[:3], ".") } func optionalIntOrDefault(value *int, fallback int) int { if value != nil { return *value } return fallback } func validateOptionalPositiveSetting(label string, value *int) error { if value == nil { return nil } if *value <= 0 { return fmt.Errorf("%s must be a positive integer", label) } return nil }