Cleanup identity for kernel objects was split across two sources of
truth: vm.Runtime (DB-backed, durable) held paths and the guest IP,
but the TAP name lived only in the in-process handle cache plus the
best-effort handles.json scratch file next to the VM dir. Every other
cleanup-identifying datum has a fallback: the firecracker PID can be
rediscovered via `pgrep -f <apiSock>`, loop devices via losetup, the
dm name from the deterministic ShortID(vm.ID). The tap was the one
truly cache-only datum, allocated from a pool and not derivable.
That made NAT teardown fragile:
- daemon crash between `acquireTap` and the handles.json write
- handles.json corrupt on the next daemon start
- partial cleanup that already zeroed the cache
In any of those cases natCapability.Cleanup short-circuited
("skipping nat cleanup without runtime network handles") and the
per-VM POSTROUTING MASQUERADE rule and the two FORWARD rules keyed
off the tap would leak. The VM row still existed in the DB, so a
retry couldn't close the loop: the tap name was simply gone.
Fix: mirror TapDevice onto model.VMRuntime (serialised via the
existing runtime_json column, omitempty so existing rows upgrade
cleanly). Set it in startVMLocked right next to the
s.setVMHandles call that seeds the in-memory cache; clear it at
every post-cleanup reset site (stop normal path + stop stale
branch, kill normal path + kill stale branch, cleanupOnErr in
start, reconcile's stale-vm branch, the stats poller's auto-stop
path).
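Concretely, the mirrored field amounts to something like this (a
sketch: the json tag name and the neighbouring fields are assumptions,
only TapDevice and omitempty are the point):

    // model.VMRuntime (sketch): serialised into the existing
    // runtime_json column; omitempty means rows written before this
    // change deserialise unchanged and keep an empty TapDevice.
    type VMRuntime struct {
        // ...existing fields: paths, GuestIP, state, ...
        TapDevice string `json:"tap_device,omitempty"`
    }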
Fallbacks now cascade:
- natCapability.Cleanup: handles cache → Runtime.TapDevice
- cleanupRuntime (releaseTap): handles cache → Runtime.TapDevice
Both surfaces refuse gracefully (old behaviour) only when neither
source has a value, which really does mean "no tap was ever
allocated for this VM" rather than "we lost track of it."
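In code the cascade is just a two-step lookup in front of the existing
refusal; a sketch (the surrounding natCapability plumbing is
simplified, the log message is the real one):

    // Resolve the tap name for NAT teardown: prefer the live handle
    // cache, fall back to the durable copy persisted on vm.Runtime.
    tap := s.vmHandles(vm.ID).TapDevice
    if tap == "" {
        tap = vm.Runtime.TapDevice
    }
    if tap == "" {
        // Neither source has a value: no tap was ever allocated,
        // so there are no per-tap NAT rules to remove.
        s.logger.Info("skipping nat cleanup without runtime network handles")
        return nil
    }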
Test: TestNATCapabilityCleanup_FallsBackToRuntimeTapDevice clears
the handle cache, sets vm.Runtime.TapDevice, and asserts Cleanup
reaches the runner. That is exactly the scenario the review flagged
as a plausible leak, and exactly the code path that now prevents it.
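Roughly, the test looks like this (a sketch only: the harness helpers
and the capability accessor are hypothetical; the real assertion is
against the recorded runner commands):

    func TestNATCapabilityCleanup_FallsBackToRuntimeTapDevice(t *testing.T) {
        s, runner := newTestVMService(t) // hypothetical fake-runner harness
        vm := newTestVM(t)               // hypothetical fixture
        s.clearVMHandles(vm)             // cache empty, as after a daemon restart
        vm.Runtime.TapDevice = "tap-fc-abc123"

        // hypothetical accessor standing in for natCapability.Cleanup
        if err := s.natCleanup(context.Background(), vm); err != nil {
            t.Fatalf("nat cleanup: %v", err)
        }
        // hypothetical fake-runner assertion
        if !runner.SawArgContaining("tap-fc-abc123") {
            t.Fatal("expected teardown to key off the runtime tap name")
        }
    }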
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
package daemon

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"banger/internal/api"
	"banger/internal/firecracker"
	"banger/internal/imagepull"
	"banger/internal/model"
	"banger/internal/system"
)

func (s *VMService) StartVM(ctx context.Context, idOrName string) (model.VMRecord, error) {
	return s.withVMLockByRef(ctx, idOrName, func(vm model.VMRecord) (model.VMRecord, error) {
		image, err := s.store.GetImageByID(ctx, vm.ImageID)
		if err != nil {
			return model.VMRecord{}, err
		}
		if s.vmAlive(vm) {
			if s.logger != nil {
				s.logger.Info("vm already running", vmLogAttrs(vm)...)
			}
			return vm, nil
		}
		return s.startVMLocked(ctx, vm, image)
	})
}

func (s *VMService) startVMLocked(ctx context.Context, vm model.VMRecord, image model.Image) (_ model.VMRecord, err error) {
	op := s.beginOperation("vm.start", append(vmLogAttrs(vm), imageLogAttrs(image)...)...)
	defer func() {
		if err != nil {
			err = annotateLogPath(err, vm.Runtime.LogPath)
			op.fail(err, vmLogAttrs(vm)...)
			return
		}
		op.done(vmLogAttrs(vm)...)
	}()
	op.stage("preflight")
	vmCreateStage(ctx, "preflight", "checking host prerequisites")
	if err := s.validateStartPrereqs(ctx, vm, image); err != nil {
		return model.VMRecord{}, err
	}
	if err := os.MkdirAll(vm.Runtime.VMDir, 0o755); err != nil {
		return model.VMRecord{}, err
	}
	op.stage("cleanup_runtime")
	if err := s.cleanupRuntime(ctx, vm, true); err != nil {
		return model.VMRecord{}, err
	}
	s.clearVMHandles(vm)
	op.stage("bridge")
	if err := s.net.ensureBridge(ctx); err != nil {
		return model.VMRecord{}, err
	}
	op.stage("socket_dir")
	if err := s.net.ensureSocketDir(); err != nil {
		return model.VMRecord{}, err
	}

	shortID := system.ShortID(vm.ID)
	apiSock := filepath.Join(s.layout.RuntimeDir, "fc-"+shortID+".sock")
	dmName := "fc-rootfs-" + shortID
	tapName := "tap-fc-" + shortID
	if strings.TrimSpace(vm.Runtime.VSockPath) == "" {
		vm.Runtime.VSockPath = defaultVSockPath(s.layout.RuntimeDir, vm.ID)
	}
	if vm.Runtime.VSockCID == 0 {
		vm.Runtime.VSockCID, err = defaultVSockCID(vm.Runtime.GuestIP)
		if err != nil {
			return model.VMRecord{}, err
		}
	}
	if err := os.RemoveAll(apiSock); err != nil && !os.IsNotExist(err) {
		return model.VMRecord{}, err
	}
	if err := os.RemoveAll(vm.Runtime.VSockPath); err != nil && !os.IsNotExist(err) {
		return model.VMRecord{}, err
	}

	op.stage("system_overlay", "overlay_path", vm.Runtime.SystemOverlay)
	vmCreateStage(ctx, "prepare_rootfs", "preparing system overlay")
	if err := s.ensureSystemOverlay(ctx, &vm); err != nil {
		return model.VMRecord{}, err
	}

	op.stage("dm_snapshot", "dm_name", dmName)
	vmCreateStage(ctx, "prepare_rootfs", "creating root filesystem snapshot")
	snapHandles, err := s.net.createDMSnapshot(ctx, image.RootfsPath, vm.Runtime.SystemOverlay, dmName)
	if err != nil {
		return model.VMRecord{}, err
	}
	// Live handles are threaded through this function as a local and
	// pushed to the cache via setVMHandles once we have every piece.
	// The cache update must happen BEFORE any step that reads handles
	// back (e.g. cleanupRuntime via cleanupOnErr) — otherwise loops
	// and DM would leak on an early failure.
	live := model.VMHandles{
		BaseLoop: snapHandles.BaseLoop,
		COWLoop:  snapHandles.COWLoop,
		DMName:   snapHandles.DMName,
		DMDev:    snapHandles.DMDev,
	}
	s.setVMHandles(vm, live)

	vm.Runtime.APISockPath = apiSock
	vm.Runtime.State = model.VMStateRunning
	vm.State = model.VMStateRunning
	vm.Runtime.LastError = ""

	cleanupOnErr := func(err error) (model.VMRecord, error) {
		vm.State = model.VMStateError
		vm.Runtime.State = model.VMStateError
		vm.Runtime.LastError = err.Error()
		op.stage("cleanup_after_failure", "error", err.Error())
		if cleanupErr := s.cleanupRuntime(context.Background(), vm, true); cleanupErr != nil {
			err = errors.Join(err, cleanupErr)
		}
		vm.Runtime.TapDevice = ""
		s.clearVMHandles(vm)
		_ = s.store.UpsertVM(context.Background(), vm)
		return model.VMRecord{}, err
	}

	// On a restart the COW already holds writes from a previous guest
	// boot — stale free-inode / free-block counters, possibly unwritten
	// journal updates. e2fsprogs (e2cp/e2rm, used by patchRootOverlay)
	// refuses to touch the snapshot with "Inode bitmap checksum does
	// not match bitmap", which bubbles up as a "start failed" even
	// though the filesystem is kernel-valid. `e2fsck -fy` reconciles
	// the bitmaps and is a no-op on a fresh snapshot, so running it
	// unconditionally keeps the code path the same for first vs.
	// subsequent starts. Exit code 1 means "errors fixed" — we treat
	// that as success.
	op.stage("fsck_snapshot")
	if _, err := s.runner.RunSudo(ctx, "e2fsck", "-fy", live.DMDev); err != nil {
		// e2fsck exit codes: 0=clean, 1=errors corrected, 2=reboot
		// needed, 4+=uncorrected. -1 means the error wasn't an
		// exec.ExitError (e.g. command not found, ctx cancel).
		if code := system.ExitCode(err); code < 0 || code > 1 {
			return cleanupOnErr(fmt.Errorf("fsck snapshot: %w", err))
		}
	}

	op.stage("patch_root_overlay")
	vmCreateStage(ctx, "prepare_rootfs", "writing guest configuration")
	if err := s.patchRootOverlay(ctx, vm, image); err != nil {
		return cleanupOnErr(err)
	}
	op.stage("prepare_host_features")
	vmCreateStage(ctx, "prepare_host_features", "preparing host-side vm features")
	if err := s.capHooks.prepareHosts(ctx, &vm, image); err != nil {
		return cleanupOnErr(err)
	}
	op.stage("tap")
	tap, err := s.net.acquireTap(ctx, tapName)
	if err != nil {
		return cleanupOnErr(err)
	}
	live.TapDevice = tap
	s.setVMHandles(vm, live)
	// Mirror onto VM.Runtime so NAT teardown can recover the tap
	// name from the DB even if the handle cache is empty (daemon
	// crash + restart, corrupt handles.json).
	vm.Runtime.TapDevice = tap
	op.stage("metrics_file", "metrics_path", vm.Runtime.MetricsPath)
	if err := os.WriteFile(vm.Runtime.MetricsPath, nil, 0o644); err != nil {
		return cleanupOnErr(err)
	}

	op.stage("firecracker_binary")
	fcPath, err := s.net.firecrackerBinary()
	if err != nil {
		return cleanupOnErr(err)
	}
	op.stage("firecracker_launch", "log_path", vm.Runtime.LogPath, "metrics_path", vm.Runtime.MetricsPath)
	vmCreateStage(ctx, "boot_firecracker", "starting firecracker")
	kernelArgs := system.BuildBootArgs(vm.Name)
	if strings.TrimSpace(image.InitrdPath) == "" {
		// Direct-boot image (no initramfs) — the rootfs may be a
		// container image without /sbin/init or iproute2. Use:
		//   1. Kernel-level IP config via ip= cmdline (CONFIG_IP_PNP),
		//      so the network is up before init runs — no ip(8) needed.
		//   2. init= pointing at our universal wrapper which installs
		//      systemd+sshd on first boot if missing.
		kernelArgs = system.BuildBootArgsWithKernelIP(
			vm.Name, vm.Runtime.GuestIP, s.config.BridgeIP, s.config.DefaultDNS,
		) + " init=" + imagepull.FirstBootScriptPath
	}

	machineConfig := firecracker.MachineConfig{
		BinaryPath: fcPath,
		VMID: vm.ID,
		SocketPath: apiSock,
		LogPath: vm.Runtime.LogPath,
		MetricsPath: vm.Runtime.MetricsPath,
		KernelImagePath: image.KernelPath,
		InitrdPath: image.InitrdPath,
		KernelArgs: kernelArgs,
		Drives: []firecracker.DriveConfig{{
			ID: "rootfs",
			Path: live.DMDev,
			ReadOnly: false,
			IsRoot: true,
		}},
		TapDevice: tap,
		VSockPath: vm.Runtime.VSockPath,
		VSockCID: vm.Runtime.VSockCID,
		VCPUCount: vm.Spec.VCPUCount,
		MemoryMiB: vm.Spec.MemoryMiB,
		Logger: s.logger,
	}
	s.capHooks.contributeMachine(&machineConfig, vm, image)
	machine, err := firecracker.NewMachine(ctx, machineConfig)
	if err != nil {
		return cleanupOnErr(err)
	}
	if err := machine.Start(ctx); err != nil {
		// Use a fresh context: the request ctx may already be cancelled (client
		// disconnect), but we still need the PID so cleanupRuntime can kill the
		// Firecracker process that was spawned before the failure.
		live.PID = s.net.resolveFirecrackerPID(context.Background(), machine, apiSock)
		s.setVMHandles(vm, live)
		return cleanupOnErr(err)
	}
	live.PID = s.net.resolveFirecrackerPID(context.Background(), machine, apiSock)
	s.setVMHandles(vm, live)
	op.debugStage("firecracker_started", "pid", live.PID)
	op.stage("socket_access", "api_socket", apiSock)
	if err := s.net.ensureSocketAccess(ctx, apiSock, "firecracker api socket"); err != nil {
		return cleanupOnErr(err)
	}
	op.stage("vsock_access", "vsock_path", vm.Runtime.VSockPath, "vsock_cid", vm.Runtime.VSockCID)
	if err := s.net.ensureSocketAccess(ctx, vm.Runtime.VSockPath, "firecracker vsock socket"); err != nil {
		return cleanupOnErr(err)
	}
	vmCreateStage(ctx, "wait_vsock_agent", "waiting for guest vsock agent")
	if err := s.net.waitForGuestVSockAgent(ctx, vm.Runtime.VSockPath, vsockReadyWait); err != nil {
		return cleanupOnErr(err)
	}
	op.stage("post_start_features")
	vmCreateStage(ctx, "wait_guest_ready", "waiting for guest services")
	if err := s.capHooks.postStart(ctx, vm, image); err != nil {
		return cleanupOnErr(err)
	}
	system.TouchNow(&vm)
	op.stage("persist")
	vmCreateStage(ctx, "finalize", "saving vm state")
	if err := s.store.UpsertVM(ctx, vm); err != nil {
		return cleanupOnErr(err)
	}
	return vm, nil
}

func (s *VMService) StopVM(ctx context.Context, idOrName string) (model.VMRecord, error) {
	return s.withVMLockByRef(ctx, idOrName, func(vm model.VMRecord) (model.VMRecord, error) {
		return s.stopVMLocked(ctx, vm)
	})
}

func (s *VMService) stopVMLocked(ctx context.Context, current model.VMRecord) (vm model.VMRecord, err error) {
	vm = current
	op := s.beginOperation("vm.stop", "vm_ref", vm.ID)
	defer func() {
		if err != nil {
			op.fail(err, vmLogAttrs(vm)...)
			return
		}
		op.done(vmLogAttrs(vm)...)
	}()
	if !s.vmAlive(vm) {
		op.stage("cleanup_stale_runtime")
		if err := s.cleanupRuntime(ctx, vm, true); err != nil {
			return model.VMRecord{}, err
		}
		vm.State = model.VMStateStopped
		vm.Runtime.State = model.VMStateStopped
		vm.Runtime.TapDevice = ""
		s.clearVMHandles(vm)
		if err := s.store.UpsertVM(ctx, vm); err != nil {
			return model.VMRecord{}, err
		}
		return vm, nil
	}
	pid := s.vmHandles(vm.ID).PID
	op.stage("graceful_shutdown")
	if err := s.net.sendCtrlAltDel(ctx, vm.Runtime.APISockPath); err != nil {
		return model.VMRecord{}, err
	}
	op.stage("wait_for_exit", "pid", pid)
	if err := s.net.waitForExit(ctx, pid, vm.Runtime.APISockPath, gracefulShutdownWait); err != nil {
		if !errors.Is(err, errWaitForExitTimeout) {
			return model.VMRecord{}, err
		}
		op.stage("graceful_shutdown_timeout", "pid", pid)
	}
	op.stage("cleanup_runtime")
	if err := s.cleanupRuntime(ctx, vm, true); err != nil {
		return model.VMRecord{}, err
	}
	vm.State = model.VMStateStopped
	vm.Runtime.State = model.VMStateStopped
	vm.Runtime.TapDevice = ""
	s.clearVMHandles(vm)
	system.TouchNow(&vm)
	if err := s.store.UpsertVM(ctx, vm); err != nil {
		return model.VMRecord{}, err
	}
	return vm, nil
}

func (s *VMService) KillVM(ctx context.Context, params api.VMKillParams) (model.VMRecord, error) {
	return s.withVMLockByRef(ctx, params.IDOrName, func(vm model.VMRecord) (model.VMRecord, error) {
		return s.killVMLocked(ctx, vm, params.Signal)
	})
}

func (s *VMService) killVMLocked(ctx context.Context, current model.VMRecord, signalValue string) (vm model.VMRecord, err error) {
	vm = current
	op := s.beginOperation("vm.kill", "vm_ref", vm.ID, "signal", signalValue)
	defer func() {
		if err != nil {
			op.fail(err, vmLogAttrs(vm)...)
			return
		}
		op.done(vmLogAttrs(vm)...)
	}()
	if !s.vmAlive(vm) {
		op.stage("cleanup_stale_runtime")
		if err := s.cleanupRuntime(ctx, vm, true); err != nil {
			return model.VMRecord{}, err
		}
		vm.State = model.VMStateStopped
		vm.Runtime.State = model.VMStateStopped
		vm.Runtime.TapDevice = ""
		s.clearVMHandles(vm)
		if err := s.store.UpsertVM(ctx, vm); err != nil {
			return model.VMRecord{}, err
		}
		return vm, nil
	}

	signal := strings.TrimSpace(signalValue)
	if signal == "" {
		signal = "TERM"
	}
	pid := s.vmHandles(vm.ID).PID
	op.stage("send_signal", "pid", pid, "signal", signal)
	if _, err := s.runner.RunSudo(ctx, "kill", "-"+signal, strconv.Itoa(pid)); err != nil {
		return model.VMRecord{}, err
	}
	op.stage("wait_for_exit", "pid", pid)
	if err := s.net.waitForExit(ctx, pid, vm.Runtime.APISockPath, 30*time.Second); err != nil {
		if !errors.Is(err, errWaitForExitTimeout) {
			return model.VMRecord{}, err
		}
		op.stage("signal_timeout", "pid", pid, "signal", signal)
	}
	op.stage("cleanup_runtime")
	if err := s.cleanupRuntime(ctx, vm, true); err != nil {
		return model.VMRecord{}, err
	}
	vm.State = model.VMStateStopped
	vm.Runtime.State = model.VMStateStopped
	vm.Runtime.TapDevice = ""
	s.clearVMHandles(vm)
	system.TouchNow(&vm)
	if err := s.store.UpsertVM(ctx, vm); err != nil {
		return model.VMRecord{}, err
	}
	return vm, nil
}

func (s *VMService) RestartVM(ctx context.Context, idOrName string) (vm model.VMRecord, err error) {
	op := s.beginOperation("vm.restart", "vm_ref", idOrName)
	defer func() {
		if err != nil {
			op.fail(err, vmLogAttrs(vm)...)
			return
		}
		op.done(vmLogAttrs(vm)...)
	}()
	resolved, err := s.FindVM(ctx, idOrName)
	if err != nil {
		return model.VMRecord{}, err
	}
	return s.withVMLockByID(ctx, resolved.ID, func(vm model.VMRecord) (model.VMRecord, error) {
		op.stage("stop")
		vm, err = s.stopVMLocked(ctx, vm)
		if err != nil {
			return model.VMRecord{}, err
		}
		image, err := s.store.GetImageByID(ctx, vm.ImageID)
		if err != nil {
			return model.VMRecord{}, err
		}
		op.stage("start", vmLogAttrs(vm)...)
		return s.startVMLocked(ctx, vm, image)
	})
}

func (s *VMService) DeleteVM(ctx context.Context, idOrName string) (model.VMRecord, error) {
	return s.withVMLockByRef(ctx, idOrName, func(vm model.VMRecord) (model.VMRecord, error) {
		return s.deleteVMLocked(ctx, vm)
	})
}

func (s *VMService) deleteVMLocked(ctx context.Context, current model.VMRecord) (vm model.VMRecord, err error) {
	vm = current
	op := s.beginOperation("vm.delete", "vm_ref", vm.ID)
	defer func() {
		if err != nil {
			op.fail(err, vmLogAttrs(vm)...)
			return
		}
		op.done(vmLogAttrs(vm)...)
	}()
	if s.vmAlive(vm) {
		pid := s.vmHandles(vm.ID).PID
		op.stage("kill_running_vm", "pid", pid)
		_ = s.net.killVMProcess(ctx, pid)
	}
	op.stage("cleanup_runtime")
	if err := s.cleanupRuntime(ctx, vm, false); err != nil {
		return model.VMRecord{}, err
	}
	op.stage("delete_store_record")
	if err := s.store.DeleteVM(ctx, vm.ID); err != nil {
		return model.VMRecord{}, err
	}
	if vm.Runtime.VMDir != "" {
		op.stage("delete_vm_dir", "vm_dir", vm.Runtime.VMDir)
		if err := os.RemoveAll(vm.Runtime.VMDir); err != nil {
			return model.VMRecord{}, err
		}
	}
	// Drop any host-key pins. A future VM reusing this IP or name
	// would otherwise trip the TOFU mismatch branch in
	// TOFUHostKeyCallback and fail to connect.
	removeVMKnownHosts(s.layout.KnownHostsPath, vm, s.logger)
	return vm, nil
}