banger/internal/daemon/vm_lifecycle.go
Thales Maciel 05439d2325
daemon: cut vm stop latency
Three changes to stopVMLocked, biggest win first:

- Skip waitForExit on the SSH-success path. sync inside the guest
  already flushed root.ext4, so cleanupRuntime's SIGKILL is safe
  immediately. Saves up to gracefulShutdownWait (10s) per stop.
- Drop the SendCtrlAltDel + 10s wait fallback when SSH is
  unreachable. On Debian, ctrl+alt+del routes to reboot.target so
  FC never exits on it — the wait was pure latency.
- Shrink the SSH dial timeout 5s → 2s. A reachable guest dials in
  single-digit milliseconds; if it doesn't, fail fast and SIGKILL.

Worst-case (broken SSH) goes ~15s → ~2s + cleanup.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-03 17:51:22 -03:00

package daemon

import (
"context"
"errors"
"io"
"net"
"os"
"path/filepath"
"strings"
"time"
"banger/internal/api"
"banger/internal/guest"
"banger/internal/model"
"banger/internal/system"
)
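
// StartVM boots the VM identified by idOrName under the per-VM lock.
// It is a no-op for a VM that is already running: the current record
// is returned unchanged.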
func (s *VMService) StartVM(ctx context.Context, idOrName string) (model.VMRecord, error) {
	return s.withVMLockByRef(ctx, idOrName, func(vm model.VMRecord) (model.VMRecord, error) {
		image, err := s.store.GetImageByID(ctx, vm.ImageID)
		if err != nil {
			return model.VMRecord{}, err
		}
		if s.vmAlive(vm) {
			if s.logger != nil {
				s.logger.Info("vm already running", vmLogAttrs(vm)...)
			}
			return vm, nil
		}
		return s.startVMLocked(ctx, vm, image)
	})
}
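
// startVMLocked runs the start-step pipeline with the VM lock already
// held. On failure it persists an ERROR record so the failure is
// visible via `vm show`, then returns the step error.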
func (s *VMService) startVMLocked(ctx context.Context, vm model.VMRecord, image model.Image) (_ model.VMRecord, err error) {
	op := s.beginOperation(ctx, "vm.start", append(vmLogAttrs(vm), imageLogAttrs(image)...)...)
	defer func() {
		if err != nil {
			err = annotateLogPath(err, vm.Runtime.LogPath)
			op.fail(err, vmLogAttrs(vm)...)
			return
		}
		op.done(vmLogAttrs(vm)...)
	}()
	// Derive per-VM paths/names up front so every step sees the same
	// values. Shortening vm.ID mirrors how the pre-refactor inline
	// code did it.
	shortID := system.ShortID(vm.ID)
	apiSock := filepath.Join(s.layout.RuntimeDir, "fc-"+shortID+".sock")
	dmName := "fc-rootfs-" + shortID
	tapName := "tap-fc-" + shortID
	if strings.TrimSpace(vm.Runtime.VSockPath) == "" {
		vm.Runtime.VSockPath = defaultVSockPath(s.layout.RuntimeDir, vm.ID)
	}
	if vm.Runtime.VSockCID == 0 {
		vm.Runtime.VSockCID, err = defaultVSockCID(vm.Runtime.GuestIP)
		if err != nil {
			return model.VMRecord{}, err
		}
	}
	live := model.VMHandles{}
	sc := &startContext{
		vm:      &vm,
		image:   image,
		live:    &live,
		apiSock: apiSock,
		dmName:  dmName,
		tapName: tapName,
	}
	if runErr := s.runStartSteps(ctx, op, sc, s.buildStartSteps(op, sc)); runErr != nil {
		// The step driver already ran rollback in reverse for every
		// succeeded step. All that's left is to persist the ERROR
		// state so operators see the failure via `vm show`. Use a
		// fresh context in case the request ctx is cancelled — DB
		// writes past this point are recovery, not user-driven.
		//
		// The store check is for tests that construct a bare Daemon
		// without a DB; production always has s.store non-nil.
		vm.State = model.VMStateError
		vm.Runtime.State = model.VMStateError
		vm.Runtime.LastError = runErr.Error()
		clearRuntimeTeardownState(&vm)
		s.clearVMHandles(vm)
		if s.store != nil {
			// We're in the recovery path: the start has already
			// failed, and the user will see runErr. A persist
			// failure here only affects what 'banger vm show'
			// reads on the next call, so we keep returning runErr
			// — but a silent swallow leaves operators unable to
			// debug "why does the record still say running?". Log
			// at warn instead.
			if persistErr := s.store.UpsertVM(context.Background(), vm); persistErr != nil && s.logger != nil {
				s.logger.Warn("persist vm error state failed", append(vmLogAttrs(vm), "error", persistErr.Error())...)
			}
		}
		return model.VMRecord{}, runErr
	}
	return vm, nil
}
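
// StopVM stops the VM identified by idOrName under the per-VM lock:
// a guest-side sync over SSH, then SIGKILL via cleanupRuntime.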
func (s *VMService) StopVM(ctx context.Context, idOrName string) (model.VMRecord, error) {
	return s.withVMLockByRef(ctx, idOrName, func(vm model.VMRecord) (model.VMRecord, error) {
		return s.stopVMLocked(ctx, vm)
	})
}
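
// stopVMLocked stops a VM whose lock is already held. Dead VMs get a
// stale-runtime cleanup; live VMs take the sync-then-SIGKILL path
// described below.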
func (s *VMService) stopVMLocked(ctx context.Context, current model.VMRecord) (vm model.VMRecord, err error) {
	vm = current
	op := s.beginOperation(ctx, "vm.stop", "vm_ref", vm.ID)
	defer func() {
		if err != nil {
			op.fail(err, vmLogAttrs(vm)...)
			return
		}
		op.done(vmLogAttrs(vm)...)
	}()
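	// If the process is already gone there is nothing to signal:
	// tear down any stale host-side runtime state and persist the
	// stopped record.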
	if !s.vmAlive(vm) {
		op.stage("cleanup_stale_runtime")
		if err := s.cleanupRuntime(ctx, vm, true); err != nil {
			return model.VMRecord{}, err
		}
		vm.State = model.VMStateStopped
		vm.Runtime.State = model.VMStateStopped
		clearRuntimeTeardownState(&vm)
		s.clearVMHandles(vm)
		if err := s.store.UpsertVM(ctx, vm); err != nil {
			return model.VMRecord{}, err
		}
		return vm, nil
	}
	op.stage("graceful_shutdown")
	// Reach into the guest over SSH to force a sync + queue a poweroff.
	// The sync is what keeps stop() from losing data: every dirty page
	// the guest hasn't flushed through virtio-blk to the work disk is
	// written out before this RPC returns. Once sync completes,
	// root.ext4 on the host is consistent and cleanupRuntime's SIGKILL
	// is safe — there is no benefit to waiting for the guest's
	// poweroff.target to finish, so we skip waitForExit entirely.
	//
	// When SSH is unreachable (broken sshd, network down, drifted host
	// key) we drop straight to SIGKILL via cleanupRuntime. The
	// previous fallback was SendCtrlAltDel + a 10-second wait for FC
	// to exit, but on Debian ctrl+alt+del routes to reboot.target, so
	// FC never exits on it — the wait was always a wasted 10s. We pay
	// the data-loss cost we already paid before (after the timeout
	// expired the old code SIGKILLed too), but without the latency.
	if err := s.requestGuestPoweroff(ctx, vm); err != nil {
		if s.logger != nil {
			s.logger.Warn("guest ssh poweroff failed; SIGKILL without sync",
				append(vmLogAttrs(vm), "error", err.Error())...)
		}
	}
	op.stage("cleanup_runtime")
	if err := s.cleanupRuntime(ctx, vm, true); err != nil {
		return model.VMRecord{}, err
	}
	vm.State = model.VMStateStopped
	vm.Runtime.State = model.VMStateStopped
	clearRuntimeTeardownState(&vm)
	s.clearVMHandles(vm)
	system.TouchNow(&vm)
	if err := s.store.UpsertVM(ctx, vm); err != nil {
		return model.VMRecord{}, err
	}
	return vm, nil
}

// requestGuestPoweroff dials the guest over SSH and runs a sync +
// queues a poweroff job. The sync is the load-bearing piece — see the
// comment in stopVMLocked. Returns the dial / SSH error if the guest
// is unreachable; the caller treats that as a fallback signal.
//
// Bounded by a hard 2-second SSH-dial timeout. A reachable guest on
// the host bridge dials in single-digit milliseconds; if we haven't
// connected in 2s the guest is effectively gone, so we fail fast and
// let the caller SIGKILL rather than burning latency on a doomed dial.
func (s *VMService) requestGuestPoweroff(ctx context.Context, vm model.VMRecord) error {
	guestIP := strings.TrimSpace(vm.Runtime.GuestIP)
	if guestIP == "" {
		return errors.New("guest IP unknown")
	}
	dialCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()
	address := net.JoinHostPort(guestIP, "22")
	client, err := guest.Dial(dialCtx, address, s.config.SSHKeyPath, s.layout.KnownHostsPath)
	if err != nil {
		return err
	}
	defer client.Close()
	// `sync` runs synchronously and blocks RunScript until every dirty
	// page hits virtio-blk → root.ext4. That's the persistence
	// guarantee. The `systemctl --no-block poweroff` queues a job and
	// returns; whether poweroff.target completes before the SIGKILL
	// fallback fires is incidental — by then sync has already done
	// its work. The `|| /sbin/poweroff -f` is the last-ditch fallback
	// when systemd itself is wedged.
	const script = "sync; systemctl --no-block poweroff || /sbin/poweroff -f &"
	return client.RunScript(ctx, script, io.Discard)
}
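
// KillVM sends the requested signal (default TERM) to the VM process
// under the per-VM lock and waits up to 30s for it to exit before
// cleaning up.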
func (s *VMService) KillVM(ctx context.Context, params api.VMKillParams) (model.VMRecord, error) {
	return s.withVMLockByRef(ctx, params.IDOrName, func(vm model.VMRecord) (model.VMRecord, error) {
		return s.killVMLocked(ctx, vm, params.Signal)
	})
}
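
// killVMLocked signals a VM whose lock is already held. A wait-for-exit
// timeout is recorded as a stage and falls through to cleanup rather
// than failing the operation.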
func (s *VMService) killVMLocked(ctx context.Context, current model.VMRecord, signalValue string) (vm model.VMRecord, err error) {
	vm = current
	op := s.beginOperation(ctx, "vm.kill", "vm_ref", vm.ID, "signal", signalValue)
	defer func() {
		if err != nil {
			op.fail(err, vmLogAttrs(vm)...)
			return
		}
		op.done(vmLogAttrs(vm)...)
	}()
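	// Same stale-runtime cleanup as stopVMLocked when the process is
	// already gone.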
	if !s.vmAlive(vm) {
		op.stage("cleanup_stale_runtime")
		if err := s.cleanupRuntime(ctx, vm, true); err != nil {
			return model.VMRecord{}, err
		}
		vm.State = model.VMStateStopped
		vm.Runtime.State = model.VMStateStopped
		clearRuntimeTeardownState(&vm)
		s.clearVMHandles(vm)
		if err := s.store.UpsertVM(ctx, vm); err != nil {
			return model.VMRecord{}, err
		}
		return vm, nil
	}
	signal := strings.TrimSpace(signalValue)
	if signal == "" {
		signal = "TERM"
	}
	pid := s.vmHandles(vm.ID).PID
	op.stage("send_signal", "pid", pid, "signal", signal)
	if err := s.privOps().SignalProcess(ctx, pid, signal); err != nil {
		return model.VMRecord{}, err
	}
	op.stage("wait_for_exit", "pid", pid)
	if err := s.net.waitForExit(ctx, pid, vm.Runtime.APISockPath, 30*time.Second); err != nil {
		if !errors.Is(err, errWaitForExitTimeout) {
			return model.VMRecord{}, err
		}
		op.stage("signal_timeout", "pid", pid, "signal", signal)
	}
	op.stage("cleanup_runtime")
	if err := s.cleanupRuntime(ctx, vm, true); err != nil {
		return model.VMRecord{}, err
	}
	vm.State = model.VMStateStopped
	vm.Runtime.State = model.VMStateStopped
	clearRuntimeTeardownState(&vm)
	s.clearVMHandles(vm)
	system.TouchNow(&vm)
	if err := s.store.UpsertVM(ctx, vm); err != nil {
		return model.VMRecord{}, err
	}
	return vm, nil
}
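
// RestartVM stops and then starts the VM identified by idOrName,
// holding the VM lock across both halves so no other operation can
// interleave.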
func (s *VMService) RestartVM(ctx context.Context, idOrName string) (vm model.VMRecord, err error) {
	op := s.beginOperation(ctx, "vm.restart", "vm_ref", idOrName)
	defer func() {
		if err != nil {
			op.fail(err, vmLogAttrs(vm)...)
			return
		}
		op.done(vmLogAttrs(vm)...)
	}()
	resolved, err := s.FindVM(ctx, idOrName)
	if err != nil {
		return model.VMRecord{}, err
	}
	return s.withVMLockByID(ctx, resolved.ID, func(vm model.VMRecord) (model.VMRecord, error) {
		op.stage("stop")
		vm, err = s.stopVMLocked(ctx, vm)
		if err != nil {
			return model.VMRecord{}, err
		}
		image, err := s.store.GetImageByID(ctx, vm.ImageID)
		if err != nil {
			return model.VMRecord{}, err
		}
		op.stage("start", vmLogAttrs(vm)...)
		return s.startVMLocked(ctx, vm, image)
	})
}
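
// DeleteVM removes the VM identified by idOrName under the per-VM
// lock, killing it first if it is still running.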
func (s *VMService) DeleteVM(ctx context.Context, idOrName string) (model.VMRecord, error) {
	return s.withVMLockByRef(ctx, idOrName, func(vm model.VMRecord) (model.VMRecord, error) {
		return s.deleteVMLocked(ctx, vm)
	})
}
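
// deleteVMLocked deletes a VM whose lock is already held: best-effort
// kill, runtime cleanup, store delete, VM dir removal, and host-key
// pin removal.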
func (s *VMService) deleteVMLocked(ctx context.Context, current model.VMRecord) (vm model.VMRecord, err error) {
	vm = current
	op := s.beginOperation(ctx, "vm.delete", "vm_ref", vm.ID)
	defer func() {
		if err != nil {
			op.fail(err, vmLogAttrs(vm)...)
			return
		}
		op.done(vmLogAttrs(vm)...)
	}()
	if s.vmAlive(vm) {
		pid := s.vmHandles(vm.ID).PID
		op.stage("kill_running_vm", "pid", pid)
		// Best-effort: cleanupRuntime below tears the process down
		// regardless. A kill failure here only matters when it
		// surfaces something operators should see (permission
		// denied, etc.), so promote it from a silent _ to a Warn
		// without changing the control flow.
		if killErr := s.net.killVMProcess(ctx, pid); killErr != nil && s.logger != nil {
			s.logger.Warn("kill vm process during delete failed", append(vmLogAttrs(vm), "pid", pid, "error", killErr.Error())...)
		}
	}
	op.stage("cleanup_runtime")
	if err := s.cleanupRuntime(ctx, vm, false); err != nil {
		return model.VMRecord{}, err
	}
	clearRuntimeTeardownState(&vm)
	op.stage("delete_store_record")
	if err := s.store.DeleteVM(ctx, vm.ID); err != nil {
		return model.VMRecord{}, err
	}
	if vm.Runtime.VMDir != "" {
		op.stage("delete_vm_dir", "vm_dir", vm.Runtime.VMDir)
		if err := os.RemoveAll(vm.Runtime.VMDir); err != nil {
			return model.VMRecord{}, err
		}
	}
	// Drop any host-key pins. A future VM reusing this IP or name
	// would otherwise trip the TOFU mismatch branch in
	// TOFUHostKeyCallback and fail to connect.
	removeVMKnownHosts(s.layout.KnownHostsPath, vm, s.logger)
	return vm, nil
}