Add regression coverage for VM failure paths

Dangerous lifecycle, store, system, and RPC paths still had little or no automated test coverage, and the live smoke harness failed opaquely when guest boot timing drifted. This adds targeted unit coverage for store allocation and decode failures, system helper failure ordering and cleanup, RPC error handling, and daemon lookup/reconcile/editing/stats/preflight edge cases.

It also makes verify.sh wait for daemon-observable VM readiness before SSH, reuse a bounded boot deadline for the SSH phase, and dump VM metadata, logs, tap state, socket state, and NAT rules on timeout, so host-level failures are diagnosable instead of surfacing only as "connection refused".

Validation: go test ./..., go test ./... -cover, bash -n verify.sh. No live ./verify.sh boot was run in this environment.
This commit is contained in:
Thales Maciel 2026-03-16 15:46:54 -03:00
parent fcedacba5c
commit 5018bc6170
No known key found for this signature in database
GPG key ID: 33112E6833C34679
5 changed files with 1229 additions and 16 deletions

394
internal/daemon/vm_test.go Normal file
View file

@ -0,0 +1,394 @@
package daemon
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"banger/internal/api"
"banger/internal/model"
"banger/internal/store"
)
// TestFindVMPrefixResolution covers VM lookup by full name, by unique
// prefix, by ambiguous prefix (error), and by unknown name (error).
func TestFindVMPrefixResolution(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	st := openDaemonStore(t)
	daemon := &Daemon{store: st}

	seed := []model.VMRecord{
		testVM("alpha", "image-alpha", "172.16.0.2"),
		testVM("alpine", "image-alpha", "172.16.0.3"),
		testVM("bravo", "image-alpha", "172.16.0.4"),
	}
	for _, rec := range seed {
		if err := st.UpsertVM(ctx, rec); err != nil {
			t.Fatalf("UpsertVM(%s): %v", rec.Name, err)
		}
	}

	// A full name resolves directly.
	vm, err := daemon.FindVM(ctx, "alpha")
	if err != nil || vm.Name != "alpha" {
		t.Fatalf("FindVM(alpha) = %+v, %v", vm, err)
	}
	// A prefix with exactly one match resolves to that VM.
	vm, err = daemon.FindVM(ctx, "br")
	if err != nil || vm.Name != "bravo" {
		t.Fatalf("FindVM(br) = %+v, %v", vm, err)
	}
	// "al" matches both alpha and alpine, so it must be rejected.
	if _, err = daemon.FindVM(ctx, "al"); err == nil || !strings.Contains(err.Error(), "multiple VMs match") {
		t.Fatalf("FindVM(al) error = %v, want ambiguity", err)
	}
	// A name matching nothing must report not-found.
	if _, err = daemon.FindVM(ctx, "missing"); err == nil || !strings.Contains(err.Error(), `vm "missing" not found`) {
		t.Fatalf("FindVM(missing) error = %v, want not-found", err)
	}
}
// TestFindImagePrefixResolution covers image lookup by full name, by unique
// prefix, by ambiguous prefix (error), and by unknown name (error).
func TestFindImagePrefixResolution(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	st := openDaemonStore(t)
	daemon := &Daemon{store: st}

	for _, img := range []model.Image{testImage("base"), testImage("basic"), testImage("docker")} {
		if err := st.UpsertImage(ctx, img); err != nil {
			t.Fatalf("UpsertImage(%s): %v", img.Name, err)
		}
	}

	// The full name "base" resolves even though it is a prefix of "basic".
	img, err := daemon.FindImage(ctx, "base")
	if err != nil || img.Name != "base" {
		t.Fatalf("FindImage(base) = %+v, %v", img, err)
	}
	// A prefix with a single match resolves to it.
	img, err = daemon.FindImage(ctx, "dock")
	if err != nil || img.Name != "docker" {
		t.Fatalf("FindImage(dock) = %+v, %v", img, err)
	}
	// "ba" matches both base and basic, so it must be rejected.
	if _, err = daemon.FindImage(ctx, "ba"); err == nil || !strings.Contains(err.Error(), "multiple images match") {
		t.Fatalf("FindImage(ba) error = %v, want ambiguity", err)
	}
	// A name matching nothing must report not-found.
	if _, err = daemon.FindImage(ctx, "missing"); err == nil || !strings.Contains(err.Error(), `image "missing" not found`) {
		t.Fatalf("FindImage(missing) error = %v, want not-found", err)
	}
}
// TestReconcileStopsStaleRunningVMAndClearsRuntimeHandles seeds a VM that is
// recorded as running (with a dead PID and leftover device handles) and
// verifies that reconcile tears down the dm target and loop devices, marks
// the VM stopped, and wipes every runtime handle.
func TestReconcileStopsStaleRunningVMAndClearsRuntimeHandles(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	st := openDaemonStore(t)

	// A leftover API socket file, as a crashed run would leave behind.
	sock := filepath.Join(t.TempDir(), "fc.sock")
	if err := os.WriteFile(sock, []byte{}, 0o644); err != nil {
		t.Fatalf("WriteFile(api sock): %v", err)
	}

	rec := testVM("stale", "image-stale", "172.16.0.9")
	rec.State = model.VMStateRunning
	rec.Runtime.State = model.VMStateRunning
	rec.Runtime.PID = 999999 // almost certainly not a live process
	rec.Runtime.APISockPath = sock
	rec.Runtime.DMName = "fc-rootfs-stale"
	rec.Runtime.DMDev = "/dev/mapper/fc-rootfs-stale"
	rec.Runtime.COWLoop = "/dev/loop11"
	rec.Runtime.BaseLoop = "/dev/loop10"
	rec.Runtime.DNSName = ""
	if err := st.UpsertVM(ctx, rec); err != nil {
		t.Fatalf("UpsertVM: %v", err)
	}

	// Expect the dm target to be removed before either loop device is detached.
	fake := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			sudoStep("", nil, "dmsetup", "remove", "fc-rootfs-stale"),
			sudoStep("", nil, "losetup", "-d", "/dev/loop11"),
			sudoStep("", nil, "losetup", "-d", "/dev/loop10"),
		},
	}
	daemon := &Daemon{store: st, runner: fake}
	if err := daemon.reconcile(ctx); err != nil {
		t.Fatalf("reconcile: %v", err)
	}
	fake.assertExhausted()

	got, err := st.GetVM(ctx, rec.ID)
	if err != nil {
		t.Fatalf("GetVM: %v", err)
	}
	if got.State != model.VMStateStopped || got.Runtime.State != model.VMStateStopped {
		t.Fatalf("vm state after reconcile = %s/%s, want stopped", got.State, got.Runtime.State)
	}
	if got.Runtime.PID != 0 || got.Runtime.APISockPath != "" || got.Runtime.DMName != "" || got.Runtime.COWLoop != "" || got.Runtime.BaseLoop != "" {
		t.Fatalf("runtime handles not cleared after reconcile: %+v", got.Runtime)
	}
}
// TestSetVMRejectsStoppedOnlyChangesForRunningVM backs a VM record with a
// live fake firecracker process and asserts that vcpu, memory, and disk
// edits are each refused while the VM looks running.
func TestSetVMRejectsStoppedOnlyChangesForRunningVM(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	st := openDaemonStore(t)

	sock := filepath.Join(t.TempDir(), "running.sock")
	proc := startFakeFirecrackerProcess(t, sock)
	t.Cleanup(func() {
		_ = proc.Process.Kill()
		_ = proc.Wait()
	})

	rec := testVM("running", "image-run", "172.16.0.10")
	rec.State = model.VMStateRunning
	rec.Runtime.State = model.VMStateRunning
	rec.Runtime.PID = proc.Process.Pid
	rec.Runtime.APISockPath = sock
	if err := st.UpsertVM(ctx, rec); err != nil {
		t.Fatalf("UpsertVM: %v", err)
	}

	daemon := &Daemon{store: st}
	cases := []struct {
		name   string
		params api.VMSetParams
		want   string
	}{
		{"vcpu", api.VMSetParams{IDOrName: rec.ID, VCPUCount: ptr(4)}, "vcpu changes require the VM to be stopped"},
		{"memory", api.VMSetParams{IDOrName: rec.ID, MemoryMiB: ptr(2048)}, "memory changes require the VM to be stopped"},
		{"disk", api.VMSetParams{IDOrName: rec.ID, WorkDiskSize: "16G"}, "disk changes require the VM to be stopped"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if _, err := daemon.SetVM(ctx, tc.params); err == nil || !strings.Contains(err.Error(), tc.want) {
				t.Fatalf("SetVM(%s) error = %v, want %q", tc.name, err, tc.want)
			}
		})
	}
}
// TestSetVMDiskResizeFailsPreflightWhenToolsMissing points PATH at an empty
// directory so no resize tooling can be found, and expects SetVM to fail its
// work-disk resize preflight. Not parallel: t.Setenv mutates process state.
func TestSetVMDiskResizeFailsPreflightWhenToolsMissing(t *testing.T) {
	ctx := context.Background()
	st := openDaemonStore(t)

	disk := filepath.Join(t.TempDir(), "root.ext4")
	if err := os.WriteFile(disk, []byte("disk"), 0o644); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}

	rec := testVM("resize", "image-resize", "172.16.0.11")
	rec.Runtime.WorkDiskPath = disk
	rec.Spec.WorkDiskSizeBytes = 8 * 1024 * 1024 * 1024
	if err := st.UpsertVM(ctx, rec); err != nil {
		t.Fatalf("UpsertVM: %v", err)
	}

	t.Setenv("PATH", t.TempDir()) // empty dir: nothing resolvable on PATH
	daemon := &Daemon{store: st}
	if _, err := daemon.SetVM(ctx, api.VMSetParams{IDOrName: rec.ID, WorkDiskSize: "16G"}); err == nil || !strings.Contains(err.Error(), "work disk resize preflight failed") {
		t.Fatalf("SetVM() error = %v, want preflight failure", err)
	}
}
// TestCollectStatsIgnoresMalformedMetricsFile checks that collectStats still
// reports allocated overlay/work-disk sizes when the metrics file is not
// valid JSON: the raw metrics are dropped instead of failing the call.
func TestCollectStatsIgnoresMalformedMetricsFile(t *testing.T) {
	t.Parallel()

	dir := t.TempDir()
	overlay := filepath.Join(dir, "system.cow")
	workDisk := filepath.Join(dir, "root.ext4")
	metrics := filepath.Join(dir, "metrics.json")
	for _, p := range []string{overlay, workDisk} {
		if err := os.WriteFile(p, []byte("allocated"), 0o644); err != nil {
			t.Fatalf("WriteFile(%s): %v", p, err)
		}
	}
	if err := os.WriteFile(metrics, []byte("{not-json}\n"), 0o644); err != nil {
		t.Fatalf("WriteFile(metrics): %v", err)
	}

	stats, err := (&Daemon{}).collectStats(context.Background(), model.VMRecord{
		Runtime: model.VMRuntime{
			SystemOverlay: overlay,
			WorkDiskPath:  workDisk,
			MetricsPath:   metrics,
		},
	})
	if err != nil {
		t.Fatalf("collectStats: %v", err)
	}
	if stats.MetricsRaw != nil {
		t.Fatalf("MetricsRaw = %v, want nil for malformed metrics", stats.MetricsRaw)
	}
	if stats.SystemOverlayBytes == 0 || stats.WorkDiskBytes == 0 {
		t.Fatalf("allocated bytes not captured: %+v", stats)
	}
}
// TestValidateStartPrereqsReportsNATUplinkFailure stubs every host tool on
// PATH and scripts `ip route show default` output that the NAT preflight
// should reject, expecting validateStartPrereqs to surface a missing uplink
// error. Not parallel: t.Setenv mutates process state.
func TestValidateStartPrereqsReportsNATUplinkFailure(t *testing.T) {
	ctx := context.Background()

	// Fake every external binary the prereq check probes for.
	binDir := t.TempDir()
	tools := []string{
		"sudo", "ip", "dmsetup", "losetup", "blockdev", "truncate", "pgrep", "ps",
		"chown", "chmod", "kill", "e2cp", "e2rm", "debugfs", "mkfs.ext4", "mount",
		"umount", "cp", "iptables", "sysctl", "mapdns",
	}
	for _, tool := range tools {
		writeFakeExecutable(t, filepath.Join(binDir, tool))
	}
	t.Setenv("PATH", binDir)

	fcBin := filepath.Join(t.TempDir(), "firecracker")
	rootfs := filepath.Join(t.TempDir(), "rootfs.ext4")
	kernel := filepath.Join(t.TempDir(), "vmlinux")
	for _, p := range []string{fcBin, rootfs, kernel} {
		writeFakeExecutable(t, p)
	}

	// Scripted default-route output with no uplink usable for NAT.
	fake := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			{call: runnerCall{name: "ip", args: []string{"route", "show", "default"}}, out: []byte("10.0.0.0/24 dev br-fc\n")},
		},
	}
	daemon := &Daemon{
		runner: fake,
		config: model.DaemonConfig{
			FirecrackerBin: fcBin,
			MapDNSBin:      "mapdns",
		},
	}

	rec := testVM("nat", "image-nat", "172.16.0.12")
	rec.Spec.NATEnabled = true
	rec.Runtime.WorkDiskPath = filepath.Join(t.TempDir(), "missing-root.ext4")
	img := testImage("image-nat")
	img.RootfsPath = rootfs
	img.KernelPath = kernel

	if err := daemon.validateStartPrereqs(ctx, rec, img); err == nil || !strings.Contains(err.Error(), "uplink interface for NAT") {
		t.Fatalf("validateStartPrereqs() error = %v, want NAT uplink failure", err)
	}
	fake.assertExhausted()
}
// openDaemonStore opens a fresh store backed by a per-test temp database and
// registers its cleanup with the test.
func openDaemonStore(t *testing.T) *store.Store {
	t.Helper()
	st, err := store.Open(filepath.Join(t.TempDir(), "state.db"))
	if err != nil {
		t.Fatalf("store.Open: %v", err)
	}
	t.Cleanup(func() { _ = st.Close() })
	return st
}
// testVM builds a stopped VMRecord with a fixed timestamp and paths derived
// from name, suitable for seeding the store in tests.
func testVM(name, imageID, guestIP string) model.VMRecord {
	stamp := time.Date(2026, time.March, 16, 12, 0, 0, 0, time.UTC)
	vmDir := filepath.Join("/state", name)
	return model.VMRecord{
		ID:            name + "-id",
		Name:          name,
		ImageID:       imageID,
		State:         model.VMStateStopped,
		CreatedAt:     stamp,
		UpdatedAt:     stamp,
		LastTouchedAt: stamp,
		Spec: model.VMSpec{
			VCPUCount:             2,
			MemoryMiB:             1024,
			SystemOverlaySizeByte: model.DefaultSystemOverlaySize,
			WorkDiskSizeBytes:     model.DefaultWorkDiskSize,
		},
		Runtime: model.VMRuntime{
			State:         model.VMStateStopped,
			GuestIP:       guestIP,
			DNSName:       name + ".vm",
			VMDir:         vmDir,
			SystemOverlay: filepath.Join(vmDir, "system.cow"),
			WorkDiskPath:  filepath.Join(vmDir, "root.ext4"),
		},
	}
}
// testImage builds an Image record with a fixed timestamp and rootfs/kernel
// paths derived from name.
func testImage(name string) model.Image {
	stamp := time.Date(2026, time.March, 16, 12, 0, 0, 0, time.UTC)
	return model.Image{
		ID:         name + "-id",
		Name:       name,
		RootfsPath: filepath.Join("/images", name+".ext4"),
		KernelPath: filepath.Join("/kernels", name),
		CreatedAt:  stamp,
		UpdatedAt:  stamp,
	}
}
// startFakeFirecrackerProcess launches a sleep process whose argv[0] is
// rewritten (bash `exec -a`) to look like a firecracker invocation for
// apiSock, then polls for up to two seconds until systemProcessRunning sees
// it. The caller must kill and wait the returned process.
func startFakeFirecrackerProcess(t *testing.T, apiSock string) *exec.Cmd {
	t.Helper()
	argv0 := "firecracker --api-sock " + apiSock
	cmd := exec.Command("bash", "-lc", fmt.Sprintf("exec -a %q sleep 30", argv0))
	if err := cmd.Start(); err != nil {
		t.Fatalf("start fake firecracker: %v", err)
	}
	// Poll until the renamed process is visible, or give up after 2s.
	for deadline := time.Now().Add(2 * time.Second); time.Now().Before(deadline); time.Sleep(20 * time.Millisecond) {
		if cmd.Process != nil && cmd.Process.Pid > 0 && systemProcessRunning(cmd.Process.Pid, apiSock) {
			return cmd
		}
	}
	_ = cmd.Process.Kill()
	_ = cmd.Wait()
	t.Fatalf("fake firecracker process never looked running for %s", apiSock)
	return nil
}
func systemProcessRunning(pid int, apiSock string) bool {
data, err := os.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "cmdline"))
if err != nil {
return false
}
cmdline := strings.ReplaceAll(string(data), "\x00", " ")
return strings.Contains(cmdline, "firecracker") && strings.Contains(cmdline, apiSock)
}
func writeFakeExecutable(t *testing.T, path string) {
t.Helper()
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
t.Fatalf("MkdirAll(%s): %v", filepath.Dir(path), err)
}
if err := os.WriteFile(path, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
t.Fatalf("WriteFile(%s): %v", path, err)
}
}
// ptr returns a pointer to value, convenient for populating optional
// pointer-typed fields in test parameters.
func ptr[T any](value T) *T {
	v := value
	return &v
}