Add regression coverage for VM failure paths

Dangerous lifecycle, store, system, and RPC paths still had little or no automated test coverage, and the live smoke harness failed opaquely when guest boot timing drifted. This adds targeted unit coverage for store allocation and decode failures, system helper failure ordering and cleanup, RPC error handling, and daemon lookup/reconcile/editing/stats/preflight edge cases.

It also makes verify.sh wait for daemon-observable VM readiness before SSH, reuse a bounded boot deadline for the SSH phase, and dump VM metadata, logs, tap state, socket state, and NAT rules on timeout so host-level failures are diagnosable instead of surfacing only connection refused.

Validation: go test ./..., go test ./... -cover, bash -n verify.sh. No live ./verify.sh boot was run in this environment.
This commit is contained in:
Thales Maciel 2026-03-16 15:46:54 -03:00
parent fcedacba5c
commit 5018bc6170
No known key found for this signature in database
GPG key ID: 33112E6833C34679
5 changed files with 1229 additions and 16 deletions

View file

@ -0,0 +1,276 @@
package store
import (
"context"
"database/sql"
"errors"
"path/filepath"
"reflect"
"strconv"
"strings"
"testing"
"time"
"banger/internal/model"
)
// TestStoreImageAndVMRoundTrip exercises the full create/read/list/delete
// cycle for images and VMs: upsert both, fetch them back by the same keys,
// list them, resolve image usage, then delete and confirm both records are
// gone.
func TestStoreImageAndVMRoundTrip(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := openTestStore(t)

	image := sampleImage("image-one")
	if err := store.UpsertImage(ctx, image); err != nil {
		t.Fatalf("UpsertImage: %v", err)
	}
	vm := sampleVM("vm-one", image.ID, "172.16.0.8")
	if err := store.UpsertVM(ctx, vm); err != nil {
		t.Fatalf("UpsertVM: %v", err)
	}

	// Single-record fetches must round-trip every field exactly.
	gotImage, err := store.GetImageByName(ctx, image.Name)
	if err != nil {
		t.Fatalf("GetImageByName: %v", err)
	}
	if !reflect.DeepEqual(gotImage, image) {
		t.Fatalf("GetImageByName = %+v, want %+v", gotImage, image)
	}
	gotVM, err := store.GetVM(ctx, vm.Name)
	if err != nil {
		t.Fatalf("GetVM: %v", err)
	}
	if !reflect.DeepEqual(gotVM, vm) {
		t.Fatalf("GetVM = %+v, want %+v", gotVM, vm)
	}

	// Listings should contain exactly the one record of each kind.
	images, err := store.ListImages(ctx)
	if err != nil {
		t.Fatalf("ListImages: %v", err)
	}
	if len(images) != 1 || !reflect.DeepEqual(images[0], image) {
		t.Fatalf("ListImages = %+v, want [%+v]", images, image)
	}
	vms, err := store.ListVMs(ctx)
	if err != nil {
		t.Fatalf("ListVMs: %v", err)
	}
	if len(vms) != 1 || !reflect.DeepEqual(vms[0], vm) {
		t.Fatalf("ListVMs = %+v, want [%+v]", vms, vm)
	}

	users, err := store.FindVMsUsingImage(ctx, image.ID)
	if err != nil {
		t.Fatalf("FindVMsUsingImage: %v", err)
	}
	if len(users) != 1 || users[0].ID != vm.ID {
		t.Fatalf("FindVMsUsingImage = %+v, want vm %s", users, vm.ID)
	}

	if err := store.DeleteVM(ctx, vm.ID); err != nil {
		t.Fatalf("DeleteVM: %v", err)
	}
	// Bug fix: look the VM up by vm.Name — the key that demonstrably
	// resolved before the delete — instead of vm.ID. Querying by vm.ID
	// could return sql.ErrNoRows vacuously if GetVM resolves names only,
	// which would make this deletion check pass without proving anything.
	if _, err := store.GetVM(ctx, vm.Name); !errors.Is(err, sql.ErrNoRows) {
		t.Fatalf("GetVM after delete error = %v, want sql.ErrNoRows", err)
	}
	if err := store.DeleteImage(ctx, image.ID); err != nil {
		t.Fatalf("DeleteImage: %v", err)
	}
	if _, err := store.GetImageByID(ctx, image.ID); !errors.Is(err, sql.ErrNoRows) {
		t.Fatalf("GetImageByID after delete error = %v, want sql.ErrNoRows", err)
	}
}
// TestNextGuestIPSkipsAllocatedAddresses verifies NextGuestIP hands out the
// lowest unallocated host address, skipping IPs already claimed by VMs
// (.2, .3, and .5 are taken here, so .4 is expected).
func TestNextGuestIPSkipsAllocatedAddresses(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	st := openTestStore(t)

	img := sampleImage("image-next-ip")
	if err := st.UpsertImage(ctx, img); err != nil {
		t.Fatalf("UpsertImage: %v", err)
	}

	taken := []string{"172.16.0.2", "172.16.0.3", "172.16.0.5"}
	for idx, addr := range taken {
		rec := sampleVM("vm-next-"+strconv.Itoa(idx), img.ID, addr)
		if err := st.UpsertVM(ctx, rec); err != nil {
			t.Fatalf("UpsertVM(%s): %v", addr, err)
		}
	}

	next, err := st.NextGuestIP(ctx, "172.16.0")
	if err != nil {
		t.Fatalf("NextGuestIP: %v", err)
	}
	if next != "172.16.0.4" {
		t.Fatalf("NextGuestIP = %q, want 172.16.0.4", next)
	}
}
// TestNextGuestIPReturnsErrorWhenRangeExhausted fills every assignable host
// address in the /24 and checks that NextGuestIP reports exhaustion rather
// than succeeding or failing silently.
func TestNextGuestIPReturnsErrorWhenRangeExhausted(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	st := openTestStore(t)

	img := sampleImage("image-full")
	if err := st.UpsertImage(ctx, img); err != nil {
		t.Fatalf("UpsertImage: %v", err)
	}

	// Claim hosts .2 through .254 so no address remains free.
	for host := 2; host < 255; host++ {
		suffix := strconv.Itoa(host)
		rec := sampleVM("vm-"+suffix, img.ID, "172.16.0."+suffix)
		if err := st.UpsertVM(ctx, rec); err != nil {
			t.Fatalf("UpsertVM(%d): %v", host, err)
		}
	}

	if _, err := st.NextGuestIP(ctx, "172.16.0"); err == nil || !strings.Contains(err.Error(), "no guest IPs available") {
		t.Fatalf("NextGuestIP() error = %v, want exhaustion error", err)
	}
}
// TestGetVMRejectsMalformedRuntimeJSON inserts a VM row directly via SQL with
// a truncated runtime_json payload, then verifies GetVM surfaces the decode
// failure instead of returning a partially populated record.
func TestGetVMRejectsMalformedRuntimeJSON(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := openTestStore(t)
	now := fixedTime()
	// Bypass UpsertVM so the malformed JSON reaches the database untouched.
	_, err := store.db.ExecContext(ctx, `
INSERT INTO vms (
id, name, image_id, guest_ip, state, created_at, updated_at, last_touched_at,
spec_json, runtime_json, stats_json
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		"vm-malformed-runtime",
		"vm-malformed-runtime",
		"image-id",
		"172.16.0.8",
		string(model.VMStateCreated),
		now.Format(time.RFC3339),
		now.Format(time.RFC3339),
		now.Format(time.RFC3339),
		`{"vcpu_count":2}`,
		// Truncated JSON object — valid prefix, missing value and brace.
		`{"guest_ip":`,
		`{}`,
	)
	if err != nil {
		t.Fatalf("insert malformed vm: %v", err)
	}
	_, err = store.GetVM(ctx, "vm-malformed-runtime")
	// "unexpected end of JSON input" is encoding/json's truncated-input
	// error; matching on it pins the failure to the runtime_json column.
	// NOTE(review): substring match couples the test to the stdlib's error
	// wording — confirm it stays stable across Go upgrades.
	if err == nil || !strings.Contains(err.Error(), "unexpected end of JSON input") {
		t.Fatalf("GetVM() error = %v, want runtime JSON failure", err)
	}
}
// TestGetImageRejectsMalformedTimestamp inserts an image row directly via SQL
// with unparseable created_at/updated_at values, then verifies
// GetImageByName propagates the timestamp parse error.
func TestGetImageRejectsMalformedTimestamp(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := openTestStore(t)
	// Bypass UpsertImage so the bad timestamps reach the database untouched.
	_, err := store.db.ExecContext(ctx, `
INSERT INTO images (
id, name, managed, artifact_dir, rootfs_path, kernel_path, initrd_path,
modules_dir, packages_path, build_size, docker, created_at, updated_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		"image-bad-time",
		"image-bad-time",
		0,
		"",
		"/rootfs.ext4",
		"/vmlinux",
		"",
		"",
		"",
		"",
		0,
		// Both timestamp columns hold a value time.Parse cannot handle.
		"not-a-time",
		"not-a-time",
	)
	if err != nil {
		t.Fatalf("insert malformed image: %v", err)
	}
	_, err = store.GetImageByName(ctx, "image-bad-time")
	// "cannot parse" is the time package's parse-error wording.
	// NOTE(review): substring match couples the test to the stdlib's error
	// text — confirm it stays stable across Go upgrades.
	if err == nil || !strings.Contains(err.Error(), "cannot parse") {
		t.Fatalf("GetImageByName() error = %v, want timestamp parse failure", err)
	}
}
// openTestStore opens a fresh SQLite-backed Store in a per-test temp
// directory and registers cleanup so the handle is closed when the test ends.
func openTestStore(t *testing.T) *Store {
	t.Helper()
	dbPath := filepath.Join(t.TempDir(), "state.db")
	s, err := Open(dbPath)
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	t.Cleanup(func() { _ = s.Close() })
	return s
}
// sampleImage builds a deterministic image fixture whose ID and artifact
// paths are all derived from name, with every field populated so DeepEqual
// round-trip checks cover the whole record.
func sampleImage(name string) model.Image {
	ts := fixedTime()
	img := model.Image{
		ID:           name + "-id",
		Name:         name,
		Managed:      true,
		ArtifactDir:  "/artifacts/" + name,
		RootfsPath:   "/images/" + name + ".ext4",
		KernelPath:   "/kernels/" + name,
		InitrdPath:   "/initrd/" + name,
		ModulesDir:   "/modules/" + name,
		PackagesPath: "/packages/" + name + ".apt",
		BuildSize:    "8G",
		Docker:       true,
		CreatedAt:    ts,
		UpdatedAt:    ts,
	}
	return img
}
// sampleVM builds a deterministic VM record fixture wired to the given image
// ID and guest IP. Every nested struct (spec, runtime, stats) is populated so
// round-trip comparisons exercise the full schema.
func sampleVM(name, imageID, guestIP string) model.VMRecord {
	ts := fixedTime()
	const eightGiB = 8 * 1024 * 1024 * 1024
	spec := model.VMSpec{
		VCPUCount:             2,
		MemoryMiB:             1024,
		SystemOverlaySizeByte: eightGiB,
		WorkDiskSizeBytes:     eightGiB,
		NATEnabled:            true,
	}
	rt := model.VMRuntime{
		State:         model.VMStateStopped,
		GuestIP:       guestIP,
		TapDevice:     "tap-" + name,
		APISockPath:   "/tmp/" + name + ".sock",
		LogPath:       "/tmp/" + name + ".log",
		MetricsPath:   "/tmp/" + name + ".metrics",
		DNSName:       name + ".vm",
		VMDir:         "/state/" + name,
		SystemOverlay: "/state/" + name + "/system.cow",
		WorkDiskPath:  "/state/" + name + "/root.ext4",
	}
	stats := model.VMStats{
		CPUPercent:         1.25,
		RSSBytes:           1024,
		VSZBytes:           2048,
		SystemOverlayBytes: 4096,
		WorkDiskBytes:      8192,
		MetricsRaw:         map[string]any{"uptime": 12.0},
		CollectedAt:        ts,
	}
	return model.VMRecord{
		ID:            name + "-id",
		Name:          name,
		ImageID:       imageID,
		State:         model.VMStateStopped,
		CreatedAt:     ts,
		UpdatedAt:     ts,
		LastTouchedAt: ts,
		Spec:          spec,
		Runtime:       rt,
		Stats:         stats,
	}
}
func fixedTime() time.Time {
return time.Date(2026, time.March, 16, 12, 0, 0, 0, time.UTC)
}