test: end-to-end VMService lifecycle flow harness
Uses the newTestDaemon harness added earlier in this series to
drive VMService.CreateVM → FindVM → (duplicate CreateVM reject) →
DeleteVM through the real code path. Pins contracts the
single-responsibility tests can't:
- image resolution finds a locally-upserted row
- reserveVM allocates an IP, mkdirs VMDir, persists the Stopped
row
- FindVM round-trips the record
- duplicate-name CreateVM fails without persisting a second row
- DeleteVM runs cleanupRuntime with zero handles (no sudo calls
needed), removes the store row, removes the VMDir
Plus TestVMCreateWithUnknownImageFails for the error branch: if
CreateVM can't resolve an image, it must error before mutating
any state.
Scope: everything except firecracker boot. NoStart: true skips
machine.Start, which is the upstream SDK boundary we can't cross
without a real firecracker binary. The integration we GET
exercises name/IP reservation, per-VM lock lifecycle, store
round-trip, VMDir lifecycle, and the never-started delete path —
all of which were only indirectly covered by unit tests before.
-race clean across the two tests. The tests themselves spawn no
goroutines, but the harness does start the tap pool background
goroutine in wireServices, and the race detector validated that path.
Coverage is flat at the global level (37.8% → 37.8%) because
this slice tests integration of already-covered units, not new
branches. That's expected — the slice's value is as a regression
bedrock for future refactors, not a line-count bump.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
7820960706
commit
e2ac70631b
1 changed file with 143 additions and 0 deletions
143
internal/daemon/lifecycle_flow_test.go
Normal file
143
internal/daemon/lifecycle_flow_test.go
Normal file
|
|
@ -0,0 +1,143 @@
|
|||
package daemon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"banger/internal/api"
|
||||
"banger/internal/model"
|
||||
)
|
||||
|
||||
// TestVMCreateNoStartDeleteFlow is the end-to-end lifecycle harness
|
||||
// test: one test that drives VMService.CreateVM → VMService.DeleteVM
|
||||
// through the real code path, using newTestDaemon to stand up
|
||||
// infrastructure. If a future refactor breaks store persistence,
|
||||
// VM dir creation, or delete-side cleanup for a never-booted VM,
|
||||
// this test fails.
|
||||
//
|
||||
// Scope: everything except the firecracker boot step. CreateVM is
|
||||
// called with NoStart: true so we skip machine.Start (the upstream
|
||||
// SDK boundary we can't cross without a real firecracker binary +
|
||||
// KVM). The flow still exercises image resolution, name/IP
|
||||
// reservation, VMDir creation, store round-trip, per-VM lock
|
||||
// lifecycle, handle cache, and the delete-side cleanupRuntime path
|
||||
// that runs against a never-started VM.
|
||||
//
|
||||
// This is the bar for "can we catch a full-lifecycle regression
|
||||
// without real KVM?" — subsequent harness tests can exercise
|
||||
// individual error branches (delete while running, create with
|
||||
// duplicate name, etc.) against the same fixture.
|
||||
func TestVMCreateNoStartDeleteFlow(t *testing.T) {
|
||||
d := newTestDaemon(t)
|
||||
ctx := context.Background()
|
||||
|
||||
// Pre-seed an image record so findOrAutoPullImage finds it
|
||||
// locally and doesn't try to hit the embedded catalog.
|
||||
image := testImage("flow-img")
|
||||
if err := d.store.UpsertImage(ctx, image); err != nil {
|
||||
t.Fatalf("UpsertImage: %v", err)
|
||||
}
|
||||
|
||||
// CreateVM with NoStart → reserves name + IP, mkdirs VMDir,
|
||||
// persists row in state Stopped. Returns the persisted record.
|
||||
created, err := d.vm.CreateVM(ctx, api.VMCreateParams{
|
||||
Name: "flow-vm",
|
||||
ImageName: image.Name,
|
||||
NoStart: true,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateVM: %v", err)
|
||||
}
|
||||
|
||||
if created.Name != "flow-vm" {
|
||||
t.Fatalf("created.Name = %q, want flow-vm", created.Name)
|
||||
}
|
||||
if created.ImageID != image.ID {
|
||||
t.Fatalf("created.ImageID = %q, want %q", created.ImageID, image.ID)
|
||||
}
|
||||
if created.State != model.VMStateStopped || created.Runtime.State != model.VMStateStopped {
|
||||
t.Fatalf("created states = (%q, %q), want both stopped", created.State, created.Runtime.State)
|
||||
}
|
||||
if created.Runtime.GuestIP == "" {
|
||||
t.Fatal("created.Runtime.GuestIP empty — reservation didn't allocate an IP")
|
||||
}
|
||||
if created.Runtime.VMDir == "" {
|
||||
t.Fatal("created.Runtime.VMDir empty — reservation didn't pick a per-VM dir")
|
||||
}
|
||||
|
||||
// VMDir must exist on disk — reserveVM creates it during the
|
||||
// reservation window so subsequent lifecycle steps can drop
|
||||
// handles.json, firecracker.log, etc. inside.
|
||||
info, err := os.Stat(created.Runtime.VMDir)
|
||||
if err != nil {
|
||||
t.Fatalf("VMDir missing after CreateVM: %v", err)
|
||||
}
|
||||
if !info.IsDir() {
|
||||
t.Fatalf("VMDir %q is not a directory", created.Runtime.VMDir)
|
||||
}
|
||||
|
||||
// Store round-trip: FindVM must return the same record.
|
||||
found, err := d.vm.FindVM(ctx, created.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("FindVM: %v", err)
|
||||
}
|
||||
if found.ID != created.ID || found.Name != created.Name {
|
||||
t.Fatalf("FindVM mismatch: got %+v, created %+v", found, created)
|
||||
}
|
||||
|
||||
// Duplicate-name rejection: a second CreateVM with the same
|
||||
// name must fail with a useful error, not persist a second row.
|
||||
if _, err := d.vm.CreateVM(ctx, api.VMCreateParams{
|
||||
Name: "flow-vm",
|
||||
ImageName: image.Name,
|
||||
NoStart: true,
|
||||
}); err == nil {
|
||||
t.Fatal("second CreateVM with duplicate name succeeded; reserveVM's exact-name check didn't fire")
|
||||
}
|
||||
|
||||
// DeleteVM against a never-started VM: takes the per-VM lock,
|
||||
// calls cleanupRuntime (no-op on zero handles), removes the
|
||||
// store row and the VMDir. Because vmCaps is empty in the
|
||||
// harness default, capability Cleanup hooks don't fire real
|
||||
// side effects.
|
||||
deleted, err := d.vm.DeleteVM(ctx, created.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("DeleteVM: %v", err)
|
||||
}
|
||||
if deleted.ID != created.ID {
|
||||
t.Fatalf("DeleteVM returned %+v, want ID %q", deleted, created.ID)
|
||||
}
|
||||
|
||||
// After delete: store has no record.
|
||||
if _, err := d.vm.FindVM(ctx, created.ID); err == nil {
|
||||
t.Fatal("FindVM succeeded after DeleteVM — store row wasn't removed")
|
||||
}
|
||||
|
||||
// VMDir is gone.
|
||||
if _, err := os.Stat(created.Runtime.VMDir); !errors.Is(err, os.ErrNotExist) {
|
||||
t.Fatalf("VMDir %q still present after DeleteVM (stat err = %v)", created.Runtime.VMDir, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestVMCreateWithUnknownImageFails pins the error branch when the
|
||||
// requested image isn't local and isn't in the embedded catalog.
|
||||
// The failure must come before any state mutation — in particular,
|
||||
// no VM row should linger.
|
||||
func TestVMCreateWithUnknownImageFails(t *testing.T) {
|
||||
d := newTestDaemon(t)
|
||||
ctx := context.Background()
|
||||
|
||||
if _, err := d.vm.CreateVM(ctx, api.VMCreateParams{
|
||||
Name: "ghostly",
|
||||
ImageName: "nothing-called-this-image",
|
||||
NoStart: true,
|
||||
}); err == nil {
|
||||
t.Fatal("CreateVM: want error for unknown image, got nil")
|
||||
}
|
||||
|
||||
if _, err := d.vm.FindVM(ctx, "ghostly"); err == nil {
|
||||
t.Fatal("FindVM found a record for a VM that should never have been persisted")
|
||||
}
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue