Stop relying on ad hoc rootfs handling by adding image promotion, managed work-seed fingerprint metadata, and lazy self-healing for older managed images after the first create. Rebuild guest images with baked SSH access, a guest NIC bootstrap, and default opencode services, and add the staged Void kernel/initramfs/modules workflow so void-exp uses a matching Void boot stack. Replace the opaque blocking vm.create RPC with a begin/status flow that prints live stages in the CLI while still waiting for vsock health and opencode on guest port 4096. Validate with GOCACHE=/tmp/banger-gocache go test ./... and live void-exp create/delete smoke runs.
157 lines · 4.9 KiB · Go
package daemon
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"reflect"
|
|
"testing"
|
|
|
|
"banger/internal/firecracker"
|
|
"banger/internal/guestconfig"
|
|
"banger/internal/model"
|
|
"banger/internal/system"
|
|
)
|
|
|
|
// testCapability is a configurable vmCapability stub for tests. Each
// optional func field backs the matching interface method; a nil field
// makes that method a no-op, so tests only wire up the hooks they need.
type testCapability struct {
	name           string
	prepare        func(context.Context, *Daemon, *model.VMRecord, model.Image) error
	cleanup        func(context.Context, *Daemon, model.VMRecord) error
	contribute     func(*guestconfig.Builder, model.VMRecord, model.Image)
	contributeFC   func(*firecracker.MachineConfig, model.VMRecord, model.Image)
	configChange   func(context.Context, *Daemon, model.VMRecord, model.VMRecord) error
	doctor         func(context.Context, *Daemon, *system.Report)
	startPreflight func(context.Context, *Daemon, *system.Preflight, model.VMRecord, model.Image)
}
func (c testCapability) Name() string { return c.name }
|
|
|
|
func (c testCapability) PrepareHost(ctx context.Context, d *Daemon, vm *model.VMRecord, image model.Image) error {
|
|
if c.prepare != nil {
|
|
return c.prepare(ctx, d, vm, image)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (c testCapability) Cleanup(ctx context.Context, d *Daemon, vm model.VMRecord) error {
|
|
if c.cleanup != nil {
|
|
return c.cleanup(ctx, d, vm)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (c testCapability) ContributeGuest(builder *guestconfig.Builder, vm model.VMRecord, image model.Image) {
|
|
if c.contribute != nil {
|
|
c.contribute(builder, vm, image)
|
|
}
|
|
}
|
|
|
|
func (c testCapability) ContributeMachine(cfg *firecracker.MachineConfig, vm model.VMRecord, image model.Image) {
|
|
if c.contributeFC != nil {
|
|
c.contributeFC(cfg, vm, image)
|
|
}
|
|
}
|
|
|
|
func (c testCapability) ApplyConfigChange(ctx context.Context, d *Daemon, before, after model.VMRecord) error {
|
|
if c.configChange != nil {
|
|
return c.configChange(ctx, d, before, after)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (c testCapability) AddDoctorChecks(ctx context.Context, d *Daemon, report *system.Report) {
|
|
if c.doctor != nil {
|
|
c.doctor(ctx, d, report)
|
|
}
|
|
}
|
|
|
|
func (c testCapability) AddStartPreflight(ctx context.Context, d *Daemon, checks *system.Preflight, vm model.VMRecord, image model.Image) {
|
|
if c.startPreflight != nil {
|
|
c.startPreflight(ctx, d, checks, vm, image)
|
|
}
|
|
}
|
|
|
|
func TestPrepareCapabilityHostsRollsBackPreparedCapabilitiesInReverseOrder(t *testing.T) {
|
|
vm := testVM("devbox", "image", "172.16.0.2")
|
|
var cleanupOrder []string
|
|
|
|
d := &Daemon{
|
|
vmCaps: []vmCapability{
|
|
testCapability{
|
|
name: "first",
|
|
prepare: func(context.Context, *Daemon, *model.VMRecord, model.Image) error {
|
|
return nil
|
|
},
|
|
cleanup: func(context.Context, *Daemon, model.VMRecord) error {
|
|
cleanupOrder = append(cleanupOrder, "first")
|
|
return nil
|
|
},
|
|
},
|
|
testCapability{
|
|
name: "second",
|
|
prepare: func(context.Context, *Daemon, *model.VMRecord, model.Image) error {
|
|
return nil
|
|
},
|
|
cleanup: func(context.Context, *Daemon, model.VMRecord) error {
|
|
cleanupOrder = append(cleanupOrder, "second")
|
|
return nil
|
|
},
|
|
},
|
|
testCapability{
|
|
name: "broken",
|
|
prepare: func(context.Context, *Daemon, *model.VMRecord, model.Image) error {
|
|
return errors.New("boom")
|
|
},
|
|
},
|
|
},
|
|
}
|
|
|
|
err := d.prepareCapabilityHosts(context.Background(), &vm, model.Image{})
|
|
if err == nil || err.Error() != "boom" {
|
|
t.Fatalf("prepareCapabilityHosts() error = %v, want boom", err)
|
|
}
|
|
if !reflect.DeepEqual(cleanupOrder, []string{"second", "first"}) {
|
|
t.Fatalf("cleanup order = %v, want reverse prepared order", cleanupOrder)
|
|
}
|
|
}
|
|
|
|
func TestContributeHooksPopulateGuestAndMachineConfig(t *testing.T) {
|
|
d := &Daemon{
|
|
vmCaps: []vmCapability{
|
|
testCapability{
|
|
name: "guest",
|
|
contribute: func(builder *guestconfig.Builder, _ model.VMRecord, _ model.Image) {
|
|
builder.AddMount(guestconfig.MountSpec{Source: "/dev/vdb", Target: "/work", FSType: "ext4"})
|
|
},
|
|
contributeFC: func(cfg *firecracker.MachineConfig, _ model.VMRecord, _ model.Image) {
|
|
cfg.Drives = append(cfg.Drives, firecracker.DriveConfig{ID: "work", Path: "/tmp/work.ext4"})
|
|
},
|
|
},
|
|
},
|
|
}
|
|
|
|
builder := guestconfig.NewBuilder()
|
|
d.contributeGuestConfig(builder, model.VMRecord{}, model.Image{})
|
|
|
|
cfg := firecracker.MachineConfig{Drives: []firecracker.DriveConfig{{ID: "rootfs", Path: "/dev/root", IsRoot: true}}}
|
|
d.contributeMachineConfig(&cfg, model.VMRecord{}, model.Image{})
|
|
|
|
fstab := builder.RenderFSTab("")
|
|
if !reflect.DeepEqual(cfg.Drives[1], firecracker.DriveConfig{ID: "work", Path: "/tmp/work.ext4"}) {
|
|
t.Fatalf("machine drives = %+v, want contributed work drive", cfg.Drives)
|
|
}
|
|
if want := "/dev/vdb /work ext4 defaults 0 0\n"; fstab != want {
|
|
t.Fatalf("guest fstab = %q, want %q", fstab, want)
|
|
}
|
|
}
|
|
|
|
func TestRegisteredCapabilitiesIncludeOpencode(t *testing.T) {
|
|
d := &Daemon{}
|
|
var names []string
|
|
for _, capability := range d.registeredCapabilities() {
|
|
names = append(names, capability.Name())
|
|
}
|
|
want := []string{"work-disk", "opencode", "dns", "nat"}
|
|
if !reflect.DeepEqual(names, want) {
|
|
t.Fatalf("capabilities = %v, want %v", names, want)
|
|
}
|
|
}
|