Make host-integrated VM features fit a standard Go extension path instead of adding more one-off branches through vm.go. This is the enabling refactor for future work like shared mounts, not the /work feature itself. Add a daemon capability pipeline plus a structured guest-config builder, then move the existing /root work-disk mount, built-in DNS, and NAT wiring onto those hooks. Generalize Firecracker drive config at the same time so later storage features can extend machine setup without another hardcoded path. Add banger doctor on top of the shared readiness checks, update the docs to describe the new architecture, and cover the new seams with guest-config, capability, report, CLI, and full go test verification. Also verify make build and a real ./banger doctor run on the host.
145 lines
4.5 KiB
Go
145 lines
4.5 KiB
Go
package daemon
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"reflect"
|
|
"testing"
|
|
|
|
"banger/internal/firecracker"
|
|
"banger/internal/guestconfig"
|
|
"banger/internal/model"
|
|
"banger/internal/system"
|
|
)
|
|
|
|
// testCapability is a configurable vmCapability stub for tests. Each hook
// field, when non-nil, is invoked by the matching interface method below;
// a nil hook makes that method a no-op (returning nil where an error is
// expected), so each test only fills in the hooks it cares about.
type testCapability struct {
	// name is returned by Name() and used by tests to tag cleanup order.
	name string
	// prepare backs PrepareHost; returning an error simulates a failed
	// host-side setup step.
	prepare func(context.Context, *Daemon, *model.VMRecord, model.Image) error
	// cleanup backs Cleanup; tests use it to observe rollback ordering.
	cleanup func(context.Context, *Daemon, model.VMRecord) error
	// contribute backs ContributeGuest (guest-config builder hook).
	contribute func(*guestconfig.Builder, model.VMRecord, model.Image)
	// contributeFC backs ContributeMachine (Firecracker machine-config hook).
	contributeFC func(*firecracker.MachineConfig, model.VMRecord, model.Image)
	// configChange backs ApplyConfigChange (before/after record pair).
	configChange func(context.Context, *Daemon, model.VMRecord, model.VMRecord) error
	// doctor backs AddDoctorChecks (doctor report hook).
	doctor func(context.Context, *Daemon, *system.Report)
	// startPreflight backs AddStartPreflight (per-start readiness hook).
	startPreflight func(context.Context, *Daemon, *system.Preflight, model.VMRecord, model.Image)
}
|
|
|
|
// Name reports the capability's identifier.
func (c testCapability) Name() string {
	return c.name
}
|
|
|
|
func (c testCapability) PrepareHost(ctx context.Context, d *Daemon, vm *model.VMRecord, image model.Image) error {
|
|
if c.prepare != nil {
|
|
return c.prepare(ctx, d, vm, image)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (c testCapability) Cleanup(ctx context.Context, d *Daemon, vm model.VMRecord) error {
|
|
if c.cleanup != nil {
|
|
return c.cleanup(ctx, d, vm)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (c testCapability) ContributeGuest(builder *guestconfig.Builder, vm model.VMRecord, image model.Image) {
|
|
if c.contribute != nil {
|
|
c.contribute(builder, vm, image)
|
|
}
|
|
}
|
|
|
|
func (c testCapability) ContributeMachine(cfg *firecracker.MachineConfig, vm model.VMRecord, image model.Image) {
|
|
if c.contributeFC != nil {
|
|
c.contributeFC(cfg, vm, image)
|
|
}
|
|
}
|
|
|
|
func (c testCapability) ApplyConfigChange(ctx context.Context, d *Daemon, before, after model.VMRecord) error {
|
|
if c.configChange != nil {
|
|
return c.configChange(ctx, d, before, after)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (c testCapability) AddDoctorChecks(ctx context.Context, d *Daemon, report *system.Report) {
|
|
if c.doctor != nil {
|
|
c.doctor(ctx, d, report)
|
|
}
|
|
}
|
|
|
|
func (c testCapability) AddStartPreflight(ctx context.Context, d *Daemon, checks *system.Preflight, vm model.VMRecord, image model.Image) {
|
|
if c.startPreflight != nil {
|
|
c.startPreflight(ctx, d, checks, vm, image)
|
|
}
|
|
}
|
|
|
|
func TestPrepareCapabilityHostsRollsBackPreparedCapabilitiesInReverseOrder(t *testing.T) {
|
|
vm := testVM("devbox", "image", "172.16.0.2")
|
|
var cleanupOrder []string
|
|
|
|
d := &Daemon{
|
|
vmCaps: []vmCapability{
|
|
testCapability{
|
|
name: "first",
|
|
prepare: func(context.Context, *Daemon, *model.VMRecord, model.Image) error {
|
|
return nil
|
|
},
|
|
cleanup: func(context.Context, *Daemon, model.VMRecord) error {
|
|
cleanupOrder = append(cleanupOrder, "first")
|
|
return nil
|
|
},
|
|
},
|
|
testCapability{
|
|
name: "second",
|
|
prepare: func(context.Context, *Daemon, *model.VMRecord, model.Image) error {
|
|
return nil
|
|
},
|
|
cleanup: func(context.Context, *Daemon, model.VMRecord) error {
|
|
cleanupOrder = append(cleanupOrder, "second")
|
|
return nil
|
|
},
|
|
},
|
|
testCapability{
|
|
name: "broken",
|
|
prepare: func(context.Context, *Daemon, *model.VMRecord, model.Image) error {
|
|
return errors.New("boom")
|
|
},
|
|
},
|
|
},
|
|
}
|
|
|
|
err := d.prepareCapabilityHosts(context.Background(), &vm, model.Image{})
|
|
if err == nil || err.Error() != "boom" {
|
|
t.Fatalf("prepareCapabilityHosts() error = %v, want boom", err)
|
|
}
|
|
if !reflect.DeepEqual(cleanupOrder, []string{"second", "first"}) {
|
|
t.Fatalf("cleanup order = %v, want reverse prepared order", cleanupOrder)
|
|
}
|
|
}
|
|
|
|
func TestContributeHooksPopulateGuestAndMachineConfig(t *testing.T) {
|
|
d := &Daemon{
|
|
vmCaps: []vmCapability{
|
|
testCapability{
|
|
name: "guest",
|
|
contribute: func(builder *guestconfig.Builder, _ model.VMRecord, _ model.Image) {
|
|
builder.AddMount(guestconfig.MountSpec{Source: "/dev/vdb", Target: "/work", FSType: "ext4"})
|
|
},
|
|
contributeFC: func(cfg *firecracker.MachineConfig, _ model.VMRecord, _ model.Image) {
|
|
cfg.Drives = append(cfg.Drives, firecracker.DriveConfig{ID: "work", Path: "/tmp/work.ext4"})
|
|
},
|
|
},
|
|
},
|
|
}
|
|
|
|
builder := guestconfig.NewBuilder()
|
|
d.contributeGuestConfig(builder, model.VMRecord{}, model.Image{})
|
|
|
|
cfg := firecracker.MachineConfig{Drives: []firecracker.DriveConfig{{ID: "rootfs", Path: "/dev/root", IsRoot: true}}}
|
|
d.contributeMachineConfig(&cfg, model.VMRecord{}, model.Image{})
|
|
|
|
fstab := builder.RenderFSTab("")
|
|
if !reflect.DeepEqual(cfg.Drives[1], firecracker.DriveConfig{ID: "work", Path: "/tmp/work.ext4"}) {
|
|
t.Fatalf("machine drives = %+v, want contributed work drive", cfg.Drives)
|
|
}
|
|
if want := "/dev/vdb /work ext4 defaults 0 0\n"; fstab != want {
|
|
t.Fatalf("guest fstab = %q, want %q", fstab, want)
|
|
}
|
|
}
|