Factor the service + capability wiring out of Daemon.Open() into
wireServices(d), an idempotent helper that constructs HostNetwork,
ImageService, WorkspaceService, and VMService from whatever
infrastructure (runner, store, config, layout, logger, closing) is
already set on d. Open() calls it once after filling the composition
root; tests that build &Daemon{...} literals call it to get a working
service graph, preinstalling stubs on the fields they want to fake.
Drops the four lazy-init getters on *Daemon — d.hostNet(),
d.imageSvc(), d.workspaceSvc(), d.vmSvc() — whose sole purpose was
keeping test literals working. Every production call site now reads
d.net / d.img / d.ws / d.vm directly; the services are guaranteed
non-nil once Open returns. No behavior change.
Mechanical: all existing `d.xxxSvc()` calls (production + tests)
rewritten to field access; each `d := &Daemon{...}` in tests gets a
trailing wireServices(d) so the literal + wiring are side-by-side.
Tests that override a pre-built service (e.g. d.img = &ImageService{
bundleFetch: stub}) now set the override before wireServices so the
replacement propagates into VMService's peer pointer.
Also nil-guards HostNetwork.stopVMDNS and d.store in Close() so
partially-initialised daemons (pre-reconcile open failure) still
tear down cleanly — same contract the old lazy getters provided.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
160 lines
4.9 KiB
Go
package daemon
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"reflect"
|
|
"testing"
|
|
|
|
"banger/internal/firecracker"
|
|
"banger/internal/guestconfig"
|
|
"banger/internal/model"
|
|
"banger/internal/system"
|
|
)
|
|
|
|
// testCapability is a configurable fake used as a vmCapability in tests:
// each hook field, when non-nil, is invoked by the corresponding interface
// method below; a nil hook makes that method a no-op.
type testCapability struct {
	// name is returned by Name().
	name string
	// prepare backs PrepareHost.
	prepare func(context.Context, *Daemon, *model.VMRecord, model.Image) error
	// cleanup backs Cleanup.
	cleanup func(context.Context, *Daemon, model.VMRecord) error
	// contribute backs ContributeGuest.
	contribute func(*guestconfig.Builder, model.VMRecord, model.Image)
	// contributeFC backs ContributeMachine.
	contributeFC func(*firecracker.MachineConfig, model.VMRecord, model.Image)
	// configChange backs ApplyConfigChange (before/after record pair).
	configChange func(context.Context, *Daemon, model.VMRecord, model.VMRecord) error
	// doctor backs AddDoctorChecks.
	doctor func(context.Context, *Daemon, *system.Report)
	// startPreflight backs AddStartPreflight.
	startPreflight func(context.Context, *Daemon, *system.Preflight, model.VMRecord, model.Image)
}
func (c testCapability) Name() string { return c.name }
|
|
|
|
func (c testCapability) PrepareHost(ctx context.Context, d *Daemon, vm *model.VMRecord, image model.Image) error {
|
|
if c.prepare != nil {
|
|
return c.prepare(ctx, d, vm, image)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (c testCapability) Cleanup(ctx context.Context, d *Daemon, vm model.VMRecord) error {
|
|
if c.cleanup != nil {
|
|
return c.cleanup(ctx, d, vm)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (c testCapability) ContributeGuest(builder *guestconfig.Builder, vm model.VMRecord, image model.Image) {
|
|
if c.contribute != nil {
|
|
c.contribute(builder, vm, image)
|
|
}
|
|
}
|
|
|
|
func (c testCapability) ContributeMachine(cfg *firecracker.MachineConfig, vm model.VMRecord, image model.Image) {
|
|
if c.contributeFC != nil {
|
|
c.contributeFC(cfg, vm, image)
|
|
}
|
|
}
|
|
|
|
func (c testCapability) ApplyConfigChange(ctx context.Context, d *Daemon, before, after model.VMRecord) error {
|
|
if c.configChange != nil {
|
|
return c.configChange(ctx, d, before, after)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (c testCapability) AddDoctorChecks(ctx context.Context, d *Daemon, report *system.Report) {
|
|
if c.doctor != nil {
|
|
c.doctor(ctx, d, report)
|
|
}
|
|
}
|
|
|
|
func (c testCapability) AddStartPreflight(ctx context.Context, d *Daemon, checks *system.Preflight, vm model.VMRecord, image model.Image) {
|
|
if c.startPreflight != nil {
|
|
c.startPreflight(ctx, d, checks, vm, image)
|
|
}
|
|
}
|
|
|
|
func TestPrepareCapabilityHostsRollsBackPreparedCapabilitiesInReverseOrder(t *testing.T) {
|
|
vm := testVM("devbox", "image", "172.16.0.2")
|
|
var cleanupOrder []string
|
|
|
|
d := &Daemon{
|
|
vmCaps: []vmCapability{
|
|
testCapability{
|
|
name: "first",
|
|
prepare: func(context.Context, *Daemon, *model.VMRecord, model.Image) error {
|
|
return nil
|
|
},
|
|
cleanup: func(context.Context, *Daemon, model.VMRecord) error {
|
|
cleanupOrder = append(cleanupOrder, "first")
|
|
return nil
|
|
},
|
|
},
|
|
testCapability{
|
|
name: "second",
|
|
prepare: func(context.Context, *Daemon, *model.VMRecord, model.Image) error {
|
|
return nil
|
|
},
|
|
cleanup: func(context.Context, *Daemon, model.VMRecord) error {
|
|
cleanupOrder = append(cleanupOrder, "second")
|
|
return nil
|
|
},
|
|
},
|
|
testCapability{
|
|
name: "broken",
|
|
prepare: func(context.Context, *Daemon, *model.VMRecord, model.Image) error {
|
|
return errors.New("boom")
|
|
},
|
|
},
|
|
},
|
|
}
|
|
wireServices(d)
|
|
|
|
err := d.prepareCapabilityHosts(context.Background(), &vm, model.Image{})
|
|
if err == nil || err.Error() != "boom" {
|
|
t.Fatalf("prepareCapabilityHosts() error = %v, want boom", err)
|
|
}
|
|
if !reflect.DeepEqual(cleanupOrder, []string{"second", "first"}) {
|
|
t.Fatalf("cleanup order = %v, want reverse prepared order", cleanupOrder)
|
|
}
|
|
}
|
|
|
|
func TestContributeHooksPopulateGuestAndMachineConfig(t *testing.T) {
|
|
d := &Daemon{
|
|
vmCaps: []vmCapability{
|
|
testCapability{
|
|
name: "guest",
|
|
contribute: func(builder *guestconfig.Builder, _ model.VMRecord, _ model.Image) {
|
|
builder.AddMount(guestconfig.MountSpec{Source: "/dev/vdb", Target: "/work", FSType: "ext4"})
|
|
},
|
|
contributeFC: func(cfg *firecracker.MachineConfig, _ model.VMRecord, _ model.Image) {
|
|
cfg.Drives = append(cfg.Drives, firecracker.DriveConfig{ID: "work", Path: "/tmp/work.ext4"})
|
|
},
|
|
},
|
|
},
|
|
}
|
|
wireServices(d)
|
|
|
|
builder := guestconfig.NewBuilder()
|
|
d.contributeGuestConfig(builder, model.VMRecord{}, model.Image{})
|
|
|
|
cfg := firecracker.MachineConfig{Drives: []firecracker.DriveConfig{{ID: "rootfs", Path: "/dev/root", IsRoot: true}}}
|
|
d.contributeMachineConfig(&cfg, model.VMRecord{}, model.Image{})
|
|
|
|
fstab := builder.RenderFSTab("")
|
|
if !reflect.DeepEqual(cfg.Drives[1], firecracker.DriveConfig{ID: "work", Path: "/tmp/work.ext4"}) {
|
|
t.Fatalf("machine drives = %+v, want contributed work drive", cfg.Drives)
|
|
}
|
|
if want := "/dev/vdb /work ext4 defaults 0 0\n"; fstab != want {
|
|
t.Fatalf("guest fstab = %q, want %q", fstab, want)
|
|
}
|
|
}
|
|
|
|
func TestRegisteredCapabilitiesInOrder(t *testing.T) {
|
|
d := &Daemon{}
|
|
wireServices(d)
|
|
var names []string
|
|
for _, capability := range d.registeredCapabilities() {
|
|
names = append(names, capability.Name())
|
|
}
|
|
want := []string{"work-disk", "dns", "nat"}
|
|
if !reflect.DeepEqual(names, want) {
|
|
t.Fatalf("capabilities = %v, want %v", names, want)
|
|
}
|
|
}
|