Refactor VM lifecycle around capabilities
Make host-integrated VM features fit a standard Go extension path instead of adding more one-off branches through vm.go. This is the enabling refactor for future work like shared mounts, not the /work feature itself. Add a daemon capability pipeline plus a structured guest-config builder, then move the existing /root work-disk mount, built-in DNS, and NAT wiring onto those hooks. Generalize Firecracker drive config at the same time so later storage features can extend machine setup without another hardcoded path. Add banger doctor on top of the shared readiness checks, update the docs to describe the new architecture, and cover the new seams with guest-config, capability, report, CLI, and full go test verification. Also verify make build and a real ./banger doctor run on the host.
This commit is contained in:
parent
9e98445fa2
commit
4930d82cb9
18 changed files with 1120 additions and 105 deletions
|
|
@ -16,6 +16,7 @@ import (
|
|||
|
||||
"banger/internal/api"
|
||||
"banger/internal/config"
|
||||
"banger/internal/daemon"
|
||||
"banger/internal/hostnat"
|
||||
"banger/internal/model"
|
||||
"banger/internal/paths"
|
||||
|
|
@ -31,6 +32,7 @@ var (
|
|||
daemonExePath = func(pid int) string {
|
||||
return filepath.Join("/proc", fmt.Sprintf("%d", pid), "exe")
|
||||
}
|
||||
doctorFunc = daemon.Doctor
|
||||
)
|
||||
|
||||
func NewBangerCommand() *cobra.Command {
|
||||
|
|
@ -42,10 +44,31 @@ func NewBangerCommand() *cobra.Command {
|
|||
RunE: helpNoArgs,
|
||||
}
|
||||
root.CompletionOptions.DisableDefaultCmd = true
|
||||
root.AddCommand(newDaemonCommand(), newVMCommand(), newImageCommand(), newTUICommand(), newInternalCommand())
|
||||
root.AddCommand(newDaemonCommand(), newDoctorCommand(), newVMCommand(), newImageCommand(), newTUICommand(), newInternalCommand())
|
||||
return root
|
||||
}
|
||||
|
||||
func newDoctorCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "doctor",
|
||||
Short: "Check host and runtime readiness",
|
||||
Args: noArgsUsage("usage: banger doctor"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
report, err := doctorFunc(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := printDoctorReport(cmd.OutOrStdout(), report); err != nil {
|
||||
return err
|
||||
}
|
||||
if report.HasFailures() {
|
||||
return errors.New("doctor found failing checks")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newInternalCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "internal",
|
||||
|
|
@ -994,6 +1017,21 @@ func printImageSummary(out anyWriter, image model.Image) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func printDoctorReport(out anyWriter, report system.Report) error {
|
||||
for _, check := range report.Checks {
|
||||
status := strings.ToUpper(string(check.Status))
|
||||
if _, err := fmt.Fprintf(out, "%s\t%s\n", status, check.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, detail := range check.Details {
|
||||
if _, err := fmt.Fprintf(out, " - %s\n", detail); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// anyWriter is a minimal byte sink with the same method set as io.Writer;
// the CLI printers accept it so tests can pass any buffer-like writer.
type anyWriter interface {
	Write(p []byte) (n int, err error)
}
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ package cli
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
|
|
@ -12,6 +13,7 @@ import (
|
|||
|
||||
"banger/internal/api"
|
||||
"banger/internal/model"
|
||||
"banger/internal/system"
|
||||
)
|
||||
|
||||
func TestNewBangerCommandHasExpectedSubcommands(t *testing.T) {
|
||||
|
|
@ -20,12 +22,62 @@ func TestNewBangerCommandHasExpectedSubcommands(t *testing.T) {
|
|||
for _, sub := range cmd.Commands() {
|
||||
names = append(names, sub.Name())
|
||||
}
|
||||
want := []string{"daemon", "image", "internal", "tui", "vm"}
|
||||
want := []string{"daemon", "doctor", "image", "internal", "tui", "vm"}
|
||||
if !reflect.DeepEqual(names, want) {
|
||||
t.Fatalf("subcommands = %v, want %v", names, want)
|
||||
}
|
||||
}
|
||||
|
||||
// TestDoctorCommandPrintsReportAndFailsOnHardFailures stubs doctorFunc with
// a mixed pass/fail report and verifies that `banger doctor` both prints
// every check line and returns an error when any check fails.
func TestDoctorCommandPrintsReportAndFailsOnHardFailures(t *testing.T) {
	// Swap in a fake doctor backend; restore the real one after the test.
	original := doctorFunc
	t.Cleanup(func() {
		doctorFunc = original
	})
	doctorFunc = func(context.Context) (system.Report, error) {
		return system.Report{
			Checks: []system.CheckResult{
				{Name: "runtime bundle", Status: system.CheckStatusPass, Details: []string{"runtime dir /tmp/runtime"}},
				{Name: "feature nat", Status: system.CheckStatusFail, Details: []string{"missing iptables"}},
			},
		}, nil
	}

	cmd := NewBangerCommand()
	var stdout bytes.Buffer
	cmd.SetOut(&stdout)
	cmd.SetErr(&stdout)
	cmd.SetArgs([]string{"doctor"})

	err := cmd.Execute()
	// The failing check must surface as a command error, not just output.
	if err == nil || !strings.Contains(err.Error(), "doctor found failing checks") {
		t.Fatalf("Execute() error = %v, want doctor failure", err)
	}
	output := stdout.String()
	if !strings.Contains(output, "PASS\truntime bundle") {
		t.Fatalf("output = %q, want runtime bundle pass", output)
	}
	if !strings.Contains(output, "FAIL\tfeature nat") {
		t.Fatalf("output = %q, want feature nat fail", output)
	}
}
|
||||
|
||||
func TestDoctorCommandReturnsUnderlyingError(t *testing.T) {
|
||||
original := doctorFunc
|
||||
t.Cleanup(func() {
|
||||
doctorFunc = original
|
||||
})
|
||||
doctorFunc = func(context.Context) (system.Report, error) {
|
||||
return system.Report{}, errors.New("load failed")
|
||||
}
|
||||
|
||||
cmd := NewBangerCommand()
|
||||
cmd.SetArgs([]string{"doctor"})
|
||||
err := cmd.Execute()
|
||||
if err == nil || !strings.Contains(err.Error(), "load failed") {
|
||||
t.Fatalf("Execute() error = %v, want load failed", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInternalNATFlagsExist(t *testing.T) {
|
||||
root := NewBangerCommand()
|
||||
internal, _, err := root.Find([]string{"internal"})
|
||||
|
|
|
|||
272
internal/daemon/capabilities.go
Normal file
272
internal/daemon/capabilities.go
Normal file
|
|
@ -0,0 +1,272 @@
|
|||
package daemon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"banger/internal/firecracker"
|
||||
"banger/internal/guestconfig"
|
||||
"banger/internal/model"
|
||||
"banger/internal/system"
|
||||
"banger/internal/vmdns"
|
||||
)
|
||||
|
||||
// vmCapability is the base interface every VM capability implements; the
// optional hook interfaces below are detected via type assertions, so a
// capability only implements the lifecycle stages it cares about.
type vmCapability interface {
	Name() string
}

// startPreflightCapability adds requirements to the VM-start preflight.
type startPreflightCapability interface {
	AddStartPreflight(context.Context, *Daemon, *system.Preflight, model.VMRecord, model.Image)
}

// guestConfigCapability contributes entries (e.g. mounts) to the guest
// configuration built before boot.
type guestConfigCapability interface {
	ContributeGuest(*guestconfig.Builder, model.VMRecord, model.Image)
}

// machineConfigCapability mutates the Firecracker machine configuration
// (e.g. appends extra drives) before the machine is created.
type machineConfigCapability interface {
	ContributeMachine(*firecracker.MachineConfig, model.VMRecord, model.Image)
}

// prepareHostCapability sets up host-side state before the VM launches;
// it may mutate the VM record (hence the pointer).
type prepareHostCapability interface {
	PrepareHost(context.Context, *Daemon, *model.VMRecord, model.Image) error
}

// postStartCapability runs after the VM is up (e.g. DNS registration, NAT).
type postStartCapability interface {
	PostStart(context.Context, *Daemon, model.VMRecord, model.Image) error
}

// cleanupCapability tears down host-side state for a VM.
type cleanupCapability interface {
	Cleanup(context.Context, *Daemon, model.VMRecord) error
}

// configChangeCapability reacts to a VM record changing; it receives the
// record before and after the change.
type configChangeCapability interface {
	ApplyConfigChange(context.Context, *Daemon, model.VMRecord, model.VMRecord) error
}

// doctorCapability appends capability-specific checks to a doctor report.
type doctorCapability interface {
	AddDoctorChecks(context.Context, *Daemon, *system.Report)
}
|
||||
|
||||
func (d *Daemon) registeredCapabilities() []vmCapability {
|
||||
if len(d.vmCaps) > 0 {
|
||||
return d.vmCaps
|
||||
}
|
||||
return []vmCapability{
|
||||
workDiskCapability{},
|
||||
dnsCapability{},
|
||||
natCapability{},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Daemon) addCapabilityStartPrereqs(ctx context.Context, checks *system.Preflight, vm model.VMRecord, image model.Image) {
|
||||
for _, capability := range d.registeredCapabilities() {
|
||||
if hook, ok := capability.(startPreflightCapability); ok {
|
||||
hook.AddStartPreflight(ctx, d, checks, vm, image)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Daemon) contributeGuestConfig(builder *guestconfig.Builder, vm model.VMRecord, image model.Image) {
|
||||
for _, capability := range d.registeredCapabilities() {
|
||||
if hook, ok := capability.(guestConfigCapability); ok {
|
||||
hook.ContributeGuest(builder, vm, image)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Daemon) contributeMachineConfig(cfg *firecracker.MachineConfig, vm model.VMRecord, image model.Image) {
|
||||
for _, capability := range d.registeredCapabilities() {
|
||||
if hook, ok := capability.(machineConfigCapability); ok {
|
||||
hook.ContributeMachine(cfg, vm, image)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Daemon) prepareCapabilityHosts(ctx context.Context, vm *model.VMRecord, image model.Image) error {
|
||||
prepared := make([]vmCapability, 0, len(d.registeredCapabilities()))
|
||||
for _, capability := range d.registeredCapabilities() {
|
||||
hook, ok := capability.(prepareHostCapability)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if err := hook.PrepareHost(ctx, d, vm, image); err != nil {
|
||||
d.cleanupPreparedCapabilities(context.Background(), vm, prepared)
|
||||
return err
|
||||
}
|
||||
prepared = append(prepared, capability)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Daemon) postStartCapabilities(ctx context.Context, vm model.VMRecord, image model.Image) error {
|
||||
for _, capability := range d.registeredCapabilities() {
|
||||
if hook, ok := capability.(postStartCapability); ok {
|
||||
if err := hook.PostStart(ctx, d, vm, image); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanupCapabilityState tears down host-side state across the full
// registered capability set (not just capabilities prepared by this
// process), collecting all cleanup errors.
func (d *Daemon) cleanupCapabilityState(ctx context.Context, vm model.VMRecord) error {
	return d.cleanupPreparedCapabilities(ctx, &vm, d.registeredCapabilities())
}
|
||||
|
||||
func (d *Daemon) cleanupPreparedCapabilities(ctx context.Context, vm *model.VMRecord, capabilities []vmCapability) error {
|
||||
var err error
|
||||
for index := len(capabilities) - 1; index >= 0; index-- {
|
||||
hook, ok := capabilities[index].(cleanupCapability)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
err = joinErr(err, hook.Cleanup(ctx, d, *vm))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Daemon) applyCapabilityConfigChanges(ctx context.Context, before, after model.VMRecord) error {
|
||||
for _, capability := range d.registeredCapabilities() {
|
||||
if hook, ok := capability.(configChangeCapability); ok {
|
||||
if err := hook.ApplyConfigChange(ctx, d, before, after); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Daemon) addCapabilityDoctorChecks(ctx context.Context, report *system.Report) {
|
||||
for _, capability := range d.registeredCapabilities() {
|
||||
if hook, ok := capability.(doctorCapability); ok {
|
||||
hook.AddDoctorChecks(ctx, d, report)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type workDiskCapability struct{}
|
||||
|
||||
func (workDiskCapability) Name() string { return "work-disk" }
|
||||
|
||||
func (workDiskCapability) AddStartPreflight(_ context.Context, _ *Daemon, checks *system.Preflight, vm model.VMRecord, _ model.Image) {
|
||||
if exists(vm.Runtime.WorkDiskPath) {
|
||||
return
|
||||
}
|
||||
for _, command := range []string{"mkfs.ext4", "mount", "umount", "cp"} {
|
||||
checks.RequireCommand(command, toolHint(command))
|
||||
}
|
||||
}
|
||||
|
||||
func (workDiskCapability) ContributeGuest(builder *guestconfig.Builder, _ model.VMRecord, _ model.Image) {
|
||||
builder.AddMount(guestconfig.MountSpec{
|
||||
Source: "/dev/vdb",
|
||||
Target: "/root",
|
||||
FSType: "ext4",
|
||||
Options: []string{"defaults"},
|
||||
Dump: 0,
|
||||
Pass: 2,
|
||||
})
|
||||
}
|
||||
|
||||
func (workDiskCapability) ContributeMachine(cfg *firecracker.MachineConfig, vm model.VMRecord, _ model.Image) {
|
||||
cfg.Drives = append(cfg.Drives, firecracker.DriveConfig{
|
||||
ID: "work",
|
||||
Path: vm.Runtime.WorkDiskPath,
|
||||
ReadOnly: false,
|
||||
})
|
||||
}
|
||||
|
||||
func (workDiskCapability) PrepareHost(ctx context.Context, d *Daemon, vm *model.VMRecord, _ model.Image) error {
|
||||
return d.ensureWorkDisk(ctx, vm)
|
||||
}
|
||||
|
||||
func (workDiskCapability) AddDoctorChecks(_ context.Context, _ *Daemon, report *system.Report) {
|
||||
checks := system.NewPreflight()
|
||||
for _, command := range []string{"mkfs.ext4", "mount", "umount", "cp"} {
|
||||
checks.RequireCommand(command, toolHint(command))
|
||||
}
|
||||
report.AddPreflight("feature /root work disk", checks, "guest /root work disk tooling available")
|
||||
}
|
||||
|
||||
// dnsCapability registers and removes per-VM records with the daemon's
// built-in DNS server.
type dnsCapability struct{}

func (dnsCapability) Name() string { return "dns" }

// PostStart maps the VM's name to its guest IP once the VM is up.
func (dnsCapability) PostStart(ctx context.Context, d *Daemon, vm model.VMRecord, _ model.Image) error {
	return d.setDNS(ctx, vm.Name, vm.Runtime.GuestIP)
}

// Cleanup removes the VM's DNS record.
func (dnsCapability) Cleanup(ctx context.Context, d *Daemon, vm model.VMRecord) error {
	return d.removeDNS(ctx, vm.Runtime.DNSName)
}

// AddDoctorChecks probes whether the DNS listener address can be bound.
// "address already in use" is downgraded to a warning (something is already
// listening there); any other bind error is a hard failure.
func (dnsCapability) AddDoctorChecks(_ context.Context, _ *Daemon, report *system.Report) {
	conn, err := net.ListenPacket("udp", vmdns.DefaultListenAddr)
	if err != nil {
		// String match on the error text; no portable sentinel is used here.
		if strings.Contains(strings.ToLower(err.Error()), "address already in use") {
			report.AddWarn("feature vm dns", "listener address "+vmdns.DefaultListenAddr+" is already in use")
			return
		}
		report.AddFail("feature vm dns", "cannot bind "+vmdns.DefaultListenAddr+": "+err.Error())
		return
	}
	// Probe only: release the port immediately; close errors are irrelevant.
	_ = conn.Close()
	report.AddPass("feature vm dns", "listener can bind "+vmdns.DefaultListenAddr)
}
|
||||
|
||||
// natCapability manages host NAT wiring for VMs that have NAT enabled.
// Every hook is a no-op for VMs with NAT disabled.
type natCapability struct{}

func (natCapability) Name() string { return "nat" }

// AddStartPreflight requires the NAT tooling only when the VM wants NAT.
func (natCapability) AddStartPreflight(ctx context.Context, d *Daemon, checks *system.Preflight, vm model.VMRecord, _ model.Image) {
	if !vm.Spec.NATEnabled {
		return
	}
	d.addNATPrereqs(ctx, checks)
}

// PostStart enables NAT for the VM after boot.
func (natCapability) PostStart(ctx context.Context, d *Daemon, vm model.VMRecord, _ model.Image) error {
	if !vm.Spec.NATEnabled {
		return nil
	}
	return d.ensureNAT(ctx, vm, true)
}

// Cleanup tears NAT down for the VM.
func (natCapability) Cleanup(ctx context.Context, d *Daemon, vm model.VMRecord) error {
	if !vm.Spec.NATEnabled {
		return nil
	}
	return d.ensureNAT(ctx, vm, false)
}

// ApplyConfigChange toggles NAT when the NATEnabled flag flips, but only
// for a VM that is currently running with a live Firecracker process;
// stopped VMs pick up the new setting at next start.
func (natCapability) ApplyConfigChange(ctx context.Context, d *Daemon, before, after model.VMRecord) error {
	if before.Spec.NATEnabled == after.Spec.NATEnabled {
		return nil
	}
	if after.State != model.VMStateRunning || !system.ProcessRunning(after.Runtime.PID, after.Runtime.APISockPath) {
		return nil
	}
	return d.ensureNAT(ctx, after, after.Spec.NATEnabled)
}

// AddDoctorChecks fails the NAT feature check when tooling is missing or
// no default uplink can be resolved, and passes otherwise.
func (natCapability) AddDoctorChecks(ctx context.Context, d *Daemon, report *system.Report) {
	checks := system.NewPreflight()
	checks.RequireCommand("ip", toolHint("ip"))
	d.addNATPrereqs(ctx, checks)
	if len(checks.Problems()) > 0 {
		report.Add(system.CheckStatusFail, "feature nat", checks.Problems()...)
		return
	}
	uplink, err := d.defaultUplink(ctx)
	if err != nil {
		report.AddFail("feature nat", err.Error())
		return
	}
	report.AddPass("feature nat", "iptables/sysctl available, uplink "+uplink)
}
|
||||
|
||||
func joinErr(current, next error) error {
|
||||
return errors.Join(current, next)
|
||||
}
|
||||
145
internal/daemon/capabilities_test.go
Normal file
145
internal/daemon/capabilities_test.go
Normal file
|
|
@ -0,0 +1,145 @@
|
|||
package daemon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"banger/internal/firecracker"
|
||||
"banger/internal/guestconfig"
|
||||
"banger/internal/model"
|
||||
"banger/internal/system"
|
||||
)
|
||||
|
||||
// testCapability is a configurable fake capability for tests: each hook
// delegates to the matching function field when set and is a no-op (or
// returns nil) otherwise, so a test can exercise one lifecycle seam at a
// time without defining a full capability type.
type testCapability struct {
	name           string
	prepare        func(context.Context, *Daemon, *model.VMRecord, model.Image) error
	cleanup        func(context.Context, *Daemon, model.VMRecord) error
	contribute     func(*guestconfig.Builder, model.VMRecord, model.Image)
	contributeFC   func(*firecracker.MachineConfig, model.VMRecord, model.Image)
	configChange   func(context.Context, *Daemon, model.VMRecord, model.VMRecord) error
	doctor         func(context.Context, *Daemon, *system.Report)
	startPreflight func(context.Context, *Daemon, *system.Preflight, model.VMRecord, model.Image)
}

func (c testCapability) Name() string { return c.name }

// PrepareHost delegates to c.prepare when set.
func (c testCapability) PrepareHost(ctx context.Context, d *Daemon, vm *model.VMRecord, image model.Image) error {
	if c.prepare != nil {
		return c.prepare(ctx, d, vm, image)
	}
	return nil
}

// Cleanup delegates to c.cleanup when set.
func (c testCapability) Cleanup(ctx context.Context, d *Daemon, vm model.VMRecord) error {
	if c.cleanup != nil {
		return c.cleanup(ctx, d, vm)
	}
	return nil
}

// ContributeGuest delegates to c.contribute when set.
func (c testCapability) ContributeGuest(builder *guestconfig.Builder, vm model.VMRecord, image model.Image) {
	if c.contribute != nil {
		c.contribute(builder, vm, image)
	}
}

// ContributeMachine delegates to c.contributeFC when set.
func (c testCapability) ContributeMachine(cfg *firecracker.MachineConfig, vm model.VMRecord, image model.Image) {
	if c.contributeFC != nil {
		c.contributeFC(cfg, vm, image)
	}
}

// ApplyConfigChange delegates to c.configChange when set.
func (c testCapability) ApplyConfigChange(ctx context.Context, d *Daemon, before, after model.VMRecord) error {
	if c.configChange != nil {
		return c.configChange(ctx, d, before, after)
	}
	return nil
}

// AddDoctorChecks delegates to c.doctor when set.
func (c testCapability) AddDoctorChecks(ctx context.Context, d *Daemon, report *system.Report) {
	if c.doctor != nil {
		c.doctor(ctx, d, report)
	}
}

// AddStartPreflight delegates to c.startPreflight when set.
func (c testCapability) AddStartPreflight(ctx context.Context, d *Daemon, checks *system.Preflight, vm model.VMRecord, image model.Image) {
	if c.startPreflight != nil {
		c.startPreflight(ctx, d, checks, vm, image)
	}
}
|
||||
|
||||
// TestPrepareCapabilityHostsRollsBackPreparedCapabilitiesInReverseOrder
// registers two capabilities that prepare successfully followed by one that
// fails, and asserts that the prepare error propagates and that only the
// successfully prepared capabilities are cleaned up, in reverse order.
func TestPrepareCapabilityHostsRollsBackPreparedCapabilitiesInReverseOrder(t *testing.T) {
	vm := testVM("devbox", "image", "172.16.0.2")
	var cleanupOrder []string

	d := &Daemon{
		vmCaps: []vmCapability{
			testCapability{
				name: "first",
				prepare: func(context.Context, *Daemon, *model.VMRecord, model.Image) error {
					return nil
				},
				cleanup: func(context.Context, *Daemon, model.VMRecord) error {
					cleanupOrder = append(cleanupOrder, "first")
					return nil
				},
			},
			testCapability{
				name: "second",
				prepare: func(context.Context, *Daemon, *model.VMRecord, model.Image) error {
					return nil
				},
				cleanup: func(context.Context, *Daemon, model.VMRecord) error {
					cleanupOrder = append(cleanupOrder, "second")
					return nil
				},
			},
			testCapability{
				name: "broken",
				prepare: func(context.Context, *Daemon, *model.VMRecord, model.Image) error {
					return errors.New("boom")
				},
			},
		},
	}

	err := d.prepareCapabilityHosts(context.Background(), &vm, model.Image{})
	if err == nil || err.Error() != "boom" {
		t.Fatalf("prepareCapabilityHosts() error = %v, want boom", err)
	}
	// "broken" never prepared, so it must not be cleaned up at all.
	if !reflect.DeepEqual(cleanupOrder, []string{"second", "first"}) {
		t.Fatalf("cleanup order = %v, want reverse prepared order", cleanupOrder)
	}
}
|
||||
|
||||
// TestContributeHooksPopulateGuestAndMachineConfig wires a capability that
// adds a /work mount and a "work" drive, then asserts both contributions
// land: the drive is appended after the existing rootfs drive and the
// rendered fstab contains the mount line.
func TestContributeHooksPopulateGuestAndMachineConfig(t *testing.T) {
	d := &Daemon{
		vmCaps: []vmCapability{
			testCapability{
				name: "guest",
				contribute: func(builder *guestconfig.Builder, _ model.VMRecord, _ model.Image) {
					builder.AddMount(guestconfig.MountSpec{Source: "/dev/vdb", Target: "/work", FSType: "ext4"})
				},
				contributeFC: func(cfg *firecracker.MachineConfig, _ model.VMRecord, _ model.Image) {
					cfg.Drives = append(cfg.Drives, firecracker.DriveConfig{ID: "work", Path: "/tmp/work.ext4"})
				},
			},
		},
	}

	builder := guestconfig.NewBuilder()
	d.contributeGuestConfig(builder, model.VMRecord{}, model.Image{})

	// Seed the machine config with an existing root drive to confirm
	// contributions append rather than replace.
	cfg := firecracker.MachineConfig{Drives: []firecracker.DriveConfig{{ID: "rootfs", Path: "/dev/root", IsRoot: true}}}
	d.contributeMachineConfig(&cfg, model.VMRecord{}, model.Image{})

	fstab := builder.RenderFSTab("")
	if !reflect.DeepEqual(cfg.Drives[1], firecracker.DriveConfig{ID: "work", Path: "/tmp/work.ext4"}) {
		t.Fatalf("machine drives = %+v, want contributed work drive", cfg.Drives)
	}
	if want := "/dev/vdb /work ext4 defaults 0 0\n"; fstab != want {
		t.Fatalf("guest fstab = %q, want %q", fstab, want)
	}
}
|
||||
|
|
@ -39,6 +39,7 @@ type Daemon struct {
|
|||
pid int
|
||||
listener net.Listener
|
||||
vmDNS *vmdns.Server
|
||||
vmCaps []vmCapability
|
||||
imageBuild func(context.Context, imageBuildSpec) error
|
||||
requestHandler func(context.Context, rpc.Request) rpc.Response
|
||||
}
|
||||
|
|
|
|||
92
internal/daemon/doctor.go
Normal file
92
internal/daemon/doctor.go
Normal file
|
|
@ -0,0 +1,92 @@
|
|||
package daemon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"banger/internal/config"
|
||||
"banger/internal/model"
|
||||
"banger/internal/paths"
|
||||
"banger/internal/system"
|
||||
)
|
||||
|
||||
func Doctor(ctx context.Context) (system.Report, error) {
|
||||
layout, err := paths.Resolve()
|
||||
if err != nil {
|
||||
return system.Report{}, err
|
||||
}
|
||||
cfg, err := config.Load(layout)
|
||||
if err != nil {
|
||||
return system.Report{}, err
|
||||
}
|
||||
d := &Daemon{
|
||||
layout: layout,
|
||||
config: cfg,
|
||||
runner: system.NewRunner(),
|
||||
}
|
||||
return d.doctorReport(ctx), nil
|
||||
}
|
||||
|
||||
// doctorReport assembles the full readiness report: runtime bundle files,
// core VM lifecycle tooling, per-capability checks, then image-build
// prerequisites. The call order here determines report output order.
func (d *Daemon) doctorReport(ctx context.Context) system.Report {
	report := system.Report{}

	report.AddPreflight("runtime bundle", d.runtimeBundleChecks(), runtimeBundleStatus(d.config))
	report.AddPreflight("core vm lifecycle", d.coreVMLifecycleChecks(), "required host tools available")
	d.addCapabilityDoctorChecks(ctx, &report)
	report.AddPreflight("image build", d.imageBuildChecks(ctx), "image build prerequisites available")

	return report
}
|
||||
|
||||
func (d *Daemon) runtimeBundleChecks() *system.Preflight {
|
||||
checks := system.NewPreflight()
|
||||
hint := paths.RuntimeBundleHint()
|
||||
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
|
||||
checks.RequireFile(d.config.SSHKeyPath, "ssh private key", `set "ssh_key_path" or refresh the runtime bundle`)
|
||||
checks.RequireFile(d.config.DefaultRootfs, "default rootfs image", `set "default_rootfs" or refresh the runtime bundle`)
|
||||
checks.RequireFile(d.config.DefaultKernel, "kernel image", `set "default_kernel" or refresh the runtime bundle`)
|
||||
if strings.TrimSpace(d.config.DefaultInitrd) != "" {
|
||||
checks.RequireFile(d.config.DefaultInitrd, "initrd image", `set "default_initrd" or refresh the runtime bundle`)
|
||||
}
|
||||
if strings.TrimSpace(d.config.DefaultPackagesFile) != "" {
|
||||
checks.RequireFile(d.config.DefaultPackagesFile, "package manifest", `set "default_packages_file" or refresh the runtime bundle`)
|
||||
}
|
||||
return checks
|
||||
}
|
||||
|
||||
func (d *Daemon) coreVMLifecycleChecks() *system.Preflight {
|
||||
checks := system.NewPreflight()
|
||||
d.addBaseStartCommandPrereqs(checks)
|
||||
return checks
|
||||
}
|
||||
|
||||
func (d *Daemon) imageBuildChecks(ctx context.Context) *system.Preflight {
|
||||
checks := system.NewPreflight()
|
||||
d.addImageBuildPrereqs(
|
||||
ctx,
|
||||
checks,
|
||||
firstNonEmpty(d.config.DefaultBaseRootfs, d.config.DefaultRootfs),
|
||||
d.config.DefaultKernel,
|
||||
d.config.DefaultInitrd,
|
||||
d.config.DefaultModulesDir,
|
||||
"",
|
||||
)
|
||||
return checks
|
||||
}
|
||||
|
||||
func runtimeBundleStatus(cfg model.DaemonConfig) string {
|
||||
if strings.TrimSpace(cfg.RuntimeDir) == "" {
|
||||
return "runtime dir not configured"
|
||||
}
|
||||
return fmt.Sprintf("runtime dir %s", cfg.RuntimeDir)
|
||||
}
|
||||
|
||||
// firstNonEmpty returns the first value that is non-blank after trimming
// whitespace (the original, untrimmed value is returned), or "" when every
// value is blank.
func firstNonEmpty(values ...string) string {
	for _, candidate := range values {
		if strings.TrimSpace(candidate) == "" {
			continue
		}
		return candidate
	}
	return ""
}
|
||||
|
|
@ -185,11 +185,16 @@ func (d *Daemon) startImageBuildVM(ctx context.Context, spec imageBuildSpec) (im
|
|||
KernelImagePath: spec.KernelPath,
|
||||
InitrdPath: spec.InitrdPath,
|
||||
KernelArgs: system.BuildBootArgs(vm.Name, vm.GuestIP, d.config.BridgeIP, d.config.DefaultDNS),
|
||||
RootDrivePath: spec.RootfsPath,
|
||||
TapDevice: vm.TapDevice,
|
||||
VCPUCount: model.DefaultVCPUCount,
|
||||
MemoryMiB: model.DefaultMemoryMiB,
|
||||
Logger: d.logger,
|
||||
Drives: []firecracker.DriveConfig{{
|
||||
ID: "rootfs",
|
||||
Path: spec.RootfsPath,
|
||||
ReadOnly: false,
|
||||
IsRoot: true,
|
||||
}},
|
||||
TapDevice: vm.TapDevice,
|
||||
VCPUCount: model.DefaultVCPUCount,
|
||||
MemoryMiB: model.DefaultMemoryMiB,
|
||||
Logger: d.logger,
|
||||
})
|
||||
if err != nil {
|
||||
_ = hostnat.Ensure(ctx, d.runner, vm.GuestIP, vm.TapDevice, false)
|
||||
|
|
|
|||
|
|
@ -11,56 +11,14 @@ import (
|
|||
|
||||
func (d *Daemon) validateStartPrereqs(ctx context.Context, vm model.VMRecord, image model.Image) error {
|
||||
checks := system.NewPreflight()
|
||||
hint := paths.RuntimeBundleHint()
|
||||
|
||||
for _, command := range []string{"sudo", "ip", "dmsetup", "losetup", "blockdev", "truncate", "pgrep", "chown", "chmod", "kill", "e2cp", "e2rm", "debugfs"} {
|
||||
checks.RequireCommand(command, toolHint(command))
|
||||
}
|
||||
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
|
||||
checks.RequireFile(image.RootfsPath, "rootfs image", "select a valid image or rebuild the runtime bundle")
|
||||
checks.RequireFile(image.KernelPath, "kernel image", `set "default_kernel" or refresh the runtime bundle`)
|
||||
if strings.TrimSpace(image.InitrdPath) != "" {
|
||||
checks.RequireFile(image.InitrdPath, "initrd image", `set "default_initrd" or refresh the runtime bundle`)
|
||||
}
|
||||
if !exists(vm.Runtime.WorkDiskPath) {
|
||||
for _, command := range []string{"mkfs.ext4", "mount", "umount", "cp"} {
|
||||
checks.RequireCommand(command, toolHint(command))
|
||||
}
|
||||
}
|
||||
if vm.Spec.NATEnabled {
|
||||
d.addNATPrereqs(ctx, checks)
|
||||
}
|
||||
d.addBaseStartPrereqs(checks, image)
|
||||
d.addCapabilityStartPrereqs(ctx, checks, vm, image)
|
||||
return checks.Err("vm start preflight failed")
|
||||
}
|
||||
|
||||
func (d *Daemon) validateImageBuildPrereqs(ctx context.Context, baseRootfs, kernelPath, initrdPath, modulesDir, sizeSpec string) error {
|
||||
checks := system.NewPreflight()
|
||||
hint := paths.RuntimeBundleHint()
|
||||
|
||||
for _, command := range []string{"sudo", "ip", "pgrep", "chown", "chmod", "kill"} {
|
||||
checks.RequireCommand(command, toolHint(command))
|
||||
}
|
||||
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
|
||||
checks.RequireFile(d.config.SSHKeyPath, "ssh private key", `set "ssh_key_path" or refresh the runtime bundle`)
|
||||
checks.RequireFile(baseRootfs, "base rootfs image", `pass --base-rootfs or set "default_base_rootfs"`)
|
||||
checks.RequireFile(kernelPath, "kernel image", `pass --kernel or set "default_kernel"`)
|
||||
checks.RequireFile(d.config.DefaultPackagesFile, "package manifest", `set "default_packages_file" or refresh the runtime bundle`)
|
||||
if strings.TrimSpace(initrdPath) != "" {
|
||||
checks.RequireFile(initrdPath, "initrd image", `pass --initrd or set "default_initrd"`)
|
||||
}
|
||||
if strings.TrimSpace(modulesDir) != "" {
|
||||
checks.RequireDir(modulesDir, "modules directory", `pass --modules or set "default_modules_dir"`)
|
||||
}
|
||||
if strings.TrimSpace(d.config.DefaultPackagesFile) != "" {
|
||||
if _, err := system.ReadNormalizedLines(d.config.DefaultPackagesFile); err != nil {
|
||||
checks.Addf("package manifest at %s is invalid: %v", d.config.DefaultPackagesFile, err)
|
||||
}
|
||||
}
|
||||
if strings.TrimSpace(sizeSpec) != "" {
|
||||
checks.RequireCommand("e2fsck", toolHint("e2fsck"))
|
||||
checks.RequireCommand("resize2fs", toolHint("resize2fs"))
|
||||
}
|
||||
d.addNATPrereqs(ctx, checks)
|
||||
d.addImageBuildPrereqs(ctx, checks, baseRootfs, kernelPath, initrdPath, modulesDir, sizeSpec)
|
||||
return checks.Err("image build preflight failed")
|
||||
}
|
||||
|
||||
|
|
@ -89,6 +47,53 @@ func (d *Daemon) addNATPrereqs(ctx context.Context, checks *system.Preflight) {
|
|||
}
|
||||
}
|
||||
|
||||
func (d *Daemon) addBaseStartPrereqs(checks *system.Preflight, image model.Image) {
|
||||
hint := paths.RuntimeBundleHint()
|
||||
|
||||
d.addBaseStartCommandPrereqs(checks)
|
||||
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
|
||||
checks.RequireFile(image.RootfsPath, "rootfs image", "select a valid image or rebuild the runtime bundle")
|
||||
checks.RequireFile(image.KernelPath, "kernel image", `set "default_kernel" or refresh the runtime bundle`)
|
||||
if strings.TrimSpace(image.InitrdPath) != "" {
|
||||
checks.RequireFile(image.InitrdPath, "initrd image", `set "default_initrd" or refresh the runtime bundle`)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Daemon) addBaseStartCommandPrereqs(checks *system.Preflight) {
|
||||
for _, command := range []string{"sudo", "ip", "dmsetup", "losetup", "blockdev", "truncate", "pgrep", "chown", "chmod", "kill", "e2cp", "e2rm", "debugfs"} {
|
||||
checks.RequireCommand(command, toolHint(command))
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Daemon) addImageBuildPrereqs(ctx context.Context, checks *system.Preflight, baseRootfs, kernelPath, initrdPath, modulesDir, sizeSpec string) {
|
||||
hint := paths.RuntimeBundleHint()
|
||||
|
||||
for _, command := range []string{"sudo", "ip", "pgrep", "chown", "chmod", "kill"} {
|
||||
checks.RequireCommand(command, toolHint(command))
|
||||
}
|
||||
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
|
||||
checks.RequireFile(d.config.SSHKeyPath, "ssh private key", `set "ssh_key_path" or refresh the runtime bundle`)
|
||||
checks.RequireFile(baseRootfs, "base rootfs image", `pass --base-rootfs or set "default_base_rootfs"`)
|
||||
checks.RequireFile(kernelPath, "kernel image", `pass --kernel or set "default_kernel"`)
|
||||
checks.RequireFile(d.config.DefaultPackagesFile, "package manifest", `set "default_packages_file" or refresh the runtime bundle`)
|
||||
if strings.TrimSpace(initrdPath) != "" {
|
||||
checks.RequireFile(initrdPath, "initrd image", `pass --initrd or set "default_initrd"`)
|
||||
}
|
||||
if strings.TrimSpace(modulesDir) != "" {
|
||||
checks.RequireDir(modulesDir, "modules directory", `pass --modules or set "default_modules_dir"`)
|
||||
}
|
||||
if strings.TrimSpace(d.config.DefaultPackagesFile) != "" {
|
||||
if _, err := system.ReadNormalizedLines(d.config.DefaultPackagesFile); err != nil {
|
||||
checks.Addf("package manifest at %s is invalid: %v", d.config.DefaultPackagesFile, err)
|
||||
}
|
||||
}
|
||||
if strings.TrimSpace(sizeSpec) != "" {
|
||||
checks.RequireCommand("e2fsck", toolHint("e2fsck"))
|
||||
checks.RequireCommand("resize2fs", toolHint("resize2fs"))
|
||||
}
|
||||
d.addNATPrereqs(ctx, checks)
|
||||
}
|
||||
|
||||
func toolHint(command string) string {
|
||||
switch command {
|
||||
case "ip":
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ import (
|
|||
|
||||
"banger/internal/api"
|
||||
"banger/internal/firecracker"
|
||||
"banger/internal/guestconfig"
|
||||
"banger/internal/model"
|
||||
"banger/internal/paths"
|
||||
"banger/internal/system"
|
||||
|
|
@ -223,8 +224,8 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
|
|||
if err := d.patchRootOverlay(ctx, vm, image); err != nil {
|
||||
return cleanupOnErr(err)
|
||||
}
|
||||
op.stage("work_disk", "work_disk_path", vm.Runtime.WorkDiskPath)
|
||||
if err := d.ensureWorkDisk(ctx, &vm); err != nil {
|
||||
op.stage("prepare_host_features")
|
||||
if err := d.prepareCapabilityHosts(ctx, &vm, image); err != nil {
|
||||
return cleanupOnErr(err)
|
||||
}
|
||||
op.stage("tap", "tap_device", tap)
|
||||
|
|
@ -243,7 +244,7 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
|
|||
}
|
||||
op.stage("firecracker_launch", "log_path", vm.Runtime.LogPath, "metrics_path", vm.Runtime.MetricsPath)
|
||||
firecrackerCtx := context.Background()
|
||||
machine, err := firecracker.NewMachine(firecrackerCtx, firecracker.MachineConfig{
|
||||
machineConfig := firecracker.MachineConfig{
|
||||
BinaryPath: fcPath,
|
||||
VMID: vm.ID,
|
||||
SocketPath: apiSock,
|
||||
|
|
@ -252,13 +253,19 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
|
|||
KernelImagePath: image.KernelPath,
|
||||
InitrdPath: image.InitrdPath,
|
||||
KernelArgs: system.BuildBootArgs(vm.Name, vm.Runtime.GuestIP, d.config.BridgeIP, d.config.DefaultDNS),
|
||||
RootDrivePath: vm.Runtime.DMDev,
|
||||
WorkDrivePath: vm.Runtime.WorkDiskPath,
|
||||
TapDevice: tap,
|
||||
VCPUCount: vm.Spec.VCPUCount,
|
||||
MemoryMiB: vm.Spec.MemoryMiB,
|
||||
Logger: d.logger,
|
||||
})
|
||||
Drives: []firecracker.DriveConfig{{
|
||||
ID: "rootfs",
|
||||
Path: vm.Runtime.DMDev,
|
||||
ReadOnly: false,
|
||||
IsRoot: true,
|
||||
}},
|
||||
TapDevice: tap,
|
||||
VCPUCount: vm.Spec.VCPUCount,
|
||||
MemoryMiB: vm.Spec.MemoryMiB,
|
||||
Logger: d.logger,
|
||||
}
|
||||
d.contributeMachineConfig(&machineConfig, vm, image)
|
||||
machine, err := firecracker.NewMachine(firecrackerCtx, machineConfig)
|
||||
if err != nil {
|
||||
return cleanupOnErr(err)
|
||||
}
|
||||
|
|
@ -272,16 +279,10 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
|
|||
if err := d.ensureSocketAccess(ctx, apiSock); err != nil {
|
||||
return cleanupOnErr(err)
|
||||
}
|
||||
op.stage("dns", "dns_name", vm.Runtime.DNSName)
|
||||
if err := d.setDNS(ctx, vm.Name, vm.Runtime.GuestIP); err != nil {
|
||||
op.stage("post_start_features")
|
||||
if err := d.postStartCapabilities(ctx, vm, image); err != nil {
|
||||
return cleanupOnErr(err)
|
||||
}
|
||||
if vm.Spec.NATEnabled {
|
||||
op.stage("nat")
|
||||
if err := d.ensureNAT(ctx, vm, true); err != nil {
|
||||
return cleanupOnErr(err)
|
||||
}
|
||||
}
|
||||
system.TouchNow(&vm)
|
||||
op.stage("persist")
|
||||
if err := d.store.UpsertVM(ctx, vm); err != nil {
|
||||
|
|
@ -455,12 +456,6 @@ func (d *Daemon) deleteVMLocked(ctx context.Context, current model.VMRecord) (vm
|
|||
if err := d.cleanupRuntime(ctx, vm, false); err != nil {
|
||||
return model.VMRecord{}, err
|
||||
}
|
||||
if vm.Spec.NATEnabled {
|
||||
op.debugStage("disable_nat")
|
||||
_ = d.ensureNAT(ctx, vm, false)
|
||||
}
|
||||
op.debugStage("remove_dns", "dns_name", vm.Runtime.DNSName)
|
||||
_ = d.removeDNS(ctx, vm.Runtime.DNSName)
|
||||
op.stage("delete_store_record")
|
||||
if err := d.store.DeleteVM(ctx, vm.ID); err != nil {
|
||||
return model.VMRecord{}, err
|
||||
|
|
@ -538,10 +533,10 @@ func (d *Daemon) setVMLocked(ctx context.Context, current model.VMRecord, params
|
|||
if params.NATEnabled != nil {
|
||||
op.stage("update_nat", "nat_enabled", *params.NATEnabled)
|
||||
vm.Spec.NATEnabled = *params.NATEnabled
|
||||
if running {
|
||||
if err := d.ensureNAT(ctx, vm, *params.NATEnabled); err != nil {
|
||||
return model.VMRecord{}, err
|
||||
}
|
||||
}
|
||||
if running {
|
||||
if err := d.applyCapabilityConfigChanges(ctx, current, vm); err != nil {
|
||||
return model.VMRecord{}, err
|
||||
}
|
||||
}
|
||||
system.TouchNow(&vm)
|
||||
|
|
@ -684,14 +679,34 @@ func (d *Daemon) patchRootOverlay(ctx context.Context, vm model.VMRecord, image
|
|||
if err != nil {
|
||||
fstab = ""
|
||||
}
|
||||
newFSTab := system.UpdateFSTab(fstab)
|
||||
for guestPath, data := range map[string][]byte{
|
||||
"/etc/resolv.conf": resolv,
|
||||
"/etc/hostname": hostname,
|
||||
"/etc/hosts": hosts,
|
||||
"/etc/fstab": []byte(newFSTab),
|
||||
"/etc/ssh/sshd_config.d/99-banger.conf": sshdConfig,
|
||||
} {
|
||||
builder := guestconfig.NewBuilder()
|
||||
builder.WriteFile("/etc/resolv.conf", resolv)
|
||||
builder.WriteFile("/etc/hostname", hostname)
|
||||
builder.WriteFile("/etc/hosts", hosts)
|
||||
builder.WriteFile("/etc/ssh/sshd_config.d/99-banger.conf", sshdConfig)
|
||||
builder.DropMountTarget("/home")
|
||||
builder.DropMountTarget("/var")
|
||||
builder.AddMount(guestconfig.MountSpec{
|
||||
Source: "tmpfs",
|
||||
Target: "/run",
|
||||
FSType: "tmpfs",
|
||||
Options: []string{"defaults", "nodev", "nosuid", "mode=0755"},
|
||||
Dump: 0,
|
||||
Pass: 0,
|
||||
})
|
||||
builder.AddMount(guestconfig.MountSpec{
|
||||
Source: "tmpfs",
|
||||
Target: "/tmp",
|
||||
FSType: "tmpfs",
|
||||
Options: []string{"defaults", "nodev", "nosuid", "mode=1777"},
|
||||
Dump: 0,
|
||||
Pass: 0,
|
||||
})
|
||||
d.contributeGuestConfig(builder, vm, image)
|
||||
builder.WriteFile("/etc/fstab", []byte(builder.RenderFSTab(fstab)))
|
||||
files := builder.Files()
|
||||
for _, guestPath := range builder.FilePaths() {
|
||||
data := files[guestPath]
|
||||
if err := system.WriteExt4File(ctx, d.runner, vm.Runtime.DMDev, guestPath, data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -878,14 +893,11 @@ func (d *Daemon) cleanupRuntime(ctx context.Context, vm model.VMRecord, preserve
|
|||
DMName: vm.Runtime.DMName,
|
||||
DMDev: vm.Runtime.DMDev,
|
||||
})
|
||||
if vm.Spec.NATEnabled {
|
||||
_ = d.ensureNAT(ctx, vm, false)
|
||||
}
|
||||
_ = d.removeDNS(ctx, vm.Runtime.DNSName)
|
||||
featureErr := d.cleanupCapabilityState(ctx, vm)
|
||||
if !preserveDisks && vm.Runtime.VMDir != "" {
|
||||
return errors.Join(snapshotErr, os.RemoveAll(vm.Runtime.VMDir))
|
||||
return errors.Join(snapshotErr, featureErr, os.RemoveAll(vm.Runtime.VMDir))
|
||||
}
|
||||
return snapshotErr
|
||||
return errors.Join(snapshotErr, featureErr)
|
||||
}
|
||||
|
||||
func clearRuntimeHandles(vm *model.VMRecord) {
|
||||
|
|
|
|||
|
|
@ -23,14 +23,20 @@ type MachineConfig struct {
|
|||
KernelImagePath string
|
||||
InitrdPath string
|
||||
KernelArgs string
|
||||
RootDrivePath string
|
||||
WorkDrivePath string
|
||||
Drives []DriveConfig
|
||||
TapDevice string
|
||||
VCPUCount int
|
||||
MemoryMiB int
|
||||
Logger *slog.Logger
|
||||
}
|
||||
|
||||
type DriveConfig struct {
|
||||
ID string
|
||||
Path string
|
||||
ReadOnly bool
|
||||
IsRoot bool
|
||||
}
|
||||
|
||||
type Machine struct {
|
||||
machine *sdk.Machine
|
||||
logFile *os.File
|
||||
|
|
@ -102,10 +108,14 @@ func openLogFile(path string) (*os.File, error) {
|
|||
}
|
||||
|
||||
func buildConfig(cfg MachineConfig) sdk.Config {
|
||||
drivesBuilder := sdk.NewDrivesBuilder(cfg.RootDrivePath).
|
||||
WithRootDrive(cfg.RootDrivePath, sdk.WithDriveID("rootfs"), sdk.WithReadOnly(false))
|
||||
if strings.TrimSpace(cfg.WorkDrivePath) != "" {
|
||||
drivesBuilder = drivesBuilder.AddDrive(cfg.WorkDrivePath, false, sdk.WithDriveID("work"))
|
||||
rootDrive, extraDrives := splitDrives(cfg.Drives)
|
||||
drivesBuilder := sdk.NewDrivesBuilder(rootDrive.Path).
|
||||
WithRootDrive(rootDrive.Path, sdk.WithDriveID(defaultDriveID(rootDrive, "rootfs")), sdk.WithReadOnly(rootDrive.ReadOnly))
|
||||
for _, drive := range extraDrives {
|
||||
if strings.TrimSpace(drive.Path) == "" {
|
||||
continue
|
||||
}
|
||||
drivesBuilder = drivesBuilder.AddDrive(drive.Path, drive.ReadOnly, sdk.WithDriveID(defaultDriveID(drive, "drive")))
|
||||
}
|
||||
drives := drivesBuilder.Build()
|
||||
|
||||
|
|
@ -131,6 +141,32 @@ func buildConfig(cfg MachineConfig) sdk.Config {
|
|||
}
|
||||
}
|
||||
|
||||
func splitDrives(drives []DriveConfig) (DriveConfig, []DriveConfig) {
|
||||
root := DriveConfig{ID: "rootfs"}
|
||||
var extras []DriveConfig
|
||||
for _, drive := range drives {
|
||||
if strings.TrimSpace(drive.Path) == "" {
|
||||
continue
|
||||
}
|
||||
if drive.IsRoot {
|
||||
root = drive
|
||||
if root.ID == "" {
|
||||
root.ID = "rootfs"
|
||||
}
|
||||
continue
|
||||
}
|
||||
extras = append(extras, drive)
|
||||
}
|
||||
return root, extras
|
||||
}
|
||||
|
||||
func defaultDriveID(drive DriveConfig, fallback string) string {
|
||||
if strings.TrimSpace(drive.ID) != "" {
|
||||
return drive.ID
|
||||
}
|
||||
return fallback
|
||||
}
|
||||
|
||||
func buildProcessRunner(cfg MachineConfig, logFile *os.File) *exec.Cmd {
|
||||
script := "umask 000 && exec " + shellQuote(cfg.BinaryPath) +
|
||||
" --api-sock " + shellQuote(cfg.SocketPath) +
|
||||
|
|
|
|||
|
|
@ -16,11 +16,13 @@ func TestBuildConfig(t *testing.T) {
|
|||
KernelImagePath: "/kernel",
|
||||
InitrdPath: "/initrd",
|
||||
KernelArgs: "console=ttyS0",
|
||||
RootDrivePath: "/dev/mapper/root",
|
||||
WorkDrivePath: "/var/lib/banger/root.ext4",
|
||||
TapDevice: "tap-fc-1",
|
||||
VCPUCount: 4,
|
||||
MemoryMiB: 2048,
|
||||
Drives: []DriveConfig{
|
||||
{ID: "rootfs", Path: "/dev/mapper/root", IsRoot: true},
|
||||
{ID: "work", Path: "/var/lib/banger/root.ext4"},
|
||||
},
|
||||
TapDevice: "tap-fc-1",
|
||||
VCPUCount: 4,
|
||||
MemoryMiB: 2048,
|
||||
})
|
||||
|
||||
if cfg.SocketPath != "/tmp/fc.sock" {
|
||||
|
|
|
|||
159
internal/guestconfig/guestconfig.go
Normal file
159
internal/guestconfig/guestconfig.go
Normal file
|
|
@ -0,0 +1,159 @@
|
|||
package guestconfig
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type MountSpec struct {
|
||||
Source string
|
||||
Target string
|
||||
FSType string
|
||||
Options []string
|
||||
Dump int
|
||||
Pass int
|
||||
}
|
||||
|
||||
func (m MountSpec) String() string {
|
||||
options := strings.Join(compactStrings(m.Options), ",")
|
||||
if options == "" {
|
||||
options = "defaults"
|
||||
}
|
||||
return strings.Join([]string{
|
||||
m.Source,
|
||||
m.Target,
|
||||
m.FSType,
|
||||
options,
|
||||
strconv.Itoa(m.Dump),
|
||||
strconv.Itoa(m.Pass),
|
||||
}, " ")
|
||||
}
|
||||
|
||||
type Builder struct {
|
||||
files map[string][]byte
|
||||
dropTargets map[string]struct{}
|
||||
mounts map[string]MountSpec
|
||||
order []string
|
||||
}
|
||||
|
||||
func NewBuilder() *Builder {
|
||||
return &Builder{
|
||||
files: make(map[string][]byte),
|
||||
dropTargets: make(map[string]struct{}),
|
||||
mounts: make(map[string]MountSpec),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Builder) WriteFile(path string, data []byte) {
|
||||
if b.files == nil {
|
||||
b.files = make(map[string][]byte)
|
||||
}
|
||||
b.files[path] = append([]byte(nil), data...)
|
||||
}
|
||||
|
||||
func (b *Builder) DropMountTarget(target string) {
|
||||
target = strings.TrimSpace(target)
|
||||
if target == "" {
|
||||
return
|
||||
}
|
||||
if b.dropTargets == nil {
|
||||
b.dropTargets = make(map[string]struct{})
|
||||
}
|
||||
b.dropTargets[target] = struct{}{}
|
||||
}
|
||||
|
||||
func (b *Builder) AddMount(spec MountSpec) {
|
||||
spec.Source = strings.TrimSpace(spec.Source)
|
||||
spec.Target = strings.TrimSpace(spec.Target)
|
||||
spec.FSType = strings.TrimSpace(spec.FSType)
|
||||
spec.Options = compactStrings(spec.Options)
|
||||
if spec.Source == "" || spec.Target == "" || spec.FSType == "" {
|
||||
return
|
||||
}
|
||||
if b.mounts == nil {
|
||||
b.mounts = make(map[string]MountSpec)
|
||||
}
|
||||
if _, exists := b.mounts[spec.Target]; !exists {
|
||||
b.order = append(b.order, spec.Target)
|
||||
}
|
||||
b.mounts[spec.Target] = spec
|
||||
}
|
||||
|
||||
func (b *Builder) Files() map[string][]byte {
|
||||
if len(b.files) == 0 {
|
||||
return nil
|
||||
}
|
||||
keys := b.FilePaths()
|
||||
out := make(map[string][]byte, len(keys))
|
||||
for _, path := range keys {
|
||||
out[path] = append([]byte(nil), b.files[path]...)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (b *Builder) FilePaths() []string {
|
||||
keys := make([]string, 0, len(b.files))
|
||||
for path := range b.files {
|
||||
keys = append(keys, path)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
func (b *Builder) RenderFSTab(existing string) string {
|
||||
lines := strings.Split(existing, "\n")
|
||||
out := make([]string, 0, len(lines)+len(b.mounts))
|
||||
managedTargets := make(map[string]struct{}, len(b.mounts))
|
||||
for target := range b.mounts {
|
||||
managedTargets[target] = struct{}{}
|
||||
}
|
||||
for _, line := range lines {
|
||||
trimmed := strings.TrimSpace(line)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
target := mountTarget(trimmed)
|
||||
if target != "" {
|
||||
if _, drop := b.dropTargets[target]; drop {
|
||||
continue
|
||||
}
|
||||
if _, managed := managedTargets[target]; managed {
|
||||
continue
|
||||
}
|
||||
}
|
||||
out = append(out, line)
|
||||
}
|
||||
for _, target := range b.order {
|
||||
out = append(out, b.mounts[target].String())
|
||||
}
|
||||
return strings.Join(out, "\n") + "\n"
|
||||
}
|
||||
|
||||
func mountTarget(line string) string {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < 2 {
|
||||
return ""
|
||||
}
|
||||
return fields[1]
|
||||
}
|
||||
|
||||
func compactStrings(values []string) []string {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make([]string, 0, len(values))
|
||||
seen := make(map[string]struct{}, len(values))
|
||||
for _, value := range values {
|
||||
value = strings.TrimSpace(value)
|
||||
if value == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[value]; ok {
|
||||
continue
|
||||
}
|
||||
seen[value] = struct{}{}
|
||||
out = append(out, value)
|
||||
}
|
||||
return out
|
||||
}
|
||||
71
internal/guestconfig/guestconfig_test.go
Normal file
71
internal/guestconfig/guestconfig_test.go
Normal file
|
|
@ -0,0 +1,71 @@
|
|||
package guestconfig
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBuilderRenderFSTabReplacesManagedTargetsAndDropsLegacyMounts(t *testing.T) {
|
||||
builder := NewBuilder()
|
||||
builder.DropMountTarget("/home")
|
||||
builder.DropMountTarget("/var")
|
||||
builder.AddMount(MountSpec{
|
||||
Source: "/dev/vdb",
|
||||
Target: "/root",
|
||||
FSType: "ext4",
|
||||
Options: []string{"defaults"},
|
||||
Dump: 0,
|
||||
Pass: 2,
|
||||
})
|
||||
builder.AddMount(MountSpec{
|
||||
Source: "tmpfs",
|
||||
Target: "/run",
|
||||
FSType: "tmpfs",
|
||||
Options: []string{"defaults", "nodev", "nosuid", "mode=0755"},
|
||||
})
|
||||
builder.AddMount(MountSpec{
|
||||
Source: "tmpfs",
|
||||
Target: "/tmp",
|
||||
FSType: "tmpfs",
|
||||
Options: []string{"defaults", "nodev", "nosuid", "mode=1777"},
|
||||
})
|
||||
|
||||
input := strings.Join([]string{
|
||||
"/dev/vdb /home ext4 defaults 0 2",
|
||||
"/dev/vdc /var ext4 defaults 0 2",
|
||||
"/dev/vdb /root ext4 defaults 0 2",
|
||||
"tmpfs /run tmpfs defaults,nodev,nosuid,mode=0700 0 0",
|
||||
"",
|
||||
}, "\n")
|
||||
|
||||
got := builder.RenderFSTab(input)
|
||||
|
||||
if strings.Contains(got, "/home") {
|
||||
t.Fatalf("RenderFSTab() kept /home mount: %q", got)
|
||||
}
|
||||
if strings.Contains(got, "/var") {
|
||||
t.Fatalf("RenderFSTab() kept /var mount: %q", got)
|
||||
}
|
||||
if strings.Count(got, "/dev/vdb /root") != 1 {
|
||||
t.Fatalf("RenderFSTab() duplicated /root mount: %q", got)
|
||||
}
|
||||
if !strings.Contains(got, "tmpfs /run tmpfs defaults,nodev,nosuid,mode=0755 0 0") {
|
||||
t.Fatalf("RenderFSTab() missing rendered /run mount: %q", got)
|
||||
}
|
||||
if !strings.Contains(got, "tmpfs /tmp tmpfs defaults,nodev,nosuid,mode=1777 0 0") {
|
||||
t.Fatalf("RenderFSTab() missing rendered /tmp mount: %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilderFilesReturnsCopies(t *testing.T) {
|
||||
builder := NewBuilder()
|
||||
builder.WriteFile("/etc/hostname", []byte("devbox\n"))
|
||||
|
||||
files := builder.Files()
|
||||
files["/etc/hostname"][0] = 'x'
|
||||
|
||||
again := builder.Files()
|
||||
if string(again["/etc/hostname"]) != "devbox\n" {
|
||||
t.Fatalf("Files() returned aliasing data: %q", string(again["/etc/hostname"]))
|
||||
}
|
||||
}
|
||||
|
|
@ -86,6 +86,15 @@ func (p *Preflight) Addf(format string, args ...any) {
|
|||
p.add(format, args...)
|
||||
}
|
||||
|
||||
func (p *Preflight) Problems() []string {
|
||||
if len(p.problems) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make([]string, len(p.problems))
|
||||
copy(out, p.problems)
|
||||
return out
|
||||
}
|
||||
|
||||
func (p *Preflight) Err(prefix string) error {
|
||||
if len(p.problems) == 0 {
|
||||
return nil
|
||||
|
|
|
|||
61
internal/system/report.go
Normal file
61
internal/system/report.go
Normal file
|
|
@ -0,0 +1,61 @@
|
|||
package system
|
||||
|
||||
type CheckStatus string
|
||||
|
||||
const (
|
||||
CheckStatusPass CheckStatus = "pass"
|
||||
CheckStatusWarn CheckStatus = "warn"
|
||||
CheckStatusFail CheckStatus = "fail"
|
||||
)
|
||||
|
||||
type CheckResult struct {
|
||||
Name string
|
||||
Status CheckStatus
|
||||
Details []string
|
||||
}
|
||||
|
||||
type Report struct {
|
||||
Checks []CheckResult
|
||||
}
|
||||
|
||||
func (r *Report) Add(status CheckStatus, name string, details ...string) {
|
||||
r.Checks = append(r.Checks, CheckResult{
|
||||
Name: name,
|
||||
Status: status,
|
||||
Details: append([]string(nil), details...),
|
||||
})
|
||||
}
|
||||
|
||||
func (r *Report) AddPass(name string, details ...string) {
|
||||
r.Add(CheckStatusPass, name, details...)
|
||||
}
|
||||
|
||||
func (r *Report) AddWarn(name string, details ...string) {
|
||||
r.Add(CheckStatusWarn, name, details...)
|
||||
}
|
||||
|
||||
func (r *Report) AddFail(name string, details ...string) {
|
||||
r.Add(CheckStatusFail, name, details...)
|
||||
}
|
||||
|
||||
func (r *Report) AddPreflight(name string, checks *Preflight, successDetail string) {
|
||||
problems := checks.Problems()
|
||||
if len(problems) == 0 {
|
||||
if successDetail == "" {
|
||||
r.AddPass(name)
|
||||
return
|
||||
}
|
||||
r.AddPass(name, successDetail)
|
||||
return
|
||||
}
|
||||
r.AddFail(name, problems...)
|
||||
}
|
||||
|
||||
func (r Report) HasFailures() bool {
|
||||
for _, check := range r.Checks {
|
||||
if check.Status == CheckStatusFail {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
27
internal/system/report_test.go
Normal file
27
internal/system/report_test.go
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
package system
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestReportAddPreflightPassAndFail(t *testing.T) {
|
||||
report := Report{}
|
||||
|
||||
pass := NewPreflight()
|
||||
report.AddPreflight("runtime bundle", pass, "ready")
|
||||
|
||||
fail := NewPreflight()
|
||||
fail.Addf("missing firecracker")
|
||||
report.AddPreflight("core vm lifecycle", fail, "")
|
||||
|
||||
if len(report.Checks) != 2 {
|
||||
t.Fatalf("len(report.Checks) = %d, want 2", len(report.Checks))
|
||||
}
|
||||
if report.Checks[0].Status != CheckStatusPass {
|
||||
t.Fatalf("report.Checks[0].Status = %s, want pass", report.Checks[0].Status)
|
||||
}
|
||||
if report.Checks[1].Status != CheckStatusFail {
|
||||
t.Fatalf("report.Checks[1].Status = %s, want fail", report.Checks[1].Status)
|
||||
}
|
||||
if !report.HasFailures() {
|
||||
t.Fatal("HasFailures() = false, want true")
|
||||
}
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue