Phase 4 of the daemon god-struct refactor. VM lifecycle, create-op
registry, handle cache, disk provisioning, stats polling, ports
query, and the per-VM lock set all move off *Daemon onto *VMService.
Daemon keeps thin forwarders only for FindVM / TouchVM (dispatch
surface) and is otherwise out of VM lifecycle. Lazy-init via
d.vmSvc() mirrors the earlier services so test literals like
`&Daemon{store: db, runner: r}` still get a functional service
without spelling one out.
Three small cleanups along the way:
* preflight helpers (validateStartPrereqs / addBaseStartPrereqs
/ addBaseStartCommandPrereqs / validateWorkDiskResizePrereqs)
move with the VM methods that call them.
* cleanupRuntime / rebuildDNS move to *VMService, with
HostNetwork primitives (findFirecrackerPID, cleanupDMSnapshot,
killVMProcess, releaseTap, waitForExit, sendCtrlAltDel)
reached through s.net instead of the hostNet() facade.
* vsockAgentBinary becomes a package-level function so both
*Daemon (doctor) and *VMService (preflight) call one entry
point instead of each owning a forwarder method.
WorkspaceService's peer deps switch from eager method values to
closures — vmSvc() constructs VMService with WorkspaceService as a
peer, so resolving d.vmSvc().FindVM at construction time recursed
through workspaceSvc() → vmSvc(). Closures defer the lookup to call
time.
Pure code motion: build + unit tests green, lint clean. No RPC
surface or lock-ordering changes.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
171 lines
5 KiB
Go
171 lines
5 KiB
Go
package daemon
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"encoding/json"
|
|
"errors"
|
|
"fmt"
|
|
"os"
|
|
"path/filepath"
|
|
"strings"
|
|
"testing"
|
|
|
|
"banger/internal/model"
|
|
"banger/internal/paths"
|
|
)
|
|
|
|
func TestNewDaemonLoggerEmitsJSONAtConfiguredLevel(t *testing.T) {
|
|
var buf bytes.Buffer
|
|
logger, level, err := newDaemonLogger(&buf, "")
|
|
if err != nil {
|
|
t.Fatalf("newDaemonLogger: %v", err)
|
|
}
|
|
if level != "info" {
|
|
t.Fatalf("level = %q, want info", level)
|
|
}
|
|
|
|
logger.Debug("hidden debug")
|
|
logger.Info("visible info", "vm_name", "otter")
|
|
|
|
entries := parseLogEntries(t, buf.Bytes())
|
|
if len(entries) != 1 {
|
|
t.Fatalf("entry count = %d, want 1", len(entries))
|
|
}
|
|
if entries[0]["msg"] != "visible info" {
|
|
t.Fatalf("msg = %v, want visible info", entries[0]["msg"])
|
|
}
|
|
if entries[0]["vm_name"] != "otter" {
|
|
t.Fatalf("vm_name = %v, want otter", entries[0]["vm_name"])
|
|
}
|
|
}
|
|
|
|
// TestStartVMLockedLogsBridgeFailure drives startVMLocked down the path where
// the bridge device already exists but bringing it up fails, and asserts that
// the structured log stream records both the "bridge" stage and the terminal
// operation-failure entry carrying the scripted error text.
func TestStartVMLockedLogsBridgeFailure(t *testing.T) {
	ctx := context.Background()

	// The start path checks a package-level vsock host device path; point it
	// at a temp file so the check passes on machines without /dev/vhost-vsock,
	// and restore the original on cleanup.
	origVsockHostDevicePath := vsockHostDevicePath
	vsockHostDevicePath = filepath.Join(t.TempDir(), "vhost-vsock")
	t.Cleanup(func() {
		vsockHostDevicePath = origVsockHostDevicePath
	})

	// Stub every host tool the start path may look up, then narrow PATH to
	// the stub directory so lookups resolve to the fakes.
	binDir := t.TempDir()
	for _, name := range []string{
		"sudo", "ip", "dmsetup", "losetup", "blockdev", "truncate", "pgrep", "ps",
		"chown", "chmod", "kill", "e2cp", "e2rm", "debugfs", "mkfs.ext4", "mount",
		"umount", "cp",
	} {
		writeFakeExecutable(t, filepath.Join(binDir, name))
	}
	t.Setenv("PATH", binDir)

	// On-disk artifacts the preflight expects to exist: the firecracker
	// binary, the vsock host device file, and the vsock agent helper.
	firecrackerBin := filepath.Join(t.TempDir(), "firecracker")
	vsockHelper := filepath.Join(t.TempDir(), "banger-vsock-agent")
	if err := os.WriteFile(firecrackerBin, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
		t.Fatalf("write firecracker: %v", err)
	}
	if err := os.WriteFile(vsockHostDevicePath, []byte{}, 0o644); err != nil {
		t.Fatalf("write vsock host device: %v", err)
	}
	if err := os.WriteFile(vsockHelper, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
		t.Fatalf("write vsock helper: %v", err)
	}
	// Presumably read by the vsockAgentBinary preflight lookup — TODO confirm
	// the env var is consulted at call time rather than cached earlier.
	t.Setenv("BANGER_VSOCK_AGENT_BIN", vsockHelper)

	// Image artifacts only need to exist; their contents are never consumed
	// before the scripted bridge failure aborts the start.
	rootfsPath := filepath.Join(t.TempDir(), "rootfs.ext4")
	kernelPath := filepath.Join(t.TempDir(), "vmlinux")
	for _, path := range []string{rootfsPath, kernelPath} {
		if err := os.WriteFile(path, []byte("artifact"), 0o644); err != nil {
			t.Fatalf("write %s: %v", path, err)
		}
	}

	// Script exactly two runner calls: "ip link show br-fc" reports the bridge
	// present, then the sudo "ip link set br-fc up" fails — the error under test.
	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			{call: runnerCall{name: "ip", args: []string{"link", "show", "br-fc"}}, out: []byte("1: br-fc\n")},
			sudoStep("", errors.New("bridge up failed"), "ip", "link", "set", "br-fc", "up"),
		},
	}

	// Capture daemon logs at info level so the stage/failure records can be
	// parsed back out of the buffer.
	var buf bytes.Buffer
	logger, _, err := newDaemonLogger(&buf, "info")
	if err != nil {
		t.Fatalf("newDaemonLogger: %v", err)
	}

	// VM record with all runtime paths rooted in a temp dir; DNSName is
	// cleared — presumably to skip any DNS handling for this VM (confirm
	// against startVMLocked).
	vmDir := filepath.Join(t.TempDir(), "vm")
	vm := testVM("loggy", "image-loggy", "172.16.0.50")
	vm.Runtime.DNSName = ""
	vm.Runtime.VMDir = vmDir
	vm.Runtime.SystemOverlay = filepath.Join(vmDir, "system.cow")
	vm.Runtime.WorkDiskPath = filepath.Join(vmDir, "root.ext4")
	vm.Runtime.LogPath = filepath.Join(vmDir, "firecracker.log")
	vm.Runtime.MetricsPath = filepath.Join(vmDir, "metrics.json")
	image := testImage("image-loggy")
	image.RootfsPath = rootfsPath
	image.KernelPath = kernelPath

	// Bare Daemon literal: d.vmSvc() lazily constructs the VMService, so no
	// explicit service wiring is required here.
	d := &Daemon{
		layout: paths.Layout{RuntimeDir: filepath.Join(t.TempDir(), "runtime")},
		config: model.DaemonConfig{
			BridgeName:        "br-fc",
			BridgeIP:          model.DefaultBridgeIP,
			DefaultDNS:        model.DefaultDNS,
			FirecrackerBin:    firecrackerBin,
			StatsPollInterval: model.DefaultStatsPollInterval,
		},
		runner: runner,
		logger: logger,
	}

	_, err = d.vmSvc().startVMLocked(ctx, vm, image)
	if err == nil || !strings.Contains(err.Error(), "bridge up failed") {
		t.Fatalf("startVMLocked() error = %v, want bridge failure", err)
	}
	// All scripted steps must have been consumed — the failure came from the
	// "set ... up" call, not an earlier short-circuit.
	runner.assertExhausted()

	// The log must contain both the per-stage record for "bridge" and the
	// failure record carrying the underlying error text.
	entries := parseLogEntries(t, buf.Bytes())
	if !hasLogEntry(entries, map[string]string{"msg": "operation stage", "operation": "vm.start", "stage": "bridge", "vm_name": "loggy"}) {
		t.Fatalf("expected bridge stage log, got %v", entries)
	}
	if !hasLogEntry(entries, map[string]string{"msg": "operation failed", "operation": "vm.start", "vm_name": "loggy", "error": "bridge up failed"}) {
		t.Fatalf("expected operation failure log, got %v", entries)
	}
}
|
|
|
|
// parseLogEntries decodes newline-delimited JSON log output into one map per
// record, skipping blank lines. Empty or whitespace-only input yields nil.
// Malformed lines fail the test immediately.
func parseLogEntries(t *testing.T, data []byte) []map[string]any {
	t.Helper()
	body := bytes.TrimSpace(data)
	if len(body) == 0 {
		return nil
	}
	rawLines := bytes.Split(body, []byte("\n"))
	out := make([]map[string]any, 0, len(rawLines))
	for _, raw := range rawLines {
		if len(bytes.TrimSpace(raw)) == 0 {
			continue
		}
		var parsed map[string]any
		if err := json.Unmarshal(raw, &parsed); err != nil {
			t.Fatalf("unmarshal log line %q: %v", string(raw), err)
		}
		out = append(out, parsed)
	}
	return out
}
|
|
|
|
// hasLogEntry reports whether any entry contains, for every key in want, a
// value whose string form includes the wanted substring. An empty want map
// matches the first entry (if any).
func hasLogEntry(entries []map[string]any, want map[string]string) bool {
entryLoop:
	for _, entry := range entries {
		for key, fragment := range want {
			if !strings.Contains(stringValue(entry[key]), fragment) {
				continue entryLoop
			}
		}
		return true
	}
	return false
}

// stringValue renders an arbitrary log attribute with fmt's default format,
// so numbers and nil compare as their printed form ("3", "<nil>", ...).
func stringValue(value any) string {
	return fmt.Sprint(value)
}
|