banger/internal/daemon/logger_test.go
Thales Maciel 16702bd5e1
daemon split (6/n): extract wireServices + drop lazy service getters
Factor the service + capability wiring out of Daemon.Open() into
wireServices(d), an idempotent helper that constructs HostNetwork,
ImageService, WorkspaceService, and VMService from whatever
infrastructure (runner, store, config, layout, logger, closing) is
already set on d. Open() calls it once after filling the composition
root; tests that build &Daemon{...} literals call it to get a working
service graph, preinstalling stubs on the fields they want to fake.

Drops the four lazy-init getters on *Daemon — d.hostNet(),
d.imageSvc(), d.workspaceSvc(), d.vmSvc() — whose sole purpose was
keeping test literals working. Every production call site now reads
d.net / d.img / d.ws / d.vm directly; the services are guaranteed
non-nil once Open returns. No behavior change.

Mechanical: all existing `d.xxxSvc()` calls (production + tests)
rewritten to field access; each `d := &Daemon{...}` in tests gets a
trailing wireServices(d) so the literal + wiring are side-by-side.
Tests that override a pre-built service (e.g. d.img = &ImageService{
bundleFetch: stub}) now set the override before wireServices so the
replacement propagates into VMService's peer pointer.

Also nil-guards HostNetwork.stopVMDNS and d.store in Close() so
partially-initialised daemons (pre-reconcile open failure) still
tear down cleanly — same contract the old lazy getters provided.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-21 15:55:28 -03:00

172 lines
5 KiB
Go

package daemon
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"banger/internal/model"
"banger/internal/paths"
)
// TestNewDaemonLoggerEmitsJSONAtConfiguredLevel verifies that an empty level
// string defaults to "info", that debug records are suppressed at that level,
// and that emitted records are JSON lines carrying structured fields.
func TestNewDaemonLoggerEmitsJSONAtConfiguredLevel(t *testing.T) {
	var out bytes.Buffer
	logger, level, err := newDaemonLogger(&out, "")
	if err != nil {
		t.Fatalf("newDaemonLogger: %v", err)
	}
	if level != "info" {
		t.Fatalf("level = %q, want info", level)
	}

	// Only the info record should survive the level filter.
	logger.Debug("hidden debug")
	logger.Info("visible info", "vm_name", "otter")

	got := parseLogEntries(t, out.Bytes())
	if len(got) != 1 {
		t.Fatalf("entry count = %d, want 1", len(got))
	}
	entry := got[0]
	if entry["msg"] != "visible info" {
		t.Fatalf("msg = %v, want visible info", entry["msg"])
	}
	if entry["vm_name"] != "otter" {
		t.Fatalf("vm_name = %v, want otter", entry["vm_name"])
	}
}
// TestStartVMLockedLogsBridgeFailure scripts a runner whose
// "ip link set br-fc up" step fails, then asserts that startVMLocked
// propagates that error and that the structured log contains both the
// per-stage "operation stage" record and the terminal "operation failed"
// record for vm.start.
func TestStartVMLockedLogsBridgeFailure(t *testing.T) {
	ctx := context.Background()
	// Swap the package-level vsock host device path for a temp file
	// (written below) and restore the original on cleanup.
	origVsockHostDevicePath := vsockHostDevicePath
	vsockHostDevicePath = filepath.Join(t.TempDir(), "vhost-vsock")
	t.Cleanup(func() {
		vsockHostDevicePath = origVsockHostDevicePath
	})
	// Stub out every external binary the daemon might shell out to, and
	// restrict PATH to the stub directory only.
	binDir := t.TempDir()
	for _, name := range []string{
		"sudo", "ip", "dmsetup", "losetup", "blockdev", "truncate", "pgrep", "ps",
		"chown", "chmod", "kill", "e2cp", "e2rm", "debugfs", "mkfs.ext4", "mount",
		"umount", "cp",
	} {
		writeFakeExecutable(t, filepath.Join(binDir, name))
	}
	t.Setenv("PATH", binDir)
	// Fake firecracker and vsock-agent executables plus the vsock device
	// file; all are no-op shell scripts / empty files.
	firecrackerBin := filepath.Join(t.TempDir(), "firecracker")
	vsockHelper := filepath.Join(t.TempDir(), "banger-vsock-agent")
	if err := os.WriteFile(firecrackerBin, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
		t.Fatalf("write firecracker: %v", err)
	}
	if err := os.WriteFile(vsockHostDevicePath, []byte{}, 0o644); err != nil {
		t.Fatalf("write vsock host device: %v", err)
	}
	if err := os.WriteFile(vsockHelper, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
		t.Fatalf("write vsock helper: %v", err)
	}
	t.Setenv("BANGER_VSOCK_AGENT_BIN", vsockHelper)
	// Minimal image artifacts; placeholder content since the scripted run
	// fails at the bridge stage before any real use of them.
	rootfsPath := filepath.Join(t.TempDir(), "rootfs.ext4")
	kernelPath := filepath.Join(t.TempDir(), "vmlinux")
	for _, path := range []string{rootfsPath, kernelPath} {
		if err := os.WriteFile(path, []byte("artifact"), 0o644); err != nil {
			t.Fatalf("write %s: %v", path, err)
		}
	}
	// Exactly two runner calls are scripted, in order: the bridge probe
	// succeeds, then bringing the bridge up fails with the error the test
	// expects to see surfaced and logged.
	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			{call: runnerCall{name: "ip", args: []string{"link", "show", "br-fc"}}, out: []byte("1: br-fc\n")},
			sudoStep("", errors.New("bridge up failed"), "ip", "link", "set", "br-fc", "up"),
		},
	}
	// Capture structured log output at info level for the assertions below.
	var buf bytes.Buffer
	logger, _, err := newDaemonLogger(&buf, "info")
	if err != nil {
		t.Fatalf("newDaemonLogger: %v", err)
	}
	// VM fixture with all runtime paths rooted in a temp dir. DNSName is
	// cleared — presumably to skip DNS registration; confirm against
	// startVMLocked if this test is extended.
	vmDir := filepath.Join(t.TempDir(), "vm")
	vm := testVM("loggy", "image-loggy", "172.16.0.50")
	vm.Runtime.DNSName = ""
	vm.Runtime.VMDir = vmDir
	vm.Runtime.SystemOverlay = filepath.Join(vmDir, "system.cow")
	vm.Runtime.WorkDiskPath = filepath.Join(vmDir, "root.ext4")
	vm.Runtime.LogPath = filepath.Join(vmDir, "firecracker.log")
	vm.Runtime.MetricsPath = filepath.Join(vmDir, "metrics.json")
	image := testImage("image-loggy")
	image.RootfsPath = rootfsPath
	image.KernelPath = kernelPath
	// Build the Daemon literal with stubs preinstalled, then wire the
	// service graph from those fields so d.vm is usable.
	d := &Daemon{
		layout: paths.Layout{RuntimeDir: filepath.Join(t.TempDir(), "runtime")},
		config: model.DaemonConfig{
			BridgeName:        "br-fc",
			BridgeIP:          model.DefaultBridgeIP,
			DefaultDNS:        model.DefaultDNS,
			FirecrackerBin:    firecrackerBin,
			StatsPollInterval: model.DefaultStatsPollInterval,
		},
		runner: runner,
		logger: logger,
	}
	wireServices(d)
	_, err = d.vm.startVMLocked(ctx, vm, image)
	if err == nil || !strings.Contains(err.Error(), "bridge up failed") {
		t.Fatalf("startVMLocked() error = %v, want bridge failure", err)
	}
	// Every scripted step must have been consumed — no extra runner calls.
	runner.assertExhausted()
	// The log must contain the bridge stage record and the failure record
	// carrying the underlying error text.
	entries := parseLogEntries(t, buf.Bytes())
	if !hasLogEntry(entries, map[string]string{"msg": "operation stage", "operation": "vm.start", "stage": "bridge", "vm_name": "loggy"}) {
		t.Fatalf("expected bridge stage log, got %v", entries)
	}
	if !hasLogEntry(entries, map[string]string{"msg": "operation failed", "operation": "vm.start", "vm_name": "loggy", "error": "bridge up failed"}) {
		t.Fatalf("expected operation failure log, got %v", entries)
	}
}
// parseLogEntries decodes newline-delimited JSON log output into a slice of
// generic maps, skipping blank lines. It fails the test on any line that is
// not valid JSON, and returns nil when data is empty or all whitespace.
func parseLogEntries(t *testing.T, data []byte) []map[string]any {
	t.Helper()
	trimmed := bytes.TrimSpace(data)
	if len(trimmed) == 0 {
		return nil
	}
	rawLines := bytes.Split(trimmed, []byte("\n"))
	parsed := make([]map[string]any, 0, len(rawLines))
	for _, raw := range rawLines {
		if len(bytes.TrimSpace(raw)) == 0 {
			continue
		}
		var record map[string]any
		if err := json.Unmarshal(raw, &record); err != nil {
			t.Fatalf("unmarshal log line %q: %v", string(raw), err)
		}
		parsed = append(parsed, record)
	}
	return parsed
}
// hasLogEntry reports whether at least one entry satisfies every key/value
// pair in want, where a pair matches when the entry value's default string
// form (fmt.Sprint) contains want's value as a substring. An empty want
// matches any entry; missing keys render as "<nil>".
func hasLogEntry(entries []map[string]any, want map[string]string) bool {
	for _, entry := range entries {
		matched := true
		for key, substr := range want {
			if !strings.Contains(fmt.Sprint(entry[key]), substr) {
				matched = false
				break
			}
		}
		if matched {
			return true
		}
	}
	return false
}
// stringValue renders an arbitrary value using fmt's default %v formatting,
// so log field values of any type can be compared as strings.
func stringValue(value any) string {
	return fmt.Sprintf("%v", value)
}