banger/internal/cli/cli_test.go
Thales Maciel 4930d82cb9
Refactor VM lifecycle around capabilities
Make host-integrated VM features fit a standard Go extension path instead of adding more one-off branches through vm.go. This is the enabling refactor for future work like shared mounts, not the /work feature itself.

Add a daemon capability pipeline plus a structured guest-config builder, then move the existing /root work-disk mount, built-in DNS, and NAT wiring onto those hooks. Generalize Firecracker drive config at the same time so later storage features can extend machine setup without another hardcoded path.

Add banger doctor on top of the shared readiness checks, update the docs to describe the new architecture, and cover the new seams with guest-config, capability, report, CLI, and full go test verification. Also verify make build and a real ./banger doctor run on the host.
2026-03-18 19:28:26 -03:00

463 lines
13 KiB
Go

package cli
import (
"bytes"
"context"
"errors"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
"banger/internal/api"
"banger/internal/model"
"banger/internal/system"
)
// TestNewBangerCommandHasExpectedSubcommands verifies the root CLI command
// exposes exactly the expected subcommands, in order.
func TestNewBangerCommandHasExpectedSubcommands(t *testing.T) {
	root := NewBangerCommand()
	got := []string{}
	for _, child := range root.Commands() {
		got = append(got, child.Name())
	}
	expected := []string{"daemon", "doctor", "image", "internal", "tui", "vm"}
	if !reflect.DeepEqual(got, expected) {
		t.Fatalf("subcommands = %v, want %v", got, expected)
	}
}
// TestDoctorCommandPrintsReportAndFailsOnHardFailures stubs doctorFunc with a
// report containing one passing and one failing check, then asserts the
// doctor command prints both check lines and returns a failure error.
func TestDoctorCommandPrintsReportAndFailsOnHardFailures(t *testing.T) {
	saved := doctorFunc
	t.Cleanup(func() { doctorFunc = saved })
	doctorFunc = func(context.Context) (system.Report, error) {
		report := system.Report{
			Checks: []system.CheckResult{
				{Name: "runtime bundle", Status: system.CheckStatusPass, Details: []string{"runtime dir /tmp/runtime"}},
				{Name: "feature nat", Status: system.CheckStatusFail, Details: []string{"missing iptables"}},
			},
		}
		return report, nil
	}

	var out bytes.Buffer
	root := NewBangerCommand()
	root.SetOut(&out)
	root.SetErr(&out)
	root.SetArgs([]string{"doctor"})

	execErr := root.Execute()
	if execErr == nil || !strings.Contains(execErr.Error(), "doctor found failing checks") {
		t.Fatalf("Execute() error = %v, want doctor failure", execErr)
	}
	text := out.String()
	if !strings.Contains(text, "PASS\truntime bundle") {
		t.Fatalf("output = %q, want runtime bundle pass", text)
	}
	if !strings.Contains(text, "FAIL\tfeature nat") {
		t.Fatalf("output = %q, want feature nat fail", text)
	}
}
// TestDoctorCommandReturnsUnderlyingError stubs doctorFunc to fail outright
// and asserts the doctor command surfaces that error to the caller.
func TestDoctorCommandReturnsUnderlyingError(t *testing.T) {
	saved := doctorFunc
	t.Cleanup(func() { doctorFunc = saved })
	doctorFunc = func(context.Context) (system.Report, error) {
		return system.Report{}, errors.New("load failed")
	}
	root := NewBangerCommand()
	root.SetArgs([]string{"doctor"})
	if err := root.Execute(); err == nil || !strings.Contains(err.Error(), "load failed") {
		t.Fatalf("Execute() error = %v, want load failed", err)
	}
}
// TestInternalNATFlagsExist resolves the `internal nat up` command step by
// step and checks its required flags are registered.
func TestInternalNATFlagsExist(t *testing.T) {
	root := NewBangerCommand()
	internalCmd, _, err := root.Find([]string{"internal"})
	if err != nil {
		t.Fatalf("find internal: %v", err)
	}
	natCmd, _, err := internalCmd.Find([]string{"nat"})
	if err != nil {
		t.Fatalf("find nat: %v", err)
	}
	upCmd, _, err := natCmd.Find([]string{"up"})
	if err != nil {
		t.Fatalf("find nat up: %v", err)
	}
	for _, name := range []string{"guest-ip", "tap"} {
		if upCmd.Flags().Lookup(name) == nil {
			t.Fatalf("missing flag %q", name)
		}
	}
}
// TestVMCreateFlagsExist checks that `vm create` registers every flag the
// CLI documents for VM creation.
func TestVMCreateFlagsExist(t *testing.T) {
	root := NewBangerCommand()
	vmCmd, _, err := root.Find([]string{"vm"})
	if err != nil {
		t.Fatalf("find vm: %v", err)
	}
	createCmd, _, err := vmCmd.Find([]string{"create"})
	if err != nil {
		t.Fatalf("find create: %v", err)
	}
	expected := []string{"name", "image", "vcpu", "memory", "system-overlay-size", "disk-size", "nat", "no-start"}
	for _, name := range expected {
		if createCmd.Flags().Lookup(name) == nil {
			t.Fatalf("missing flag %q", name)
		}
	}
}
// TestVMKillFlagsExist checks that `vm kill` registers its signal flag.
func TestVMKillFlagsExist(t *testing.T) {
	root := NewBangerCommand()
	vmCmd, _, err := root.Find([]string{"vm"})
	if err != nil {
		t.Fatalf("find vm: %v", err)
	}
	killCmd, _, err := vmCmd.Find([]string{"kill"})
	if err != nil {
		t.Fatalf("find kill: %v", err)
	}
	if killCmd.Flags().Lookup("signal") == nil {
		t.Fatal("missing signal flag")
	}
}
// TestVMSetParamsFromFlags checks that explicitly provided flag values are
// translated into set-params with their pointer fields populated.
func TestVMSetParamsFromFlags(t *testing.T) {
	got, err := vmSetParamsFromFlags("devbox", 4, 2048, "16G", true, false)
	if err != nil {
		t.Fatalf("vmSetParamsFromFlags: %v", err)
	}
	switch {
	case got.IDOrName != "devbox" || got.VCPUCount == nil || *got.VCPUCount != 4:
		t.Fatalf("unexpected params: %+v", got)
	case got.MemoryMiB == nil || *got.MemoryMiB != 2048:
		t.Fatalf("unexpected memory: %+v", got)
	case got.WorkDiskSize != "16G":
		t.Fatalf("unexpected disk size: %+v", got)
	case got.NATEnabled == nil || !*got.NATEnabled:
		t.Fatalf("unexpected nat value: %+v", got)
	}
}
// TestVMCreateParamsFromFlagsUsesNilForOmittedCPUAndMemory verifies that
// vcpu/memory flags left at their defaults yield nil pointers (meaning
// "not specified") rather than zero values.
func TestVMCreateParamsFromFlagsUsesNilForOmittedCPUAndMemory(t *testing.T) {
	root := NewBangerCommand()
	vmCmd, _, err := root.Find([]string{"vm"})
	if err != nil {
		t.Fatalf("find vm: %v", err)
	}
	createCmd, _, err := vmCmd.Find([]string{"create"})
	if err != nil {
		t.Fatalf("find create: %v", err)
	}
	got, err := vmCreateParamsFromFlags(createCmd, "devbox", "default", 0, 0, "8G", "16G", false, false)
	if err != nil {
		t.Fatalf("vmCreateParamsFromFlags: %v", err)
	}
	if got.VCPUCount != nil || got.MemoryMiB != nil {
		t.Fatalf("expected omitted cpu/memory to stay nil: %+v", got)
	}
}
// TestVMCreateParamsFromFlagsRejectsNonPositiveCPUAndMemory marks the vcpu
// and memory flags as explicitly set, then checks that non-positive values
// are rejected with descriptive errors.
func TestVMCreateParamsFromFlagsRejectsNonPositiveCPUAndMemory(t *testing.T) {
	root := NewBangerCommand()
	vmCmd, _, err := root.Find([]string{"vm"})
	if err != nil {
		t.Fatalf("find vm: %v", err)
	}
	createCmd, _, err := vmCmd.Find([]string{"create"})
	if err != nil {
		t.Fatalf("find create: %v", err)
	}

	// Setting the flag marks it "changed" so the zero value is validated
	// instead of being treated as omitted.
	if err := createCmd.Flags().Set("vcpu", "0"); err != nil {
		t.Fatalf("set vcpu flag: %v", err)
	}
	_, err = vmCreateParamsFromFlags(createCmd, "devbox", "default", 0, 0, "", "", false, false)
	if err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
		t.Fatalf("vmCreateParamsFromFlags(vcpu=0) error = %v", err)
	}

	if err := createCmd.Flags().Set("memory", "-1"); err != nil {
		t.Fatalf("set memory flag: %v", err)
	}
	_, err = vmCreateParamsFromFlags(createCmd, "devbox", "default", 1, -1, "", "", false, false)
	if err == nil || !strings.Contains(err.Error(), "memory must be a positive integer") {
		t.Fatalf("vmCreateParamsFromFlags(memory=-1) error = %v", err)
	}
}
// TestVMSetParamsFromFlagsConflict checks that requesting both nat and
// no-nat at the same time is rejected.
func TestVMSetParamsFromFlagsConflict(t *testing.T) {
	_, err := vmSetParamsFromFlags("devbox", -1, -1, "", true, true)
	if err == nil {
		t.Fatal("expected nat conflict error")
	}
}
// TestVMSetParamsFromFlagsRejectsNonPositiveCPUAndMemory verifies validation
// errors for non-positive vcpu and memory values passed to vm set.
func TestVMSetParamsFromFlagsRejectsNonPositiveCPUAndMemory(t *testing.T) {
	_, err := vmSetParamsFromFlags("devbox", 0, -1, "", false, false)
	if err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
		t.Fatalf("vmSetParamsFromFlags(vcpu=0) error = %v", err)
	}
	_, err = vmSetParamsFromFlags("devbox", -1, 0, "", false, false)
	if err == nil || !strings.Contains(err.Error(), "memory must be a positive integer") {
		t.Fatalf("vmSetParamsFromFlags(memory=0) error = %v", err)
	}
}
// TestResolveVMTargetsDeduplicatesAndReportsErrors feeds a mix of exact
// names, IDs, an ambiguous prefix, and an unknown ref through
// resolveVMTargets and checks the deduplicated targets plus per-ref errors.
func TestResolveVMTargetsDeduplicatesAndReportsErrors(t *testing.T) {
	records := []model.VMRecord{
		testCLIResolvedVM("alpha-id", "alpha"),
		testCLIResolvedVM("alpine-id", "alpine"),
		testCLIResolvedVM("bravo-id", "bravo"),
	}
	// "alpha" and "alpha-id" hit the same VM (deduplicated); "al" is an
	// ambiguous prefix; "missing" matches nothing; "br" is unambiguous.
	targets, errs := resolveVMTargets(records, []string{"alpha", "alpha-id", "al", "missing", "br"})

	if len(targets) != 2 {
		t.Fatalf("len(targets) = %d, want 2", len(targets))
	}
	if targets[0].VM.ID != "alpha-id" || targets[0].Ref != "alpha" {
		t.Fatalf("targets[0] = %+v, want alpha target", targets[0])
	}
	if targets[1].VM.ID != "bravo-id" || targets[1].Ref != "br" {
		t.Fatalf("targets[1] = %+v, want bravo target", targets[1])
	}

	if len(errs) != 2 {
		t.Fatalf("len(errs) = %d, want 2", len(errs))
	}
	if errs[0].Ref != "al" || !strings.Contains(errs[0].Err.Error(), "multiple VMs match") {
		t.Fatalf("errs[0] = %+v, want ambiguous prefix", errs[0])
	}
	if errs[1].Ref != "missing" || !strings.Contains(errs[1].Err.Error(), `vm "missing" not found`) {
		t.Fatalf("errs[1] = %+v, want missing vm", errs[1])
	}
}
// TestResolveVMRefPrefersExactMatchBeforePrefix ensures a ref that exactly
// matches one VM's name wins even when it is also a prefix of another
// VM's ID.
func TestResolveVMRefPrefersExactMatchBeforePrefix(t *testing.T) {
	records := []model.VMRecord{
		testCLIResolvedVM("1111111111111111111111111111111111111111111111111111111111111111", "alpha"),
		testCLIResolvedVM("alpha222222222222222222222222222222222222222222222222222222222222", "bravo"),
	}
	got, err := resolveVMRef(records, "alpha")
	if err != nil {
		t.Fatalf("resolveVMRef(alpha): %v", err)
	}
	if got.Name != "alpha" {
		t.Fatalf("resolveVMRef(alpha) = %+v, want exact-name vm", got)
	}
}
// TestExecuteVMActionBatchRunsConcurrentlyAndPreservesOrder proves two
// properties of executeVMActionBatch: the per-target actions overlap in
// time (true concurrency), and the returned results keep the input order.
func TestExecuteVMActionBatchRunsConcurrentlyAndPreservesOrder(t *testing.T) {
	targets := []resolvedVMTarget{
		{Ref: "alpha", VM: testCLIResolvedVM("alpha-id", "alpha")},
		{Ref: "bravo", VM: testCLIResolvedVM("bravo-id", "bravo")},
	}
	// started records each action as it begins; release keeps every action
	// blocked until all have started, which makes overlap observable.
	started := make(chan string, len(targets))
	release := make(chan struct{})
	done := make(chan []vmBatchActionResult, 1)
	go func() {
		done <- executeVMActionBatch(context.Background(), targets, func(ctx context.Context, id string) (model.VMRecord, error) {
			started <- id
			<-release // block until the test has seen every action start
			return model.VMRecord{ID: id, Name: id}, nil
		})
	}()
	// Every action must report a start before any is released; a serial
	// implementation would never produce the second start and times out.
	for range targets {
		select {
		case <-started:
		case <-time.After(500 * time.Millisecond):
			t.Fatal("batch actions did not overlap")
		}
	}
	close(release) // unblock all actions at once
	var results []vmBatchActionResult
	select {
	case results = <-done:
	case <-time.After(500 * time.Millisecond):
		t.Fatal("executeVMActionBatch did not finish")
	}
	if len(results) != len(targets) {
		t.Fatalf("len(results) = %d, want %d", len(results), len(targets))
	}
	// Results must come back in input order regardless of completion order.
	for index, result := range results {
		if result.Target.Ref != targets[index].Ref {
			t.Fatalf("results[%d].Target.Ref = %q, want %q", index, result.Target.Ref, targets[index].Ref)
		}
		if result.VM.ID != targets[index].VM.ID {
			t.Fatalf("results[%d].VM.ID = %q, want %q", index, result.VM.ID, targets[index].VM.ID)
		}
	}
}
// TestSSHCommandArgs checks the assembled ssh argument list: identity file,
// host-key options, user@host target, then passthrough arguments.
func TestSSHCommandArgs(t *testing.T) {
	got, err := sshCommandArgs(model.DaemonConfig{SSHKeyPath: "/bundle/id_ed25519"}, "172.16.0.2", []string{"--", "uname", "-a"})
	if err != nil {
		t.Fatalf("sshCommandArgs: %v", err)
	}
	expected := []string{
		"-i", "/bundle/id_ed25519",
		"-o", "StrictHostKeyChecking=no",
		"-o", "UserKnownHostsFile=/dev/null",
		"root@172.16.0.2",
		"--", "uname", "-a",
	}
	if !reflect.DeepEqual(got, expected) {
		t.Fatalf("args = %v, want %v", got, expected)
	}
}
// TestValidateSSHPrereqs verifies validation succeeds when the configured
// private key exists on disk.
func TestValidateSSHPrereqs(t *testing.T) {
	keyPath := filepath.Join(t.TempDir(), "id_ed25519")
	if err := os.WriteFile(keyPath, []byte("key"), 0o600); err != nil {
		t.Fatalf("write key: %v", err)
	}
	if err := validateSSHPrereqs(model.DaemonConfig{SSHKeyPath: keyPath}); err != nil {
		t.Fatalf("validateSSHPrereqs: %v", err)
	}
}
// TestValidateSSHPrereqsFailsForMissingKey verifies a nonexistent key path
// yields an error mentioning the ssh private key.
func TestValidateSSHPrereqsFailsForMissingKey(t *testing.T) {
	if err := validateSSHPrereqs(model.DaemonConfig{SSHKeyPath: "/does/not/exist"}); err == nil || !strings.Contains(err.Error(), "ssh private key") {
		t.Fatalf("validateSSHPrereqs() error = %v, want missing key", err)
	}
}
// TestNewBangerdCommandRejectsArgs ensures the daemon root command refuses
// positional arguments.
func TestNewBangerdCommandRejectsArgs(t *testing.T) {
	daemonCmd := NewBangerdCommand()
	daemonCmd.SetArgs([]string{"extra"})
	if daemonCmd.Execute() == nil {
		t.Fatal("expected extra args to be rejected")
	}
}
// TestDaemonOutdated exercises daemonOutdated against per-PID executables:
// a hard link to the installed binary counts as current, while a different
// file counts as outdated.
func TestDaemonOutdated(t *testing.T) {
	dir := t.TempDir()
	installedPath := filepath.Join(dir, "bangerd-current")
	linkedPath := filepath.Join(dir, "bangerd-same")
	stalePath := filepath.Join(dir, "bangerd-stale")
	if err := os.WriteFile(installedPath, []byte("current"), 0o755); err != nil {
		t.Fatalf("write current: %v", err)
	}
	if err := os.Link(installedPath, linkedPath); err != nil {
		t.Fatalf("hard link: %v", err)
	}
	if err := os.WriteFile(stalePath, []byte("stale"), 0o755); err != nil {
		t.Fatalf("write stale: %v", err)
	}

	savedBangerdPath := bangerdPathFunc
	savedDaemonExePath := daemonExePath
	t.Cleanup(func() {
		bangerdPathFunc = savedBangerdPath
		daemonExePath = savedDaemonExePath
	})
	bangerdPathFunc = func() (string, error) {
		return installedPath, nil
	}
	// PID 1 pretends to run the hard-linked (identical) binary; any other
	// PID runs the stale one.
	daemonExePath = func(pid int) string {
		if pid == 1 {
			return linkedPath
		}
		return stalePath
	}

	if daemonOutdated(1) {
		t.Fatal("expected matching daemon executable to be current")
	}
	if !daemonOutdated(2) {
		t.Fatal("expected replaced daemon executable to be outdated")
	}
}
// TestDaemonStatusIncludesLogPathWhenStopped points the XDG directories at
// fresh temp dirs (so no daemon state exists), runs `daemon status`, and
// checks the output reports the stopped state, log path, and DNS listener.
func TestDaemonStatusIncludesLogPathWhenStopped(t *testing.T) {
	configDir := filepath.Join(t.TempDir(), "config")
	stateDir := filepath.Join(t.TempDir(), "state")
	runtimeDir := filepath.Join(t.TempDir(), "runtime")
	t.Setenv("XDG_CONFIG_HOME", configDir)
	t.Setenv("XDG_STATE_HOME", stateDir)
	t.Setenv("XDG_RUNTIME_DIR", runtimeDir)

	var out bytes.Buffer
	root := NewBangerCommand()
	root.SetOut(&out)
	root.SetErr(&out)
	root.SetArgs([]string{"daemon", "status"})
	if err := root.Execute(); err != nil {
		t.Fatalf("Execute: %v", err)
	}

	text := out.String()
	if !strings.Contains(text, "stopped\n") {
		t.Fatalf("output = %q, want stopped status", text)
	}
	if !strings.Contains(text, "log: "+filepath.Join(stateDir, "banger", "bangerd.log")) {
		t.Fatalf("output = %q, want daemon log path", text)
	}
	if !strings.Contains(text, "dns: 127.0.0.1:42069") {
		t.Fatalf("output = %q, want dns listener", text)
	}
}
// TestBuildDaemonCommandIsDetachedFromCallerContext checks the daemon
// exec.Cmd targets the given binary and carries no Cancel hook, so the
// spawned daemon does not die with the CLI's request context.
func TestBuildDaemonCommandIsDetachedFromCallerContext(t *testing.T) {
	daemonCmd := buildDaemonCommand("/tmp/bangerd")
	if daemonCmd.Path != "/tmp/bangerd" {
		t.Fatalf("command path = %q", daemonCmd.Path)
	}
	if daemonCmd.Cancel != nil {
		t.Fatal("daemon process should not be tied to a CLI request context")
	}
}
// TestAbsolutizeImageBuildPaths verifies relative build paths are resolved
// against the current working directory while absolute ones are preserved.
func TestAbsolutizeImageBuildPaths(t *testing.T) {
	workDir := t.TempDir()
	previousDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd: %v", err)
	}
	if err := os.Chdir(workDir); err != nil {
		t.Fatalf("chdir: %v", err)
	}
	t.Cleanup(func() {
		_ = os.Chdir(previousDir)
	})

	params := api.ImageBuildParams{
		BaseRootfs: "images/base.ext4",
		KernelPath: "/kernel",
		InitrdPath: "boot/initrd.img",
		ModulesDir: "modules",
	}
	if err := absolutizeImageBuildPaths(&params); err != nil {
		t.Fatalf("absolutizeImageBuildPaths: %v", err)
	}

	expected := api.ImageBuildParams{
		BaseRootfs: filepath.Join(workDir, "images/base.ext4"),
		KernelPath: "/kernel", // already absolute, must pass through untouched
		InitrdPath: filepath.Join(workDir, "boot/initrd.img"),
		ModulesDir: filepath.Join(workDir, "modules"),
	}
	if !reflect.DeepEqual(params, expected) {
		t.Fatalf("params = %+v, want %+v", params, expected)
	}
}
// testCLIResolvedVM builds a minimal VM record for resolver tests.
func testCLIResolvedVM(id, name string) model.VMRecord {
	record := model.VMRecord{}
	record.ID = id
	record.Name = name
	return record
}