banger/internal/cli/tui_test.go
Thales Maciel 3ed78fdcfc
Add experimental Void guest workflow and vsock agent
Make iterating on a Firecracker-friendly Void guest practical without replacing the Debian default image path.

Add local Void rootfs build/register/verify plumbing, a language-agnostic dev package baseline, and guest SSH/work-disk hardening so new images use the runtime bundle key, keep a normal root bash environment, and repair stale nested /root layouts on restart.

Replace the guest PING/PONG responder with an HTTP /healthz agent over vsock, rename the runtime bundle and config surface from ping helper to agent while still accepting the legacy keys, and route the post-SSH reminder through the new vm.health path.

Validated with GOCACHE=/tmp/banger-gocache go test ./..., make build, bash -n customize.sh make-rootfs-void.sh, and git diff --check.
2026-03-19 14:51:25 -03:00

396 lines
12 KiB
Go

package cli
import (
"context"
"errors"
"os"
"path/filepath"
"strings"
"testing"
"time"
"banger/internal/api"
"banger/internal/model"
"banger/internal/paths"
tea "github.com/charmbracelet/bubbletea"
)
// TestCreateVMFormSubmit fills in every field of the create-VM form and
// verifies that submit() yields an actionCreate carrying the entered values,
// including optional CPU/memory pointers, disk sizes, and the NAT toggle.
func TestCreateVMFormSubmit(t *testing.T) {
	f := newCreateVMForm([]model.Image{{Name: "default"}}, model.DaemonConfig{DefaultImageName: "default"})
	f.fields[0].input.SetValue("devbox")
	f.fields[2].input.SetValue("4")
	f.fields[3].input.SetValue("2048")
	f.fields[4].input.SetValue("12G")
	f.fields[5].input.SetValue("24G")
	f.fields[6].index = 1
	got, err := f.submit()
	if err != nil {
		t.Fatalf("submit: %v", err)
	}
	if got.kind != actionCreate {
		t.Fatalf("kind = %s, want %s", got.kind, actionCreate)
	}
	if got.create.Name != "devbox" || got.create.ImageName != "default" {
		t.Fatalf("unexpected create params: %+v", got.create)
	}
	if got.create.VCPUCount == nil || *got.create.VCPUCount != 4 || got.create.MemoryMiB == nil || *got.create.MemoryMiB != 2048 {
		t.Fatalf("unexpected cpu/memory: %+v", got.create)
	}
	if got.create.SystemOverlaySize != "12G" || got.create.WorkDiskSize != "24G" {
		t.Fatalf("unexpected disk sizes: %+v", got.create)
	}
	if !got.create.NATEnabled {
		t.Fatalf("expected NAT enabled: %+v", got.create)
	}
}
// TestEditVMFormSubmit edits every field of an existing VM's form and checks
// that submit() produces an actionEdit targeting that VM with the new vCPU,
// memory, work-disk size, and NAT values.
func TestEditVMFormSubmit(t *testing.T) {
	f := newEditVMForm(model.VMRecord{
		ID: "vm-1",
		Spec: model.VMSpec{
			VCPUCount:         2,
			MemoryMiB:         1024,
			WorkDiskSizeBytes: 16 * 1024 * 1024 * 1024,
			NATEnabled:        false,
		},
	})
	f.fields[0].input.SetValue("6")
	f.fields[1].input.SetValue("4096")
	f.fields[2].input.SetValue("32G")
	f.fields[3].index = 1
	res, err := f.submit()
	if err != nil {
		t.Fatalf("submit: %v", err)
	}
	if res.kind != actionEdit {
		t.Fatalf("kind = %s, want %s", res.kind, actionEdit)
	}
	if res.set.IDOrName != "vm-1" {
		t.Fatalf("unexpected vm id: %+v", res.set)
	}
	if res.set.VCPUCount == nil || *res.set.VCPUCount != 6 {
		t.Fatalf("unexpected vcpu: %+v", res.set)
	}
	if res.set.MemoryMiB == nil || *res.set.MemoryMiB != 4096 {
		t.Fatalf("unexpected memory: %+v", res.set)
	}
	if res.set.WorkDiskSize != "32G" {
		t.Fatalf("unexpected disk size: %+v", res.set)
	}
	if res.set.NATEnabled == nil || !*res.set.NATEnabled {
		t.Fatalf("expected nat enabled: %+v", res.set)
	}
}
// TestResolveSelectedID covers the three selection cases: the current ID
// still exists, it no longer exists (fall back to the first VM), and the
// VM list is empty (select nothing).
func TestResolveSelectedID(t *testing.T) {
	records := []model.VMRecord{{ID: "one"}, {ID: "two"}}
	if got := resolveSelectedID("two", records); got != "two" {
		t.Fatalf("resolveSelectedID existing = %q, want %q", got, "two")
	}
	if got := resolveSelectedID("missing", records); got != "one" {
		t.Fatalf("resolveSelectedID fallback = %q, want %q", got, "one")
	}
	if got := resolveSelectedID("anything", nil); got != "" {
		t.Fatalf("resolveSelectedID empty = %q, want empty", got)
	}
}
// TestNewTUICommandStartsProgramWithoutEnsuringDaemon verifies that the tui
// subcommand hands control to the Bubble Tea program immediately — with the
// model marked as pending daemon startup — and never calls ensureDaemon
// before (or instead of) launching the TUI.
func TestNewTUICommandStartsProgramWithoutEnsuringDaemon(t *testing.T) {
	origEnsure := tuiEnsureDaemonFunc
	origRunner := tuiProgramRunner
	origTerminal := tuiIsTerminal
	t.Cleanup(func() {
		tuiEnsureDaemonFunc = origEnsure
		tuiProgramRunner = origRunner
		tuiIsTerminal = origTerminal
	})
	ensureCalled := false
	tuiEnsureDaemonFunc = func(ctx context.Context) (paths.Layout, model.DaemonConfig, error) {
		ensureCalled = true
		return paths.Layout{}, model.DaemonConfig{}, nil
	}
	// Parameter is named m (not "model") so it does not shadow the imported
	// model package.
	tuiProgramRunner = func(m tuiModel) error {
		if ensureCalled {
			t.Fatal("ensureDaemon should not run before the TUI starts")
		}
		if !m.daemonPending || !m.loading {
			t.Fatalf("startup model = %+v, want pending daemon startup", m)
		}
		return nil
	}
	// Pretend stdout is a terminal so the TUI path is taken; the fd is unused.
	tuiIsTerminal = func(uintptr) bool { return true }
	cmd := NewBangerCommand()
	cmd.SetArgs([]string{"tui"})
	if err := cmd.Execute(); err != nil {
		t.Fatalf("Execute: %v", err)
	}
	if ensureCalled {
		t.Fatal("ensureDaemon should not have been called")
	}
}
// TestTUIViewRendersLayoutImmediately asserts that the very first render shows
// the full layout with a "Starting daemon" placeholder rather than collapsing
// to a bare one-line "Loading..." message.
func TestTUIViewRendersLayoutImmediately(t *testing.T) {
	tui := newTUIModel(paths.Layout{}, model.DaemonConfig{})
	rendered := tui.View()
	if strings.Contains(rendered, "Loading...") {
		t.Fatalf("view = %q, want full layout instead of one-line loading", rendered)
	}
	if !strings.Contains(rendered, "Starting daemon") {
		t.Fatalf("view = %q, want startup placeholder", rendered)
	}
}
// TestTUIVMLoadCanCompleteBeforeImages drives the startup message sequence
// (daemonReadyMsg, then vmListLoadedMsg) and verifies the VM list renders as
// soon as it arrives, even while the image-list load is still pending.
func TestTUIVMLoadCanCompleteBeforeImages(t *testing.T) {
	now := time.Date(2026, time.March, 18, 12, 0, 0, 0, time.UTC)
	initial := newTUIModel(paths.Layout{}, model.DaemonConfig{})
	updated, _ := initial.Update(daemonReadyMsg{
		generation: initial.loadGeneration,
		layout:     paths.Layout{SocketPath: "/tmp/bangerd.sock"},
		cfg:        model.DaemonConfig{DefaultImageName: "default"},
		duration:   2400 * time.Millisecond,
	})
	m := updated.(tuiModel)
	if !m.daemonReady || !m.vmListPending || !m.imagePending {
		t.Fatalf("model after daemonReady = %+v, want pending vm/image loads", m)
	}
	vm := model.VMRecord{
		ID:            "vm-1",
		Name:          "devbox",
		State:         model.VMStateRunning,
		CreatedAt:     now,
		UpdatedAt:     now,
		LastTouchedAt: now,
		Spec: model.VMSpec{
			VCPUCount:         2,
			MemoryMiB:         1024,
			WorkDiskSizeBytes: 16 * 1024 * 1024 * 1024,
		},
		Runtime: model.VMRuntime{
			GuestIP: "172.16.0.2",
			DNSName: "devbox.vm",
		},
	}
	updated, _ = m.Update(vmListLoadedMsg{
		generation: m.loadGeneration,
		vms:        []model.VMRecord{vm},
		duration:   20 * time.Millisecond,
	})
	m = updated.(tuiModel)
	if len(m.vms) != 1 || m.selectedID != vm.ID {
		t.Fatalf("model after vmListLoaded = %+v, want selected vm", m)
	}
	if !m.imagePending {
		t.Fatalf("image load should still be pending: %+v", m)
	}
	// Render once and reuse the result; the state does not change between
	// these assertions, so repeated View() calls were redundant work.
	view := m.View()
	if strings.Contains(view, "No VMs") {
		t.Fatalf("view should render the loaded VM while images are pending: %q", view)
	}
	if !strings.Contains(view, "devbox") {
		t.Fatalf("view = %q, want loaded VM name", view)
	}
}
// TestTUICreateBlockedWhileImagesLoad verifies that pressing 'c' while the
// image list is still loading keeps the TUI in browse mode and shows a
// warning instead of opening the create form.
func TestTUICreateBlockedWhileImagesLoad(t *testing.T) {
	tui := newTUIModel(paths.Layout{}, model.DaemonConfig{})
	tui.daemonPending = false
	tui.daemonReady = true
	tui.imagePending = true
	tui.loading = true
	next, _ := tui.updateBrowse(tea.KeyMsg{Type: tea.KeyRunes, Runes: []rune{'c'}})
	if next.mode != tuiModeBrowse {
		t.Fatalf("mode = %v, want browse", next.mode)
	}
	if next.statusText != "Images are still loading" {
		t.Fatalf("status = %q, want image loading warning", next.statusText)
	}
}
// TestTUIStatusIncludesStageDurationsAfterInitialLoad drives the complete
// startup sequence (daemon ready, VM list, image list) and checks that the
// status line reports each stage's duration.
func TestTUIStatusIncludesStageDurationsAfterInitialLoad(t *testing.T) {
	tui := newTUIModel(paths.Layout{}, model.DaemonConfig{})
	next, _ := tui.Update(daemonReadyMsg{
		generation: tui.loadGeneration,
		layout:     paths.Layout{SocketPath: "/tmp/bangerd.sock"},
		duration:   2400 * time.Millisecond,
	})
	m := next.(tuiModel)
	next, _ = m.Update(vmListLoadedMsg{
		generation: m.loadGeneration,
		vms:        []model.VMRecord{},
		duration:   20 * time.Millisecond,
	})
	m = next.(tuiModel)
	next, _ = m.Update(imageListLoadedMsg{
		generation: m.loadGeneration,
		images:     []model.Image{{Name: "default"}},
		duration:   15 * time.Millisecond,
	})
	m = next.(tuiModel)
	for _, want := range []string{"daemon 2.4s", "vm list 20ms", "image list 15ms"} {
		if !strings.Contains(m.statusText, want) {
			t.Fatalf("statusText = %q, want stage timings", m.statusText)
		}
	}
}
// TestSSHDoneMsgShowsReminderWhenHealthCheckPasses stubs vmHealthFunc to
// report a healthy VM and checks that ending an SSH session produces a
// "still running" reminder in the resulting status message.
func TestSSHDoneMsgShowsReminderWhenHealthCheckPasses(t *testing.T) {
	saved := vmHealthFunc
	t.Cleanup(func() { vmHealthFunc = saved })
	vmHealthFunc = func(_ context.Context, _, _ string) (api.VMHealthResult, error) {
		return api.VMHealthResult{Name: "devbox", Healthy: true}, nil
	}
	msg := sshDoneMsg(paths.Layout{SocketPath: "/tmp/bangerd.sock"}, actionRequest{id: "devbox", name: "devbox"}, "devbox", nil)
	result, ok := msg.(actionResultMsg)
	if !ok {
		t.Fatalf("msg = %T, want actionResultMsg", msg)
	}
	if !strings.Contains(result.status, "devbox is still running") {
		t.Fatalf("status = %q, want reminder", result.status)
	}
}
// TestSSHDoneMsgShowsWarningWhenHealthCheckFails stubs vmHealthFunc to fail
// and checks that ending an SSH session surfaces a "failed to check" warning
// instead of the still-running reminder.
func TestSSHDoneMsgShowsWarningWhenHealthCheckFails(t *testing.T) {
	origHealth := vmHealthFunc
	t.Cleanup(func() {
		vmHealthFunc = origHealth
	})
	vmHealthFunc = func(ctx context.Context, socketPath, idOrName string) (api.VMHealthResult, error) {
		return api.VMHealthResult{}, errors.New("dial failed")
	}
	msg := sshDoneMsg(paths.Layout{SocketPath: "/tmp/bangerd.sock"}, actionRequest{id: "devbox", name: "devbox"}, "devbox", nil)
	// Checked assertion so an unexpected message type fails the test cleanly
	// instead of panicking; matches the passing-health test above.
	result, ok := msg.(actionResultMsg)
	if !ok {
		t.Fatalf("msg = %T, want actionResultMsg", msg)
	}
	if !strings.Contains(result.status, "failed to check whether devbox is still running") {
		t.Fatalf("status = %q, want warning", result.status)
	}
}
func TestAggregateRunningVMResources(t *testing.T) {
t.Parallel()
running, vcpus, memoryBytes := aggregateRunningVMResources([]model.VMRecord{
{
State: model.VMStateRunning,
Spec: model.VMSpec{
VCPUCount: 2,
MemoryMiB: 1024,
},
},
{
State: model.VMStateStopped,
Spec: model.VMSpec{
VCPUCount: 8,
MemoryMiB: 8192,
},
},
{
State: model.VMStateRunning,
Spec: model.VMSpec{
VCPUCount: 4,
MemoryMiB: 2048,
},
},
})
if running != 2 || vcpus != 6 || memoryBytes != 3*1024*1024*1024 {
t.Fatalf("aggregateRunningVMResources = (%d, %d, %d), want (2, 6, %d)", running, vcpus, memoryBytes, int64(3*1024*1024*1024))
}
}
// TestTUIViewShowsResourceBar renders a model with known host capacity and
// two VMs (one running, one stopped, the running one backed by real files on
// disk) and asserts the view includes the VM count, CPU/RAM/Disk aggregates,
// and the block-character progress bars.
func TestTUIViewShowsResourceBar(t *testing.T) {
	t.Parallel()
	tui := newTUIModel(paths.Layout{}, model.DaemonConfig{})
	tui.hostCPUCount = 32
	tui.hostMemoryBytes = 125 * 1024 * 1024 * 1024
	tui.hostDiskBytes = 200 * 1024 * 1024 * 1024
	tui.daemonPending = false
	tui.loading = false
	stateDir := t.TempDir()
	overlay := filepath.Join(stateDir, "system.cow")
	workDisk := filepath.Join(stateDir, "root.ext4")
	if err := os.WriteFile(overlay, make([]byte, 1024), 0o644); err != nil {
		t.Fatalf("WriteFile overlay: %v", err)
	}
	if err := os.WriteFile(workDisk, make([]byte, 2048), 0o644); err != nil {
		t.Fatalf("WriteFile work disk: %v", err)
	}
	tui.vms = []model.VMRecord{
		{
			ID:    "vm-1",
			Name:  "devbox",
			State: model.VMStateRunning,
			Spec: model.VMSpec{
				VCPUCount:         2,
				MemoryMiB:         1024,
				WorkDiskSizeBytes: 16 * 1024 * 1024 * 1024,
			},
			Runtime: model.VMRuntime{
				SystemOverlay: overlay,
				WorkDiskPath:  workDisk,
			},
		},
		{
			ID:    "vm-2",
			Name:  "db",
			State: model.VMStateStopped,
			Spec: model.VMSpec{
				VCPUCount:         4,
				MemoryMiB:         4096,
				WorkDiskSizeBytes: 32 * 1024 * 1024 * 1024,
			},
		},
	}
	tui.selectedID = "vm-1"
	tui.rebuildTable()
	tui.refreshDetail()
	view := tui.View()
	if !strings.Contains(view, "VMs") || !strings.Contains(view, "1/2") {
		t.Fatalf("view = %q, want running VM count", view)
	}
	if !strings.Contains(view, "CPU") || !strings.Contains(view, "2/32") {
		t.Fatalf("view = %q, want vcpu aggregate", view)
	}
	if !strings.Contains(view, "RAM") || !strings.Contains(view, "1.0G/125.0G") {
		t.Fatalf("view = %q, want memory aggregate", view)
	}
	if !strings.Contains(view, "Disk") {
		t.Fatalf("view = %q, want disk aggregate", view)
	}
	if !strings.Contains(view, "█") || !strings.Contains(view, "░") {
		t.Fatalf("view = %q, want visual progress bars", view)
	}
}
func TestAggregateVMDiskUsage(t *testing.T) {
t.Parallel()
dir := t.TempDir()
overlayPath := filepath.Join(dir, "system.cow")
workDiskPath := filepath.Join(dir, "root.ext4")
if err := os.WriteFile(overlayPath, make([]byte, 4096), 0o644); err != nil {
t.Fatalf("WriteFile overlay: %v", err)
}
if err := os.WriteFile(workDiskPath, make([]byte, 8192), 0o644); err != nil {
t.Fatalf("WriteFile work disk: %v", err)
}
total := aggregateVMDiskUsage([]model.VMRecord{{
Runtime: model.VMRuntime{
SystemOverlay: overlayPath,
WorkDiskPath: workDiskPath,
},
}})
if total <= 0 {
t.Fatalf("aggregateVMDiskUsage = %d, want positive allocated bytes", total)
}
}