port smoke to go

This commit is contained in:
Thales Maciel 2026-05-01 19:34:44 -03:00
parent b0a9d64f4a
commit 9ed44bfd75
No known key found for this signature in database
GPG key ID: 33112E6833C34679
20 changed files with 2118 additions and 1573 deletions

View file

@ -40,9 +40,10 @@ make lint # gofmt + go vet + shellcheck
The smoke suite (`make smoke`) builds coverage-instrumented binaries, The smoke suite (`make smoke`) builds coverage-instrumented binaries,
installs them as a temporary systemd service, and runs end-to-end installs them as a temporary systemd service, and runs end-to-end
scenarios against real Firecracker. Requires a KVM-capable host and scenarios against real Firecracker. Requires a KVM-capable host and
`sudo`. `make smoke-list` prints scenario names; `make smoke-one `sudo`. The suite lives under `internal/smoketest/` (build-tagged
SCENARIO=<name>` runs just one. See the smoke comments in the `smoke`); `make smoke-list` prints scenario names; `make smoke-one
`Makefile` for details. SCENARIO=<name>` runs just one (comma-separated for several). See
the smoke comments in the `Makefile` for details.
## Pre-commit hook ## Pre-commit hook

View file

@ -25,7 +25,6 @@ SMOKE_DIR := $(BUILD_DIR)/smoke
SMOKE_BIN_DIR := $(SMOKE_DIR)/bin SMOKE_BIN_DIR := $(SMOKE_DIR)/bin
SMOKE_COVER_DIR := $(SMOKE_DIR)/covdata SMOKE_COVER_DIR := $(SMOKE_DIR)/covdata
SMOKE_XDG_DIR := $(SMOKE_DIR)/xdg SMOKE_XDG_DIR := $(SMOKE_DIR)/xdg
SMOKE_SCRIPT := scripts/smoke.sh
VERSION ?= $(shell git describe --tags --exact-match 2>/dev/null || echo dev) VERSION ?= $(shell git describe --tags --exact-match 2>/dev/null || echo dev)
COMMIT ?= $(shell git rev-parse --verify HEAD 2>/dev/null || echo unknown) COMMIT ?= $(shell git rev-parse --verify HEAD 2>/dev/null || echo unknown)
BUILT_AT ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) BUILT_AT ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
@ -61,9 +60,9 @@ help:
' make tidy Run go mod tidy' \ ' make tidy Run go mod tidy' \
' make clean Remove built Go binaries and coverage artefacts' \ ' make clean Remove built Go binaries and coverage artefacts' \
' make smoke Build instrumented binaries, run the supported systemd smoke suite, report coverage (needs KVM + sudo)' \ ' make smoke Build instrumented binaries, run the supported systemd smoke suite, report coverage (needs KVM + sudo)' \
' make smoke JOBS=N Override parallelism (default: nproc, capped at 8 by the script). JOBS=1 forces serial.' \ ' make smoke JOBS=N Override parallelism (default: nproc, capped at 8). JOBS=1 forces serial.' \
' make smoke-list Print the list of smoke scenarios with descriptions (no build, no install)' \ ' make smoke-list Print the list of smoke scenarios (no build, no install)' \
' make smoke-one SCENARIO=NAME Run a single smoke scenario (still does the install preamble)' \ ' make smoke-one SCENARIO=NAME Run a single smoke scenario (still does the install preamble; comma-separated for several)' \
' make smoke-fresh smoke-clean + smoke — purges stale smoke-owned installs before a clean supported-path run' \ ' make smoke-fresh smoke-clean + smoke — purges stale smoke-owned installs before a clean supported-path run' \
' make smoke-coverage-html HTML coverage report from the last smoke run' \ ' make smoke-coverage-html HTML coverage report from the last smoke run' \
' make smoke-clean Remove the smoke build tree and purge any stale smoke-owned system install' \ ' make smoke-clean Remove the smoke build tree and purge any stale smoke-owned system install' \
@ -164,17 +163,17 @@ clean:
# Smoke test suite. Builds the three banger binaries with -cover # Smoke test suite. Builds the three banger binaries with -cover
# instrumentation under $(SMOKE_BIN_DIR), installs them as temporary # instrumentation under $(SMOKE_BIN_DIR), installs them as temporary
# bangerd.service + bangerd-root.service, runs scripts/smoke.sh, copies # bangerd.service + bangerd-root.service, runs the Go scenarios under
# service covdata out of /var/lib/banger, then purges the smoke-owned # internal/smoketest (built with -tags=smoke), copies service covdata
# install on exit. # out of /var/lib/banger, then purges the smoke-owned install on exit.
# #
# Unlike the old per-user daemon path, this touches global systemd # This touches global systemd state. The harness refuses to overwrite a
# state. The smoke script refuses to overwrite a pre-existing non-smoke # pre-existing non-smoke install and drops a marker file under
# install and uses a marker file so `make smoke-clean` can recover a # /etc/banger so `make smoke-clean` can recover a stale smoke-owned
# stale smoke-owned install after an interrupted run. # install after an interrupted run.
# #
# Requires a KVM-capable Linux host with sudo. This is a pre-release # Requires a KVM-capable Linux host with sudo. This is a pre-release
# gate, not CI — the Go test suite is what runs everywhere. # gate, not CI — the Go unit suite (`make test`) is what runs everywhere.
smoke-build: $(SMOKE_BIN_DIR)/.built smoke-build: $(SMOKE_BIN_DIR)/.built
$(SMOKE_BIN_DIR)/.built: $(BUILD_INPUTS) go.mod go.sum $(SMOKE_BIN_DIR)/.built: $(BUILD_INPUTS) go.mod go.sum
@ -184,10 +183,11 @@ $(SMOKE_BIN_DIR)/.built: $(BUILD_INPUTS) go.mod go.sum
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 $(GO) build -ldflags '$(GO_LDFLAGS)' -o "$(SMOKE_BIN_DIR)/banger-vsock-agent" ./cmd/banger-vsock-agent CGO_ENABLED=0 GOOS=linux GOARCH=amd64 $(GO) build -ldflags '$(GO_LDFLAGS)' -o "$(SMOKE_BIN_DIR)/banger-vsock-agent" ./cmd/banger-vsock-agent
touch "$@" touch "$@"
# JOBS defaults to nproc (the script caps at 8). Override with # JOBS defaults to nproc; SMOKE_JOBS clamps it at 8. Each parallel slot
# `make smoke JOBS=1` for a fully serial run, or any specific N for # runs a smoke-tuned VM, and over-subscribing the host pushes
# tighter parallelism. # waitForSSH past its 60s deadline. Floored at 1 so JOBS=1 still works.
JOBS ?= $(shell nproc 2>/dev/null || echo 1) JOBS ?= $(shell nproc 2>/dev/null || echo 1)
SMOKE_JOBS := $(shell n=$(JOBS); [ $$n -lt 1 ] && n=1; [ $$n -gt 8 ] && n=8; echo $$n)
smoke: smoke-build smoke: smoke-build
rm -rf "$(SMOKE_COVER_DIR)" rm -rf "$(SMOKE_COVER_DIR)"
@ -195,27 +195,31 @@ smoke: smoke-build
BANGER_SMOKE_BIN_DIR="$(abspath $(SMOKE_BIN_DIR))" \ BANGER_SMOKE_BIN_DIR="$(abspath $(SMOKE_BIN_DIR))" \
BANGER_SMOKE_COVER_DIR="$(abspath $(SMOKE_COVER_DIR))" \ BANGER_SMOKE_COVER_DIR="$(abspath $(SMOKE_COVER_DIR))" \
BANGER_SMOKE_XDG_DIR="$(abspath $(SMOKE_XDG_DIR))" \ BANGER_SMOKE_XDG_DIR="$(abspath $(SMOKE_XDG_DIR))" \
bash "$(SMOKE_SCRIPT)" --jobs $(JOBS) $(GO) test -tags=smoke -count=1 -v -parallel $(SMOKE_JOBS) -timeout 30m ./internal/smoketest
@echo '' @echo ''
@echo 'Smoke coverage:' @echo 'Smoke coverage:'
@$(GO) tool covdata percent -i="$(SMOKE_COVER_DIR)" @$(GO) tool covdata percent -i="$(SMOKE_COVER_DIR)"
# smoke-list is intentionally cheap: no smoke-build dep, no env vars. # smoke-list parses the test scaffold for scenario names. Cheap: no
# The script's --list path short-circuits before any side-effect or # smoke-build dep, no env vars, no test binary spawned.
# env validation, so this works on a fresh checkout.
smoke-list: smoke-list:
@bash "$(SMOKE_SCRIPT)" --list @grep -oE 't\.Run\("[a-z_]+", *test[A-Za-z]+\)' internal/smoketest/smoke_test.go \
| sed -E 's/t\.Run\("([a-z_]+)".*/ \1/'
# smoke-one runs one scenario (or a comma-separated list) with the
# install preamble. Comma list becomes a regex alternation so multiple
# scenarios can be selected without invoking go test by hand.
SCENARIO_PATTERN := $(shell echo '$(SCENARIO)' | tr ',' '|')
# smoke-one runs one scenario (or a comma-separated list) with the same
# install preamble as the full suite. Useful when iterating on a specific
# scenario — see `make smoke-list` for names.
smoke-one: smoke-build smoke-one: smoke-build
rm -rf "$(SMOKE_COVER_DIR)" rm -rf "$(SMOKE_COVER_DIR)"
mkdir -p "$(SMOKE_COVER_DIR)" "$(SMOKE_XDG_DIR)" mkdir -p "$(SMOKE_COVER_DIR)" "$(SMOKE_XDG_DIR)"
BANGER_SMOKE_BIN_DIR="$(abspath $(SMOKE_BIN_DIR))" \ BANGER_SMOKE_BIN_DIR="$(abspath $(SMOKE_BIN_DIR))" \
BANGER_SMOKE_COVER_DIR="$(abspath $(SMOKE_COVER_DIR))" \ BANGER_SMOKE_COVER_DIR="$(abspath $(SMOKE_COVER_DIR))" \
BANGER_SMOKE_XDG_DIR="$(abspath $(SMOKE_XDG_DIR))" \ BANGER_SMOKE_XDG_DIR="$(abspath $(SMOKE_XDG_DIR))" \
bash "$(SMOKE_SCRIPT)" --scenario "$(SCENARIO)" $(GO) test -tags=smoke -count=1 -v -timeout 30m \
-run "TestSmoke/.*/($(SCENARIO_PATTERN))$$" \
./internal/smoketest
smoke-coverage-html: smoke smoke-coverage-html: smoke
$(GO) tool covdata textfmt -i="$(SMOKE_COVER_DIR)" -o="$(SMOKE_DIR)/cover.out" $(GO) tool covdata textfmt -i="$(SMOKE_COVER_DIR)" -o="$(SMOKE_DIR)/cover.out"

View file

@ -34,10 +34,14 @@ The most common workflow is one command:
banger vm run bare sandbox, drops into ssh banger vm run bare sandbox, drops into ssh
banger vm run ./repo ships a repo into /root/repo, drops into ssh banger vm run ./repo ships a repo into /root/repo, drops into ssh
banger vm run ./repo -- make test ships a repo, runs the command, exits with its status banger vm run ./repo -- make test ships a repo, runs the command, exits with its status
banger vm run --rm -- script.sh --rm: VM auto-deletes when the session/command exits
banger vm run --nat ./repo --nat: outbound internet (required when .mise.toml installs tools)
banger vm run -d ./repo --nat -d/--detach: prep workspace + bootstrap, exit without ssh
For a longer-lived VM, use 'banger vm create' to provision and For a longer-lived VM, use 'banger vm create' to provision and
'banger vm ssh <name>' to attach. 'banger ps' lists running VMs; 'banger vm ssh <name>' to attach. 'banger ps' lists running VMs;
'banger vm list --all' shows stopped ones too. 'banger vm list --all' shows stopped ones too. Guests are reachable
at <name>.vm from the host once 'banger ssh-config --install' is run.
First-time setup, in order: First-time setup, in order:
sudo banger system install install the systemd services sudo banger system install install the systemd services

View file

@ -588,7 +588,7 @@ func TestRunVMCreatePollsUntilDone(t *testing.T) {
} }
var stderr bytes.Buffer var stderr bytes.Buffer
got, err := d.runVMCreate(context.Background(), "/tmp/bangerd.sock", &stderr, api.VMCreateParams{Name: "devbox"}) got, err := d.runVMCreate(context.Background(), "/tmp/bangerd.sock", &stderr, api.VMCreateParams{Name: "devbox"}, false)
if err != nil { if err != nil {
t.Fatalf("d.runVMCreate: %v", err) t.Fatalf("d.runVMCreate: %v", err)
} }
@ -643,7 +643,7 @@ func TestVMCreateProgressRendererSuppressesDuplicateLines(t *testing.T) {
func TestVMRunProgressRendererSuppressesDuplicateLines(t *testing.T) { func TestVMRunProgressRendererSuppressesDuplicateLines(t *testing.T) {
var stderr bytes.Buffer var stderr bytes.Buffer
renderer := newVMRunProgressRenderer(&stderr) renderer := newVMRunProgressRenderer(&stderr, true)
renderer.render("waiting for guest ssh") renderer.render("waiting for guest ssh")
renderer.render("waiting for guest ssh") renderer.render("waiting for guest ssh")
@ -661,6 +661,67 @@ func TestVMRunProgressRendererSuppressesDuplicateLines(t *testing.T) {
} }
} }
// TestVMRunProgressRendererInlineRewrites pins the TTY default path:
// every render rewrites the current line with \r + clear-to-EOL rather
// than appending a newline, so only one live status line is visible
// until commitLine (or clear, or the caller's own newline) finishes it.
func TestVMRunProgressRendererInlineRewrites(t *testing.T) {
	var out bytes.Buffer
	r := &vmRunProgressRenderer{out: &out, enabled: true, inline: true}

	r.render("waiting for guest ssh")
	r.render("preparing guest workspace")
	r.commitLine("vm devbox running; reconnect with: banger vm ssh devbox")

	const rewrite = "\r\x1b[K"
	want := rewrite + "[vm run] waiting for guest ssh" +
		rewrite + "[vm run] preparing guest workspace" +
		rewrite + "[vm run] vm devbox running; reconnect with: banger vm ssh devbox\n"
	if got := out.String(); got != want {
		t.Fatalf("inline output = %q, want %q", got, want)
	}
}
// TestVMRunProgressRendererClearWipesActiveLine guards the path taken
// before sshExec/runSSHSession: clear() has to erase the live inline
// line so whatever writes next (ssh session, warning, command output)
// starts at column 0 with no status text left behind. It also checks
// that a redundant clear() emits nothing.
func TestVMRunProgressRendererClearWipesActiveLine(t *testing.T) {
	var out bytes.Buffer
	r := &vmRunProgressRenderer{out: &out, enabled: true, inline: true}

	r.render("attaching to guest")
	r.clear()
	// Second clear() must be a no-op: active is already false.
	r.clear()

	const want = "\r\x1b[K[vm run] attaching to guest\r\x1b[K"
	if got := out.String(); got != want {
		t.Fatalf("after clear stderr = %q, want %q", got, want)
	}
}
// TestVMCreateProgressRendererInlineRewrites is the create-side twin of
// the vm_run inline test, so the two progress renderers can't drift
// apart when only one of them is edited.
func TestVMCreateProgressRendererInlineRewrites(t *testing.T) {
	var out bytes.Buffer
	r := &vmCreateProgressRenderer{out: &out, enabled: true, inline: true}

	r.render(api.VMCreateOperation{Stage: "prepare_work_disk", Detail: "cloning work seed"})
	r.render(api.VMCreateOperation{Stage: "wait_vsock_agent", Detail: "waiting for guest vsock agent"})
	r.clear()

	const rewrite = "\r\x1b[K"
	want := rewrite + "[vm create] preparing work disk: cloning work seed" +
		rewrite + "[vm create] waiting for vsock agent: waiting for guest vsock agent" +
		rewrite
	if got := out.String(); got != want {
		t.Fatalf("inline output = %q, want %q", got, want)
	}
}
func TestWithHeartbeatNoOpForNonTTY(t *testing.T) { func TestWithHeartbeatNoOpForNonTTY(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
called := false called := false
@ -1326,6 +1387,7 @@ func TestRunVMRunWorkspacePreparesAndAttaches(t *testing.T) {
false, false,
false, false,
false, false,
false,
) )
if err != nil { if err != nil {
t.Fatalf("d.runVMRun: %v", err) t.Fatalf("d.runVMRun: %v", err)
@ -1404,6 +1466,7 @@ func TestVMRunPrintsPostCreateProgress(t *testing.T) {
false, false,
false, false,
false, false,
false,
) )
if err != nil { if err != nil {
t.Fatalf("d.runVMRun: %v", err) t.Fatalf("d.runVMRun: %v", err)
@ -1481,6 +1544,7 @@ func TestRunVMRunWarnsWhenToolingHarnessStartFails(t *testing.T) {
false, false,
false, false,
false, false,
false,
) )
if err != nil { if err != nil {
t.Fatalf("d.runVMRun: %v", err) t.Fatalf("d.runVMRun: %v", err)
@ -1534,6 +1598,7 @@ func TestRunVMRunBareModeSkipsWorkspaceAndTooling(t *testing.T) {
false, false,
false, false,
false, false,
false,
) )
if err != nil { if err != nil {
t.Fatalf("d.runVMRun: %v", err) t.Fatalf("d.runVMRun: %v", err)
@ -1580,6 +1645,7 @@ func TestRunVMRunRMDeletesAfterSessionExits(t *testing.T) {
true, // --rm, true, // --rm,
false, false,
false, false,
false,
) )
if err != nil { if err != nil {
t.Fatalf("d.runVMRun: %v", err) t.Fatalf("d.runVMRun: %v", err)
@ -1632,6 +1698,7 @@ func TestRunVMRunRMSkipsDeleteOnSSHWaitTimeout(t *testing.T) {
true, // --rm, true, // --rm,
false, false,
false, false,
false,
) )
if err == nil { if err == nil {
t.Fatal("want timeout error") t.Fatal("want timeout error")
@ -1676,6 +1743,7 @@ func TestRunVMRunSSHTimeoutReturnsActionableError(t *testing.T) {
false, false,
false, false,
false, false,
false,
) )
if err == nil { if err == nil {
t.Fatal("want timeout error") t.Fatal("want timeout error")
@ -1727,6 +1795,7 @@ func TestRunVMRunCommandModePropagatesExitCode(t *testing.T) {
false, false,
false, false,
false, false,
false,
) )
var exitErr ExitCodeError var exitErr ExitCodeError
if !errors.As(err, &exitErr) || exitErr.Code != 7 { if !errors.As(err, &exitErr) || exitErr.Code != 7 {

View file

@ -35,8 +35,11 @@ provisions ssh, and drops you into the guest in one command. Use
longer-lived VM you'll come back to. longer-lived VM you'll come back to.
Quick reference: Quick reference:
banger vm run ephemeral sandbox; --rm to delete on exit banger vm run interactive sandbox (stays alive on disconnect)
banger vm run ./repo -- make test ship a repo, run a command, exit banger vm run --rm -- script.sh ephemeral: VM auto-deletes on exit
banger vm run ./repo -- make test ship a repo, run a command, exit with its status
banger vm run --nat ./repo --nat: outbound internet (required for mise bootstrap)
banger vm run -d ./repo --nat -d/--detach: prep + bootstrap, exit (no ssh attach)
banger vm create --name dev persistent VM; pair with 'vm ssh' banger vm create --name dev persistent VM; pair with 'vm ssh'
banger vm ssh <name> open a shell in a running VM banger vm ssh <name> open a shell in a running VM
banger vm exec <name> -- make test run a command in the workspace with mise toolchain banger vm exec <name> -- make test run a command in the workspace with mise toolchain
@ -45,6 +48,7 @@ Quick reference:
banger vm delete <name> stop + remove disks banger vm delete <name> stop + remove disks
banger ps / banger vm list running / all VMs (use --all) banger ps / banger vm list running / all VMs (use --all)
banger vm logs <name> guest console + daemon log banger vm logs <name> guest console + daemon log
banger vm set --nat <name> toggle NAT on an existing VM (--no-nat to remove)
banger vm workspace prepare/export ship a repo in, pull diffs back banger vm workspace prepare/export ship a repo in, pull diffs back
`), `),
Example: strings.TrimSpace(` Example: strings.TrimSpace(`
@ -93,6 +97,7 @@ func (d *deps) newVMRunCommand() *cobra.Command {
dryRun bool dryRun bool
detach bool detach bool
skipBootstrap bool skipBootstrap bool
verbose bool
) )
cmd := &cobra.Command{ cmd := &cobra.Command{
Use: "run [path] [-- command args...]", Use: "run [path] [-- command args...]",
@ -103,14 +108,33 @@ Create a sandbox VM and either drop into an interactive shell or run a command.
Modes: Modes:
banger vm run bare sandbox, drops into ssh banger vm run bare sandbox, drops into ssh
banger vm run ./repo workspace sandbox, drops into ssh at /root/repo banger vm run ./repo workspace sandbox, drops into ssh at /root/repo
banger vm run ./repo -- make test workspace, runs command, exits with its status banger vm run ./repo -- make test workspace + run command, exit with its status
banger vm run -d ./repo workspace + bootstrap, exit (no ssh attach) banger vm run --rm -- script.sh ephemeral: VM auto-deletes when the session/command exits
banger vm run -d ./repo workspace + bootstrap, exit (reconnect with 'vm ssh')
Workspace mode (path argument):
Passing a path copies the repo's git-tracked files into /root/repo
inside the guest. Untracked files are skipped by default — pass
--include-untracked to ship them too, or --dry-run to preview the
file list without creating a VM.
Outbound internet (--nat):
Guests have no internet access by default. Pass --nat to enable
host-side MASQUERADE so the VM can reach the public network. NAT is
required whenever the workspace declares mise tooling (see below).
Toggle on an existing VM with 'banger vm set --nat <name>'.
Tooling bootstrap (workspace mode): Tooling bootstrap (workspace mode):
When the workspace contains a .mise.toml or .tool-versions, vm run When the workspace contains a .mise.toml or .tool-versions, vm run
installs the listed tools via mise on first boot. The bootstrap installs the listed tools via mise on first boot. The bootstrap
needs internet, so --nat must be set. Pass --no-bootstrap to skip needs internet, so --nat must be set. Pass --no-bootstrap to skip
it entirely (no NAT requirement). it entirely (no NAT requirement).
Exit behaviour:
In command mode (-- <cmd>), the guest command's exit code propagates
through banger. Without --rm, the VM stays alive after the session
or command exits — reconnect with 'banger vm ssh <name>'. With --rm,
the VM is deleted on exit (stdout/stderr are preserved).
`), `),
Args: cobra.ArbitraryArgs, Args: cobra.ArbitraryArgs,
Example: strings.TrimSpace(` Example: strings.TrimSpace(`
@ -190,7 +214,7 @@ Tooling bootstrap (workspace mode):
if err != nil { if err != nil {
return err return err
} }
return d.runVMRun(cmd.Context(), layout.SocketPath, cfg, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), params, repoPtr, commandArgs, removeOnExit, detach, skipBootstrap) return d.runVMRun(cmd.Context(), layout.SocketPath, cfg, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), params, repoPtr, commandArgs, removeOnExit, detach, skipBootstrap, verbose)
}, },
} }
cmd.Flags().StringVar(&name, "name", "", "vm name") cmd.Flags().StringVar(&name, "name", "", "vm name")
@ -199,14 +223,15 @@ Tooling bootstrap (workspace mode):
cmd.Flags().IntVar(&memory, "memory", defaults.MemoryMiB, "memory in MiB") cmd.Flags().IntVar(&memory, "memory", defaults.MemoryMiB, "memory in MiB")
cmd.Flags().StringVar(&systemOverlaySize, "system-overlay-size", model.FormatSizeBytes(defaults.SystemOverlaySizeByte), "system overlay size") cmd.Flags().StringVar(&systemOverlaySize, "system-overlay-size", model.FormatSizeBytes(defaults.SystemOverlaySizeByte), "system overlay size")
cmd.Flags().StringVar(&workDiskSize, "disk-size", model.FormatSizeBytes(defaults.WorkDiskSizeBytes), "work disk size") cmd.Flags().StringVar(&workDiskSize, "disk-size", model.FormatSizeBytes(defaults.WorkDiskSizeBytes), "work disk size")
cmd.Flags().BoolVar(&natEnabled, "nat", false, "enable NAT") cmd.Flags().BoolVar(&natEnabled, "nat", false, "enable outbound internet from the guest (host-side MASQUERADE; required when the workspace declares mise tooling)")
cmd.Flags().StringVar(&branchName, "branch", "", "create and switch to a new guest branch") cmd.Flags().StringVar(&branchName, "branch", "", "create and switch to a new guest branch")
cmd.Flags().StringVar(&fromRef, "from", "HEAD", "git ref to branch from when --branch is set (default: HEAD)") cmd.Flags().StringVar(&fromRef, "from", "HEAD", "git ref to branch from when --branch is set (default: HEAD)")
cmd.Flags().BoolVar(&removeOnExit, "rm", false, "delete the VM after the ssh session / command exits") cmd.Flags().BoolVar(&removeOnExit, "rm", false, "ephemeral mode: delete the VM (and its disks) after the ssh session / command exits")
cmd.Flags().BoolVar(&includeUntracked, "include-untracked", false, "also copy untracked non-ignored files into the guest workspace (default: tracked files only)") cmd.Flags().BoolVar(&includeUntracked, "include-untracked", false, "also copy untracked non-ignored files into the guest workspace (default: tracked files only)")
cmd.Flags().BoolVar(&dryRun, "dry-run", false, "list the files that would be copied into the guest workspace and exit without creating a VM") cmd.Flags().BoolVar(&dryRun, "dry-run", false, "list the files that would be copied into the guest workspace and exit without creating a VM")
cmd.Flags().BoolVarP(&detach, "detach", "d", false, "create the VM, prep workspace + bootstrap, exit without attaching to ssh") cmd.Flags().BoolVarP(&detach, "detach", "d", false, "detached mode: create the VM, run workspace prep + bootstrap synchronously, exit without ssh attach (reconnect with 'vm ssh')")
cmd.Flags().BoolVar(&skipBootstrap, "no-bootstrap", false, "skip the mise tooling bootstrap (no --nat requirement)") cmd.Flags().BoolVar(&skipBootstrap, "no-bootstrap", false, "skip the mise tooling bootstrap (no --nat requirement)")
cmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "show every progress line instead of a single rewriting status line")
_ = cmd.RegisterFlagCompletionFunc("image", d.completeImageNames) _ = cmd.RegisterFlagCompletionFunc("image", d.completeImageNames)
return cmd return cmd
} }
@ -370,6 +395,7 @@ func (d *deps) newVMCreateCommand() *cobra.Command {
workDiskSize = model.FormatSizeBytes(defaults.WorkDiskSizeBytes) workDiskSize = model.FormatSizeBytes(defaults.WorkDiskSizeBytes)
natEnabled bool natEnabled bool
noStart bool noStart bool
verbose bool
) )
cmd := &cobra.Command{ cmd := &cobra.Command{
Use: "create", Use: "create",
@ -397,7 +423,7 @@ Use 'vm create' for a longer-lived VM you'll come back to. Use
if err != nil { if err != nil {
return err return err
} }
vm, err := d.runVMCreate(cmd.Context(), layout.SocketPath, cmd.ErrOrStderr(), params) vm, err := d.runVMCreate(cmd.Context(), layout.SocketPath, cmd.ErrOrStderr(), params, verbose)
if err != nil { if err != nil {
return err return err
} }
@ -410,8 +436,9 @@ Use 'vm create' for a longer-lived VM you'll come back to. Use
cmd.Flags().IntVar(&memory, "memory", defaults.MemoryMiB, "memory in MiB") cmd.Flags().IntVar(&memory, "memory", defaults.MemoryMiB, "memory in MiB")
cmd.Flags().StringVar(&systemOverlaySize, "system-overlay-size", model.FormatSizeBytes(defaults.SystemOverlaySizeByte), "system overlay size") cmd.Flags().StringVar(&systemOverlaySize, "system-overlay-size", model.FormatSizeBytes(defaults.SystemOverlaySizeByte), "system overlay size")
cmd.Flags().StringVar(&workDiskSize, "disk-size", model.FormatSizeBytes(defaults.WorkDiskSizeBytes), "work disk size") cmd.Flags().StringVar(&workDiskSize, "disk-size", model.FormatSizeBytes(defaults.WorkDiskSizeBytes), "work disk size")
cmd.Flags().BoolVar(&natEnabled, "nat", false, "enable NAT") cmd.Flags().BoolVar(&natEnabled, "nat", false, "enable outbound internet from the guest (host-side MASQUERADE)")
cmd.Flags().BoolVar(&noStart, "no-start", false, "create without starting") cmd.Flags().BoolVar(&noStart, "no-start", false, "create without starting")
cmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "show every progress line instead of a single rewriting status line")
_ = cmd.RegisterFlagCompletionFunc("image", d.completeImageNames) _ = cmd.RegisterFlagCompletionFunc("image", d.completeImageNames)
return cmd return cmd
} }

View file

@ -61,14 +61,14 @@ func printVMSpecLine(out io.Writer, params api.VMCreateParams) {
// gets the spec line up front and the progress renderer thereafter. // gets the spec line up front and the progress renderer thereafter.
// On context cancel we cooperate with the daemon to cancel the // On context cancel we cooperate with the daemon to cancel the
// in-flight op so it doesn't leak partially-created VM state. // in-flight op so it doesn't leak partially-created VM state.
func (d *deps) runVMCreate(ctx context.Context, socketPath string, stderr io.Writer, params api.VMCreateParams) (model.VMRecord, error) { func (d *deps) runVMCreate(ctx context.Context, socketPath string, stderr io.Writer, params api.VMCreateParams, verbose bool) (model.VMRecord, error) {
start := time.Now() start := time.Now()
printVMSpecLine(stderr, params) printVMSpecLine(stderr, params)
begin, err := d.vmCreateBegin(ctx, socketPath, params) begin, err := d.vmCreateBegin(ctx, socketPath, params)
if err != nil { if err != nil {
return model.VMRecord{}, err return model.VMRecord{}, err
} }
renderer := newVMCreateProgressRenderer(stderr) renderer := newVMCreateProgressRenderer(stderr, verbose)
renderer.render(begin.Operation) renderer.render(begin.Operation)
op := begin.Operation op := begin.Operation
@ -76,6 +76,7 @@ func (d *deps) runVMCreate(ctx context.Context, socketPath string, stderr io.Wri
if op.Done { if op.Done {
renderer.render(op) renderer.render(op)
if op.Success && op.VM != nil { if op.Success && op.VM != nil {
renderer.clear()
elapsed := formatVMCreateElapsed(time.Since(start)) elapsed := formatVMCreateElapsed(time.Since(start))
_, _ = fmt.Fprintf(stderr, "[vm create] ready in %s\n", style.Dim(stderr, elapsed)) _, _ = fmt.Fprintf(stderr, "[vm create] ready in %s\n", style.Dim(stderr, elapsed))
return *op.VM, nil return *op.VM, nil
@ -113,13 +114,22 @@ func (d *deps) runVMCreate(ctx context.Context, socketPath string, stderr io.Wri
type vmCreateProgressRenderer struct { type vmCreateProgressRenderer struct {
out io.Writer out io.Writer
enabled bool enabled bool
inline bool
active bool
lastLine string lastLine string
} }
func newVMCreateProgressRenderer(out io.Writer) *vmCreateProgressRenderer { // newVMCreateProgressRenderer wires up progress for `vm create`. On
// non-TTY writers it stays disabled (CI/test logs already capture the
// spec + ready lines); on TTY it rewrites a single line via \r unless
// verbose is set or BANGER_NO_PROGRESS is exported, in which case it
// falls back to one line per stage.
func newVMCreateProgressRenderer(out io.Writer, verbose bool) *vmCreateProgressRenderer {
tty := writerSupportsProgress(out)
return &vmCreateProgressRenderer{ return &vmCreateProgressRenderer{
out: out, out: out,
enabled: writerSupportsProgress(out), enabled: tty,
inline: tty && !verbose && !progressDisabledByEnv(),
} }
} }
@ -132,9 +142,32 @@ func (r *vmCreateProgressRenderer) render(op api.VMCreateOperation) {
return return
} }
r.lastLine = line r.lastLine = line
if r.inline {
_, _ = fmt.Fprint(r.out, "\r\x1b[K", line)
r.active = true
return
}
_, _ = fmt.Fprintln(r.out, line) _, _ = fmt.Fprintln(r.out, line)
} }
// clear erases any live inline status line (\r + clear-to-EOL) so the
// caller can emit a clean terminating message from column 0. Safe on a
// nil receiver; does nothing unless an inline line is currently active.
func (r *vmCreateProgressRenderer) clear() {
	if r == nil {
		return
	}
	if !r.enabled || !r.inline || !r.active {
		return
	}
	_, _ = fmt.Fprint(r.out, "\r\x1b[K")
	r.active = false
	r.lastLine = ""
}
// progressDisabledByEnv is the BANGER_NO_PROGRESS escape hatch — a
// non-empty value forces line-per-stage output even on a TTY, so users
// can pipe `script(1)` / tmux capture without \r artifacts.
func progressDisabledByEnv() bool {
return strings.TrimSpace(os.Getenv("BANGER_NO_PROGRESS")) != ""
}
// writerSupportsProgress returns true only when out is a terminal. // writerSupportsProgress returns true only when out is a terminal.
// Keeps stage lines + heartbeat dots out of piped / logged output // Keeps stage lines + heartbeat dots out of piped / logged output
// where they'd just be noise. // where they'd just be noise.

View file

@ -149,7 +149,7 @@ func splitVMRunArgs(cmd *cobra.Command, args []string) (pathArgs, commandArgs []
// for guest ssh, optionally materialise a workspace and kick off the // for guest ssh, optionally materialise a workspace and kick off the
// tooling bootstrap, then either attach interactively or run the // tooling bootstrap, then either attach interactively or run the
// user's command and propagate its exit status. // user's command and propagate its exit status.
func (d *deps) runVMRun(ctx context.Context, socketPath string, cfg model.DaemonConfig, stdin io.Reader, stdout, stderr io.Writer, params api.VMCreateParams, repo *vmRunRepo, command []string, removeOnExit, detach, skipBootstrap bool) error { func (d *deps) runVMRun(ctx context.Context, socketPath string, cfg model.DaemonConfig, stdin io.Reader, stdout, stderr io.Writer, params api.VMCreateParams, repo *vmRunRepo, command []string, removeOnExit, detach, skipBootstrap, verbose bool) error {
if repo != nil && !skipBootstrap && !params.NATEnabled { if repo != nil && !skipBootstrap && !params.NATEnabled {
hasMise, err := repoHasMiseFiles(repo.sourcePath) hasMise, err := repoHasMiseFiles(repo.sourcePath)
if err != nil { if err != nil {
@ -159,8 +159,9 @@ func (d *deps) runVMRun(ctx context.Context, socketPath string, cfg model.Daemon
return errors.New("tooling bootstrap requires --nat (or pass --no-bootstrap to skip)") return errors.New("tooling bootstrap requires --nat (or pass --no-bootstrap to skip)")
} }
} }
progress := newVMRunProgressRenderer(stderr) progress := newVMRunProgressRenderer(stderr, verbose)
vm, err := d.runVMCreate(ctx, socketPath, stderr, params) defer progress.clear()
vm, err := d.runVMCreate(ctx, socketPath, stderr, params, verbose)
if err != nil { if err != nil {
return err return err
} }
@ -183,8 +184,10 @@ func (d *deps) runVMRun(ctx context.Context, socketPath string, cfg model.Daemon
cleanupCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) cleanupCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel() defer cancel()
if err := d.vmDelete(cleanupCtx, socketPath, vmRef); err != nil { if err := d.vmDelete(cleanupCtx, socketPath, vmRef); err != nil {
progress.clear()
printVMRunWarning(stderr, fmt.Sprintf("--rm cleanup failed: %v (leaked vm %q; delete manually)", err, vmRef)) printVMRunWarning(stderr, fmt.Sprintf("--rm cleanup failed: %v (leaked vm %q; delete manually)", err, vmRef))
} else if err := removeUserKnownHosts(vm); err != nil { } else if err := removeUserKnownHosts(vm); err != nil {
progress.clear()
printVMRunWarning(stderr, fmt.Sprintf("known_hosts cleanup failed: %v", err)) printVMRunWarning(stderr, fmt.Sprintf("known_hosts cleanup failed: %v", err))
} }
}() }()
@ -223,6 +226,7 @@ func (d *deps) runVMRun(ctx context.Context, socketPath string, cfg model.Daemon
fromRef = repo.fromRef fromRef = repo.fromRef
} }
if !repo.includeUntracked { if !repo.includeUntracked {
progress.clear()
d.noteUntrackedSkipped(ctx, stderr, repo.sourcePath) d.noteUntrackedSkipped(ctx, stderr, repo.sourcePath)
} }
prepared, err := d.vmWorkspacePrepare(ctx, socketPath, api.VMWorkspacePrepareParams{ prepared, err := d.vmWorkspacePrepare(ctx, socketPath, api.VMWorkspacePrepareParams{
@ -246,13 +250,14 @@ func (d *deps) runVMRun(ctx context.Context, socketPath string, cfg model.Daemon
return fmt.Errorf("vm %q is running but guest ssh is unavailable: %w", vmRef, err) return fmt.Errorf("vm %q is running but guest ssh is unavailable: %w", vmRef, err)
} }
if err := d.startVMRunToolingHarness(ctx, client, prepared.Workspace.RepoRoot, prepared.Workspace.RepoName, progress, detach, stderr); err != nil { if err := d.startVMRunToolingHarness(ctx, client, prepared.Workspace.RepoRoot, prepared.Workspace.RepoName, progress, detach, stderr); err != nil {
progress.clear()
printVMRunWarning(stderr, fmt.Sprintf("guest tooling bootstrap start failed: %v", err)) printVMRunWarning(stderr, fmt.Sprintf("guest tooling bootstrap start failed: %v", err))
} }
_ = client.Close() _ = client.Close()
} }
} }
if detach { if detach {
progress.render(fmt.Sprintf("vm %s running; reconnect with: banger vm ssh %s", vmRef, vmRef)) progress.commitLine(fmt.Sprintf("vm %s running; reconnect with: banger vm ssh %s", vmRef, vmRef))
return nil return nil
} }
sshArgs, err := sshCommandArgs(cfg, vm.Runtime.GuestIP, command) sshArgs, err := sshCommandArgs(cfg, vm.Runtime.GuestIP, command)
@ -261,6 +266,7 @@ func (d *deps) runVMRun(ctx context.Context, socketPath string, cfg model.Daemon
} }
if len(command) > 0 { if len(command) > 0 {
progress.render("running command in guest") progress.render("running command in guest")
progress.clear()
if err := d.sshExec(ctx, stdin, stdout, stderr, sshArgs); err != nil { if err := d.sshExec(ctx, stdin, stdout, stderr, sshArgs); err != nil {
var exitErr *exec.ExitError var exitErr *exec.ExitError
if errors.As(err, &exitErr) { if errors.As(err, &exitErr) {
@ -271,6 +277,7 @@ func (d *deps) runVMRun(ctx context.Context, socketPath string, cfg model.Daemon
return nil return nil
} }
progress.render("attaching to guest") progress.render("attaching to guest")
progress.clear()
return d.runSSHSession(ctx, socketPath, vmRef, stdin, stdout, stderr, sshArgs, removeOnExit) return d.runSSHSession(ctx, socketPath, vmRef, stdin, stdout, stderr, sshArgs, removeOnExit)
} }
@ -442,13 +449,24 @@ func formatVMRunStepError(action string, err error, log string) error {
type vmRunProgressRenderer struct { type vmRunProgressRenderer struct {
out io.Writer out io.Writer
enabled bool enabled bool
inline bool
active bool
lastLine string lastLine string
} }
func newVMRunProgressRenderer(out io.Writer) *vmRunProgressRenderer { // newVMRunProgressRenderer wires up progress for `vm run`. Unlike the
// vm_create renderer, this one emits in line mode even on non-TTY
// writers (covers tests and piped output that the existing tooling
// already parses); inline mode kicks in only when stderr is a TTY,
// verbose is unset, and BANGER_NO_PROGRESS is unset.
func newVMRunProgressRenderer(out io.Writer, verbose bool) *vmRunProgressRenderer {
if out == nil {
return &vmRunProgressRenderer{}
}
return &vmRunProgressRenderer{ return &vmRunProgressRenderer{
out: out, out: out,
enabled: out != nil, enabled: true,
inline: writerSupportsProgress(out) && !verbose && !progressDisabledByEnv(),
} }
} }
@ -461,6 +479,47 @@ func (r *vmRunProgressRenderer) render(detail string) {
return return
} }
r.lastLine = line r.lastLine = line
if r.inline {
_, _ = fmt.Fprint(r.out, "\r\x1b[K", line)
r.active = true
return
}
_, _ = fmt.Fprintln(r.out, line)
}
// clear erases the live inline line so the caller can write a clean
// terminating message (warning, ssh attach, command output). No-op
// outside inline mode.
func (r *vmRunProgressRenderer) clear() {
if r == nil || !r.enabled || !r.inline || !r.active {
return
}
_, _ = fmt.Fprint(r.out, "\r\x1b[K")
r.active = false
r.lastLine = ""
}
// commitLine prints detail as a final, persistent line. In inline
// mode it overwrites the live status; in line mode it just appends.
// Used for terminal messages like the --detach hand-off summary.
func (r *vmRunProgressRenderer) commitLine(detail string) {
if r == nil || !r.enabled {
return
}
line := formatVMRunProgress(detail)
if line == "" {
return
}
if r.inline {
_, _ = fmt.Fprint(r.out, "\r\x1b[K", line, "\n")
r.active = false
r.lastLine = ""
return
}
if line == r.lastLine {
return
}
r.lastLine = line
_, _ = fmt.Fprintln(r.out, line) _, _ = fmt.Fprintln(r.out, line)
} }

View file

@ -124,7 +124,7 @@ func TestRunVMRunRefusesBootstrapWithoutNAT(t *testing.T) {
api.VMCreateParams{Name: "devbox", NATEnabled: false}, api.VMCreateParams{Name: "devbox", NATEnabled: false},
&repo, &repo,
nil, nil,
false, false, false, false, false, false, false,
) )
if err == nil || !strings.Contains(err.Error(), "tooling bootstrap requires --nat") { if err == nil || !strings.Contains(err.Error(), "tooling bootstrap requires --nat") {
t.Fatalf("runVMRun = %v, want NAT precondition refusal", err) t.Fatalf("runVMRun = %v, want NAT precondition refusal", err)
@ -155,7 +155,7 @@ func TestRunVMRunBootstrapPreconditionRespectsNoBootstrap(t *testing.T) {
api.VMCreateParams{Name: "devbox", NATEnabled: false}, api.VMCreateParams{Name: "devbox", NATEnabled: false},
&repo, &repo,
nil, nil,
false, false, true, // skipBootstrap = true false, false, true, false, // skipBootstrap = true
) )
if err != nil { if err != nil {
t.Fatalf("runVMRun: %v", err) t.Fatalf("runVMRun: %v", err)
@ -186,7 +186,7 @@ func TestRunVMRunBootstrapPreconditionPassesWithoutMiseFiles(t *testing.T) {
api.VMCreateParams{Name: "devbox", NATEnabled: false}, api.VMCreateParams{Name: "devbox", NATEnabled: false},
&repo, &repo,
nil, nil,
false, false, false, false, false, false, false,
) )
if err != nil { if err != nil {
t.Fatalf("runVMRun: %v", err) t.Fatalf("runVMRun: %v", err)
@ -219,7 +219,7 @@ func TestRunVMRunDetachSkipsSshAttach(t *testing.T) {
api.VMCreateParams{Name: "devbox"}, api.VMCreateParams{Name: "devbox"},
nil, // bare mode nil, // bare mode
nil, // no command nil, // no command
false, true, false, // detach = true false, true, false, false, // detach = true
) )
if err != nil { if err != nil {
t.Fatalf("runVMRun: %v", err) t.Fatalf("runVMRun: %v", err)
@ -257,7 +257,7 @@ func TestRunVMRunDetachUsesSyncBootstrapPath(t *testing.T) {
api.VMCreateParams{Name: "devbox", NATEnabled: true}, api.VMCreateParams{Name: "devbox", NATEnabled: true},
&repo, &repo,
nil, nil,
false, true, false, // detach = true false, true, false, false, // detach = true
) )
if err != nil { if err != nil {
t.Fatalf("runVMRun: %v", err) t.Fatalf("runVMRun: %v", err)

View file

@ -20,6 +20,11 @@ func TestSshdGuestConfig_Hardened(t *testing.T) {
"PasswordAuthentication no", "PasswordAuthentication no",
"KbdInteractiveAuthentication no", "KbdInteractiveAuthentication no",
"AuthorizedKeysFile /root/.ssh/authorized_keys", "AuthorizedKeysFile /root/.ssh/authorized_keys",
// Quiet-login: short-lived sandboxes don't need the Debian
// MOTD or the "Last login" line. .hushlogin in /root covers
// pam_motd; these two cover sshd's own paths.
"PrintMotd no",
"PrintLastLog no",
} }
for _, line := range mustContain { for _, line := range mustContain {
if !strings.Contains(cfg, line) { if !strings.Contains(cfg, line) {

View file

@ -50,6 +50,11 @@ func (s *VMService) patchRootOverlay(ctx context.Context, vm model.VMRecord, ima
builder.WriteFile(guestnet.ConfigPath, guestnet.ConfigFile(vm.Runtime.GuestIP, s.config.BridgeIP, s.config.DefaultDNS)) builder.WriteFile(guestnet.ConfigPath, guestnet.ConfigFile(vm.Runtime.GuestIP, s.config.BridgeIP, s.config.DefaultDNS))
builder.WriteFile(guestnet.GuestScriptPath, []byte(guestnet.BootstrapScript())) builder.WriteFile(guestnet.GuestScriptPath, []byte(guestnet.BootstrapScript()))
builder.WriteFile("/etc/ssh/sshd_config.d/99-banger.conf", sshdConfig) builder.WriteFile("/etc/ssh/sshd_config.d/99-banger.conf", sshdConfig)
// pam_motd reads /etc/motd + /etc/update-motd.d on Debian-family
// guests independent of sshd's PrintMotd. .hushlogin in $HOME tells
// pam_motd to stay quiet for that user — root is the only login on
// banger VMs, so a single file suffices.
builder.WriteFile("/root/.hushlogin", []byte{})
builder.DropMountTarget("/home") builder.DropMountTarget("/home")
builder.DropMountTarget("/var") builder.DropMountTarget("/var")
builder.AddMount(guestconfig.MountSpec{ builder.AddMount(guestconfig.MountSpec{
@ -159,6 +164,14 @@ func (s *VMService) ensureWorkDisk(ctx context.Context, vm *model.VMRecord, imag
// Pins the lookup path so the banger-written file always wins, // Pins the lookup path so the banger-written file always wins,
// regardless of distro default ($HOME/.ssh/authorized_keys) and // regardless of distro default ($HOME/.ssh/authorized_keys) and
// regardless of any per-image weirdness. // regardless of any per-image weirdness.
//
// - PrintMotd no / PrintLastLog no
// Banger VMs are short-lived sandboxes. The Debian-style MOTD
// ("Linux ... GNU/Linux comes with ABSOLUTELY NO WARRANTY …") and
// the "Last login" line are pure noise for `vm run -- echo hi`
// style invocations. Pair this with the .hushlogin written below
// so pam_motd also stays silent on distros that read /etc/motd
// through PAM rather than sshd.
func sshdGuestConfig() string { func sshdGuestConfig() string {
return strings.Join([]string{ return strings.Join([]string{
"PermitRootLogin prohibit-password", "PermitRootLogin prohibit-password",
@ -166,6 +179,8 @@ func sshdGuestConfig() string {
"PasswordAuthentication no", "PasswordAuthentication no",
"KbdInteractiveAuthentication no", "KbdInteractiveAuthentication no",
"AuthorizedKeysFile /root/.ssh/authorized_keys", "AuthorizedKeysFile /root/.ssh/authorized_keys",
"PrintMotd no",
"PrintLastLog no",
"", "",
}, "\n") }, "\n")
} }

24
internal/smoketest/doc.go Normal file
View file

@ -0,0 +1,24 @@
//go:build smoke
// Package smoketest is the end-to-end smoke gate for banger's supported
// two-service systemd model. It runs only when the build is tagged
// `smoke`, which keeps it out of `go test ./...` on contributor
// machines and CI.
//
// The suite touches global host state: it installs instrumented
// bangerd.service + bangerd-root.service, drives real Firecracker/KVM
// scenarios, copies covdata back out, then purges the smoke-owned
// install on exit. It refuses to run if a non-smoke install is already
// on the host (see the marker file under /etc/banger).
//
// The harness expects three env vars, normally set by `make smoke`:
//
// BANGER_SMOKE_BIN_DIR — instrumented banger / bangerd / vsock-agent
// BANGER_SMOKE_COVER_DIR — coverage output directory (GOCOVERDIR)
// BANGER_SMOKE_XDG_DIR — scratch root for fake homes, fake repos, etc.
//
// Coverage: the test binary itself is not instrumented, but every
// banger / bangerd subprocess it spawns is, and writes covdata into
// BANGER_SMOKE_COVER_DIR. Service-side covdata under /var/lib/banger
// is copied out at teardown.
package smoketest

View file

@ -0,0 +1,50 @@
//go:build smoke
package smoketest
import (
"fmt"
"os"
"os/exec"
"path/filepath"
)
// setupRepoFixture builds the throwaway git repo at runtimeDir/fake-repo
// that every repodir-class scenario consumes. Mirrors
// scripts/smoke.sh:441-456. The path is stored in the package-level
// repoDir so scenarios can reference it directly.
//
// Returns an error (wrapping the failing step, with git's combined
// output appended) if any fixture step fails.
func setupRepoFixture() error {
	repoDir = filepath.Join(runtimeDir, "fake-repo")
	if err := os.MkdirAll(repoDir, 0o755); err != nil {
		return fmt.Errorf("setupRepoFixture: mkdir %s: %w", repoDir, err)
	}
	// git runs a single git command inside the fixture repo, folding
	// any combined output into the returned error for easier triage.
	// Replaces the two copy-pasted exec loops the first port had.
	git := func(args ...string) error {
		cmd := exec.Command("git", args...)
		cmd.Dir = repoDir
		if out, err := cmd.CombinedOutput(); err != nil {
			full := append([]string{"git"}, args...)
			return fmt.Errorf("setupRepoFixture: %s: %w\n%s", full, err, out)
		}
		return nil
	}
	// Init + identity config first, so the commit below succeeds even
	// on hosts with no global git config (CI runners, fresh machines).
	for _, args := range [][]string{
		{"init", "-q", "-b", "main"},
		{"config", "commit.gpgsign", "false"},
		{"config", "user.name", "smoke"},
		{"config", "user.email", "smoke@smoke"},
	} {
		if err := git(args...); err != nil {
			return err
		}
	}
	// A known marker file gives workspace scenarios something concrete
	// to assert on inside the guest.
	marker := filepath.Join(repoDir, "smoke-file.txt")
	if err := os.WriteFile(marker, []byte("smoke-workspace-marker\n"), 0o644); err != nil {
		return fmt.Errorf("setupRepoFixture: write marker: %w", err)
	}
	if err := git("add", "."); err != nil {
		return err
	}
	return git("commit", "-q", "-m", "init")
}

View file

@ -0,0 +1,201 @@
//go:build smoke
package smoketest
import (
"bytes"
"os"
"os/exec"
"strings"
"testing"
"time"
)
// result captures the output and exit status of a banger invocation.
// stdout / stderr are kept separate so assertions can target one or the
// other (matches the bash suite's `out=$(cmd)` vs `2>&1` patterns).
type result struct {
	stdout string // captured standard output, verbatim
	stderr string // captured standard error, verbatim
	rc     int    // process exit code; 0 unless the command exited non-zero
}
// runCmd runs cmd to completion, collecting stdout and stderr into the
// returned result. A non-zero exit is reported via result.rc instead of
// failing the test — scenarios decide whether non-zero is a failure or
// the very assertion under test. Any other exec failure (binary
// missing, signal, …) is fatal.
func runCmd(t *testing.T, cmd *exec.Cmd) result {
	t.Helper()
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	runErr := cmd.Run()
	res := result{stdout: stdout.String(), stderr: stderr.String()}
	if runErr == nil {
		return res
	}
	exitErr, isExit := runErr.(*exec.ExitError)
	if !isExit {
		t.Fatalf("exec %s: %v\nstderr: %s", strings.Join(cmd.Args, " "), runErr, res.stderr)
	}
	res.rc = exitErr.ExitCode()
	return res
}
// banger invokes the instrumented `banger` CLI with args and returns
// the captured result. GOCOVERDIR is inherited from the process
// environment (TestMain exports it), so child covdata lands under
// BANGER_SMOKE_COVER_DIR automatically.
func banger(t *testing.T, args ...string) result {
	t.Helper()
	cmd := exec.Command(bangerBin, args...)
	return runCmd(t, cmd)
}
// mustBanger invokes `banger` and fails the test immediately on a
// non-zero exit, returning stdout otherwise. Happy-path scenarios use
// this; scenarios asserting on refusal paths call banger() directly.
func mustBanger(t *testing.T, args ...string) string {
	t.Helper()
	got := banger(t, args...)
	if got.rc != 0 {
		t.Fatalf("banger %s: exit %d\nstdout: %s\nstderr: %s",
			strings.Join(args, " "), got.rc, got.stdout, got.stderr)
	}
	return got.stdout
}
// sudoBanger invokes `banger` under `sudo env GOCOVERDIR=...`. Sudo
// strips the environment by default, so the explicit re-export keeps
// coverage flowing for privileged scenarios (system install / restart /
// update / daemon stop).
func sudoBanger(t *testing.T, args ...string) result {
	t.Helper()
	sudoArgs := make([]string, 0, len(args)+3)
	sudoArgs = append(sudoArgs, "env", "GOCOVERDIR="+coverDir, bangerBin)
	sudoArgs = append(sudoArgs, args...)
	return runCmd(t, exec.Command("sudo", sudoArgs...))
}
// wantContains fails the test unless haystack contains needle. label
// is a short human-readable identifier used in the failure message.
func wantContains(t *testing.T, haystack, needle, label string) {
	t.Helper()
	if strings.Contains(haystack, needle) {
		return
	}
	t.Fatalf("%s missing %q\ngot: %s", label, needle, haystack)
}
// wantNotContains is the negative counterpart of wantContains: it fails
// the test if haystack DOES contain needle. Used by scenarios verifying
// a warning was suppressed (e.g. the post-auto-prepare clean-state
// check in vm_exec) or that an export patch did NOT capture a
// guest-side commit.
func wantNotContains(t *testing.T, haystack, needle, label string) {
	t.Helper()
	if !strings.Contains(haystack, needle) {
		return
	}
	t.Fatalf("%s unexpectedly contains %q\ngot: %s", label, needle, haystack)
}
// wantExit fails the test unless got exited with the wanted code. Used
// by scenarios that test exit-code propagation or refusal paths.
func wantExit(t *testing.T, got result, want int, label string) {
	t.Helper()
	if got.rc == want {
		return
	}
	t.Fatalf("%s: exit %d, want %d\nstdout: %s\nstderr: %s",
		label, got.rc, want, got.stdout, got.stderr)
}
// vmDelete best-effort deletes a VM, ignoring any failure. Intended for
// t.Cleanup hooks where the VM may already be gone (deleted by the
// scenario itself).
func vmDelete(name string) {
	_ = exec.Command(bangerBin, "vm", "delete", name).Run()
}
// vmCreate creates a VM named name and registers a cleanup hook that
// deletes it. extraArgs is appended after `vm create --name X`, so
// callers can pass --vcpu N / --nat / --no-start / etc. Fails the test
// if creation fails — every scenario using vmCreate needs the VM up.
func vmCreate(t *testing.T, name string, extraArgs ...string) {
	t.Helper()
	createArgs := []string{"vm", "create", "--name", name}
	createArgs = append(createArgs, extraArgs...)
	mustBanger(t, createArgs...)
	t.Cleanup(func() { vmDelete(name) })
}
// bangerHome invokes `banger` with HOME pointed at home. ssh-config
// scenarios use this to mutate ~/.ssh/config under a fake home so the
// contributor's real config is never touched.
func bangerHome(t *testing.T, home string, args ...string) result {
	t.Helper()
	cmd := exec.Command(bangerBin, args...)
	env := append(os.Environ(), "HOME="+home)
	cmd.Env = env
	return runCmd(t, cmd)
}
// mustBangerHome wraps bangerHome with Fatal-on-non-zero and returns
// the captured stdout.
func mustBangerHome(t *testing.T, home string, args ...string) string {
	t.Helper()
	got := bangerHome(t, home, args...)
	if got.rc != 0 {
		t.Fatalf("banger %s (HOME=%s): exit %d\nstdout: %s\nstderr: %s",
			strings.Join(args, " "), home, got.rc, got.stdout, got.stderr)
	}
	return got.stdout
}
// waitForSSH polls `banger vm ssh <name> -- true` once per second until
// SSH answers, for up to 120 seconds. The original bash suite used 60s
// and occasionally flaked under load (post-update VM, large parallel
// pool); 120s leaves headroom for the post-update / post-rollback
// paths where the daemon has just restarted, without making genuine
// breakage slow to surface.
func waitForSSH(t *testing.T, name string) {
	t.Helper()
	const timeout = 120 * time.Second
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		probe := exec.Command(bangerBin, "vm", "ssh", name, "--", "true")
		if probe.Run() == nil {
			return
		}
		time.Sleep(time.Second)
	}
	t.Fatalf("vm %q ssh did not come up within %s", name, timeout)
}
// requirePasswordlessSudo skips the test when `sudo -n true` fails.
// Mirrors the bash `if ! sudo -n true 2>/dev/null; then return 0; fi`
// pattern used by scenarios exercising privileged paths.
func requirePasswordlessSudo(t *testing.T) {
	t.Helper()
	if exec.Command("sudo", "-n", "true").Run() != nil {
		t.Skip("passwordless sudo unavailable")
	}
}
// requireSudoIptables skips the test when iptables can't be queried
// under `sudo -n`. Used by the NAT scenario, whose assertions read the
// POSTROUTING chain.
func requireSudoIptables(t *testing.T) {
	t.Helper()
	probe := exec.Command("sudo", "-n", "iptables", "-t", "nat", "-S", "POSTROUTING")
	if probe.Run() != nil {
		t.Skip("passwordless sudo iptables unavailable")
	}
}
// installedVersion reads `/usr/local/bin/banger --version` and returns
// the version token (second whitespace-separated field). This is the
// *installed* binary that `banger update` swaps out — the smoke CLI
// under $BANGER_SMOKE_BIN_DIR is separate and unaffected by update.
// Mirrors the bash `installed_version` helper at scripts/smoke.sh:1156-1162.
func installedVersion(t *testing.T) string {
	t.Helper()
	raw, err := exec.Command("/usr/local/bin/banger", "--version").Output()
	if err != nil {
		t.Fatalf("read installed version: %v", err)
	}
	fields := strings.Fields(string(raw))
	if len(fields) < 2 {
		t.Fatalf("unparseable installed --version output: %q", string(raw))
	}
	return fields[1]
}

View file

@ -0,0 +1,310 @@
//go:build smoke
package smoketest
import (
	"archive/tar"
	"compress/gzip"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"crypto/x509"
	"encoding/base64"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"sync"
)
// Release-server state set up lazily by prepareSmokeReleases. The HTTP
// server stays up for the duration of TestMain (shut down in teardown).
// smokeRelOnce serializes concurrent first-callers; smokeRelErr is the
// stored result for replay so subsequent callers see the same outcome.
var (
	smokeRelOnce      sync.Once         // guards the single doPrepareSmokeReleases run
	smokeRelErr       error             // cached outcome replayed to every later caller
	manifestURL       string            // URL of manifest.json on the fake bucket
	pubkeyFile        string            // path of the PEM public key handed to `banger update`
	releaseHTTPServer *httptest.Server  // file server rooted at releaseRelDir
	releaseRelDir     string            // scratch dir holding tarballs, sums, sigs, manifest
	smokeRelKey       *ecdsa.PrivateKey // key that signs each release's SHA256SUMS
)

// Version strings baked into the two fake releases: one good build, and
// one whose bangerd deliberately fails in service mode so the updater's
// rollback path gets exercised (see buildSmokeReleaseTarball).
const (
	smokeReleaseGood   = "v0.smoke.0"
	smokeReleaseBroken = "v0.smoke.broken-bangerd"
)
// prepareSmokeReleases is the Go port of scripts/smoke.sh's
// prepare_smoke_releases. It generates an ECDSA P-256 keypair (matching
// cosign blob signatures, which are ASN.1 DER ECDSA over SHA256(body),
// base64-encoded), builds two coverage-instrumented release tarballs
// signed with that key, writes a manifest, and stands up an httptest
// file server. The hidden --manifest-url / --pubkey-file flags on
// `banger update` redirect the updater at this fake bucket.
//
// Idempotent and safe under concurrent first calls: sync.Once means the
// first caller pays the build/server cost; every later caller replays
// the cached smokeRelErr result.
func prepareSmokeReleases() error {
	smokeRelOnce.Do(func() {
		smokeRelErr = doPrepareSmokeReleases()
	})
	return smokeRelErr
}
// doPrepareSmokeReleases does the actual one-time work behind
// prepareSmokeReleases: fresh release dir, signing keypair, the two
// release tarballs, an httptest file server over the lot, and the
// manifest that points at it. Populates the package-level release vars
// (releaseRelDir, smokeRelKey, pubkeyFile, releaseHTTPServer,
// manifestURL) as it goes.
func doPrepareSmokeReleases() error {
	releaseRelDir = filepath.Join(scratchRoot, "release")
	if err := os.RemoveAll(releaseRelDir); err != nil {
		return fmt.Errorf("clean release dir: %w", err)
	}
	if err := os.MkdirAll(releaseRelDir, 0o755); err != nil {
		return fmt.Errorf("mkdir release dir: %w", err)
	}
	signingKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return fmt.Errorf("generate ECDSA key: %w", err)
	}
	smokeRelKey = signingKey
	der, err := x509.MarshalPKIXPublicKey(&signingKey.PublicKey)
	if err != nil {
		return fmt.Errorf("marshal pub key: %w", err)
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: der})
	pubkeyFile = filepath.Join(releaseRelDir, "cosign.pub")
	if err := os.WriteFile(pubkeyFile, pemBytes, 0o644); err != nil {
		return fmt.Errorf("write pub key: %w", err)
	}
	for _, version := range []string{smokeReleaseGood, smokeReleaseBroken} {
		if err := buildSmokeReleaseTarball(version); err != nil {
			return err
		}
	}
	releaseHTTPServer = httptest.NewServer(http.FileServer(http.Dir(releaseRelDir)))
	manifestPath := filepath.Join(releaseRelDir, "manifest.json")
	if err := writeSmokeManifest(manifestPath, releaseHTTPServer.URL); err != nil {
		return err
	}
	manifestURL = releaseHTTPServer.URL + "/manifest.json"
	return nil
}
// shutdownReleaseServer stops the fake release HTTP server if it was
// ever started. Safe to call when prepareSmokeReleases never ran.
func shutdownReleaseServer() {
	if srv := releaseHTTPServer; srv != nil {
		srv.Close()
	}
}
// buildSmokeReleaseTarball is the Go port of build_smoke_release_tarball
// from scripts/smoke.sh. It compiles banger / bangerd / banger-vsock-agent
// with the requested Version baked in, packages them as a gzip tarball,
// and writes SHA256SUMS + SHA256SUMS.sig alongside.
//
// The v0.smoke.broken-* family ships a shell-script bangerd that passes
// `--check-migrations` (so the swap proceeds) but exits non-zero in
// service mode (so the post-swap restart fails and rollbackAndWrap
// fires). Same trick the bash version uses.
//
// NOTE(review): assumes releaseRelDir and smokeRelKey are already set —
// doPrepareSmokeReleases is the only visible caller and guarantees both.
func buildSmokeReleaseTarball(version string) error {
	outDir := filepath.Join(releaseRelDir, version)
	// .stage holds the raw binaries before tarring; removed on success.
	stage := filepath.Join(outDir, ".stage")
	if err := os.MkdirAll(stage, 0o755); err != nil {
		return fmt.Errorf("mkdir stage: %w", err)
	}
	// Bake the fake version/commit/build-time into the binaries so
	// `--version` output and update checks report the smoke release ids.
	ldflags := "-X banger/internal/buildinfo.Version=" + version +
		" -X banger/internal/buildinfo.Commit=smoke" +
		" -X banger/internal/buildinfo.BuiltAt=2026-04-30T00:00:00Z"
	root, err := repoRoot()
	if err != nil {
		return err
	}
	// build compiles one cmd/ target into output, with optional extra
	// env vars (used below to cross-compile the guest agent). When no
	// extra env is given, cmd.Env stays nil and the parent env is
	// inherited unchanged.
	build := func(target, output string, extraEnv ...string) error {
		cmd := exec.Command("go", "build", "-ldflags", ldflags, "-o", output, target)
		cmd.Dir = root
		if len(extraEnv) > 0 {
			cmd.Env = append(os.Environ(), extraEnv...)
		}
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("build %s@%s: %w\n%s", target, version, err, out)
		}
		return nil
	}
	if err := build("./cmd/banger", filepath.Join(stage, "banger")); err != nil {
		return err
	}
	if strings.HasPrefix(version, "v0.smoke.broken-") {
		// Stand-in bangerd: the migration check claims compatibility so
		// the updater proceeds with the swap, then service mode refuses
		// to start — triggering the post-swap rollback under test.
		const brokenScript = `#!/bin/sh
case "$*" in
*--check-migrations*)
printf 'compatible: smoke broken-bangerd pretends to be ready\n'
exit 0
;;
*)
printf 'smoke broken-bangerd: refusing to run as daemon\n' >&2
exit 1
;;
esac
`
		if err := os.WriteFile(filepath.Join(stage, "bangerd"), []byte(brokenScript), 0o755); err != nil {
			return fmt.Errorf("write broken bangerd: %w", err)
		}
	} else {
		if err := build("./cmd/bangerd", filepath.Join(stage, "bangerd")); err != nil {
			return err
		}
	}
	// The vsock agent runs inside the guest: always a static
	// linux/amd64 build regardless of the host toolchain defaults.
	if err := build("./cmd/banger-vsock-agent", filepath.Join(stage, "banger-vsock-agent"),
		"CGO_ENABLED=0", "GOOS=linux", "GOARCH=amd64"); err != nil {
		return err
	}
	tarballName := fmt.Sprintf("banger-%s-linux-amd64.tar.gz", version)
	tarballPath := filepath.Join(outDir, tarballName)
	if err := writeTarGz(stage, tarballPath); err != nil {
		return fmt.Errorf("tar %s: %w", version, err)
	}
	// SHA256SUMS covers the tarball; SHA256SUMS.sig signs the sums-file
	// body (cosign blob style), not the tarball itself.
	body, err := os.ReadFile(tarballPath)
	if err != nil {
		return fmt.Errorf("read tarball: %w", err)
	}
	hash := sha256.Sum256(body)
	sumsBody := fmt.Sprintf("%x %s\n", hash, tarballName)
	if err := os.WriteFile(filepath.Join(outDir, "SHA256SUMS"), []byte(sumsBody), 0o644); err != nil {
		return fmt.Errorf("write SHA256SUMS: %w", err)
	}
	sig, err := signCosignBlob(smokeRelKey, []byte(sumsBody))
	if err != nil {
		return fmt.Errorf("sign SHA256SUMS for %s: %w", version, err)
	}
	if err := os.WriteFile(filepath.Join(outDir, "SHA256SUMS.sig"), []byte(sig), 0o644); err != nil {
		return fmt.Errorf("write sig: %w", err)
	}
	// Staged binaries are no longer needed once the tarball exists.
	return os.RemoveAll(stage)
}
// signCosignBlob produces a cosign-compatible blob signature: ASN.1 DER
// ECDSA over SHA256(body), base64 encoded with no newline. This is the
// exact wire format cosign produces and the Go updater verifies, and
// matches the bash chain `openssl dgst -sha256 -sign | base64 -w0`.
func signCosignBlob(priv *ecdsa.PrivateKey, body []byte) (string, error) {
hash := sha256.Sum256(body)
sig, err := ecdsa.SignASN1(rand.Reader, priv, hash[:])
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(sig), nil
}
// writeTarGz packages every regular file in srcDir at the root of a
// gzip tarball at dst. Mirrors the bash `tar czf` of the staged binary
// trio (banger, bangerd, banger-vsock-agent).
func writeTarGz(srcDir, dst string) error {
out, err := os.Create(dst)
if err != nil {
return err
}
defer out.Close()
gw := gzip.NewWriter(out)
defer gw.Close()
tw := tar.NewWriter(gw)
defer tw.Close()
entries, err := os.ReadDir(srcDir)
if err != nil {
return err
}
for _, e := range entries {
if !e.Type().IsRegular() {
continue
}
path := filepath.Join(srcDir, e.Name())
st, err := os.Stat(path)
if err != nil {
return err
}
hdr := &tar.Header{
Name: e.Name(),
Mode: int64(st.Mode().Perm()),
Size: st.Size(),
ModTime: st.ModTime(),
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
f, err := os.Open(path)
if err != nil {
return err
}
if _, err := io.Copy(tw, f); err != nil {
f.Close()
return err
}
f.Close()
}
return nil
}
// writeSmokeManifest writes the update manifest that points `banger
// update` at the two fake releases served from base (the httptest
// server's root URL). The manifest is built with encoding/json rather
// than the previous 17-verb positional fmt.Sprintf template, so a
// field/argument mismatch can no longer silently emit invalid or
// mis-aligned JSON.
func writeSmokeManifest(path, base string) error {
	type release struct {
		Version          string `json:"version"`
		TarballURL       string `json:"tarball_url"`
		Sha256SumsURL    string `json:"sha256sums_url"`
		Sha256SumsSigURL string `json:"sha256sums_sig_url"`
		ReleasedAt       string `json:"released_at"`
	}
	// entry derives the per-release URLs from the version string — the
	// same directory layout buildSmokeReleaseTarball writes to disk.
	entry := func(version, releasedAt string) release {
		return release{
			Version:          version,
			TarballURL:       fmt.Sprintf("%s/%s/banger-%s-linux-amd64.tar.gz", base, version, version),
			Sha256SumsURL:    fmt.Sprintf("%s/%s/SHA256SUMS", base, version),
			Sha256SumsSigURL: fmt.Sprintf("%s/%s/SHA256SUMS.sig", base, version),
			ReleasedAt:       releasedAt,
		}
	}
	manifest := struct {
		SchemaVersion int       `json:"schema_version"`
		LatestStable  string    `json:"latest_stable"`
		Releases      []release `json:"releases"`
	}{
		SchemaVersion: 1,
		LatestStable:  smokeReleaseGood,
		Releases: []release{
			entry(smokeReleaseGood, "2026-04-29T00:00:00Z"),
			entry(smokeReleaseBroken, "2026-04-30T00:00:00Z"),
		},
	}
	body, err := json.MarshalIndent(manifest, "", "  ")
	if err != nil {
		return fmt.Errorf("marshal manifest: %w", err)
	}
	return os.WriteFile(path, append(body, '\n'), 0o644)
}
// repoRoot resolves the repo root (where go.mod lives) from the test
// binary's cwd. `go test` runs each package's tests from that package's
// source dir, so internal/smoketest -> ../.. lands at the root.
func repoRoot() (string, error) {
cwd, err := os.Getwd()
if err != nil {
return "", err
}
return filepath.Abs(filepath.Join(cwd, "..", ".."))
}

View file

@ -0,0 +1,368 @@
//go:build smoke
package smoketest
import (
"os/exec"
"regexp"
"strings"
"testing"
)
// testInvalidSpec is the Go port of scenario_invalid_spec. Asserts that
// `vm run --rm --vcpu 0 ...` is rejected and that no VM row is leaked
// in the process. Global-class because it asserts on host-wide vm-list
// counts; running concurrently with pure-class VM creation would race.
func testInvalidSpec(t *testing.T) {
	before := vmListAllCount(t)
	got := banger(t, "vm", "run", "--rm", "--vcpu", "0", "--", "echo", "unused")
	if got.rc == 0 {
		t.Fatalf("invalid spec: vm run unexpectedly succeeded with --vcpu 0\nstdout: %s\nstderr: %s",
			got.stdout, got.stderr)
	}
	if after := vmListAllCount(t); before != after {
		t.Fatalf("invalid spec leaked a VM row: pre=%d, post=%d", before, after)
	}
}
// vmListAllCount returns the newline count of `banger vm list --all`
// output. Mirrors the bash `vm list --all | wc -l` idiom; the absolute
// count is irrelevant — only that it stays constant across a rejected
// invocation.
func vmListAllCount(t *testing.T) int {
	t.Helper()
	listing := mustBanger(t, "vm", "list", "--all")
	return strings.Count(listing, "\n")
}
// testVMPrune ports scenario_vm_prune. `vm prune -f` must remove
// stopped VMs while preserving running ones. Global-class because it
// asserts on host-wide vm-list contents.
func testVMPrune(t *testing.T) {
	const running, stopped = "smoke-prune-running", "smoke-prune-stopped"
	mustBanger(t, "vm", "create", "--name", running)
	t.Cleanup(func() { vmDelete(running) })
	mustBanger(t, "vm", "create", "--name", stopped)
	t.Cleanup(func() { vmDelete(stopped) })
	mustBanger(t, "vm", "stop", stopped)
	mustBanger(t, "vm", "prune", "-f")
	if banger(t, "vm", "show", running).rc != 0 {
		t.Fatalf("vm prune: running VM was deleted (regression!)")
	}
	if banger(t, "vm", "show", stopped).rc == 0 {
		t.Fatalf("vm prune: stopped VM survived prune")
	}
}
// guestIPRE captures the value of `"guest_ip": "..."` (e.g.
// "172.16.0.X") from the JSON that `banger vm show` prints. Consumed
// through vmGuestIP, which testNAT uses to map VMs to their
// POSTROUTING rule subjects.
var guestIPRE = regexp.MustCompile(`"guest_ip":\s*"([^"]+)"`)
// vmGuestIP extracts the guest_ip field from `vm show` output. Fails
// the test if the field is absent — every running VM has one.
func vmGuestIP(t *testing.T, name string) string {
	t.Helper()
	out := mustBanger(t, "vm", "show", name)
	match := guestIPRE.FindStringSubmatch(out)
	if len(match) != 2 {
		t.Fatalf("could not read guest_ip from vm show %q:\n%s", name, out)
	}
	return match[1]
}
// testNAT ports scenario_nat. Verifies that `--nat` installs a per-VM
// MASQUERADE rule, that the rule survives stop/start without
// duplicating, and that delete cleans it up. The control VM (created
// without --nat) must never gain a rule.
func testNAT(t *testing.T) {
	requireSudoIptables(t)
	mustBanger(t, "vm", "create", "--name", "smoke-nat", "--nat")
	t.Cleanup(func() { vmDelete("smoke-nat") })
	mustBanger(t, "vm", "create", "--name", "smoke-nocnat")
	t.Cleanup(func() { vmDelete("smoke-nocnat") })
	natAddr := vmGuestIP(t, "smoke-nat")
	controlAddr := vmGuestIP(t, "smoke-nocnat")
	natMatch := "-s " + natAddr + "/32"
	rules := iptablesPostrouting(t)
	if !strings.Contains(rules, natMatch) || !strings.Contains(rules, "MASQUERADE") {
		t.Fatalf("NAT: --nat VM has no POSTROUTING MASQUERADE rule for %s; got:\n%s", natAddr, rules)
	}
	if strings.Contains(rules, "-s "+controlAddr+"/32") {
		t.Fatalf("NAT: control VM unexpectedly has a MASQUERADE rule for %s", controlAddr)
	}
	// The rule must be re-installed exactly once across a stop/start.
	mustBanger(t, "vm", "stop", "smoke-nat")
	mustBanger(t, "vm", "start", "smoke-nat")
	if n := strings.Count(iptablesPostrouting(t), natMatch); n != 1 {
		t.Fatalf("NAT: MASQUERADE rule count for %s = %d after restart, want 1", natAddr, n)
	}
	// Deleting both VMs must leave no trace in POSTROUTING.
	mustBanger(t, "vm", "delete", "smoke-nat")
	mustBanger(t, "vm", "delete", "smoke-nocnat")
	if strings.Contains(iptablesPostrouting(t), natMatch) {
		t.Fatalf("NAT: delete left a MASQUERADE rule behind for %s", natAddr)
	}
}
// iptablesPostrouting dumps the nat-table POSTROUTING chain in
// iptables-save syntax (`iptables -t nat -S POSTROUTING`) via
// non-interactive sudo. Fatals if the listing cannot be read.
func iptablesPostrouting(t *testing.T) string {
	t.Helper()
	cmd := exec.Command("sudo", "-n", "iptables", "-t", "nat", "-S", "POSTROUTING")
	listing, err := cmd.Output()
	if err != nil {
		t.Fatalf("read iptables POSTROUTING: %v", err)
	}
	return string(listing)
}
// testInvalidName ports scenario_invalid_name. Each malformed name
// must be rejected, and none of the rejected invocations may leak a
// row into `vm list --all`.
func testInvalidName(t *testing.T) {
	before := vmListAllCount(t)
	badNames := []string{"MyBox", "my box", "box.vm", "-box"}
	for _, name := range badNames {
		if res := banger(t, "vm", "create", "--name", name, "--no-start"); res.rc == 0 {
			t.Fatalf("invalid name: vm create accepted %q", name)
		}
	}
	if after := vmListAllCount(t); after != before {
		t.Fatalf("invalid name leaked VM row(s): pre=%d, post=%d", before, after)
	}
}
// updateBaseArgs builds the manifest/pubkey flags every update
// scenario needs to redirect the updater away from the production R2
// bucket and at our smoke release server. Built lazily because
// manifestURL / pubkeyFile are populated by prepareSmokeReleases.
func updateBaseArgs() []string {
	return []string{
		"--manifest-url", manifestURL,
		"--pubkey-file", pubkeyFile,
	}
}
// testUpdateCheck ports scenario_update_check. `update --check` must
// succeed against the smoke release server and announce the available
// version.
func testUpdateCheck(t *testing.T) {
	if err := prepareSmokeReleases(); err != nil {
		t.Fatalf("prepare smoke releases: %v", err)
	}
	checkArgs := append([]string{"update", "--check"}, updateBaseArgs()...)
	got := banger(t, checkArgs...)
	if got.rc != 0 {
		t.Fatalf("update --check failed: rc=%d\nstdout: %s\nstderr: %s",
			got.rc, got.stdout, got.stderr)
	}
	wantContains(t, got.stdout+got.stderr, "update available: ", "update --check stdout")
}
// testUpdateToUnknown ports scenario_update_to_unknown. Requesting a
// version absent from the manifest must fail before any host mutation
// — the installed binary's version stays put.
func testUpdateToUnknown(t *testing.T) {
	if err := prepareSmokeReleases(); err != nil {
		t.Fatalf("prepare smoke releases: %v", err)
	}
	before := installedVersion(t)
	res := banger(t, append([]string{"update", "--to", "v9.9.9"}, updateBaseArgs()...)...)
	if res.rc == 0 {
		t.Fatalf("update --to v9.9.9: exit 0 (out: %s%s)", res.stdout, res.stderr)
	}
	// Error text is matched case-insensitively.
	if !strings.Contains(strings.ToLower(res.stdout+res.stderr), "not found") {
		t.Fatalf("update --to v9.9.9: error doesn't say 'not found'; got: %s%s", res.stdout, res.stderr)
	}
	if after := installedVersion(t); before != after {
		t.Fatalf("update --to v9.9.9 mutated the install: %s -> %s", before, after)
	}
}
// testUpdateNoRoot ports scenario_update_no_root. Running `update
// --to` without sudo must refuse with a root-required error and
// leave the install untouched.
func testUpdateNoRoot(t *testing.T) {
	if err := prepareSmokeReleases(); err != nil {
		t.Fatalf("prepare smoke releases: %v", err)
	}
	before := installedVersion(t)
	res := banger(t, append([]string{"update", "--to", smokeReleaseGood}, updateBaseArgs()...)...)
	if res.rc == 0 {
		t.Fatalf("update without sudo: exit 0 (out: %s%s)", res.stdout, res.stderr)
	}
	// Error text is matched case-insensitively.
	if !strings.Contains(strings.ToLower(res.stdout+res.stderr), "root") {
		t.Fatalf("update without sudo: error doesn't mention root; got: %s%s", res.stdout, res.stderr)
	}
	if after := installedVersion(t); before != after {
		t.Fatalf("update without sudo mutated the install: %s -> %s", before, after)
	}
}
// testUpdateDryRun ports scenario_update_dry_run. `--dry-run` fetches
// and verifies the new release but must not swap the binary.
func testUpdateDryRun(t *testing.T) {
	requirePasswordlessSudo(t)
	if err := prepareSmokeReleases(); err != nil {
		t.Fatalf("prepare smoke releases: %v", err)
	}
	before := installedVersion(t)
	dryArgs := append([]string{"update", "--to", smokeReleaseGood, "--dry-run"}, updateBaseArgs()...)
	res := sudoBanger(t, dryArgs...)
	if res.rc != 0 {
		t.Fatalf("update --dry-run failed: %s%s", res.stdout, res.stderr)
	}
	wantContains(t, res.stdout+res.stderr, "dry-run:", "update --dry-run stdout")
	if after := installedVersion(t); before != after {
		t.Fatalf("update --dry-run swapped the binary: %s -> %s", before, after)
	}
}
// vmBootID reads /proc/sys/kernel/random/boot_id from the guest over
// `vm ssh`. The kernel regenerates the value on every boot, so an
// unchanged reading across a daemon restart proves the firecracker
// process survived. Errors are deliberately swallowed: callers treat
// an empty string as a failed read.
func vmBootID(t *testing.T, name string) string {
	t.Helper()
	cmd := exec.Command(bangerBin, "vm", "ssh", name, "--", "cat", "/proc/sys/kernel/random/boot_id")
	raw, _ := cmd.Output()
	return strings.TrimSpace(string(raw))
}
// installTomlVersionRE captures the value of the `version = "..."`
// line in /etc/banger/install.toml ((?m) so ^ anchors per line).
var installTomlVersionRE = regexp.MustCompile(`(?m)^version\s*=\s*"([^"]+)"`)
// installedTomlVersion reads the version field from
// /etc/banger/install.toml, going through sudo because the directory
// is not always world-readable. Fatals when the file or the version
// field is missing.
func installedTomlVersion(t *testing.T) string {
	t.Helper()
	raw, err := exec.Command("sudo", "cat", "/etc/banger/install.toml").Output()
	if err != nil {
		t.Fatalf("read /etc/banger/install.toml: %v", err)
	}
	match := installTomlVersionRE.FindStringSubmatch(string(raw))
	if len(match) != 2 {
		t.Fatalf("install.toml: no version field in:\n%s", raw)
	}
	return match[1]
}
// testUpdateKeepsVMAlive ports scenario_update_keeps_vm_alive. The
// long-running update scenario: a real swap to v0.smoke.0, must not
// reboot the running VM, must update the install metadata, and the VM
// must still answer SSH afterwards.
func testUpdateKeepsVMAlive(t *testing.T) {
	requirePasswordlessSudo(t)
	if err := prepareSmokeReleases(); err != nil {
		t.Fatalf("prepare smoke releases: %v", err)
	}
	const name = "smoke-update"
	vmCreate(t, name)
	waitForSSH(t, name)
	// boot_id is kernel-regenerated on every boot; capturing it before
	// and after the update detects any guest reboot.
	preBoot := vmBootID(t, name)
	if preBoot == "" {
		t.Fatalf("pre-update boot_id capture failed")
	}
	preVer := installedVersion(t)
	args := append([]string{"update", "--to", smokeReleaseGood}, updateBaseArgs()...)
	if res := sudoBanger(t, args...); res.rc != 0 {
		t.Fatalf("update --to %s failed: %s%s", smokeReleaseGood, res.stdout, res.stderr)
	}
	// The installed binary must now report the target version...
	postVer := installedVersion(t)
	if postVer != smokeReleaseGood {
		t.Fatalf("post-update /usr/local/bin/banger version = %s, want %s", postVer, smokeReleaseGood)
	}
	if preVer == postVer {
		t.Fatalf("update did not change the binary version (pre==post=%s)", postVer)
	}
	// ...and the install metadata must agree with it.
	if metaVer := installedTomlVersion(t); metaVer != smokeReleaseGood {
		t.Fatalf("install.toml version = %q, want %s", metaVer, smokeReleaseGood)
	}
	// Finally, the VM must still be reachable and must NOT have
	// rebooted: boot_id unchanged proves the firecracker process
	// survived the daemon swap.
	waitForSSH(t, name)
	postBoot := vmBootID(t, name)
	if postBoot == "" {
		t.Fatalf("post-update boot_id read failed")
	}
	if preBoot != postBoot {
		t.Fatalf("VM rebooted during update: boot_id %s -> %s", preBoot, postBoot)
	}
}
// testUpdateRollbackKeepsVMAlive ports scenario_update_rollback_keeps_vm_alive.
// Rollback drill: install the broken-bangerd release, which passes the
// pre-swap migration sanity but fails as a service. runUpdate's
// rollbackAndWrap must restore the previous binaries, and the VM must
// survive the whole drill.
func testUpdateRollbackKeepsVMAlive(t *testing.T) {
	requirePasswordlessSudo(t)
	if err := prepareSmokeReleases(); err != nil {
		t.Fatalf("prepare smoke releases: %v", err)
	}
	preVer := installedVersion(t)
	const name = "smoke-rollback"
	vmCreate(t, name)
	waitForSSH(t, name)
	// boot_id is kernel-regenerated on every boot; unchanged value
	// after the drill proves the guest never rebooted.
	preBoot := vmBootID(t, name)
	if preBoot == "" {
		t.Fatalf("pre-drill boot_id capture failed")
	}
	// The broken release must make the update fail (non-zero exit)...
	args := append([]string{"update", "--to", smokeReleaseBroken}, updateBaseArgs()...)
	res := sudoBanger(t, args...)
	if res.rc == 0 {
		t.Fatalf("rollback drill: update returned exit 0 despite broken bangerd\nstdout: %s\nstderr: %s",
			res.stdout, res.stderr)
	}
	// ...and the rollback must have restored the previous version.
	if postVer := installedVersion(t); postVer != preVer {
		t.Fatalf("rollback drill: post-rollback version = %s, want %s", postVer, preVer)
	}
	waitForSSH(t, name)
	postBoot := vmBootID(t, name)
	if postBoot == "" {
		t.Fatalf("post-rollback boot_id read failed")
	}
	if preBoot != postBoot {
		t.Fatalf("VM rebooted during rollback drill: boot_id %s -> %s", preBoot, postBoot)
	}
}
// testDaemonAdmin ports scenario_daemon_admin. MUST be the last global
// scenario in the run order: `banger daemon stop` tears the installed
// services down, so anything after it that talks to the daemon would
// fail. The teardown path re-stops idempotently.
func testDaemonAdmin(t *testing.T) {
	// `daemon socket` must print the fixed system socket path.
	socket := strings.TrimSpace(mustBanger(t, "daemon", "socket"))
	if socket != "/run/banger/bangerd.sock" {
		t.Fatalf("daemon socket: got %q, want /run/banger/bangerd.sock", socket)
	}
	// --check-migrations invokes bangerd directly (not via the
	// service) and must report compatibility on stdout.
	migOut, err := exec.Command(bangerdBin, "--system", "--check-migrations").CombinedOutput()
	if err != nil {
		t.Fatalf("bangerd --check-migrations: %v\n%s", err, migOut)
	}
	if !strings.HasPrefix(strings.TrimSpace(string(migOut)), "compatible:") {
		t.Fatalf("bangerd --check-migrations: stdout missing 'compatible:' prefix; got: %s", migOut)
	}
	requirePasswordlessSudo(t)
	if res := sudoBanger(t, "daemon", "stop"); res.rc != 0 {
		t.Fatalf("banger daemon stop: %s%s", res.stdout, res.stderr)
	}
	// Error from `system status` is ignored on purpose — presumably
	// the command can exit non-zero once the daemon is down; only the
	// printed rows are asserted. NOTE(review): confirm intended.
	status, _ := exec.Command(bangerBin, "system", "status").Output()
	if !regexp.MustCompile(`(?m)^active\s+inactive`).Match(status) {
		t.Fatalf("owner daemon still active after daemon stop:\n%s", status)
	}
	if !regexp.MustCompile(`(?m)^helper_active\s+inactive`).Match(status) {
		t.Fatalf("root helper still active after daemon stop:\n%s", status)
	}
}

View file

@ -0,0 +1,311 @@
//go:build smoke
package smoketest
import (
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"sync"
"testing"
)
// testBareRun is the Go port of scenario_bare_run from
// scripts/smoke.sh. Bare ephemeral VM run: create + start + ssh +
// echo + --rm.
func testBareRun(t *testing.T) {
	t.Parallel()
	stdout := mustBanger(t, "vm", "run", "--rm", "--", "echo", "smoke-bare-ok")
	wantContains(t, stdout, "smoke-bare-ok", "bare vm run stdout")
}
// testExitCode is the Go port of scenario_exit_code. Asserts that
// `vm run -- sh -c 'exit 42'` propagates rc=42 verbatim.
func testExitCode(t *testing.T) {
	t.Parallel()
	got := banger(t, "vm", "run", "--rm", "--", "sh", "-c", "exit 42")
	wantExit(t, got, 42, "exit-code propagation")
}
// testConcurrentRun fires two `vm run --rm` invocations simultaneously
// and asserts both succeed and emit their respective markers. Bash uses
// `& ; wait`; Go uses goroutines plus a WaitGroup. t.Fatalf must not be
// called from a goroutine, so each worker writes into its result slot
// and all assertions happen back on the main goroutine.
func testConcurrentRun(t *testing.T) {
	t.Parallel()
	markers := []string{"smoke-concurrent-a", "smoke-concurrent-b"}
	results := make([]result, len(markers))
	var wg sync.WaitGroup
	for i := range markers {
		wg.Add(1)
		go func(slot *result, marker string) {
			defer wg.Done()
			cmd := exec.Command(bangerBin, "vm", "run", "--rm", "--", "echo", marker)
			var stdout, stderr strings.Builder
			cmd.Stdout = &stdout
			cmd.Stderr = &stderr
			err := cmd.Run()
			slot.stdout = stdout.String()
			slot.stderr = stderr.String()
			switch e := err.(type) {
			case nil:
				// rc stays 0 on success.
			case *exec.ExitError:
				slot.rc = e.ExitCode()
			default:
				slot.rc = -1
				slot.stderr += "\nexec error: " + err.Error()
			}
		}(&results[i], markers[i])
	}
	wg.Wait()
	wantExit(t, results[0], 0, "concurrent A exit")
	wantExit(t, results[1], 0, "concurrent B exit")
	wantContains(t, results[0].stdout, "smoke-concurrent-a", "concurrent A stdout")
	wantContains(t, results[1].stdout, "smoke-concurrent-b", "concurrent B stdout")
}
// testDetachRun ports scenario_detach_run. -d combined with --rm or
// with a guest command must be rejected before VM creation; a plain
// `-d --name` must leave the VM running and ssh-able.
func testDetachRun(t *testing.T) {
	t.Parallel()
	if banger(t, "vm", "run", "-d", "--rm").rc == 0 {
		t.Fatalf("detach: -d --rm should be rejected before VM creation")
	}
	if banger(t, "vm", "run", "-d", "--", "echo", "hi").rc == 0 {
		t.Fatalf("detach: -d -- <cmd> should be rejected before VM creation")
	}
	const name = "smoke-detach"
	mustBanger(t, "vm", "run", "-d", "--name", name)
	t.Cleanup(func() { vmDelete(name) })
	wantContains(t, mustBanger(t, "vm", "show", name),
		`"state": "running"`, "detach: post-detach state")
	wantContains(t, mustBanger(t, "vm", "ssh", name, "--", "echo", "detach-marker"),
		"detach-marker", "detach: ssh stdout")
}
// testBootstrapPrecondition ports scenario_bootstrap_precondition.
// A workspace with .mise.toml requires NAT (or --no-bootstrap) to run.
// The fake repo lives in a TempDir so it doesn't pollute the shared
// repodir fixture used by repodir-class scenarios.
func testBootstrapPrecondition(t *testing.T) {
	t.Parallel()
	repo := t.TempDir()
	// runInRepo executes a command inside the throwaway repo, failing
	// the test on any error.
	runInRepo := func(argv ...string) {
		t.Helper()
		cmd := exec.Command(argv[0], argv[1:]...)
		cmd.Dir = repo
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("setup mise repo: %s: %v\n%s", argv, err, out)
		}
	}
	runInRepo("git", "init", "-q")
	runInRepo("git", "-c", "user.email=smoke@banger", "-c", "user.name=smoke",
		"commit", "--allow-empty", "-q", "-m", "init")
	if err := os.WriteFile(filepath.Join(repo, ".mise.toml"), []byte("[tools]\n"), 0o644); err != nil {
		t.Fatalf("write .mise.toml: %v", err)
	}
	runInRepo("git", "add", ".mise.toml")
	runInRepo("git", "-c", "user.email=smoke@banger", "-c", "user.name=smoke",
		"commit", "-q", "-m", "add mise")
	// Without NAT the run must refuse...
	if res := banger(t, "vm", "run", "--rm", repo, "--", "echo", "nope"); res.rc == 0 {
		t.Fatalf("bootstrap: workspace with .mise.toml should refuse without --nat / --no-bootstrap")
	}
	// ...and --no-bootstrap must bypass the precondition.
	out := mustBanger(t, "vm", "run", "--rm", "--no-bootstrap", repo, "--", "echo", "no-bootstrap-ok")
	wantContains(t, out, "no-bootstrap-ok", "bootstrap: --no-bootstrap stdout")
}
// testVMLifecycle ports scenario_vm_lifecycle. Drives an explicit
// create / show / ssh / stop / start / ssh / delete and asserts every
// state transition is visible in `vm show`.
func testVMLifecycle(t *testing.T) {
	t.Parallel()
	const name = "smoke-lifecycle"
	vmCreate(t, name)
	wantContains(t, mustBanger(t, "vm", "show", name), `"state": "running"`, "post-create state")
	waitForSSH(t, name)
	wantContains(t, mustBanger(t, "vm", "ssh", name, "--", "echo", "hello-1"), "hello-1", "vm ssh #1")
	mustBanger(t, "vm", "stop", name)
	wantContains(t, mustBanger(t, "vm", "show", name), `"state": "stopped"`, "post-stop state")
	mustBanger(t, "vm", "start", name)
	wantContains(t, mustBanger(t, "vm", "show", name), `"state": "running"`, "post-start state")
	waitForSSH(t, name)
	wantContains(t, mustBanger(t, "vm", "ssh", name, "--", "echo", "hello-2"), "hello-2", "vm ssh #2 (post-restart)")
	mustBanger(t, "vm", "delete", name)
	// After delete the VM must be gone from `vm show`.
	if res := banger(t, "vm", "show", name); res.rc == 0 {
		t.Fatalf("vm show still finds %q after delete\nstdout: %s", name, res.stdout)
	}
}
// testVMSet ports scenario_vm_set. Creates with --vcpu 2, asserts
// guest sees 2 CPUs, reconfigures to 4 while stopped, asserts guest
// sees 4 after restart.
func testVMSet(t *testing.T) {
t.Parallel()
const name = "smoke-set"
vmCreate(t, name, "--vcpu", "2")
waitForSSH(t, name)
out := mustBanger(t, "vm", "ssh", name, "--", "nproc")
if got := strings.TrimSpace(out); got != "2" {
t.Fatalf("vm set: initial nproc got %q, want 2", got)
}
mustBanger(t, "vm", "stop", name)
mustBanger(t, "vm", "set", name, "--vcpu", "4")
mustBanger(t, "vm", "start", name)
waitForSSH(t, name)
out = mustBanger(t, "vm", "ssh", name, "--", "nproc")
if got := strings.TrimSpace(out); got != "4" {
t.Fatalf("vm set: post-reconfig nproc got %q, want 4 (spec change didn't land)", got)
}
}
// testVMRestart ports scenario_vm_restart. Reads /proc boot_id before
// and after `vm restart`; the kernel regenerates it on every boot, so
// distinct values prove the verb actually rebooted the guest.
func testVMRestart(t *testing.T) {
t.Parallel()
const name = "smoke-restart"
vmCreate(t, name)
waitForSSH(t, name)
bootBefore := strings.TrimSpace(mustBanger(t, "vm", "ssh", name, "--", "cat", "/proc/sys/kernel/random/boot_id"))
if bootBefore == "" {
t.Fatalf("vm restart: could not read initial boot_id")
}
mustBanger(t, "vm", "restart", name)
waitForSSH(t, name)
bootAfter := strings.TrimSpace(mustBanger(t, "vm", "ssh", name, "--", "cat", "/proc/sys/kernel/random/boot_id"))
if bootAfter == "" {
t.Fatalf("vm restart: could not read post-restart boot_id")
}
if bootBefore == bootAfter {
t.Fatalf("vm restart: boot_id unchanged (%s); verb didn't actually reboot the guest", bootBefore)
}
}
// dmDevRE captures the dm-snapshot device name (always prefixed
// "fc-rootfs-") from the `"dm_dev": "..."` field of `vm show` JSON.
// Used by testVMKill to check that `vm kill --signal KILL` cleans up
// the dm device alongside the firecracker process.
var dmDevRE = regexp.MustCompile(`"dm_dev":\s*"(fc-rootfs-[^"]+)"`)
// testVMKill ports scenario_vm_kill. `vm kill --signal KILL` must stop
// the VM and clean up its dm-snapshot device. The dm-name capture
// degrades gracefully — older builds without the field still pass the
// state-check half.
func testVMKill(t *testing.T) {
	t.Parallel()
	const name = "smoke-kill"
	vmCreate(t, name)
	var dmDevice string
	if m := dmDevRE.FindStringSubmatch(mustBanger(t, "vm", "show", name)); len(m) == 2 {
		dmDevice = m[1]
	}
	mustBanger(t, "vm", "kill", "--signal", "KILL", name)
	wantContains(t, mustBanger(t, "vm", "show", name), `"state": "stopped"`, "post-kill state")
	if dmDevice == "" {
		return // no dm_dev field (older build): state check only
	}
	// dmsetup errors are tolerated — an unreadable listing simply
	// skips the cleanup assertion, mirroring the bash scenario.
	listing, _ := exec.Command("sudo", "-n", "dmsetup", "ls").CombinedOutput()
	for _, line := range strings.Split(string(listing), "\n") {
		if fields := strings.Fields(line); len(fields) > 0 && fields[0] == dmDevice {
			t.Fatalf("vm kill: dm device %q still mapped (cleanup didn't run)", dmDevice)
		}
	}
}
// testVMPorts ports scenario_vm_ports. Asserts `vm ports` reports the
// guest's sshd listener under the VM's DNS name.
func testVMPorts(t *testing.T) {
	t.Parallel()
	const name = "smoke-ports"
	vmCreate(t, name)
	waitForSSH(t, name)
	ports := mustBanger(t, "vm", "ports", name)
	wantContains(t, ports, "smoke-ports.vm:22", "vm ports stdout (host:port)")
	wantContains(t, ports, "sshd", "vm ports stdout (process name)")
}
// testSSHConfig ports scenario_ssh_config. Drives ssh-config
// install/uninstall against a fake $HOME so the contributor's real
// ~/.ssh/config is never touched. Verifies idempotent install,
// preservation of pre-existing user content, and clean uninstall.
func testSSHConfig(t *testing.T) {
	t.Parallel()
	home := t.TempDir()
	if err := os.MkdirAll(filepath.Join(home, ".ssh"), 0o700); err != nil {
		t.Fatalf("mkdir .ssh: %v", err)
	}
	cfgPath := filepath.Join(home, ".ssh", "config")
	if err := os.WriteFile(cfgPath, []byte("Host myserver\n HostName example.invalid\n"), 0o600); err != nil {
		t.Fatalf("write fake config: %v", err)
	}
	// countBangerIncludes counts banger-owned Include lines in a config body.
	countBangerIncludes := func(body string) int {
		n := 0
		for _, line := range strings.Split(body, "\n") {
			if strings.HasPrefix(line, "Include ") && strings.Contains(line, "banger") {
				n++
			}
		}
		return n
	}
	mustBangerHome(t, home, "ssh-config", "--install")
	raw, err := os.ReadFile(cfgPath)
	if err != nil {
		t.Fatalf("read fake config after install: %v", err)
	}
	body := string(raw)
	if !strings.Contains(body, "\nInclude ") && !strings.HasPrefix(body, "Include ") {
		t.Fatalf("ssh-config: install didn't add Include line:\n%s", body)
	}
	wantContains(t, body, "Host myserver", "ssh-config: install must preserve user content")
	// A second install must not duplicate the Include line.
	mustBangerHome(t, home, "ssh-config", "--install")
	raw, _ = os.ReadFile(cfgPath)
	if n := countBangerIncludes(string(raw)); n != 1 {
		t.Fatalf("ssh-config: install not idempotent (Include appeared %d times)", n)
	}
	// Uninstall must remove the Include line but keep user content.
	mustBangerHome(t, home, "ssh-config", "--uninstall")
	raw, _ = os.ReadFile(cfgPath)
	body = string(raw)
	if countBangerIncludes(body) != 0 {
		t.Fatalf("ssh-config: uninstall left the Include line behind:\n%s", body)
	}
	wantContains(t, body, "Host myserver", "ssh-config: uninstall must keep user content")
}

View file

@ -0,0 +1,205 @@
//go:build smoke
package smoketest
import (
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
)
// testWorkspaceRun ports scenario_workspace_run. Ships the throwaway
// git repo to a fresh VM and reads the marker file from the guest.
func testWorkspaceRun(t *testing.T) {
	guestRead := mustBanger(t, "vm", "run", "--rm", repoDir, "--", "cat", "/root/repo/smoke-file.txt")
	wantContains(t, guestRead, "smoke-workspace-marker", "workspace vm run guest read")
}
// testWorkspaceDryrun ports scenario_workspace_dryrun. `--dry-run`
// lists the tracked files and the resolved transfer mode without
// creating a VM.
func testWorkspaceDryrun(t *testing.T) {
	listing := mustBanger(t, "vm", "run", "--dry-run", repoDir)
	wantContains(t, listing, "smoke-file.txt", "dry-run file list")
	wantContains(t, listing, "mode: tracked only", "dry-run mode line")
}
// testIncludeUntracked ports scenario_include_untracked. Drops an
// untracked file in the fixture and asserts --include-untracked picks
// it up. The cleanup hook removes the file even if the scenario fails
// so downstream repodir scenarios see the original tree.
func testIncludeUntracked(t *testing.T) {
	path := filepath.Join(repoDir, "smoke-untracked.txt")
	if err := os.WriteFile(path, []byte("untracked-marker\n"), 0o644); err != nil {
		t.Fatalf("write untracked file: %v", err)
	}
	t.Cleanup(func() { _ = os.Remove(path) })
	guestRead := mustBanger(t, "vm", "run", "--rm", "--include-untracked", repoDir,
		"--", "cat", "/root/repo/smoke-untracked.txt")
	wantContains(t, guestRead, "untracked-marker", "include-untracked guest read")
}
// testWorkspaceExport ports scenario_workspace_export. Round-trips a
// guest-side edit back out as a patch via `vm workspace export`.
func testWorkspaceExport(t *testing.T) {
	const name = "smoke-export"
	vmCreate(t, name, "--image", "debian-bookworm")
	mustBanger(t, "vm", "workspace", "prepare", name, repoDir)
	// Create a new file inside the guest so the export has content.
	mustBanger(t, "vm", "ssh", name, "--", "sh", "-c",
		"echo guest-edit > /root/repo/new-guest-file.txt")
	patch := filepath.Join(runtimeDir, "smoke-export.diff")
	mustBanger(t, "vm", "workspace", "export", name, "--output", patch)
	info, err := os.Stat(patch)
	if err != nil {
		t.Fatalf("export: stat patch %s: %v", patch, err)
	}
	if info.Size() == 0 {
		t.Fatalf("export: patch file empty at %s", patch)
	}
	patchBytes, err := os.ReadFile(patch)
	if err != nil {
		t.Fatalf("export: read patch: %v", err)
	}
	wantContains(t, string(patchBytes), "new-guest-file.txt", "export: patch must reference new-guest-file.txt")
}
// testWorkspaceFullCopy ports scenario_workspace_full_copy. Verifies
// the alternate transfer path (--mode full_copy) lands the same
// fixture in the guest.
func testWorkspaceFullCopy(t *testing.T) {
	const name = "smoke-fc"
	vmCreate(t, name)
	mustBanger(t, "vm", "workspace", "prepare", name, repoDir, "--mode", "full_copy")
	guestRead := mustBanger(t, "vm", "ssh", name, "--", "cat", "/root/repo/smoke-file.txt")
	wantContains(t, guestRead, "smoke-workspace-marker", "full_copy: marker missing in guest")
}
// testWorkspaceBasecommit ports scenario_workspace_basecommit. Confirms
// that `vm workspace export` without --base-commit captures only the
// working-copy diff, while --base-commit also captures guest-side
// commits made on top of HEAD.
func testWorkspaceBasecommit(t *testing.T) {
	const name = "smoke-basecommit"
	vmCreate(t, name)
	mustBanger(t, "vm", "workspace", "prepare", name, repoDir)
	// Record the guest's HEAD before any guest-side commits; this
	// becomes the --base-commit argument below.
	baseSHA := strings.TrimSpace(mustBanger(t, "vm", "ssh", name, "--",
		"sh", "-c", "cd /root/repo && git rev-parse HEAD"))
	if len(baseSHA) != 40 {
		t.Fatalf("export base: bad base sha: %q", baseSHA)
	}
	// Make a commit inside the guest on a branch off HEAD.
	mustBanger(t, "vm", "ssh", name, "--", "sh", "-c",
		"cd /root/repo && "+
			"git -c user.email=smoke@smoke -c user.name=smoke checkout -b smoke-branch >/dev/null 2>&1 && "+
			"echo committed-marker > smoke-committed.txt && "+
			"git add smoke-committed.txt && "+
			"git -c user.email=smoke@smoke -c user.name=smoke commit -q -m 'guest side'")
	// Plain export: the negative check runs only when the patch is
	// readable (best-effort, matching the bash scenario).
	plain := filepath.Join(runtimeDir, "smoke-plain.diff")
	mustBanger(t, "vm", "workspace", "export", name, "--output", plain)
	if body, err := os.ReadFile(plain); err == nil {
		wantNotContains(t, string(body), "smoke-committed.txt",
			"export base: plain export must NOT capture guest-side commit")
	}
	// --base-commit export must be non-empty and include the commit.
	base := filepath.Join(runtimeDir, "smoke-base.diff")
	mustBanger(t, "vm", "workspace", "export", name, "--base-commit", baseSHA, "--output", base)
	st, err := os.Stat(base)
	if err != nil || st.Size() == 0 {
		t.Fatalf("export base: --base-commit patch empty/missing: stat=%v err=%v", st, err)
	}
	body, _ := os.ReadFile(base)
	wantContains(t, string(body), "smoke-committed.txt",
		"export base: --base-commit patch must include committed marker")
}
// testWorkspaceRestart ports scenario_workspace_restart. Verifies the
// workspace marker survives a stop/start cycle (rootfs persistence).
func testWorkspaceRestart(t *testing.T) {
const name = "smoke-wsrestart"
vmCreate(t, name)
mustBanger(t, "vm", "workspace", "prepare", name, repoDir)
pre := mustBanger(t, "vm", "ssh", name, "--", "cat", "/root/repo/smoke-file.txt")
wantContains(t, pre, "smoke-workspace-marker", "workspace stop/start: pre-cycle marker")
mustBanger(t, "vm", "stop", name)
mustBanger(t, "vm", "start", name)
waitForSSH(t, name)
post := mustBanger(t, "vm", "ssh", name, "--", "cat", "/root/repo/smoke-file.txt")
wantContains(t, post, "smoke-workspace-marker", "workspace stop/start: post-cycle marker")
}
// testVMExec ports scenario_vm_exec. The longest scenario in the suite
// — covers auto-cd, exit-code propagation, stale-workspace detection,
// --auto-prepare resync, and the not-running refusal. The repodir
// commit added mid-scenario is rolled back via t.Cleanup so subsequent
// repodir-chain scenarios see the original fixture state.
func testVMExec(t *testing.T) {
	const name = "smoke-exec"
	vmCreate(t, name)
	mustBanger(t, "vm", "workspace", "prepare", name, repoDir)
	show := mustBanger(t, "vm", "show", name)
	wantContains(t, show, `"guest_path": "/root/repo"`,
		"vm exec: workspace.guest_path not persisted")
	// exec must auto-cd into the workspace directory...
	out := mustBanger(t, "vm", "exec", name, "--", "cat", "smoke-file.txt")
	wantContains(t, out, "smoke-workspace-marker", "vm exec: workspace marker")
	if got := strings.TrimSpace(mustBanger(t, "vm", "exec", name, "--", "pwd")); got != "/root/repo" {
		t.Fatalf("vm exec: pwd got %q, want /root/repo (auto-cd didn't happen)", got)
	}
	// ...and propagate the guest command's exit code verbatim.
	res := banger(t, "vm", "exec", name, "--", "sh", "-c", "exit 17")
	wantExit(t, res, 17, "vm exec: exit-code propagation")
	// Advance host HEAD so the workspace goes stale, register the
	// rollback before mutating so a Fatal anywhere below still
	// restores the fixture.
	t.Cleanup(func() {
		cmd := exec.Command("git", "reset", "--hard", "HEAD~1", "-q")
		cmd.Dir = repoDir
		_ = cmd.Run()
	})
	for _, args := range [][]string{
		{"sh", "-c", "echo post-prepare-marker > smoke-exec-new.txt"},
		{"git", "add", "smoke-exec-new.txt"},
		{"git", "commit", "-q", "-m", "add smoke-exec-new.txt after prepare"},
	} {
		cmd := exec.Command(args[0], args[1:]...)
		cmd.Dir = repoDir
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("vm exec: stage host commit: %s: %v\n%s", args, err, out)
		}
	}
	// Stale workspace: exec must fail, warn on stderr, and point at
	// --auto-prepare as the remedy.
	stale := banger(t, "vm", "exec", name, "--", "ls", "smoke-exec-new.txt")
	if stale.rc == 0 {
		t.Fatalf("vm exec: stale workspace already had the new file (dirty path didn't take effect)")
	}
	wantContains(t, stale.stderr, "workspace stale", "vm exec: stale-workspace warning on stderr")
	wantContains(t, stale.stderr, "--auto-prepare", "vm exec: stale warning must mention --auto-prepare")
	// --auto-prepare must resync the new file and clear the warning.
	auto := mustBanger(t, "vm", "exec", name, "--auto-prepare", "--", "cat", "smoke-exec-new.txt")
	wantContains(t, auto, "post-prepare-marker", "vm exec: --auto-prepare didn't re-sync new file")
	clean := banger(t, "vm", "exec", name, "--", "true")
	wantExit(t, clean, 0, "vm exec: post-auto-prepare run")
	wantNotContains(t, clean.stderr, "workspace stale", "vm exec: stale warning persisted after --auto-prepare")
	// exec on a stopped VM must refuse with a "not running" error.
	mustBanger(t, "vm", "stop", name)
	stopped := banger(t, "vm", "exec", name, "--", "true")
	if stopped.rc == 0 {
		t.Fatalf("vm exec: exec on stopped VM unexpectedly succeeded")
	}
	wantContains(t, stopped.stderr, "not running", "vm exec: stopped-VM error message")
}

View file

@ -0,0 +1,305 @@
//go:build smoke
package smoketest
import (
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/user"
"path/filepath"
"regexp"
"strings"
"testing"
)
// Package-level state set up in TestMain and consumed by every test.
// Lowercase, file-scope; tests in this package don't share globals
// with other packages because of the build tag.
var (
	bangerBin   string // path to the instrumented banger CLI (from BANGER_SMOKE_BIN_DIR)
	bangerdBin  string // path to the instrumented bangerd daemon binary
	vsockBin    string // path to the instrumented banger-vsock-agent binary
	coverDir    string // covdata dir; exported as GOCOVERDIR for spawned binaries
	scratchRoot string // per-run scratch root (wiped at startup by requireEnv)
	runtimeDir  string // MkdirTemp("runtime-") under scratchRoot for per-run artefacts
	repoDir     string // git-repo fixture; presumably set by setupRepoFixture — not visible here
	smokeOwner  string // username the suite runs as (user.Current)
)
// Fixed paths/units of the installed smoke deployment.
const (
	serviceCoverDir = "/var/lib/banger"          // covdata location for the installed services (per name; confirm in installPreamble)
	smokeMarker     = "/etc/banger/.smoke-owned" // sentinel marking the install as smoke-owned
	ownerService    = "bangerd.service"          // owner daemon systemd unit
	rootService     = "bangerd-root.service"     // root helper systemd unit
)
// smokeConfigTOML is the smoke-tuned daemon config dropped at
// /etc/banger/config.toml after install (mirrors scripts/smoke.sh:404-415).
// Small VM defaults keep the suite fast and cheap — scenarios needing
// full-size resources override --vcpu / --memory / --disk-size explicitly.
const smokeConfigTOML = `# Smoke-tuned defaults every VM starts small unless the scenario
# overrides --vcpu / --memory / --disk-size explicitly.
[vm_defaults]
vcpu = 2
memory_mib = 1024
disk_size = "2G"
system_overlay_size = "2G"
`
// TestMain is the suite's process-wide setup/teardown: validate the
// harness env, export GOCOVERDIR, install the instrumented services,
// build the repo fixture, run the tests, then tear everything down.
func TestMain(m *testing.M) {
	// `go test -list ...` (used by `make smoke-list`) just enumerates
	// the test names. Skip the install preamble and let m.Run() print
	// the listing — env vars + KVM aren't needed for discovery.
	if isListMode() {
		os.Exit(m.Run())
	}
	if err := requireEnv(); err != nil {
		fmt.Fprintf(os.Stderr, "[smoke] %v\n", err)
		// Skip cleanly when run outside `make smoke`. Returning 0
		// prevents `go test` from being mistaken for a real failure
		// when a contributor accidentally runs the smoke package
		// directly without the harness env.
		os.Exit(0)
	}
	// Export GOCOVERDIR so every banger / bangerd subprocess this
	// test binary spawns lands its covdata under BANGER_SMOKE_COVER_DIR.
	// The test binary itself is not instrumented; only the smoke
	// binaries are (they were built with `go build -cover`).
	if err := os.Setenv("GOCOVERDIR", coverDir); err != nil {
		fmt.Fprintf(os.Stderr, "[smoke] setenv GOCOVERDIR: %v\n", err)
		os.Exit(1)
	}
	if err := installPreamble(); err != nil {
		fmt.Fprintf(os.Stderr, "[smoke] install preamble failed: %v\n", err)
		os.Exit(1)
	}
	// Fixture failure after a successful install still needs teardown.
	if err := setupRepoFixture(); err != nil {
		fmt.Fprintf(os.Stderr, "[smoke] fixture setup failed: %v\n", err)
		teardown()
		os.Exit(1)
	}
	code := m.Run()
	teardown()
	os.Exit(code)
}
// isListMode returns true when the test binary was invoked with the
// `-test.list` flag, which `go test -list ...` translates into. In that
// mode the harness only enumerates names and never spawns a test, so
// requireEnv / installPreamble would needlessly block discovery on a
// fresh checkout (no KVM, no sudo).
func isListMode() bool {
for _, a := range os.Args[1:] {
if a == "-test.list" || strings.HasPrefix(a, "-test.list=") {
return true
}
}
return false
}
// requireEnv reads and validates the three BANGER_SMOKE_* env vars and
// confirms the binaries they point at exist and are executable. It
// returns a single descriptive error so a contributor running by hand
// sees exactly which variable is missing.
func requireEnv() error {
	// All three harness variables are mandatory; name the missing one
	// so a bare `go test` run explains itself.
	mustGetenv := func(key string) (string, error) {
		if v := os.Getenv(key); v != "" {
			return v, nil
		}
		return "", errors.New(key + " not set; run via `make smoke`")
	}
	binDir, err := mustGetenv("BANGER_SMOKE_BIN_DIR")
	if err != nil {
		return err
	}
	cov, err := mustGetenv("BANGER_SMOKE_COVER_DIR")
	if err != nil {
		return err
	}
	xdg, err := mustGetenv("BANGER_SMOKE_XDG_DIR")
	if err != nil {
		return err
	}
	bangerBin = filepath.Join(binDir, "banger")
	bangerdBin = filepath.Join(binDir, "bangerd")
	vsockBin = filepath.Join(binDir, "banger-vsock-agent")
	coverDir = cov
	scratchRoot = xdg
	// Every smoke binary must exist and carry at least one exec bit.
	for _, bin := range []string{bangerBin, bangerdBin, vsockBin} {
		info, statErr := os.Stat(bin)
		if statErr != nil {
			return fmt.Errorf("smoke binary missing: %s: %w", bin, statErr)
		}
		if info.Mode()&0o111 == 0 {
			return fmt.Errorf("smoke binary not executable: %s", bin)
		}
	}
	if mkErr := os.MkdirAll(coverDir, 0o755); mkErr != nil {
		return fmt.Errorf("mkdir cover dir: %w", mkErr)
	}
	// Reset the scratch root each run — leftover state from a prior
	// crashed run would otherwise leak into this one's fixtures.
	if rmErr := os.RemoveAll(scratchRoot); rmErr != nil {
		return fmt.Errorf("clean scratch root: %w", rmErr)
	}
	if mkErr := os.MkdirAll(scratchRoot, 0o755); mkErr != nil {
		return fmt.Errorf("mkdir scratch root: %w", mkErr)
	}
	rt, err := os.MkdirTemp(scratchRoot, "runtime-")
	if err != nil {
		return fmt.Errorf("mktemp runtime: %w", err)
	}
	runtimeDir = rt
	cur, err := user.Current()
	if err != nil {
		return fmt.Errorf("user.Current: %w", err)
	}
	smokeOwner = cur.Username
	return nil
}
// installPreamble mirrors scripts/smoke.sh's install_preamble. Refuses to
// overwrite a non-smoke install, otherwise installs the instrumented
// services, runs doctor, drops the smoke-tuned config, and restarts.
func installPreamble() error {
	if installExists() {
		if markerExists() {
			// A previous smoke run died before teardown; the marker
			// proves the install is smoke-owned, so purging is safe.
			fmt.Fprintln(os.Stderr, "[smoke] found stale smoke-owned install; purging it first")
			_ = exec.Command("sudo", "env", "GOCOVERDIR="+coverDir, bangerBin,
				"system", "uninstall", "--purge").Run()
		} else {
			return errors.New("banger is already installed on this host; supported-path smoke refuses to overwrite a non-smoke install")
		}
	}
	// Wipe the user-side known_hosts. Fresh VMs reuse guest IPs with
	// new host keys every run; a stale entry trips StrictHostKeyChecking.
	// scripts/smoke.sh:374-380 explains why this is host-side, not
	// daemon-side state.
	if home, err := os.UserHomeDir(); err == nil {
		_ = os.Remove(filepath.Join(home, ".local", "state", "banger", "ssh", "known_hosts"))
	}
	fmt.Fprintln(os.Stderr, "[smoke] installing smoke-owned services")
	// The BANGER_*_GOCOVERDIR env vars presumably route the installed
	// services' covdata to serviceCoverDir (harvested later by
	// collectServiceCoverage) — confirm against the daemon's env handling.
	install := exec.Command("sudo", "env",
		"GOCOVERDIR="+coverDir,
		"BANGER_SYSTEM_GOCOVERDIR="+serviceCoverDir,
		"BANGER_ROOT_HELPER_GOCOVERDIR="+serviceCoverDir,
		bangerBin, "system", "install", "--owner", smokeOwner,
	)
	if out, err := install.CombinedOutput(); err != nil {
		return fmt.Errorf("system install: %w\n%s", err, out)
	}
	// Mark the fresh install as smoke-owned so this run's teardown (or
	// a future run's stale-install purge above) knows it may remove it.
	if out, err := exec.Command("sudo", "touch", smokeMarker).CombinedOutput(); err != nil {
		return fmt.Errorf("touch smoke marker: %w\n%s", err, out)
	}
	if err := assertServicesActive("after install"); err != nil {
		return err
	}
	fmt.Fprintln(os.Stderr, "[smoke] doctor: checking host readiness")
	if out, err := exec.Command(bangerBin, "doctor").CombinedOutput(); err != nil {
		return fmt.Errorf("doctor reported failures; fix the host before running smoke:\n%s", out)
	}
	fmt.Fprintln(os.Stderr, "[smoke] writing smoke-tuned daemon config")
	if err := writeSmokeConfig(); err != nil {
		return err
	}
	fmt.Fprintln(os.Stderr, "[smoke] system restart: services should come back cleanly")
	// Restart so the daemons pick up the smoke-tuned config just written.
	restart := exec.Command("sudo", "env", "GOCOVERDIR="+coverDir,
		bangerBin, "system", "restart")
	if out, err := restart.CombinedOutput(); err != nil {
		return fmt.Errorf("system restart: %w\n%s", err, out)
	}
	return assertServicesActive("after restart")
}
// installExists reports whether /etc/banger/install.toml is present.
// The probe runs `test -f` under sudo because the directory is not
// always world-readable.
func installExists() bool {
	probe := exec.Command("sudo", "test", "-f", "/etc/banger/install.toml")
	return probe.Run() == nil
}
// markerExists reports whether the smoke-ownership marker file exists,
// checked under sudo the same way installExists probes its file.
func markerExists() bool {
	probe := exec.Command("sudo", "test", "-f", smokeMarker)
	return probe.Run() == nil
}
// Line-anchored patterns matched against `banger system status` output
// in assertServicesActive: the owner-daemon row must read
// "active active" and the root-helper row "helper_active active".
// (?m) makes ^ anchor each pattern to the start of its own line.
var (
	statusOwnerRE = regexp.MustCompile(`(?m)^active\s+active\b`)
	statusHelperRE = regexp.MustCompile(`(?m)^helper_active\s+active\b`)
)
// assertServicesActive shells out to `banger system status` and checks
// that both the owner daemon and the root helper rows report active.
// label names the phase (e.g. "after install") in failure messages.
func assertServicesActive(label string) error {
	out, err := exec.Command(bangerBin, "system", "status").CombinedOutput()
	switch {
	case err != nil:
		return fmt.Errorf("system status %s: %w\n%s", label, err, out)
	case !statusOwnerRE.Match(out):
		return fmt.Errorf("owner daemon not active %s:\n%s", label, out)
	case !statusHelperRE.Match(out):
		return fmt.Errorf("root helper not active %s:\n%s", label, out)
	}
	return nil
}
// writeSmokeConfig installs smokeConfigTOML as /etc/banger/config.toml.
// The file is root-owned, so the content is piped through `sudo tee`
// rather than written directly from this unprivileged process.
func writeSmokeConfig() error {
	tee := exec.Command("sudo", "tee", "/etc/banger/config.toml")
	tee.Stdin = strings.NewReader(smokeConfigTOML)
	tee.Stdout = io.Discard // tee echoes the config back; swallow it
	tee.Stderr = os.Stderr
	if runErr := tee.Run(); runErr != nil {
		return fmt.Errorf("write smoke config: %w", runErr)
	}
	return nil
}
// teardown is the equivalent of scripts/smoke.sh's `cleanup` trap. It
// best-efforts every step — partial failures during teardown should
// not mask the test outcome.
//
// Order matters: the release server shuts down first, the services are
// stopped next, their coverage files are harvested, and only then is
// the install purged and the scratch tree removed.
func teardown() {
	shutdownReleaseServer()
	stopServicesForCoverage()
	collectServiceCoverage()
	// Purge the smoke-owned install; errors are deliberately ignored.
	_ = exec.Command("sudo", "env", "GOCOVERDIR="+coverDir, bangerBin,
		"system", "uninstall", "--purge").Run()
	_ = os.RemoveAll(scratchRoot)
}
// stopServicesForCoverage stops both smoke-installed systemd units,
// best-effort, ahead of the coverage harvest in collectServiceCoverage.
func stopServicesForCoverage() {
	stop := exec.Command("sudo", "systemctl", "stop", ownerService, rootService)
	_ = stop.Run()
}
// collectServiceCoverage copies covmeta.* / covcounters.* out of
// /var/lib/banger into BANGER_SMOKE_COVER_DIR, chowning to the test
// user so subsequent `go tool covdata` invocations can read them.
// Mirrors the inline `sudo bash -lc '...'` in scripts/smoke.sh:307-325.
func collectServiceCoverage() {
	uid := fmt.Sprint(os.Getuid())
	gid := fmt.Sprint(os.Getgid())
	// nullglob makes the loop a no-op when no coverage files exist
	// (e.g. the services never produced any); chmod keeps the copies
	// readable by the unprivileged covdata tooling.
	const script = `
shopt -s nullglob
for file in "$1"/covmeta.* "$1"/covcounters.*; do
base="${file##*/}"
cp "$file" "$2/$base"
chown "$3:$4" "$2/$base"
chmod 0644 "$2/$base"
done
`
	// With bash -c, the first trailing argument ("bash") becomes $0,
	// so the four real arguments land in $1..$4 as the script expects.
	// Errors are deliberately ignored — coverage harvest is best-effort.
	_ = exec.Command("sudo", "bash", "-c", script, "bash",
		serviceCoverDir, coverDir, uid, gid).Run()
}

View file

@ -0,0 +1,72 @@
//go:build smoke
package smoketest
import "testing"
// TestSmoke is the single top-level test that pins run-order across
// scenario classes:
//
//   - "pool": pure scenarios fan out concurrently (each calls
//     t.Parallel) next to the repodir chain, whose subtests run
//     strictly in sequence. The pool subtest returns only once every
//     t.Parallel child has finished.
//   - "global": runs after the pool, serially, in registry order.
//     These scenarios assert host-wide state (iptables, vm row counts,
//     ssh-config under a fake HOME, the update / rollback flow, daemon
//     stop) and would race with the parallel pool.
//
// `go test -parallel N` bounds fan-out within the pool. `-run
// TestSmoke/pool/bare_run` selects a single scenario without changing
// the install preamble path.
func TestSmoke(t *testing.T) {
	type scenario struct {
		name string
		fn   func(*testing.T)
	}
	// Pure scenarios — each calls t.Parallel internally, so they fan
	// out under -parallel once registered.
	pure := []scenario{
		{"bare_run", testBareRun},
		{"exit_code", testExitCode},
		{"concurrent_run", testConcurrentRun},
		{"detach_run", testDetachRun},
		{"bootstrap_precondition", testBootstrapPrecondition},
		{"vm_lifecycle", testVMLifecycle},
		{"vm_set", testVMSet},
		{"vm_restart", testVMRestart},
		{"vm_kill", testVMKill},
		{"vm_ports", testVMPorts},
		{"ssh_config", testSSHConfig},
	}
	// Repodir chain — these share and mutate the throwaway git repo at
	// repoDir, so they must stay sequential: t.Parallel is intentionally
	// absent from the inner subtests.
	chain := []scenario{
		{"workspace_run", testWorkspaceRun},
		{"workspace_dryrun", testWorkspaceDryrun},
		{"include_untracked", testIncludeUntracked},
		{"workspace_export", testWorkspaceExport},
		{"workspace_full_copy", testWorkspaceFullCopy},
		{"workspace_basecommit", testWorkspaceBasecommit},
		{"workspace_restart", testWorkspaceRestart},
		{"vm_exec", testVMExec},
	}
	// Global scenarios — serial, after the pool drains. Order matters:
	// daemon_admin tears the installed services down and must be LAST.
	// The order otherwise mirrors scripts/smoke.sh's SMOKE_SCENARIOS
	// registry so the run shape is comparable.
	global := []scenario{
		{"vm_prune", testVMPrune},
		{"nat", testNAT},
		{"invalid_spec", testInvalidSpec},
		{"invalid_name", testInvalidName},
		{"update_check", testUpdateCheck},
		{"update_to_unknown", testUpdateToUnknown},
		{"update_no_root", testUpdateNoRoot},
		{"update_dry_run", testUpdateDryRun},
		{"update_keeps_vm_alive", testUpdateKeepsVMAlive},
		{"update_rollback_keeps_vm_alive", testUpdateRollbackKeepsVMAlive},
		{"daemon_admin", testDaemonAdmin},
	}
	t.Run("pool", func(t *testing.T) {
		for _, s := range pure {
			t.Run(s.name, s.fn)
		}
		// The chain is a single virtual job in the pool: it competes
		// with the pure scenarios for one parallel slot at this level.
		t.Run("repodir_chain", func(t *testing.T) {
			t.Parallel()
			for _, s := range chain {
				t.Run(s.name, s.fn)
			}
		})
	})
	t.Run("global", func(t *testing.T) {
		for _, s := range global {
			t.Run(s.name, s.fn)
		}
	})
}

File diff suppressed because it is too large Load diff