Move avoidable daemon shell-outs into Go

Reduce the control plane's dependency on helper scripts while keeping the hard Linux integration points in the approved shell-out layer.

Replace the bash-driven image build path with a native Go builder that clones and optionally resizes the rootfs, boots a temporary Firecracker VM, provisions the guest over SSH, installs packages and modules, and preserves the package-manifest sidecar.

Also replace a few small convenience shell-outs with Go helpers: read process stats from /proc, use os.Truncate for ext4 image growth, add file-clone and normalized-line helpers, drop the sh -c work-disk flattening path, and launch Firecracker via a direct sudo command.

Add tests for the new SSH/archive and system helpers, plus a policy test that keeps os/exec imports confined to cli/firecracker/system. Update the docs to describe customize.sh as a manual helper rather than the daemon's image-build backend.

Validated with go mod tidy, go test ./..., and make build.
This commit is contained in:
Thales Maciel 2026-03-17 17:13:07 -03:00
parent 0a0b0b617b
commit 942d242c03
No known key found for this signature in database
GPG key ID: 33112E6833C34679
17 changed files with 936 additions and 145 deletions

View file

@ -3,7 +3,7 @@
## Project Structure & Module Organization
- `cmd/banger` and `cmd/bangerd` are the primary user-facing entrypoints.
- `internal/` contains the daemon, CLI, RPC, storage, Firecracker, and system integration code.
- `customize.sh`, `make-rootfs.sh`, and `interactive.sh` remain as image-build/customization helpers; normal VM lifecycle, NAT, and `.vm` DNS are handled by the Go control plane.
- `customize.sh`, `make-rootfs.sh`, and `interactive.sh` remain as manual rootfs/customization helpers; normal VM lifecycle, NAT, `.vm` DNS, and daemon-driven image builds are handled by the Go control plane.
- Source checkouts use a generated `./runtime/` bundle for Firecracker, kernels, modules, rootfs images, and helper copies. Bundle defaults come from `./runtime/bundle.json` when present. Those runtime artifacts are not meant to be tracked directly in Git.
- The daemon keeps state under XDG directories rather than the old repo-local `state/` layout.

View file

@ -4,12 +4,12 @@ Persistent Firecracker development VMs managed through a Go daemon, CLI, and TUI
## Requirements
- Linux host with KVM (`/dev/kvm` access)
- Core VM lifecycle: `sudo`, `ip`, `dmsetup`, `losetup`, `blockdev`, `truncate`, `pgrep`, `ps`
- Core VM lifecycle: `sudo`, `ip`, `dmsetup`, `losetup`, `blockdev`, `truncate`, `pgrep`, `chown`, `chmod`, `kill`
- Guest rootfs patching: `e2cp`, `e2rm`, `debugfs`
- Guest work disk creation/resizing: `mkfs.ext4`, `e2fsck`, `resize2fs`, `mount`, `umount`, `cp`
- SSH and logs: `ssh`
- Optional NAT: `iptables`, `sysctl`
- Image build helper flow: `bash`, `curl`, `jq`, `sha256sum`
- Image build: the bundled SSH key plus the tools listed above; `banger image build` no longer shells out through `customize.sh`.
`banger` validates these per command and returns actionable errors instead of
assuming one workstation layout.
@ -27,7 +27,7 @@ The bundle contains:
- `rootfs.ext4` when present
- `packages.apt`
- `id_ed25519`
- the helper scripts used by image builds and installs
- the helper scripts used by manual customization and installs
Bootstrap a source checkout from a local or published runtime archive. The
checked-in [`runtime-bundle.toml`](/home/thales/projects/personal/banger/runtime-bundle.toml)
@ -241,7 +241,7 @@ archive elsewhere.
## Remaining Shell Helpers
The runtime VM lifecycle is managed through `banger`. The remaining shell scripts are not the primary user interface:
- `customize.sh`: implementation used by `banger image build`; it now reads
- `customize.sh`: manual reference flow for rootfs customization; `banger image build` is now Go-native, but the script still reads
assets from `BANGER_RUNTIME_DIR` and stores transient state under
`BANGER_STATE_DIR`/XDG state
- `make-rootfs.sh`: convenience wrapper for rebuilding `./runtime/rootfs-docker.ext4`

3
go.mod
View file

@ -12,6 +12,8 @@ require (
github.com/pelletier/go-toml v1.9.5
github.com/sirupsen/logrus v1.9.4
github.com/spf13/cobra v1.8.1
golang.org/x/crypto v0.46.0
golang.org/x/sys v0.39.0
modernc.org/sqlite v1.38.2
)
@ -63,7 +65,6 @@ require (
golang.org/x/mod v0.31.0 // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/tools v0.40.0 // indirect

View file

@ -37,6 +37,7 @@ type Daemon struct {
pid int
listener net.Listener
vmDNS *vmdns.Server
imageBuild func(context.Context, imageBuildSpec) error
requestHandler func(context.Context, rpc.Request) rpc.Response
}

View file

@ -0,0 +1,275 @@
package daemon
import (
"bytes"
"context"
"crypto/sha256"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"banger/internal/firecracker"
"banger/internal/guest"
"banger/internal/hostnat"
"banger/internal/model"
"banger/internal/system"
)
// imageBuildSpec describes one daemon-driven image build: where the base
// rootfs comes from, where the finished image is written, and which boot
// and provisioning assets the temporary build VM uses.
type imageBuildSpec struct {
	ID            string    // unique identifier for this build/image
	Name          string    // human-readable image name
	BaseRootfs    string    // path to the base rootfs image that is cloned
	RootfsPath    string    // destination path of the built rootfs image
	BuildLog      io.Writer // sink for build progress and guest output; may be nil
	KernelPath    string    // kernel image used to boot the build VM
	InitrdPath    string    // optional initrd for the build VM; empty to skip
	ModulesDir    string    // optional host dir of kernel modules streamed into the guest
	PackagesPath  string    // manifest file listing packages to install in the guest
	InstallDocker bool      // when true, the provision script also installs Docker
	Size          string    // optional target image size (parsed by model.ParseSize); empty keeps the base size
}
// imageBuildVM captures the runtime identity of the temporary Firecracker VM
// booted to provision an image build.
type imageBuildVM struct {
	Name      string // guest hostname ("image-build-" + short ID)
	GuestIP   string // IP assigned to the guest on the bridge network
	TapDevice string // host tap device backing the guest NIC
	APISock   string // Firecracker API socket path on the host
	PID       int    // Firecracker process PID; 0 when unresolved
}
// runImageBuild executes an image build. A non-nil d.imageBuild hook (used
// as a test seam) takes precedence; otherwise the native Go builder runs.
func (d *Daemon) runImageBuild(ctx context.Context, spec imageBuildSpec) error {
	build := d.imageBuild
	if build == nil {
		build = d.runImageBuildNative
	}
	return build(ctx, spec)
}
// runImageBuildNative is the Go-native image build path: it clones (and
// optionally resizes) the base rootfs, boots a temporary Firecracker VM on
// the clone, provisions the guest over SSH (packages, optional Docker,
// optional kernel modules), then shuts the VM down cleanly so the image is
// consistent on disk. The result err is named so the deferred cleanup's
// error can be joined into it.
func (d *Daemon) runImageBuildNative(ctx context.Context, spec imageBuildSpec) (err error) {
	// Read the manifest first so a bad manifest fails before any disk work.
	packages, err := system.ReadNormalizedLines(spec.PackagesPath)
	if err != nil {
		return err
	}
	// Clone the base image (CopyFilePreferClone presumably reflinks when the
	// filesystem supports it — see internal/system).
	if err := system.CopyFilePreferClone(spec.BaseRootfs, spec.RootfsPath); err != nil {
		return err
	}
	if spec.Size != "" {
		if err := resizeRootfs(spec.BaseRootfs, spec.RootfsPath, spec.Size); err != nil {
			return err
		}
	}
	vm, cleanup, err := d.startImageBuildVM(ctx, spec)
	if err != nil {
		return err
	}
	defer func() {
		// Fresh context: cleanup must run even when ctx is already cancelled.
		cleanupErr := cleanup(context.Background())
		if cleanupErr != nil {
			err = errors.Join(err, cleanupErr)
		}
	}()
	sshAddress := vm.GuestIP + ":22"
	// Use the nil-safe log helper; the previous raw fmt.Fprintf on
	// spec.BuildLog panicked when no build log writer was configured.
	if err := writeBuildLog(spec.BuildLog, "waiting for ssh on "+sshAddress); err != nil {
		return err
	}
	// Give the guest up to a minute to finish booting and start sshd.
	waitCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()
	if err := guest.WaitForSSH(waitCtx, sshAddress, d.config.SSHKeyPath, time.Second); err != nil {
		return err
	}
	client, err := guest.Dial(ctx, sshAddress, d.config.SSHKeyPath)
	if err != nil {
		return err
	}
	defer client.Close()
	if err := writeBuildLog(spec.BuildLog, "configuring guest"); err != nil {
		return err
	}
	if err := client.RunScript(ctx, buildProvisionScript(vm.Name, d.config.DefaultDNS, packages, spec.InstallDocker), spec.BuildLog); err != nil {
		return err
	}
	if strings.TrimSpace(spec.ModulesDir) != "" {
		if err := writeBuildLog(spec.BuildLog, "copying kernel modules"); err != nil {
			return err
		}
		// Stream the modules directory as a tar archive into the guest.
		if err := client.StreamTar(ctx, spec.ModulesDir, buildModulesCommand(filepath.Base(spec.ModulesDir)), spec.BuildLog); err != nil {
			return err
		}
	}
	if err := writeBuildLog(spec.BuildLog, "shutting down guest"); err != nil {
		return err
	}
	// Flush guest filesystems before asking the VM to power off.
	if err := client.RunScript(ctx, "set -e\nsync\n", spec.BuildLog); err != nil {
		return err
	}
	return d.shutdownImageBuildVM(ctx, vm)
}
// resizeRootfs grows the cloned rootfs image at rootfsPath to sizeSpec
// (parsed by model.ParseSize). The requested size must be at least as large
// as the base image: the clone already contains the base filesystem, and
// shrinking is not supported.
func resizeRootfs(baseRootfs, rootfsPath, sizeSpec string) error {
	sizeBytes, err := model.ParseSize(sizeSpec)
	if err != nil {
		return err
	}
	info, err := os.Stat(baseRootfs)
	if err != nil {
		return err
	}
	if sizeBytes < info.Size() {
		// Include both values so the operator can see how far off the request is.
		return fmt.Errorf("requested size %d bytes is smaller than base image size %d bytes", sizeBytes, info.Size())
	}
	return system.ResizeExt4Image(context.Background(), system.NewRunner(), rootfsPath, sizeBytes)
}
// startImageBuildVM provisions host networking (tap device + NAT) and boots
// a temporary Firecracker VM on the cloned rootfs. On success it returns the
// VM identity and a cleanup func that kills the process, removes NAT rules,
// deletes the tap device, and unlinks the API socket. On failure it unwinds
// whatever was already set up, in reverse order, before returning.
func (d *Daemon) startImageBuildVM(ctx context.Context, spec imageBuildSpec) (imageBuildVM, func(context.Context) error, error) {
	if err := d.ensureBridge(ctx); err != nil {
		return imageBuildVM{}, nil, err
	}
	if err := d.ensureSocketDir(); err != nil {
		return imageBuildVM{}, nil, err
	}
	fcPath, err := d.firecrackerBinary()
	if err != nil {
		return imageBuildVM{}, nil, err
	}
	shortID := system.ShortID(spec.ID)
	guestIP, err := d.store.NextGuestIP(ctx, bridgePrefix(d.config.BridgeIP))
	if err != nil {
		return imageBuildVM{}, nil, err
	}
	vm := imageBuildVM{
		Name:      "image-build-" + shortID,
		GuestIP:   guestIP,
		TapDevice: "tap-img-" + shortID,
		APISock:   filepath.Join(d.layout.RuntimeDir, "img-"+shortID+".sock"),
	}
	// A stale socket from a previous run would make Firecracker fail to bind.
	if err := os.RemoveAll(vm.APISock); err != nil && !os.IsNotExist(err) {
		return imageBuildVM{}, nil, err
	}
	if err := d.createTap(ctx, vm.TapDevice); err != nil {
		return imageBuildVM{}, nil, err
	}
	if err := hostnat.Ensure(ctx, d.runner, vm.GuestIP, vm.TapDevice, true); err != nil {
		// NAT setup failed: undo the tap we just created.
		_, _ = d.runner.RunSudo(ctx, "ip", "link", "del", vm.TapDevice)
		return imageBuildVM{}, nil, err
	}
	// The VM must outlive the build request context; the cleanup func owns
	// its teardown instead.
	firecrackerCtx := context.Background()
	machine, err := firecracker.NewMachine(firecrackerCtx, firecracker.MachineConfig{
		BinaryPath:      fcPath,
		VMID:            spec.ID,
		SocketPath:      vm.APISock,
		LogPath:         spec.RootfsPath + ".firecracker.log",
		MetricsPath:     filepath.Join(filepath.Dir(spec.RootfsPath), "metrics.json"),
		KernelImagePath: spec.KernelPath,
		InitrdPath:      spec.InitrdPath,
		KernelArgs:      system.BuildBootArgs(vm.Name, vm.GuestIP, d.config.BridgeIP, d.config.DefaultDNS),
		RootDrivePath:   spec.RootfsPath,
		TapDevice:       vm.TapDevice,
		VCPUCount:       model.DefaultVCPUCount,
		MemoryMiB:       model.DefaultMemoryMiB,
		Logger:          d.logger,
	})
	if err != nil {
		// Unwind NAT and the tap device (false disables the NAT rules).
		_ = hostnat.Ensure(ctx, d.runner, vm.GuestIP, vm.TapDevice, false)
		_, _ = d.runner.RunSudo(ctx, "ip", "link", "del", vm.TapDevice)
		return imageBuildVM{}, nil, err
	}
	if err := machine.Start(firecrackerCtx); err != nil {
		_ = hostnat.Ensure(ctx, d.runner, vm.GuestIP, vm.TapDevice, false)
		_, _ = d.runner.RunSudo(ctx, "ip", "link", "del", vm.TapDevice)
		return imageBuildVM{}, nil, err
	}
	vm.PID = d.resolveFirecrackerPID(firecrackerCtx, machine, vm.APISock)
	if err := d.ensureSocketAccess(ctx, vm.APISock); err != nil {
		// The process is up by now, so it must be killed before unwinding
		// the network pieces.
		_ = d.killVMProcess(context.Background(), vm.PID)
		_ = hostnat.Ensure(ctx, d.runner, vm.GuestIP, vm.TapDevice, false)
		_, _ = d.runner.RunSudo(ctx, "ip", "link", "del", vm.TapDevice)
		return imageBuildVM{}, nil, err
	}
	cleanup := func(cleanupCtx context.Context) error {
		// Only kill the process if it is still this VM (PID + socket match),
		// then wait briefly for it to exit before tearing the network down.
		if vm.PID > 0 && system.ProcessRunning(vm.PID, vm.APISock) {
			_ = d.killVMProcess(cleanupCtx, vm.PID)
			_ = d.waitForExit(cleanupCtx, vm.PID, vm.APISock, 10*time.Second)
		}
		_ = hostnat.Ensure(cleanupCtx, d.runner, vm.GuestIP, vm.TapDevice, false)
		if vm.TapDevice != "" {
			_, _ = d.runner.RunSudo(cleanupCtx, "ip", "link", "del", vm.TapDevice)
		}
		if vm.APISock != "" {
			_ = os.Remove(vm.APISock)
		}
		// Teardown is best-effort: individual failures are ignored above.
		return nil
	}
	return vm, cleanup, nil
}
// shutdownImageBuildVM asks the guest to power off via Ctrl+Alt+Del and then
// waits up to 15 seconds for the Firecracker process to exit.
func (d *Daemon) shutdownImageBuildVM(ctx context.Context, vm imageBuildVM) error {
	record := model.VMRecord{
		Runtime: model.VMRuntime{APISockPath: vm.APISock},
	}
	err := d.sendCtrlAltDel(ctx, record)
	if err != nil {
		return err
	}
	return d.waitForExit(ctx, vm.PID, vm.APISock, 15*time.Second)
}
// buildProvisionScript renders the bash script that configures the guest
// during an image build: DNS, hostname, /etc/hosts, fstab entries for
// tmpfs /run and /tmp, package installation, optional Docker, and a default
// git branch. The script is piped to `bash -se` in the guest by RunScript.
// %%s in the Fprintf formats becomes a literal %s for the guest's printf;
// shellQuote/shellArray keep host-supplied values safe in single quotes.
func buildProvisionScript(vmName, dnsServer string, packages []string, installDocker bool) string {
	var script bytes.Buffer
	script.WriteString("set -euo pipefail\n")
	fmt.Fprintf(&script, "printf 'nameserver %%s\\n' %s > /etc/resolv.conf\n", shellQuote(dnsServer))
	fmt.Fprintf(&script, "printf '%%s\\n' %s > /etc/hostname\n", shellQuote(vmName))
	fmt.Fprintf(&script, "printf '127.0.0.1 localhost\\n127.0.1.1 %%s\\n' %s > /etc/hosts\n", shellQuote(vmName))
	script.WriteString("touch /etc/fstab\n")
	// Drop any /dev/vdb (/home) and /dev/vdc (/var) entries: the build VM's
	// extra drives must not leak into the final image's fstab.
	script.WriteString("sed -i '\\|^/dev/vdb[[:space:]]\\+/home[[:space:]]|d; \\|^/dev/vdc[[:space:]]\\+/var[[:space:]]|d' /etc/fstab\n")
	script.WriteString("if ! grep -q '^tmpfs /run ' /etc/fstab; then echo 'tmpfs /run tmpfs defaults,nodev,nosuid,mode=0755 0 0' >> /etc/fstab; fi\n")
	script.WriteString("if ! grep -q '^tmpfs /tmp ' /etc/fstab; then echo 'tmpfs /tmp tmpfs defaults,nodev,nosuid,mode=1777 0 0' >> /etc/fstab; fi\n")
	script.WriteString("apt-get update\n")
	script.WriteString("DEBIAN_FRONTEND=noninteractive apt-get -y upgrade\n")
	fmt.Fprintf(&script, "PACKAGES=%s\n", shellArray(packages))
	script.WriteString("DEBIAN_FRONTEND=noninteractive apt-get -y install \"${PACKAGES[@]}\"\n")
	if installDocker {
		// Prefer upstream docker-ce; fall back to Debian's docker.io if the
		// docker-ce install fails (e.g. no docker apt repo configured).
		script.WriteString("DEBIAN_FRONTEND=noninteractive apt-get -y remove containerd || true\n")
		script.WriteString("if ! DEBIAN_FRONTEND=noninteractive apt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin; then\n")
		script.WriteString(" DEBIAN_FRONTEND=noninteractive apt-get -y install docker.io\n")
		script.WriteString("fi\n")
		script.WriteString("if command -v systemctl >/dev/null 2>&1; then systemctl enable --now docker || true; fi\n")
	}
	script.WriteString("git config --system init.defaultBranch main\n")
	return script.String()
}
// buildModulesCommand returns the remote command that receives the modules
// tar stream on stdin (via StreamTar): it extracts into /lib/modules, runs
// depmod for the given kernel version directory, and installs module-load
// and sysctl config for Docker networking. The heredocs (EOF, SYSCTL) are
// quoted so nothing inside is expanded by the guest shell.
func buildModulesCommand(modulesBase string) string {
	return fmt.Sprintf("bash -se <<'EOF'\nset -euo pipefail\nmkdir -p /lib/modules\ntar -C /lib/modules -xf -\ndepmod -a %s\nmkdir -p /etc/modules-load.d\nprintf 'nf_tables\\nnft_chain_nat\\nveth\\nbr_netfilter\\noverlay\\n' > /etc/modules-load.d/docker-netfilter.conf\nmkdir -p /etc/sysctl.d\ncat > /etc/sysctl.d/99-docker.conf <<'SYSCTL'\nnet.bridge.bridge-nf-call-iptables = 1\nnet.bridge.bridge-nf-call-ip6tables = 1\nnet.ipv4.ip_forward = 1\nSYSCTL\nsysctl --system >/dev/null 2>&1 || true\nEOF", shellQuote(modulesBase))
}
// shellArray renders values as a bash array literal, e.g. ('a' 'b c'),
// with every element passed through shellQuote.
func shellArray(values []string) string {
	var b strings.Builder
	b.WriteByte('(')
	for i, v := range values {
		if i > 0 {
			b.WriteByte(' ')
		}
		b.WriteString(shellQuote(v))
	}
	b.WriteByte(')')
	return b.String()
}

// shellQuote wraps value in single quotes, escaping embedded single quotes
// with the standard '"'"' sequence so the result is safe for sh/bash.
func shellQuote(value string) string {
	escaped := strings.ReplaceAll(value, "'", `'"'"'`)
	return "'" + escaped + "'"
}
func writeBuildLog(w io.Writer, message string) error {
if w == nil {
return nil
}
_, err := fmt.Fprintf(w, "[image.build] %s\n", message)
return err
}
// packagesHash returns the hex-encoded SHA-256 of the manifest lines joined
// with newlines plus a trailing newline — i.e. the same digest `sha256sum`
// would print for the normalized manifest file.
func packagesHash(lines []string) string {
	payload := strings.Join(lines, "\n") + "\n"
	digest := sha256.Sum256([]byte(payload))
	return fmt.Sprintf("%x", digest)
}

View file

@ -4,12 +4,12 @@ import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"banger/internal/api"
"banger/internal/model"
"banger/internal/paths"
"banger/internal/system"
)
func (d *Daemon) BuildImage(ctx context.Context, params api.ImageBuildParams) (image model.Image, err error) {
@ -60,56 +60,40 @@ func (d *Daemon) BuildImage(ctx context.Context, params api.ImageBuildParams) (i
}
defer logFile.Close()
rootfsPath := filepath.Join(artifactDir, "rootfs.ext4")
script := d.config.CustomizeScript
if script == "" {
return model.Image{}, fmt.Errorf("customize script not configured; %s", paths.RuntimeBundleHint())
}
if _, err := os.Stat(script); err != nil {
return model.Image{}, fmt.Errorf("customize.sh not found at %s; %s", script, paths.RuntimeBundleHint())
}
args := []string{script, baseRootfs, "--out", rootfsPath}
if params.Size != "" {
args = append(args, "--size", params.Size)
}
kernelPath := params.KernelPath
if kernelPath == "" {
kernelPath = d.config.DefaultKernel
}
if kernelPath != "" {
args = append(args, "--kernel", kernelPath)
}
initrdPath := params.InitrdPath
if initrdPath == "" {
initrdPath = d.config.DefaultInitrd
}
if initrdPath != "" {
args = append(args, "--initrd", initrdPath)
}
modulesDir := params.ModulesDir
if modulesDir == "" {
modulesDir = d.config.DefaultModulesDir
}
if modulesDir != "" {
args = append(args, "--modules", modulesDir)
}
if params.Docker {
args = append(args, "--docker")
}
if err := d.validateImageBuildPrereqs(ctx, baseRootfs, kernelPath, initrdPath, modulesDir); err != nil {
if err := d.validateImageBuildPrereqs(ctx, baseRootfs, kernelPath, initrdPath, modulesDir, params.Size); err != nil {
return model.Image{}, err
}
op.stage("launch_helper", "script", script, "build_log_path", buildLogPath, "artifact_dir", artifactDir)
cmd := exec.CommandContext(ctx, "bash", args...)
cmd.Stdout = logFile
cmd.Stderr = logFile
cmd.Stdin = nil
cmd.Dir = d.layout.StateDir
cmd.Env = append(
os.Environ(),
"BANGER_RUNTIME_DIR="+d.config.RuntimeDir,
"BANGER_STATE_DIR="+filepath.Join(d.layout.StateDir, "image-build"),
)
if err := cmd.Run(); err != nil {
spec := imageBuildSpec{
ID: id,
Name: name,
BaseRootfs: baseRootfs,
RootfsPath: rootfsPath,
BuildLog: logFile,
KernelPath: kernelPath,
InitrdPath: initrdPath,
ModulesDir: modulesDir,
PackagesPath: d.config.DefaultPackagesFile,
InstallDocker: params.Docker,
Size: params.Size,
}
op.stage("launch_builder", "build_log_path", buildLogPath, "artifact_dir", artifactDir)
if err := d.runImageBuild(ctx, spec); err != nil {
_ = os.RemoveAll(artifactDir)
return model.Image{}, err
}
if err := writePackagesMetadata(rootfsPath, d.config.DefaultPackagesFile); err != nil {
_ = os.RemoveAll(artifactDir)
return model.Image{}, err
}
@ -138,6 +122,18 @@ func (d *Daemon) BuildImage(ctx context.Context, params api.ImageBuildParams) (i
return image, nil
}
// writePackagesMetadata records the sha256 of the normalized package
// manifest next to the rootfs image as "<rootfs>.packages.sha256".
// An empty rootfs or manifest path means there is nothing to record.
func writePackagesMetadata(rootfsPath, packagesPath string) error {
	if rootfsPath == "" {
		return nil
	}
	if packagesPath == "" {
		return nil
	}
	lines, err := system.ReadNormalizedLines(packagesPath)
	if err != nil {
		return err
	}
	sidecar := rootfsPath + ".packages.sha256"
	payload := []byte(packagesHash(lines) + "\n")
	return os.WriteFile(sidecar, payload, 0o644)
}
func (d *Daemon) DeleteImage(ctx context.Context, idOrName string) (model.Image, error) {
d.mu.Lock()
defer d.mu.Unlock()

View file

@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
@ -129,31 +128,30 @@ func TestBuildImagePreservesBuildLogOnFailure(t *testing.T) {
}
binDir := t.TempDir()
for _, name := range []string{"sudo", "ip", "curl", "ssh", "jq", "sha256sum", "e2fsck", "resize2fs"} {
for _, name := range []string{"sudo", "ip", "pgrep", "chown", "chmod", "kill", "iptables", "sysctl", "e2fsck", "resize2fs"} {
writeFakeExecutable(t, filepath.Join(binDir, name))
}
bashPath, err := exec.LookPath("bash")
if err != nil {
t.Fatalf("lookpath bash: %v", err)
}
bashWrapper := filepath.Join(binDir, "bash")
if err := os.WriteFile(bashWrapper, []byte(fmt.Sprintf("#!/bin/sh\nexec %q \"$@\"\n", bashPath)), 0o755); err != nil {
t.Fatalf("write bash wrapper: %v", err)
}
t.Setenv("PATH", binDir)
script := filepath.Join(t.TempDir(), "customize.sh")
scriptBody := "#!/bin/sh\necho helper-stdout\necho helper-stderr >&2\nexit 17\n"
if err := os.WriteFile(script, []byte(scriptBody), 0o755); err != nil {
t.Fatalf("write customize script: %v", err)
}
baseRootfs := filepath.Join(t.TempDir(), "base.ext4")
kernelPath := filepath.Join(t.TempDir(), "vmlinux")
for _, path := range []string{baseRootfs, kernelPath} {
packagesPath := filepath.Join(t.TempDir(), "packages.apt")
sshKeyPath := filepath.Join(t.TempDir(), "id_ed25519")
firecrackerBin := filepath.Join(t.TempDir(), "firecracker")
for _, path := range []string{baseRootfs, kernelPath, packagesPath, sshKeyPath} {
if err := os.WriteFile(path, []byte("artifact"), 0o644); err != nil {
t.Fatalf("write %s: %v", path, err)
}
}
if err := os.WriteFile(firecrackerBin, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
t.Fatalf("write %s: %v", firecrackerBin, err)
}
runner := &scriptedRunner{
t: t,
steps: []runnerStep{
{call: runnerCall{name: "ip", args: []string{"route", "show", "default"}}, out: []byte("default via 192.0.2.1 dev eth0\n")},
},
}
var buf bytes.Buffer
logger, _, err := newDaemonLogger(&buf, "info")
@ -167,11 +165,23 @@ func TestBuildImagePreservesBuildLogOnFailure(t *testing.T) {
},
config: model.DaemonConfig{
RuntimeDir: t.TempDir(),
CustomizeScript: script,
DefaultImageName: "default",
DefaultPackagesFile: packagesPath,
SSHKeyPath: sshKeyPath,
FirecrackerBin: firecrackerBin,
},
store: store,
runner: runner,
logger: logger,
imageBuild: func(ctx context.Context, spec imageBuildSpec) error {
if _, err := fmt.Fprintln(spec.BuildLog, "builder-stdout"); err != nil {
return err
}
if spec.BaseRootfs != baseRootfs || spec.KernelPath != kernelPath || spec.PackagesPath != packagesPath {
t.Fatalf("unexpected image build spec: %+v", spec)
}
return errors.New("builder failed")
},
}
_, err = d.BuildImage(ctx, api.ImageBuildParams{
@ -194,13 +204,14 @@ func TestBuildImagePreservesBuildLogOnFailure(t *testing.T) {
if readErr != nil {
t.Fatalf("read build log: %v", readErr)
}
if !strings.Contains(string(logData), "helper-stdout") || !strings.Contains(string(logData), "helper-stderr") {
t.Fatalf("build log = %q, want helper stdout/stderr", string(logData))
if !strings.Contains(string(logData), "builder-stdout") {
t.Fatalf("build log = %q, want builder output", string(logData))
}
runner.assertExhausted()
entries := parseLogEntries(t, buf.Bytes())
if !hasLogEntry(entries, map[string]string{"msg": "operation stage", "operation": "image.build", "stage": "launch_helper"}) {
t.Fatalf("expected launch_helper log, got %v", entries)
if !hasLogEntry(entries, map[string]string{"msg": "operation stage", "operation": "image.build", "stage": "launch_builder"}) {
t.Fatalf("expected launch_builder log, got %v", entries)
}
if !strings.Contains(buf.String(), buildLogs[0]) {
t.Fatalf("daemon logs = %q, want build log path %s", buf.String(), buildLogs[0])

View file

@ -13,7 +13,7 @@ func (d *Daemon) validateStartPrereqs(ctx context.Context, vm model.VMRecord, im
checks := system.NewPreflight()
hint := paths.RuntimeBundleHint()
for _, command := range []string{"sudo", "ip", "dmsetup", "losetup", "blockdev", "truncate", "pgrep", "ps", "chown", "chmod", "kill", "e2cp", "e2rm", "debugfs"} {
for _, command := range []string{"sudo", "ip", "dmsetup", "losetup", "blockdev", "truncate", "pgrep", "chown", "chmod", "kill", "e2cp", "e2rm", "debugfs"} {
checks.RequireCommand(command, toolHint(command))
}
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
@ -33,22 +33,34 @@ func (d *Daemon) validateStartPrereqs(ctx context.Context, vm model.VMRecord, im
return checks.Err("vm start preflight failed")
}
func (d *Daemon) validateImageBuildPrereqs(ctx context.Context, baseRootfs, kernelPath, initrdPath, modulesDir string) error {
func (d *Daemon) validateImageBuildPrereqs(ctx context.Context, baseRootfs, kernelPath, initrdPath, modulesDir, sizeSpec string) error {
checks := system.NewPreflight()
hint := paths.RuntimeBundleHint()
for _, command := range []string{"bash", "sudo", "ip", "curl", "ssh", "jq", "sha256sum", "e2fsck", "resize2fs"} {
for _, command := range []string{"sudo", "ip", "pgrep", "chown", "chmod", "kill"} {
checks.RequireCommand(command, toolHint(command))
}
checks.RequireExecutable(d.config.CustomizeScript, "customize.sh helper", hint)
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
checks.RequireFile(d.config.SSHKeyPath, "ssh private key", `set "ssh_key_path" or refresh the runtime bundle`)
checks.RequireFile(baseRootfs, "base rootfs image", `pass --base-rootfs or set "default_base_rootfs"`)
checks.RequireFile(kernelPath, "kernel image", `pass --kernel or set "default_kernel"`)
checks.RequireFile(d.config.DefaultPackagesFile, "package manifest", `set "default_packages_file" or refresh the runtime bundle`)
if strings.TrimSpace(initrdPath) != "" {
checks.RequireFile(initrdPath, "initrd image", `pass --initrd or set "default_initrd"`)
}
if strings.TrimSpace(modulesDir) != "" {
checks.RequireDir(modulesDir, "modules directory", `pass --modules or set "default_modules_dir"`)
}
if strings.TrimSpace(d.config.DefaultPackagesFile) != "" {
if _, err := system.ReadNormalizedLines(d.config.DefaultPackagesFile); err != nil {
checks.Addf("package manifest at %s is invalid: %v", d.config.DefaultPackagesFile, err)
}
}
if strings.TrimSpace(sizeSpec) != "" {
checks.RequireCommand("e2fsck", toolHint("e2fsck"))
checks.RequireCommand("resize2fs", toolHint("resize2fs"))
}
d.addNATPrereqs(ctx, checks)
return checks.Err("image build preflight failed")
}
@ -63,7 +75,11 @@ func (d *Daemon) validateWorkDiskResizePrereqs() error {
func (d *Daemon) addNATPrereqs(ctx context.Context, checks *system.Preflight) {
checks.RequireCommand("iptables", toolHint("iptables"))
checks.RequireCommand("sysctl", toolHint("sysctl"))
out, err := d.runner.Run(ctx, "ip", "route", "show", "default")
runner := d.runner
if runner == nil {
runner = system.NewRunner()
}
out, err := runner.Run(ctx, "ip", "route", "show", "default")
if err != nil {
checks.Addf("failed to inspect the default route for NAT: %v", err)
return
@ -83,7 +99,7 @@ func toolHint(command string) string {
return "install util-linux"
case "dmsetup":
return "install device-mapper"
case "pgrep", "ps", "kill":
case "pgrep", "kill":
return "install procps"
case "chown", "chmod", "cp", "truncate":
return "install coreutils"
@ -91,16 +107,6 @@ func toolHint(command string) string {
return "install e2fsprogs"
case "e2cp", "e2rm":
return "install e2tools"
case "curl":
return "install curl"
case "jq":
return "install jq"
case "sha256sum":
return "install coreutils"
case "ssh":
return "install openssh-client"
case "bash":
return "install bash"
case "sudo":
return "install sudo"
default:

View file

@ -703,14 +703,7 @@ func (d *Daemon) flattenNestedWorkHome(ctx context.Context, workMount string) er
if !exists(nestedHome) {
return nil
}
script := `set -e
src="$1"
dst="$2"
for path in "$src"/.[!.]* "$src"/..?* "$src"/*; do
[ -e "$path" ] || continue
cp -a "$path" "$dst"/
done`
if _, err := d.runner.RunSudo(ctx, "sh", "-c", script, "sh", nestedHome, workMount); err != nil {
if err := system.CopyDirContents(ctx, d.runner, nestedHome, workMount, true); err != nil {
return err
}
_, err := d.runner.RunSudo(ctx, "rm", "-rf", nestedHome)

View file

@ -102,12 +102,12 @@ func openLogFile(path string) (*os.File, error) {
}
func buildConfig(cfg MachineConfig) sdk.Config {
drives := sdk.NewDrivesBuilder(
cfg.RootDrivePath,
).
WithRootDrive(cfg.RootDrivePath, sdk.WithDriveID("rootfs"), sdk.WithReadOnly(false)).
AddDrive(cfg.WorkDrivePath, false, sdk.WithDriveID("work")).
Build()
drivesBuilder := sdk.NewDrivesBuilder(cfg.RootDrivePath).
WithRootDrive(cfg.RootDrivePath, sdk.WithDriveID("rootfs"), sdk.WithReadOnly(false))
if strings.TrimSpace(cfg.WorkDrivePath) != "" {
drivesBuilder = drivesBuilder.AddDrive(cfg.WorkDrivePath, false, sdk.WithDriveID("work"))
}
drives := drivesBuilder.Build()
return sdk.Config{
SocketPath: cfg.SocketPath,
@ -132,14 +132,7 @@ func buildConfig(cfg MachineConfig) sdk.Config {
}
func buildProcessRunner(cfg MachineConfig, logFile *os.File) *exec.Cmd {
script := strings.Join([]string{
"umask 000",
"exec " + shellQuote(cfg.BinaryPath) +
" --api-sock " + shellQuote(cfg.SocketPath) +
" --id " + shellQuote(cfg.VMID),
}, " && ")
cmd := exec.Command("sudo", "-n", "sh", "-c", script)
cmd := exec.Command("sudo", "-n", cfg.BinaryPath, "--api-sock", cfg.SocketPath, "--id", cfg.VMID)
cmd.Stdin = nil
if logFile != nil {
cmd.Stdout = logFile
@ -148,10 +141,6 @@ func buildProcessRunner(cfg MachineConfig, logFile *os.File) *exec.Cmd {
return cmd
}
func shellQuote(value string) string {
return "'" + strings.ReplaceAll(value, "'", `'"'"'`) + "'"
}
func newLogger(base *slog.Logger) *logrus.Entry {
logger := logrus.New()
logger.SetOutput(io.Discard)

View file

@ -58,7 +58,7 @@ func TestBuildConfig(t *testing.T) {
}
}
func TestBuildProcessRunnerUsesSudoWrapper(t *testing.T) {
func TestBuildProcessRunnerUsesDirectSudoCommand(t *testing.T) {
cmd := buildProcessRunner(MachineConfig{
BinaryPath: "/repo/firecracker",
SocketPath: "/tmp/fc.sock",
@ -68,14 +68,14 @@ func TestBuildProcessRunnerUsesSudoWrapper(t *testing.T) {
if cmd.Path != "/usr/bin/sudo" && cmd.Path != "sudo" {
t.Fatalf("command path = %q", cmd.Path)
}
if len(cmd.Args) != 5 {
if len(cmd.Args) != 7 {
t.Fatalf("args = %v", cmd.Args)
}
if cmd.Args[1] != "-n" || cmd.Args[2] != "sh" || cmd.Args[3] != "-c" {
t.Fatalf("args = %v", cmd.Args)
want := []string{"sudo", "-n", "/repo/firecracker", "--api-sock", "/tmp/fc.sock", "--id", "vm-1"}
for i, arg := range want {
if cmd.Args[i] != arg {
t.Fatalf("args[%d] = %q, want %q (all args: %v)", i, cmd.Args[i], arg, cmd.Args)
}
if want := "umask 000 && exec '/repo/firecracker' --api-sock '/tmp/fc.sock' --id 'vm-1'"; cmd.Args[4] != want {
t.Fatalf("script = %q, want %q", cmd.Args[4], want)
}
if cmd.Cancel != nil {
t.Fatal("process runner should not be tied to a request context")

170
internal/guest/ssh.go Normal file
View file

@ -0,0 +1,170 @@
package guest
import (
"archive/tar"
"context"
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
"time"
"golang.org/x/crypto/ssh"
)
// Client is a thin wrapper around an established SSH connection to a guest.
// Its zero value is safe to Close but not to use otherwise.
type Client struct {
	client *ssh.Client // underlying connection; nil for a zero-value Client
}
// WaitForSSH polls the guest's SSH endpoint until a full connection
// (including key-based auth) succeeds or ctx is cancelled. A non-positive
// interval defaults to one second between attempts.
//
// On cancellation the returned error joins ctx.Err() with the most recent
// dial failure so callers can see why the guest never became reachable;
// errors.Is(err, context.DeadlineExceeded) and friends still match.
func WaitForSSH(ctx context.Context, address, privateKeyPath string, interval time.Duration) error {
	if interval <= 0 {
		interval = time.Second
	}
	var lastErr error
	for {
		client, err := Dial(ctx, address, privateKeyPath)
		if err == nil {
			_ = client.Close()
			return nil
		}
		lastErr = err
		select {
		case <-ctx.Done():
			// Previously only ctx.Err() was returned, discarding the reason
			// every attempt failed.
			return errors.Join(ctx.Err(), lastErr)
		case <-time.After(interval):
		}
	}
}
// Dial connects to address over TCP and authenticates as root with the
// private key at privateKeyPath.
//
// The TCP dial honors ctx. The SSH handshake, however, is performed by
// ssh.NewClientConn, which does not watch ctx (and ClientConfig.Timeout only
// applies to ssh.Dial's own TCP connect), so the handshake is bounded with a
// deadline on the raw connection: ctx's deadline when set and sooner,
// otherwise 10 seconds. Host keys are not verified — the guest is a
// throwaway VM on a host-local bridge.
func Dial(ctx context.Context, address, privateKeyPath string) (*Client, error) {
	signer, err := privateKeySigner(privateKeyPath)
	if err != nil {
		return nil, err
	}
	config := &ssh.ClientConfig{
		User:            "root",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		Timeout:         10 * time.Second,
	}
	dialer := &net.Dialer{Timeout: 10 * time.Second}
	conn, err := dialer.DialContext(ctx, "tcp", address)
	if err != nil {
		return nil, err
	}
	// Bound the handshake so a wedged guest cannot hang us indefinitely.
	handshakeDeadline := time.Now().Add(10 * time.Second)
	if d, ok := ctx.Deadline(); ok && d.Before(handshakeDeadline) {
		handshakeDeadline = d
	}
	_ = conn.SetDeadline(handshakeDeadline)
	sshConn, chans, reqs, err := ssh.NewClientConn(conn, address, config)
	if err != nil {
		_ = conn.Close()
		return nil, err
	}
	// Clear the deadline so the established session is not killed by it.
	_ = conn.SetDeadline(time.Time{})
	client := ssh.NewClient(sshConn, chans, reqs)
	return &Client{client: client}, nil
}
// Close tears down the underlying SSH connection. It is safe to call on a
// nil receiver or a zero-value Client.
func (c *Client) Close() error {
	if c == nil {
		return nil
	}
	if c.client == nil {
		return nil
	}
	return c.client.Close()
}
// RunScript executes script in the guest by piping it to `bash -se` over a
// fresh SSH session: -s reads the script from stdin and -e aborts on the
// first failing command. Guest stdout/stderr stream to logWriter (may be nil).
func (c *Client) RunScript(ctx context.Context, script string, logWriter io.Writer) error {
	return c.runSession(ctx, "bash -se", strings.NewReader(script), logWriter)
}
// StreamTar archives sourceDir on the host and streams the tar bytes to
// remoteCommand's stdin in the guest (e.g. a remote `tar -x` pipeline).
// The archiver runs in a goroutine feeding an io.Pipe; closing the write end
// delivers EOF to the remote command. Errors from both sides are joined.
func (c *Client) StreamTar(ctx context.Context, sourceDir, remoteCommand string, logWriter io.Writer) error {
	reader, writer := io.Pipe()
	// Buffered so the goroutine can exit even if we return before receiving.
	writeErr := make(chan error, 1)
	go func() {
		writeErr <- writeTarArchive(writer, sourceDir)
		_ = writer.Close()
	}()
	runErr := c.runSession(ctx, remoteCommand, reader, logWriter)
	tarErr := <-writeErr
	return errors.Join(runErr, tarErr)
}
// runSession runs command in a new SSH session with stdin wired to the given
// reader and, when logWriter is non-nil, stdout and stderr streamed to it.
//
// Cancellation: an SSH session cannot be interrupted directly, so a watcher
// goroutine closes the WHOLE client when ctx is cancelled, which unblocks
// session.Run. NOTE(review): this makes the client unusable after a
// cancelled call — callers appear to dial a fresh client per build, but
// confirm before reusing a Client across cancellations.
func (c *Client) runSession(ctx context.Context, command string, stdin io.Reader, logWriter io.Writer) error {
	if c == nil || c.client == nil {
		return fmt.Errorf("ssh client is not connected")
	}
	session, err := c.client.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()
	session.Stdin = stdin
	if logWriter != nil {
		session.Stdout = logWriter
		session.Stderr = logWriter
	}
	// Buffered so the send after Run never blocks, even if the watcher
	// already exited via ctx.Done().
	done := make(chan error, 1)
	go func() {
		select {
		case <-ctx.Done():
			_ = c.client.Close()
		case <-done:
		}
	}()
	err = session.Run(command)
	done <- nil // stop the watcher goroutine
	return err
}
// privateKeySigner loads the PEM-encoded private key at path and parses it
// into an ssh.Signer for public-key authentication.
func privateKeySigner(path string) (ssh.Signer, error) {
	keyBytes, readErr := os.ReadFile(path)
	if readErr != nil {
		return nil, readErr
	}
	return ssh.ParsePrivateKey(keyBytes)
}
func writeTarArchive(dst io.Writer, sourceDir string) error {
tw := tar.NewWriter(dst)
defer tw.Close()
sourceDir = filepath.Clean(sourceDir)
rootName := filepath.Base(sourceDir)
return filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
name := rootName
if path != sourceDir {
relPath, err := filepath.Rel(sourceDir, path)
if err != nil {
return err
}
name = filepath.Join(rootName, relPath)
}
linkTarget := ""
if info.Mode()&os.ModeSymlink != 0 {
linkTarget, err = os.Readlink(path)
if err != nil {
return err
}
}
header, err := tar.FileInfoHeader(info, linkTarget)
if err != nil {
return err
}
header.Name = name
if err := tw.WriteHeader(header); err != nil {
return err
}
if !info.Mode().IsRegular() {
return nil
}
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(tw, file)
return err
})
}

View file

@ -0,0 +1,58 @@
package guest
import (
"archive/tar"
"bytes"
"io"
"os"
"path/filepath"
"testing"
)
// TestWriteTarArchiveKeepsTopLevelDirectory verifies the archive is rooted
// at the source directory's base name so extraction recreates it in place.
func TestWriteTarArchiveKeepsTopLevelDirectory(t *testing.T) {
	t.Parallel()
	sourceDir := filepath.Join(t.TempDir(), "6.8.0-test")
	if err := os.MkdirAll(filepath.Join(sourceDir, "kernel"), 0o755); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	if err := os.WriteFile(filepath.Join(sourceDir, "modules.dep"), []byte("deps"), 0o644); err != nil {
		t.Fatalf("WriteFile modules.dep: %v", err)
	}
	if err := os.WriteFile(filepath.Join(sourceDir, "kernel", "module.ko"), []byte("ko"), 0o644); err != nil {
		t.Fatalf("WriteFile module.ko: %v", err)
	}
	var archive bytes.Buffer
	if err := writeTarArchive(&archive, sourceDir); err != nil {
		t.Fatalf("writeTarArchive: %v", err)
	}
	// Every entry must live under the "6.8.0-test/" prefix.
	want := map[string]struct{}{
		"6.8.0-test":                  {},
		"6.8.0-test/modules.dep":      {},
		"6.8.0-test/kernel":           {},
		"6.8.0-test/kernel/module.ko": {},
	}
	reader := tar.NewReader(bytes.NewReader(archive.Bytes()))
	var names []string
	for {
		header, err := reader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatalf("tar.Next: %v", err)
		}
		names = append(names, header.Name)
	}
	if len(names) != len(want) {
		t.Fatalf("archive names = %v, want %d entries", names, len(want))
	}
	for _, name := range names {
		if _, ok := want[name]; !ok {
			t.Fatalf("unexpected archive entry %q in %v", name, names)
		}
	}
}

View file

@ -0,0 +1,65 @@
package policy
import (
"go/parser"
"go/token"
"io/fs"
"path/filepath"
"runtime"
"strings"
"testing"
)
// TestExecImportsStayInsideApprovedPackages walks every non-test Go
// file under internal/ and fails if a package outside the approved
// shell-out layer imports os/exec.
func TestExecImportsStayInsideApprovedPackages(t *testing.T) {
	t.Parallel()

	// Locate the repo root relative to this test file's path.
	_, thisFile, _, ok := runtime.Caller(0)
	if !ok {
		t.Fatal("runtime.Caller failed")
	}
	repoRoot := filepath.Clean(filepath.Join(filepath.Dir(thisFile), "..", ".."))

	fset := token.NewFileSet()
	var offenders []string
	walkErr := filepath.WalkDir(filepath.Join(repoRoot, "internal"), func(path string, entry fs.DirEntry, err error) error {
		switch {
		case err != nil:
			return err
		case entry.IsDir():
			return nil
		case filepath.Ext(path) != ".go" || strings.HasSuffix(path, "_test.go"):
			return nil
		}
		relPath, relErr := filepath.Rel(repoRoot, path)
		if relErr != nil {
			return relErr
		}
		if allowedExecImportPath(relPath) {
			return nil
		}
		// Imports are all we need, so skip parsing function bodies.
		parsed, parseErr := parser.ParseFile(fset, path, nil, parser.ImportsOnly)
		if parseErr != nil {
			return parseErr
		}
		for _, imp := range parsed.Imports {
			if imp.Path != nil && imp.Path.Value == `"os/exec"` {
				offenders = append(offenders, relPath)
				break
			}
		}
		return nil
	})
	if walkErr != nil {
		t.Fatalf("walk repo: %v", walkErr)
	}
	if len(offenders) != 0 {
		t.Fatalf("os/exec imports are only allowed in internal/cli, internal/firecracker, and internal/system; found %v", offenders)
	}
}
// allowedExecImportPath reports whether relPath (slash-separated,
// relative to the repo root) belongs to one of the packages permitted
// to import os/exec.
func allowedExecImportPath(relPath string) bool {
	approved := []string{
		"internal/cli/",
		"internal/firecracker/",
		"internal/system/",
	}
	for _, prefix := range approved {
		if strings.HasPrefix(relPath, prefix) {
			return true
		}
	}
	return false
}

74
internal/system/files.go Normal file
View file

@ -0,0 +1,74 @@
package system
import (
"fmt"
"io"
"os"
"strings"
"golang.org/x/sys/unix"
)
// CopyFilePreferClone copies sourcePath to a freshly created
// targetPath, using a reflink clone (FICLONE ioctl) when the
// filesystem supports it and falling back to a byte-for-byte copy
// otherwise. The target is created with O_EXCL, so it must not exist.
//
// The source's permission bits are re-applied with Chmod on both
// paths, because the mode given to OpenFile is filtered by the
// process umask; previously only the copy path did this, so cloned
// and copied files could end up with different permissions. The final
// Close error is also returned, since Close can surface deferred
// write errors on some filesystems.
func CopyFilePreferClone(sourcePath, targetPath string) error {
	source, err := os.Open(sourcePath)
	if err != nil {
		return err
	}
	defer source.Close()

	info, err := source.Stat()
	if err != nil {
		return err
	}

	target, err := os.OpenFile(targetPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, info.Mode().Perm())
	if err != nil {
		return err
	}

	if cloneErr := unix.IoctlFileClone(int(target.Fd()), int(source.Fd())); cloneErr != nil {
		// Clone unsupported (e.g. filesystem without reflink, or a
		// cross-filesystem pair): fall back to a regular copy from
		// the start of both files.
		if _, err := source.Seek(0, io.SeekStart); err != nil {
			target.Close()
			return err
		}
		if _, err := target.Seek(0, io.SeekStart); err != nil {
			target.Close()
			return err
		}
		if _, err := io.Copy(target, source); err != nil {
			target.Close()
			return err
		}
		if err := target.Sync(); err != nil {
			target.Close()
			return err
		}
	}

	// Restore the exact permission bits regardless of which path ran.
	if err := target.Chmod(info.Mode().Perm()); err != nil {
		target.Close()
		return err
	}
	return target.Close()
}
func ReadNormalizedLines(path string) ([]string, error) {
data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
var out []string
for _, line := range strings.Split(string(data), "\n") {
if strings.HasSuffix(line, "\r") {
line = strings.TrimSuffix(line, "\r")
}
if idx := strings.Index(line, "#"); idx >= 0 {
line = line[:idx]
}
line = strings.TrimSpace(line)
if line == "" {
continue
}
out = append(out, line)
}
if len(out) == 0 {
return nil, fmt.Errorf("file has no entries: %s", path)
}
return out, nil
}

View file

@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"io"
"math"
"os"
"os/exec"
"path/filepath"
@ -105,25 +106,48 @@ type ProcessStats struct {
}
func ReadProcessStats(ctx context.Context, pid int) (ProcessStats, error) {
_ = ctx
if pid <= 0 {
return ProcessStats{}, errors.New("pid is required")
}
runner := NewRunner()
out, err := runner.Run(ctx, "ps", "-p", strconv.Itoa(pid), "-o", "%cpu=,rss=,vsz=")
statData, err := os.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
if err != nil {
return ProcessStats{}, err
}
fields := strings.Fields(string(out))
if len(fields) < 3 {
return ProcessStats{}, fmt.Errorf("unexpected ps output: %q", string(out))
statmData, err := os.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "statm"))
if err != nil {
return ProcessStats{}, err
}
cpu, _ := strconv.ParseFloat(fields[0], 64)
rssKB, _ := strconv.ParseInt(fields[1], 10, 64)
vszKB, _ := strconv.ParseInt(fields[2], 10, 64)
uptimeData, err := os.ReadFile("/proc/uptime")
if err != nil {
return ProcessStats{}, err
}
procStat, err := parseProcStat(string(statData))
if err != nil {
return ProcessStats{}, err
}
memStat, err := parseProcStatm(string(statmData))
if err != nil {
return ProcessStats{}, err
}
uptimeSeconds, err := parseProcUptime(string(uptimeData))
if err != nil {
return ProcessStats{}, err
}
const ticksPerSecond = 100.0
elapsedSeconds := uptimeSeconds - (float64(procStat.startTicks) / ticksPerSecond)
cpuPercent := 0.0
if elapsedSeconds > 0 {
totalCPUSeconds := float64(procStat.userTicks+procStat.systemTicks) / ticksPerSecond
cpuPercent = math.Max(0, (totalCPUSeconds/elapsedSeconds)*100)
}
pageSize := int64(os.Getpagesize())
return ProcessStats{
CPUPercent: cpu,
RSSBytes: rssKB * 1024,
VSZBytes: vszKB * 1024,
CPUPercent: cpuPercent,
RSSBytes: memStat.residentPages * pageSize,
VSZBytes: memStat.sizePages * pageSize,
}, nil
}
@ -179,7 +203,7 @@ func CopyDirContents(ctx context.Context, runner CommandRunner, sourceDir, targe
}
func ResizeExt4Image(ctx context.Context, runner CommandRunner, path string, bytes int64) error {
if _, err := runner.Run(ctx, "truncate", "-s", strconv.FormatInt(bytes, 10), path); err != nil {
if err := os.Truncate(path, bytes); err != nil {
return err
}
if _, err := runner.Run(ctx, "e2fsck", "-p", "-f", path); err != nil {
@ -324,3 +348,67 @@ func CopyStream(dst io.Writer, cmd *exec.Cmd) error {
cmd.Stdin = os.Stdin
return cmd.Run()
}
// procStat carries the scheduler counters ReadProcessStats needs from
// /proc/<pid>/stat: CPU time split into user/system clock ticks plus
// the process start time, also in ticks since boot.
type procStat struct {
	userTicks   uint64
	systemTicks uint64
	startTicks  uint64
}

// procStatm carries the two leading columns of /proc/<pid>/statm:
// total program size and resident set size, both counted in pages.
type procStatm struct {
	sizePages     int64
	residentPages int64
}

// parseProcStat extracts utime, stime, and starttime from a raw
// /proc/<pid>/stat line. It scans from the last ')' so a command name
// containing spaces or parentheses cannot shift the later fields.
func parseProcStat(raw string) (procStat, error) {
	trimmed := strings.TrimSpace(raw)
	closing := strings.LastIndex(trimmed, ")")
	if closing == -1 || closing+2 >= len(trimmed) {
		return procStat{}, fmt.Errorf("unexpected /proc stat format: %q", trimmed)
	}
	// After ") " the first column is the state field (field 3), so
	// utime/stime/starttime (fields 14, 15, 22) land at 11, 12, 19.
	cols := strings.Fields(trimmed[closing+2:])
	if len(cols) < 20 {
		return procStat{}, fmt.Errorf("unexpected /proc stat field count: %q", trimmed)
	}
	var stats procStat
	var err error
	if stats.userTicks, err = strconv.ParseUint(cols[11], 10, 64); err != nil {
		return procStat{}, err
	}
	if stats.systemTicks, err = strconv.ParseUint(cols[12], 10, 64); err != nil {
		return procStat{}, err
	}
	if stats.startTicks, err = strconv.ParseUint(cols[19], 10, 64); err != nil {
		return procStat{}, err
	}
	return stats, nil
}

// parseProcStatm reads the size and resident columns of
// /proc/<pid>/statm, both expressed in pages.
func parseProcStatm(raw string) (procStatm, error) {
	cols := strings.Fields(strings.TrimSpace(raw))
	if len(cols) < 2 {
		return procStatm{}, fmt.Errorf("unexpected /proc statm format: %q", raw)
	}
	size, err := strconv.ParseInt(cols[0], 10, 64)
	if err != nil {
		return procStatm{}, err
	}
	resident, err := strconv.ParseInt(cols[1], 10, 64)
	if err != nil {
		return procStatm{}, err
	}
	return procStatm{sizePages: size, residentPages: resident}, nil
}

// parseProcUptime returns the first column of /proc/uptime: seconds
// since boot as a float.
func parseProcUptime(raw string) (float64, error) {
	cols := strings.Fields(strings.TrimSpace(raw))
	if len(cols) == 0 {
		return 0, fmt.Errorf("unexpected /proc uptime format: %q", raw)
	}
	return strconv.ParseFloat(cols[0], 64)
}

View file

@ -81,46 +81,64 @@ func TestResizeExt4ImageStopsAtFirstFailure(t *testing.T) {
tests := []struct {
name string
steps []systemStep
setup func(t *testing.T) string
steps func(path string) []systemStep
wantErr string
wantCalls int
}{
{
name: "truncate failure",
steps: []systemStep{
{call: systemCall{name: "truncate", args: []string{"-s", "4096", "/tmp/root.ext4"}}, err: errors.New("truncate failed")},
setup: func(t *testing.T) string {
return t.TempDir()
},
wantErr: "truncate failed",
wantCalls: 1,
wantErr: "",
wantCalls: 0,
},
{
name: "e2fsck failure",
steps: []systemStep{
{call: systemCall{name: "truncate", args: []string{"-s", "4096", "/tmp/root.ext4"}}},
{call: systemCall{name: "e2fsck", args: []string{"-p", "-f", "/tmp/root.ext4"}}, err: errors.New("e2fsck failed")},
steps: func(path string) []systemStep {
return []systemStep{
{call: systemCall{name: "e2fsck", args: []string{"-p", "-f", path}}, err: errors.New("e2fsck failed")},
}
},
wantErr: "e2fsck failed",
wantCalls: 2,
wantCalls: 1,
},
{
name: "resize2fs failure",
steps: []systemStep{
{call: systemCall{name: "truncate", args: []string{"-s", "4096", "/tmp/root.ext4"}}},
{call: systemCall{name: "e2fsck", args: []string{"-p", "-f", "/tmp/root.ext4"}}},
{call: systemCall{name: "resize2fs", args: []string{"/tmp/root.ext4"}}, err: errors.New("resize2fs failed")},
steps: func(path string) []systemStep {
return []systemStep{
{call: systemCall{name: "e2fsck", args: []string{"-p", "-f", path}}},
{call: systemCall{name: "resize2fs", args: []string{path}}, err: errors.New("resize2fs failed")},
}
},
wantErr: "resize2fs failed",
wantCalls: 3,
wantCalls: 2,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
runner := &scriptedRunner{t: t, steps: tt.steps}
err := ResizeExt4Image(context.Background(), runner, "/tmp/root.ext4", 4096)
if err == nil || !strings.Contains(err.Error(), tt.wantErr) {
path := "/tmp/root.ext4"
if tt.setup != nil {
path = tt.setup(t)
} else {
path = filepath.Join(t.TempDir(), "root.ext4")
if err := os.WriteFile(path, []byte("seed"), 0o644); err != nil {
t.Fatalf("WriteFile(%s): %v", path, err)
}
}
var steps []systemStep
if tt.steps != nil {
steps = tt.steps(path)
}
runner := &scriptedRunner{t: t, steps: steps}
err := ResizeExt4Image(context.Background(), runner, path, 4096)
if err == nil {
t.Fatal("ResizeExt4Image() succeeded, want error")
}
if tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) {
t.Fatalf("ResizeExt4Image() error = %v, want %q", err, tt.wantErr)
}
if len(runner.calls) != tt.wantCalls {
@ -131,6 +149,24 @@ func TestResizeExt4ImageStopsAtFirstFailure(t *testing.T) {
}
}
// TestReadNormalizedLines checks comment stripping, whitespace
// trimming, CRLF handling, and blank-line removal in one pass.
func TestReadNormalizedLines(t *testing.T) {
	t.Parallel()

	path := filepath.Join(t.TempDir(), "packages.apt")
	contents := []byte("\n# comment\n git \nless # trailing\n\r\ntmux\r\n")
	if err := os.WriteFile(path, contents, 0o644); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}

	got, err := ReadNormalizedLines(path)
	if err != nil {
		t.Fatalf("ReadNormalizedLines: %v", err)
	}

	want := []string{"git", "less", "tmux"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("lines = %v, want %v", got, want)
	}
}
func TestWriteExt4FileRemovesTempFileAndReturnsCopyError(t *testing.T) {
t.Parallel()
@ -218,6 +254,34 @@ func TestMountTempDirUsesLoopForRegularFilesAndCleanupUsesBackgroundContext(t *t
}
}
// TestParseProcHelpers exercises the /proc stat, statm, and uptime
// parsers against representative kernel-formatted input.
func TestParseProcHelpers(t *testing.T) {
	t.Parallel()

	const statLine = "1234 (firecracker) S 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22"
	stat, err := parseProcStat(statLine)
	if err != nil {
		t.Fatalf("parseProcStat: %v", err)
	}
	if stat.userTicks != 11 || stat.systemTicks != 12 || stat.startTicks != 19 {
		t.Fatalf("proc stat = %+v", stat)
	}

	statm, err := parseProcStatm("200 50 0 0 0 0 0")
	if err != nil {
		t.Fatalf("parseProcStatm: %v", err)
	}
	if statm.sizePages != 200 || statm.residentPages != 50 {
		t.Fatalf("proc statm = %+v", statm)
	}

	uptime, err := parseProcUptime("321.50 42.10")
	if err != nil {
		t.Fatalf("parseProcUptime: %v", err)
	}
	if uptime != 321.50 {
		t.Fatalf("uptime = %v, want 321.50", uptime)
	}
}
func TestMountTempDirRemovesTempDirWhenMountFails(t *testing.T) {
t.Parallel()