Speed up VM create with work seeds

Beat VM create wall time without changing VM semantics.

Generate a work-seed ext4 sidecar during image builds and rootfs rebuilds, then clone and resize that seed for each new VM instead of rebuilding /root from scratch. Plumb the new seed artifact through config, runtime metadata, store state, runtime-bundle defaults, doctor checks, and default-image reconciliation so older images still fall back cleanly.

Add a daemon TAP pool to keep idle bridge-attached devices warm, expose stage timing in lifecycle logs, add a create/SSH benchmark script plus Make target, and teach verify.sh that tap-pool-* devices are reusable capacity rather than cleanup leaks.

Validated with go test ./..., make build, ./verify.sh, and make bench-create ARGS="--runs 2".
This commit is contained in:
Thales Maciel 2026-03-18 21:22:12 -03:00
parent a14a80fd6b
commit c8d9a122f9
No known key found for this signature in database
GPG key ID: 33112E6833C34679
24 changed files with 695 additions and 44 deletions

View file

@@ -10,6 +10,7 @@
## Build, Test, and Development Commands ## Build, Test, and Development Commands
- `make build` builds `./banger`, `./bangerd`, and the bundled `./runtime/banger-vsock-pingd` guest helper. - `make build` builds `./banger`, `./bangerd`, and the bundled `./runtime/banger-vsock-pingd` guest helper.
- `make bench-create` benchmarks `vm create` and first-SSH readiness on the current host.
- `make runtime-bundle` bootstraps `./runtime/` from the archive referenced by `RUNTIME_MANIFEST`; the checked-in `runtime-bundle.toml` is only a template. - `make runtime-bundle` bootstraps `./runtime/` from the archive referenced by `RUNTIME_MANIFEST`; the checked-in `runtime-bundle.toml` is only a template.
- `banger` validates required host tools per command and reports actionable missing-tool errors; do not assume one workstation's package set. - `banger` validates required host tools per command and reports actionable missing-tool errors; do not assume one workstation's package set.
- `./banger vm create --name testbox` creates and starts a VM. - `./banger vm create --name testbox` creates and starts a VM.
@ -32,6 +33,8 @@
- Manual verification for VM lifecycle changes: `./banger vm create`, confirm SSH access, then stop/delete the VM. - Manual verification for VM lifecycle changes: `./banger vm create`, confirm SSH access, then stop/delete the VM.
- For host-integration changes, run `./banger doctor` as a quick readiness check before the live VM smoke. - For host-integration changes, run `./banger doctor` as a quick readiness check before the live VM smoke.
- Rebuilt images now include `mise`, `opencode`, `tmux-resurrect`/`tmux-continuum` defaults for `root`, and the `banger-vsock-pingd` service used by the SSH reminder path; if you change guest provisioning, document whether users need to rebuild `./runtime/rootfs-docker.ext4` or another base image to pick it up. - Rebuilt images now include `mise`, `opencode`, `tmux-resurrect`/`tmux-continuum` defaults for `root`, and the `banger-vsock-pingd` service used by the SSH reminder path; if you change guest provisioning, document whether users need to rebuild `./runtime/rootfs-docker.ext4` or another base image to pick it up.
- Rebuilt images also emit a `work-seed.ext4` sidecar used to speed up future VM creates. If you touch `/root` provisioning, verify both the rootfs and the work-seed output.
- The daemon may keep idle TAP devices in a pool for faster creates. Smoke tests should treat `tap-pool-*` devices as reusable capacity, not cleanup leaks.
- If you add a new operational workflow, document how to exercise it in `README.md`. - If you add a new operational workflow, document how to exercise it in `README.md`.
- For NAT changes, verify both guest outbound access and host rule cleanup, for example with `./verify.sh --nat`. - For NAT changes, verify both guest outbound access and host rule cleanup, for example with `./verify.sh --nat`.

View file

@ -16,13 +16,13 @@ RUNTIME_HELPERS := $(RUNTIME_SOURCE_DIR)/banger-vsock-pingd
GO_SOURCES := $(shell find cmd internal -type f -name '*.go' | sort) GO_SOURCES := $(shell find cmd internal -type f -name '*.go' | sort)
RUNTIME_EXECUTABLES := firecracker customize.sh packages.sh namegen banger-vsock-pingd RUNTIME_EXECUTABLES := firecracker customize.sh packages.sh namegen banger-vsock-pingd
RUNTIME_DATA_FILES := packages.apt id_ed25519 rootfs-docker.ext4 RUNTIME_DATA_FILES := packages.apt id_ed25519 rootfs-docker.ext4
RUNTIME_OPTIONAL_DATA_FILES := rootfs.ext4 bundle.json RUNTIME_OPTIONAL_DATA_FILES := rootfs.ext4 rootfs-docker.work-seed.ext4 bundle.json
RUNTIME_BOOT_FILES := wtf/root/boot/vmlinux-6.8.0-94-generic wtf/root/boot/initrd.img-6.8.0-94-generic RUNTIME_BOOT_FILES := wtf/root/boot/vmlinux-6.8.0-94-generic wtf/root/boot/initrd.img-6.8.0-94-generic
RUNTIME_MODULES_DIR := wtf/root/lib/modules/6.8.0-94-generic RUNTIME_MODULES_DIR := wtf/root/lib/modules/6.8.0-94-generic
.DEFAULT_GOAL := help .DEFAULT_GOAL := help
.PHONY: help build banger bangerd test fmt tidy clean rootfs install runtime-bundle runtime-package check-runtime .PHONY: help build banger bangerd test fmt tidy clean rootfs install runtime-bundle runtime-package check-runtime bench-create
help: help:
@printf '%s\n' \ @printf '%s\n' \
@ -30,6 +30,7 @@ help:
' make build Build ./banger and ./bangerd' \ ' make build Build ./banger and ./bangerd' \
' make runtime-bundle Fetch and unpack ./runtime from the archive referenced by $(RUNTIME_MANIFEST)' \ ' make runtime-bundle Fetch and unpack ./runtime from the archive referenced by $(RUNTIME_MANIFEST)' \
' make runtime-package Package $(RUNTIME_SOURCE_DIR) into $(RUNTIME_ARCHIVE) and print its SHA256' \ ' make runtime-package Package $(RUNTIME_SOURCE_DIR) into $(RUNTIME_ARCHIVE) and print its SHA256' \
' make bench-create Benchmark vm create and SSH readiness with scripts/bench-create.sh' \
' make install Build and install binaries plus the runtime bundle into $(DESTDIR)$(BINDIR) and $(DESTDIR)$(RUNTIMEDIR)' \ ' make install Build and install binaries plus the runtime bundle into $(DESTDIR)$(BINDIR) and $(DESTDIR)$(RUNTIMEDIR)' \
' make test Run go test ./...' \ ' make test Run go test ./...' \
' make fmt Format Go sources under cmd/ and internal/' \ ' make fmt Format Go sources under cmd/ and internal/' \
@ -67,6 +68,9 @@ runtime-bundle:
runtime-package: runtime-package:
$(GO) run ./cmd/runtimebundle package --manifest "$(RUNTIME_MANIFEST)" --runtime-dir "$(RUNTIME_SOURCE_DIR)" --out "$(RUNTIME_ARCHIVE)" $(GO) run ./cmd/runtimebundle package --manifest "$(RUNTIME_MANIFEST)" --runtime-dir "$(RUNTIME_SOURCE_DIR)" --out "$(RUNTIME_ARCHIVE)"
bench-create: build
bash ./scripts/bench-create.sh $(ARGS)
check-runtime: check-runtime:
@test -d "$(RUNTIME_SOURCE_DIR)" || { echo "missing runtime bundle directory: $(RUNTIME_SOURCE_DIR); run 'make runtime-bundle'" >&2; exit 1; } @test -d "$(RUNTIME_SOURCE_DIR)" || { echo "missing runtime bundle directory: $(RUNTIME_SOURCE_DIR); run 'make runtime-bundle'" >&2; exit 1; }
@for path in $(RUNTIME_EXECUTABLES) $(RUNTIME_DATA_FILES) $(RUNTIME_BOOT_FILES) $(RUNTIME_MODULES_DIR); do \ @for path in $(RUNTIME_EXECUTABLES) $(RUNTIME_DATA_FILES) $(RUNTIME_BOOT_FILES) $(RUNTIME_MODULES_DIR); do \

View file

@ -26,6 +26,8 @@ The bundle contains:
- `bundle.json` with the bundle's default kernel/initrd/modules/rootfs paths - `bundle.json` with the bundle's default kernel/initrd/modules/rootfs paths
- a kernel, initrd, and modules tree referenced by `bundle.json` - a kernel, initrd, and modules tree referenced by `bundle.json`
- `rootfs-docker.ext4` - `rootfs-docker.ext4`
- `rootfs-docker.work-seed.ext4` when present, used to seed `/root` quickly on
new VM creates
- `rootfs.ext4` when present - `rootfs.ext4` when present
- `packages.apt` - `packages.apt`
- `id_ed25519` - `id_ed25519`
@ -162,12 +164,14 @@ repo-built `./banger`. You can override either with `runtime_dir` in
Useful config keys: Useful config keys:
- `log_level` - `log_level`
- `runtime_dir` - `runtime_dir`
- `tap_pool_size`
- `firecracker_bin` - `firecracker_bin`
- `ssh_key_path` - `ssh_key_path`
- `namegen_path` - `namegen_path`
- `customize_script` (manual helper compatibility; `banger image build` is Go-native) - `customize_script` (manual helper compatibility; `banger image build` is Go-native)
- `vsock_ping_helper_path` - `vsock_ping_helper_path`
- `default_rootfs` - `default_rootfs`
- `default_work_seed`
- `default_base_rootfs` - `default_base_rootfs`
- `default_kernel` - `default_kernel`
- `default_initrd` - `default_initrd`
@ -207,7 +211,9 @@ Rebuilt images install a pinned `mise` at `/usr/local/bin/mise`, activate it
for bash login and interactive shells, install `opencode` through `mise`, for bash login and interactive shells, install `opencode` through `mise`,
configure `tmux-resurrect` plus `tmux-continuum` for `root` with periodic configure `tmux-resurrect` plus `tmux-continuum` for `root` with periodic
autosaves and manual-only restore by default, and bake in the autosaves and manual-only restore by default, and bake in the
`banger-vsock-pingd` systemd service used by the post-SSH reminder path. `banger-vsock-pingd` systemd service used by the post-SSH reminder path. They
also emit a `work-seed.ext4` sidecar that lets new VMs clone a prepared `/root`
work disk instead of rebuilding it from scratch on every create.
Show or delete images: Show or delete images:
```bash ```bash
@ -240,6 +246,12 @@ transparent `.vm` lookups on the host.
- VMs share a read-only base rootfs image. - VMs share a read-only base rootfs image.
- Each VM gets its own sparse writable system overlay for `/`. - Each VM gets its own sparse writable system overlay for `/`.
- Each VM gets its own persistent ext4 work disk mounted at `/root`. - Each VM gets its own persistent ext4 work disk mounted at `/root`.
- When an image has a `work-seed.ext4` sidecar, new VM creates clone that seed
  and resize it only when needed. Older images still work, but their VM creates
  are slower because `/root` must be built from scratch.
- The daemon can keep a small idle TAP pool warm in the background so VM create
does not need to synchronously create a fresh TAP every time. `tap_pool_size`
controls the pool depth.
## Architecture Notes ## Architecture Notes
The Go daemon is the primary control plane. VM host integrations such as the The Go daemon is the primary control plane. VM host integrations such as the
@ -261,6 +273,9 @@ To rebuild the source-checkout default image in `./runtime/rootfs-docker.ext4`:
make rootfs make rootfs
``` ```
That rebuild also regenerates `./runtime/rootfs-docker.work-seed.ext4`, which
the daemon uses to speed up future `vm create` calls.
If your runtime bundle does not include `./runtime/rootfs.ext4`, pass an If your runtime bundle does not include `./runtime/rootfs.ext4`, pass an
explicit base image instead: explicit base image instead:
```bash ```bash
@ -293,6 +308,22 @@ That writes `dist/banger-runtime.tar.gz` and prints its SHA256 so you can update
a local manifest copy before testing bootstrap changes or publishing the a local manifest copy before testing bootstrap changes or publishing the
archive elsewhere. archive elsewhere.
## Benchmarking Create Time
Benchmark the current host's `vm create` wall time plus first-SSH readiness:
```bash
make bench-create
```
Pass options through `ARGS`, for example:
```bash
make bench-create ARGS="--runs 3 --image docker-dev"
```
The benchmark prints JSON with:
- `create_ms`: wall time for `banger vm create`
- `ssh_ready_ms`: wall time from create start until `banger vm ssh <vm> -- true`
succeeds
## Remaining Shell Helpers ## Remaining Shell Helpers
The runtime VM lifecycle is managed through `banger`. The remaining shell scripts are not the primary user interface: The runtime VM lifecycle is managed through `banger`. The remaining shell scripts are not the primary user interface:
- `customize.sh`: manual reference flow for rootfs customization; `banger image build` is now Go-native, but the script still reads - `customize.sh`: manual reference flow for rootfs customization; `banger image build` is now Go-native, but the script still reads

View file

@ -174,6 +174,11 @@ if [[ -z "$OUT_ROOTFS" ]]; then
base_name="$(basename "$BASE_ROOTFS")" base_name="$(basename "$BASE_ROOTFS")"
OUT_ROOTFS="${base_dir}/docker-${base_name}" OUT_ROOTFS="${base_dir}/docker-${base_name}"
fi fi
if [[ "$OUT_ROOTFS" == *.ext4 ]]; then
WORK_SEED="${OUT_ROOTFS%.ext4}.work-seed.ext4"
else
WORK_SEED="${OUT_ROOTFS}.work-seed"
fi
if [[ ! -f "$KERNEL" ]]; then if [[ ! -f "$KERNEL" ]]; then
log "kernel not found: $KERNEL" log "kernel not found: $KERNEL"
exit 1 exit 1
@ -547,4 +552,6 @@ for _ in $(seq 1 200); do
sleep 0.05 sleep 0.05
done done
banger_write_rootfs_manifest_metadata "$OUT_ROOTFS" "$PACKAGES_HASH" banger_write_rootfs_manifest_metadata "$OUT_ROOTFS" "$PACKAGES_HASH"
log "building work seed $WORK_SEED"
"$BANGER_BIN" internal work-seed --rootfs "$OUT_ROOTFS" --out "$WORK_SEED"
log "done" log "done"

View file

@ -87,7 +87,34 @@ func newInternalCommand() *cobra.Command {
Hidden: true, Hidden: true,
RunE: helpNoArgs, RunE: helpNoArgs,
} }
cmd.AddCommand(newInternalNATCommand()) cmd.AddCommand(newInternalNATCommand(), newInternalWorkSeedCommand())
return cmd
}
func newInternalWorkSeedCommand() *cobra.Command {
var rootfsPath string
var outPath string
cmd := &cobra.Command{
Use: "work-seed",
Hidden: true,
Args: noArgsUsage("usage: banger internal work-seed --rootfs <path> [--out <path>]"),
RunE: func(cmd *cobra.Command, args []string) error {
rootfsPath = strings.TrimSpace(rootfsPath)
outPath = strings.TrimSpace(outPath)
if rootfsPath == "" {
return errors.New("rootfs path is required")
}
if outPath == "" {
outPath = system.WorkSeedPath(rootfsPath)
}
if err := system.EnsureSudo(cmd.Context()); err != nil {
return err
}
return system.BuildWorkSeedImage(cmd.Context(), system.NewRunner(), rootfsPath, outPath)
},
}
cmd.Flags().StringVar(&rootfsPath, "rootfs", "", "rootfs image path")
cmd.Flags().StringVar(&outPath, "out", "", "output work-seed image path")
return cmd return cmd
} }

View file

@ -4,6 +4,7 @@ import (
"errors" "errors"
"os" "os"
"path/filepath" "path/filepath"
"strings"
"time" "time"
toml "github.com/pelletier/go-toml" toml "github.com/pelletier/go-toml"
@ -22,6 +23,7 @@ type fileConfig struct {
NamegenPath string `toml:"namegen_path"` NamegenPath string `toml:"namegen_path"`
CustomizeScript string `toml:"customize_script"` CustomizeScript string `toml:"customize_script"`
VSockPingHelper string `toml:"vsock_ping_helper_path"` VSockPingHelper string `toml:"vsock_ping_helper_path"`
DefaultWorkSeed string `toml:"default_work_seed"`
DefaultImageName string `toml:"default_image_name"` DefaultImageName string `toml:"default_image_name"`
DefaultRootfs string `toml:"default_rootfs"` DefaultRootfs string `toml:"default_rootfs"`
DefaultBaseRootfs string `toml:"default_base_rootfs"` DefaultBaseRootfs string `toml:"default_base_rootfs"`
@ -35,6 +37,7 @@ type fileConfig struct {
BridgeName string `toml:"bridge_name"` BridgeName string `toml:"bridge_name"`
BridgeIP string `toml:"bridge_ip"` BridgeIP string `toml:"bridge_ip"`
CIDR string `toml:"cidr"` CIDR string `toml:"cidr"`
TapPoolSize int `toml:"tap_pool_size"`
DefaultDNS string `toml:"default_dns"` DefaultDNS string `toml:"default_dns"`
} }
@ -47,6 +50,7 @@ func Load(layout paths.Layout) (model.DaemonConfig, error) {
BridgeName: model.DefaultBridgeName, BridgeName: model.DefaultBridgeName,
BridgeIP: model.DefaultBridgeIP, BridgeIP: model.DefaultBridgeIP,
CIDR: model.DefaultCIDR, CIDR: model.DefaultCIDR,
TapPoolSize: 4,
DefaultDNS: model.DefaultDNS, DefaultDNS: model.DefaultDNS,
DefaultImageName: "default", DefaultImageName: "default",
} }
@ -91,6 +95,9 @@ func Load(layout paths.Layout) (model.DaemonConfig, error) {
if file.VSockPingHelper != "" { if file.VSockPingHelper != "" {
cfg.VSockPingHelperPath = file.VSockPingHelper cfg.VSockPingHelperPath = file.VSockPingHelper
} }
if file.DefaultWorkSeed != "" {
cfg.DefaultWorkSeed = file.DefaultWorkSeed
}
if file.DefaultImageName != "" { if file.DefaultImageName != "" {
cfg.DefaultImageName = file.DefaultImageName cfg.DefaultImageName = file.DefaultImageName
} }
@ -121,6 +128,9 @@ func Load(layout paths.Layout) (model.DaemonConfig, error) {
if file.CIDR != "" { if file.CIDR != "" {
cfg.CIDR = file.CIDR cfg.CIDR = file.CIDR
} }
if file.TapPoolSize > 0 {
cfg.TapPoolSize = file.TapPoolSize
}
if file.DefaultDNS != "" { if file.DefaultDNS != "" {
cfg.DefaultDNS = file.DefaultDNS cfg.DefaultDNS = file.DefaultDNS
} }
@ -176,6 +186,9 @@ func applyRuntimeDefaults(cfg *model.DaemonConfig) error {
cfg.DefaultRootfs, cfg.DefaultRootfs,
) )
} }
if cfg.DefaultWorkSeed == "" && cfg.DefaultRootfs != "" {
cfg.DefaultWorkSeed = firstExistingRuntimePath(associatedWorkSeedPath(cfg.DefaultRootfs))
}
return nil return nil
} }
@ -185,6 +198,7 @@ func applyBundleMetadataDefaults(cfg *model.DaemonConfig, runtimeDir string, met
cfg.NamegenPath = defaultRuntimePath(cfg.NamegenPath, runtimeDir, meta.NamegenPath) cfg.NamegenPath = defaultRuntimePath(cfg.NamegenPath, runtimeDir, meta.NamegenPath)
cfg.CustomizeScript = defaultRuntimePath(cfg.CustomizeScript, runtimeDir, meta.CustomizeScript) cfg.CustomizeScript = defaultRuntimePath(cfg.CustomizeScript, runtimeDir, meta.CustomizeScript)
cfg.VSockPingHelperPath = defaultRuntimePath(cfg.VSockPingHelperPath, runtimeDir, meta.VSockPingHelperPath) cfg.VSockPingHelperPath = defaultRuntimePath(cfg.VSockPingHelperPath, runtimeDir, meta.VSockPingHelperPath)
cfg.DefaultWorkSeed = defaultRuntimePath(cfg.DefaultWorkSeed, runtimeDir, meta.DefaultWorkSeed)
cfg.DefaultKernel = defaultRuntimePath(cfg.DefaultKernel, runtimeDir, meta.DefaultKernel) cfg.DefaultKernel = defaultRuntimePath(cfg.DefaultKernel, runtimeDir, meta.DefaultKernel)
cfg.DefaultInitrd = defaultRuntimePath(cfg.DefaultInitrd, runtimeDir, meta.DefaultInitrd) cfg.DefaultInitrd = defaultRuntimePath(cfg.DefaultInitrd, runtimeDir, meta.DefaultInitrd)
cfg.DefaultModulesDir = defaultRuntimePath(cfg.DefaultModulesDir, runtimeDir, meta.DefaultModulesDir) cfg.DefaultModulesDir = defaultRuntimePath(cfg.DefaultModulesDir, runtimeDir, meta.DefaultModulesDir)
@ -199,6 +213,7 @@ func applyLegacyRuntimeDefaults(cfg *model.DaemonConfig) {
cfg.NamegenPath = defaultRuntimePath(cfg.NamegenPath, cfg.RuntimeDir, "namegen") cfg.NamegenPath = defaultRuntimePath(cfg.NamegenPath, cfg.RuntimeDir, "namegen")
cfg.CustomizeScript = defaultRuntimePath(cfg.CustomizeScript, cfg.RuntimeDir, "customize.sh") cfg.CustomizeScript = defaultRuntimePath(cfg.CustomizeScript, cfg.RuntimeDir, "customize.sh")
cfg.VSockPingHelperPath = defaultRuntimePath(cfg.VSockPingHelperPath, cfg.RuntimeDir, "banger-vsock-pingd") cfg.VSockPingHelperPath = defaultRuntimePath(cfg.VSockPingHelperPath, cfg.RuntimeDir, "banger-vsock-pingd")
cfg.DefaultWorkSeed = defaultRuntimePath(cfg.DefaultWorkSeed, cfg.RuntimeDir, "rootfs-docker.work-seed.ext4")
cfg.DefaultKernel = defaultRuntimePath(cfg.DefaultKernel, cfg.RuntimeDir, "wtf/root/boot/vmlinux-6.8.0-94-generic") cfg.DefaultKernel = defaultRuntimePath(cfg.DefaultKernel, cfg.RuntimeDir, "wtf/root/boot/vmlinux-6.8.0-94-generic")
cfg.DefaultInitrd = defaultRuntimePath(cfg.DefaultInitrd, cfg.RuntimeDir, "wtf/root/boot/initrd.img-6.8.0-94-generic") cfg.DefaultInitrd = defaultRuntimePath(cfg.DefaultInitrd, cfg.RuntimeDir, "wtf/root/boot/initrd.img-6.8.0-94-generic")
cfg.DefaultModulesDir = defaultRuntimePath(cfg.DefaultModulesDir, cfg.RuntimeDir, "wtf/root/lib/modules/6.8.0-94-generic") cfg.DefaultModulesDir = defaultRuntimePath(cfg.DefaultModulesDir, cfg.RuntimeDir, "wtf/root/lib/modules/6.8.0-94-generic")
@ -223,3 +238,14 @@ func firstExistingRuntimePath(paths ...string) string {
} }
return "" return ""
} }
func associatedWorkSeedPath(rootfsPath string) string {
rootfsPath = strings.TrimSpace(rootfsPath)
if rootfsPath == "" {
return ""
}
if strings.HasSuffix(rootfsPath, ".ext4") {
return strings.TrimSuffix(rootfsPath, ".ext4") + ".work-seed.ext4"
}
return rootfsPath + ".work-seed"
}

View file

@ -20,6 +20,7 @@ func TestLoadDerivesArtifactPathsFromRuntimeDir(t *testing.T) {
VSockPingHelperPath: "bin/banger-vsock-pingd", VSockPingHelperPath: "bin/banger-vsock-pingd",
DefaultPackages: "config/packages.apt", DefaultPackages: "config/packages.apt",
DefaultRootfs: "images/rootfs-docker.ext4", DefaultRootfs: "images/rootfs-docker.ext4",
DefaultWorkSeed: "images/rootfs-docker.work-seed.ext4",
DefaultKernel: "kernels/vmlinux", DefaultKernel: "kernels/vmlinux",
DefaultInitrd: "kernels/initrd.img", DefaultInitrd: "kernels/initrd.img",
DefaultModulesDir: "modules/current", DefaultModulesDir: "modules/current",
@ -32,6 +33,7 @@ func TestLoadDerivesArtifactPathsFromRuntimeDir(t *testing.T) {
meta.VSockPingHelperPath, meta.VSockPingHelperPath,
meta.DefaultPackages, meta.DefaultPackages,
meta.DefaultRootfs, meta.DefaultRootfs,
meta.DefaultWorkSeed,
meta.DefaultKernel, meta.DefaultKernel,
meta.DefaultInitrd, meta.DefaultInitrd,
filepath.Join(meta.DefaultModulesDir, "modules.dep"), filepath.Join(meta.DefaultModulesDir, "modules.dep"),
@ -79,6 +81,9 @@ func TestLoadDerivesArtifactPathsFromRuntimeDir(t *testing.T) {
if cfg.DefaultRootfs != filepath.Join(runtimeDir, meta.DefaultRootfs) { if cfg.DefaultRootfs != filepath.Join(runtimeDir, meta.DefaultRootfs) {
t.Fatalf("DefaultRootfs = %q", cfg.DefaultRootfs) t.Fatalf("DefaultRootfs = %q", cfg.DefaultRootfs)
} }
if cfg.DefaultWorkSeed != filepath.Join(runtimeDir, meta.DefaultWorkSeed) {
t.Fatalf("DefaultWorkSeed = %q", cfg.DefaultWorkSeed)
}
if cfg.DefaultBaseRootfs != filepath.Join(runtimeDir, meta.DefaultRootfs) { if cfg.DefaultBaseRootfs != filepath.Join(runtimeDir, meta.DefaultRootfs) {
t.Fatalf("DefaultBaseRootfs = %q", cfg.DefaultBaseRootfs) t.Fatalf("DefaultBaseRootfs = %q", cfg.DefaultBaseRootfs)
} }
@ -106,6 +111,7 @@ func TestLoadFallsBackToLegacyRuntimeLayoutWithoutBundleMetadata(t *testing.T) {
"banger-vsock-pingd", "banger-vsock-pingd",
"packages.apt", "packages.apt",
"rootfs-docker.ext4", "rootfs-docker.ext4",
"rootfs-docker.work-seed.ext4",
"wtf/root/boot/vmlinux-6.8.0-94-generic", "wtf/root/boot/vmlinux-6.8.0-94-generic",
"wtf/root/boot/initrd.img-6.8.0-94-generic", "wtf/root/boot/initrd.img-6.8.0-94-generic",
"wtf/root/lib/modules/6.8.0-94-generic/modules.dep", "wtf/root/lib/modules/6.8.0-94-generic/modules.dep",
@ -131,6 +137,9 @@ func TestLoadFallsBackToLegacyRuntimeLayoutWithoutBundleMetadata(t *testing.T) {
if cfg.VSockPingHelperPath != filepath.Join(runtimeDir, "banger-vsock-pingd") { if cfg.VSockPingHelperPath != filepath.Join(runtimeDir, "banger-vsock-pingd") {
t.Fatalf("VSockPingHelperPath = %q", cfg.VSockPingHelperPath) t.Fatalf("VSockPingHelperPath = %q", cfg.VSockPingHelperPath)
} }
if cfg.DefaultWorkSeed != filepath.Join(runtimeDir, "rootfs-docker.work-seed.ext4") {
t.Fatalf("DefaultWorkSeed = %q", cfg.DefaultWorkSeed)
}
if cfg.DefaultKernel != filepath.Join(runtimeDir, "wtf/root/boot/vmlinux-6.8.0-94-generic") { if cfg.DefaultKernel != filepath.Join(runtimeDir, "wtf/root/boot/vmlinux-6.8.0-94-generic") {
t.Fatalf("DefaultKernel = %q", cfg.DefaultKernel) t.Fatalf("DefaultKernel = %q", cfg.DefaultKernel)
} }

View file

@ -4,6 +4,7 @@ import (
"context" "context"
"errors" "errors"
"net" "net"
"os"
"strings" "strings"
"banger/internal/firecracker" "banger/internal/firecracker"
@ -150,10 +151,21 @@ type workDiskCapability struct{}
func (workDiskCapability) Name() string { return "work-disk" } func (workDiskCapability) Name() string { return "work-disk" }
func (workDiskCapability) AddStartPreflight(_ context.Context, _ *Daemon, checks *system.Preflight, vm model.VMRecord, _ model.Image) { func (workDiskCapability) AddStartPreflight(_ context.Context, _ *Daemon, checks *system.Preflight, vm model.VMRecord, image model.Image) {
if exists(vm.Runtime.WorkDiskPath) { if exists(vm.Runtime.WorkDiskPath) {
return return
} }
imageSeed := ""
if image.RootfsPath != "" {
imageSeed = image.WorkSeedPath
}
if exists(imageSeed) {
if info, err := os.Stat(imageSeed); err == nil && vm.Spec.WorkDiskSizeBytes > info.Size() {
checks.RequireCommand("e2fsck", toolHint("e2fsck"))
checks.RequireCommand("resize2fs", toolHint("resize2fs"))
}
return
}
for _, command := range []string{"mkfs.ext4", "mount", "umount", "cp"} { for _, command := range []string{"mkfs.ext4", "mount", "umount", "cp"} {
checks.RequireCommand(command, toolHint(command)) checks.RequireCommand(command, toolHint(command))
} }
@ -178,16 +190,23 @@ func (workDiskCapability) ContributeMachine(cfg *firecracker.MachineConfig, vm m
}) })
} }
func (workDiskCapability) PrepareHost(ctx context.Context, d *Daemon, vm *model.VMRecord, _ model.Image) error { func (workDiskCapability) PrepareHost(ctx context.Context, d *Daemon, vm *model.VMRecord, image model.Image) error {
return d.ensureWorkDisk(ctx, vm) return d.ensureWorkDisk(ctx, vm, image)
} }
func (workDiskCapability) AddDoctorChecks(_ context.Context, _ *Daemon, report *system.Report) { func (workDiskCapability) AddDoctorChecks(_ context.Context, d *Daemon, report *system.Report) {
if strings.TrimSpace(d.config.DefaultWorkSeed) != "" && exists(d.config.DefaultWorkSeed) {
checks := system.NewPreflight()
checks.RequireFile(d.config.DefaultWorkSeed, "default work seed image", `rebuild the default runtime rootfs to regenerate the /root seed`)
report.AddPreflight("feature /root work disk", checks, "seeded /root work disk artifact available")
return
}
checks := system.NewPreflight() checks := system.NewPreflight()
for _, command := range []string{"mkfs.ext4", "mount", "umount", "cp"} { for _, command := range []string{"mkfs.ext4", "mount", "umount", "cp"} {
checks.RequireCommand(command, toolHint(command)) checks.RequireCommand(command, toolHint(command))
} }
report.AddPreflight("feature /root work disk", checks, "guest /root work disk tooling available") report.AddPreflight("feature /root work disk", checks, "fallback /root work disk tooling available")
report.AddWarn("feature /root work disk", "default image has no work-seed artifact; new VM creates will be slower until the image is rebuilt")
} }
type dnsCapability struct{} type dnsCapability struct{}

View file

@ -34,6 +34,9 @@ type Daemon struct {
mu sync.Mutex mu sync.Mutex
vmLocksMu sync.Mutex vmLocksMu sync.Mutex
vmLocks map[string]*sync.Mutex vmLocks map[string]*sync.Mutex
tapPoolMu sync.Mutex
tapPool []string
tapPoolNext int
closing chan struct{} closing chan struct{}
once sync.Once once sync.Once
pid int pid int
@ -92,6 +95,11 @@ func Open(ctx context.Context) (d *Daemon, err error) {
d.logger.Error("daemon open failed", "stage", "reconcile", "error", err.Error()) d.logger.Error("daemon open failed", "stage", "reconcile", "error", err.Error())
return nil, err return nil, err
} }
if err = d.initializeTapPool(ctx); err != nil {
d.logger.Error("daemon open failed", "stage", "initialize_tap_pool", "error", err.Error())
return nil, err
}
go d.ensureTapPool(context.Background())
return d, nil return d, nil
} }
@ -436,7 +444,7 @@ func (d *Daemon) ensureDefaultImage(ctx context.Context) error {
return err return err
} }
if d.logger != nil { if d.logger != nil {
d.logger.Info("default image reconciled", append(imageLogAttrs(updated), "previous_rootfs_path", image.RootfsPath, "previous_kernel_path", image.KernelPath)...) d.logger.Info("default image reconciled", append(imageLogAttrs(updated), "previous_rootfs_path", image.RootfsPath, "previous_work_seed_path", image.WorkSeedPath, "previous_kernel_path", image.KernelPath)...)
} }
return nil return nil
case errors.Is(err, sql.ErrNoRows): case errors.Is(err, sql.ErrNoRows):
@ -471,6 +479,7 @@ func (d *Daemon) desiredDefaultImage() (model.Image, bool) {
Managed: false, Managed: false,
ArtifactDir: "", ArtifactDir: "",
RootfsPath: rootfs, RootfsPath: rootfs,
WorkSeedPath: d.config.DefaultWorkSeed,
KernelPath: kernel, KernelPath: kernel,
InitrdPath: d.config.DefaultInitrd, InitrdPath: d.config.DefaultInitrd,
ModulesDir: d.config.DefaultModulesDir, ModulesDir: d.config.DefaultModulesDir,
@ -484,6 +493,7 @@ func defaultImageMatches(current, desired model.Image) bool {
current.Managed == desired.Managed && current.Managed == desired.Managed &&
current.ArtifactDir == desired.ArtifactDir && current.ArtifactDir == desired.ArtifactDir &&
current.RootfsPath == desired.RootfsPath && current.RootfsPath == desired.RootfsPath &&
current.WorkSeedPath == desired.WorkSeedPath &&
current.KernelPath == desired.KernelPath && current.KernelPath == desired.KernelPath &&
current.InitrdPath == desired.InitrdPath && current.InitrdPath == desired.InitrdPath &&
current.ModulesDir == desired.ModulesDir && current.ModulesDir == desired.ModulesDir &&

View file

@ -0,0 +1,92 @@
package daemon
import (
"context"
"errors"
"os"
"path/filepath"
"strconv"
"testing"
"banger/internal/model"
)
func TestEnsureWorkDiskClonesSeedImageAndResizes(t *testing.T) {
t.Parallel()
vmDir := t.TempDir()
seedPath := filepath.Join(t.TempDir(), "root.work-seed.ext4")
if err := os.WriteFile(seedPath, []byte("seed-data"), 0o644); err != nil {
t.Fatalf("WriteFile(seed): %v", err)
}
workDiskPath := filepath.Join(vmDir, "root.ext4")
runner := &scriptedRunner{
t: t,
steps: []runnerStep{
{call: runnerCall{name: "e2fsck", args: []string{"-p", "-f", workDiskPath}}},
{call: runnerCall{name: "resize2fs", args: []string{workDiskPath}}},
},
}
d := &Daemon{runner: runner}
vm := testVM("seeded", "image-seeded", "172.16.0.60")
vm.Runtime.WorkDiskPath = workDiskPath
vm.Spec.WorkDiskSizeBytes = 2 * 1024 * 1024
image := testImage("image-seeded")
image.WorkSeedPath = seedPath
if err := d.ensureWorkDisk(context.Background(), &vm, image); err != nil {
t.Fatalf("ensureWorkDisk: %v", err)
}
runner.assertExhausted()
info, err := os.Stat(workDiskPath)
if err != nil {
t.Fatalf("Stat(work disk): %v", err)
}
if info.Size() != vm.Spec.WorkDiskSizeBytes {
t.Fatalf("work disk size = %d, want %d", info.Size(), vm.Spec.WorkDiskSizeBytes)
}
}
func TestTapPoolWarmsAndReusesIdleTap(t *testing.T) {
t.Parallel()
runner := &scriptedRunner{
t: t,
steps: []runnerStep{
{call: runnerCall{name: "ip", args: []string{"link", "show", "tap-pool-0"}}, err: errors.New("exit status 1")},
sudoStep("", nil, "ip", "tuntap", "add", "dev", "tap-pool-0", "mode", "tap", "user", strconv.Itoa(os.Getuid()), "group", strconv.Itoa(os.Getgid())),
sudoStep("", nil, "ip", "link", "set", "tap-pool-0", "master", model.DefaultBridgeName),
sudoStep("", nil, "ip", "link", "set", "tap-pool-0", "up"),
sudoStep("", nil, "ip", "link", "set", model.DefaultBridgeName, "up"),
},
}
d := &Daemon{
runner: runner,
config: model.DaemonConfig{
BridgeName: model.DefaultBridgeName,
TapPoolSize: 1,
},
closing: make(chan struct{}),
}
d.ensureTapPool(context.Background())
tapName, err := d.acquireTap(context.Background(), "tap-fallback")
if err != nil {
t.Fatalf("acquireTap: %v", err)
}
if tapName != "tap-pool-0" {
t.Fatalf("tapName = %q, want tap-pool-0", tapName)
}
if err := d.releaseTap(context.Background(), tapName); err != nil {
t.Fatalf("releaseTap: %v", err)
}
tapName, err = d.acquireTap(context.Background(), "tap-fallback")
if err != nil {
t.Fatalf("acquireTap second time: %v", err)
}
if tapName != "tap-pool-0" {
t.Fatalf("tapName second = %q, want tap-pool-0", tapName)
}
runner.assertExhausted()
}

View file

@ -60,6 +60,7 @@ func (d *Daemon) BuildImage(ctx context.Context, params api.ImageBuildParams) (i
} }
defer logFile.Close() defer logFile.Close()
rootfsPath := filepath.Join(artifactDir, "rootfs.ext4") rootfsPath := filepath.Join(artifactDir, "rootfs.ext4")
workSeedPath := filepath.Join(artifactDir, "work-seed.ext4")
kernelPath := params.KernelPath kernelPath := params.KernelPath
if kernelPath == "" { if kernelPath == "" {
kernelPath = d.config.DefaultKernel kernelPath = d.config.DefaultKernel
@ -90,10 +91,17 @@ func (d *Daemon) BuildImage(ctx context.Context, params api.ImageBuildParams) (i
} }
op.stage("launch_builder", "build_log_path", buildLogPath, "artifact_dir", artifactDir) op.stage("launch_builder", "build_log_path", buildLogPath, "artifact_dir", artifactDir)
if err := d.runImageBuild(ctx, spec); err != nil { if err := d.runImageBuild(ctx, spec); err != nil {
_ = logFile.Sync()
_ = os.RemoveAll(artifactDir)
return model.Image{}, err
}
if err := system.BuildWorkSeedImage(ctx, d.runner, rootfsPath, workSeedPath); err != nil {
_ = logFile.Sync()
_ = os.RemoveAll(artifactDir) _ = os.RemoveAll(artifactDir)
return model.Image{}, err return model.Image{}, err
} }
if err := writePackagesMetadata(rootfsPath, d.config.DefaultPackagesFile); err != nil { if err := writePackagesMetadata(rootfsPath, d.config.DefaultPackagesFile); err != nil {
_ = logFile.Sync()
_ = os.RemoveAll(artifactDir) _ = os.RemoveAll(artifactDir)
return model.Image{}, err return model.Image{}, err
} }
@ -103,6 +111,7 @@ func (d *Daemon) BuildImage(ctx context.Context, params api.ImageBuildParams) (i
Managed: true, Managed: true,
ArtifactDir: artifactDir, ArtifactDir: artifactDir,
RootfsPath: rootfsPath, RootfsPath: rootfsPath,
WorkSeedPath: workSeedPath,
KernelPath: kernelPath, KernelPath: kernelPath,
InitrdPath: initrdPath, InitrdPath: initrdPath,
ModulesDir: modulesDir, ModulesDir: modulesDir,
@ -119,6 +128,7 @@ func (d *Daemon) BuildImage(ctx context.Context, params api.ImageBuildParams) (i
if d.logger != nil { if d.logger != nil {
d.logger.Info("image build log preserved", append(imageLogAttrs(image), "build_log_path", buildLogPath)...) d.logger.Info("image build log preserved", append(imageLogAttrs(image), "build_log_path", buildLogPath)...)
} }
_ = logFile.Sync()
return image, nil return image, nil
} }

View file

@ -35,14 +35,16 @@ func parseLogLevel(raw string) (slog.Level, string, error) {
} }
} }
func (d *Daemon) beginOperation(name string, attrs ...any) operationLog { func (d *Daemon) beginOperation(name string, attrs ...any) *operationLog {
if d.logger != nil { if d.logger != nil {
d.logger.Info("operation started", append([]any{"operation", name}, attrs...)...) d.logger.Info("operation started", append([]any{"operation", name}, attrs...)...)
} }
return operationLog{ now := time.Now()
return &operationLog{
logger: d.logger, logger: d.logger,
name: name, name: name,
started: time.Now(), started: now,
last: now,
attrs: append([]any(nil), attrs...), attrs: append([]any(nil), attrs...),
} }
} }
@ -51,22 +53,35 @@ type operationLog struct {
logger *slog.Logger logger *slog.Logger
name string name string
started time.Time started time.Time
last time.Time
attrs []any attrs []any
} }
func (o operationLog) stage(stage string, attrs ...any) { func (o *operationLog) stage(stage string, attrs ...any) {
o.log(slog.LevelInfo, "operation stage", append([]any{"stage", stage}, attrs...)...) now := time.Now()
o.log(slog.LevelInfo, "operation stage", append([]any{
"stage", stage,
"since_start_ms", now.Sub(o.started).Milliseconds(),
"since_last_stage_ms", now.Sub(o.last).Milliseconds(),
}, attrs...)...)
o.last = now
} }
func (o operationLog) debugStage(stage string, attrs ...any) { func (o *operationLog) debugStage(stage string, attrs ...any) {
o.log(slog.LevelDebug, "operation stage", append([]any{"stage", stage}, attrs...)...) now := time.Now()
o.log(slog.LevelDebug, "operation stage", append([]any{
"stage", stage,
"since_start_ms", now.Sub(o.started).Milliseconds(),
"since_last_stage_ms", now.Sub(o.last).Milliseconds(),
}, attrs...)...)
o.last = now
} }
func (o operationLog) done(attrs ...any) { func (o *operationLog) done(attrs ...any) {
o.log(slog.LevelInfo, "operation completed", append([]any{"duration_ms", time.Since(o.started).Milliseconds()}, attrs...)...) o.log(slog.LevelInfo, "operation completed", append([]any{"duration_ms", time.Since(o.started).Milliseconds()}, attrs...)...)
} }
func (o operationLog) fail(err error, attrs ...any) error { func (o *operationLog) fail(err error, attrs ...any) error {
if err == nil { if err == nil {
return nil return nil
} }
@ -118,6 +133,9 @@ func imageLogAttrs(image model.Image) []any {
if image.RootfsPath != "" { if image.RootfsPath != "" {
attrs = append(attrs, "rootfs_path", image.RootfsPath) attrs = append(attrs, "rootfs_path", image.RootfsPath)
} }
if image.WorkSeedPath != "" {
attrs = append(attrs, "work_seed_path", image.WorkSeedPath)
}
return attrs return attrs
} }

View file

@ -141,7 +141,7 @@ func TestBuildImagePreservesBuildLogOnFailure(t *testing.T) {
} }
binDir := t.TempDir() binDir := t.TempDir()
for _, name := range []string{"sudo", "ip", "pgrep", "chown", "chmod", "kill", "iptables", "sysctl", "e2fsck", "resize2fs"} { for _, name := range []string{"sudo", "ip", "pgrep", "chown", "chmod", "kill", "iptables", "sysctl", "e2fsck", "resize2fs", "mkfs.ext4", "mount", "umount", "cp"} {
writeFakeExecutable(t, filepath.Join(binDir, name)) writeFakeExecutable(t, filepath.Join(binDir, name))
} }
t.Setenv("PATH", binDir) t.Setenv("PATH", binDir)

View file

@ -75,6 +75,9 @@ func (d *Daemon) addImageBuildPrereqs(ctx context.Context, checks *system.Prefli
for _, command := range []string{"sudo", "ip", "pgrep", "chown", "chmod", "kill"} { for _, command := range []string{"sudo", "ip", "pgrep", "chown", "chmod", "kill"} {
checks.RequireCommand(command, toolHint(command)) checks.RequireCommand(command, toolHint(command))
} }
for _, command := range []string{"mkfs.ext4", "mount", "umount", "cp"} {
checks.RequireCommand(command, toolHint(command))
}
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint) checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
checks.RequireFile(d.config.SSHKeyPath, "ssh private key", `set "ssh_key_path" or refresh the runtime bundle`) checks.RequireFile(d.config.SSHKeyPath, "ssh private key", `set "ssh_key_path" or refresh the runtime bundle`)
checks.RequireExecutable(d.config.VSockPingHelperPath, "vsock ping helper", `run 'make build' or refresh the runtime bundle`) checks.RequireExecutable(d.config.VSockPingHelperPath, "vsock ping helper", `run 'make build' or refresh the runtime bundle`)

121
internal/daemon/tap_pool.go Normal file
View file

@ -0,0 +1,121 @@
package daemon
import (
"context"
"fmt"
"strconv"
"strings"
)
const tapPoolPrefix = "tap-pool-"
// initializeTapPool seeds the pool's name counter from persisted VM records
// so freshly minted tap-pool-N names never collide with devices still
// referenced by existing VMs. A no-op when the pool is disabled or there is
// no store.
func (d *Daemon) initializeTapPool(ctx context.Context) error {
	if d.config.TapPoolSize <= 0 || d.store == nil {
		return nil
	}
	vms, err := d.store.ListVMs(ctx)
	if err != nil {
		return err
	}
	// Find the highest pool index currently in use; -1 means none.
	highest := -1
	for _, record := range vms {
		idx, ok := parseTapPoolIndex(record.Runtime.TapDevice)
		if ok && idx > highest {
			highest = idx
		}
	}
	d.tapPoolMu.Lock()
	d.tapPoolNext = highest + 1
	d.tapPoolMu.Unlock()
	return nil
}
// ensureTapPool tops the idle tap pool up to its configured size, creating
// bridge-attached tap-pool-N devices one at a time. It stops early when the
// context is cancelled, the daemon is closing, or a tap creation fails.
func (d *Daemon) ensureTapPool(ctx context.Context) {
	if d.config.TapPoolSize <= 0 {
		return
	}
	// shouldStop reports whether warmup must bail out: caller cancelled or
	// the daemon began shutting down.
	shouldStop := func() bool {
		select {
		case <-ctx.Done():
			return true
		case <-d.closing:
			return true
		default:
			return false
		}
	}
	for !shouldStop() {
		d.tapPoolMu.Lock()
		if len(d.tapPool) >= d.config.TapPoolSize {
			d.tapPoolMu.Unlock()
			return
		}
		// Mint the next unique pool name while still holding the lock so
		// concurrent warmups can never hand out the same index twice.
		name := fmt.Sprintf("%s%d", tapPoolPrefix, d.tapPoolNext)
		d.tapPoolNext++
		d.tapPoolMu.Unlock()
		if err := d.createTap(ctx, name); err != nil {
			if d.logger != nil {
				d.logger.Warn("tap pool warmup failed", "tap_device", name, "error", err.Error())
			}
			return
		}
		d.tapPoolMu.Lock()
		d.tapPool = append(d.tapPool, name)
		d.tapPoolMu.Unlock()
		if d.logger != nil {
			d.logger.Debug("tap added to idle pool", "tap_device", name)
		}
	}
}
// acquireTap hands out a warm tap from the idle pool when one is available;
// otherwise it creates a fresh device named fallbackName. The returned name
// is whichever device the caller now owns.
func (d *Daemon) acquireTap(ctx context.Context, fallbackName string) (string, error) {
	d.tapPoolMu.Lock()
	var pooled string
	if last := len(d.tapPool) - 1; last >= 0 {
		pooled = d.tapPool[last]
		d.tapPool = d.tapPool[:last]
	}
	d.tapPoolMu.Unlock()
	if pooled != "" {
		return pooled, nil
	}
	// Pool empty: fall back to creating a per-VM tap on demand.
	if err := d.createTap(ctx, fallbackName); err != nil {
		return "", err
	}
	return fallbackName, nil
}
// releaseTap returns a pool-named tap to the idle pool when there is room;
// otherwise (non-pool name, or pool already full) it deletes the device and
// kicks off a background warmup to restore pool capacity. An empty name is a
// no-op.
func (d *Daemon) releaseTap(ctx context.Context, tapName string) error {
	name := strings.TrimSpace(tapName)
	if name == "" {
		return nil
	}
	if isTapPoolName(name) {
		d.tapPoolMu.Lock()
		hasRoom := len(d.tapPool) < d.config.TapPoolSize
		if hasRoom {
			d.tapPool = append(d.tapPool, name)
		}
		d.tapPoolMu.Unlock()
		if hasRoom {
			return nil
		}
	}
	if _, err := d.runner.RunSudo(ctx, "ip", "link", "del", name); err != nil {
		return err
	}
	// Deletion succeeded; refill the pool asynchronously so the next VM
	// create still finds a warm device.
	go d.ensureTapPool(context.Background())
	return nil
}
// isTapPoolName reports whether tapName (ignoring surrounding whitespace)
// carries the daemon tap-pool prefix.
func isTapPoolName(tapName string) bool {
	name := strings.TrimSpace(tapName)
	return strings.HasPrefix(name, tapPoolPrefix)
}
func parseTapPoolIndex(tapName string) (int, bool) {
if !isTapPoolName(tapName) {
return 0, false
}
value, err := strconv.Atoi(strings.TrimPrefix(strings.TrimSpace(tapName), tapPoolPrefix))
if err != nil {
return 0, false
}
return value, true
}

View file

@ -188,8 +188,8 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
shortID := system.ShortID(vm.ID) shortID := system.ShortID(vm.ID)
apiSock := filepath.Join(d.layout.RuntimeDir, "fc-"+shortID+".sock") apiSock := filepath.Join(d.layout.RuntimeDir, "fc-"+shortID+".sock")
tap := "tap-fc-" + shortID
dmName := "fc-rootfs-" + shortID dmName := "fc-rootfs-" + shortID
tapName := "tap-fc-" + shortID
if strings.TrimSpace(vm.Runtime.VSockPath) == "" { if strings.TrimSpace(vm.Runtime.VSockPath) == "" {
vm.Runtime.VSockPath = defaultVSockPath(d.layout.RuntimeDir, vm.ID) vm.Runtime.VSockPath = defaultVSockPath(d.layout.RuntimeDir, vm.ID)
} }
@ -221,7 +221,6 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
vm.Runtime.DMName = handles.DMName vm.Runtime.DMName = handles.DMName
vm.Runtime.DMDev = handles.DMDev vm.Runtime.DMDev = handles.DMDev
vm.Runtime.APISockPath = apiSock vm.Runtime.APISockPath = apiSock
vm.Runtime.TapDevice = tap
vm.Runtime.State = model.VMStateRunning vm.Runtime.State = model.VMStateRunning
vm.State = model.VMStateRunning vm.State = model.VMStateRunning
vm.Runtime.LastError = "" vm.Runtime.LastError = ""
@ -247,10 +246,12 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
if err := d.prepareCapabilityHosts(ctx, &vm, image); err != nil { if err := d.prepareCapabilityHosts(ctx, &vm, image); err != nil {
return cleanupOnErr(err) return cleanupOnErr(err)
} }
op.stage("tap", "tap_device", tap) op.stage("tap")
if err := d.createTap(ctx, tap); err != nil { tap, err := d.acquireTap(ctx, tapName)
if err != nil {
return cleanupOnErr(err) return cleanupOnErr(err)
} }
vm.Runtime.TapDevice = tap
op.stage("metrics_file", "metrics_path", vm.Runtime.MetricsPath) op.stage("metrics_file", "metrics_path", vm.Runtime.MetricsPath)
if err := os.WriteFile(vm.Runtime.MetricsPath, nil, 0o644); err != nil { if err := os.WriteFile(vm.Runtime.MetricsPath, nil, 0o644); err != nil {
return cleanupOnErr(err) return cleanupOnErr(err)
@ -766,10 +767,28 @@ func (d *Daemon) patchRootOverlay(ctx context.Context, vm model.VMRecord, image
return nil return nil
} }
func (d *Daemon) ensureWorkDisk(ctx context.Context, vm *model.VMRecord) error { func (d *Daemon) ensureWorkDisk(ctx context.Context, vm *model.VMRecord, image model.Image) error {
if exists(vm.Runtime.WorkDiskPath) { if exists(vm.Runtime.WorkDiskPath) {
return nil return nil
} }
if exists(image.WorkSeedPath) {
if err := system.CopyFilePreferClone(image.WorkSeedPath, vm.Runtime.WorkDiskPath); err != nil {
return err
}
seedInfo, err := os.Stat(image.WorkSeedPath)
if err != nil {
return err
}
if vm.Spec.WorkDiskSizeBytes < seedInfo.Size() {
return fmt.Errorf("requested work disk size %d is smaller than seed image %d", vm.Spec.WorkDiskSizeBytes, seedInfo.Size())
}
if vm.Spec.WorkDiskSizeBytes > seedInfo.Size() {
if err := system.ResizeExt4Image(ctx, d.runner, vm.Runtime.WorkDiskPath, vm.Spec.WorkDiskSizeBytes); err != nil {
return err
}
}
return nil
}
if _, err := d.runner.Run(ctx, "truncate", "-s", strconv.FormatInt(vm.Spec.WorkDiskSizeBytes, 10), vm.Runtime.WorkDiskPath); err != nil { if _, err := d.runner.Run(ctx, "truncate", "-s", strconv.FormatInt(vm.Spec.WorkDiskSizeBytes, 10), vm.Runtime.WorkDiskPath); err != nil {
return err return err
} }
@ -936,15 +955,6 @@ func (d *Daemon) cleanupRuntime(ctx context.Context, vm model.VMRecord, preserve
return err return err
} }
} }
if vm.Runtime.TapDevice != "" {
_, _ = d.runner.RunSudo(ctx, "ip", "link", "del", vm.Runtime.TapDevice)
}
if vm.Runtime.APISockPath != "" {
_ = os.Remove(vm.Runtime.APISockPath)
}
if vm.Runtime.VSockPath != "" {
_ = os.Remove(vm.Runtime.VSockPath)
}
snapshotErr := d.cleanupDMSnapshot(ctx, dmSnapshotHandles{ snapshotErr := d.cleanupDMSnapshot(ctx, dmSnapshotHandles{
BaseLoop: vm.Runtime.BaseLoop, BaseLoop: vm.Runtime.BaseLoop,
COWLoop: vm.Runtime.COWLoop, COWLoop: vm.Runtime.COWLoop,
@ -952,10 +962,20 @@ func (d *Daemon) cleanupRuntime(ctx context.Context, vm model.VMRecord, preserve
DMDev: vm.Runtime.DMDev, DMDev: vm.Runtime.DMDev,
}) })
featureErr := d.cleanupCapabilityState(ctx, vm) featureErr := d.cleanupCapabilityState(ctx, vm)
if !preserveDisks && vm.Runtime.VMDir != "" { var tapErr error
return errors.Join(snapshotErr, featureErr, os.RemoveAll(vm.Runtime.VMDir)) if vm.Runtime.TapDevice != "" {
tapErr = d.releaseTap(ctx, vm.Runtime.TapDevice)
} }
return errors.Join(snapshotErr, featureErr) if vm.Runtime.APISockPath != "" {
_ = os.Remove(vm.Runtime.APISockPath)
}
if vm.Runtime.VSockPath != "" {
_ = os.Remove(vm.Runtime.VSockPath)
}
if !preserveDisks && vm.Runtime.VMDir != "" {
return errors.Join(snapshotErr, featureErr, tapErr, os.RemoveAll(vm.Runtime.VMDir))
}
return errors.Join(snapshotErr, featureErr, tapErr)
} }
func clearRuntimeHandles(vm *model.VMRecord) { func clearRuntimeHandles(vm *model.VMRecord) {

View file

@ -42,12 +42,14 @@ type DaemonConfig struct {
NamegenPath string NamegenPath string
CustomizeScript string CustomizeScript string
VSockPingHelperPath string VSockPingHelperPath string
DefaultWorkSeed string
AutoStopStaleAfter time.Duration AutoStopStaleAfter time.Duration
StatsPollInterval time.Duration StatsPollInterval time.Duration
MetricsPollInterval time.Duration MetricsPollInterval time.Duration
BridgeName string BridgeName string
BridgeIP string BridgeIP string
CIDR string CIDR string
TapPoolSize int
DefaultDNS string DefaultDNS string
DefaultImageName string DefaultImageName string
DefaultRootfs string DefaultRootfs string
@ -64,6 +66,7 @@ type Image struct {
Managed bool `json:"managed"` Managed bool `json:"managed"`
ArtifactDir string `json:"artifact_dir,omitempty"` ArtifactDir string `json:"artifact_dir,omitempty"`
RootfsPath string `json:"rootfs_path"` RootfsPath string `json:"rootfs_path"`
WorkSeedPath string `json:"work_seed_path,omitempty"`
KernelPath string `json:"kernel_path"` KernelPath string `json:"kernel_path"`
InitrdPath string `json:"initrd_path,omitempty"` InitrdPath string `json:"initrd_path,omitempty"`
ModulesDir string `json:"modules_dir,omitempty"` ModulesDir string `json:"modules_dir,omitempty"`

View file

@ -38,6 +38,7 @@ type BundleMetadata struct {
DefaultPackages string `json:"default_packages_file" toml:"default_packages_file"` DefaultPackages string `json:"default_packages_file" toml:"default_packages_file"`
DefaultRootfs string `json:"default_rootfs" toml:"default_rootfs"` DefaultRootfs string `json:"default_rootfs" toml:"default_rootfs"`
DefaultBaseRootfs string `json:"default_base_rootfs,omitempty" toml:"default_base_rootfs"` DefaultBaseRootfs string `json:"default_base_rootfs,omitempty" toml:"default_base_rootfs"`
DefaultWorkSeed string `json:"default_work_seed,omitempty" toml:"default_work_seed"`
DefaultKernel string `json:"default_kernel" toml:"default_kernel"` DefaultKernel string `json:"default_kernel" toml:"default_kernel"`
DefaultInitrd string `json:"default_initrd,omitempty" toml:"default_initrd"` DefaultInitrd string `json:"default_initrd,omitempty" toml:"default_initrd"`
DefaultModulesDir string `json:"default_modules_dir,omitempty" toml:"default_modules_dir"` DefaultModulesDir string `json:"default_modules_dir,omitempty" toml:"default_modules_dir"`
@ -233,6 +234,7 @@ func validateBundleMetadata(runtimeDir string, meta BundleMetadata) error {
{meta.DefaultPackages, "default_packages_file", true}, {meta.DefaultPackages, "default_packages_file", true},
{meta.DefaultRootfs, "default_rootfs", true}, {meta.DefaultRootfs, "default_rootfs", true},
{meta.DefaultBaseRootfs, "default_base_rootfs", false}, {meta.DefaultBaseRootfs, "default_base_rootfs", false},
{meta.DefaultWorkSeed, "default_work_seed", false},
{meta.DefaultKernel, "default_kernel", true}, {meta.DefaultKernel, "default_kernel", true},
{meta.DefaultInitrd, "default_initrd", false}, {meta.DefaultInitrd, "default_initrd", false},
{meta.DefaultModulesDir, "default_modules_dir", false}, {meta.DefaultModulesDir, "default_modules_dir", false},
@ -271,6 +273,7 @@ func metadataArchiveBytes(runtimeDir string, meta BundleMetadata) ([]byte, error
strings.TrimSpace(meta.DefaultPackages) == "" && strings.TrimSpace(meta.DefaultPackages) == "" &&
strings.TrimSpace(meta.DefaultRootfs) == "" && strings.TrimSpace(meta.DefaultRootfs) == "" &&
strings.TrimSpace(meta.DefaultBaseRootfs) == "" && strings.TrimSpace(meta.DefaultBaseRootfs) == "" &&
strings.TrimSpace(meta.DefaultWorkSeed) == "" &&
strings.TrimSpace(meta.DefaultKernel) == "" && strings.TrimSpace(meta.DefaultKernel) == "" &&
strings.TrimSpace(meta.DefaultInitrd) == "" && strings.TrimSpace(meta.DefaultInitrd) == "" &&
strings.TrimSpace(meta.DefaultModulesDir) == "" { strings.TrimSpace(meta.DefaultModulesDir) == "" {
@ -291,6 +294,7 @@ func normalizeBundleMetadata(meta BundleMetadata) BundleMetadata {
meta.DefaultPackages = strings.TrimSpace(meta.DefaultPackages) meta.DefaultPackages = strings.TrimSpace(meta.DefaultPackages)
meta.DefaultRootfs = strings.TrimSpace(meta.DefaultRootfs) meta.DefaultRootfs = strings.TrimSpace(meta.DefaultRootfs)
meta.DefaultBaseRootfs = strings.TrimSpace(meta.DefaultBaseRootfs) meta.DefaultBaseRootfs = strings.TrimSpace(meta.DefaultBaseRootfs)
meta.DefaultWorkSeed = strings.TrimSpace(meta.DefaultWorkSeed)
meta.DefaultKernel = strings.TrimSpace(meta.DefaultKernel) meta.DefaultKernel = strings.TrimSpace(meta.DefaultKernel)
meta.DefaultInitrd = strings.TrimSpace(meta.DefaultInitrd) meta.DefaultInitrd = strings.TrimSpace(meta.DefaultInitrd)
meta.DefaultModulesDir = strings.TrimSpace(meta.DefaultModulesDir) meta.DefaultModulesDir = strings.TrimSpace(meta.DefaultModulesDir)

View file

@ -74,6 +74,7 @@ func (s *Store) migrate() error {
managed INTEGER NOT NULL DEFAULT 0, managed INTEGER NOT NULL DEFAULT 0,
artifact_dir TEXT, artifact_dir TEXT,
rootfs_path TEXT NOT NULL, rootfs_path TEXT NOT NULL,
work_seed_path TEXT,
kernel_path TEXT NOT NULL, kernel_path TEXT NOT NULL,
initrd_path TEXT, initrd_path TEXT,
modules_dir TEXT, modules_dir TEXT,
@ -103,6 +104,9 @@ func (s *Store) migrate() error {
return err return err
} }
} }
if err := ensureColumnExists(s.db, "images", "work_seed_path", "TEXT"); err != nil {
return err
}
return nil return nil
} }
@ -111,14 +115,15 @@ func (s *Store) UpsertImage(ctx context.Context, image model.Image) error {
defer s.writeMu.Unlock() defer s.writeMu.Unlock()
const query = ` const query = `
INSERT INTO images ( INSERT INTO images (
id, name, managed, artifact_dir, rootfs_path, kernel_path, initrd_path, id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path,
modules_dir, packages_path, build_size, docker, created_at, updated_at modules_dir, packages_path, build_size, docker, created_at, updated_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET ON CONFLICT(id) DO UPDATE SET
name=excluded.name, name=excluded.name,
managed=excluded.managed, managed=excluded.managed,
artifact_dir=excluded.artifact_dir, artifact_dir=excluded.artifact_dir,
rootfs_path=excluded.rootfs_path, rootfs_path=excluded.rootfs_path,
work_seed_path=excluded.work_seed_path,
kernel_path=excluded.kernel_path, kernel_path=excluded.kernel_path,
initrd_path=excluded.initrd_path, initrd_path=excluded.initrd_path,
modules_dir=excluded.modules_dir, modules_dir=excluded.modules_dir,
@ -132,6 +137,7 @@ func (s *Store) UpsertImage(ctx context.Context, image model.Image) error {
boolToInt(image.Managed), boolToInt(image.Managed),
image.ArtifactDir, image.ArtifactDir,
image.RootfsPath, image.RootfsPath,
image.WorkSeedPath,
image.KernelPath, image.KernelPath,
image.InitrdPath, image.InitrdPath,
image.ModulesDir, image.ModulesDir,
@ -145,15 +151,15 @@ func (s *Store) UpsertImage(ctx context.Context, image model.Image) error {
} }
func (s *Store) GetImageByName(ctx context.Context, name string) (model.Image, error) { func (s *Store) GetImageByName(ctx context.Context, name string) (model.Image, error) {
return s.getImage(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, kernel_path, initrd_path, modules_dir, packages_path, build_size, docker, created_at, updated_at FROM images WHERE name = ?", name) return s.getImage(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path, modules_dir, packages_path, build_size, docker, created_at, updated_at FROM images WHERE name = ?", name)
} }
func (s *Store) GetImageByID(ctx context.Context, id string) (model.Image, error) { func (s *Store) GetImageByID(ctx context.Context, id string) (model.Image, error) {
return s.getImage(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, kernel_path, initrd_path, modules_dir, packages_path, build_size, docker, created_at, updated_at FROM images WHERE id = ?", id) return s.getImage(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path, modules_dir, packages_path, build_size, docker, created_at, updated_at FROM images WHERE id = ?", id)
} }
func (s *Store) ListImages(ctx context.Context) ([]model.Image, error) { func (s *Store) ListImages(ctx context.Context) ([]model.Image, error) {
rows, err := s.db.QueryContext(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, kernel_path, initrd_path, modules_dir, packages_path, build_size, docker, created_at, updated_at FROM images ORDER BY created_at ASC") rows, err := s.db.QueryContext(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path, modules_dir, packages_path, build_size, docker, created_at, updated_at FROM images ORDER BY created_at ASC")
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -330,6 +336,7 @@ type scanner interface {
func scanImageRow(row scanner) (model.Image, error) { func scanImageRow(row scanner) (model.Image, error) {
var image model.Image var image model.Image
var managed, docker int var managed, docker int
var workSeedPath sql.NullString
var createdAt, updatedAt string var createdAt, updatedAt string
err := row.Scan( err := row.Scan(
&image.ID, &image.ID,
@ -337,6 +344,7 @@ func scanImageRow(row scanner) (model.Image, error) {
&managed, &managed,
&image.ArtifactDir, &image.ArtifactDir,
&image.RootfsPath, &image.RootfsPath,
&workSeedPath,
&image.KernelPath, &image.KernelPath,
&image.InitrdPath, &image.InitrdPath,
&image.ModulesDir, &image.ModulesDir,
@ -351,6 +359,7 @@ func scanImageRow(row scanner) (model.Image, error) {
} }
image.Managed = managed == 1 image.Managed = managed == 1
image.Docker = docker == 1 image.Docker = docker == 1
image.WorkSeedPath = workSeedPath.String
image.CreatedAt, err = time.Parse(time.RFC3339, createdAt) image.CreatedAt, err = time.Parse(time.RFC3339, createdAt)
if err != nil { if err != nil {
return image, err return image, err
@ -417,6 +426,35 @@ func scanVMInto(row scanner) (model.VMRecord, error) {
return vm, nil return vm, nil
} }
// ensureColumnExists adds column (with columnType) to table if it is not
// already present, using PRAGMA table_info to inspect the current schema.
// It exists so older databases pick up new columns without a full migration.
//
// PRAGMA/ALTER statements cannot take placeholders; table and column are
// trusted internal identifiers, never user input.
//
// Fix over the original: the result set is closed explicitly before issuing
// ALTER TABLE. The previous defer kept the rows open while db.Exec ran,
// which can block on single-connection SQLite configurations; the found-early
// path also skipped the rows.Err() check.
func ensureColumnExists(db *sql.DB, table, column, columnType string) error {
	rows, err := db.Query(fmt.Sprintf("PRAGMA table_info(%s)", table))
	if err != nil {
		return err
	}
	found := false
	for rows.Next() {
		var (
			cid       int
			name      string
			valueType string
			notNull   int
			defaultV  sql.NullString
			pk        int
		)
		if err := rows.Scan(&cid, &name, &valueType, &notNull, &defaultV, &pk); err != nil {
			rows.Close()
			return err
		}
		if name == column {
			found = true
			break
		}
	}
	// Release the statement before any further work on the database.
	if err := rows.Close(); err != nil {
		return err
	}
	if err := rows.Err(); err != nil {
		return err
	}
	if found {
		return nil
	}
	_, err = db.Exec(fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s", table, column, columnType))
	return err
}
func boolToInt(value bool) int { func boolToInt(value bool) int {
if value { if value {
return 1 return 1

View file

@ -340,6 +340,7 @@ func sampleImage(name string) model.Image {
Managed: true, Managed: true,
ArtifactDir: "/artifacts/" + name, ArtifactDir: "/artifacts/" + name,
RootfsPath: "/images/" + name + ".ext4", RootfsPath: "/images/" + name + ".ext4",
WorkSeedPath: "/images/" + name + ".work-seed.ext4",
KernelPath: "/kernels/" + name, KernelPath: "/kernels/" + name,
InitrdPath: "/initrd/" + name, InitrdPath: "/initrd/" + name,
ModulesDir: "/modules/" + name, ModulesDir: "/modules/" + name,

View file

@ -1,14 +1,22 @@
package system package system
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"os" "os"
"path/filepath"
"strings" "strings"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
const (
minWorkSeedBytes int64 = 512 * 1024 * 1024
workSeedSlackBytes int64 = 256 * 1024 * 1024
workSeedRoundBytes int64 = 64 * 1024 * 1024
)
func CopyFilePreferClone(sourcePath, targetPath string) error { func CopyFilePreferClone(sourcePath, targetPath string) error {
source, err := os.Open(sourcePath) source, err := os.Open(sourcePath)
if err != nil { if err != nil {
@ -48,6 +56,79 @@ func CopyFilePreferClone(sourcePath, targetPath string) error {
return nil return nil
} }
// WorkSeedPath derives the work-seed sidecar path for a rootfs image path.
// A "<name>.ext4" rootfs maps to "<name>.work-seed.ext4"; any other
// non-empty path simply gains a ".work-seed" suffix. An empty or
// whitespace-only input yields "".
func WorkSeedPath(rootfsPath string) string {
	trimmed := strings.TrimSpace(rootfsPath)
	switch {
	case trimmed == "":
		return ""
	case strings.HasSuffix(trimmed, ".ext4"):
		return strings.TrimSuffix(trimmed, ".ext4") + ".work-seed.ext4"
	default:
		return trimmed + ".work-seed"
	}
}
// BuildWorkSeedImage creates a fresh ext4 seed image at outPath populated
// with the contents of /root from the rootfs image at rootfsPath. The seed is
// sized by estimateWorkSeedSize, formatted with mkfs.ext4, then filled via a
// temporary mount.
//
// Fix over the original: every failure path after the output file is created
// removes the partial artifact. Callers treat an existing seed file as valid
// (it gets cloned into new VMs), so a half-built or unformatted seed must
// never be left on disk.
func BuildWorkSeedImage(ctx context.Context, runner CommandRunner, rootfsPath, outPath string) error {
	rootMount, cleanupRoot, err := MountTempDir(ctx, runner, rootfsPath, true)
	if err != nil {
		return err
	}
	defer cleanupRoot()
	rootHome := filepath.Join(rootMount, "root")
	sizeBytes, err := estimateWorkSeedSize(rootHome)
	if err != nil {
		return err
	}
	// Drop any stale artifact first. (os.RemoveAll reports success for a
	// missing path, so no not-exist guard is needed.)
	if err := os.RemoveAll(outPath); err != nil {
		return err
	}
	file, err := os.OpenFile(outPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	if err := file.Truncate(sizeBytes); err != nil {
		_ = file.Close()
		_ = os.Remove(outPath)
		return err
	}
	if err := file.Close(); err != nil {
		_ = os.Remove(outPath)
		return err
	}
	if _, err := runner.Run(ctx, "mkfs.ext4", "-F", outPath); err != nil {
		_ = os.Remove(outPath)
		return err
	}
	workMount, cleanupWork, err := MountTempDir(ctx, runner, outPath, false)
	if err != nil {
		_ = os.Remove(outPath)
		return err
	}
	copyErr := CopyDirContents(ctx, runner, rootHome, workMount, true)
	// Unmount before deciding the seed's fate so removal acts on an
	// unmounted image file.
	cleanupWork()
	if copyErr != nil {
		_ = os.Remove(outPath)
	}
	return copyErr
}
// estimateWorkSeedSize picks a byte size for the work-seed image: twice the
// total size of regular files under rootHome plus workSeedSlackBytes,
// clamped to at least minWorkSeedBytes, then rounded up to the next multiple
// of workSeedRoundBytes.
func estimateWorkSeedSize(rootHome string) (int64, error) {
	var fileBytes int64
	walkErr := filepath.Walk(rootHome, func(_ string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Only regular files contribute; directories, symlinks, and
		// specials are skipped.
		if info.Mode().IsRegular() {
			fileBytes += info.Size()
		}
		return nil
	})
	if walkErr != nil {
		return 0, walkErr
	}
	estimate := fileBytes*2 + workSeedSlackBytes
	if estimate < minWorkSeedBytes {
		estimate = minWorkSeedBytes
	}
	// Round up to a whole multiple of workSeedRoundBytes (a no-op when
	// already aligned).
	estimate = ((estimate + workSeedRoundBytes - 1) / workSeedRoundBytes) * workSeedRoundBytes
	return estimate, nil
}
func ReadNormalizedLines(path string) ([]string, error) { func ReadNormalizedLines(path string) ([]string, error) {
data, err := os.ReadFile(path) data, err := os.ReadFile(path)
if err != nil { if err != nil {

View file

@ -27,6 +27,7 @@ customize_script = "customize.sh"
vsock_ping_helper_path = "banger-vsock-pingd" vsock_ping_helper_path = "banger-vsock-pingd"
default_packages_file = "packages.apt" default_packages_file = "packages.apt"
default_rootfs = "rootfs-docker.ext4" default_rootfs = "rootfs-docker.ext4"
default_work_seed = "rootfs-docker.work-seed.ext4"
default_kernel = "wtf/root/boot/vmlinux-6.8.0-94-generic" default_kernel = "wtf/root/boot/vmlinux-6.8.0-94-generic"
default_initrd = "wtf/root/boot/initrd.img-6.8.0-94-generic" default_initrd = "wtf/root/boot/initrd.img-6.8.0-94-generic"
default_modules_dir = "wtf/root/lib/modules/6.8.0-94-generic" default_modules_dir = "wtf/root/lib/modules/6.8.0-94-generic"

114
scripts/bench-create.sh Normal file
View file

@ -0,0 +1,114 @@
#!/usr/bin/env bash
# Benchmark `banger vm create` and first-SSH readiness.
#
# Emits a JSON array on stdout, one object per run:
#   {"run": N, "vm_name": "...", "create_ms": ..., "ssh_ready_ms": ...}
# create_ms    - wall time of `banger vm create`
# ssh_ready_ms - wall time from create start until `vm ssh <vm> -- true` succeeds
set -euo pipefail

log() {
  printf '[bench-create] %s\n' "$*" >&2
}

usage() {
  cat <<'EOF'
Usage: ./scripts/bench-create.sh [--runs N] [--image NAME] [--keep]

Measures:
  - create_ms: time for `banger vm create`
  - ssh_ready_ms: time until `banger vm ssh <vm> -- true` succeeds
EOF
}

RUNS=5
IMAGE_NAME=""
KEEP=0

while [[ $# -gt 0 ]]; do
  case "$1" in
    --runs)
      RUNS="${2:-}"
      shift 2
      ;;
    --image)
      IMAGE_NAME="${2:-}"
      shift 2
      ;;
    --keep)
      KEEP=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      log "unknown option: $1"
      usage
      exit 1
      ;;
  esac
done

if ! [[ "$RUNS" =~ ^[0-9]+$ ]] || (( RUNS <= 0 )); then
  log "--runs must be a positive integer"
  exit 1
fi

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
BANGER_BIN="${BANGER_BIN:-$REPO_ROOT/banger}"

if [[ ! -x "$BANGER_BIN" ]]; then
  log "banger binary not found: $BANGER_BIN"
  log "run 'make build' or set BANGER_BIN"
  exit 1
fi

# Millisecond wall clock. NOTE: %N requires GNU date (Linux hosts).
timestamp_ms() {
  date +%s%3N
}

json_escape() {
  python3 - <<'PY' "$1"
import json, sys
print(json.dumps(sys.argv[1]))
PY
}

# Best-effort VM removal; never fails the caller.
delete_vm() {
  "$BANGER_BIN" vm delete "$1" >/dev/null 2>&1 || true
}

printf '[\n'
for (( run = 1; run <= RUNS; run++ )); do
  vm_name="bench-$(date +%s)-$run"
  create_args=("$BANGER_BIN" vm create --name "$vm_name")
  if [[ -n "$IMAGE_NAME" ]]; then
    create_args+=(--image "$IMAGE_NAME")
  fi

  create_start="$(timestamp_ms)"
  if ! "${create_args[@]}" >/dev/null; then
    log "create failed for $vm_name"
    exit 1
  fi
  create_end="$(timestamp_ms)"

  # Poll until SSH answers or 60s elapse from create completion.
  ssh_ready=0
  deadline=$((create_end + 60000))
  while (( $(timestamp_ms) < deadline )); do
    if "$BANGER_BIN" vm ssh "$vm_name" -- true >/dev/null 2>&1; then
      ssh_ready="$(timestamp_ms)"
      break
    fi
    sleep 0.5
  done

  if (( ssh_ready == 0 )); then
    log "ssh did not become ready for $vm_name"
    # Fix: don't leak the benchmark VM on failure unless --keep was given.
    if (( KEEP != 1 )); then
      delete_vm "$vm_name"
    fi
    exit 1
  fi

  if (( KEEP != 1 )); then
    delete_vm "$vm_name"
  fi

  # Comma-separate all but the final array element.
  sep=','
  if (( run == RUNS )); then
    sep=''
  fi
  printf ' {"run": %d, "vm_name": %s, "create_ms": %d, "ssh_ready_ms": %d}%s\n' \
    "$run" \
    "$(json_escape "$vm_name")" \
    "$((create_end - create_start))" \
    "$((ssh_ready - create_start))" \
    "$sep"
done
printf ']\n'

View file

@ -38,6 +38,11 @@ firecracker_running() {
[[ "$cmdline" == *firecracker* && "$cmdline" == *"$api_sock"* ]] [[ "$cmdline" == *firecracker* && "$cmdline" == *"$api_sock"* ]]
} }
# pooled_tap: succeeds when the given tap device name belongs to the daemon's
# warm tap pool (such taps are reusable capacity, not cleanup leaks).
pooled_tap() {
  case "$1" in
    tap-pool-*) return 0 ;;
    *) return 1 ;;
  esac
}
wait_for_ssh() { wait_for_ssh() {
local guest_ip="$1" local guest_ip="$1"
local deadline="$2" local deadline="$2"
@ -228,8 +233,12 @@ if ./banger vm show "$VM_NAME" >/dev/null 2>&1; then
exit 1 exit 1
fi fi
if ip link show "$TAP" >/dev/null 2>&1; then if ip link show "$TAP" >/dev/null 2>&1; then
log "tap still exists: $TAP" if pooled_tap "$TAP"; then
exit 1 log "tap returned to idle pool: $TAP"
else
log "tap still exists: $TAP"
exit 1
fi
fi fi
if [[ -d "$VM_DIR" ]]; then if [[ -d "$VM_DIR" ]]; then
log "vm dir still exists: $VM_DIR" log "vm dir still exists: $VM_DIR"