Cut VM create wall time without changing VM semantics. Generate a work-seed ext4 sidecar during image builds and rootfs rebuilds, then clone and resize that seed for each new VM instead of rebuilding /root from scratch. Plumb the new seed artifact through config, runtime metadata, store state, runtime-bundle defaults, doctor checks, and default-image reconciliation so older images still fall back cleanly. Add a daemon TAP pool to keep idle bridge-attached devices warm, expose stage timing in lifecycle logs, add a create/SSH benchmark script plus a Make target, and teach verify.sh that tap-pool-* devices are reusable capacity rather than cleanup leaks. Validated with go test ./..., make build, ./verify.sh, and make bench-create ARGS="--runs 2".
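For orientation, here is a minimal sketch of the clone-and-resize path the first test below pins down. The helper name and the plain byte copy are assumptions (the real ensureWorkDisk goes through the daemon's command runner, and a reflink or sparse copy would serve equally well); the e2fsck -p -f then resize2fs sequence is exactly what the scripted runner in the test expects.

package daemon

import (
	"context"
	"fmt"
	"os"
	"os/exec"
)

// cloneAndResizeSeed is a hypothetical helper illustrating the fast
// path: copy the prebuilt work-seed ext4, grow the backing file to the
// requested size, then expand the filesystem in place. resize2fs
// refuses to resize a filesystem that has not been recently checked,
// hence the preceding e2fsck -p -f.
func cloneAndResizeSeed(ctx context.Context, seedPath, workDiskPath string, sizeBytes int64) error {
	data, err := os.ReadFile(seedPath)
	if err != nil {
		return fmt.Errorf("read seed: %w", err)
	}
	if err := os.WriteFile(workDiskPath, data, 0o644); err != nil {
		return fmt.Errorf("clone seed: %w", err)
	}
	// Truncate upward: the tail stays sparse until the guest writes to it.
	if err := os.Truncate(workDiskPath, sizeBytes); err != nil {
		return fmt.Errorf("grow work disk: %w", err)
	}
	if out, err := exec.CommandContext(ctx, "e2fsck", "-p", "-f", workDiskPath).CombinedOutput(); err != nil {
		return fmt.Errorf("e2fsck: %v: %s", err, out)
	}
	if out, err := exec.CommandContext(ctx, "resize2fs", workDiskPath).CombinedOutput(); err != nil {
		return fmt.Errorf("resize2fs: %v: %s", err, out)
	}
	return nil
}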
The accompanying daemon tests (Go, 92 lines, 2.6 KiB):
package daemon

import (
	"context"
	"errors"
	"os"
	"path/filepath"
	"strconv"
	"testing"

	"banger/internal/model"
)

func TestEnsureWorkDiskClonesSeedImageAndResizes(t *testing.T) {
	t.Parallel()

	vmDir := t.TempDir()
	seedPath := filepath.Join(t.TempDir(), "root.work-seed.ext4")
	if err := os.WriteFile(seedPath, []byte("seed-data"), 0o644); err != nil {
		t.Fatalf("WriteFile(seed): %v", err)
	}
	workDiskPath := filepath.Join(vmDir, "root.ext4")
	// The scripted runner pins the exact post-clone commands: fsck the
	// cloned seed, then grow the filesystem to fill the resized file.
	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			{call: runnerCall{name: "e2fsck", args: []string{"-p", "-f", workDiskPath}}},
			{call: runnerCall{name: "resize2fs", args: []string{workDiskPath}}},
		},
	}
	d := &Daemon{runner: runner}
	vm := testVM("seeded", "image-seeded", "172.16.0.60")
	vm.Runtime.WorkDiskPath = workDiskPath
	vm.Spec.WorkDiskSizeBytes = 2 * 1024 * 1024
	image := testImage("image-seeded")
	image.WorkSeedPath = seedPath

	if err := d.ensureWorkDisk(context.Background(), &vm, image); err != nil {
		t.Fatalf("ensureWorkDisk: %v", err)
	}
	runner.assertExhausted()

	// The clone must be grown to the requested work-disk size, not left
	// at the seed's size.
	info, err := os.Stat(workDiskPath)
	if err != nil {
		t.Fatalf("Stat(work disk): %v", err)
	}
	if info.Size() != vm.Spec.WorkDiskSizeBytes {
		t.Fatalf("work disk size = %d, want %d", info.Size(), vm.Spec.WorkDiskSizeBytes)
	}
}

func TestTapPoolWarmsAndReusesIdleTap(t *testing.T) {
	t.Parallel()

	// Warming the pool should probe for an existing device, then create,
	// bridge-attach, and bring up tap-pool-0 exactly once.
	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			{call: runnerCall{name: "ip", args: []string{"link", "show", "tap-pool-0"}}, err: errors.New("exit status 1")},
			sudoStep("", nil, "ip", "tuntap", "add", "dev", "tap-pool-0", "mode", "tap", "user", strconv.Itoa(os.Getuid()), "group", strconv.Itoa(os.Getgid())),
			sudoStep("", nil, "ip", "link", "set", "tap-pool-0", "master", model.DefaultBridgeName),
			sudoStep("", nil, "ip", "link", "set", "tap-pool-0", "up"),
			sudoStep("", nil, "ip", "link", "set", model.DefaultBridgeName, "up"),
		},
	}
	d := &Daemon{
		runner: runner,
		config: model.DaemonConfig{
			BridgeName:  model.DefaultBridgeName,
			TapPoolSize: 1,
		},
		closing: make(chan struct{}),
	}

	d.ensureTapPool(context.Background())
	tapName, err := d.acquireTap(context.Background(), "tap-fallback")
	if err != nil {
		t.Fatalf("acquireTap: %v", err)
	}
	if tapName != "tap-pool-0" {
		t.Fatalf("tapName = %q, want tap-pool-0", tapName)
	}
	// Releasing a pooled tap keeps it warm; the next acquire must reuse
	// it without any further ip invocations.
	if err := d.releaseTap(context.Background(), tapName); err != nil {
		t.Fatalf("releaseTap: %v", err)
	}
	tapName, err = d.acquireTap(context.Background(), "tap-fallback")
	if err != nil {
		t.Fatalf("acquireTap second time: %v", err)
	}
	if tapName != "tap-pool-0" {
		t.Fatalf("tapName second = %q, want tap-pool-0", tapName)
	}
	runner.assertExhausted()
}
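For the TAP pool, a minimal sketch of the acquire/release shape the second test assumes. The channel-based implementation is a guess; only the behavior (hand out a warm tap when one is idle, fall back to the caller-supplied name otherwise, park released taps for reuse) is taken from the test.

package daemon

// tapPoolSketch is a hypothetical, channel-based illustration of the
// pooling behavior the second test checks.
type tapPoolSketch struct {
	idle chan string // warmed tap-pool-* devices ready for reuse
}

func newTapPoolSketch(size int) *tapPoolSketch {
	return &tapPoolSketch{idle: make(chan string, size)}
}

func (p *tapPoolSketch) acquire(fallback string) string {
	select {
	case name := <-p.idle:
		return name // no ip(8) calls needed: the tap is already up
	default:
		return fallback // pool drained: caller creates a one-off tap
	}
}

func (p *tapPoolSketch) release(name string) bool {
	select {
	case p.idle <- name:
		return true // kept warm for the next VM
	default:
		return false // pool full: caller should tear the tap down
	}
}

This is also why verify.sh has to treat tap-pool-* devices as reusable capacity: a released tap stays attached to the bridge by design rather than being a cleanup leak.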