Manage image artifacts and show VM create progress

Stop relying on ad-hoc rootfs handling: add image promotion, managed work-seed fingerprint metadata, and lazy self-healing of older managed images after their first create.

Rebuild guest images with baked SSH access, a guest NIC bootstrap, and default opencode services, and add the staged Void kernel/initramfs/modules workflow so void-exp uses a matching Void boot stack.

Replace the opaque blocking vm.create RPC with a begin/status flow that prints live stages in the CLI while still waiting for vsock health and opencode on guest port 4096.

Validate with GOCACHE=/tmp/banger-gocache go test ./... and live void-exp create/delete smoke runs.
This commit is contained in:
Thales Maciel 2026-03-21 14:48:01 -03:00
parent 9f09b0d25c
commit 30f0c0b54a
No known key found for this signature in database
GPG key ID: 33112E6833C34679
37 changed files with 2334 additions and 99 deletions

View file

@ -1,6 +1,10 @@
package api
import "banger/internal/model"
import (
"time"
"banger/internal/model"
)
type Empty struct{}
@ -24,6 +28,32 @@ type VMCreateParams struct {
NoStart bool `json:"no_start,omitempty"`
}
// VMCreateStatusParams identifies an in-flight vm.create operation by its ID
// for the vm.create.status and vm.create.cancel RPCs.
type VMCreateStatusParams struct {
ID string `json:"id"`
}
// VMCreateOperation is a snapshot of an asynchronous vm.create flow. Clients
// poll it via vm.create.status until Done is true; Success and Error then
// describe the outcome, and VM carries the created record on success.
type VMCreateOperation struct {
ID string `json:"id"`
VMID string `json:"vm_id,omitempty"`
VMName string `json:"vm_name,omitempty"`
// Stage is a machine-readable phase token (e.g. "prepare_work_disk");
// Detail is free-form human-readable progress text for that stage.
Stage string `json:"stage,omitempty"`
Detail string `json:"detail,omitempty"`
StartedAt time.Time `json:"started_at,omitempty"`
UpdatedAt time.Time `json:"updated_at,omitempty"`
Done bool `json:"done"`
Success bool `json:"success"`
Error string `json:"error,omitempty"`
VM *model.VMRecord `json:"vm,omitempty"`
}
// VMCreateBeginResult is the vm.create.begin response: the initial state of
// the newly started operation.
type VMCreateBeginResult struct {
Operation VMCreateOperation `json:"operation"`
}
// VMCreateStatusResult is the vm.create.status response: the latest snapshot
// of the polled operation.
type VMCreateStatusResult struct {
Operation VMCreateOperation `json:"operation"`
}
// VMRefParams addresses an existing VM by either its ID or its name.
type VMRefParams struct {
IDOrName string `json:"id_or_name"`
}

View file

@ -46,6 +46,16 @@ var (
vmHealthFunc = func(ctx context.Context, socketPath, idOrName string) (api.VMHealthResult, error) {
return rpc.Call[api.VMHealthResult](ctx, socketPath, "vm.health", api.VMRefParams{IDOrName: idOrName})
}
// vmCreateBeginFunc starts an asynchronous vm.create operation on the daemon.
// Declared as a package-level variable so tests can stub the RPC.
vmCreateBeginFunc = func(ctx context.Context, socketPath string, params api.VMCreateParams) (api.VMCreateBeginResult, error) {
return rpc.Call[api.VMCreateBeginResult](ctx, socketPath, "vm.create.begin", params)
}
// vmCreateStatusFunc polls the daemon for the current state of an operation.
vmCreateStatusFunc = func(ctx context.Context, socketPath, operationID string) (api.VMCreateStatusResult, error) {
return rpc.Call[api.VMCreateStatusResult](ctx, socketPath, "vm.create.status", api.VMCreateStatusParams{ID: operationID})
}
// vmCreateCancelFunc requests best-effort cancellation of an operation; the
// (empty) result payload is discarded and only the RPC error is surfaced.
vmCreateCancelFunc = func(ctx context.Context, socketPath, operationID string) error {
_, err := rpc.Call[api.Empty](ctx, socketPath, "vm.create.cancel", api.VMCreateStatusParams{ID: operationID})
return err
}
vmPortsFunc = func(ctx context.Context, socketPath, idOrName string) (api.VMPortsResult, error) {
return rpc.Call[api.VMPortsResult](ctx, socketPath, "vm.ports", api.VMRefParams{IDOrName: idOrName})
}
@ -323,11 +333,11 @@ func newVMCreateCommand() *cobra.Command {
if err != nil {
return err
}
result, err := rpc.Call[api.VMShowResult](cmd.Context(), layout.SocketPath, "vm.create", params)
vm, err := runVMCreate(cmd.Context(), layout.SocketPath, cmd.ErrOrStderr(), params)
if err != nil {
return err
}
return printVMSummary(cmd.OutOrStdout(), result.VM)
return printVMSummary(cmd.OutOrStdout(), vm)
},
}
cmd.Flags().StringVar(&name, "name", "", "vm name")
@ -575,6 +585,7 @@ func newImageCommand() *cobra.Command {
cmd.AddCommand(
newImageBuildCommand(),
newImageRegisterCommand(),
newImagePromoteCommand(),
newImageListCommand(),
newImageShowCommand(),
newImageDeleteCommand(),
@ -651,6 +662,28 @@ func newImageRegisterCommand() *cobra.Command {
return cmd
}
// newImagePromoteCommand wires "banger image promote <id-or-name>", which
// asks the daemon to copy an unmanaged image's artifacts into the managed
// store and prints the resulting image summary.
func newImagePromoteCommand() *cobra.Command {
	run := func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()
		if err := system.EnsureSudo(ctx); err != nil {
			return err
		}
		layout, _, err := ensureDaemon(ctx)
		if err != nil {
			return err
		}
		result, err := rpc.Call[api.ImageShowResult](ctx, layout.SocketPath, "image.promote", api.ImageRefParams{IDOrName: args[0]})
		if err != nil {
			return err
		}
		return printImageSummary(cmd.OutOrStdout(), result.Image)
	}
	return &cobra.Command{
		Use:   "promote <id-or-name>",
		Short: "Promote an unmanaged image to a managed artifact",
		Args:  exactArgsUsage(1, "usage: banger image promote <id-or-name>"),
		RunE:  run,
	}
}
func newImageListCommand() *cobra.Command {
return &cobra.Command{
Use: "list",
@ -1255,6 +1288,141 @@ type anyWriter interface {
Write(p []byte) (n int, err error)
}
// runVMCreate drives the asynchronous vm.create begin/status flow: it starts
// the operation, polls vm.create.status every 200ms, and renders each stage
// transition to stderr until the operation completes or ctx is cancelled.
//
// On ctx cancellation (or a status error caused by it) a best-effort
// vm.create.cancel is sent on a fresh one-second context so the daemon can
// stop work that outlives the CLI.
func runVMCreate(ctx context.Context, socketPath string, stderr io.Writer, params api.VMCreateParams) (model.VMRecord, error) {
	begin, err := vmCreateBeginFunc(ctx, socketPath, params)
	if err != nil {
		return model.VMRecord{}, err
	}

	// cancelOp is the shared best-effort cancel used on every ctx-abort path;
	// it deliberately uses a detached context because ctx is already dead.
	cancelOp := func(operationID string) {
		cancelCtx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		_ = vmCreateCancelFunc(cancelCtx, socketPath, operationID)
	}

	renderer := newVMCreateProgressRenderer(stderr)
	op := begin.Operation
	renderer.render(op)

	// One reusable ticker instead of a fresh time.After timer per iteration.
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	for {
		if op.Done {
			renderer.render(op)
			if op.Success && op.VM != nil {
				return *op.VM, nil
			}
			if strings.TrimSpace(op.Error) == "" {
				return model.VMRecord{}, errors.New("vm create failed")
			}
			return model.VMRecord{}, errors.New(op.Error)
		}

		select {
		case <-ctx.Done():
			cancelOp(op.ID)
			return model.VMRecord{}, ctx.Err()
		case <-ticker.C:
		}

		status, err := vmCreateStatusFunc(ctx, socketPath, op.ID)
		if err != nil {
			// Distinguish "our context died" from a genuine RPC failure.
			if ctx.Err() != nil {
				cancelOp(op.ID)
				return model.VMRecord{}, ctx.Err()
			}
			return model.VMRecord{}, err
		}
		op = status.Operation
		renderer.render(op)
	}
}
// vmCreateProgressRenderer prints de-duplicated, human-readable progress
// lines for an in-flight vm create operation.
type vmCreateProgressRenderer struct {
out io.Writer
// enabled gates all output; it is false when out is not a character device.
enabled bool
// lastLine suppresses consecutive duplicate progress lines.
lastLine string
}
// newVMCreateProgressRenderer builds a renderer targeting out. Output is
// only enabled when out looks like an interactive terminal (a character
// device); otherwise render becomes a no-op.
func newVMCreateProgressRenderer(out io.Writer) *vmCreateProgressRenderer {
	r := &vmCreateProgressRenderer{out: out}
	r.enabled = writerSupportsProgress(out)
	return r
}
// render prints the formatted progress line for op, skipping empty lines and
// exact repeats of the previously printed line. It is safe to call on a nil
// or disabled renderer.
func (r *vmCreateProgressRenderer) render(op api.VMCreateOperation) {
	if r == nil || !r.enabled {
		return
	}
	next := formatVMCreateProgress(op)
	if next == "" {
		return
	}
	if next == r.lastLine {
		return
	}
	r.lastLine = next
	_, _ = fmt.Fprintln(r.out, next)
}
// writerSupportsProgress reports whether out is an *os.File backed by a
// character device (i.e. an interactive terminal) — the only case in which
// live progress lines are worth emitting.
func writerSupportsProgress(out io.Writer) bool {
	file, isFile := out.(*os.File)
	if !isFile {
		return false
	}
	info, err := file.Stat()
	if err != nil {
		return false
	}
	return info.Mode()&os.ModeCharDevice == os.ModeCharDevice
}
// formatVMCreateProgress renders one human-readable "[vm create] ..." line
// from the operation's stage and detail; it returns "" when neither yields
// printable text.
func formatVMCreateProgress(op api.VMCreateOperation) string {
	label := vmCreateStageLabel(strings.TrimSpace(op.Stage))
	detail := strings.TrimSpace(op.Detail)

	var parts []string
	if label != "" {
		parts = append(parts, label)
	}
	if detail != "" {
		parts = append(parts, detail)
	}
	if len(parts) == 0 {
		return ""
	}
	return "[vm create] " + strings.Join(parts, ": ")
}
// vmCreateStageLabel converts a machine stage token (e.g. "prepare_work_disk")
// into the label shown in CLI progress output. Unknown stages fall back to the
// raw token with underscores replaced by spaces.
func vmCreateStageLabel(stage string) string {
	labels := map[string]string{
		"queued":                "queued",
		"resolve_image":         "resolving image",
		"reserve_vm":            "allocating vm",
		"preflight":             "checking host prerequisites",
		"prepare_rootfs":        "preparing root filesystem",
		"prepare_host_features": "preparing host features",
		"prepare_work_disk":     "preparing work disk",
		"boot_firecracker":      "starting firecracker",
		"wait_vsock_agent":      "waiting for vsock agent",
		"wait_guest_ready":      "waiting for guest services",
		"wait_opencode":         "waiting for opencode",
		"apply_dns":             "publishing dns",
		"apply_nat":             "configuring nat",
		"finalize":              "finalizing",
		"ready":                 "ready",
	}
	if label, ok := labels[strings.TrimSpace(stage)]; ok {
		return label
	}
	return strings.ReplaceAll(stage, "_", " ")
}
func shortID(id string) string {
if len(id) <= 12 {
return id

View file

@ -170,6 +170,17 @@ func TestImageRegisterFlagsExist(t *testing.T) {
}
}
// TestImagePromoteCommandExists verifies the "promote" subcommand is
// registered under the "image" command group.
func TestImagePromoteCommandExists(t *testing.T) {
	root := NewBangerCommand()
	imageCmd, _, err := root.Find([]string{"image"})
	if err != nil {
		t.Fatalf("find image: %v", err)
	}
	_, _, err = imageCmd.Find([]string{"promote"})
	if err != nil {
		t.Fatalf("find promote: %v", err)
	}
}
func TestVMKillFlagsExist(t *testing.T) {
root := NewBangerCommand()
vm, _, err := root.Find([]string{"vm"})
@ -304,6 +315,95 @@ func TestVMCreateParamsFromFlagsRejectsNonPositiveCPUAndMemory(t *testing.T) {
}
}
// TestRunVMCreatePollsUntilDone stubs the begin/status/cancel RPC hooks and
// verifies that runVMCreate polls vm.create.status until the operation
// reports Done, returns the VM carried by the final status, and never issues
// a cancel on the happy path.
func TestRunVMCreatePollsUntilDone(t *testing.T) {
// Save and restore the package-level RPC stubs around the test.
origBegin := vmCreateBeginFunc
origStatus := vmCreateStatusFunc
origCancel := vmCreateCancelFunc
t.Cleanup(func() {
vmCreateBeginFunc = origBegin
vmCreateStatusFunc = origStatus
vmCreateCancelFunc = origCancel
})
// The record the final status response will hand back.
vm := model.VMRecord{
ID: "vm-id",
Name: "devbox",
Spec: model.VMSpec{WorkDiskSizeBytes: model.DefaultWorkDiskSize},
Runtime: model.VMRuntime{
State: model.VMStateRunning,
GuestIP: "172.16.0.2",
DNSName: "devbox.vm",
},
}
// begin returns a not-yet-done operation so runVMCreate must poll.
vmCreateBeginFunc = func(context.Context, string, api.VMCreateParams) (api.VMCreateBeginResult, error) {
return api.VMCreateBeginResult{
Operation: api.VMCreateOperation{
ID: "op-1",
Stage: "prepare_work_disk",
Detail: "cloning work seed",
},
}, nil
}
// First status poll is still in progress; second completes successfully.
statusCalls := 0
vmCreateStatusFunc = func(context.Context, string, string) (api.VMCreateStatusResult, error) {
statusCalls++
if statusCalls == 1 {
return api.VMCreateStatusResult{
Operation: api.VMCreateOperation{
ID: "op-1",
Stage: "wait_opencode",
Detail: "waiting for opencode on guest port 4096",
},
}, nil
}
return api.VMCreateStatusResult{
Operation: api.VMCreateOperation{
ID: "op-1",
Stage: "ready",
Detail: "vm is ready",
Done: true,
Success: true,
VM: &vm,
},
}, nil
}
// Cancellation must never happen when the operation succeeds.
vmCreateCancelFunc = func(context.Context, string, string) error {
t.Fatal("cancel should not be called")
return nil
}
got, err := runVMCreate(context.Background(), "/tmp/bangerd.sock", &bytes.Buffer{}, api.VMCreateParams{Name: "devbox"})
if err != nil {
t.Fatalf("runVMCreate: %v", err)
}
if got.Name != vm.Name || got.Runtime.GuestIP != vm.Runtime.GuestIP {
t.Fatalf("vm = %+v, want %+v", got, vm)
}
// Exactly two polls: one in-progress snapshot, one terminal snapshot.
if statusCalls != 2 {
t.Fatalf("statusCalls = %d, want 2", statusCalls)
}
}
// TestVMCreateProgressRendererSuppressesDuplicateLines checks that identical
// consecutive progress updates are printed only once and that distinct
// updates each produce a formatted line.
func TestVMCreateProgressRendererSuppressesDuplicateLines(t *testing.T) {
	var out bytes.Buffer
	renderer := &vmCreateProgressRenderer{out: &out, enabled: true}

	updates := []api.VMCreateOperation{
		{Stage: "prepare_work_disk", Detail: "cloning work seed"},
		{Stage: "prepare_work_disk", Detail: "cloning work seed"}, // duplicate, must be suppressed
		{Stage: "wait_opencode", Detail: "waiting for opencode on guest port 4096"},
	}
	for _, op := range updates {
		renderer.render(op)
	}

	lines := strings.Split(strings.TrimSpace(out.String()), "\n")
	if len(lines) != 2 {
		t.Fatalf("rendered lines = %q, want 2 lines", out.String())
	}
	if lines[0] != "[vm create] preparing work disk: cloning work seed" {
		t.Fatalf("first line = %q", lines[0])
	}
	if lines[1] != "[vm create] waiting for opencode: waiting for opencode on guest port 4096" {
		t.Fatalf("second line = %q", lines[1])
	}
}
func TestVMSetParamsFromFlagsConflict(t *testing.T) {
if _, err := vmSetParamsFromFlags("devbox", -1, -1, "", true, true); err == nil {
t.Fatal("expected nat conflict error")

View file

@ -56,6 +56,7 @@ func (d *Daemon) registeredCapabilities() []vmCapability {
}
return []vmCapability{
workDiskCapability{},
opencodeCapability{},
dnsCapability{},
natCapability{},
}
@ -103,6 +104,14 @@ func (d *Daemon) prepareCapabilityHosts(ctx context.Context, vm *model.VMRecord,
func (d *Daemon) postStartCapabilities(ctx context.Context, vm model.VMRecord, image model.Image) error {
for _, capability := range d.registeredCapabilities() {
switch capability.Name() {
case "dns":
vmCreateStage(ctx, "apply_dns", "publishing vm dns record")
case "nat":
if vm.Spec.NATEnabled {
vmCreateStage(ctx, "apply_nat", "configuring nat")
}
}
if hook, ok := capability.(postStartCapability); ok {
if err := hook.PostStart(ctx, d, vm, image); err != nil {
return err
@ -191,10 +200,11 @@ func (workDiskCapability) ContributeMachine(cfg *firecracker.MachineConfig, vm m
}
func (workDiskCapability) PrepareHost(ctx context.Context, d *Daemon, vm *model.VMRecord, image model.Image) error {
if err := d.ensureWorkDisk(ctx, vm, image); err != nil {
prep, err := d.ensureWorkDisk(ctx, vm, image)
if err != nil {
return err
}
return d.ensureAuthorizedKeyOnWorkDisk(ctx, vm)
return d.ensureAuthorizedKeyOnWorkDisk(ctx, vm, image, prep)
}
func (workDiskCapability) AddDoctorChecks(_ context.Context, d *Daemon, report *system.Report) {

View file

@ -143,3 +143,15 @@ func TestContributeHooksPopulateGuestAndMachineConfig(t *testing.T) {
t.Fatalf("guest fstab = %q, want %q", fstab, want)
}
}
// TestRegisteredCapabilitiesIncludeOpencode pins the capability registration
// order, including the opencode capability between work-disk and dns.
func TestRegisteredCapabilitiesIncludeOpencode(t *testing.T) {
	want := []string{"work-disk", "opencode", "dns", "nat"}
	d := &Daemon{}
	var names []string
	for _, capability := range d.registeredCapabilities() {
		names = append(names, capability.Name())
	}
	if !reflect.DeepEqual(names, want) {
		t.Fatalf("capabilities = %v, want %v", names, want)
	}
}

View file

@ -32,6 +32,8 @@ type Daemon struct {
runner system.CommandRunner
logger *slog.Logger
mu sync.Mutex
createOpsMu sync.Mutex
createOps map[string]*vmCreateOperationState
vmLocksMu sync.Mutex
vmLocks map[string]*sync.Mutex
tapPoolMu sync.Mutex
@ -249,6 +251,27 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
}
vm, err := d.CreateVM(ctx, params)
return marshalResultOrError(api.VMShowResult{VM: vm}, err)
case "vm.create.begin":
params, err := rpc.DecodeParams[api.VMCreateParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
op, err := d.BeginVMCreate(ctx, params)
return marshalResultOrError(api.VMCreateBeginResult{Operation: op}, err)
case "vm.create.status":
params, err := rpc.DecodeParams[api.VMCreateStatusParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
op, err := d.VMCreateStatus(ctx, params.ID)
return marshalResultOrError(api.VMCreateStatusResult{Operation: op}, err)
case "vm.create.cancel":
params, err := rpc.DecodeParams[api.VMCreateStatusParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
err = d.CancelVMCreate(ctx, params.ID)
return marshalResultOrError(api.Empty{}, err)
case "vm.list":
vms, err := d.store.ListVMs(ctx)
return marshalResultOrError(api.VMListResult{VMs: vms}, err)
@ -376,6 +399,13 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
}
image, err := d.RegisterImage(ctx, params)
return marshalResultOrError(api.ImageShowResult{Image: image}, err)
case "image.promote":
params, err := rpc.DecodeParams[api.ImageRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
image, err := d.PromoteImage(ctx, params.IDOrName)
return marshalResultOrError(api.ImageShowResult{Image: image}, err)
case "image.delete":
params, err := rpc.DecodeParams[api.ImageRefParams](req)
if err != nil {
@ -405,6 +435,7 @@ func (d *Daemon) backgroundLoop() {
if err := d.stopStaleVMs(context.Background()); err != nil && d.logger != nil {
d.logger.Error("background stale sweep failed", "error", err.Error())
}
d.pruneVMCreateOperations(time.Now().Add(-10 * time.Minute))
}
}
}

View file

@ -2,6 +2,7 @@ package daemon
import (
"bufio"
"bytes"
"context"
"encoding/json"
"net"
@ -13,6 +14,7 @@ import (
"banger/internal/api"
"banger/internal/model"
"banger/internal/paths"
"banger/internal/rpc"
"banger/internal/store"
)
@ -368,6 +370,178 @@ func TestRegisterImageRejectsManagedOverwrite(t *testing.T) {
}
}
// TestPromoteImageCopiesArtifactsAndPreservesIdentity promotes an unmanaged
// image and verifies that: the rootfs and work seed are copied into the
// managed images directory, the image keeps its ID/name/CreatedAt while
// UpdatedAt advances, boot-support paths (kernel/initrd/modules/packages)
// are untouched, the store reflects the promoted record, and an existing VM
// keeps its image reference.
func TestPromoteImageCopiesArtifactsAndPreservesIdentity(t *testing.T) {
dir := t.TempDir()
rootfs, kernel, initrd, modulesDir, packages := writeDefaultImageArtifacts(t, dir)
// Unmanaged work seed with known content so the managed copy can be compared.
workSeed := filepath.Join(dir, "rootfs-docker.work-seed.ext4")
workSeedContent := []byte("seed-data")
if err := os.WriteFile(workSeed, workSeedContent, 0o644); err != nil {
t.Fatalf("WriteFile(workSeed): %v", err)
}
db := openDefaultImageStore(t, dir)
now := time.Date(2026, time.March, 20, 12, 0, 0, 0, time.UTC)
existing := model.Image{
ID: "promote-image-id",
Name: "default",
Managed: false,
RootfsPath: rootfs,
WorkSeedPath: workSeed,
KernelPath: kernel,
InitrdPath: initrd,
ModulesDir: modulesDir,
PackagesPath: packages,
Docker: true,
CreatedAt: now,
UpdatedAt: now,
}
if err := db.UpsertImage(context.Background(), existing); err != nil {
t.Fatalf("UpsertImage: %v", err)
}
// A VM referencing the image must survive promotion unchanged.
vm := testVM("uses-default", existing.ID, "172.16.0.44")
if err := db.UpsertVM(context.Background(), vm); err != nil {
t.Fatalf("UpsertVM: %v", err)
}
d := &Daemon{
layout: modelPathsLayoutForTest(dir),
store: db,
}
image, err := d.PromoteImage(context.Background(), "default")
if err != nil {
t.Fatalf("PromoteImage: %v", err)
}
if !image.Managed {
t.Fatal("promoted image should be managed")
}
// Identity must be preserved; only the managed artifact locations change.
if image.ID != existing.ID || image.Name != existing.Name {
t.Fatalf("promoted image identity changed: %+v", image)
}
if !image.CreatedAt.Equal(existing.CreatedAt) {
t.Fatalf("CreatedAt = %s, want preserved %s", image.CreatedAt, existing.CreatedAt)
}
if !image.UpdatedAt.After(existing.UpdatedAt) {
t.Fatalf("UpdatedAt = %s, want newer than %s", image.UpdatedAt, existing.UpdatedAt)
}
// Managed copies land under <ImagesDir>/<image ID> with canonical names.
wantArtifactDir := filepath.Join(d.layout.ImagesDir, existing.ID)
if image.ArtifactDir != wantArtifactDir {
t.Fatalf("ArtifactDir = %q, want %q", image.ArtifactDir, wantArtifactDir)
}
if image.RootfsPath != filepath.Join(wantArtifactDir, "rootfs.ext4") {
t.Fatalf("RootfsPath = %q, want managed copy", image.RootfsPath)
}
if image.WorkSeedPath != filepath.Join(wantArtifactDir, "work-seed.ext4") {
t.Fatalf("WorkSeedPath = %q, want managed copy", image.WorkSeedPath)
}
if image.KernelPath != kernel || image.InitrdPath != initrd || image.ModulesDir != modulesDir || image.PackagesPath != packages {
t.Fatalf("boot support paths changed unexpectedly: %+v", image)
}
// The managed copies must be byte-identical to the originals.
rootfsContent, err := os.ReadFile(rootfs)
if err != nil {
t.Fatalf("ReadFile(rootfs): %v", err)
}
managedRootfsContent, err := os.ReadFile(image.RootfsPath)
if err != nil {
t.Fatalf("ReadFile(managed rootfs): %v", err)
}
if !bytes.Equal(managedRootfsContent, rootfsContent) {
t.Fatal("managed rootfs copy content mismatch")
}
managedWorkSeedContent, err := os.ReadFile(image.WorkSeedPath)
if err != nil {
t.Fatalf("ReadFile(managed work seed): %v", err)
}
if !bytes.Equal(managedWorkSeedContent, workSeedContent) {
t.Fatal("managed work seed copy content mismatch")
}
// The store must have been updated with the promoted record.
got, err := db.GetImageByName(context.Background(), "default")
if err != nil {
t.Fatalf("GetImageByName: %v", err)
}
if got.RootfsPath != image.RootfsPath || !got.Managed || got.ArtifactDir != image.ArtifactDir {
t.Fatalf("stored promoted image = %+v, want %+v", got, image)
}
// Promotion keeps the image ID, so the VM reference stays valid.
gotVM, err := db.GetVMByID(context.Background(), vm.ID)
if err != nil {
t.Fatalf("GetVMByID: %v", err)
}
if gotVM.ImageID != existing.ID {
t.Fatalf("VM image ID = %q, want preserved %q", gotVM.ImageID, existing.ID)
}
}
// TestPromoteImageRejectsManagedImage verifies that promoting an image that
// is already managed fails with an "already managed" error.
func TestPromoteImageRejectsManagedImage(t *testing.T) {
	dir := t.TempDir()
	rootfs, kernel, initrd, modulesDir, packages := writeDefaultImageArtifacts(t, dir)
	db := openDefaultImageStore(t, dir)
	created := time.Date(2026, time.March, 20, 12, 0, 0, 0, time.UTC)
	managed := model.Image{
		ID:           "managed-id",
		Name:         "default",
		Managed:      true,
		ArtifactDir:  filepath.Join(dir, "images", "managed-id"),
		RootfsPath:   rootfs,
		KernelPath:   kernel,
		InitrdPath:   initrd,
		ModulesDir:   modulesDir,
		PackagesPath: packages,
		CreatedAt:    created,
		UpdatedAt:    created,
	}
	if err := db.UpsertImage(context.Background(), managed); err != nil {
		t.Fatalf("UpsertImage: %v", err)
	}
	d := &Daemon{
		layout: modelPathsLayoutForTest(dir),
		store:  db,
	}
	_, err := d.PromoteImage(context.Background(), "default")
	if err == nil || !strings.Contains(err.Error(), "already managed") {
		t.Fatalf("PromoteImage(managed) error = %v", err)
	}
}
// TestPromoteImageSkipsMissingWorkSeed promotes an image whose recorded work
// seed file does not exist on disk and verifies that promotion still succeeds,
// clears WorkSeedPath, and does not create a managed work-seed copy.
func TestPromoteImageSkipsMissingWorkSeed(t *testing.T) {
dir := t.TempDir()
rootfs, kernel, initrd, modulesDir, packages := writeDefaultImageArtifacts(t, dir)
db := openDefaultImageStore(t, dir)
now := time.Date(2026, time.March, 20, 12, 0, 0, 0, time.UTC)
// WorkSeedPath intentionally points at a file that was never written.
existing := model.Image{
ID: "promote-missing-seed",
Name: "default",
Managed: false,
RootfsPath: rootfs,
WorkSeedPath: filepath.Join(dir, "missing.work-seed.ext4"),
KernelPath: kernel,
InitrdPath: initrd,
ModulesDir: modulesDir,
PackagesPath: packages,
CreatedAt: now,
UpdatedAt: now,
}
if err := db.UpsertImage(context.Background(), existing); err != nil {
t.Fatalf("UpsertImage: %v", err)
}
d := &Daemon{
layout: modelPathsLayoutForTest(dir),
store: db,
}
image, err := d.PromoteImage(context.Background(), "default")
if err != nil {
t.Fatalf("PromoteImage: %v", err)
}
if image.WorkSeedPath != "" {
t.Fatalf("WorkSeedPath = %q, want empty for missing source work seed", image.WorkSeedPath)
}
// No managed copy should have been produced for the absent seed.
if _, err := os.Stat(filepath.Join(image.ArtifactDir, "work-seed.ext4")); !os.IsNotExist(err) {
t.Fatalf("managed work-seed should not exist, stat error = %v", err)
}
}
func openDefaultImageStore(t *testing.T, dir string) *store.Store {
t.Helper()
db, err := store.Open(filepath.Join(dir, "state.db"))
@ -405,6 +579,12 @@ func writeDefaultImageArtifacts(t *testing.T, dir string) (rootfs, kernel, initr
return rootfs, kernel, initrd, modulesDir, packages
}
// modelPathsLayoutForTest returns a minimal paths.Layout rooted at dir,
// populating only the ImagesDir field that the image tests rely on.
func modelPathsLayoutForTest(dir string) paths.Layout {
	var layout paths.Layout
	layout.ImagesDir = filepath.Join(dir, "images")
	return layout
}
func TestStartVMDNSFailsWhenAddressBusy(t *testing.T) {
t.Parallel()

View file

@ -2,12 +2,17 @@ package daemon
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"os"
"path/filepath"
"strconv"
"testing"
"banger/internal/guest"
"banger/internal/model"
)
@ -34,7 +39,7 @@ func TestEnsureWorkDiskClonesSeedImageAndResizes(t *testing.T) {
image := testImage("image-seeded")
image.WorkSeedPath = seedPath
if err := d.ensureWorkDisk(context.Background(), &vm, image); err != nil {
if _, err := d.ensureWorkDisk(context.Background(), &vm, image); err != nil {
t.Fatalf("ensureWorkDisk: %v", err)
}
runner.assertExhausted()
@ -90,3 +95,38 @@ func TestTapPoolWarmsAndReusesIdleTap(t *testing.T) {
}
runner.assertExhausted()
}
// TestEnsureAuthorizedKeyOnWorkDiskSkipsRepairForMatchingSeededFingerprint
// generates a throwaway SSH key, derives its fingerprint, and checks that
// when a work disk was cloned from a seed whose recorded fingerprint matches
// the daemon's configured key, ensureAuthorizedKeyOnWorkDisk runs no repair
// commands at all (the scripted runner would fail on any unexpected command,
// and assertExhausted confirms none were queued or consumed).
func TestEnsureAuthorizedKeyOnWorkDiskSkipsRepairForMatchingSeededFingerprint(t *testing.T) {
t.Parallel()
// 1024-bit is deliberately small: this key is only fingerprinted, never
// used for a real SSH session.
privateKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
t.Fatalf("GenerateKey: %v", err)
}
privateKeyPEM := pem.EncodeToMemory(&pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
})
sshKeyPath := filepath.Join(t.TempDir(), "id_rsa")
if err := os.WriteFile(sshKeyPath, privateKeyPEM, 0o600); err != nil {
t.Fatalf("WriteFile(private key): %v", err)
}
fingerprint, err := guest.AuthorizedPublicKeyFingerprint(sshKeyPath)
if err != nil {
t.Fatalf("AuthorizedPublicKeyFingerprint: %v", err)
}
// An empty scripted runner: any command execution would fail the test.
runner := &scriptedRunner{t: t}
d := &Daemon{
runner: runner,
config: model.DaemonConfig{SSHKeyPath: sshKeyPath},
}
vm := testVM("seeded-fastpath", "image-seeded-fastpath", "172.16.0.62")
vm.Runtime.WorkDiskPath = filepath.Join(t.TempDir(), "root.ext4")
// Seed fingerprint matches the daemon key, so the fast path must apply.
image := model.Image{SeededSSHPublicKeyFingerprint: fingerprint}
if err := d.ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm, image, workDiskPreparation{ClonedFromSeed: true}); err != nil {
t.Fatalf("ensureAuthorizedKeyOnWorkDisk: %v", err)
}
runner.assertExhausted()
}

View file

@ -0,0 +1,86 @@
package daemon
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"banger/internal/guest"
"banger/internal/model"
"banger/internal/system"
)
// seedAuthorizedKeyOnExt4Image mounts the ext4 image at imagePath and merges
// the daemon's public SSH key into <mount>/.ssh/authorized_keys, returning
// the key's fingerprint. It is a no-op (empty fingerprint, nil error) when no
// SSH key path is configured. All filesystem mutations inside the mount run
// via sudo because the image is typically root-owned.
func (d *Daemon) seedAuthorizedKeyOnExt4Image(ctx context.Context, imagePath string) (string, error) {
if strings.TrimSpace(d.config.SSHKeyPath) == "" {
return "", nil
}
fingerprint, err := guest.AuthorizedPublicKeyFingerprint(d.config.SSHKeyPath)
if err != nil {
return "", fmt.Errorf("derive authorized ssh key fingerprint: %w", err)
}
publicKey, err := guest.AuthorizedPublicKey(d.config.SSHKeyPath)
if err != nil {
return "", fmt.Errorf("derive authorized ssh key: %w", err)
}
// Mount read-write; cleanup unmounts and removes the temp mount point.
mountDir, cleanup, err := system.MountTempDir(ctx, d.runner, imagePath, false)
if err != nil {
return "", err
}
defer cleanup()
// Normalize the image layout before touching .ssh (helper defined elsewhere;
// presumably flattens a nested home directory — confirm against its docs).
if err := d.flattenNestedWorkHome(ctx, mountDir); err != nil {
return "", err
}
sshDir := filepath.Join(mountDir, ".ssh")
if _, err := d.runner.RunSudo(ctx, "mkdir", "-p", sshDir); err != nil {
return "", err
}
if _, err := d.runner.RunSudo(ctx, "chmod", "700", sshDir); err != nil {
return "", err
}
authorizedKeysPath := filepath.Join(sshDir, "authorized_keys")
// Best-effort read: a missing or unreadable authorized_keys is treated as
// empty and the merge starts from scratch.
existing, err := d.runner.RunSudo(ctx, "cat", authorizedKeysPath)
if err != nil {
existing = nil
}
merged := mergeAuthorizedKey(existing, publicKey)
// Stage the merged contents in a host temp file, then install it into the
// mounted image with mode 600 in a single sudo step.
tmpFile, err := os.CreateTemp("", "banger-image-authorized-keys-*")
if err != nil {
return "", err
}
tmpPath := tmpFile.Name()
if _, err := tmpFile.Write(merged); err != nil {
_ = tmpFile.Close()
_ = os.Remove(tmpPath)
return "", err
}
if err := tmpFile.Close(); err != nil {
_ = os.Remove(tmpPath)
return "", err
}
defer os.Remove(tmpPath)
if _, err := d.runner.RunSudo(ctx, "install", "-m", "600", tmpPath, authorizedKeysPath); err != nil {
return "", err
}
return fingerprint, nil
}
// refreshManagedWorkSeedFingerprint lazily re-seeds a managed image's work
// seed with the daemon's authorized SSH key and persists the new fingerprint
// on the image record when it changed. Images that are not managed, have no
// work seed, or have no target fingerprint are left untouched.
func (d *Daemon) refreshManagedWorkSeedFingerprint(ctx context.Context, image model.Image, fingerprint string) error {
	switch {
	case !image.Managed:
		return nil
	case strings.TrimSpace(image.WorkSeedPath) == "":
		return nil
	case strings.TrimSpace(fingerprint) == "":
		return nil
	}
	seeded, err := d.seedAuthorizedKeyOnExt4Image(ctx, image.WorkSeedPath)
	if err != nil {
		return err
	}
	// Nothing was seeded, or the stored fingerprint is already current.
	if seeded == "" || seeded == image.SeededSSHPublicKeyFingerprint {
		return nil
	}
	image.SeededSSHPublicKeyFingerprint = seeded
	image.UpdatedAt = model.Now()
	return d.store.UpsertImage(ctx, image)
}

View file

@ -14,8 +14,10 @@ import (
"banger/internal/firecracker"
"banger/internal/guest"
"banger/internal/guestnet"
"banger/internal/hostnat"
"banger/internal/model"
"banger/internal/opencode"
"banger/internal/system"
"banger/internal/vsockagent"
)
@ -103,6 +105,10 @@ func (d *Daemon) runImageBuildNative(ctx context.Context, spec imageBuildSpec) (
return err
}
defer client.Close()
authorizedKey, err := guest.AuthorizedPublicKey(d.config.SSHKeyPath)
if err != nil {
return err
}
helperBytes, err := os.ReadFile(d.config.VSockAgentPath)
if err != nil {
@ -117,7 +123,7 @@ func (d *Daemon) runImageBuildNative(ctx context.Context, spec imageBuildSpec) (
if err := writeBuildLog(spec.BuildLog, "configuring guest"); err != nil {
return err
}
if err := client.RunScript(ctx, buildProvisionScript(vm.Name, d.config.DefaultDNS, packages, spec.InstallDocker), spec.BuildLog); err != nil {
if err := client.RunScript(ctx, buildProvisionScript(vm.Name, d.config.DefaultDNS, string(authorizedKey), packages, spec.InstallDocker), spec.BuildLog); err != nil {
return err
}
if strings.TrimSpace(spec.ModulesDir) != "" {
@ -250,7 +256,7 @@ func (d *Daemon) shutdownImageBuildVM(ctx context.Context, vm imageBuildVM) erro
return d.waitForExit(ctx, vm.PID, vm.APISock, 15*time.Second)
}
func buildProvisionScript(vmName, dnsServer string, packages []string, installDocker bool) string {
func buildProvisionScript(vmName, dnsServer, authorizedKey string, packages []string, installDocker bool) string {
var script bytes.Buffer
script.WriteString("set -euo pipefail\n")
fmt.Fprintf(&script, "printf 'nameserver %%s\\n' %s > /etc/resolv.conf\n", shellQuote(dnsServer))
@ -260,11 +266,14 @@ func buildProvisionScript(vmName, dnsServer string, packages []string, installDo
script.WriteString("sed -i '\\|^/dev/vdb[[:space:]]\\+/home[[:space:]]|d; \\|^/dev/vdc[[:space:]]\\+/var[[:space:]]|d' /etc/fstab\n")
script.WriteString("if ! grep -q '^tmpfs /run ' /etc/fstab; then echo 'tmpfs /run tmpfs defaults,nodev,nosuid,mode=0755 0 0' >> /etc/fstab; fi\n")
script.WriteString("if ! grep -q '^tmpfs /tmp ' /etc/fstab; then echo 'tmpfs /tmp tmpfs defaults,nodev,nosuid,mode=1777 0 0' >> /etc/fstab; fi\n")
appendAuthorizedKeySetup(&script, authorizedKey)
script.WriteString("apt-get update\n")
script.WriteString("DEBIAN_FRONTEND=noninteractive apt-get -y upgrade\n")
fmt.Fprintf(&script, "PACKAGES=%s\n", shellArray(packages))
script.WriteString("DEBIAN_FRONTEND=noninteractive apt-get -y install \"${PACKAGES[@]}\"\n")
appendGuestNetworkSetup(&script)
appendMiseSetup(&script)
appendOpenCodeServiceSetup(&script)
appendTmuxSetup(&script)
appendVSockPingSetup(&script)
if installDocker {
@ -279,6 +288,15 @@ func buildProvisionScript(vmName, dnsServer string, packages []string, installDo
return script.String()
}
func appendAuthorizedKeySetup(script *bytes.Buffer, authorizedKey string) {
script.WriteString("mkdir -p /root/.ssh\n")
script.WriteString("chmod 700 /root/.ssh\n")
script.WriteString("cat > /root/.ssh/authorized_keys <<'EOF'\n")
script.WriteString(strings.TrimSpace(authorizedKey))
script.WriteString("\nEOF\n")
script.WriteString("chmod 600 /root/.ssh/authorized_keys\n")
}
// buildModulesCommand returns a single shell command that extracts a kernel
// modules tarball from stdin into /lib/modules, runs depmod for modulesBase,
// pre-loads the netfilter/overlay modules Docker needs, and applies the
// bridge/forwarding sysctls (failures of `sysctl --system` are tolerated).
// modulesBase is shell-quoted before being interpolated into the heredoc.
func buildModulesCommand(modulesBase string) string {
return fmt.Sprintf("bash -se <<'EOF'\nset -euo pipefail\nmkdir -p /lib/modules\ntar -C /lib/modules -xf -\ndepmod -a %s\nmkdir -p /etc/modules-load.d\nprintf 'nf_tables\\nnft_chain_nat\\nveth\\nbr_netfilter\\noverlay\\n' > /etc/modules-load.d/docker-netfilter.conf\nmkdir -p /etc/sysctl.d\ncat > /etc/sysctl.d/99-docker.conf <<'SYSCTL'\nnet.bridge.bridge-nf-call-iptables = 1\nnet.bridge.bridge-nf-call-ip6tables = 1\nnet.ipv4.ip_forward = 1\nSYSCTL\nsysctl --system >/dev/null 2>&1 || true\nEOF", shellQuote(modulesBase))
}
@ -286,6 +304,9 @@ func buildModulesCommand(modulesBase string) string {
func appendMiseSetup(script *bytes.Buffer) {
fmt.Fprintf(script, "curl -fsSL https://mise.run | MISE_INSTALL_PATH=%s MISE_VERSION=%s sh\n", shellQuote(defaultMiseInstallPath), shellQuote(defaultMiseVersion))
fmt.Fprintf(script, "%s use -g %s\n", shellQuote(defaultMiseInstallPath), shellQuote(defaultOpenCodeTool))
fmt.Fprintf(script, "%s reshim\n", shellQuote(defaultMiseInstallPath))
fmt.Fprintf(script, "if [[ ! -e %s ]]; then echo 'opencode shim not found after mise install' >&2; exit 1; fi\n", shellQuote(opencode.ShimPath))
fmt.Fprintf(script, "ln -snf %s %s\n", shellQuote(opencode.ShimPath), shellQuote(opencode.GuestBinaryPath))
script.WriteString("mkdir -p /etc/profile.d\n")
script.WriteString("cat > /etc/profile.d/mise.sh <<'EOF'\n")
fmt.Fprintf(script, "if [ -n \"${BASH_VERSION:-}\" ] && [ -x %s ]; then\n", shellQuote(defaultMiseInstallPath))
@ -296,6 +317,28 @@ func appendMiseSetup(script *bytes.Buffer) {
appendLineIfMissing(script, "/etc/bash.bashrc", defaultMiseActivateLine)
}
// appendGuestNetworkSetup emits shell that installs the guest NIC bootstrap
// script and its systemd unit, then enables the unit immediately when
// systemctl is available. Enable failures are tolerated so non-systemd
// guests still provision.
func appendGuestNetworkSetup(script *bytes.Buffer) {
	// writeFile emits a quoted heredoc plus a chmod for one installed file.
	writeFile := func(path, content, mode string) {
		script.WriteString("cat > " + path + " <<'EOF'\n")
		script.WriteString(content)
		script.WriteString("EOF\n")
		script.WriteString("chmod " + mode + " " + path + "\n")
	}
	script.WriteString("mkdir -p /usr/local/libexec /etc/systemd/system\n")
	writeFile(guestnet.GuestScriptPath, guestnet.BootstrapScript(), "0755")
	writeFile("/etc/systemd/system/"+guestnet.SystemdServiceName, guestnet.SystemdServiceUnit(), "0644")
	script.WriteString("if command -v systemctl >/dev/null 2>&1; then systemctl daemon-reload || true; systemctl enable --now " + guestnet.SystemdServiceName + " || true; fi\n")
}
// appendOpenCodeServiceSetup emits shell that installs the opencode systemd
// unit and enables it immediately when systemctl is available. Enable
// failures are tolerated so non-systemd guests still provision.
func appendOpenCodeServiceSetup(script *bytes.Buffer) {
	unitPath := "/etc/systemd/system/" + opencode.ServiceName
	script.WriteString("mkdir -p /etc/systemd/system\n")
	script.WriteString("cat > " + unitPath + " <<'EOF'\n")
	script.WriteString(opencode.ServiceUnit())
	script.WriteString("EOF\n")
	script.WriteString("chmod 0644 " + unitPath + "\n")
	script.WriteString("if command -v systemctl >/dev/null 2>&1; then systemctl daemon-reload || true; systemctl enable --now " + opencode.ServiceName + " || true; fi\n")
}
func appendTmuxSetup(script *bytes.Buffer) {
fmt.Fprintf(script, "TMUX_PLUGIN_DIR=%s\n", shellQuote(defaultTMUXPluginDir))
fmt.Fprintf(script, "TMUX_RESURRECT_DIR=%s\n", shellQuote(defaultTMUXResurrectDir))

View file

@ -8,14 +8,28 @@ import (
func TestBuildProvisionScriptInstallsDefaultTools(t *testing.T) {
t.Parallel()
script := buildProvisionScript("devbox", "1.1.1.1", []string{"git", "curl"}, false)
script := buildProvisionScript("devbox", "1.1.1.1", "ssh-ed25519 AAAATESTKEY banger", []string{"git", "curl"}, false)
for _, snippet := range []string{
"mkdir -p /root/.ssh",
"cat > /root/.ssh/authorized_keys <<'EOF'",
"ssh-ed25519 AAAATESTKEY banger",
"cat > /usr/local/libexec/banger-network-bootstrap <<'EOF'",
"ip addr replace \"$guest_ip/$prefix\" dev \"$iface\"",
"cat > /etc/systemd/system/banger-network.service <<'EOF'",
"systemctl enable --now banger-network.service || true",
"curl -fsSL https://mise.run | MISE_INSTALL_PATH='/usr/local/bin/mise' MISE_VERSION='v2025.12.0' sh",
"'/usr/local/bin/mise' use -g 'github:anomalyco/opencode'",
"'/usr/local/bin/mise' reshim",
"if [[ ! -e '/root/.local/share/mise/shims/opencode' ]]; then echo 'opencode shim not found after mise install' >&2; exit 1; fi",
"ln -snf '/root/.local/share/mise/shims/opencode' '/usr/local/bin/opencode'",
"cat > /etc/profile.d/mise.sh <<'EOF'",
"if [ -n \"${BASH_VERSION:-}\" ] && [ -x '/usr/local/bin/mise' ]; then",
`eval "$(/usr/local/bin/mise activate bash)"`,
`if ! grep -Fqx 'eval "$(/usr/local/bin/mise activate bash)"' '/etc/bash.bashrc'; then`,
"cat > /etc/systemd/system/banger-opencode.service <<'EOF'",
"RequiresMountsFor=/root",
"ExecStart=/usr/local/bin/opencode serve --hostname 0.0.0.0 --port 4096",
"systemctl enable --now banger-opencode.service || true",
`git clone --depth 1 'https://github.com/tmux-plugins/tpm' "$TMUX_PLUGIN_DIR/tpm"`,
`git clone --depth 1 'https://github.com/tmux-plugins/tmux-resurrect' "$TMUX_PLUGIN_DIR/tmux-resurrect"`,
`git clone --depth 1 'https://github.com/tmux-plugins/tmux-continuum' "$TMUX_PLUGIN_DIR/tmux-continuum"`,

View file

@ -103,26 +103,33 @@ func (d *Daemon) BuildImage(ctx context.Context, params api.ImageBuildParams) (i
_ = os.RemoveAll(artifactDir)
return model.Image{}, err
}
seededSSHPublicKeyFingerprint, err := d.seedAuthorizedKeyOnExt4Image(ctx, workSeedPath)
if err != nil {
_ = logFile.Sync()
_ = os.RemoveAll(artifactDir)
return model.Image{}, err
}
if err := writePackagesMetadata(rootfsPath, d.config.DefaultPackagesFile); err != nil {
_ = logFile.Sync()
_ = os.RemoveAll(artifactDir)
return model.Image{}, err
}
image = model.Image{
ID: id,
Name: name,
Managed: true,
ArtifactDir: artifactDir,
RootfsPath: rootfsPath,
WorkSeedPath: workSeedPath,
KernelPath: kernelPath,
InitrdPath: initrdPath,
ModulesDir: modulesDir,
PackagesPath: d.config.DefaultPackagesFile,
BuildSize: params.Size,
Docker: params.Docker,
CreatedAt: now,
UpdatedAt: now,
ID: id,
Name: name,
Managed: true,
ArtifactDir: artifactDir,
RootfsPath: rootfsPath,
WorkSeedPath: workSeedPath,
KernelPath: kernelPath,
InitrdPath: initrdPath,
ModulesDir: modulesDir,
PackagesPath: d.config.DefaultPackagesFile,
BuildSize: params.Size,
SeededSSHPublicKeyFingerprint: seededSSHPublicKeyFingerprint,
Docker: params.Docker,
CreatedAt: now,
UpdatedAt: now,
}
if err := d.store.UpsertImage(ctx, image); err != nil {
return model.Image{}, err
@ -220,6 +227,105 @@ func (d *Daemon) RegisterImage(ctx context.Context, params api.ImageRegisterPara
return image, nil
}
// PromoteImage converts an externally registered (non-managed) image into a
// managed one: it copies the rootfs (and work seed, when present) into a
// staged directory under the images dir, bakes SSH access into the staged
// seed, atomically renames the stage into place, and persists the record.
//
// The whole operation runs under d.mu, so no other daemon operation can
// mutate image state while the copy is in flight.
func (d *Daemon) PromoteImage(ctx context.Context, idOrName string) (image model.Image, err error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	op := d.beginOperation("image.promote")
	// Log terminal status from the named results captured by this closure.
	defer func() {
		if err != nil {
			op.fail(err, imageLogAttrs(image)...)
			return
		}
		op.done(imageLogAttrs(image)...)
	}()
	image, err = d.FindImage(ctx, idOrName)
	if err != nil {
		return model.Image{}, err
	}
	if image.Managed {
		return model.Image{}, fmt.Errorf("image %s is already managed", image.Name)
	}
	// Refuse to promote when the registered artifacts have gone missing.
	if err := validateImagePromotePaths(image.RootfsPath, image.KernelPath, image.InitrdPath, image.ModulesDir, image.PackagesPath); err != nil {
		return model.Image{}, err
	}
	if strings.TrimSpace(d.layout.ImagesDir) == "" {
		return model.Image{}, errors.New("images dir is not configured")
	}
	if err := os.MkdirAll(d.layout.ImagesDir, 0o755); err != nil {
		return model.Image{}, err
	}
	artifactDir := filepath.Join(d.layout.ImagesDir, image.ID)
	if _, statErr := os.Stat(artifactDir); statErr == nil {
		return model.Image{}, fmt.Errorf("artifact dir already exists: %s", artifactDir)
	} else if !os.IsNotExist(statErr) {
		return model.Image{}, statErr
	}
	// Copy into a temp stage dir first so a failed promote never leaves a
	// half-populated artifact dir behind.
	stageDir, err := os.MkdirTemp(d.layout.ImagesDir, image.ID+".promote-")
	if err != nil {
		return model.Image{}, err
	}
	cleanupStage := true
	defer func() {
		if cleanupStage {
			_ = os.RemoveAll(stageDir)
		}
	}()
	rootfsPath := filepath.Join(stageDir, "rootfs.ext4")
	op.stage("copy_rootfs", "source_rootfs_path", image.RootfsPath, "target_rootfs_path", rootfsPath)
	if err := system.CopyFilePreferClone(image.RootfsPath, rootfsPath); err != nil {
		return model.Image{}, err
	}
	workSeedPath := ""
	if image.WorkSeedPath != "" {
		// A registered seed may have been deleted since registration; treat
		// that as "no seed" rather than failing the whole promote.
		if _, statErr := os.Stat(image.WorkSeedPath); statErr != nil {
			if os.IsNotExist(statErr) {
				op.stage("skip_missing_work_seed", "source_work_seed_path", image.WorkSeedPath)
				image.WorkSeedPath = ""
			} else {
				return model.Image{}, statErr
			}
		}
	}
	if image.WorkSeedPath != "" {
		workSeedPath = filepath.Join(stageDir, "work-seed.ext4")
		op.stage("copy_work_seed", "source_work_seed_path", image.WorkSeedPath, "target_work_seed_path", workSeedPath)
		if err := system.CopyFilePreferClone(image.WorkSeedPath, workSeedPath); err != nil {
			return model.Image{}, err
		}
		// Bake the daemon's SSH key into the staged seed and remember its
		// fingerprint so later VM creates can skip per-VM key repair.
		image.SeededSSHPublicKeyFingerprint, err = d.seedAuthorizedKeyOnExt4Image(ctx, workSeedPath)
		if err != nil {
			return model.Image{}, err
		}
	} else {
		image.SeededSSHPublicKeyFingerprint = ""
	}
	op.stage("activate_artifacts", "artifact_dir", artifactDir)
	// Atomic activation: rename the fully-populated stage into place.
	if err := os.Rename(stageDir, artifactDir); err != nil {
		return model.Image{}, err
	}
	cleanupStage = false
	image.Managed = true
	image.ArtifactDir = artifactDir
	image.RootfsPath = filepath.Join(artifactDir, "rootfs.ext4")
	if workSeedPath != "" {
		image.WorkSeedPath = filepath.Join(artifactDir, "work-seed.ext4")
	}
	image.UpdatedAt = model.Now()
	if err := d.store.UpsertImage(ctx, image); err != nil {
		// Roll back the activated artifacts if the record cannot be saved.
		_ = os.RemoveAll(artifactDir)
		return model.Image{}, err
	}
	return image, nil
}
func validateImageRegisterPaths(rootfsPath, workSeedPath, kernelPath, initrdPath, modulesDir, packagesPath string) error {
checks := system.NewPreflight()
checks.RequireFile(rootfsPath, "rootfs image", `pass --rootfs <path>`)
@ -239,6 +345,22 @@ func validateImageRegisterPaths(rootfsPath, workSeedPath, kernelPath, initrdPath
return checks.Err("image register failed")
}
// validateImagePromotePaths preflights the artifacts referenced by an image
// about to be promoted. Rootfs and kernel are mandatory; initrd, modules dir,
// and packages manifest are checked only when set.
func validateImagePromotePaths(rootfsPath, kernelPath, initrdPath, modulesDir, packagesPath string) error {
	checks := system.NewPreflight()
	checks.RequireFile(rootfsPath, "rootfs image", `re-register the image with a valid rootfs`)
	checks.RequireFile(kernelPath, "kernel image", `re-register the image with a valid kernel`)
	optional := []struct {
		path, label, hint string
		isDir             bool
	}{
		{initrdPath, "initrd image", `re-register the image with a valid initrd`, false},
		{modulesDir, "kernel modules dir", `re-register the image with a valid modules dir`, true},
		{packagesPath, "packages manifest", `re-register the image with a valid packages manifest`, false},
	}
	for _, check := range optional {
		if check.path == "" {
			continue
		}
		if check.isDir {
			checks.RequireDir(check.path, check.label, check.hint)
		} else {
			checks.RequireFile(check.path, check.label, check.hint)
		}
	}
	return checks.Err("image promote failed")
}
func writePackagesMetadata(rootfsPath, packagesPath string) error {
if rootfsPath == "" || packagesPath == "" {
return nil

View file

@ -0,0 +1,18 @@
package daemon
import (
"context"
"banger/internal/model"
"banger/internal/opencode"
)
// opencodeCapability is the VM capability that gates readiness on the guest's
// opencode server.
type opencodeCapability struct{}

// Name identifies this capability in logs and capability dispatch.
func (opencodeCapability) Name() string { return "opencode" }

// PostStart blocks until the guest reports a listener on the opencode port
// (via the vsock agent), forwarding progress stages to the vm-create
// progress recorder carried in ctx.
func (opencodeCapability) PostStart(ctx context.Context, d *Daemon, vm model.VMRecord, _ model.Image) error {
	return opencode.WaitReady(ctx, d.logger, vm.Runtime.VSockPath, func(stage, detail string) {
		vmCreateStage(ctx, stage, detail)
	})
}

View file

@ -49,10 +49,12 @@ func (d *Daemon) CreateVM(ctx context.Context, params api.VMCreateParams) (vm mo
if imageName == "" {
imageName = d.config.DefaultImageName
}
vmCreateStage(ctx, "resolve_image", "resolving image")
image, err := d.FindImage(ctx, imageName)
if err != nil {
return model.VMRecord{}, err
}
vmCreateStage(ctx, "resolve_image", "using image "+image.Name)
op.stage("image_resolved", imageLogAttrs(image)...)
name := strings.TrimSpace(params.Name)
if name == "" {
@ -126,6 +128,8 @@ func (d *Daemon) CreateVM(ctx context.Context, params api.VMCreateParams) (vm mo
MetricsPath: filepath.Join(vmDir, "metrics.json"),
},
}
vmCreateBindVM(ctx, vm)
vmCreateStage(ctx, "reserve_vm", fmt.Sprintf("allocated %s (%s)", vm.Name, vm.Runtime.GuestIP))
if err := d.store.UpsertVM(ctx, vm); err != nil {
return model.VMRecord{}, err
}
@ -168,6 +172,7 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
op.done(vmLogAttrs(vm)...)
}()
op.stage("preflight")
vmCreateStage(ctx, "preflight", "checking host prerequisites")
if err := d.validateStartPrereqs(ctx, vm, image); err != nil {
return model.VMRecord{}, err
}
@ -209,11 +214,13 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
}
op.stage("system_overlay", "overlay_path", vm.Runtime.SystemOverlay)
vmCreateStage(ctx, "prepare_rootfs", "preparing system overlay")
if err := d.ensureSystemOverlay(ctx, &vm); err != nil {
return model.VMRecord{}, err
}
op.stage("dm_snapshot", "dm_name", dmName)
vmCreateStage(ctx, "prepare_rootfs", "creating root filesystem snapshot")
handles, err := d.createDMSnapshot(ctx, image.RootfsPath, vm.Runtime.SystemOverlay, dmName)
if err != nil {
return model.VMRecord{}, err
@ -241,10 +248,12 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
}
op.stage("patch_root_overlay")
vmCreateStage(ctx, "prepare_rootfs", "writing guest configuration")
if err := d.patchRootOverlay(ctx, vm, image); err != nil {
return cleanupOnErr(err)
}
op.stage("prepare_host_features")
vmCreateStage(ctx, "prepare_host_features", "preparing host-side vm features")
if err := d.prepareCapabilityHosts(ctx, &vm, image); err != nil {
return cleanupOnErr(err)
}
@ -265,6 +274,7 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
return cleanupOnErr(err)
}
op.stage("firecracker_launch", "log_path", vm.Runtime.LogPath, "metrics_path", vm.Runtime.MetricsPath)
vmCreateStage(ctx, "boot_firecracker", "starting firecracker")
firecrackerCtx := context.Background()
machineConfig := firecracker.MachineConfig{
BinaryPath: fcPath,
@ -304,15 +314,18 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
return cleanupOnErr(err)
}
op.stage("vsock_access", "vsock_path", vm.Runtime.VSockPath, "vsock_cid", vm.Runtime.VSockCID)
vmCreateStage(ctx, "wait_vsock_agent", "waiting for guest vsock agent")
if err := d.ensureSocketAccess(ctx, vm.Runtime.VSockPath, "firecracker vsock socket"); err != nil {
return cleanupOnErr(err)
}
op.stage("post_start_features")
vmCreateStage(ctx, "wait_guest_ready", "waiting for guest services")
if err := d.postStartCapabilities(ctx, vm, image); err != nil {
return cleanupOnErr(err)
}
system.TouchNow(&vm)
op.stage("persist")
vmCreateStage(ctx, "finalize", "saving vm state")
if err := d.store.UpsertVM(ctx, vm); err != nil {
return cleanupOnErr(err)
}
@ -777,58 +790,75 @@ func (d *Daemon) patchRootOverlay(ctx context.Context, vm model.VMRecord, image
return nil
}
func (d *Daemon) ensureWorkDisk(ctx context.Context, vm *model.VMRecord, image model.Image) error {
type workDiskPreparation struct {
ClonedFromSeed bool
}
func (d *Daemon) ensureWorkDisk(ctx context.Context, vm *model.VMRecord, image model.Image) (workDiskPreparation, error) {
if exists(vm.Runtime.WorkDiskPath) {
return nil
return workDiskPreparation{}, nil
}
if exists(image.WorkSeedPath) {
vmCreateStage(ctx, "prepare_work_disk", "cloning work seed")
if err := system.CopyFilePreferClone(image.WorkSeedPath, vm.Runtime.WorkDiskPath); err != nil {
return err
return workDiskPreparation{}, err
}
seedInfo, err := os.Stat(image.WorkSeedPath)
if err != nil {
return err
return workDiskPreparation{}, err
}
if vm.Spec.WorkDiskSizeBytes < seedInfo.Size() {
return fmt.Errorf("requested work disk size %d is smaller than seed image %d", vm.Spec.WorkDiskSizeBytes, seedInfo.Size())
return workDiskPreparation{}, fmt.Errorf("requested work disk size %d is smaller than seed image %d", vm.Spec.WorkDiskSizeBytes, seedInfo.Size())
}
if vm.Spec.WorkDiskSizeBytes > seedInfo.Size() {
vmCreateStage(ctx, "prepare_work_disk", "resizing work disk")
if err := system.ResizeExt4Image(ctx, d.runner, vm.Runtime.WorkDiskPath, vm.Spec.WorkDiskSizeBytes); err != nil {
return err
return workDiskPreparation{}, err
}
}
return nil
return workDiskPreparation{ClonedFromSeed: true}, nil
}
vmCreateStage(ctx, "prepare_work_disk", "creating empty work disk")
if _, err := d.runner.Run(ctx, "truncate", "-s", strconv.FormatInt(vm.Spec.WorkDiskSizeBytes, 10), vm.Runtime.WorkDiskPath); err != nil {
return err
return workDiskPreparation{}, err
}
if _, err := d.runner.Run(ctx, "mkfs.ext4", "-F", vm.Runtime.WorkDiskPath); err != nil {
return err
return workDiskPreparation{}, err
}
rootMount, cleanupRoot, err := system.MountTempDir(ctx, d.runner, vm.Runtime.DMDev, true)
if err != nil {
return err
return workDiskPreparation{}, err
}
defer cleanupRoot()
workMount, cleanupWork, err := system.MountTempDir(ctx, d.runner, vm.Runtime.WorkDiskPath, false)
if err != nil {
return err
return workDiskPreparation{}, err
}
defer cleanupWork()
vmCreateStage(ctx, "prepare_work_disk", "copying /root into work disk")
if err := system.CopyDirContents(ctx, d.runner, filepath.Join(rootMount, "root"), workMount, true); err != nil {
return err
return workDiskPreparation{}, err
}
if err := d.flattenNestedWorkHome(ctx, workMount); err != nil {
return err
return workDiskPreparation{}, err
}
return nil
return workDiskPreparation{}, nil
}
func (d *Daemon) ensureAuthorizedKeyOnWorkDisk(ctx context.Context, vm *model.VMRecord) error {
func (d *Daemon) ensureAuthorizedKeyOnWorkDisk(ctx context.Context, vm *model.VMRecord, image model.Image, prep workDiskPreparation) error {
fingerprint, err := guest.AuthorizedPublicKeyFingerprint(d.config.SSHKeyPath)
if err != nil {
return fmt.Errorf("derive authorized ssh key fingerprint: %w", err)
}
if prep.ClonedFromSeed && image.SeededSSHPublicKeyFingerprint != "" && image.SeededSSHPublicKeyFingerprint == fingerprint {
vmCreateStage(ctx, "prepare_work_disk", "using seeded SSH access")
return nil
}
publicKey, err := guest.AuthorizedPublicKey(d.config.SSHKeyPath)
if err != nil {
return fmt.Errorf("derive authorized ssh key: %w", err)
}
vmCreateStage(ctx, "prepare_work_disk", "repairing SSH access on work disk")
workMount, cleanupWork, err := system.MountTempDir(ctx, d.runner, vm.Runtime.WorkDiskPath, false)
if err != nil {
return err
@ -873,6 +903,12 @@ func (d *Daemon) ensureAuthorizedKeyOnWorkDisk(ctx context.Context, vm *model.VM
if _, err := d.runner.RunSudo(ctx, "install", "-m", "600", tmpPath, authorizedKeysPath); err != nil {
return err
}
if prep.ClonedFromSeed && image.Managed {
vmCreateStage(ctx, "prepare_work_disk", "refreshing managed work seed")
if err := d.refreshManagedWorkSeedFingerprint(ctx, image, fingerprint); err != nil {
return err
}
}
return nil
}

View file

@ -0,0 +1,205 @@
package daemon
import (
"context"
"fmt"
"strings"
"sync"
"time"
"banger/internal/api"
"banger/internal/model"
)
// vmCreateProgressKey is the context key under which a vm-create progress
// recorder travels through CreateVM and its helpers.
type vmCreateProgressKey struct{}

// vmCreateOperationState tracks one asynchronous vm.create operation.
// mu guards op and cancel; op is the externally visible status payload and
// cancel aborts the background create context.
type vmCreateOperationState struct {
	mu     sync.Mutex
	cancel context.CancelFunc
	op     api.VMCreateOperation
}
// newVMCreateOperationState allocates a fresh operation record in the
// "queued" stage with a newly minted ID.
func newVMCreateOperationState() (*vmCreateOperationState, error) {
	id, err := model.NewID()
	if err != nil {
		return nil, err
	}
	ts := model.Now()
	state := &vmCreateOperationState{}
	state.op = api.VMCreateOperation{
		ID:        id,
		Stage:     "queued",
		Detail:    "waiting to start",
		StartedAt: ts,
		UpdatedAt: ts,
	}
	return state, nil
}
// withVMCreateProgress returns a context carrying the given progress
// recorder; a nil recorder leaves ctx unchanged.
func withVMCreateProgress(ctx context.Context, op *vmCreateOperationState) context.Context {
	if op != nil {
		ctx = context.WithValue(ctx, vmCreateProgressKey{}, op)
	}
	return ctx
}
// vmCreateProgressFromContext extracts the progress recorder placed by
// withVMCreateProgress, or nil when absent (or when ctx itself is nil).
func vmCreateProgressFromContext(ctx context.Context) *vmCreateOperationState {
	if ctx == nil {
		return nil
	}
	if state, ok := ctx.Value(vmCreateProgressKey{}).(*vmCreateOperationState); ok {
		return state
	}
	return nil
}
// vmCreateStage reports a progress stage to the recorder in ctx, if any.
// Safe to call from code paths that also run outside a vm.create operation.
func vmCreateStage(ctx context.Context, stage, detail string) {
	if op := vmCreateProgressFromContext(ctx); op != nil {
		op.stage(stage, detail)
	}
}
// vmCreateBindVM associates the allocated VM's ID and name with the progress
// recorder in ctx (no-op when no recorder is present).
func vmCreateBindVM(ctx context.Context, vm model.VMRecord) {
	if op := vmCreateProgressFromContext(ctx); op != nil {
		op.bindVM(vm)
	}
}
// setCancel stores the cancel func for the operation's background context so
// cancelOperation can abort the create later.
func (op *vmCreateOperationState) setCancel(cancel context.CancelFunc) {
	op.mu.Lock()
	defer op.mu.Unlock()
	op.cancel = cancel
}
// bindVM records the VM's identity on the operation as soon as it is known,
// so status polls can name the VM before the create finishes.
func (op *vmCreateOperationState) bindVM(vm model.VMRecord) {
	op.mu.Lock()
	defer op.mu.Unlock()
	op.op.VMID = vm.ID
	op.op.VMName = vm.Name
}
// stage records a progress transition. An empty stage keeps the current one;
// an unchanged stage+detail pair is a no-op so UpdatedAt stays stable.
func (op *vmCreateOperationState) stage(stage, detail string) {
	op.mu.Lock()
	defer op.mu.Unlock()
	nextStage := strings.TrimSpace(stage)
	nextDetail := strings.TrimSpace(detail)
	if nextStage == "" {
		nextStage = op.op.Stage
	}
	if nextStage == op.op.Stage && nextDetail == op.op.Detail {
		return
	}
	op.op.Stage = nextStage
	op.op.Detail = nextDetail
	op.op.UpdatedAt = model.Now()
}
// done marks the operation as successfully finished and stores a (shallow)
// copy of the final VM record for status snapshots.
func (op *vmCreateOperationState) done(vm model.VMRecord) {
	op.mu.Lock()
	defer op.mu.Unlock()
	vmCopy := vm
	op.op.VMID = vm.ID
	op.op.VMName = vm.Name
	op.op.Stage = "ready"
	op.op.Detail = "vm is ready"
	op.op.Done = true
	op.op.Success = true
	op.op.Error = ""
	op.op.VM = &vmCopy
	op.op.UpdatedAt = model.Now()
}
// fail marks the operation as finished unsuccessfully, recording the error
// text and a fallback detail message when none was set by a stage.
func (op *vmCreateOperationState) fail(err error) {
	op.mu.Lock()
	defer op.mu.Unlock()
	op.op.Done = true
	op.op.Success = false
	if err != nil {
		op.op.Error = err.Error()
	}
	if strings.TrimSpace(op.op.Detail) == "" {
		op.op.Detail = "vm create failed"
	}
	op.op.UpdatedAt = model.Now()
}
// snapshot returns a copy of the operation's current status. The embedded VM
// pointer (when set) is duplicated so callers cannot alias internal state.
func (op *vmCreateOperationState) snapshot() api.VMCreateOperation {
	op.mu.Lock()
	defer op.mu.Unlock()
	out := op.op
	if out.VM != nil {
		vm := *out.VM
		out.VM = &vm
	}
	return out
}
// cancelOperation invokes the stored cancel func (if any) outside op.mu so a
// cancellation callback can never deadlock against the state lock.
func (op *vmCreateOperationState) cancelOperation() {
	op.mu.Lock()
	cancel := op.cancel
	op.mu.Unlock()
	if cancel != nil {
		cancel()
	}
}
// BeginVMCreate starts an asynchronous VM create and returns immediately with
// a snapshot of the tracking operation; poll VMCreateStatus with the returned
// ID for progress. The create runs on a context derived from
// context.Background so it is not tied to the RPC caller's lifetime;
// CancelVMCreate aborts it explicitly.
func (d *Daemon) BeginVMCreate(_ context.Context, params api.VMCreateParams) (api.VMCreateOperation, error) {
	op, err := newVMCreateOperationState()
	if err != nil {
		return api.VMCreateOperation{}, err
	}
	createCtx, cancel := context.WithCancel(context.Background())
	op.setCancel(cancel)
	d.createOpsMu.Lock()
	// Lazily initialize the operation registry on first use.
	if d.createOps == nil {
		d.createOps = map[string]*vmCreateOperationState{}
	}
	// op.op.ID is safe to read without op.mu here: the goroutine below has
	// not started and the ID is never mutated after construction.
	d.createOps[op.op.ID] = op
	d.createOpsMu.Unlock()
	go d.runVMCreateOperation(withVMCreateProgress(createCtx, op), op, params)
	return op.snapshot(), nil
}
// runVMCreateOperation executes a previously begun VM create in the
// background, recording the terminal state (success or failure) on op.
// ctx carries the vm-create progress recorder (see withVMCreateProgress) so
// that stage callbacks inside CreateVM update op.
func (d *Daemon) runVMCreateOperation(ctx context.Context, op *vmCreateOperationState, params api.VMCreateParams) {
	// Release the cancelable context created by BeginVMCreate once the
	// operation reaches a terminal state; without this the cancel func is
	// never called and the derived context's resources are retained for the
	// life of the process (context.WithCancel requires cancel to be called).
	defer op.cancelOperation()
	vm, err := d.CreateVM(ctx, params)
	if err != nil {
		op.fail(err)
		return
	}
	op.done(vm)
}
// VMCreateStatus returns a snapshot of the identified create operation, or an
// error when no operation with that (whitespace-trimmed) ID is registered.
func (d *Daemon) VMCreateStatus(_ context.Context, id string) (api.VMCreateOperation, error) {
	key := strings.TrimSpace(id)
	d.createOpsMu.Lock()
	op, ok := d.createOps[key]
	d.createOpsMu.Unlock()
	if !ok {
		return api.VMCreateOperation{}, fmt.Errorf("vm create operation not found: %s", id)
	}
	return op.snapshot(), nil
}
// CancelVMCreate aborts the identified in-flight create operation by
// canceling its background context; unknown IDs yield an error.
func (d *Daemon) CancelVMCreate(_ context.Context, id string) error {
	key := strings.TrimSpace(id)
	d.createOpsMu.Lock()
	op, ok := d.createOps[key]
	d.createOpsMu.Unlock()
	if !ok {
		return fmt.Errorf("vm create operation not found: %s", id)
	}
	op.cancelOperation()
	return nil
}
// pruneVMCreateOperations drops finished operations whose last update is
// older than the cutoff; in-flight operations are always retained.
func (d *Daemon) pruneVMCreateOperations(olderThan time.Time) {
	d.createOpsMu.Lock()
	defer d.createOpsMu.Unlock()
	for id, op := range d.createOps {
		snap := op.snapshot()
		if snap.Done && snap.UpdatedAt.Before(olderThan) {
			delete(d.createOps, id)
		}
	}
}

View file

@ -716,7 +716,7 @@ func TestEnsureAuthorizedKeyOnWorkDiskRepairsNestedRootLayout(t *testing.T) {
vm := testVM("seed-repair", "image-seed-repair", "172.16.0.61")
vm.Runtime.WorkDiskPath = workDiskDir
if err := d.ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm); err != nil {
if err := d.ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm, model.Image{}, workDiskPreparation{}); err != nil {
t.Fatalf("ensureAuthorizedKeyOnWorkDisk: %v", err)
}
if _, err := os.Stat(filepath.Join(workDiskDir, "root")); !os.IsNotExist(err) {
@ -748,6 +748,61 @@ func TestCreateVMRejectsNonPositiveCPUAndMemory(t *testing.T) {
}
}
// TestBeginVMCreateCompletesAndReturnsStatus drives the async create flow
// with NoStart, then polls VMCreateStatus until the operation reports a
// successful, stopped VM.
func TestBeginVMCreateCompletesAndReturnsStatus(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	db := openDaemonStore(t)
	image := testImage("default")
	image.ID = "default-image-id"
	image.Name = "default"
	if err := db.UpsertImage(ctx, image); err != nil {
		t.Fatalf("UpsertImage: %v", err)
	}
	// Minimal daemon: just enough config for CreateVM with NoStart (the boot
	// path is never exercised).
	d := &Daemon{
		store: db,
		layout: paths.Layout{
			VMsDir: t.TempDir(),
		},
		config: model.DaemonConfig{
			DefaultImageName: image.Name,
			BridgeIP:         model.DefaultBridgeIP,
		},
	}
	op, err := d.BeginVMCreate(ctx, api.VMCreateParams{Name: "queued", NoStart: true})
	if err != nil {
		t.Fatalf("BeginVMCreate: %v", err)
	}
	if op.ID == "" {
		t.Fatal("operation id should be populated")
	}
	// Poll for completion; the background goroutine owns the create.
	deadline := time.Now().Add(2 * time.Second)
	for time.Now().Before(deadline) {
		status, err := d.VMCreateStatus(ctx, op.ID)
		if err != nil {
			t.Fatalf("VMCreateStatus: %v", err)
		}
		if !status.Done {
			time.Sleep(10 * time.Millisecond)
			continue
		}
		if !status.Success {
			t.Fatalf("status = %+v, want success", status)
		}
		if status.VM == nil || status.VM.Name != "queued" {
			t.Fatalf("status VM = %+v, want queued vm", status.VM)
		}
		if status.VM.State != model.VMStateStopped {
			t.Fatalf("status VM state = %s, want stopped", status.VM.State)
		}
		return
	}
	t.Fatal("vm create operation did not finish before timeout")
}
func TestCreateVMUsesDefaultsWhenCPUAndMemoryOmitted(t *testing.T) {
ctx := context.Background()
db := openDaemonStore(t)

View file

@ -4,6 +4,8 @@ import (
"archive/tar"
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
@ -137,6 +139,15 @@ func AuthorizedPublicKey(path string) ([]byte, error) {
return ssh.MarshalAuthorizedKey(signer.PublicKey()), nil
}
// AuthorizedPublicKeyFingerprint derives a hex-encoded SHA-256 digest of the
// whitespace-trimmed authorized_keys line for the key at path. Used to detect
// whether an image's seeded SSH key matches the daemon's current key.
func AuthorizedPublicKeyFingerprint(path string) (string, error) {
	key, err := AuthorizedPublicKey(path)
	if err != nil {
		return "", err
	}
	normalized := strings.TrimSpace(string(key))
	digest := sha256.Sum256([]byte(normalized))
	return hex.EncodeToString(digest[:]), nil
}
func shellQuote(value string) string {
return "'" + strings.ReplaceAll(value, "'", `'"'"'`) + "'"
}

View file

@ -0,0 +1,132 @@
#!/bin/sh
# Banger guest network bootstrap: configure the guest NIC from the kernel
# command line's ip= argument (as passed by the host when booting the VM).
# Every early exit is "0 / do nothing" so the script is harmless on images
# that boot without banger networking.
set -eu
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# Without iproute2 there is nothing we can configure.
if ! command -v ip >/dev/null 2>&1; then
	exit 0
fi
cmdline="$(cat /proc/cmdline 2>/dev/null || true)"
# Extract the first ip=... token from the kernel command line.
ip_arg=""
for arg in $cmdline; do
	case "$arg" in
	ip=*)
		ip_arg="${arg#ip=}"
		break
		;;
	esac
done
if [ -z "$ip_arg" ]; then
	exit 0
fi
# field N -> Nth colon-separated field of the kernel ip= argument
# (layout per the kernel's nfsroot ip= syntax:
#  client-ip:server-ip:gw-ip:netmask:hostname:device:autoconf:dns0:dns1).
field() {
	printf '%s' "$ip_arg" | cut -d: -f"$1"
}
# mask_to_prefix MASK -> CIDR prefix length.
# Accepts either a bare prefix (0-32), echoed unchanged, or a dotted-quad
# netmask. Dotted masks must be contiguous (all 1-bits before all 0-bits);
# non-contiguous masks such as 255.0.255.0 are rejected with status 1 so the
# caller's /24 fallback applies, instead of silently yielding a bogus prefix.
mask_to_prefix() {
	case "$1" in
	[0-9]|[1-2][0-9]|3[0-2])
		printf '%s\n' "$1"
		return 0
		;;
	esac
	prefix=0
	seen_zero=0
	old_ifs=$IFS
	IFS=.
	set -- $1
	IFS=$old_ifs
	if [ "$#" -ne 4 ]; then
		return 1
	fi
	for octet in "$@"; do
		case "$octet" in
		255) bits=8 ;;
		254) bits=7 ;;
		252) bits=6 ;;
		248) bits=5 ;;
		240) bits=4 ;;
		224) bits=3 ;;
		192) bits=2 ;;
		128) bits=1 ;;
		0) bits=0 ;;
		*) return 1 ;;
		esac
		# Reject a set bit appearing after the first zero bit.
		if [ "$seen_zero" -eq 1 ] && [ "$bits" -ne 0 ]; then
			return 1
		fi
		if [ "$bits" -ne 8 ]; then
			seen_zero=1
		fi
		prefix=$((prefix + bits))
	done
	printf '%s\n' "$prefix"
}
# find_iface HINT -> name of the interface to configure.
# Prefers the hinted device when it exists under /sys/class/net; otherwise
# prints the first non-loopback interface. Returns 1 when none is found.
find_iface() {
	wanted="$1"
	if [ -n "$wanted" ] && [ -d "/sys/class/net/$wanted" ]; then
		printf '%s\n' "$wanted"
		return 0
	fi
	for candidate in /sys/class/net/*; do
		[ -e "$candidate" ] || continue
		name="${candidate##*/}"
		if [ "$name" = "lo" ]; then
			continue
		fi
		printf '%s\n' "$name"
		return 0
	done
	return 1
}
# Pull the fields we use from the ip= argument (1=guest ip, 3=gateway,
# 4=netmask, 6=device hint, 8/9=dns servers).
guest_ip="$(field 1)"
gateway_ip="$(field 3)"
netmask="$(field 4)"
iface_hint="$(field 6)"
dns1="$(field 8)"
dns2="$(field 9)"
if [ -z "$guest_ip" ]; then
	exit 0
fi
# The NIC may not be registered yet at early boot; poll for up to ~10s
# (50 x 0.2s). NOTE(review): fractional sleep assumes busybox/GNU sleep —
# confirm against the guest base image.
iface=""
attempt=0
while [ "$attempt" -lt 50 ]; do
	iface="$(find_iface "$iface_hint" || true)"
	if [ -n "$iface" ]; then
		break
	fi
	attempt=$((attempt + 1))
	sleep 0.2
done
if [ -z "$iface" ]; then
	exit 0
fi
# Fall back to /24 when the netmask is absent or unparsable.
prefix="$(mask_to_prefix "$netmask" || printf '24\n')"
ip link set "$iface" up
# "replace" makes reruns idempotent.
ip addr replace "$guest_ip/$prefix" dev "$iface"
if [ -n "$gateway_ip" ]; then
	ip route replace default via "$gateway_ip" dev "$iface"
fi
# Rewrite resolv.conf only when at least one DNS server was supplied;
# assemble in a temp file so an empty result never truncates resolv.conf.
if [ -n "$dns1" ] || [ -n "$dns2" ]; then
	tmp_resolv="/tmp/.banger-resolv.conf.$$"
	: > "$tmp_resolv"
	if [ -n "$dns1" ]; then
		printf 'nameserver %s\n' "$dns1" >> "$tmp_resolv"
	fi
	if [ -n "$dns2" ]; then
		printf 'nameserver %s\n' "$dns2" >> "$tmp_resolv"
	fi
	if [ -s "$tmp_resolv" ]; then
		cat "$tmp_resolv" > /etc/resolv.conf
	fi
	rm -f "$tmp_resolv"
fi
View file

@ -0,0 +1,13 @@
[Unit]
Description=Banger guest network bootstrap
After=local-fs.target
Before=network.target network-online.target
ConditionPathExists=/proc/cmdline
[Service]
Type=oneshot
ExecStart=/usr/local/libexec/banger-network-bootstrap
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,4 @@
#!/bin/sh
# Void Linux runit core-service hook: run the banger network bootstrap during
# early boot if it is installed and executable; otherwise do nothing.
if [ -x /usr/local/libexec/banger-network-bootstrap ]; then
	/usr/local/libexec/banger-network-bootstrap
fi

View file

@ -0,0 +1,30 @@
package guestnet
import _ "embed"
// Paths and service names for the guest-side network bootstrap that banger
// installs into images.
const (
	// GuestScriptPath is where the bootstrap script lives inside the guest.
	GuestScriptPath = "/usr/local/libexec/banger-network-bootstrap"
	// SystemdServiceName is the systemd unit that runs the bootstrap at boot.
	SystemdServiceName = "banger-network.service"
	// VoidCoreServicePath is the runit core-services hook used on Void guests.
	VoidCoreServicePath = "/etc/runit/core-services/20-banger-network.sh"
)

// Embedded asset payloads; the //go:embed directives must immediately
// precede each variable.
var (
	//go:embed assets/bootstrap.sh
	bootstrapScript string
	//go:embed assets/systemd.service
	systemdService string
	//go:embed assets/void-core-service.sh
	voidCoreService string
)

// BootstrapScript returns the embedded /bin/sh network bootstrap script.
func BootstrapScript() string {
	return bootstrapScript
}

// SystemdServiceUnit returns the embedded systemd unit for the bootstrap.
func SystemdServiceUnit() string {
	return systemdService
}

// VoidCoreService returns the embedded runit core-services hook script.
func VoidCoreService() string {
	return voidCoreService
}

View file

@ -61,20 +61,21 @@ type DaemonConfig struct {
}
type Image struct {
ID string `json:"id"`
Name string `json:"name"`
Managed bool `json:"managed"`
ArtifactDir string `json:"artifact_dir,omitempty"`
RootfsPath string `json:"rootfs_path"`
WorkSeedPath string `json:"work_seed_path,omitempty"`
KernelPath string `json:"kernel_path"`
InitrdPath string `json:"initrd_path,omitempty"`
ModulesDir string `json:"modules_dir,omitempty"`
PackagesPath string `json:"packages_path,omitempty"`
BuildSize string `json:"build_size,omitempty"`
Docker bool `json:"docker"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
ID string `json:"id"`
Name string `json:"name"`
Managed bool `json:"managed"`
ArtifactDir string `json:"artifact_dir,omitempty"`
RootfsPath string `json:"rootfs_path"`
WorkSeedPath string `json:"work_seed_path,omitempty"`
KernelPath string `json:"kernel_path"`
InitrdPath string `json:"initrd_path,omitempty"`
ModulesDir string `json:"modules_dir,omitempty"`
PackagesPath string `json:"packages_path,omitempty"`
BuildSize string `json:"build_size,omitempty"`
SeededSSHPublicKeyFingerprint string `json:"seeded_ssh_public_key_fingerprint,omitempty"`
Docker bool `json:"docker"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
}
type VMSpec struct {

View file

@ -0,0 +1,104 @@
package opencode
import (
"context"
"fmt"
"log/slog"
"strings"
"time"
"banger/internal/vsockagent"
)
const (
	// Port is the guest TCP port the opencode server listens on.
	Port = 4096
	// Host is the bind address passed to `opencode serve`.
	Host = "0.0.0.0"
	// GuestBinaryPath is the stable symlink to the opencode binary in guests.
	GuestBinaryPath = "/usr/local/bin/opencode"
	// ShimPath is the mise-managed shim that GuestBinaryPath links to.
	ShimPath = "/root/.local/share/mise/shims/opencode"
	// ServiceName is the systemd unit name for the opencode server.
	ServiceName = "banger-opencode.service"
	// RunitServiceName is the runit service name used on Void guests.
	RunitServiceName = "banger-opencode"
	// ReadyTimeout bounds how long WaitReady polls for the server.
	ReadyTimeout = 15 * time.Second
	// pollInterval is the delay between readiness probes.
	pollInterval = 200 * time.Millisecond
)
// ServiceUnit renders the systemd unit that keeps the opencode server running
// in the guest: HOME and working directory are /root (the work disk mount),
// and the unit restarts on failure.
func ServiceUnit() string {
	return fmt.Sprintf(`[Unit]
Description=Banger opencode server
After=network.target
RequiresMountsFor=/root
[Service]
Type=simple
Environment=HOME=/root
WorkingDirectory=/root
ExecStart=%s serve --hostname %s --port %d
Restart=on-failure
RestartSec=1
[Install]
WantedBy=multi-user.target
`, GuestBinaryPath, Host, Port)
}
// RunitRunScript renders the runit run script equivalent of ServiceUnit for
// Void Linux guests: same binary, bind address, and port.
func RunitRunScript() string {
	return fmt.Sprintf(`#!/bin/sh
set -e
export HOME=/root
cd /root
exec %s serve --hostname %s --port %d
`, GuestBinaryPath, Host, Port)
}
// Ready reports whether the guest's listener list contains a TCP listener on
// the opencode port. Protocol comparison is case- and whitespace-insensitive.
func Ready(listeners []vsockagent.PortListener) bool {
	for _, l := range listeners {
		proto := strings.ToLower(strings.TrimSpace(l.Proto))
		if proto == "tcp" && l.Port == Port {
			return true
		}
	}
	return false
}
// WaitReady blocks until the guest's opencode server is listening (as
// reported by the vsock agent) or ReadyTimeout elapses, forwarding progress
// stages to report when non-nil.
func WaitReady(ctx context.Context, logger *slog.Logger, socketPath string, report func(stage, detail string)) error {
	return waitReady(ctx, logger, socketPath, ReadyTimeout, report)
}
// waitReady polls the guest's vsock agent until a TCP listener on the
// opencode port appears, the timeout elapses, or ctx is canceled. The
// optional report callback receives coarse stages for user-facing progress.
func waitReady(ctx context.Context, logger *slog.Logger, socketPath string, timeout time.Duration, report func(stage, detail string)) error {
	waitCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	ticker := time.NewTicker(pollInterval)
	defer ticker.Stop()
	var lastErr error
	for {
		// Bound each agent query so a wedged agent cannot stall the loop.
		portsCtx, portsCancel := context.WithTimeout(waitCtx, 3*time.Second)
		listeners, err := vsockagent.Ports(portsCtx, logger, socketPath)
		portsCancel()
		if err == nil {
			if Ready(listeners) {
				return nil
			}
			if report != nil {
				report("wait_opencode", fmt.Sprintf("waiting for opencode on guest port %d", Port))
			}
			lastErr = fmt.Errorf("guest port %d is not listening yet", Port)
		} else {
			// Agent unreachable: the guest is likely still booting.
			if report != nil {
				report("wait_vsock_agent", "waiting for guest vsock agent")
			}
			lastErr = err
		}
		select {
		case <-waitCtx.Done():
			// Surface the most recent probe failure for diagnosability.
			if lastErr != nil {
				return fmt.Errorf("opencode server did not become ready on guest port %d: %w", Port, lastErr)
			}
			return fmt.Errorf("opencode server did not become ready on guest port %d before timeout", Port)
		case <-ticker.C:
		}
	}
}

View file

@ -0,0 +1,116 @@
package opencode
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"strings"
"testing"
"time"
"banger/internal/vsockagent"
)
// TestServiceUnitContainsExpectedExecStart pins the user-visible parts of the
// generated systemd unit (HOME, working dir, ExecStart, install target).
func TestServiceUnitContainsExpectedExecStart(t *testing.T) {
	unit := ServiceUnit()
	for _, snippet := range []string{
		"RequiresMountsFor=/root",
		"WorkingDirectory=/root",
		"Environment=HOME=/root",
		"ExecStart=/usr/local/bin/opencode serve --hostname 0.0.0.0 --port 4096",
		"WantedBy=multi-user.target",
	} {
		if !strings.Contains(unit, snippet) {
			t.Fatalf("service unit missing snippet %q\nunit:\n%s", snippet, unit)
		}
	}
}
// TestRunitRunScriptContainsExpectedExec pins the environment setup and exec
// line of the generated runit run script.
func TestRunitRunScriptContainsExpectedExec(t *testing.T) {
	script := RunitRunScript()
	for _, snippet := range []string{
		"export HOME=/root",
		"cd /root",
		"exec /usr/local/bin/opencode serve --hostname 0.0.0.0 --port 4096",
	} {
		if !strings.Contains(script, snippet) {
			t.Fatalf("runit script missing snippet %q\nscript:\n%s", snippet, script)
		}
	}
}
// TestReadyMatchesTCPPort checks that readiness requires both the TCP
// protocol and the opencode port — neither alone is sufficient.
func TestReadyMatchesTCPPort(t *testing.T) {
	if Ready([]vsockagent.PortListener{{Proto: "udp", Port: Port}}) {
		t.Fatal("udp listener should not satisfy readiness")
	}
	if Ready([]vsockagent.PortListener{{Proto: "tcp", Port: 8080}}) {
		t.Fatal("wrong tcp port should not satisfy readiness")
	}
	if !Ready([]vsockagent.PortListener{{Proto: "tcp", Port: Port}}) {
		t.Fatal("tcp listener on opencode port should satisfy readiness")
	}
}
// TestWaitReadyReturnsWhenPortIsListening runs a fake vsock agent on a unix
// socket: it accepts a Firecracker-style "CONNECT <port>" handshake, replies
// "OK 1", then serves one HTTP GET /ports response reporting a TCP listener
// on the opencode port. waitReady must succeed against it within 1s.
func TestWaitReadyReturnsWhenPortIsListening(t *testing.T) {
	socketPath := filepath.Join(t.TempDir(), "opencode.vsock")
	listener, err := net.Listen("unix", socketPath)
	if err != nil {
		t.Fatalf("listen: %v", err)
	}
	t.Cleanup(func() {
		_ = listener.Close()
		_ = os.Remove(socketPath)
	})
	serverDone := make(chan error, 1)
	go func() {
		conn, err := listener.Accept()
		if err != nil {
			serverDone <- err
			return
		}
		defer conn.Close()
		buf := make([]byte, 512)
		n, err := conn.Read(buf)
		if err != nil {
			serverDone <- err
			return
		}
		// 42070 is the guest agent port the client is expected to dial.
		// NOTE(review): must match vsockagent's agent port — confirm there.
		if got := string(buf[:n]); got != "CONNECT 42070\n" {
			serverDone <- fmt.Errorf("unexpected connect message %q", got)
			return
		}
		if _, err := conn.Write([]byte("OK 1\n")); err != nil {
			serverDone <- err
			return
		}
		// Accumulate the HTTP request until the end of headers.
		reqBuf := make([]byte, 0, 512)
		for {
			n, err = conn.Read(buf)
			if err != nil {
				serverDone <- err
				return
			}
			reqBuf = append(reqBuf, buf[:n]...)
			if strings.Contains(string(reqBuf), "\r\n\r\n") {
				break
			}
		}
		if !strings.Contains(string(reqBuf), "GET /ports HTTP/1.1\r\n") {
			serverDone <- fmt.Errorf("unexpected ports payload %q", string(reqBuf))
			return
		}
		// Report a TCP listener on the opencode port so Ready() is satisfied.
		body := []byte(`{"listeners":[{"proto":"tcp","bind_address":"0.0.0.0","port":4096}]}`)
		_, err = conn.Write([]byte(fmt.Sprintf("HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: %d\r\n\r\n%s", len(body), body)))
		serverDone <- err
	}()
	if err := waitReady(context.Background(), nil, socketPath, time.Second, nil); err != nil {
		t.Fatalf("waitReady: %v", err)
	}
	if err := <-serverDone; err != nil {
		t.Fatalf("server: %v", err)
	}
}

View file

@ -80,6 +80,7 @@ func (s *Store) migrate() error {
modules_dir TEXT,
packages_path TEXT,
build_size TEXT,
seeded_ssh_public_key_fingerprint TEXT,
docker INTEGER NOT NULL DEFAULT 0,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL
@ -107,6 +108,9 @@ func (s *Store) migrate() error {
if err := ensureColumnExists(s.db, "images", "work_seed_path", "TEXT"); err != nil {
return err
}
if err := ensureColumnExists(s.db, "images", "seeded_ssh_public_key_fingerprint", "TEXT"); err != nil {
return err
}
return nil
}
@ -116,8 +120,8 @@ func (s *Store) UpsertImage(ctx context.Context, image model.Image) error {
const query = `
INSERT INTO images (
id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path,
modules_dir, packages_path, build_size, docker, created_at, updated_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
modules_dir, packages_path, build_size, seeded_ssh_public_key_fingerprint, docker, created_at, updated_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
name=excluded.name,
managed=excluded.managed,
@ -129,6 +133,7 @@ func (s *Store) UpsertImage(ctx context.Context, image model.Image) error {
modules_dir=excluded.modules_dir,
packages_path=excluded.packages_path,
build_size=excluded.build_size,
seeded_ssh_public_key_fingerprint=excluded.seeded_ssh_public_key_fingerprint,
docker=excluded.docker,
updated_at=excluded.updated_at`
_, err := s.db.ExecContext(ctx, query,
@ -143,6 +148,7 @@ func (s *Store) UpsertImage(ctx context.Context, image model.Image) error {
image.ModulesDir,
image.PackagesPath,
image.BuildSize,
image.SeededSSHPublicKeyFingerprint,
boolToInt(image.Docker),
image.CreatedAt.Format(time.RFC3339),
image.UpdatedAt.Format(time.RFC3339),
@ -151,15 +157,15 @@ func (s *Store) UpsertImage(ctx context.Context, image model.Image) error {
}
func (s *Store) GetImageByName(ctx context.Context, name string) (model.Image, error) {
return s.getImage(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path, modules_dir, packages_path, build_size, docker, created_at, updated_at FROM images WHERE name = ?", name)
return s.getImage(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path, modules_dir, packages_path, build_size, seeded_ssh_public_key_fingerprint, docker, created_at, updated_at FROM images WHERE name = ?", name)
}
func (s *Store) GetImageByID(ctx context.Context, id string) (model.Image, error) {
return s.getImage(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path, modules_dir, packages_path, build_size, docker, created_at, updated_at FROM images WHERE id = ?", id)
return s.getImage(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path, modules_dir, packages_path, build_size, seeded_ssh_public_key_fingerprint, docker, created_at, updated_at FROM images WHERE id = ?", id)
}
func (s *Store) ListImages(ctx context.Context) ([]model.Image, error) {
rows, err := s.db.QueryContext(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path, modules_dir, packages_path, build_size, docker, created_at, updated_at FROM images ORDER BY created_at ASC")
rows, err := s.db.QueryContext(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path, modules_dir, packages_path, build_size, seeded_ssh_public_key_fingerprint, docker, created_at, updated_at FROM images ORDER BY created_at ASC")
if err != nil {
return nil, err
}
@ -337,6 +343,7 @@ func scanImageRow(row scanner) (model.Image, error) {
var image model.Image
var managed, docker int
var workSeedPath sql.NullString
var seededSSHPublicKeyFingerprint sql.NullString
var createdAt, updatedAt string
err := row.Scan(
&image.ID,
@ -350,6 +357,7 @@ func scanImageRow(row scanner) (model.Image, error) {
&image.ModulesDir,
&image.PackagesPath,
&image.BuildSize,
&seededSSHPublicKeyFingerprint,
&docker,
&createdAt,
&updatedAt,
@ -360,6 +368,7 @@ func scanImageRow(row scanner) (model.Image, error) {
image.Managed = managed == 1
image.Docker = docker == 1
image.WorkSeedPath = workSeedPath.String
image.SeededSSHPublicKeyFingerprint = seededSSHPublicKeyFingerprint.String
image.CreatedAt, err = time.Parse(time.RFC3339, createdAt)
if err != nil {
return image, err

View file

@ -335,20 +335,21 @@ func openTestStore(t *testing.T) *Store {
func sampleImage(name string) model.Image {
now := fixedTime()
return model.Image{
ID: name + "-id",
Name: name,
Managed: true,
ArtifactDir: "/artifacts/" + name,
RootfsPath: "/images/" + name + ".ext4",
WorkSeedPath: "/images/" + name + ".work-seed.ext4",
KernelPath: "/kernels/" + name,
InitrdPath: "/initrd/" + name,
ModulesDir: "/modules/" + name,
PackagesPath: "/packages/" + name + ".apt",
BuildSize: "8G",
Docker: true,
CreatedAt: now,
UpdatedAt: now,
ID: name + "-id",
Name: name,
Managed: true,
ArtifactDir: "/artifacts/" + name,
RootfsPath: "/images/" + name + ".ext4",
WorkSeedPath: "/images/" + name + ".work-seed.ext4",
KernelPath: "/kernels/" + name,
InitrdPath: "/initrd/" + name,
ModulesDir: "/modules/" + name,
PackagesPath: "/packages/" + name + ".apt",
BuildSize: "8G",
SeededSSHPublicKeyFingerprint: "seeded-fingerprint",
Docker: true,
CreatedAt: now,
UpdatedAt: now,
}
}

View file

@ -397,9 +397,10 @@ func UpdateFSTab(existing string) string {
func BuildBootArgs(vmName, guestIP, bridgeIP, dns string) string {
return fmt.Sprintf(
"console=ttyS0 reboot=k panic=1 pci=off root=/dev/vda rw ip=%s::%s:255.255.255.0::eth0:off:%s hostname=%s systemd.mask=home.mount systemd.mask=var.mount",
"console=ttyS0 reboot=k panic=1 pci=off root=/dev/vda rw ip=%s::%s:255.255.255.0:%s:eth0:off:%s hostname=%s systemd.mask=home.mount systemd.mask=var.mount",
guestIP,
bridgeIP,
vmName,
dns,
vmName,
)

View file

@ -167,6 +167,16 @@ func TestReadNormalizedLines(t *testing.T) {
}
}
// TestBuildBootArgsIncludesHostnameInIPField pins the full kernel command
// line produced by BuildBootArgs, in particular that the VM name appears in
// the hostname slot of the ip= parameter as well as in hostname=.
func TestBuildBootArgsIncludesHostnameInIPField(t *testing.T) {
	t.Parallel()
	const want = "console=ttyS0 reboot=k panic=1 pci=off root=/dev/vda rw ip=172.16.0.2::172.16.0.1:255.255.255.0:devbox:eth0:off:1.1.1.1 hostname=devbox systemd.mask=home.mount systemd.mask=var.mount"
	if got := BuildBootArgs("devbox", "172.16.0.2", "172.16.0.1", "1.1.1.1"); got != want {
		t.Fatalf("BuildBootArgs() = %q, want %q", got, want)
	}
}
func TestWriteExt4FileRemovesTempFileAndReturnsCopyError(t *testing.T) {
t.Parallel()