Remove image build --from-image; doctor treats catalog images as OK

The `image build` flow spun up a transient Firecracker VM, SSHed in,
and ran a large bash provisioning script to derive a new managed
image from an existing one. It overlapped heavily with the golden-
image Dockerfile flow (same mise/docker/tmux/opencode install logic
duplicated in Go as `imagemgr.BuildProvisionScript`) and had far more
machinery: async op state, RPC begin/status/cancel, webui form +
operation page, preflight checks, API types, tests. For custom
images, writing a Dockerfile is simpler and more reproducible.

Removed end-to-end:
- CLI `image build` subcommand + `absolutizeImageBuildPaths`.
- Daemon: BuildImage method, imagebuild.go (transient-VM orchestration),
  image_build_ops.go (async begin/status/cancel), imagemgr/build.go
  (the 247-line provisioning script generator and all its append*
  helpers), validateImageBuildPrereqs + addImageBuildPrereqs.
- RPC dispatches for image.build / .begin / .status / .cancel.
- opstate registry `imageBuildOps`, daemon seam `imageBuild`,
  background pruner call.
- API types: ImageBuildParams, ImageBuildOperation, ImageBuildBeginResult,
  ImageBuildStatusParams, ImageBuildStatusResult; model type
  ImageBuildRequest.
- Web UI: Backend interface methods, handlers, form, routes, template
  branches (images.html build form, operation.html build branch,
  dashboard.html Build button).
- Tests that directly exercised BuildImage.

Doctor polish (task C):
- Drop the "image build" preflight section entirely (its raison d'être
  is gone).
- Default-image check now accepts "not local but in imagecat" as OK:
  vm create auto-pulls on first use. Only flag when the image is
  neither locally registered nor in the catalog.

Net: 24 files touched, 1,373 lines deleted, 25 added.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
Thales Maciel 2026-04-18 15:54:29 -03:00
parent ace4782fce
commit ac7974f5b9
No known key found for this signature in database
GPG key ID: 33112E6833C34679
24 changed files with 25 additions and 1398 deletions

View file

@@ -11,7 +11,6 @@ import (
"strings"
"testing"
"banger/internal/api"
"banger/internal/model"
"banger/internal/paths"
)
@@ -131,119 +130,6 @@ func TestStartVMLockedLogsBridgeFailure(t *testing.T) {
}
}
// TestBuildImagePreservesBuildLogOnFailure verifies that when the image
// builder seam fails, BuildImage returns an error that points the user at
// the build log, preserves the log file on disk with the builder's output
// in it, and emits the expected structured daemon log entries (including
// the launch_builder stage and the build-log path).
func TestBuildImagePreservesBuildLogOnFailure(t *testing.T) {
	ctx := context.Background()
	store := openDaemonStore(t)

	stateDir := filepath.Join(t.TempDir(), "state")
	imagesDir := filepath.Join(stateDir, "images")
	if err := os.MkdirAll(imagesDir, 0o755); err != nil {
		t.Fatalf("mkdir images dir: %v", err)
	}

	// Stub every external binary the daemon's prereq checks shell out to,
	// so the test never touches the real host tooling.
	binDir := t.TempDir()
	for _, name := range []string{"sudo", "ip", "pgrep", "chown", "chmod", "kill", "iptables", "sysctl", "e2fsck", "resize2fs", "mkfs.ext4", "mount", "umount", "cp"} {
		writeFakeExecutable(t, filepath.Join(binDir, name))
	}
	t.Setenv("PATH", binDir)

	// Fake on-disk artifacts: the daemon only stats/reads these, so any
	// non-empty file is sufficient.
	baseRootfs := filepath.Join(t.TempDir(), "base.ext4")
	kernelPath := filepath.Join(t.TempDir(), "vmlinux")
	sshKeyPath := filepath.Join(t.TempDir(), "id_ed25519")
	firecrackerBin := filepath.Join(t.TempDir(), "firecracker")
	vsockHelper := filepath.Join(t.TempDir(), "banger-vsock-agent")
	for _, path := range []string{baseRootfs, kernelPath, sshKeyPath} {
		if err := os.WriteFile(path, []byte("artifact"), 0o644); err != nil {
			t.Fatalf("write %s: %v", path, err)
		}
	}
	if err := os.WriteFile(vsockHelper, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
		t.Fatalf("write %s: %v", vsockHelper, err)
	}
	t.Setenv("BANGER_VSOCK_AGENT_BIN", vsockHelper)
	if err := os.WriteFile(firecrackerBin, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
		t.Fatalf("write %s: %v", firecrackerBin, err)
	}

	// The scripted runner expects exactly one command (the default-route
	// lookup); assertExhausted at the end fails the test on any extra or
	// missing invocation.
	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			{call: runnerCall{name: "ip", args: []string{"route", "show", "default"}}, out: []byte("default via 192.0.2.1 dev eth0\n")},
		},
	}

	var buf bytes.Buffer
	logger, _, err := newDaemonLogger(&buf, "info")
	if err != nil {
		t.Fatalf("newDaemonLogger: %v", err)
	}

	baseImage := model.Image{
		ID:         "base-image",
		Name:       "base-image",
		RootfsPath: baseRootfs,
		KernelPath: kernelPath,
		CreatedAt:  model.Now(),
		UpdatedAt:  model.Now(),
	}
	if err := store.UpsertImage(ctx, baseImage); err != nil {
		t.Fatalf("UpsertImage(base): %v", err)
	}

	d := &Daemon{
		layout: paths.Layout{
			StateDir:  stateDir,
			ImagesDir: imagesDir,
		},
		config: model.DaemonConfig{
			DefaultImageName: "base-image",
			SSHKeyPath:       sshKeyPath,
			FirecrackerBin:   firecrackerBin,
		},
		store:  store,
		runner: runner,
		logger: logger,
		// Fake builder seam: it writes to the build log (so the test can
		// assert the log survives), validates the spec the daemon built,
		// then fails deterministically.
		imageBuild: func(ctx context.Context, spec imageBuildSpec) error {
			if _, err := fmt.Fprintln(spec.BuildLog, "builder-stdout"); err != nil {
				return err
			}
			// BUG FIX: the kernel-path comparison was inverted (==), which
			// made the test fail precisely when the spec carried the kernel
			// requested in the params. The spec must reference the base
			// rootfs, the requested kernel, and a non-empty package list.
			if spec.SourceRootfs != baseRootfs || spec.KernelPath != kernelPath || len(spec.Packages) == 0 {
				t.Fatalf("unexpected image build spec: %+v", spec)
			}
			return errors.New("builder failed")
		},
	}

	_, err = d.BuildImage(ctx, api.ImageBuildParams{
		Name:       "broken-image",
		FromImage:  baseImage.Name,
		KernelPath: kernelPath,
	})
	// The daemon is expected to wrap the builder failure with a hint that
	// tells the user to inspect the preserved build log.
	if err == nil || !strings.Contains(err.Error(), "inspect ") {
		t.Fatalf("BuildImage() error = %v, want build log hint", err)
	}

	// Exactly one build log should survive the failed build.
	buildLogs, globErr := filepath.Glob(filepath.Join(stateDir, "image-build", "*.log"))
	if globErr != nil {
		t.Fatalf("glob build logs: %v", globErr)
	}
	if len(buildLogs) != 1 {
		t.Fatalf("build log count = %d, want 1", len(buildLogs))
	}
	logData, readErr := os.ReadFile(buildLogs[0])
	if readErr != nil {
		t.Fatalf("read build log: %v", readErr)
	}
	if !strings.Contains(string(logData), "builder-stdout") {
		t.Fatalf("build log = %q, want builder output", string(logData))
	}
	runner.assertExhausted()

	// Structured daemon logs must record the launch_builder stage and
	// mention the preserved build-log path.
	entries := parseLogEntries(t, buf.Bytes())
	if !hasLogEntry(entries, map[string]string{"msg": "operation stage", "operation": "image.build", "stage": "launch_builder"}) {
		t.Fatalf("expected launch_builder log, got %v", entries)
	}
	if !strings.Contains(buf.String(), buildLogs[0]) {
		t.Fatalf("daemon logs = %q, want build log path %s", buf.String(), buildLogs[0])
	}
}
func parseLogEntries(t *testing.T, data []byte) []map[string]any {
t.Helper()
lines := bytes.Split(bytes.TrimSpace(data), []byte("\n"))