banger/internal/daemon/imagebuild.go
Thales Maciel c13c8b11af
Extract imagemgr subpackage with pure image helpers
Moves the stateless helpers of the image subsystem into
internal/daemon/imagemgr:

paths.go — path validators (ValidateRegisterPaths,
ValidatePromotePaths), artifact staging (StageBootArtifacts,
StageOptionalArtifactPath), metadata (BuildMetadataPackages,
WritePackagesMetadata).

build.go — ResizeRootfs, WriteBuildLog, and the full guest
provisioning script generator (BuildProvisionScript, BuildModulesCommand
and all private script-append helpers) along with the mise/tmux/opencode
version constants.

The orchestrator methods (BuildImage, RegisterImage, PromoteImage,
DeleteImage, runImageBuildNative) stay on *Daemon: they still touch
d.store, d.imageOpsMu, d.beginOperation, capability hooks, and
fcproc-wrapped Daemon helpers — extracting them needs prerequisite
phases (operation protocol, workdisk helpers, tap pool). This commit is
strictly the pure-helper extraction that can land cleanly today.

imagebuild.go shrinks from 453 -> 225 LOC (half gone). images.go shrinks
from 450 -> 374 LOC. imagebuild_test.go updated to call the exported
imagemgr.BuildProvisionScript. Zero behavior change; all tests green.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-15 16:24:22 -03:00

225 lines
6.7 KiB
Go

package daemon
import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"time"

	"banger/internal/daemon/imagemgr"
	"banger/internal/firecracker"
	"banger/internal/guest"
	"banger/internal/hostnat"
	"banger/internal/model"
	"banger/internal/system"
	"banger/internal/vsockagent"
)
// imageBuildSpec carries all inputs for one image build: the source boot
// artifacts to start from, the destination rootfs to produce, and the guest
// provisioning options applied inside the build VM.
type imageBuildSpec struct {
	ID            string    // image identifier; its short form names the build VM, tap device, and API socket
	Name          string    // image name; not referenced by the build path in this file — presumably used by callers, TODO confirm
	SourceRootfs  string    // rootfs cloned (reflink-preferred) to RootfsPath as the build's starting point
	RootfsPath    string    // destination rootfs the build mutates and leaves behind as the result
	BuildLog      io.Writer // sink for human-readable build progress lines
	KernelPath    string    // kernel image used to boot the build VM
	InitrdPath    string    // initrd used to boot the build VM
	ModulesDir    string    // optional host directory of kernel modules streamed into the guest; blank/whitespace skips the step
	Packages      []string  // extra packages installed by the guest provision script
	InstallDocker bool      // whether the provision script also installs Docker
	Size          string    // optional target rootfs size; empty keeps the source size (skips ResizeRootfs)
}
// imageBuildVM is the runtime identity of the throwaway Firecracker VM used
// for a single image build.
type imageBuildVM struct {
	Name      string // VM hostname, "image-build-<shortID>"
	GuestIP   string // guest IP leased from the store for the build
	TapDevice string // host tap device name, "tap-img-<shortID>"
	APISock   string // firecracker API socket path under the runtime dir
	PID       int    // firecracker process PID; zero until resolved after start
}
// runImageBuild executes an image build, preferring the injected d.imageBuild
// hook when one is set (a test/override seam) and otherwise falling back to
// the native Firecracker-backed implementation.
func (d *Daemon) runImageBuild(ctx context.Context, spec imageBuildSpec) error {
	build := d.runImageBuildNative
	if d.imageBuild != nil {
		build = d.imageBuild
	}
	return build(ctx, spec)
}
// runImageBuildNative performs an image build against a real Firecracker VM:
// it clones the source rootfs to the destination, boots a build VM on it,
// provisions the guest over SSH, then shuts the VM down so the rootfs is
// left behind as the finished image.
//
// The named return err exists so the deferred VM cleanup can join its own
// error into whatever the body returns.
func (d *Daemon) runImageBuildNative(ctx context.Context, spec imageBuildSpec) (err error) {
	// Clone (reflink when supported) the source rootfs to the build target.
	if err := system.CopyFilePreferClone(spec.SourceRootfs, spec.RootfsPath); err != nil {
		return err
	}
	if spec.Size != "" {
		// Optionally resize the copied rootfs to the requested size.
		if err := imagemgr.ResizeRootfs(spec.SourceRootfs, spec.RootfsPath, spec.Size); err != nil {
			return err
		}
	}
	vm, cleanup, err := d.startImageBuildVM(ctx, spec)
	if err != nil {
		return err
	}
	// Always tear the VM down, even when ctx is already cancelled — hence
	// context.Background(). Cleanup errors are joined with the main error.
	defer func() {
		cleanupErr := cleanup(context.Background())
		if cleanupErr != nil {
			err = errors.Join(err, cleanupErr)
		}
	}()
	sshAddress := vm.GuestIP + ":22"
	if _, err := fmt.Fprintf(spec.BuildLog, "[image.build] waiting for ssh on %s\n", sshAddress); err != nil {
		return err
	}
	// Give the guest up to 60s to bring sshd up, polling once per second.
	waitCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()
	if err := guest.WaitForSSH(waitCtx, sshAddress, d.config.SSHKeyPath, time.Second); err != nil {
		return err
	}
	client, err := guest.Dial(ctx, sshAddress, d.config.SSHKeyPath)
	if err != nil {
		return err
	}
	defer client.Close()
	// Public half of the daemon's SSH key, baked into the image so later VMs
	// booted from it accept the same key.
	authorizedKey, err := guest.AuthorizedPublicKey(d.config.SSHKeyPath)
	if err != nil {
		return err
	}
	vsockAgentPath, err := d.vsockAgentBinary()
	if err != nil {
		return err
	}
	helperBytes, err := os.ReadFile(vsockAgentPath)
	if err != nil {
		return err
	}
	if err := imagemgr.WriteBuildLog(spec.BuildLog, "installing vsock agent"); err != nil {
		return err
	}
	// Install the vsock agent binary at its well-known guest path, executable.
	if err := client.UploadFile(ctx, vsockagent.GuestInstallPath, 0o755, helperBytes, spec.BuildLog); err != nil {
		return err
	}
	if err := imagemgr.WriteBuildLog(spec.BuildLog, "configuring guest"); err != nil {
		return err
	}
	// Run the full provisioning script: hostname, DNS, authorized key,
	// requested packages, optional Docker install.
	if err := client.RunScript(ctx, imagemgr.BuildProvisionScript(vm.Name, d.config.DefaultDNS, string(authorizedKey), spec.Packages, spec.InstallDocker), spec.BuildLog); err != nil {
		return err
	}
	if strings.TrimSpace(spec.ModulesDir) != "" {
		if err := imagemgr.WriteBuildLog(spec.BuildLog, "copying kernel modules"); err != nil {
			return err
		}
		// Stream the host modules directory into the guest as a tar pipe.
		if err := client.StreamTar(ctx, spec.ModulesDir, imagemgr.BuildModulesCommand(filepath.Base(spec.ModulesDir)), spec.BuildLog); err != nil {
			return err
		}
	}
	if err := imagemgr.WriteBuildLog(spec.BuildLog, "shutting down guest"); err != nil {
		return err
	}
	// Flush guest filesystem buffers before powering off.
	if err := client.RunScript(ctx, "set -e\nsync\n", spec.BuildLog); err != nil {
		return err
	}
	// Graceful shutdown; the deferred cleanup then finds the process gone.
	return d.shutdownImageBuildVM(ctx, vm)
}
// startImageBuildVM provisions host networking and boots a throwaway
// Firecracker VM on the build rootfs. It returns the VM's identity plus a
// cleanup function that kills the VM (if still running) and releases the NAT
// rules, tap device, and API socket. On error, everything allocated so far
// has already been released before returning.
func (d *Daemon) startImageBuildVM(ctx context.Context, spec imageBuildSpec) (imageBuildVM, func(context.Context) error, error) {
	if err := d.ensureBridge(ctx); err != nil {
		return imageBuildVM{}, nil, err
	}
	if err := d.ensureSocketDir(); err != nil {
		return imageBuildVM{}, nil, err
	}
	fcPath, err := d.firecrackerBinary()
	if err != nil {
		return imageBuildVM{}, nil, err
	}
	shortID := system.ShortID(spec.ID)
	guestIP, err := d.store.NextGuestIP(ctx, bridgePrefix(d.config.BridgeIP))
	if err != nil {
		return imageBuildVM{}, nil, err
	}
	vm := imageBuildVM{
		Name:      "image-build-" + shortID,
		GuestIP:   guestIP,
		TapDevice: "tap-img-" + shortID,
		APISock:   filepath.Join(d.layout.RuntimeDir, "img-"+shortID+".sock"),
	}
	// Drop any stale socket from a previous run. os.RemoveAll returns nil
	// when the path does not exist, so no IsNotExist check is needed.
	if err := os.RemoveAll(vm.APISock); err != nil {
		return imageBuildVM{}, nil, err
	}
	if err := d.createTap(ctx, vm.TapDevice); err != nil {
		return imageBuildVM{}, nil, err
	}
	// teardownNet best-effort removes the NAT rules and tap device. It is
	// shared by every failure path below and by the returned cleanup func;
	// vm.TapDevice and vm.APISock are always non-empty here, so no guards.
	teardownNet := func(netCtx context.Context) {
		_ = hostnat.Ensure(netCtx, d.runner, vm.GuestIP, vm.TapDevice, false)
		_, _ = d.runner.RunSudo(netCtx, "ip", "link", "del", vm.TapDevice)
	}
	if err := hostnat.Ensure(ctx, d.runner, vm.GuestIP, vm.TapDevice, true); err != nil {
		// NAT setup itself failed, so only the tap needs removing.
		_, _ = d.runner.RunSudo(ctx, "ip", "link", "del", vm.TapDevice)
		return imageBuildVM{}, nil, err
	}
	// Background context: the firecracker process must outlive ctx so the
	// caller's deferred cleanup can shut it down deliberately.
	firecrackerCtx := context.Background()
	machine, err := firecracker.NewMachine(firecrackerCtx, firecracker.MachineConfig{
		BinaryPath:      fcPath,
		VMID:            spec.ID,
		SocketPath:      vm.APISock,
		LogPath:         spec.RootfsPath + ".firecracker.log",
		MetricsPath:     filepath.Join(filepath.Dir(spec.RootfsPath), "metrics.json"),
		KernelImagePath: spec.KernelPath,
		InitrdPath:      spec.InitrdPath,
		KernelArgs:      system.BuildBootArgsWithKernelIP(vm.Name, vm.GuestIP, d.config.BridgeIP, d.config.DefaultDNS),
		Drives: []firecracker.DriveConfig{{
			ID:       "rootfs",
			Path:     spec.RootfsPath,
			ReadOnly: false,
			IsRoot:   true,
		}},
		TapDevice: vm.TapDevice,
		VCPUCount: model.DefaultVCPUCount,
		MemoryMiB: model.DefaultMemoryMiB,
		Logger:    d.logger,
	})
	if err != nil {
		teardownNet(ctx)
		return imageBuildVM{}, nil, err
	}
	if err := machine.Start(firecrackerCtx); err != nil {
		teardownNet(ctx)
		return imageBuildVM{}, nil, err
	}
	vm.PID = d.resolveFirecrackerPID(firecrackerCtx, machine, vm.APISock)
	if err := d.ensureSocketAccess(ctx, vm.APISock, "firecracker api socket"); err != nil {
		_ = d.killVMProcess(context.Background(), vm.PID)
		teardownNet(ctx)
		return imageBuildVM{}, nil, err
	}
	cleanup := func(cleanupCtx context.Context) error {
		// Kill only if the PID is still the process bound to our socket —
		// after a graceful shutdown there is nothing left to kill.
		if vm.PID > 0 && system.ProcessRunning(vm.PID, vm.APISock) {
			_ = d.killVMProcess(cleanupCtx, vm.PID)
			_ = d.waitForExit(cleanupCtx, vm.PID, vm.APISock, 10*time.Second)
		}
		teardownNet(cleanupCtx)
		_ = os.Remove(vm.APISock)
		return nil
	}
	return vm, cleanup, nil
}
// shutdownImageBuildVM asks the build guest to power off via Ctrl-Alt-Del
// and then waits up to 15 seconds for the firecracker process to exit.
func (d *Daemon) shutdownImageBuildVM(ctx context.Context, vm imageBuildVM) error {
	record := model.VMRecord{
		Runtime: model.VMRuntime{APISockPath: vm.APISock},
	}
	err := d.sendCtrlAltDel(ctx, record)
	if err != nil {
		return err
	}
	return d.waitForExit(ctx, vm.PID, vm.APISock, 15*time.Second)
}