Add Go daemon-driven VM control plane
Replace the shell-only user workflow with `banger` and `bangerd`: Cobra commands, XDG/SQLite-backed state, managed VM and image lifecycle, and a Bubble Tea TUI for browsing and operating VMs.

Keep Firecracker orchestration behind the daemon so VM specs become persistent objects, and add repo entrypoints for building, installing, and documenting the new flow while still delegating rootfs customization to the existing shell tooling.

Harden the control plane around real usage by reclaiming Firecracker API sockets for the user, restarting stale daemons after rebuilds, and returning the correct `vm.create` payload so the CLI and TUI creation flow work reliably.

Validation: `go test ./...`, `make build`, and a host-side smoke test with `./banger vm create --name codex-smoke`.
This commit is contained in:
parent
3cf33d1e0a
commit
ea72ea26fe
22 changed files with 5480 additions and 0 deletions
131
internal/daemon/images.go
Normal file
131
internal/daemon/images.go
Normal file
|
|
@ -0,0 +1,131 @@
|
|||
package daemon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
||||
"banger/internal/api"
|
||||
"banger/internal/model"
|
||||
)
|
||||
|
||||
func (d *Daemon) BuildImage(ctx context.Context, params api.ImageBuildParams) (model.Image, error) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
name := params.Name
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("image-%d", model.Now().Unix())
|
||||
}
|
||||
if _, err := d.FindImage(ctx, name); err == nil {
|
||||
return model.Image{}, fmt.Errorf("image name already exists: %s", name)
|
||||
}
|
||||
if d.config.RepoRoot == "" {
|
||||
return model.Image{}, fmt.Errorf("repo root not found; set repo_root in config.toml")
|
||||
}
|
||||
baseRootfs := params.BaseRootfs
|
||||
if baseRootfs == "" {
|
||||
baseRootfs = d.config.DefaultBaseRootfs
|
||||
}
|
||||
if baseRootfs == "" {
|
||||
return model.Image{}, fmt.Errorf("base rootfs is required")
|
||||
}
|
||||
id, err := model.NewID()
|
||||
if err != nil {
|
||||
return model.Image{}, err
|
||||
}
|
||||
now := model.Now()
|
||||
artifactDir := filepath.Join(d.layout.ImagesDir, id)
|
||||
if err := os.MkdirAll(artifactDir, 0o755); err != nil {
|
||||
return model.Image{}, err
|
||||
}
|
||||
rootfsPath := filepath.Join(artifactDir, "rootfs.ext4")
|
||||
script := filepath.Join(d.config.RepoRoot, "customize.sh")
|
||||
if _, err := os.Stat(script); err != nil {
|
||||
return model.Image{}, fmt.Errorf("customize.sh not found at %s", script)
|
||||
}
|
||||
args := []string{script, baseRootfs, "--out", rootfsPath}
|
||||
if params.Size != "" {
|
||||
args = append(args, "--size", params.Size)
|
||||
}
|
||||
kernelPath := params.KernelPath
|
||||
if kernelPath == "" {
|
||||
kernelPath = d.config.DefaultKernel
|
||||
}
|
||||
if kernelPath != "" {
|
||||
args = append(args, "--kernel", kernelPath)
|
||||
}
|
||||
initrdPath := params.InitrdPath
|
||||
if initrdPath == "" {
|
||||
initrdPath = d.config.DefaultInitrd
|
||||
}
|
||||
if initrdPath != "" {
|
||||
args = append(args, "--initrd", initrdPath)
|
||||
}
|
||||
modulesDir := params.ModulesDir
|
||||
if modulesDir == "" {
|
||||
modulesDir = d.config.DefaultModulesDir
|
||||
}
|
||||
if modulesDir != "" {
|
||||
args = append(args, "--modules", modulesDir)
|
||||
}
|
||||
if params.Docker {
|
||||
args = append(args, "--docker")
|
||||
}
|
||||
cmd := exec.CommandContext(ctx, "bash", args...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Dir = d.config.RepoRoot
|
||||
if err := cmd.Run(); err != nil {
|
||||
_ = os.RemoveAll(artifactDir)
|
||||
return model.Image{}, err
|
||||
}
|
||||
image := model.Image{
|
||||
ID: id,
|
||||
Name: name,
|
||||
Managed: true,
|
||||
ArtifactDir: artifactDir,
|
||||
RootfsPath: rootfsPath,
|
||||
KernelPath: kernelPath,
|
||||
InitrdPath: initrdPath,
|
||||
ModulesDir: modulesDir,
|
||||
PackagesPath: d.config.DefaultPackagesFile,
|
||||
BuildSize: params.Size,
|
||||
Docker: params.Docker,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
if err := d.store.UpsertImage(ctx, image); err != nil {
|
||||
return model.Image{}, err
|
||||
}
|
||||
return image, nil
|
||||
}
|
||||
|
||||
func (d *Daemon) DeleteImage(ctx context.Context, idOrName string) (model.Image, error) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
image, err := d.FindImage(ctx, idOrName)
|
||||
if err != nil {
|
||||
return model.Image{}, err
|
||||
}
|
||||
vms, err := d.store.FindVMsUsingImage(ctx, image.ID)
|
||||
if err != nil {
|
||||
return model.Image{}, err
|
||||
}
|
||||
if len(vms) > 0 {
|
||||
return model.Image{}, fmt.Errorf("image %s is still referenced by %d VM(s)", image.Name, len(vms))
|
||||
}
|
||||
if err := d.store.DeleteImage(ctx, image.ID); err != nil {
|
||||
return model.Image{}, err
|
||||
}
|
||||
if image.Managed && image.ArtifactDir != "" {
|
||||
if err := os.RemoveAll(image.ArtifactDir); err != nil {
|
||||
return model.Image{}, err
|
||||
}
|
||||
}
|
||||
return image, nil
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue