banger/internal/daemon/dispatch.go
Thales Maciel 4d8dca6b72
image: add banger image cache prune for OCI cache cleanup
OCI layer blobs accumulate forever — every pull writes layers to
~/.cache/banger/oci/blobs/sha256/<hex> via go-containerregistry's
filesystem cache, and nothing ever evicts them. The cache is purely
a re-pull-avoidance (every flattened image is independent of the
blobs that sourced it), so it's a perfect candidate for an opt-in
operator-driven prune.

New surface:
  * api: ImageCachePruneParams{DryRun}, ImageCachePruneResult
    {BytesFreed, BlobsFreed, DryRun, CacheDir}.
  * daemon: ImageService.PruneOCICache walks layout.OCICacheDir for
    a (bytes, blobs) tally, then — outside dry-run — atomically
    renames the cache aside, recreates it empty, and rm -rf's the
    aside dir. The rename-then-rm avoids leaving the cache in a
    half-removed state if a pull starts mid-prune (the in-flight
    pull's open files survive the rename via standard Linux
    semantics; it just sees a fresh empty cache afterwards). Missing
    cache dir is treated as zero — fresh installs that have never
    pulled an OCI image don't error.
  * dispatch: image.cache.prune RPC (paramHandler-wrapped, mirroring
    every other image RPC). Documented-methods test list updated.
  * cli: `banger image cache` group with a `prune` subcommand
    (--dry-run flag). Output is a single line: "freed 1.2 GiB
    across 47 blob(s) in /var/cache/banger/oci" or "would free …".
    formatBytes helper for the size pretty-print.

docs/oci-import.md: replaced the "Tech debt: cache eviction" bullet
with a "Cache lifecycle" section describing the new command and
the in-flight-pull caveat.

Tests: PruneOCICache covers the happy path (real prune empties the
cache, recreates an empty dir, doesn't leak the .pruning- aside),
the dry-run path (returns size, leaves blobs intact), and the
fresh-install path (cache dir absent → zero result, no error).
Smoke at JOBS=4 still green; live exercise against an empty cache
on a system install prints the expected zero summary.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-28 16:32:57 -03:00

304 lines
12 KiB
Go

package daemon
import (
"context"
"fmt"
"banger/internal/api"
"banger/internal/buildinfo"
"banger/internal/rpc"
)
// handler is the uniform signature every RPC method dispatches
// through. It keeps Daemon.dispatch a one-liner — map lookup +
// invoke — instead of the old ~240-line `switch`. The *Daemon is
// passed at call time by the driver rather than baked into the map
// entries, so tests that stand up a *Daemon with custom wiring
// reuse the same table without re-registering anything.
type handler func(ctx context.Context, d *Daemon, req rpc.Request) rpc.Response
// paramHandler builds a handler from a typed service call: decode
// params of type P from the request, invoke the call, and wrap its
// (R, error). This is the flow the overwhelming majority of methods
// follow. Compile-time type-safe — no reflection; P and R are
// inferred from the function literal, so each registration reads as
// "RPC shape + service call" and nothing else.
func paramHandler[P any, R any](call func(ctx context.Context, d *Daemon, p P) (R, error)) handler {
	return func(ctx context.Context, d *Daemon, req rpc.Request) rpc.Response {
		params, decodeErr := rpc.DecodeParams[P](req)
		if decodeErr != nil {
			return rpc.NewError("bad_request", decodeErr.Error())
		}
		res, callErr := call(ctx, d, params)
		return marshalResultOrError(res, callErr)
	}
}
// noParamHandler is the decode-free sibling of paramHandler for RPC
// methods whose requests carry no params (the *.list methods and
// kernel.catalog).
func noParamHandler[R any](call func(ctx context.Context, d *Daemon) (R, error)) handler {
	return func(ctx context.Context, d *Daemon, _ rpc.Request) rpc.Response {
		res, callErr := call(ctx, d)
		return marshalResultOrError(res, callErr)
	}
}
// rpcHandlers maps every supported method name to its handler. Adding
// or removing a method is a single-line diff here — unlike the old
// switch, there's no four-line decode/call/wrap boilerplate to copy.
// The four special-case handlers (vm.logs, vm.ssh, ping, shutdown)
// live below the map; they need pre-service validation or raw result
// encoding that the generic wrapper can't express.
var rpcHandlers = map[string]handler{
// Daemon meta: liveness probe and graceful stop.
"ping": pingHandler,
"shutdown": shutdownHandler,
// VM lifecycle (create has both sync and async begin/status/cancel forms).
"vm.create": paramHandler(vmCreateDispatch),
"vm.create.begin": paramHandler(vmCreateBeginDispatch),
"vm.create.status": paramHandler(vmCreateStatusDispatch),
"vm.create.cancel": paramHandler(vmCreateCancelDispatch),
"vm.list": noParamHandler(vmListDispatch),
"vm.show": paramHandler(vmShowDispatch),
"vm.start": paramHandler(vmStartDispatch),
"vm.stop": paramHandler(vmStopDispatch),
"vm.kill": paramHandler(vmKillDispatch),
"vm.restart": paramHandler(vmRestartDispatch),
"vm.delete": paramHandler(vmDeleteDispatch),
"vm.set": paramHandler(vmSetDispatch),
// VM introspection and access.
"vm.stats": paramHandler(vmStatsDispatch),
"vm.logs": vmLogsHandler,
"vm.ssh": vmSSHHandler,
"vm.health": paramHandler(vmHealthDispatch),
"vm.ping": paramHandler(vmPingDispatch),
"vm.ports": paramHandler(vmPortsDispatch),
// Workspace prepare/export.
"vm.workspace.prepare": paramHandler(workspacePrepareDispatch),
"vm.workspace.export": paramHandler(workspaceExportDispatch),
// Image registry and OCI cache maintenance.
"image.list": noParamHandler(imageListDispatch),
"image.show": paramHandler(imageShowDispatch),
"image.register": paramHandler(imageRegisterDispatch),
"image.promote": paramHandler(imagePromoteDispatch),
"image.delete": paramHandler(imageDeleteDispatch),
"image.pull": paramHandler(imagePullDispatch),
"image.cache.prune": paramHandler(imageCachePruneDispatch),
// Kernel management.
"kernel.list": noParamHandler(kernelListDispatch),
"kernel.show": paramHandler(kernelShowDispatch),
"kernel.delete": paramHandler(kernelDeleteDispatch),
"kernel.import": paramHandler(kernelImportDispatch),
"kernel.pull": paramHandler(kernelPullDispatch),
"kernel.catalog": noParamHandler(kernelCatalogDispatch),
}
// ---- Service-call adapters (kept thin; the interesting shape is up
// ---- in the `paramHandler` generic. These exist so the map entries
// ---- stay readable at a glance.)
// vmCreateDispatch adapts vm.create onto the VM service.
func vmCreateDispatch(ctx context.Context, d *Daemon, p api.VMCreateParams) (api.VMShowResult, error) {
	created, createErr := d.vm.CreateVM(ctx, p)
	return api.VMShowResult{VM: created}, createErr
}
// vmCreateBeginDispatch adapts vm.create.begin onto the async-create entry point.
func vmCreateBeginDispatch(ctx context.Context, d *Daemon, p api.VMCreateParams) (api.VMCreateBeginResult, error) {
	operation, beginErr := d.vm.BeginVMCreate(ctx, p)
	return api.VMCreateBeginResult{Operation: operation}, beginErr
}
// vmCreateStatusDispatch polls an async-create operation by its ID.
func vmCreateStatusDispatch(ctx context.Context, d *Daemon, p api.VMCreateStatusParams) (api.VMCreateStatusResult, error) {
	operation, statusErr := d.vm.VMCreateStatus(ctx, p.ID)
	return api.VMCreateStatusResult{Operation: operation}, statusErr
}
// vmCreateCancelDispatch cancels an async create; only the error matters.
func vmCreateCancelDispatch(ctx context.Context, d *Daemon, p api.VMCreateStatusParams) (api.Empty, error) {
	cancelErr := d.vm.CancelVMCreate(ctx, p.ID)
	return api.Empty{}, cancelErr
}
// vmListDispatch reads straight from the store; no service logic needed.
func vmListDispatch(ctx context.Context, d *Daemon) (api.VMListResult, error) {
	all, listErr := d.store.ListVMs(ctx)
	return api.VMListResult{VMs: all}, listErr
}
// vmShowDispatch resolves a VM by ID or name and wraps the record.
func vmShowDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMShowResult, error) {
	found, findErr := d.vm.FindVM(ctx, p.IDOrName)
	return api.VMShowResult{VM: found}, findErr
}
// vmStartDispatch adapts vm.start onto the VM service.
func vmStartDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMShowResult, error) {
	started, startErr := d.vm.StartVM(ctx, p.IDOrName)
	return api.VMShowResult{VM: started}, startErr
}
// vmStopDispatch adapts vm.stop onto the VM service.
func vmStopDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMShowResult, error) {
	stopped, stopErr := d.vm.StopVM(ctx, p.IDOrName)
	return api.VMShowResult{VM: stopped}, stopErr
}
// vmKillDispatch forwards the whole kill params struct to the service.
func vmKillDispatch(ctx context.Context, d *Daemon, p api.VMKillParams) (api.VMShowResult, error) {
	killed, killErr := d.vm.KillVM(ctx, p)
	return api.VMShowResult{VM: killed}, killErr
}
// vmRestartDispatch adapts vm.restart onto the VM service.
func vmRestartDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMShowResult, error) {
	restarted, restartErr := d.vm.RestartVM(ctx, p.IDOrName)
	return api.VMShowResult{VM: restarted}, restartErr
}
// vmDeleteDispatch deletes a VM and returns its final record.
func vmDeleteDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMShowResult, error) {
	deleted, deleteErr := d.vm.DeleteVM(ctx, p.IDOrName)
	return api.VMShowResult{VM: deleted}, deleteErr
}
// vmSetDispatch forwards the whole set-params struct to the service.
func vmSetDispatch(ctx context.Context, d *Daemon, p api.VMSetParams) (api.VMShowResult, error) {
	updated, setErr := d.vm.SetVM(ctx, p)
	return api.VMShowResult{VM: updated}, setErr
}
// vmStatsDispatch fetches the VM record and its stats in one service call.
func vmStatsDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMStatsResult, error) {
	record, snapshot, statsErr := d.stats.GetVMStats(ctx, p.IDOrName)
	return api.VMStatsResult{VM: record, Stats: snapshot}, statsErr
}
// vmHealthDispatch is a pure pass-through: the stats service already
// returns the wire-shaped result.
func vmHealthDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMHealthResult, error) {
	res, healthErr := d.stats.HealthVM(ctx, p.IDOrName)
	return res, healthErr
}
// vmPingDispatch is a pure pass-through to the stats service.
func vmPingDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMPingResult, error) {
	res, pingErr := d.stats.PingVM(ctx, p.IDOrName)
	return res, pingErr
}
// vmPortsDispatch is a pure pass-through to the stats service.
func vmPortsDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMPortsResult, error) {
	res, portsErr := d.stats.PortsVM(ctx, p.IDOrName)
	return res, portsErr
}
// workspacePrepareDispatch adapts vm.workspace.prepare onto the workspace service.
func workspacePrepareDispatch(ctx context.Context, d *Daemon, p api.VMWorkspacePrepareParams) (api.VMWorkspacePrepareResult, error) {
	workspace, prepErr := d.ws.PrepareVMWorkspace(ctx, p)
	return api.VMWorkspacePrepareResult{Workspace: workspace}, prepErr
}
// workspaceExportDispatch is a pure pass-through to the workspace service.
func workspaceExportDispatch(ctx context.Context, d *Daemon, p api.WorkspaceExportParams) (api.WorkspaceExportResult, error) {
	res, exportErr := d.ws.ExportVMWorkspace(ctx, p)
	return res, exportErr
}
// imageListDispatch reads straight from the store; no service logic needed.
func imageListDispatch(ctx context.Context, d *Daemon) (api.ImageListResult, error) {
	all, listErr := d.store.ListImages(ctx)
	return api.ImageListResult{Images: all}, listErr
}
// imageShowDispatch resolves an image by ID or name and wraps the record.
func imageShowDispatch(ctx context.Context, d *Daemon, p api.ImageRefParams) (api.ImageShowResult, error) {
	found, findErr := d.img.FindImage(ctx, p.IDOrName)
	return api.ImageShowResult{Image: found}, findErr
}
// imageRegisterDispatch adapts image.register onto the image service.
func imageRegisterDispatch(ctx context.Context, d *Daemon, p api.ImageRegisterParams) (api.ImageShowResult, error) {
	registered, regErr := d.img.RegisterImage(ctx, p)
	return api.ImageShowResult{Image: registered}, regErr
}
// imagePromoteDispatch adapts image.promote onto the image service.
func imagePromoteDispatch(ctx context.Context, d *Daemon, p api.ImageRefParams) (api.ImageShowResult, error) {
	promoted, promoteErr := d.img.PromoteImage(ctx, p.IDOrName)
	return api.ImageShowResult{Image: promoted}, promoteErr
}
// imageDeleteDispatch deletes an image and returns its final record.
func imageDeleteDispatch(ctx context.Context, d *Daemon, p api.ImageRefParams) (api.ImageShowResult, error) {
	deleted, deleteErr := d.img.DeleteImage(ctx, p.IDOrName)
	return api.ImageShowResult{Image: deleted}, deleteErr
}
// imagePullDispatch adapts image.pull onto the image service.
func imagePullDispatch(ctx context.Context, d *Daemon, p api.ImagePullParams) (api.ImageShowResult, error) {
	pulled, pullErr := d.img.PullImage(ctx, p)
	return api.ImageShowResult{Image: pulled}, pullErr
}
// imageCachePruneDispatch is a pure pass-through: the image service
// already returns the wire-shaped prune tally.
func imageCachePruneDispatch(ctx context.Context, d *Daemon, p api.ImageCachePruneParams) (api.ImageCachePruneResult, error) {
	res, pruneErr := d.img.PruneOCICache(ctx, p)
	return res, pruneErr
}
// kernelListDispatch is a pure pass-through to the image service,
// which owns kernel management as well.
func kernelListDispatch(ctx context.Context, d *Daemon) (api.KernelListResult, error) {
	res, listErr := d.img.KernelList(ctx)
	return res, listErr
}
// kernelShowDispatch resolves a kernel entry by name and wraps it.
func kernelShowDispatch(ctx context.Context, d *Daemon, p api.KernelRefParams) (api.KernelShowResult, error) {
	found, showErr := d.img.KernelShow(ctx, p.Name)
	return api.KernelShowResult{Entry: found}, showErr
}
// kernelDeleteDispatch deletes a kernel by name; only the error matters.
func kernelDeleteDispatch(ctx context.Context, d *Daemon, p api.KernelRefParams) (api.Empty, error) {
	deleteErr := d.img.KernelDelete(ctx, p.Name)
	return api.Empty{}, deleteErr
}
// kernelImportDispatch adapts kernel.import onto the image service.
func kernelImportDispatch(ctx context.Context, d *Daemon, p api.KernelImportParams) (api.KernelShowResult, error) {
	imported, importErr := d.img.KernelImport(ctx, p)
	return api.KernelShowResult{Entry: imported}, importErr
}
// kernelPullDispatch adapts kernel.pull onto the image service.
func kernelPullDispatch(ctx context.Context, d *Daemon, p api.KernelPullParams) (api.KernelShowResult, error) {
	pulled, pullErr := d.img.KernelPull(ctx, p)
	return api.KernelShowResult{Entry: pulled}, pullErr
}
// kernelCatalogDispatch is a pure pass-through to the image service.
func kernelCatalogDispatch(ctx context.Context, d *Daemon) (api.KernelCatalogResult, error) {
	res, catalogErr := d.img.KernelCatalog(ctx)
	return res, catalogErr
}
// ---- Special-case handlers: pre-service validation, custom error
// ---- codes, or raw rpc.NewResult encoding — things the generic
// ---- wrapper can't express.
// pingHandler is info-only: no service call, just a snapshot of build
// metadata. It encodes via raw rpc.NewResult to match the
// pre-refactor wire encoding (marshalResultOrError would over-wrap
// it). The NewResult error is deliberately ignored: PingResult is a
// plain struct of strings and an int, which always marshals.
func pingHandler(_ context.Context, d *Daemon, _ rpc.Request) rpc.Response {
	build := buildinfo.Current()
	resp, _ := rpc.NewResult(api.PingResult{
		Status:  "ok",
		PID:     d.pid,
		Version: build.Version,
		Commit:  build.Commit,
		BuiltAt: build.BuiltAt,
	})
	return resp
}
// shutdownHandler triggers async daemon shutdown. Close runs in its
// own goroutine so the "stopping" response reaches the client before
// the listener goes away.
func shutdownHandler(_ context.Context, d *Daemon, _ rpc.Request) rpc.Response {
	go d.Close()
	resp, _ := rpc.NewResult(api.ShutdownResult{Status: "stopping"})
	return resp
}
// vmLogsHandler needs the "not_found" error code (distinct from the
// generic "operation_failed" that paramHandler would produce) when
// FindVM misses, so the CLI can print a cleaner message.
func vmLogsHandler(ctx context.Context, d *Daemon, req rpc.Request) rpc.Response {
	ref, decodeErr := rpc.DecodeParams[api.VMRefParams](req)
	if decodeErr != nil {
		return rpc.NewError("bad_request", decodeErr.Error())
	}
	vm, findErr := d.vm.FindVM(ctx, ref.IDOrName)
	if findErr != nil {
		return rpc.NewError("not_found", findErr.Error())
	}
	return marshalResultOrError(api.VMLogsResult{LogPath: vm.Runtime.LogPath}, nil)
}
// vmSSHHandler performs two pre-service validations with distinct
// error codes: a TouchVM miss maps to "not_found", and a found but
// dead VM maps to "not_running". Both feed cleaner CLI output than
// the generic "operation_failed".
func vmSSHHandler(ctx context.Context, d *Daemon, req rpc.Request) rpc.Response {
	ref, decodeErr := rpc.DecodeParams[api.VMRefParams](req)
	if decodeErr != nil {
		return rpc.NewError("bad_request", decodeErr.Error())
	}
	vm, touchErr := d.vm.TouchVM(ctx, ref.IDOrName)
	if touchErr != nil {
		return rpc.NewError("not_found", touchErr.Error())
	}
	if !d.vm.vmAlive(vm) {
		return rpc.NewError("not_running", fmt.Sprintf("vm %s is not running", vm.Name))
	}
	return marshalResultOrError(api.VMSSHResult{Name: vm.Name, GuestIP: vm.Runtime.GuestIP}, nil)
}