Closes commit 3 of the god-service decomposition. VMService still
owned 45+ methods after the startVMLocked extraction and RPC table
landed in commits 1 and 2. Stats / ports / health / vsock-ping sit
in a corner of that surface that doesn't share any state with
lifecycle orchestration — nothing about "what's this VM's CPU
doing" belongs in the same service as Create/Start/Stop/Delete/Set.
New StatsService owns:
- GetVMStats / getVMStatsLocked / collectStats (stats collection)
- HealthVM / PingVM (vsock-agent health probe)
- PortsVM + buildVMPorts + probeWebListener + probeHTTPScheme +
dedupeVMPorts (listening-port enumeration)
- pollStats (background ticker refresh)
- stopStaleVMs (auto-stop sweep past config.AutoStopStaleAfter)
The VMService touch-points stats genuinely needs — vmAlive,
vmHandles, and the per-VM lock helpers, plus cleanupRuntime for the
stale-sweep tear-down — come in as function-typed closures, not a
*VMService pointer. StatsService has no back-reference to its
sibling. Mirrors the dependency-struct pattern WorkspaceService
already uses.
Wiring: d.stats is populated in wireServices AFTER d.vm (closures
must see a non-nil d.vm at call time). Dispatch table's four
entries (vm.stats / vm.health / vm.ping / vm.ports) now resolve
through d.stats. Background loop's pollStats / stopStaleVMs
tickers do the same. Dispatch surface from the RPC client's
perspective is byte-identical.
After this commit:
- vm_stats.go and ports.go are deleted; their content (plus the
stats-specific fields) lives in stats_service.go.
- VMService loses 12 methods. It's still the biggest service
(~30 methods, all lifecycle-supporting: handle cache, disk
provisioning, preflight, create-ops registry, lock helpers,
the lifecycle verbs themselves) but it's finally one coherent
concern instead of five.
Tests:
- TestWireServicesInstantiatesStatsService — pins that the
wiring order puts d.stats non-nil + its five closures all
populated. Prevents a silent background-loop regression.
- All existing tests that called d.vm.HealthVM / d.vm.PingVM /
d.vm.PortsVM / d.vm.collectStats were re-pointed at d.stats.
Smoke: all 21 scenarios green, including vm ports (exercises the
new PortsVM entry end-to-end) and the long-running workspace
scenarios (exercise the background stats poller implicitly).
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
299 lines
12 KiB
Go
299 lines
12 KiB
Go
package daemon
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
|
|
"banger/internal/api"
|
|
"banger/internal/buildinfo"
|
|
"banger/internal/rpc"
|
|
)
|
|
|
|
// handler is the signature every RPC method dispatches through. Keeps
// Daemon.dispatch a one-liner — lookup + invoke — instead of the old
// ~240-line `switch`. The `*Daemon` arrives as a call-time parameter
// (passed by the driver) rather than being captured when the map is
// built, so tests that stand up a *Daemon with custom wiring re-use
// the same table without re-registering anything.
type handler func(ctx context.Context, d *Daemon, req rpc.Request) rpc.Response
|
|
|
|
// paramHandler wraps the common "decode params of type P, call
|
|
// service returning (R, error), wrap R" flow that 28 of 34 methods
|
|
// follow. Compile-time type-safe — no reflection. P and R are
|
|
// deduced from the function literal passed in, so per-handler
|
|
// registration reads as "what's the RPC shape + what's the service
|
|
// call" and nothing else.
|
|
func paramHandler[P any, R any](call func(ctx context.Context, d *Daemon, p P) (R, error)) handler {
|
|
return func(ctx context.Context, d *Daemon, req rpc.Request) rpc.Response {
|
|
p, err := rpc.DecodeParams[P](req)
|
|
if err != nil {
|
|
return rpc.NewError("bad_request", err.Error())
|
|
}
|
|
result, err := call(ctx, d, p)
|
|
return marshalResultOrError(result, err)
|
|
}
|
|
}
|
|
|
|
// noParamHandler is the decode-free variant for RPC methods that
|
|
// take no params (ping, shutdown, *.list, kernel.catalog).
|
|
func noParamHandler[R any](call func(ctx context.Context, d *Daemon) (R, error)) handler {
|
|
return func(ctx context.Context, d *Daemon, _ rpc.Request) rpc.Response {
|
|
result, err := call(ctx, d)
|
|
return marshalResultOrError(result, err)
|
|
}
|
|
}
|
|
|
|
// rpcHandlers maps every supported method name to its handler. Adding
// or removing a method is a single-line diff here — unlike the old
// switch, there's no four-line decode/call/wrap boilerplate to copy.
// The four special-case handlers (vm.logs, vm.ssh, ping, shutdown)
// live below the map; they need pre-service validation or raw result
// encoding that the generic wrapper can't express.
var rpcHandlers = map[string]handler{
	// Daemon control: raw-encoded, no params.
	"ping":     pingHandler,
	"shutdown": shutdownHandler,

	// VM lifecycle, introspection, and stats.
	"vm.create":        paramHandler(vmCreateDispatch),
	"vm.create.begin":  paramHandler(vmCreateBeginDispatch),
	"vm.create.status": paramHandler(vmCreateStatusDispatch),
	"vm.create.cancel": paramHandler(vmCreateCancelDispatch),
	"vm.list":          noParamHandler(vmListDispatch),
	"vm.show":          paramHandler(vmShowDispatch),
	"vm.start":         paramHandler(vmStartDispatch),
	"vm.stop":          paramHandler(vmStopDispatch),
	"vm.kill":          paramHandler(vmKillDispatch),
	"vm.restart":       paramHandler(vmRestartDispatch),
	"vm.delete":        paramHandler(vmDeleteDispatch),
	"vm.set":           paramHandler(vmSetDispatch),
	"vm.stats":         paramHandler(vmStatsDispatch),
	"vm.logs":          vmLogsHandler, // special-case: "not_found" error code
	"vm.ssh":           vmSSHHandler,  // special-case: "not_found" / "not_running"
	"vm.health":        paramHandler(vmHealthDispatch),
	"vm.ping":          paramHandler(vmPingDispatch),
	"vm.ports":         paramHandler(vmPortsDispatch),

	// Workspace operations.
	"vm.workspace.prepare": paramHandler(workspacePrepareDispatch),
	"vm.workspace.export":  paramHandler(workspaceExportDispatch),

	// Image registry.
	"image.list":     noParamHandler(imageListDispatch),
	"image.show":     paramHandler(imageShowDispatch),
	"image.register": paramHandler(imageRegisterDispatch),
	"image.promote":  paramHandler(imagePromoteDispatch),
	"image.delete":   paramHandler(imageDeleteDispatch),
	"image.pull":     paramHandler(imagePullDispatch),

	// Kernel management.
	"kernel.list":    noParamHandler(kernelListDispatch),
	"kernel.show":    paramHandler(kernelShowDispatch),
	"kernel.delete":  paramHandler(kernelDeleteDispatch),
	"kernel.import":  paramHandler(kernelImportDispatch),
	"kernel.pull":    paramHandler(kernelPullDispatch),
	"kernel.catalog": noParamHandler(kernelCatalogDispatch),
}
|
|
|
|
// ---- Service-call adapters (kept thin; the interesting shape is up
// ---- in the `paramHandler` generic). These exist so the map entries
// ---- stay readable at a glance.
|
|
|
|
func vmCreateDispatch(ctx context.Context, d *Daemon, p api.VMCreateParams) (api.VMShowResult, error) {
|
|
vm, err := d.vm.CreateVM(ctx, p)
|
|
return api.VMShowResult{VM: vm}, err
|
|
}
|
|
|
|
func vmCreateBeginDispatch(ctx context.Context, d *Daemon, p api.VMCreateParams) (api.VMCreateBeginResult, error) {
|
|
op, err := d.vm.BeginVMCreate(ctx, p)
|
|
return api.VMCreateBeginResult{Operation: op}, err
|
|
}
|
|
|
|
func vmCreateStatusDispatch(ctx context.Context, d *Daemon, p api.VMCreateStatusParams) (api.VMCreateStatusResult, error) {
|
|
op, err := d.vm.VMCreateStatus(ctx, p.ID)
|
|
return api.VMCreateStatusResult{Operation: op}, err
|
|
}
|
|
|
|
func vmCreateCancelDispatch(ctx context.Context, d *Daemon, p api.VMCreateStatusParams) (api.Empty, error) {
|
|
return api.Empty{}, d.vm.CancelVMCreate(ctx, p.ID)
|
|
}
|
|
|
|
func vmListDispatch(ctx context.Context, d *Daemon) (api.VMListResult, error) {
|
|
vms, err := d.store.ListVMs(ctx)
|
|
return api.VMListResult{VMs: vms}, err
|
|
}
|
|
|
|
func vmShowDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMShowResult, error) {
|
|
vm, err := d.vm.FindVM(ctx, p.IDOrName)
|
|
return api.VMShowResult{VM: vm}, err
|
|
}
|
|
|
|
func vmStartDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMShowResult, error) {
|
|
vm, err := d.vm.StartVM(ctx, p.IDOrName)
|
|
return api.VMShowResult{VM: vm}, err
|
|
}
|
|
|
|
func vmStopDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMShowResult, error) {
|
|
vm, err := d.vm.StopVM(ctx, p.IDOrName)
|
|
return api.VMShowResult{VM: vm}, err
|
|
}
|
|
|
|
func vmKillDispatch(ctx context.Context, d *Daemon, p api.VMKillParams) (api.VMShowResult, error) {
|
|
vm, err := d.vm.KillVM(ctx, p)
|
|
return api.VMShowResult{VM: vm}, err
|
|
}
|
|
|
|
func vmRestartDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMShowResult, error) {
|
|
vm, err := d.vm.RestartVM(ctx, p.IDOrName)
|
|
return api.VMShowResult{VM: vm}, err
|
|
}
|
|
|
|
func vmDeleteDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMShowResult, error) {
|
|
vm, err := d.vm.DeleteVM(ctx, p.IDOrName)
|
|
return api.VMShowResult{VM: vm}, err
|
|
}
|
|
|
|
func vmSetDispatch(ctx context.Context, d *Daemon, p api.VMSetParams) (api.VMShowResult, error) {
|
|
vm, err := d.vm.SetVM(ctx, p)
|
|
return api.VMShowResult{VM: vm}, err
|
|
}
|
|
|
|
func vmStatsDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMStatsResult, error) {
|
|
vm, stats, err := d.stats.GetVMStats(ctx, p.IDOrName)
|
|
return api.VMStatsResult{VM: vm, Stats: stats}, err
|
|
}
|
|
|
|
func vmHealthDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMHealthResult, error) {
|
|
return d.stats.HealthVM(ctx, p.IDOrName)
|
|
}
|
|
|
|
func vmPingDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMPingResult, error) {
|
|
return d.stats.PingVM(ctx, p.IDOrName)
|
|
}
|
|
|
|
func vmPortsDispatch(ctx context.Context, d *Daemon, p api.VMRefParams) (api.VMPortsResult, error) {
|
|
return d.stats.PortsVM(ctx, p.IDOrName)
|
|
}
|
|
|
|
func workspacePrepareDispatch(ctx context.Context, d *Daemon, p api.VMWorkspacePrepareParams) (api.VMWorkspacePrepareResult, error) {
|
|
ws, err := d.ws.PrepareVMWorkspace(ctx, p)
|
|
return api.VMWorkspacePrepareResult{Workspace: ws}, err
|
|
}
|
|
|
|
func workspaceExportDispatch(ctx context.Context, d *Daemon, p api.WorkspaceExportParams) (api.WorkspaceExportResult, error) {
|
|
return d.ws.ExportVMWorkspace(ctx, p)
|
|
}
|
|
|
|
func imageListDispatch(ctx context.Context, d *Daemon) (api.ImageListResult, error) {
|
|
images, err := d.store.ListImages(ctx)
|
|
return api.ImageListResult{Images: images}, err
|
|
}
|
|
|
|
func imageShowDispatch(ctx context.Context, d *Daemon, p api.ImageRefParams) (api.ImageShowResult, error) {
|
|
image, err := d.img.FindImage(ctx, p.IDOrName)
|
|
return api.ImageShowResult{Image: image}, err
|
|
}
|
|
|
|
func imageRegisterDispatch(ctx context.Context, d *Daemon, p api.ImageRegisterParams) (api.ImageShowResult, error) {
|
|
image, err := d.img.RegisterImage(ctx, p)
|
|
return api.ImageShowResult{Image: image}, err
|
|
}
|
|
|
|
func imagePromoteDispatch(ctx context.Context, d *Daemon, p api.ImageRefParams) (api.ImageShowResult, error) {
|
|
image, err := d.img.PromoteImage(ctx, p.IDOrName)
|
|
return api.ImageShowResult{Image: image}, err
|
|
}
|
|
|
|
func imageDeleteDispatch(ctx context.Context, d *Daemon, p api.ImageRefParams) (api.ImageShowResult, error) {
|
|
image, err := d.img.DeleteImage(ctx, p.IDOrName)
|
|
return api.ImageShowResult{Image: image}, err
|
|
}
|
|
|
|
func imagePullDispatch(ctx context.Context, d *Daemon, p api.ImagePullParams) (api.ImageShowResult, error) {
|
|
image, err := d.img.PullImage(ctx, p)
|
|
return api.ImageShowResult{Image: image}, err
|
|
}
|
|
|
|
func kernelListDispatch(ctx context.Context, d *Daemon) (api.KernelListResult, error) {
|
|
return d.img.KernelList(ctx)
|
|
}
|
|
|
|
func kernelShowDispatch(ctx context.Context, d *Daemon, p api.KernelRefParams) (api.KernelShowResult, error) {
|
|
entry, err := d.img.KernelShow(ctx, p.Name)
|
|
return api.KernelShowResult{Entry: entry}, err
|
|
}
|
|
|
|
func kernelDeleteDispatch(ctx context.Context, d *Daemon, p api.KernelRefParams) (api.Empty, error) {
|
|
return api.Empty{}, d.img.KernelDelete(ctx, p.Name)
|
|
}
|
|
|
|
func kernelImportDispatch(ctx context.Context, d *Daemon, p api.KernelImportParams) (api.KernelShowResult, error) {
|
|
entry, err := d.img.KernelImport(ctx, p)
|
|
return api.KernelShowResult{Entry: entry}, err
|
|
}
|
|
|
|
func kernelPullDispatch(ctx context.Context, d *Daemon, p api.KernelPullParams) (api.KernelShowResult, error) {
|
|
entry, err := d.img.KernelPull(ctx, p)
|
|
return api.KernelShowResult{Entry: entry}, err
|
|
}
|
|
|
|
func kernelCatalogDispatch(ctx context.Context, d *Daemon) (api.KernelCatalogResult, error) {
|
|
return d.img.KernelCatalog(ctx)
|
|
}
|
|
|
|
// ---- Special-case handlers: pre-service validation, custom error
|
|
// ---- codes, or raw rpc.NewResult encoding — things the generic
|
|
// ---- wrapper can't express.
|
|
|
|
// pingHandler is info-only: no service call, just a snapshot of
|
|
// build metadata. Raw rpc.NewResult to match the pre-refactor
|
|
// encoding; marshalResultOrError would over-wrap this.
|
|
func pingHandler(_ context.Context, d *Daemon, _ rpc.Request) rpc.Response {
|
|
info := buildinfo.Current()
|
|
result, _ := rpc.NewResult(api.PingResult{
|
|
Status: "ok",
|
|
PID: d.pid,
|
|
Version: info.Version,
|
|
Commit: info.Commit,
|
|
BuiltAt: info.BuiltAt,
|
|
})
|
|
return result
|
|
}
|
|
|
|
// shutdownHandler triggers async daemon shutdown. `d.Close` runs in
|
|
// a goroutine so the RPC response reaches the client before the
|
|
// listener closes.
|
|
func shutdownHandler(_ context.Context, d *Daemon, _ rpc.Request) rpc.Response {
|
|
go d.Close()
|
|
result, _ := rpc.NewResult(api.ShutdownResult{Status: "stopping"})
|
|
return result
|
|
}
|
|
|
|
// vmLogsHandler needs the "not_found" error code (distinct from
|
|
// "operation_failed") when FindVM misses, so the CLI can print a
|
|
// cleaner message. The generic paramHandler maps every service err
|
|
// to "operation_failed".
|
|
func vmLogsHandler(ctx context.Context, d *Daemon, req rpc.Request) rpc.Response {
|
|
params, err := rpc.DecodeParams[api.VMRefParams](req)
|
|
if err != nil {
|
|
return rpc.NewError("bad_request", err.Error())
|
|
}
|
|
vm, err := d.vm.FindVM(ctx, params.IDOrName)
|
|
if err != nil {
|
|
return rpc.NewError("not_found", err.Error())
|
|
}
|
|
return marshalResultOrError(api.VMLogsResult{LogPath: vm.Runtime.LogPath}, nil)
|
|
}
|
|
|
|
// vmSSHHandler does two pre-service validations: FindVM / TouchVM
|
|
// for "not_found", then vmAlive for "not_running". Both distinct
|
|
// error codes feed cleaner CLI output.
|
|
func vmSSHHandler(ctx context.Context, d *Daemon, req rpc.Request) rpc.Response {
|
|
params, err := rpc.DecodeParams[api.VMRefParams](req)
|
|
if err != nil {
|
|
return rpc.NewError("bad_request", err.Error())
|
|
}
|
|
vm, err := d.vm.TouchVM(ctx, params.IDOrName)
|
|
if err != nil {
|
|
return rpc.NewError("not_found", err.Error())
|
|
}
|
|
if !d.vm.vmAlive(vm) {
|
|
return rpc.NewError("not_running", fmt.Sprintf("vm %s is not running", vm.Name))
|
|
}
|
|
return marshalResultOrError(api.VMSSHResult{Name: vm.Name, GuestIP: vm.Runtime.GuestIP}, nil)
|
|
}
|