Replace the three hardcoded host→guest credential syncs (opencode,
claude, pi) with a generic `[[file_sync]]` config list. Default is
empty — users opt in to exactly what they want synced, with no
surprise about which tools banger "supports".
```toml
[[file_sync]]
host = "~/.local/share/opencode/auth.json"
guest = "~/.local/share/opencode/auth.json"
[[file_sync]]
host = "~/.aws" # directories are copied recursively
guest = "~/.aws"
[[file_sync]]
host = "~/bin/my-script"
guest = "~/bin/my-script"
mode = "0755" # optional; default 0600 for files
```
Semantics:
- Host `~/...` expands against the host user's $HOME. Absolute host
paths are used as-is.
- Guest must live under `~/` or `/root/...` — banger's work disk is
mounted at /root in the guest, so that's the syncable namespace.
Anything outside is rejected at config load.
- Validation at config load: reject empty paths, relative paths,
`..` traversal, `~user/...`, malformed mode strings. Errors name
the offending entry index.
- Missing host paths are a soft skip with a warn log (existing
behaviour). Other errors (read, mkdir, install) abort VM create.
- File entries: `install -o 0 -g 0 -m <mode>` (default 0600).
- Directory entries: walked in Go; each source file is installed
with its own source permissions preserved. The entry's `mode` is
ignored for directories.
Removed (all dead after this):
- `ensureOpencodeAuthOnWorkDisk`, `ensureClaudeAuthOnWorkDisk`,
`ensurePiAuthOnWorkDisk`, the shared `ensureAuthFileOnWorkDisk`,
their `warn*Skipped` helpers, `resolveHost{Opencode,Claude,Pi}AuthPath`,
and the work-disk relative-path + default display-path constants.
- The capability hook registering the three syncs now calls the
generic `runFileSync` once.
Seven tests exercising the old codepath deleted; six new tests cover
the new runFileSync (no-op on empty config, file copy, custom mode,
missing-host-skip, overwrite, recursive directory). Config-layer
test adds happy-path parsing and a case-per-shape table of invalid
entries (empty, relative host, guest outside /root, '..' traversal,
`~user`, bad mode).
README updated: replaces the "Credential sync" section with a
"File sync" section showing the new config shape.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
318 lines
9.8 KiB
Go
318 lines
9.8 KiB
Go
package daemon
|
|
|
|
import (
	"context"
	"errors"
	"net"
	"os"
	"strings"
	"syscall"

	"banger/internal/firecracker"
	"banger/internal/guestconfig"
	"banger/internal/model"
	"banger/internal/system"
	"banger/internal/vmdns"
)
// vmCapability is the base contract for a pluggable VM feature. Optional
// lifecycle behaviour is discovered via type assertions against the
// narrower *Capability interfaces declared below.
type vmCapability interface {
	Name() string
}
// startPreflightCapability lets a capability contribute prerequisite
// checks that must pass before a VM is started.
type startPreflightCapability interface {
	AddStartPreflight(context.Context, *Daemon, *system.Preflight, model.VMRecord, model.Image)
}
// guestConfigCapability lets a capability add entries (mounts, etc.) to
// the guest configuration being built for a VM.
type guestConfigCapability interface {
	ContributeGuest(*guestconfig.Builder, model.VMRecord, model.Image)
}
// machineConfigCapability lets a capability mutate the Firecracker
// machine configuration (drives, devices, …) before boot.
type machineConfigCapability interface {
	ContributeMachine(*firecracker.MachineConfig, model.VMRecord, model.Image)
}
// prepareHostCapability lets a capability set up host-side state for a
// VM before it starts; it may mutate the record (e.g. runtime paths).
type prepareHostCapability interface {
	PrepareHost(context.Context, *Daemon, *model.VMRecord, model.Image) error
}
// postStartCapability lets a capability run work after the VM process
// has started (DNS publication, NAT rules, …).
type postStartCapability interface {
	PostStart(context.Context, *Daemon, model.VMRecord, model.Image) error
}
// cleanupCapability lets a capability tear down host-side state when a
// VM is destroyed or when preparation is unwound after a failure.
type cleanupCapability interface {
	Cleanup(context.Context, *Daemon, model.VMRecord) error
}
// configChangeCapability lets a capability react to a VM record change,
// receiving the before and after snapshots.
type configChangeCapability interface {
	ApplyConfigChange(context.Context, *Daemon, model.VMRecord, model.VMRecord) error
}
// doctorCapability lets a capability append health checks to the
// `banger doctor` report.
type doctorCapability interface {
	AddDoctorChecks(context.Context, *Daemon, *system.Report)
}
func (d *Daemon) registeredCapabilities() []vmCapability {
|
|
if len(d.vmCaps) > 0 {
|
|
return d.vmCaps
|
|
}
|
|
return []vmCapability{
|
|
workDiskCapability{},
|
|
opencodeCapability{},
|
|
dnsCapability{},
|
|
natCapability{},
|
|
}
|
|
}
|
|
|
|
func (d *Daemon) addCapabilityStartPrereqs(ctx context.Context, checks *system.Preflight, vm model.VMRecord, image model.Image) {
|
|
for _, capability := range d.registeredCapabilities() {
|
|
if hook, ok := capability.(startPreflightCapability); ok {
|
|
hook.AddStartPreflight(ctx, d, checks, vm, image)
|
|
}
|
|
}
|
|
}
|
|
|
|
func (d *Daemon) contributeGuestConfig(builder *guestconfig.Builder, vm model.VMRecord, image model.Image) {
|
|
for _, capability := range d.registeredCapabilities() {
|
|
if hook, ok := capability.(guestConfigCapability); ok {
|
|
hook.ContributeGuest(builder, vm, image)
|
|
}
|
|
}
|
|
}
|
|
|
|
func (d *Daemon) contributeMachineConfig(cfg *firecracker.MachineConfig, vm model.VMRecord, image model.Image) {
|
|
for _, capability := range d.registeredCapabilities() {
|
|
if hook, ok := capability.(machineConfigCapability); ok {
|
|
hook.ContributeMachine(cfg, vm, image)
|
|
}
|
|
}
|
|
}
|
|
|
|
func (d *Daemon) prepareCapabilityHosts(ctx context.Context, vm *model.VMRecord, image model.Image) error {
|
|
prepared := make([]vmCapability, 0, len(d.registeredCapabilities()))
|
|
for _, capability := range d.registeredCapabilities() {
|
|
hook, ok := capability.(prepareHostCapability)
|
|
if !ok {
|
|
continue
|
|
}
|
|
if err := hook.PrepareHost(ctx, d, vm, image); err != nil {
|
|
d.cleanupPreparedCapabilities(context.Background(), vm, prepared)
|
|
return err
|
|
}
|
|
prepared = append(prepared, capability)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (d *Daemon) postStartCapabilities(ctx context.Context, vm model.VMRecord, image model.Image) error {
|
|
for _, capability := range d.registeredCapabilities() {
|
|
switch capability.Name() {
|
|
case "dns":
|
|
vmCreateStage(ctx, "apply_dns", "publishing vm dns record")
|
|
case "nat":
|
|
if vm.Spec.NATEnabled {
|
|
vmCreateStage(ctx, "apply_nat", "configuring nat")
|
|
}
|
|
}
|
|
if hook, ok := capability.(postStartCapability); ok {
|
|
if err := hook.PostStart(ctx, d, vm, image); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (d *Daemon) cleanupCapabilityState(ctx context.Context, vm model.VMRecord) error {
|
|
return d.cleanupPreparedCapabilities(ctx, &vm, d.registeredCapabilities())
|
|
}
|
|
|
|
func (d *Daemon) cleanupPreparedCapabilities(ctx context.Context, vm *model.VMRecord, capabilities []vmCapability) error {
|
|
var err error
|
|
for index := len(capabilities) - 1; index >= 0; index-- {
|
|
hook, ok := capabilities[index].(cleanupCapability)
|
|
if !ok {
|
|
continue
|
|
}
|
|
err = joinErr(err, hook.Cleanup(ctx, d, *vm))
|
|
}
|
|
return err
|
|
}
|
|
|
|
func (d *Daemon) applyCapabilityConfigChanges(ctx context.Context, before, after model.VMRecord) error {
|
|
for _, capability := range d.registeredCapabilities() {
|
|
if hook, ok := capability.(configChangeCapability); ok {
|
|
if err := hook.ApplyConfigChange(ctx, d, before, after); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (d *Daemon) addCapabilityDoctorChecks(ctx context.Context, report *system.Report) {
|
|
for _, capability := range d.registeredCapabilities() {
|
|
if hook, ok := capability.(doctorCapability); ok {
|
|
hook.AddDoctorChecks(ctx, d, report)
|
|
}
|
|
}
|
|
}
|
|
|
|
// workDiskCapability provisions, mounts, and seeds the per-VM /root
// work disk (attached to the guest as /dev/vdb).
type workDiskCapability struct{}
// Name identifies the work-disk capability.
func (workDiskCapability) Name() string { return "work-disk" }
func (workDiskCapability) AddStartPreflight(_ context.Context, _ *Daemon, checks *system.Preflight, vm model.VMRecord, image model.Image) {
|
|
if exists(vm.Runtime.WorkDiskPath) {
|
|
return
|
|
}
|
|
imageSeed := ""
|
|
if image.RootfsPath != "" {
|
|
imageSeed = image.WorkSeedPath
|
|
}
|
|
if exists(imageSeed) {
|
|
if info, err := os.Stat(imageSeed); err == nil && vm.Spec.WorkDiskSizeBytes > info.Size() {
|
|
checks.RequireCommand("e2fsck", toolHint("e2fsck"))
|
|
checks.RequireCommand("resize2fs", toolHint("resize2fs"))
|
|
}
|
|
return
|
|
}
|
|
for _, command := range []string{"mkfs.ext4", "mount", "umount", "cp"} {
|
|
checks.RequireCommand(command, toolHint(command))
|
|
}
|
|
}
|
|
|
|
func (workDiskCapability) ContributeGuest(builder *guestconfig.Builder, _ model.VMRecord, _ model.Image) {
|
|
builder.AddMount(guestconfig.MountSpec{
|
|
Source: "/dev/vdb",
|
|
Target: "/root",
|
|
FSType: "ext4",
|
|
Options: []string{"defaults"},
|
|
Dump: 0,
|
|
Pass: 2,
|
|
})
|
|
}
|
|
|
|
func (workDiskCapability) ContributeMachine(cfg *firecracker.MachineConfig, vm model.VMRecord, _ model.Image) {
|
|
cfg.Drives = append(cfg.Drives, firecracker.DriveConfig{
|
|
ID: "work",
|
|
Path: vm.Runtime.WorkDiskPath,
|
|
ReadOnly: false,
|
|
})
|
|
}
|
|
|
|
func (workDiskCapability) PrepareHost(ctx context.Context, d *Daemon, vm *model.VMRecord, image model.Image) error {
|
|
prep, err := d.ensureWorkDisk(ctx, vm, image)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if err := d.ensureAuthorizedKeyOnWorkDisk(ctx, vm, image, prep); err != nil {
|
|
return err
|
|
}
|
|
if err := d.ensureGitIdentityOnWorkDisk(ctx, vm); err != nil {
|
|
return err
|
|
}
|
|
return d.runFileSync(ctx, vm)
|
|
}
|
|
|
|
// AddDoctorChecks reports work-disk provisioning health. Preferred
// path: the default image ships a work-seed artifact that is copied
// directly. Fallback: the mkfs/mount toolchain must be present, plus a
// warning that VM creation will be slower without a seed.
func (workDiskCapability) AddDoctorChecks(_ context.Context, d *Daemon, report *system.Report) {
	if d.store != nil && strings.TrimSpace(d.config.DefaultImageName) != "" {
		// NOTE(review): uses context.Background() rather than the doctor
		// context for this lookup — presumably intentional; confirm.
		if image, err := d.store.GetImageByName(context.Background(), d.config.DefaultImageName); err == nil && strings.TrimSpace(image.WorkSeedPath) != "" && exists(image.WorkSeedPath) {
			checks := system.NewPreflight()
			checks.RequireFile(image.WorkSeedPath, "default image work-seed", `rebuild the default image to regenerate the /root seed`)
			report.AddPreflight("feature /root work disk", checks, "seeded /root work disk artifact available")
			return
		}
	}
	// No usable seed: require the tools needed to build the disk from scratch.
	checks := system.NewPreflight()
	for _, command := range []string{"mkfs.ext4", "mount", "umount", "cp"} {
		checks.RequireCommand(command, toolHint(command))
	}
	report.AddPreflight("feature /root work disk", checks, "fallback /root work disk tooling available")
	report.AddWarn("feature /root work disk", "default image has no work-seed artifact; new VM creates will be slower until the image is rebuilt")
}
// dnsCapability publishes and removes per-VM DNS records.
type dnsCapability struct{}
// Name identifies the dns capability (matched by stage reporting).
func (dnsCapability) Name() string { return "dns" }
// PostStart publishes the started VM's DNS record, mapping the VM name
// to its guest IP.
func (dnsCapability) PostStart(ctx context.Context, d *Daemon, vm model.VMRecord, _ model.Image) error {
	return d.setDNS(ctx, vm.Name, vm.Runtime.GuestIP)
}
// Cleanup removes the VM's published DNS record.
func (dnsCapability) Cleanup(ctx context.Context, d *Daemon, vm model.VMRecord) error {
	return d.removeDNS(ctx, vm.Runtime.DNSName)
}
func (dnsCapability) AddDoctorChecks(_ context.Context, _ *Daemon, report *system.Report) {
|
|
conn, err := net.ListenPacket("udp", vmdns.DefaultListenAddr)
|
|
if err != nil {
|
|
if strings.Contains(strings.ToLower(err.Error()), "address already in use") {
|
|
report.AddWarn("feature vm dns", "listener address "+vmdns.DefaultListenAddr+" is already in use")
|
|
return
|
|
}
|
|
report.AddFail("feature vm dns", "cannot bind "+vmdns.DefaultListenAddr+": "+err.Error())
|
|
return
|
|
}
|
|
_ = conn.Close()
|
|
report.AddPass("feature vm dns", "listener can bind "+vmdns.DefaultListenAddr)
|
|
}
|
|
|
|
// natCapability manages per-VM NAT rules for VMs with NAT enabled.
type natCapability struct{}
// Name identifies the nat capability (matched by stage reporting).
func (natCapability) Name() string { return "nat" }
func (natCapability) AddStartPreflight(ctx context.Context, d *Daemon, checks *system.Preflight, vm model.VMRecord, _ model.Image) {
|
|
if !vm.Spec.NATEnabled {
|
|
return
|
|
}
|
|
d.addNATPrereqs(ctx, checks)
|
|
}
|
|
|
|
func (natCapability) PostStart(ctx context.Context, d *Daemon, vm model.VMRecord, _ model.Image) error {
|
|
if !vm.Spec.NATEnabled {
|
|
return nil
|
|
}
|
|
return d.ensureNAT(ctx, vm, true)
|
|
}
|
|
|
|
// Cleanup removes the VM's NAT rules. VMs without NAT are skipped, as
// are records whose runtime network handles (guest IP / tap device)
// were never populated — in that case there is nothing to tear down,
// and the skip is logged at debug level.
func (natCapability) Cleanup(ctx context.Context, d *Daemon, vm model.VMRecord) error {
	if !vm.Spec.NATEnabled {
		return nil
	}
	if strings.TrimSpace(vm.Runtime.GuestIP) == "" || strings.TrimSpace(vm.Runtime.TapDevice) == "" {
		if d.logger != nil {
			d.logger.Debug("skipping nat cleanup without runtime network handles", append(vmLogAttrs(vm), "guest_ip", vm.Runtime.GuestIP, "tap_device", vm.Runtime.TapDevice)...)
		}
		return nil
	}
	return d.ensureNAT(ctx, vm, false)
}
func (natCapability) ApplyConfigChange(ctx context.Context, d *Daemon, before, after model.VMRecord) error {
|
|
if before.Spec.NATEnabled == after.Spec.NATEnabled {
|
|
return nil
|
|
}
|
|
if after.State != model.VMStateRunning || !system.ProcessRunning(after.Runtime.PID, after.Runtime.APISockPath) {
|
|
return nil
|
|
}
|
|
return d.ensureNAT(ctx, after, after.Spec.NATEnabled)
|
|
}
|
|
|
|
// AddDoctorChecks verifies NAT prerequisites (the `ip` tool plus the
// daemon's NAT prereq checks) and that a default uplink interface can
// be resolved, reporting a single pass/fail entry for the nat feature.
func (natCapability) AddDoctorChecks(ctx context.Context, d *Daemon, report *system.Report) {
	checks := system.NewPreflight()
	checks.RequireCommand("ip", toolHint("ip"))
	d.addNATPrereqs(ctx, checks)
	if len(checks.Problems()) > 0 {
		report.Add(system.CheckStatusFail, "feature nat", checks.Problems()...)
		return
	}
	uplink, err := d.defaultUplink(ctx)
	if err != nil {
		report.AddFail("feature nat", err.Error())
		return
	}
	report.AddPass("feature nat", "iptables/sysctl available, uplink "+uplink)
}
func joinErr(current, next error) error {
|
|
return errors.Join(current, next)
|
|
}
|