Phase 4 of the daemon god-struct refactor. VM lifecycle, create-op
registry, handle cache, disk provisioning, stats polling, ports
query, and the per-VM lock set all move off *Daemon onto *VMService.
Daemon keeps thin forwarders only for FindVM / TouchVM (dispatch
surface) and is otherwise out of VM lifecycle. Lazy-init via
d.vmSvc() mirrors the earlier services so test literals like
`&Daemon{store: db, runner: r}` still get a functional service
without spelling one out.
Three small cleanups along the way:
* preflight helpers (validateStartPrereqs / addBaseStartPrereqs
/ addBaseStartCommandPrereqs / validateWorkDiskResizePrereqs)
move with the VM methods that call them.
* cleanupRuntime / rebuildDNS move to *VMService, with
HostNetwork primitives (findFirecrackerPID, cleanupDMSnapshot,
killVMProcess, releaseTap, waitForExit, sendCtrlAltDel)
reached through s.net instead of the hostNet() facade.
* vsockAgentBinary becomes a package-level function so both
*Daemon (doctor) and *VMService (preflight) call one entry
point instead of each owning a forwarder method.
WorkspaceService's peer deps switch from eager method values to
closures — vmSvc() constructs VMService with WorkspaceService as a
peer, so resolving d.vmSvc().FindVM at construction time recursed
through workspaceSvc() → vmSvc(). Closures defer the lookup to call
time.
Pure code motion: build + unit tests green, lint clean. No RPC
surface or lock-ordering changes.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
216 lines
7.8 KiB
Go
package daemon
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"os"
|
|
"path/filepath"
|
|
"strconv"
|
|
"strings"
|
|
|
|
"banger/internal/guestconfig"
|
|
"banger/internal/guestnet"
|
|
"banger/internal/model"
|
|
"banger/internal/system"
|
|
)
|
|
|
|
// workDiskPreparation reports how ensureWorkDisk provisioned the VM's
// work disk on this call.
type workDiskPreparation struct {
	// ClonedFromSeed is true when the disk was cloned from the image's
	// work seed; false when the disk already existed or was created
	// empty and populated from the rootfs /root directory.
	ClonedFromSeed bool
}
|
|
|
|
func (s *VMService) ensureSystemOverlay(ctx context.Context, vm *model.VMRecord) error {
|
|
if exists(vm.Runtime.SystemOverlay) {
|
|
return nil
|
|
}
|
|
_, err := s.runner.Run(ctx, "truncate", "-s", strconv.FormatInt(vm.Spec.SystemOverlaySizeByte, 10), vm.Runtime.SystemOverlay)
|
|
return err
|
|
}
|
|
|
|
// patchRootOverlay writes the per-VM config files (resolv.conf,
// hostname, hosts, sshd drop-in, network bootstrap, fstab) into the
// rootfs overlay. Reads the DM device path from the handle cache,
// which the start flow populates before calling this.
//
// Returns an error if the handle cache has no DM device (start-flow
// ordering bug) or if any ext4 write fails; the first failure aborts
// the remaining writes.
func (s *VMService) patchRootOverlay(ctx context.Context, vm model.VMRecord, image model.Image) error {
	dmDev := s.vmHandles(vm.ID).DMDev
	if dmDev == "" {
		return fmt.Errorf("vm %q: DM device not in handle cache — start flow out of order?", vm.ID)
	}
	// Per-VM file contents derived from the record and daemon config.
	resolv := []byte(fmt.Sprintf("nameserver %s\n", s.config.DefaultDNS))
	hostname := []byte(vm.Name + "\n")
	hosts := []byte(fmt.Sprintf("127.0.0.1 localhost\n127.0.1.1 %s\n", vm.Name))
	sshdConfig := []byte(sshdGuestConfig())
	// Seed the fstab render with the image's existing fstab so base
	// entries carry over; on read failure fall back to an empty base.
	fstab, err := system.ReadDebugFSText(ctx, s.runner, dmDev, "/etc/fstab")
	if err != nil {
		fstab = ""
	}
	builder := guestconfig.NewBuilder()
	builder.WriteFile("/etc/resolv.conf", resolv)
	builder.WriteFile("/etc/hostname", hostname)
	builder.WriteFile("/etc/hosts", hosts)
	builder.WriteFile(guestnet.ConfigPath, guestnet.ConfigFile(vm.Runtime.GuestIP, s.config.BridgeIP, s.config.DefaultDNS))
	builder.WriteFile(guestnet.GuestScriptPath, []byte(guestnet.BootstrapScript()))
	builder.WriteFile("/etc/ssh/sshd_config.d/99-banger.conf", sshdConfig)
	// Drop any image-provided mount entries for /home and /var.
	// NOTE(review): presumably these come from the work disk instead —
	// confirm against the mount setup in the start flow.
	builder.DropMountTarget("/home")
	builder.DropMountTarget("/var")
	// Ephemeral tmpfs mounts: /run (0755) and world-writable /tmp (1777).
	builder.AddMount(guestconfig.MountSpec{
		Source:  "tmpfs",
		Target:  "/run",
		FSType:  "tmpfs",
		Options: []string{"defaults", "nodev", "nosuid", "mode=0755"},
		Dump:    0,
		Pass:    0,
	})
	builder.AddMount(guestconfig.MountSpec{
		Source:  "tmpfs",
		Target:  "/tmp",
		FSType:  "tmpfs",
		Options: []string{"defaults", "nodev", "nosuid", "mode=1777"},
		Dump:    0,
		Pass:    0,
	})
	// Let capability hooks contribute their own guest files/mounts
	// before fstab is rendered, so their entries are included.
	s.capHooks.contributeGuest(builder, vm, image)
	builder.WriteFile("/etc/fstab", []byte(builder.RenderFSTab(fstab)))
	files := builder.Files()
	// Iterate via FilePaths() rather than ranging the map directly so
	// the write order is the builder's, not Go's random map order.
	for _, guestPath := range builder.FilePaths() {
		data := files[guestPath]
		if guestPath == guestnet.GuestScriptPath {
			// The bootstrap script must be executable in the guest.
			if err := system.WriteExt4FileMode(ctx, s.runner, dmDev, guestPath, 0o755, data); err != nil {
				return err
			}
			continue
		}
		if err := system.WriteExt4File(ctx, s.runner, dmDev, guestPath, data); err != nil {
			return err
		}
	}
	return nil
}
|
|
|
|
// ensureWorkDisk makes sure the VM's work disk exists at
// vm.Runtime.WorkDiskPath, provisioning it on first use. Three paths:
//
//   - disk already exists: no-op, zero-value result.
//   - image ships a work seed: clone it, then grow to the requested
//     size if larger; result has ClonedFromSeed set.
//   - otherwise: create an empty ext4 image and populate it from the
//     rootfs /root directory.
//
// The empty-disk path reads the rootfs through the DM device recorded
// in the handle cache, so the start flow must populate that first.
// Progress is reported via vmCreateStage.
func (s *VMService) ensureWorkDisk(ctx context.Context, vm *model.VMRecord, image model.Image) (workDiskPreparation, error) {
	if exists(vm.Runtime.WorkDiskPath) {
		return workDiskPreparation{}, nil
	}
	if exists(image.WorkSeedPath) {
		vmCreateStage(ctx, "prepare_work_disk", "cloning work seed")
		if err := system.CopyFilePreferClone(image.WorkSeedPath, vm.Runtime.WorkDiskPath); err != nil {
			return workDiskPreparation{}, err
		}
		seedInfo, err := os.Stat(image.WorkSeedPath)
		if err != nil {
			return workDiskPreparation{}, err
		}
		// Shrinking below the seed size would truncate data; refuse.
		if vm.Spec.WorkDiskSizeBytes < seedInfo.Size() {
			return workDiskPreparation{}, fmt.Errorf("requested work disk size %d is smaller than seed image %d", vm.Spec.WorkDiskSizeBytes, seedInfo.Size())
		}
		if vm.Spec.WorkDiskSizeBytes > seedInfo.Size() {
			vmCreateStage(ctx, "prepare_work_disk", "resizing work disk")
			if err := system.ResizeExt4Image(ctx, s.runner, vm.Runtime.WorkDiskPath, vm.Spec.WorkDiskSizeBytes); err != nil {
				return workDiskPreparation{}, err
			}
		}
		return workDiskPreparation{ClonedFromSeed: true}, nil
	}
	// No seed: build an empty ext4 disk of the requested size.
	vmCreateStage(ctx, "prepare_work_disk", "creating empty work disk")
	if _, err := s.runner.Run(ctx, "truncate", "-s", strconv.FormatInt(vm.Spec.WorkDiskSizeBytes, 10), vm.Runtime.WorkDiskPath); err != nil {
		return workDiskPreparation{}, err
	}
	if _, err := s.runner.Run(ctx, "mkfs.ext4", "-F", vm.Runtime.WorkDiskPath); err != nil {
		return workDiskPreparation{}, err
	}
	dmDev := s.vmHandles(vm.ID).DMDev
	if dmDev == "" {
		return workDiskPreparation{}, fmt.Errorf("vm %q: DM device not in handle cache", vm.ID)
	}
	// Mount the rootfs and the fresh work disk side by side.
	// NOTE(review): the trailing bool presumably toggles read-only
	// (true for the rootfs, false for the work disk) — confirm against
	// system.MountTempDir.
	rootMount, cleanupRoot, err := system.MountTempDir(ctx, s.runner, dmDev, true)
	if err != nil {
		return workDiskPreparation{}, err
	}
	defer cleanupRoot()
	workMount, cleanupWork, err := system.MountTempDir(ctx, s.runner, vm.Runtime.WorkDiskPath, false)
	if err != nil {
		return workDiskPreparation{}, err
	}
	defer cleanupWork()
	vmCreateStage(ctx, "prepare_work_disk", "copying /root into work disk")
	if err := system.CopyDirContents(ctx, s.runner, filepath.Join(rootMount, "root"), workMount, true); err != nil {
		return workDiskPreparation{}, err
	}
	// Repair the copy if it landed nested under <workMount>/root.
	if err := flattenNestedWorkHome(ctx, s.runner, workMount); err != nil {
		return workDiskPreparation{}, err
	}
	return workDiskPreparation{}, nil
}
|
|
|
|
// sshdGuestConfig renders the banger-authored drop-in that lands at
// /etc/ssh/sshd_config.d/99-banger.conf inside every guest.
//
// Banger VMs are single-user root sandboxes reachable only through the
// host bridge (default 172.16.0.0/24). The drop-in sets the minimum
// needed to make that usable while keeping the posture tight enough
// that a misconfigured host bridge does not immediately hand over an
// unauthenticated root shell.
//
// Why each directive is here:
//
//   - PermitRootLogin prohibit-password: the guest IS root — there's no
//     other account. prohibit-password allows pubkey login and blocks
//     password auth at the source even if some future config flips
//     PasswordAuthentication on.
//   - PubkeyAuthentication yes: the only auth method we expect.
//     Explicit in case a future Debian default or distro package flips
//     it off.
//   - PasswordAuthentication no / KbdInteractiveAuthentication no:
//     belt-and-braces — every interactive auth path is off, not just
//     the PermitRootLogin path. These are already Debian defaults but
//     stating them here means the drop-in documents the intent.
//   - AuthorizedKeysFile /root/.ssh/authorized_keys: pins the lookup
//     path so the banger-written file always wins, regardless of distro
//     default ($HOME/.ssh/authorized_keys) and regardless of any
//     per-image weirdness.
//
// Previously this file also contained `LogLevel DEBUG3` and
// `StrictModes no`. DEBUG3 was a leftover from debugging the
// first-boot flow and flooded journald in normal use. StrictModes no
// was a workaround for perm drift on /root inside the work disk; the
// real fix — normalising /root permissions at provisioning time — is
// in ensureAuthorizedKeyOnWorkDisk / seedAuthorizedKeyOnExt4Image.
func sshdGuestConfig() string {
	directives := []string{
		"PermitRootLogin prohibit-password",
		"PubkeyAuthentication yes",
		"PasswordAuthentication no",
		"KbdInteractiveAuthentication no",
		"AuthorizedKeysFile /root/.ssh/authorized_keys",
	}
	var b strings.Builder
	for _, d := range directives {
		b.WriteString(d)
		b.WriteByte('\n')
	}
	return b.String()
}
|
|
|
|
// flattenNestedWorkHome is a package-level helper used by the image,
|
|
// workspace-sync, and VM-disk paths, so it takes the runner explicitly
|
|
// rather than belonging to any one service struct.
|
|
func flattenNestedWorkHome(ctx context.Context, runner system.CommandRunner, workMount string) error {
|
|
nestedHome := filepath.Join(workMount, "root")
|
|
if !exists(nestedHome) {
|
|
return nil
|
|
}
|
|
if _, err := runner.RunSudo(ctx, "chmod", "755", nestedHome); err != nil {
|
|
return err
|
|
}
|
|
entries, err := os.ReadDir(nestedHome)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
for _, entry := range entries {
|
|
sourcePath := filepath.Join(nestedHome, entry.Name())
|
|
if _, err := runner.RunSudo(ctx, "cp", "-a", sourcePath, workMount+"/"); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
_, err = runner.RunSudo(ctx, "rm", "-rf", nestedHome)
|
|
return err
|
|
}
|