banger/internal/daemon/vm_disk.go
Thales Maciel 700a1e6e60
cleanup: drop pre-v0.1 migration scaffolding + legacy-behavior refs
banger hasn't shipped a public release — every "legacy", "pre-opt-in",
"previously", "migration note", "no longer" reference in the tree is
pinning against a state no real user's install has ever been in.
That scaffolding has weight: it's a coordinate system future readers
have to decode, and it keeps dead code alive.

Removed (code):
- internal/daemon/ssh_client_config.go
    - vmSSHConfigIncludeBegin / vmSSHConfigIncludeEnd constants and
      every `removeManagedBlock(existing, vm...)` call they enabled
      (legacy inline `Host *.vm` block scrub)
    - cleanupLegacySSHConfigDir (+ its caller in syncVMSSHClientConfig)
      — wiped a pre-opt-in sibling file under $ConfigDir/ssh
    - sameDirOrParent + resolvePathForComparison — only ever used
      by cleanupLegacySSHConfigDir
    - the "also check legacy marker" fallback in
      UserSSHIncludeInstalled / UninstallUserSSHInclude
- internal/store/migrations.go
    - migrateDropDeadImageColumns (migration 2) + its slice entry
    - dropColumnIfExists (orphaned after the above)
    - addColumnIfMissing + the whole "columns added across the pre-
      versioning lifetime" block at the end of migrateBaseline —
      subsumed into the baseline CREATE TABLE
    - `packages_path TEXT` column on the images table (the
      throwaway migration 2 dropped it, but there was never any
      reader)
- internal/daemon/vm.go
    - vmDNSRecordName local wrapper — was justified as "avoid
      pulling vmdns into every file"; three of four callers already
      imported vmdns directly, so inline the one stray call
- internal/cli/cli_test.go
    - TestLegacyRemovedCommandIsRejected (`tui` subcommand never
      shipped)

Removed / simplified (tests):
- ssh_client_config_test.go: dropped TestSameDirOrParentHandlesSymlinks,
  TestSyncVMSSHClientConfigPreservesUserKeyInLegacyDir,
  TestSyncVMSSHClientConfigNarrowsCleanupToLegacyFile,
  TestSyncVMSSHClientConfigLeavesUnexpectedLegacyContents,
  TestInstallUserSSHIncludeMigratesLegacyInlineBlock, plus the
  "legacy posture" regression strings in the remaining happy-path
  test; TestUninstallUserSSHIncludeRemovesBothMarkerBlocks collapsed
  to a single-block test
- migrations_test.go: dropped TestMigrateDropDeadImageColumns_AcrossInstallPaths,
  TestDropColumnIfExistsIsIdempotent; TestOpenReadOnlyDoesNotRunMigrations
  simplified to test against the baseline marker

Removed (docs):
- README.md "**Migration note.**" blockquote about the SSH-key path move
- docs/advanced.md parenthetical "(the old behaviour)"

Reworded (comments):
- Dropped "Previously this file also contained LogLevel DEBUG3..."
  history from vm_disk.go's sshdGuestConfig doc
- Dropped "Call sites that previously read vm.Runtime.{PID,...}"
  from vm_handles.go; now documents the current contract
- Dropped "Pre-v0.1 the defaults are" scaffolding in doctor_test.go
- Dropped "no longer does its own git inspection" phrasing in vm_run.go
- Dropped the "(also cleans up legacy inline block from pre-opt-in
  builds)" aside on the `ssh-config` CLI docstring
- Renamed test var `legacyKey` → `existingKey` in vm_test.go; its
  purpose was "pre-existing authorized_keys line," not banger-legacy

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-23 13:56:32 -03:00

209 lines
7.4 KiB
Go

package daemon
import (
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"banger/internal/guestconfig"
"banger/internal/guestnet"
"banger/internal/model"
"banger/internal/system"
)
// workDiskPreparation reports how ensureWorkDisk obtained the work
// disk. ClonedFromSeed is true only when the disk was cloned from the
// image's work seed (as opposed to already existing or being created
// empty), so callers can tell whether seed contents are present.
type workDiskPreparation struct {
	ClonedFromSeed bool
}
// ensureSystemOverlay creates the VM's system overlay file at
// vm.Runtime.SystemOverlay when it does not already exist. The file is
// produced with `truncate -s`, i.e. as a sparse file sized to
// vm.Spec.SystemOverlaySizeByte; an existing overlay is left untouched.
func (s *VMService) ensureSystemOverlay(ctx context.Context, vm *model.VMRecord) error {
	if exists(vm.Runtime.SystemOverlay) {
		// Idempotent: never resize or recreate an existing overlay.
		return nil
	}
	size := strconv.FormatInt(vm.Spec.SystemOverlaySizeByte, 10)
	_, err := s.runner.Run(ctx, "truncate", "-s", size, vm.Runtime.SystemOverlay)
	return err
}
// patchRootOverlay writes the per-VM config files (resolv.conf,
// hostname, hosts, sshd drop-in, network bootstrap, fstab) into the
// rootfs overlay. The DM device path comes from the handle cache,
// which the start flow populates before calling this.
func (s *VMService) patchRootOverlay(ctx context.Context, vm model.VMRecord, image model.Image) error {
	dev := s.vmHandles(vm.ID).DMDev
	if dev == "" {
		return fmt.Errorf("vm %q: DM device not in handle cache — start flow out of order?", vm.ID)
	}

	// Read the image's current fstab so RenderFSTab below can take it
	// as a base; on a read failure we simply render from an empty one.
	fstab, err := system.ReadDebugFSText(ctx, s.runner, dev, "/etc/fstab")
	if err != nil {
		fstab = ""
	}

	b := guestconfig.NewBuilder()
	b.WriteFile("/etc/resolv.conf", []byte(fmt.Sprintf("nameserver %s\n", s.config.DefaultDNS)))
	b.WriteFile("/etc/hostname", []byte(vm.Name+"\n"))
	b.WriteFile("/etc/hosts", []byte(fmt.Sprintf("127.0.0.1 localhost\n127.0.1.1 %s\n", vm.Name)))
	b.WriteFile(guestnet.ConfigPath, guestnet.ConfigFile(vm.Runtime.GuestIP, s.config.BridgeIP, s.config.DefaultDNS))
	b.WriteFile(guestnet.GuestScriptPath, []byte(guestnet.BootstrapScript()))
	b.WriteFile("/etc/ssh/sshd_config.d/99-banger.conf", []byte(sshdGuestConfig()))
	b.DropMountTarget("/home")
	b.DropMountTarget("/var")
	// /run and /tmp become tmpfs mounts; they differ only in target
	// and directory mode (sticky world-writable for /tmp).
	for _, m := range []struct{ target, mode string }{
		{"/run", "mode=0755"},
		{"/tmp", "mode=1777"},
	} {
		b.AddMount(guestconfig.MountSpec{
			Source:  "tmpfs",
			Target:  m.target,
			FSType:  "tmpfs",
			Options: []string{"defaults", "nodev", "nosuid", m.mode},
			Dump:    0,
			Pass:    0,
		})
	}
	// Capability hooks get a chance to contribute guest files/mounts
	// before the fstab is rendered, so their mounts are included.
	s.capHooks.contributeGuest(b, vm, image)
	b.WriteFile("/etc/fstab", []byte(b.RenderFSTab(fstab)))

	contents := b.Files()
	for _, path := range b.FilePaths() {
		var werr error
		if path == guestnet.GuestScriptPath {
			// The bootstrap script must be executable inside the guest.
			werr = system.WriteExt4FileMode(ctx, s.runner, dev, path, 0o755, contents[path])
		} else {
			werr = system.WriteExt4File(ctx, s.runner, dev, path, contents[path])
		}
		if werr != nil {
			return werr
		}
	}
	return nil
}
// ensureWorkDisk makes sure the VM's work disk exists at
// vm.Runtime.WorkDiskPath, creating it on first use. Two paths:
//
//   - The image ships a work seed: clone/copy it into place, then grow
//     it to the requested size if needed. Returns ClonedFromSeed: true.
//   - No seed: create an empty ext4 disk of the requested size and
//     prime it with the rootfs's /root contents.
//
// On the empty-disk path the DM device must already be in the handle
// cache. Errors from any stage abort with a zero workDiskPreparation.
func (s *VMService) ensureWorkDisk(ctx context.Context, vm *model.VMRecord, image model.Image) (workDiskPreparation, error) {
	// Idempotent: an existing disk is left untouched (and NOT reported
	// as cloned, even if it originally was).
	if exists(vm.Runtime.WorkDiskPath) {
		return workDiskPreparation{}, nil
	}
	if exists(image.WorkSeedPath) {
		vmCreateStage(ctx, "prepare_work_disk", "cloning work seed")
		if err := system.CopyFilePreferClone(image.WorkSeedPath, vm.Runtime.WorkDiskPath); err != nil {
			return workDiskPreparation{}, err
		}
		seedInfo, err := os.Stat(image.WorkSeedPath)
		if err != nil {
			return workDiskPreparation{}, err
		}
		// The disk may only grow relative to the seed; a smaller
		// requested size is rejected rather than shrunk.
		if vm.Spec.WorkDiskSizeBytes < seedInfo.Size() {
			return workDiskPreparation{}, fmt.Errorf("requested work disk size %d is smaller than seed image %d", vm.Spec.WorkDiskSizeBytes, seedInfo.Size())
		}
		if vm.Spec.WorkDiskSizeBytes > seedInfo.Size() {
			vmCreateStage(ctx, "prepare_work_disk", "resizing work disk")
			if err := system.ResizeExt4Image(ctx, s.runner, vm.Runtime.WorkDiskPath, vm.Spec.WorkDiskSizeBytes); err != nil {
				return workDiskPreparation{}, err
			}
		}
		return workDiskPreparation{ClonedFromSeed: true}, nil
	}
	// No seed: sparse file via truncate, then a fresh ext4 filesystem.
	vmCreateStage(ctx, "prepare_work_disk", "creating empty work disk")
	if _, err := s.runner.Run(ctx, "truncate", "-s", strconv.FormatInt(vm.Spec.WorkDiskSizeBytes, 10), vm.Runtime.WorkDiskPath); err != nil {
		return workDiskPreparation{}, err
	}
	if _, err := s.runner.Run(ctx, "mkfs.ext4", "-F", vm.Runtime.WorkDiskPath); err != nil {
		return workDiskPreparation{}, err
	}
	dmDev := s.vmHandles(vm.ID).DMDev
	if dmDev == "" {
		return workDiskPreparation{}, fmt.Errorf("vm %q: DM device not in handle cache", vm.ID)
	}
	// Mount the rootfs and the new work disk side by side so the
	// image's /root can be copied over. The differing bool args to
	// MountTempDir presumably select read-only vs read-write —
	// NOTE(review): confirm against system.MountTempDir.
	rootMount, cleanupRoot, err := system.MountTempDir(ctx, s.runner, dmDev, true)
	if err != nil {
		return workDiskPreparation{}, err
	}
	defer cleanupRoot()
	workMount, cleanupWork, err := system.MountTempDir(ctx, s.runner, vm.Runtime.WorkDiskPath, false)
	if err != nil {
		return workDiskPreparation{}, err
	}
	defer cleanupWork()
	vmCreateStage(ctx, "prepare_work_disk", "copying /root into work disk")
	if err := system.CopyDirContents(ctx, s.runner, filepath.Join(rootMount, "root"), workMount, true); err != nil {
		return workDiskPreparation{}, err
	}
	// Flatten any nested <work>/root directory the copy may have
	// produced, so the work disk's top level is the home directory.
	if err := flattenNestedWorkHome(ctx, s.runner, workMount); err != nil {
		return workDiskPreparation{}, err
	}
	return workDiskPreparation{}, nil
}
// sshdGuestConfig is the banger-authored sshd drop-in that lands at
// /etc/ssh/sshd_config.d/99-banger.conf inside every guest.
//
// Banger VMs are single-user root sandboxes reachable only through the
// host bridge (default 172.16.0.0/24); the drop-in keeps that usable
// while ensuring a misconfigured host bridge cannot hand over an
// unauthenticated root shell.
//
// Line-by-line rationale:
//
//   - PermitRootLogin prohibit-password — root is the only account;
//     pubkey login stays open while password auth is blocked at the
//     source even if PasswordAuthentication were ever flipped on.
//   - PubkeyAuthentication yes — the only auth method we expect;
//     explicit in case a future distro default turns it off.
//   - PasswordAuthentication no / KbdInteractiveAuthentication no —
//     belt-and-braces: every interactive auth path is off, not just
//     the PermitRootLogin path. Already Debian defaults, but stating
//     them makes the drop-in document the intent.
//   - AuthorizedKeysFile /root/.ssh/authorized_keys — pins the lookup
//     path so the banger-written file always wins, regardless of
//     distro default or per-image weirdness.
func sshdGuestConfig() string {
	const dropIn = "PermitRootLogin prohibit-password\n" +
		"PubkeyAuthentication yes\n" +
		"PasswordAuthentication no\n" +
		"KbdInteractiveAuthentication no\n" +
		"AuthorizedKeysFile /root/.ssh/authorized_keys\n"
	return dropIn
}
// flattenNestedWorkHome hoists the contents of <workMount>/root up
// into workMount itself and removes the emptied nested directory. It
// is a package-level helper shared by the image, workspace-sync, and
// VM-disk paths, so it takes the runner explicitly rather than
// belonging to any one service struct. No-op when there is no nested
// root directory.
func flattenNestedWorkHome(ctx context.Context, runner system.CommandRunner, workMount string) error {
	inner := filepath.Join(workMount, "root")
	if !exists(inner) {
		// Already flat — nothing to do.
		return nil
	}
	// Loosen the directory mode so the unprivileged os.ReadDir below
	// can list it (the copies themselves still go through sudo).
	if _, err := runner.RunSudo(ctx, "chmod", "755", inner); err != nil {
		return err
	}
	children, err := os.ReadDir(inner)
	if err != nil {
		return err
	}
	// cp -a preserves ownership, modes, and timestamps per child.
	for _, child := range children {
		src := filepath.Join(inner, child.Name())
		if _, err := runner.RunSudo(ctx, "cp", "-a", src, workMount+"/"); err != nil {
			return err
		}
	}
	_, err = runner.RunSudo(ctx, "rm", "-rf", inner)
	return err
}