First phase of splitting the daemon god-struct into focused services
with explicit ownership.

HostNetwork now owns everything host-networking: the TAP interface
pool (initializeTapPool / ensureTapPool / acquireTap / releaseTap /
createTap), bridge + socket dir setup, firecracker process primitives
(find/resolve/kill/wait/ensureSocketAccess/sendCtrlAltDel), DM
snapshot lifecycle, NAT rule enforcement, guest DNS server lifecycle +
routing setup, and the vsock-agent readiness probe. That's 7 files
whose receivers flipped from *Daemon to *HostNetwork, plus a new
host_network.go that declares the struct, its hostNetworkDeps, and the
factored firecracker + DNS helpers that used to live in vm.go. Daemon
gives up the tapPool and vmDNS fields entirely; they're now
HostNetwork's business.

Construction goes through newHostNetwork in Daemon.Open with an
explicit dependency bag (runner, logger, config, layout, closing). A
lazy-init hostNet() helper on Daemon supports test literals that don't
wire net explicitly; production always populates it eagerly. (See the
sketch below.)

Signature tightenings where the old receiver reached into VM-service
state:

- ensureNAT(ctx, vm, enable) → ensureNAT(ctx, guestIP, tap, enable).
  Callers resolve tap from the handle cache themselves.
- initializeTapPool(ctx) → initializeTapPool(usedTaps []string).
  Daemon.Open enumerates VMs, collects taps from handles, and hands
  the slice in.

rebuildDNS stays on *Daemon as the orchestrator: it filters by
vm-alive (a VMService concern handles will move to in phase 4), then
calls HostNetwork.replaceDNS with the already-filtered map.

Capability hooks continue to take *Daemon; they now use it as a facade
to reach services (d.net.ensureNAT, d.hostNet().*). The planned
CapabilityHost interface extraction is orthogonal and left for later.

Tests: dns_routing_test.go, fastpath_test.go, nat_test.go,
snapshot_test.go, and open_close_test.go were touched to construct
HostNetwork literals where they exercise its methods directly, or to
route through d.hostNet() where they exercise the Daemon entry points.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
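
A minimal construction sketch, assuming plausible shapes for the
pieces (the field types, the tapPool/dnsServer types, and the
d.handles cache are illustrative; only the identifiers called out
above are from the change):

	type hostNetworkDeps struct {
		runner  system.Runner
		logger  *slog.Logger
		config  *Config
		layout  *Layout
		closing <-chan struct{}
	}

	// HostNetwork owns host-side networking; tapPool and vmDNS
	// moved here off Daemon.
	type HostNetwork struct {
		hostNetworkDeps
		tapPool tapPool
		vmDNS   *dnsServer
	}

	func newHostNetwork(deps hostNetworkDeps) *HostNetwork {
		return &HostNetwork{hostNetworkDeps: deps}
	}

	// In Daemon.Open: wire eagerly, then seed the tap pool from
	// the taps already held by live VM handles.
	//
	//	d.net = newHostNetwork(hostNetworkDeps{runner: d.runner /* ... */})
	//	var usedTaps []string
	//	for _, h := range d.handles {
	//		usedTaps = append(usedTaps, h.Tap)
	//	}
	//	d.net.initializeTapPool(usedTaps)

	// hostNet lazily initializes net for test literals that never
	// run Daemon.Open; production never takes the nil branch.
	func (d *Daemon) hostNet() *HostNetwork {
		if d.net == nil {
			d.net = newHostNetwork(hostNetworkDeps{})
		}
		return d.net
	}

The explicit bag keeps wiring decisions in Daemon.Open rather than
scattered across call sites.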
package daemon

import (
	"context"

	"banger/internal/hostnat"
	"banger/internal/model"
	"banger/internal/system"
)

type natRule = hostnat.Rule

// ensureNAT takes tap explicitly rather than reading from a handle
// cache so HostNetwork stays decoupled from VM-service state.
// Callers (vm_lifecycle) resolve the tap device from the handle cache
// themselves and pass it in.
func (n *HostNetwork) ensureNAT(ctx context.Context, guestIP, tap string, enable bool) error {
	return hostnat.Ensure(ctx, n.runner, guestIP, tap, enable)
}
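
// Call-site sketch (hypothetical; the handle-cache shape is
// illustrative, not the exact vm_lifecycle code):
//
//	tap := d.handles[vm.ID].Tap // VM service resolves the tap device
//	if err := d.net.ensureNAT(ctx, vm.Runtime.GuestIP, tap, true); err != nil {
//		return err
//	}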

// validateNATPrereqs verifies the host tooling NAT needs and, on
// success, returns the name of the default uplink interface.
func (n *HostNetwork) validateNATPrereqs(ctx context.Context) (string, error) {
	checks := system.NewPreflight()
	checks.RequireCommand("ip", toolHint("ip"))
	n.addNATPrereqs(ctx, checks)
	if err := checks.Err("nat preflight failed"); err != nil {
		return "", err
	}
	return n.defaultUplink(ctx)
}

// addNATPrereqs records the NAT-specific preflight checks: the
// required commands plus a probe of the default route, so
// uplink-detection failures surface during preflight rather than
// mid-operation.
func (n *HostNetwork) addNATPrereqs(ctx context.Context, checks *system.Preflight) {
	checks.RequireCommand("iptables", toolHint("iptables"))
	checks.RequireCommand("sysctl", toolHint("sysctl"))
	runner := n.runner
	if runner == nil {
		runner = system.NewRunner()
	}
	out, err := runner.Run(ctx, "ip", "route", "show", "default")
	if err != nil {
		checks.Addf("failed to inspect the default route for NAT: %v", err)
		return
	}
	if _, err := parseDefaultUplink(string(out)); err != nil {
		checks.Addf("failed to detect the uplink interface for NAT: %v", err)
	}
}

func (n *HostNetwork) defaultUplink(ctx context.Context) (string, error) {
	return hostnat.DefaultUplink(ctx, n.runner)
}

func parseDefaultUplink(output string) (string, error) {
	return hostnat.ParseDefaultUplink(output)
}

// natRulesForVM builds the iptables rule set for vm + tap + uplink.
// tap is passed explicitly (rather than read from a handle cache)
// because natRulesForVM has no Daemon receiver; it's usable from
// test helpers that build rule expectations without a daemon.
func natRulesForVM(vm model.VMRecord, tap, uplink string) ([]natRule, error) {
	return hostnat.Rules(vm.Runtime.GuestIP, tap, uplink)
}
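
// Test-helper sketch (names are illustrative, not the actual test
// code): rule expectations can be built without a Daemon or a
// HostNetwork:
//
//	rules, err := natRulesForVM(vm, "tap0", "eth0")
//	if err != nil {
//		t.Fatal(err)
//	}
//	wantAdd := natAddPlan(rules)
//	wantDel := natRemovePlan(rules)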

// The helpers below are thin forwarders into hostnat so daemon-side
// code and tests don't need to import hostnat directly.

func natRuleArgs(action string, rule natRule) []string {
	return hostnat.RuleArgs(action, rule)
}

func natAddPlan(rules []natRule) [][]string {
	return hostnat.AddPlan(rules)
}

func natRemovePlan(rules []natRule) [][]string {
	return hostnat.RemovePlan(rules)
}

func natRuleKey(rule natRule) string {
	return hostnat.RuleKey(rule)
}
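
// Enforcement-flow sketch (the real enforcement path goes through
// hostnat.Ensure; the loop below, including the assumption that each
// plan entry is an iptables argument vector, is illustrative only):
//
//	uplink, err := n.validateNATPrereqs(ctx)
//	if err != nil {
//		return err
//	}
//	rules, err := natRulesForVM(vm, tap, uplink)
//	if err != nil {
//		return err
//	}
//	for _, args := range natAddPlan(rules) {
//		if _, err := n.runner.Run(ctx, "iptables", args...); err != nil {
//			return err
//		}
//	}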