Move the supported systemd path to two services: an owner-user bangerd for orchestration and a narrow root helper for bridge/tap, NAT/resolver, dm/loop, and Firecracker ownership. This removes repeated sudo from daily vm and image flows without leaving the general daemon running as root. Add install metadata, system install/status/restart/uninstall commands, and a system-owned runtime layout. Keep user SSH/config material in the owner home, lock file_sync to the owner home, and move daemon known_hosts handling out of the old root-owned control path. Route privileged lifecycle steps through typed privilegedOps calls, harden the two systemd units, and rewrite smoke plus docs around the supported service model. Verified with make build, make test, make lint, and make smoke on the supported systemd host path.
78 lines
2.3 KiB
Go
78 lines
2.3 KiB
Go
package daemon
|
|
|
|
import (
|
|
"context"
|
|
|
|
"banger/internal/hostnat"
|
|
"banger/internal/model"
|
|
"banger/internal/system"
|
|
)
|
|
|
|
// natRule aliases hostnat.Rule so daemon-side NAT code can refer to
// rules without qualifying hostnat at every use site.
type natRule = hostnat.Rule
|
|
|
|
// ensureNAT takes tap explicitly rather than reading from a handle
|
|
// cache so HostNetwork stays decoupled from VM-service state.
|
|
// Callers (vm_lifecycle) resolve the tap device from the handle cache
|
|
// themselves and pass it in.
|
|
func (n *HostNetwork) ensureNAT(ctx context.Context, guestIP, tap string, enable bool) error {
|
|
return n.privOps().EnsureNAT(ctx, guestIP, tap, enable)
|
|
}
|
|
|
|
func (n *HostNetwork) validateNATPrereqs(ctx context.Context) (string, error) {
|
|
checks := system.NewPreflight()
|
|
checks.RequireCommand("ip", toolHint("ip"))
|
|
n.addNATPrereqs(ctx, checks)
|
|
if err := checks.Err("nat preflight failed"); err != nil {
|
|
return "", err
|
|
}
|
|
return n.defaultUplink(ctx)
|
|
}
|
|
|
|
func (n *HostNetwork) addNATPrereqs(ctx context.Context, checks *system.Preflight) {
|
|
checks.RequireCommand("iptables", toolHint("iptables"))
|
|
checks.RequireCommand("sysctl", toolHint("sysctl"))
|
|
runner := n.runner
|
|
if runner == nil {
|
|
runner = system.NewRunner()
|
|
}
|
|
out, err := runner.Run(ctx, "ip", "route", "show", "default")
|
|
if err != nil {
|
|
checks.Addf("failed to inspect the default route for NAT: %v", err)
|
|
return
|
|
}
|
|
if _, err := parseDefaultUplink(string(out)); err != nil {
|
|
checks.Addf("failed to detect the uplink interface for NAT: %v", err)
|
|
}
|
|
}
|
|
|
|
func (n *HostNetwork) defaultUplink(ctx context.Context) (string, error) {
|
|
return hostnat.DefaultUplink(ctx, n.runner)
|
|
}
|
|
|
|
func parseDefaultUplink(output string) (string, error) {
|
|
return hostnat.ParseDefaultUplink(output)
|
|
}
|
|
|
|
// natRulesForVM builds the iptables rule set for vm + tap + uplink.
|
|
// tap is passed explicitly (rather than read from a handle cache)
|
|
// because natRulesForVM has no Daemon receiver — it's usable from
|
|
// test helpers that build rule expectations without a daemon.
|
|
func natRulesForVM(vm model.VMRecord, tap, uplink string) ([]natRule, error) {
|
|
return hostnat.Rules(vm.Runtime.GuestIP, tap, uplink)
|
|
}
|
|
|
|
func natRuleArgs(action string, rule natRule) []string {
|
|
return hostnat.RuleArgs(action, rule)
|
|
}
|
|
|
|
func natAddPlan(rules []natRule) [][]string {
|
|
return hostnat.AddPlan(rules)
|
|
}
|
|
|
|
func natRemovePlan(rules []natRule) [][]string {
|
|
return hostnat.RemovePlan(rules)
|
|
}
|
|
|
|
func natRuleKey(rule natRule) string {
|
|
return hostnat.RuleKey(rule)
|
|
}
|