Phase 4 of the daemon god-struct refactor. VM lifecycle, create-op
registry, handle cache, disk provisioning, stats polling, ports
query, and the per-VM lock set all move off *Daemon onto *VMService.
Daemon keeps thin forwarders only for FindVM / TouchVM (dispatch
surface) and is otherwise out of VM lifecycle. Lazy-init via
d.vmSvc() mirrors the earlier services so test literals like
`&Daemon{store: db, runner: r}` still get a functional service
without spelling one out.
Three small cleanups along the way:
* preflight helpers (validateStartPrereqs / addBaseStartPrereqs
/ addBaseStartCommandPrereqs / validateWorkDiskResizePrereqs)
move with the VM methods that call them.
* cleanupRuntime / rebuildDNS move to *VMService, with
HostNetwork primitives (findFirecrackerPID, cleanupDMSnapshot,
killVMProcess, releaseTap, waitForExit, sendCtrlAltDel)
reached through s.net instead of the hostNet() facade.
* vsockAgentBinary becomes a package-level function so both
*Daemon (doctor) and *VMService (preflight) call one entry
point instead of each owning a forwarder method.
WorkspaceService's peer deps switch from eager method values to
closures — vmSvc() constructs VMService with WorkspaceService as a
peer, so resolving d.vmSvc().FindVM at construction time recursed
through workspaceSvc() → vmSvc(). Closures defer the lookup to call
time.
Pure code motion: build + unit tests green, lint clean. No RPC
surface or lock-ordering changes.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
164 lines
4.6 KiB
Go
164 lines
4.6 KiB
Go
package daemon
|
|
|
|
import (
|
|
"context"
|
|
"crypto/tls"
|
|
"errors"
|
|
"fmt"
|
|
"io"
|
|
"net"
|
|
"net/http"
|
|
"sort"
|
|
"strconv"
|
|
"strings"
|
|
"time"
|
|
|
|
"banger/internal/api"
|
|
"banger/internal/model"
|
|
"banger/internal/vmdns"
|
|
"banger/internal/vsockagent"
|
|
)
|
|
|
|
// httpProbeTimeout is the per-request timeout for each HTTP(S) probe of a
// guest listener (see probeHTTPScheme); kept short so probing many ports
// while classifying listeners stays fast.
const httpProbeTimeout = 750 * time.Millisecond
|
|
|
|
func (s *VMService) PortsVM(ctx context.Context, idOrName string) (result api.VMPortsResult, err error) {
|
|
_, err = s.withVMLockByRef(ctx, idOrName, func(vm model.VMRecord) (model.VMRecord, error) {
|
|
result.Name = vm.Name
|
|
result.DNSName = strings.TrimSpace(vm.Runtime.DNSName)
|
|
if result.DNSName == "" && strings.TrimSpace(vm.Name) != "" {
|
|
result.DNSName = vmdns.RecordName(vm.Name)
|
|
}
|
|
if !s.vmAlive(vm) {
|
|
return model.VMRecord{}, fmt.Errorf("vm %s is not running", vm.Name)
|
|
}
|
|
if strings.TrimSpace(vm.Runtime.GuestIP) == "" {
|
|
return model.VMRecord{}, errors.New("vm has no guest IP")
|
|
}
|
|
if strings.TrimSpace(vm.Runtime.VSockPath) == "" {
|
|
return model.VMRecord{}, errors.New("vm has no vsock path")
|
|
}
|
|
if vm.Runtime.VSockCID == 0 {
|
|
return model.VMRecord{}, errors.New("vm has no vsock cid")
|
|
}
|
|
if err := s.net.ensureSocketAccess(ctx, vm.Runtime.VSockPath, "firecracker vsock socket"); err != nil {
|
|
return model.VMRecord{}, err
|
|
}
|
|
portsCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
|
|
defer cancel()
|
|
listeners, err := vsockagent.Ports(portsCtx, s.logger, vm.Runtime.VSockPath)
|
|
if err != nil {
|
|
return model.VMRecord{}, err
|
|
}
|
|
result.Ports = buildVMPorts(vm, listeners)
|
|
return vm, nil
|
|
})
|
|
return result, err
|
|
}
|
|
|
|
func buildVMPorts(vm model.VMRecord, listeners []vsockagent.PortListener) []api.VMPort {
|
|
endpointHost := strings.TrimSpace(vm.Runtime.DNSName)
|
|
if endpointHost == "" {
|
|
endpointHost = strings.TrimSpace(vm.Runtime.GuestIP)
|
|
}
|
|
probeHost := strings.TrimSpace(vm.Runtime.GuestIP)
|
|
ports := make([]api.VMPort, 0, len(listeners))
|
|
for _, listener := range listeners {
|
|
if listener.Port <= 0 {
|
|
continue
|
|
}
|
|
port := api.VMPort{
|
|
Proto: strings.ToLower(strings.TrimSpace(listener.Proto)),
|
|
BindAddress: strings.TrimSpace(listener.BindAddress),
|
|
Port: listener.Port,
|
|
PID: listener.PID,
|
|
Process: strings.TrimSpace(listener.Process),
|
|
Command: strings.TrimSpace(listener.Command),
|
|
Endpoint: net.JoinHostPort(endpointHost, strconv.Itoa(listener.Port)),
|
|
}
|
|
if port.Command == "" {
|
|
port.Command = port.Process
|
|
}
|
|
if port.Proto == "tcp" && probeHost != "" && endpointHost != "" {
|
|
if scheme, ok := probeWebListener(probeHost, listener.Port); ok {
|
|
port.Proto = scheme
|
|
port.Endpoint = scheme + "://" + net.JoinHostPort(endpointHost, strconv.Itoa(listener.Port)) + "/"
|
|
}
|
|
}
|
|
ports = append(ports, port)
|
|
}
|
|
sort.Slice(ports, func(i, j int) bool {
|
|
if ports[i].Proto != ports[j].Proto {
|
|
return ports[i].Proto < ports[j].Proto
|
|
}
|
|
if ports[i].Port != ports[j].Port {
|
|
return ports[i].Port < ports[j].Port
|
|
}
|
|
if ports[i].PID != ports[j].PID {
|
|
return ports[i].PID < ports[j].PID
|
|
}
|
|
if ports[i].Process != ports[j].Process {
|
|
return ports[i].Process < ports[j].Process
|
|
}
|
|
if ports[i].Command != ports[j].Command {
|
|
return ports[i].Command < ports[j].Command
|
|
}
|
|
return ports[i].BindAddress < ports[j].BindAddress
|
|
})
|
|
return dedupeVMPorts(ports)
|
|
}
|
|
|
|
func probeWebListener(guestIP string, port int) (string, bool) {
|
|
if probeHTTPScheme("https", guestIP, port) {
|
|
return "https", true
|
|
}
|
|
if probeHTTPScheme("http", guestIP, port) {
|
|
return "http", true
|
|
}
|
|
return "", false
|
|
}
|
|
|
|
func probeHTTPScheme(scheme, guestIP string, port int) bool {
|
|
if strings.TrimSpace(guestIP) == "" || port <= 0 {
|
|
return false
|
|
}
|
|
url := scheme + "://" + net.JoinHostPort(strings.TrimSpace(guestIP), strconv.Itoa(port)) + "/"
|
|
req, err := http.NewRequest(http.MethodGet, url, nil)
|
|
if err != nil {
|
|
return false
|
|
}
|
|
transport := &http.Transport{Proxy: nil}
|
|
if scheme == "https" {
|
|
transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
|
|
}
|
|
client := &http.Client{
|
|
Timeout: httpProbeTimeout,
|
|
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
|
return http.ErrUseLastResponse
|
|
},
|
|
Transport: transport,
|
|
}
|
|
resp, err := client.Do(req)
|
|
if err != nil {
|
|
return false
|
|
}
|
|
defer resp.Body.Close()
|
|
_, _ = io.Copy(io.Discard, io.LimitReader(resp.Body, 1))
|
|
return resp.ProtoMajor >= 1
|
|
}
|
|
|
|
func dedupeVMPorts(ports []api.VMPort) []api.VMPort {
|
|
if len(ports) < 2 {
|
|
return ports
|
|
}
|
|
deduped := make([]api.VMPort, 0, len(ports))
|
|
seen := make(map[string]struct{}, len(ports))
|
|
for _, port := range ports {
|
|
key := port.Proto + "\x00" + port.Endpoint
|
|
if _, ok := seen[key]; ok {
|
|
continue
|
|
}
|
|
seen[key] = struct{}{}
|
|
deduped = append(deduped, port)
|
|
}
|
|
return deduped
|
|
}
|