Factor the service + capability wiring out of Daemon.Open() into
wireServices(d), an idempotent helper that constructs HostNetwork,
ImageService, WorkspaceService, and VMService from whatever
infrastructure (runner, store, config, layout, logger, closing) is
already set on d. Open() calls it once after filling the composition
root; tests that build &Daemon{...} literals call it to get a working
service graph, preinstalling stubs on the fields they want to fake.
Drops the four lazy-init getters on *Daemon — d.hostNet(),
d.imageSvc(), d.workspaceSvc(), d.vmSvc() — whose sole purpose was
keeping test literals working. Every production call site now reads
d.net / d.img / d.ws / d.vm directly; the services are guaranteed
non-nil once Open returns. No behavior change.
Mechanical: all existing `d.xxxSvc()` calls (production + tests)
rewritten to field access; each `d := &Daemon{...}` in tests gets a
trailing wireServices(d) so the literal + wiring are side-by-side.
Tests that override a pre-built service (e.g. d.img = &ImageService{
bundleFetch: stub}) now set the override before wireServices so the
replacement propagates into VMService's peer pointer.
Also nil-guards HostNetwork.stopVMDNS and d.store in Close() so
partially-initialised daemons (pre-reconcile open failure) still
tear down cleanly — same contract the old lazy getters provided.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2035 lines
58 KiB
Go
package daemon
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"crypto/rand"
|
|
"crypto/rsa"
|
|
"crypto/tls"
|
|
"crypto/x509"
|
|
"encoding/pem"
|
|
"errors"
|
|
"fmt"
|
|
"math/big"
|
|
"net"
|
|
"net/http"
|
|
"os"
|
|
"os/exec"
|
|
"path/filepath"
|
|
"strconv"
|
|
"strings"
|
|
"testing"
|
|
"time"
|
|
|
|
"banger/internal/api"
|
|
"banger/internal/model"
|
|
"banger/internal/paths"
|
|
"banger/internal/store"
|
|
"banger/internal/vmdns"
|
|
"banger/internal/vsockagent"
|
|
)
|
|
|
|
func TestFindVMPrefixResolution(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
ctx := context.Background()
|
|
db := openDaemonStore(t)
|
|
d := &Daemon{store: db}
|
|
wireServices(d)
|
|
|
|
for _, vm := range []model.VMRecord{
|
|
testVM("alpha", "image-alpha", "172.16.0.2"),
|
|
testVM("alpine", "image-alpha", "172.16.0.3"),
|
|
testVM("bravo", "image-alpha", "172.16.0.4"),
|
|
} {
|
|
upsertDaemonVM(t, ctx, db, vm)
|
|
}
|
|
|
|
vm, err := d.FindVM(ctx, "alpha")
|
|
if err != nil || vm.Name != "alpha" {
|
|
t.Fatalf("FindVM(alpha) = %+v, %v", vm, err)
|
|
}
|
|
|
|
vm, err = d.FindVM(ctx, "br")
|
|
if err != nil || vm.Name != "bravo" {
|
|
t.Fatalf("FindVM(br) = %+v, %v", vm, err)
|
|
}
|
|
|
|
_, err = d.FindVM(ctx, "al")
|
|
if err == nil || !strings.Contains(err.Error(), "multiple VMs match") {
|
|
t.Fatalf("FindVM(al) error = %v, want ambiguity", err)
|
|
}
|
|
|
|
_, err = d.FindVM(ctx, "missing")
|
|
if err == nil || !strings.Contains(err.Error(), `vm "missing" not found`) {
|
|
t.Fatalf("FindVM(missing) error = %v, want not-found", err)
|
|
}
|
|
}
|
|
|
|
func TestFindImagePrefixResolution(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
ctx := context.Background()
|
|
db := openDaemonStore(t)
|
|
d := &Daemon{store: db}
|
|
wireServices(d)
|
|
|
|
for _, image := range []model.Image{
|
|
testImage("base"),
|
|
testImage("basic"),
|
|
testImage("docker"),
|
|
} {
|
|
if err := db.UpsertImage(ctx, image); err != nil {
|
|
t.Fatalf("UpsertImage(%s): %v", image.Name, err)
|
|
}
|
|
}
|
|
|
|
image, err := d.FindImage(ctx, "base")
|
|
if err != nil || image.Name != "base" {
|
|
t.Fatalf("FindImage(base) = %+v, %v", image, err)
|
|
}
|
|
|
|
image, err = d.FindImage(ctx, "dock")
|
|
if err != nil || image.Name != "docker" {
|
|
t.Fatalf("FindImage(dock) = %+v, %v", image, err)
|
|
}
|
|
|
|
_, err = d.FindImage(ctx, "ba")
|
|
if err == nil || !strings.Contains(err.Error(), "multiple images match") {
|
|
t.Fatalf("FindImage(ba) error = %v, want ambiguity", err)
|
|
}
|
|
|
|
_, err = d.FindImage(ctx, "missing")
|
|
if err == nil || !strings.Contains(err.Error(), `image "missing" not found`) {
|
|
t.Fatalf("FindImage(missing) error = %v, want not-found", err)
|
|
}
|
|
}
|
|
|
|
// TestReconcileStopsStaleRunningVMAndClearsRuntimeHandles exercises daemon
// startup reconciliation: a VM recorded as running whose Firecracker
// process no longer exists must be marked stopped, its kernel resources
// (device-mapper snapshot + loop devices) torn down via the runner, and
// both the handles.json scratch file and the in-memory handle cache
// cleared.
func TestReconcileStopsStaleRunningVMAndClearsRuntimeHandles(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	db := openDaemonStore(t)
	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	// The API socket file exists on disk but no process is behind it —
	// mimics the leftovers of a crashed daemon.
	if err := os.WriteFile(apiSock, []byte{}, 0o644); err != nil {
		t.Fatalf("WriteFile(api sock): %v", err)
	}
	vmDir := t.TempDir()
	vm := testVM("stale", "image-stale", "172.16.0.9")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock
	vm.Runtime.VMDir = vmDir
	vm.Runtime.DNSName = ""
	upsertDaemonVM(t, ctx, db, vm)

	// Simulate the prior daemon crashing while this VM was running:
	// the handles.json scratch file survives and names a stale PID +
	// DM snapshot. Reconcile should discover the PID is gone, tear
	// the kernel state down via the runner, and clear the scratch.
	stale := model.VMHandles{
		PID:      999999,
		BaseLoop: "/dev/loop10",
		COWLoop:  "/dev/loop11",
		DMName:   "fc-rootfs-stale",
		DMDev:    "/dev/mapper/fc-rootfs-stale",
	}
	if err := writeHandlesFile(vmDir, stale); err != nil {
		t.Fatalf("writeHandlesFile: %v", err)
	}

	// Script the exact command sequence reconcile must run; any extra,
	// missing, or out-of-order command fails the test.
	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			// First pgrep: rediscoverHandles tries to verify the PID.
			{call: runnerCall{name: "pgrep", args: []string{"-n", "-f", apiSock}}, err: errors.New("exit status 1")},
			// Second pgrep: cleanupRuntime asks again before killing.
			{call: runnerCall{name: "pgrep", args: []string{"-n", "-f", apiSock}}, err: errors.New("exit status 1")},
			sudoStep("", nil, "dmsetup", "remove", "fc-rootfs-stale"),
			sudoStep("", nil, "losetup", "-d", "/dev/loop11"),
			sudoStep("", nil, "losetup", "-d", "/dev/loop10"),
		},
	}
	d := &Daemon{store: db, runner: runner}
	wireServices(d)

	if err := d.reconcile(ctx); err != nil {
		t.Fatalf("reconcile: %v", err)
	}
	runner.assertExhausted()

	// The store must now record the VM as stopped at both levels.
	got, err := db.GetVM(ctx, vm.ID)
	if err != nil {
		t.Fatalf("GetVM: %v", err)
	}
	if got.State != model.VMStateStopped || got.Runtime.State != model.VMStateStopped {
		t.Fatalf("vm state after reconcile = %s/%s, want stopped", got.State, got.Runtime.State)
	}
	// The scratch file must be gone — stopped VMs don't carry handles.
	if _, err := os.Stat(handlesFilePath(vmDir)); !os.IsNotExist(err) {
		t.Fatalf("handles.json still present after reconcile: %v", err)
	}
	// And the in-memory cache must be empty.
	if h, ok := d.vm.handles.get(vm.ID); ok && !h.IsZero() {
		t.Fatalf("handle cache not cleared after reconcile: %+v", h)
	}
}
|
|
|
|
// TestRebuildDNSIncludesOnlyLiveRunningVMs checks that rebuildDNS
// publishes DNS records only for VMs that are both recorded as running
// and backed by a live process: a live VM is published, a VM whose PID
// is dead is not, and a stopped VM is not.
func TestRebuildDNSIncludesOnlyLiveRunningVMs(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	db := openDaemonStore(t)

	// A real (fake-Firecracker) process backs the "live" VM's socket.
	liveSock := filepath.Join(t.TempDir(), "live.sock")
	liveCmd := startFakeFirecrackerProcess(t, liveSock)
	t.Cleanup(func() {
		_ = liveCmd.Process.Kill()
		_ = liveCmd.Wait()
	})

	live := testVM("live", "image-live", "172.16.0.21")
	live.State = model.VMStateRunning
	live.Runtime.State = model.VMStateRunning
	live.Runtime.APISockPath = liveSock

	// "stale" claims to be running but nothing listens on its socket.
	stale := testVM("stale", "image-stale", "172.16.0.22")
	stale.State = model.VMStateRunning
	stale.Runtime.State = model.VMStateRunning
	stale.Runtime.APISockPath = filepath.Join(t.TempDir(), "stale.sock")

	// "stopped" keeps the default (non-running) state from testVM.
	stopped := testVM("stopped", "image-stopped", "172.16.0.23")

	for _, vm := range []model.VMRecord{live, stale, stopped} {
		upsertDaemonVM(t, ctx, db, vm)
	}

	server, err := vmdns.New("127.0.0.1:0", nil)
	if err != nil {
		skipIfSocketRestricted(t, err)
		t.Fatalf("vmdns.New: %v", err)
	}
	t.Cleanup(func() {
		if err := server.Close(); err != nil {
			t.Fatalf("server.Close: %v", err)
		}
	})

	// Pre-install the DNS server before wiring so the service graph
	// picks it up.
	d := &Daemon{store: db, net: &HostNetwork{vmDNS: server}}
	wireServices(d)
	// rebuildDNS reads the alive check from the handle cache. Seed
	// the live VM with its real PID; leave the stale entry with a PID
	// that definitely isn't running (999999 ≫ max PID on most hosts).
	d.vm.setVMHandlesInMemory(live.ID, model.VMHandles{PID: liveCmd.Process.Pid})
	d.vm.setVMHandlesInMemory(stale.ID, model.VMHandles{PID: 999999})
	if err := d.vm.rebuildDNS(ctx); err != nil {
		t.Fatalf("rebuildDNS: %v", err)
	}

	// Only the live VM's record should be resolvable.
	if _, ok := server.Lookup("live.vm"); !ok {
		t.Fatal("live.vm missing after rebuildDNS")
	}
	if _, ok := server.Lookup("stale.vm"); ok {
		t.Fatal("stale.vm should not be published")
	}
	if _, ok := server.Lookup("stopped.vm"); ok {
		t.Fatal("stopped.vm should not be published")
	}
}
|
|
|
|
// TestSetVMRejectsStoppedOnlyChangesForRunningVM checks that SetVM
// refuses vcpu, memory, and work-disk-size changes while the VM's
// process is alive; each attempt must return a "requires the VM to be
// stopped" error.
func TestSetVMRejectsStoppedOnlyChangesForRunningVM(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	db := openDaemonStore(t)
	apiSock := filepath.Join(t.TempDir(), "running.sock")
	cmd := startFakeFirecrackerProcess(t, apiSock)
	t.Cleanup(func() {
		_ = cmd.Process.Kill()
		_ = cmd.Wait()
	})

	vm := testVM("running", "image-run", "172.16.0.10")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock
	upsertDaemonVM(t, ctx, db, vm)

	d := &Daemon{store: db}
	wireServices(d)
	// Seed the handle cache with the fake process's real PID so the
	// daemon considers this VM live.
	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: cmd.Process.Pid})
	tests := []struct {
		name   string
		params api.VMSetParams
		want   string
	}{
		{
			name:   "vcpu",
			params: api.VMSetParams{IDOrName: vm.ID, VCPUCount: ptr(4)},
			want:   "vcpu changes require the VM to be stopped",
		},
		{
			name:   "memory",
			params: api.VMSetParams{IDOrName: vm.ID, MemoryMiB: ptr(2048)},
			want:   "memory changes require the VM to be stopped",
		},
		{
			name:   "disk",
			params: api.VMSetParams{IDOrName: vm.ID, WorkDiskSize: "16G"},
			want:   "disk changes require the VM to be stopped",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := d.vm.SetVM(ctx, tt.params)
			if err == nil || !strings.Contains(err.Error(), tt.want) {
				t.Fatalf("SetVM(%s) error = %v, want %q", tt.name, err, tt.want)
			}
		})
	}
}
|
|
|
|
func TestHealthVMReturnsHealthyForRunningGuest(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
ctx := context.Background()
|
|
db := openDaemonStore(t)
|
|
apiSock := filepath.Join(t.TempDir(), "fc.sock")
|
|
fake := startFakeFirecrackerProcess(t, apiSock)
|
|
t.Cleanup(func() {
|
|
_ = fake.Process.Kill()
|
|
_ = fake.Wait()
|
|
})
|
|
|
|
vsockSock := filepath.Join(t.TempDir(), "fc.vsock")
|
|
listener, err := net.Listen("unix", vsockSock)
|
|
if err != nil {
|
|
skipIfSocketRestricted(t, err)
|
|
t.Fatalf("listen vsock: %v", err)
|
|
}
|
|
t.Cleanup(func() {
|
|
_ = listener.Close()
|
|
_ = os.Remove(vsockSock)
|
|
})
|
|
serverDone := make(chan error, 1)
|
|
go func() {
|
|
conn, err := listener.Accept()
|
|
if err != nil {
|
|
serverDone <- err
|
|
return
|
|
}
|
|
defer conn.Close()
|
|
buf := make([]byte, 128)
|
|
n, err := conn.Read(buf)
|
|
if err != nil {
|
|
serverDone <- err
|
|
return
|
|
}
|
|
if got := string(buf[:n]); got != "CONNECT 42070\n" {
|
|
serverDone <- fmt.Errorf("unexpected connect message %q", got)
|
|
return
|
|
}
|
|
if _, err := conn.Write([]byte("OK 1\n")); err != nil {
|
|
serverDone <- err
|
|
return
|
|
}
|
|
reqBuf := make([]byte, 0, 512)
|
|
reqBuf = append(reqBuf, buf[:0]...)
|
|
for {
|
|
n, err = conn.Read(buf)
|
|
if err != nil {
|
|
serverDone <- err
|
|
return
|
|
}
|
|
reqBuf = append(reqBuf, buf[:n]...)
|
|
if strings.Contains(string(reqBuf), "\r\n\r\n") {
|
|
break
|
|
}
|
|
}
|
|
if got := string(reqBuf); !strings.Contains(got, "GET /healthz HTTP/1.1\r\n") {
|
|
serverDone <- fmt.Errorf("unexpected health payload %q", got)
|
|
return
|
|
}
|
|
_, err = conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: 15\r\n\r\n{\"status\":\"ok\"}"))
|
|
serverDone <- err
|
|
}()
|
|
|
|
vm := testVM("alive", "image-alive", "172.16.0.41")
|
|
vm.State = model.VMStateRunning
|
|
vm.Runtime.State = model.VMStateRunning
|
|
vm.Runtime.APISockPath = apiSock
|
|
vm.Runtime.VSockPath = vsockSock
|
|
vm.Runtime.VSockCID = 10041
|
|
upsertDaemonVM(t, ctx, db, vm)
|
|
|
|
handlePID := fake.Process.Pid
|
|
runner := &scriptedRunner{
|
|
t: t,
|
|
steps: []runnerStep{
|
|
sudoStep("", nil, "chown", fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()), vsockSock),
|
|
sudoStep("", nil, "chmod", "600", vsockSock),
|
|
},
|
|
}
|
|
d := &Daemon{store: db, runner: runner}
|
|
wireServices(d)
|
|
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: handlePID})
|
|
result, err := d.vm.HealthVM(ctx, vm.Name)
|
|
if err != nil {
|
|
t.Fatalf("HealthVM: %v", err)
|
|
}
|
|
if !result.Healthy || result.Name != vm.Name {
|
|
t.Fatalf("HealthVM result = %+v, want healthy %s", result, vm.Name)
|
|
}
|
|
runner.assertExhausted()
|
|
if err := <-serverDone; err != nil {
|
|
t.Fatalf("server: %v", err)
|
|
}
|
|
}
|
|
|
|
// TestPingVMAliasReturnsAliveForHealthyVM checks the PingVM alias path:
// with a live backing process and a fake guest agent that answers the
// health probe with 200, PingVM must report the VM alive.
func TestPingVMAliasReturnsAliveForHealthyVM(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	db := openDaemonStore(t)
	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	fake := startFakeFirecrackerProcess(t, apiSock)
	t.Cleanup(func() {
		_ = fake.Process.Kill()
		_ = fake.Wait()
	})
	vsockSock := filepath.Join(t.TempDir(), "fc.vsock")
	listener, err := net.Listen("unix", vsockSock)
	if err != nil {
		skipIfSocketRestricted(t, err)
		t.Fatalf("listen vsock: %v", err)
	}
	t.Cleanup(func() {
		_ = listener.Close()
		_ = os.Remove(vsockSock)
	})
	// Minimal fake agent: ack the CONNECT line, swallow the HTTP
	// request, return a canned 200. Errors are ignored — a failed probe
	// surfaces through PingVM's result instead.
	go func() {
		conn, err := listener.Accept()
		if err != nil {
			return
		}
		defer conn.Close()
		buf := make([]byte, 512)
		_, _ = conn.Read(buf)
		_, _ = conn.Write([]byte("OK 1\n"))
		_, _ = conn.Read(buf)
		_, _ = conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: 15\r\n\r\n{\"status\":\"ok\"}"))
	}()
	vm := testVM("healthy-ping", "image-healthy", "172.16.0.42")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock
	vm.Runtime.VSockPath = vsockSock
	vm.Runtime.VSockCID = 10042
	upsertDaemonVM(t, ctx, db, vm)

	// The probe path chowns/chmods the vsock socket before dialing.
	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			sudoStep("", nil, "chown", fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()), vsockSock),
			sudoStep("", nil, "chmod", "600", vsockSock),
		},
	}
	d := &Daemon{store: db, runner: runner}
	wireServices(d)
	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})
	result, err := d.vm.PingVM(ctx, vm.Name)
	if err != nil {
		t.Fatalf("PingVM: %v", err)
	}
	if !result.Alive {
		t.Fatalf("PingVM result = %+v, want alive", result)
	}
}
|
|
|
|
// TestWaitForGuestVSockAgentRetriesUntilHealthy verifies the boot-time
// readiness probe keeps retrying: the fake agent accepts the first
// connection, completes the CONNECT/OK handshake, then hangs up before
// serving HTTP; only the second attempt is answered with 200. The probe
// must succeed within the one-second budget.
func TestWaitForGuestVSockAgentRetriesUntilHealthy(t *testing.T) {
	t.Parallel()

	socketPath := filepath.Join(t.TempDir(), "fc.vsock")
	listener, err := net.Listen("unix", socketPath)
	if err != nil {
		skipIfSocketRestricted(t, err)
		t.Fatalf("listen vsock: %v", err)
	}
	t.Cleanup(func() {
		_ = listener.Close()
		_ = os.Remove(socketPath)
	})

	serverDone := make(chan error, 1)
	go func() {
		// Two accept cycles: attempt 0 drops the connection after the
		// handshake; attempt 1 serves the health response.
		for attempt := 0; attempt < 2; attempt++ {
			conn, err := listener.Accept()
			if err != nil {
				serverDone <- err
				return
			}

			buf := make([]byte, 512)
			n, err := conn.Read(buf)
			if err != nil {
				_ = conn.Close()
				serverDone <- err
				return
			}
			if got := string(buf[:n]); got != "CONNECT 42070\n" {
				_ = conn.Close()
				serverDone <- fmt.Errorf("unexpected connect message %q", got)
				return
			}
			if _, err := conn.Write([]byte("OK 1\n")); err != nil {
				_ = conn.Close()
				serverDone <- err
				return
			}

			// First attempt: hang up mid-conversation to force a retry.
			if attempt == 0 {
				_ = conn.Close()
				continue
			}

			// Second attempt: read the full HTTP request headers.
			reqBuf := make([]byte, 0, 512)
			for {
				n, err = conn.Read(buf)
				if err != nil {
					_ = conn.Close()
					serverDone <- err
					return
				}
				reqBuf = append(reqBuf, buf[:n]...)
				if strings.Contains(string(reqBuf), "\r\n\r\n") {
					break
				}
			}
			if got := string(reqBuf); !strings.Contains(got, "GET /healthz HTTP/1.1\r\n") {
				_ = conn.Close()
				serverDone <- fmt.Errorf("unexpected health payload %q", got)
				return
			}
			_, err = conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: 15\r\n\r\n{\"status\":\"ok\"}"))
			_ = conn.Close()
			serverDone <- err
			return
		}
		// Reached only if the probe never came back for attempt 1.
		serverDone <- errors.New("health probe did not retry")
	}()

	n := &HostNetwork{}
	if err := n.waitForGuestVSockAgent(context.Background(), socketPath, time.Second); err != nil {
		t.Fatalf("waitForGuestVSockAgent: %v", err)
	}
	if err := <-serverDone; err != nil {
		t.Fatalf("server: %v", err)
	}
}
|
|
|
|
func TestHealthVMReturnsFalseForStoppedVM(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
ctx := context.Background()
|
|
db := openDaemonStore(t)
|
|
vm := testVM("stopped-ping", "image-stopped", "172.16.0.42")
|
|
upsertDaemonVM(t, ctx, db, vm)
|
|
|
|
d := &Daemon{store: db}
|
|
wireServices(d)
|
|
result, err := d.vm.HealthVM(ctx, vm.Name)
|
|
if err != nil {
|
|
t.Fatalf("HealthVM: %v", err)
|
|
}
|
|
if result.Healthy {
|
|
t.Fatalf("HealthVM result = %+v, want not healthy", result)
|
|
}
|
|
}
|
|
|
|
// TestPortsVMReturnsEnrichedPortsAndWebSchemes drives PortsVM end to end:
// the fake guest agent reports three listeners, two of which point at
// real local HTTP/HTTPS servers. PortsVM must render per-port endpoints
// using the VM's DNS name and upgrade the two web listeners to http:// /
// https:// URLs, leaving the UDP listener as a bare host:port endpoint.
func TestPortsVMReturnsEnrichedPortsAndWebSchemes(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	db := openDaemonStore(t)
	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	fake := startFakeFirecrackerProcess(t, apiSock)
	t.Cleanup(func() {
		_ = fake.Process.Kill()
		_ = fake.Wait()
	})

	// Real listeners so the scheme probe can distinguish HTTP vs HTTPS.
	webAddr := startHTTPServerOnTCP4(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	}))
	tlsAddr := startHTTPSServerOnTCP4(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusAccepted)
	}))

	vsockSock := filepath.Join(t.TempDir(), "fc.vsock")
	listener, err := net.Listen("unix", vsockSock)
	if err != nil {
		skipIfSocketRestricted(t, err)
		t.Fatalf("listen vsock: %v", err)
	}
	t.Cleanup(func() {
		_ = listener.Close()
		_ = os.Remove(vsockSock)
	})
	serverDone := make(chan error, 1)
	// Fake guest agent: CONNECT/OK handshake, then serve GET /ports
	// with a canned three-listener JSON payload.
	go func() {
		conn, err := listener.Accept()
		if err != nil {
			serverDone <- err
			return
		}
		defer conn.Close()
		buf := make([]byte, 1024)
		n, err := conn.Read(buf)
		if err != nil {
			serverDone <- err
			return
		}
		if got := string(buf[:n]); got != "CONNECT 42070\n" {
			serverDone <- fmt.Errorf("unexpected connect message %q", got)
			return
		}
		if _, err := conn.Write([]byte("OK 1\n")); err != nil {
			serverDone <- err
			return
		}
		// Accumulate the HTTP request until the header terminator.
		reqBuf := make([]byte, 0, 1024)
		for {
			n, err = conn.Read(buf)
			if err != nil {
				serverDone <- err
				return
			}
			reqBuf = append(reqBuf, buf[:n]...)
			if strings.Contains(string(reqBuf), "\r\n\r\n") {
				break
			}
		}
		if got := string(reqBuf); !strings.Contains(got, "GET /ports HTTP/1.1\r\n") {
			serverDone <- fmt.Errorf("unexpected ports payload %q", got)
			return
		}
		body := fmt.Sprintf(`{"listeners":[{"proto":"tcp","bind_address":"0.0.0.0","port":%d,"pid":44,"process":"python3","command":"python3 -m http.server %d"},{"proto":"tcp","bind_address":"0.0.0.0","port":%d,"pid":77,"process":"caddy","command":"caddy run"},{"proto":"udp","bind_address":"0.0.0.0","port":53,"pid":1,"process":"dnsd","command":"dnsd --foreground"}]}`, webAddr.Port, webAddr.Port, tlsAddr.Port)
		resp := fmt.Sprintf("HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: %d\r\n\r\n%s", len(body), body)
		_, err = conn.Write([]byte(resp))
		serverDone <- err
	}()

	// Guest IP 127.0.0.1 so the scheme probe reaches the local servers.
	vm := testVM("ports", "image-ports", "127.0.0.1")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock
	vm.Runtime.VSockPath = vsockSock
	vm.Runtime.VSockCID = 10043
	upsertDaemonVM(t, ctx, db, vm)

	// PortsVM chowns/chmods the vsock socket before dialing it.
	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			sudoStep("", nil, "chown", fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()), vsockSock),
			sudoStep("", nil, "chmod", "600", vsockSock),
		},
	}
	d := &Daemon{store: db, runner: runner}
	wireServices(d)
	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})

	result, err := d.vm.PortsVM(ctx, vm.Name)
	if err != nil {
		t.Fatalf("PortsVM: %v", err)
	}
	if result.Name != vm.Name || result.DNSName != vm.Runtime.DNSName {
		t.Fatalf("result = %+v, want name/dns", result)
	}
	if len(result.Ports) != 3 {
		t.Fatalf("ports = %+v, want 3 entries", result.Ports)
	}
	wantHTTP := fmt.Sprintf("http://ports.vm:%d/", webAddr.Port)
	wantHTTPS := fmt.Sprintf("https://ports.vm:%d/", tlsAddr.Port)
	// Pick each reported port out by number; order is not asserted.
	var httpPort, httpsPort, udpPort api.VMPort
	for _, port := range result.Ports {
		switch port.Port {
		case webAddr.Port:
			httpPort = port
		case tlsAddr.Port:
			httpsPort = port
		case 53:
			udpPort = port
		}
	}
	if udpPort.Endpoint != "ports.vm:53" {
		t.Fatalf("udp port = %+v, want endpoint only", udpPort)
	}
	if httpPort.Proto != "http" || httpPort.Endpoint != wantHTTP {
		t.Fatalf("http port = %+v, want http endpoint %q", httpPort, wantHTTP)
	}
	if httpsPort.Proto != "https" || httpsPort.Endpoint != wantHTTPS {
		t.Fatalf("https port = %+v, want https endpoint %q", httpsPort, wantHTTPS)
	}
	runner.assertExhausted()
	if err := <-serverDone; err != nil {
		t.Fatalf("server: %v", err)
	}
}
|
|
|
|
func TestPortsVMReturnsErrorForStoppedVM(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
ctx := context.Background()
|
|
db := openDaemonStore(t)
|
|
vm := testVM("stopped-ports", "image-stopped", "172.16.0.50")
|
|
upsertDaemonVM(t, ctx, db, vm)
|
|
|
|
d := &Daemon{store: db}
|
|
wireServices(d)
|
|
_, err := d.vm.PortsVM(ctx, vm.Name)
|
|
if err == nil || !strings.Contains(err.Error(), "is not running") {
|
|
t.Fatalf("PortsVM error = %v, want not running", err)
|
|
}
|
|
}
|
|
|
|
func TestBuildVMPortsDeduplicatesSameRenderedEndpoint(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
vm := testVM("dedupe-ports", "image-ports", "")
|
|
vm.Runtime.DNSName = "dedupe-ports.vm"
|
|
|
|
ports := buildVMPorts(vm, []vsockagent.PortListener{
|
|
{
|
|
Proto: "tcp",
|
|
BindAddress: "0.0.0.0",
|
|
Port: 8080,
|
|
PID: 44,
|
|
Process: "docker-proxy",
|
|
Command: "/usr/bin/docker-proxy -proto tcp -host-ip 0.0.0.0 -host-port 8080",
|
|
},
|
|
{
|
|
Proto: "tcp",
|
|
BindAddress: "::",
|
|
Port: 8080,
|
|
PID: 45,
|
|
Process: "docker-proxy",
|
|
Command: "/usr/bin/docker-proxy -proto tcp -host-ip :: -host-port 8080",
|
|
},
|
|
{
|
|
Proto: "udp",
|
|
BindAddress: "0.0.0.0",
|
|
Port: 8080,
|
|
PID: 46,
|
|
Process: "dnsd",
|
|
Command: "dnsd --foreground",
|
|
},
|
|
})
|
|
if len(ports) != 2 {
|
|
t.Fatalf("ports = %+v, want tcp+udp entries after dedupe", ports)
|
|
}
|
|
if ports[0].Proto != "tcp" || ports[0].Endpoint != "dedupe-ports.vm:8080" {
|
|
t.Fatalf("first port = %+v, want deduped tcp endpoint", ports[0])
|
|
}
|
|
if ports[1].Proto != "udp" || ports[1].Endpoint != "dedupe-ports.vm:8080" {
|
|
t.Fatalf("second port = %+v, want distinct udp endpoint", ports[1])
|
|
}
|
|
}
|
|
|
|
func TestSetVMDiskResizeFailsPreflightWhenToolsMissing(t *testing.T) {
|
|
ctx := context.Background()
|
|
db := openDaemonStore(t)
|
|
workDisk := filepath.Join(t.TempDir(), "root.ext4")
|
|
if err := os.WriteFile(workDisk, []byte("disk"), 0o644); err != nil {
|
|
t.Fatalf("WriteFile: %v", err)
|
|
}
|
|
vm := testVM("resize", "image-resize", "172.16.0.11")
|
|
vm.Runtime.WorkDiskPath = workDisk
|
|
vm.Spec.WorkDiskSizeBytes = 8 * 1024 * 1024 * 1024
|
|
upsertDaemonVM(t, ctx, db, vm)
|
|
|
|
t.Setenv("PATH", t.TempDir())
|
|
d := &Daemon{store: db}
|
|
wireServices(d)
|
|
_, err := d.vm.SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, WorkDiskSize: "16G"})
|
|
if err == nil || !strings.Contains(err.Error(), "work disk resize preflight failed") {
|
|
t.Fatalf("SetVM() error = %v, want preflight failure", err)
|
|
}
|
|
}
|
|
|
|
// TestFlattenNestedWorkHomeCopiesEntriesIndividually checks that
// flattenNestedWorkHome moves each entry of a nested <mount>/root home
// directory up to the mount root with one cp -a per entry, then removes
// the nested directory — all through the privileged runner.
func TestFlattenNestedWorkHomeCopiesEntriesIndividually(t *testing.T) {
	t.Parallel()

	workMount := t.TempDir()
	nestedHome := filepath.Join(workMount, "root")
	// Seed the nested layout with one directory and one file so the
	// copy is exercised for both entry kinds.
	if err := os.MkdirAll(filepath.Join(nestedHome, ".ssh"), 0o755); err != nil {
		t.Fatalf("MkdirAll(.ssh): %v", err)
	}
	if err := os.WriteFile(filepath.Join(nestedHome, "notes.txt"), []byte("seed"), 0o644); err != nil {
		t.Fatalf("WriteFile(notes.txt): %v", err)
	}

	// Script the exact privileged command sequence: make the nested
	// home traversable, copy each entry individually, then delete it.
	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			sudoStep("", nil, "chmod", "755", nestedHome),
			sudoStep("", nil, "cp", "-a", filepath.Join(nestedHome, ".ssh"), workMount+"/"),
			sudoStep("", nil, "cp", "-a", filepath.Join(nestedHome, "notes.txt"), workMount+"/"),
			sudoStep("", nil, "rm", "-rf", nestedHome),
		},
	}
	d := &Daemon{runner: runner}
	wireServices(d)

	if err := flattenNestedWorkHome(context.Background(), d.runner, workMount); err != nil {
		t.Fatalf("flattenNestedWorkHome: %v", err)
	}
	runner.assertExhausted()
}
|
|
|
|
// TestEnsureAuthorizedKeyOnWorkDiskRepairsNestedRootLayout checks that
// ensureAuthorizedKeyOnWorkDisk repairs a legacy nested <disk>/root home
// layout (flattening its contents to the disk root) and that the
// resulting authorized_keys contains both the pre-existing legacy key
// and the daemon-managed key derived from the configured SSH key.
func TestEnsureAuthorizedKeyOnWorkDiskRepairsNestedRootLayout(t *testing.T) {
	t.Parallel()

	// Build the legacy nested layout: <disk>/root/{.ssh,.bashrc}.
	workDiskDir := t.TempDir()
	nestedHome := filepath.Join(workDiskDir, "root")
	if err := os.MkdirAll(filepath.Join(nestedHome, ".ssh"), 0o700); err != nil {
		t.Fatalf("MkdirAll(.ssh): %v", err)
	}
	if err := os.WriteFile(filepath.Join(nestedHome, ".bashrc"), []byte("export TEST_PROMPT=1\n"), 0o644); err != nil {
		t.Fatalf("WriteFile(.bashrc): %v", err)
	}
	legacyKey := "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILEgacykey legacy@test\n"
	if err := os.WriteFile(filepath.Join(nestedHome, ".ssh", "authorized_keys"), []byte(legacyKey), 0o600); err != nil {
		t.Fatalf("WriteFile(authorized_keys): %v", err)
	}

	// Generate a throwaway RSA key for the daemon's managed identity.
	// 1024 bits keeps the test fast; strength is irrelevant here.
	privateKey, err := rsa.GenerateKey(rand.Reader, 1024)
	if err != nil {
		t.Fatalf("GenerateKey: %v", err)
	}
	privateKeyPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
	})
	sshKeyPath := filepath.Join(t.TempDir(), "id_rsa")
	if err := os.WriteFile(sshKeyPath, privateKeyPEM, 0o600); err != nil {
		t.Fatalf("WriteFile(private key): %v", err)
	}

	d := &Daemon{
		runner: &filesystemRunner{t: t},
		config: model.DaemonConfig{SSHKeyPath: sshKeyPath},
	}
	wireServices(d)
	vm := testVM("seed-repair", "image-seed-repair", "172.16.0.61")
	vm.Runtime.WorkDiskPath = workDiskDir

	if err := d.ws.ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm, model.Image{}, workDiskPreparation{}); err != nil {
		t.Fatalf("ensureAuthorizedKeyOnWorkDisk: %v", err)
	}
	// The nested home must be flattened away entirely …
	if _, err := os.Stat(filepath.Join(workDiskDir, "root")); !os.IsNotExist(err) {
		t.Fatalf("nested root still exists: %v", err)
	}
	// … with its entries promoted to the disk root.
	if _, err := os.Stat(filepath.Join(workDiskDir, ".bashrc")); err != nil {
		t.Fatalf(".bashrc missing at top level: %v", err)
	}
	data, err := os.ReadFile(filepath.Join(workDiskDir, ".ssh", "authorized_keys"))
	if err != nil {
		t.Fatalf("ReadFile(authorized_keys): %v", err)
	}
	// Both keys must survive: the legacy one and the managed RSA key.
	content := string(data)
	if !strings.Contains(content, strings.TrimSpace(legacyKey)) {
		t.Fatalf("authorized_keys missing legacy key: %q", content)
	}
	if !strings.Contains(content, "ssh-rsa ") {
		t.Fatalf("authorized_keys missing managed key: %q", content)
	}
}
|
|
|
|
func TestEnsureGitIdentityOnWorkDiskCopiesHostGlobalIdentity(t *testing.T) {
|
|
if _, err := exec.LookPath("git"); err != nil {
|
|
t.Skip("git not installed")
|
|
}
|
|
|
|
hostConfigPath := filepath.Join(t.TempDir(), "host.gitconfig")
|
|
t.Setenv("GIT_CONFIG_GLOBAL", hostConfigPath)
|
|
testSetGitConfig(t, "user.name", "Banger Host")
|
|
testSetGitConfig(t, "user.email", "host@example.com")
|
|
|
|
workDiskDir := t.TempDir()
|
|
d := &Daemon{runner: &filesystemRunner{t: t}}
|
|
wireServices(d)
|
|
vm := testVM("git-identity", "image-git-identity", "172.16.0.67")
|
|
vm.Runtime.WorkDiskPath = workDiskDir
|
|
|
|
if err := d.ws.ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
|
|
t.Fatalf("ensureGitIdentityOnWorkDisk: %v", err)
|
|
}
|
|
|
|
guestConfigPath := filepath.Join(workDiskDir, workDiskGitConfigRelativePath)
|
|
if got := testGitConfigValue(t, guestConfigPath, "user.name"); got != "Banger Host" {
|
|
t.Fatalf("guest user.name = %q, want Banger Host", got)
|
|
}
|
|
if got := testGitConfigValue(t, guestConfigPath, "user.email"); got != "host@example.com" {
|
|
t.Fatalf("guest user.email = %q, want host@example.com", got)
|
|
}
|
|
}
|
|
|
|
// TestEnsureGitIdentityOnWorkDiskPreservesExistingGuestConfig checks
// that syncing the host identity into an existing guest gitconfig
// updates user.name/user.email while preserving unrelated settings
// (here, safe.directory). Uses t.Setenv, so no t.Parallel.
func TestEnsureGitIdentityOnWorkDiskPreservesExistingGuestConfig(t *testing.T) {
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("git not installed")
	}

	// Isolate the host identity in a scratch global gitconfig.
	hostConfigPath := filepath.Join(t.TempDir(), "host.gitconfig")
	t.Setenv("GIT_CONFIG_GLOBAL", hostConfigPath)
	testSetGitConfig(t, "user.name", "Fresh Name")
	testSetGitConfig(t, "user.email", "fresh@example.com")

	// Pre-seed the guest config with a stale identity plus an
	// unrelated setting that must survive the sync.
	workDiskDir := t.TempDir()
	guestConfigPath := filepath.Join(workDiskDir, workDiskGitConfigRelativePath)
	if err := os.WriteFile(guestConfigPath, []byte("[safe]\n\tdirectory = /root/repo\n[user]\n\tname = stale\n"), 0o644); err != nil {
		t.Fatalf("WriteFile(guest .gitconfig): %v", err)
	}

	d := &Daemon{runner: &filesystemRunner{t: t}}
	wireServices(d)
	vm := testVM("git-identity-preserve", "image-git-identity", "172.16.0.68")
	vm.Runtime.WorkDiskPath = workDiskDir

	if err := d.ws.ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
		t.Fatalf("ensureGitIdentityOnWorkDisk: %v", err)
	}

	// Identity fields are refreshed from the host …
	if got := testGitConfigValue(t, guestConfigPath, "user.name"); got != "Fresh Name" {
		t.Fatalf("guest user.name = %q, want Fresh Name", got)
	}
	if got := testGitConfigValue(t, guestConfigPath, "user.email"); got != "fresh@example.com" {
		t.Fatalf("guest user.email = %q, want fresh@example.com", got)
	}
	// … while pre-existing unrelated settings remain intact.
	if got := testGitConfigValue(t, guestConfigPath, "safe.directory"); got != "/root/repo" {
		t.Fatalf("guest safe.directory = %q, want /root/repo", got)
	}
}
|
|
|
|
// TestEnsureGitIdentityOnWorkDiskWarnsAndSkipsWhenHostIdentityIncomplete
// checks the degraded path: when the host identity lacks user.email, the
// guest gitconfig is left untouched and a structured "sync skipped" log
// entry is emitted instead of an error. Uses t.Setenv, so no t.Parallel.
func TestEnsureGitIdentityOnWorkDiskWarnsAndSkipsWhenHostIdentityIncomplete(t *testing.T) {
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("git not installed")
	}

	// Host identity has only user.name — user.email is missing.
	hostConfigPath := filepath.Join(t.TempDir(), "host.gitconfig")
	t.Setenv("GIT_CONFIG_GLOBAL", hostConfigPath)
	testSetGitConfig(t, "user.name", "Only Name")

	// Pre-existing guest config that must be preserved byte-for-byte.
	workDiskDir := t.TempDir()
	guestConfigPath := filepath.Join(workDiskDir, workDiskGitConfigRelativePath)
	original := []byte("[user]\n\temail = keep@example.com\n")
	if err := os.WriteFile(guestConfigPath, original, 0o644); err != nil {
		t.Fatalf("WriteFile(guest .gitconfig): %v", err)
	}

	// Capture the daemon's structured log output for later assertion.
	var buf bytes.Buffer
	logger, _, err := newDaemonLogger(&buf, "info")
	if err != nil {
		t.Fatalf("newDaemonLogger: %v", err)
	}

	d := &Daemon{
		runner: &filesystemRunner{t: t},
		logger: logger,
	}
	wireServices(d)
	vm := testVM("git-identity-missing", "image-git-identity", "172.16.0.69")
	vm.Runtime.WorkDiskPath = workDiskDir

	if err := d.ws.ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
		t.Fatalf("ensureGitIdentityOnWorkDisk: %v", err)
	}

	// The guest config must be exactly what we wrote before the call.
	got, err := os.ReadFile(guestConfigPath)
	if err != nil {
		t.Fatalf("ReadFile(guest .gitconfig): %v", err)
	}
	if string(got) != string(original) {
		t.Fatalf("guest .gitconfig = %q, want preserved %q", string(got), string(original))
	}

	// The skip must be logged with the structured fields below.
	entries := parseLogEntries(t, buf.Bytes())
	if !hasLogEntry(entries, map[string]string{
		"msg":     "guest git identity sync skipped",
		"vm_name": vm.Name,
		"source":  hostGlobalGitIdentitySource,
		"error":   "host git user.email is empty",
	}) {
		t.Fatalf("expected warn log, got %v", entries)
	}
}
|
|
|
|
func TestRunFileSyncNoOpWhenConfigEmpty(t *testing.T) {
|
|
d := &Daemon{runner: &filesystemRunner{t: t}}
|
|
wireServices(d)
|
|
vm := testVM("no-sync", "image", "172.16.0.70")
|
|
if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
|
|
t.Fatalf("runFileSync: %v", err)
|
|
}
|
|
}
|
|
|
|
// TestRunFileSyncCopiesFile: a single-file file_sync entry with ~ paths
// is copied from the host home into the work disk, preserving content
// and the source file's 0600 permissions.
func TestRunFileSyncCopiesFile(t *testing.T) {
	homeDir := t.TempDir()
	t.Setenv("HOME", homeDir)
	srcPath := filepath.Join(homeDir, ".secrets", "token")
	if err := os.MkdirAll(filepath.Dir(srcPath), 0o755); err != nil {
		t.Fatal(err)
	}
	srcData := []byte(`{"token":"abc"}`)
	if err := os.WriteFile(srcPath, srcData, 0o600); err != nil {
		t.Fatal(err)
	}

	workDisk := t.TempDir()
	d := &Daemon{
		runner: &filesystemRunner{t: t},
		config: model.DaemonConfig{
			FileSync: []model.FileSyncEntry{
				{Host: "~/.secrets/token", Guest: "~/.secrets/token"},
			},
		},
	}
	wireServices(d)
	vm := testVM("sync-file", "image", "172.16.0.71")
	vm.Runtime.WorkDiskPath = workDisk
	if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
		t.Fatalf("runFileSync: %v", err)
	}

	// Guest "~" resolves under the work disk root in this test setup.
	dst := filepath.Join(workDisk, ".secrets", "token")
	got, err := os.ReadFile(dst)
	if err != nil {
		t.Fatal(err)
	}
	if string(got) != string(srcData) {
		t.Fatalf("dst = %q, want %q", got, srcData)
	}
	info, err := os.Stat(dst)
	if err != nil {
		t.Fatal(err)
	}
	if info.Mode().Perm() != 0o600 {
		t.Fatalf("mode = %v, want 0600", info.Mode().Perm())
	}
}
|
|
|
|
func TestRunFileSyncRespectsCustomMode(t *testing.T) {
|
|
homeDir := t.TempDir()
|
|
t.Setenv("HOME", homeDir)
|
|
srcPath := filepath.Join(homeDir, "script")
|
|
if err := os.WriteFile(srcPath, []byte("#!/bin/sh\nexit 0\n"), 0o600); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
workDisk := t.TempDir()
|
|
d := &Daemon{
|
|
runner: &filesystemRunner{t: t},
|
|
config: model.DaemonConfig{
|
|
FileSync: []model.FileSyncEntry{
|
|
{Host: "~/script", Guest: "~/bin/my-script", Mode: "0755"},
|
|
},
|
|
},
|
|
}
|
|
wireServices(d)
|
|
vm := testVM("sync-mode", "image", "172.16.0.72")
|
|
vm.Runtime.WorkDiskPath = workDisk
|
|
if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
|
|
t.Fatalf("runFileSync: %v", err)
|
|
}
|
|
|
|
info, err := os.Stat(filepath.Join(workDisk, "bin", "my-script"))
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if info.Mode().Perm() != 0o755 {
|
|
t.Fatalf("mode = %v, want 0755", info.Mode().Perm())
|
|
}
|
|
}
|
|
|
|
// TestRunFileSyncSkipsMissingHostPath: a configured host path that does
// not exist is skipped with a log entry rather than failing the sync.
func TestRunFileSyncSkipsMissingHostPath(t *testing.T) {
	homeDir := t.TempDir()
	t.Setenv("HOME", homeDir)

	// Capture logs so the skip entry can be asserted on.
	var buf bytes.Buffer
	logger, _, err := newDaemonLogger(&buf, "info")
	if err != nil {
		t.Fatal(err)
	}

	workDisk := t.TempDir()
	d := &Daemon{
		runner: &filesystemRunner{t: t},
		logger: logger,
		config: model.DaemonConfig{
			FileSync: []model.FileSyncEntry{
				{Host: "~/does-not-exist", Guest: "~/wherever"},
			},
		},
	}
	wireServices(d)
	vm := testVM("sync-missing", "image", "172.16.0.73")
	vm.Runtime.WorkDiskPath = workDisk
	if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
		t.Fatalf("runFileSync: %v", err)
	}

	entries := parseLogEntries(t, buf.Bytes())
	if !hasLogEntry(entries, map[string]string{
		"msg":       "file_sync skipped",
		"vm_name":   vm.Name,
		"host_path": filepath.Join(homeDir, "does-not-exist"),
	}) {
		t.Fatalf("expected skipped log, got %v", entries)
	}
}
|
|
|
|
// TestRunFileSyncOverwritesExistingGuestFile: a pre-existing guest file
// at an absolute guest path is replaced by the host's current content.
func TestRunFileSyncOverwritesExistingGuestFile(t *testing.T) {
	homeDir := t.TempDir()
	t.Setenv("HOME", homeDir)
	srcPath := filepath.Join(homeDir, "token")
	if err := os.WriteFile(srcPath, []byte("fresh"), 0o600); err != nil {
		t.Fatal(err)
	}
	workDisk := t.TempDir()
	// Work disk is mounted at /root in the guest, so the guest path
	// "/root/token" maps to workDisk/token here.
	existing := filepath.Join(workDisk, "token")
	if err := os.WriteFile(existing, []byte("stale"), 0o600); err != nil {
		t.Fatal(err)
	}

	d := &Daemon{
		runner: &filesystemRunner{t: t},
		config: model.DaemonConfig{
			FileSync: []model.FileSyncEntry{
				{Host: "~/token", Guest: "/root/token"},
			},
		},
	}
	wireServices(d)
	vm := testVM("sync-overwrite", "image", "172.16.0.74")
	vm.Runtime.WorkDiskPath = workDisk
	if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
		t.Fatalf("runFileSync: %v", err)
	}

	got, err := os.ReadFile(existing)
	if err != nil {
		t.Fatal(err)
	}
	if string(got) != "fresh" {
		t.Fatalf("guest file = %q, want fresh", got)
	}
}
|
|
|
|
// TestRunFileSyncCopiesDirectoryRecursively: a directory entry copies
// the whole tree, including nested subdirectories and their files.
func TestRunFileSyncCopiesDirectoryRecursively(t *testing.T) {
	homeDir := t.TempDir()
	t.Setenv("HOME", homeDir)
	srcDir := filepath.Join(homeDir, ".aws")
	if err := os.MkdirAll(srcDir, 0o755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(srcDir, "credentials"), []byte("access"), 0o600); err != nil {
		t.Fatal(err)
	}
	// Nested subtree to prove the copy recurses more than one level.
	sub := filepath.Join(srcDir, "sso", "cache")
	if err := os.MkdirAll(sub, 0o755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(sub, "token.json"), []byte("sso-token"), 0o600); err != nil {
		t.Fatal(err)
	}

	workDisk := t.TempDir()
	d := &Daemon{
		runner: &filesystemRunner{t: t},
		config: model.DaemonConfig{
			FileSync: []model.FileSyncEntry{
				{Host: "~/.aws", Guest: "~/.aws"},
			},
		},
	}
	wireServices(d)
	vm := testVM("sync-dir", "image", "172.16.0.75")
	vm.Runtime.WorkDiskPath = workDisk
	if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
		t.Fatalf("runFileSync: %v", err)
	}

	creds, err := os.ReadFile(filepath.Join(workDisk, ".aws", "credentials"))
	if err != nil {
		t.Fatal(err)
	}
	if string(creds) != "access" {
		t.Fatalf("credentials = %q, want access", creds)
	}
	ssoToken, err := os.ReadFile(filepath.Join(workDisk, ".aws", "sso", "cache", "token.json"))
	if err != nil {
		t.Fatal(err)
	}
	if string(ssoToken) != "sso-token" {
		t.Fatalf("sso token = %q, want sso-token", ssoToken)
	}
}
|
|
|
|
func TestCreateVMRejectsNonPositiveCPUAndMemory(t *testing.T) {
|
|
d := &Daemon{}
|
|
wireServices(d)
|
|
if _, err := d.vm.CreateVM(context.Background(), api.VMCreateParams{VCPUCount: ptr(0)}); err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
|
|
t.Fatalf("CreateVM(vcpu=0) error = %v", err)
|
|
}
|
|
if _, err := d.vm.CreateVM(context.Background(), api.VMCreateParams{MemoryMiB: ptr(-1)}); err == nil || !strings.Contains(err.Error(), "memory must be a positive integer") {
|
|
t.Fatalf("CreateVM(memory=-1) error = %v", err)
|
|
}
|
|
}
|
|
|
|
// TestBeginVMCreateCompletesAndReturnsStatus exercises the async create
// path: BeginVMCreate returns an operation ID immediately and
// VMCreateStatus is polled until the background create reports done.
func TestBeginVMCreateCompletesAndReturnsStatus(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	db := openDaemonStore(t)
	image := testImage("default")
	image.ID = "default-image-id"
	image.Name = "default"
	if err := db.UpsertImage(ctx, image); err != nil {
		t.Fatalf("UpsertImage: %v", err)
	}

	d := &Daemon{
		store: db,
		layout: paths.Layout{
			VMsDir: t.TempDir(),
		},
		config: model.DaemonConfig{
			DefaultImageName: image.Name,
			BridgeIP:         model.DefaultBridgeIP,
		},
	}
	wireServices(d)

	// NoStart keeps the created VM stopped, so no firecracker process
	// is needed for the operation to complete.
	op, err := d.vm.BeginVMCreate(ctx, api.VMCreateParams{Name: "queued", NoStart: true})
	if err != nil {
		t.Fatalf("BeginVMCreate: %v", err)
	}
	if op.ID == "" {
		t.Fatal("operation id should be populated")
	}

	// Poll the status endpoint until Done or the 2s budget runs out.
	deadline := time.Now().Add(2 * time.Second)
	for time.Now().Before(deadline) {
		status, err := d.vm.VMCreateStatus(ctx, op.ID)
		if err != nil {
			t.Fatalf("VMCreateStatus: %v", err)
		}
		if !status.Done {
			time.Sleep(10 * time.Millisecond)
			continue
		}
		if !status.Success {
			t.Fatalf("status = %+v, want success", status)
		}
		if status.VM == nil || status.VM.Name != "queued" {
			t.Fatalf("status VM = %+v, want queued vm", status.VM)
		}
		if status.VM.State != model.VMStateStopped {
			t.Fatalf("status VM state = %s, want stopped", status.VM.State)
		}
		return
	}
	t.Fatal("vm create operation did not finish before timeout")
}
|
|
|
|
// TestCreateVMUsesDefaultsWhenCPUAndMemoryOmitted: omitted VCPUCount
// and MemoryMiB params fall back to the model package defaults.
func TestCreateVMUsesDefaultsWhenCPUAndMemoryOmitted(t *testing.T) {
	ctx := context.Background()
	db := openDaemonStore(t)
	image := testImage("default")
	if err := db.UpsertImage(ctx, image); err != nil {
		t.Fatalf("UpsertImage: %v", err)
	}
	d := &Daemon{
		store: db,
		layout: paths.Layout{
			VMsDir: t.TempDir(),
		},
		config: model.DaemonConfig{
			DefaultImageName: image.Name,
			BridgeIP:         model.DefaultBridgeIP,
		},
	}
	wireServices(d)

	// NoStart avoids needing a runnable firecracker environment.
	vm, err := d.vm.CreateVM(ctx, api.VMCreateParams{Name: "defaults", ImageName: image.Name, NoStart: true})
	if err != nil {
		t.Fatalf("CreateVM: %v", err)
	}
	if vm.Spec.VCPUCount != model.DefaultVCPUCount {
		t.Fatalf("VCPUCount = %d, want %d", vm.Spec.VCPUCount, model.DefaultVCPUCount)
	}
	if vm.Spec.MemoryMiB != model.DefaultMemoryMiB {
		t.Fatalf("MemoryMiB = %d, want %d", vm.Spec.MemoryMiB, model.DefaultMemoryMiB)
	}
}
|
|
|
|
func TestSetVMRejectsNonPositiveCPUAndMemory(t *testing.T) {
|
|
ctx := context.Background()
|
|
db := openDaemonStore(t)
|
|
vm := testVM("validate", "image-validate", "172.16.0.13")
|
|
upsertDaemonVM(t, ctx, db, vm)
|
|
d := &Daemon{store: db}
|
|
wireServices(d)
|
|
|
|
if _, err := d.vm.SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, VCPUCount: ptr(0)}); err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
|
|
t.Fatalf("SetVM(vcpu=0) error = %v", err)
|
|
}
|
|
if _, err := d.vm.SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, MemoryMiB: ptr(0)}); err == nil || !strings.Contains(err.Error(), "memory must be a positive integer") {
|
|
t.Fatalf("SetVM(memory=0) error = %v", err)
|
|
}
|
|
}
|
|
|
|
// TestCollectStatsIgnoresMalformedMetricsFile: a metrics file that is
// not valid JSON must be dropped (MetricsRaw nil) while the disk
// allocation sizes are still collected.
func TestCollectStatsIgnoresMalformedMetricsFile(t *testing.T) {
	t.Parallel()

	overlay := filepath.Join(t.TempDir(), "system.cow")
	workDisk := filepath.Join(t.TempDir(), "root.ext4")
	metrics := filepath.Join(t.TempDir(), "metrics.json")
	// Non-empty files so allocated-bytes figures come out non-zero.
	for _, path := range []string{overlay, workDisk} {
		if err := os.WriteFile(path, []byte("allocated"), 0o644); err != nil {
			t.Fatalf("WriteFile(%s): %v", path, err)
		}
	}
	if err := os.WriteFile(metrics, []byte("{not-json}\n"), 0o644); err != nil {
		t.Fatalf("WriteFile(metrics): %v", err)
	}

	d := &Daemon{}
	wireServices(d)
	stats, err := d.vm.collectStats(context.Background(), model.VMRecord{
		Runtime: model.VMRuntime{
			SystemOverlay: overlay,
			WorkDiskPath:  workDisk,
			MetricsPath:   metrics,
		},
	})
	if err != nil {
		t.Fatalf("collectStats: %v", err)
	}
	if stats.MetricsRaw != nil {
		t.Fatalf("MetricsRaw = %v, want nil for malformed metrics", stats.MetricsRaw)
	}
	if stats.SystemOverlayBytes == 0 || stats.WorkDiskBytes == 0 {
		t.Fatalf("allocated bytes not captured: %+v", stats)
	}
}
|
|
|
|
// TestValidateStartPrereqsReportsNATUplinkFailure: when `ip route show
// default` yields no default route, starting a NAT-enabled VM must fail
// with an uplink-discovery error.
func TestValidateStartPrereqsReportsNATUplinkFailure(t *testing.T) {
	ctx := context.Background()
	// Populate PATH with no-op stand-ins for every external tool the
	// prereq check probes, so only the scripted `ip` call matters.
	binDir := t.TempDir()
	for _, name := range []string{
		"sudo", "ip", "dmsetup", "losetup", "blockdev", "truncate", "pgrep", "ps",
		"chown", "chmod", "kill", "e2cp", "e2rm", "debugfs", "mkfs.ext4", "mount",
		"umount", "cp", "iptables", "sysctl",
	} {
		writeFakeExecutable(t, filepath.Join(binDir, name))
	}
	t.Setenv("PATH", binDir)

	firecrackerBin := filepath.Join(t.TempDir(), "firecracker")
	rootfsPath := filepath.Join(t.TempDir(), "rootfs.ext4")
	kernelPath := filepath.Join(t.TempDir(), "vmlinux")
	for _, path := range []string{firecrackerBin, rootfsPath, kernelPath} {
		writeFakeExecutable(t, path)
	}

	// The scripted output has a route line but no "default" entry.
	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			{call: runnerCall{name: "ip", args: []string{"route", "show", "default"}}, out: []byte("10.0.0.0/24 dev br-fc\n")},
		},
	}
	d := &Daemon{
		runner: runner,
		config: model.DaemonConfig{
			FirecrackerBin: firecrackerBin,
		},
	}
	wireServices(d)
	vm := testVM("nat", "image-nat", "172.16.0.12")
	vm.Spec.NATEnabled = true
	vm.Runtime.WorkDiskPath = filepath.Join(t.TempDir(), "missing-root.ext4")
	image := testImage("image-nat")
	image.RootfsPath = rootfsPath
	image.KernelPath = kernelPath

	err := d.vm.validateStartPrereqs(ctx, vm, image)
	if err == nil || !strings.Contains(err.Error(), "uplink interface for NAT") {
		t.Fatalf("validateStartPrereqs() error = %v, want NAT uplink failure", err)
	}
	runner.assertExhausted()
}
|
|
|
|
// TestCleanupRuntimeRediscoversLiveFirecrackerPID: when the cached
// handle PID is stale, cleanupRuntime must find the real firecracker
// process via the scripted pgrep-on-socket fallback and kill it.
func TestCleanupRuntimeRediscoversLiveFirecrackerPID(t *testing.T) {
	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	fake := startFakeFirecrackerProcess(t, apiSock)
	t.Cleanup(func() {
		if fake.ProcessState == nil || !fake.ProcessState.Exited() {
			_ = fake.Process.Kill()
			_ = fake.Wait()
		}
	})

	runner := &processKillingRunner{
		scriptedRunner: &scriptedRunner{
			t: t,
			steps: []runnerStep{
				{call: runnerCall{name: "pgrep", args: []string{"-n", "-f", apiSock}}, out: []byte(strconv.Itoa(fake.Process.Pid) + "\n")},
				sudoStep("", nil, "kill", "-KILL", strconv.Itoa(fake.Process.Pid)),
			},
		},
		proc: fake,
	}
	d := &Daemon{runner: runner}
	wireServices(d)
	vm := testVM("cleanup", "image-cleanup", "172.16.0.22")
	vm.Runtime.APISockPath = apiSock
	// Seed a stale PID so cleanupRuntime's findFirecrackerPID pgrep
	// fallback wins — it rediscovers fake.Process.Pid from apiSock.
	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid + 999})

	if err := d.vm.cleanupRuntime(context.Background(), vm, true); err != nil {
		t.Fatalf("cleanupRuntime returned error: %v", err)
	}
	runner.assertExhausted()
	if systemProcessRunning(fake.Process.Pid, apiSock) {
		t.Fatalf("fake firecracker pid %d still looks running", fake.Process.Pid)
	}
}
|
|
|
|
func TestDeleteStoppedNATVMDoesNotFailWithoutTapDevice(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
ctx := context.Background()
|
|
db := openDaemonStore(t)
|
|
vmDir := filepath.Join(t.TempDir(), "stopped-nat-vm")
|
|
if err := os.MkdirAll(vmDir, 0o755); err != nil {
|
|
t.Fatalf("MkdirAll: %v", err)
|
|
}
|
|
|
|
vm := testVM("stopped-nat", "image-stopped-nat", "172.16.0.24")
|
|
vm.Spec.NATEnabled = true
|
|
vm.Runtime.VMDir = vmDir
|
|
vm.State = model.VMStateStopped
|
|
vm.Runtime.State = model.VMStateStopped
|
|
upsertDaemonVM(t, ctx, db, vm)
|
|
|
|
d := &Daemon{store: db}
|
|
wireServices(d)
|
|
deleted, err := d.vm.DeleteVM(ctx, vm.Name)
|
|
if err != nil {
|
|
t.Fatalf("DeleteVM: %v", err)
|
|
}
|
|
if deleted.ID != vm.ID {
|
|
t.Fatalf("deleted VM = %+v, want %s", deleted, vm.ID)
|
|
}
|
|
if _, err := db.GetVMByID(ctx, vm.ID); err == nil {
|
|
t.Fatal("expected VM record to be deleted")
|
|
}
|
|
if _, err := os.Stat(vmDir); !os.IsNotExist(err) {
|
|
t.Fatalf("vm dir still exists or stat failed: %v", err)
|
|
}
|
|
}
|
|
|
|
// TestStopVMFallsBackToForcedCleanupAfterGracefulTimeout: the fake API
// accepts the shutdown action but the fake process never exits, so
// after the (shrunken) graceful window StopVM must fall back to the
// scripted pgrep + kill -KILL path and still report stopped.
func TestStopVMFallsBackToForcedCleanupAfterGracefulTimeout(t *testing.T) {
	ctx := context.Background()
	db := openDaemonStore(t)
	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	startFakeFirecrackerAPI(t, apiSock)

	fake := startFakeFirecrackerProcess(t, apiSock)
	t.Cleanup(func() {
		if fake.ProcessState == nil || !fake.ProcessState.Exited() {
			_ = fake.Process.Kill()
			_ = fake.Wait()
		}
	})

	// Shrink the graceful window so the forced path triggers quickly;
	// restored on cleanup because it is package-level state.
	oldGracefulWait := gracefulShutdownWait
	gracefulShutdownWait = 50 * time.Millisecond
	t.Cleanup(func() {
		gracefulShutdownWait = oldGracefulWait
	})

	vm := testVM("stubborn", "image-stubborn", "172.16.0.23")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock
	upsertDaemonVM(t, ctx, db, vm)

	runner := &processKillingRunner{
		scriptedRunner: &scriptedRunner{
			t: t,
			steps: []runnerStep{
				sudoStep("", nil, "chown", fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()), apiSock),
				sudoStep("", nil, "chmod", "600", apiSock),
				{call: runnerCall{name: "pgrep", args: []string{"-n", "-f", apiSock}}, out: []byte(strconv.Itoa(fake.Process.Pid) + "\n")},
				sudoStep("", nil, "kill", "-KILL", strconv.Itoa(fake.Process.Pid)),
			},
		},
		proc: fake,
	}
	d := &Daemon{store: db, runner: runner}
	wireServices(d)
	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})

	got, err := d.vm.StopVM(ctx, vm.ID)
	if err != nil {
		t.Fatalf("StopVM returned error: %v", err)
	}
	runner.assertExhausted()
	if got.State != model.VMStateStopped || got.Runtime.State != model.VMStateStopped {
		t.Fatalf("StopVM state = %s/%s, want stopped", got.State, got.Runtime.State)
	}
	// APISockPath + VSock paths are deterministic — they stay on the
	// record for debugging and next-start reuse even after stop. The
	// post-stop invariant is that the in-memory cache is empty.
	if h, ok := d.vm.handles.get(vm.ID); ok && !h.IsZero() {
		t.Fatalf("handle cache not cleared: %+v", h)
	}
}
|
|
|
|
// TestWithVMLockByIDSerializesSameVM: the per-VM lock is exclusive — a
// second caller for the same VM ID must not enter its critical section
// until the first releases.
func TestWithVMLockByIDSerializesSameVM(t *testing.T) {
	ctx := context.Background()
	db := openDaemonStore(t)
	vm := testVM("serial", "image-serial", "172.16.0.30")
	upsertDaemonVM(t, ctx, db, vm)
	d := &Daemon{store: db}
	wireServices(d)

	firstEntered := make(chan struct{})
	releaseFirst := make(chan struct{})
	secondEntered := make(chan struct{})
	errCh := make(chan error, 2)

	// First holder: signals entry, then blocks holding the lock.
	go func() {
		_, err := d.vm.withVMLockByID(ctx, vm.ID, func(vm model.VMRecord) (model.VMRecord, error) {
			close(firstEntered)
			<-releaseFirst
			return vm, nil
		})
		errCh <- err
	}()

	select {
	case <-firstEntered:
	case <-time.After(500 * time.Millisecond):
		t.Fatal("first lock holder did not enter")
	}

	// Second holder for the SAME VM: must queue behind the first.
	go func() {
		_, err := d.vm.withVMLockByID(ctx, vm.ID, func(vm model.VMRecord) (model.VMRecord, error) {
			close(secondEntered)
			return vm, nil
		})
		errCh <- err
	}()

	// Give the second holder a window in which entering would be a bug.
	select {
	case <-secondEntered:
		t.Fatal("second same-vm lock holder entered before release")
	case <-time.After(150 * time.Millisecond):
	}

	close(releaseFirst)

	select {
	case <-secondEntered:
	case <-time.After(500 * time.Millisecond):
		t.Fatal("second same-vm lock holder never entered")
	}

	for i := 0; i < 2; i++ {
		if err := <-errCh; err != nil {
			t.Fatalf("withVMLockByID returned error: %v", err)
		}
	}
}
|
|
|
|
// TestWithVMLockByIDAllowsDifferentVMsConcurrently: the lock is per-VM
// — holders for two distinct IDs overlap instead of queueing.
func TestWithVMLockByIDAllowsDifferentVMsConcurrently(t *testing.T) {
	ctx := context.Background()
	db := openDaemonStore(t)
	vmA := testVM("alpha-lock", "image-alpha", "172.16.0.31")
	vmB := testVM("bravo-lock", "image-bravo", "172.16.0.32")
	for _, vm := range []model.VMRecord{vmA, vmB} {
		upsertDaemonVM(t, ctx, db, vm)
	}
	d := &Daemon{store: db}
	wireServices(d)

	started := make(chan string, 2)
	release := make(chan struct{})
	errCh := make(chan error, 2)
	run := func(id string) {
		_, err := d.vm.withVMLockByID(ctx, id, func(vm model.VMRecord) (model.VMRecord, error) {
			started <- vm.ID
			<-release // hold until both are expected to have entered
			return vm, nil
		})
		errCh <- err
	}

	go run(vmA.ID)
	go run(vmB.ID)

	// Both must enter before either releases — proof of no cross-VM
	// serialization.
	for i := 0; i < 2; i++ {
		select {
		case <-started:
		case <-time.After(500 * time.Millisecond):
			t.Fatal("different VM locks did not overlap")
		}
	}

	close(release)

	for i := 0; i < 2; i++ {
		if err := <-errCh; err != nil {
			t.Fatalf("withVMLockByID returned error: %v", err)
		}
	}
}
|
|
|
|
func openDaemonStore(t *testing.T) *store.Store {
|
|
t.Helper()
|
|
db, err := store.Open(filepath.Join(t.TempDir(), "state.db"))
|
|
if err != nil {
|
|
t.Fatalf("store.Open: %v", err)
|
|
}
|
|
t.Cleanup(func() {
|
|
_ = db.Close()
|
|
})
|
|
return db
|
|
}
|
|
|
|
func upsertDaemonVM(t *testing.T, ctx context.Context, db *store.Store, vm model.VMRecord) {
|
|
t.Helper()
|
|
image := testImage(vm.ImageID)
|
|
image.ID = vm.ImageID
|
|
image.Name = vm.ImageID
|
|
if err := db.UpsertImage(ctx, image); err != nil {
|
|
t.Fatalf("UpsertImage(%s): %v", image.Name, err)
|
|
}
|
|
if err := db.UpsertVM(ctx, vm); err != nil {
|
|
t.Fatalf("UpsertVM(%s): %v", vm.Name, err)
|
|
}
|
|
}
|
|
|
|
// testVM builds a stopped VMRecord fixture: deterministic timestamps,
// name-derived ID/DNS name, and /state-rooted runtime paths.
func testVM(name, imageID, guestIP string) model.VMRecord {
	// Fixed clock keeps records comparable across test runs.
	now := time.Date(2026, time.March, 16, 12, 0, 0, 0, time.UTC)
	return model.VMRecord{
		ID:            name + "-id",
		Name:          name,
		ImageID:       imageID,
		State:         model.VMStateStopped,
		CreatedAt:     now,
		UpdatedAt:     now,
		LastTouchedAt: now,
		Spec: model.VMSpec{
			VCPUCount:             2,
			MemoryMiB:             1024,
			SystemOverlaySizeByte: model.DefaultSystemOverlaySize,
			WorkDiskSizeBytes:     model.DefaultWorkDiskSize,
		},
		Runtime: model.VMRuntime{
			State:         model.VMStateStopped,
			GuestIP:       guestIP,
			DNSName:       name + ".vm",
			VMDir:         filepath.Join("/state", name),
			SystemOverlay: filepath.Join("/state", name, "system.cow"),
			WorkDiskPath:  filepath.Join("/state", name, "root.ext4"),
		},
	}
}
|
|
|
|
// testImage builds an Image fixture with a name-derived ID and
// placeholder rootfs/kernel paths; timestamps are fixed for
// reproducibility.
func testImage(name string) model.Image {
	now := time.Date(2026, time.March, 16, 12, 0, 0, 0, time.UTC)
	return model.Image{
		ID:         name + "-id",
		Name:       name,
		RootfsPath: filepath.Join("/images", name+".ext4"),
		KernelPath: filepath.Join("/kernels", name),
		CreatedAt:  now,
		UpdatedAt:  now,
	}
}
|
|
|
|
func TestMergeAuthorizedKey(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
managed := []byte("ssh-ed25519 AAAATESTKEY banger\n")
|
|
existing := []byte("ssh-ed25519 AAAAOTHER other\n")
|
|
merged := mergeAuthorizedKey(existing, managed)
|
|
got := string(merged)
|
|
if !strings.Contains(got, "ssh-ed25519 AAAAOTHER other") {
|
|
t.Fatalf("merged keys dropped existing entry: %q", got)
|
|
}
|
|
if !strings.Contains(got, "ssh-ed25519 AAAATESTKEY banger") {
|
|
t.Fatalf("merged keys missing managed entry: %q", got)
|
|
}
|
|
if strings.Count(got, "ssh-ed25519 AAAATESTKEY banger") != 1 {
|
|
t.Fatalf("managed key duplicated in %q", got)
|
|
}
|
|
|
|
merged = mergeAuthorizedKey(merged, managed)
|
|
if strings.Count(string(merged), "ssh-ed25519 AAAATESTKEY banger") != 1 {
|
|
t.Fatalf("managed key duplicated after second merge: %q", string(merged))
|
|
}
|
|
}
|
|
|
|
// startFakeFirecrackerProcess spawns a `sleep 30` whose argv[0] is
// rewritten via bash's `exec -a` to read "firecracker --api-sock ...",
// so /proc cmdline checks and pgrep -f patterns match it. It waits up
// to 2s for the process to look running before returning; on timeout
// the process is reaped and the test fails.
func startFakeFirecrackerProcess(t *testing.T, apiSock string) *exec.Cmd {
	t.Helper()

	cmd := exec.Command("bash", "-lc", fmt.Sprintf("exec -a %q sleep 30", "firecracker --api-sock "+apiSock))
	if err := cmd.Start(); err != nil {
		t.Fatalf("start fake firecracker: %v", err)
	}

	deadline := time.Now().Add(2 * time.Second)
	for time.Now().Before(deadline) {
		if cmd.Process != nil && cmd.Process.Pid > 0 && systemProcessRunning(cmd.Process.Pid, apiSock) {
			return cmd
		}
		time.Sleep(20 * time.Millisecond)
	}
	// Never looked running: reap it so nothing leaks past the test.
	_ = cmd.Process.Kill()
	_ = cmd.Wait()
	t.Fatalf("fake firecracker process never looked running for %s", apiSock)
	return nil
}
|
|
|
|
// startFakeFirecrackerAPI serves a minimal firecracker HTTP API on the
// unix socket apiSock. Only PUT /actions is implemented (replies 204);
// everything else 404s via the mux. Server and socket are torn down in
// t.Cleanup.
func startFakeFirecrackerAPI(t *testing.T, apiSock string) {
	t.Helper()

	if err := os.MkdirAll(filepath.Dir(apiSock), 0o755); err != nil {
		t.Fatalf("MkdirAll(%s): %v", filepath.Dir(apiSock), err)
	}
	listener, err := net.Listen("unix", apiSock)
	if err != nil {
		// Sandboxes that forbid unix sockets skip instead of failing.
		skipIfSocketRestricted(t, err)
		t.Fatalf("listen unix %s: %v", apiSock, err)
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/actions", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPut {
			http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
			return
		}
		w.WriteHeader(http.StatusNoContent)
	})
	server := &http.Server{Handler: mux}
	go func() {
		_ = server.Serve(listener)
	}()
	t.Cleanup(func() {
		_ = server.Close()
		_ = os.Remove(apiSock)
	})
}
|
|
|
|
// skipIfSocketRestricted skips the current test when err looks like the
// sandbox forbids socket creation ("operation not permitted"); a nil or
// unrelated error passes straight through.
func skipIfSocketRestricted(t *testing.T, err error) {
	t.Helper()
	if err == nil {
		return
	}
	msg := strings.ToLower(err.Error())
	if strings.Contains(msg, "operation not permitted") {
		t.Skipf("socket creation is restricted in this environment: %v", err)
	}
}
|
|
|
|
// startHTTPServerOnTCP4 serves handler on an ephemeral 127.0.0.1 port
// (IPv4 only) and returns the bound address. The server is closed via
// t.Cleanup.
func startHTTPServerOnTCP4(t *testing.T, handler http.Handler) *net.TCPAddr {
	t.Helper()
	listener, err := net.Listen("tcp4", "127.0.0.1:0")
	if err != nil {
		// Sandboxes that forbid sockets skip instead of failing.
		skipIfSocketRestricted(t, err)
		t.Fatalf("listen http: %v", err)
	}
	server := &http.Server{Handler: handler}
	go func() {
		_ = server.Serve(listener)
	}()
	t.Cleanup(func() {
		_ = server.Close()
	})
	return listener.Addr().(*net.TCPAddr)
}
|
|
|
|
func startHTTPSServerOnTCP4(t *testing.T, handler http.Handler) *net.TCPAddr {
|
|
t.Helper()
|
|
privateKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
|
if err != nil {
|
|
t.Fatalf("GenerateKey: %v", err)
|
|
}
|
|
template := &x509.Certificate{
|
|
SerialNumber: big.NewInt(1),
|
|
NotBefore: time.Now().Add(-time.Hour),
|
|
NotAfter: time.Now().Add(time.Hour),
|
|
IPAddresses: []net.IP{net.ParseIP("127.0.0.1")},
|
|
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
|
|
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
|
}
|
|
der, err := x509.CreateCertificate(rand.Reader, template, template, &privateKey.PublicKey, privateKey)
|
|
if err != nil {
|
|
t.Fatalf("CreateCertificate: %v", err)
|
|
}
|
|
certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
|
|
keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
|
|
cert, err := tls.X509KeyPair(certPEM, keyPEM)
|
|
if err != nil {
|
|
t.Fatalf("X509KeyPair: %v", err)
|
|
}
|
|
listener, err := net.Listen("tcp4", "127.0.0.1:0")
|
|
if err != nil {
|
|
skipIfSocketRestricted(t, err)
|
|
t.Fatalf("listen https: %v", err)
|
|
}
|
|
server := &http.Server{Handler: handler}
|
|
go func() {
|
|
_ = server.Serve(tls.NewListener(listener, &tls.Config{Certificates: []tls.Certificate{cert}}))
|
|
}()
|
|
t.Cleanup(func() {
|
|
_ = server.Close()
|
|
})
|
|
return listener.Addr().(*net.TCPAddr)
|
|
}
|
|
|
|
// testSetGitConfig sets a key in the global git config (tests redirect
// it via GIT_CONFIG_GLOBAL) and fails the test on any git error.
func testSetGitConfig(t *testing.T, key, value string) {
	t.Helper()

	out, runErr := exec.Command("git", "config", "--global", key, value).CombinedOutput()
	if runErr != nil {
		t.Fatalf("git config --global %s: %v: %s", key, runErr, strings.TrimSpace(string(out)))
	}
}
|
|
|
|
// testGitConfigValue reads a single key from the git config file at
// configPath, failing the test if git errors; the value is returned
// with surrounding whitespace trimmed.
func testGitConfigValue(t *testing.T, configPath, key string) string {
	t.Helper()

	out, runErr := exec.Command("git", "config", "--file", configPath, "--get", key).CombinedOutput()
	if runErr != nil {
		t.Fatalf("git config --file %s --get %s: %v: %s", configPath, key, runErr, strings.TrimSpace(string(out)))
	}
	return strings.TrimSpace(string(out))
}
|
|
|
|
// processKillingRunner wraps a scriptedRunner and, after a scripted
// `kill -KILL` step succeeds, actually kills the attached fake process
// so liveness checks after the call observe a dead process.
type processKillingRunner struct {
	*scriptedRunner
	// proc is the fake firecracker process to kill for real.
	proc *exec.Cmd
}
|
|
|
|
// filesystemRunner is a test command runner: Run executes real `git`
// invocations only, and RunSudo emulates a closed set of privileged
// commands with plain filesystem operations (no root needed).
type filesystemRunner struct {
	t *testing.T
}
|
|
|
|
// Run executes real `git` commands, folding stderr into the returned
// error for diagnosability; any other command is rejected so the stub
// stays hermetic.
func (r *filesystemRunner) Run(ctx context.Context, name string, args ...string) ([]byte, error) {
	r.t.Helper()
	if name == "git" {
		cmd := exec.CommandContext(ctx, name, args...)
		var stdout bytes.Buffer
		var stderr bytes.Buffer
		cmd.Stdout = &stdout
		cmd.Stderr = &stderr
		if err := cmd.Run(); err != nil {
			// Attach trimmed stderr when present; %w keeps the exec
			// error inspectable via errors.Is/As.
			if stderr.Len() > 0 {
				return stdout.Bytes(), fmt.Errorf("%w: %s", err, strings.TrimSpace(stderr.String()))
			}
			return stdout.Bytes(), err
		}
		return stdout.Bytes(), nil
	}
	return nil, fmt.Errorf("unexpected Run call: %s %v", name, args)
}
|
|
|
|
// RunSudo emulates the privileged commands the code under test issues,
// using unprivileged filesystem operations so tests run without root.
// Unrecognised commands and argument shapes fail loudly to keep the
// emulated surface a closed, auditable set.
func (r *filesystemRunner) RunSudo(ctx context.Context, args ...string) ([]byte, error) {
	r.t.Helper()
	if len(args) == 0 {
		return nil, errors.New("missing sudo command")
	}
	switch args[0] {
	case "mount":
		if len(args) != 3 {
			return nil, fmt.Errorf("unexpected mount args: %v", args)
		}
		// Fake a mount by replacing the mount point with a symlink to
		// the source, so paths "through the mount" resolve to source.
		source, mountDir := args[1], args[2]
		if err := os.Remove(mountDir); err != nil {
			return nil, err
		}
		if err := os.Symlink(source, mountDir); err != nil {
			return nil, err
		}
		return nil, nil
	case "umount":
		// No-op: the symlink "mount" needs no teardown here.
		return nil, nil
	case "chmod":
		if len(args) != 3 {
			return nil, fmt.Errorf("unexpected chmod args: %v", args)
		}
		mode, err := strconv.ParseUint(args[1], 8, 32)
		if err != nil {
			return nil, err
		}
		return nil, os.Chmod(args[2], os.FileMode(mode))
	case "cp":
		if len(args) != 4 || args[1] != "-a" {
			return nil, fmt.Errorf("unexpected cp args: %v", args)
		}
		return nil, copyIntoDir(args[2], args[3])
	case "rm":
		if len(args) != 3 || args[1] != "-rf" {
			return nil, fmt.Errorf("unexpected rm args: %v", args)
		}
		return nil, os.RemoveAll(args[2])
	case "mkdir":
		if len(args) != 3 || args[1] != "-p" {
			return nil, fmt.Errorf("unexpected mkdir args: %v", args)
		}
		return nil, os.MkdirAll(args[2], 0o755)
	case "cat":
		if len(args) != 2 {
			return nil, fmt.Errorf("unexpected cat args: %v", args)
		}
		return os.ReadFile(args[1])
	case "install":
		// Minimal install(1): expected forms are
		//   install -m MODE SRC DST            (5 args)
		//   install -o 0 -g 0 -m MODE SRC DST  (9 args, ignored owners)
		src, dst, mode, err := parseInstallArgs(args)
		if err != nil {
			return nil, err
		}
		data, err := os.ReadFile(src)
		if err != nil {
			return nil, err
		}
		if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil {
			return nil, err
		}
		return nil, os.WriteFile(dst, data, os.FileMode(mode))
	case "chown":
		// Recognised forms, both no-op under test (we run as the test
		// user and os.Chown would need CAP_CHOWN):
		//   chown OWNER TARGET
		//   chown -R OWNER TARGET
		switch {
		case len(args) == 3:
			return nil, nil
		case len(args) == 4 && args[1] == "-R":
			return nil, nil
		default:
			return nil, fmt.Errorf("unexpected chown args: %v", args)
		}
	default:
		return nil, fmt.Errorf("unexpected sudo command: %v", args)
	}
}
|
|
|
|
// parseInstallArgs recognises the `install` invocations banger emits
|
|
// and returns (source, destination, parsed mode). Anything else is an
|
|
// error so the test stub stays a closed set.
|
|
func parseInstallArgs(args []string) (string, string, os.FileMode, error) {
|
|
switch len(args) {
|
|
case 5:
|
|
if args[1] != "-m" {
|
|
return "", "", 0, fmt.Errorf("unexpected install args: %v", args)
|
|
}
|
|
mode, err := strconv.ParseUint(args[2], 8, 32)
|
|
if err != nil {
|
|
return "", "", 0, err
|
|
}
|
|
return args[3], args[4], os.FileMode(mode), nil
|
|
case 9:
|
|
if args[1] != "-o" || args[3] != "-g" || args[5] != "-m" {
|
|
return "", "", 0, fmt.Errorf("unexpected install args: %v", args)
|
|
}
|
|
mode, err := strconv.ParseUint(args[6], 8, 32)
|
|
if err != nil {
|
|
return "", "", 0, err
|
|
}
|
|
return args[7], args[8], os.FileMode(mode), nil
|
|
}
|
|
return "", "", 0, fmt.Errorf("unexpected install args: %v", args)
|
|
}
|
|
|
|
func copyIntoDir(sourcePath, targetDir string) error {
|
|
targetDir = strings.TrimSuffix(targetDir, "/")
|
|
info, err := os.Stat(sourcePath)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
destPath := filepath.Join(targetDir, filepath.Base(sourcePath))
|
|
if info.IsDir() {
|
|
if err := os.MkdirAll(destPath, info.Mode().Perm()); err != nil {
|
|
return err
|
|
}
|
|
entries, err := os.ReadDir(sourcePath)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
for _, entry := range entries {
|
|
if err := copyIntoDir(filepath.Join(sourcePath, entry.Name()), destPath); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return os.Chmod(destPath, info.Mode().Perm())
|
|
}
|
|
data, err := os.ReadFile(sourcePath)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if err := os.MkdirAll(filepath.Dir(destPath), 0o755); err != nil {
|
|
return err
|
|
}
|
|
return os.WriteFile(destPath, data, info.Mode().Perm())
|
|
}
|
|
|
|
// Run forwards to the embedded scriptedRunner unchanged.
// NOTE(review): embedding already promotes Run, so this explicit
// wrapper is redundant — harmless, but removable.
func (r *processKillingRunner) Run(ctx context.Context, name string, args ...string) ([]byte, error) {
	return r.scriptedRunner.Run(ctx, name, args...)
}
|
|
|
|
// RunSudo consumes the scripted step first; if that step was a
// `kill -KILL <pid>` and the attached process is still alive, it
// actually kills and reaps it so subsequent liveness checks see the
// process gone.
func (r *processKillingRunner) RunSudo(ctx context.Context, args ...string) ([]byte, error) {
	out, err := r.scriptedRunner.RunSudo(ctx, args...)
	if err != nil {
		return out, err
	}
	if len(args) >= 3 && args[0] == "kill" && args[1] == "-KILL" && r.proc != nil && (r.proc.ProcessState == nil || !r.proc.ProcessState.Exited()) {
		_ = r.proc.Process.Kill()
		_ = r.proc.Wait()
	}
	return out, nil
}
|
|
|
|
// systemProcessRunning reports whether /proc/<pid>/cmdline still looks
// like a firecracker process bound to apiSock. A missing /proc entry
// (process gone, or not on Linux) reads as not running.
func systemProcessRunning(pid int, apiSock string) bool {
	raw, readErr := os.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "cmdline"))
	if readErr != nil {
		return false
	}
	// cmdline args are NUL-separated; flatten for substring matching.
	cmdline := strings.ReplaceAll(string(raw), "\x00", " ")
	return strings.Contains(cmdline, "firecracker") && strings.Contains(cmdline, apiSock)
}
|
|
|
|
func writeFakeExecutable(t *testing.T, path string) {
|
|
t.Helper()
|
|
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
|
|
t.Fatalf("MkdirAll(%s): %v", filepath.Dir(path), err)
|
|
}
|
|
if err := os.WriteFile(path, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
|
|
t.Fatalf("WriteFile(%s): %v", path, err)
|
|
}
|
|
}
|
|
|
|
// ptr returns a pointer to a copy of v — convenient for populating
// optional pointer fields in API param structs.
func ptr[T any](v T) *T {
	return &v
}
|