Separates what a VM IS (durable intent + identity + deterministic
derived paths — `VMRuntime`) from what is CURRENTLY TRUE about it
(firecracker PID, tap device, loop devices, dm-snapshot target — new
`VMHandles`). The durable state lives in the SQLite `vms` row; the
transient state lives in an in-memory cache on the daemon plus a
per-VM `handles.json` scratch file inside VMDir, rebuilt at startup
from OS inspection. Nothing kernel-level rides the SQLite schema
anymore.
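For concreteness, a minimal sketch of the transient half (field
names from the Shape list below; the types and JSON tags are
assumptions, not the real definitions):

    // VMHandles is what the kernel/OS currently holds for one VM.
    // Cached in memory on the daemon, mirrored to VMDir/handles.json,
    // never written to SQLite.
    type VMHandles struct {
        PID       int    `json:"pid"`
        TapDevice string `json:"tap_device"`
        BaseLoop  string `json:"base_loop"`
        COWLoop   string `json:"cow_loop"`
        DMName    string `json:"dm_name"`
        DMDev     string `json:"dm_dev"`
    }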
Why:
Persisting ephemeral process handles to SQLite forced reconcile to
treat "running with a stale PID" as a first-class case and mix it
with real state transitions. The schema described what we last
observed, not what the VM is. Every time the observation model
shifted (tap pool, DM naming, pgrep fallback) the reconcile logic
grew a new branch. Splitting lets each layer own what it's good at:
durable records describe intent, in-memory cache + scratch file
describe momentary reality.
Shape:
- `model.VMHandles` = PID, TapDevice, BaseLoop, COWLoop, DMName,
  DMDev. Never in SQLite.
- `VMRuntime` keeps: State, GuestIP, APISockPath, VSockPath,
  VSockCID, LogPath, MetricsPath, DNSName, VMDir, SystemOverlay,
  WorkDiskPath, LastError. All durable or deterministic.
- `handleCache` on `*Daemon` — mutex-guarded map + scratch-file
  plumbing (`writeHandlesFile` / `readHandlesFile` /
  `rediscoverHandles`). See `internal/daemon/vm_handles.go` and the
  cache sketch after this list.
- `d.vmAlive(vm)` replaces the 20+ inline
  `vm.State==Running && ProcessRunning(vm.Runtime.PID, apiSock)`
  call sites. Single source of truth for liveness.
- Startup reconcile: per running VM, load the scratch file, pgrep
  the api sock, then either keep it (cache seeded from scratch) or
  demote it to stopped (scratch handles passed to cleanupRuntime
  first so DM / loops / tap actually get torn down). Sketched after
  this list.
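The cache and the liveness predicate could look roughly like this (a
sketch under the names above; the lock choice, map key, and exact
signatures are assumptions):

    type handleCache struct {
        mu sync.Mutex
        m  map[string]model.VMHandles // keyed by VM ID
    }

    func (c *handleCache) get(id string) (model.VMHandles, bool) {
        c.mu.Lock()
        defer c.mu.Unlock()
        h, ok := c.m[id]
        return h, ok
    }

    // vmAlive: the durable row says Running AND the cached PID still
    // answers on the API socket.
    func (d *Daemon) vmAlive(vm *model.VM) bool {
        if vm.State != model.VMStateRunning {
            return false
        }
        h, ok := d.handles.get(vm.ID)
        return ok && ProcessRunning(h.PID, vm.Runtime.APISockPath)
    }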
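And the startup path per running VM, roughly (`rediscoverHandles` and
`cleanupRuntime` are the real names; the signatures and surrounding
helpers here are illustrative only):

    func (d *Daemon) reconcileVM(ctx context.Context, vm *model.VM) error {
        // rediscoverHandles prefers live OS inspection (pgrep against
        // the api sock) over the scratch file, but still returns the
        // scratch contents when the process is dead so cleanup can
        // tear down kernel state.
        handles, alive := d.rediscoverHandles(vm)
        if alive {
            d.handles.set(vm.ID, handles) // keep Running, seed the cache
            return nil
        }
        d.cleanupRuntime(ctx, vm, handles) // DM target, loops, tap
        vm.State = model.VMStateStopped
        return d.store.UpdateVM(ctx, vm)
    }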
Verification:
- `go test ./...` green.
- Live: `banger vm run --name handles-test -- cat /etc/hostname`
starts; `handles.json` appears in VMDir with the expected PID,
tap, loops, DM.
- `kill -9 $(pgrep bangerd)` while the VM is running, re-invoke the
CLI, daemon auto-starts, reconcile recognises the VM as alive,
`banger vm ssh` still connects, `banger vm delete` cleans up.
Tests added:
- vm_handles_test.go: scratch-file roundtrip, missing/corrupt file
behaviour, cache concurrency, rediscoverHandles prefers pgrep
over scratch, returns scratch contents even when process is
dead (so cleanup can tear down kernel state).
- vm_test.go: reconcile test rewritten to exercise the new flow
(write scratch → reconcile reads it → verifies process is gone →
issues dmsetup/losetup teardown).
ARCHITECTURE.md updated; `handles` added to Daemon field docs.

package daemon

import (
	"context"
	"io"
	"log/slog"
	"os"
	"path/filepath"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"banger/internal/api"
	"banger/internal/daemon/workspace"
	"banger/internal/model"
)

// exportGuestClient is a scriptable fake for RunScriptOutput used in export tests.
// Each call to RunScriptOutput returns the next response from the queue.
type exportGuestClient struct {
	responses []exportGuestResponse
	scripts   []string
	callIndex int
}

type exportGuestResponse struct {
	output []byte
	err    error
}

func (e *exportGuestClient) Close() error { return nil }

func (e *exportGuestClient) RunScript(_ context.Context, _ string, _ io.Writer) error {
	return nil
}

func (e *exportGuestClient) RunScriptOutput(_ context.Context, script string) ([]byte, error) {
	e.scripts = append(e.scripts, script)
	if e.callIndex >= len(e.responses) {
		return nil, nil
	}
	r := e.responses[e.callIndex]
	e.callIndex++
	return r.output, r.err
}

func (e *exportGuestClient) UploadFile(_ context.Context, _ string, _ os.FileMode, _ []byte, _ io.Writer) error {
	return nil
}

func (e *exportGuestClient) StreamTar(_ context.Context, _ string, _ string, _ io.Writer) error {
	return nil
}

func (e *exportGuestClient) StreamTarEntries(_ context.Context, _ string, _ []string, _ string, _ io.Writer) error {
	return nil
}

func newExportTestDaemonStore(t *testing.T, fake *exportGuestClient) *Daemon {
	t.Helper()
	db := openDaemonStore(t)
	d := &Daemon{
		store:  db,
		config: model.DaemonConfig{SSHKeyPath: filepath.Join(t.TempDir(), "id_ed25519")},
		logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
	}
	d.guestDial = func(_ context.Context, _ string, _ string) (guestSSHClient, error) {
		return fake, nil
	}
	return d
}

func TestExportVMWorkspace_HappyPath(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	firecracker := startFakeFirecracker(t, apiSock)

	vm := testVM("exportbox", "image-export", "172.16.0.100")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock

	patch := []byte("diff --git a/file.go b/file.go\nindex 0000000..1111111 100644\n")
	names := []byte("file.go\n")

	fake := &exportGuestClient{
		responses: []exportGuestResponse{
			{output: patch},
			{output: names},
		},
	}
	d := newExportTestDaemonStore(t, fake)
	upsertDaemonVM(t, ctx, d.store, vm)
	d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

	result, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
		IDOrName:  vm.Name,
		GuestPath: "/root/repo",
	})
	if err != nil {
		t.Fatalf("ExportVMWorkspace: %v", err)
	}
	if !result.HasChanges {
		t.Fatal("HasChanges = false, want true")
	}
	if string(result.Patch) != string(patch) {
		t.Fatalf("Patch = %q, want %q", result.Patch, patch)
	}
	if result.GuestPath != "/root/repo" {
		t.Fatalf("GuestPath = %q, want /root/repo", result.GuestPath)
	}
	if len(result.ChangedFiles) != 1 || result.ChangedFiles[0] != "file.go" {
		t.Fatalf("ChangedFiles = %v, want [file.go]", result.ChangedFiles)
	}
	if fake.callIndex != 2 {
		t.Fatalf("RunScriptOutput call count = %d, want 2", fake.callIndex)
	}
	// No base_commit provided: diff ref must be HEAD.
	for _, script := range fake.scripts {
		if !strings.Contains(script, "HEAD") {
			t.Fatalf("script missing HEAD ref: %q", script)
		}
	}
	if result.BaseCommit != "HEAD" {
		t.Fatalf("BaseCommit = %q, want HEAD", result.BaseCommit)
	}
}

func TestExportVMWorkspace_WithBaseCommit(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	firecracker := startFakeFirecracker(t, apiSock)

	vm := testVM("exportbox-base", "image-export", "172.16.0.105")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock

	// Simulate: worker committed inside the VM. Without base_commit the diff
	// against the new HEAD would be empty. With base_commit we capture
	// everything since the original checkout.
	patch := []byte("diff --git a/worker.go b/worker.go\nindex 0000000..abcdef 100644\n")
	names := []byte("worker.go\n")

	fake := &exportGuestClient{
		responses: []exportGuestResponse{
			{output: patch},
			{output: names},
		},
	}
	d := newExportTestDaemonStore(t, fake)
	upsertDaemonVM(t, ctx, d.store, vm)
	d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

	const prepareCommit = "abc1234deadbeef"
	result, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
		IDOrName:   vm.Name,
		BaseCommit: prepareCommit,
	})
	if err != nil {
		t.Fatalf("ExportVMWorkspace: %v", err)
	}
	if !result.HasChanges {
		t.Fatal("HasChanges = false, want true")
	}
	if result.BaseCommit != prepareCommit {
		t.Fatalf("BaseCommit = %q, want %q", result.BaseCommit, prepareCommit)
	}
	// Both scripts must reference the caller-supplied commit, not HEAD.
	for _, script := range fake.scripts {
		if strings.Contains(script, " HEAD") {
			t.Fatalf("script used HEAD instead of base_commit: %q", script)
		}
		if !strings.Contains(script, prepareCommit) {
			t.Fatalf("script missing base_commit %q: %q", prepareCommit, script)
		}
	}
}

func TestExportVMWorkspace_BaseCommitFallsBackToHEAD(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	firecracker := startFakeFirecracker(t, apiSock)

	vm := testVM("exportbox-nobase", "image-export", "172.16.0.106")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock

	fake := &exportGuestClient{
		responses: []exportGuestResponse{
			{output: nil},
			{output: nil},
		},
	}
	d := newExportTestDaemonStore(t, fake)
	upsertDaemonVM(t, ctx, d.store, vm)
	d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

	result, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
		IDOrName:   vm.Name,
		BaseCommit: "", // omitted
	})
	if err != nil {
		t.Fatalf("ExportVMWorkspace: %v", err)
	}
	if result.BaseCommit != "HEAD" {
		t.Fatalf("BaseCommit = %q, want HEAD when not supplied", result.BaseCommit)
	}
	for _, script := range fake.scripts {
		if !strings.Contains(script, "HEAD") {
			t.Fatalf("script missing HEAD fallback: %q", script)
		}
	}
}

func TestExportVMWorkspace_NoChanges(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	firecracker := startFakeFirecracker(t, apiSock)

	vm := testVM("exportbox-empty", "image-export", "172.16.0.101")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock

	// Both scripts return empty output (no changes).
	fake := &exportGuestClient{
		responses: []exportGuestResponse{
			{output: nil},
			{output: nil},
		},
	}
	d := newExportTestDaemonStore(t, fake)
	upsertDaemonVM(t, ctx, d.store, vm)
	d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

	result, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
		IDOrName: vm.Name,
	})
	if err != nil {
		t.Fatalf("ExportVMWorkspace: %v", err)
	}
	if result.HasChanges {
		t.Fatal("HasChanges = true, want false")
	}
	if len(result.Patch) != 0 {
		t.Fatalf("Patch = %q, want empty", result.Patch)
	}
	if len(result.ChangedFiles) != 0 {
		t.Fatalf("ChangedFiles = %v, want empty", result.ChangedFiles)
	}
}

func TestExportVMWorkspace_DefaultGuestPath(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	firecracker := startFakeFirecracker(t, apiSock)

	vm := testVM("exportbox-default", "image-export", "172.16.0.102")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock

	fake := &exportGuestClient{
		responses: []exportGuestResponse{
			{output: nil},
			{output: nil},
		},
	}
	d := newExportTestDaemonStore(t, fake)
	upsertDaemonVM(t, ctx, d.store, vm)
	d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

	// GuestPath omitted — should default to /root/repo.
	result, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
		IDOrName: vm.Name,
	})
	if err != nil {
		t.Fatalf("ExportVMWorkspace: %v", err)
	}
	if result.GuestPath != "/root/repo" {
		t.Fatalf("GuestPath = %q, want /root/repo", result.GuestPath)
	}
}

func TestExportVMWorkspace_VMNotRunning(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	vm := testVM("exportbox-stopped", "image-export", "172.16.0.103")
	vm.State = model.VMStateStopped

	fake := &exportGuestClient{}
	d := newExportTestDaemonStore(t, fake)
	upsertDaemonVM(t, ctx, d.store, vm)
	// VM is stopped — no handle seed; vmAlive must return false.

	_, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
		IDOrName: vm.Name,
	})
	if err == nil || !strings.Contains(err.Error(), "not running") {
		t.Fatalf("error = %v, want 'not running' error", err)
	}
	if fake.callIndex != 0 {
		t.Fatal("RunScriptOutput should not be called when VM is not running")
	}
}

func TestExportVMWorkspace_MultipleChangedFiles(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	firecracker := startFakeFirecracker(t, apiSock)

	vm := testVM("exportbox-multi", "image-export", "172.16.0.104")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock

	patch := []byte("diff --git a/a.go b/a.go\n--- a/a.go\n+++ b/a.go\n")
	names := []byte("a.go\nb.go\nnew/file.go\n")

	fake := &exportGuestClient{
		responses: []exportGuestResponse{
			{output: patch},
			{output: names},
		},
	}
	d := newExportTestDaemonStore(t, fake)
	upsertDaemonVM(t, ctx, d.store, vm)
	d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

	result, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
		IDOrName: vm.Name,
	})
	if err != nil {
		t.Fatalf("ExportVMWorkspace: %v", err)
	}
	if len(result.ChangedFiles) != 3 {
		t.Fatalf("ChangedFiles = %v, want 3 entries", result.ChangedFiles)
	}
	want := []string{"a.go", "b.go", "new/file.go"}
	for i, f := range want {
		if result.ChangedFiles[i] != f {
			t.Fatalf("ChangedFiles[%d] = %q, want %q", i, result.ChangedFiles[i], f)
		}
	}
}

// TestPrepareVMWorkspace_ReleasesVMLockDuringGuestIO is a regression
// guard for an earlier design that held the per-VM mutex across SSH
// dial, tar streaming, and remote chmod. A long import could then
// block unrelated lifecycle ops (vm stop / delete / restart) on the
// same VM until it completed. The fix switched to a dedicated
// workspaceLocks set for I/O, with vmLocks held only for the brief
// state-validation phase. This test kicks off a prepare that blocks
// inside the import step and then asserts the VM mutex is acquirable
// while the prepare is mid-flight.
func TestPrepareVMWorkspace_ReleasesVMLockDuringGuestIO(t *testing.T) {
	// Not parallel: mutates package-level workspaceInspectRepoFunc /
	// workspaceImportFunc seams, which the other prepare-concurrency
	// test also swaps.
	ctx := context.Background()

	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	firecracker := startFakeFirecracker(t, apiSock)

	vm := testVM("lockbox", "image-x", "172.16.0.210")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock

	d := &Daemon{
		store:  openDaemonStore(t),
		config: model.DaemonConfig{SSHKeyPath: filepath.Join(t.TempDir(), "id_ed25519")},
		logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
	}
	d.guestWaitForSSH = func(_ context.Context, _, _ string, _ time.Duration) error { return nil }
	d.guestDial = func(_ context.Context, _, _ string) (guestSSHClient, error) {
		return &exportGuestClient{}, nil
	}
	upsertDaemonVM(t, ctx, d.store, vm)
	d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

	// Replace the seams. InspectRepo returns a trivial spec so the
	// real filesystem isn't touched; Import blocks until we say go.
	origInspect := workspaceInspectRepoFunc
	origImport := workspaceImportFunc
	t.Cleanup(func() {
		workspaceInspectRepoFunc = origInspect
		workspaceImportFunc = origImport
	})

	importStarted := make(chan struct{})
	releaseImport := make(chan struct{})
	workspaceInspectRepoFunc = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
		return workspace.RepoSpec{RepoName: "fake", RepoRoot: "/tmp/fake"}, nil
	}
	workspaceImportFunc = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
		close(importStarted)
		<-releaseImport
		return nil
	}

	// Kick off prepare in a goroutine. It will block inside the import.
	prepareDone := make(chan error, 1)
	go func() {
		_, err := d.PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
			IDOrName:   vm.Name,
			SourcePath: "/tmp/fake",
		})
		prepareDone <- err
	}()

	// Wait for prepare to reach the guest-I/O phase (past the VM
	// mutex) before testing the assertion.
	select {
	case <-importStarted:
	case <-time.After(2 * time.Second):
		t.Fatal("import never started; prepare blocked before reaching guest I/O")
	}

	// With the fix in place, the VM mutex is free even though the
	// import is in flight. Acquiring it must not wait.
	acquired := make(chan struct{})
	go func() {
		unlock := d.lockVMID(vm.ID)
		close(acquired)
		unlock()
	}()
	select {
	case <-acquired:
	case <-time.After(500 * time.Millisecond):
		close(releaseImport) // unblock the goroutine so the test can exit
		<-prepareDone
		t.Fatal("VM mutex held during guest I/O — lifecycle ops would block behind workspace prepare")
	}

	// Now let the import finish and make sure prepare returns.
	close(releaseImport)
	select {
	case err := <-prepareDone:
		if err != nil {
			t.Fatalf("prepare returned error: %v", err)
		}
	case <-time.After(2 * time.Second):
		t.Fatal("prepare did not return after import unblocked")
	}
}

// TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM asserts
// the workspaceLocks scope: two concurrent prepares on the same VM do
// NOT interleave, even though they no longer take the core VM mutex.
func TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM(t *testing.T) {
	// Not parallel: see note on ReleasesVMLockDuringGuestIO.
	ctx := context.Background()

	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	firecracker := startFakeFirecracker(t, apiSock)

	vm := testVM("serialbox", "image-x", "172.16.0.211")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock

	d := &Daemon{
		store:  openDaemonStore(t),
		config: model.DaemonConfig{SSHKeyPath: filepath.Join(t.TempDir(), "id_ed25519")},
		logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
	}
	d.guestWaitForSSH = func(_ context.Context, _, _ string, _ time.Duration) error { return nil }
	d.guestDial = func(_ context.Context, _, _ string) (guestSSHClient, error) {
		return &exportGuestClient{}, nil
	}
	upsertDaemonVM(t, ctx, d.store, vm)
	d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

	origInspect := workspaceInspectRepoFunc
	origImport := workspaceImportFunc
	t.Cleanup(func() {
		workspaceInspectRepoFunc = origInspect
		workspaceImportFunc = origImport
	})

	workspaceInspectRepoFunc = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
		return workspace.RepoSpec{RepoName: "fake", RepoRoot: "/tmp/fake"}, nil
	}

	// Counter of simultaneous Import calls. Should never exceed 1.
	var active int32
	var maxObserved int32
	release := make(chan struct{})
	workspaceImportFunc = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
		n := atomic.AddInt32(&active, 1)
		for {
			prev := atomic.LoadInt32(&maxObserved)
			if n <= prev || atomic.CompareAndSwapInt32(&maxObserved, prev, n) {
				break
			}
		}
		<-release
		atomic.AddInt32(&active, -1)
		return nil
	}

	const n = 3
	done := make(chan error, n)
	for i := 0; i < n; i++ {
		go func() {
			_, err := d.PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
				IDOrName:   vm.Name,
				SourcePath: "/tmp/fake",
			})
			done <- err
		}()
	}

	// Give goroutines a moment to queue up.
	time.Sleep(100 * time.Millisecond)

	if got := atomic.LoadInt32(&active); got != 1 {
		close(release) // unblock to avoid hang
		for i := 0; i < n; i++ {
			<-done
		}
		t.Fatalf("%d concurrent imports, want exactly 1 (workspace lock should serialise)", got)
	}

	// Drain: release imports one at a time.
	for i := 0; i < n; i++ {
		release <- struct{}{}
	}
	close(release)
	for i := 0; i < n; i++ {
		if err := <-done; err != nil {
			t.Errorf("prepare #%d error: %v", i, err)
		}
	}
	if got := atomic.LoadInt32(&maxObserved); got != 1 {
		t.Fatalf("peak concurrent imports = %d, want 1", got)
	}
}

// TestExportVMWorkspace_DoesNotMutateRealIndex is a regression guard
// for an earlier design where `git add -A` ran against the guest's
// real `.git/index`, leaving staged changes behind after what the user
// thought was a read-only observation. Every export script must now
// route `git add -A` through a throwaway index selected by
// GIT_INDEX_FILE, and every script must clean that file up.
func TestExportVMWorkspace_DoesNotMutateRealIndex(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	firecracker := startFakeFirecracker(t, apiSock)

	vm := testVM("exportbox-readonly", "image-export", "172.16.0.107")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock

	fake := &exportGuestClient{
		responses: []exportGuestResponse{
			{output: []byte("diff --git a/x b/x\n")},
			{output: []byte("x\n")},
		},
	}
	d := newExportTestDaemonStore(t, fake)
	upsertDaemonVM(t, ctx, d.store, vm)
	d.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

	if _, err := d.ExportVMWorkspace(ctx, api.WorkspaceExportParams{IDOrName: vm.Name}); err != nil {
		t.Fatalf("ExportVMWorkspace: %v", err)
	}

	if len(fake.scripts) == 0 {
		t.Fatal("expected at least one export script to be sent")
	}
	for i, script := range fake.scripts {
		if !strings.Contains(script, "GIT_INDEX_FILE") {
			t.Errorf("script[%d] missing GIT_INDEX_FILE routing:\n%s", i, script)
		}
		// git add -A must ONLY appear on a line that also sets
		// GIT_INDEX_FILE. A bare occurrence would mutate the real
		// index.
		for _, line := range strings.Split(script, "\n") {
			if strings.Contains(line, "git add -A") && !strings.Contains(line, "GIT_INDEX_FILE") {
				t.Errorf("script[%d] has unscoped `git add -A`:\n%s", i, script)
				break
			}
		}
		if !strings.Contains(script, "git read-tree") {
			t.Errorf("script[%d] missing git read-tree (temp index seed):\n%s", i, script)
		}
		if !strings.Contains(script, "mktemp") {
			t.Errorf("script[%d] missing mktemp for temp index:\n%s", i, script)
		}
		if !strings.Contains(script, "trap") || !strings.Contains(script, "rm") {
			t.Errorf("script[%d] missing temp-index cleanup trap:\n%s", i, script)
		}
	}
}