banger hasn't shipped a public release — every "legacy", "pre-opt-in",
"previously", "migration note", "no longer" reference in the tree is
pinning against a state no real user's install has ever been in.
That scaffolding has weight: it's a coordinate system future readers
have to decode, and it keeps dead code alive.
Removed (code):
- internal/daemon/ssh_client_config.go
- vmSSHConfigIncludeBegin / vmSSHConfigIncludeEnd constants and
every `removeManagedBlock(existing, vm...)` call they enabled
(legacy inline `Host *.vm` block scrub)
- cleanupLegacySSHConfigDir (+ its caller in syncVMSSHClientConfig)
— wiped a pre-opt-in sibling file under $ConfigDir/ssh
- sameDirOrParent + resolvePathForComparison — only ever used
by cleanupLegacySSHConfigDir
- the "also check legacy marker" fallback in
UserSSHIncludeInstalled / UninstallUserSSHInclude
- internal/store/migrations.go
- migrateDropDeadImageColumns (migration 2) + its slice entry
- dropColumnIfExists (orphaned after the above)
- addColumnIfMissing + the whole "columns added across the pre-
versioning lifetime" block at the end of migrateBaseline —
subsumed into the baseline CREATE TABLE
- `packages_path TEXT` column on the images table (the
throwaway migration 2 dropped it, but there was never any
reader)
- internal/daemon/vm.go
- vmDNSRecordName local wrapper — was justified as "avoid
pulling vmdns into every file"; three of four callers already
imported vmdns directly, so inline the one stray call
- internal/cli/cli_test.go
- TestLegacyRemovedCommandIsRejected (`tui` subcommand never
shipped)
Removed / simplified (tests):
- ssh_client_config_test.go: dropped TestSameDirOrParentHandlesSymlinks,
TestSyncVMSSHClientConfigPreservesUserKeyInLegacyDir,
TestSyncVMSSHClientConfigNarrowsCleanupToLegacyFile,
TestSyncVMSSHClientConfigLeavesUnexpectedLegacyContents,
TestInstallUserSSHIncludeMigratesLegacyInlineBlock, plus the
"legacy posture" regression strings in the remaining happy-path
test; TestUninstallUserSSHIncludeRemovesBothMarkerBlocks collapsed
to a single-block test
- migrations_test.go: dropped TestMigrateDropDeadImageColumns_AcrossInstallPaths,
TestDropColumnIfExistsIsIdempotent; TestOpenReadOnlyDoesNotRunMigrations
simplified to test against the baseline marker
Removed (docs):
- README.md "**Migration note.**" blockquote about the SSH-key path move
- docs/advanced.md parenthetical "(the old behaviour)"
Reworded (comments):
- Dropped "Previously this file also contained LogLevel DEBUG3..."
history from vm_disk.go's sshdGuestConfig doc
- Dropped "Call sites that previously read vm.Runtime.{PID,...}"
from vm_handles.go; now documents the current contract
- Dropped "Pre-v0.1 the defaults are" scaffolding in doctor_test.go
- Dropped "no longer does its own git inspection" phrasing in vm_run.go
- Dropped the "(also cleans up legacy inline block from pre-opt-in
builds)" aside on the `ssh-config` CLI docstring
- Renamed test var `legacyKey` → `existingKey` in vm_test.go; its
purpose was "pre-existing authorized_keys line," not banger-legacy
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
604 lines
18 KiB
Go
package daemon
|
|
|
|
import (
|
|
"context"
|
|
"io"
|
|
"log/slog"
|
|
"os"
|
|
"path/filepath"
|
|
"strings"
|
|
"sync/atomic"
|
|
"testing"
|
|
"time"
|
|
|
|
"banger/internal/api"
|
|
"banger/internal/daemon/workspace"
|
|
"banger/internal/model"
|
|
)
|
|
|
|
// exportGuestClient is a scriptable fake for RunScriptOutput used in export tests.
// Each call to RunScriptOutput returns the next response from the queue.
type exportGuestClient struct {
	responses []exportGuestResponse // canned RunScriptOutput results, consumed in order
	scripts   []string              // every script passed to RunScriptOutput, recorded for assertions
	callIndex int                   // index of the next entry in responses to replay
}

// exportGuestResponse is one canned result for a RunScriptOutput call.
type exportGuestResponse struct {
	output []byte
	err    error
}
|
// Close is a no-op: the fake holds no connection or other resources.
func (e *exportGuestClient) Close() error { return nil }
|
// RunScript is a no-op stub; the export flow under test uses RunScriptOutput,
// and this method exists only to satisfy the guestSSHClient interface.
func (e *exportGuestClient) RunScript(_ context.Context, _ string, _ io.Writer) error {
	return nil
}
|
func (e *exportGuestClient) RunScriptOutput(_ context.Context, script string) ([]byte, error) {
|
|
e.scripts = append(e.scripts, script)
|
|
if e.callIndex >= len(e.responses) {
|
|
return nil, nil
|
|
}
|
|
r := e.responses[e.callIndex]
|
|
e.callIndex++
|
|
return r.output, r.err
|
|
}
|
|
|
|
// UploadFile is a no-op stub satisfying the guestSSHClient interface.
func (e *exportGuestClient) UploadFile(_ context.Context, _ string, _ os.FileMode, _ []byte, _ io.Writer) error {
	return nil
}

// StreamTar is a no-op stub satisfying the guestSSHClient interface.
func (e *exportGuestClient) StreamTar(_ context.Context, _ string, _ string, _ io.Writer) error {
	return nil
}

// StreamTarEntries is a no-op stub satisfying the guestSSHClient interface.
func (e *exportGuestClient) StreamTarEntries(_ context.Context, _ string, _ []string, _ string, _ io.Writer) error {
	return nil
}
|
func newExportTestDaemonStore(t *testing.T, fake *exportGuestClient) *Daemon {
|
|
t.Helper()
|
|
db := openDaemonStore(t)
|
|
d := &Daemon{
|
|
store: db,
|
|
config: model.DaemonConfig{SSHKeyPath: filepath.Join(t.TempDir(), "id_ed25519")},
|
|
logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
|
|
}
|
|
wireServices(d)
|
|
d.guestDial = func(_ context.Context, _ string, _ string) (guestSSHClient, error) {
|
|
return fake, nil
|
|
}
|
|
return d
|
|
}
|
|
|
|
func TestExportVMWorkspace_HappyPath(t *testing.T) {
|
|
t.Parallel()
|
|
ctx := context.Background()
|
|
|
|
apiSock := filepath.Join(t.TempDir(), "fc.sock")
|
|
firecracker := startFakeFirecracker(t, apiSock)
|
|
|
|
vm := testVM("exportbox", "image-export", "172.16.0.100")
|
|
vm.State = model.VMStateRunning
|
|
vm.Runtime.State = model.VMStateRunning
|
|
vm.Runtime.APISockPath = apiSock
|
|
|
|
patch := []byte("diff --git a/file.go b/file.go\nindex 0000000..1111111 100644\n")
|
|
names := []byte("file.go\n")
|
|
|
|
fake := &exportGuestClient{
|
|
responses: []exportGuestResponse{
|
|
{output: patch},
|
|
{output: names},
|
|
},
|
|
}
|
|
d := newExportTestDaemonStore(t, fake)
|
|
upsertDaemonVM(t, ctx, d.store, vm)
|
|
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
|
|
|
|
result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
|
|
IDOrName: vm.Name,
|
|
GuestPath: "/root/repo",
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("ExportVMWorkspace: %v", err)
|
|
}
|
|
if !result.HasChanges {
|
|
t.Fatal("HasChanges = false, want true")
|
|
}
|
|
if string(result.Patch) != string(patch) {
|
|
t.Fatalf("Patch = %q, want %q", result.Patch, patch)
|
|
}
|
|
if result.GuestPath != "/root/repo" {
|
|
t.Fatalf("GuestPath = %q, want /root/repo", result.GuestPath)
|
|
}
|
|
if len(result.ChangedFiles) != 1 || result.ChangedFiles[0] != "file.go" {
|
|
t.Fatalf("ChangedFiles = %v, want [file.go]", result.ChangedFiles)
|
|
}
|
|
if fake.callIndex != 2 {
|
|
t.Fatalf("RunScriptOutput call count = %d, want 2", fake.callIndex)
|
|
}
|
|
// No base_commit provided: diff ref must be HEAD.
|
|
for _, script := range fake.scripts {
|
|
if !strings.Contains(script, "HEAD") {
|
|
t.Fatalf("script missing HEAD ref: %q", script)
|
|
}
|
|
}
|
|
if result.BaseCommit != "HEAD" {
|
|
t.Fatalf("BaseCommit = %q, want HEAD", result.BaseCommit)
|
|
}
|
|
}
|
|
|
|
func TestExportVMWorkspace_WithBaseCommit(t *testing.T) {
|
|
t.Parallel()
|
|
ctx := context.Background()
|
|
|
|
apiSock := filepath.Join(t.TempDir(), "fc.sock")
|
|
firecracker := startFakeFirecracker(t, apiSock)
|
|
|
|
vm := testVM("exportbox-base", "image-export", "172.16.0.105")
|
|
vm.State = model.VMStateRunning
|
|
vm.Runtime.State = model.VMStateRunning
|
|
vm.Runtime.APISockPath = apiSock
|
|
|
|
// Simulate: worker committed inside the VM. Without base_commit the diff
|
|
// against the new HEAD would be empty. With base_commit we capture
|
|
// everything since the original checkout.
|
|
patch := []byte("diff --git a/worker.go b/worker.go\nindex 0000000..abcdef 100644\n")
|
|
names := []byte("worker.go\n")
|
|
|
|
fake := &exportGuestClient{
|
|
responses: []exportGuestResponse{
|
|
{output: patch},
|
|
{output: names},
|
|
},
|
|
}
|
|
d := newExportTestDaemonStore(t, fake)
|
|
upsertDaemonVM(t, ctx, d.store, vm)
|
|
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
|
|
|
|
const prepareCommit = "abc1234deadbeef"
|
|
result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
|
|
IDOrName: vm.Name,
|
|
BaseCommit: prepareCommit,
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("ExportVMWorkspace: %v", err)
|
|
}
|
|
if !result.HasChanges {
|
|
t.Fatal("HasChanges = false, want true")
|
|
}
|
|
if result.BaseCommit != prepareCommit {
|
|
t.Fatalf("BaseCommit = %q, want %q", result.BaseCommit, prepareCommit)
|
|
}
|
|
// Both scripts must reference the caller-supplied commit, not HEAD.
|
|
for _, script := range fake.scripts {
|
|
if strings.Contains(script, " HEAD") {
|
|
t.Fatalf("script used HEAD instead of base_commit: %q", script)
|
|
}
|
|
if !strings.Contains(script, prepareCommit) {
|
|
t.Fatalf("script missing base_commit %q: %q", prepareCommit, script)
|
|
}
|
|
}
|
|
}
|
|
|
|
func TestExportVMWorkspace_BaseCommitFallsBackToHEAD(t *testing.T) {
|
|
t.Parallel()
|
|
ctx := context.Background()
|
|
|
|
apiSock := filepath.Join(t.TempDir(), "fc.sock")
|
|
firecracker := startFakeFirecracker(t, apiSock)
|
|
|
|
vm := testVM("exportbox-nobase", "image-export", "172.16.0.106")
|
|
vm.State = model.VMStateRunning
|
|
vm.Runtime.State = model.VMStateRunning
|
|
vm.Runtime.APISockPath = apiSock
|
|
|
|
fake := &exportGuestClient{
|
|
responses: []exportGuestResponse{
|
|
{output: nil},
|
|
{output: nil},
|
|
},
|
|
}
|
|
d := newExportTestDaemonStore(t, fake)
|
|
upsertDaemonVM(t, ctx, d.store, vm)
|
|
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
|
|
|
|
result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
|
|
IDOrName: vm.Name,
|
|
BaseCommit: "", // omitted
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("ExportVMWorkspace: %v", err)
|
|
}
|
|
if result.BaseCommit != "HEAD" {
|
|
t.Fatalf("BaseCommit = %q, want HEAD when not supplied", result.BaseCommit)
|
|
}
|
|
for _, script := range fake.scripts {
|
|
if !strings.Contains(script, "HEAD") {
|
|
t.Fatalf("script missing HEAD fallback: %q", script)
|
|
}
|
|
}
|
|
}
|
|
|
|
func TestExportVMWorkspace_NoChanges(t *testing.T) {
|
|
t.Parallel()
|
|
ctx := context.Background()
|
|
|
|
apiSock := filepath.Join(t.TempDir(), "fc.sock")
|
|
firecracker := startFakeFirecracker(t, apiSock)
|
|
|
|
vm := testVM("exportbox-empty", "image-export", "172.16.0.101")
|
|
vm.State = model.VMStateRunning
|
|
vm.Runtime.State = model.VMStateRunning
|
|
vm.Runtime.APISockPath = apiSock
|
|
|
|
// Both scripts return empty output (no changes).
|
|
fake := &exportGuestClient{
|
|
responses: []exportGuestResponse{
|
|
{output: nil},
|
|
{output: nil},
|
|
},
|
|
}
|
|
d := newExportTestDaemonStore(t, fake)
|
|
upsertDaemonVM(t, ctx, d.store, vm)
|
|
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
|
|
|
|
result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
|
|
IDOrName: vm.Name,
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("ExportVMWorkspace: %v", err)
|
|
}
|
|
if result.HasChanges {
|
|
t.Fatal("HasChanges = true, want false")
|
|
}
|
|
if len(result.Patch) != 0 {
|
|
t.Fatalf("Patch = %q, want empty", result.Patch)
|
|
}
|
|
if len(result.ChangedFiles) != 0 {
|
|
t.Fatalf("ChangedFiles = %v, want empty", result.ChangedFiles)
|
|
}
|
|
}
|
|
|
|
func TestExportVMWorkspace_DefaultGuestPath(t *testing.T) {
|
|
t.Parallel()
|
|
ctx := context.Background()
|
|
|
|
apiSock := filepath.Join(t.TempDir(), "fc.sock")
|
|
firecracker := startFakeFirecracker(t, apiSock)
|
|
|
|
vm := testVM("exportbox-default", "image-export", "172.16.0.102")
|
|
vm.State = model.VMStateRunning
|
|
vm.Runtime.State = model.VMStateRunning
|
|
vm.Runtime.APISockPath = apiSock
|
|
|
|
fake := &exportGuestClient{
|
|
responses: []exportGuestResponse{
|
|
{output: nil},
|
|
{output: nil},
|
|
},
|
|
}
|
|
d := newExportTestDaemonStore(t, fake)
|
|
upsertDaemonVM(t, ctx, d.store, vm)
|
|
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
|
|
|
|
// GuestPath omitted — should default to /root/repo.
|
|
result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
|
|
IDOrName: vm.Name,
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("ExportVMWorkspace: %v", err)
|
|
}
|
|
if result.GuestPath != "/root/repo" {
|
|
t.Fatalf("GuestPath = %q, want /root/repo", result.GuestPath)
|
|
}
|
|
}
|
|
|
|
func TestExportVMWorkspace_VMNotRunning(t *testing.T) {
|
|
t.Parallel()
|
|
ctx := context.Background()
|
|
|
|
vm := testVM("exportbox-stopped", "image-export", "172.16.0.103")
|
|
vm.State = model.VMStateStopped
|
|
|
|
fake := &exportGuestClient{}
|
|
d := newExportTestDaemonStore(t, fake)
|
|
upsertDaemonVM(t, ctx, d.store, vm)
|
|
// VM is stopped — no handle seed; vmAlive must return false.
|
|
|
|
_, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
|
|
IDOrName: vm.Name,
|
|
})
|
|
if err == nil || !strings.Contains(err.Error(), "not running") {
|
|
t.Fatalf("error = %v, want 'not running' error", err)
|
|
}
|
|
if fake.callIndex != 0 {
|
|
t.Fatal("RunScriptOutput should not be called when VM is not running")
|
|
}
|
|
}
|
|
|
|
func TestExportVMWorkspace_MultipleChangedFiles(t *testing.T) {
|
|
t.Parallel()
|
|
ctx := context.Background()
|
|
|
|
apiSock := filepath.Join(t.TempDir(), "fc.sock")
|
|
firecracker := startFakeFirecracker(t, apiSock)
|
|
|
|
vm := testVM("exportbox-multi", "image-export", "172.16.0.104")
|
|
vm.State = model.VMStateRunning
|
|
vm.Runtime.State = model.VMStateRunning
|
|
vm.Runtime.APISockPath = apiSock
|
|
|
|
patch := []byte("diff --git a/a.go b/a.go\n--- a/a.go\n+++ b/a.go\n")
|
|
names := []byte("a.go\nb.go\nnew/file.go\n")
|
|
|
|
fake := &exportGuestClient{
|
|
responses: []exportGuestResponse{
|
|
{output: patch},
|
|
{output: names},
|
|
},
|
|
}
|
|
d := newExportTestDaemonStore(t, fake)
|
|
upsertDaemonVM(t, ctx, d.store, vm)
|
|
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
|
|
|
|
result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
|
|
IDOrName: vm.Name,
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("ExportVMWorkspace: %v", err)
|
|
}
|
|
if len(result.ChangedFiles) != 3 {
|
|
t.Fatalf("ChangedFiles = %v, want 3 entries", result.ChangedFiles)
|
|
}
|
|
want := []string{"a.go", "b.go", "new/file.go"}
|
|
for i, f := range want {
|
|
if result.ChangedFiles[i] != f {
|
|
t.Fatalf("ChangedFiles[%d] = %q, want %q", i, result.ChangedFiles[i], f)
|
|
}
|
|
}
|
|
}
|
|
|
|
// TestPrepareVMWorkspace_ReleasesVMLockDuringGuestIO is a regression
// guard for an earlier design that held the per-VM mutex across SSH
// dial, tar streaming, and remote chmod. A long import could then
// block unrelated lifecycle ops (vm stop / delete / restart) on the
// same VM until it completed. The fix switched to a dedicated
// workspaceLocks set for I/O, with vmLocks held only for the brief
// state-validation phase. This test kicks off a prepare that blocks
// inside the import step and then asserts the VM mutex is acquirable
// while the prepare is mid-flight.
func TestPrepareVMWorkspace_ReleasesVMLockDuringGuestIO(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	firecracker := startFakeFirecracker(t, apiSock)

	vm := testVM("lockbox", "image-x", "172.16.0.210")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock

	// Hand-built daemon (rather than newExportTestDaemonStore) because this
	// test also needs to stub guestWaitForSSH for the prepare path.
	d := &Daemon{
		store:  openDaemonStore(t),
		config: model.DaemonConfig{SSHKeyPath: filepath.Join(t.TempDir(), "id_ed25519")},
		logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
	}
	wireServices(d)
	d.guestWaitForSSH = func(_ context.Context, _, _ string, _ time.Duration) error { return nil }
	d.guestDial = func(_ context.Context, _, _ string) (guestSSHClient, error) {
		return &exportGuestClient{}, nil
	}
	upsertDaemonVM(t, ctx, d.store, vm)
	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

	// Install the workspace seams on this daemon instance. InspectRepo
	// returns a trivial spec so the real filesystem isn't touched;
	// Import blocks until we say go.
	importStarted := make(chan struct{})
	releaseImport := make(chan struct{})
	d.ws.workspaceInspectRepo = func(context.Context, string, string, string, bool) (workspace.RepoSpec, error) {
		return workspace.RepoSpec{RepoName: "fake", RepoRoot: "/tmp/fake"}, nil
	}
	d.ws.workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
		close(importStarted)
		<-releaseImport
		return nil
	}

	// Kick off prepare in a goroutine. It will block inside the import.
	prepareDone := make(chan error, 1)
	go func() {
		_, err := d.ws.PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
			IDOrName:   vm.Name,
			SourcePath: "/tmp/fake",
		})
		prepareDone <- err
	}()

	// Wait for prepare to reach the guest-I/O phase (past the VM
	// mutex) before testing the assertion.
	select {
	case <-importStarted:
	case <-time.After(2 * time.Second):
		t.Fatal("import never started; prepare blocked before reaching guest I/O")
	}

	// With the fix in place, the VM mutex is free even though the
	// import is in flight. Acquiring it must not wait.
	acquired := make(chan struct{})
	go func() {
		unlock := d.vm.lockVMID(vm.ID)
		close(acquired)
		unlock()
	}()
	select {
	case <-acquired:
	case <-time.After(500 * time.Millisecond):
		// Failure path: release the import and wait for the prepare
		// goroutine so nothing outlives the test before Fatal fires.
		close(releaseImport) // unblock the goroutine so the test can exit
		<-prepareDone
		t.Fatal("VM mutex held during guest I/O — lifecycle ops would block behind workspace prepare")
	}

	// Now let the import finish and make sure prepare returns.
	close(releaseImport)
	select {
	case err := <-prepareDone:
		if err != nil {
			t.Fatalf("prepare returned error: %v", err)
		}
	case <-time.After(2 * time.Second):
		t.Fatal("prepare did not return after import unblocked")
	}
}
|
// TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM asserts
// the workspaceLocks scope: two concurrent prepares on the same VM
// serialise via workspaceLocks even though they don't hold the core
// VM mutex, so a lifecycle op (stop/delete) isn't blocked.
func TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	firecracker := startFakeFirecracker(t, apiSock)

	vm := testVM("serialbox", "image-x", "172.16.0.211")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.APISockPath = apiSock

	// Hand-built daemon so guestWaitForSSH can be stubbed for the prepare path.
	d := &Daemon{
		store:  openDaemonStore(t),
		config: model.DaemonConfig{SSHKeyPath: filepath.Join(t.TempDir(), "id_ed25519")},
		logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
	}
	wireServices(d)
	d.guestWaitForSSH = func(_ context.Context, _, _ string, _ time.Duration) error { return nil }
	d.guestDial = func(_ context.Context, _, _ string) (guestSSHClient, error) {
		return &exportGuestClient{}, nil
	}
	upsertDaemonVM(t, ctx, d.store, vm)
	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

	d.ws.workspaceInspectRepo = func(context.Context, string, string, string, bool) (workspace.RepoSpec, error) {
		return workspace.RepoSpec{RepoName: "fake", RepoRoot: "/tmp/fake"}, nil
	}

	// Counter of simultaneous Import calls. Should never exceed 1.
	var active int32
	var maxObserved int32
	release := make(chan struct{})
	d.ws.workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
		n := atomic.AddInt32(&active, 1)
		// CAS loop: record the high-water mark of concurrent imports
		// without a mutex (multiple goroutines may race to update it).
		for {
			prev := atomic.LoadInt32(&maxObserved)
			if n <= prev || atomic.CompareAndSwapInt32(&maxObserved, prev, n) {
				break
			}
		}
		<-release // hold the import open so overlap would be observable
		atomic.AddInt32(&active, -1)
		return nil
	}

	const n = 3
	done := make(chan error, n)
	for i := 0; i < n; i++ {
		go func() {
			_, err := d.ws.PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
				IDOrName:   vm.Name,
				SourcePath: "/tmp/fake",
			})
			done <- err
		}()
	}

	// Give goroutines a moment to queue up.
	time.Sleep(100 * time.Millisecond)

	if got := atomic.LoadInt32(&active); got != 1 {
		// Failure path: unblock and drain all prepares before Fatalf
		// so no goroutine outlives the test.
		close(release) // unblock to avoid hang
		for i := 0; i < n; i++ {
			<-done
		}
		t.Fatalf("%d concurrent imports, want exactly 1 (workspace lock should serialise)", got)
	}

	// Drain: release imports one at a time.
	for i := 0; i < n; i++ {
		release <- struct{}{}
	}
	close(release)
	for i := 0; i < n; i++ {
		if err := <-done; err != nil {
			t.Errorf("prepare #%d error: %v", i, err)
		}
	}
	if got := atomic.LoadInt32(&maxObserved); got != 1 {
		t.Fatalf("peak concurrent imports = %d, want 1", got)
	}
}
|
// TestExportVMWorkspace_DoesNotMutateRealIndex is a regression guard
|
|
// for an earlier design where `git add -A` ran against the guest's
|
|
// real `.git/index`, leaving staged changes behind after what the user
|
|
// thought was a read-only observation. Every export script must now
|
|
// route `git add -A` through a throwaway index selected by
|
|
// GIT_INDEX_FILE, and every script must clean that file up.
|
|
func TestExportVMWorkspace_DoesNotMutateRealIndex(t *testing.T) {
|
|
t.Parallel()
|
|
ctx := context.Background()
|
|
|
|
apiSock := filepath.Join(t.TempDir(), "fc.sock")
|
|
firecracker := startFakeFirecracker(t, apiSock)
|
|
|
|
vm := testVM("exportbox-noindex-mutation", "image-export", "172.16.0.107")
|
|
vm.State = model.VMStateRunning
|
|
vm.Runtime.State = model.VMStateRunning
|
|
vm.Runtime.APISockPath = apiSock
|
|
|
|
fake := &exportGuestClient{
|
|
responses: []exportGuestResponse{
|
|
{output: []byte("diff --git a/x b/x\n")},
|
|
{output: []byte("x\n")},
|
|
},
|
|
}
|
|
d := newExportTestDaemonStore(t, fake)
|
|
upsertDaemonVM(t, ctx, d.store, vm)
|
|
d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
|
|
|
|
if _, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{IDOrName: vm.Name}); err != nil {
|
|
t.Fatalf("ExportVMWorkspace: %v", err)
|
|
}
|
|
|
|
if len(fake.scripts) == 0 {
|
|
t.Fatal("expected at least one export script to be sent")
|
|
}
|
|
for i, script := range fake.scripts {
|
|
if !strings.Contains(script, "GIT_INDEX_FILE") {
|
|
t.Errorf("script[%d] missing GIT_INDEX_FILE routing:\n%s", i, script)
|
|
}
|
|
// git add -A must ONLY appear on a line that also sets
|
|
// GIT_INDEX_FILE. A bare occurrence would mutate the real
|
|
// index.
|
|
for _, line := range strings.Split(script, "\n") {
|
|
if strings.Contains(line, "git add -A") && !strings.Contains(line, "GIT_INDEX_FILE") {
|
|
t.Errorf("script[%d] has unscoped `git add -A`:\n%s", i, script)
|
|
break
|
|
}
|
|
}
|
|
if !strings.Contains(script, "git read-tree") {
|
|
t.Errorf("script[%d] missing git read-tree (temp index seed):\n%s", i, script)
|
|
}
|
|
if !strings.Contains(script, "mktemp") {
|
|
t.Errorf("script[%d] missing mktemp for temp index:\n%s", i, script)
|
|
}
|
|
if !strings.Contains(script, "trap") || !strings.Contains(script, "rm") {
|
|
t.Errorf("script[%d] missing temp-index cleanup trap:\n%s", i, script)
|
|
}
|
|
}
|
|
}
|