Third phase of splitting the daemon god-struct. WorkspaceService now
owns workspace.prepare / workspace.export plus the ssh-key +
git-identity + arbitrary-file sync that runs as part of VM start's
prepare_work_disk capability hook. workspaceLocks (the per-VM tar
serialisation set) lives on the service.
workspace.go and vm_authsync.go flipped receivers from *Daemon to
*WorkspaceService. The workspaceInspectRepo / workspaceImport test
seams moved onto the service as fields.
Peer-service dependencies go through narrow function-typed fields:
vmResolver, aliveChecker, waitGuestSSH, dialGuest, imageResolver,
imageWorkSeed, withVMLockByRef, beginOperation. WorkspaceService
never touches VMService / HostNetwork / ImageService directly —
only the exact operations the Daemon hands it at construction.
Daemon lazy-init helper workspaceSvc() mirrors the Phase 1/2
pattern. Test literals still write `&Daemon{store: db, runner: r}`
and get a wired workspace service for free. Tests that override the
inspect/import seams (workspace_test.go, ~4 sites) assign them on
d.workspaceSvc() instead of on the daemon literal.
Dispatch in daemon.go: vm.workspace.prepare and vm.workspace.export
now forward one-liners to d.workspaceSvc().
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
132 lines
4 KiB
Go
package daemon
|
|
|
|
import (
|
|
"context"
|
|
"crypto/rand"
|
|
"crypto/rsa"
|
|
"crypto/x509"
|
|
"encoding/pem"
|
|
"errors"
|
|
"os"
|
|
"path/filepath"
|
|
"strconv"
|
|
"testing"
|
|
|
|
"banger/internal/guest"
|
|
"banger/internal/model"
|
|
)
|
|
|
|
func TestEnsureWorkDiskClonesSeedImageAndResizes(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
vmDir := t.TempDir()
|
|
seedPath := filepath.Join(t.TempDir(), "root.work-seed.ext4")
|
|
if err := os.WriteFile(seedPath, []byte("seed-data"), 0o644); err != nil {
|
|
t.Fatalf("WriteFile(seed): %v", err)
|
|
}
|
|
workDiskPath := filepath.Join(vmDir, "root.ext4")
|
|
runner := &scriptedRunner{
|
|
t: t,
|
|
steps: []runnerStep{
|
|
{call: runnerCall{name: "e2fsck", args: []string{"-p", "-f", workDiskPath}}},
|
|
{call: runnerCall{name: "resize2fs", args: []string{workDiskPath}}},
|
|
},
|
|
}
|
|
d := &Daemon{runner: runner}
|
|
vm := testVM("seeded", "image-seeded", "172.16.0.60")
|
|
vm.Runtime.WorkDiskPath = workDiskPath
|
|
vm.Spec.WorkDiskSizeBytes = 2 * 1024 * 1024
|
|
image := testImage("image-seeded")
|
|
image.WorkSeedPath = seedPath
|
|
|
|
if _, err := d.ensureWorkDisk(context.Background(), &vm, image); err != nil {
|
|
t.Fatalf("ensureWorkDisk: %v", err)
|
|
}
|
|
runner.assertExhausted()
|
|
|
|
info, err := os.Stat(workDiskPath)
|
|
if err != nil {
|
|
t.Fatalf("Stat(work disk): %v", err)
|
|
}
|
|
if info.Size() != vm.Spec.WorkDiskSizeBytes {
|
|
t.Fatalf("work disk size = %d, want %d", info.Size(), vm.Spec.WorkDiskSizeBytes)
|
|
}
|
|
}
|
|
|
|
func TestTapPoolWarmsAndReusesIdleTap(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
runner := &scriptedRunner{
|
|
t: t,
|
|
steps: []runnerStep{
|
|
{call: runnerCall{name: "ip", args: []string{"link", "show", "tap-pool-0"}}, err: errors.New("exit status 1")},
|
|
sudoStep("", nil, "ip", "tuntap", "add", "dev", "tap-pool-0", "mode", "tap", "user", strconv.Itoa(os.Getuid()), "group", strconv.Itoa(os.Getgid())),
|
|
sudoStep("", nil, "ip", "link", "set", "tap-pool-0", "master", model.DefaultBridgeName),
|
|
sudoStep("", nil, "ip", "link", "set", "tap-pool-0", "up"),
|
|
sudoStep("", nil, "ip", "link", "set", model.DefaultBridgeName, "up"),
|
|
},
|
|
}
|
|
d := &Daemon{
|
|
runner: runner,
|
|
config: model.DaemonConfig{
|
|
BridgeName: model.DefaultBridgeName,
|
|
TapPoolSize: 1,
|
|
},
|
|
closing: make(chan struct{}),
|
|
}
|
|
|
|
d.hostNet().ensureTapPool(context.Background())
|
|
tapName, err := d.hostNet().acquireTap(context.Background(), "tap-fallback")
|
|
if err != nil {
|
|
t.Fatalf("acquireTap: %v", err)
|
|
}
|
|
if tapName != "tap-pool-0" {
|
|
t.Fatalf("tapName = %q, want tap-pool-0", tapName)
|
|
}
|
|
if err := d.hostNet().releaseTap(context.Background(), tapName); err != nil {
|
|
t.Fatalf("releaseTap: %v", err)
|
|
}
|
|
tapName, err = d.hostNet().acquireTap(context.Background(), "tap-fallback")
|
|
if err != nil {
|
|
t.Fatalf("acquireTap second time: %v", err)
|
|
}
|
|
if tapName != "tap-pool-0" {
|
|
t.Fatalf("tapName second = %q, want tap-pool-0", tapName)
|
|
}
|
|
runner.assertExhausted()
|
|
}
|
|
|
|
func TestEnsureAuthorizedKeyOnWorkDiskSkipsRepairForMatchingSeededFingerprint(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
privateKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
|
if err != nil {
|
|
t.Fatalf("GenerateKey: %v", err)
|
|
}
|
|
privateKeyPEM := pem.EncodeToMemory(&pem.Block{
|
|
Type: "RSA PRIVATE KEY",
|
|
Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
|
|
})
|
|
sshKeyPath := filepath.Join(t.TempDir(), "id_rsa")
|
|
if err := os.WriteFile(sshKeyPath, privateKeyPEM, 0o600); err != nil {
|
|
t.Fatalf("WriteFile(private key): %v", err)
|
|
}
|
|
fingerprint, err := guest.AuthorizedPublicKeyFingerprint(sshKeyPath)
|
|
if err != nil {
|
|
t.Fatalf("AuthorizedPublicKeyFingerprint: %v", err)
|
|
}
|
|
|
|
runner := &scriptedRunner{t: t}
|
|
d := &Daemon{
|
|
runner: runner,
|
|
config: model.DaemonConfig{SSHKeyPath: sshKeyPath},
|
|
}
|
|
vm := testVM("seeded-fastpath", "image-seeded-fastpath", "172.16.0.62")
|
|
vm.Runtime.WorkDiskPath = filepath.Join(t.TempDir(), "root.ext4")
|
|
image := model.Image{SeededSSHPublicKeyFingerprint: fingerprint}
|
|
|
|
if err := d.workspaceSvc().ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm, image, workDiskPreparation{ClonedFromSeed: true}); err != nil {
|
|
t.Fatalf("ensureAuthorizedKeyOnWorkDisk: %v", err)
|
|
}
|
|
runner.assertExhausted()
|
|
}
|