daemon split (6/n): extract wireServices + drop lazy service getters
Factor the service + capability wiring out of Daemon.Open() into
wireServices(d), an idempotent helper that constructs HostNetwork,
ImageService, WorkspaceService, and VMService from whatever
infrastructure (runner, store, config, layout, logger, closing) is
already set on d. Open() calls it once after filling the composition
root; tests that build &Daemon{...} literals call it to get a working
service graph, preinstalling stubs on the fields they want to fake.
Drop the four lazy-init getters on *Daemon (d.hostNet(),
d.imageSvc(), d.workspaceSvc(), d.vmSvc()) whose sole purpose was
keeping test literals working. Every production call site now reads
d.net / d.img / d.ws / d.vm directly; the services are guaranteed
non-nil once Open returns. No behavior change.
Mechanical: all existing `d.xxxSvc()` calls (production + tests)
rewritten to field access; each `d := &Daemon{...}` in tests gets a
trailing wireServices(d) so the literal + wiring are side-by-side.
Tests that override a pre-built service (e.g. d.img = &ImageService{
bundleFetch: stub}) now set the override before wireServices so the
replacement propagates into VMService's peer pointer.
Also nil-guard HostNetwork.stopVMDNS and d.store in Close() so
partially-initialised daemons (pre-reconcile open failure) still
tear down cleanly — same contract the old lazy getters provided.
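
Illustrative only: a condensed sketch of the test-side wiring pattern,
stitched together from the tests touched below (the test name is made
up; the helpers, fields, and service types are the ones this diff uses):

    func TestWireServicesExample(t *testing.T) {
        d := &Daemon{store: openDaemonStore(t), runner: system.NewRunner()}
        // Preinstall the stub *before* wireServices so the override
        // propagates into VMService's peer pointer.
        d.img = &ImageService{
            runner:      d.runner,
            bundleFetch: stubBundleFetch(imagecat.Manifest{KernelRef: "generic-6.12"}),
        }
        wireServices(d) // fills d.net, d.ws, d.vm around the preinstalled d.img

        if _, err := d.vm.findOrAutoPullImage(context.Background(), "debian-bookworm"); err != nil {
            t.Fatalf("findOrAutoPullImage: %v", err)
        }
    }
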
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
parent 0cfd8a5451
commit 16702bd5e1
22 changed files with 353 additions and 293 deletions

@@ -29,6 +29,7 @@ func TestFindOrAutoPullImageReturnsLocalWithoutPulling(t *testing.T) {
return imagecat.Manifest{}, nil
},
}
+wireServices(d)
id, _ := model.NewID()
if err := d.store.UpsertImage(context.Background(), model.Image{
ID: id,

@@ -38,7 +39,7 @@ func TestFindOrAutoPullImageReturnsLocalWithoutPulling(t *testing.T) {
}); err != nil {
t.Fatal(err)
}
-image, err := d.vmSvc().findOrAutoPullImage(context.Background(), "my-local-image")
+image, err := d.vm.findOrAutoPullImage(context.Background(), "my-local-image")
if err != nil {
t.Fatalf("findOrAutoPullImage: %v", err)
}

@@ -67,8 +68,9 @@ func TestFindOrAutoPullImagePullsFromCatalog(t *testing.T) {
return stubBundleFetch(imagecat.Manifest{KernelRef: "generic-6.12"})(ctx, destDir, entry)
},
}
+wireServices(d)
// "debian-bookworm" is in the embedded imagecat catalog.
-image, err := d.vmSvc().findOrAutoPullImage(context.Background(), "debian-bookworm")
+image, err := d.vm.findOrAutoPullImage(context.Background(), "debian-bookworm")
if err != nil {
t.Fatalf("findOrAutoPullImage: %v", err)
}

@@ -86,7 +88,8 @@ func TestFindOrAutoPullImageReturnsOriginalErrorWhenNotInCatalog(t *testing.T) {
store: openDaemonStore(t),
runner: system.NewRunner(),
}
-_, err := d.vmSvc().findOrAutoPullImage(context.Background(), "not-in-catalog-or-store")
+wireServices(d)
+_, err := d.vm.findOrAutoPullImage(context.Background(), "not-in-catalog-or-store")
if err == nil || !strings.Contains(err.Error(), "not found") {
t.Fatalf("err = %v, want not-found", err)
}

@@ -96,8 +99,9 @@ func TestReadOrAutoPullKernelReturnsLocalWithoutPulling(t *testing.T) {
kernelsDir := t.TempDir()
seedKernel(t, kernelsDir, "generic-6.12")
d := &Daemon{layout: paths.Layout{KernelsDir: kernelsDir}}
+wireServices(d)

-entry, err := d.imageSvc().readOrAutoPullKernel(context.Background(), "generic-6.12")
+entry, err := d.img.readOrAutoPullKernel(context.Background(), "generic-6.12")
if err != nil {
t.Fatalf("readOrAutoPullKernel: %v", err)
}

@@ -108,7 +112,8 @@ func TestReadOrAutoPullKernelReturnsLocalWithoutPulling(t *testing.T) {

func TestReadOrAutoPullKernelErrorsWhenNotInCatalog(t *testing.T) {
d := &Daemon{layout: paths.Layout{KernelsDir: t.TempDir()}}
-_, err := d.imageSvc().readOrAutoPullKernel(context.Background(), "nonexistent-kernel")
+wireServices(d)
+_, err := d.img.readOrAutoPullKernel(context.Background(), "nonexistent-kernel")
if err == nil || !strings.Contains(err.Error(), "not found") {
t.Fatalf("err = %v, want not-found", err)
}

@@ -130,7 +135,8 @@ func TestReadOrAutoPullKernelSurfacesNonNotExistError(t *testing.T) {
t.Fatal(err)
}
d := &Daemon{layout: paths.Layout{KernelsDir: kernelsDir}}
-_, err := d.imageSvc().readOrAutoPullKernel(context.Background(), "broken-kernel")
+wireServices(d)
+_, err := d.img.readOrAutoPullKernel(context.Background(), "broken-kernel")
if err == nil {
t.Fatal("want error")
}

@@ -199,17 +199,17 @@ func (workDiskCapability) ContributeMachine(cfg *firecracker.MachineConfig, vm m
}

func (workDiskCapability) PrepareHost(ctx context.Context, d *Daemon, vm *model.VMRecord, image model.Image) error {
-prep, err := d.vmSvc().ensureWorkDisk(ctx, vm, image)
+prep, err := d.vm.ensureWorkDisk(ctx, vm, image)
if err != nil {
return err
}
-if err := d.workspaceSvc().ensureAuthorizedKeyOnWorkDisk(ctx, vm, image, prep); err != nil {
+if err := d.ws.ensureAuthorizedKeyOnWorkDisk(ctx, vm, image, prep); err != nil {
return err
}
-if err := d.workspaceSvc().ensureGitIdentityOnWorkDisk(ctx, vm); err != nil {
+if err := d.ws.ensureGitIdentityOnWorkDisk(ctx, vm); err != nil {
return err
}
-return d.workspaceSvc().runFileSync(ctx, vm)
+return d.ws.runFileSync(ctx, vm)
}

func (workDiskCapability) AddDoctorChecks(_ context.Context, d *Daemon, report *system.Report) {

@@ -234,11 +234,11 @@ type dnsCapability struct{}
func (dnsCapability) Name() string { return "dns" }

func (dnsCapability) PostStart(ctx context.Context, d *Daemon, vm model.VMRecord, _ model.Image) error {
-return d.hostNet().setDNS(ctx, vm.Name, vm.Runtime.GuestIP)
+return d.net.setDNS(ctx, vm.Name, vm.Runtime.GuestIP)
}

func (dnsCapability) Cleanup(_ context.Context, d *Daemon, vm model.VMRecord) error {
-return d.hostNet().removeDNS(vm.Runtime.DNSName)
+return d.net.removeDNS(vm.Runtime.DNSName)
}

func (dnsCapability) AddDoctorChecks(_ context.Context, _ *Daemon, report *system.Report) {

@@ -263,49 +263,49 @@ func (natCapability) AddStartPreflight(ctx context.Context, d *Daemon, checks *s
if !vm.Spec.NATEnabled {
return
}
-d.hostNet().addNATPrereqs(ctx, checks)
+d.net.addNATPrereqs(ctx, checks)
}

func (natCapability) PostStart(ctx context.Context, d *Daemon, vm model.VMRecord, _ model.Image) error {
if !vm.Spec.NATEnabled {
return nil
}
-return d.hostNet().ensureNAT(ctx, vm.Runtime.GuestIP, d.vmSvc().vmHandles(vm.ID).TapDevice, true)
+return d.net.ensureNAT(ctx, vm.Runtime.GuestIP, d.vm.vmHandles(vm.ID).TapDevice, true)
}

func (natCapability) Cleanup(ctx context.Context, d *Daemon, vm model.VMRecord) error {
if !vm.Spec.NATEnabled {
return nil
}
-tap := d.vmSvc().vmHandles(vm.ID).TapDevice
+tap := d.vm.vmHandles(vm.ID).TapDevice
if strings.TrimSpace(vm.Runtime.GuestIP) == "" || strings.TrimSpace(tap) == "" {
if d.logger != nil {
d.logger.Debug("skipping nat cleanup without runtime network handles", append(vmLogAttrs(vm), "guest_ip", vm.Runtime.GuestIP, "tap_device", tap)...)
}
return nil
}
-return d.hostNet().ensureNAT(ctx, vm.Runtime.GuestIP, tap, false)
+return d.net.ensureNAT(ctx, vm.Runtime.GuestIP, tap, false)
}

func (natCapability) ApplyConfigChange(ctx context.Context, d *Daemon, before, after model.VMRecord) error {
if before.Spec.NATEnabled == after.Spec.NATEnabled {
return nil
}
-if !d.vmSvc().vmAlive(after) {
+if !d.vm.vmAlive(after) {
return nil
}
-return d.hostNet().ensureNAT(ctx, after.Runtime.GuestIP, d.vmSvc().vmHandles(after.ID).TapDevice, after.Spec.NATEnabled)
+return d.net.ensureNAT(ctx, after.Runtime.GuestIP, d.vm.vmHandles(after.ID).TapDevice, after.Spec.NATEnabled)
}

func (natCapability) AddDoctorChecks(ctx context.Context, d *Daemon, report *system.Report) {
checks := system.NewPreflight()
checks.RequireCommand("ip", toolHint("ip"))
-d.hostNet().addNATPrereqs(ctx, checks)
+d.net.addNATPrereqs(ctx, checks)
if len(checks.Problems()) > 0 {
report.Add(system.CheckStatusFail, "feature nat", checks.Problems()...)
return
}
-uplink, err := d.hostNet().defaultUplink(ctx)
+uplink, err := d.net.defaultUplink(ctx)
if err != nil {
report.AddFail("feature nat", err.Error())
return

@@ -104,6 +104,7 @@ func TestPrepareCapabilityHostsRollsBackPreparedCapabilitiesInReverseOrder(t *te
},
},
}
+wireServices(d)

err := d.prepareCapabilityHosts(context.Background(), &vm, model.Image{})
if err == nil || err.Error() != "boom" {

@@ -128,6 +129,7 @@ func TestContributeHooksPopulateGuestAndMachineConfig(t *testing.T) {
},
},
}
+wireServices(d)

builder := guestconfig.NewBuilder()
d.contributeGuestConfig(builder, model.VMRecord{}, model.Image{})

@@ -146,6 +148,7 @@ func TestContributeHooksPopulateGuestAndMachineConfig(t *testing.T) {

func TestRegisteredCapabilitiesInOrder(t *testing.T) {
d := &Daemon{}
+wireServices(d)
var names []string
for _, capability := range d.registeredCapabilities() {
names = append(names, capability.Name())

@@ -76,6 +76,7 @@ func TestPullImageDoesNotSerialiseOnDifferentNames(t *testing.T) {
pullAndFlatten: slowPullAndFlatten,
finalizePulledRootfs: stubFinalizePulledRootfs,
}
+wireServices(d)

mkParams := func(name string) api.ImagePullParams {
return api.ImagePullParams{

@@ -162,6 +163,7 @@ func TestPullImageRejectsNameClashAtPublish(t *testing.T) {
pullAndFlatten: pullAndFlatten,
finalizePulledRootfs: stubFinalizePulledRootfs,
}
+wireServices(d)

params := api.ImagePullParams{
Ref: "example.invalid/contender:latest",

@@ -81,14 +81,8 @@ func Open(ctx context.Context) (d *Daemon, err error) {
logger: logger,
closing: closing,
pid: os.Getpid(),
-net: newHostNetwork(hostNetworkDeps{
-runner: runner,
-logger: logger,
-config: cfg,
-layout: layout,
-closing: closing,
-}),
}
+wireServices(d)
// From here on, every failure path must run Close() so the host
// state we touched (DNS listener goroutine, resolvectl routing,
// SQLite handle, future side effects) gets unwound. Close is

@@ -103,7 +97,7 @@ func Open(ctx context.Context) (d *Daemon, err error) {

d.ensureVMSSHClientConfig()
d.logger.Info("daemon opened", "socket", layout.SocketPath, "state_dir", layout.StateDir, "log_level", cfg.LogLevel)
-if err = d.hostNet().startVMDNS(vmdns.DefaultListenAddr); err != nil {
+if err = d.net.startVMDNS(vmdns.DefaultListenAddr); err != nil {
d.logger.Error("daemon open failed", "stage", "start_vm_dns", "error", err.Error())
return nil, err
}

@@ -111,7 +105,7 @@ func Open(ctx context.Context) (d *Daemon, err error) {
d.logger.Error("daemon open failed", "stage", "reconcile", "error", err.Error())
return nil, err
}
-d.hostNet().ensureVMDNSResolverRouting(ctx)
+d.net.ensureVMDNSResolverRouting(ctx)
// Seed HostNetwork's pool index from taps already claimed by VMs
// on disk so newly warmed pool entries don't collide with them.
if d.config.TapPoolSize > 0 && d.store != nil {

@@ -122,13 +116,13 @@ func Open(ctx context.Context) (d *Daemon, err error) {
}
used := make([]string, 0, len(vms))
for _, vm := range vms {
-if tap := d.vmSvc().vmHandles(vm.ID).TapDevice; tap != "" {
+if tap := d.vm.vmHandles(vm.ID).TapDevice; tap != "" {
used = append(used, tap)
}
}
-d.hostNet().initializeTapPool(used)
+d.net.initializeTapPool(used)
}
-go d.hostNet().ensureTapPool(context.Background())
+go d.net.ensureTapPool(context.Background())
return d, nil
}

@@ -142,7 +136,11 @@ func (d *Daemon) Close() error {
if d.listener != nil {
_ = d.listener.Close()
}
-err = errors.Join(d.hostNet().clearVMDNSResolverRouting(context.Background()), d.hostNet().stopVMDNS(), d.store.Close())
+var closeErr error
+if d.store != nil {
+closeErr = d.store.Close()
+}
+err = errors.Join(d.net.clearVMDNSResolverRouting(context.Background()), d.net.stopVMDNS(), closeErr)
})
return err
}

@@ -282,28 +280,28 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-vm, err := d.vmSvc().CreateVM(ctx, params)
+vm, err := d.vm.CreateVM(ctx, params)
return marshalResultOrError(api.VMShowResult{VM: vm}, err)
case "vm.create.begin":
params, err := rpc.DecodeParams[api.VMCreateParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-op, err := d.vmSvc().BeginVMCreate(ctx, params)
+op, err := d.vm.BeginVMCreate(ctx, params)
return marshalResultOrError(api.VMCreateBeginResult{Operation: op}, err)
case "vm.create.status":
params, err := rpc.DecodeParams[api.VMCreateStatusParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-op, err := d.vmSvc().VMCreateStatus(ctx, params.ID)
+op, err := d.vm.VMCreateStatus(ctx, params.ID)
return marshalResultOrError(api.VMCreateStatusResult{Operation: op}, err)
case "vm.create.cancel":
params, err := rpc.DecodeParams[api.VMCreateStatusParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-err = d.vmSvc().CancelVMCreate(ctx, params.ID)
+err = d.vm.CancelVMCreate(ctx, params.ID)
return marshalResultOrError(api.Empty{}, err)
case "vm.list":
vms, err := d.store.ListVMs(ctx)

@@ -313,63 +311,63 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-vm, err := d.vmSvc().FindVM(ctx, params.IDOrName)
+vm, err := d.vm.FindVM(ctx, params.IDOrName)
return marshalResultOrError(api.VMShowResult{VM: vm}, err)
case "vm.start":
params, err := rpc.DecodeParams[api.VMRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-vm, err := d.vmSvc().StartVM(ctx, params.IDOrName)
+vm, err := d.vm.StartVM(ctx, params.IDOrName)
return marshalResultOrError(api.VMShowResult{VM: vm}, err)
case "vm.stop":
params, err := rpc.DecodeParams[api.VMRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-vm, err := d.vmSvc().StopVM(ctx, params.IDOrName)
+vm, err := d.vm.StopVM(ctx, params.IDOrName)
return marshalResultOrError(api.VMShowResult{VM: vm}, err)
case "vm.kill":
params, err := rpc.DecodeParams[api.VMKillParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-vm, err := d.vmSvc().KillVM(ctx, params)
+vm, err := d.vm.KillVM(ctx, params)
return marshalResultOrError(api.VMShowResult{VM: vm}, err)
case "vm.restart":
params, err := rpc.DecodeParams[api.VMRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-vm, err := d.vmSvc().RestartVM(ctx, params.IDOrName)
+vm, err := d.vm.RestartVM(ctx, params.IDOrName)
return marshalResultOrError(api.VMShowResult{VM: vm}, err)
case "vm.delete":
params, err := rpc.DecodeParams[api.VMRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-vm, err := d.vmSvc().DeleteVM(ctx, params.IDOrName)
+vm, err := d.vm.DeleteVM(ctx, params.IDOrName)
return marshalResultOrError(api.VMShowResult{VM: vm}, err)
case "vm.set":
params, err := rpc.DecodeParams[api.VMSetParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-vm, err := d.vmSvc().SetVM(ctx, params)
+vm, err := d.vm.SetVM(ctx, params)
return marshalResultOrError(api.VMShowResult{VM: vm}, err)
case "vm.stats":
params, err := rpc.DecodeParams[api.VMRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-vm, stats, err := d.vmSvc().GetVMStats(ctx, params.IDOrName)
+vm, stats, err := d.vm.GetVMStats(ctx, params.IDOrName)
return marshalResultOrError(api.VMStatsResult{VM: vm, Stats: stats}, err)
case "vm.logs":
params, err := rpc.DecodeParams[api.VMRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-vm, err := d.vmSvc().FindVM(ctx, params.IDOrName)
+vm, err := d.vm.FindVM(ctx, params.IDOrName)
if err != nil {
return rpc.NewError("not_found", err.Error())
}

@@ -379,11 +377,11 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-vm, err := d.vmSvc().TouchVM(ctx, params.IDOrName)
+vm, err := d.vm.TouchVM(ctx, params.IDOrName)
if err != nil {
return rpc.NewError("not_found", err.Error())
}
-if !d.vmSvc().vmAlive(vm) {
+if !d.vm.vmAlive(vm) {
return rpc.NewError("not_running", fmt.Sprintf("vm %s is not running", vm.Name))
}
return marshalResultOrError(api.VMSSHResult{Name: vm.Name, GuestIP: vm.Runtime.GuestIP}, nil)

@@ -392,35 +390,35 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-result, err := d.vmSvc().HealthVM(ctx, params.IDOrName)
+result, err := d.vm.HealthVM(ctx, params.IDOrName)
return marshalResultOrError(result, err)
case "vm.ping":
params, err := rpc.DecodeParams[api.VMRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-result, err := d.vmSvc().PingVM(ctx, params.IDOrName)
+result, err := d.vm.PingVM(ctx, params.IDOrName)
return marshalResultOrError(result, err)
case "vm.ports":
params, err := rpc.DecodeParams[api.VMRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-result, err := d.vmSvc().PortsVM(ctx, params.IDOrName)
+result, err := d.vm.PortsVM(ctx, params.IDOrName)
return marshalResultOrError(result, err)
case "vm.workspace.prepare":
params, err := rpc.DecodeParams[api.VMWorkspacePrepareParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-workspace, err := d.workspaceSvc().PrepareVMWorkspace(ctx, params)
+workspace, err := d.ws.PrepareVMWorkspace(ctx, params)
return marshalResultOrError(api.VMWorkspacePrepareResult{Workspace: workspace}, err)
case "vm.workspace.export":
params, err := rpc.DecodeParams[api.WorkspaceExportParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-result, err := d.workspaceSvc().ExportVMWorkspace(ctx, params)
+result, err := d.ws.ExportVMWorkspace(ctx, params)
return marshalResultOrError(result, err)
case "image.list":
images, err := d.store.ListImages(ctx)

@@ -430,68 +428,68 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-image, err := d.imageSvc().FindImage(ctx, params.IDOrName)
+image, err := d.img.FindImage(ctx, params.IDOrName)
return marshalResultOrError(api.ImageShowResult{Image: image}, err)
case "image.register":
params, err := rpc.DecodeParams[api.ImageRegisterParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-image, err := d.imageSvc().RegisterImage(ctx, params)
+image, err := d.img.RegisterImage(ctx, params)
return marshalResultOrError(api.ImageShowResult{Image: image}, err)
case "image.promote":
params, err := rpc.DecodeParams[api.ImageRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-image, err := d.imageSvc().PromoteImage(ctx, params.IDOrName)
+image, err := d.img.PromoteImage(ctx, params.IDOrName)
return marshalResultOrError(api.ImageShowResult{Image: image}, err)
case "image.delete":
params, err := rpc.DecodeParams[api.ImageRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-image, err := d.imageSvc().DeleteImage(ctx, params.IDOrName)
+image, err := d.img.DeleteImage(ctx, params.IDOrName)
return marshalResultOrError(api.ImageShowResult{Image: image}, err)
case "image.pull":
params, err := rpc.DecodeParams[api.ImagePullParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-image, err := d.imageSvc().PullImage(ctx, params)
+image, err := d.img.PullImage(ctx, params)
return marshalResultOrError(api.ImageShowResult{Image: image}, err)
case "kernel.list":
-return marshalResultOrError(d.imageSvc().KernelList(ctx))
+return marshalResultOrError(d.img.KernelList(ctx))
case "kernel.show":
params, err := rpc.DecodeParams[api.KernelRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-entry, err := d.imageSvc().KernelShow(ctx, params.Name)
+entry, err := d.img.KernelShow(ctx, params.Name)
return marshalResultOrError(api.KernelShowResult{Entry: entry}, err)
case "kernel.delete":
params, err := rpc.DecodeParams[api.KernelRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-err = d.imageSvc().KernelDelete(ctx, params.Name)
+err = d.img.KernelDelete(ctx, params.Name)
return marshalResultOrError(api.Empty{}, err)
case "kernel.import":
params, err := rpc.DecodeParams[api.KernelImportParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-entry, err := d.imageSvc().KernelImport(ctx, params)
+entry, err := d.img.KernelImport(ctx, params)
return marshalResultOrError(api.KernelShowResult{Entry: entry}, err)
case "kernel.pull":
params, err := rpc.DecodeParams[api.KernelPullParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
-entry, err := d.imageSvc().KernelPull(ctx, params)
+entry, err := d.img.KernelPull(ctx, params)
return marshalResultOrError(api.KernelShowResult{Entry: entry}, err)
case "kernel.catalog":
-return marshalResultOrError(d.imageSvc().KernelCatalog(ctx))
+return marshalResultOrError(d.img.KernelCatalog(ctx))
default:
return rpc.NewError("unknown_method", req.Method)
}

@@ -507,14 +505,14 @@ func (d *Daemon) backgroundLoop() {
case <-d.closing:
return
case <-statsTicker.C:
-if err := d.vmSvc().pollStats(context.Background()); err != nil && d.logger != nil {
+if err := d.vm.pollStats(context.Background()); err != nil && d.logger != nil {
d.logger.Error("background stats poll failed", "error", err.Error())
}
case <-staleTicker.C:
-if err := d.vmSvc().stopStaleVMs(context.Background()); err != nil && d.logger != nil {
+if err := d.vm.stopStaleVMs(context.Background()); err != nil && d.logger != nil {
d.logger.Error("background stale sweep failed", "error", err.Error())
}
-d.vmSvc().pruneVMCreateOperations(time.Now().Add(-10 * time.Minute))
+d.vm.pruneVMCreateOperations(time.Now().Add(-10 * time.Minute))
}
}
}

@@ -531,18 +529,18 @@ func (d *Daemon) reconcile(ctx context.Context) error {
return op.fail(err)
}
for _, vm := range vms {
-if err := d.vmSvc().withVMLockByIDErr(ctx, vm.ID, func(vm model.VMRecord) error {
+if err := d.vm.withVMLockByIDErr(ctx, vm.ID, func(vm model.VMRecord) error {
if vm.State != model.VMStateRunning {
// Belt-and-braces: a stopped VM should never have a
// scratch file or a cache entry. Clean up anything
// left by an ungraceful previous daemon crash.
-d.vmSvc().clearVMHandles(vm)
+d.vm.clearVMHandles(vm)
return nil
}
// Rebuild the in-memory handle cache by loading the per-VM
// scratch file and verifying the firecracker process is
// still alive.
-h, alive, err := d.vmSvc().rediscoverHandles(ctx, vm)
+h, alive, err := d.vm.rediscoverHandles(ctx, vm)
if err != nil && d.logger != nil {
d.logger.Warn("rediscover handles failed", "vm_id", vm.ID, "error", err.Error())
}

@@ -550,22 +548,22 @@ func (d *Daemon) reconcile(ctx context.Context) error {
// claimed. If alive, subsequent vmAlive() calls pass; if
// not, cleanupRuntime needs these handles to know which
// kernel resources (DM / loops / tap) to tear down.
-d.vmSvc().setVMHandlesInMemory(vm.ID, h)
+d.vm.setVMHandlesInMemory(vm.ID, h)
if alive {
return nil
}
op.stage("stale_vm", vmLogAttrs(vm)...)
-_ = d.vmSvc().cleanupRuntime(ctx, vm, true)
+_ = d.vm.cleanupRuntime(ctx, vm, true)
vm.State = model.VMStateStopped
vm.Runtime.State = model.VMStateStopped
-d.vmSvc().clearVMHandles(vm)
+d.vm.clearVMHandles(vm)
vm.UpdatedAt = model.Now()
return d.store.UpsertVM(ctx, vm)
}); err != nil {
return op.fail(err, "vm_id", vm.ID)
}
}
-if err := d.vmSvc().rebuildDNS(ctx); err != nil {
+if err := d.vm.rebuildDNS(ctx); err != nil {
return op.fail(err)
}
op.done()

@@ -576,18 +574,94 @@ func (d *Daemon) reconcile(ctx context.Context) error {
// Dispatch code reads the facade directly; tests that pre-date the
// service split keep compiling.
func (d *Daemon) FindVM(ctx context.Context, idOrName string) (model.VMRecord, error) {
-return d.vmSvc().FindVM(ctx, idOrName)
+return d.vm.FindVM(ctx, idOrName)
}

// FindImage stays on Daemon as a thin forwarder to the image service
// lookup so callers reading dispatch code see the obvious facade, and
// tests that pre-date the service split still compile.
func (d *Daemon) FindImage(ctx context.Context, idOrName string) (model.Image, error) {
-return d.imageSvc().FindImage(ctx, idOrName)
+return d.img.FindImage(ctx, idOrName)
}

func (d *Daemon) TouchVM(ctx context.Context, idOrName string) (model.VMRecord, error) {
-return d.vmSvc().TouchVM(ctx, idOrName)
+return d.vm.TouchVM(ctx, idOrName)
+}

+// wireServices populates the four focused services and their peer
+// references from the infrastructure already on d (runner, logger,
+// config, layout, store, closing, plus the SSH-client test seams).
+// Idempotent: each service is skipped if the field is already non-nil,
+// so tests can preinstall stubs for the services they want to fake and
+// let wireServices fill the rest. The peer-service closures on
+// WorkspaceService capture d rather than a direct *VMService pointer so
+// the ws↔vm construction order doesn't recurse: the closures read d.vm
+// at call time, by which point it is populated.
+func wireServices(d *Daemon) {
+if d.net == nil {
+d.net = newHostNetwork(hostNetworkDeps{
+runner: d.runner,
+logger: d.logger,
+config: d.config,
+layout: d.layout,
+closing: d.closing,
+})
+}
+if d.img == nil {
+d.img = newImageService(imageServiceDeps{
+runner: d.runner,
+logger: d.logger,
+config: d.config,
+layout: d.layout,
+store: d.store,
+beginOperation: func(name string, attrs ...any) *operationLog {
+return d.beginOperation(name, attrs...)
+},
+})
+}
+if d.ws == nil {
+d.ws = newWorkspaceService(workspaceServiceDeps{
+runner: d.runner,
+logger: d.logger,
+config: d.config,
+layout: d.layout,
+store: d.store,
+vmResolver: func(ctx context.Context, idOrName string) (model.VMRecord, error) {
+return d.vm.FindVM(ctx, idOrName)
+},
+aliveChecker: func(vm model.VMRecord) bool {
+return d.vm.vmAlive(vm)
+},
+waitGuestSSH: d.waitForGuestSSH,
+dialGuest: d.dialGuest,
+imageResolver: func(ctx context.Context, idOrName string) (model.Image, error) {
+return d.FindImage(ctx, idOrName)
+},
+imageWorkSeed: func(ctx context.Context, image model.Image, fingerprint string) error {
+return d.img.refreshManagedWorkSeedFingerprint(ctx, image, fingerprint)
+},
+withVMLockByRef: func(ctx context.Context, idOrName string, fn func(model.VMRecord) (model.VMRecord, error)) (model.VMRecord, error) {
+return d.vm.withVMLockByRef(ctx, idOrName, fn)
+},
+beginOperation: d.beginOperation,
+})
+}
+if d.vm == nil {
+d.vm = newVMService(vmServiceDeps{
+runner: d.runner,
+logger: d.logger,
+config: d.config,
+layout: d.layout,
+store: d.store,
+net: d.net,
+img: d.img,
+ws: d.ws,
+guestWaitForSSH: d.guestWaitForSSH,
+guestDial: d.guestDial,
+capHooks: d.buildCapabilityHooks(),
+beginOperation: d.beginOperation,
+})
+}
}

func marshalResultOrError(v any, err error) rpc.Response {

@@ -22,8 +22,9 @@ func TestRegisterImageRequiresKernel(t *testing.T) {
t.Fatalf("write rootfs: %v", err)
}
d := &Daemon{store: openDaemonStore(t)}
+wireServices(d)

-_, err := d.imageSvc().RegisterImage(context.Background(), api.ImageRegisterParams{
+_, err := d.img.RegisterImage(context.Background(), api.ImageRegisterParams{
Name: "missing-kernel",
RootfsPath: rootfs,
})

@@ -34,6 +35,7 @@ func TestRegisterImageRequiresKernel(t *testing.T) {

func TestDispatchPingIncludesBuildInfo(t *testing.T) {
d := &Daemon{pid: 42}
+wireServices(d)

resp := d.dispatch(context.Background(), rpc.Request{Version: rpc.Version, Method: "ping"})
if !resp.OK {

@@ -100,7 +102,8 @@ func TestPromoteImageCopiesBootArtifactsIntoArtifactDir(t *testing.T) {
store: db,
runner: system.NewRunner(),
}
-got, err := d.imageSvc().PromoteImage(context.Background(), image.Name)
+wireServices(d)
+got, err := d.img.PromoteImage(context.Background(), image.Name)
if err != nil {
t.Fatalf("PromoteImage: %v", err)
}

@@ -24,16 +24,17 @@ func Doctor(ctx context.Context) (system.Report, error) {
if err != nil {
return system.Report{}, err
}
+db, storeErr := store.Open(layout.DBPath)
d := &Daemon{
layout: layout,
config: cfg,
runner: system.NewRunner(),
}
-db, storeErr := store.Open(layout.DBPath)
if storeErr == nil {
defer db.Close()
d.store = db
}
+wireServices(d)
return d.doctorReport(ctx, storeErr), nil
}

@@ -167,7 +168,7 @@ func defaultImageInCatalog(name string) bool {

func (d *Daemon) coreVMLifecycleChecks() *system.Preflight {
checks := system.NewPreflight()
-d.vmSvc().addBaseStartCommandPrereqs(checks)
+d.vm.addBaseStartCommandPrereqs(checks)
return checks
}

@@ -33,13 +33,14 @@ func TestEnsureWorkDiskClonesSeedImageAndResizes(t *testing.T) {
},
}
d := &Daemon{runner: runner}
+wireServices(d)
vm := testVM("seeded", "image-seeded", "172.16.0.60")
vm.Runtime.WorkDiskPath = workDiskPath
vm.Spec.WorkDiskSizeBytes = 2 * 1024 * 1024
image := testImage("image-seeded")
image.WorkSeedPath = seedPath

-if _, err := d.vmSvc().ensureWorkDisk(context.Background(), &vm, image); err != nil {
+if _, err := d.vm.ensureWorkDisk(context.Background(), &vm, image); err != nil {
t.Fatalf("ensureWorkDisk: %v", err)
}
runner.assertExhausted()

@@ -74,19 +75,20 @@ func TestTapPoolWarmsAndReusesIdleTap(t *testing.T) {
},
closing: make(chan struct{}),
}
+wireServices(d)

-d.hostNet().ensureTapPool(context.Background())
-tapName, err := d.hostNet().acquireTap(context.Background(), "tap-fallback")
+d.net.ensureTapPool(context.Background())
+tapName, err := d.net.acquireTap(context.Background(), "tap-fallback")
if err != nil {
t.Fatalf("acquireTap: %v", err)
}
if tapName != "tap-pool-0" {
t.Fatalf("tapName = %q, want tap-pool-0", tapName)
}
-if err := d.hostNet().releaseTap(context.Background(), tapName); err != nil {
+if err := d.net.releaseTap(context.Background(), tapName); err != nil {
t.Fatalf("releaseTap: %v", err)
}
-tapName, err = d.hostNet().acquireTap(context.Background(), "tap-fallback")
+tapName, err = d.net.acquireTap(context.Background(), "tap-fallback")
if err != nil {
t.Fatalf("acquireTap second time: %v", err)
}

@@ -121,11 +123,12 @@ func TestEnsureAuthorizedKeyOnWorkDiskSkipsRepairForMatchingSeededFingerprint(t
runner: runner,
config: model.DaemonConfig{SSHKeyPath: sshKeyPath},
}
+wireServices(d)
vm := testVM("seeded-fastpath", "image-seeded-fastpath", "172.16.0.62")
vm.Runtime.WorkDiskPath = filepath.Join(t.TempDir(), "root.ext4")
image := model.Image{SeededSSHPublicKeyFingerprint: fingerprint}

-if err := d.workspaceSvc().ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm, image, workDiskPreparation{ClonedFromSeed: true}); err != nil {
+if err := d.ws.ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm, image, workDiskPreparation{ClonedFromSeed: true}); err != nil {
t.Fatalf("ensureAuthorizedKeyOnWorkDisk: %v", err)
}
runner.assertExhausted()

@@ -64,27 +64,6 @@ func newHostNetwork(deps hostNetworkDeps) *HostNetwork {
}
}

-// hostNet returns the HostNetwork service, lazily constructing it from
-// the Daemon's current fields if a test literal didn't wire one up.
-// Production paths go through Daemon.Open, which always populates d.net
-// eagerly; this lazy path exists only so tests that build `&Daemon{...}`
-// literals without spelling out a HostNetwork don't have to learn the
-// new construction pattern. Every call from production code that
-// touches HostNetwork funnels through here.
-func (d *Daemon) hostNet() *HostNetwork {
-if d.net != nil {
-return d.net
-}
-d.net = newHostNetwork(hostNetworkDeps{
-runner: d.runner,
-logger: d.logger,
-config: d.config,
-layout: d.layout,
-closing: d.closing,
-})
-return d.net
-}

// --- DNS server lifecycle -------------------------------------------

func (n *HostNetwork) startVMDNS(addr string) error {

@@ -100,7 +79,7 @@ func (n *HostNetwork) startVMDNS(addr string) error {
}

func (n *HostNetwork) stopVMDNS() error {
-if n.vmDNS == nil {
+if n == nil || n.vmDNS == nil {
return nil
}
err := n.vmDNS.Close()

@@ -106,24 +106,3 @@ func (s *ImageService) FindImage(ctx context.Context, idOrName string) (model.Im
}
return model.Image{}, fmt.Errorf("image %q not found", idOrName)
}
-
-// imageSvc is the Daemon-side getter that lazy-inits ImageService from
-// current Daemon fields. Mirrors hostNet() so test literals can keep
-// using `&Daemon{store: db, runner: r, ...}` and still end up with a
-// working ImageService.
-func (d *Daemon) imageSvc() *ImageService {
-if d.img != nil {
-return d.img
-}
-d.img = newImageService(imageServiceDeps{
-runner: d.runner,
-logger: d.logger,
-config: d.config,
-layout: d.layout,
-store: d.store,
-beginOperation: func(name string, attrs ...any) *operationLog {
-return d.beginOperation(name, attrs...)
-},
-})
-return d.img
-}

@@ -73,6 +73,7 @@ func TestPullImageBundlePathRegistersFromCatalog(t *testing.T) {
runner: d.runner,
bundleFetch: stubBundleFetch(imagecat.Manifest{KernelRef: "generic-6.12"}),
}
+wireServices(d)

entry := imagecat.CatEntry{
Name: "debian-bookworm",

@@ -126,6 +127,7 @@ func TestPullImageBundlePathOverrideNameAndKernelRef(t *testing.T) {
runner: d.runner,
bundleFetch: stubBundleFetch(imagecat.Manifest{KernelRef: "generic-6.12"}),
}
+wireServices(d)

entry := imagecat.CatEntry{
Name: "debian-bookworm", Arch: "x86_64",

@@ -167,6 +169,7 @@ func TestPullImageBundlePathRejectsExistingName(t *testing.T) {
runner: d.runner,
bundleFetch: stubBundleFetch(imagecat.Manifest{KernelRef: "generic-6.12"}),
}
+wireServices(d)
id, _ := model.NewID()
if err := d.store.UpsertImage(context.Background(), model.Image{
ID: id, Name: "debian-bookworm",

@@ -196,6 +199,7 @@ func TestPullImageBundlePathRequiresSomeKernelSource(t *testing.T) {
runner: d.runner,
bundleFetch: stubBundleFetch(imagecat.Manifest{}),
}
+wireServices(d)
// Catalog entry has no kernel_ref, no --kernel-ref/--kernel passed.
_, err := d.img.pullFromBundle(context.Background(), api.ImagePullParams{Ref: "x"}, imagecat.CatEntry{
Name: "x", TarballURL: "https://example.com/x.tar.zst", TarballSHA256: "abc",

@@ -223,6 +227,7 @@ func TestPullImageBundleFetchFailurePropagates(t *testing.T) {
return imagecat.Manifest{}, errors.New("r2 exploded")
},
}
+wireServices(d)
_, err := d.img.pullFromBundle(context.Background(), api.ImagePullParams{Ref: "x"}, imagecat.CatEntry{
Name: "x", KernelRef: "generic-6.12",
TarballURL: "https://example.com/x.tar.zst", TarballSHA256: "abc",

@@ -262,6 +267,7 @@ func TestPullImageDispatchFallsThroughToOCIWhenNoCatalogHit(t *testing.T) {
finalizePulledRootfs: stubFinalizePulledRootfs,
bundleFetch: stubBundleFetch(imagecat.Manifest{}),
}
+wireServices(d)

_, err := d.img.PullImage(context.Background(), api.ImagePullParams{
// Not a catalog name (catalog is empty in the embedded default).

@@ -82,6 +82,7 @@ func TestPullImageHappyPath(t *testing.T) {
 		pullAndFlatten:       stubPullAndFlatten,
 		finalizePulledRootfs: stubFinalizePulledRootfs,
 	}
+	wireServices(d)

 	image, err := d.img.PullImage(context.Background(), api.ImagePullParams{
 		Ref: "docker.io/library/debian:bookworm",

@@ -132,6 +133,7 @@ func TestPullImageRejectsExistingName(t *testing.T) {
 		pullAndFlatten:       stubPullAndFlatten,
 		finalizePulledRootfs: stubFinalizePulledRootfs,
 	}
+	wireServices(d)
 	// Seed a preexisting image with the would-be derived name.
 	id, _ := model.NewID()
 	if err := d.store.UpsertImage(context.Background(), model.Image{

@@ -165,6 +167,7 @@ func TestPullImageRequiresKernel(t *testing.T) {
 		pullAndFlatten:       stubPullAndFlatten,
 		finalizePulledRootfs: stubFinalizePulledRootfs,
 	}
+	wireServices(d)
 	_, err := d.img.PullImage(context.Background(), api.ImagePullParams{
 		Ref: "docker.io/library/debian:bookworm",
 	})

@@ -192,6 +195,7 @@ func TestPullImageCleansStagingOnFailure(t *testing.T) {
 		pullAndFlatten:       failureSeam,
 		finalizePulledRootfs: stubFinalizePulledRootfs,
 	}
+	wireServices(d)
 	_, err := d.img.PullImage(context.Background(), api.ImagePullParams{
 		Ref:        "docker.io/library/debian:bookworm",
 		KernelPath: kernel,
@@ -38,7 +38,8 @@ func TestKernelListReturnsSeededEntries(t *testing.T) {
 	seedKernelEntry(t, kernelsDir, "alpine-3.23")

 	d := &Daemon{layout: paths.Layout{KernelsDir: kernelsDir}}
-	result, err := d.imageSvc().KernelList(context.Background())
+	wireServices(d)
+	result, err := d.img.KernelList(context.Background())
 	if err != nil {
 		t.Fatalf("KernelList: %v", err)
 	}

@@ -59,6 +60,7 @@ func TestKernelShowAndDeleteThroughDispatch(t *testing.T) {
 	seedKernelEntry(t, kernelsDir, "void-6.12")

 	d := &Daemon{layout: paths.Layout{KernelsDir: kernelsDir}}
+	wireServices(d)

 	showParams, _ := json.Marshal(api.KernelRefParams{Name: "void-6.12"})
 	resp := d.dispatch(context.Background(), rpc.Request{Version: rpc.Version, Method: "kernel.show", Params: showParams})

@@ -86,7 +88,8 @@ func TestKernelShowAndDeleteThroughDispatch(t *testing.T) {

 func TestKernelShowMissingEntry(t *testing.T) {
 	d := &Daemon{layout: paths.Layout{KernelsDir: t.TempDir()}}
-	_, err := d.imageSvc().KernelShow(context.Background(), "nope")
+	wireServices(d)
+	_, err := d.img.KernelShow(context.Background(), "nope")
 	if err == nil || !strings.Contains(err.Error(), "not found") {
 		t.Fatalf("KernelShow missing: err=%v", err)
 	}

@@ -94,7 +97,8 @@ func TestKernelShowMissingEntry(t *testing.T) {

 func TestKernelDeleteRejectsInvalidName(t *testing.T) {
 	d := &Daemon{layout: paths.Layout{KernelsDir: t.TempDir()}}
-	if err := d.imageSvc().KernelDelete(context.Background(), "../escape"); err == nil {
+	wireServices(d)
+	if err := d.img.KernelDelete(context.Background(), "../escape"); err == nil {
 		t.Fatalf("KernelDelete should reject traversal")
 	}
 }

@@ -112,8 +116,9 @@ func TestRegisterImageResolvesKernelRef(t *testing.T) {
 		layout: paths.Layout{KernelsDir: kernelsDir},
 		store:  openDaemonStore(t),
 	}
+	wireServices(d)

-	image, err := d.imageSvc().RegisterImage(context.Background(), api.ImageRegisterParams{
+	image, err := d.img.RegisterImage(context.Background(), api.ImageRegisterParams{
 		Name:       "testbox",
 		RootfsPath: rootfs,
 		KernelRef:  "void-6.12",

@@ -139,7 +144,8 @@ func TestRegisterImageRejectsKernelRefAndPath(t *testing.T) {
 		layout: paths.Layout{KernelsDir: kernelsDir},
 		store:  openDaemonStore(t),
 	}
-	_, err := d.imageSvc().RegisterImage(context.Background(), api.ImageRegisterParams{
+	wireServices(d)
+	_, err := d.img.RegisterImage(context.Background(), api.ImageRegisterParams{
 		Name:       "testbox",
 		RootfsPath: rootfs,
 		KernelRef:  "void-6.12",

@@ -174,8 +180,9 @@ func TestKernelImportCopiesArtifactsAndWritesManifest(t *testing.T) {
 		layout: paths.Layout{KernelsDir: kernelsDir},
 		runner: system.NewRunner(),
 	}
+	wireServices(d)

-	entry, err := d.imageSvc().KernelImport(context.Background(), api.KernelImportParams{
+	entry, err := d.img.KernelImport(context.Background(), api.KernelImportParams{
 		Name:    "void-6.12",
 		FromDir: src,
 		Distro:  "void",

@@ -210,7 +217,8 @@ func TestKernelPullRejectsUnknownCatalogEntry(t *testing.T) {
 		layout: paths.Layout{KernelsDir: t.TempDir()},
 		runner: system.NewRunner(),
 	}
-	_, err := d.imageSvc().KernelPull(context.Background(), api.KernelPullParams{Name: "unknown"})
+	wireServices(d)
+	_, err := d.img.KernelPull(context.Background(), api.KernelPullParams{Name: "unknown"})
 	if err == nil || !strings.Contains(err.Error(), "not in catalog") {
 		t.Fatalf("KernelPull unknown: err=%v", err)
 	}

@@ -224,7 +232,8 @@ func TestKernelPullRefusesOverwriteWithoutForce(t *testing.T) {
 		layout: paths.Layout{KernelsDir: kernelsDir},
 		runner: system.NewRunner(),
 	}
-	_, err := d.imageSvc().KernelPull(context.Background(), api.KernelPullParams{Name: "void-6.12"})
+	wireServices(d)
+	_, err := d.img.KernelPull(context.Background(), api.KernelPullParams{Name: "void-6.12"})
 	if err == nil || !strings.Contains(err.Error(), "already pulled") {
 		t.Fatalf("KernelPull without --force: err=%v", err)
 	}

@@ -232,7 +241,8 @@ func TestKernelPullRefusesOverwriteWithoutForce(t *testing.T) {

 func TestKernelCatalogReportsPulledStatus(t *testing.T) {
 	d := &Daemon{layout: paths.Layout{KernelsDir: t.TempDir()}}
-	result, err := d.imageSvc().KernelCatalog(context.Background())
+	wireServices(d)
+	result, err := d.img.KernelCatalog(context.Background())
 	if err != nil {
 		t.Fatalf("KernelCatalog: %v", err)
 	}

@@ -247,7 +257,8 @@ func TestKernelImportRejectsMissingFromDir(t *testing.T) {
 		layout: paths.Layout{KernelsDir: t.TempDir()},
 		runner: system.NewRunner(),
 	}
-	_, err := d.imageSvc().KernelImport(context.Background(), api.KernelImportParams{Name: "x"})
+	wireServices(d)
+	_, err := d.img.KernelImport(context.Background(), api.KernelImportParams{Name: "x"})
 	if err == nil || !strings.Contains(err.Error(), "--from") {
 		t.Fatalf("KernelImport without --from: err=%v", err)
 	}

@@ -262,7 +273,8 @@ func TestRegisterImageMissingKernelRef(t *testing.T) {
 		layout: paths.Layout{KernelsDir: t.TempDir()},
 		store:  openDaemonStore(t),
 	}
-	_, err := d.imageSvc().RegisterImage(context.Background(), api.ImageRegisterParams{
+	wireServices(d)
+	_, err := d.img.RegisterImage(context.Background(), api.ImageRegisterParams{
 		Name:       "testbox",
 		RootfsPath: rootfs,
 		KernelRef:  "never-imported",
@@ -114,8 +114,9 @@ func TestStartVMLockedLogsBridgeFailure(t *testing.T) {
 		runner: runner,
 		logger: logger,
 	}
+	wireServices(d)

-	_, err = d.vmSvc().startVMLocked(ctx, vm, image)
+	_, err = d.vm.startVMLocked(ctx, vm, image)
 	if err == nil || !strings.Contains(err.Error(), "bridge up failed") {
 		t.Fatalf("startVMLocked() error = %v, want bridge failure", err)
 	}
@@ -55,7 +55,7 @@ func TestCloseOnPartiallyInitialisedDaemon(t *testing.T) {
 				}
 			},
 			verify: func(t *testing.T, d *Daemon) {
-				if d.hostNet().vmDNS != nil {
+				if d.net.vmDNS != nil {
 					t.Error("vmDNS not cleared by Close")
 				}
 			},

@@ -89,6 +89,7 @@ func TestCloseIdempotentUnderConcurrency(t *testing.T) {
 		logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
 		config: model.DaemonConfig{BridgeName: ""},
 	}
+	wireServices(d)

 	var count atomic.Int32
 	done := make(chan struct{})
@@ -73,8 +73,9 @@ func TestCreateDMSnapshotFailsWithoutRollbackWhenBaseLoopSetupFails(t *testing.T
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)

-	_, err := d.hostNet().createDMSnapshot(context.Background(), "/rootfs.ext4", "/cow.ext4", "fc-rootfs-test")
+	_, err := d.net.createDMSnapshot(context.Background(), "/rootfs.ext4", "/cow.ext4", "fc-rootfs-test")
 	if !errors.Is(err, attachErr) {
 		t.Fatalf("error = %v, want %v", err, attachErr)
 	}

@@ -97,8 +98,9 @@ func TestCreateDMSnapshotRollsBackBaseLoopWhenCowLoopSetupFails(t *testing.T) {
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)

-	_, err := d.hostNet().createDMSnapshot(context.Background(), "/rootfs.ext4", "/cow.ext4", "fc-rootfs-test")
+	_, err := d.net.createDMSnapshot(context.Background(), "/rootfs.ext4", "/cow.ext4", "fc-rootfs-test")
 	if !errors.Is(err, attachErr) {
 		t.Fatalf("error = %v, want %v", err, attachErr)
 	}

@@ -120,8 +122,9 @@ func TestCreateDMSnapshotRollsBackBothLoopsWhenBlockdevFails(t *testing.T) {
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)

-	_, err := d.hostNet().createDMSnapshot(context.Background(), "/rootfs.ext4", "/cow.ext4", "fc-rootfs-test")
+	_, err := d.net.createDMSnapshot(context.Background(), "/rootfs.ext4", "/cow.ext4", "fc-rootfs-test")
 	if !errors.Is(err, blockdevErr) {
 		t.Fatalf("error = %v, want %v", err, blockdevErr)
 	}

@@ -144,8 +147,9 @@ func TestCreateDMSnapshotRollsBackLoopsWhenDMSetupFails(t *testing.T) {
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)

-	_, err := d.hostNet().createDMSnapshot(context.Background(), "/rootfs.ext4", "/cow.ext4", "fc-rootfs-test")
+	_, err := d.net.createDMSnapshot(context.Background(), "/rootfs.ext4", "/cow.ext4", "fc-rootfs-test")
 	if !errors.Is(err, dmErr) {
 		t.Fatalf("error = %v, want %v", err, dmErr)
 	}

@@ -173,8 +177,9 @@ func TestCreateDMSnapshotJoinsRollbackErrors(t *testing.T) {
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)

-	_, err := d.hostNet().createDMSnapshot(context.Background(), "/rootfs.ext4", "/cow.ext4", "fc-rootfs-test")
+	_, err := d.net.createDMSnapshot(context.Background(), "/rootfs.ext4", "/cow.ext4", "fc-rootfs-test")
 	if err == nil {
 		t.Fatal("expected createDMSnapshot to return an error")
 	}

@@ -197,8 +202,9 @@ func TestCreateDMSnapshotReturnsHandlesOnSuccess(t *testing.T) {
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)

-	handles, err := d.hostNet().createDMSnapshot(context.Background(), "/rootfs.ext4", "/cow.ext4", "fc-rootfs-test")
+	handles, err := d.net.createDMSnapshot(context.Background(), "/rootfs.ext4", "/cow.ext4", "fc-rootfs-test")
 	if err != nil {
 		t.Fatalf("createDMSnapshot returned error: %v", err)
 	}

@@ -226,8 +232,9 @@ func TestCleanupDMSnapshotRemovesResourcesInReverseOrder(t *testing.T) {
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)

-	err := d.hostNet().cleanupDMSnapshot(context.Background(), dmSnapshotHandles{
+	err := d.net.cleanupDMSnapshot(context.Background(), dmSnapshotHandles{
 		BaseLoop: "/dev/loop10",
 		COWLoop:  "/dev/loop11",
 		DMName:   "fc-rootfs-test",

@@ -250,8 +257,9 @@ func TestCleanupDMSnapshotUsesPartialHandles(t *testing.T) {
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)

-	err := d.hostNet().cleanupDMSnapshot(context.Background(), dmSnapshotHandles{
+	err := d.net.cleanupDMSnapshot(context.Background(), dmSnapshotHandles{
 		BaseLoop: "/dev/loop10",
 		DMDev:    "/dev/mapper/fc-rootfs-test",
 	})

@@ -276,8 +284,9 @@ func TestCleanupDMSnapshotJoinsTeardownErrors(t *testing.T) {
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)

-	err := d.hostNet().cleanupDMSnapshot(context.Background(), dmSnapshotHandles{
+	err := d.net.cleanupDMSnapshot(context.Background(), dmSnapshotHandles{
 		BaseLoop: "/dev/loop10",
 		COWLoop:  "/dev/loop11",
 		DMName:   "fc-rootfs-test",

@@ -306,8 +315,9 @@ func TestRemoveDMSnapshotRetriesBusyDevice(t *testing.T) {
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)

-	if err := d.hostNet().removeDMSnapshot(context.Background(), "fc-rootfs-test"); err != nil {
+	if err := d.net.removeDMSnapshot(context.Background(), "fc-rootfs-test"); err != nil {
 		t.Fatalf("removeDMSnapshot returned error: %v", err)
 	}
 	runner.assertExhausted()
@@ -27,6 +27,7 @@ func TestReserveVMAllowsNameThatPrefixesExistingVM(t *testing.T) {
 		layout: paths.Layout{VMsDir: filepath.Join(tmp, "vms"), RuntimeDir: filepath.Join(tmp, "runtime")},
 		config: model.DaemonConfig{BridgeIP: model.DefaultBridgeIP},
 	}
+	wireServices(d)

 	existing := testVM("longname-sandbox-foobar", "image-x", "172.16.0.50")
 	upsertDaemonVM(t, ctx, d.store, existing)

@@ -41,14 +42,14 @@ func TestReserveVMAllowsNameThatPrefixesExistingVM(t *testing.T) {
 	// New VM name is a prefix of the existing id (which is
 	// "longname-sandbox-foobar-id" per testVM). Old FindVM-based check
 	// would reject this.
-	if vm, err := d.vmSvc().reserveVM(ctx, "longname", image, model.VMSpec{VCPUCount: 1, MemoryMiB: 128}); err != nil {
+	if vm, err := d.vm.reserveVM(ctx, "longname", image, model.VMSpec{VCPUCount: 1, MemoryMiB: 128}); err != nil {
 		t.Fatalf("reserveVM(prefix of id): %v", err)
 	} else if vm.Name != "longname" {
 		t.Fatalf("reserveVM returned name=%q, want longname", vm.Name)
 	}

 	// Prefix of the existing name ("longname-sandbox") must also work.
-	if vm, err := d.vmSvc().reserveVM(ctx, "longname-sandbox", image, model.VMSpec{VCPUCount: 1, MemoryMiB: 128}); err != nil {
+	if vm, err := d.vm.reserveVM(ctx, "longname-sandbox", image, model.VMSpec{VCPUCount: 1, MemoryMiB: 128}); err != nil {
 		t.Fatalf("reserveVM(prefix of name): %v", err)
 	} else if vm.Name != "longname-sandbox" {
 		t.Fatalf("reserveVM returned name=%q, want longname-sandbox", vm.Name)

@@ -66,6 +67,7 @@ func TestReserveVMRejectsExactDuplicateName(t *testing.T) {
 		layout: paths.Layout{VMsDir: filepath.Join(tmp, "vms"), RuntimeDir: filepath.Join(tmp, "runtime")},
 		config: model.DaemonConfig{BridgeIP: model.DefaultBridgeIP},
 	}
+	wireServices(d)
 	existing := testVM("sandbox", "image-x", "172.16.0.51")
 	upsertDaemonVM(t, ctx, d.store, existing)

@@ -76,7 +78,7 @@ func TestReserveVMRejectsExactDuplicateName(t *testing.T) {
 		t.Fatalf("UpsertImage: %v", err)
 	}

-	_, err := d.vmSvc().reserveVM(ctx, "sandbox", image, model.VMSpec{VCPUCount: 1, MemoryMiB: 128})
+	_, err := d.vm.reserveVM(ctx, "sandbox", image, model.VMSpec{VCPUCount: 1, MemoryMiB: 128})
 	if err == nil {
 		t.Fatal("reserveVM with duplicate name should have failed")
 	}
@@ -111,11 +111,12 @@ func TestRediscoverHandlesLoadsScratchWhenProcessDead(t *testing.T) {
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)
 	vm := testVM("gone", "image-gone", "172.16.0.250")
 	vm.Runtime.APISockPath = apiSock
 	vm.Runtime.VMDir = vmDir

-	got, alive, err := d.vmSvc().rediscoverHandles(context.Background(), vm)
+	got, alive, err := d.vm.rediscoverHandles(context.Background(), vm)
 	if err != nil {
 		t.Fatalf("rediscoverHandles: %v", err)
 	}

@@ -148,11 +149,12 @@ func TestRediscoverHandlesPrefersLivePIDOverScratch(t *testing.T) {
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)
 	vm := testVM("moved", "image-moved", "172.16.0.251")
 	vm.Runtime.APISockPath = apiSock
 	vm.Runtime.VMDir = vmDir

-	got, alive, err := d.vmSvc().rediscoverHandles(context.Background(), vm)
+	got, alive, err := d.vm.rediscoverHandles(context.Background(), vm)
 	if err != nil {
 		t.Fatalf("rediscoverHandles: %v", err)
 	}

@@ -177,15 +179,16 @@ func TestClearVMHandlesRemovesScratchFile(t *testing.T) {
 	}

 	d := &Daemon{}
+	wireServices(d)
 	vm := testVM("sweep", "image-sweep", "172.16.0.252")
 	vm.Runtime.VMDir = vmDir
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: 42})
-	d.vmSvc().clearVMHandles(vm)
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: 42})
+	d.vm.clearVMHandles(vm)

 	if _, err := os.Stat(handlesFilePath(vmDir)); !os.IsNotExist(err) {
 		t.Fatalf("scratch file still present: %v", err)
 	}
-	if h, ok := d.vmSvc().handles.get(vm.ID); ok && !h.IsZero() {
+	if h, ok := d.vm.handles.get(vm.ID); ok && !h.IsZero() {
 		t.Fatalf("cache entry survives clear: %+v", h)
 	}
 }
@@ -123,30 +123,6 @@ func newVMService(deps vmServiceDeps) *VMService {
 	}
 }

-// vmSvc is Daemon's lazy-init getter. Mirrors hostNet() / imageSvc() /
-// workspaceSvc() so test literals like `&Daemon{store: db, runner: r}`
-// still get a functional VMService without spelling one out.
-func (d *Daemon) vmSvc() *VMService {
-	if d.vm != nil {
-		return d.vm
-	}
-	d.vm = newVMService(vmServiceDeps{
-		runner:          d.runner,
-		logger:          d.logger,
-		config:          d.config,
-		layout:          d.layout,
-		store:           d.store,
-		net:             d.hostNet(),
-		img:             d.imageSvc(),
-		ws:              d.workspaceSvc(),
-		guestWaitForSSH: d.guestWaitForSSH,
-		guestDial:       d.guestDial,
-		capHooks:        d.buildCapabilityHooks(),
-		beginOperation:  d.beginOperation,
-	})
-	return d.vm
-}
-
 // buildCapabilityHooks adapts Daemon's existing capability-dispatch
 // methods into the capabilityHooks bag VMService takes. Keeps the
 // registry + capability types on *Daemon while letting VMService call
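wireServices itself does not appear in the hunks shown here. Assuming it simply performs the same wiring the deleted getters used to do, eagerly and only for fields that are still nil so a test can preinstall a stub first, it plausibly has a shape like the sketch below (the HostNetwork construction and exact field set are assumptions, not code from this commit):

    // Hypothetical sketch, not the actual implementation.
    func wireServices(d *Daemon) {
    	if d.net == nil {
    		d.net = &HostNetwork{ /* runner, config, logger, ... */ }
    	}
    	if d.img == nil {
    		d.img = newImageService(imageServiceDeps{
    			runner: d.runner,
    			logger: d.logger,
    			config: d.config,
    			layout: d.layout,
    			store:  d.store,
    			beginOperation: func(name string, attrs ...any) *operationLog {
    				return d.beginOperation(name, attrs...)
    			},
    		})
    	}
    	if d.ws == nil {
    		d.ws = newWorkspaceService(workspaceServiceDeps{ /* see sketch further below */ })
    	}
    	if d.vm == nil {
    		d.vm = newVMService(vmServiceDeps{
    			runner:          d.runner,
    			logger:          d.logger,
    			config:          d.config,
    			layout:          d.layout,
    			store:           d.store,
    			net:             d.net,
    			img:             d.img,
    			ws:              d.ws,
    			guestWaitForSSH: d.guestWaitForSSH,
    			guestDial:       d.guestDial,
    			capHooks:        d.buildCapabilityHooks(),
    			beginOperation:  d.beginOperation,
    		})
    	}
    }

Guarding on nil keeps the call idempotent, and constructing VMService after the other fields means its peer pointers pick up whatever value those fields already hold when wireServices runs.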
@@ -35,6 +35,7 @@ func TestFindVMPrefixResolution(t *testing.T) {
 	ctx := context.Background()
 	db := openDaemonStore(t)
 	d := &Daemon{store: db}
+	wireServices(d)

 	for _, vm := range []model.VMRecord{
 		testVM("alpha", "image-alpha", "172.16.0.2"),

@@ -71,6 +72,7 @@ func TestFindImagePrefixResolution(t *testing.T) {
 	ctx := context.Background()
 	db := openDaemonStore(t)
 	d := &Daemon{store: db}
+	wireServices(d)

 	for _, image := range []model.Image{
 		testImage("base"),

@@ -149,6 +151,7 @@ func TestReconcileStopsStaleRunningVMAndClearsRuntimeHandles(t *testing.T) {
 		},
 	}
 	d := &Daemon{store: db, runner: runner}
+	wireServices(d)

 	if err := d.reconcile(ctx); err != nil {
 		t.Fatalf("reconcile: %v", err)

@@ -167,7 +170,7 @@ func TestReconcileStopsStaleRunningVMAndClearsRuntimeHandles(t *testing.T) {
 		t.Fatalf("handles.json still present after reconcile: %v", err)
 	}
 	// And the in-memory cache must be empty.
-	if h, ok := d.vmSvc().handles.get(vm.ID); ok && !h.IsZero() {
+	if h, ok := d.vm.handles.get(vm.ID); ok && !h.IsZero() {
 		t.Fatalf("handle cache not cleared after reconcile: %+v", h)
 	}
 }

@@ -213,12 +216,13 @@ func TestRebuildDNSIncludesOnlyLiveRunningVMs(t *testing.T) {
 	})

 	d := &Daemon{store: db, net: &HostNetwork{vmDNS: server}}
+	wireServices(d)
 	// rebuildDNS reads the alive check from the handle cache. Seed
 	// the live VM with its real PID; leave the stale entry with a PID
 	// that definitely isn't running (999999 ≫ max PID on most hosts).
-	d.vmSvc().setVMHandlesInMemory(live.ID, model.VMHandles{PID: liveCmd.Process.Pid})
-	d.vmSvc().setVMHandlesInMemory(stale.ID, model.VMHandles{PID: 999999})
-	if err := d.vmSvc().rebuildDNS(ctx); err != nil {
+	d.vm.setVMHandlesInMemory(live.ID, model.VMHandles{PID: liveCmd.Process.Pid})
+	d.vm.setVMHandlesInMemory(stale.ID, model.VMHandles{PID: 999999})
+	if err := d.vm.rebuildDNS(ctx); err != nil {
 		t.Fatalf("rebuildDNS: %v", err)
 	}

@@ -252,7 +256,8 @@ func TestSetVMRejectsStoppedOnlyChangesForRunningVM(t *testing.T) {
 	upsertDaemonVM(t, ctx, db, vm)

 	d := &Daemon{store: db}
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: cmd.Process.Pid})
+	wireServices(d)
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: cmd.Process.Pid})
 	tests := []struct {
 		name   string
 		params api.VMSetParams

@@ -277,7 +282,7 @@ func TestSetVMRejectsStoppedOnlyChangesForRunningVM(t *testing.T) {

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			_, err := d.vmSvc().SetVM(ctx, tt.params)
+			_, err := d.vm.SetVM(ctx, tt.params)
 			if err == nil || !strings.Contains(err.Error(), tt.want) {
 				t.Fatalf("SetVM(%s) error = %v, want %q", tt.name, err, tt.want)
 			}
@@ -367,8 +372,9 @@ func TestHealthVMReturnsHealthyForRunningGuest(t *testing.T) {
 		},
 	}
 	d := &Daemon{store: db, runner: runner}
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: handlePID})
-	result, err := d.vmSvc().HealthVM(ctx, vm.Name)
+	wireServices(d)
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: handlePID})
+	result, err := d.vm.HealthVM(ctx, vm.Name)
 	if err != nil {
 		t.Fatalf("HealthVM: %v", err)
 	}

@@ -430,8 +436,9 @@ func TestPingVMAliasReturnsAliveForHealthyVM(t *testing.T) {
 		},
 	}
 	d := &Daemon{store: db, runner: runner}
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})
-	result, err := d.vmSvc().PingVM(ctx, vm.Name)
+	wireServices(d)
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})
+	result, err := d.vm.PingVM(ctx, vm.Name)
 	if err != nil {
 		t.Fatalf("PingVM: %v", err)
 	}

@@ -530,7 +537,8 @@ func TestHealthVMReturnsFalseForStoppedVM(t *testing.T) {
 	upsertDaemonVM(t, ctx, db, vm)

 	d := &Daemon{store: db}
-	result, err := d.vmSvc().HealthVM(ctx, vm.Name)
+	wireServices(d)
+	result, err := d.vm.HealthVM(ctx, vm.Name)
 	if err != nil {
 		t.Fatalf("HealthVM: %v", err)
 	}

@@ -628,9 +636,10 @@ func TestPortsVMReturnsEnrichedPortsAndWebSchemes(t *testing.T) {
 		},
 	}
 	d := &Daemon{store: db, runner: runner}
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})
+	wireServices(d)
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})

-	result, err := d.vmSvc().PortsVM(ctx, vm.Name)
+	result, err := d.vm.PortsVM(ctx, vm.Name)
 	if err != nil {
 		t.Fatalf("PortsVM: %v", err)
 	}

@@ -677,7 +686,8 @@ func TestPortsVMReturnsErrorForStoppedVM(t *testing.T) {
 	upsertDaemonVM(t, ctx, db, vm)

 	d := &Daemon{store: db}
-	_, err := d.vmSvc().PortsVM(ctx, vm.Name)
+	wireServices(d)
+	_, err := d.vm.PortsVM(ctx, vm.Name)
 	if err == nil || !strings.Contains(err.Error(), "is not running") {
 		t.Fatalf("PortsVM error = %v, want not running", err)
 	}

@@ -740,7 +750,8 @@ func TestSetVMDiskResizeFailsPreflightWhenToolsMissing(t *testing.T) {

 	t.Setenv("PATH", t.TempDir())
 	d := &Daemon{store: db}
-	_, err := d.vmSvc().SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, WorkDiskSize: "16G"})
+	wireServices(d)
+	_, err := d.vm.SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, WorkDiskSize: "16G"})
 	if err == nil || !strings.Contains(err.Error(), "work disk resize preflight failed") {
 		t.Fatalf("SetVM() error = %v, want preflight failure", err)
 	}
@@ -768,6 +779,7 @@ func TestFlattenNestedWorkHomeCopiesEntriesIndividually(t *testing.T) {
 		},
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)

 	if err := flattenNestedWorkHome(context.Background(), d.runner, workMount); err != nil {
 		t.Fatalf("flattenNestedWorkHome: %v", err)

@@ -808,10 +820,11 @@ func TestEnsureAuthorizedKeyOnWorkDiskRepairsNestedRootLayout(t *testing.T) {
 		runner: &filesystemRunner{t: t},
 		config: model.DaemonConfig{SSHKeyPath: sshKeyPath},
 	}
+	wireServices(d)
 	vm := testVM("seed-repair", "image-seed-repair", "172.16.0.61")
 	vm.Runtime.WorkDiskPath = workDiskDir

-	if err := d.workspaceSvc().ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm, model.Image{}, workDiskPreparation{}); err != nil {
+	if err := d.ws.ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm, model.Image{}, workDiskPreparation{}); err != nil {
 		t.Fatalf("ensureAuthorizedKeyOnWorkDisk: %v", err)
 	}
 	if _, err := os.Stat(filepath.Join(workDiskDir, "root")); !os.IsNotExist(err) {

@@ -845,10 +858,11 @@ func TestEnsureGitIdentityOnWorkDiskCopiesHostGlobalIdentity(t *testing.T) {

 	workDiskDir := t.TempDir()
 	d := &Daemon{runner: &filesystemRunner{t: t}}
+	wireServices(d)
 	vm := testVM("git-identity", "image-git-identity", "172.16.0.67")
 	vm.Runtime.WorkDiskPath = workDiskDir

-	if err := d.workspaceSvc().ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
+	if err := d.ws.ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
 		t.Fatalf("ensureGitIdentityOnWorkDisk: %v", err)
 	}

@@ -878,10 +892,11 @@ func TestEnsureGitIdentityOnWorkDiskPreservesExistingGuestConfig(t *testing.T) {
 	}

 	d := &Daemon{runner: &filesystemRunner{t: t}}
+	wireServices(d)
 	vm := testVM("git-identity-preserve", "image-git-identity", "172.16.0.68")
 	vm.Runtime.WorkDiskPath = workDiskDir

-	if err := d.workspaceSvc().ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
+	if err := d.ws.ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
 		t.Fatalf("ensureGitIdentityOnWorkDisk: %v", err)
 	}

@@ -922,10 +937,11 @@ func TestEnsureGitIdentityOnWorkDiskWarnsAndSkipsWhenHostIdentityIncomplete(t *t
 		runner: &filesystemRunner{t: t},
 		logger: logger,
 	}
+	wireServices(d)
 	vm := testVM("git-identity-missing", "image-git-identity", "172.16.0.69")
 	vm.Runtime.WorkDiskPath = workDiskDir

-	if err := d.workspaceSvc().ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
+	if err := d.ws.ensureGitIdentityOnWorkDisk(context.Background(), &vm); err != nil {
 		t.Fatalf("ensureGitIdentityOnWorkDisk: %v", err)
 	}

@@ -950,8 +966,9 @@ func TestEnsureGitIdentityOnWorkDiskWarnsAndSkipsWhenHostIdentityIncomplete(t *t

 func TestRunFileSyncNoOpWhenConfigEmpty(t *testing.T) {
 	d := &Daemon{runner: &filesystemRunner{t: t}}
+	wireServices(d)
 	vm := testVM("no-sync", "image", "172.16.0.70")
-	if err := d.workspaceSvc().runFileSync(context.Background(), &vm); err != nil {
+	if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
 		t.Fatalf("runFileSync: %v", err)
 	}
 }
@@ -977,9 +994,10 @@ func TestRunFileSyncCopiesFile(t *testing.T) {
 			},
 		},
 	}
+	wireServices(d)
 	vm := testVM("sync-file", "image", "172.16.0.71")
 	vm.Runtime.WorkDiskPath = workDisk
-	if err := d.workspaceSvc().runFileSync(context.Background(), &vm); err != nil {
+	if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
 		t.Fatalf("runFileSync: %v", err)
 	}

@@ -1017,9 +1035,10 @@ func TestRunFileSyncRespectsCustomMode(t *testing.T) {
 			},
 		},
 	}
+	wireServices(d)
 	vm := testVM("sync-mode", "image", "172.16.0.72")
 	vm.Runtime.WorkDiskPath = workDisk
-	if err := d.workspaceSvc().runFileSync(context.Background(), &vm); err != nil {
+	if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
 		t.Fatalf("runFileSync: %v", err)
 	}

@@ -1052,9 +1071,10 @@ func TestRunFileSyncSkipsMissingHostPath(t *testing.T) {
 			},
 		},
 	}
+	wireServices(d)
 	vm := testVM("sync-missing", "image", "172.16.0.73")
 	vm.Runtime.WorkDiskPath = workDisk
-	if err := d.workspaceSvc().runFileSync(context.Background(), &vm); err != nil {
+	if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
 		t.Fatalf("runFileSync: %v", err)
 	}

@@ -1091,9 +1111,10 @@ func TestRunFileSyncOverwritesExistingGuestFile(t *testing.T) {
 			},
 		},
 	}
+	wireServices(d)
 	vm := testVM("sync-overwrite", "image", "172.16.0.74")
 	vm.Runtime.WorkDiskPath = workDisk
-	if err := d.workspaceSvc().runFileSync(context.Background(), &vm); err != nil {
+	if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
 		t.Fatalf("runFileSync: %v", err)
 	}

@@ -1133,9 +1154,10 @@ func TestRunFileSyncCopiesDirectoryRecursively(t *testing.T) {
 			},
 		},
 	}
+	wireServices(d)
 	vm := testVM("sync-dir", "image", "172.16.0.75")
 	vm.Runtime.WorkDiskPath = workDisk
-	if err := d.workspaceSvc().runFileSync(context.Background(), &vm); err != nil {
+	if err := d.ws.runFileSync(context.Background(), &vm); err != nil {
 		t.Fatalf("runFileSync: %v", err)
 	}
@@ -1157,10 +1179,11 @@ func TestRunFileSyncCopiesDirectoryRecursively(t *testing.T) {

 func TestCreateVMRejectsNonPositiveCPUAndMemory(t *testing.T) {
 	d := &Daemon{}
-	if _, err := d.vmSvc().CreateVM(context.Background(), api.VMCreateParams{VCPUCount: ptr(0)}); err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
+	wireServices(d)
+	if _, err := d.vm.CreateVM(context.Background(), api.VMCreateParams{VCPUCount: ptr(0)}); err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
 		t.Fatalf("CreateVM(vcpu=0) error = %v", err)
 	}
-	if _, err := d.vmSvc().CreateVM(context.Background(), api.VMCreateParams{MemoryMiB: ptr(-1)}); err == nil || !strings.Contains(err.Error(), "memory must be a positive integer") {
+	if _, err := d.vm.CreateVM(context.Background(), api.VMCreateParams{MemoryMiB: ptr(-1)}); err == nil || !strings.Contains(err.Error(), "memory must be a positive integer") {
 		t.Fatalf("CreateVM(memory=-1) error = %v", err)
 	}
 }

@@ -1187,8 +1210,9 @@ func TestBeginVMCreateCompletesAndReturnsStatus(t *testing.T) {
 			BridgeIP: model.DefaultBridgeIP,
 		},
 	}
+	wireServices(d)

-	op, err := d.vmSvc().BeginVMCreate(ctx, api.VMCreateParams{Name: "queued", NoStart: true})
+	op, err := d.vm.BeginVMCreate(ctx, api.VMCreateParams{Name: "queued", NoStart: true})
 	if err != nil {
 		t.Fatalf("BeginVMCreate: %v", err)
 	}

@@ -1198,7 +1222,7 @@ func TestBeginVMCreateCompletesAndReturnsStatus(t *testing.T) {

 	deadline := time.Now().Add(2 * time.Second)
 	for time.Now().Before(deadline) {
-		status, err := d.vmSvc().VMCreateStatus(ctx, op.ID)
+		status, err := d.vm.VMCreateStatus(ctx, op.ID)
 		if err != nil {
 			t.Fatalf("VMCreateStatus: %v", err)
 		}

@@ -1237,8 +1261,9 @@ func TestCreateVMUsesDefaultsWhenCPUAndMemoryOmitted(t *testing.T) {
 			BridgeIP: model.DefaultBridgeIP,
 		},
 	}
+	wireServices(d)

-	vm, err := d.vmSvc().CreateVM(ctx, api.VMCreateParams{Name: "defaults", ImageName: image.Name, NoStart: true})
+	vm, err := d.vm.CreateVM(ctx, api.VMCreateParams{Name: "defaults", ImageName: image.Name, NoStart: true})
 	if err != nil {
 		t.Fatalf("CreateVM: %v", err)
 	}

@@ -1256,11 +1281,12 @@ func TestSetVMRejectsNonPositiveCPUAndMemory(t *testing.T) {
 	vm := testVM("validate", "image-validate", "172.16.0.13")
 	upsertDaemonVM(t, ctx, db, vm)
 	d := &Daemon{store: db}
+	wireServices(d)

-	if _, err := d.vmSvc().SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, VCPUCount: ptr(0)}); err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
+	if _, err := d.vm.SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, VCPUCount: ptr(0)}); err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
 		t.Fatalf("SetVM(vcpu=0) error = %v", err)
 	}
-	if _, err := d.vmSvc().SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, MemoryMiB: ptr(0)}); err == nil || !strings.Contains(err.Error(), "memory must be a positive integer") {
+	if _, err := d.vm.SetVM(ctx, api.VMSetParams{IDOrName: vm.ID, MemoryMiB: ptr(0)}); err == nil || !strings.Contains(err.Error(), "memory must be a positive integer") {
 		t.Fatalf("SetVM(memory=0) error = %v", err)
 	}
 }

@@ -1281,7 +1307,8 @@ func TestCollectStatsIgnoresMalformedMetricsFile(t *testing.T) {
 	}

 	d := &Daemon{}
-	stats, err := d.vmSvc().collectStats(context.Background(), model.VMRecord{
+	wireServices(d)
+	stats, err := d.vm.collectStats(context.Background(), model.VMRecord{
 		Runtime: model.VMRuntime{
 			SystemOverlay: overlay,
 			WorkDiskPath:  workDisk,
@@ -1330,6 +1357,7 @@ func TestValidateStartPrereqsReportsNATUplinkFailure(t *testing.T) {
 			FirecrackerBin: firecrackerBin,
 		},
 	}
+	wireServices(d)
 	vm := testVM("nat", "image-nat", "172.16.0.12")
 	vm.Spec.NATEnabled = true
 	vm.Runtime.WorkDiskPath = filepath.Join(t.TempDir(), "missing-root.ext4")

@@ -1337,7 +1365,7 @@ func TestValidateStartPrereqsReportsNATUplinkFailure(t *testing.T) {
 	image.RootfsPath = rootfsPath
 	image.KernelPath = kernelPath

-	err := d.vmSvc().validateStartPrereqs(ctx, vm, image)
+	err := d.vm.validateStartPrereqs(ctx, vm, image)
 	if err == nil || !strings.Contains(err.Error(), "uplink interface for NAT") {
 		t.Fatalf("validateStartPrereqs() error = %v, want NAT uplink failure", err)
 	}

@@ -1365,13 +1393,14 @@ func TestCleanupRuntimeRediscoversLiveFirecrackerPID(t *testing.T) {
 		proc: fake,
 	}
 	d := &Daemon{runner: runner}
+	wireServices(d)
 	vm := testVM("cleanup", "image-cleanup", "172.16.0.22")
 	vm.Runtime.APISockPath = apiSock
 	// Seed a stale PID so cleanupRuntime's findFirecrackerPID pgrep
 	// fallback wins — it rediscovers fake.Process.Pid from apiSock.
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid + 999})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid + 999})

-	if err := d.vmSvc().cleanupRuntime(context.Background(), vm, true); err != nil {
+	if err := d.vm.cleanupRuntime(context.Background(), vm, true); err != nil {
 		t.Fatalf("cleanupRuntime returned error: %v", err)
 	}
 	runner.assertExhausted()
@@ -1398,7 +1427,8 @@ func TestDeleteStoppedNATVMDoesNotFailWithoutTapDevice(t *testing.T) {
 	upsertDaemonVM(t, ctx, db, vm)

 	d := &Daemon{store: db}
-	deleted, err := d.vmSvc().DeleteVM(ctx, vm.Name)
+	wireServices(d)
+	deleted, err := d.vm.DeleteVM(ctx, vm.Name)
 	if err != nil {
 		t.Fatalf("DeleteVM: %v", err)
 	}

@@ -1452,9 +1482,10 @@ func TestStopVMFallsBackToForcedCleanupAfterGracefulTimeout(t *testing.T) {
 		proc: fake,
 	}
 	d := &Daemon{store: db, runner: runner}
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})
+	wireServices(d)
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: fake.Process.Pid})

-	got, err := d.vmSvc().StopVM(ctx, vm.ID)
+	got, err := d.vm.StopVM(ctx, vm.ID)
 	if err != nil {
 		t.Fatalf("StopVM returned error: %v", err)
 	}

@@ -1465,7 +1496,7 @@ func TestStopVMFallsBackToForcedCleanupAfterGracefulTimeout(t *testing.T) {
 	// APISockPath + VSock paths are deterministic — they stay on the
 	// record for debugging and next-start reuse even after stop. The
 	// post-stop invariant is that the in-memory cache is empty.
-	if h, ok := d.vmSvc().handles.get(vm.ID); ok && !h.IsZero() {
+	if h, ok := d.vm.handles.get(vm.ID); ok && !h.IsZero() {
 		t.Fatalf("handle cache not cleared: %+v", h)
 	}
 }
@@ -1476,6 +1507,7 @@ func TestWithVMLockByIDSerializesSameVM(t *testing.T) {
 	vm := testVM("serial", "image-serial", "172.16.0.30")
 	upsertDaemonVM(t, ctx, db, vm)
 	d := &Daemon{store: db}
+	wireServices(d)

 	firstEntered := make(chan struct{})
 	releaseFirst := make(chan struct{})

@@ -1483,7 +1515,7 @@ func TestWithVMLockByIDSerializesSameVM(t *testing.T) {
 	errCh := make(chan error, 2)

 	go func() {
-		_, err := d.vmSvc().withVMLockByID(ctx, vm.ID, func(vm model.VMRecord) (model.VMRecord, error) {
+		_, err := d.vm.withVMLockByID(ctx, vm.ID, func(vm model.VMRecord) (model.VMRecord, error) {
 			close(firstEntered)
 			<-releaseFirst
 			return vm, nil

@@ -1498,7 +1530,7 @@ func TestWithVMLockByIDSerializesSameVM(t *testing.T) {
 	}

 	go func() {
-		_, err := d.vmSvc().withVMLockByID(ctx, vm.ID, func(vm model.VMRecord) (model.VMRecord, error) {
+		_, err := d.vm.withVMLockByID(ctx, vm.ID, func(vm model.VMRecord) (model.VMRecord, error) {
 			close(secondEntered)
 			return vm, nil
 		})

@@ -1535,12 +1567,13 @@ func TestWithVMLockByIDAllowsDifferentVMsConcurrently(t *testing.T) {
 		upsertDaemonVM(t, ctx, db, vm)
 	}
 	d := &Daemon{store: db}
+	wireServices(d)

 	started := make(chan string, 2)
 	release := make(chan struct{})
 	errCh := make(chan error, 2)
 	run := func(id string) {
-		_, err := d.vmSvc().withVMLockByID(ctx, id, func(vm model.VMRecord) (model.VMRecord, error) {
+		_, err := d.vm.withVMLockByID(ctx, id, func(vm model.VMRecord) (model.VMRecord, error) {
 			started <- vm.ID
 			<-release
 			return vm, nil
@@ -83,44 +83,3 @@ func newWorkspaceService(deps workspaceServiceDeps) *WorkspaceService {
 		beginOperation: deps.beginOperation,
 	}
 }
-
-// workspaceSvc is Daemon's lazy-init getter. Mirrors hostNet() /
-// imageSvc() so test literals like &Daemon{store: db, runner: r, ...}
-// still get a functional WorkspaceService without spelling one out.
-func (d *Daemon) workspaceSvc() *WorkspaceService {
-	if d.ws != nil {
-		return d.ws
-	}
-	// Peer seams capture d by closure instead of pointing to
-	// d.vmSvc() / d.imageSvc() directly. vmSvc() constructs VMService
-	// with WorkspaceService as a peer, so resolving the peer service
-	// eagerly here would recurse. Closures defer the lookup to call
-	// time, by which point the cycle is broken because d.vm / d.img
-	// are already populated.
-	d.ws = newWorkspaceService(workspaceServiceDeps{
-		runner: d.runner,
-		logger: d.logger,
-		config: d.config,
-		layout: d.layout,
-		store: d.store,
-		vmResolver: func(ctx context.Context, idOrName string) (model.VMRecord, error) {
-			return d.vmSvc().FindVM(ctx, idOrName)
-		},
-		aliveChecker: func(vm model.VMRecord) bool {
-			return d.vmSvc().vmAlive(vm)
-		},
-		waitGuestSSH: d.waitForGuestSSH,
-		dialGuest: d.dialGuest,
-		imageResolver: func(ctx context.Context, idOrName string) (model.Image, error) {
-			return d.FindImage(ctx, idOrName)
-		},
-		imageWorkSeed: func(ctx context.Context, image model.Image, fingerprint string) error {
-			return d.imageSvc().refreshManagedWorkSeedFingerprint(ctx, image, fingerprint)
-		},
-		withVMLockByRef: func(ctx context.Context, idOrName string, fn func(model.VMRecord) (model.VMRecord, error)) (model.VMRecord, error) {
-			return d.vmSvc().withVMLockByRef(ctx, idOrName, fn)
-		},
-		beginOperation: d.beginOperation,
-	})
-	return d.ws
-}

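The comment removed above describes the constraint the lazy getters were working around: each getter deferred peer lookups through closures so that constructing one service did not recurse into constructing the other. Below is a minimal, self-contained sketch of the eager-wiring shape that makes such deferral unnecessary; the names wireServicesSketch, vmService, workspaceService, and daemon are simplified stand-ins, not the daemon's real types or signatures.

package main

import "fmt"

// Simplified stand-ins for the real services; fields and types here are
// illustrative only.
type vmService struct {
	findVM func(name string) string
}

type workspaceService struct {
	vmResolver func(name string) string
}

type daemon struct {
	vm *vmService
	ws *workspaceService
}

// wireServicesSketch constructs both services up front and assigns the
// fields before any closure runs. The closure in workspaceService still
// captures d, but by the time it is called d.vm is already non-nil, so
// there is no recursive "build the peer to build me" step.
func wireServicesSketch(d *daemon) {
	if d.vm == nil {
		d.vm = &vmService{findVM: func(name string) string { return "vm:" + name }}
	}
	if d.ws == nil {
		d.ws = &workspaceService{
			vmResolver: func(name string) string { return d.vm.findVM(name) },
		}
	}
}

func main() {
	d := &daemon{}
	wireServicesSketch(d)
	fmt.Println(d.ws.vmResolver("serial")) // prints "vm:serial"
}

Because the fields are populated before any closure resolves, nil-checking getters add nothing in this shape, which is why the sketch (and the diff above) can drop them.
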
@@ -65,6 +65,7 @@ func newExportTestDaemonStore(t *testing.T, fake *exportGuestClient) *Daemon {
 		config: model.DaemonConfig{SSHKeyPath: filepath.Join(t.TempDir(), "id_ed25519")},
 		logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
 	}
+	wireServices(d)
 	d.guestDial = func(_ context.Context, _ string, _ string) (guestSSHClient, error) {
 		return fake, nil
 	}

@@ -94,9 +95,9 @@ func TestExportVMWorkspace_HappyPath(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

-	result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName:  vm.Name,
 		GuestPath: "/root/repo",
 	})

@@ -155,10 +156,10 @@ func TestExportVMWorkspace_WithBaseCommit(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

 	const prepareCommit = "abc1234deadbeef"
-	result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName:   vm.Name,
 		BaseCommit: prepareCommit,
 	})

@@ -202,9 +203,9 @@ func TestExportVMWorkspace_BaseCommitFallsBackToHEAD(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

-	result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName:   vm.Name,
 		BaseCommit: "", // omitted
 	})

@@ -242,9 +243,9 @@ func TestExportVMWorkspace_NoChanges(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

-	result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName: vm.Name,
 	})
 	if err != nil {

@@ -281,10 +282,10 @@ func TestExportVMWorkspace_DefaultGuestPath(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

 	// GuestPath omitted — should default to /root/repo.
-	result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName: vm.Name,
 	})
 	if err != nil {

@@ -307,7 +308,7 @@ func TestExportVMWorkspace_VMNotRunning(t *testing.T) {
 	upsertDaemonVM(t, ctx, d.store, vm)
 	// VM is stopped — no handle seed; vmAlive must return false.

-	_, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	_, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName: vm.Name,
 	})
 	if err == nil || !strings.Contains(err.Error(), "not running") {

@@ -341,9 +342,9 @@ func TestExportVMWorkspace_MultipleChangedFiles(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

-	result, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{
+	result, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{
 		IDOrName: vm.Name,
 	})
 	if err != nil {

@@ -386,22 +387,23 @@ func TestPrepareVMWorkspace_ReleasesVMLockDuringGuestIO(t *testing.T) {
 		config: model.DaemonConfig{SSHKeyPath: filepath.Join(t.TempDir(), "id_ed25519")},
 		logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
 	}
+	wireServices(d)
 	d.guestWaitForSSH = func(_ context.Context, _, _ string, _ time.Duration) error { return nil }
 	d.guestDial = func(_ context.Context, _, _ string) (guestSSHClient, error) {
 		return &exportGuestClient{}, nil
 	}
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

 	// Install the workspace seams on this daemon instance. InspectRepo
 	// returns a trivial spec so the real filesystem isn't touched;
 	// Import blocks until we say go.
 	importStarted := make(chan struct{})
 	releaseImport := make(chan struct{})
-	d.workspaceSvc().workspaceInspectRepo = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
+	d.ws.workspaceInspectRepo = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
 		return workspace.RepoSpec{RepoName: "fake", RepoRoot: "/tmp/fake"}, nil
 	}
-	d.workspaceSvc().workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
+	d.ws.workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
 		close(importStarted)
 		<-releaseImport
 		return nil

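The overrides on d.ws in this hunk work because workspaceInspectRepo and workspaceImport are function-valued fields (seams) on an already-constructed service, so a test can swap them in place after wiring without rebuilding the service graph. A small, self-contained illustration of that pattern follows; the type and field names (workspaceSvcSketch, inspectRepo, importRepo, prepare) are hypothetical, not the real WorkspaceService API.

package main

import (
	"errors"
	"fmt"
)

// workspaceSvcSketch models a service whose I/O steps are exposed as
// function-valued fields that tests can replace one at a time.
type workspaceSvcSketch struct {
	inspectRepo func(path string) (string, error)
	importRepo  func(spec string) error
}

// prepare runs the two seams in order, as a stand-in for the real flow.
func (w *workspaceSvcSketch) prepare(path string) error {
	spec, err := w.inspectRepo(path)
	if err != nil {
		return err
	}
	return w.importRepo(spec)
}

func main() {
	// Production-style defaults that would do real work.
	ws := &workspaceSvcSketch{
		inspectRepo: func(path string) (string, error) { return "", errors.New("would touch the filesystem") },
		importRepo:  func(spec string) error { return errors.New("would dial the guest") },
	}

	// Test-style override after construction: no filesystem or guest I/O.
	ws.inspectRepo = func(path string) (string, error) { return "spec:" + path, nil }
	ws.importRepo = func(spec string) error { fmt.Println("imported", spec); return nil }

	if err := ws.prepare("/tmp/fake"); err != nil {
		fmt.Println("prepare failed:", err)
	}
}

Replacing a seam after construction only affects that one service value, which keeps the override local to the test that installs it.
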
@@ -410,7 +412,7 @@ func TestPrepareVMWorkspace_ReleasesVMLockDuringGuestIO(t *testing.T) {
 	// Kick off prepare in a goroutine. It will block inside the import.
 	prepareDone := make(chan error, 1)
 	go func() {
-		_, err := d.workspaceSvc().PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
+		_, err := d.ws.PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
 			IDOrName:   vm.Name,
 			SourcePath: "/tmp/fake",
 		})

@@ -429,7 +431,7 @@ func TestPrepareVMWorkspace_ReleasesVMLockDuringGuestIO(t *testing.T) {
 	// import is in flight. Acquiring it must not wait.
 	acquired := make(chan struct{})
 	go func() {
-		unlock := d.vmSvc().lockVMID(vm.ID)
+		unlock := d.vm.lockVMID(vm.ID)
 		close(acquired)
 		unlock()
 	}()

@@ -473,14 +475,15 @@ func TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM(t *testing.T) {
 		config: model.DaemonConfig{SSHKeyPath: filepath.Join(t.TempDir(), "id_ed25519")},
 		logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
 	}
+	wireServices(d)
 	d.guestWaitForSSH = func(_ context.Context, _, _ string, _ time.Duration) error { return nil }
 	d.guestDial = func(_ context.Context, _, _ string) (guestSSHClient, error) {
 		return &exportGuestClient{}, nil
 	}
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

-	d.workspaceSvc().workspaceInspectRepo = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
+	d.ws.workspaceInspectRepo = func(context.Context, string, string, string) (workspace.RepoSpec, error) {
 		return workspace.RepoSpec{RepoName: "fake", RepoRoot: "/tmp/fake"}, nil
 	}

@@ -488,7 +491,7 @@ func TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM(t *testing.T) {
 	var active int32
 	var maxObserved int32
 	release := make(chan struct{})
-	d.workspaceSvc().workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
+	d.ws.workspaceImport = func(context.Context, workspace.GuestClient, workspace.RepoSpec, string, model.WorkspacePrepareMode) error {
 		n := atomic.AddInt32(&active, 1)
 		for {
 			prev := atomic.LoadInt32(&maxObserved)

@@ -505,7 +508,7 @@ func TestPrepareVMWorkspace_SerialisesConcurrentPreparesOnSameVM(t *testing.T) {
 	done := make(chan error, n)
 	for i := 0; i < n; i++ {
 		go func() {
-			_, err := d.workspaceSvc().PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
+			_, err := d.ws.PrepareVMWorkspace(ctx, api.VMWorkspacePrepareParams{
 				IDOrName:   vm.Name,
 				SourcePath: "/tmp/fake",
 			})

@@ -565,9 +568,9 @@ func TestExportVMWorkspace_DoesNotMutateRealIndex(t *testing.T) {
 	}
 	d := newExportTestDaemonStore(t, fake)
 	upsertDaemonVM(t, ctx, d.store, vm)
-	d.vmSvc().setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})
+	d.vm.setVMHandlesInMemory(vm.ID, model.VMHandles{PID: firecracker.Process.Pid})

-	if _, err := d.workspaceSvc().ExportVMWorkspace(ctx, api.WorkspaceExportParams{IDOrName: vm.Name}); err != nil {
+	if _, err := d.ws.ExportVMWorkspace(ctx, api.WorkspaceExportParams{IDOrName: vm.Name}); err != nil {
 		t.Fatalf("ExportVMWorkspace: %v", err)
 	}