daemon split (2/5): extract *ImageService service

Second phase of splitting the daemon god-struct. ImageService now owns
all image + kernel registry operations: register/promote/delete/pull
for images (bundle + OCI paths), the six kernel commands, and the
shared SSH-key/work-seed injection helpers. imageOpsMu (the
publication-window lock) lives on the service; so do the three OCI
pull test seams pullAndFlatten / finalizePulledRootfs / bundleFetch.
The four files images.go, images_pull.go, image_seed.go, kernels.go
flipped their receivers from *Daemon to *ImageService.

FindImage moved with the service. Daemon keeps a thin FindImage
forwarder, so callers reading the dispatch code see the obvious
facade, and tests that pre-date the split still compile.

flattenNestedWorkHome — called from image_seed.go, vm_authsync.go,
and vm_disk.go across future service boundaries — became a
package-level helper taking a CommandRunner explicitly. Daemon keeps
a deprecated forwarder for now; the other services will use the
package form.

Lazy-init helper imageSvc() on Daemon mirrors hostNet() from
Phase 1, so test literals like &Daemon{store: db, runner: r, ...}
that don't spell out an ImageService still get a working one.
Tests that override the image test seams (autopull_test,
concurrency_test, images_pull_test, images_pull_bundle_test) now
assign d.img = &ImageService{...seams...}; the two-statement pattern
matches what Phase 1 established for HostNetwork.

Dispatch in daemon.go is cleaner now: every image/kernel RPC handler
is a single-liner forwarding to d.imageSvc().*. Phase 5 will do the
same for VM lifecycle.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
Thales Maciel 2026-04-20 20:30:32 -03:00
parent 362009d747
commit d7614a3b2b
No known key found for this signature in database
GPG key ID: 33112E6833C34679
15 changed files with 389 additions and 209 deletions

View file

@@ -20,7 +20,7 @@ import (
// validation + kernel resolution run without imageOpsMu — only the
// lookup-then-upsert atom is held under the lock so concurrent
// registers of the same name don't race.
func (d *Daemon) RegisterImage(ctx context.Context, params api.ImageRegisterParams) (image model.Image, err error) {
func (s *ImageService) RegisterImage(ctx context.Context, params api.ImageRegisterParams) (image model.Image, err error) {
name := strings.TrimSpace(params.Name)
if name == "" {
return model.Image{}, fmt.Errorf("image name is required")
@@ -39,7 +39,7 @@ func (d *Daemon) RegisterImage(ctx context.Context, params api.ImageRegisterPara
}
}
}
kernelPath, initrdPath, modulesDir, err := d.resolveKernelInputs(ctx, params.KernelRef, params.KernelPath, params.InitrdPath, params.ModulesDir)
kernelPath, initrdPath, modulesDir, err := s.resolveKernelInputs(ctx, params.KernelRef, params.KernelPath, params.InitrdPath, params.ModulesDir)
if err != nil {
return model.Image{}, err
}
@@ -48,11 +48,11 @@ func (d *Daemon) RegisterImage(ctx context.Context, params api.ImageRegisterPara
return model.Image{}, err
}
d.imageOpsMu.Lock()
defer d.imageOpsMu.Unlock()
s.imageOpsMu.Lock()
defer s.imageOpsMu.Unlock()
now := model.Now()
existing, lookupErr := d.store.GetImageByName(ctx, name)
existing, lookupErr := s.store.GetImageByName(ctx, name)
switch {
case lookupErr == nil:
if existing.Managed {
@@ -88,7 +88,7 @@ func (d *Daemon) RegisterImage(ctx context.Context, params api.ImageRegisterPara
return model.Image{}, lookupErr
}
if err := d.store.UpsertImage(ctx, image); err != nil {
if err := s.store.UpsertImage(ctx, image); err != nil {
return model.Image{}, err
}
return image, nil
@@ -99,8 +99,8 @@ func (d *Daemon) RegisterImage(ctx context.Context, params api.ImageRegisterPara
// SSH-key seeding, and boot-artifact staging all happen outside
// imageOpsMu — only the find/rename/upsert commit atom holds the
// lock.
func (d *Daemon) PromoteImage(ctx context.Context, idOrName string) (image model.Image, err error) {
op := d.beginOperation("image.promote")
func (s *ImageService) PromoteImage(ctx context.Context, idOrName string) (image model.Image, err error) {
op := s.beginOperation("image.promote")
defer func() {
if err != nil {
op.fail(err, imageLogAttrs(image)...)
@@ -109,7 +109,7 @@ func (d *Daemon) PromoteImage(ctx context.Context, idOrName string) (image model
op.done(imageLogAttrs(image)...)
}()
image, err = d.FindImage(ctx, idOrName)
image, err = s.FindImage(ctx, idOrName)
if err != nil {
return model.Image{}, err
}
@@ -119,21 +119,21 @@ func (d *Daemon) PromoteImage(ctx context.Context, idOrName string) (image model
if err := imagemgr.ValidatePromotePaths(image.RootfsPath, image.KernelPath, image.InitrdPath, image.ModulesDir); err != nil {
return model.Image{}, err
}
if strings.TrimSpace(d.layout.ImagesDir) == "" {
if strings.TrimSpace(s.layout.ImagesDir) == "" {
return model.Image{}, errors.New("images dir is not configured")
}
if err := os.MkdirAll(d.layout.ImagesDir, 0o755); err != nil {
if err := os.MkdirAll(s.layout.ImagesDir, 0o755); err != nil {
return model.Image{}, err
}
artifactDir := filepath.Join(d.layout.ImagesDir, image.ID)
artifactDir := filepath.Join(s.layout.ImagesDir, image.ID)
if _, statErr := os.Stat(artifactDir); statErr == nil {
return model.Image{}, fmt.Errorf("artifact dir already exists: %s", artifactDir)
} else if !os.IsNotExist(statErr) {
return model.Image{}, statErr
}
stageDir, err := os.MkdirTemp(d.layout.ImagesDir, image.ID+".promote-")
stageDir, err := os.MkdirTemp(s.layout.ImagesDir, image.ID+".promote-")
if err != nil {
return model.Image{}, err
}
@@ -167,14 +167,14 @@ func (d *Daemon) PromoteImage(ctx context.Context, idOrName string) (image model
if err := system.CopyFilePreferClone(image.WorkSeedPath, workSeedPath); err != nil {
return model.Image{}, err
}
image.SeededSSHPublicKeyFingerprint, err = d.seedAuthorizedKeyOnExt4Image(ctx, workSeedPath)
image.SeededSSHPublicKeyFingerprint, err = s.seedAuthorizedKeyOnExt4Image(ctx, workSeedPath)
if err != nil {
return model.Image{}, err
}
} else {
image.SeededSSHPublicKeyFingerprint = ""
}
_, initrdPath, modulesDir, err := imagemgr.StageBootArtifacts(ctx, d.runner, stageDir, image.KernelPath, image.InitrdPath, image.ModulesDir)
_, initrdPath, modulesDir, err := imagemgr.StageBootArtifacts(ctx, s.runner, stageDir, image.KernelPath, image.InitrdPath, image.ModulesDir)
if err != nil {
return model.Image{}, err
}
@@ -191,13 +191,13 @@ func (d *Daemon) PromoteImage(ctx context.Context, idOrName string) (image model
image.UpdatedAt = model.Now()
op.stage("activate_artifacts", "artifact_dir", artifactDir)
d.imageOpsMu.Lock()
defer d.imageOpsMu.Unlock()
s.imageOpsMu.Lock()
defer s.imageOpsMu.Unlock()
if err := os.Rename(stageDir, artifactDir); err != nil {
return model.Image{}, err
}
cleanupStage = false
if err := d.store.UpsertImage(ctx, image); err != nil {
if err := s.store.UpsertImage(ctx, image); err != nil {
_ = os.RemoveAll(artifactDir)
return model.Image{}, err
}
@@ -208,22 +208,22 @@ func (d *Daemon) PromoteImage(ctx context.Context, idOrName string) (image model
// imageOpsMu so a concurrent CreateVM can't slip an image_id reference
// in between the check and the delete. File cleanup happens after the
// lock is released — the store row is the authoritative handle.
func (d *Daemon) DeleteImage(ctx context.Context, idOrName string) (model.Image, error) {
func (s *ImageService) DeleteImage(ctx context.Context, idOrName string) (model.Image, error) {
image, err := func() (model.Image, error) {
d.imageOpsMu.Lock()
defer d.imageOpsMu.Unlock()
img, err := d.FindImage(ctx, idOrName)
s.imageOpsMu.Lock()
defer s.imageOpsMu.Unlock()
img, err := s.FindImage(ctx, idOrName)
if err != nil {
return model.Image{}, err
}
vms, err := d.store.FindVMsUsingImage(ctx, img.ID)
vms, err := s.store.FindVMsUsingImage(ctx, img.ID)
if err != nil {
return model.Image{}, err
}
if len(vms) > 0 {
return model.Image{}, fmt.Errorf("image %s is still referenced by %d VM(s)", img.Name, len(vms))
}
if err := d.store.DeleteImage(ctx, img.ID); err != nil {
if err := s.store.DeleteImage(ctx, img.ID); err != nil {
return model.Image{}, err
}
return img, nil
@@ -253,7 +253,7 @@ func firstNonEmpty(values ...string) string {
// When kernelRef is given but not yet pulled locally, an auto-pull from the
// embedded kernelcat catalog fires so the caller doesn't have to manage
// kernel/image ordering by hand.
func (d *Daemon) resolveKernelInputs(ctx context.Context, kernelRef, kernelPath, initrdPath, modulesDir string) (string, string, string, error) {
func (s *ImageService) resolveKernelInputs(ctx context.Context, kernelRef, kernelPath, initrdPath, modulesDir string) (string, string, string, error) {
kernelRef = strings.TrimSpace(kernelRef)
kernelPath = strings.TrimSpace(kernelPath)
initrdPath = strings.TrimSpace(initrdPath)
@@ -263,7 +263,7 @@ func (d *Daemon) resolveKernelInputs(ctx context.Context, kernelRef, kernelPath,
if kernelPath != "" || initrdPath != "" || modulesDir != "" {
return "", "", "", fmt.Errorf("--kernel-ref is mutually exclusive with --kernel/--initrd/--modules")
}
entry, err := d.readOrAutoPullKernel(ctx, kernelRef)
entry, err := s.readOrAutoPullKernel(ctx, kernelRef)
if err != nil {
return "", "", "", err
}
@@ -278,8 +278,8 @@ func (d *Daemon) resolveKernelInputs(ctx context.Context, kernelRef, kernelPath,
// readOrAutoPullKernel tries the local kernelcat first; on miss, checks
// the embedded catalog and auto-pulls the bundle.
func (d *Daemon) readOrAutoPullKernel(ctx context.Context, kernelRef string) (kernelcat.Entry, error) {
entry, err := kernelcat.ReadLocal(d.layout.KernelsDir, kernelRef)
func (s *ImageService) readOrAutoPullKernel(ctx context.Context, kernelRef string) (kernelcat.Entry, error) {
entry, err := kernelcat.ReadLocal(s.layout.KernelsDir, kernelRef)
if err == nil {
return entry, nil
}
@@ -294,8 +294,8 @@ func (d *Daemon) readOrAutoPullKernel(ctx context.Context, kernelRef string) (ke
return kernelcat.Entry{}, fmt.Errorf("kernel %q not found in catalog; run 'banger kernel list --available' to browse", kernelRef)
}
vmCreateStage(ctx, "auto_pull_kernel", fmt.Sprintf("pulling kernel %s from catalog", kernelRef))
if _, pullErr := d.KernelPull(ctx, api.KernelPullParams{Name: kernelRef}); pullErr != nil {
if _, pullErr := s.KernelPull(ctx, api.KernelPullParams{Name: kernelRef}); pullErr != nil {
return kernelcat.Entry{}, fmt.Errorf("auto-pull kernel %q: %w", kernelRef, pullErr)
}
return kernelcat.ReadLocal(d.layout.KernelsDir, kernelRef)
return kernelcat.ReadLocal(s.layout.KernelsDir, kernelRef)
}