Move subsystem state/locks off Daemon into owning types

Daemon no longer owns a coarse mu shared across unrelated concerns.
Each subsystem now carries its own state and lock:

- tapPool: entries, next, and mu move onto a new tapPool struct.
- sessionRegistry: the sessionControllers map and its mutex move off
  Daemon.
- opRegistry[T asyncOp]: a generic registry collapses the two ad-hoc
  vm-create and image-build operation maps (and their mutexes) into one
  shared type; the Begin/Status/Cancel/Prune methods simplify (see the
  sketch after this list).
- vmLockSet: the sync.Map of per-VM mutexes moves into its own type;
  lockVMID forwards.
- Daemon.mu splits into imageOpsMu (image-registry mutations) and
  createVMMu (CreateVM serialisation) so image ops and VM creates no
  longer block each other.
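
A rough sketch of the shared registry shape (the asyncOp method set,
field names, and signatures here are illustrative assumptions, not the
committed API; assumes the enclosing package imports sync):

    type asyncOp interface {
        Cancel()
        Done() bool
    }

    type opRegistry[T asyncOp] struct {
        mu  sync.Mutex
        ops map[string]T
    }

    func (r *opRegistry[T]) Begin(id string, op T) {
        r.mu.Lock()
        defer r.mu.Unlock()
        if r.ops == nil {
            r.ops = make(map[string]T)
        }
        r.ops[id] = op
    }

    func (r *opRegistry[T]) Status(id string) (T, bool) {
        r.mu.Lock()
        defer r.mu.Unlock()
        op, ok := r.ops[id]
        return op, ok
    }

    func (r *opRegistry[T]) Cancel(id string) bool {
        r.mu.Lock()
        defer r.mu.Unlock()
        op, ok := r.ops[id]
        if ok {
            op.Cancel()
        }
        return ok
    }

    // Prune drops operations that have already finished.
    func (r *opRegistry[T]) Prune() {
        r.mu.Lock()
        defer r.mu.Unlock()
        for id, op := range r.ops {
            if op.Done() {
                delete(r.ops, id)
            }
        }
    }

tapPool, sessionRegistry, and vmLockSet follow the same pattern:
subsystem state plus a leaf mutex, hidden behind methods.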

Lock ordering collapses to vmLocks[id] -> {createVMMu, imageOpsMu} ->
subsystem-local leaves. doc.go and ARCHITECTURE.md updated.
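
An illustrative create path honouring that order (tapPool's acquire is
an assumed method name for the example; lockVMID and createVMMu are the
real identifiers):

    // Sketch only: shows acquisition order, not the real CreateVM.
    func (d *Daemon) createVMOrdered(id string) error {
        unlock := d.lockVMID(id) // level 1: per-VM mutex
        defer unlock()

        d.createVMMu.Lock() // level 2: CreateVM serialisation
        defer d.createVMMu.Unlock()

        // Level 3: subsystem-local leaves. tapPool takes its own mu
        // internally, so a leaf lock is never held while acquiring a
        // higher-level one.
        _, err := d.tapPool.acquire()
        return err
    }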

No behavior change; tests green.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
Author: Thales Maciel
Date:   2026-04-15 15:58:33 -03:00
Commit: 59f2766139 (parent: ea0db1e17e)
11 changed files with 238 additions and 152 deletions

@@ -32,16 +32,13 @@ type Daemon struct {
 	store  *store.Store
 	runner system.CommandRunner
 	logger *slog.Logger
-	mu                 sync.Mutex
-	createOpsMu        sync.Mutex
-	createOps          map[string]*vmCreateOperationState
-	imageBuildOpsMu    sync.Mutex
-	imageBuildOps      map[string]*imageBuildOperationState
-	vmLocks            sync.Map // map[string]*sync.Mutex; keyed by VM ID
-	sessionControllers map[string]*guestSessionController
-	tapPoolMu          sync.Mutex
-	tapPool            []string
-	tapPoolNext        int
+	imageOpsMu    sync.Mutex
+	createVMMu    sync.Mutex
+	createOps     opRegistry[*vmCreateOperationState]
+	imageBuildOps opRegistry[*imageBuildOperationState]
+	vmLocks       vmLockSet
+	sessions      sessionRegistry
+	tapPool       tapPool
 	closing chan struct{}
 	once    sync.Once
 	pid     int
@@ -85,9 +82,9 @@ func Open(ctx context.Context) (d *Daemon, err error) {
 		store:  db,
 		runner: system.NewRunner(),
 		logger: logger,
-		closing:            make(chan struct{}),
-		pid:                os.Getpid(),
-		sessionControllers: make(map[string]*guestSessionController),
+		closing:  make(chan struct{}),
+		pid:      os.Getpid(),
+		sessions: newSessionRegistry(),
 	}
 	d.ensureVMSSHClientConfig()
 	d.logger.Info("daemon opened", "socket", layout.SocketPath, "state_dir", layout.StateDir, "log_level", cfg.LogLevel)
@@ -720,14 +717,7 @@ func (d *Daemon) withVMLockByIDErr(ctx context.Context, id string, fn func(model
 }
 
 func (d *Daemon) lockVMID(id string) func() {
-	// LoadOrStore is atomic: exactly one *sync.Mutex wins for each ID.
-	// Both the map lookup and the conditional insert happen without a
-	// release-then-reacquire gap, eliminating the TOCTOU window that
-	// existed when vmLocksMu was released before lock.Lock() was called.
-	val, _ := d.vmLocks.LoadOrStore(id, &sync.Mutex{})
-	mu := val.(*sync.Mutex)
-	mu.Lock()
-	return mu.Unlock
+	return d.vmLocks.lock(id)
 }
 
 func marshalResultOrError(v any, err error) rpc.Response {
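
For reference, a vmLockSet consistent with the removed body could look
like this (the locks field name is an assumption; lock matches the
forwarding call above):

    // vmLockSet owns the per-VM mutexes that used to live on Daemon.
    type vmLockSet struct {
        locks sync.Map // map[string]*sync.Mutex, keyed by VM ID
    }

    // lock acquires the mutex for id and returns its unlock func.
    // LoadOrStore stays atomic: exactly one *sync.Mutex wins per ID,
    // so the TOCTOU window noted in the removed comment stays closed.
    func (s *vmLockSet) lock(id string) func() {
        val, _ := s.locks.LoadOrStore(id, &sync.Mutex{})
        mu := val.(*sync.Mutex)
        mu.Lock()
        return mu.Unlock
    }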