Daemon no longer owns a coarse mu shared across unrelated concerns.
Each subsystem now carries its own state and lock:
- tapPool: entries, next, and mu move onto a new tapPool struct.
- sessionRegistry: sessionControllers + its mutex move off Daemon.
- opRegistry[T asyncOp]: generic registry collapses the two ad-hoc
vm-create and image-build operation maps (and their mutexes) into one
shared type; the Begin/Status/Cancel/Prune methods simplify.
- vmLockSet: the sync.Map of per-VM mutexes moves into its own type;
lockVMID forwards.
- Daemon.mu splits into imageOpsMu (image-registry mutations) and
createVMMu (CreateVM serialisation) so image ops and VM creates no
longer block each other.
Lock ordering collapses to vmLocks[id] -> {createVMMu, imageOpsMu} ->
subsystem-local leaves. doc.go and ARCHITECTURE.md updated.
No behavior change; tests green.
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
55 lines
1.2 KiB
Go
55 lines
1.2 KiB
Go
package daemon
|
|
|
|
import (
|
|
"sync"
|
|
"time"
|
|
)
|
|
|
|
// asyncOp is the protocol shared by the long-running operation state types
// (VM create, image build). Each operation has a stable ID, a done flag that
// flips to true when its goroutine finishes, an UpdatedAt for pruning, and a
// way to signal cancellation to its goroutine.
type asyncOp interface {
	// opID returns the operation's stable identifier, used as the
	// registry map key.
	opID() string
	// opIsDone reports whether the operation's goroutine has finished;
	// prune only removes operations for which this is true.
	opIsDone() bool
	// opUpdatedAt returns the operation's last-update timestamp, compared
	// against prune's cutoff.
	opUpdatedAt() time.Time
	// opCancel signals cancellation to the operation's goroutine.
	opCancel()
}
|
|
|
|
// opRegistry is a mutex-guarded map of in-flight operations keyed by op ID.
// One registry per operation kind; each owns its own lock, so registries do
// not contend with each other or with Daemon.mu.
//
// The zero value is ready to use: insert allocates byID lazily.
type opRegistry[T asyncOp] struct {
	mu   sync.Mutex   // guards byID
	byID map[string]T // in-flight operations keyed by opID(); nil until first insert
}
|
|
|
|
func (r *opRegistry[T]) insert(op T) {
|
|
r.mu.Lock()
|
|
defer r.mu.Unlock()
|
|
if r.byID == nil {
|
|
r.byID = map[string]T{}
|
|
}
|
|
r.byID[op.opID()] = op
|
|
}
|
|
|
|
func (r *opRegistry[T]) get(id string) (T, bool) {
|
|
r.mu.Lock()
|
|
defer r.mu.Unlock()
|
|
op, ok := r.byID[id]
|
|
return op, ok
|
|
}
|
|
|
|
// prune drops completed operations last updated before the cutoff.
|
|
func (r *opRegistry[T]) prune(before time.Time) {
|
|
r.mu.Lock()
|
|
defer r.mu.Unlock()
|
|
for id, op := range r.byID {
|
|
if !op.opIsDone() {
|
|
continue
|
|
}
|
|
if op.opUpdatedAt().Before(before) {
|
|
delete(r.byID, id)
|
|
}
|
|
}
|
|
}
|