diff --git a/internal/daemon/daemon.go b/internal/daemon/daemon.go
index 9a3b84d..0658107 100644
--- a/internal/daemon/daemon.go
+++ b/internal/daemon/daemon.go
@@ -37,8 +37,7 @@ type Daemon struct {
 	createOps          map[string]*vmCreateOperationState
 	imageBuildOpsMu    sync.Mutex
 	imageBuildOps      map[string]*imageBuildOperationState
-	vmLocksMu          sync.Mutex
-	vmLocks            map[string]*sync.Mutex
+	vmLocks            sync.Map // map[string]*sync.Mutex; keyed by VM ID
 	sessionControllers map[string]*guestSessionController
 	tapPoolMu          sync.Mutex
 	tapPool            []string
@@ -720,19 +719,14 @@ func (d *Daemon) withVMLockByIDErr(ctx context.Context, id string, fn func(model
 }
 
 func (d *Daemon) lockVMID(id string) func() {
-	d.vmLocksMu.Lock()
-	if d.vmLocks == nil {
-		d.vmLocks = make(map[string]*sync.Mutex)
-	}
-	lock, ok := d.vmLocks[id]
-	if !ok {
-		lock = &sync.Mutex{}
-		d.vmLocks[id] = lock
-	}
-	d.vmLocksMu.Unlock()
-
-	lock.Lock()
-	return lock.Unlock
+	// LoadOrStore atomically returns the existing mutex for this ID, or
+	// installs the freshly allocated one, so every caller for a given VM
+	// ID serializes on the same *sync.Mutex. NOTE: entries are never
+	// deleted, so the map grows with the set of VM IDs ever locked.
+	val, _ := d.vmLocks.LoadOrStore(id, &sync.Mutex{})
+	mu := val.(*sync.Mutex)
+	mu.Lock()
+	return mu.Unlock
 }
 
 func marshalResultOrError(v any, err error) rpc.Response {