Fix TOCTOU race in lockVMID
The old pattern held vmLocksMu to get or create the *sync.Mutex for an ID, then released vmLocksMu before calling lock.Lock(). In that gap a concurrent goroutine could observe the entry, and any future cleanup path that deleted map entries could let a third goroutine install a fresh *sync.Mutex for the same ID. Two callers would then hold independent locks for one VM, with no mutual exclusion between them.

Fix: replace the manual map and vmLocksMu pair with sync.Map and LoadOrStore. LoadOrStore is atomic at the map level: exactly one *sync.Mutex wins for each VM ID, and there is no release-then-reacquire gap between the lookup and the insert. vmLocksMu is removed.
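For illustration, a minimal runnable sketch (this program is not part of the commit; the ID "vm-1234" is made up) of the guarantee the fix relies on: however many goroutines race LoadOrStore for the same key, all of them end up holding the same *sync.Mutex.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var locks sync.Map // map[string]*sync.Mutex, as in the Daemon
	var wg sync.WaitGroup

	distinct := make(map[*sync.Mutex]struct{})
	var distinctMu sync.Mutex

	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// LoadOrStore returns the existing value for the key, or
			// atomically installs the given one; there is no window in
			// which a second goroutine can install a competing mutex.
			val, _ := locks.LoadOrStore("vm-1234", &sync.Mutex{})
			distinctMu.Lock()
			distinct[val.(*sync.Mutex)] = struct{}{}
			distinctMu.Unlock()
		}()
	}
	wg.Wait()
	fmt.Println("distinct mutexes:", len(distinct)) // always 1
}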
parent 9afa0e97ce
commit 43dfda14f8

1 changed file with 9 additions and 15 deletions
@@ -37,8 +37,7 @@ type Daemon struct {
 	createOps map[string]*vmCreateOperationState
 	imageBuildOpsMu sync.Mutex
 	imageBuildOps map[string]*imageBuildOperationState
-	vmLocksMu sync.Mutex
-	vmLocks map[string]*sync.Mutex
+	vmLocks sync.Map // map[string]*sync.Mutex; keyed by VM ID
 	sessionControllers map[string]*guestSessionController
 	tapPoolMu sync.Mutex
 	tapPool []string
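Two properties of sync.Map make it a fit for this field: its zero value is ready to use, so the Daemon needs no constructor change and no lazy-init guard, and it is optimized for keys that are written once and read many times, which matches per-VM-ID locks. A standalone sketch (not daemon code) of the zero-value behavior:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var locks sync.Map // zero value is usable; no make(), no guard mutex

	// The first call stores the mutex; the second loads the same one.
	a, loadedA := locks.LoadOrStore("vm-1", &sync.Mutex{})
	b, loadedB := locks.LoadOrStore("vm-1", &sync.Mutex{})

	fmt.Println(loadedA, loadedB)                   // false true
	fmt.Println(a.(*sync.Mutex) == b.(*sync.Mutex)) // true: one winner per key
}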
@@ -720,19 +719,14 @@ func (d *Daemon) withVMLockByIDErr(ctx context.Context, id string, fn func(model
 }
 
 func (d *Daemon) lockVMID(id string) func() {
-	d.vmLocksMu.Lock()
-	if d.vmLocks == nil {
-		d.vmLocks = make(map[string]*sync.Mutex)
-	}
-	lock, ok := d.vmLocks[id]
-	if !ok {
-		lock = &sync.Mutex{}
-		d.vmLocks[id] = lock
-	}
-	d.vmLocksMu.Unlock()
-
-	lock.Lock()
-	return lock.Unlock
+	// LoadOrStore is atomic: exactly one *sync.Mutex wins for each ID.
+	// Both the map lookup and the conditional insert happen without a
+	// release-then-reacquire gap, eliminating the TOCTOU window that
+	// existed when vmLocksMu was released before lock.Lock() was called.
+	val, _ := d.vmLocks.LoadOrStore(id, &sync.Mutex{})
+	mu := val.(*sync.Mutex)
+	mu.Lock()
+	return mu.Unlock
 }
 
 func marshalResultOrError(v any, err error) rpc.Response {
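A hypothetical call site (restartVM and its body are illustrative, not from this repository) showing how the returned unlock function is meant to be used with defer:

// restartVM is a hypothetical caller, not part of this commit.
func (d *Daemon) restartVM(id string) error {
	unlock := d.lockVMID(id)
	defer unlock()

	// All per-VM state mutation happens while holding the per-ID lock.
	// ...
	return nil
}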