Add concurrent multi-VM CLI actions

Teach the lifecycle and set commands to accept multiple VM refs, resolve them all from a single vm list snapshot, dedupe repeated refs, and fan the existing single-target RPCs out concurrently. Valid targets still run even when other refs are ambiguous or missing, and batch output stays in first-seen order.
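
For illustration, a minimal sketch of that fan-out shape in Go. The resolveRef and startVM helpers here are hypothetical stand-ins for the real snapshot lookup and single-target RPC, not the CLI's actual code:

package main

import (
	"context"
	"fmt"
	"sync"
)

// Stand-ins for the real snapshot resolution and per-VM RPC (assumed names).
func resolveRef(ref string) (string, error) { return ref, nil }

func startVM(ctx context.Context, id string) string { return id + ": started" }

func startAll(ctx context.Context, refs []string) []string {
	// Dedupe repeated refs while preserving first-seen order.
	seen := make(map[string]bool)
	targets := make([]string, 0, len(refs))
	for _, ref := range refs {
		if seen[ref] {
			continue
		}
		seen[ref] = true
		targets = append(targets, ref)
	}
	// Fan out one goroutine per target; each writes into a fixed slot,
	// so output order matches input order regardless of completion order.
	results := make([]string, len(targets))
	var wg sync.WaitGroup
	for i, ref := range targets {
		wg.Add(1)
		go func(i int, ref string) {
			defer wg.Done()
			id, err := resolveRef(ref)
			if err != nil {
				// A bad ref records its own error; other targets still run.
				results[i] = fmt.Sprintf("%s: %v", ref, err)
				return
			}
			results[i] = startVM(ctx, id)
		}(i, ref)
	}
	wg.Wait()
	return results
}

func main() {
	fmt.Println(startAll(context.Background(), []string{"alpha", "bravo", "alpha"}))
}

Indexing results by slot rather than collecting them from a channel is what keeps the batch output in first-seen order without any post-hoc sorting.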

Replace the daemon's single global VM mutation lock with per-VM locks covering start/stop/restart/delete/kill/set, touch, reconcile, stale-stop, and stats updates. Same-VM operations stay serialized while different VMs progress in parallel, including newly created VMs as soon as their ID exists.
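
A minimal sketch of one way to key locks by VM ID: a map of mutexes guarded by a small registry lock. The Daemon fields and the withVMLock signature below are simplified stand-ins, not the daemon's real withVMLockByID:

package main

import (
	"fmt"
	"sync"
)

type Daemon struct {
	mu      sync.Mutex             // guards the vmLocks map itself
	vmLocks map[string]*sync.Mutex // one mutex per VM ID
}

func (d *Daemon) lockFor(id string) *sync.Mutex {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.vmLocks == nil {
		d.vmLocks = make(map[string]*sync.Mutex)
	}
	l, ok := d.vmLocks[id]
	if !ok {
		// A per-VM lock exists as soon as the ID does, so newly
		// created VMs are covered immediately.
		l = &sync.Mutex{}
		d.vmLocks[id] = l
	}
	return l
}

// withVMLock serializes callers sharing a VM ID; callers with different
// IDs take different mutexes and proceed in parallel.
func (d *Daemon) withVMLock(id string, fn func() error) error {
	l := d.lockFor(id)
	l.Lock()
	defer l.Unlock()
	return fn()
}

func main() {
	d := &Daemon{}
	var wg sync.WaitGroup
	for _, id := range []string{"vm-a", "vm-a", "vm-b"} {
		wg.Add(1)
		go func(id string) {
			defer wg.Done()
			_ = d.withVMLock(id, func() error {
				fmt.Println("holding lock for", id)
				return nil
			})
		}(id)
	}
	wg.Wait()
}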

Verified with go test ./... and make build.
Thales Maciel 2026-03-18 14:04:16 -03:00
parent 2d5bcb5516
commit 4812693c1e
5 changed files with 542 additions and 118 deletions


@@ -538,6 +538,108 @@ func TestStopVMFallsBackToForcedCleanupAfterGracefulTimeout(t *testing.T) {
	}
}

func TestWithVMLockByIDSerializesSameVM(t *testing.T) {
	ctx := context.Background()
	db := openDaemonStore(t)
	vm := testVM("serial", "image-serial", "172.16.0.30")
	if err := db.UpsertVM(ctx, vm); err != nil {
		t.Fatalf("UpsertVM: %v", err)
	}
	d := &Daemon{store: db}
	firstEntered := make(chan struct{})
	releaseFirst := make(chan struct{})
	secondEntered := make(chan struct{})
	errCh := make(chan error, 2)
	go func() {
		_, err := d.withVMLockByID(ctx, vm.ID, func(vm model.VMRecord) (model.VMRecord, error) {
			close(firstEntered)
			<-releaseFirst
			return vm, nil
		})
		errCh <- err
	}()
	select {
	case <-firstEntered:
	case <-time.After(500 * time.Millisecond):
		t.Fatal("first lock holder did not enter")
	}
	go func() {
		_, err := d.withVMLockByID(ctx, vm.ID, func(vm model.VMRecord) (model.VMRecord, error) {
			close(secondEntered)
			return vm, nil
		})
		errCh <- err
	}()
	select {
	case <-secondEntered:
		t.Fatal("second same-vm lock holder entered before release")
	case <-time.After(150 * time.Millisecond):
		// Still blocked after 150ms: the per-VM lock is serializing same-VM callers.
	}
	close(releaseFirst)
	select {
	case <-secondEntered:
	case <-time.After(500 * time.Millisecond):
		t.Fatal("second same-vm lock holder never entered")
	}
	for i := 0; i < 2; i++ {
		if err := <-errCh; err != nil {
			t.Fatalf("withVMLockByID returned error: %v", err)
		}
	}
}

func TestWithVMLockByIDAllowsDifferentVMsConcurrently(t *testing.T) {
	ctx := context.Background()
	db := openDaemonStore(t)
	vmA := testVM("alpha-lock", "image-alpha", "172.16.0.31")
	vmB := testVM("bravo-lock", "image-bravo", "172.16.0.32")
	for _, vm := range []model.VMRecord{vmA, vmB} {
		if err := db.UpsertVM(ctx, vm); err != nil {
			t.Fatalf("UpsertVM(%s): %v", vm.Name, err)
		}
	}
	d := &Daemon{store: db}
	started := make(chan string, 2)
	release := make(chan struct{})
	errCh := make(chan error, 2)
	run := func(id string) {
		_, err := d.withVMLockByID(ctx, id, func(vm model.VMRecord) (model.VMRecord, error) {
			started <- vm.ID
			<-release
			return vm, nil
		})
		errCh <- err
	}
	go run(vmA.ID)
	go run(vmB.ID)
	// Both holders must be inside their callbacks at once: locks for
	// different VM IDs must not block each other.
	for i := 0; i < 2; i++ {
		select {
		case <-started:
		case <-time.After(500 * time.Millisecond):
			t.Fatal("different VM locks did not overlap")
		}
	}
	close(release)
	for i := 0; i < 2; i++ {
		if err := <-errCh; err != nil {
			t.Fatalf("withVMLockByID returned error: %v", err)
		}
	}
}

func openDaemonStore(t *testing.T) *store.Store {
	t.Helper()
	db, err := store.Open(filepath.Join(t.TempDir(), "state.db"))