banger/internal/daemon/tap_pool.go
Thales Maciel 59f2766139
Move subsystem state/locks off Daemon into owning types
Daemon no longer owns a coarse mu shared across unrelated concerns.
Each subsystem now carries its own state and lock:

- tapPool: entries, next, and mu move onto a new tapPool struct.
- sessionRegistry: sessionControllers + its mutex move off Daemon.
- opRegistry[T asyncOp]: generic registry collapses the two ad-hoc
  vm-create and image-build operation maps (and their mutexes) into one
  shared type; the Begin/Status/Cancel/Prune methods simplify.
- vmLockSet: the sync.Map of per-VM mutexes moves into its own type;
  lockVMID forwards.
- Daemon.mu splits into imageOpsMu (image-registry mutations) and
  createVMMu (CreateVM serialisation) so image ops and VM creates no
  longer block each other.

Lock ordering collapses to vmLocks[id] -> {createVMMu, imageOpsMu} ->
subsystem-local leaves. doc.go and ARCHITECTURE.md updated.

No behavior change; tests green.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-15 15:58:33 -03:00

130 lines
2.8 KiB
Go

package daemon
import (
"context"
"fmt"
"strconv"
"strings"
"sync"
)
// tapPoolPrefix is the name prefix shared by every TAP device minted by the
// idle pool; parseTapPoolIndex relies on it to recover the numeric suffix.
const tapPoolPrefix = "tap-pool-"
// tapPool owns the idle TAP interface cache plus the monotonic index used to
// name new pool entries. All access goes through mu.
type tapPool struct {
	mu      sync.Mutex // guards entries and next
	entries []string   // idle, already-created pool taps available for reuse
	next    int        // numeric suffix for the next pool tap to be named
}
// initializeTapPool seeds the pool's naming counter from the taps already
// attached to stored VMs, so new pool names never collide with devices that
// survived a previous daemon run. It is a no-op when the pool is disabled or
// no store is configured.
func (d *Daemon) initializeTapPool(ctx context.Context) error {
	if d.config.TapPoolSize <= 0 || d.store == nil {
		return nil
	}
	vms, err := d.store.ListVMs(ctx)
	if err != nil {
		return err
	}
	// Find the highest pool index currently in use; -1 means none.
	highest := -1
	for _, vm := range vms {
		if idx, ok := parseTapPoolIndex(vm.Runtime.TapDevice); ok && idx > highest {
			highest = idx
		}
	}
	// Resume numbering one past the highest observed index (0 when none).
	d.tapPool.mu.Lock()
	d.tapPool.next = highest + 1
	d.tapPool.mu.Unlock()
	return nil
}
// ensureTapPool tops the idle TAP cache up to the configured size, creating
// one interface per iteration. It stops on context cancellation, daemon
// shutdown, a full pool, or the first creation failure.
func (d *Daemon) ensureTapPool(ctx context.Context) {
	if d.config.TapPoolSize <= 0 {
		return
	}
	for {
		// Bail out promptly if we are cancelled or the daemon is closing.
		select {
		case <-ctx.Done():
			return
		case <-d.closing:
			return
		default:
		}

		// Reserve the next name under the lock; create outside it, since
		// device setup may be slow.
		d.tapPool.mu.Lock()
		full := len(d.tapPool.entries) >= d.config.TapPoolSize
		var name string
		if !full {
			name = tapPoolPrefix + strconv.Itoa(d.tapPool.next)
			d.tapPool.next++
		}
		d.tapPool.mu.Unlock()
		if full {
			return
		}

		if err := d.createTap(ctx, name); err != nil {
			if d.logger != nil {
				d.logger.Warn("tap pool warmup failed", "tap_device", name, "error", err.Error())
			}
			return
		}

		d.tapPool.mu.Lock()
		d.tapPool.entries = append(d.tapPool.entries, name)
		d.tapPool.mu.Unlock()

		if d.logger != nil {
			d.logger.Debug("tap added to idle pool", "tap_device", name)
		}
	}
}
// acquireTap hands out an idle pool tap when one is available; otherwise it
// creates a fresh interface named fallbackName. The returned device name is
// owned by the caller.
func (d *Daemon) acquireTap(ctx context.Context, fallbackName string) (string, error) {
	// Pop the most recently pooled entry, if any (LIFO reuse).
	var pooled string
	d.tapPool.mu.Lock()
	if last := len(d.tapPool.entries) - 1; last >= 0 {
		pooled = d.tapPool.entries[last]
		d.tapPool.entries = d.tapPool.entries[:last]
	}
	d.tapPool.mu.Unlock()
	if pooled != "" {
		return pooled, nil
	}
	// Pool empty: create a one-off tap under the caller-supplied name.
	if err := d.createTap(ctx, fallbackName); err != nil {
		return "", err
	}
	return fallbackName, nil
}
// releaseTap returns a pool-named tap to the idle cache when there is room;
// otherwise it deletes the interface from the host. A successful deletion
// kicks off asynchronous pool warmup to replace lost capacity.
func (d *Daemon) releaseTap(ctx context.Context, tapName string) error {
	name := strings.TrimSpace(tapName)
	if name == "" {
		return nil
	}
	if isTapPoolName(name) {
		// Try to park the device back in the pool instead of deleting it.
		returned := false
		d.tapPool.mu.Lock()
		if len(d.tapPool.entries) < d.config.TapPoolSize {
			d.tapPool.entries = append(d.tapPool.entries, name)
			returned = true
		}
		d.tapPool.mu.Unlock()
		if returned {
			return nil
		}
	}
	// Pool full or non-pool tap: remove the device from the host.
	_, err := d.runner.RunSudo(ctx, "ip", "link", "del", name)
	if err == nil {
		// The warmup loop watches d.closing, so a background context is safe.
		go d.ensureTapPool(context.Background())
	}
	return err
}
// isTapPoolName reports whether tapName (ignoring surrounding whitespace)
// carries the pool naming prefix, i.e. was minted by the tap pool.
func isTapPoolName(tapName string) bool {
	trimmed := strings.TrimSpace(tapName)
	return strings.HasPrefix(trimmed, tapPoolPrefix)
}
func parseTapPoolIndex(tapName string) (int, bool) {
if !isTapPoolName(tapName) {
return 0, false
}
value, err := strconv.Atoi(strings.TrimPrefix(strings.TrimSpace(tapName), tapPoolPrefix))
if err != nil {
return 0, false
}
return value, true
}