Cut VM create wall time without changing VM semantics. Generate a work-seed ext4 sidecar during image builds and rootfs rebuilds, then clone and resize that seed for each new VM instead of rebuilding /root from scratch. Plumb the new seed artifact through config, runtime metadata, store state, runtime-bundle defaults, doctor checks, and default-image reconciliation so that older images without a seed still fall back cleanly. Add a daemon TAP pool that keeps idle bridge-attached devices warm, expose per-stage timing in lifecycle logs, add a create/SSH benchmark script plus a Make target, and teach verify.sh that tap-pool-* devices are reusable capacity rather than cleanup leaks. Validated with go test ./..., make build, ./verify.sh, and make bench-create ARGS="--runs 2".
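The clone-and-resize fast path can be pictured as below. This is a minimal sketch under assumptions, not the code from this change: cloneSeedImage, its arguments, and the choice of shelling out to cp --reflink=auto, truncate, e2fsck, and resize2fs are all hypothetical, though those tools are a standard way to grow an ext4 image file.

package daemon

import (
	"context"
	"fmt"
	"os/exec"
)

// cloneSeedImage is a hypothetical sketch of the fast path: clone the
// prebuilt ext4 seed (reflinked where the filesystem supports it), grow
// the backing file to the requested size, then grow ext4 to fill it.
func cloneSeedImage(ctx context.Context, seedPath, destPath, size string) error {
	steps := [][]string{
		{"cp", "--reflink=auto", seedPath, destPath}, // instant clone on btrfs/XFS, plain copy elsewhere
		{"truncate", "-s", size, destPath},           // extend the backing file, e.g. size = "8G"
		{"e2fsck", "-f", "-p", destPath},             // resize2fs expects a freshly checked filesystem
		{"resize2fs", destPath},                      // grow the filesystem to fill the new file size
	}
	for _, step := range steps {
		if out, err := exec.CommandContext(ctx, step[0], step[1:]...).CombinedOutput(); err != nil {
			return fmt.Errorf("%s: %w: %s", step[0], err, out)
		}
	}
	return nil
}

Note that e2fsck exits nonzero when it corrects anything, so this sketch treats a repaired seed as an error; whichever tools the real code uses, the point is that clone-plus-resize is near-constant-time compared with rebuilding /root from scratch.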
package daemon

import (
	"context"
	"fmt"
	"strconv"
	"strings"
)

// tapPoolPrefix marks TAP devices owned by the daemon's warm pool.
const tapPoolPrefix = "tap-pool-"

// initializeTapPool resumes the pool's naming counter after a daemon
// restart by scanning stored VMs for the highest tap-pool-* index in use.
func (d *Daemon) initializeTapPool(ctx context.Context) error {
	if d.config.TapPoolSize <= 0 || d.store == nil {
		return nil
	}
	vms, err := d.store.ListVMs(ctx)
	if err != nil {
		return err
	}
	next := 0
	for _, vm := range vms {
		if index, ok := parseTapPoolIndex(vm.Runtime.TapDevice); ok && index >= next {
			next = index + 1
		}
	}
	d.tapPoolMu.Lock()
	d.tapPoolNext = next
	d.tapPoolMu.Unlock()
	return nil
}

// ensureTapPool tops the idle pool back up to TapPoolSize, creating one
// TAP device per iteration until the pool is full, the context is
// cancelled, or the daemon is closing.
func (d *Daemon) ensureTapPool(ctx context.Context) {
	if d.config.TapPoolSize <= 0 {
		return
	}
	for {
		select {
		case <-ctx.Done():
			return
		case <-d.closing:
			return
		default:
		}

		d.tapPoolMu.Lock()
		if len(d.tapPool) >= d.config.TapPoolSize {
			d.tapPoolMu.Unlock()
			return
		}
		tapName := fmt.Sprintf("%s%d", tapPoolPrefix, d.tapPoolNext)
		d.tapPoolNext++
		d.tapPoolMu.Unlock()

		if err := d.createTap(ctx, tapName); err != nil {
			if d.logger != nil {
				d.logger.Warn("tap pool warmup failed", "tap_device", tapName, "error", err.Error())
			}
			return
		}

		d.tapPoolMu.Lock()
		d.tapPool = append(d.tapPool, tapName)
		d.tapPoolMu.Unlock()

		if d.logger != nil {
			d.logger.Debug("tap added to idle pool", "tap_device", tapName)
		}
	}
}

// acquireTap hands out a warm device from the pool when one is available,
// and otherwise creates fallbackName on demand.
func (d *Daemon) acquireTap(ctx context.Context, fallbackName string) (string, error) {
	d.tapPoolMu.Lock()
	if n := len(d.tapPool); n > 0 {
		tapName := d.tapPool[n-1]
		d.tapPool = d.tapPool[:n-1]
		d.tapPoolMu.Unlock()
		return tapName, nil
	}
	d.tapPoolMu.Unlock()

	if err := d.createTap(ctx, fallbackName); err != nil {
		return "", err
	}
	return fallbackName, nil
}

// releaseTap returns a pool-owned device to the idle pool if there is room,
// and deletes the link otherwise. A successful delete triggers an async
// top-up so steady-state capacity stays at TapPoolSize.
func (d *Daemon) releaseTap(ctx context.Context, tapName string) error {
	tapName = strings.TrimSpace(tapName)
	if tapName == "" {
		return nil
	}
	if isTapPoolName(tapName) {
		d.tapPoolMu.Lock()
		if len(d.tapPool) < d.config.TapPoolSize {
			d.tapPool = append(d.tapPool, tapName)
			d.tapPoolMu.Unlock()
			return nil
		}
		d.tapPoolMu.Unlock()
	}
	_, err := d.runner.RunSudo(ctx, "ip", "link", "del", tapName)
	if err == nil {
		go d.ensureTapPool(context.Background())
	}
	return err
}

// isTapPoolName reports whether tapName belongs to the daemon's warm pool.
func isTapPoolName(tapName string) bool {
	return strings.HasPrefix(strings.TrimSpace(tapName), tapPoolPrefix)
}

// parseTapPoolIndex extracts the numeric suffix from a tap-pool-* name.
func parseTapPoolIndex(tapName string) (int, bool) {
	if !isTapPoolName(tapName) {
		return 0, false
	}
	value, err := strconv.Atoi(strings.TrimPrefix(strings.TrimSpace(tapName), tapPoolPrefix))
	if err != nil {
		return 0, false
	}
	return value, true
}
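The create/destroy call sites live outside this file; a plausible wiring, assuming the package's VM type, and with vmTapName, attachNetwork, and detachNetwork invented purely for illustration:

// Hypothetical create path: prefer a warm device from the pool and fall
// back to creating a per-VM tap under a name of the caller's choosing.
func (d *Daemon) attachNetwork(ctx context.Context, vm *VM) error {
	tapName, err := d.acquireTap(ctx, vmTapName(vm.ID)) // vmTapName is illustrative
	if err != nil {
		return err
	}
	vm.Runtime.TapDevice = tapName // recorded so initializeTapPool can resume the counter
	return nil
}

// Hypothetical destroy path: releaseTap either parks a tap-pool-* device
// back in the idle pool or deletes the link and kicks off an async top-up.
func (d *Daemon) detachNetwork(ctx context.Context, vm *VM) error {
	return d.releaseTap(ctx, vm.Runtime.TapDevice)
}

At startup the daemon would presumably pair these with initializeTapPool, to resume the tap-pool-* counter from stored VM metadata, and a background go d.ensureTapPool(ctx) to warm the pool before the first create.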