banger/internal/firecracker/client.go
Thales Maciel 787b234029
Fix VM startup regressions after shell-out cleanup
The shell-out reduction pass introduced two linked startup regressions in the hot path for vm create.

Make flattenNestedWorkHome repair the temporary nested /root tree without trying to read a root-owned 0700 directory as the calling user: chmod the scratch directory under sudo, then copy each child entry individually before removing it. Add a regression test for that overlap/permission case.

Restore the Firecracker launch wrapper that sets umask 000 before exec. Firecracker was creating the API socket, but the SDK could not use it during machine.Start after the direct sudo launch, so vm create timed out waiting on a socket that already existed.

Validated with go test ./... and make build.
2026-03-18 12:18:34 -03:00

195 lines
4.4 KiB
Go

package firecracker
import (
"context"
"io"
"log/slog"
"os"
"os/exec"
"strings"
"sync"
sdk "github.com/firecracker-microvm/firecracker-go-sdk"
models "github.com/firecracker-microvm/firecracker-go-sdk/client/models"
"github.com/sirupsen/logrus"
)
// MachineConfig carries everything needed to define and launch one
// firecracker microVM.
type MachineConfig struct {
	BinaryPath      string       // path to the firecracker binary to exec
	VMID            string       // unique VM identifier, passed via --id
	SocketPath      string       // firecracker API socket path (--api-sock)
	LogPath         string       // file receiving firecracker stdout/stderr; empty disables logging
	MetricsPath     string       // firecracker metrics output path
	KernelImagePath string       // guest kernel image
	InitrdPath      string       // optional initrd image
	KernelArgs      string       // kernel boot command line
	RootDrivePath   string       // root filesystem image, attached read-write as "rootfs"
	WorkDrivePath   string       // optional secondary drive ("work"); blank/whitespace to skip
	TapDevice       string       // host tap device name for the guest NIC
	VCPUCount       int          // number of guest vCPUs
	MemoryMiB       int          // guest memory size in MiB
	Logger          *slog.Logger // sink for forwarded SDK log records; may be nil
}
// Machine wraps an sdk.Machine together with the log file that captures
// the firecracker process's output.
type Machine struct {
	machine   *sdk.Machine
	logFile   *os.File  // nil when logging is disabled (empty LogPath)
	closeOnce sync.Once // guards logFile close: Start's error path and the Wait goroutine can both call closeLog
}
// Client is a thin wrapper around the SDK's API-socket client.
type Client struct {
	client *sdk.Client
}
// NewMachine assembles an SDK machine for cfg, wiring in the per-VM log
// file and the sudo-wrapped process runner. The returned Machine owns the
// log file handle and closes it when the VMM exits or fails to start.
func NewMachine(ctx context.Context, cfg MachineConfig) (*Machine, error) {
	lf, err := openLogFile(cfg.LogPath)
	if err != nil {
		return nil, err
	}
	runner := buildProcessRunner(cfg, lf)
	m, err := sdk.NewMachine(
		ctx,
		buildConfig(cfg),
		sdk.WithProcessRunner(runner),
		sdk.WithLogger(newLogger(cfg.Logger)),
	)
	if err != nil {
		// SDK construction failed; release the log file we opened above.
		if lf != nil {
			_ = lf.Close()
		}
		return nil, err
	}
	return &Machine{machine: m, logFile: lf}, nil
}
// Start boots the microVM. On failure the log file is closed immediately;
// on success a background goroutine reaps the VMM process and closes the
// log once it exits.
func (m *Machine) Start(ctx context.Context) error {
	err := m.machine.Start(ctx)
	if err != nil {
		m.closeLog()
		return err
	}
	// Reap the VMM in the background so the log file is released exactly
	// when the process terminates, independent of the caller's context.
	go func() {
		_ = m.machine.Wait(context.Background())
		m.closeLog()
	}()
	return nil
}
// PID reports the OS process ID of the running firecracker VMM.
func (m *Machine) PID() (int, error) {
	return m.machine.PID()
}
// New returns a Client bound to the firecracker API socket at apiSock.
// SDK log records are forwarded to logger via the slog bridge.
func New(apiSock string, logger *slog.Logger) *Client {
	sdkClient := sdk.NewClient(apiSock, newLogger(logger), false)
	return &Client{client: sdkClient}
}
// SendCtrlAltDel asks the VMM to deliver Ctrl-Alt-Del to the guest,
// which the guest typically treats as a shutdown request.
func (c *Client) SendCtrlAltDel(ctx context.Context) error {
	actionType := models.InstanceActionInfoActionTypeSendCtrlAltDel
	info := &models.InstanceActionInfo{ActionType: &actionType}
	_, err := c.client.CreateSyncAction(ctx, info)
	return err
}
// openLogFile opens (creating if necessary) the append-mode log file at
// path. An empty path means logging is disabled: both results are nil.
func openLogFile(path string) (*os.File, error) {
	if path == "" {
		// No log configured; callers treat a nil *os.File as "no log".
		return nil, nil
	}
	const flags = os.O_CREATE | os.O_WRONLY | os.O_APPEND
	return os.OpenFile(path, flags, 0o644)
}
// buildConfig translates our MachineConfig into the SDK's sdk.Config.
// The root drive is always attached read-write as "rootfs"; a secondary
// "work" drive is added only when WorkDrivePath is non-blank.
func buildConfig(cfg MachineConfig) sdk.Config {
	builder := sdk.NewDrivesBuilder(cfg.RootDrivePath)
	builder = builder.WithRootDrive(cfg.RootDrivePath, sdk.WithDriveID("rootfs"), sdk.WithReadOnly(false))
	if strings.TrimSpace(cfg.WorkDrivePath) != "" {
		builder = builder.AddDrive(cfg.WorkDrivePath, false, sdk.WithDriveID("work"))
	}
	nic := sdk.NetworkInterface{
		StaticConfiguration: &sdk.StaticNetworkConfiguration{
			HostDevName: cfg.TapDevice,
		},
	}
	return sdk.Config{
		SocketPath:        cfg.SocketPath,
		LogPath:           cfg.LogPath,
		MetricsPath:       cfg.MetricsPath,
		KernelImagePath:   cfg.KernelImagePath,
		InitrdPath:        cfg.InitrdPath,
		KernelArgs:        cfg.KernelArgs,
		Drives:            builder.Build(),
		NetworkInterfaces: sdk.NetworkInterfaces{nic},
		MachineCfg: models.MachineConfiguration{
			VcpuCount:  sdk.Int64(int64(cfg.VCPUCount)),
			MemSizeMib: sdk.Int64(int64(cfg.MemoryMiB)),
			Smt:        sdk.Bool(false),
		},
		VMID: cfg.VMID,
	}
}
// buildProcessRunner constructs the sudo-wrapped firecracker command.
// The sh wrapper sets umask 000 before exec so the API socket created by
// the root-owned firecracker process remains usable by the (non-root)
// SDK client during machine.Start. Stdout/stderr go to logFile when set.
func buildProcessRunner(cfg MachineConfig, logFile *os.File) *exec.Cmd {
	var script strings.Builder
	script.WriteString("umask 000 && exec ")
	script.WriteString(shellQuote(cfg.BinaryPath))
	script.WriteString(" --api-sock ")
	script.WriteString(shellQuote(cfg.SocketPath))
	script.WriteString(" --id ")
	script.WriteString(shellQuote(cfg.VMID))
	cmd := exec.Command("sudo", "-n", "sh", "-c", script.String())
	cmd.Stdin = nil
	if logFile != nil {
		cmd.Stdout = logFile
		cmd.Stderr = logFile
	}
	return cmd
}
// shellQuote wraps value in single quotes for safe interpolation into a
// POSIX shell command line. Embedded single quotes are handled by closing
// the quote, emitting a double-quoted quote, and reopening ('"'"').
func shellQuote(value string) string {
	const escapedQuote = `'"'"'`
	var b strings.Builder
	b.Grow(len(value) + 2)
	b.WriteByte('\'')
	b.WriteString(strings.ReplaceAll(value, "'", escapedQuote))
	b.WriteByte('\'')
	return b.String()
}
// newLogger adapts our *slog.Logger to the *logrus.Entry the SDK expects.
// The logrus logger's own output is discarded; every record is instead
// forwarded through the slogHook at debug level and above.
func newLogger(base *slog.Logger) *logrus.Entry {
	l := logrus.New()
	l.SetLevel(logrus.DebugLevel)
	l.SetOutput(io.Discard)
	l.AddHook(slogHook{logger: base})
	return logrus.NewEntry(l)
}
// slogHook is a logrus hook that forwards every entry to a *slog.Logger.
type slogHook struct {
	logger *slog.Logger // may be nil; Fire then drops the record
}
// Levels registers the hook for every logrus level.
func (h slogHook) Levels() []logrus.Level {
	return logrus.AllLevels
}
// Fire maps a logrus entry onto the wrapped *slog.Logger, translating the
// level and flattening entry.Data into key/value attributes. A nil logger
// silently drops the record.
func (h slogHook) Fire(entry *logrus.Entry) error {
	if h.logger == nil {
		return nil
	}
	var level slog.Level
	switch entry.Level {
	case logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel:
		level = slog.LevelError
	case logrus.WarnLevel:
		level = slog.LevelWarn
	default:
		// Info, Debug, and Trace all map to debug to keep SDK chatter quiet.
		level = slog.LevelDebug
	}
	attrs := make([]any, 0, 2*(len(entry.Data)+1))
	attrs = append(attrs, "component", "firecracker_sdk")
	for k, v := range entry.Data {
		attrs = append(attrs, k, v)
	}
	h.logger.Log(context.Background(), level, entry.Message, attrs...)
	return nil
}
// closeLog closes the VM log file at most once; safe to call from both
// Start's error path and the background Wait goroutine.
func (m *Machine) closeLog() {
	m.closeOnce.Do(func() {
		if m.logFile == nil {
			return
		}
		_ = m.logFile.Close()
	})
}