banger/internal/firecracker/client.go
Thales Maciel 60294e8c90
Fix VM lifecycle issues behind verify.sh
Make the Firecracker and bangerd processes outlive short-lived CLI request contexts so vm create no longer kills the VMM or daemon as soon as the RPC returns.

Fix fresh-VM SSH by flattening the seeded /root work disk when the copied home tree lands under a nested root/ directory, and write a guest sshd override to keep root pubkey auth explicit while debugging.

Harden teardown and smoke diagnostics: verify.sh now reports early Firecracker exit and delete failures directly, while dm snapshot cleanup tolerates already-gone handles and retries busy mapper removal long enough for Firecracker to release the device.

Validation: go test ./..., make build, bash -n verify.sh, direct SSH against a fresh VM, and a live ./verify.sh run that now completes with [verify] ok.
2026-03-17 14:43:09 -03:00

199 lines
4.4 KiB
Go

package firecracker
import (
"context"
"io"
"log/slog"
"os"
"os/exec"
"strings"
"sync"
sdk "github.com/firecracker-microvm/firecracker-go-sdk"
models "github.com/firecracker-microvm/firecracker-go-sdk/client/models"
"github.com/sirupsen/logrus"
)
// MachineConfig carries everything needed to launch one Firecracker microVM:
// the VMM binary, identity and socket/log paths, boot artifacts, drives,
// networking, sizing, and an optional structured logger for SDK output.
type MachineConfig struct {
	// BinaryPath is the firecracker executable launched by the process runner.
	BinaryPath string
	// VMID is the Firecracker VM identifier passed via --id.
	VMID string
	// SocketPath is the Firecracker API unix socket path (--api-sock).
	SocketPath string
	// LogPath, when non-empty, receives both Firecracker's log output and
	// the launch process's stdout/stderr (opened append-only).
	LogPath string
	// MetricsPath is where Firecracker writes its metrics.
	MetricsPath string
	// KernelImagePath, InitrdPath and KernelArgs define the guest boot setup.
	KernelImagePath string
	InitrdPath string
	KernelArgs string
	// RootDrivePath backs the writable "rootfs" drive; WorkDrivePath backs
	// the secondary writable "work" drive.
	RootDrivePath string
	WorkDrivePath string
	// TapDevice is the host tap interface name for the guest NIC.
	TapDevice string
	// VCPUCount and MemoryMiB size the guest machine.
	VCPUCount int
	MemoryMiB int
	// Logger, when non-nil, receives the SDK's log records via slogHook.
	Logger *slog.Logger
}
// Machine wraps an SDK machine together with its log file handle.
// closeOnce guards the log file so it is closed exactly once, whether
// Start fails or the background wait goroutine observes VMM exit.
type Machine struct {
	machine *sdk.Machine
	logFile *os.File
	closeOnce sync.Once
}
// Client is a thin wrapper around the SDK's API-socket client, used to
// issue actions (e.g. SendCtrlAltDel) against a running VMM.
type Client struct {
	client *sdk.Client
}
// NewMachine opens the configured log file (if any), wires up a
// sudo-wrapped firecracker process runner, and constructs — but does not
// start — the SDK machine. If SDK construction fails, the log file is
// closed before the error is returned so the handle does not leak.
func NewMachine(ctx context.Context, cfg MachineConfig) (*Machine, error) {
	lf, err := openLogFile(cfg.LogPath)
	if err != nil {
		return nil, err
	}
	vmm, err := sdk.NewMachine(
		ctx,
		buildConfig(cfg),
		sdk.WithLogger(newLogger(cfg.Logger)),
		sdk.WithProcessRunner(buildProcessRunner(cfg, lf)),
	)
	if err != nil {
		if lf != nil {
			_ = lf.Close()
		}
		return nil, err
	}
	return &Machine{machine: vmm, logFile: lf}, nil
}
// Start boots the VMM. On failure the log file is released immediately.
// On success a detached goroutine waits for process exit using a
// background context — deliberately not ctx, so a short-lived CLI request
// context cannot reap the VMM — and closes the log file afterwards.
func (m *Machine) Start(ctx context.Context) error {
	err := m.machine.Start(ctx)
	if err != nil {
		m.closeLog()
		return err
	}
	reap := func() {
		_ = m.machine.Wait(context.Background())
		m.closeLog()
	}
	go reap()
	return nil
}
// PID reports the OS process ID of the firecracker VMM process, or an
// error from the SDK if the process is not available.
func (m *Machine) PID() (int, error) {
	return m.machine.PID()
}
// New returns a Client bound to an existing Firecracker API socket.
// SDK output is routed to logger via newLogger; the trailing false is the
// SDK client's debug flag.
func New(apiSock string, logger *slog.Logger) *Client {
	return &Client{client: sdk.NewClient(apiSock, newLogger(logger), false)}
}
// SendCtrlAltDel injects the Ctrl-Alt-Del action through the Firecracker
// API, asking the guest to begin a graceful shutdown.
func (c *Client) SendCtrlAltDel(ctx context.Context) error {
	actionType := models.InstanceActionInfoActionTypeSendCtrlAltDel
	info := &models.InstanceActionInfo{ActionType: &actionType}
	_, err := c.client.CreateSyncAction(ctx, info)
	return err
}
// openLogFile opens path for appending, creating it with mode 0644 if
// absent. An empty path means file logging is disabled: both return
// values are nil in that case, and callers must tolerate a nil *os.File.
func openLogFile(path string) (*os.File, error) {
	if path == "" {
		return nil, nil
	}
	const flags = os.O_CREATE | os.O_WRONLY | os.O_APPEND
	return os.OpenFile(path, flags, 0o644)
}
// buildConfig translates our MachineConfig into the SDK's Config: a
// writable root drive ("rootfs"), a writable secondary work drive
// ("work"), a single tap-backed network interface, and the requested
// vCPU/memory sizing with SMT disabled.
func buildConfig(cfg MachineConfig) sdk.Config {
	builder := sdk.NewDrivesBuilder(cfg.RootDrivePath)
	builder = builder.WithRootDrive(cfg.RootDrivePath, sdk.WithDriveID("rootfs"), sdk.WithReadOnly(false))
	builder = builder.AddDrive(cfg.WorkDrivePath, false, sdk.WithDriveID("work"))

	nic := sdk.NetworkInterface{
		StaticConfiguration: &sdk.StaticNetworkConfiguration{
			HostDevName: cfg.TapDevice,
		},
	}
	return sdk.Config{
		SocketPath:        cfg.SocketPath,
		LogPath:           cfg.LogPath,
		MetricsPath:       cfg.MetricsPath,
		KernelImagePath:   cfg.KernelImagePath,
		InitrdPath:        cfg.InitrdPath,
		KernelArgs:        cfg.KernelArgs,
		Drives:            builder.Build(),
		NetworkInterfaces: sdk.NetworkInterfaces{nic},
		MachineCfg: models.MachineConfiguration{
			VcpuCount:  sdk.Int64(int64(cfg.VCPUCount)),
			MemSizeMib: sdk.Int64(int64(cfg.MemoryMiB)),
			Smt:        sdk.Bool(false),
		},
		VMID: cfg.VMID,
	}
}
// buildProcessRunner constructs the command that launches firecracker via
// sudo -n (non-interactive). The sh wrapper first sets umask 000 so files
// the VMM creates (API socket, logs) stay accessible, then execs the
// shell-quoted binary. When logFile is non-nil, the process's stdout and
// stderr are both appended to it.
func buildProcessRunner(cfg MachineConfig, logFile *os.File) *exec.Cmd {
	launch := "exec " + shellQuote(cfg.BinaryPath) +
		" --api-sock " + shellQuote(cfg.SocketPath) +
		" --id " + shellQuote(cfg.VMID)
	script := "umask 000 && " + launch
	cmd := exec.Command("sudo", "-n", "sh", "-c", script)
	cmd.Stdin = nil
	if logFile == nil {
		return cmd
	}
	cmd.Stdout = logFile
	cmd.Stderr = logFile
	return cmd
}
// shellQuote wraps value in single quotes for safe interpolation into an
// sh -c script; each embedded single quote is replaced with the standard
// '"'"' escape sequence (close quote, double-quoted quote, reopen quote).
func shellQuote(value string) string {
	var b strings.Builder
	b.WriteByte('\'')
	b.WriteString(strings.ReplaceAll(value, "'", `'"'"'`))
	b.WriteByte('\'')
	return b.String()
}
// newLogger adapts a *slog.Logger to the logrus entry the SDK expects.
// The logrus instance discards its own output; every record is forwarded
// to base through slogHook instead. Debug level ensures no records are
// filtered out before the hook sees them.
func newLogger(base *slog.Logger) *logrus.Entry {
	l := logrus.New()
	l.SetLevel(logrus.DebugLevel)
	l.SetOutput(io.Discard)
	l.AddHook(slogHook{logger: base})
	return logrus.NewEntry(l)
}
// slogHook is a logrus hook that forwards every record to a *slog.Logger.
// A nil logger is tolerated: Fire becomes a no-op.
type slogHook struct {
	logger *slog.Logger
}
// Levels registers the hook for every logrus level so nothing is dropped
// before the slog level mapping in Fire.
func (h slogHook) Levels() []logrus.Level {
	return logrus.AllLevels
}
// Fire forwards one logrus record to the wrapped slog.Logger, mapping
// panic/fatal/error to Error, warn to Warn, and everything else to Debug.
// Each record is tagged with component=firecracker_sdk, followed by the
// entry's structured fields. A nil destination logger drops the record.
func (h slogHook) Fire(entry *logrus.Entry) error {
	if h.logger == nil {
		return nil
	}
	var level slog.Level
	switch entry.Level {
	case logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel:
		level = slog.LevelError
	case logrus.WarnLevel:
		level = slog.LevelWarn
	default:
		level = slog.LevelDebug
	}
	args := []any{"component", "firecracker_sdk"}
	for key, value := range entry.Data {
		args = append(args, key, value)
	}
	h.logger.Log(context.Background(), level, entry.Message, args...)
	return nil
}
// closeLog closes the VMM log file exactly once; it is safe to call from
// both Start's error path and the background wait goroutine, and is a
// no-op when file logging was never enabled.
func (m *Machine) closeLog() {
	m.closeOnce.Do(func() {
		if m.logFile == nil {
			return
		}
		_ = m.logFile.Close()
	})
}