VM start, image build, and network/setup failures were hard to diagnose because bangerd emitted almost no lifecycle logs and the Firecracker SDK logger was discarded. This adds a daemon-wide JSON logger with configurable log level so failures leave breadcrumbs instead of only side effects. Log the main daemon and VM lifecycle stages, preserve raw Firecracker and image-build helper output in dedicated files, and include those log paths in daemon status and returned errors. Bridge SDK logrus output into the daemon logger at debug level so low-level Firecracker diagnostics are available without making normal info logs unreadable. Validation: go test ./... and make build. Left unrelated worktree changes out of this commit, including internal/api/types.go, the deleted shell scripts, and my-rootfs.ext4.
199 lines
4.4 KiB
Go
199 lines
4.4 KiB
Go
package firecracker
|
|
|
|
import (
	"context"
	"io"
	"log/slog"
	"os"
	"os/exec"
	"sort"
	"strings"
	"sync"

	sdk "github.com/firecracker-microvm/firecracker-go-sdk"
	models "github.com/firecracker-microvm/firecracker-go-sdk/client/models"
	"github.com/sirupsen/logrus"
)
|
|
|
|
// MachineConfig collects everything needed to launch one Firecracker microVM:
// binary and socket paths, boot artifacts, drives, networking, sizing, and an
// optional daemon logger that receives bridged SDK output.
type MachineConfig struct {
	BinaryPath      string // path to the firecracker binary to exec
	VMID            string // unique VM identifier passed via --id
	SocketPath      string // Firecracker API unix socket path (--api-sock)
	LogPath         string // raw Firecracker process/log output file; empty disables file logging
	MetricsPath     string // Firecracker metrics file path
	KernelImagePath string // guest kernel image
	InitrdPath      string // optional initrd image
	KernelArgs      string // kernel boot command line
	RootDrivePath   string // backing file for the read-write root drive ("rootfs")
	WorkDrivePath   string // backing file for the secondary "work" drive
	TapDevice       string // host tap device name for the VM's network interface
	VCPUCount       int    // number of guest vCPUs
	MemoryMiB       int    // guest memory size in MiB
	Logger          *slog.Logger // daemon logger; SDK logrus output is forwarded here at debug level
}
|
|
|
|
// Machine wraps a running (or startable) SDK machine together with the log
// file that captures the raw Firecracker process output.
type Machine struct {
	machine   *sdk.Machine
	logFile   *os.File  // nil when no LogPath was configured
	closeOnce sync.Once // guards logFile.Close across the failure path and the exit-wait goroutine
}
|
|
|
|
// Client is a thin wrapper around the SDK client for talking to an already
// running Firecracker instance over its API socket.
type Client struct {
	client *sdk.Client
}
|
|
|
|
func NewMachine(ctx context.Context, cfg MachineConfig) (*Machine, error) {
|
|
logFile, err := openLogFile(cfg.LogPath)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
cmd := buildProcessRunner(ctx, cfg, logFile)
|
|
machine, err := sdk.NewMachine(
|
|
ctx,
|
|
buildConfig(cfg),
|
|
sdk.WithProcessRunner(cmd),
|
|
sdk.WithLogger(newLogger(cfg.Logger)),
|
|
)
|
|
if err != nil {
|
|
if logFile != nil {
|
|
_ = logFile.Close()
|
|
}
|
|
return nil, err
|
|
}
|
|
|
|
return &Machine{machine: machine, logFile: logFile}, nil
|
|
}
|
|
|
|
func (m *Machine) Start(ctx context.Context) error {
|
|
if err := m.machine.Start(ctx); err != nil {
|
|
m.closeLog()
|
|
return err
|
|
}
|
|
|
|
go func() {
|
|
_ = m.machine.Wait(context.Background())
|
|
m.closeLog()
|
|
}()
|
|
|
|
return nil
|
|
}
|
|
|
|
func (m *Machine) PID() (int, error) {
|
|
return m.machine.PID()
|
|
}
|
|
|
|
func New(apiSock string, logger *slog.Logger) *Client {
|
|
return &Client{client: sdk.NewClient(apiSock, newLogger(logger), false)}
|
|
}
|
|
|
|
func (c *Client) SendCtrlAltDel(ctx context.Context) error {
|
|
action := models.InstanceActionInfoActionTypeSendCtrlAltDel
|
|
_, err := c.client.CreateSyncAction(ctx, &models.InstanceActionInfo{
|
|
ActionType: &action,
|
|
})
|
|
return err
|
|
}
|
|
|
|
// openLogFile opens path for appending, creating the file if needed.
// An empty path means "no log file" and yields (nil, nil).
func openLogFile(path string) (*os.File, error) {
	if len(path) == 0 {
		return nil, nil
	}
	const flags = os.O_CREATE | os.O_WRONLY | os.O_APPEND
	return os.OpenFile(path, flags, 0o644)
}
|
|
|
|
func buildConfig(cfg MachineConfig) sdk.Config {
|
|
drives := sdk.NewDrivesBuilder(
|
|
cfg.RootDrivePath,
|
|
).
|
|
WithRootDrive(cfg.RootDrivePath, sdk.WithDriveID("rootfs"), sdk.WithReadOnly(false)).
|
|
AddDrive(cfg.WorkDrivePath, false, sdk.WithDriveID("work")).
|
|
Build()
|
|
|
|
return sdk.Config{
|
|
SocketPath: cfg.SocketPath,
|
|
LogPath: cfg.LogPath,
|
|
MetricsPath: cfg.MetricsPath,
|
|
KernelImagePath: cfg.KernelImagePath,
|
|
InitrdPath: cfg.InitrdPath,
|
|
KernelArgs: cfg.KernelArgs,
|
|
Drives: drives,
|
|
NetworkInterfaces: sdk.NetworkInterfaces{{
|
|
StaticConfiguration: &sdk.StaticNetworkConfiguration{
|
|
HostDevName: cfg.TapDevice,
|
|
},
|
|
}},
|
|
MachineCfg: models.MachineConfiguration{
|
|
VcpuCount: sdk.Int64(int64(cfg.VCPUCount)),
|
|
MemSizeMib: sdk.Int64(int64(cfg.MemoryMiB)),
|
|
Smt: sdk.Bool(false),
|
|
},
|
|
VMID: cfg.VMID,
|
|
}
|
|
}
|
|
|
|
func buildProcessRunner(ctx context.Context, cfg MachineConfig, logFile *os.File) *exec.Cmd {
|
|
script := strings.Join([]string{
|
|
"umask 000",
|
|
"exec " + shellQuote(cfg.BinaryPath) +
|
|
" --api-sock " + shellQuote(cfg.SocketPath) +
|
|
" --id " + shellQuote(cfg.VMID),
|
|
}, " && ")
|
|
|
|
cmd := exec.CommandContext(ctx, "sudo", "-n", "sh", "-c", script)
|
|
cmd.Stdin = nil
|
|
if logFile != nil {
|
|
cmd.Stdout = logFile
|
|
cmd.Stderr = logFile
|
|
}
|
|
return cmd
|
|
}
|
|
|
|
// shellQuote wraps value in single quotes for safe interpolation into a
// POSIX shell command, escaping embedded single quotes with the standard
// close-quote/double-quote/reopen ('"'"') sequence.
func shellQuote(value string) string {
	const escapedQuote = `'"'"'`
	var b strings.Builder
	b.WriteByte('\'')
	b.WriteString(strings.ReplaceAll(value, "'", escapedQuote))
	b.WriteByte('\'')
	return b.String()
}
|
|
|
|
func newLogger(base *slog.Logger) *logrus.Entry {
|
|
logger := logrus.New()
|
|
logger.SetOutput(io.Discard)
|
|
logger.SetLevel(logrus.DebugLevel)
|
|
logger.AddHook(slogHook{logger: base})
|
|
return logrus.NewEntry(logger)
|
|
}
|
|
|
|
// slogHook is a logrus hook that forwards SDK log records into the daemon's
// slog logger, so low-level Firecracker diagnostics land in the daemon log.
type slogHook struct {
	logger *slog.Logger // destination logger; nil disables forwarding
}
|
|
|
|
func (h slogHook) Levels() []logrus.Level {
|
|
return logrus.AllLevels
|
|
}
|
|
|
|
func (h slogHook) Fire(entry *logrus.Entry) error {
|
|
if h.logger == nil {
|
|
return nil
|
|
}
|
|
level := slog.LevelDebug
|
|
switch entry.Level {
|
|
case logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel:
|
|
level = slog.LevelError
|
|
case logrus.WarnLevel:
|
|
level = slog.LevelWarn
|
|
default:
|
|
level = slog.LevelDebug
|
|
}
|
|
attrs := make([]any, 0, len(entry.Data)*2+2)
|
|
attrs = append(attrs, "component", "firecracker_sdk")
|
|
for key, value := range entry.Data {
|
|
attrs = append(attrs, key, value)
|
|
}
|
|
h.logger.Log(context.Background(), level, entry.Message, attrs...)
|
|
return nil
|
|
}
|
|
|
|
func (m *Machine) closeLog() {
|
|
m.closeOnce.Do(func() {
|
|
if m.logFile != nil {
|
|
_ = m.logFile.Close()
|
|
}
|
|
})
|
|
}
|