Remind users when a VM is still running after `banger vm ssh` exits instead of silently dropping them back to the host shell.

Attach a Firecracker vsock device to each VM, persist the host vsock path/CID,
add a new guest-side banger-vsock-pingd responder to the runtime bundle and both
image-build paths, and expose a vm.ping RPC that the CLI and TUI call after SSH
returns. Doctor and start/build preflight now validate the helper plus
/dev/vhost-vsock so the feature fails early and clearly.

Validated with go mod tidy, bash -n customize.sh, git diff --check, make build,
and GOCACHE=/tmp/banger-gocache go test ./... outside the sandbox, because the
daemon tests need real Unix/UDP sockets. Rebuild the image/rootfs used for new
VMs so the guest ping service is present.
284 lines
6.4 KiB
Go
284 lines
6.4 KiB
Go
package firecracker
|
|
|
|
import (
|
|
"bufio"
|
|
"context"
|
|
"fmt"
|
|
"io"
|
|
"log/slog"
|
|
"os"
|
|
"os/exec"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
|
|
sdk "github.com/firecracker-microvm/firecracker-go-sdk"
|
|
models "github.com/firecracker-microvm/firecracker-go-sdk/client/models"
|
|
sdkvsock "github.com/firecracker-microvm/firecracker-go-sdk/vsock"
|
|
"github.com/sirupsen/logrus"
|
|
|
|
"banger/internal/vsockping"
|
|
)
|
|
|
|
// MachineConfig collects everything needed to launch one Firecracker microVM:
// the firecracker binary, identity and socket paths, boot artifacts, block
// devices, networking, vsock wiring, and machine sizing.
type MachineConfig struct {
	BinaryPath      string        // path to the firecracker executable
	VMID            string        // unique VM ID, passed to firecracker via --id
	SocketPath      string        // Unix socket for the firecracker API server
	LogPath         string        // file receiving VMM stdout/stderr; empty disables file logging
	MetricsPath     string        // firecracker metrics file path
	KernelImagePath string        // guest kernel image
	InitrdPath      string        // optional initrd image
	KernelArgs      string        // guest kernel command line
	Drives          []DriveConfig // block devices; entries with a blank Path are skipped
	TapDevice       string        // host tap device name backing the guest NIC
	VSockPath       string        // host Unix socket backing the vsock device; blank disables vsock
	VSockCID        uint32        // guest vsock CID; 0 disables vsock
	VCPUCount       int           // number of guest vCPUs
	MemoryMiB       int           // guest memory size in MiB
	Logger          *slog.Logger  // sink for firecracker SDK log output; may be nil
}
|
|
|
|
// DriveConfig describes one block device attached to the VM.
type DriveConfig struct {
	ID       string // drive ID reported to firecracker; defaults to "rootfs"/"drive" when blank
	Path     string // host path to the backing image; blank entries are ignored
	ReadOnly bool   // attach the drive read-only
	IsRoot   bool   // marks this drive as the root/boot device
}
|
|
|
|
// Machine wraps a firecracker-go-sdk machine together with the VMM log file
// handle, which is closed exactly once when the VMM exits or fails to start.
type Machine struct {
	machine   *sdk.Machine
	logFile   *os.File  // nil when no LogPath was configured
	closeOnce sync.Once // guards logFile.Close between Start failure and the Wait goroutine
}
|
|
|
|
// Client is a thin wrapper around the firecracker SDK API client, bound to a
// single VM's API socket.
type Client struct {
	client *sdk.Client
}
|
|
|
|
func NewMachine(ctx context.Context, cfg MachineConfig) (*Machine, error) {
|
|
logFile, err := openLogFile(cfg.LogPath)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
cmd := buildProcessRunner(cfg, logFile)
|
|
machine, err := sdk.NewMachine(
|
|
ctx,
|
|
buildConfig(cfg),
|
|
sdk.WithProcessRunner(cmd),
|
|
sdk.WithLogger(newLogger(cfg.Logger)),
|
|
)
|
|
if err != nil {
|
|
if logFile != nil {
|
|
_ = logFile.Close()
|
|
}
|
|
return nil, err
|
|
}
|
|
|
|
return &Machine{machine: machine, logFile: logFile}, nil
|
|
}
|
|
|
|
func (m *Machine) Start(ctx context.Context) error {
|
|
if err := m.machine.Start(ctx); err != nil {
|
|
m.closeLog()
|
|
return err
|
|
}
|
|
|
|
go func() {
|
|
_ = m.machine.Wait(context.Background())
|
|
m.closeLog()
|
|
}()
|
|
|
|
return nil
|
|
}
|
|
|
|
// PID returns the OS process ID of the running firecracker VMM process.
func (m *Machine) PID() (int, error) {
	return m.machine.PID()
}
|
|
|
|
// New returns a Client that talks to the firecracker API server listening on
// apiSock, routing SDK log output through logger (which may be nil).
func New(apiSock string, logger *slog.Logger) *Client {
	return &Client{client: sdk.NewClient(apiSock, newLogger(logger), false)}
}
|
|
|
|
func (c *Client) SendCtrlAltDel(ctx context.Context) error {
|
|
action := models.InstanceActionInfoActionTypeSendCtrlAltDel
|
|
_, err := c.client.CreateSyncAction(ctx, &models.InstanceActionInfo{
|
|
ActionType: &action,
|
|
})
|
|
return err
|
|
}
|
|
|
|
// openLogFile opens path for appending, creating it (mode 0644) if needed.
// An empty path means logging is disabled and yields (nil, nil).
func openLogFile(path string) (*os.File, error) {
	if path != "" {
		return os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
	}
	return nil, nil
}
|
|
|
|
// buildConfig translates a MachineConfig into the SDK's sdk.Config: the drive
// set (root first, then extras), a single tap-backed network interface, the
// optional vsock device, and the machine sizing.
func buildConfig(cfg MachineConfig) sdk.Config {
	rootDrive, extraDrives := splitDrives(cfg.Drives)
	// NewDrivesBuilder seeds the root drive from the path; WithRootDrive is
	// called again so the drive ID and read-only flag are set explicitly.
	drivesBuilder := sdk.NewDrivesBuilder(rootDrive.Path).
		WithRootDrive(rootDrive.Path, sdk.WithDriveID(defaultDriveID(rootDrive, "rootfs")), sdk.WithReadOnly(rootDrive.ReadOnly))
	for _, drive := range extraDrives {
		// Skip placeholder entries with no backing file.
		if strings.TrimSpace(drive.Path) == "" {
			continue
		}
		drivesBuilder = drivesBuilder.AddDrive(drive.Path, drive.ReadOnly, sdk.WithDriveID(defaultDriveID(drive, "drive")))
	}
	drives := drivesBuilder.Build()

	return sdk.Config{
		SocketPath:      cfg.SocketPath,
		LogPath:         cfg.LogPath,
		MetricsPath:     cfg.MetricsPath,
		KernelImagePath: cfg.KernelImagePath,
		InitrdPath:      cfg.InitrdPath,
		KernelArgs:      cfg.KernelArgs,
		Drives:          drives,
		NetworkInterfaces: sdk.NetworkInterfaces{{
			StaticConfiguration: &sdk.StaticNetworkConfiguration{
				HostDevName: cfg.TapDevice,
			},
		}},
		// nil when the config does not fully specify a vsock device.
		VsockDevices: buildVsockDevices(cfg),
		MachineCfg: models.MachineConfiguration{
			VcpuCount:  sdk.Int64(int64(cfg.VCPUCount)),
			MemSizeMib: sdk.Int64(int64(cfg.MemoryMiB)),
			Smt:        sdk.Bool(false),
		},
		VMID: cfg.VMID,
	}
}
|
|
|
|
func buildVsockDevices(cfg MachineConfig) []sdk.VsockDevice {
|
|
if strings.TrimSpace(cfg.VSockPath) == "" || cfg.VSockCID == 0 {
|
|
return nil
|
|
}
|
|
return []sdk.VsockDevice{{
|
|
ID: "vsock",
|
|
Path: cfg.VSockPath,
|
|
CID: cfg.VSockCID,
|
|
}}
|
|
}
|
|
|
|
func splitDrives(drives []DriveConfig) (DriveConfig, []DriveConfig) {
|
|
root := DriveConfig{ID: "rootfs"}
|
|
var extras []DriveConfig
|
|
for _, drive := range drives {
|
|
if strings.TrimSpace(drive.Path) == "" {
|
|
continue
|
|
}
|
|
if drive.IsRoot {
|
|
root = drive
|
|
if root.ID == "" {
|
|
root.ID = "rootfs"
|
|
}
|
|
continue
|
|
}
|
|
extras = append(extras, drive)
|
|
}
|
|
return root, extras
|
|
}
|
|
|
|
func defaultDriveID(drive DriveConfig, fallback string) string {
|
|
if strings.TrimSpace(drive.ID) != "" {
|
|
return drive.ID
|
|
}
|
|
return fallback
|
|
}
|
|
|
|
func buildProcessRunner(cfg MachineConfig, logFile *os.File) *exec.Cmd {
|
|
script := "umask 000 && exec " + shellQuote(cfg.BinaryPath) +
|
|
" --api-sock " + shellQuote(cfg.SocketPath) +
|
|
" --id " + shellQuote(cfg.VMID)
|
|
cmd := exec.Command("sudo", "-n", "sh", "-c", script)
|
|
cmd.Stdin = nil
|
|
if logFile != nil {
|
|
cmd.Stdout = logFile
|
|
cmd.Stderr = logFile
|
|
}
|
|
return cmd
|
|
}
|
|
|
|
// shellQuote wraps value in single quotes for safe interpolation into an
// sh -c script, escaping embedded single quotes with the '"'"' idiom.
func shellQuote(value string) string {
	const escapedQuote = `'"'"'`
	var b strings.Builder
	b.WriteByte('\'')
	b.WriteString(strings.ReplaceAll(value, "'", escapedQuote))
	b.WriteByte('\'')
	return b.String()
}
|
|
|
|
func newLogger(base *slog.Logger) *logrus.Entry {
|
|
logger := logrus.New()
|
|
logger.SetOutput(io.Discard)
|
|
logger.SetLevel(logrus.DebugLevel)
|
|
logger.AddHook(slogHook{logger: base})
|
|
return logrus.NewEntry(logger)
|
|
}
|
|
|
|
// PingVSock performs a round-trip liveness check against the guest's vsock
// ping responder. It dials the host-side Unix socket backing the VM's vsock
// device (socketPath) on vsockping.Port, writes the request line, and
// verifies the guest replies with the expected response line. The dial
// retries for up to 3s at 100ms intervals; the subsequent write/read honors
// ctx's deadline when one is set, and otherwise falls back to a 3s deadline.
// It returns an error on dial failure, I/O failure, or a mismatched response.
func PingVSock(ctx context.Context, logger *slog.Logger, socketPath string) error {
	conn, err := sdkvsock.DialContext(
		ctx,
		socketPath,
		vsockping.Port,
		sdkvsock.WithRetryTimeout(3*time.Second),
		sdkvsock.WithRetryInterval(100*time.Millisecond),
		sdkvsock.WithLogger(newLogger(logger)),
	)
	if err != nil {
		return err
	}
	defer conn.Close()

	// Bound the request/response exchange so a wedged guest cannot hang us.
	if deadline, ok := ctx.Deadline(); ok {
		_ = conn.SetDeadline(deadline)
	} else {
		_ = conn.SetDeadline(time.Now().Add(3 * time.Second))
	}

	if _, err := io.WriteString(conn, vsockping.RequestLine); err != nil {
		return err
	}
	line, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		return err
	}
	// Compare trimmed so trailing-newline differences do not cause mismatch.
	if strings.TrimSpace(line) != strings.TrimSpace(vsockping.ResponseLine) {
		return fmt.Errorf("unexpected vsock response %q", strings.TrimSpace(line))
	}
	return nil
}
|
|
|
|
// slogHook forwards every logrus record emitted by the firecracker SDK to a
// structured slog.Logger.
type slogHook struct {
	logger *slog.Logger // may be nil, in which case records are dropped
}
|
|
|
|
// Levels registers the hook for all logrus levels.
func (h slogHook) Levels() []logrus.Level {
	return logrus.AllLevels
}
|
|
|
|
func (h slogHook) Fire(entry *logrus.Entry) error {
|
|
if h.logger == nil {
|
|
return nil
|
|
}
|
|
level := slog.LevelDebug
|
|
switch entry.Level {
|
|
case logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel:
|
|
level = slog.LevelError
|
|
case logrus.WarnLevel:
|
|
level = slog.LevelWarn
|
|
default:
|
|
level = slog.LevelDebug
|
|
}
|
|
attrs := make([]any, 0, len(entry.Data)*2+2)
|
|
attrs = append(attrs, "component", "firecracker_sdk")
|
|
for key, value := range entry.Data {
|
|
attrs = append(attrs, key, value)
|
|
}
|
|
h.logger.Log(context.Background(), level, entry.Message, attrs...)
|
|
return nil
|
|
}
|
|
|
|
func (m *Machine) closeLog() {
|
|
m.closeOnce.Do(func() {
|
|
if m.logFile != nil {
|
|
_ = m.logFile.Close()
|
|
}
|
|
})
|
|
}
|