Guest host-key verification was off in all three SSH paths:
* Go SSH (internal/guest/ssh.go) used ssh.InsecureIgnoreHostKey
* `banger vm ssh` passed StrictHostKeyChecking=no together with
  UserKnownHostsFile=/dev/null
* `~/.ssh/config` Host *.vm shipped the same posture into the
user's global config
Now each path verifies against a banger-owned known_hosts file at
`~/.local/state/banger/ssh/known_hosts` with TOFU semantics:
* First dial to a VM pins the key.
* Subsequent dials require an exact match. A mismatch fails with
an explicit "possible MITM" error.
* `vm delete` removes the entries so a future VM reusing the IP
or name re-pins cleanly.
* The user's `~/.ssh/known_hosts` is untouched.
Changes:
internal/guest/known_hosts.go (new) — OpenSSH-compatible parser,
TOFUHostKeyCallback, RemoveKnownHosts. Process-wide mutex
around the file.
internal/guest/ssh.go — Dial and WaitForSSH grew a knownHostsPath
parameter threaded through the callback. Empty path keeps the
insecure callback (tests + throwaway tools only; documented).
internal/daemon/{guest_sessions,session_attach,session_lifecycle,
session_stream}.go — call sites pass d.layout.KnownHostsPath.
internal/daemon/ssh_client_config.go — the ~/.ssh/config Host *.vm
block now points at banger's known_hosts and uses
StrictHostKeyChecking=accept-new. Missing path → fail closed.
internal/daemon/vm_lifecycle.go — deleteVMLocked drops known_hosts
entries for the VM's IP and DNS name via removeVMKnownHosts.
internal/cli/banger.go — sshCommandArgs swaps StrictHostKeyChecking
no + /dev/null for banger's file + accept-new. Path resolution
failure falls through to StrictHostKeyChecking=yes.
internal/paths/paths.go — Layout gains SSHDir + KnownHostsPath;
Ensure creates SSHDir at 0700.
Tests (internal/guest/known_hosts_test.go): pin on first use, accept
matching key on second dial, reject mismatch, empty path skips
checking, RemoveKnownHosts drops the entry, re-pin works after
remove. Existing daemon + cli tests updated to assert the new
posture and regression-guard against the old flags.
Live verified: vm run writes the pin to banger's known_hosts at 0600
inside a 0700 dir; banger vm ssh + ssh root@<vm>.vm both succeed
using the pin; vm delete clears it.
3744 lines
119 KiB
Go
3744 lines
119 KiB
Go
package cli
|
|
|
|
import (
|
|
"archive/tar"
|
|
"bufio"
|
|
"bytes"
|
|
"context"
|
|
"crypto/sha256"
|
|
"encoding/hex"
|
|
"encoding/json"
|
|
"errors"
|
|
"fmt"
|
|
"io"
|
|
"io/fs"
|
|
"net"
|
|
"net/url"
|
|
"os"
|
|
"os/exec"
|
|
"path/filepath"
|
|
"sort"
|
|
"strings"
|
|
"sync"
|
|
"syscall"
|
|
"text/tabwriter"
|
|
"time"
|
|
|
|
"banger/internal/api"
|
|
"banger/internal/buildinfo"
|
|
"banger/internal/config"
|
|
"banger/internal/daemon"
|
|
"banger/internal/guest"
|
|
"banger/internal/hostnat"
|
|
"banger/internal/imagecat"
|
|
"banger/internal/imagepull"
|
|
"banger/internal/model"
|
|
"banger/internal/paths"
|
|
"banger/internal/rpc"
|
|
"banger/internal/sessionstream"
|
|
"banger/internal/system"
|
|
"banger/internal/toolingplan"
|
|
"banger/internal/vmdns"
|
|
"banger/internal/vsockagent"
|
|
|
|
"github.com/klauspost/compress/zstd"
|
|
"github.com/spf13/cobra"
|
|
)
|
|
|
|
var (
|
|
bangerdPathFunc = paths.BangerdPath
|
|
daemonExePath = func(pid int) string {
|
|
return filepath.Join("/proc", fmt.Sprintf("%d", pid), "exe")
|
|
}
|
|
doctorFunc = daemon.Doctor
|
|
sshExecFunc = func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, args []string) error {
|
|
sshCmd := exec.CommandContext(ctx, "ssh", args...)
|
|
sshCmd.Stdout = stdout
|
|
sshCmd.Stderr = stderr
|
|
sshCmd.Stdin = stdin
|
|
return sshCmd.Run()
|
|
}
|
|
hostCommandOutputFunc = func(ctx context.Context, name string, args ...string) ([]byte, error) {
|
|
cmd := exec.CommandContext(ctx, name, args...)
|
|
output, err := cmd.CombinedOutput()
|
|
if err == nil {
|
|
return output, nil
|
|
}
|
|
command := strings.TrimSpace(strings.Join(append([]string{name}, args...), " "))
|
|
detail := strings.TrimSpace(string(output))
|
|
if detail == "" {
|
|
return output, fmt.Errorf("%s: %w", command, err)
|
|
}
|
|
return output, fmt.Errorf("%s: %w: %s", command, err, detail)
|
|
}
|
|
vmHealthFunc = func(ctx context.Context, socketPath, idOrName string) (api.VMHealthResult, error) {
|
|
return rpc.Call[api.VMHealthResult](ctx, socketPath, "vm.health", api.VMRefParams{IDOrName: idOrName})
|
|
}
|
|
vmSSHFunc = func(ctx context.Context, socketPath, idOrName string) (api.VMSSHResult, error) {
|
|
return rpc.Call[api.VMSSHResult](ctx, socketPath, "vm.ssh", api.VMRefParams{IDOrName: idOrName})
|
|
}
|
|
vmDeleteFunc = func(ctx context.Context, socketPath, idOrName string) error {
|
|
_, err := rpc.Call[api.VMShowResult](ctx, socketPath, "vm.delete", api.VMRefParams{IDOrName: idOrName})
|
|
return err
|
|
}
|
|
vmListFunc = func(ctx context.Context, socketPath string) (api.VMListResult, error) {
|
|
return rpc.Call[api.VMListResult](ctx, socketPath, "vm.list", api.Empty{})
|
|
}
|
|
daemonPingFunc = func(ctx context.Context, socketPath string) (api.PingResult, error) {
|
|
return rpc.Call[api.PingResult](ctx, socketPath, "ping", api.Empty{})
|
|
}
|
|
vmCreateBeginFunc = func(ctx context.Context, socketPath string, params api.VMCreateParams) (api.VMCreateBeginResult, error) {
|
|
return rpc.Call[api.VMCreateBeginResult](ctx, socketPath, "vm.create.begin", params)
|
|
}
|
|
vmCreateStatusFunc = func(ctx context.Context, socketPath, operationID string) (api.VMCreateStatusResult, error) {
|
|
return rpc.Call[api.VMCreateStatusResult](ctx, socketPath, "vm.create.status", api.VMCreateStatusParams{ID: operationID})
|
|
}
|
|
vmCreateCancelFunc = func(ctx context.Context, socketPath, operationID string) error {
|
|
_, err := rpc.Call[api.Empty](ctx, socketPath, "vm.create.cancel", api.VMCreateStatusParams{ID: operationID})
|
|
return err
|
|
}
|
|
vmPortsFunc = func(ctx context.Context, socketPath, idOrName string) (api.VMPortsResult, error) {
|
|
return rpc.Call[api.VMPortsResult](ctx, socketPath, "vm.ports", api.VMRefParams{IDOrName: idOrName})
|
|
}
|
|
vmWorkspacePrepareFunc = func(ctx context.Context, socketPath string, params api.VMWorkspacePrepareParams) (api.VMWorkspacePrepareResult, error) {
|
|
return rpc.Call[api.VMWorkspacePrepareResult](ctx, socketPath, "vm.workspace.prepare", params)
|
|
}
|
|
vmWorkspaceExportFunc = func(ctx context.Context, socketPath string, params api.WorkspaceExportParams) (api.WorkspaceExportResult, error) {
|
|
return rpc.Call[api.WorkspaceExportResult](ctx, socketPath, "vm.workspace.export", params)
|
|
}
|
|
guestSessionStartFunc = func(ctx context.Context, socketPath string, params api.GuestSessionStartParams) (api.GuestSessionShowResult, error) {
|
|
return rpc.Call[api.GuestSessionShowResult](ctx, socketPath, "guest.session.start", params)
|
|
}
|
|
guestSessionGetFunc = func(ctx context.Context, socketPath string, params api.GuestSessionRefParams) (api.GuestSessionShowResult, error) {
|
|
return rpc.Call[api.GuestSessionShowResult](ctx, socketPath, "guest.session.get", params)
|
|
}
|
|
guestSessionListFunc = func(ctx context.Context, socketPath, idOrName string) (api.GuestSessionListResult, error) {
|
|
return rpc.Call[api.GuestSessionListResult](ctx, socketPath, "guest.session.list", api.VMRefParams{IDOrName: idOrName})
|
|
}
|
|
guestSessionStopFunc = func(ctx context.Context, socketPath string, params api.GuestSessionRefParams) (api.GuestSessionShowResult, error) {
|
|
return rpc.Call[api.GuestSessionShowResult](ctx, socketPath, "guest.session.stop", params)
|
|
}
|
|
guestSessionKillFunc = func(ctx context.Context, socketPath string, params api.GuestSessionRefParams) (api.GuestSessionShowResult, error) {
|
|
return rpc.Call[api.GuestSessionShowResult](ctx, socketPath, "guest.session.kill", params)
|
|
}
|
|
guestSessionLogsFunc = func(ctx context.Context, socketPath string, params api.GuestSessionLogsParams) (api.GuestSessionLogsResult, error) {
|
|
return rpc.Call[api.GuestSessionLogsResult](ctx, socketPath, "guest.session.logs", params)
|
|
}
|
|
guestSessionAttachBeginFunc = func(ctx context.Context, socketPath string, params api.GuestSessionAttachBeginParams) (api.GuestSessionAttachBeginResult, error) {
|
|
return rpc.Call[api.GuestSessionAttachBeginResult](ctx, socketPath, "guest.session.attach.begin", params)
|
|
}
|
|
guestSessionSendFunc = func(ctx context.Context, socketPath string, params api.GuestSessionSendParams) (api.GuestSessionSendResult, error) {
|
|
return rpc.Call[api.GuestSessionSendResult](ctx, socketPath, "guest.session.send", params)
|
|
}
|
|
guestWaitForSSHFunc = func(ctx context.Context, address, privateKeyPath string, interval time.Duration) error {
|
|
knownHosts, _ := bangerKnownHostsPath()
|
|
return guest.WaitForSSH(ctx, address, privateKeyPath, knownHosts, interval)
|
|
}
|
|
guestDialFunc = func(ctx context.Context, address, privateKeyPath string) (vmRunGuestClient, error) {
|
|
knownHosts, _ := bangerKnownHostsPath()
|
|
return guest.Dial(ctx, address, privateKeyPath, knownHosts)
|
|
}
|
|
prepareVMRunRepoCopyFunc = prepareVMRunRepoCopy
|
|
buildVMRunToolingPlanFunc = toolingplan.Build
|
|
cwdFunc = os.Getwd
|
|
)
|
|
|
|
// vmRunGuestClient is the guest-side operation set that `vm run` needs.
// guest.Dial's client satisfies it (see guestDialFunc); tests substitute
// fakes through the same seam.
type vmRunGuestClient interface {
	// Close releases the underlying connection.
	Close() error
	// UploadFile writes data to remotePath on the guest with the given mode,
	// streaming progress/diagnostics to logWriter.
	UploadFile(ctx context.Context, remotePath string, mode os.FileMode, data []byte, logWriter io.Writer) error
	// RunScript executes script on the guest, streaming output to logWriter.
	RunScript(ctx context.Context, script string, logWriter io.Writer) error
	// StreamTar tars sourceDir on the host and pipes the stream into
	// remoteCommand running on the guest.
	StreamTar(ctx context.Context, sourceDir, remoteCommand string, logWriter io.Writer) error
	// StreamTarEntries is like StreamTar but sends only the named entries
	// under sourceDir.
	StreamTarEntries(ctx context.Context, sourceDir string, entries []string, remoteCommand string, logWriter io.Writer) error
}
|
|
|
|
// vmRunRepoSpec describes the host git repository that `vm run` copies into
// the guest workspace. It is produced by inspectVMRunRepo and consumed by
// prepareVMRunRepoCopy / runVMRun (defined elsewhere in this file).
// NOTE(review): field meanings below are inferred from names and the call
// sites visible here; confirm against inspectVMRunRepo, which is not in
// this chunk.
type vmRunRepoSpec struct {
	SourcePath    string // path argument passed to `vm run`
	RepoRoot      string // repository root on the host — presumably the git toplevel
	RepoName      string
	HeadCommit    string
	CurrentBranch string
	BranchName    string // --branch value: new guest branch to create, if any
	FromRef       string // --from value: base ref for BranchName
	BaseCommit    string
	OriginURL     string
	GitUserName   string
	GitUserEmail  string
	OverlayPaths  []string
}
|
|
|
|
// vmRunShallowFetchDepth is consumed by the vm-run workspace seeding path.
// NOTE(review): the name suggests a git shallow-fetch --depth; the use site
// is not visible in this chunk — confirm there.
const vmRunShallowFetchDepth = 10

// vmRunToolingInstallTimeoutSeconds bounds guest tooling installation.
// NOTE(review): units are seconds per the name; confirm at the use site.
const vmRunToolingInstallTimeoutSeconds = 120

// vmRunSSHTimeout bounds how long `vm run` waits for guest ssh after
// the vsock agent is ready. vsock readiness already means systemd
// reached the banger-vsock-agent unit in multi-user.target, so sshd
// should be up within seconds; a minute plus change is generous
// headroom for a slow first boot while still short enough that a
// wedged sshd surfaces promptly instead of hanging forever. Var, not
// const, so tests can shrink it.
var vmRunSSHTimeout = 90 * time.Second
|
|
|
|
func NewBangerCommand() *cobra.Command {
|
|
root := &cobra.Command{
|
|
Use: "banger",
|
|
Short: "Manage development VMs and images",
|
|
SilenceUsage: true,
|
|
SilenceErrors: true,
|
|
RunE: helpNoArgs,
|
|
}
|
|
root.AddCommand(newDaemonCommand(), newDoctorCommand(), newImageCommand(), newInternalCommand(), newKernelCommand(), newVersionCommand(), newPSCommand(), newVMCommand())
|
|
return root
|
|
}
|
|
|
|
func newDoctorCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "doctor",
|
|
Short: "Check host and runtime readiness",
|
|
Args: noArgsUsage("usage: banger doctor"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
report, err := doctorFunc(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if err := printDoctorReport(cmd.OutOrStdout(), report); err != nil {
|
|
return err
|
|
}
|
|
if report.HasFailures() {
|
|
return errors.New("doctor found failing checks")
|
|
}
|
|
return nil
|
|
},
|
|
}
|
|
}
|
|
|
|
func newVersionCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "version",
|
|
Short: "Show banger build information",
|
|
Args: noArgsUsage("usage: banger version"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
_, err := fmt.Fprint(cmd.OutOrStdout(), formatBuildInfoBlock(buildinfo.Current()))
|
|
return err
|
|
},
|
|
}
|
|
}
|
|
|
|
func newInternalCommand() *cobra.Command {
|
|
cmd := &cobra.Command{
|
|
Use: "internal",
|
|
Hidden: true,
|
|
RunE: helpNoArgs,
|
|
}
|
|
cmd.AddCommand(
|
|
newInternalNATCommand(),
|
|
newInternalWorkSeedCommand(),
|
|
newInternalSSHKeyPathCommand(),
|
|
newInternalFirecrackerPathCommand(),
|
|
newInternalVSockAgentPathCommand(),
|
|
newInternalMakeBundleCommand(),
|
|
)
|
|
return cmd
|
|
}
|
|
|
|
func newInternalSSHKeyPathCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "ssh-key-path",
|
|
Hidden: true,
|
|
Args: noArgsUsage("usage: banger internal ssh-key-path"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
layout, err := paths.Resolve()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
cfg, err := config.Load(layout)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
_, err = fmt.Fprintln(cmd.OutOrStdout(), cfg.SSHKeyPath)
|
|
return err
|
|
},
|
|
}
|
|
}
|
|
|
|
func newInternalFirecrackerPathCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "firecracker-path",
|
|
Hidden: true,
|
|
Args: noArgsUsage("usage: banger internal firecracker-path"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
layout, err := paths.Resolve()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
cfg, err := config.Load(layout)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if strings.TrimSpace(cfg.FirecrackerBin) == "" {
|
|
return errors.New("firecracker binary not configured; install firecracker or set firecracker_bin")
|
|
}
|
|
_, err = fmt.Fprintln(cmd.OutOrStdout(), cfg.FirecrackerBin)
|
|
return err
|
|
},
|
|
}
|
|
}
|
|
|
|
func newInternalVSockAgentPathCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "vsock-agent-path",
|
|
Hidden: true,
|
|
Args: noArgsUsage("usage: banger internal vsock-agent-path"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
path, err := paths.CompanionBinaryPath("banger-vsock-agent")
|
|
if err != nil {
|
|
return err
|
|
}
|
|
_, err = fmt.Fprintln(cmd.OutOrStdout(), path)
|
|
return err
|
|
},
|
|
}
|
|
}
|
|
|
|
func newInternalMakeBundleCommand() *cobra.Command {
|
|
var (
|
|
rootfsTarPath string
|
|
name string
|
|
distro string
|
|
arch string
|
|
kernelRef string
|
|
description string
|
|
sizeSpec string
|
|
outPath string
|
|
)
|
|
cmd := &cobra.Command{
|
|
Use: "make-bundle",
|
|
Hidden: true,
|
|
Short: "Build a banger image bundle (.tar.zst) from a flat rootfs tar",
|
|
Args: noArgsUsage("usage: banger internal make-bundle --rootfs-tar <file|-> --name <n> --out <bundle.tar.zst>"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
return runInternalMakeBundle(cmd, internalMakeBundleOpts{
|
|
rootfsTarPath: rootfsTarPath,
|
|
name: name,
|
|
distro: distro,
|
|
arch: arch,
|
|
kernelRef: kernelRef,
|
|
description: description,
|
|
sizeSpec: sizeSpec,
|
|
outPath: outPath,
|
|
})
|
|
},
|
|
}
|
|
cmd.Flags().StringVar(&rootfsTarPath, "rootfs-tar", "", "flat rootfs tar file, or '-' for stdin")
|
|
cmd.Flags().StringVar(&name, "name", "", "bundle name (filesystem-safe identifier)")
|
|
cmd.Flags().StringVar(&distro, "distro", "", "distro label (e.g. debian)")
|
|
cmd.Flags().StringVar(&arch, "arch", "x86_64", "architecture label")
|
|
cmd.Flags().StringVar(&kernelRef, "kernel-ref", "", "kernelcat entry name this image pairs with")
|
|
cmd.Flags().StringVar(&description, "description", "", "short description")
|
|
cmd.Flags().StringVar(&sizeSpec, "size", "", "rootfs ext4 size (e.g. 4G); defaults to tree size + 25%")
|
|
cmd.Flags().StringVar(&outPath, "out", "", "output bundle path (.tar.zst)")
|
|
return cmd
|
|
}
|
|
|
|
// internalMakeBundleOpts carries the flag values of
// `banger internal make-bundle`; consumed by runInternalMakeBundle.
type internalMakeBundleOpts struct {
	rootfsTarPath string // flat rootfs tar file, or "-" for stdin
	name          string // bundle name, validated by imagecat.ValidateName
	distro        string // distro label, e.g. "debian"
	arch          string // architecture label; defaults to "x86_64" when blank
	kernelRef     string // kernelcat entry name this image pairs with
	description   string // short human-readable description
	sizeSpec      string // optional ext4 size (model.ParseSize); blank = auto-size
	outPath       string // output bundle path (.tar.zst)
}
|
|
|
|
// runInternalMakeBundle builds a banger image bundle from a flat rootfs tar:
// it extracts the tar into a staging tree, strips container markers, builds
// an ext4 image, applies ownership, injects the guest agents, writes
// manifest.json, packages rootfs.ext4 + manifest.json as tar+zstd, and
// finally prints the bundle path, sha256, and size to stdout. Progress
// messages go to stderr. Returns the first error encountered.
func runInternalMakeBundle(cmd *cobra.Command, opts internalMakeBundleOpts) error {
	// Validate required inputs before doing any work.
	if err := imagecat.ValidateName(opts.name); err != nil {
		return err
	}
	if strings.TrimSpace(opts.rootfsTarPath) == "" {
		return errors.New("--rootfs-tar is required")
	}
	if strings.TrimSpace(opts.outPath) == "" {
		return errors.New("--out is required")
	}
	if strings.TrimSpace(opts.arch) == "" {
		opts.arch = "x86_64"
	}

	// An explicit --size wins; otherwise sizeBytes stays 0 and is derived
	// from the extracted tree further below.
	var sizeBytes int64
	if s := strings.TrimSpace(opts.sizeSpec); s != "" {
		n, err := model.ParseSize(s)
		if err != nil {
			return fmt.Errorf("parse --size: %w", err)
		}
		sizeBytes = n
	}

	ctx := cmd.Context()
	// All intermediate artifacts live under a throwaway staging dir.
	stagingRoot, err := os.MkdirTemp("", "banger-mkbundle-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(stagingRoot)
	rootfsTree := filepath.Join(stagingRoot, "rootfs")
	if err := os.MkdirAll(rootfsTree, 0o755); err != nil {
		return err
	}

	// Open tar input (file or stdin).
	var tarReader io.Reader
	if opts.rootfsTarPath == "-" {
		tarReader = cmd.InOrStdin()
	} else {
		f, err := os.Open(opts.rootfsTarPath)
		if err != nil {
			return fmt.Errorf("open rootfs tar: %w", err)
		}
		defer f.Close()
		tarReader = f
	}

	fmt.Fprintln(cmd.ErrOrStderr(), "[make-bundle] extracting rootfs")
	meta, err := imagepull.FlattenTar(ctx, tarReader, rootfsTree)
	if err != nil {
		return fmt.Errorf("flatten rootfs: %w", err)
	}

	// docker create drops /.dockerenv (and containerd drops
	// /run/.containerenv) into the container's writable layer, so
	// `docker export` includes them in the tar. systemd-detect-virt
	// reads those files and flags the boot as virtualization=docker,
	// which disables udev device-unit activation (including the work-
	// disk dev-vdb.device) and leaves systemd waiting forever. Strip
	// them before building the ext4.
	for _, marker := range []string{".dockerenv", "run/.containerenv"} {
		path := filepath.Join(rootfsTree, marker)
		if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
			return fmt.Errorf("strip %s: %w", marker, err)
		}
		// Keep the ownership metadata in sync with the tree.
		delete(meta.Entries, marker)
	}

	if sizeBytes <= 0 {
		treeSize, err := dirSize(rootfsTree)
		if err != nil {
			return fmt.Errorf("size rootfs tree: %w", err)
		}
		// +50% headroom. mkfs.ext4 needs space for inode tables,
		// block-group descriptors, journal, and the default 5%
		// reserved-blocks margin on top of the raw data.
		sizeBytes = treeSize + treeSize/2
		if sizeBytes < imagepull.MinExt4Size {
			sizeBytes = imagepull.MinExt4Size
		}
	}

	ext4Path := filepath.Join(stagingRoot, imagecat.RootfsFilename)
	runner := system.NewRunner()
	fmt.Fprintf(cmd.ErrOrStderr(), "[make-bundle] building rootfs.ext4 (%d bytes)\n", sizeBytes)
	if err := imagepull.BuildExt4(ctx, runner, rootfsTree, ext4Path, sizeBytes); err != nil {
		return fmt.Errorf("build ext4: %w", err)
	}
	fmt.Fprintln(cmd.ErrOrStderr(), "[make-bundle] applying ownership fixup")
	if err := imagepull.ApplyOwnership(ctx, runner, ext4Path, meta); err != nil {
		return fmt.Errorf("apply ownership: %w", err)
	}
	fmt.Fprintln(cmd.ErrOrStderr(), "[make-bundle] injecting guest agents")
	vsockBin, err := paths.CompanionBinaryPath("banger-vsock-agent")
	if err != nil {
		return fmt.Errorf("locate vsock agent: %w", err)
	}
	if err := imagepull.InjectGuestAgents(ctx, runner, ext4Path, imagepull.GuestAgentAssets{VsockAgentBin: vsockBin}); err != nil {
		return fmt.Errorf("inject guest agents: %w", err)
	}

	// Write manifest.json.
	manifest := imagecat.Manifest{
		Name:        opts.name,
		Distro:      strings.TrimSpace(opts.distro),
		Arch:        opts.arch,
		KernelRef:   strings.TrimSpace(opts.kernelRef),
		Description: strings.TrimSpace(opts.description),
	}
	manifestPath := filepath.Join(stagingRoot, imagecat.ManifestFilename)
	manifestData, err := json.MarshalIndent(manifest, "", "  ")
	if err != nil {
		return err
	}
	if err := os.WriteFile(manifestPath, append(manifestData, '\n'), 0o644); err != nil {
		return err
	}

	fmt.Fprintln(cmd.ErrOrStderr(), "[make-bundle] packaging bundle")
	if err := writeBundleTarZst(opts.outPath, ext4Path, manifestPath); err != nil {
		return fmt.Errorf("write bundle: %w", err)
	}

	// Report where the bundle landed plus integrity data for catalogs.
	sum, err := sha256HexFile(opts.outPath)
	if err != nil {
		return err
	}
	stat, err := os.Stat(opts.outPath)
	if err != nil {
		return err
	}
	fmt.Fprintf(cmd.OutOrStdout(), "bundle: %s\nsha256: %s\nsize: %d\n", opts.outPath, sum, stat.Size())
	return nil
}
|
|
|
|
// dirSize returns the sum of regular-file sizes under root (no symlink follow).
|
|
func dirSize(root string) (int64, error) {
|
|
var total int64
|
|
err := filepath.WalkDir(root, func(_ string, d fs.DirEntry, err error) error {
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if !d.Type().IsRegular() {
|
|
return nil
|
|
}
|
|
info, err := d.Info()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
total += info.Size()
|
|
return nil
|
|
})
|
|
return total, err
|
|
}
|
|
|
|
// writeBundleTarZst packages rootfs.ext4 + manifest.json into outPath as tar+zstd.
|
|
func writeBundleTarZst(outPath, rootfsPath, manifestPath string) error {
|
|
if err := os.MkdirAll(filepath.Dir(outPath), 0o755); err != nil {
|
|
return err
|
|
}
|
|
out, err := os.OpenFile(outPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer out.Close()
|
|
zw, err := zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
|
|
if err != nil {
|
|
return err
|
|
}
|
|
tw := tar.NewWriter(zw)
|
|
for _, src := range []struct{ path, name string }{
|
|
{rootfsPath, imagecat.RootfsFilename},
|
|
{manifestPath, imagecat.ManifestFilename},
|
|
} {
|
|
if err := writeBundleFile(tw, src.path, src.name); err != nil {
|
|
_ = tw.Close()
|
|
_ = zw.Close()
|
|
return err
|
|
}
|
|
}
|
|
if err := tw.Close(); err != nil {
|
|
_ = zw.Close()
|
|
return err
|
|
}
|
|
if err := zw.Close(); err != nil {
|
|
return err
|
|
}
|
|
return out.Close()
|
|
}
|
|
|
|
func writeBundleFile(tw *tar.Writer, src, name string) error {
|
|
f, err := os.Open(src)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer f.Close()
|
|
fi, err := f.Stat()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if err := tw.WriteHeader(&tar.Header{
|
|
Name: name,
|
|
Size: fi.Size(),
|
|
Mode: 0o644,
|
|
Typeflag: tar.TypeReg,
|
|
ModTime: fi.ModTime(),
|
|
}); err != nil {
|
|
return err
|
|
}
|
|
_, err = io.Copy(tw, f)
|
|
return err
|
|
}
|
|
|
|
func sha256HexFile(path string) (string, error) {
|
|
f, err := os.Open(path)
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
defer f.Close()
|
|
h := sha256.New()
|
|
if _, err := io.Copy(h, f); err != nil {
|
|
return "", err
|
|
}
|
|
return hex.EncodeToString(h.Sum(nil)), nil
|
|
}
|
|
|
|
func newInternalWorkSeedCommand() *cobra.Command {
|
|
var rootfsPath string
|
|
var outPath string
|
|
cmd := &cobra.Command{
|
|
Use: "work-seed",
|
|
Hidden: true,
|
|
Args: noArgsUsage("usage: banger internal work-seed --rootfs <path> [--out <path>]"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
rootfsPath = strings.TrimSpace(rootfsPath)
|
|
outPath = strings.TrimSpace(outPath)
|
|
if rootfsPath == "" {
|
|
return errors.New("rootfs path is required")
|
|
}
|
|
if outPath == "" {
|
|
outPath = system.WorkSeedPath(rootfsPath)
|
|
}
|
|
if err := system.EnsureSudo(cmd.Context()); err != nil {
|
|
return err
|
|
}
|
|
return system.BuildWorkSeedImage(cmd.Context(), system.NewRunner(), rootfsPath, outPath)
|
|
},
|
|
}
|
|
cmd.Flags().StringVar(&rootfsPath, "rootfs", "", "rootfs image path")
|
|
cmd.Flags().StringVar(&outPath, "out", "", "output work-seed image path")
|
|
return cmd
|
|
}
|
|
|
|
func newInternalNATCommand() *cobra.Command {
|
|
cmd := &cobra.Command{
|
|
Use: "nat",
|
|
Hidden: true,
|
|
RunE: helpNoArgs,
|
|
}
|
|
cmd.AddCommand(
|
|
newInternalNATActionCommand("up", true),
|
|
newInternalNATActionCommand("down", false),
|
|
)
|
|
return cmd
|
|
}
|
|
|
|
func newInternalNATActionCommand(use string, enable bool) *cobra.Command {
|
|
var guestIP string
|
|
var tapDevice string
|
|
cmd := &cobra.Command{
|
|
Use: use,
|
|
Hidden: true,
|
|
Args: noArgsUsage("usage: banger internal nat " + use + " --guest-ip <ip> --tap <tap-device>"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
guestIP = strings.TrimSpace(guestIP)
|
|
tapDevice = strings.TrimSpace(tapDevice)
|
|
if guestIP == "" {
|
|
return errors.New("guest IP is required")
|
|
}
|
|
if tapDevice == "" {
|
|
return errors.New("tap device is required")
|
|
}
|
|
if err := system.EnsureSudo(cmd.Context()); err != nil {
|
|
return err
|
|
}
|
|
return hostnat.Ensure(cmd.Context(), system.NewRunner(), guestIP, tapDevice, enable)
|
|
},
|
|
}
|
|
cmd.Flags().StringVar(&guestIP, "guest-ip", "", "guest IPv4 address")
|
|
cmd.Flags().StringVar(&tapDevice, "tap", "", "tap device name")
|
|
return cmd
|
|
}
|
|
|
|
// newDaemonCommand wires the `banger daemon` group: status, stop, and
// socket. Bare `banger daemon` prints help.
func newDaemonCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "daemon",
		Short: "Manage the banger daemon",
		RunE:  helpNoArgs,
	}
	cmd.AddCommand(
		&cobra.Command{
			Use:   "status",
			Short: "Show daemon status",
			Args:  noArgsUsage("usage: banger daemon status"),
			RunE: func(cmd *cobra.Command, args []string) error {
				layout, err := paths.Resolve()
				if err != nil {
					return err
				}
				// A failed ping is reported as "stopped" rather than as an
				// error; the ping error itself is intentionally discarded.
				ping, pingErr := daemonPingFunc(cmd.Context(), layout.SocketPath)
				if pingErr != nil {
					_, err = fmt.Fprintf(cmd.OutOrStdout(), "stopped\nsocket: %s\nlog: %s\ndns: %s\n", layout.SocketPath, layout.DaemonLog, vmdns.DefaultListenAddr)
					return err
				}
				info := buildinfo.Normalize(ping.Version, ping.Commit, ping.BuiltAt)
				_, err = fmt.Fprintf(cmd.OutOrStdout(), "running\npid: %d\n%ssocket: %s\nlog: %s\ndns: %s\n", ping.PID, formatBuildInfoBlock(info), layout.SocketPath, layout.DaemonLog, vmdns.DefaultListenAddr)
				return err
			},
		},
		&cobra.Command{
			Use:   "stop",
			Short: "Stop the daemon",
			Args:  noArgsUsage("usage: banger daemon stop"),
			RunE: func(cmd *cobra.Command, args []string) error {
				if err := system.EnsureSudo(cmd.Context()); err != nil {
					return err
				}
				layout, err := paths.Resolve()
				if err != nil {
					return err
				}
				_, err = rpc.Call[api.ShutdownResult](cmd.Context(), layout.SocketPath, "shutdown", api.Empty{})
				if err != nil {
					// Heuristic: a missing socket or a connect failure means
					// the daemon is already down, so report that and succeed.
					// NOTE(review): matching the substring "connect" is
					// fragile — confirm whether the rpc package exposes a
					// typed error that could be checked with errors.Is/As.
					if os.IsNotExist(err) || strings.Contains(err.Error(), "connect") {
						_, writeErr := fmt.Fprintln(cmd.OutOrStdout(), "daemon not running")
						return writeErr
					}
					return err
				}
				_, err = fmt.Fprintln(cmd.OutOrStdout(), "stopping")
				return err
			},
		},
		&cobra.Command{
			Use:   "socket",
			Short: "Print the daemon socket path",
			Args:  noArgsUsage("usage: banger daemon socket"),
			RunE: func(cmd *cobra.Command, args []string) error {
				layout, err := paths.Resolve()
				if err != nil {
					return err
				}
				_, err = fmt.Fprintln(cmd.OutOrStdout(), layout.SocketPath)
				return err
			},
		},
	)
	return cmd
}
|
|
|
|
func newVMCommand() *cobra.Command {
|
|
cmd := &cobra.Command{
|
|
Use: "vm",
|
|
Short: "Manage virtual machines",
|
|
RunE: helpNoArgs,
|
|
}
|
|
cmd.AddCommand(
|
|
newVMCreateCommand(),
|
|
newVMRunCommand(),
|
|
newVMListCommand(),
|
|
newVMShowCommand(),
|
|
newVMActionCommand("start", "Start a VM", "vm.start"),
|
|
newVMActionCommand("stop", "Stop a VM", "vm.stop"),
|
|
newVMKillCommand(),
|
|
newVMActionCommand("restart", "Restart a VM", "vm.restart"),
|
|
newVMActionCommand("delete", "Delete a VM", "vm.delete", "rm"),
|
|
newVMPruneCommand(),
|
|
newVMSetCommand(),
|
|
newVMSSHCommand(),
|
|
newVMWorkspaceCommand(),
|
|
newVMSessionCommand(),
|
|
newVMLogsCommand(),
|
|
newVMStatsCommand(),
|
|
newVMPortsCommand(),
|
|
)
|
|
return cmd
|
|
}
|
|
|
|
// newVMRunCommand wires `banger vm run`: create a sandbox VM and either
// drop into an interactive ssh shell or run a one-shot command inside it,
// optionally seeding the guest workspace from a host repository path.
func newVMRunCommand() *cobra.Command {
	defaults := effectiveVMDefaults()
	var (
		name              string
		imageName         string
		vcpu              = defaults.VCPUCount
		memory            = defaults.MemoryMiB
		systemOverlaySize = model.FormatSizeBytes(defaults.SystemOverlaySizeByte)
		workDiskSize      = model.FormatSizeBytes(defaults.WorkDiskSizeBytes)
		natEnabled        bool
		branchName        string
		fromRef           = "HEAD"
		removeOnExit      bool
	)
	cmd := &cobra.Command{
		Use:   "run [path] [-- command args...]",
		Short: "Create and enter a sandbox VM",
		Long: strings.TrimSpace(`
Create a sandbox VM and either drop into an interactive shell or run a command.

Three modes:
banger vm run bare sandbox, drops into ssh
banger vm run ./repo workspace sandbox, drops into ssh at /root/repo
banger vm run ./repo -- make test workspace, runs command, exits with its status
`),
		Args: cobra.ArbitraryArgs,
		Example: strings.TrimSpace(`
banger vm run
banger vm run ../repo --name agent-box --branch feature/demo
banger vm run ../repo -- make test
banger vm run -- uname -a
`),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Flag sanity: --branch needs a non-blank value, and --from is
			// only meaningful alongside --branch.
			if cmd.Flags().Changed("branch") && strings.TrimSpace(branchName) == "" {
				return errors.New("--branch requires a branch name")
			}
			if cmd.Flags().Changed("from") && strings.TrimSpace(branchName) == "" {
				return errors.New("--from requires --branch")
			}

			// Split the argv into "[path]" and the optional "-- command".
			pathArgs, commandArgs := splitVMRunArgs(cmd, args)
			if len(pathArgs) > 1 {
				return errors.New("usage: banger vm run [path] [-- command args...]")
			}
			sourcePath := ""
			if len(pathArgs) == 1 {
				sourcePath = pathArgs[0]
			}
			if sourcePath == "" && strings.TrimSpace(branchName) != "" {
				return errors.New("--branch requires a path argument")
			}

			// With a path argument, inspect the host repo up front so
			// obvious problems fail before any VM is created.
			var specPtr *vmRunRepoSpec
			if sourcePath != "" {
				spec, err := inspectVMRunRepo(cmd.Context(), sourcePath, branchName, fromRef)
				if err != nil {
					return err
				}
				specPtr = &spec
			}

			layout, err := paths.Resolve()
			if err != nil {
				return err
			}
			cfg, err := config.Load(layout)
			if err != nil {
				return err
			}
			// Workspace mode and bare mode validate different prereq sets.
			if specPtr != nil {
				if err := validateVMRunPrereqs(cfg); err != nil {
					return err
				}
			} else {
				if err := validateSSHPrereqs(cfg); err != nil {
					return err
				}
			}
			params, err := vmCreateParamsFromFlags(cmd, name, imageName, vcpu, memory, systemOverlaySize, workDiskSize, natEnabled, false)
			if err != nil {
				return err
			}
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			// Reload layout/config after ensureDaemon — NOTE(review): it
			// presumably starts the daemon when it isn't running; confirm
			// at its definition (not in this chunk).
			layout, cfg, err = ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			return runVMRun(cmd.Context(), layout.SocketPath, cfg, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), params, specPtr, commandArgs, removeOnExit)
		},
	}
	cmd.Flags().StringVar(&name, "name", "", "vm name")
	cmd.Flags().StringVar(&imageName, "image", "", "image name or id (defaults to config's default_image_name; auto-pulled from imagecat if missing)")
	cmd.Flags().IntVar(&vcpu, "vcpu", defaults.VCPUCount, "vcpu count")
	cmd.Flags().IntVar(&memory, "memory", defaults.MemoryMiB, "memory in MiB")
	cmd.Flags().StringVar(&systemOverlaySize, "system-overlay-size", model.FormatSizeBytes(defaults.SystemOverlaySizeByte), "system overlay size")
	cmd.Flags().StringVar(&workDiskSize, "disk-size", model.FormatSizeBytes(defaults.WorkDiskSizeBytes), "work disk size")
	cmd.Flags().BoolVar(&natEnabled, "nat", false, "enable NAT")
	cmd.Flags().StringVar(&branchName, "branch", "", "create and switch to a new guest branch")
	cmd.Flags().StringVar(&fromRef, "from", "HEAD", "base ref for --branch")
	cmd.Flags().BoolVar(&removeOnExit, "rm", false, "delete the VM after the ssh session / command exits")
	_ = cmd.RegisterFlagCompletionFunc("image", completeImageNames)
	return cmd
}
|
|
|
|
// newVMKillCommand builds `banger vm kill`, which sends a signal
// (default TERM) to one or more VM processes via the daemon's vm.kill
// RPC and prints a one-line summary per VM.
func newVMKillCommand() *cobra.Command {
	var signal string
	cmd := &cobra.Command{
		Use:               "kill <id-or-name>...",
		Short:             "Send a signal to a VM process",
		Args:              minArgsUsage(1, "usage: banger vm kill [--signal SIGTERM|SIGKILL|...] <id-or-name>..."),
		ValidArgsFunction: completeVMNames,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Acquire sudo before anything else; the daemon side needs it.
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			// Multiple targets go through the batch helper so one failure
			// does not abort the remaining kills.
			if len(args) > 1 {
				return runVMBatchAction(cmd, layout.SocketPath, args, func(ctx context.Context, id string) (model.VMRecord, error) {
					result, err := rpc.Call[api.VMShowResult](
						ctx,
						layout.SocketPath,
						"vm.kill",
						api.VMKillParams{IDOrName: id, Signal: signal},
					)
					if err != nil {
						return model.VMRecord{}, err
					}
					return result.VM, nil
				})
			}
			// Single-target path: plain RPC call plus summary print.
			result, err := rpc.Call[api.VMShowResult](
				cmd.Context(),
				layout.SocketPath,
				"vm.kill",
				api.VMKillParams{IDOrName: args[0], Signal: signal},
			)
			if err != nil {
				return err
			}
			return printVMSummary(cmd.OutOrStdout(), result.VM)
		},
	}
	cmd.Flags().StringVar(&signal, "signal", "TERM", "signal name to send")
	return cmd
}
|
|
|
|
// newVMPruneCommand builds `banger vm prune`, which deletes every VM
// that is not currently running, after an interactive confirmation
// unless -f/--force is given. The work happens in runVMPrune.
func newVMPruneCommand() *cobra.Command {
	var force bool
	cmd := &cobra.Command{
		Use:   "prune",
		Short: "Delete every VM that isn't running",
		Long:  "Scan for VMs in state other than 'running' (stopped, created, error) and delete them after confirmation. Use -f to skip the prompt.",
		Args:  noArgsUsage("usage: banger vm prune [-f|--force]"),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Deleting VMs requires sudo and a reachable daemon.
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			return runVMPrune(cmd, layout.SocketPath, force)
		},
	}
	cmd.Flags().BoolVarP(&force, "force", "f", false, "skip the confirmation prompt")
	return cmd
}
|
|
|
|
// runVMPrune implements `banger vm prune`: list the VMs, pick every one
// not in the running state, show a preview table, confirm with the user
// (unless force), then delete them one by one. Deletion is best-effort:
// individual failures are reported to stderr and counted, and a summary
// error is returned if any delete failed.
func runVMPrune(cmd *cobra.Command, socketPath string, force bool) error {
	ctx := cmd.Context()
	stdout := cmd.OutOrStdout()
	stderr := cmd.ErrOrStderr()

	list, err := vmListFunc(ctx, socketPath)
	if err != nil {
		return err
	}
	// Victims: every VM in a non-running state (stopped, created, error).
	var victims []model.VMRecord
	for _, vm := range list.VMs {
		if vm.State != model.VMStateRunning {
			victims = append(victims, vm)
		}
	}
	if len(victims) == 0 {
		_, err := fmt.Fprintln(stdout, "no non-running VMs to prune")
		return err
	}

	// Preview table so the user sees exactly what will be removed.
	fmt.Fprintf(stdout, "The following %d VM(s) will be deleted:\n", len(victims))
	w := tabwriter.NewWriter(stdout, 0, 0, 2, ' ', 0)
	fmt.Fprintln(w, "  ID\tNAME\tSTATE")
	for _, vm := range victims {
		fmt.Fprintf(w, "  %s\t%s\t%s\n", shortID(vm.ID), vm.Name, vm.State)
	}
	if err := w.Flush(); err != nil {
		return err
	}

	if !force {
		// EOF or anything other than y/yes aborts (promptYesNo treats EOF as "no").
		ok, err := promptYesNo(cmd.InOrStdin(), stdout, "Delete these VMs? [y/N] ")
		if err != nil {
			return err
		}
		if !ok {
			_, err := fmt.Fprintln(stdout, "aborted")
			return err
		}
	}

	// Best-effort deletion: keep going past per-VM failures.
	var failed int
	for _, vm := range victims {
		// Prefer the human-readable name for progress output; fall back
		// to the short ID for unnamed VMs.
		ref := vm.Name
		if ref == "" {
			ref = shortID(vm.ID)
		}
		if err := vmDeleteFunc(ctx, socketPath, vm.ID); err != nil {
			fmt.Fprintf(stderr, "delete %s: %v\n", ref, err)
			failed++
			continue
		}
		fmt.Fprintln(stdout, "deleted", ref)
	}
	if failed > 0 {
		return fmt.Errorf("%d VM(s) failed to delete", failed)
	}
	return nil
}
|
|
|
|
// promptYesNo reads a line from in and returns true iff the trimmed
|
|
// lowercase answer is "y" or "yes". EOF is treated as "no". Any other
|
|
// read error is surfaced to the caller.
|
|
func promptYesNo(in io.Reader, out io.Writer, prompt string) (bool, error) {
|
|
if _, err := fmt.Fprint(out, prompt); err != nil {
|
|
return false, err
|
|
}
|
|
reader := bufio.NewReader(in)
|
|
line, err := reader.ReadString('\n')
|
|
if err != nil && err != io.EOF {
|
|
return false, err
|
|
}
|
|
answer := strings.ToLower(strings.TrimSpace(line))
|
|
return answer == "y" || answer == "yes", nil
|
|
}
|
|
|
|
// newVMCreateCommand builds `banger vm create`. Defaults are resolved
// once at construction via effectiveVMDefaults so flag help text shows
// the real values; flag parsing/validation happens before sudo is
// requested so bad input fails fast without a privilege prompt.
func newVMCreateCommand() *cobra.Command {
	defaults := effectiveVMDefaults()
	var (
		name              string
		imageName         string
		vcpu              = defaults.VCPUCount
		memory            = defaults.MemoryMiB
		systemOverlaySize = model.FormatSizeBytes(defaults.SystemOverlaySizeByte)
		workDiskSize      = model.FormatSizeBytes(defaults.WorkDiskSizeBytes)
		natEnabled        bool
		noStart           bool
	)
	cmd := &cobra.Command{
		Use:   "create",
		Short: "Create a VM",
		Args:  noArgsUsage("usage: banger vm create"),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate flags first; only then pay for sudo + daemon startup.
			params, err := vmCreateParamsFromFlags(cmd, name, imageName, vcpu, memory, systemOverlaySize, workDiskSize, natEnabled, noStart)
			if err != nil {
				return err
			}
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			vm, err := runVMCreate(cmd.Context(), layout.SocketPath, cmd.ErrOrStderr(), params)
			if err != nil {
				return err
			}
			return printVMSummary(cmd.OutOrStdout(), vm)
		},
	}
	cmd.Flags().StringVar(&name, "name", "", "vm name")
	cmd.Flags().StringVar(&imageName, "image", "", "image name or id (defaults to config's default_image_name; auto-pulled from imagecat if missing)")
	cmd.Flags().IntVar(&vcpu, "vcpu", defaults.VCPUCount, "vcpu count")
	cmd.Flags().IntVar(&memory, "memory", defaults.MemoryMiB, "memory in MiB")
	cmd.Flags().StringVar(&systemOverlaySize, "system-overlay-size", model.FormatSizeBytes(defaults.SystemOverlaySizeByte), "system overlay size")
	cmd.Flags().StringVar(&workDiskSize, "disk-size", model.FormatSizeBytes(defaults.WorkDiskSizeBytes), "work disk size")
	cmd.Flags().BoolVar(&natEnabled, "nat", false, "enable NAT")
	cmd.Flags().BoolVar(&noStart, "no-start", false, "create without starting")
	_ = cmd.RegisterFlagCompletionFunc("image", completeImageNames)
	return cmd
}
|
|
|
|
// vmListOptions carries the shared flag state for `banger ps` and
// `banger vm list` (see newVMListLikeCommand).
type vmListOptions struct {
	showAll bool // -a/--all: include non-running VMs
	latest  bool // -l/--latest: show only the newest VM
	quiet   bool // -q/--quiet: print IDs only, no table
}
|
|
|
|
// newPSCommand builds the top-level `banger ps` alias for listing VMs;
// it shares its implementation with `banger vm list`.
func newPSCommand() *cobra.Command {
	return newVMListLikeCommand("ps", nil, "usage: banger ps")
}
|
|
|
|
// newVMListCommand builds `banger vm list` (aliases: ls, ps); it shares
// its implementation with the top-level `banger ps`.
func newVMListCommand() *cobra.Command {
	return newVMListLikeCommand("list", []string{"ls", "ps"}, "usage: banger vm list")
}
|
|
|
|
// newVMListLikeCommand is the shared constructor behind `banger ps` and
// `banger vm list`: same flags and behavior, different command name and
// aliases. Listing itself is done by runVMList.
func newVMListLikeCommand(use string, aliases []string, usage string) *cobra.Command {
	var opts vmListOptions
	cmd := &cobra.Command{
		Use:     use,
		Aliases: aliases,
		Short:   "List VMs",
		Args:    noArgsUsage(usage),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runVMList(cmd, opts)
		},
	}
	cmd.Flags().BoolVarP(&opts.showAll, "all", "a", false, "show all VMs")
	cmd.Flags().BoolVarP(&opts.latest, "latest", "l", false, "show only the latest VM")
	cmd.Flags().BoolVarP(&opts.quiet, "quiet", "q", false, "only show VM IDs")
	return cmd
}
|
|
|
|
// runVMList fetches the VM list from the daemon, applies the -a/-l
// filters via selectVMListVMs, and prints either bare IDs (-q) or a
// table. The image list is only fetched for the table path, where image
// IDs are rendered as names.
func runVMList(cmd *cobra.Command, opts vmListOptions) error {
	layout, _, err := ensureDaemon(cmd.Context())
	if err != nil {
		return err
	}
	result, err := rpc.Call[api.VMListResult](cmd.Context(), layout.SocketPath, "vm.list", api.Empty{})
	if err != nil {
		return err
	}
	vms := selectVMListVMs(result.VMs, opts.showAll, opts.latest)
	if opts.quiet {
		// Quiet mode: IDs only, and no need for the image lookup below.
		return printVMIDList(cmd.OutOrStdout(), vms)
	}
	images, err := rpc.Call[api.ImageListResult](cmd.Context(), layout.SocketPath, "image.list", api.Empty{})
	if err != nil {
		return err
	}
	return printVMListTable(cmd.OutOrStdout(), vms, imageNameIndex(images.Images))
}
|
|
|
|
func selectVMListVMs(vms []model.VMRecord, showAll, latest bool) []model.VMRecord {
|
|
filtered := make([]model.VMRecord, 0, len(vms))
|
|
for _, vm := range vms {
|
|
if !showAll && vm.State != model.VMStateRunning {
|
|
continue
|
|
}
|
|
filtered = append(filtered, vm)
|
|
}
|
|
if !latest || len(filtered) <= 1 {
|
|
return filtered
|
|
}
|
|
latestVM := filtered[0]
|
|
for _, vm := range filtered[1:] {
|
|
if vm.CreatedAt.After(latestVM.CreatedAt) {
|
|
latestVM = vm
|
|
continue
|
|
}
|
|
if vm.CreatedAt.Equal(latestVM.CreatedAt) && vm.UpdatedAt.After(latestVM.UpdatedAt) {
|
|
latestVM = vm
|
|
}
|
|
}
|
|
return []model.VMRecord{latestVM}
|
|
}
|
|
|
|
// newVMShowCommand builds `banger vm show`, which prints the full VM
// record as JSON via the daemon's vm.show RPC.
func newVMShowCommand() *cobra.Command {
	return &cobra.Command{
		Use:               "show <id-or-name>",
		Short:             "Show VM details",
		Args:              exactArgsUsage(1, "usage: banger vm show <id-or-name>"),
		ValidArgsFunction: completeVMNameOnlyAtPos0,
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			result, err := rpc.Call[api.VMShowResult](cmd.Context(), layout.SocketPath, "vm.show", api.VMRefParams{IDOrName: args[0]})
			if err != nil {
				return err
			}
			return printJSON(cmd.OutOrStdout(), result.VM)
		},
	}
}
|
|
|
|
// newVMActionCommand is a generic factory for VM lifecycle subcommands
// (start/stop/etc.): use names the subcommand, method is the daemon RPC
// to invoke with a VMRefParams payload. Multiple targets fan out through
// runVMBatchAction so one failure does not abort the rest.
func newVMActionCommand(use, short, method string, aliases ...string) *cobra.Command {
	return &cobra.Command{
		Use:               use + " <id-or-name>...",
		Aliases:           aliases,
		Short:             short,
		Args:              minArgsUsage(1, fmt.Sprintf("usage: banger vm %s <id-or-name>...", use)),
		ValidArgsFunction: completeVMNames,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Lifecycle actions need sudo and a running daemon.
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			if len(args) > 1 {
				return runVMBatchAction(cmd, layout.SocketPath, args, func(ctx context.Context, id string) (model.VMRecord, error) {
					result, err := rpc.Call[api.VMShowResult](ctx, layout.SocketPath, method, api.VMRefParams{IDOrName: id})
					if err != nil {
						return model.VMRecord{}, err
					}
					return result.VM, nil
				})
			}
			result, err := rpc.Call[api.VMShowResult](cmd.Context(), layout.SocketPath, method, api.VMRefParams{IDOrName: args[0]})
			if err != nil {
				return err
			}
			return printVMSummary(cmd.OutOrStdout(), result.VM)
		},
	}
}
|
|
|
|
// newVMSetCommand builds `banger vm set`, which updates settings of one
// or more stopped VMs via the daemon's vm.set RPC. Sentinel flag values
// (-1 for ints, "" for disk size) mean "leave unchanged"; the --nat and
// --no-nat pair is reconciled inside vmSetParamsFromFlags.
func newVMSetCommand() *cobra.Command {
	var (
		vcpu     int
		memory   int
		diskSize string
		nat      bool
		noNat    bool
	)
	cmd := &cobra.Command{
		Use:               "set <id-or-name>...",
		Short:             "Update stopped VM settings",
		Args:              minArgsUsage(1, "usage: banger vm set [--vcpu N] [--memory MiB] [--disk-size SIZE] [--nat|--no-nat] <id-or-name>..."),
		ValidArgsFunction: completeVMNames,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Build params once (seeded with the first target); the batch
			// path below copies them and swaps in each target's reference.
			params, err := vmSetParamsFromFlags(args[0], vcpu, memory, diskSize, nat, noNat)
			if err != nil {
				return err
			}
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			if len(args) > 1 {
				return runVMBatchAction(cmd, layout.SocketPath, args, func(ctx context.Context, id string) (model.VMRecord, error) {
					// Copy the shared params so each call carries its own target.
					batchParams := params
					batchParams.IDOrName = id
					result, err := rpc.Call[api.VMShowResult](ctx, layout.SocketPath, "vm.set", batchParams)
					if err != nil {
						return model.VMRecord{}, err
					}
					return result.VM, nil
				})
			}
			result, err := rpc.Call[api.VMShowResult](cmd.Context(), layout.SocketPath, "vm.set", params)
			if err != nil {
				return err
			}
			return printVMSummary(cmd.OutOrStdout(), result.VM)
		},
	}
	cmd.Flags().IntVar(&vcpu, "vcpu", -1, "vcpu count")
	cmd.Flags().IntVar(&memory, "memory", -1, "memory in MiB")
	cmd.Flags().StringVar(&diskSize, "disk-size", "", "new work disk size")
	cmd.Flags().BoolVar(&nat, "nat", false, "enable NAT")
	cmd.Flags().BoolVar(&noNat, "no-nat", false, "disable NAT")
	return cmd
}
|
|
|
|
// newVMSSHCommand builds `banger vm ssh`, which resolves the target VM's
// guest IP through the daemon, assembles the ssh argv via sshCommandArgs
// (which carries banger's host-key verification posture), and runs an
// interactive session. Extra args after the VM reference are passed
// through to ssh.
func newVMSSHCommand() *cobra.Command {
	return &cobra.Command{
		Use:               "ssh <id-or-name> [ssh args...]",
		Short:             "SSH into a running VM",
		Args:              minArgsUsage(1, "usage: banger vm ssh <id-or-name> [ssh args...]"),
		ValidArgsFunction: completeVMNameOnlyAtPos0,
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, cfg, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			if err := validateSSHPrereqs(cfg); err != nil {
				return err
			}
			// Ask the daemon for the VM's guest IP (and canonical name).
			result, err := vmSSHFunc(cmd.Context(), layout.SocketPath, args[0])
			if err != nil {
				return err
			}
			sshArgs, err := sshCommandArgs(cfg, result.GuestIP, args[1:])
			if err != nil {
				return err
			}
			return runSSHSession(cmd.Context(), layout.SocketPath, result.Name, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), sshArgs, false)
		},
	}
}
|
|
|
|
// newVMWorkspaceCommand is the parent for `banger vm workspace`; it only
// groups the prepare/export subcommands and prints help when invoked bare.
func newVMWorkspaceCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "workspace",
		Short: "Manage repository workspaces inside a running VM",
		RunE:  helpNoArgs,
	}
	cmd.AddCommand(
		newVMWorkspacePrepareCommand(),
		newVMWorkspaceExportCommand(),
	)
	return cmd
}
|
|
|
|
// newVMWorkspacePrepareCommand builds `banger vm workspace prepare`,
// which copies a local git checkout into a running VM. The optional
// second positional arg is the source path (resolved by
// resolveVMRunSourcePath when omitted); --from is only forwarded when
// --branch was actually requested.
func newVMWorkspacePrepareCommand() *cobra.Command {
	var guestPath string
	var branchName string
	var fromRef string
	var mode string
	var readOnly bool
	cmd := &cobra.Command{
		Use:               "prepare <id-or-name> [path]",
		Short:             "Copy a local repo into a running VM",
		Long:              "Prepare a repository workspace from a local git checkout into a running VM. The default guest path is /root/repo and the default mode is shallow_overlay. Repositories with git submodules must use --mode full_copy.",
		Args:              minArgsUsage(1, "usage: banger vm workspace prepare <id-or-name> [path]"),
		ValidArgsFunction: completeVMNameOnlyAtPos0,
		Example: strings.TrimSpace(`
banger vm workspace prepare devbox
banger vm workspace prepare devbox ../repo --guest-path /root/repo --readonly
banger vm workspace prepare devbox ../repo --mode full_copy
`),
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			// Source path is optional; empty means "resolve the default".
			sourcePath := ""
			if len(args) > 1 {
				sourcePath = args[1]
			}
			resolvedPath, err := resolveVMRunSourcePath(sourcePath)
			if err != nil {
				return err
			}
			// --from only makes sense together with --branch; otherwise
			// it is dropped so the daemon doesn't see a stray base ref.
			prepareFrom := ""
			if strings.TrimSpace(branchName) != "" {
				prepareFrom = fromRef
			}
			result, err := vmWorkspacePrepareFunc(cmd.Context(), layout.SocketPath, api.VMWorkspacePrepareParams{
				IDOrName:   args[0],
				SourcePath: resolvedPath,
				GuestPath:  guestPath,
				Branch:     branchName,
				From:       prepareFrom,
				Mode:       mode,
				ReadOnly:   readOnly,
			})
			if err != nil {
				return err
			}
			return printJSON(cmd.OutOrStdout(), result.Workspace)
		},
	}
	cmd.Flags().StringVar(&guestPath, "guest-path", "/root/repo", "guest workspace path")
	cmd.Flags().StringVar(&branchName, "branch", "", "create and switch to a new guest branch")
	cmd.Flags().StringVar(&fromRef, "from", "HEAD", "base ref for --branch")
	cmd.Flags().StringVar(&mode, "mode", string(model.WorkspacePrepareModeShallowOverlay), "workspace mode: shallow_overlay, full_copy, metadata_only")
	cmd.Flags().BoolVar(&readOnly, "readonly", false, "make the prepared workspace read-only")
	return cmd
}
|
|
|
|
// newVMWorkspaceExportCommand builds `banger vm workspace export`, which
// pulls a unified diff of guest workspace changes back to the host. The
// patch goes to stdout by default (pipe into `git apply`) or to a file
// with --output; status messages go to stderr so stdout stays a clean
// patch stream.
func newVMWorkspaceExportCommand() *cobra.Command {
	var guestPath string
	var outputPath string
	var baseCommit string
	cmd := &cobra.Command{
		Use:               "export <id-or-name>",
		Short:             "Pull changes from a guest workspace back to the host as a patch",
		Long:              "Emit a binary-safe unified diff of every change inside the guest workspace (committed since base + uncommitted + untracked, minus .gitignore). Non-mutating — the guest's index and working tree are untouched. Pass --base-commit with the head_commit from workspace prepare to capture changes even when the worker ran git commit inside the VM. Without --base-commit the diff is against the current guest HEAD, which misses committed changes.",
		Args:              exactArgsUsage(1, "usage: banger vm workspace export <id-or-name>"),
		ValidArgsFunction: completeVMNameOnlyAtPos0,
		Example: strings.TrimSpace(`
banger vm workspace export devbox | git apply
banger vm workspace export devbox --base-commit abc1234 | git apply
banger vm workspace export devbox --output worker.diff
banger vm workspace export devbox --guest-path /root/project --output changes.diff
`),
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			result, err := vmWorkspaceExportFunc(cmd.Context(), layout.SocketPath, api.WorkspaceExportParams{
				IDOrName:   args[0],
				GuestPath:  guestPath,
				BaseCommit: baseCommit,
			})
			if err != nil {
				return err
			}
			if !result.HasChanges {
				// Note on stderr; stdout stays empty so piping is safe.
				_, _ = fmt.Fprintln(cmd.ErrOrStderr(), "no changes")
				return nil
			}
			if outputPath != "" {
				if err := os.WriteFile(outputPath, result.Patch, 0o644); err != nil {
					return fmt.Errorf("write patch: %w", err)
				}
				_, err = fmt.Fprintf(cmd.ErrOrStderr(), "patch written to %s (%d bytes, %d files)\n",
					outputPath, len(result.Patch), len(result.ChangedFiles))
				return err
			}
			_, err = cmd.OutOrStdout().Write(result.Patch)
			return err
		},
	}
	cmd.Flags().StringVar(&guestPath, "guest-path", "/root/repo", "guest workspace path")
	cmd.Flags().StringVar(&outputPath, "output", "", "write patch to this file instead of stdout")
	cmd.Flags().StringVar(&baseCommit, "base-commit", "", "diff from this commit (use head_commit from workspace prepare to capture worker git commits)")
	return cmd
}
|
|
|
|
// newVMSessionCommand is the parent for `banger vm session`; it groups
// the guest-session subcommands and prints help when invoked bare.
func newVMSessionCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "session",
		Short: "Manage long-lived guest commands inside a VM",
		Long:  "Start, inspect, stop, and attach to daemon-managed guest commands. Pipe-mode sessions expose live stdio for interactive protocols. Attach is exclusive and currently uses a same-host local bridge.",
		RunE:  helpNoArgs,
	}
	cmd.AddCommand(
		newVMSessionStartCommand(),
		newVMSessionListCommand(),
		newVMSessionShowCommand(),
		newVMSessionLogsCommand(),
		newVMSessionStopCommand(),
		newVMSessionKillCommand(),
		newVMSessionAttachCommand(),
		newVMSessionSendCommand(),
	)
	return cmd
}
|
|
|
|
// newVMSessionStartCommand builds `banger vm session start`, which asks
// the daemon to launch a managed command inside a VM. --env and --tag
// take repeatable KEY=VALUE pairs (parsed by parseKeyValuePairs); a
// session that the daemon reports as failed still prints its summary,
// followed by a warning with the launch stage and message.
func newVMSessionStartCommand() *cobra.Command {
	var name string
	var cwd string
	var stdinMode string
	var envPairs []string
	var tagPairs []string
	var requiredCommands []string
	cmd := &cobra.Command{
		Use:               "start <id-or-name> <command> [args...]",
		Short:             "Start a managed guest command",
		Long:              "Start a daemon-managed guest command. The daemon verifies that the guest working directory exists and that the requested command is present in guest PATH before launch. Use --stdin-mode pipe when you need live attach.",
		Args:              minArgsUsage(2, "usage: banger vm session start <id-or-name> [flags] -- <command> [args...]"),
		ValidArgsFunction: completeVMNameOnlyAtPos0,
		Example: strings.TrimSpace(`
banger vm session start devbox --name planner --cwd /root/repo --stdin-mode pipe --require-command git -- pi --mode rpc --no-session
banger vm session start devbox --name shell --stdin-mode pipe -- bash -lc 'exec bash'
`),
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			env, err := parseKeyValuePairs(envPairs)
			if err != nil {
				return err
			}
			tags, err := parseKeyValuePairs(tagPairs)
			if err != nil {
				return err
			}
			// args[0] = VM ref, args[1] = guest command, args[2:] = its argv.
			result, err := guestSessionStartFunc(cmd.Context(), layout.SocketPath, api.GuestSessionStartParams{
				VMIDOrName:       args[0],
				Name:             name,
				Command:          args[1],
				Args:             append([]string(nil), args[2:]...),
				CWD:              cwd,
				Env:              env,
				StdinMode:        stdinMode,
				Tags:             tags,
				RequiredCommands: append([]string(nil), requiredCommands...),
			})
			if err != nil {
				return err
			}
			if err := printGuestSessionSummary(cmd.OutOrStdout(), result.Session); err != nil {
				return err
			}
			// A failed launch is not an error exit, but the user gets a
			// stderr warning with the stage + message the daemon recorded.
			if result.Session.Status == model.GuestSessionStatusFailed && strings.TrimSpace(result.Session.LaunchMessage) != "" {
				_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "warning: session failed at %s: %s\n", result.Session.LaunchStage, result.Session.LaunchMessage)
			}
			return nil
		},
	}
	cmd.Flags().StringVar(&name, "name", "", "session name")
	cmd.Flags().StringVar(&cwd, "cwd", "", "guest working directory; must already exist")
	cmd.Flags().StringVar(&stdinMode, "stdin-mode", string(model.GuestSessionStdinClosed), "stdin mode: closed or pipe (pipe enables attach)")
	cmd.Flags().StringArrayVar(&envPairs, "env", nil, "environment entry in KEY=VALUE form")
	cmd.Flags().StringArrayVar(&tagPairs, "tag", nil, "session tag in KEY=VALUE form")
	cmd.Flags().StringArrayVar(&requiredCommands, "require-command", nil, "extra guest command that must exist in PATH before launch; repeatable")
	return cmd
}
|
|
|
|
// newVMSessionListCommand builds `banger vm session list`, which prints
// a table of the daemon-managed guest sessions for one VM.
func newVMSessionListCommand() *cobra.Command {
	return &cobra.Command{
		Use:               "list <id-or-name>",
		Aliases:           []string{"ls"},
		Short:             "List managed guest commands for a VM",
		Args:              exactArgsUsage(1, "usage: banger vm session list <id-or-name>"),
		ValidArgsFunction: completeVMNameOnlyAtPos0,
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			result, err := guestSessionListFunc(cmd.Context(), layout.SocketPath, args[0])
			if err != nil {
				return err
			}
			return printGuestSessionTable(cmd.OutOrStdout(), result.Sessions)
		},
	}
}
|
|
|
|
// newVMSessionShowCommand builds `banger vm session show`, which prints
// the full guest-session record as JSON.
func newVMSessionShowCommand() *cobra.Command {
	return &cobra.Command{
		Use:               "show <id-or-name> <session>",
		Short:             "Show managed guest command details",
		Args:              exactArgsUsage(2, "usage: banger vm session show <id-or-name> <session>"),
		ValidArgsFunction: completeSessionNames,
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			result, err := guestSessionGetFunc(cmd.Context(), layout.SocketPath, api.GuestSessionRefParams{VMIDOrName: args[0], SessionIDOrName: args[1]})
			if err != nil {
				return err
			}
			return printJSON(cmd.OutOrStdout(), result.Session)
		},
	}
}
|
|
|
|
// newVMSessionLogsCommand builds `banger vm session logs`, which fetches
// the tail of a session's stdout or stderr log from the daemon and
// writes it verbatim to stdout.
func newVMSessionLogsCommand() *cobra.Command {
	var stream string
	var tailLines int
	cmd := &cobra.Command{
		Use:               "logs <id-or-name> <session>",
		Short:             "Show stdout or stderr for a guest session",
		Args:              exactArgsUsage(2, "usage: banger vm session logs [--stream stdout|stderr] [-n LINES] <id-or-name> <session>"),
		ValidArgsFunction: completeSessionNames,
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			result, err := guestSessionLogsFunc(cmd.Context(), layout.SocketPath, api.GuestSessionLogsParams{VMIDOrName: args[0], SessionIDOrName: args[1], Stream: stream, TailLines: tailLines})
			if err != nil {
				return err
			}
			// Fprint (not Fprintln): the content carries its own newlines.
			_, err = fmt.Fprint(cmd.OutOrStdout(), result.Content)
			return err
		},
	}
	cmd.Flags().StringVar(&stream, "stream", "stdout", "log stream to read")
	cmd.Flags().IntVarP(&tailLines, "lines", "n", 200, "number of lines to tail")
	return cmd
}
|
|
|
|
// newVMSessionStopCommand builds `banger vm session stop`, which asks
// the daemon to SIGTERM a guest session and prints the updated summary.
func newVMSessionStopCommand() *cobra.Command {
	return &cobra.Command{
		Use:               "stop <id-or-name> <session>",
		Short:             "Send SIGTERM to a guest session",
		Args:              exactArgsUsage(2, "usage: banger vm session stop <id-or-name> <session>"),
		ValidArgsFunction: completeSessionNames,
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			result, err := guestSessionStopFunc(cmd.Context(), layout.SocketPath, api.GuestSessionRefParams{VMIDOrName: args[0], SessionIDOrName: args[1]})
			if err != nil {
				return err
			}
			return printGuestSessionSummary(cmd.OutOrStdout(), result.Session)
		},
	}
}
|
|
|
|
// newVMSessionKillCommand builds `banger vm session kill`, the SIGKILL
// counterpart of `session stop`.
func newVMSessionKillCommand() *cobra.Command {
	return &cobra.Command{
		Use:               "kill <id-or-name> <session>",
		Short:             "Send SIGKILL to a guest session",
		Args:              exactArgsUsage(2, "usage: banger vm session kill <id-or-name> <session>"),
		ValidArgsFunction: completeSessionNames,
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			result, err := guestSessionKillFunc(cmd.Context(), layout.SocketPath, api.GuestSessionRefParams{VMIDOrName: args[0], SessionIDOrName: args[1]})
			if err != nil {
				return err
			}
			return printGuestSessionSummary(cmd.OutOrStdout(), result.Session)
		},
	}
}
|
|
|
|
// newVMSessionAttachCommand builds `banger vm session attach`, which
// begins an exclusive attach via the daemon and then bridges local stdio
// over the Unix socket the daemon returns (runGuestSessionAttach).
func newVMSessionAttachCommand() *cobra.Command {
	return &cobra.Command{
		Use:               "attach <id-or-name> <session>",
		Short:             "Attach local stdio to an attachable guest session",
		Long:              "Attach local stdio to a pipe-mode session through a daemon-created local Unix socket bridge. Only one active attach is allowed at a time, and the client must run on the same host as the daemon.",
		Args:              exactArgsUsage(2, "usage: banger vm session attach <id-or-name> <session>"),
		ValidArgsFunction: completeSessionNames,
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			result, err := guestSessionAttachBeginFunc(cmd.Context(), layout.SocketPath, api.GuestSessionAttachBeginParams{VMIDOrName: args[0], SessionIDOrName: args[1]})
			if err != nil {
				return err
			}
			// Prefer the explicit SocketPath; fall back to TransportTarget
			// when the daemon reported a unix_socket transport instead.
			socketPath := strings.TrimSpace(result.SocketPath)
			if socketPath == "" && result.TransportKind == "unix_socket" {
				socketPath = strings.TrimSpace(result.TransportTarget)
			}
			return runGuestSessionAttach(cmd.Context(), cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), socketPath)
		},
	}
}
|
|
|
|
// newVMSessionSendCommand builds `banger vm session send`, which writes
// a one-shot payload to a pipe-mode session's stdin without taking the
// exclusive attach. The payload comes from --message (with a trailing
// newline appended if missing) or, when --message is empty, from stdin.
func newVMSessionSendCommand() *cobra.Command {
	var message string
	cmd := &cobra.Command{
		Use:               "send <id-or-name> <session>",
		Short:             "Write bytes to a running guest session's stdin pipe",
		Long:              "Write a payload to the stdin pipe of a running pipe-mode guest session without holding the exclusive attach. Use --message for an inline JSONL string, or pipe bytes via stdin when --message is omitted. A trailing newline is appended to --message values that lack one.",
		Args:              exactArgsUsage(2, "usage: banger vm session send <id-or-name> <session> [--message '<json>']"),
		ValidArgsFunction: completeSessionNames,
		Example: strings.TrimSpace(`
banger vm session send devbox planner --message '{"type":"abort"}'
banger vm session send devbox planner --message '{"type":"steer","message":"Focus on src/"}'
echo '{"type":"prompt","prompt":"Summarize."}' | banger vm session send devbox planner
`),
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			var payload []byte
			if message != "" {
				// Inline message: ensure JSONL framing by appending a
				// newline when the caller left it off.
				payload = []byte(message)
				if len(payload) > 0 && payload[len(payload)-1] != '\n' {
					payload = append(payload, '\n')
				}
			} else {
				// No --message: the payload is whatever stdin provides,
				// passed through verbatim (no newline normalization).
				payload, err = io.ReadAll(cmd.InOrStdin())
				if err != nil {
					return fmt.Errorf("read stdin: %w", err)
				}
			}
			result, err := guestSessionSendFunc(cmd.Context(), layout.SocketPath, api.GuestSessionSendParams{
				VMIDOrName:      args[0],
				SessionIDOrName: args[1],
				Payload:         payload,
			})
			if err != nil {
				return err
			}
			_, err = fmt.Fprintf(cmd.OutOrStdout(), "sent %d bytes to session %s\n", result.BytesWritten, result.Session.Name)
			return err
		},
	}
	cmd.Flags().StringVar(&message, "message", "", "JSONL message to send; a trailing newline is appended if absent")
	return cmd
}
|
|
|
|
// parseKeyValuePairs converts KEY=VALUE strings into a map. Keys are
// whitespace-trimmed and must be non-empty; values are stored verbatim
// (a bare "KEY=" yields an empty value, and later duplicates of a key
// overwrite earlier ones). Empty input yields a nil map and nil error.
func parseKeyValuePairs(values []string) (map[string]string, error) {
	if len(values) == 0 {
		return nil, nil
	}
	pairs := make(map[string]string, len(values))
	for _, entry := range values {
		key, val, found := strings.Cut(entry, "=")
		key = strings.TrimSpace(key)
		if !found || key == "" {
			return nil, fmt.Errorf("invalid key=value entry %q", entry)
		}
		pairs[key] = val
	}
	return pairs, nil
}
|
|
|
|
// printGuestSessionSummary writes a single tab-separated line describing
// a guest session: full ID, name, status, command, and working directory.
func printGuestSessionSummary(out anyWriter, session model.GuestSession) error {
	_, err := fmt.Fprintf(out, "%s\t%s\t%s\t%s\t%s\n", session.ID, session.Name, session.Status, session.Command, session.CWD)
	return err
}
|
|
|
|
func printGuestSessionTable(out io.Writer, sessions []model.GuestSession) error {
|
|
tw := tabwriter.NewWriter(out, 0, 0, 2, ' ', 0)
|
|
if _, err := fmt.Fprintln(tw, "ID\tNAME\tSTATUS\tATTACH\tCOMMAND\tCWD"); err != nil {
|
|
return err
|
|
}
|
|
for _, session := range sessions {
|
|
attach := "no"
|
|
if session.Attachable {
|
|
attach = "yes"
|
|
}
|
|
if _, err := fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\t%s\n", shortID(session.ID), session.Name, session.Status, attach, session.Command, session.CWD); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return tw.Flush()
|
|
}
|
|
|
|
// runGuestSessionAttach bridges local stdio to the daemon's attach
// socket. A goroutine pumps stdin into the connection while this
// function reads multiplexed frames: stdout/stderr payloads are copied
// to the local writers, and control frames carry exit status or errors.
// A clean EOF on the socket (or an exit-code-0 control message) returns
// nil; a nonzero exit code or an error control message is surfaced as an
// error. Closing conn on return unblocks the writer goroutine's sends.
func runGuestSessionAttach(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, socketPath string) error {
	conn, err := (&net.Dialer{}).DialContext(ctx, "unix", socketPath)
	if err != nil {
		return err
	}
	defer conn.Close()
	// Buffered so the writer goroutine can report once without blocking
	// even if this function has already returned.
	writeErrCh := make(chan error, 1)
	go func() {
		writeErrCh <- streamGuestSessionAttachInput(conn, stdin)
	}()
	for {
		channel, payload, err := sessionstream.ReadFrame(conn)
		if err != nil {
			// Prefer the context error so cancellation is reported as
			// such rather than as a broken connection.
			if ctx.Err() != nil {
				return ctx.Err()
			}
			if errors.Is(err, io.EOF) {
				return nil
			}
			return err
		}
		switch channel {
		case sessionstream.ChannelStdout:
			if _, err := stdout.Write(payload); err != nil {
				return err
			}
		case sessionstream.ChannelStderr:
			if _, err := stderr.Write(payload); err != nil {
				return err
			}
		case sessionstream.ChannelControl:
			message, err := sessionstream.ReadControl(payload)
			if err != nil {
				return err
			}
			switch message.Type {
			case "exit":
				// Nonzero exit codes become errors; 0 (or absent) is success.
				if message.ExitCode != nil && *message.ExitCode != 0 {
					return fmt.Errorf("guest session exited with code %d", *message.ExitCode)
				}
				return nil
			case "error":
				if strings.TrimSpace(message.Error) == "" {
					return errors.New("guest session attach failed")
				}
				return errors.New(message.Error)
			}
		}
		// Non-blocking check: surface a stdin-pump failure between frames
		// instead of silently continuing to read.
		select {
		case err := <-writeErrCh:
			if err != nil {
				return err
			}
		default:
		}
	}
}
|
|
|
|
// streamGuestSessionAttachInput copies stdin into the attach connection
// as stdin frames. A nil stdin, or EOF on it, sends an "eof" control
// message so the guest side can close the session's stdin pipe. Note
// the ordering: bytes read alongside an error are forwarded BEFORE the
// error is examined, per the io.Reader contract (a Read may return both
// n > 0 and a non-nil error).
func streamGuestSessionAttachInput(conn net.Conn, stdin io.Reader) error {
	if stdin == nil {
		return sessionstream.WriteControl(conn, sessionstream.ControlMessage{Type: "eof"})
	}
	buffer := make([]byte, 32*1024)
	for {
		n, err := stdin.Read(buffer)
		if n > 0 {
			// Forward partial data even when err != nil.
			if writeErr := sessionstream.WriteFrame(conn, sessionstream.ChannelStdin, buffer[:n]); writeErr != nil {
				return writeErr
			}
		}
		if err != nil {
			if errors.Is(err, io.EOF) {
				return sessionstream.WriteControl(conn, sessionstream.ControlMessage{Type: "eof"})
			}
			return err
		}
	}
}
|
|
|
|
// newVMLogsCommand builds `banger vm logs`, which resolves the VM's log
// file path via the daemon and streams it to stdout with tail, following
// when -f is given.
func newVMLogsCommand() *cobra.Command {
	var follow bool
	cmd := &cobra.Command{
		Use:               "logs <id-or-name>",
		Short:             "Show VM logs",
		Args:              exactArgsUsage(1, "usage: banger vm logs [-f] <id-or-name>"),
		ValidArgsFunction: completeVMNameOnlyAtPos0,
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			result, err := rpc.Call[api.VMLogsResult](cmd.Context(), layout.SocketPath, "vm.logs", api.VMRefParams{IDOrName: args[0]})
			if err != nil {
				return err
			}
			if result.LogPath == "" {
				return errors.New("vm has no log path")
			}
			// Stream via tail so -f keeps following new output.
			return system.CopyStream(cmd.OutOrStdout(), system.TailCommand(result.LogPath, follow))
		},
	}
	cmd.Flags().BoolVarP(&follow, "follow", "f", false, "follow logs")
	return cmd
}
|
|
|
|
func newVMStatsCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "stats <id-or-name>",
|
|
Short: "Show VM stats",
|
|
Args: exactArgsUsage(1, "usage: banger vm stats <id-or-name>"),
|
|
ValidArgsFunction: completeVMNameOnlyAtPos0,
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
layout, _, err := ensureDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
result, err := rpc.Call[api.VMStatsResult](cmd.Context(), layout.SocketPath, "vm.stats", api.VMRefParams{IDOrName: args[0]})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return printJSON(cmd.OutOrStdout(), result)
|
|
},
|
|
}
|
|
}
|
|
|
|
func newVMPortsCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "ports <id-or-name>",
|
|
Short: "Show host-reachable listening guest ports",
|
|
Args: exactArgsUsage(1, "usage: banger vm ports <id-or-name>"),
|
|
ValidArgsFunction: completeVMNameOnlyAtPos0,
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
layout, _, err := ensureDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
result, err := vmPortsFunc(cmd.Context(), layout.SocketPath, args[0])
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return printVMPortsTable(cmd.OutOrStdout(), result)
|
|
},
|
|
}
|
|
}
|
|
|
|
func newImageCommand() *cobra.Command {
|
|
cmd := &cobra.Command{
|
|
Use: "image",
|
|
Short: "Manage images",
|
|
RunE: helpNoArgs,
|
|
}
|
|
cmd.AddCommand(
|
|
newImageRegisterCommand(),
|
|
newImagePullCommand(),
|
|
newImagePromoteCommand(),
|
|
newImageListCommand(),
|
|
newImageShowCommand(),
|
|
newImageDeleteCommand(),
|
|
)
|
|
return cmd
|
|
}
|
|
|
|
func newImageRegisterCommand() *cobra.Command {
|
|
var params api.ImageRegisterParams
|
|
cmd := &cobra.Command{
|
|
Use: "register",
|
|
Short: "Register or update an unmanaged image",
|
|
Args: noArgsUsage("usage: banger image register --name <name> --rootfs <path> [--work-seed <path>] (--kernel <path> [--initrd <path>] [--modules <dir>] | --kernel-ref <name>)"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
if strings.TrimSpace(params.KernelRef) != "" && (params.KernelPath != "" || params.InitrdPath != "" || params.ModulesDir != "") {
|
|
return errors.New("--kernel-ref is mutually exclusive with --kernel/--initrd/--modules")
|
|
}
|
|
if err := absolutizeImageRegisterPaths(¶ms); err != nil {
|
|
return err
|
|
}
|
|
if err := system.EnsureSudo(cmd.Context()); err != nil {
|
|
return err
|
|
}
|
|
layout, _, err := ensureDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
result, err := rpc.Call[api.ImageShowResult](cmd.Context(), layout.SocketPath, "image.register", params)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return printImageSummary(cmd.OutOrStdout(), result.Image)
|
|
},
|
|
}
|
|
cmd.Flags().StringVar(¶ms.Name, "name", "", "image name")
|
|
cmd.Flags().StringVar(¶ms.RootfsPath, "rootfs", "", "rootfs path")
|
|
cmd.Flags().StringVar(¶ms.WorkSeedPath, "work-seed", "", "work-seed path")
|
|
cmd.Flags().StringVar(¶ms.KernelPath, "kernel", "", "kernel path")
|
|
cmd.Flags().StringVar(¶ms.InitrdPath, "initrd", "", "initrd path")
|
|
cmd.Flags().StringVar(¶ms.ModulesDir, "modules", "", "modules dir")
|
|
cmd.Flags().StringVar(¶ms.KernelRef, "kernel-ref", "", "name of a cataloged kernel (see 'banger kernel list')")
|
|
cmd.Flags().BoolVar(¶ms.Docker, "docker", false, "mark image as docker-prepared")
|
|
_ = cmd.RegisterFlagCompletionFunc("kernel-ref", completeKernelNames)
|
|
return cmd
|
|
}
|
|
|
|
// newImagePullCommand builds `banger image pull`, which fetches either
// a catalog bundle or an OCI image via the daemon and registers the
// result as an image.
func newImagePullCommand() *cobra.Command {
	var (
		params  api.ImagePullParams
		sizeRaw string
	)
	cmd := &cobra.Command{
		Use:   "pull <name-or-oci-ref>",
		Short: "Pull an image bundle (catalog name) or OCI image and register it",
		Long: strings.TrimSpace(`
Pull an image into banger. Two paths:

  • Catalog name (e.g. 'debian-bookworm')
    Fetches a pre-built bundle from the embedded imagecat catalog.
    Kernel-ref comes from the catalog entry; --kernel-ref still
    overrides.

  • OCI reference (e.g. 'docker.io/library/debian:bookworm')
    Pulls the image, flattens its layers, fixes ownership, injects
    banger's guest agents. --kernel-ref or direct --kernel/--initrd/
    --modules are required.

Use 'banger image catalog' to see available catalog names (once that
subcommand lands).
`),
		Example: strings.TrimSpace(`
banger image pull debian-bookworm
banger image pull debian-bookworm --name sandbox
banger image pull docker.io/library/debian:bookworm --kernel-ref generic-6.12
`),
		Args: exactArgsUsage(1, "usage: banger image pull <name-or-oci-ref> [--name <name>] [--kernel-ref <name>] [--kernel <path>] [--initrd <path>] [--modules <dir>] [--size <human>]"),
		RunE: func(cmd *cobra.Command, args []string) error {
			params.Ref = args[0]
			// --kernel-ref and explicit kernel paths are competing ways
			// to choose a kernel; reject using both.
			if strings.TrimSpace(params.KernelRef) != "" && (params.KernelPath != "" || params.InitrdPath != "" || params.ModulesDir != "") {
				return errors.New("--kernel-ref is mutually exclusive with --kernel/--initrd/--modules")
			}
			// --size is parsed client-side so bad input fails before any
			// RPC work starts.
			if strings.TrimSpace(sizeRaw) != "" {
				size, err := model.ParseSize(sizeRaw)
				if err != nil {
					return fmt.Errorf("--size: %w", err)
				}
				params.SizeBytes = size
			}
			// Absolutize user-supplied paths before shipping them to the
			// daemon.
			if err := absolutizePaths(&params.KernelPath, &params.InitrdPath, &params.ModulesDir); err != nil {
				return err
			}
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			var result api.ImageShowResult
			// Long-running RPC: withHeartbeat keeps stderr alive while
			// the daemon does the pull.
			err = withHeartbeat(cmd.ErrOrStderr(), "image pull", func() error {
				var callErr error
				result, callErr = rpc.Call[api.ImageShowResult](cmd.Context(), layout.SocketPath, "image.pull", params)
				return callErr
			})
			if err != nil {
				return err
			}
			return printImageSummary(cmd.OutOrStdout(), result.Image)
		},
	}
	cmd.Flags().StringVar(&params.Name, "name", "", "image name (defaults to the ref's repo+tag, sanitised)")
	cmd.Flags().StringVar(&params.KernelPath, "kernel", "", "kernel path")
	cmd.Flags().StringVar(&params.InitrdPath, "initrd", "", "initrd path")
	cmd.Flags().StringVar(&params.ModulesDir, "modules", "", "modules dir")
	cmd.Flags().StringVar(&params.KernelRef, "kernel-ref", "", "name of a cataloged kernel (see 'banger kernel list')")
	cmd.Flags().StringVar(&sizeRaw, "size", "", "ext4 image size (e.g. 4GiB); defaults to content + 25%, min 1GiB")
	_ = cmd.RegisterFlagCompletionFunc("kernel-ref", completeKernelNames)
	return cmd
}
|
|
|
|
func newImagePromoteCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "promote <id-or-name>",
|
|
Short: "Promote an unmanaged image to a managed artifact",
|
|
Args: exactArgsUsage(1, "usage: banger image promote <id-or-name>"),
|
|
ValidArgsFunction: completeImageNameOnlyAtPos0,
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
if err := system.EnsureSudo(cmd.Context()); err != nil {
|
|
return err
|
|
}
|
|
layout, _, err := ensureDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
result, err := rpc.Call[api.ImageShowResult](cmd.Context(), layout.SocketPath, "image.promote", api.ImageRefParams{IDOrName: args[0]})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return printImageSummary(cmd.OutOrStdout(), result.Image)
|
|
},
|
|
}
|
|
}
|
|
|
|
func newImageListCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "list",
|
|
Aliases: []string{"ls"},
|
|
Short: "List images",
|
|
Args: noArgsUsage("usage: banger image list"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
layout, _, err := ensureDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
result, err := rpc.Call[api.ImageListResult](cmd.Context(), layout.SocketPath, "image.list", api.Empty{})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return printImageListTable(cmd.OutOrStdout(), result.Images)
|
|
},
|
|
}
|
|
}
|
|
|
|
func newImageShowCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "show <id-or-name>",
|
|
Short: "Show image details",
|
|
Args: exactArgsUsage(1, "usage: banger image show <id-or-name>"),
|
|
ValidArgsFunction: completeImageNameOnlyAtPos0,
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
layout, _, err := ensureDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
result, err := rpc.Call[api.ImageShowResult](cmd.Context(), layout.SocketPath, "image.show", api.ImageRefParams{IDOrName: args[0]})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return printJSON(cmd.OutOrStdout(), result.Image)
|
|
},
|
|
}
|
|
}
|
|
|
|
func newImageDeleteCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "delete <id-or-name>",
|
|
Aliases: []string{"rm"},
|
|
Short: "Delete an image",
|
|
Args: exactArgsUsage(1, "usage: banger image delete <id-or-name>"),
|
|
ValidArgsFunction: completeImageNameOnlyAtPos0,
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
if err := system.EnsureSudo(cmd.Context()); err != nil {
|
|
return err
|
|
}
|
|
layout, _, err := ensureDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
result, err := rpc.Call[api.ImageShowResult](cmd.Context(), layout.SocketPath, "image.delete", api.ImageRefParams{IDOrName: args[0]})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return printImageSummary(cmd.OutOrStdout(), result.Image)
|
|
},
|
|
}
|
|
}
|
|
|
|
func newKernelCommand() *cobra.Command {
|
|
cmd := &cobra.Command{
|
|
Use: "kernel",
|
|
Short: "Manage the local kernel catalog",
|
|
RunE: helpNoArgs,
|
|
}
|
|
cmd.AddCommand(
|
|
newKernelListCommand(),
|
|
newKernelShowCommand(),
|
|
newKernelRmCommand(),
|
|
newKernelImportCommand(),
|
|
newKernelPullCommand(),
|
|
)
|
|
return cmd
|
|
}
|
|
|
|
func newKernelPullCommand() *cobra.Command {
|
|
var force bool
|
|
cmd := &cobra.Command{
|
|
Use: "pull <name>",
|
|
Short: "Download a cataloged kernel bundle",
|
|
Args: exactArgsUsage(1, "usage: banger kernel pull <name> [--force]"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
layout, _, err := ensureDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
var result api.KernelShowResult
|
|
err = withHeartbeat(cmd.ErrOrStderr(), "kernel pull", func() error {
|
|
var callErr error
|
|
result, callErr = rpc.Call[api.KernelShowResult](cmd.Context(), layout.SocketPath, "kernel.pull", api.KernelPullParams{Name: args[0], Force: force})
|
|
return callErr
|
|
})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return printJSON(cmd.OutOrStdout(), result.Entry)
|
|
},
|
|
}
|
|
cmd.Flags().BoolVar(&force, "force", false, "re-pull even if already present")
|
|
return cmd
|
|
}
|
|
|
|
func newKernelImportCommand() *cobra.Command {
|
|
var params api.KernelImportParams
|
|
cmd := &cobra.Command{
|
|
Use: "import <name>",
|
|
Short: "Import a kernel bundle produced by scripts/make-*-kernel.sh",
|
|
Long: "Copy the kernel, optional initrd, and optional modules directory from <from> into the local kernel catalog keyed by <name>. <from> is usually build/manual/void-kernel or build/manual/alpine-kernel.",
|
|
Args: exactArgsUsage(1, "usage: banger kernel import <name> --from <dir>"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
params.Name = args[0]
|
|
if strings.TrimSpace(params.FromDir) == "" {
|
|
return errors.New("--from <dir> is required")
|
|
}
|
|
abs, err := filepath.Abs(params.FromDir)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
params.FromDir = abs
|
|
layout, _, err := ensureDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
result, err := rpc.Call[api.KernelShowResult](cmd.Context(), layout.SocketPath, "kernel.import", params)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return printJSON(cmd.OutOrStdout(), result.Entry)
|
|
},
|
|
}
|
|
cmd.Flags().StringVar(¶ms.FromDir, "from", "", "directory produced by make-*-kernel.sh (e.g. build/manual/void-kernel)")
|
|
cmd.Flags().StringVar(¶ms.Distro, "distro", "", "distribution label stored in the manifest (e.g. void, alpine)")
|
|
cmd.Flags().StringVar(¶ms.Arch, "arch", "", "architecture label stored in the manifest (e.g. x86_64)")
|
|
return cmd
|
|
}
|
|
|
|
func newKernelListCommand() *cobra.Command {
|
|
var available bool
|
|
cmd := &cobra.Command{
|
|
Use: "list",
|
|
Aliases: []string{"ls"},
|
|
Short: "List kernels (local by default, or --available for the catalog)",
|
|
Args: noArgsUsage("usage: banger kernel list [--available]"),
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
layout, _, err := ensureDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if available {
|
|
result, err := rpc.Call[api.KernelCatalogResult](cmd.Context(), layout.SocketPath, "kernel.catalog", api.Empty{})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return printKernelCatalogTable(cmd.OutOrStdout(), result.Entries)
|
|
}
|
|
result, err := rpc.Call[api.KernelListResult](cmd.Context(), layout.SocketPath, "kernel.list", api.Empty{})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return printKernelListTable(cmd.OutOrStdout(), result.Entries)
|
|
},
|
|
}
|
|
cmd.Flags().BoolVar(&available, "available", false, "show the built-in catalog (with pulled/available status) instead of local entries")
|
|
return cmd
|
|
}
|
|
|
|
func newKernelShowCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "show <name>",
|
|
Short: "Show kernel catalog entry details",
|
|
Args: exactArgsUsage(1, "usage: banger kernel show <name>"),
|
|
ValidArgsFunction: completeKernelNameOnlyAtPos0,
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
layout, _, err := ensureDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
result, err := rpc.Call[api.KernelShowResult](cmd.Context(), layout.SocketPath, "kernel.show", api.KernelRefParams{Name: args[0]})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return printJSON(cmd.OutOrStdout(), result.Entry)
|
|
},
|
|
}
|
|
}
|
|
|
|
func newKernelRmCommand() *cobra.Command {
|
|
return &cobra.Command{
|
|
Use: "rm <name>",
|
|
Aliases: []string{"remove", "delete"},
|
|
Short: "Remove a kernel catalog entry",
|
|
Args: exactArgsUsage(1, "usage: banger kernel rm <name>"),
|
|
ValidArgsFunction: completeKernelNameOnlyAtPos0,
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
layout, _, err := ensureDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if _, err := rpc.Call[api.Empty](cmd.Context(), layout.SocketPath, "kernel.delete", api.KernelRefParams{Name: args[0]}); err != nil {
|
|
return err
|
|
}
|
|
_, err = fmt.Fprintf(cmd.OutOrStdout(), "removed %s\n", args[0])
|
|
return err
|
|
},
|
|
}
|
|
}
|
|
|
|
func printKernelListTable(out anyWriter, entries []api.KernelEntry) error {
|
|
w := tabwriter.NewWriter(out, 0, 8, 2, ' ', 0)
|
|
if _, err := fmt.Fprintln(w, "NAME\tDISTRO\tARCH\tKERNEL\tIMPORTED"); err != nil {
|
|
return err
|
|
}
|
|
for _, entry := range entries {
|
|
if _, err := fmt.Fprintf(
|
|
w,
|
|
"%s\t%s\t%s\t%s\t%s\n",
|
|
entry.Name,
|
|
dashIfEmpty(entry.Distro),
|
|
dashIfEmpty(entry.Arch),
|
|
dashIfEmpty(entry.KernelVersion),
|
|
dashIfEmpty(entry.ImportedAt),
|
|
); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return w.Flush()
|
|
}
|
|
|
|
func printKernelCatalogTable(out anyWriter, entries []api.KernelCatalogEntry) error {
|
|
w := tabwriter.NewWriter(out, 0, 8, 2, ' ', 0)
|
|
if _, err := fmt.Fprintln(w, "NAME\tDISTRO\tARCH\tKERNEL\tSIZE\tSTATE"); err != nil {
|
|
return err
|
|
}
|
|
for _, entry := range entries {
|
|
state := "available"
|
|
if entry.Pulled {
|
|
state = "pulled"
|
|
}
|
|
if _, err := fmt.Fprintf(
|
|
w,
|
|
"%s\t%s\t%s\t%s\t%s\t%s\n",
|
|
entry.Name,
|
|
dashIfEmpty(entry.Distro),
|
|
dashIfEmpty(entry.Arch),
|
|
dashIfEmpty(entry.KernelVersion),
|
|
humanSize(entry.SizeBytes),
|
|
state,
|
|
); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return w.Flush()
|
|
}
|
|
|
|
// humanSize renders a byte count with binary units and one decimal
// place (e.g. "1.5MiB"), returning "-" for zero or negative values.
func humanSize(bytes int64) string {
	if bytes <= 0 {
		return "-"
	}
	const (
		kib = 1 << 10
		mib = 1 << 20
		gib = 1 << 30
	)
	scaled := func(unit int64, suffix string) string {
		return fmt.Sprintf("%.1f%s", float64(bytes)/float64(unit), suffix)
	}
	if bytes >= gib {
		return scaled(gib, "GiB")
	}
	if bytes >= mib {
		return scaled(mib, "MiB")
	}
	if bytes >= kib {
		return scaled(kib, "KiB")
	}
	return fmt.Sprintf("%dB", bytes)
}
|
|
|
|
// dashIfEmpty substitutes "-" for empty or whitespace-only strings so
// table cells are never blank.
func dashIfEmpty(s string) string {
	if strings.TrimSpace(s) != "" {
		return s
	}
	return "-"
}
|
|
|
|
func helpNoArgs(cmd *cobra.Command, args []string) error {
|
|
if len(args) != 0 {
|
|
return fmt.Errorf("unknown arguments: %s", strings.Join(args, " "))
|
|
}
|
|
return cmd.Help()
|
|
}
|
|
|
|
func noArgsUsage(usage string) cobra.PositionalArgs {
|
|
return func(cmd *cobra.Command, args []string) error {
|
|
if len(args) != 0 {
|
|
return errors.New(usage)
|
|
}
|
|
return nil
|
|
}
|
|
}
|
|
|
|
func exactArgsUsage(n int, usage string) cobra.PositionalArgs {
|
|
return func(cmd *cobra.Command, args []string) error {
|
|
if len(args) != n {
|
|
return errors.New(usage)
|
|
}
|
|
return nil
|
|
}
|
|
}
|
|
|
|
func minArgsUsage(n int, usage string) cobra.PositionalArgs {
|
|
return func(cmd *cobra.Command, args []string) error {
|
|
if len(args) < n {
|
|
return errors.New(usage)
|
|
}
|
|
return nil
|
|
}
|
|
}
|
|
|
|
func maxArgsUsage(n int, usage string) cobra.PositionalArgs {
|
|
return func(cmd *cobra.Command, args []string) error {
|
|
if len(args) > n {
|
|
return errors.New(usage)
|
|
}
|
|
return nil
|
|
}
|
|
}
|
|
|
|
// resolvedVMTarget pairs a user-supplied VM reference with the record
// it resolved to, remembering the argument position so results can be
// correlated with input order.
type resolvedVMTarget struct {
	// Index is the position of Ref in the original refs slice.
	Index int
	// Ref is the raw id-or-name string the user passed.
	Ref string
	// VM is the record Ref resolved to.
	VM model.VMRecord
}
|
|
|
|
// vmRefResolutionError records a VM reference that failed to resolve,
// keeping the argument position and the underlying cause for later
// reporting.
type vmRefResolutionError struct {
	// Index is the position of Ref in the original refs slice.
	Index int
	// Ref is the raw id-or-name string that failed to resolve.
	Ref string
	// Err is the resolution failure.
	Err error
}
|
|
|
|
// vmBatchActionResult is the per-target outcome of a batched VM
// action: either the updated record or the action's error.
type vmBatchActionResult struct {
	// Target identifies which resolved ref this result belongs to.
	Target resolvedVMTarget
	// VM is the record returned by the action (zero value on error).
	VM model.VMRecord
	// Err is the action failure, if any.
	Err error
}
|
|
|
|
func runVMBatchAction(cmd *cobra.Command, socketPath string, refs []string, action func(context.Context, string) (model.VMRecord, error)) error {
|
|
listResult, err := rpc.Call[api.VMListResult](cmd.Context(), socketPath, "vm.list", api.Empty{})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
targets, resolutionErrs := resolveVMTargets(listResult.VMs, refs)
|
|
results := executeVMActionBatch(cmd.Context(), targets, action)
|
|
|
|
failed := false
|
|
for _, resolutionErr := range resolutionErrs {
|
|
if _, err := fmt.Fprintf(cmd.ErrOrStderr(), "%s: %v\n", resolutionErr.Ref, resolutionErr.Err); err != nil {
|
|
return err
|
|
}
|
|
failed = true
|
|
}
|
|
for _, result := range results {
|
|
if result.Err != nil {
|
|
if _, err := fmt.Fprintf(cmd.ErrOrStderr(), "%s: %v\n", result.Target.Ref, result.Err); err != nil {
|
|
return err
|
|
}
|
|
failed = true
|
|
continue
|
|
}
|
|
if err := printVMSummary(cmd.OutOrStdout(), result.VM); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
if failed {
|
|
return errors.New("one or more VM operations failed")
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func resolveVMTargets(vms []model.VMRecord, refs []string) ([]resolvedVMTarget, []vmRefResolutionError) {
|
|
targets := make([]resolvedVMTarget, 0, len(refs))
|
|
resolutionErrs := make([]vmRefResolutionError, 0)
|
|
seen := make(map[string]struct{}, len(refs))
|
|
for index, ref := range refs {
|
|
vm, err := resolveVMRef(vms, ref)
|
|
if err != nil {
|
|
resolutionErrs = append(resolutionErrs, vmRefResolutionError{Index: index, Ref: ref, Err: err})
|
|
continue
|
|
}
|
|
if _, ok := seen[vm.ID]; ok {
|
|
continue
|
|
}
|
|
seen[vm.ID] = struct{}{}
|
|
targets = append(targets, resolvedVMTarget{Index: index, Ref: ref, VM: vm})
|
|
}
|
|
return targets, resolutionErrs
|
|
}
|
|
|
|
func resolveVMRef(vms []model.VMRecord, ref string) (model.VMRecord, error) {
|
|
ref = strings.TrimSpace(ref)
|
|
if ref == "" {
|
|
return model.VMRecord{}, errors.New("vm id or name is required")
|
|
}
|
|
exactMatches := make([]model.VMRecord, 0, 1)
|
|
for _, vm := range vms {
|
|
if vm.ID == ref || vm.Name == ref {
|
|
exactMatches = append(exactMatches, vm)
|
|
}
|
|
}
|
|
switch len(exactMatches) {
|
|
case 1:
|
|
return exactMatches[0], nil
|
|
case 0:
|
|
default:
|
|
return model.VMRecord{}, fmt.Errorf("multiple VMs match %q", ref)
|
|
}
|
|
|
|
prefixMatches := make([]model.VMRecord, 0, 1)
|
|
for _, vm := range vms {
|
|
if strings.HasPrefix(vm.ID, ref) || strings.HasPrefix(vm.Name, ref) {
|
|
prefixMatches = append(prefixMatches, vm)
|
|
}
|
|
}
|
|
switch len(prefixMatches) {
|
|
case 1:
|
|
return prefixMatches[0], nil
|
|
case 0:
|
|
return model.VMRecord{}, fmt.Errorf("vm %q not found", ref)
|
|
default:
|
|
return model.VMRecord{}, fmt.Errorf("multiple VMs match %q", ref)
|
|
}
|
|
}
|
|
|
|
func executeVMActionBatch(ctx context.Context, targets []resolvedVMTarget, action func(context.Context, string) (model.VMRecord, error)) []vmBatchActionResult {
|
|
results := make([]vmBatchActionResult, len(targets))
|
|
var wg sync.WaitGroup
|
|
wg.Add(len(targets))
|
|
for index, target := range targets {
|
|
index := index
|
|
target := target
|
|
go func() {
|
|
defer wg.Done()
|
|
vm, err := action(ctx, target.VM.ID)
|
|
results[index] = vmBatchActionResult{
|
|
Target: target,
|
|
VM: vm,
|
|
Err: err,
|
|
}
|
|
}()
|
|
}
|
|
wg.Wait()
|
|
return results
|
|
}
|
|
|
|
func ensureDaemon(ctx context.Context) (paths.Layout, model.DaemonConfig, error) {
|
|
layout, err := paths.Resolve()
|
|
if err != nil {
|
|
return paths.Layout{}, model.DaemonConfig{}, err
|
|
}
|
|
cfg, err := config.Load(layout)
|
|
if err != nil {
|
|
return paths.Layout{}, model.DaemonConfig{}, err
|
|
}
|
|
if ping, err := daemonPingFunc(ctx, layout.SocketPath); err == nil {
|
|
if daemonOutdated(ping.PID) {
|
|
if err := restartDaemon(ctx, layout, ping.PID); err != nil {
|
|
return paths.Layout{}, model.DaemonConfig{}, err
|
|
}
|
|
return layout, cfg, nil
|
|
}
|
|
return layout, cfg, nil
|
|
}
|
|
if err := startDaemon(ctx, layout); err != nil {
|
|
return paths.Layout{}, model.DaemonConfig{}, err
|
|
}
|
|
return layout, cfg, nil
|
|
}
|
|
|
|
func daemonOutdated(pid int) bool {
|
|
if pid <= 0 {
|
|
return false
|
|
}
|
|
daemonBin, err := bangerdPathFunc()
|
|
if err != nil {
|
|
return false
|
|
}
|
|
currentInfo, err := os.Stat(daemonBin)
|
|
if err != nil {
|
|
return false
|
|
}
|
|
runningInfo, err := os.Stat(daemonExePath(pid))
|
|
if err != nil {
|
|
return false
|
|
}
|
|
return !os.SameFile(currentInfo, runningInfo)
|
|
}
|
|
|
|
func restartDaemon(ctx context.Context, layout paths.Layout, pid int) error {
|
|
stopCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
|
|
defer cancel()
|
|
|
|
_, _ = rpc.Call[api.ShutdownResult](stopCtx, layout.SocketPath, "shutdown", api.Empty{})
|
|
if waitForPIDExit(pid, 2*time.Second) {
|
|
return startDaemon(ctx, layout)
|
|
}
|
|
if proc, err := os.FindProcess(pid); err == nil {
|
|
_ = proc.Signal(syscall.SIGTERM)
|
|
}
|
|
if !waitForPIDExit(pid, 2*time.Second) {
|
|
return fmt.Errorf("timed out restarting stale daemon pid %d", pid)
|
|
}
|
|
return startDaemon(ctx, layout)
|
|
}
|
|
|
|
func waitForPIDExit(pid int, timeout time.Duration) bool {
|
|
deadline := time.Now().Add(timeout)
|
|
for time.Now().Before(deadline) {
|
|
if !pidRunning(pid) {
|
|
return true
|
|
}
|
|
time.Sleep(50 * time.Millisecond)
|
|
}
|
|
return !pidRunning(pid)
|
|
}
|
|
|
|
// pidRunning reports whether a process with the given PID exists and
// is signalable, via the conventional signal-0 probe. Non-positive
// PIDs are treated as not running.
func pidRunning(pid int) bool {
	if pid <= 0 {
		return false
	}
	if proc, err := os.FindProcess(pid); err == nil {
		// Signal 0 checks existence/permission without delivering
		// anything.
		return proc.Signal(syscall.Signal(0)) == nil
	}
	return false
}
|
|
|
|
func startDaemon(ctx context.Context, layout paths.Layout) error {
|
|
if err := paths.Ensure(layout); err != nil {
|
|
return err
|
|
}
|
|
logFile, err := os.OpenFile(layout.DaemonLog, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer logFile.Close()
|
|
|
|
daemonBin, err := paths.BangerdPath()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
cmd := buildDaemonCommand(daemonBin)
|
|
cmd.Stdout = logFile
|
|
cmd.Stderr = logFile
|
|
cmd.Stdin = nil
|
|
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
|
|
if err := cmd.Start(); err != nil {
|
|
return err
|
|
}
|
|
if err := rpc.WaitForSocket(layout.SocketPath, 5*time.Second); err != nil {
|
|
return fmt.Errorf("daemon failed to start; inspect %s: %w", layout.DaemonLog, err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// buildDaemonCommand constructs the exec.Cmd that launches the daemon
// binary; kept as a separate seam so the invocation can be inspected
// or overridden.
func buildDaemonCommand(daemonBin string) *exec.Cmd {
	cmd := exec.Command(daemonBin)
	return cmd
}
|
|
|
|
func vmSetParamsFromFlags(idOrName string, vcpu, memory int, diskSize string, nat, noNat bool) (api.VMSetParams, error) {
|
|
if nat && noNat {
|
|
return api.VMSetParams{}, errors.New("use only one of --nat or --no-nat")
|
|
}
|
|
params := api.VMSetParams{IDOrName: idOrName, WorkDiskSize: diskSize}
|
|
if vcpu >= 0 {
|
|
if err := validatePositiveSetting("vcpu", vcpu); err != nil {
|
|
return api.VMSetParams{}, err
|
|
}
|
|
params.VCPUCount = &vcpu
|
|
}
|
|
if memory >= 0 {
|
|
if err := validatePositiveSetting("memory", memory); err != nil {
|
|
return api.VMSetParams{}, err
|
|
}
|
|
params.MemoryMiB = &memory
|
|
}
|
|
if nat || noNat {
|
|
value := nat && !noNat
|
|
params.NATEnabled = &value
|
|
}
|
|
if params.VCPUCount == nil && params.MemoryMiB == nil && params.WorkDiskSize == "" && params.NATEnabled == nil {
|
|
return api.VMSetParams{}, errors.New("no VM settings changed")
|
|
}
|
|
return params, nil
|
|
}
|
|
|
|
func vmCreateParamsFromFlags(cmd *cobra.Command, name, imageName string, vcpu, memory int, systemOverlaySize, workDiskSize string, natEnabled, noStart bool) (api.VMCreateParams, error) {
|
|
// The flag defaults are already resolved from config + host
|
|
// heuristics at command-build time, so we always forward the flag
|
|
// values to the daemon. This makes the CLI the single source of
|
|
// truth for effective defaults and lets the progress renderer show
|
|
// exactly what the VM will be sized at.
|
|
if err := validatePositiveSetting("vcpu", vcpu); err != nil {
|
|
return api.VMCreateParams{}, err
|
|
}
|
|
if err := validatePositiveSetting("memory", memory); err != nil {
|
|
return api.VMCreateParams{}, err
|
|
}
|
|
params := api.VMCreateParams{
|
|
Name: name,
|
|
ImageName: imageName,
|
|
NATEnabled: natEnabled,
|
|
NoStart: noStart,
|
|
VCPUCount: &vcpu,
|
|
MemoryMiB: &memory,
|
|
SystemOverlaySize: systemOverlaySize,
|
|
WorkDiskSize: workDiskSize,
|
|
}
|
|
return params, nil
|
|
}
|
|
|
|
// effectiveVMDefaults resolves the default sizing applied to commands
|
|
// that accept --vcpu / --memory / --disk-size flags when the user
|
|
// doesn't set them. It combines config overrides (if any) with
|
|
// host-derived heuristics, falling back to baked-in constants.
|
|
//
|
|
// Called at command-build time, which runs before any RunE. It
|
|
// reads config.toml and /proc — any read error collapses to builtin
|
|
// constants so the CLI stays usable even on a misconfigured host.
|
|
func effectiveVMDefaults() model.VMDefaults {
|
|
var override model.VMDefaultsOverride
|
|
if layout, err := paths.Resolve(); err == nil {
|
|
if cfg, err := config.Load(layout); err == nil {
|
|
override = cfg.VMDefaults
|
|
}
|
|
}
|
|
host, err := system.ReadHostResources()
|
|
if err != nil {
|
|
return model.ResolveVMDefaults(override, 0, 0)
|
|
}
|
|
return model.ResolveVMDefaults(override, host.CPUCount, host.TotalMemoryBytes)
|
|
}
|
|
|
|
// printVMSpecLine writes a one-line sizing summary to out. Always
|
|
// emitted (even non-TTY) so logs and CI output carry the numbers.
|
|
func printVMSpecLine(out io.Writer, params api.VMCreateParams) {
|
|
vcpu := model.DefaultVCPUCount
|
|
if params.VCPUCount != nil {
|
|
vcpu = *params.VCPUCount
|
|
}
|
|
memory := model.DefaultMemoryMiB
|
|
if params.MemoryMiB != nil {
|
|
memory = *params.MemoryMiB
|
|
}
|
|
diskBytes := int64(model.DefaultWorkDiskSize)
|
|
if strings.TrimSpace(params.WorkDiskSize) != "" {
|
|
if parsed, err := model.ParseSize(params.WorkDiskSize); err == nil {
|
|
diskBytes = parsed
|
|
}
|
|
}
|
|
_, _ = fmt.Fprintf(out, "spec: %d vcpu · %d MiB · %s disk\n",
|
|
vcpu, memory, model.FormatSizeBytes(diskBytes))
|
|
}
|
|
|
|
// validatePositiveSetting rejects zero and negative values for a
// numeric flag, naming the flag in the error message.
func validatePositiveSetting(label string, value int) error {
	if value > 0 {
		return nil
	}
	return fmt.Errorf("%s must be a positive integer", label)
}
|
|
|
|
func runSSHSession(ctx context.Context, socketPath, vmRef string, stdin io.Reader, stdout, stderr io.Writer, sshArgs []string, skipReminder bool) error {
|
|
sshErr := sshExecFunc(ctx, stdin, stdout, stderr, sshArgs)
|
|
if skipReminder || !shouldCheckSSHReminder(sshErr) || ctx.Err() != nil {
|
|
return sshErr
|
|
}
|
|
pingCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
|
defer cancel()
|
|
health, err := vmHealthFunc(pingCtx, socketPath, vmRef)
|
|
if err != nil {
|
|
_, _ = fmt.Fprintln(stderr, vsockagent.WarningMessage(vmRef, err))
|
|
return sshErr
|
|
}
|
|
if health.Healthy {
|
|
name := health.Name
|
|
if strings.TrimSpace(name) == "" {
|
|
name = vmRef
|
|
}
|
|
_, _ = fmt.Fprintln(stderr, vsockagent.ReminderMessage(name))
|
|
}
|
|
return sshErr
|
|
}
|
|
|
|
// shouldCheckSSHReminder decides whether a finished ssh invocation
// warrants the vsock-agent health probe: success counts, and so does a
// command that exited non-zero inside the guest — but ssh's own
// connection-level failure (exit code 255) does not, and neither does
// any error that isn't a process exit (e.g. ssh missing from PATH).
func shouldCheckSSHReminder(err error) bool {
	if err == nil {
		return true
	}
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		return exitErr.ExitCode() != 255
	}
	return false
}
|
|
|
|
func sshCommandArgs(cfg model.DaemonConfig, guestIP string, extra []string) ([]string, error) {
|
|
if guestIP == "" {
|
|
return nil, errors.New("vm has no guest IP")
|
|
}
|
|
args := []string{}
|
|
args = append(args, "-F", "/dev/null")
|
|
if cfg.SSHKeyPath != "" {
|
|
args = append(args, "-i", cfg.SSHKeyPath)
|
|
}
|
|
// Host-key verification uses a banger-owned known_hosts file
|
|
// populated by the daemon's first successful Go-SSH dial to each
|
|
// VM (trust-on-first-use). `accept-new` means: accept-and-pin on
|
|
// first contact; strict-verify afterwards. The user's own
|
|
// ~/.known_hosts is untouched.
|
|
knownHosts, khErr := bangerKnownHostsPath()
|
|
args = append(
|
|
args,
|
|
"-o", "IdentitiesOnly=yes",
|
|
"-o", "BatchMode=yes",
|
|
"-o", "PreferredAuthentications=publickey",
|
|
"-o", "PasswordAuthentication=no",
|
|
"-o", "KbdInteractiveAuthentication=no",
|
|
)
|
|
if khErr == nil {
|
|
args = append(args,
|
|
"-o", "UserKnownHostsFile="+knownHosts,
|
|
"-o", "StrictHostKeyChecking=accept-new",
|
|
)
|
|
} else {
|
|
// If we can't resolve the banger path (unusual — paths.Resolve
|
|
// basically can't fail), fall through to a hard-fail posture
|
|
// rather than silently disabling verification.
|
|
args = append(args,
|
|
"-o", "StrictHostKeyChecking=yes",
|
|
)
|
|
}
|
|
args = append(args, "root@"+guestIP)
|
|
args = append(args, extra...)
|
|
return args, nil
|
|
}
|
|
|
|
// bangerKnownHostsPath resolves the TOFU file the daemon writes into
|
|
// and the CLI reads back. Both sides must agree on the path or the
|
|
// pin doesn't round-trip.
|
|
func bangerKnownHostsPath() (string, error) {
|
|
layout, err := paths.Resolve()
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
return layout.KnownHostsPath, nil
|
|
}
|
|
|
|
func validateSSHPrereqs(cfg model.DaemonConfig) error {
|
|
checks := system.NewPreflight()
|
|
checks.RequireCommand("ssh", "install openssh-client")
|
|
if strings.TrimSpace(cfg.SSHKeyPath) != "" {
|
|
checks.RequireFile(cfg.SSHKeyPath, "ssh private key", `set "ssh_key_path" or let banger create its default key`)
|
|
}
|
|
return checks.Err("ssh preflight failed")
|
|
}
|
|
|
|
func validateVMRunPrereqs(cfg model.DaemonConfig) error {
|
|
checks := system.NewPreflight()
|
|
checks.RequireCommand("git", "install git")
|
|
if strings.TrimSpace(cfg.SSHKeyPath) != "" {
|
|
checks.RequireFile(cfg.SSHKeyPath, "ssh private key", `set "ssh_key_path" or let banger create its default key`)
|
|
}
|
|
return checks.Err("vm run preflight failed")
|
|
}
|
|
|
|
// inspectVMRunRepo validates the source path and collects everything
// vm run needs to replicate the repository into the guest: repo root,
// base/head commits, branch and --from resolution, git identity,
// origin URL, and the working-tree overlay file list.
func inspectVMRunRepo(ctx context.Context, rawPath, branchName, fromRef string) (vmRunRepoSpec, error) {
	sourcePath, err := resolveVMRunSourcePath(rawPath)
	if err != nil {
		return vmRunRepoSpec{}, err
	}

	// The path may point anywhere inside the work tree; normalize to
	// the repository top level for all subsequent git invocations.
	repoRoot, err := gitTrimmedOutput(ctx, sourcePath, "rev-parse", "--show-toplevel")
	if err != nil {
		return vmRunRepoSpec{}, fmt.Errorf("%s is not inside a git repository", sourcePath)
	}
	isBare, err := gitTrimmedOutput(ctx, repoRoot, "rev-parse", "--is-bare-repository")
	if err != nil {
		return vmRunRepoSpec{}, fmt.Errorf("inspect git repository %s: %w", repoRoot, err)
	}
	if isBare == "true" {
		return vmRunRepoSpec{}, fmt.Errorf("vm run requires a non-bare git repository: %s", repoRoot)
	}
	if err := ensureVMRunRepoHasNoSubmodules(ctx, repoRoot); err != nil {
		return vmRunRepoSpec{}, err
	}

	// HEAD^{commit} fails on an empty repository, which we reject.
	headCommit, err := gitTrimmedOutput(ctx, repoRoot, "rev-parse", "HEAD^{commit}")
	if err != nil {
		return vmRunRepoSpec{}, fmt.Errorf("git repository %s must have at least one commit", repoRoot)
	}
	// Empty output (not an error) when HEAD is detached.
	currentBranch, err := gitTrimmedOutput(ctx, repoRoot, "branch", "--show-current")
	if err != nil {
		return vmRunRepoSpec{}, fmt.Errorf("resolve current branch for %s: %w", repoRoot, err)
	}

	// With --branch, the guest branch starts from --from (required);
	// otherwise the checkout base is simply HEAD.
	baseCommit := headCommit
	resolvedFromRef := ""
	branchName = strings.TrimSpace(branchName)
	if branchName != "" {
		fromRef = strings.TrimSpace(fromRef)
		if fromRef == "" {
			return vmRunRepoSpec{}, errors.New("--from cannot be empty")
		}
		resolvedFromRef = fromRef
		baseCommit, err = gitTrimmedOutput(ctx, repoRoot, "rev-parse", fromRef+"^{commit}")
		if err != nil {
			return vmRunRepoSpec{}, fmt.Errorf("resolve --from %q: %w", fromRef, err)
		}
	}

	// Git identity and origin are forwarded so guest-side commits
	// behave like host-side ones; all three may legitimately be "".
	gitUserName, err := gitResolvedConfigValue(ctx, repoRoot, "user.name")
	if err != nil {
		return vmRunRepoSpec{}, fmt.Errorf("resolve git user.name for %s: %w", repoRoot, err)
	}
	gitUserEmail, err := gitResolvedConfigValue(ctx, repoRoot, "user.email")
	if err != nil {
		return vmRunRepoSpec{}, fmt.Errorf("resolve git user.email for %s: %w", repoRoot, err)
	}
	originURL, err := gitResolvedConfigValue(ctx, repoRoot, "remote.origin.url")
	if err != nil {
		return vmRunRepoSpec{}, fmt.Errorf("resolve origin url for %s: %w", repoRoot, err)
	}

	overlayPaths, err := listVMRunOverlayPaths(ctx, repoRoot)
	if err != nil {
		return vmRunRepoSpec{}, err
	}

	return vmRunRepoSpec{
		SourcePath:    sourcePath,
		RepoRoot:      repoRoot,
		RepoName:      filepath.Base(repoRoot),
		HeadCommit:    headCommit,
		CurrentBranch: currentBranch,
		BranchName:    branchName,
		FromRef:       resolvedFromRef,
		BaseCommit:    baseCommit,
		OriginURL:     originURL,
		GitUserName:   gitUserName,
		GitUserEmail:  gitUserEmail,
		OverlayPaths:  overlayPaths,
	}, nil
}
|
|
|
|
func resolveVMRunSourcePath(rawPath string) (string, error) {
|
|
if strings.TrimSpace(rawPath) == "" {
|
|
wd, err := cwdFunc()
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
rawPath = wd
|
|
}
|
|
absPath, err := filepath.Abs(rawPath)
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
info, err := os.Stat(absPath)
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
if !info.IsDir() {
|
|
return "", fmt.Errorf("%s is not a directory", absPath)
|
|
}
|
|
return absPath, nil
|
|
}
|
|
|
|
func ensureVMRunRepoHasNoSubmodules(ctx context.Context, repoRoot string) error {
|
|
output, err := gitOutput(ctx, repoRoot, "ls-files", "--stage", "-z")
|
|
if err != nil {
|
|
return fmt.Errorf("inspect git index for %s: %w", repoRoot, err)
|
|
}
|
|
for _, record := range parseNullSeparatedOutput(output) {
|
|
if strings.HasPrefix(record, "160000 ") {
|
|
return fmt.Errorf("vm run does not yet support git submodules: %s", repoRoot)
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func listVMRunOverlayPaths(ctx context.Context, repoRoot string) ([]string, error) {
|
|
trackedOutput, err := gitOutput(ctx, repoRoot, "ls-files", "-z")
|
|
if err != nil {
|
|
return nil, fmt.Errorf("list tracked files for %s: %w", repoRoot, err)
|
|
}
|
|
untrackedOutput, err := gitOutput(ctx, repoRoot, "ls-files", "--others", "--exclude-standard", "-z")
|
|
if err != nil {
|
|
return nil, fmt.Errorf("list untracked files for %s: %w", repoRoot, err)
|
|
}
|
|
|
|
paths := make([]string, 0)
|
|
seen := make(map[string]struct{})
|
|
for _, relPath := range parseNullSeparatedOutput(trackedOutput) {
|
|
if relPath == "" {
|
|
continue
|
|
}
|
|
if _, err := os.Lstat(filepath.Join(repoRoot, relPath)); err != nil {
|
|
if os.IsNotExist(err) {
|
|
continue
|
|
}
|
|
return nil, err
|
|
}
|
|
seen[relPath] = struct{}{}
|
|
paths = append(paths, relPath)
|
|
}
|
|
for _, relPath := range parseNullSeparatedOutput(untrackedOutput) {
|
|
if relPath == "" {
|
|
continue
|
|
}
|
|
if _, ok := seen[relPath]; ok {
|
|
continue
|
|
}
|
|
seen[relPath] = struct{}{}
|
|
paths = append(paths, relPath)
|
|
}
|
|
sort.Strings(paths)
|
|
return paths, nil
|
|
}
|
|
|
|
func gitOutput(ctx context.Context, dir string, args ...string) ([]byte, error) {
|
|
fullArgs := make([]string, 0, len(args)+2)
|
|
if strings.TrimSpace(dir) != "" {
|
|
fullArgs = append(fullArgs, "-C", dir)
|
|
}
|
|
fullArgs = append(fullArgs, args...)
|
|
return hostCommandOutputFunc(ctx, "git", fullArgs...)
|
|
}
|
|
|
|
func gitTrimmedOutput(ctx context.Context, dir string, args ...string) (string, error) {
|
|
output, err := gitOutput(ctx, dir, args...)
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
return strings.TrimSpace(string(output)), nil
|
|
}
|
|
|
|
func gitResolvedConfigValue(ctx context.Context, dir, key string) (string, error) {
|
|
return gitTrimmedOutput(ctx, dir, "config", "--default", "", "--get", key)
|
|
}
|
|
|
|
// parseNullSeparatedOutput splits NUL-delimited command output into
// its non-empty, whitespace-trimmed records.
func parseNullSeparatedOutput(output []byte) []string {
	chunks := bytes.Split(output, []byte{0})
	records := make([]string, 0, len(chunks))
	for _, chunk := range chunks {
		if record := strings.TrimSpace(string(chunk)); record != "" {
			records = append(records, record)
		}
	}
	return records
}
|
|
|
|
// splitVMRunArgs partitions cobra positional args into the optional path
|
|
// argument and the trailing command (everything after a `--` separator).
|
|
// The path slice may contain 0..1 entries; the command slice may be empty.
|
|
func splitVMRunArgs(cmd *cobra.Command, args []string) (pathArgs, commandArgs []string) {
|
|
dash := cmd.ArgsLenAtDash()
|
|
if dash < 0 {
|
|
return args, nil
|
|
}
|
|
if dash > len(args) {
|
|
dash = len(args)
|
|
}
|
|
return args[:dash], args[dash:]
|
|
}
|
|
|
|
// ExitCodeError wraps a remote command's exit status so the CLI's main()
// can propagate it verbatim. Only errors explicitly wrapped in this
// type get forwarded as process exit codes — plain *exec.ExitError
// values (from unrelated subprocesses like mkfs.ext4) must still
// surface as regular errors so the user sees a message.
type ExitCodeError struct {
	Code int
}

// Error renders the status in the same shape os/exec uses.
func (e ExitCodeError) Error() string {
	return fmt.Sprintf("exit status %d", e.Code)
}
|
|
|
|
// runVMRun drives the full `banger vm run` flow: create the VM, wait
// for guest ssh, optionally prepare the repo workspace, then either
// run a one-shot command or attach an interactive session.
// removeOnExit implements --rm; cleanup is only armed once ssh is
// confirmed (see the shouldRemove flag below).
func runVMRun(ctx context.Context, socketPath string, cfg model.DaemonConfig, stdin io.Reader, stdout, stderr io.Writer, params api.VMCreateParams, spec *vmRunRepoSpec, command []string, removeOnExit bool) error {
	progress := newVMRunProgressRenderer(stderr)
	vm, err := runVMCreate(ctx, socketPath, stderr, params)
	if err != nil {
		return err
	}
	// Prefer the human-readable name; fall back to the shortened ID.
	vmRef := strings.TrimSpace(vm.Name)
	if vmRef == "" {
		vmRef = shortID(vm.ID)
	}
	// --rm cleanup is wired AFTER ssh is confirmed. An ssh-wait
	// timeout leaves the VM alive for `vm logs` inspection (our
	// error message tells the user that); the cleanup only fires
	// once the session phase runs.
	shouldRemove := false
	if removeOnExit {
		defer func() {
			if !shouldRemove {
				return
			}
			// Use a fresh context so Ctrl-C during the session
			// doesn't abort the delete RPC.
			cleanupCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			defer cancel()
			if err := vmDeleteFunc(cleanupCtx, socketPath, vmRef); err != nil {
				printVMRunWarning(stderr, fmt.Sprintf("--rm cleanup failed: %v (leaked vm %q; delete manually)", err, vmRef))
			}
		}()
	}
	sshAddress := net.JoinHostPort(vm.Runtime.GuestIP, "22")
	progress.render("waiting for guest ssh")
	sshCtx, cancelSSH := context.WithTimeout(ctx, vmRunSSHTimeout)
	if err := guestWaitForSSHFunc(sshCtx, sshAddress, cfg.SSHKeyPath, 250*time.Millisecond); err != nil {
		cancelSSH()
		// Surface parent-context cancellation (Ctrl-C, caller
		// timeout) as-is. Only the guest-side timeout needs the
		// actionable hint.
		if errors.Is(ctx.Err(), context.Canceled) || errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return fmt.Errorf("vm %q: %w", vmRef, ctx.Err())
		}
		return fmt.Errorf(
			"vm %q is running but guest ssh did not come up within %s. "+
				"sshd is the likely suspect — inspect the guest console with "+
				"`banger vm logs %s` (look for `Failed to start ssh.service`). "+
				"The VM is still alive; leave it for inspection or remove with `banger vm delete %s`. "+
				"underlying error: %w",
			vmRef, vmRunSSHTimeout, vmRef, vmRef, err,
		)
	}
	cancelSSH()
	// ssh is confirmed: from here on --rm may clean up the VM.
	shouldRemove = removeOnExit
	if spec != nil {
		progress.render("preparing guest workspace")
		if _, err := vmWorkspacePrepareFunc(ctx, socketPath, api.VMWorkspacePrepareParams{
			IDOrName:   vmRef,
			SourcePath: spec.SourcePath,
			GuestPath:  vmRunGuestDir(),
			Branch:     spec.BranchName,
			From:       spec.FromRef,
			Mode:       string(model.WorkspacePrepareModeShallowOverlay),
		}); err != nil {
			return fmt.Errorf("vm %q is running but workspace prepare failed: %w", vmRef, err)
		}
		// Interactive sessions (no command) also get the background
		// tooling bootstrap; failures there are non-fatal warnings.
		if len(command) == 0 {
			client, err := guestDialFunc(ctx, sshAddress, cfg.SSHKeyPath)
			if err != nil {
				return fmt.Errorf("vm %q is running but guest ssh is unavailable: %w", vmRef, err)
			}
			if err := startVMRunToolingHarness(ctx, client, *spec, progress); err != nil {
				printVMRunWarning(stderr, fmt.Sprintf("guest tooling bootstrap start failed: %v", err))
			}
			_ = client.Close()
		}
	}
	sshArgs, err := sshCommandArgs(cfg, vm.Runtime.GuestIP, command)
	if err != nil {
		return fmt.Errorf("vm %q is running but ssh args could not be built: %w", vmRef, err)
	}
	if len(command) > 0 {
		progress.render("running command in guest")
		if err := sshExecFunc(ctx, stdin, stdout, stderr, sshArgs); err != nil {
			// Propagate the remote command's exit status verbatim.
			var exitErr *exec.ExitError
			if errors.As(err, &exitErr) {
				return ExitCodeError{Code: exitErr.ExitCode()}
			}
			return err
		}
		return nil
	}
	progress.render("attaching to guest")
	return runSSHSession(ctx, socketPath, vmRef, stdin, stdout, stderr, sshArgs, removeOnExit)
}
|
|
|
|
func importVMRunRepoToGuest(ctx context.Context, client vmRunGuestClient, spec vmRunRepoSpec, progress *vmRunProgressRenderer) error {
|
|
if progress != nil {
|
|
progress.render("preparing shallow repo")
|
|
}
|
|
repoCopyDir, cleanup, err := prepareVMRunRepoCopyFunc(ctx, spec)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer cleanup()
|
|
if progress != nil {
|
|
progress.render("copying repo metadata to guest")
|
|
}
|
|
var copyLog bytes.Buffer
|
|
remoteCommand := fmt.Sprintf("rm -rf %s && mkdir -p %s && tar -o -C %s --strip-components=1 -xf -", shellQuote(vmRunGuestDir()), shellQuote(vmRunGuestDir()), shellQuote(vmRunGuestDir()))
|
|
if err := client.StreamTar(ctx, repoCopyDir, remoteCommand, ©Log); err != nil {
|
|
return formatVMRunStepError("copy guest git metadata", err, copyLog.String())
|
|
}
|
|
if progress != nil {
|
|
progress.render("preparing guest checkout")
|
|
}
|
|
var scriptLog bytes.Buffer
|
|
if err := client.RunScript(ctx, vmRunCheckoutScript(spec), &scriptLog); err != nil {
|
|
return formatVMRunStepError("prepare guest checkout", err, scriptLog.String())
|
|
}
|
|
if progress != nil {
|
|
progress.render("overlaying host working tree")
|
|
}
|
|
var overlayLog bytes.Buffer
|
|
remoteCommand = fmt.Sprintf("tar -o -C %s --strip-components=1 -xf -", shellQuote(vmRunGuestDir()))
|
|
if err := client.StreamTarEntries(ctx, spec.RepoRoot, spec.OverlayPaths, remoteCommand, &overlayLog); err != nil {
|
|
return formatVMRunStepError("overlay host working tree", err, overlayLog.String())
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// prepareVMRunRepoCopy builds a throwaway shallow clone of the host
// repository under a temp dir, suitable for streaming to the guest.
// Returns the clone path and a cleanup func the caller must run; on
// any error the temp dir has already been removed.
func prepareVMRunRepoCopy(ctx context.Context, spec vmRunRepoSpec) (string, func(), error) {
	tempRoot, err := os.MkdirTemp("", "banger-vm-run-*")
	if err != nil {
		return "", nil, err
	}
	cleanup := func() {
		_ = os.RemoveAll(tempRoot)
	}
	repoCopyDir := filepath.Join(tempRoot, spec.RepoName)
	// Shallow, no-checkout clone keeps the copy small; single-branch
	// when the host repo's current branch is known.
	cloneArgs := []string{"clone", "--no-checkout", "--depth", fmt.Sprintf("%d", vmRunShallowFetchDepth)}
	if strings.TrimSpace(spec.CurrentBranch) != "" {
		cloneArgs = append(cloneArgs, "--single-branch", "--branch", spec.CurrentBranch)
	}
	cloneArgs = append(cloneArgs, gitFileURL(spec.RepoRoot), repoCopyDir)
	if err := runHostCommand(ctx, "git", cloneArgs...); err != nil {
		cleanup()
		return "", nil, fmt.Errorf("clone shallow repo copy: %w", err)
	}
	// The target commit (HEAD or the --from base) may not be in the
	// shallow clone; fetch it explicitly when cat-file can't see it.
	checkoutCommit := vmRunCheckoutCommit(spec)
	if err := runHostCommand(ctx, "git", "-C", repoCopyDir, "cat-file", "-e", checkoutCommit+"^{commit}"); err != nil {
		if err := runHostCommand(ctx, "git", "-C", repoCopyDir, "fetch", "--depth", fmt.Sprintf("%d", vmRunShallowFetchDepth), gitFileURL(spec.RepoRoot), checkoutCommit); err != nil {
			cleanup()
			return "", nil, fmt.Errorf("fetch shallow repo commit %s: %w", checkoutCommit, err)
		}
	}
	// The clone's origin points at the host path, which is useless in
	// the guest: repoint it at the real upstream, or drop it entirely.
	if strings.TrimSpace(spec.OriginURL) != "" {
		if err := runHostCommand(ctx, "git", "-C", repoCopyDir, "remote", "set-url", "origin", spec.OriginURL); err != nil {
			cleanup()
			return "", nil, fmt.Errorf("set origin remote: %w", err)
		}
	} else {
		if err := runHostCommand(ctx, "git", "-C", repoCopyDir, "remote", "remove", "origin"); err != nil {
			cleanup()
			return "", nil, fmt.Errorf("remove placeholder origin remote: %w", err)
		}
	}
	return repoCopyDir, cleanup, nil
}
|
|
|
|
func vmRunCheckoutCommit(spec vmRunRepoSpec) string {
|
|
if strings.TrimSpace(spec.BranchName) != "" {
|
|
return spec.BaseCommit
|
|
}
|
|
return spec.HeadCommit
|
|
}
|
|
|
|
// gitFileURL renders a local repository path as a file:// URL so git
// treats it as a proper remote during clone/fetch.
func gitFileURL(path string) string {
	u := url.URL{Scheme: "file", Path: filepath.ToSlash(path)}
	return u.String()
}
|
|
|
|
func runHostCommand(ctx context.Context, name string, args ...string) error {
|
|
_, err := hostCommandOutputFunc(ctx, name, args...)
|
|
return err
|
|
}
|
|
|
|
// vmRunCheckoutScript emits the guest-side shell script that turns the
// streamed git metadata into a clean checkout: exempt the dir from
// git's ownership check, check out the requested branch/commit, strip
// everything except .git (the live working tree arrives afterwards as
// an overlay), and set the git identity when the host has one.
func vmRunCheckoutScript(spec vmRunRepoSpec) string {
	guestDir := vmRunGuestDir()
	var script strings.Builder
	script.WriteString("set -euo pipefail\n")
	fmt.Fprintf(&script, "DIR=%s\n", shellQuote(guestDir))
	// safe.directory guards against git's dubious-ownership refusal.
	script.WriteString("git config --global --add safe.directory \"$DIR\"\n")
	switch {
	case strings.TrimSpace(spec.BranchName) != "":
		// --branch: create/reset the new branch at the --from base.
		fmt.Fprintf(&script, "git -C \"$DIR\" checkout -B %s %s\n", shellQuote(spec.BranchName), shellQuote(spec.BaseCommit))
	case strings.TrimSpace(spec.CurrentBranch) != "":
		// Mirror the host's current branch at the host's HEAD commit.
		fmt.Fprintf(&script, "git -C \"$DIR\" checkout -B %s %s\n", shellQuote(spec.CurrentBranch), shellQuote(spec.HeadCommit))
	default:
		// Detached HEAD on the host → detached checkout in the guest.
		fmt.Fprintf(&script, "git -C \"$DIR\" checkout --detach %s\n", shellQuote(spec.HeadCommit))
	}
	// Remove the checked-out tree but keep .git; the host working
	// tree is streamed in afterwards so local modifications survive.
	script.WriteString("find \"$DIR\" -mindepth 1 -maxdepth 1 ! -name .git -exec rm -rf {} +\n")
	if strings.TrimSpace(spec.GitUserName) != "" && strings.TrimSpace(spec.GitUserEmail) != "" {
		fmt.Fprintf(&script, "git -C \"$DIR\" config user.name %s\n", shellQuote(spec.GitUserName))
		fmt.Fprintf(&script, "git -C \"$DIR\" config user.email %s\n", shellQuote(spec.GitUserEmail))
	}
	return script.String()
}
|
|
|
|
// vmRunGuestDir returns the fixed guest-side workspace directory that
// vm run checks the repository out into.
func vmRunGuestDir() string {
	const guestWorkspaceDir = "/root/repo"
	return guestWorkspaceDir
}
|
|
|
|
// vmRunToolingHarnessPath is where the tooling bootstrap script gets
// uploaded inside the guest, keyed by repo name.
func vmRunToolingHarnessPath(repoName string) string {
	scriptName := "banger-vm-run-tooling-" + repoName + ".sh"
	return filepath.ToSlash(filepath.Join("/tmp", scriptName))
}
|
|
|
|
// vmRunToolingHarnessLogPath is the guest-side log file the tooling
// bootstrap writes to, keyed by repo name.
func vmRunToolingHarnessLogPath(repoName string) string {
	logName := "vm-run-tooling-" + repoName + ".log"
	return filepath.ToSlash(filepath.Join("/root/.cache/banger", logName))
}
|
|
|
|
func startVMRunToolingHarness(ctx context.Context, client vmRunGuestClient, spec vmRunRepoSpec, progress *vmRunProgressRenderer) error {
|
|
if progress != nil {
|
|
progress.render("starting guest tooling bootstrap")
|
|
}
|
|
plan := buildVMRunToolingPlanFunc(ctx, spec.RepoRoot)
|
|
var uploadLog bytes.Buffer
|
|
if err := client.UploadFile(ctx, vmRunToolingHarnessPath(spec.RepoName), 0o755, []byte(vmRunToolingHarnessScript(spec, plan)), &uploadLog); err != nil {
|
|
return formatVMRunStepError("upload guest tooling bootstrap", err, uploadLog.String())
|
|
}
|
|
var launchLog bytes.Buffer
|
|
if err := client.RunScript(ctx, vmRunToolingHarnessLaunchScript(spec), &launchLog); err != nil {
|
|
return formatVMRunStepError("launch guest tooling bootstrap", err, launchLog.String())
|
|
}
|
|
if progress != nil {
|
|
progress.render("guest tooling log: " + vmRunToolingHarnessLogPath(spec.RepoName))
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// vmRunToolingHarnessScript generates the guest-side bootstrap script
// that installs repo tooling via mise. The whole script is
// best-effort: install failures are logged and swallowed (return 0
// everywhere) so the bootstrap never takes down the session. Note
// `set -uo pipefail` deliberately omits -e for the same reason.
func vmRunToolingHarnessScript(spec vmRunRepoSpec, plan toolingplan.Plan) string {
	var script strings.Builder
	script.WriteString("set -uo pipefail\n")
	fmt.Fprintf(&script, "DIR=%s\n", shellQuote(vmRunGuestDir()))
	script.WriteString("export PATH=/usr/local/bin:/root/.local/share/mise/shims:$PATH\n")
	script.WriteString("if [ -f /etc/profile.d/mise.sh ]; then . /etc/profile.d/mise.sh || true; fi\n")
	script.WriteString("log() { printf '%s\\n' \"$*\"; }\n")
	// run_best_effort: run the command, log a non-zero rc, succeed.
	script.WriteString("run_best_effort() {\n")
	script.WriteString(" \"$@\"\n")
	script.WriteString(" rc=$?\n")
	script.WriteString(" if [ \"$rc\" -ne 0 ]; then\n")
	script.WriteString(" log \"command failed ($rc): $*\"\n")
	script.WriteString(" fi\n")
	script.WriteString(" return 0\n")
	script.WriteString("}\n")
	// run_bounded_best_effort: same, but with a watchdog subshell that
	// TERMs (then KILLs) the command and its children after $1 seconds.
	// A marker file distinguishes "timed out" from "failed".
	script.WriteString("run_bounded_best_effort() {\n")
	script.WriteString(" timeout_secs=\"$1\"\n")
	script.WriteString(" shift\n")
	script.WriteString(" timeout_marker=\"$(mktemp)\"\n")
	script.WriteString(" rm -f \"$timeout_marker\"\n")
	script.WriteString(" \"$@\" &\n")
	script.WriteString(" cmd_pid=$!\n")
	script.WriteString(" (\n")
	script.WriteString(" sleep \"$timeout_secs\"\n")
	script.WriteString(" if kill -0 \"$cmd_pid\" 2>/dev/null; then\n")
	script.WriteString(" : >\"$timeout_marker\"\n")
	script.WriteString(" log \"command timed out after ${timeout_secs}s: $*\"\n")
	script.WriteString(" kill -TERM \"$cmd_pid\" 2>/dev/null || true\n")
	script.WriteString(" if command -v pkill >/dev/null 2>&1; then pkill -TERM -P \"$cmd_pid\" 2>/dev/null || true; fi\n")
	script.WriteString(" sleep 2\n")
	script.WriteString(" kill -KILL \"$cmd_pid\" 2>/dev/null || true\n")
	script.WriteString(" if command -v pkill >/dev/null 2>&1; then pkill -KILL -P \"$cmd_pid\" 2>/dev/null || true; fi\n")
	script.WriteString(" fi\n")
	script.WriteString(" ) &\n")
	script.WriteString(" watchdog_pid=$!\n")
	script.WriteString(" wait \"$cmd_pid\"\n")
	script.WriteString(" rc=$?\n")
	script.WriteString(" kill \"$watchdog_pid\" 2>/dev/null || true\n")
	script.WriteString(" wait \"$watchdog_pid\" 2>/dev/null || true\n")
	script.WriteString(" if [ -f \"$timeout_marker\" ]; then\n")
	script.WriteString(" rm -f \"$timeout_marker\"\n")
	script.WriteString(" return 0\n")
	script.WriteString(" fi\n")
	script.WriteString(" rm -f \"$timeout_marker\"\n")
	script.WriteString(" if [ \"$rc\" -ne 0 ]; then\n")
	script.WriteString(" log \"command failed ($rc): $*\"\n")
	script.WriteString(" fi\n")
	script.WriteString(" return 0\n")
	script.WriteString("}\n")
	// Main body: bail cleanly (exit 0) when the repo dir or mise is
	// missing — this is a background nicety, not a required step.
	script.WriteString("cd \"$DIR\" || { log \"missing repo directory: $DIR\"; exit 0; }\n")
	script.WriteString("MISE_BIN=\"$(command -v mise || true)\"\n")
	script.WriteString("if [ -z \"$MISE_BIN\" ]; then log \"mise not found; skipping guest tooling bootstrap\"; exit 0; fi\n")
	script.WriteString("log \"starting guest tooling bootstrap in $DIR\"\n")
	if len(plan.RepoManagedTools) > 0 {
		fmt.Fprintf(&script, "log %s\n", shellQuote("repo-managed mise tools: "+strings.Join(plan.RepoManagedTools, ", ")))
	}
	// Repo-declared tools first, from .mise.toml / .tool-versions.
	script.WriteString("if [ -f .mise.toml ] || [ -f .tool-versions ]; then\n")
	script.WriteString(" log \"running mise install from repo declarations\"\n")
	script.WriteString(" run_best_effort \"$MISE_BIN\" install\n")
	script.WriteString("fi\n")
	fmt.Fprintf(&script, "INSTALL_TIMEOUT_SECS=%d\n", vmRunToolingInstallTimeoutSeconds)
	// Then the deterministic plan steps, each bounded by the install
	// timeout so one slow download can't stall the whole bootstrap.
	for _, step := range plan.Steps {
		stepLabel := fmt.Sprintf("deterministic install: %s@%s (%s)", step.Tool, step.Version, step.Source)
		fmt.Fprintf(&script, "log %s\n", shellQuote(stepLabel))
		fmt.Fprintf(&script, "run_bounded_best_effort \"$INSTALL_TIMEOUT_SECS\" \"$MISE_BIN\" use -g --pin %s\n", shellQuote(step.Tool+"@"+step.Version))
	}
	for _, skip := range plan.Skips {
		skipLabel := fmt.Sprintf("deterministic skip: %s (%s)", skip.Target, skip.Reason)
		fmt.Fprintf(&script, "log %s\n", shellQuote(skipLabel))
	}
	if len(plan.Steps) > 0 {
		script.WriteString("run_best_effort \"$MISE_BIN\" reshim\n")
	}
	script.WriteString("log \"guest tooling bootstrap finished\"\n")
	return script.String()
}
|
|
|
|
func vmRunToolingHarnessLaunchScript(spec vmRunRepoSpec) string {
|
|
var script strings.Builder
|
|
script.WriteString("set -euo pipefail\n")
|
|
fmt.Fprintf(&script, "HELPER=%s\n", shellQuote(vmRunToolingHarnessPath(spec.RepoName)))
|
|
fmt.Fprintf(&script, "LOG=%s\n", shellQuote(vmRunToolingHarnessLogPath(spec.RepoName)))
|
|
script.WriteString("mkdir -p \"$(dirname \"$LOG\")\"\n")
|
|
script.WriteString("nohup bash \"$HELPER\" >\"$LOG\" 2>&1 </dev/null &\n")
|
|
script.WriteString("disown || true\n")
|
|
return script.String()
|
|
}
|
|
|
|
func formatVMRunStepError(action string, err error, log string) error {
|
|
log = strings.TrimSpace(log)
|
|
if log == "" {
|
|
return fmt.Errorf("%s: %w", action, err)
|
|
}
|
|
return fmt.Errorf("%s: %w: %s", action, err, log)
|
|
}
|
|
|
|
type vmRunProgressRenderer struct {
|
|
out io.Writer
|
|
enabled bool
|
|
lastLine string
|
|
}
|
|
|
|
func newVMRunProgressRenderer(out io.Writer) *vmRunProgressRenderer {
|
|
return &vmRunProgressRenderer{
|
|
out: out,
|
|
enabled: out != nil,
|
|
}
|
|
}
|
|
|
|
func (r *vmRunProgressRenderer) render(detail string) {
|
|
if r == nil || !r.enabled {
|
|
return
|
|
}
|
|
line := formatVMRunProgress(detail)
|
|
if line == "" || line == r.lastLine {
|
|
return
|
|
}
|
|
r.lastLine = line
|
|
_, _ = fmt.Fprintln(r.out, line)
|
|
}
|
|
|
|
func formatVMRunProgress(detail string) string {
|
|
|
|
detail = strings.TrimSpace(detail)
|
|
if detail == "" {
|
|
return ""
|
|
}
|
|
return "[vm run] " + detail
|
|
}
|
|
|
|
func printVMRunWarning(out io.Writer, detail string) {
|
|
detail = strings.TrimSpace(detail)
|
|
if out == nil || detail == "" {
|
|
return
|
|
}
|
|
_, _ = fmt.Fprintln(out, "[vm run] warning: "+detail)
|
|
}
|
|
|
|
// shellQuote single-quotes value for POSIX sh, closing and reopening
// the quote around each embedded apostrophe.
func shellQuote(value string) string {
	escaped := strings.ReplaceAll(value, "'", `'"'"'`)
	return "'" + escaped + "'"
}
|
|
|
|
func absolutizeImageRegisterPaths(params *api.ImageRegisterParams) error {
|
|
return absolutizePaths(
|
|
¶ms.RootfsPath,
|
|
¶ms.WorkSeedPath,
|
|
¶ms.KernelPath,
|
|
¶ms.InitrdPath,
|
|
¶ms.ModulesDir,
|
|
)
|
|
}
|
|
|
|
func absolutizePaths(values ...*string) error {
|
|
var err error
|
|
for _, value := range values {
|
|
if *value == "" || filepath.IsAbs(*value) {
|
|
continue
|
|
}
|
|
*value, err = filepath.Abs(*value)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func printJSON(out anyWriter, v any) error {
|
|
data, err := json.MarshalIndent(v, "", " ")
|
|
if err != nil {
|
|
return err
|
|
}
|
|
_, err = fmt.Fprintln(out, string(data))
|
|
return err
|
|
}
|
|
|
|
func printVMSummary(out anyWriter, vm model.VMRecord) error {
|
|
_, err := fmt.Fprintf(
|
|
out,
|
|
"%s\t%s\t%s\t%s\t%s\t%s\n",
|
|
shortID(vm.ID),
|
|
vm.Name,
|
|
vm.State,
|
|
vm.Runtime.GuestIP,
|
|
model.FormatSizeBytes(vm.Spec.WorkDiskSizeBytes),
|
|
vm.Runtime.DNSName,
|
|
)
|
|
return err
|
|
}
|
|
|
|
func printVMIDList(out anyWriter, vms []model.VMRecord) error {
|
|
for _, vm := range vms {
|
|
if _, err := fmt.Fprintln(out, vm.ID); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func printVMListTable(out anyWriter, vms []model.VMRecord, imageNames map[string]string) error {
|
|
w := tabwriter.NewWriter(out, 0, 8, 2, ' ', 0)
|
|
if _, err := fmt.Fprintln(w, "ID\tNAME\tSTATE\tIMAGE\tIP\tVCPU\tMEM\tDISK\tCREATED"); err != nil {
|
|
return err
|
|
}
|
|
for _, vm := range vms {
|
|
if _, err := fmt.Fprintf(
|
|
w,
|
|
"%s\t%s\t%s\t%s\t%s\t%d\t%d MiB\t%s\t%s\n",
|
|
shortID(vm.ID),
|
|
vm.Name,
|
|
vm.State,
|
|
vmImageLabel(vm.ImageID, imageNames),
|
|
vm.Runtime.GuestIP,
|
|
vm.Spec.VCPUCount,
|
|
vm.Spec.MemoryMiB,
|
|
model.FormatSizeBytes(vm.Spec.WorkDiskSizeBytes),
|
|
relativeTime(vm.CreatedAt),
|
|
); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return w.Flush()
|
|
}
|
|
|
|
func printImageSummary(out anyWriter, image model.Image) error {
|
|
_, err := fmt.Fprintf(out, "%s\t%s\t%t\t%s\n", shortID(image.ID), image.Name, image.Managed, image.RootfsPath)
|
|
return err
|
|
}
|
|
|
|
func imageNameIndex(images []model.Image) map[string]string {
|
|
index := make(map[string]string, len(images))
|
|
for _, image := range images {
|
|
index[image.ID] = image.Name
|
|
}
|
|
return index
|
|
}
|
|
|
|
func vmImageLabel(imageID string, imageNames map[string]string) string {
|
|
if name := strings.TrimSpace(imageNames[imageID]); name != "" {
|
|
return name
|
|
}
|
|
return shortID(imageID)
|
|
}
|
|
|
|
func printImageListTable(out anyWriter, images []model.Image) error {
|
|
w := tabwriter.NewWriter(out, 0, 8, 2, ' ', 0)
|
|
if _, err := fmt.Fprintln(w, "ID\tNAME\tMANAGED\tROOTFS SIZE\tCREATED"); err != nil {
|
|
return err
|
|
}
|
|
for _, image := range images {
|
|
if _, err := fmt.Fprintf(
|
|
w,
|
|
"%s\t%s\t%t\t%s\t%s\n",
|
|
shortID(image.ID),
|
|
image.Name,
|
|
image.Managed,
|
|
rootfsSizeLabel(image.RootfsPath),
|
|
relativeTime(image.CreatedAt),
|
|
); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return w.Flush()
|
|
}
|
|
|
|
func rootfsSizeLabel(path string) string {
|
|
info, err := os.Stat(path)
|
|
if err != nil {
|
|
return "-"
|
|
}
|
|
if info.Size() <= 0 {
|
|
return "0"
|
|
}
|
|
return model.FormatSizeBytes(info.Size())
|
|
}
|
|
|
|
func printVMPortsTable(out anyWriter, result api.VMPortsResult) error {
|
|
type portRow struct {
|
|
Proto string
|
|
Endpoint string
|
|
Process string
|
|
Command string
|
|
Port int
|
|
}
|
|
rows := make([]portRow, 0, len(result.Ports))
|
|
for _, port := range result.Ports {
|
|
rows = append(rows, portRow{
|
|
Proto: port.Proto,
|
|
Endpoint: port.Endpoint,
|
|
Process: port.Process,
|
|
Command: port.Command,
|
|
Port: port.Port,
|
|
})
|
|
}
|
|
sort.Slice(rows, func(i, j int) bool {
|
|
if rows[i].Proto != rows[j].Proto {
|
|
return rows[i].Proto < rows[j].Proto
|
|
}
|
|
if rows[i].Port != rows[j].Port {
|
|
return rows[i].Port < rows[j].Port
|
|
}
|
|
if rows[i].Process != rows[j].Process {
|
|
return rows[i].Process < rows[j].Process
|
|
}
|
|
return rows[i].Command < rows[j].Command
|
|
})
|
|
if len(rows) == 0 {
|
|
return nil
|
|
}
|
|
|
|
w := tabwriter.NewWriter(out, 0, 8, 2, ' ', 0)
|
|
if _, err := fmt.Fprintln(w, "PROTO\tENDPOINT\tPROCESS\tCOMMAND"); err != nil {
|
|
return err
|
|
}
|
|
for _, row := range rows {
|
|
if _, err := fmt.Fprintf(
|
|
w,
|
|
"%s\t%s\t%s\t%s\n",
|
|
row.Proto,
|
|
emptyDash(row.Endpoint),
|
|
emptyDash(row.Process),
|
|
emptyDash(row.Command),
|
|
); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return w.Flush()
|
|
}
|
|
|
|
func printDoctorReport(out anyWriter, report system.Report) error {
|
|
for _, check := range report.Checks {
|
|
status := strings.ToUpper(string(check.Status))
|
|
if _, err := fmt.Fprintf(out, "%s\t%s\n", status, check.Name); err != nil {
|
|
return err
|
|
}
|
|
for _, detail := range check.Details {
|
|
if _, err := fmt.Fprintf(out, " - %s\n", detail); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// emptyDash substitutes "-" for blank table cells; non-blank values
// are returned trimmed.
func emptyDash(value string) string {
	if trimmed := strings.TrimSpace(value); trimmed != "" {
		return trimmed
	}
	return "-"
}
|
|
|
|
// anyWriter is the minimal output sink the print helpers accept. It is
// structurally identical to io.Writer, so any io.Writer satisfies it.
type anyWriter interface {
	Write(p []byte) (n int, err error)
}
|
|
|
|
// runVMCreate kicks off an asynchronous vm-create operation on the
// daemon and polls it to completion, rendering stage progress to
// stderr. Caller cancellation is forwarded to the daemon as a
// best-effort cancel RPC on a fresh short-deadline context (ctx is
// already dead at that point).
func runVMCreate(ctx context.Context, socketPath string, stderr io.Writer, params api.VMCreateParams) (model.VMRecord, error) {
	printVMSpecLine(stderr, params)
	begin, err := vmCreateBeginFunc(ctx, socketPath, params)
	if err != nil {
		return model.VMRecord{}, err
	}
	renderer := newVMCreateProgressRenderer(stderr)
	renderer.render(begin.Operation)

	op := begin.Operation
	for {
		if op.Done {
			renderer.render(op)
			if op.Success && op.VM != nil {
				return *op.VM, nil
			}
			// Failed: prefer the daemon's message, with a generic
			// fallback when none was supplied.
			if strings.TrimSpace(op.Error) == "" {
				return model.VMRecord{}, errors.New("vm create failed")
			}
			return model.VMRecord{}, errors.New(op.Error)
		}

		// Poll every 200ms. The defer cancel() calls below run at
		// function return, which is fine: both branches return
		// immediately after deferring.
		select {
		case <-ctx.Done():
			cancelCtx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			_ = vmCreateCancelFunc(cancelCtx, socketPath, op.ID)
			return model.VMRecord{}, ctx.Err()
		case <-time.After(200 * time.Millisecond):
		}

		status, err := vmCreateStatusFunc(ctx, socketPath, op.ID)
		if err != nil {
			// Distinguish "status RPC failed because we were
			// cancelled" from a genuine RPC failure.
			if ctx.Err() != nil {
				cancelCtx, cancel := context.WithTimeout(context.Background(), time.Second)
				defer cancel()
				_ = vmCreateCancelFunc(cancelCtx, socketPath, op.ID)
				return model.VMRecord{}, ctx.Err()
			}
			return model.VMRecord{}, err
		}
		op = status.Operation
		renderer.render(op)
	}
}
|
|
|
|
type vmCreateProgressRenderer struct {
|
|
out io.Writer
|
|
enabled bool
|
|
lastLine string
|
|
}
|
|
|
|
func newVMCreateProgressRenderer(out io.Writer) *vmCreateProgressRenderer {
|
|
return &vmCreateProgressRenderer{
|
|
out: out,
|
|
enabled: writerSupportsProgress(out),
|
|
}
|
|
}
|
|
|
|
func (r *vmCreateProgressRenderer) render(op api.VMCreateOperation) {
|
|
if r == nil || !r.enabled {
|
|
return
|
|
}
|
|
line := formatVMCreateProgress(op)
|
|
if line == "" || line == r.lastLine {
|
|
return
|
|
}
|
|
r.lastLine = line
|
|
_, _ = fmt.Fprintln(r.out, line)
|
|
}
|
|
|
|
func writerSupportsProgress(out io.Writer) bool {
|
|
file, ok := out.(*os.File)
|
|
if !ok {
|
|
return false
|
|
}
|
|
info, err := file.Stat()
|
|
if err != nil {
|
|
return false
|
|
}
|
|
return info.Mode()&os.ModeCharDevice != 0
|
|
}
|
|
|
|
// withHeartbeat runs fn while emitting a dot to stderr every 2
// seconds so the user sees long-running RPCs (bundle downloads, etc.)
// aren't wedged. No-op when stderr isn't a terminal, so piped or
// logged output stays clean.
func withHeartbeat(stderr io.Writer, label string, fn func() error) error {
	if !writerSupportsProgress(stderr) {
		return fn()
	}
	fmt.Fprintf(stderr, "[%s] ", label)
	// stop tells the ticker goroutine to exit; done confirms it has,
	// so the trailing newline can't race with a late dot.
	stop := make(chan struct{})
	done := make(chan struct{})
	go func() {
		defer close(done)
		ticker := time.NewTicker(2 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-stop:
				return
			case <-ticker.C:
				fmt.Fprint(stderr, ".")
			}
		}
	}()
	err := fn()
	close(stop)
	<-done
	fmt.Fprintln(stderr)
	return err
}
|
|
|
|
func formatVMCreateProgress(op api.VMCreateOperation) string {
|
|
stage := strings.TrimSpace(op.Stage)
|
|
detail := strings.TrimSpace(op.Detail)
|
|
label := vmCreateStageLabel(stage)
|
|
if label == "" && detail == "" {
|
|
return ""
|
|
}
|
|
if label == "" {
|
|
return "[vm create] " + detail
|
|
}
|
|
if detail == "" {
|
|
return "[vm create] " + label
|
|
}
|
|
return "[vm create] " + label + ": " + detail
|
|
}
|
|
|
|
// vmCreateStageLabels maps machine-readable vm-create stage identifiers
// to the human-readable labels shown in progress output.
var vmCreateStageLabels = map[string]string{
	"queued":                "queued",
	"resolve_image":         "resolving image",
	"reserve_vm":            "allocating vm",
	"preflight":             "checking host prerequisites",
	"prepare_rootfs":        "preparing root filesystem",
	"prepare_host_features": "preparing host features",
	"prepare_work_disk":     "preparing work disk",
	"boot_firecracker":      "starting firecracker",
	"wait_vsock_agent":      "waiting for vsock agent",
	"wait_guest_ready":      "waiting for guest services",
	"apply_dns":             "publishing dns",
	"apply_nat":             "configuring nat",
	"finalize":              "finalizing",
	"ready":                 "ready",
}

// vmCreateStageLabel returns the human-readable label for a vm-create
// stage. Unknown stages fall back to the raw (untrimmed) stage string
// with underscores replaced by spaces.
func vmCreateStageLabel(stage string) string {
	if label, ok := vmCreateStageLabels[strings.TrimSpace(stage)]; ok {
		return label
	}
	return strings.ReplaceAll(stage, "_", " ")
}
|
|
|
|
// shortID truncates id to at most 12 characters for compact display.
func shortID(id string) string {
	const maxLen = 12
	if len(id) > maxLen {
		return id[:maxLen]
	}
	return id
}
|
|
|
|
func relativeTime(t time.Time) string {
|
|
if t.IsZero() {
|
|
return "-"
|
|
}
|
|
delta := time.Since(t)
|
|
switch {
|
|
case delta < 30*time.Second:
|
|
return "moments ago"
|
|
case delta < time.Minute:
|
|
return fmt.Sprintf("%d seconds ago", int(delta.Seconds()))
|
|
case delta < 2*time.Minute:
|
|
return "1 minute ago"
|
|
case delta < time.Hour:
|
|
return fmt.Sprintf("%d minutes ago", int(delta.Minutes()))
|
|
case delta < 2*time.Hour:
|
|
return "1 hour ago"
|
|
case delta < 24*time.Hour:
|
|
return fmt.Sprintf("%d hours ago", int(delta.Hours()))
|
|
case delta < 48*time.Hour:
|
|
return "1 day ago"
|
|
case delta < 7*24*time.Hour:
|
|
return fmt.Sprintf("%d days ago", int(delta.Hours()/24))
|
|
case delta < 14*24*time.Hour:
|
|
return "1 week ago"
|
|
default:
|
|
return fmt.Sprintf("%d weeks ago", int(delta.Hours()/(24*7)))
|
|
}
|
|
}
|
|
|
|
func formatBuildInfoBlock(info buildinfo.Info) string {
|
|
return fmt.Sprintf("version: %s\ncommit: %s\nbuilt_at: %s\n", info.Version, info.Commit, info.BuiltAt)
|
|
}
|