cli: split banger.go god file into focused files
Pure code motion — banger.go 3508→240 LOC, same-package decomposition keeps all identifiers visible without export changes. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
3a5f4cd40d
commit
3f6ecb4376
12 changed files with 3478 additions and 3268 deletions
File diff suppressed because it is too large
Load diff
83
internal/cli/commands_daemon.go
Normal file
83
internal/cli/commands_daemon.go
Normal file
|
|
@ -0,0 +1,83 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"banger/internal/api"
|
||||
"banger/internal/buildinfo"
|
||||
"banger/internal/paths"
|
||||
"banger/internal/rpc"
|
||||
"banger/internal/system"
|
||||
"banger/internal/vmdns"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newDaemonCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "daemon",
|
||||
Short: "Manage the banger daemon",
|
||||
RunE: helpNoArgs,
|
||||
}
|
||||
cmd.AddCommand(
|
||||
&cobra.Command{
|
||||
Use: "status",
|
||||
Short: "Show daemon status",
|
||||
Args: noArgsUsage("usage: banger daemon status"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, err := paths.Resolve()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ping, pingErr := daemonPingFunc(cmd.Context(), layout.SocketPath)
|
||||
if pingErr != nil {
|
||||
_, err = fmt.Fprintf(cmd.OutOrStdout(), "stopped\nsocket: %s\nlog: %s\ndns: %s\n", layout.SocketPath, layout.DaemonLog, vmdns.DefaultListenAddr)
|
||||
return err
|
||||
}
|
||||
info := buildinfo.Normalize(ping.Version, ping.Commit, ping.BuiltAt)
|
||||
_, err = fmt.Fprintf(cmd.OutOrStdout(), "running\npid: %d\n%ssocket: %s\nlog: %s\ndns: %s\n", ping.PID, formatBuildInfoBlock(info), layout.SocketPath, layout.DaemonLog, vmdns.DefaultListenAddr)
|
||||
return err
|
||||
},
|
||||
},
|
||||
&cobra.Command{
|
||||
Use: "stop",
|
||||
Short: "Stop the daemon",
|
||||
Args: noArgsUsage("usage: banger daemon stop"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := system.EnsureSudo(cmd.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
layout, err := paths.Resolve()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = rpc.Call[api.ShutdownResult](cmd.Context(), layout.SocketPath, "shutdown", api.Empty{})
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) || strings.Contains(err.Error(), "connect") {
|
||||
_, writeErr := fmt.Fprintln(cmd.OutOrStdout(), "daemon not running")
|
||||
return writeErr
|
||||
}
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), "stopping")
|
||||
return err
|
||||
},
|
||||
},
|
||||
&cobra.Command{
|
||||
Use: "socket",
|
||||
Short: "Print the daemon socket path",
|
||||
Args: noArgsUsage("usage: banger daemon socket"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, err := paths.Resolve()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), layout.SocketPath)
|
||||
return err
|
||||
},
|
||||
},
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
231
internal/cli/commands_image.go
Normal file
231
internal/cli/commands_image.go
Normal file
|
|
@ -0,0 +1,231 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"banger/internal/api"
|
||||
"banger/internal/model"
|
||||
"banger/internal/rpc"
|
||||
"banger/internal/system"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newImageCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "image",
|
||||
Short: "Manage images",
|
||||
RunE: helpNoArgs,
|
||||
}
|
||||
cmd.AddCommand(
|
||||
newImageRegisterCommand(),
|
||||
newImagePullCommand(),
|
||||
newImagePromoteCommand(),
|
||||
newImageListCommand(),
|
||||
newImageShowCommand(),
|
||||
newImageDeleteCommand(),
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newImageRegisterCommand() *cobra.Command {
|
||||
var params api.ImageRegisterParams
|
||||
cmd := &cobra.Command{
|
||||
Use: "register",
|
||||
Short: "Register or update an unmanaged image",
|
||||
Args: noArgsUsage("usage: banger image register --name <name> --rootfs <path> [--work-seed <path>] (--kernel <path> [--initrd <path>] [--modules <dir>] | --kernel-ref <name>)"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if strings.TrimSpace(params.KernelRef) != "" && (params.KernelPath != "" || params.InitrdPath != "" || params.ModulesDir != "") {
|
||||
return errors.New("--kernel-ref is mutually exclusive with --kernel/--initrd/--modules")
|
||||
}
|
||||
if err := absolutizeImageRegisterPaths(¶ms); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := system.EnsureSudo(cmd.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := rpc.Call[api.ImageShowResult](cmd.Context(), layout.SocketPath, "image.register", params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printImageSummary(cmd.OutOrStdout(), result.Image)
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVar(¶ms.Name, "name", "", "image name")
|
||||
cmd.Flags().StringVar(¶ms.RootfsPath, "rootfs", "", "rootfs path")
|
||||
cmd.Flags().StringVar(¶ms.WorkSeedPath, "work-seed", "", "work-seed path")
|
||||
cmd.Flags().StringVar(¶ms.KernelPath, "kernel", "", "kernel path")
|
||||
cmd.Flags().StringVar(¶ms.InitrdPath, "initrd", "", "initrd path")
|
||||
cmd.Flags().StringVar(¶ms.ModulesDir, "modules", "", "modules dir")
|
||||
cmd.Flags().StringVar(¶ms.KernelRef, "kernel-ref", "", "name of a cataloged kernel (see 'banger kernel list')")
|
||||
cmd.Flags().BoolVar(¶ms.Docker, "docker", false, "mark image as docker-prepared")
|
||||
_ = cmd.RegisterFlagCompletionFunc("kernel-ref", completeKernelNames)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// newImagePullCommand builds `banger image pull`, which fetches either a
// catalog bundle (by name) or an OCI image (by reference) through the
// daemon's image.pull RPC and prints the resulting image summary.
func newImagePullCommand() *cobra.Command {
	var (
		params  api.ImagePullParams
		sizeRaw string // human-readable --size value; parsed before the RPC
	)
	cmd := &cobra.Command{
		Use:   "pull <name-or-oci-ref>",
		Short: "Pull an image bundle (catalog name) or OCI image and register it",
		Long: strings.TrimSpace(`
Pull an image into banger. Two paths:

  • Catalog name (e.g. 'debian-bookworm')
    Fetches a pre-built bundle from the embedded imagecat catalog.
    Kernel-ref comes from the catalog entry; --kernel-ref still
    overrides.

  • OCI reference (e.g. 'docker.io/library/debian:bookworm')
    Pulls the image, flattens its layers, fixes ownership, injects
    banger's guest agents. --kernel-ref or direct --kernel/--initrd/
    --modules are required.

Use 'banger image catalog' to see available catalog names (once that
subcommand lands).
`),
		Example: strings.TrimSpace(`
banger image pull debian-bookworm
banger image pull debian-bookworm --name sandbox
banger image pull docker.io/library/debian:bookworm --kernel-ref generic-6.12
`),
		Args: exactArgsUsage(1, "usage: banger image pull <name-or-oci-ref> [--name <name>] [--kernel-ref <name>] [--kernel <path>] [--initrd <path>] [--modules <dir>] [--size <human>]"),
		RunE: func(cmd *cobra.Command, args []string) error {
			params.Ref = args[0]
			// A catalog kernel-ref conflicts with explicit kernel
			// artifact paths; reject the combination up front.
			if strings.TrimSpace(params.KernelRef) != "" && (params.KernelPath != "" || params.InitrdPath != "" || params.ModulesDir != "") {
				return errors.New("--kernel-ref is mutually exclusive with --kernel/--initrd/--modules")
			}
			if strings.TrimSpace(sizeRaw) != "" {
				size, err := model.ParseSize(sizeRaw)
				if err != nil {
					return fmt.Errorf("--size: %w", err)
				}
				params.SizeBytes = size
			}
			// Paths travel to the daemon process, which may have a
			// different working directory — make them absolute here.
			if err := absolutizePaths(&params.KernelPath, &params.InitrdPath, &params.ModulesDir); err != nil {
				return err
			}
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			var result api.ImageShowResult
			// Pulls can take a long time; withHeartbeat keeps stderr
			// showing signs of life while the RPC runs.
			err = withHeartbeat(cmd.ErrOrStderr(), "image pull", func() error {
				var callErr error
				result, callErr = rpc.Call[api.ImageShowResult](cmd.Context(), layout.SocketPath, "image.pull", params)
				return callErr
			})
			if err != nil {
				return err
			}
			return printImageSummary(cmd.OutOrStdout(), result.Image)
		},
	}
	cmd.Flags().StringVar(&params.Name, "name", "", "image name (defaults to the ref's repo+tag, sanitised)")
	cmd.Flags().StringVar(&params.KernelPath, "kernel", "", "kernel path")
	cmd.Flags().StringVar(&params.InitrdPath, "initrd", "", "initrd path")
	cmd.Flags().StringVar(&params.ModulesDir, "modules", "", "modules dir")
	cmd.Flags().StringVar(&params.KernelRef, "kernel-ref", "", "name of a cataloged kernel (see 'banger kernel list')")
	cmd.Flags().StringVar(&sizeRaw, "size", "", "ext4 image size (e.g. 4GiB); defaults to content + 25%, min 1GiB")
	_ = cmd.RegisterFlagCompletionFunc("kernel-ref", completeKernelNames)
	return cmd
}
|
||||
|
||||
func newImagePromoteCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "promote <id-or-name>",
|
||||
Short: "Promote an unmanaged image to a managed artifact",
|
||||
Args: exactArgsUsage(1, "usage: banger image promote <id-or-name>"),
|
||||
ValidArgsFunction: completeImageNameOnlyAtPos0,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := system.EnsureSudo(cmd.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := rpc.Call[api.ImageShowResult](cmd.Context(), layout.SocketPath, "image.promote", api.ImageRefParams{IDOrName: args[0]})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printImageSummary(cmd.OutOrStdout(), result.Image)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newImageListCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "list",
|
||||
Aliases: []string{"ls"},
|
||||
Short: "List images",
|
||||
Args: noArgsUsage("usage: banger image list"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := rpc.Call[api.ImageListResult](cmd.Context(), layout.SocketPath, "image.list", api.Empty{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printImageListTable(cmd.OutOrStdout(), result.Images)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newImageShowCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "show <id-or-name>",
|
||||
Short: "Show image details",
|
||||
Args: exactArgsUsage(1, "usage: banger image show <id-or-name>"),
|
||||
ValidArgsFunction: completeImageNameOnlyAtPos0,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := rpc.Call[api.ImageShowResult](cmd.Context(), layout.SocketPath, "image.show", api.ImageRefParams{IDOrName: args[0]})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printJSON(cmd.OutOrStdout(), result.Image)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newImageDeleteCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "delete <id-or-name>",
|
||||
Aliases: []string{"rm"},
|
||||
Short: "Delete an image",
|
||||
Args: exactArgsUsage(1, "usage: banger image delete <id-or-name>"),
|
||||
ValidArgsFunction: completeImageNameOnlyAtPos0,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := system.EnsureSudo(cmd.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := rpc.Call[api.ImageShowResult](cmd.Context(), layout.SocketPath, "image.delete", api.ImageRefParams{IDOrName: args[0]})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printImageSummary(cmd.OutOrStdout(), result.Image)
|
||||
},
|
||||
}
|
||||
}
|
||||
441
internal/cli/commands_internal.go
Normal file
441
internal/cli/commands_internal.go
Normal file
|
|
@ -0,0 +1,441 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"banger/internal/config"
|
||||
"banger/internal/hostnat"
|
||||
"banger/internal/imagecat"
|
||||
"banger/internal/imagepull"
|
||||
"banger/internal/model"
|
||||
"banger/internal/paths"
|
||||
"banger/internal/system"
|
||||
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newInternalCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "internal",
|
||||
Hidden: true,
|
||||
RunE: helpNoArgs,
|
||||
}
|
||||
cmd.AddCommand(
|
||||
newInternalNATCommand(),
|
||||
newInternalWorkSeedCommand(),
|
||||
newInternalSSHKeyPathCommand(),
|
||||
newInternalFirecrackerPathCommand(),
|
||||
newInternalVSockAgentPathCommand(),
|
||||
newInternalMakeBundleCommand(),
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newInternalSSHKeyPathCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "ssh-key-path",
|
||||
Hidden: true,
|
||||
Args: noArgsUsage("usage: banger internal ssh-key-path"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, err := paths.Resolve()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg, err := config.Load(layout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), cfg.SSHKeyPath)
|
||||
return err
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newInternalFirecrackerPathCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "firecracker-path",
|
||||
Hidden: true,
|
||||
Args: noArgsUsage("usage: banger internal firecracker-path"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, err := paths.Resolve()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg, err := config.Load(layout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.TrimSpace(cfg.FirecrackerBin) == "" {
|
||||
return errors.New("firecracker binary not configured; install firecracker or set firecracker_bin")
|
||||
}
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), cfg.FirecrackerBin)
|
||||
return err
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newInternalVSockAgentPathCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "vsock-agent-path",
|
||||
Hidden: true,
|
||||
Args: noArgsUsage("usage: banger internal vsock-agent-path"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
path, err := paths.CompanionBinaryPath("banger-vsock-agent")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintln(cmd.OutOrStdout(), path)
|
||||
return err
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newInternalMakeBundleCommand() *cobra.Command {
|
||||
var (
|
||||
rootfsTarPath string
|
||||
name string
|
||||
distro string
|
||||
arch string
|
||||
kernelRef string
|
||||
description string
|
||||
sizeSpec string
|
||||
outPath string
|
||||
)
|
||||
cmd := &cobra.Command{
|
||||
Use: "make-bundle",
|
||||
Hidden: true,
|
||||
Short: "Build a banger image bundle (.tar.zst) from a flat rootfs tar",
|
||||
Args: noArgsUsage("usage: banger internal make-bundle --rootfs-tar <file|-> --name <n> --out <bundle.tar.zst>"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runInternalMakeBundle(cmd, internalMakeBundleOpts{
|
||||
rootfsTarPath: rootfsTarPath,
|
||||
name: name,
|
||||
distro: distro,
|
||||
arch: arch,
|
||||
kernelRef: kernelRef,
|
||||
description: description,
|
||||
sizeSpec: sizeSpec,
|
||||
outPath: outPath,
|
||||
})
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVar(&rootfsTarPath, "rootfs-tar", "", "flat rootfs tar file, or '-' for stdin")
|
||||
cmd.Flags().StringVar(&name, "name", "", "bundle name (filesystem-safe identifier)")
|
||||
cmd.Flags().StringVar(&distro, "distro", "", "distro label (e.g. debian)")
|
||||
cmd.Flags().StringVar(&arch, "arch", "x86_64", "architecture label")
|
||||
cmd.Flags().StringVar(&kernelRef, "kernel-ref", "", "kernelcat entry name this image pairs with")
|
||||
cmd.Flags().StringVar(&description, "description", "", "short description")
|
||||
cmd.Flags().StringVar(&sizeSpec, "size", "", "rootfs ext4 size (e.g. 4G); defaults to tree size + 25%")
|
||||
cmd.Flags().StringVar(&outPath, "out", "", "output bundle path (.tar.zst)")
|
||||
return cmd
|
||||
}
|
||||
|
||||
// internalMakeBundleOpts carries the flag values for
// `banger internal make-bundle` into runInternalMakeBundle.
type internalMakeBundleOpts struct {
	rootfsTarPath string // flat rootfs tar file, or "-" for stdin
	name          string // bundle name (filesystem-safe identifier)
	distro        string // distro label recorded in the manifest, e.g. "debian"
	arch          string // architecture label; defaults to "x86_64" when blank
	kernelRef     string // kernelcat entry name this image pairs with
	description   string // short description recorded in the manifest
	sizeSpec      string // requested rootfs ext4 size (e.g. "4G"); empty = auto
	outPath       string // output bundle path (.tar.zst)
}
|
||||
|
||||
// runInternalMakeBundle implements `banger internal make-bundle`: it
// extracts a flat rootfs tar into a staging directory, builds an ext4
// rootfs image from it, injects the guest agents, writes a manifest, and
// packages everything into a zstd-compressed tar bundle at opts.outPath.
// Progress lines go to stderr; the final bundle path/sha256/size summary
// goes to stdout.
func runInternalMakeBundle(cmd *cobra.Command, opts internalMakeBundleOpts) error {
	// Validate inputs before doing any filesystem work.
	if err := imagecat.ValidateName(opts.name); err != nil {
		return err
	}
	if strings.TrimSpace(opts.rootfsTarPath) == "" {
		return errors.New("--rootfs-tar is required")
	}
	if strings.TrimSpace(opts.outPath) == "" {
		return errors.New("--out is required")
	}
	if strings.TrimSpace(opts.arch) == "" {
		opts.arch = "x86_64"
	}

	// sizeBytes stays 0 unless --size was given; auto-sizing happens below.
	var sizeBytes int64
	if s := strings.TrimSpace(opts.sizeSpec); s != "" {
		n, err := model.ParseSize(s)
		if err != nil {
			return fmt.Errorf("parse --size: %w", err)
		}
		sizeBytes = n
	}

	ctx := cmd.Context()
	stagingRoot, err := os.MkdirTemp("", "banger-mkbundle-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(stagingRoot)
	rootfsTree := filepath.Join(stagingRoot, "rootfs")
	if err := os.MkdirAll(rootfsTree, 0o755); err != nil {
		return err
	}

	// "-" reads the rootfs tar from stdin; anything else is a file path.
	var tarReader io.Reader
	if opts.rootfsTarPath == "-" {
		tarReader = cmd.InOrStdin()
	} else {
		f, err := os.Open(opts.rootfsTarPath)
		if err != nil {
			return fmt.Errorf("open rootfs tar: %w", err)
		}
		defer f.Close()
		tarReader = f
	}

	fmt.Fprintln(cmd.ErrOrStderr(), "[make-bundle] extracting rootfs")
	meta, err := imagepull.FlattenTar(ctx, tarReader, rootfsTree)
	if err != nil {
		return fmt.Errorf("flatten rootfs: %w", err)
	}

	// docker create drops /.dockerenv (and containerd drops
	// /run/.containerenv) into the container's writable layer, so
	// `docker export` includes them in the tar. systemd-detect-virt
	// reads those files and flags the boot as virtualization=docker,
	// which disables udev device-unit activation (including the work-
	// disk dev-vdb.device) and leaves systemd waiting forever. Strip
	// them before building the ext4.
	for _, marker := range []string{".dockerenv", "run/.containerenv"} {
		path := filepath.Join(rootfsTree, marker)
		if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
			return fmt.Errorf("strip %s: %w", marker, err)
		}
		delete(meta.Entries, marker)
	}

	// Auto-size the ext4 image when no --size was given.
	if sizeBytes <= 0 {
		treeSize, err := dirSize(rootfsTree)
		if err != nil {
			return fmt.Errorf("size rootfs tree: %w", err)
		}
		// +50% headroom for ext4 overhead (inode tables, block-group
		// descriptors, journal, 5% reserved margin).
		sizeBytes = treeSize + treeSize/2
		if sizeBytes < imagepull.MinExt4Size {
			sizeBytes = imagepull.MinExt4Size
		}
	}

	ext4Path := filepath.Join(stagingRoot, imagecat.RootfsFilename)
	runner := system.NewRunner()
	fmt.Fprintf(cmd.ErrOrStderr(), "[make-bundle] building rootfs.ext4 (%d bytes)\n", sizeBytes)
	if err := imagepull.BuildExt4(ctx, runner, rootfsTree, ext4Path, sizeBytes); err != nil {
		return fmt.Errorf("build ext4: %w", err)
	}
	fmt.Fprintln(cmd.ErrOrStderr(), "[make-bundle] applying ownership fixup")
	if err := imagepull.ApplyOwnership(ctx, runner, ext4Path, meta); err != nil {
		return fmt.Errorf("apply ownership: %w", err)
	}
	fmt.Fprintln(cmd.ErrOrStderr(), "[make-bundle] injecting guest agents")
	vsockBin, err := paths.CompanionBinaryPath("banger-vsock-agent")
	if err != nil {
		return fmt.Errorf("locate vsock agent: %w", err)
	}
	if err := imagepull.InjectGuestAgents(ctx, runner, ext4Path, imagepull.GuestAgentAssets{VsockAgentBin: vsockBin}); err != nil {
		return fmt.Errorf("inject guest agents: %w", err)
	}

	// Write the bundle manifest alongside the ext4 in the staging dir.
	manifest := imagecat.Manifest{
		Name:        opts.name,
		Distro:      strings.TrimSpace(opts.distro),
		Arch:        opts.arch,
		KernelRef:   strings.TrimSpace(opts.kernelRef),
		Description: strings.TrimSpace(opts.description),
	}
	manifestPath := filepath.Join(stagingRoot, imagecat.ManifestFilename)
	manifestData, err := json.MarshalIndent(manifest, "", " ")
	if err != nil {
		return err
	}
	if err := os.WriteFile(manifestPath, append(manifestData, '\n'), 0o644); err != nil {
		return err
	}

	fmt.Fprintln(cmd.ErrOrStderr(), "[make-bundle] packaging bundle")
	if err := writeBundleTarZst(opts.outPath, ext4Path, manifestPath); err != nil {
		return fmt.Errorf("write bundle: %w", err)
	}

	// Report the final artifact's checksum and size on stdout.
	sum, err := sha256HexFile(opts.outPath)
	if err != nil {
		return err
	}
	stat, err := os.Stat(opts.outPath)
	if err != nil {
		return err
	}
	fmt.Fprintf(cmd.OutOrStdout(), "bundle: %s\nsha256: %s\nsize: %d\n", opts.outPath, sum, stat.Size())
	return nil
}
|
||||
|
||||
// dirSize returns the total size in bytes of all regular files under root.
// Directories, symlinks, and other special entries contribute nothing.
// On a walk error, the partial sum accumulated so far is returned with it.
func dirSize(root string) (int64, error) {
	var sum int64
	visit := func(_ string, entry fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if !entry.Type().IsRegular() {
			return nil
		}
		fi, statErr := entry.Info()
		if statErr != nil {
			return statErr
		}
		sum += fi.Size()
		return nil
	}
	err := filepath.WalkDir(root, visit)
	return sum, err
}
|
||||
|
||||
// writeBundleTarZst packages the rootfs ext4 image and the bundle manifest
// into a zstd-compressed tar archive at outPath, creating parent
// directories as needed. Entries are stored under their catalog names
// (imagecat.RootfsFilename / imagecat.ManifestFilename). The tar and zstd
// writers are closed explicitly, innermost first, so their trailers are
// flushed before the file's own Close reports the final write error.
func writeBundleTarZst(outPath, rootfsPath, manifestPath string) error {
	if err := os.MkdirAll(filepath.Dir(outPath), 0o755); err != nil {
		return err
	}
	out, err := os.OpenFile(outPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
	if err != nil {
		return err
	}
	// The defer covers early error returns; the success path closes
	// explicitly below so its error is reported (the second Close on an
	// already-closed *os.File is harmless and its error is discarded).
	defer out.Close()
	zw, err := zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
	if err != nil {
		return err
	}
	tw := tar.NewWriter(zw)
	for _, src := range []struct{ path, name string }{
		{rootfsPath, imagecat.RootfsFilename},
		{manifestPath, imagecat.ManifestFilename},
	} {
		if err := writeBundleFile(tw, src.path, src.name); err != nil {
			// Best-effort cleanup; the write error is what matters.
			_ = tw.Close()
			_ = zw.Close()
			return err
		}
	}
	if err := tw.Close(); err != nil {
		_ = zw.Close()
		return err
	}
	if err := zw.Close(); err != nil {
		return err
	}
	return out.Close()
}
|
||||
|
||||
// writeBundleFile appends one regular-file entry called name to tw,
// streaming the bytes of src. The entry is always written with mode 0644
// regardless of the source file's permissions; the source's mtime is kept.
func writeBundleFile(tw *tar.Writer, src, name string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	info, err := in.Stat()
	if err != nil {
		return err
	}
	hdr := &tar.Header{
		Name:     name,
		Size:     info.Size(),
		Mode:     0o644,
		Typeflag: tar.TypeReg,
		ModTime:  info.ModTime(),
	}
	if err = tw.WriteHeader(hdr); err != nil {
		return err
	}
	_, err = io.Copy(tw, in)
	return err
}
|
||||
|
||||
// sha256HexFile streams the file at path through SHA-256 and returns the
// digest as a lowercase hex string.
func sha256HexFile(path string) (string, error) {
	in, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer in.Close()
	digest := sha256.New()
	if _, err = io.Copy(digest, in); err != nil {
		return "", err
	}
	return hex.EncodeToString(digest.Sum(nil)), nil
}
|
||||
|
||||
func newInternalWorkSeedCommand() *cobra.Command {
|
||||
var rootfsPath string
|
||||
var outPath string
|
||||
cmd := &cobra.Command{
|
||||
Use: "work-seed",
|
||||
Hidden: true,
|
||||
Args: noArgsUsage("usage: banger internal work-seed --rootfs <path> [--out <path>]"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
rootfsPath = strings.TrimSpace(rootfsPath)
|
||||
outPath = strings.TrimSpace(outPath)
|
||||
if rootfsPath == "" {
|
||||
return errors.New("rootfs path is required")
|
||||
}
|
||||
if outPath == "" {
|
||||
outPath = system.WorkSeedPath(rootfsPath)
|
||||
}
|
||||
if err := system.EnsureSudo(cmd.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
return system.BuildWorkSeedImage(cmd.Context(), system.NewRunner(), rootfsPath, outPath)
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVar(&rootfsPath, "rootfs", "", "rootfs image path")
|
||||
cmd.Flags().StringVar(&outPath, "out", "", "output work-seed image path")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newInternalNATCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "nat",
|
||||
Hidden: true,
|
||||
RunE: helpNoArgs,
|
||||
}
|
||||
cmd.AddCommand(
|
||||
newInternalNATActionCommand("up", true),
|
||||
newInternalNATActionCommand("down", false),
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newInternalNATActionCommand(use string, enable bool) *cobra.Command {
|
||||
var guestIP string
|
||||
var tapDevice string
|
||||
cmd := &cobra.Command{
|
||||
Use: use,
|
||||
Hidden: true,
|
||||
Args: noArgsUsage("usage: banger internal nat " + use + " --guest-ip <ip> --tap <tap-device>"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
guestIP = strings.TrimSpace(guestIP)
|
||||
tapDevice = strings.TrimSpace(tapDevice)
|
||||
if guestIP == "" {
|
||||
return errors.New("guest IP is required")
|
||||
}
|
||||
if tapDevice == "" {
|
||||
return errors.New("tap device is required")
|
||||
}
|
||||
if err := system.EnsureSudo(cmd.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
return hostnat.Ensure(cmd.Context(), system.NewRunner(), guestIP, tapDevice, enable)
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVar(&guestIP, "guest-ip", "", "guest IPv4 address")
|
||||
cmd.Flags().StringVar(&tapDevice, "tap", "", "tap device name")
|
||||
return cmd
|
||||
}
|
||||
161
internal/cli/commands_kernel.go
Normal file
161
internal/cli/commands_kernel.go
Normal file
|
|
@ -0,0 +1,161 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"banger/internal/api"
|
||||
"banger/internal/rpc"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newKernelCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "kernel",
|
||||
Short: "Manage the local kernel catalog",
|
||||
RunE: helpNoArgs,
|
||||
}
|
||||
cmd.AddCommand(
|
||||
newKernelListCommand(),
|
||||
newKernelShowCommand(),
|
||||
newKernelRmCommand(),
|
||||
newKernelImportCommand(),
|
||||
newKernelPullCommand(),
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newKernelPullCommand() *cobra.Command {
|
||||
var force bool
|
||||
cmd := &cobra.Command{
|
||||
Use: "pull <name>",
|
||||
Short: "Download a cataloged kernel bundle",
|
||||
Args: exactArgsUsage(1, "usage: banger kernel pull <name> [--force]"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var result api.KernelShowResult
|
||||
err = withHeartbeat(cmd.ErrOrStderr(), "kernel pull", func() error {
|
||||
var callErr error
|
||||
result, callErr = rpc.Call[api.KernelShowResult](cmd.Context(), layout.SocketPath, "kernel.pull", api.KernelPullParams{Name: args[0], Force: force})
|
||||
return callErr
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printJSON(cmd.OutOrStdout(), result.Entry)
|
||||
},
|
||||
}
|
||||
cmd.Flags().BoolVar(&force, "force", false, "re-pull even if already present")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newKernelImportCommand() *cobra.Command {
|
||||
var params api.KernelImportParams
|
||||
cmd := &cobra.Command{
|
||||
Use: "import <name>",
|
||||
Short: "Import a kernel bundle produced by scripts/make-*-kernel.sh",
|
||||
Long: "Copy the kernel, optional initrd, and optional modules directory from <from> into the local kernel catalog keyed by <name>. <from> is usually build/manual/void-kernel or build/manual/alpine-kernel.",
|
||||
Args: exactArgsUsage(1, "usage: banger kernel import <name> --from <dir>"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
params.Name = args[0]
|
||||
if strings.TrimSpace(params.FromDir) == "" {
|
||||
return errors.New("--from <dir> is required")
|
||||
}
|
||||
abs, err := filepath.Abs(params.FromDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
params.FromDir = abs
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := rpc.Call[api.KernelShowResult](cmd.Context(), layout.SocketPath, "kernel.import", params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printJSON(cmd.OutOrStdout(), result.Entry)
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVar(¶ms.FromDir, "from", "", "directory produced by make-*-kernel.sh (e.g. build/manual/void-kernel)")
|
||||
cmd.Flags().StringVar(¶ms.Distro, "distro", "", "distribution label stored in the manifest (e.g. void, alpine)")
|
||||
cmd.Flags().StringVar(¶ms.Arch, "arch", "", "architecture label stored in the manifest (e.g. x86_64)")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newKernelListCommand() *cobra.Command {
|
||||
var available bool
|
||||
cmd := &cobra.Command{
|
||||
Use: "list",
|
||||
Aliases: []string{"ls"},
|
||||
Short: "List kernels (local by default, or --available for the catalog)",
|
||||
Args: noArgsUsage("usage: banger kernel list [--available]"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if available {
|
||||
result, err := rpc.Call[api.KernelCatalogResult](cmd.Context(), layout.SocketPath, "kernel.catalog", api.Empty{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printKernelCatalogTable(cmd.OutOrStdout(), result.Entries)
|
||||
}
|
||||
result, err := rpc.Call[api.KernelListResult](cmd.Context(), layout.SocketPath, "kernel.list", api.Empty{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printKernelListTable(cmd.OutOrStdout(), result.Entries)
|
||||
},
|
||||
}
|
||||
cmd.Flags().BoolVar(&available, "available", false, "show the built-in catalog (with pulled/available status) instead of local entries")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newKernelShowCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "show <name>",
|
||||
Short: "Show kernel catalog entry details",
|
||||
Args: exactArgsUsage(1, "usage: banger kernel show <name>"),
|
||||
ValidArgsFunction: completeKernelNameOnlyAtPos0,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := rpc.Call[api.KernelShowResult](cmd.Context(), layout.SocketPath, "kernel.show", api.KernelRefParams{Name: args[0]})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printJSON(cmd.OutOrStdout(), result.Entry)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newKernelRmCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "rm <name>",
|
||||
Aliases: []string{"remove", "delete"},
|
||||
Short: "Remove a kernel catalog entry",
|
||||
Args: exactArgsUsage(1, "usage: banger kernel rm <name>"),
|
||||
ValidArgsFunction: completeKernelNameOnlyAtPos0,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := rpc.Call[api.Empty](cmd.Context(), layout.SocketPath, "kernel.delete", api.KernelRefParams{Name: args[0]}); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintf(cmd.OutOrStdout(), "removed %s\n", args[0])
|
||||
return err
|
||||
},
|
||||
}
|
||||
}
|
||||
924
internal/cli/commands_vm.go
Normal file
924
internal/cli/commands_vm.go
Normal file
|
|
@ -0,0 +1,924 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
|
||||
"banger/internal/api"
|
||||
"banger/internal/config"
|
||||
"banger/internal/daemon/workspace"
|
||||
"banger/internal/model"
|
||||
"banger/internal/paths"
|
||||
"banger/internal/rpc"
|
||||
"banger/internal/system"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newVMCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "vm",
|
||||
Short: "Manage virtual machines",
|
||||
RunE: helpNoArgs,
|
||||
}
|
||||
cmd.AddCommand(
|
||||
newVMCreateCommand(),
|
||||
newVMRunCommand(),
|
||||
newVMListCommand(),
|
||||
newVMShowCommand(),
|
||||
newVMActionCommand("start", "Start a VM", "vm.start"),
|
||||
newVMActionCommand("stop", "Stop a VM", "vm.stop"),
|
||||
newVMKillCommand(),
|
||||
newVMActionCommand("restart", "Restart a VM", "vm.restart"),
|
||||
newVMActionCommand("delete", "Delete a VM", "vm.delete", "rm"),
|
||||
newVMPruneCommand(),
|
||||
newVMSetCommand(),
|
||||
newVMSSHCommand(),
|
||||
newVMWorkspaceCommand(),
|
||||
newVMSessionCommand(),
|
||||
newVMLogsCommand(),
|
||||
newVMStatsCommand(),
|
||||
newVMPortsCommand(),
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// newVMRunCommand builds "banger vm run": create a sandbox VM, then
// either drop into an interactive ssh session or run a one-shot
// command, optionally preparing a workspace from a local repo first.
func newVMRunCommand() *cobra.Command {
	// Sizing defaults are resolved once at command-build time from
	// config + host heuristics so flag help shows effective values.
	defaults := effectiveVMDefaults()
	var (
		name              string
		imageName         string
		vcpu              = defaults.VCPUCount
		memory            = defaults.MemoryMiB
		systemOverlaySize = model.FormatSizeBytes(defaults.SystemOverlaySizeByte)
		workDiskSize      = model.FormatSizeBytes(defaults.WorkDiskSizeBytes)
		natEnabled        bool
		branchName        string
		fromRef           = "HEAD"
		removeOnExit      bool
	)
	cmd := &cobra.Command{
		Use:   "run [path] [-- command args...]",
		Short: "Create and enter a sandbox VM",
		Long: strings.TrimSpace(`
Create a sandbox VM and either drop into an interactive shell or run a command.

Three modes:
  banger vm run                      bare sandbox, drops into ssh
  banger vm run ./repo               workspace sandbox, drops into ssh at /root/repo
  banger vm run ./repo -- make test  workspace, runs command, exits with its status
`),
		Args: cobra.ArbitraryArgs,
		Example: strings.TrimSpace(`
  banger vm run
  banger vm run ../repo --name agent-box --branch feature/demo
  banger vm run ../repo -- make test
  banger vm run -- uname -a
`),
		RunE: func(cmd *cobra.Command, args []string) error {
			// --branch must carry a non-blank name, and --from is only
			// meaningful together with --branch.
			if cmd.Flags().Changed("branch") && strings.TrimSpace(branchName) == "" {
				return errors.New("--branch requires a branch name")
			}
			if cmd.Flags().Changed("from") && strings.TrimSpace(branchName) == "" {
				return errors.New("--from requires --branch")
			}

			// Split positional args from the post-"--" command (presumably
			// at the "--" separator per the Use string — behavior lives in
			// splitVMRunArgs). At most one path argument is accepted.
			pathArgs, commandArgs := splitVMRunArgs(cmd, args)
			if len(pathArgs) > 1 {
				return errors.New("usage: banger vm run [path] [-- command args...]")
			}
			sourcePath := ""
			if len(pathArgs) == 1 {
				sourcePath = pathArgs[0]
			}
			if sourcePath == "" && strings.TrimSpace(branchName) != "" {
				return errors.New("--branch requires a path argument")
			}

			// Preflight the repo path before any expensive VM work.
			var repoPtr *vmRunRepo
			if sourcePath != "" {
				resolved, err := vmRunPreflightRepo(cmd.Context(), sourcePath)
				if err != nil {
					return err
				}
				repoPtr = &vmRunRepo{sourcePath: resolved, branchName: branchName, fromRef: fromRef}
			}

			layout, err := paths.Resolve()
			if err != nil {
				return err
			}
			cfg, err := config.Load(layout)
			if err != nil {
				return err
			}
			// Workspace mode validates the full run prerequisites; the
			// bare-sandbox path only needs the ssh prerequisites.
			if repoPtr != nil {
				if err := validateVMRunPrereqs(cfg); err != nil {
					return err
				}
			} else {
				if err := validateSSHPrereqs(cfg); err != nil {
					return err
				}
			}
			params, err := vmCreateParamsFromFlags(cmd, name, imageName, vcpu, memory, systemOverlaySize, workDiskSize, natEnabled, false)
			if err != nil {
				return err
			}
			// Prompt for sudo before starting the daemon so the password
			// prompt doesn't appear mid-flow.
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			layout, cfg, err = ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			return runVMRun(cmd.Context(), layout.SocketPath, cfg, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), params, repoPtr, commandArgs, removeOnExit)
		},
	}
	cmd.Flags().StringVar(&name, "name", "", "vm name")
	cmd.Flags().StringVar(&imageName, "image", "", "image name or id (defaults to config's default_image_name; auto-pulled from imagecat if missing)")
	cmd.Flags().IntVar(&vcpu, "vcpu", defaults.VCPUCount, "vcpu count")
	cmd.Flags().IntVar(&memory, "memory", defaults.MemoryMiB, "memory in MiB")
	cmd.Flags().StringVar(&systemOverlaySize, "system-overlay-size", model.FormatSizeBytes(defaults.SystemOverlaySizeByte), "system overlay size")
	cmd.Flags().StringVar(&workDiskSize, "disk-size", model.FormatSizeBytes(defaults.WorkDiskSizeBytes), "work disk size")
	cmd.Flags().BoolVar(&natEnabled, "nat", false, "enable NAT")
	cmd.Flags().StringVar(&branchName, "branch", "", "create and switch to a new guest branch")
	cmd.Flags().StringVar(&fromRef, "from", "HEAD", "base ref for --branch")
	cmd.Flags().BoolVar(&removeOnExit, "rm", false, "delete the VM after the ssh session / command exits")
	_ = cmd.RegisterFlagCompletionFunc("image", completeImageNames)
	return cmd
}
|
||||
|
||||
// newVMKillCommand builds "banger vm kill", which sends a signal
// ("TERM" by default) to one or more VM processes via the daemon's
// vm.kill RPC.
func newVMKillCommand() *cobra.Command {
	var signal string
	cmd := &cobra.Command{
		Use:               "kill <id-or-name>...",
		Short:             "Send a signal to a VM process",
		Args:              minArgsUsage(1, "usage: banger vm kill [--signal SIGTERM|SIGKILL|...] <id-or-name>..."),
		ValidArgsFunction: completeVMNames,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Signaling VM processes requires elevated privileges.
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			if len(args) > 1 {
				// Multiple refs: resolve them and kill concurrently; the
				// batch helper handles per-VM error reporting.
				return runVMBatchAction(cmd, layout.SocketPath, args, func(ctx context.Context, id string) (model.VMRecord, error) {
					result, err := rpc.Call[api.VMShowResult](
						ctx,
						layout.SocketPath,
						"vm.kill",
						api.VMKillParams{IDOrName: id, Signal: signal},
					)
					if err != nil {
						return model.VMRecord{}, err
					}
					return result.VM, nil
				})
			}
			// Single ref: one RPC, then print the resulting VM summary.
			result, err := rpc.Call[api.VMShowResult](
				cmd.Context(),
				layout.SocketPath,
				"vm.kill",
				api.VMKillParams{IDOrName: args[0], Signal: signal},
			)
			if err != nil {
				return err
			}
			return printVMSummary(cmd.OutOrStdout(), result.VM)
		},
	}
	cmd.Flags().StringVar(&signal, "signal", "TERM", "signal name to send")
	return cmd
}
|
||||
|
||||
func newVMPruneCommand() *cobra.Command {
|
||||
var force bool
|
||||
cmd := &cobra.Command{
|
||||
Use: "prune",
|
||||
Short: "Delete every VM that isn't running",
|
||||
Long: "Scan for VMs in state other than 'running' (stopped, created, error) and delete them after confirmation. Use -f to skip the prompt.",
|
||||
Args: noArgsUsage("usage: banger vm prune [-f|--force]"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := system.EnsureSudo(cmd.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return runVMPrune(cmd, layout.SocketPath, force)
|
||||
},
|
||||
}
|
||||
cmd.Flags().BoolVarP(&force, "force", "f", false, "skip the confirmation prompt")
|
||||
return cmd
|
||||
}
|
||||
|
||||
// runVMPrune implements "vm prune": fetch the VM list, select every VM
// not in the running state, preview them in a table, ask for
// confirmation (unless force), then delete each one, continuing past
// per-VM failures and returning a combined error at the end.
func runVMPrune(cmd *cobra.Command, socketPath string, force bool) error {
	ctx := cmd.Context()
	stdout := cmd.OutOrStdout()
	stderr := cmd.ErrOrStderr()

	list, err := vmListFunc(ctx, socketPath)
	if err != nil {
		return err
	}
	// Candidates: anything not running (stopped, created, error).
	var victims []model.VMRecord
	for _, vm := range list.VMs {
		if vm.State != model.VMStateRunning {
			victims = append(victims, vm)
		}
	}
	if len(victims) == 0 {
		_, err := fmt.Fprintln(stdout, "no non-running VMs to prune")
		return err
	}

	// Preview table so the user sees exactly what will be deleted.
	fmt.Fprintf(stdout, "The following %d VM(s) will be deleted:\n", len(victims))
	w := tabwriter.NewWriter(stdout, 0, 0, 2, ' ', 0)
	fmt.Fprintln(w, " ID\tNAME\tSTATE")
	for _, vm := range victims {
		fmt.Fprintf(w, " %s\t%s\t%s\n", shortID(vm.ID), vm.Name, vm.State)
	}
	if err := w.Flush(); err != nil {
		return err
	}

	// Without --force, require an explicit yes before destroying anything.
	if !force {
		ok, err := promptYesNo(cmd.InOrStdin(), stdout, "Delete these VMs? [y/N] ")
		if err != nil {
			return err
		}
		if !ok {
			_, err := fmt.Fprintln(stdout, "aborted")
			return err
		}
	}

	// Best-effort deletion: report each failure to stderr and keep going.
	var failed int
	for _, vm := range victims {
		// Prefer the human-friendly name; fall back to a short ID.
		ref := vm.Name
		if ref == "" {
			ref = shortID(vm.ID)
		}
		if err := vmDeleteFunc(ctx, socketPath, vm.ID); err != nil {
			fmt.Fprintf(stderr, "delete %s: %v\n", ref, err)
			failed++
			continue
		}
		fmt.Fprintln(stdout, "deleted", ref)
	}
	if failed > 0 {
		return fmt.Errorf("%d VM(s) failed to delete", failed)
	}
	return nil
}
|
||||
|
||||
// promptYesNo reads a line from in and returns true iff the trimmed
|
||||
// lowercase answer is "y" or "yes". EOF is "no"; other read errors
|
||||
// surface to the caller.
|
||||
func promptYesNo(in io.Reader, out io.Writer, prompt string) (bool, error) {
|
||||
if _, err := fmt.Fprint(out, prompt); err != nil {
|
||||
return false, err
|
||||
}
|
||||
reader := bufio.NewReader(in)
|
||||
line, err := reader.ReadString('\n')
|
||||
if err != nil && err != io.EOF {
|
||||
return false, err
|
||||
}
|
||||
answer := strings.ToLower(strings.TrimSpace(line))
|
||||
return answer == "y" || answer == "yes", nil
|
||||
}
|
||||
|
||||
// newVMCreateCommand builds "banger vm create". Sizing defaults are
// resolved once at command-build time from config + host heuristics so
// the flag help shows the effective values.
func newVMCreateCommand() *cobra.Command {
	defaults := effectiveVMDefaults()
	var (
		name              string
		imageName         string
		vcpu              = defaults.VCPUCount
		memory            = defaults.MemoryMiB
		systemOverlaySize = model.FormatSizeBytes(defaults.SystemOverlaySizeByte)
		workDiskSize      = model.FormatSizeBytes(defaults.WorkDiskSizeBytes)
		natEnabled        bool
		noStart           bool
	)
	cmd := &cobra.Command{
		Use:   "create",
		Short: "Create a VM",
		Args:  noArgsUsage("usage: banger vm create"),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate flags before prompting for sudo or starting the
			// daemon, so bad input fails cheaply.
			params, err := vmCreateParamsFromFlags(cmd, name, imageName, vcpu, memory, systemOverlaySize, workDiskSize, natEnabled, noStart)
			if err != nil {
				return err
			}
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			// Progress output goes to stderr; the summary to stdout.
			vm, err := runVMCreate(cmd.Context(), layout.SocketPath, cmd.ErrOrStderr(), params)
			if err != nil {
				return err
			}
			return printVMSummary(cmd.OutOrStdout(), vm)
		},
	}
	cmd.Flags().StringVar(&name, "name", "", "vm name")
	cmd.Flags().StringVar(&imageName, "image", "", "image name or id (defaults to config's default_image_name; auto-pulled from imagecat if missing)")
	cmd.Flags().IntVar(&vcpu, "vcpu", defaults.VCPUCount, "vcpu count")
	cmd.Flags().IntVar(&memory, "memory", defaults.MemoryMiB, "memory in MiB")
	cmd.Flags().StringVar(&systemOverlaySize, "system-overlay-size", model.FormatSizeBytes(defaults.SystemOverlaySizeByte), "system overlay size")
	cmd.Flags().StringVar(&workDiskSize, "disk-size", model.FormatSizeBytes(defaults.WorkDiskSizeBytes), "work disk size")
	cmd.Flags().BoolVar(&natEnabled, "nat", false, "enable NAT")
	cmd.Flags().BoolVar(&noStart, "no-start", false, "create without starting")
	_ = cmd.RegisterFlagCompletionFunc("image", completeImageNames)
	return cmd
}
|
||||
|
||||
// vmListOptions carries the flag values shared by "banger ps" and
// "banger vm list".
type vmListOptions struct {
	showAll bool // -a/--all: include non-running VMs
	latest  bool // -l/--latest: keep only the most recent VM
	quiet   bool // -q/--quiet: print bare VM IDs instead of a table
}
|
||||
|
||||
func newPSCommand() *cobra.Command {
|
||||
return newVMListLikeCommand("ps", nil, "usage: banger ps")
|
||||
}
|
||||
|
||||
func newVMListCommand() *cobra.Command {
|
||||
return newVMListLikeCommand("list", []string{"ls", "ps"}, "usage: banger vm list")
|
||||
}
|
||||
|
||||
func newVMListLikeCommand(use string, aliases []string, usage string) *cobra.Command {
|
||||
var opts vmListOptions
|
||||
cmd := &cobra.Command{
|
||||
Use: use,
|
||||
Aliases: aliases,
|
||||
Short: "List VMs",
|
||||
Args: noArgsUsage(usage),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runVMList(cmd, opts)
|
||||
},
|
||||
}
|
||||
cmd.Flags().BoolVarP(&opts.showAll, "all", "a", false, "show all VMs")
|
||||
cmd.Flags().BoolVarP(&opts.latest, "latest", "l", false, "show only the latest VM")
|
||||
cmd.Flags().BoolVarP(&opts.quiet, "quiet", "q", false, "only show VM IDs")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runVMList(cmd *cobra.Command, opts vmListOptions) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := rpc.Call[api.VMListResult](cmd.Context(), layout.SocketPath, "vm.list", api.Empty{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
vms := selectVMListVMs(result.VMs, opts.showAll, opts.latest)
|
||||
if opts.quiet {
|
||||
return printVMIDList(cmd.OutOrStdout(), vms)
|
||||
}
|
||||
images, err := rpc.Call[api.ImageListResult](cmd.Context(), layout.SocketPath, "image.list", api.Empty{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printVMListTable(cmd.OutOrStdout(), vms, imageNameIndex(images.Images))
|
||||
}
|
||||
|
||||
func selectVMListVMs(vms []model.VMRecord, showAll, latest bool) []model.VMRecord {
|
||||
filtered := make([]model.VMRecord, 0, len(vms))
|
||||
for _, vm := range vms {
|
||||
if !showAll && vm.State != model.VMStateRunning {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, vm)
|
||||
}
|
||||
if !latest || len(filtered) <= 1 {
|
||||
return filtered
|
||||
}
|
||||
latestVM := filtered[0]
|
||||
for _, vm := range filtered[1:] {
|
||||
if vm.CreatedAt.After(latestVM.CreatedAt) {
|
||||
latestVM = vm
|
||||
continue
|
||||
}
|
||||
if vm.CreatedAt.Equal(latestVM.CreatedAt) && vm.UpdatedAt.After(latestVM.UpdatedAt) {
|
||||
latestVM = vm
|
||||
}
|
||||
}
|
||||
return []model.VMRecord{latestVM}
|
||||
}
|
||||
|
||||
func newVMShowCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "show <id-or-name>",
|
||||
Short: "Show VM details",
|
||||
Args: exactArgsUsage(1, "usage: banger vm show <id-or-name>"),
|
||||
ValidArgsFunction: completeVMNameOnlyAtPos0,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := rpc.Call[api.VMShowResult](cmd.Context(), layout.SocketPath, "vm.show", api.VMRefParams{IDOrName: args[0]})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printJSON(cmd.OutOrStdout(), result.VM)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// newVMActionCommand builds a generic single-RPC VM lifecycle command
// (start/stop/restart/delete): each positional ref is forwarded to the
// given RPC method; multiple refs run as a concurrent batch.
func newVMActionCommand(use, short, method string, aliases ...string) *cobra.Command {
	return &cobra.Command{
		Use:               use + " <id-or-name>...",
		Aliases:           aliases,
		Short:             short,
		Args:              minArgsUsage(1, fmt.Sprintf("usage: banger vm %s <id-or-name>...", use)),
		ValidArgsFunction: completeVMNames,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Lifecycle changes require elevated privileges.
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			if len(args) > 1 {
				// Batch path: resolve + act concurrently, report per VM.
				return runVMBatchAction(cmd, layout.SocketPath, args, func(ctx context.Context, id string) (model.VMRecord, error) {
					result, err := rpc.Call[api.VMShowResult](ctx, layout.SocketPath, method, api.VMRefParams{IDOrName: id})
					if err != nil {
						return model.VMRecord{}, err
					}
					return result.VM, nil
				})
			}
			// Single ref: one RPC, then print the VM summary.
			result, err := rpc.Call[api.VMShowResult](cmd.Context(), layout.SocketPath, method, api.VMRefParams{IDOrName: args[0]})
			if err != nil {
				return err
			}
			return printVMSummary(cmd.OutOrStdout(), result.VM)
		},
	}
}
|
||||
|
||||
// newVMSetCommand builds "banger vm set", which updates settings on one
// or more stopped VMs. Flag sentinels (-1 for ints, "" for disk size)
// mean "leave unchanged"; vmSetParamsFromFlags rejects an invocation
// that changes nothing.
func newVMSetCommand() *cobra.Command {
	var (
		vcpu     int
		memory   int
		diskSize string
		nat      bool
		noNat    bool
	)
	cmd := &cobra.Command{
		Use:               "set <id-or-name>...",
		Short:             "Update stopped VM settings",
		Args:              minArgsUsage(1, "usage: banger vm set [--vcpu N] [--memory MiB] [--disk-size SIZE] [--nat|--no-nat] <id-or-name>..."),
		ValidArgsFunction: completeVMNames,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate once against the first ref; batch mode retargets
			// the same validated params per VM below.
			params, err := vmSetParamsFromFlags(args[0], vcpu, memory, diskSize, nat, noNat)
			if err != nil {
				return err
			}
			if err := system.EnsureSudo(cmd.Context()); err != nil {
				return err
			}
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			if len(args) > 1 {
				return runVMBatchAction(cmd, layout.SocketPath, args, func(ctx context.Context, id string) (model.VMRecord, error) {
					// Copy the params and point them at this VM.
					batchParams := params
					batchParams.IDOrName = id
					result, err := rpc.Call[api.VMShowResult](ctx, layout.SocketPath, "vm.set", batchParams)
					if err != nil {
						return model.VMRecord{}, err
					}
					return result.VM, nil
				})
			}
			result, err := rpc.Call[api.VMShowResult](cmd.Context(), layout.SocketPath, "vm.set", params)
			if err != nil {
				return err
			}
			return printVMSummary(cmd.OutOrStdout(), result.VM)
		},
	}
	cmd.Flags().IntVar(&vcpu, "vcpu", -1, "vcpu count")
	cmd.Flags().IntVar(&memory, "memory", -1, "memory in MiB")
	cmd.Flags().StringVar(&diskSize, "disk-size", "", "new work disk size")
	cmd.Flags().BoolVar(&nat, "nat", false, "enable NAT")
	cmd.Flags().BoolVar(&noNat, "no-nat", false, "disable NAT")
	return cmd
}
|
||||
|
||||
// newVMSSHCommand builds "banger vm ssh": resolve the VM's guest IP via
// the daemon, assemble the ssh command line, and run an interactive
// session. Extra positional args after the VM ref are forwarded to ssh.
func newVMSSHCommand() *cobra.Command {
	return &cobra.Command{
		Use:               "ssh <id-or-name> [ssh args...]",
		Short:             "SSH into a running VM",
		Args:              minArgsUsage(1, "usage: banger vm ssh <id-or-name> [ssh args...]"),
		ValidArgsFunction: completeVMNameOnlyAtPos0,
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, cfg, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			// Fail fast if the SSH prerequisites aren't satisfied.
			if err := validateSSHPrereqs(cfg); err != nil {
				return err
			}
			result, err := vmSSHFunc(cmd.Context(), layout.SocketPath, args[0])
			if err != nil {
				return err
			}
			// args[1:] are appended to the generated ssh command line.
			sshArgs, err := sshCommandArgs(cfg, result.GuestIP, args[1:])
			if err != nil {
				return err
			}
			return runSSHSession(cmd.Context(), layout.SocketPath, result.Name, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), sshArgs, false)
		},
	}
}
|
||||
|
||||
func newVMWorkspaceCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "workspace",
|
||||
Short: "Manage repository workspaces inside a running VM",
|
||||
RunE: helpNoArgs,
|
||||
}
|
||||
cmd.AddCommand(
|
||||
newVMWorkspacePrepareCommand(),
|
||||
newVMWorkspaceExportCommand(),
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// newVMWorkspacePrepareCommand builds "banger vm workspace prepare",
// which copies a local git checkout into a running VM. The source path
// defaults to the current working directory; the guest path to
// /root/repo; the mode to shallow_overlay.
func newVMWorkspacePrepareCommand() *cobra.Command {
	var guestPath string
	var branchName string
	var fromRef string
	var mode string
	var readOnly bool
	cmd := &cobra.Command{
		Use:               "prepare <id-or-name> [path]",
		Short:             "Copy a local repo into a running VM",
		Long:              "Prepare a repository workspace from a local git checkout into a running VM. The default guest path is /root/repo and the default mode is shallow_overlay. Repositories with git submodules must use --mode full_copy.",
		Args:              minArgsUsage(1, "usage: banger vm workspace prepare <id-or-name> [path]"),
		ValidArgsFunction: completeVMNameOnlyAtPos0,
		Example: strings.TrimSpace(`
  banger vm workspace prepare devbox
  banger vm workspace prepare devbox ../repo --guest-path /root/repo --readonly
  banger vm workspace prepare devbox ../repo --mode full_copy
`),
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			// Optional second arg is the source path; blank falls back to
			// the current working directory (via the injectable cwdFunc).
			sourcePath := ""
			if len(args) > 1 {
				sourcePath = args[1]
			}
			if strings.TrimSpace(sourcePath) == "" {
				wd, err := cwdFunc()
				if err != nil {
					return err
				}
				sourcePath = wd
			}
			resolvedPath, err := workspace.ResolveSourcePath(sourcePath)
			if err != nil {
				return err
			}
			// --from is only forwarded when a branch is actually being
			// created; otherwise it stays empty.
			prepareFrom := ""
			if strings.TrimSpace(branchName) != "" {
				prepareFrom = fromRef
			}
			result, err := vmWorkspacePrepareFunc(cmd.Context(), layout.SocketPath, api.VMWorkspacePrepareParams{
				IDOrName:   args[0],
				SourcePath: resolvedPath,
				GuestPath:  guestPath,
				Branch:     branchName,
				From:       prepareFrom,
				Mode:       mode,
				ReadOnly:   readOnly,
			})
			if err != nil {
				return err
			}
			return printJSON(cmd.OutOrStdout(), result.Workspace)
		},
	}
	cmd.Flags().StringVar(&guestPath, "guest-path", "/root/repo", "guest workspace path")
	cmd.Flags().StringVar(&branchName, "branch", "", "create and switch to a new guest branch")
	cmd.Flags().StringVar(&fromRef, "from", "HEAD", "base ref for --branch")
	cmd.Flags().StringVar(&mode, "mode", string(model.WorkspacePrepareModeShallowOverlay), "workspace mode: shallow_overlay, full_copy, metadata_only")
	cmd.Flags().BoolVar(&readOnly, "readonly", false, "make the prepared workspace read-only")
	return cmd
}
|
||||
|
||||
// newVMWorkspaceExportCommand builds "banger vm workspace export",
// which pulls the guest workspace's changes back to the host as a
// unified diff — to stdout by default (pipeable into git apply) or to
// a file with --output. Status messages go to stderr so stdout stays a
// clean patch stream.
func newVMWorkspaceExportCommand() *cobra.Command {
	var guestPath string
	var outputPath string
	var baseCommit string
	cmd := &cobra.Command{
		Use:               "export <id-or-name>",
		Short:             "Pull changes from a guest workspace back to the host as a patch",
		Long:              "Emit a binary-safe unified diff of every change inside the guest workspace (committed since base + uncommitted + untracked, minus .gitignore). Non-mutating — the guest's index and working tree are untouched. Pass --base-commit with the head_commit from workspace prepare to capture changes even when the worker ran git commit inside the VM. Without --base-commit the diff is against the current guest HEAD, which misses committed changes.",
		Args:              exactArgsUsage(1, "usage: banger vm workspace export <id-or-name>"),
		ValidArgsFunction: completeVMNameOnlyAtPos0,
		Example: strings.TrimSpace(`
  banger vm workspace export devbox | git apply
  banger vm workspace export devbox --base-commit abc1234 | git apply
  banger vm workspace export devbox --output worker.diff
  banger vm workspace export devbox --guest-path /root/project --output changes.diff
`),
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			result, err := vmWorkspaceExportFunc(cmd.Context(), layout.SocketPath, api.WorkspaceExportParams{
				IDOrName:   args[0],
				GuestPath:  guestPath,
				BaseCommit: baseCommit,
			})
			if err != nil {
				return err
			}
			// Nothing to export: note it on stderr, succeed quietly.
			if !result.HasChanges {
				_, _ = fmt.Fprintln(cmd.ErrOrStderr(), "no changes")
				return nil
			}
			if outputPath != "" {
				if err := os.WriteFile(outputPath, result.Patch, 0o644); err != nil {
					return fmt.Errorf("write patch: %w", err)
				}
				_, err = fmt.Fprintf(cmd.ErrOrStderr(), "patch written to %s (%d bytes, %d files)\n",
					outputPath, len(result.Patch), len(result.ChangedFiles))
				return err
			}
			// Default: raw patch bytes to stdout.
			_, err = cmd.OutOrStdout().Write(result.Patch)
			return err
		},
	}
	cmd.Flags().StringVar(&guestPath, "guest-path", "/root/repo", "guest workspace path")
	cmd.Flags().StringVar(&outputPath, "output", "", "write patch to this file instead of stdout")
	cmd.Flags().StringVar(&baseCommit, "base-commit", "", "diff from this commit (use head_commit from workspace prepare to capture worker git commits)")
	return cmd
}
|
||||
|
||||
func newVMLogsCommand() *cobra.Command {
|
||||
var follow bool
|
||||
cmd := &cobra.Command{
|
||||
Use: "logs <id-or-name>",
|
||||
Short: "Show VM logs",
|
||||
Args: exactArgsUsage(1, "usage: banger vm logs [-f] <id-or-name>"),
|
||||
ValidArgsFunction: completeVMNameOnlyAtPos0,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := rpc.Call[api.VMLogsResult](cmd.Context(), layout.SocketPath, "vm.logs", api.VMRefParams{IDOrName: args[0]})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if result.LogPath == "" {
|
||||
return errors.New("vm has no log path")
|
||||
}
|
||||
return system.CopyStream(cmd.OutOrStdout(), system.TailCommand(result.LogPath, follow))
|
||||
},
|
||||
}
|
||||
cmd.Flags().BoolVarP(&follow, "follow", "f", false, "follow logs")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newVMStatsCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "stats <id-or-name>",
|
||||
Short: "Show VM stats",
|
||||
Args: exactArgsUsage(1, "usage: banger vm stats <id-or-name>"),
|
||||
ValidArgsFunction: completeVMNameOnlyAtPos0,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := rpc.Call[api.VMStatsResult](cmd.Context(), layout.SocketPath, "vm.stats", api.VMRefParams{IDOrName: args[0]})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printJSON(cmd.OutOrStdout(), result)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newVMPortsCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "ports <id-or-name>",
|
||||
Short: "Show host-reachable listening guest ports",
|
||||
Args: exactArgsUsage(1, "usage: banger vm ports <id-or-name>"),
|
||||
ValidArgsFunction: completeVMNameOnlyAtPos0,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := vmPortsFunc(cmd.Context(), layout.SocketPath, args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printVMPortsTable(cmd.OutOrStdout(), result)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// resolvedVMTarget is one successfully resolved batch-action target:
// the user's original ref and its argv position plus the matched VM.
type resolvedVMTarget struct {
	Index int            // position of Ref in the user's argument list
	Ref   string         // the id-or-name the user typed
	VM    model.VMRecord // the resolved VM record
}
|
||||
|
||||
// vmRefResolutionError records a ref that could not be resolved to a
// VM, keeping its argv position for error reporting.
type vmRefResolutionError struct {
	Index int    // position of Ref in the user's argument list
	Ref   string // the id-or-name the user typed
	Err   error  // why resolution failed
}
|
||||
|
||||
// vmBatchActionResult is the outcome of running one batch action
// against one resolved target.
type vmBatchActionResult struct {
	Target resolvedVMTarget // the target this result belongs to
	VM     model.VMRecord   // updated record when Err is nil
	Err    error            // non-nil if the action failed
}
|
||||
|
||||
// runVMBatchAction resolves every ref against the daemon's VM list,
// runs action concurrently per unique VM, then prints failures to
// stderr and success summaries to stdout. It returns an error if any
// ref failed to resolve or any action failed, after reporting them all.
func runVMBatchAction(cmd *cobra.Command, socketPath string, refs []string, action func(context.Context, string) (model.VMRecord, error)) error {
	listResult, err := rpc.Call[api.VMListResult](cmd.Context(), socketPath, "vm.list", api.Empty{})
	if err != nil {
		return err
	}
	targets, resolutionErrs := resolveVMTargets(listResult.VMs, refs)
	results := executeVMActionBatch(cmd.Context(), targets, action)

	failed := false
	// Refs that never resolved to a VM are reported first.
	for _, resolutionErr := range resolutionErrs {
		if _, err := fmt.Fprintf(cmd.ErrOrStderr(), "%s: %v\n", resolutionErr.Ref, resolutionErr.Err); err != nil {
			return err
		}
		failed = true
	}
	// Then per-target outcomes, in target order.
	for _, result := range results {
		if result.Err != nil {
			if _, err := fmt.Fprintf(cmd.ErrOrStderr(), "%s: %v\n", result.Target.Ref, result.Err); err != nil {
				return err
			}
			failed = true
			continue
		}
		if err := printVMSummary(cmd.OutOrStdout(), result.VM); err != nil {
			return err
		}
	}
	if failed {
		return errors.New("one or more VM operations failed")
	}
	return nil
}
|
||||
|
||||
func resolveVMTargets(vms []model.VMRecord, refs []string) ([]resolvedVMTarget, []vmRefResolutionError) {
|
||||
targets := make([]resolvedVMTarget, 0, len(refs))
|
||||
resolutionErrs := make([]vmRefResolutionError, 0)
|
||||
seen := make(map[string]struct{}, len(refs))
|
||||
for index, ref := range refs {
|
||||
vm, err := resolveVMRef(vms, ref)
|
||||
if err != nil {
|
||||
resolutionErrs = append(resolutionErrs, vmRefResolutionError{Index: index, Ref: ref, Err: err})
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[vm.ID]; ok {
|
||||
continue
|
||||
}
|
||||
seen[vm.ID] = struct{}{}
|
||||
targets = append(targets, resolvedVMTarget{Index: index, Ref: ref, VM: vm})
|
||||
}
|
||||
return targets, resolutionErrs
|
||||
}
|
||||
|
||||
func resolveVMRef(vms []model.VMRecord, ref string) (model.VMRecord, error) {
|
||||
ref = strings.TrimSpace(ref)
|
||||
if ref == "" {
|
||||
return model.VMRecord{}, errors.New("vm id or name is required")
|
||||
}
|
||||
exactMatches := make([]model.VMRecord, 0, 1)
|
||||
for _, vm := range vms {
|
||||
if vm.ID == ref || vm.Name == ref {
|
||||
exactMatches = append(exactMatches, vm)
|
||||
}
|
||||
}
|
||||
switch len(exactMatches) {
|
||||
case 1:
|
||||
return exactMatches[0], nil
|
||||
case 0:
|
||||
default:
|
||||
return model.VMRecord{}, fmt.Errorf("multiple VMs match %q", ref)
|
||||
}
|
||||
|
||||
prefixMatches := make([]model.VMRecord, 0, 1)
|
||||
for _, vm := range vms {
|
||||
if strings.HasPrefix(vm.ID, ref) || strings.HasPrefix(vm.Name, ref) {
|
||||
prefixMatches = append(prefixMatches, vm)
|
||||
}
|
||||
}
|
||||
switch len(prefixMatches) {
|
||||
case 1:
|
||||
return prefixMatches[0], nil
|
||||
case 0:
|
||||
return model.VMRecord{}, fmt.Errorf("vm %q not found", ref)
|
||||
default:
|
||||
return model.VMRecord{}, fmt.Errorf("multiple VMs match %q", ref)
|
||||
}
|
||||
}
|
||||
|
||||
func executeVMActionBatch(ctx context.Context, targets []resolvedVMTarget, action func(context.Context, string) (model.VMRecord, error)) []vmBatchActionResult {
|
||||
results := make([]vmBatchActionResult, len(targets))
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(targets))
|
||||
for index, target := range targets {
|
||||
index := index
|
||||
target := target
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
vm, err := action(ctx, target.VM.ID)
|
||||
results[index] = vmBatchActionResult{
|
||||
Target: target,
|
||||
VM: vm,
|
||||
Err: err,
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
return results
|
||||
}
|
||||
|
||||
func vmSetParamsFromFlags(idOrName string, vcpu, memory int, diskSize string, nat, noNat bool) (api.VMSetParams, error) {
|
||||
if nat && noNat {
|
||||
return api.VMSetParams{}, errors.New("use only one of --nat or --no-nat")
|
||||
}
|
||||
params := api.VMSetParams{IDOrName: idOrName, WorkDiskSize: diskSize}
|
||||
if vcpu >= 0 {
|
||||
if err := validatePositiveSetting("vcpu", vcpu); err != nil {
|
||||
return api.VMSetParams{}, err
|
||||
}
|
||||
params.VCPUCount = &vcpu
|
||||
}
|
||||
if memory >= 0 {
|
||||
if err := validatePositiveSetting("memory", memory); err != nil {
|
||||
return api.VMSetParams{}, err
|
||||
}
|
||||
params.MemoryMiB = &memory
|
||||
}
|
||||
if nat || noNat {
|
||||
value := nat && !noNat
|
||||
params.NATEnabled = &value
|
||||
}
|
||||
if params.VCPUCount == nil && params.MemoryMiB == nil && params.WorkDiskSize == "" && params.NATEnabled == nil {
|
||||
return api.VMSetParams{}, errors.New("no VM settings changed")
|
||||
}
|
||||
return params, nil
|
||||
}
|
||||
|
||||
// vmCreateParamsFromFlags validates the resolved `vm create` flag
// values and packages them into API params.
// NOTE(review): cmd is not used in this body — presumably kept for
// signature parity with other flag helpers; confirm before removing.
func vmCreateParamsFromFlags(cmd *cobra.Command, name, imageName string, vcpu, memory int, systemOverlaySize, workDiskSize string, natEnabled, noStart bool) (api.VMCreateParams, error) {
	// Flag defaults were resolved from config + host heuristics at
	// command-build time, so we always forward the flag values. The CLI
	// becomes the single source of truth for effective defaults and the
	// progress renderer shows the exact sizing.
	if err := validatePositiveSetting("vcpu", vcpu); err != nil {
		return api.VMCreateParams{}, err
	}
	if err := validatePositiveSetting("memory", memory); err != nil {
		return api.VMCreateParams{}, err
	}
	// vcpu and memory are this function's own copies of the flag
	// values, so taking their addresses is safe.
	params := api.VMCreateParams{
		Name:              name,
		ImageName:         imageName,
		NATEnabled:        natEnabled,
		NoStart:           noStart,
		VCPUCount:         &vcpu,
		MemoryMiB:         &memory,
		SystemOverlaySize: systemOverlaySize,
		WorkDiskSize:      workDiskSize,
	}
	return params, nil
}
|
||||
370
internal/cli/commands_vm_session.go
Normal file
370
internal/cli/commands_vm_session.go
Normal file
|
|
@ -0,0 +1,370 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"banger/internal/api"
|
||||
"banger/internal/model"
|
||||
"banger/internal/sessionstream"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newVMSessionCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "session",
|
||||
Short: "Manage long-lived guest commands inside a VM",
|
||||
Long: "Start, inspect, stop, and attach to daemon-managed guest commands. Pipe-mode sessions expose live stdio for interactive protocols. Attach is exclusive and currently uses a same-host local bridge.",
|
||||
RunE: helpNoArgs,
|
||||
}
|
||||
cmd.AddCommand(
|
||||
newVMSessionStartCommand(),
|
||||
newVMSessionListCommand(),
|
||||
newVMSessionShowCommand(),
|
||||
newVMSessionLogsCommand(),
|
||||
newVMSessionStopCommand(),
|
||||
newVMSessionKillCommand(),
|
||||
newVMSessionAttachCommand(),
|
||||
newVMSessionSendCommand(),
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// newVMSessionStartCommand builds `vm session start`, which launches a
// daemon-managed command inside a VM. --env/--tag are repeatable
// KEY=VALUE pairs; --stdin-mode pipe is what later enables attach and
// send against the session.
func newVMSessionStartCommand() *cobra.Command {
	var name string
	var cwd string
	var stdinMode string
	var envPairs []string
	var tagPairs []string
	var requiredCommands []string
	cmd := &cobra.Command{
		Use:               "start <id-or-name> <command> [args...]",
		Short:             "Start a managed guest command",
		Long:              "Start a daemon-managed guest command. The daemon verifies that the guest working directory exists and that the requested command is present in guest PATH before launch. Use --stdin-mode pipe when you need live attach.",
		Args:              minArgsUsage(2, "usage: banger vm session start <id-or-name> [flags] -- <command> [args...]"),
		ValidArgsFunction: completeVMNameOnlyAtPos0,
		Example: strings.TrimSpace(`
banger vm session start devbox --name planner --cwd /root/repo --stdin-mode pipe --require-command git -- pi --mode rpc --no-session
banger vm session start devbox --name shell --stdin-mode pipe -- bash -lc 'exec bash'
`),
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			env, err := parseKeyValuePairs(envPairs)
			if err != nil {
				return err
			}
			tags, err := parseKeyValuePairs(tagPairs)
			if err != nil {
				return err
			}
			// args[0] is the VM ref, args[1] the guest command, the rest
			// its argv. Slices are copied so the RPC params own them.
			result, err := guestSessionStartFunc(cmd.Context(), layout.SocketPath, api.GuestSessionStartParams{
				VMIDOrName:       args[0],
				Name:             name,
				Command:          args[1],
				Args:             append([]string(nil), args[2:]...),
				CWD:              cwd,
				Env:              env,
				StdinMode:        stdinMode,
				Tags:             tags,
				RequiredCommands: append([]string(nil), requiredCommands...),
			})
			if err != nil {
				return err
			}
			if err := printGuestSessionSummary(cmd.OutOrStdout(), result.Session); err != nil {
				return err
			}
			// The RPC itself can succeed while the launch fails inside
			// the guest; surface stage + message as a warning.
			if result.Session.Status == model.GuestSessionStatusFailed && strings.TrimSpace(result.Session.LaunchMessage) != "" {
				_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "warning: session failed at %s: %s\n", result.Session.LaunchStage, result.Session.LaunchMessage)
			}
			return nil
		},
	}
	cmd.Flags().StringVar(&name, "name", "", "session name")
	cmd.Flags().StringVar(&cwd, "cwd", "", "guest working directory; must already exist")
	cmd.Flags().StringVar(&stdinMode, "stdin-mode", string(model.GuestSessionStdinClosed), "stdin mode: closed or pipe (pipe enables attach)")
	cmd.Flags().StringArrayVar(&envPairs, "env", nil, "environment entry in KEY=VALUE form")
	cmd.Flags().StringArrayVar(&tagPairs, "tag", nil, "session tag in KEY=VALUE form")
	cmd.Flags().StringArrayVar(&requiredCommands, "require-command", nil, "extra guest command that must exist in PATH before launch; repeatable")
	return cmd
}
|
||||
|
||||
func newVMSessionListCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "list <id-or-name>",
|
||||
Aliases: []string{"ls"},
|
||||
Short: "List managed guest commands for a VM",
|
||||
Args: exactArgsUsage(1, "usage: banger vm session list <id-or-name>"),
|
||||
ValidArgsFunction: completeVMNameOnlyAtPos0,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := guestSessionListFunc(cmd.Context(), layout.SocketPath, args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printGuestSessionTable(cmd.OutOrStdout(), result.Sessions)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newVMSessionShowCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "show <id-or-name> <session>",
|
||||
Short: "Show managed guest command details",
|
||||
Args: exactArgsUsage(2, "usage: banger vm session show <id-or-name> <session>"),
|
||||
ValidArgsFunction: completeSessionNames,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := guestSessionGetFunc(cmd.Context(), layout.SocketPath, api.GuestSessionRefParams{VMIDOrName: args[0], SessionIDOrName: args[1]})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printJSON(cmd.OutOrStdout(), result.Session)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newVMSessionLogsCommand() *cobra.Command {
|
||||
var stream string
|
||||
var tailLines int
|
||||
cmd := &cobra.Command{
|
||||
Use: "logs <id-or-name> <session>",
|
||||
Short: "Show stdout or stderr for a guest session",
|
||||
Args: exactArgsUsage(2, "usage: banger vm session logs [--stream stdout|stderr] [-n LINES] <id-or-name> <session>"),
|
||||
ValidArgsFunction: completeSessionNames,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := guestSessionLogsFunc(cmd.Context(), layout.SocketPath, api.GuestSessionLogsParams{VMIDOrName: args[0], SessionIDOrName: args[1], Stream: stream, TailLines: tailLines})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprint(cmd.OutOrStdout(), result.Content)
|
||||
return err
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVar(&stream, "stream", "stdout", "log stream to read")
|
||||
cmd.Flags().IntVarP(&tailLines, "lines", "n", 200, "number of lines to tail")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newVMSessionStopCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "stop <id-or-name> <session>",
|
||||
Short: "Send SIGTERM to a guest session",
|
||||
Args: exactArgsUsage(2, "usage: banger vm session stop <id-or-name> <session>"),
|
||||
ValidArgsFunction: completeSessionNames,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := guestSessionStopFunc(cmd.Context(), layout.SocketPath, api.GuestSessionRefParams{VMIDOrName: args[0], SessionIDOrName: args[1]})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printGuestSessionSummary(cmd.OutOrStdout(), result.Session)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newVMSessionKillCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "kill <id-or-name> <session>",
|
||||
Short: "Send SIGKILL to a guest session",
|
||||
Args: exactArgsUsage(2, "usage: banger vm session kill <id-or-name> <session>"),
|
||||
ValidArgsFunction: completeSessionNames,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
layout, _, err := ensureDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := guestSessionKillFunc(cmd.Context(), layout.SocketPath, api.GuestSessionRefParams{VMIDOrName: args[0], SessionIDOrName: args[1]})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printGuestSessionSummary(cmd.OutOrStdout(), result.Session)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// newVMSessionAttachCommand builds `vm session attach`, which bridges
// the caller's stdio to a pipe-mode session via a daemon-created local
// Unix socket.
func newVMSessionAttachCommand() *cobra.Command {
	return &cobra.Command{
		Use:               "attach <id-or-name> <session>",
		Short:             "Attach local stdio to an attachable guest session",
		Long:              "Attach local stdio to a pipe-mode session through a daemon-created local Unix socket bridge. Only one active attach is allowed at a time, and the client must run on the same host as the daemon.",
		Args:              exactArgsUsage(2, "usage: banger vm session attach <id-or-name> <session>"),
		ValidArgsFunction: completeSessionNames,
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			result, err := guestSessionAttachBeginFunc(cmd.Context(), layout.SocketPath, api.GuestSessionAttachBeginParams{VMIDOrName: args[0], SessionIDOrName: args[1]})
			if err != nil {
				return err
			}
			// Prefer the explicit SocketPath field; fall back to the
			// generic transport target only when the daemon describes a
			// unix_socket transport.
			socketPath := strings.TrimSpace(result.SocketPath)
			if socketPath == "" && result.TransportKind == "unix_socket" {
				socketPath = strings.TrimSpace(result.TransportTarget)
			}
			return runGuestSessionAttach(cmd.Context(), cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), socketPath)
		},
	}
}
|
||||
|
||||
// newVMSessionSendCommand builds `vm session send`, which writes a
// payload to a running pipe-mode session's stdin without taking the
// exclusive attach.
func newVMSessionSendCommand() *cobra.Command {
	var message string
	cmd := &cobra.Command{
		Use:               "send <id-or-name> <session>",
		Short:             "Write bytes to a running guest session's stdin pipe",
		Long:              "Write a payload to the stdin pipe of a running pipe-mode guest session without holding the exclusive attach. Use --message for an inline JSONL string, or pipe bytes via stdin when --message is omitted. A trailing newline is appended to --message values that lack one.",
		Args:              exactArgsUsage(2, "usage: banger vm session send <id-or-name> <session> [--message '<json>']"),
		ValidArgsFunction: completeSessionNames,
		Example: strings.TrimSpace(`
banger vm session send devbox planner --message '{"type":"abort"}'
banger vm session send devbox planner --message '{"type":"steer","message":"Focus on src/"}'
echo '{"type":"prompt","prompt":"Summarize."}' | banger vm session send devbox planner
`),
		RunE: func(cmd *cobra.Command, args []string) error {
			layout, _, err := ensureDaemon(cmd.Context())
			if err != nil {
				return err
			}
			var payload []byte
			if message != "" {
				payload = []byte(message)
				// JSONL messages are newline-delimited; guarantee the
				// terminator for inline --message values.
				if len(payload) > 0 && payload[len(payload)-1] != '\n' {
					payload = append(payload, '\n')
				}
			} else {
				// No inline message: forward stdin bytes verbatim, with
				// no newline normalization.
				payload, err = io.ReadAll(cmd.InOrStdin())
				if err != nil {
					return fmt.Errorf("read stdin: %w", err)
				}
			}
			result, err := guestSessionSendFunc(cmd.Context(), layout.SocketPath, api.GuestSessionSendParams{
				VMIDOrName:      args[0],
				SessionIDOrName: args[1],
				Payload:         payload,
			})
			if err != nil {
				return err
			}
			_, err = fmt.Fprintf(cmd.OutOrStdout(), "sent %d bytes to session %s\n", result.BytesWritten, result.Session.Name)
			return err
		},
	}
	cmd.Flags().StringVar(&message, "message", "", "JSONL message to send; a trailing newline is appended if absent")
	return cmd
}
|
||||
|
||||
// parseKeyValuePairs converts KEY=VALUE strings into a map. Keys are
// whitespace-trimmed and must be non-empty; values keep their raw form
// (including whitespace and embedded '=' characters). A later
// duplicate key overwrites an earlier one. Empty input yields nil.
func parseKeyValuePairs(values []string) (map[string]string, error) {
	if len(values) == 0 {
		return nil, nil
	}
	parsed := make(map[string]string, len(values))
	for _, entry := range values {
		key, val, found := strings.Cut(entry, "=")
		key = strings.TrimSpace(key)
		if !found || key == "" {
			return nil, fmt.Errorf("invalid key=value entry %q", entry)
		}
		parsed[key] = val
	}
	return parsed, nil
}
|
||||
|
||||
// runGuestSessionAttach dials the daemon's attach bridge socket and
// pumps frames until the session ends: a goroutine streams local stdin
// to the bridge while this goroutine demultiplexes stdout/stderr/
// control frames from the connection. It returns nil on EOF or a
// zero-code "exit" control message, and an error on a non-zero exit,
// an "error" control message, a write failure, or context cancellation.
func runGuestSessionAttach(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, socketPath string) error {
	conn, err := (&net.Dialer{}).DialContext(ctx, "unix", socketPath)
	if err != nil {
		return err
	}
	defer conn.Close()
	// Buffered so the stdin pump can report its result even after this
	// function has stopped draining the channel.
	writeErrCh := make(chan error, 1)
	// NOTE(review): if this function returns while the goroutine is
	// blocked in stdin.Read, the goroutine lingers until that read
	// unblocks — confirm this is acceptable for the CLI's lifetime.
	go func() {
		writeErrCh <- streamGuestSessionAttachInput(conn, stdin)
	}()
	for {
		channel, payload, err := sessionstream.ReadFrame(conn)
		if err != nil {
			// If the context was cancelled, report that instead of
			// whatever read error resulted.
			if ctx.Err() != nil {
				return ctx.Err()
			}
			// A clean EOF from the bridge ends the attach normally.
			if errors.Is(err, io.EOF) {
				return nil
			}
			return err
		}
		switch channel {
		case sessionstream.ChannelStdout:
			if _, err := stdout.Write(payload); err != nil {
				return err
			}
		case sessionstream.ChannelStderr:
			if _, err := stderr.Write(payload); err != nil {
				return err
			}
		case sessionstream.ChannelControl:
			message, err := sessionstream.ReadControl(payload)
			if err != nil {
				return err
			}
			switch message.Type {
			case "exit":
				// Non-zero guest exit surfaces as a CLI error.
				if message.ExitCode != nil && *message.ExitCode != 0 {
					return fmt.Errorf("guest session exited with code %d", *message.ExitCode)
				}
				return nil
			case "error":
				if strings.TrimSpace(message.Error) == "" {
					return errors.New("guest session attach failed")
				}
				return errors.New(message.Error)
			}
		}
		// Surface a stdin-pump failure promptly without ever blocking
		// the frame-read loop.
		select {
		case err := <-writeErrCh:
			if err != nil {
				return err
			}
		default:
		}
	}
}
|
||||
|
||||
// streamGuestSessionAttachInput copies stdin into stdin-channel frames
// on conn, then signals end-of-input with an "eof" control message.
// A nil stdin sends the eof control immediately.
func streamGuestSessionAttachInput(conn net.Conn, stdin io.Reader) error {
	if stdin == nil {
		return sessionstream.WriteControl(conn, sessionstream.ControlMessage{Type: "eof"})
	}
	buffer := make([]byte, 32*1024)
	for {
		n, err := stdin.Read(buffer)
		// Per the io.Reader contract, consume the n>0 bytes before
		// acting on the error from the same call.
		if n > 0 {
			if writeErr := sessionstream.WriteFrame(conn, sessionstream.ChannelStdin, buffer[:n]); writeErr != nil {
				return writeErr
			}
		}
		if err != nil {
			if errors.Is(err, io.EOF) {
				return sessionstream.WriteControl(conn, sessionstream.ControlMessage{Type: "eof"})
			}
			return err
		}
	}
}
|
||||
138
internal/cli/daemon_lifecycle.go
Normal file
138
internal/cli/daemon_lifecycle.go
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"banger/internal/api"
|
||||
"banger/internal/config"
|
||||
"banger/internal/model"
|
||||
"banger/internal/paths"
|
||||
"banger/internal/rpc"
|
||||
)
|
||||
|
||||
// ensureDaemon pings the socket; on miss it auto-starts bangerd, on
|
||||
// version mismatch it restarts. Every CLI command that needs to talk
|
||||
// to the daemon routes through here.
|
||||
func ensureDaemon(ctx context.Context) (paths.Layout, model.DaemonConfig, error) {
|
||||
layout, err := paths.Resolve()
|
||||
if err != nil {
|
||||
return paths.Layout{}, model.DaemonConfig{}, err
|
||||
}
|
||||
cfg, err := config.Load(layout)
|
||||
if err != nil {
|
||||
return paths.Layout{}, model.DaemonConfig{}, err
|
||||
}
|
||||
if ping, err := daemonPingFunc(ctx, layout.SocketPath); err == nil {
|
||||
if daemonOutdated(ping.PID) {
|
||||
if err := restartDaemon(ctx, layout, ping.PID); err != nil {
|
||||
return paths.Layout{}, model.DaemonConfig{}, err
|
||||
}
|
||||
return layout, cfg, nil
|
||||
}
|
||||
return layout, cfg, nil
|
||||
}
|
||||
if err := startDaemon(ctx, layout); err != nil {
|
||||
return paths.Layout{}, model.DaemonConfig{}, err
|
||||
}
|
||||
return layout, cfg, nil
|
||||
}
|
||||
|
||||
// daemonOutdated reports whether the running daemon binary differs
|
||||
// from the one on disk — useful after `make install` when the user's
|
||||
// session still holds a handle to an old daemon. os.SameFile compares
|
||||
// inode + dev, so a fresh binary at the same path registers as
|
||||
// different.
|
||||
func daemonOutdated(pid int) bool {
|
||||
if pid <= 0 {
|
||||
return false
|
||||
}
|
||||
daemonBin, err := bangerdPathFunc()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
currentInfo, err := os.Stat(daemonBin)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
runningInfo, err := os.Stat(daemonExePath(pid))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return !os.SameFile(currentInfo, runningInfo)
|
||||
}
|
||||
|
||||
// restartDaemon asks the running daemon to shut down over RPC,
// escalates to SIGTERM if it does not exit within 2s, and then starts
// a fresh daemon.
func restartDaemon(ctx context.Context, layout paths.Layout, pid int) error {
	stopCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()

	// Best-effort graceful shutdown; the error is deliberately ignored
	// because the daemon may drop the socket before replying.
	_, _ = rpc.Call[api.ShutdownResult](stopCtx, layout.SocketPath, "shutdown", api.Empty{})
	if waitForPIDExit(pid, 2*time.Second) {
		return startDaemon(ctx, layout)
	}
	// Graceful path failed: signal the process directly and wait again.
	if proc, err := os.FindProcess(pid); err == nil {
		_ = proc.Signal(syscall.SIGTERM)
	}
	if !waitForPIDExit(pid, 2*time.Second) {
		return fmt.Errorf("timed out restarting stale daemon pid %d", pid)
	}
	return startDaemon(ctx, layout)
}
|
||||
|
||||
func waitForPIDExit(pid int, timeout time.Duration) bool {
|
||||
deadline := time.Now().Add(timeout)
|
||||
for time.Now().Before(deadline) {
|
||||
if !pidRunning(pid) {
|
||||
return true
|
||||
}
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
return !pidRunning(pid)
|
||||
}
|
||||
|
||||
func pidRunning(pid int) bool {
|
||||
if pid <= 0 {
|
||||
return false
|
||||
}
|
||||
proc, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return proc.Signal(syscall.Signal(0)) == nil
|
||||
}
|
||||
|
||||
func startDaemon(ctx context.Context, layout paths.Layout) error {
|
||||
if err := paths.Ensure(layout); err != nil {
|
||||
return err
|
||||
}
|
||||
logFile, err := os.OpenFile(layout.DaemonLog, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer logFile.Close()
|
||||
|
||||
daemonBin, err := paths.BangerdPath()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cmd := buildDaemonCommand(daemonBin)
|
||||
cmd.Stdout = logFile
|
||||
cmd.Stderr = logFile
|
||||
cmd.Stdin = nil
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
|
||||
if err := cmd.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := rpc.WaitForSocket(layout.SocketPath, 5*time.Second); err != nil {
|
||||
return fmt.Errorf("daemon failed to start; inspect %s: %w", layout.DaemonLog, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildDaemonCommand constructs the exec.Cmd used to launch bangerd.
// Kept as its own seam so launch tweaks stay in one place.
func buildDaemonCommand(daemonBin string) *exec.Cmd {
	cmd := exec.Command(daemonBin)
	return cmd
}
|
||||
318
internal/cli/printers.go
Normal file
318
internal/cli/printers.go
Normal file
|
|
@ -0,0 +1,318 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"banger/internal/api"
|
||||
"banger/internal/model"
|
||||
"banger/internal/system"
|
||||
)
|
||||
|
||||
// anyWriter is the minimal writer surface every printer needs. Split
// out from io.Writer because some of our callers already hold a
// tabwriter/bytes.Buffer by value. Its method set is identical to
// io.Writer, so any io.Writer satisfies it implicitly.
type anyWriter interface {
	Write(p []byte) (n int, err error)
}
|
||||
|
||||
// -- small helpers --------------------------------------------------
|
||||
|
||||
// humanSize renders a byte count as a short human-readable string
// ("1.5GiB", "512B"); non-positive counts render as "-".
func humanSize(bytes int64) string {
	if bytes <= 0 {
		return "-"
	}
	const (
		kib = int64(1024)
		mib = 1024 * kib
		gib = 1024 * mib
	)
	// Largest-first so the first matching unit wins.
	units := []struct {
		threshold int64
		suffix    string
	}{
		{gib, "GiB"},
		{mib, "MiB"},
		{kib, "KiB"},
	}
	for _, unit := range units {
		if bytes >= unit.threshold {
			return fmt.Sprintf("%.1f%s", float64(bytes)/float64(unit.threshold), unit.suffix)
		}
	}
	return fmt.Sprintf("%dB", bytes)
}
|
||||
|
||||
// dashIfEmpty returns "-" when s is blank (empty or all whitespace);
// otherwise it returns s unchanged, surrounding spaces included.
// Compare emptyDash, which additionally trims the returned value.
func dashIfEmpty(s string) string {
	if len(strings.TrimSpace(s)) == 0 {
		return "-"
	}
	return s
}
|
||||
|
||||
// emptyDash trims value and returns "-" when nothing remains;
// otherwise it returns the trimmed value. Compare dashIfEmpty, which
// preserves the original (untrimmed) string.
func emptyDash(value string) string {
	trimmed := strings.TrimSpace(value)
	if trimmed == "" {
		return "-"
	}
	return trimmed
}
|
||||
|
||||
// -- generic printers -----------------------------------------------
|
||||
|
||||
// printJSON writes v to out as indented JSON followed by a trailing
// newline.
func printJSON(out anyWriter, v any) error {
	data, err := json.MarshalIndent(v, "", " ")
	if err != nil {
		return err
	}
	_, err = fmt.Fprintln(out, string(data))
	return err
}
|
||||
|
||||
// -- VM printers ----------------------------------------------------
|
||||
|
||||
func printVMSummary(out anyWriter, vm model.VMRecord) error {
|
||||
_, err := fmt.Fprintf(
|
||||
out,
|
||||
"%s\t%s\t%s\t%s\t%s\t%s\n",
|
||||
shortID(vm.ID),
|
||||
vm.Name,
|
||||
vm.State,
|
||||
vm.Runtime.GuestIP,
|
||||
model.FormatSizeBytes(vm.Spec.WorkDiskSizeBytes),
|
||||
vm.Runtime.DNSName,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
func printVMIDList(out anyWriter, vms []model.VMRecord) error {
|
||||
for _, vm := range vms {
|
||||
if _, err := fmt.Fprintln(out, vm.ID); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// printVMListTable renders vms as an aligned table, one row per VM.
// imageNames maps image ID -> display name for the IMAGE column.
func printVMListTable(out anyWriter, vms []model.VMRecord, imageNames map[string]string) error {
	w := tabwriter.NewWriter(out, 0, 8, 2, ' ', 0)
	if _, err := fmt.Fprintln(w, "ID\tNAME\tSTATE\tIMAGE\tIP\tVCPU\tMEM\tDISK\tCREATED"); err != nil {
		return err
	}
	for _, vm := range vms {
		if _, err := fmt.Fprintf(
			w,
			"%s\t%s\t%s\t%s\t%s\t%d\t%d MiB\t%s\t%s\n",
			shortID(vm.ID),
			vm.Name,
			vm.State,
			vmImageLabel(vm.ImageID, imageNames),
			vm.Runtime.GuestIP,
			vm.Spec.VCPUCount,
			vm.Spec.MemoryMiB,
			model.FormatSizeBytes(vm.Spec.WorkDiskSizeBytes),
			relativeTime(vm.CreatedAt),
		); err != nil {
			return err
		}
	}
	// Flush renders the buffered rows with the computed column widths.
	return w.Flush()
}
|
||||
|
||||
// printVMPortsTable renders the VM's listening ports as an aligned
// table, sorted by proto, then port, then process, then command.
// Nothing at all is printed when there are no ports.
func printVMPortsTable(out anyWriter, result api.VMPortsResult) error {
	// Local row type; Port participates only in sorting — the rendered
	// columns are PROTO/ENDPOINT/PROCESS/COMMAND (presumably the port
	// number appears inside Endpoint — confirm against the API).
	type portRow struct {
		Proto    string
		Endpoint string
		Process  string
		Command  string
		Port     int
	}
	rows := make([]portRow, 0, len(result.Ports))
	for _, port := range result.Ports {
		rows = append(rows, portRow{
			Proto:    port.Proto,
			Endpoint: port.Endpoint,
			Process:  port.Process,
			Command:  port.Command,
			Port:     port.Port,
		})
	}
	// Deterministic ordering with full tie-breaking.
	sort.Slice(rows, func(i, j int) bool {
		if rows[i].Proto != rows[j].Proto {
			return rows[i].Proto < rows[j].Proto
		}
		if rows[i].Port != rows[j].Port {
			return rows[i].Port < rows[j].Port
		}
		if rows[i].Process != rows[j].Process {
			return rows[i].Process < rows[j].Process
		}
		return rows[i].Command < rows[j].Command
	})
	if len(rows) == 0 {
		return nil
	}

	w := tabwriter.NewWriter(out, 0, 8, 2, ' ', 0)
	if _, err := fmt.Fprintln(w, "PROTO\tENDPOINT\tPROCESS\tCOMMAND"); err != nil {
		return err
	}
	for _, row := range rows {
		if _, err := fmt.Fprintf(
			w,
			"%s\t%s\t%s\t%s\n",
			row.Proto,
			emptyDash(row.Endpoint),
			emptyDash(row.Process),
			emptyDash(row.Command),
		); err != nil {
			return err
		}
	}
	return w.Flush()
}
|
||||
|
||||
// -- image printers -------------------------------------------------
|
||||
|
||||
func printImageSummary(out anyWriter, image model.Image) error {
|
||||
_, err := fmt.Fprintf(out, "%s\t%s\t%t\t%s\n", shortID(image.ID), image.Name, image.Managed, image.RootfsPath)
|
||||
return err
|
||||
}
|
||||
|
||||
func imageNameIndex(images []model.Image) map[string]string {
|
||||
index := make(map[string]string, len(images))
|
||||
for _, image := range images {
|
||||
index[image.ID] = image.Name
|
||||
}
|
||||
return index
|
||||
}
|
||||
|
||||
func vmImageLabel(imageID string, imageNames map[string]string) string {
|
||||
if name := strings.TrimSpace(imageNames[imageID]); name != "" {
|
||||
return name
|
||||
}
|
||||
return shortID(imageID)
|
||||
}
|
||||
|
||||
// printImageListTable renders images as an aligned table, one row per
// image, with the on-disk rootfs size and a relative created-at time.
func printImageListTable(out anyWriter, images []model.Image) error {
	w := tabwriter.NewWriter(out, 0, 8, 2, ' ', 0)
	if _, err := fmt.Fprintln(w, "ID\tNAME\tMANAGED\tROOTFS SIZE\tCREATED"); err != nil {
		return err
	}
	for _, image := range images {
		if _, err := fmt.Fprintf(
			w,
			"%s\t%s\t%t\t%s\t%s\n",
			shortID(image.ID),
			image.Name,
			image.Managed,
			rootfsSizeLabel(image.RootfsPath),
			relativeTime(image.CreatedAt),
		); err != nil {
			return err
		}
	}
	// Flush renders the buffered rows with the computed column widths.
	return w.Flush()
}
|
||||
|
||||
func rootfsSizeLabel(path string) string {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return "-"
|
||||
}
|
||||
if info.Size() <= 0 {
|
||||
return "0"
|
||||
}
|
||||
return model.FormatSizeBytes(info.Size())
|
||||
}
|
||||
|
||||
// -- kernel printers ------------------------------------------------
|
||||
|
||||
// printKernelListTable renders imported kernels as an aligned table;
// blank metadata fields render as "-".
func printKernelListTable(out anyWriter, entries []api.KernelEntry) error {
	w := tabwriter.NewWriter(out, 0, 8, 2, ' ', 0)
	if _, err := fmt.Fprintln(w, "NAME\tDISTRO\tARCH\tKERNEL\tIMPORTED"); err != nil {
		return err
	}
	for _, entry := range entries {
		if _, err := fmt.Fprintf(
			w,
			"%s\t%s\t%s\t%s\t%s\n",
			entry.Name,
			dashIfEmpty(entry.Distro),
			dashIfEmpty(entry.Arch),
			dashIfEmpty(entry.KernelVersion),
			dashIfEmpty(entry.ImportedAt),
		); err != nil {
			return err
		}
	}
	// Flush renders the buffered rows with the computed column widths.
	return w.Flush()
}
|
||||
|
||||
// printKernelCatalogTable renders catalog kernels as an aligned table.
// The STATE column shows "pulled" for locally present kernels and
// "available" otherwise; blank metadata fields render as "-".
func printKernelCatalogTable(out anyWriter, entries []api.KernelCatalogEntry) error {
	w := tabwriter.NewWriter(out, 0, 8, 2, ' ', 0)
	if _, err := fmt.Fprintln(w, "NAME\tDISTRO\tARCH\tKERNEL\tSIZE\tSTATE"); err != nil {
		return err
	}
	for _, entry := range entries {
		state := "available"
		if entry.Pulled {
			state = "pulled"
		}
		if _, err := fmt.Fprintf(
			w,
			"%s\t%s\t%s\t%s\t%s\t%s\n",
			entry.Name,
			dashIfEmpty(entry.Distro),
			dashIfEmpty(entry.Arch),
			dashIfEmpty(entry.KernelVersion),
			humanSize(entry.SizeBytes),
			state,
		); err != nil {
			return err
		}
	}
	// Flush renders the buffered rows with the computed column widths.
	return w.Flush()
}
|
||||
|
||||
// -- guest session printers -----------------------------------------
|
||||
|
||||
func printGuestSessionSummary(out anyWriter, session model.GuestSession) error {
|
||||
_, err := fmt.Fprintf(out, "%s\t%s\t%s\t%s\t%s\n", session.ID, session.Name, session.Status, session.Command, session.CWD)
|
||||
return err
|
||||
}
|
||||
|
||||
// printGuestSessionTable renders sessions as an aligned table; IDs are
// shortened and the ATTACH column shows yes/no for pipe-mode sessions.
func printGuestSessionTable(out io.Writer, sessions []model.GuestSession) error {
	tw := tabwriter.NewWriter(out, 0, 0, 2, ' ', 0)
	if _, err := fmt.Fprintln(tw, "ID\tNAME\tSTATUS\tATTACH\tCOMMAND\tCWD"); err != nil {
		return err
	}
	for _, session := range sessions {
		attach := "no"
		if session.Attachable {
			attach = "yes"
		}
		if _, err := fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\t%s\n", shortID(session.ID), session.Name, session.Status, attach, session.Command, session.CWD); err != nil {
			return err
		}
	}
	// Flush renders the buffered rows with the computed column widths.
	return tw.Flush()
}
|
||||
|
||||
// -- doctor printer -------------------------------------------------
|
||||
|
||||
// printDoctorReport writes each doctor check as "STATUS<tab>name",
// followed by one indented "- detail" bullet per detail string.
func printDoctorReport(out anyWriter, report system.Report) error {
	for _, check := range report.Checks {
		status := strings.ToUpper(string(check.Status))
		if _, err := fmt.Fprintf(out, "%s\t%s\n", status, check.Name); err != nil {
			return err
		}
		for _, detail := range check.Details {
			if _, err := fmt.Fprintf(out, " - %s\n", detail); err != nil {
				return err
			}
		}
	}
	return nil
}
|
||||
125
internal/cli/ssh.go
Normal file
125
internal/cli/ssh.go
Normal file
|
|
@ -0,0 +1,125 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"banger/internal/model"
|
||||
"banger/internal/paths"
|
||||
"banger/internal/system"
|
||||
"banger/internal/vsockagent"
|
||||
)
|
||||
|
||||
// runSSHSession executes ssh with the given args. On exit it decides
// whether to print the "vm is still running" reminder: we skip it if
// the caller asked (e.g. --rm is about to delete the VM), if the
// ctx is already done, or if the ssh error isn't the one that
// typically means "user disconnected cleanly".
func runSSHSession(ctx context.Context, socketPath, vmRef string, stdin io.Reader, stdout, stderr io.Writer, sshArgs []string, skipReminder bool) error {
	sshErr := sshExecFunc(ctx, stdin, stdout, stderr, sshArgs)
	if skipReminder || !shouldCheckSSHReminder(sshErr) || ctx.Err() != nil {
		return sshErr
	}
	// Fresh background context: the health probe must still run even
	// when the session's ctx has just been torn down.
	pingCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	health, err := vmHealthFunc(pingCtx, socketPath, vmRef)
	if err != nil {
		// Can't tell whether the VM is up; emit a warning, not a failure.
		_, _ = fmt.Fprintln(stderr, vsockagent.WarningMessage(vmRef, err))
		return sshErr
	}
	if health.Healthy {
		// Prefer the VM's reported name in the reminder; fall back to
		// whatever ref the user typed.
		name := health.Name
		if strings.TrimSpace(name) == "" {
			name = vmRef
		}
		_, _ = fmt.Fprintln(stderr, vsockagent.ReminderMessage(name))
	}
	// The original ssh result is always what the caller sees.
	return sshErr
}
|
||||
|
||||
func shouldCheckSSHReminder(err error) bool {
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
var exitErr *exec.ExitError
|
||||
if !errors.As(err, &exitErr) {
|
||||
return false
|
||||
}
|
||||
return exitErr.ExitCode() != 255
|
||||
}
|
||||
|
||||
// sshCommandArgs builds the argv for `ssh` invocations against a VM.
|
||||
// Host-key verification uses a banger-owned known_hosts file
|
||||
// populated by the daemon's first successful Go-SSH dial to each VM
|
||||
// (trust-on-first-use). `accept-new` means: accept-and-pin on first
|
||||
// contact; strict-verify afterwards. The user's own
|
||||
// ~/.ssh/known_hosts is never touched.
|
||||
func sshCommandArgs(cfg model.DaemonConfig, guestIP string, extra []string) ([]string, error) {
|
||||
if guestIP == "" {
|
||||
return nil, errors.New("vm has no guest IP")
|
||||
}
|
||||
args := []string{}
|
||||
args = append(args, "-F", "/dev/null")
|
||||
if cfg.SSHKeyPath != "" {
|
||||
args = append(args, "-i", cfg.SSHKeyPath)
|
||||
}
|
||||
knownHosts, khErr := bangerKnownHostsPath()
|
||||
args = append(
|
||||
args,
|
||||
"-o", "IdentitiesOnly=yes",
|
||||
"-o", "BatchMode=yes",
|
||||
"-o", "PreferredAuthentications=publickey",
|
||||
"-o", "PasswordAuthentication=no",
|
||||
"-o", "KbdInteractiveAuthentication=no",
|
||||
)
|
||||
if khErr == nil {
|
||||
args = append(args,
|
||||
"-o", "UserKnownHostsFile="+knownHosts,
|
||||
"-o", "StrictHostKeyChecking=accept-new",
|
||||
)
|
||||
} else {
|
||||
// If we can't resolve the banger path (unusual — paths.Resolve
|
||||
// basically can't fail), fall through to a hard-fail posture
|
||||
// rather than silently disabling verification.
|
||||
args = append(args,
|
||||
"-o", "StrictHostKeyChecking=yes",
|
||||
)
|
||||
}
|
||||
args = append(args, "root@"+guestIP)
|
||||
args = append(args, extra...)
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// bangerKnownHostsPath resolves the TOFU file the daemon writes into
|
||||
// and the CLI reads back. Both sides must agree on the path or the
|
||||
// pin doesn't round-trip.
|
||||
func bangerKnownHostsPath() (string, error) {
|
||||
layout, err := paths.Resolve()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return layout.KnownHostsPath, nil
|
||||
}
|
||||
|
||||
func validateSSHPrereqs(cfg model.DaemonConfig) error {
|
||||
checks := system.NewPreflight()
|
||||
checks.RequireCommand("ssh", "install openssh-client")
|
||||
if strings.TrimSpace(cfg.SSHKeyPath) != "" {
|
||||
checks.RequireFile(cfg.SSHKeyPath, "ssh private key", `set "ssh_key_path" or let banger create its default key`)
|
||||
}
|
||||
return checks.Err("ssh preflight failed")
|
||||
}
|
||||
|
||||
func validateVMRunPrereqs(cfg model.DaemonConfig) error {
|
||||
checks := system.NewPreflight()
|
||||
checks.RequireCommand("git", "install git")
|
||||
if strings.TrimSpace(cfg.SSHKeyPath) != "" {
|
||||
checks.RequireFile(cfg.SSHKeyPath, "ssh private key", `set "ssh_key_path" or let banger create its default key`)
|
||||
}
|
||||
return checks.Err("vm run preflight failed")
|
||||
}
|
||||
277
internal/cli/vm_create.go
Normal file
277
internal/cli/vm_create.go
Normal file
|
|
@ -0,0 +1,277 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"banger/internal/api"
|
||||
"banger/internal/config"
|
||||
"banger/internal/model"
|
||||
"banger/internal/paths"
|
||||
"banger/internal/system"
|
||||
)
|
||||
|
||||
// effectiveVMDefaults resolves the default VM sizing applied when
// --vcpu/--memory/--disk-size aren't given: config overrides win
// over host-derived heuristics, both fall back to baked-in
// constants. Called at command-build time so the cobra flag defaults
// reflect the resolved values.
func effectiveVMDefaults() model.VMDefaults {
	var override model.VMDefaultsOverride
	// Best effort: a failed path resolve or config load simply
	// leaves the override at its zero value.
	if layout, err := paths.Resolve(); err == nil {
		if cfg, err := config.Load(layout); err == nil {
			override = cfg.VMDefaults
		}
	}
	host, err := system.ReadHostResources()
	if err != nil {
		// Host resources unknown: pass zeros so the resolver falls
		// back to the baked-in constants.
		return model.ResolveVMDefaults(override, 0, 0)
	}
	return model.ResolveVMDefaults(override, host.CPUCount, host.TotalMemoryBytes)
}
|
||||
|
||||
// printVMSpecLine writes a one-line sizing summary to out. Always
// emitted (even non-TTY) so logs and CI output carry the numbers.
func printVMSpecLine(out io.Writer, params api.VMCreateParams) {
	// Each field falls back to the package default when unset.
	vcpu := model.DefaultVCPUCount
	if params.VCPUCount != nil {
		vcpu = *params.VCPUCount
	}
	memory := model.DefaultMemoryMiB
	if params.MemoryMiB != nil {
		memory = *params.MemoryMiB
	}
	diskBytes := int64(model.DefaultWorkDiskSize)
	if strings.TrimSpace(params.WorkDiskSize) != "" {
		// An unparseable size silently keeps the default here; this
		// line is display-only.
		if parsed, err := model.ParseSize(params.WorkDiskSize); err == nil {
			diskBytes = parsed
		}
	}
	_, _ = fmt.Fprintf(out, "spec: %d vcpu · %d MiB · %s disk\n",
		vcpu, memory, model.FormatSizeBytes(diskBytes))
}
|
||||
|
||||
// runVMCreate drives the create RPC + polls for progress. stderr
// gets the spec line up front and the progress renderer thereafter.
// On context cancel we cooperate with the daemon to cancel the
// in-flight op so it doesn't leak partially-created VM state.
func runVMCreate(ctx context.Context, socketPath string, stderr io.Writer, params api.VMCreateParams) (model.VMRecord, error) {
	printVMSpecLine(stderr, params)
	begin, err := vmCreateBeginFunc(ctx, socketPath, params)
	if err != nil {
		return model.VMRecord{}, err
	}
	renderer := newVMCreateProgressRenderer(stderr)
	renderer.render(begin.Operation)

	op := begin.Operation
	for {
		if op.Done {
			renderer.render(op)
			if op.Success && op.VM != nil {
				return *op.VM, nil
			}
			// Failure: prefer the daemon-reported error text, with a
			// generic fallback when it's blank.
			if strings.TrimSpace(op.Error) == "" {
				return model.VMRecord{}, errors.New("vm create failed")
			}
			return model.VMRecord{}, errors.New(op.Error)
		}

		// 200ms poll cadence. A cancelled ctx wins the race and asks
		// the daemon (on a fresh 1s context) to abort the in-flight
		// operation before we bail.
		select {
		case <-ctx.Done():
			cancelCtx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			_ = vmCreateCancelFunc(cancelCtx, socketPath, op.ID)
			return model.VMRecord{}, ctx.Err()
		case <-time.After(200 * time.Millisecond):
		}

		status, err := vmCreateStatusFunc(ctx, socketPath, op.ID)
		if err != nil {
			// Distinguish "our ctx died mid-RPC" (cancel daemon-side
			// too) from a genuine status RPC failure.
			if ctx.Err() != nil {
				cancelCtx, cancel := context.WithTimeout(context.Background(), time.Second)
				defer cancel()
				_ = vmCreateCancelFunc(cancelCtx, socketPath, op.ID)
				return model.VMRecord{}, ctx.Err()
			}
			return model.VMRecord{}, err
		}
		op = status.Operation
		renderer.render(op)
	}
}
|
||||
|
||||
type vmCreateProgressRenderer struct {
|
||||
out io.Writer
|
||||
enabled bool
|
||||
lastLine string
|
||||
}
|
||||
|
||||
func newVMCreateProgressRenderer(out io.Writer) *vmCreateProgressRenderer {
|
||||
return &vmCreateProgressRenderer{
|
||||
out: out,
|
||||
enabled: writerSupportsProgress(out),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *vmCreateProgressRenderer) render(op api.VMCreateOperation) {
|
||||
if r == nil || !r.enabled {
|
||||
return
|
||||
}
|
||||
line := formatVMCreateProgress(op)
|
||||
if line == "" || line == r.lastLine {
|
||||
return
|
||||
}
|
||||
r.lastLine = line
|
||||
_, _ = fmt.Fprintln(r.out, line)
|
||||
}
|
||||
|
||||
// writerSupportsProgress returns true only when out is a terminal.
|
||||
// Keeps stage lines + heartbeat dots out of piped / logged output
|
||||
// where they'd just be noise.
|
||||
func writerSupportsProgress(out io.Writer) bool {
|
||||
file, ok := out.(*os.File)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
info, err := file.Stat()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return info.Mode()&os.ModeCharDevice != 0
|
||||
}
|
||||
|
||||
// withHeartbeat runs fn while emitting a dot to stderr every 2
|
||||
// seconds so the user sees long-running RPCs (bundle downloads, etc.)
|
||||
// aren't wedged. No-op when stderr isn't a terminal, so piped or
|
||||
// logged output stays clean.
|
||||
func withHeartbeat(stderr io.Writer, label string, fn func() error) error {
|
||||
if !writerSupportsProgress(stderr) {
|
||||
return fn()
|
||||
}
|
||||
fmt.Fprintf(stderr, "[%s] ", label)
|
||||
stop := make(chan struct{})
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
ticker := time.NewTicker(2 * time.Second)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
return
|
||||
case <-ticker.C:
|
||||
fmt.Fprint(stderr, ".")
|
||||
}
|
||||
}
|
||||
}()
|
||||
err := fn()
|
||||
close(stop)
|
||||
<-done
|
||||
fmt.Fprintln(stderr)
|
||||
return err
|
||||
}
|
||||
|
||||
func formatVMCreateProgress(op api.VMCreateOperation) string {
|
||||
stage := strings.TrimSpace(op.Stage)
|
||||
detail := strings.TrimSpace(op.Detail)
|
||||
label := vmCreateStageLabel(stage)
|
||||
if label == "" && detail == "" {
|
||||
return ""
|
||||
}
|
||||
if label == "" {
|
||||
return "[vm create] " + detail
|
||||
}
|
||||
if detail == "" {
|
||||
return "[vm create] " + label
|
||||
}
|
||||
return "[vm create] " + label + ": " + detail
|
||||
}
|
||||
|
||||
// vmCreateStageLabel humanises the daemon-side stage IDs. Anything
// unknown falls through to replacing underscores with spaces so new
// stages still render meaningfully without a code change.
func vmCreateStageLabel(stage string) string {
	// Trim once so both the known-stage comparisons and the unknown-
	// stage fallback operate on the same value (previously the
	// fallback returned the untrimmed input, leaking stray
	// whitespace into the rendered label).
	stage = strings.TrimSpace(stage)
	switch stage {
	case "queued":
		return "queued"
	case "resolve_image":
		return "resolving image"
	case "reserve_vm":
		return "allocating vm"
	case "preflight":
		return "checking host prerequisites"
	case "prepare_rootfs":
		return "preparing root filesystem"
	case "prepare_host_features":
		return "preparing host features"
	case "prepare_work_disk":
		return "preparing work disk"
	case "boot_firecracker":
		return "starting firecracker"
	case "wait_vsock_agent":
		return "waiting for vsock agent"
	case "wait_guest_ready":
		return "waiting for guest services"
	case "apply_dns":
		return "publishing dns"
	case "apply_nat":
		return "configuring nat"
	case "finalize":
		return "finalizing"
	case "ready":
		return "ready"
	default:
		return strings.ReplaceAll(stage, "_", " ")
	}
}
|
||||
|
||||
// validatePositiveSetting rejects zero or negative values for an
// integer setting, naming the offending setting in the error.
func validatePositiveSetting(label string, value int) error {
	if value > 0 {
		return nil
	}
	return fmt.Errorf("%s must be a positive integer", label)
}
|
||||
|
||||
// shortID is a small display helper used across the printers: it
// truncates an ID to its first 12 characters; shorter IDs pass
// through unchanged.
func shortID(id string) string {
	if len(id) > 12 {
		return id[:12]
	}
	return id
}
|
||||
|
||||
func relativeTime(t time.Time) string {
|
||||
if t.IsZero() {
|
||||
return "-"
|
||||
}
|
||||
delta := time.Since(t)
|
||||
switch {
|
||||
case delta < 30*time.Second:
|
||||
return "moments ago"
|
||||
case delta < time.Minute:
|
||||
return fmt.Sprintf("%d seconds ago", int(delta.Seconds()))
|
||||
case delta < 2*time.Minute:
|
||||
return "1 minute ago"
|
||||
case delta < time.Hour:
|
||||
return fmt.Sprintf("%d minutes ago", int(delta.Minutes()))
|
||||
case delta < 2*time.Hour:
|
||||
return "1 hour ago"
|
||||
case delta < 24*time.Hour:
|
||||
return fmt.Sprintf("%d hours ago", int(delta.Hours()))
|
||||
case delta < 48*time.Hour:
|
||||
return "1 day ago"
|
||||
case delta < 7*24*time.Hour:
|
||||
return fmt.Sprintf("%d days ago", int(delta.Hours()/24))
|
||||
case delta < 14*24*time.Hour:
|
||||
return "1 week ago"
|
||||
default:
|
||||
return fmt.Sprintf("%d weeks ago", int(delta.Hours()/(24*7)))
|
||||
}
|
||||
}
|
||||
410
internal/cli/vm_run.go
Normal file
410
internal/cli/vm_run.go
Normal file
|
|
@ -0,0 +1,410 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"banger/internal/api"
|
||||
"banger/internal/daemon/workspace"
|
||||
"banger/internal/model"
|
||||
"banger/internal/toolingplan"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// vmRunGuestClient is the narrow guest-SSH surface vm run needs. The
// daemon's guest-SSH package returns a value that satisfies this
// interface directly; we restate it here so tests can plug in fakes
// without pulling the full daemon in.
type vmRunGuestClient interface {
	// Close releases the underlying connection.
	Close() error
	// UploadFile writes data to remotePath in the guest with the
	// given file mode; transfer output goes to logWriter.
	UploadFile(ctx context.Context, remotePath string, mode os.FileMode, data []byte, logWriter io.Writer) error
	// RunScript executes script in the guest; output goes to logWriter.
	RunScript(ctx context.Context, script string, logWriter io.Writer) error
	// StreamTar ships sourceDir to the guest-side remoteCommand
	// (tar-based transfer — see the daemon's guest-SSH package).
	StreamTar(ctx context.Context, sourceDir, remoteCommand string, logWriter io.Writer) error
	// StreamTarEntries mirrors StreamTar for an explicit list of
	// entries within sourceDir.
	StreamTarEntries(ctx context.Context, sourceDir string, entries []string, remoteCommand string, logWriter io.Writer) error
}
|
||||
|
||||
// vmRunRepo is the CLI-local view of the workspace argument to
// `vm run`: an absolute source path that passed preflight, plus the
// two branch flags. Everything else the flow needs (RepoRoot,
// RepoName, HEAD commit, etc.) comes back from the workspace.prepare
// RPC, which does the full git inspection daemon-side.
type vmRunRepo struct {
	// sourcePath is the absolute, preflight-validated workspace path.
	sourcePath string
	// branchName is the --branch flag value ("" when unset).
	branchName string
	// fromRef is the --from flag value; only meaningful when
	// branchName is set (runVMRun scrubs it otherwise).
	fromRef string
}
|
||||
|
||||
// vmRunToolingInstallTimeoutSeconds bounds each deterministic tool
// install in the generated guest bootstrap script (it becomes the
// INSTALL_TIMEOUT_SECS passed to run_bounded_best_effort).
const vmRunToolingInstallTimeoutSeconds = 120

// vmRunSSHTimeout bounds how long `vm run` waits for guest ssh after
// the vsock agent is ready. vsock readiness already means systemd
// should be up within seconds; a minute plus change is generous
// headroom for a slow first boot while still short enough that a
// wedged sshd surfaces promptly instead of hanging forever. Var, not
// const, so tests can shrink it.
var vmRunSSHTimeout = 90 * time.Second
|
||||
|
||||
// ExitCodeError wraps a remote command's exit status so the CLI's main()
// can propagate it verbatim. Only errors explicitly wrapped in this
// type get forwarded as process exit codes — plain *exec.ExitError
// values (from unrelated subprocesses like mkfs.ext4) must still
// surface as regular errors so the user sees a message.
type ExitCodeError struct {
	// Code is the remote command's exit status to propagate.
	Code int
}

// Error implements error with the conventional "exit status N" text.
func (e ExitCodeError) Error() string {
	return fmt.Sprintf("exit status %d", e.Code)
}
|
||||
|
||||
// vmRunPreflightRepo validates a vm run workspace path BEFORE the VM
// is created, so bad paths fail fast instead of leaving the user
// with an orphaned VM. The check is intentionally minimal: the
// daemon's PrepareVMWorkspace does a full git inspection (branch,
// HEAD, identity, overlay) and returns everything the tooling
// harness needs, so duplicating the heavy lifting here just doubles
// the I/O. We only enforce what the user can fix locally before
// banger commits to creating a VM:
//
// - the path exists and is a directory,
// - it sits inside a non-bare git repository,
// - the repository has no submodules (unsupported in the shallow
// overlay mode vm run uses).
//
// Returns the resolved absolute source path on success.
func vmRunPreflightRepo(ctx context.Context, rawPath string) (string, error) {
	// Empty path means "current working directory".
	if strings.TrimSpace(rawPath) == "" {
		wd, err := cwdFunc()
		if err != nil {
			return "", err
		}
		rawPath = wd
	}
	sourcePath, err := workspace.ResolveSourcePath(rawPath)
	if err != nil {
		return "", err
	}
	repoRoot, err := workspace.GitTrimmedOutput(ctx, sourcePath, "rev-parse", "--show-toplevel")
	if err != nil {
		return "", fmt.Errorf("%s is not inside a git repository", sourcePath)
	}
	isBare, err := workspace.GitTrimmedOutput(ctx, repoRoot, "rev-parse", "--is-bare-repository")
	if err != nil {
		return "", fmt.Errorf("inspect git repository %s: %w", repoRoot, err)
	}
	if isBare == "true" {
		return "", fmt.Errorf("vm run requires a non-bare git repository: %s", repoRoot)
	}
	submodules, err := workspace.ListSubmodules(ctx, repoRoot)
	if err != nil {
		return "", err
	}
	if len(submodules) > 0 {
		return "", fmt.Errorf("vm run does not support git submodules in %s (%s); use `vm create` + `vm workspace prepare --mode full_copy`", repoRoot, strings.Join(submodules, ", "))
	}
	return sourcePath, nil
}
|
||||
|
||||
// splitVMRunArgs partitions cobra positional args into the optional path
|
||||
// argument and the trailing command (everything after a `--` separator).
|
||||
// The path slice may contain 0..1 entries; the command slice may be empty.
|
||||
func splitVMRunArgs(cmd *cobra.Command, args []string) (pathArgs, commandArgs []string) {
|
||||
dash := cmd.ArgsLenAtDash()
|
||||
if dash < 0 {
|
||||
return args, nil
|
||||
}
|
||||
if dash > len(args) {
|
||||
dash = len(args)
|
||||
}
|
||||
return args[:dash], args[dash:]
|
||||
}
|
||||
|
||||
// runVMRun orchestrates the full `vm run` flow: create the VM, wait
// for guest ssh, optionally materialise a workspace and kick off the
// tooling bootstrap, then either attach interactively or run the
// user's command and propagate its exit status.
func runVMRun(ctx context.Context, socketPath string, cfg model.DaemonConfig, stdin io.Reader, stdout, stderr io.Writer, params api.VMCreateParams, repo *vmRunRepo, command []string, removeOnExit bool) error {
	progress := newVMRunProgressRenderer(stderr)
	vm, err := runVMCreate(ctx, socketPath, stderr, params)
	if err != nil {
		return err
	}
	// Refer to the VM by name when it has one, short ID otherwise.
	vmRef := strings.TrimSpace(vm.Name)
	if vmRef == "" {
		vmRef = shortID(vm.ID)
	}
	// --rm cleanup is wired AFTER ssh is confirmed. An ssh-wait
	// timeout leaves the VM alive for `vm logs` inspection (our
	// error message tells the user that); the cleanup only fires
	// once the session phase runs.
	shouldRemove := false
	if removeOnExit {
		defer func() {
			if !shouldRemove {
				return
			}
			// Use a fresh context so Ctrl-C during the session
			// doesn't abort the delete RPC.
			cleanupCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			defer cancel()
			if err := vmDeleteFunc(cleanupCtx, socketPath, vmRef); err != nil {
				printVMRunWarning(stderr, fmt.Sprintf("--rm cleanup failed: %v (leaked vm %q; delete manually)", err, vmRef))
			}
		}()
	}
	sshAddress := net.JoinHostPort(vm.Runtime.GuestIP, "22")
	progress.render("waiting for guest ssh")
	sshCtx, cancelSSH := context.WithTimeout(ctx, vmRunSSHTimeout)
	if err := guestWaitForSSHFunc(sshCtx, sshAddress, cfg.SSHKeyPath, 250*time.Millisecond); err != nil {
		cancelSSH()
		// Surface parent-context cancellation (Ctrl-C, caller
		// timeout) as-is. Only the guest-side timeout needs the
		// actionable hint.
		if errors.Is(ctx.Err(), context.Canceled) || errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return fmt.Errorf("vm %q: %w", vmRef, ctx.Err())
		}
		return fmt.Errorf(
			"vm %q is running but guest ssh did not come up within %s. "+
				"sshd is the likely suspect — inspect the guest console with "+
				"`banger vm logs %s` (look for `Failed to start ssh.service`). "+
				"The VM is still alive; leave it for inspection or remove with `banger vm delete %s`. "+
				"underlying error: %w",
			vmRef, vmRunSSHTimeout, vmRef, vmRef, err,
		)
	}
	cancelSSH()
	// ssh is confirmed: from here on, --rm cleanup may fire.
	shouldRemove = removeOnExit
	if repo != nil {
		progress.render("preparing guest workspace")
		// --from is only meaningful paired with --branch; the daemon
		// rejects "from without branch" outright. Our flag default is
		// "HEAD" (useful only when --branch is set), so scrub it when
		// branch is empty to avoid a false "workspace from requires
		// branch" error.
		fromRef := ""
		if strings.TrimSpace(repo.branchName) != "" {
			fromRef = repo.fromRef
		}
		prepared, err := vmWorkspacePrepareFunc(ctx, socketPath, api.VMWorkspacePrepareParams{
			IDOrName:   vmRef,
			SourcePath: repo.sourcePath,
			GuestPath:  vmRunGuestDir(),
			Branch:     repo.branchName,
			From:       fromRef,
			Mode:       string(model.WorkspacePrepareModeShallowOverlay),
		})
		if err != nil {
			return fmt.Errorf("vm %q is running but workspace prepare failed: %w", vmRef, err)
		}
		// The prepare RPC already did the full git inspection on the
		// daemon side; grab what the tooling harness needs from its
		// result instead of re-inspecting here.
		if len(command) == 0 {
			client, err := guestDialFunc(ctx, sshAddress, cfg.SSHKeyPath)
			if err != nil {
				return fmt.Errorf("vm %q is running but guest ssh is unavailable: %w", vmRef, err)
			}
			// Bootstrap failure is a warning, not a hard stop: the
			// interactive session is still useful without tooling.
			if err := startVMRunToolingHarness(ctx, client, prepared.Workspace.RepoRoot, prepared.Workspace.RepoName, progress); err != nil {
				printVMRunWarning(stderr, fmt.Sprintf("guest tooling bootstrap start failed: %v", err))
			}
			_ = client.Close()
		}
	}
	sshArgs, err := sshCommandArgs(cfg, vm.Runtime.GuestIP, command)
	if err != nil {
		return fmt.Errorf("vm %q is running but ssh args could not be built: %w", vmRef, err)
	}
	if len(command) > 0 {
		progress.render("running command in guest")
		if err := sshExecFunc(ctx, stdin, stdout, stderr, sshArgs); err != nil {
			// Propagate the remote command's exit status verbatim via
			// ExitCodeError; other errors surface as messages.
			var exitErr *exec.ExitError
			if errors.As(err, &exitErr) {
				return ExitCodeError{Code: exitErr.ExitCode()}
			}
			return err
		}
		return nil
	}
	progress.render("attaching to guest")
	return runSSHSession(ctx, socketPath, vmRef, stdin, stdout, stderr, sshArgs, removeOnExit)
}
|
||||
|
||||
// vmRunGuestDir returns the fixed guest-side directory where
// `vm run` materialises the workspace.
func vmRunGuestDir() string {
	return "/root/repo"
}
|
||||
|
||||
// vmRunToolingHarnessPath returns the guest-side upload location of
// the per-repo tooling bootstrap script.
func vmRunToolingHarnessPath(repoName string) string {
	name := "banger-vm-run-tooling-" + repoName + ".sh"
	return filepath.ToSlash(filepath.Join("/tmp", name))
}
|
||||
|
||||
// vmRunToolingHarnessLogPath returns the guest-side log file the
// per-repo tooling bootstrap writes to.
func vmRunToolingHarnessLogPath(repoName string) string {
	name := "vm-run-tooling-" + repoName + ".log"
	return filepath.ToSlash(filepath.Join("/root/.cache/banger", name))
}
|
||||
|
||||
// startVMRunToolingHarness uploads + launches the mise bootstrap
// script inside the guest. repoRoot / repoName both come from the
// daemon's workspace.prepare RPC response — the CLI no longer does
// its own git inspection.
func startVMRunToolingHarness(ctx context.Context, client vmRunGuestClient, repoRoot, repoName string, progress *vmRunProgressRenderer) error {
	if progress != nil {
		progress.render("starting guest tooling bootstrap")
	}
	plan := buildVMRunToolingPlanFunc(ctx, repoRoot)
	// Upload the generated script, capturing transfer output so a
	// failure message can include it.
	var uploadLog bytes.Buffer
	if err := client.UploadFile(ctx, vmRunToolingHarnessPath(repoName), 0o755, []byte(vmRunToolingHarnessScript(plan)), &uploadLog); err != nil {
		return formatVMRunStepError("upload guest tooling bootstrap", err, uploadLog.String())
	}
	// Launch detached; the harness writes to its own guest-side log.
	var launchLog bytes.Buffer
	if err := client.RunScript(ctx, vmRunToolingHarnessLaunchScript(repoName), &launchLog); err != nil {
		return formatVMRunStepError("launch guest tooling bootstrap", err, launchLog.String())
	}
	if progress != nil {
		progress.render("guest tooling log: " + vmRunToolingHarnessLogPath(repoName))
	}
	return nil
}
|
||||
|
||||
// vmRunToolingHarnessScript renders the best-effort guest bootstrap
// script for plan: it sources the mise environment, runs
// `mise install` when the repo declares tools, pins each
// deterministic plan step (each bounded by
// vmRunToolingInstallTimeoutSeconds), and logs skips and failures
// instead of aborting.
func vmRunToolingHarnessScript(plan toolingplan.Plan) string {
	var script strings.Builder
	script.WriteString("set -uo pipefail\n")
	fmt.Fprintf(&script, "DIR=%s\n", shellQuote(vmRunGuestDir()))
	script.WriteString("export PATH=/usr/local/bin:/root/.local/share/mise/shims:$PATH\n")
	script.WriteString("if [ -f /etc/profile.d/mise.sh ]; then . /etc/profile.d/mise.sh || true; fi\n")
	script.WriteString("log() { printf '%s\\n' \"$*\"; }\n")
	// run_best_effort: run a command, log a non-zero exit, always
	// return success so the bootstrap continues.
	script.WriteString("run_best_effort() {\n")
	script.WriteString("  \"$@\"\n")
	script.WriteString("  rc=$?\n")
	script.WriteString("  if [ \"$rc\" -ne 0 ]; then\n")
	script.WriteString("    log \"command failed ($rc): $*\"\n")
	script.WriteString("  fi\n")
	script.WriteString("  return 0\n")
	script.WriteString("}\n")
	// run_bounded_best_effort: like run_best_effort, but a watchdog
	// subshell TERMs (then KILLs) the command and its children after
	// the given timeout; a marker file records that the timeout path
	// fired so the normal failure log is suppressed.
	script.WriteString("run_bounded_best_effort() {\n")
	script.WriteString("  timeout_secs=\"$1\"\n")
	script.WriteString("  shift\n")
	script.WriteString("  timeout_marker=\"$(mktemp)\"\n")
	script.WriteString("  rm -f \"$timeout_marker\"\n")
	script.WriteString("  \"$@\" &\n")
	script.WriteString("  cmd_pid=$!\n")
	script.WriteString("  (\n")
	script.WriteString("    sleep \"$timeout_secs\"\n")
	script.WriteString("    if kill -0 \"$cmd_pid\" 2>/dev/null; then\n")
	script.WriteString("      : >\"$timeout_marker\"\n")
	script.WriteString("      log \"command timed out after ${timeout_secs}s: $*\"\n")
	script.WriteString("      kill -TERM \"$cmd_pid\" 2>/dev/null || true\n")
	script.WriteString("      if command -v pkill >/dev/null 2>&1; then pkill -TERM -P \"$cmd_pid\" 2>/dev/null || true; fi\n")
	script.WriteString("      sleep 2\n")
	script.WriteString("      kill -KILL \"$cmd_pid\" 2>/dev/null || true\n")
	script.WriteString("      if command -v pkill >/dev/null 2>&1; then pkill -KILL -P \"$cmd_pid\" 2>/dev/null || true; fi\n")
	script.WriteString("    fi\n")
	script.WriteString("  ) &\n")
	script.WriteString("  watchdog_pid=$!\n")
	script.WriteString("  wait \"$cmd_pid\"\n")
	script.WriteString("  rc=$?\n")
	script.WriteString("  kill \"$watchdog_pid\" 2>/dev/null || true\n")
	script.WriteString("  wait \"$watchdog_pid\" 2>/dev/null || true\n")
	script.WriteString("  if [ -f \"$timeout_marker\" ]; then\n")
	script.WriteString("    rm -f \"$timeout_marker\"\n")
	script.WriteString("    return 0\n")
	script.WriteString("  fi\n")
	script.WriteString("  rm -f \"$timeout_marker\"\n")
	script.WriteString("  if [ \"$rc\" -ne 0 ]; then\n")
	script.WriteString("    log \"command failed ($rc): $*\"\n")
	script.WriteString("  fi\n")
	script.WriteString("  return 0\n")
	script.WriteString("}\n")
	// Missing repo dir or missing mise both exit 0: the bootstrap is
	// strictly best-effort.
	script.WriteString("cd \"$DIR\" || { log \"missing repo directory: $DIR\"; exit 0; }\n")
	script.WriteString("MISE_BIN=\"$(command -v mise || true)\"\n")
	script.WriteString("if [ -z \"$MISE_BIN\" ]; then log \"mise not found; skipping guest tooling bootstrap\"; exit 0; fi\n")
	script.WriteString("log \"starting guest tooling bootstrap in $DIR\"\n")
	if len(plan.RepoManagedTools) > 0 {
		fmt.Fprintf(&script, "log %s\n", shellQuote("repo-managed mise tools: "+strings.Join(plan.RepoManagedTools, ", ")))
	}
	script.WriteString("if [ -f .mise.toml ] || [ -f .tool-versions ]; then\n")
	script.WriteString("  log \"running mise install from repo declarations\"\n")
	script.WriteString("  run_best_effort \"$MISE_BIN\" install\n")
	script.WriteString("fi\n")
	fmt.Fprintf(&script, "INSTALL_TIMEOUT_SECS=%d\n", vmRunToolingInstallTimeoutSeconds)
	for _, step := range plan.Steps {
		stepLabel := fmt.Sprintf("deterministic install: %s@%s (%s)", step.Tool, step.Version, step.Source)
		fmt.Fprintf(&script, "log %s\n", shellQuote(stepLabel))
		fmt.Fprintf(&script, "run_bounded_best_effort \"$INSTALL_TIMEOUT_SECS\" \"$MISE_BIN\" use -g --pin %s\n", shellQuote(step.Tool+"@"+step.Version))
	}
	for _, skip := range plan.Skips {
		skipLabel := fmt.Sprintf("deterministic skip: %s (%s)", skip.Target, skip.Reason)
		fmt.Fprintf(&script, "log %s\n", shellQuote(skipLabel))
	}
	if len(plan.Steps) > 0 {
		// One reshim after any pins so newly installed tools resolve.
		script.WriteString("run_best_effort \"$MISE_BIN\" reshim\n")
	}
	script.WriteString("log \"guest tooling bootstrap finished\"\n")
	return script.String()
}
|
||||
|
||||
// vmRunToolingHarnessLaunchScript returns a small script that starts
// the uploaded harness detached (nohup, backgrounded, disowned) with
// stdout/stderr redirected to the per-repo log file.
func vmRunToolingHarnessLaunchScript(repoName string) string {
	var script strings.Builder
	script.WriteString("set -euo pipefail\n")
	fmt.Fprintf(&script, "HELPER=%s\n", shellQuote(vmRunToolingHarnessPath(repoName)))
	fmt.Fprintf(&script, "LOG=%s\n", shellQuote(vmRunToolingHarnessLogPath(repoName)))
	script.WriteString("mkdir -p \"$(dirname \"$LOG\")\"\n")
	script.WriteString("nohup bash \"$HELPER\" >\"$LOG\" 2>&1 </dev/null &\n")
	script.WriteString("disown || true\n")
	return script.String()
}
|
||||
|
||||
func formatVMRunStepError(action string, err error, log string) error {
|
||||
log = strings.TrimSpace(log)
|
||||
if log == "" {
|
||||
return fmt.Errorf("%s: %w", action, err)
|
||||
}
|
||||
return fmt.Errorf("%s: %w: %s", action, err, log)
|
||||
}
|
||||
|
||||
type vmRunProgressRenderer struct {
|
||||
out io.Writer
|
||||
enabled bool
|
||||
lastLine string
|
||||
}
|
||||
|
||||
func newVMRunProgressRenderer(out io.Writer) *vmRunProgressRenderer {
|
||||
return &vmRunProgressRenderer{
|
||||
out: out,
|
||||
enabled: out != nil,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *vmRunProgressRenderer) render(detail string) {
|
||||
if r == nil || !r.enabled {
|
||||
return
|
||||
}
|
||||
line := formatVMRunProgress(detail)
|
||||
if line == "" || line == r.lastLine {
|
||||
return
|
||||
}
|
||||
r.lastLine = line
|
||||
_, _ = fmt.Fprintln(r.out, line)
|
||||
}
|
||||
|
||||
// formatVMRunProgress returns the "[vm run] …" line for detail, or
// "" when detail is blank after trimming.
func formatVMRunProgress(detail string) string {
	trimmed := strings.TrimSpace(detail)
	if trimmed == "" {
		return ""
	}
	return "[vm run] " + trimmed
}
|
||||
|
||||
func printVMRunWarning(out io.Writer, detail string) {
|
||||
detail = strings.TrimSpace(detail)
|
||||
if out == nil || detail == "" {
|
||||
return
|
||||
}
|
||||
_, _ = fmt.Fprintln(out, "[vm run] warning: "+detail)
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue