Add experimental Void guest workflow and vsock agent
Make iterating on a Firecracker-friendly Void guest practical without replacing the Debian default image path. Add local Void rootfs build/register/verify plumbing, a language-agnostic dev package baseline, and guest SSH/work-disk hardening so new images use the runtime bundle key, keep a normal root bash environment, and repair stale nested /root layouts on restart. Replace the guest PING/PONG responder with an HTTP /healthz agent over vsock, rename the runtime bundle and config surface from ping helper to agent while still accepting the legacy keys, and route the post-SSH reminder through the new vm.health path. Validated with GOCACHE=/tmp/banger-gocache go test ./..., make build, bash -n customize.sh make-rootfs-void.sh, and git diff --check.
This commit is contained in:
parent
c8d9a122f9
commit
3ed78fdcfc
42 changed files with 2222 additions and 388 deletions
|
|
@@ -191,7 +191,10 @@ func (workDiskCapability) ContributeMachine(cfg *firecracker.MachineConfig, vm m
|
|||
}
|
||||
|
||||
func (workDiskCapability) PrepareHost(ctx context.Context, d *Daemon, vm *model.VMRecord, image model.Image) error {
|
||||
return d.ensureWorkDisk(ctx, vm, image)
|
||||
if err := d.ensureWorkDisk(ctx, vm, image); err != nil {
|
||||
return err
|
||||
}
|
||||
return d.ensureAuthorizedKeyOnWorkDisk(ctx, vm)
|
||||
}
|
||||
|
||||
func (workDiskCapability) AddDoctorChecks(_ context.Context, d *Daemon, report *system.Report) {
|
||||
|
|
|
|||
|
|
@@ -331,6 +331,13 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
|
|||
return rpc.NewError("not_running", fmt.Sprintf("vm %s is not running", vm.Name))
|
||||
}
|
||||
return marshalResultOrError(api.VMSSHResult{Name: vm.Name, GuestIP: vm.Runtime.GuestIP}, nil)
|
||||
case "vm.health":
|
||||
params, err := rpc.DecodeParams[api.VMRefParams](req)
|
||||
if err != nil {
|
||||
return rpc.NewError("bad_request", err.Error())
|
||||
}
|
||||
result, err := d.HealthVM(ctx, params.IDOrName)
|
||||
return marshalResultOrError(result, err)
|
||||
case "vm.ping":
|
||||
params, err := rpc.DecodeParams[api.VMRefParams](req)
|
||||
if err != nil {
|
||||
|
|
@@ -355,6 +362,13 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
|
|||
}
|
||||
image, err := d.BuildImage(ctx, params)
|
||||
return marshalResultOrError(api.ImageShowResult{Image: image}, err)
|
||||
case "image.register":
|
||||
params, err := rpc.DecodeParams[api.ImageRegisterParams](req)
|
||||
if err != nil {
|
||||
return rpc.NewError("bad_request", err.Error())
|
||||
}
|
||||
image, err := d.RegisterImage(ctx, params)
|
||||
return marshalResultOrError(api.ImageShowResult{Image: image}, err)
|
||||
case "image.delete":
|
||||
params, err := rpc.DecodeParams[api.ImageRefParams](req)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@@ -246,6 +246,128 @@ func TestEnsureDefaultImageSkipsRewriteWhenCurrentArtifactsMissing(t *testing.T)
|
|||
}
|
||||
}
|
||||
|
||||
func TestRegisterImageCreatesUnmanagedImage(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
rootfs, kernel, initrd, modulesDir, _ := writeDefaultImageArtifacts(t, dir)
|
||||
workSeed := filepath.Join(dir, "rootfs-void.work-seed.ext4")
|
||||
packages := filepath.Join(dir, "packages.void")
|
||||
if err := os.WriteFile(workSeed, []byte("seed"), 0o644); err != nil {
|
||||
t.Fatalf("WriteFile(workSeed): %v", err)
|
||||
}
|
||||
if err := os.WriteFile(packages, []byte("base-minimal\nopenssh\n"), 0o644); err != nil {
|
||||
t.Fatalf("WriteFile(packages): %v", err)
|
||||
}
|
||||
db := openDefaultImageStore(t, dir)
|
||||
d := &Daemon{
|
||||
config: model.DaemonConfig{
|
||||
DefaultKernel: kernel,
|
||||
DefaultInitrd: initrd,
|
||||
DefaultModulesDir: modulesDir,
|
||||
},
|
||||
store: db,
|
||||
}
|
||||
|
||||
image, err := d.RegisterImage(context.Background(), api.ImageRegisterParams{
|
||||
Name: "void-exp",
|
||||
RootfsPath: rootfs,
|
||||
WorkSeedPath: workSeed,
|
||||
PackagesPath: packages,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("RegisterImage: %v", err)
|
||||
}
|
||||
if image.Managed {
|
||||
t.Fatal("registered image should be unmanaged")
|
||||
}
|
||||
if image.Name != "void-exp" || image.RootfsPath != rootfs || image.WorkSeedPath != workSeed || image.KernelPath != kernel {
|
||||
t.Fatalf("registered image = %+v", image)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegisterImageUpdatesExistingUnmanagedImageInPlace(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
_, kernel, initrd, modulesDir, _ := writeDefaultImageArtifacts(t, dir)
|
||||
newRootfs := filepath.Join(dir, "rootfs-void-next.ext4")
|
||||
newWorkSeed := filepath.Join(dir, "rootfs-void-next.work-seed.ext4")
|
||||
packages := filepath.Join(dir, "packages.void")
|
||||
for _, path := range []string{newRootfs, newWorkSeed} {
|
||||
if err := os.WriteFile(path, []byte("next"), 0o644); err != nil {
|
||||
t.Fatalf("WriteFile(%s): %v", path, err)
|
||||
}
|
||||
}
|
||||
if err := os.WriteFile(packages, []byte("base-minimal\n"), 0o644); err != nil {
|
||||
t.Fatalf("WriteFile(packages): %v", err)
|
||||
}
|
||||
db := openDefaultImageStore(t, dir)
|
||||
now := time.Date(2026, time.March, 16, 12, 0, 0, 0, time.UTC)
|
||||
existing := model.Image{
|
||||
ID: "void-image-id",
|
||||
Name: "void-exp",
|
||||
Managed: false,
|
||||
RootfsPath: filepath.Join(dir, "old-rootfs.ext4"),
|
||||
KernelPath: kernel,
|
||||
InitrdPath: initrd,
|
||||
ModulesDir: modulesDir,
|
||||
PackagesPath: packages,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
if err := db.UpsertImage(context.Background(), existing); err != nil {
|
||||
t.Fatalf("UpsertImage: %v", err)
|
||||
}
|
||||
d := &Daemon{
|
||||
config: model.DaemonConfig{
|
||||
DefaultKernel: kernel,
|
||||
DefaultInitrd: initrd,
|
||||
DefaultModulesDir: modulesDir,
|
||||
},
|
||||
store: db,
|
||||
}
|
||||
|
||||
image, err := d.RegisterImage(context.Background(), api.ImageRegisterParams{
|
||||
Name: "void-exp",
|
||||
RootfsPath: newRootfs,
|
||||
WorkSeedPath: newWorkSeed,
|
||||
PackagesPath: packages,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("RegisterImage: %v", err)
|
||||
}
|
||||
if image.ID != existing.ID || !image.CreatedAt.Equal(existing.CreatedAt) {
|
||||
t.Fatalf("updated image identity changed: %+v", image)
|
||||
}
|
||||
if image.RootfsPath != newRootfs || image.WorkSeedPath != newWorkSeed {
|
||||
t.Fatalf("updated image paths not applied: %+v", image)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegisterImageRejectsManagedOverwrite(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
rootfs, kernel, _, _, _ := writeDefaultImageArtifacts(t, dir)
|
||||
db := openDefaultImageStore(t, dir)
|
||||
now := time.Date(2026, time.March, 16, 12, 0, 0, 0, time.UTC)
|
||||
if err := db.UpsertImage(context.Background(), model.Image{
|
||||
ID: "managed-id",
|
||||
Name: "void-exp",
|
||||
Managed: true,
|
||||
RootfsPath: rootfs,
|
||||
KernelPath: kernel,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}); err != nil {
|
||||
t.Fatalf("UpsertImage: %v", err)
|
||||
}
|
||||
d := &Daemon{config: model.DaemonConfig{DefaultKernel: kernel}, store: db}
|
||||
|
||||
_, err := d.RegisterImage(context.Background(), api.ImageRegisterParams{
|
||||
Name: "void-exp",
|
||||
RootfsPath: rootfs,
|
||||
})
|
||||
if err == nil || !strings.Contains(err.Error(), "cannot be updated via register") {
|
||||
t.Fatalf("RegisterImage(managed) error = %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func openDefaultImageStore(t *testing.T, dir string) *store.Store {
|
||||
t.Helper()
|
||||
db, err := store.Open(filepath.Join(dir, "state.db"))
|
||||
|
|
|
|||
|
|
@@ -33,7 +33,7 @@ func (d *Daemon) doctorReport(ctx context.Context) system.Report {
|
|||
|
||||
report.AddPreflight("runtime bundle", d.runtimeBundleChecks(), runtimeBundleStatus(d.config))
|
||||
report.AddPreflight("core vm lifecycle", d.coreVMLifecycleChecks(), "required host tools available")
|
||||
report.AddPreflight("vsock ssh reminder", d.vsockChecks(), "vsock reminder prerequisites available")
|
||||
report.AddPreflight("vsock guest agent", d.vsockChecks(), "vsock agent prerequisites available")
|
||||
d.addCapabilityDoctorChecks(ctx, &report)
|
||||
report.AddPreflight("image build", d.imageBuildChecks(ctx), "image build prerequisites available")
|
||||
|
||||
|
|
@@ -44,8 +44,8 @@ func (d *Daemon) runtimeBundleChecks() *system.Preflight {
|
|||
checks := system.NewPreflight()
|
||||
hint := paths.RuntimeBundleHint()
|
||||
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
|
||||
checks.RequireFile(d.config.SSHKeyPath, "ssh private key", `set "ssh_key_path" or refresh the runtime bundle`)
|
||||
checks.RequireExecutable(d.config.VSockPingHelperPath, "vsock ping helper", `run 'make build' or refresh the runtime bundle`)
|
||||
checks.RequireFile(d.config.SSHKeyPath, "runtime ssh private key", `refresh the runtime bundle`)
|
||||
checks.RequireExecutable(d.config.VSockAgentPath, "vsock agent", `run 'make build' or refresh the runtime bundle`)
|
||||
checks.RequireFile(d.config.DefaultRootfs, "default rootfs image", `set "default_rootfs" or refresh the runtime bundle`)
|
||||
checks.RequireFile(d.config.DefaultKernel, "kernel image", `set "default_kernel" or refresh the runtime bundle`)
|
||||
if strings.TrimSpace(d.config.DefaultInitrd) != "" {
|
||||
|
|
@@ -79,7 +79,7 @@ func (d *Daemon) imageBuildChecks(ctx context.Context) *system.Preflight {
|
|||
|
||||
func (d *Daemon) vsockChecks() *system.Preflight {
|
||||
checks := system.NewPreflight()
|
||||
checks.RequireExecutable(d.config.VSockPingHelperPath, "vsock ping helper", `run 'make build' or refresh the runtime bundle`)
|
||||
checks.RequireExecutable(d.config.VSockAgentPath, "vsock agent", `run 'make build' or refresh the runtime bundle`)
|
||||
checks.RequireFile(vsockHostDevicePath, "vsock host device", "load the vhost_vsock kernel module on the host")
|
||||
return checks
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -17,7 +17,7 @@ import (
|
|||
"banger/internal/hostnat"
|
||||
"banger/internal/model"
|
||||
"banger/internal/system"
|
||||
"banger/internal/vsockping"
|
||||
"banger/internal/vsockagent"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@@ -104,14 +104,14 @@ func (d *Daemon) runImageBuildNative(ctx context.Context, spec imageBuildSpec) (
|
|||
}
|
||||
defer client.Close()
|
||||
|
||||
helperBytes, err := os.ReadFile(d.config.VSockPingHelperPath)
|
||||
helperBytes, err := os.ReadFile(d.config.VSockAgentPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeBuildLog(spec.BuildLog, "installing vsock ping helper"); err != nil {
|
||||
if err := writeBuildLog(spec.BuildLog, "installing vsock agent"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := client.UploadFile(ctx, vsockping.GuestInstallPath, 0o755, helperBytes, spec.BuildLog); err != nil {
|
||||
if err := client.UploadFile(ctx, vsockagent.GuestInstallPath, 0o755, helperBytes, spec.BuildLog); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeBuildLog(spec.BuildLog, "configuring guest"); err != nil {
|
||||
|
|
@@ -333,14 +333,14 @@ func appendTmuxSetup(script *bytes.Buffer) {
|
|||
func appendVSockPingSetup(script *bytes.Buffer) {
|
||||
script.WriteString("mkdir -p /etc/modules-load.d /etc/systemd/system\n")
|
||||
script.WriteString("cat > /etc/modules-load.d/banger-vsock.conf <<'EOF'\n")
|
||||
script.WriteString(vsockping.ModulesLoadConfig())
|
||||
script.WriteString(vsockagent.ModulesLoadConfig())
|
||||
script.WriteString("EOF\n")
|
||||
script.WriteString("chmod 0644 /etc/modules-load.d/banger-vsock.conf\n")
|
||||
script.WriteString("cat > /etc/systemd/system/" + vsockping.ServiceName + " <<'EOF'\n")
|
||||
script.WriteString(vsockping.ServiceUnit())
|
||||
script.WriteString("cat > /etc/systemd/system/" + vsockagent.ServiceName + " <<'EOF'\n")
|
||||
script.WriteString(vsockagent.ServiceUnit())
|
||||
script.WriteString("EOF\n")
|
||||
script.WriteString("chmod 0644 /etc/systemd/system/" + vsockping.ServiceName + "\n")
|
||||
script.WriteString("if command -v systemctl >/dev/null 2>&1; then systemctl daemon-reload || true; systemctl enable --now " + vsockping.ServiceName + " || true; fi\n")
|
||||
script.WriteString("chmod 0644 /etc/systemd/system/" + vsockagent.ServiceName + "\n")
|
||||
script.WriteString("if command -v systemctl >/dev/null 2>&1; then systemctl daemon-reload || true; systemctl enable --now " + vsockagent.ServiceName + " || true; fi\n")
|
||||
}
|
||||
|
||||
func appendGitRepo(script *bytes.Buffer, dir, repo string) {
|
||||
|
|
|
|||
|
|
@@ -28,9 +28,9 @@ func TestBuildProvisionScriptInstallsDefaultTools(t *testing.T) {
|
|||
"run '~/.tmux/plugins/tpm/tpm'",
|
||||
"cat > /etc/modules-load.d/banger-vsock.conf <<'EOF'",
|
||||
"vmw_vsock_virtio_transport",
|
||||
"cat > /etc/systemd/system/banger-vsock-pingd.service <<'EOF'",
|
||||
"ExecStart=/usr/local/bin/banger-vsock-pingd",
|
||||
"systemctl enable --now banger-vsock-pingd.service || true",
|
||||
"cat > /etc/systemd/system/banger-vsock-agent.service <<'EOF'",
|
||||
"ExecStart=/usr/local/bin/banger-vsock-agent",
|
||||
"systemctl enable --now banger-vsock-agent.service || true",
|
||||
"rm -f /root/get-docker /root/get-docker.sh /tmp/get-docker /tmp/get-docker.sh",
|
||||
} {
|
||||
if !strings.Contains(script, snippet) {
|
||||
|
|
|
|||
|
|
@@ -2,9 +2,12 @@ package daemon
|
|||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"banger/internal/api"
|
||||
"banger/internal/model"
|
||||
|
|
@@ -132,6 +135,110 @@ func (d *Daemon) BuildImage(ctx context.Context, params api.ImageBuildParams) (i
|
|||
return image, nil
|
||||
}
|
||||
|
||||
func (d *Daemon) RegisterImage(ctx context.Context, params api.ImageRegisterParams) (image model.Image, err error) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
name := strings.TrimSpace(params.Name)
|
||||
if name == "" {
|
||||
return model.Image{}, fmt.Errorf("image name is required")
|
||||
}
|
||||
|
||||
rootfsPath := strings.TrimSpace(params.RootfsPath)
|
||||
if rootfsPath == "" {
|
||||
return model.Image{}, fmt.Errorf("rootfs path is required")
|
||||
}
|
||||
workSeedPath := strings.TrimSpace(params.WorkSeedPath)
|
||||
if workSeedPath == "" {
|
||||
candidate := system.WorkSeedPath(rootfsPath)
|
||||
if candidate != "" {
|
||||
if _, statErr := os.Stat(candidate); statErr == nil {
|
||||
workSeedPath = candidate
|
||||
}
|
||||
}
|
||||
}
|
||||
kernelPath := strings.TrimSpace(params.KernelPath)
|
||||
if kernelPath == "" {
|
||||
kernelPath = d.config.DefaultKernel
|
||||
}
|
||||
initrdPath := strings.TrimSpace(params.InitrdPath)
|
||||
if initrdPath == "" {
|
||||
initrdPath = d.config.DefaultInitrd
|
||||
}
|
||||
modulesDir := strings.TrimSpace(params.ModulesDir)
|
||||
if modulesDir == "" {
|
||||
modulesDir = d.config.DefaultModulesDir
|
||||
}
|
||||
packagesPath := strings.TrimSpace(params.PackagesPath)
|
||||
|
||||
if err := validateImageRegisterPaths(rootfsPath, workSeedPath, kernelPath, initrdPath, modulesDir, packagesPath); err != nil {
|
||||
return model.Image{}, err
|
||||
}
|
||||
|
||||
now := model.Now()
|
||||
existing, lookupErr := d.store.GetImageByName(ctx, name)
|
||||
switch {
|
||||
case lookupErr == nil:
|
||||
if existing.Managed {
|
||||
return model.Image{}, fmt.Errorf("managed image %s cannot be updated via register", name)
|
||||
}
|
||||
image = existing
|
||||
image.RootfsPath = rootfsPath
|
||||
image.WorkSeedPath = workSeedPath
|
||||
image.KernelPath = kernelPath
|
||||
image.InitrdPath = initrdPath
|
||||
image.ModulesDir = modulesDir
|
||||
image.PackagesPath = packagesPath
|
||||
image.Docker = params.Docker
|
||||
image.UpdatedAt = now
|
||||
case errors.Is(lookupErr, sql.ErrNoRows):
|
||||
id, idErr := model.NewID()
|
||||
if idErr != nil {
|
||||
return model.Image{}, idErr
|
||||
}
|
||||
image = model.Image{
|
||||
ID: id,
|
||||
Name: name,
|
||||
Managed: false,
|
||||
RootfsPath: rootfsPath,
|
||||
WorkSeedPath: workSeedPath,
|
||||
KernelPath: kernelPath,
|
||||
InitrdPath: initrdPath,
|
||||
ModulesDir: modulesDir,
|
||||
PackagesPath: packagesPath,
|
||||
Docker: params.Docker,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
default:
|
||||
return model.Image{}, lookupErr
|
||||
}
|
||||
|
||||
if err := d.store.UpsertImage(ctx, image); err != nil {
|
||||
return model.Image{}, err
|
||||
}
|
||||
return image, nil
|
||||
}
|
||||
|
||||
func validateImageRegisterPaths(rootfsPath, workSeedPath, kernelPath, initrdPath, modulesDir, packagesPath string) error {
|
||||
checks := system.NewPreflight()
|
||||
checks.RequireFile(rootfsPath, "rootfs image", `pass --rootfs <path>`)
|
||||
checks.RequireFile(kernelPath, "kernel image", `pass --kernel <path> or set "default_kernel"`)
|
||||
if workSeedPath != "" {
|
||||
checks.RequireFile(workSeedPath, "work-seed image", `pass --work-seed <path> or rebuild the image with a work seed`)
|
||||
}
|
||||
if initrdPath != "" {
|
||||
checks.RequireFile(initrdPath, "initrd image", `pass --initrd <path> or set "default_initrd"`)
|
||||
}
|
||||
if modulesDir != "" {
|
||||
checks.RequireDir(modulesDir, "kernel modules dir", `pass --modules <dir> or set "default_modules_dir"`)
|
||||
}
|
||||
if packagesPath != "" {
|
||||
checks.RequireFile(packagesPath, "packages manifest", `pass --packages <path>`)
|
||||
}
|
||||
return checks.Err("image register failed")
|
||||
}
|
||||
|
||||
func writePackagesMetadata(rootfsPath, packagesPath string) error {
|
||||
if rootfsPath == "" || packagesPath == "" {
|
||||
return nil
|
||||
|
|
|
|||
|
|
@@ -59,7 +59,7 @@ func TestStartVMLockedLogsBridgeFailure(t *testing.T) {
|
|||
t.Setenv("PATH", binDir)
|
||||
|
||||
firecrackerBin := filepath.Join(t.TempDir(), "firecracker")
|
||||
vsockHelper := filepath.Join(t.TempDir(), "banger-vsock-pingd")
|
||||
vsockHelper := filepath.Join(t.TempDir(), "banger-vsock-agent")
|
||||
if err := os.WriteFile(firecrackerBin, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
|
||||
t.Fatalf("write firecracker: %v", err)
|
||||
}
|
||||
|
|
@@ -105,12 +105,12 @@ func TestStartVMLockedLogsBridgeFailure(t *testing.T) {
|
|||
d := &Daemon{
|
||||
layout: paths.Layout{RuntimeDir: filepath.Join(t.TempDir(), "runtime")},
|
||||
config: model.DaemonConfig{
|
||||
BridgeName: "br-fc",
|
||||
BridgeIP: model.DefaultBridgeIP,
|
||||
DefaultDNS: model.DefaultDNS,
|
||||
FirecrackerBin: firecrackerBin,
|
||||
VSockPingHelperPath: vsockHelper,
|
||||
StatsPollInterval: model.DefaultStatsPollInterval,
|
||||
BridgeName: "br-fc",
|
||||
BridgeIP: model.DefaultBridgeIP,
|
||||
DefaultDNS: model.DefaultDNS,
|
||||
FirecrackerBin: firecrackerBin,
|
||||
VSockAgentPath: vsockHelper,
|
||||
StatsPollInterval: model.DefaultStatsPollInterval,
|
||||
},
|
||||
runner: runner,
|
||||
logger: logger,
|
||||
|
|
@@ -151,7 +151,7 @@ func TestBuildImagePreservesBuildLogOnFailure(t *testing.T) {
|
|||
packagesPath := filepath.Join(t.TempDir(), "packages.apt")
|
||||
sshKeyPath := filepath.Join(t.TempDir(), "id_ed25519")
|
||||
firecrackerBin := filepath.Join(t.TempDir(), "firecracker")
|
||||
vsockHelper := filepath.Join(t.TempDir(), "banger-vsock-pingd")
|
||||
vsockHelper := filepath.Join(t.TempDir(), "banger-vsock-agent")
|
||||
for _, path := range []string{baseRootfs, kernelPath, packagesPath, sshKeyPath} {
|
||||
if err := os.WriteFile(path, []byte("artifact"), 0o644); err != nil {
|
||||
t.Fatalf("write %s: %v", path, err)
|
||||
|
|
@@ -186,7 +186,7 @@ func TestBuildImagePreservesBuildLogOnFailure(t *testing.T) {
|
|||
DefaultPackagesFile: packagesPath,
|
||||
SSHKeyPath: sshKeyPath,
|
||||
FirecrackerBin: firecrackerBin,
|
||||
VSockPingHelperPath: vsockHelper,
|
||||
VSockAgentPath: vsockHelper,
|
||||
},
|
||||
store: store,
|
||||
runner: runner,
|
||||
|
|
|
|||
|
|
@@ -54,7 +54,7 @@ func (d *Daemon) addBaseStartPrereqs(checks *system.Preflight, image model.Image
|
|||
|
||||
d.addBaseStartCommandPrereqs(checks)
|
||||
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
|
||||
checks.RequireExecutable(d.config.VSockPingHelperPath, "vsock ping helper", `run 'make build' or refresh the runtime bundle`)
|
||||
checks.RequireExecutable(d.config.VSockAgentPath, "vsock agent", `run 'make build' or refresh the runtime bundle`)
|
||||
checks.RequireFile(vsockHostDevicePath, "vsock host device", "load the vhost_vsock kernel module on the host")
|
||||
checks.RequireFile(image.RootfsPath, "rootfs image", "select a valid image or rebuild the runtime bundle")
|
||||
checks.RequireFile(image.KernelPath, "kernel image", `set "default_kernel" or refresh the runtime bundle`)
|
||||
|
|
@@ -79,8 +79,8 @@ func (d *Daemon) addImageBuildPrereqs(ctx context.Context, checks *system.Prefli
|
|||
checks.RequireCommand(command, toolHint(command))
|
||||
}
|
||||
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
|
||||
checks.RequireFile(d.config.SSHKeyPath, "ssh private key", `set "ssh_key_path" or refresh the runtime bundle`)
|
||||
checks.RequireExecutable(d.config.VSockPingHelperPath, "vsock ping helper", `run 'make build' or refresh the runtime bundle`)
|
||||
checks.RequireFile(d.config.SSHKeyPath, "runtime ssh private key", `refresh the runtime bundle`)
|
||||
checks.RequireExecutable(d.config.VSockAgentPath, "vsock agent", `run 'make build' or refresh the runtime bundle`)
|
||||
checks.RequireFile(baseRootfs, "base rootfs image", `pass --base-rootfs or set "default_base_rootfs"`)
|
||||
checks.RequireFile(kernelPath, "kernel image", `pass --kernel or set "default_kernel"`)
|
||||
checks.RequireFile(d.config.DefaultPackagesFile, "package manifest", `set "default_packages_file" or refresh the runtime bundle`)
|
||||
|
|
|
|||
|
|
@@ -13,11 +13,13 @@ import (
|
|||
|
||||
"banger/internal/api"
|
||||
"banger/internal/firecracker"
|
||||
"banger/internal/guest"
|
||||
"banger/internal/guestconfig"
|
||||
"banger/internal/model"
|
||||
"banger/internal/paths"
|
||||
"banger/internal/system"
|
||||
"banger/internal/vmdns"
|
||||
"banger/internal/vsockagent"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@@ -582,11 +584,11 @@ func (d *Daemon) GetVMStats(ctx context.Context, idOrName string) (model.VMRecor
|
|||
return vm, vm.Stats, nil
|
||||
}
|
||||
|
||||
func (d *Daemon) PingVM(ctx context.Context, idOrName string) (result api.VMPingResult, err error) {
|
||||
func (d *Daemon) HealthVM(ctx context.Context, idOrName string) (result api.VMHealthResult, err error) {
|
||||
_, err = d.withVMLockByRef(ctx, idOrName, func(vm model.VMRecord) (model.VMRecord, error) {
|
||||
result.Name = vm.Name
|
||||
if vm.State != model.VMStateRunning || !system.ProcessRunning(vm.Runtime.PID, vm.Runtime.APISockPath) {
|
||||
result.Alive = false
|
||||
result.Healthy = false
|
||||
return vm, nil
|
||||
}
|
||||
if strings.TrimSpace(vm.Runtime.VSockPath) == "" {
|
||||
|
|
@@ -600,15 +602,23 @@ func (d *Daemon) PingVM(ctx context.Context, idOrName string) (result api.VMPing
|
|||
}
|
||||
pingCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
|
||||
defer cancel()
|
||||
if err := firecracker.PingVSock(pingCtx, d.logger, vm.Runtime.VSockPath); err != nil {
|
||||
if err := vsockagent.Health(pingCtx, d.logger, vm.Runtime.VSockPath); err != nil {
|
||||
return model.VMRecord{}, err
|
||||
}
|
||||
result.Alive = true
|
||||
result.Healthy = true
|
||||
return vm, nil
|
||||
})
|
||||
return result, err
|
||||
}
|
||||
|
||||
func (d *Daemon) PingVM(ctx context.Context, idOrName string) (result api.VMPingResult, err error) {
|
||||
health, err := d.HealthVM(ctx, idOrName)
|
||||
if err != nil {
|
||||
return api.VMPingResult{}, err
|
||||
}
|
||||
return api.VMPingResult{Name: health.Name, Alive: health.Healthy}, nil
|
||||
}
|
||||
|
||||
func (d *Daemon) getVMStatsLocked(ctx context.Context, vm model.VMRecord) (model.VMRecord, error) {
|
||||
stats, err := d.collectStats(ctx, vm)
|
||||
if err == nil {
|
||||
|
|
@@ -814,6 +824,84 @@ func (d *Daemon) ensureWorkDisk(ctx context.Context, vm *model.VMRecord, image m
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *Daemon) ensureAuthorizedKeyOnWorkDisk(ctx context.Context, vm *model.VMRecord) error {
|
||||
publicKey, err := guest.AuthorizedPublicKey(d.config.SSHKeyPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("derive authorized ssh key: %w", err)
|
||||
}
|
||||
workMount, cleanupWork, err := system.MountTempDir(ctx, d.runner, vm.Runtime.WorkDiskPath, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cleanupWork()
|
||||
|
||||
if err := d.flattenNestedWorkHome(ctx, workMount); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sshDir := filepath.Join(workMount, ".ssh")
|
||||
if _, err := d.runner.RunSudo(ctx, "mkdir", "-p", sshDir); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := d.runner.RunSudo(ctx, "chmod", "700", sshDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
authorizedKeysPath := filepath.Join(sshDir, "authorized_keys")
|
||||
existing, err := d.runner.RunSudo(ctx, "cat", authorizedKeysPath)
|
||||
if err != nil {
|
||||
existing = nil
|
||||
}
|
||||
merged := mergeAuthorizedKey(existing, publicKey)
|
||||
|
||||
tmpFile, err := os.CreateTemp("", "banger-authorized-keys-*")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tmpPath := tmpFile.Name()
|
||||
if _, err := tmpFile.Write(merged); err != nil {
|
||||
_ = tmpFile.Close()
|
||||
_ = os.Remove(tmpPath)
|
||||
return err
|
||||
}
|
||||
if err := tmpFile.Close(); err != nil {
|
||||
_ = os.Remove(tmpPath)
|
||||
return err
|
||||
}
|
||||
defer os.Remove(tmpPath)
|
||||
|
||||
if _, err := d.runner.RunSudo(ctx, "install", "-m", "600", tmpPath, authorizedKeysPath); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func mergeAuthorizedKey(existing, managed []byte) []byte {
|
||||
managedLine := strings.TrimSpace(string(managed))
|
||||
if managedLine == "" {
|
||||
return append([]byte(nil), existing...)
|
||||
}
|
||||
|
||||
lines := strings.Split(strings.ReplaceAll(string(existing), "\r\n", "\n"), "\n")
|
||||
out := make([]string, 0, len(lines)+1)
|
||||
found := false
|
||||
for _, line := range lines {
|
||||
line = strings.TrimRight(line, "\r")
|
||||
trimmed := strings.TrimSpace(line)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
if trimmed == managedLine {
|
||||
found = true
|
||||
}
|
||||
out = append(out, line)
|
||||
}
|
||||
if !found {
|
||||
out = append(out, managedLine)
|
||||
}
|
||||
return []byte(strings.Join(out, "\n") + "\n")
|
||||
}
|
||||
|
||||
func (d *Daemon) flattenNestedWorkHome(ctx context.Context, workMount string) error {
|
||||
nestedHome := filepath.Join(workMount, "root")
|
||||
if !exists(nestedHome) {
|
||||
|
|
|
|||
|
|
@@ -2,6 +2,10 @@ package daemon
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
|
|
@@ -253,7 +257,7 @@ func TestSetVMRejectsStoppedOnlyChangesForRunningVM(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPingVMReturnsAliveForRunningGuest(t *testing.T) {
|
||||
func TestHealthVMReturnsHealthyForRunningGuest(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
|
|
@@ -296,16 +300,24 @@ func TestPingVMReturnsAliveForRunningGuest(t *testing.T) {
|
|||
serverDone <- err
|
||||
return
|
||||
}
|
||||
n, err = conn.Read(buf)
|
||||
if err != nil {
|
||||
serverDone <- err
|
||||
reqBuf := make([]byte, 0, 512)
|
||||
reqBuf = append(reqBuf, buf[:0]...)
|
||||
for {
|
||||
n, err = conn.Read(buf)
|
||||
if err != nil {
|
||||
serverDone <- err
|
||||
return
|
||||
}
|
||||
reqBuf = append(reqBuf, buf[:n]...)
|
||||
if strings.Contains(string(reqBuf), "\r\n\r\n") {
|
||||
break
|
||||
}
|
||||
}
|
||||
if got := string(reqBuf); !strings.Contains(got, "GET /healthz HTTP/1.1\r\n") {
|
||||
serverDone <- fmt.Errorf("unexpected health payload %q", got)
|
||||
return
|
||||
}
|
||||
if got := string(buf[:n]); got != "PING\n" {
|
||||
serverDone <- fmt.Errorf("unexpected ping payload %q", got)
|
||||
return
|
||||
}
|
||||
_, err = conn.Write([]byte("PONG\n"))
|
||||
_, err = conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: 15\r\n\r\n{\"status\":\"ok\"}"))
|
||||
serverDone <- err
|
||||
}()
|
||||
|
||||
|
|
@@ -326,12 +338,12 @@ func TestPingVMReturnsAliveForRunningGuest(t *testing.T) {
|
|||
},
|
||||
}
|
||||
d := &Daemon{store: db, runner: runner}
|
||||
result, err := d.PingVM(ctx, vm.Name)
|
||||
result, err := d.HealthVM(ctx, vm.Name)
|
||||
if err != nil {
|
||||
t.Fatalf("PingVM: %v", err)
|
||||
t.Fatalf("HealthVM: %v", err)
|
||||
}
|
||||
if !result.Alive || result.Name != vm.Name {
|
||||
t.Fatalf("PingVM result = %+v, want alive %s", result, vm.Name)
|
||||
if !result.Healthy || result.Name != vm.Name {
|
||||
t.Fatalf("HealthVM result = %+v, want healthy %s", result, vm.Name)
|
||||
}
|
||||
runner.assertExhausted()
|
||||
if err := <-serverDone; err != nil {
|
||||
|
|
@@ -339,7 +351,65 @@ func TestPingVMReturnsAliveForRunningGuest(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPingVMReturnsFalseForStoppedVM(t *testing.T) {
|
||||
func TestPingVMAliasReturnsAliveForHealthyVM(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
db := openDaemonStore(t)
|
||||
apiSock := filepath.Join(t.TempDir(), "fc.sock")
|
||||
fake := startFakeFirecrackerProcess(t, apiSock)
|
||||
t.Cleanup(func() {
|
||||
_ = fake.Process.Kill()
|
||||
_ = fake.Wait()
|
||||
})
|
||||
vsockSock := filepath.Join(t.TempDir(), "fc.vsock")
|
||||
listener, err := net.Listen("unix", vsockSock)
|
||||
if err != nil {
|
||||
t.Fatalf("listen vsock: %v", err)
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
_ = listener.Close()
|
||||
_ = os.Remove(vsockSock)
|
||||
})
|
||||
go func() {
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
buf := make([]byte, 512)
|
||||
_, _ = conn.Read(buf)
|
||||
_, _ = conn.Write([]byte("OK 1\n"))
|
||||
_, _ = conn.Read(buf)
|
||||
_, _ = conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: 15\r\n\r\n{\"status\":\"ok\"}"))
|
||||
}()
|
||||
vm := testVM("healthy-ping", "image-healthy", "172.16.0.42")
|
||||
vm.State = model.VMStateRunning
|
||||
vm.Runtime.State = model.VMStateRunning
|
||||
vm.Runtime.PID = fake.Process.Pid
|
||||
vm.Runtime.APISockPath = apiSock
|
||||
vm.Runtime.VSockPath = vsockSock
|
||||
vm.Runtime.VSockCID = 10042
|
||||
upsertDaemonVM(t, ctx, db, vm)
|
||||
|
||||
runner := &scriptedRunner{
|
||||
t: t,
|
||||
steps: []runnerStep{
|
||||
sudoStep("", nil, "chown", fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()), vsockSock),
|
||||
sudoStep("", nil, "chmod", "600", vsockSock),
|
||||
},
|
||||
}
|
||||
d := &Daemon{store: db, runner: runner}
|
||||
result, err := d.PingVM(ctx, vm.Name)
|
||||
if err != nil {
|
||||
t.Fatalf("PingVM: %v", err)
|
||||
}
|
||||
if !result.Alive {
|
||||
t.Fatalf("PingVM result = %+v, want alive", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHealthVMReturnsFalseForStoppedVM(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
|
|
@@ -348,12 +418,12 @@ func TestPingVMReturnsFalseForStoppedVM(t *testing.T) {
|
|||
upsertDaemonVM(t, ctx, db, vm)
|
||||
|
||||
d := &Daemon{store: db}
|
||||
result, err := d.PingVM(ctx, vm.Name)
|
||||
result, err := d.HealthVM(ctx, vm.Name)
|
||||
if err != nil {
|
||||
t.Fatalf("PingVM: %v", err)
|
||||
t.Fatalf("HealthVM: %v", err)
|
||||
}
|
||||
if result.Alive {
|
||||
t.Fatalf("PingVM result = %+v, want not alive", result)
|
||||
if result.Healthy {
|
||||
t.Fatalf("HealthVM result = %+v, want not healthy", result)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -406,6 +476,64 @@ func TestFlattenNestedWorkHomeCopiesEntriesIndividually(t *testing.T) {
|
|||
runner.assertExhausted()
|
||||
}
|
||||
|
||||
func TestEnsureAuthorizedKeyOnWorkDiskRepairsNestedRootLayout(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
workDiskDir := t.TempDir()
|
||||
nestedHome := filepath.Join(workDiskDir, "root")
|
||||
if err := os.MkdirAll(filepath.Join(nestedHome, ".ssh"), 0o700); err != nil {
|
||||
t.Fatalf("MkdirAll(.ssh): %v", err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(nestedHome, ".bashrc"), []byte("export TEST_PROMPT=1\n"), 0o644); err != nil {
|
||||
t.Fatalf("WriteFile(.bashrc): %v", err)
|
||||
}
|
||||
legacyKey := "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILEgacykey legacy@test\n"
|
||||
if err := os.WriteFile(filepath.Join(nestedHome, ".ssh", "authorized_keys"), []byte(legacyKey), 0o600); err != nil {
|
||||
t.Fatalf("WriteFile(authorized_keys): %v", err)
|
||||
}
|
||||
|
||||
privateKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateKey: %v", err)
|
||||
}
|
||||
privateKeyPEM := pem.EncodeToMemory(&pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
|
||||
})
|
||||
sshKeyPath := filepath.Join(t.TempDir(), "id_rsa")
|
||||
if err := os.WriteFile(sshKeyPath, privateKeyPEM, 0o600); err != nil {
|
||||
t.Fatalf("WriteFile(private key): %v", err)
|
||||
}
|
||||
|
||||
d := &Daemon{
|
||||
runner: &filesystemRunner{t: t},
|
||||
config: model.DaemonConfig{SSHKeyPath: sshKeyPath},
|
||||
}
|
||||
vm := testVM("seed-repair", "image-seed-repair", "172.16.0.61")
|
||||
vm.Runtime.WorkDiskPath = workDiskDir
|
||||
|
||||
if err := d.ensureAuthorizedKeyOnWorkDisk(context.Background(), &vm); err != nil {
|
||||
t.Fatalf("ensureAuthorizedKeyOnWorkDisk: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(filepath.Join(workDiskDir, "root")); !os.IsNotExist(err) {
|
||||
t.Fatalf("nested root still exists: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(filepath.Join(workDiskDir, ".bashrc")); err != nil {
|
||||
t.Fatalf(".bashrc missing at top level: %v", err)
|
||||
}
|
||||
data, err := os.ReadFile(filepath.Join(workDiskDir, ".ssh", "authorized_keys"))
|
||||
if err != nil {
|
||||
t.Fatalf("ReadFile(authorized_keys): %v", err)
|
||||
}
|
||||
content := string(data)
|
||||
if !strings.Contains(content, strings.TrimSpace(legacyKey)) {
|
||||
t.Fatalf("authorized_keys missing legacy key: %q", content)
|
||||
}
|
||||
if !strings.Contains(content, "ssh-rsa ") {
|
||||
t.Fatalf("authorized_keys missing managed key: %q", content)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateVMRejectsNonPositiveCPUAndMemory(t *testing.T) {
|
||||
d := &Daemon{}
|
||||
if _, err := d.CreateVM(context.Background(), api.VMCreateParams{VCPUCount: ptr(0)}); err == nil || !strings.Contains(err.Error(), "vcpu must be a positive integer") {
|
||||
|
|
@ -824,6 +952,29 @@ func testImage(name string) model.Image {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMergeAuthorizedKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
managed := []byte("ssh-ed25519 AAAATESTKEY banger\n")
|
||||
existing := []byte("ssh-ed25519 AAAAOTHER other\n")
|
||||
merged := mergeAuthorizedKey(existing, managed)
|
||||
got := string(merged)
|
||||
if !strings.Contains(got, "ssh-ed25519 AAAAOTHER other") {
|
||||
t.Fatalf("merged keys dropped existing entry: %q", got)
|
||||
}
|
||||
if !strings.Contains(got, "ssh-ed25519 AAAATESTKEY banger") {
|
||||
t.Fatalf("merged keys missing managed entry: %q", got)
|
||||
}
|
||||
if strings.Count(got, "ssh-ed25519 AAAATESTKEY banger") != 1 {
|
||||
t.Fatalf("managed key duplicated in %q", got)
|
||||
}
|
||||
|
||||
merged = mergeAuthorizedKey(merged, managed)
|
||||
if strings.Count(string(merged), "ssh-ed25519 AAAATESTKEY banger") != 1 {
|
||||
t.Fatalf("managed key duplicated after second merge: %q", string(merged))
|
||||
}
|
||||
}
|
||||
|
||||
func startFakeFirecrackerProcess(t *testing.T, apiSock string) *exec.Cmd {
|
||||
t.Helper()
|
||||
|
||||
|
|
@ -878,6 +1029,117 @@ type processKillingRunner struct {
|
|||
proc *exec.Cmd
|
||||
}
|
||||
|
||||
// filesystemRunner is a test double used as a Daemon's runner. Its RunSudo
// implementation executes a small whitelist of privileged commands (mount,
// chmod, cp, rm, mkdir, cat, install, …) directly against the local
// filesystem instead of shelling out, so daemon code paths that normally
// need sudo can be exercised in-process; plain Run calls are rejected.
type filesystemRunner struct {
	t *testing.T // test handle, used to mark helper frames in failures
}
|
||||
|
||||
func (r *filesystemRunner) Run(ctx context.Context, name string, args ...string) ([]byte, error) {
|
||||
r.t.Helper()
|
||||
return nil, fmt.Errorf("unexpected Run call: %s %v", name, args)
|
||||
}
|
||||
|
||||
// RunSudo emulates the privileged commands the daemon issues via sudo by
// performing the equivalent operation on the local filesystem. Each case
// validates the exact argument shape the daemon is expected to use and
// fails loudly on anything else, so tests double as a contract check on
// the command lines the daemon builds. The ctx parameter is accepted for
// interface compatibility but unused.
func (r *filesystemRunner) RunSudo(ctx context.Context, args ...string) ([]byte, error) {
	r.t.Helper()
	if len(args) == 0 {
		return nil, errors.New("missing sudo command")
	}
	switch args[0] {
	case "mount":
		// Emulate `mount <source> <dir>` by replacing the mount point with a
		// symlink to the source, so reads/writes "through the mount" land in
		// the source directory.
		if len(args) != 3 {
			return nil, fmt.Errorf("unexpected mount args: %v", args)
		}
		source, mountDir := args[1], args[2]
		if err := os.Remove(mountDir); err != nil {
			return nil, err
		}
		if err := os.Symlink(source, mountDir); err != nil {
			return nil, err
		}
		return nil, nil
	case "umount":
		// No-op: the symlink "mount" needs no teardown for these tests.
		return nil, nil
	case "chmod":
		// `chmod <octal-mode> <path>`
		if len(args) != 3 {
			return nil, fmt.Errorf("unexpected chmod args: %v", args)
		}
		mode, err := strconv.ParseUint(args[1], 8, 32)
		if err != nil {
			return nil, err
		}
		return nil, os.Chmod(args[2], os.FileMode(mode))
	case "cp":
		// Only `cp -a <source> <target-dir>` is supported (recursive,
		// permission-preserving copy via copyIntoDir).
		if len(args) != 4 || args[1] != "-a" {
			return nil, fmt.Errorf("unexpected cp args: %v", args)
		}
		return nil, copyIntoDir(args[2], args[3])
	case "rm":
		// Only `rm -rf <path>` is supported.
		if len(args) != 3 || args[1] != "-rf" {
			return nil, fmt.Errorf("unexpected rm args: %v", args)
		}
		return nil, os.RemoveAll(args[2])
	case "mkdir":
		// Only `mkdir -p <path>` is supported.
		if len(args) != 3 || args[1] != "-p" {
			return nil, fmt.Errorf("unexpected mkdir args: %v", args)
		}
		return nil, os.MkdirAll(args[2], 0o755)
	case "cat":
		// `cat <path>` returns the file contents as the command output.
		if len(args) != 2 {
			return nil, fmt.Errorf("unexpected cat args: %v", args)
		}
		return os.ReadFile(args[1])
	case "install":
		// `install -m <octal-mode> <source> <dest>`: copy source to dest with
		// the given mode, creating parent directories as needed.
		if len(args) != 5 || args[1] != "-m" {
			return nil, fmt.Errorf("unexpected install args: %v", args)
		}
		mode, err := strconv.ParseUint(args[2], 8, 32)
		if err != nil {
			return nil, err
		}
		data, err := os.ReadFile(args[3])
		if err != nil {
			return nil, err
		}
		if err := os.MkdirAll(filepath.Dir(args[4]), 0o755); err != nil {
			return nil, err
		}
		return nil, os.WriteFile(args[4], data, os.FileMode(mode))
	default:
		return nil, fmt.Errorf("unexpected sudo command: %v", args)
	}
}
|
||||
|
||||
func copyIntoDir(sourcePath, targetDir string) error {
|
||||
targetDir = strings.TrimSuffix(targetDir, "/")
|
||||
info, err := os.Stat(sourcePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
destPath := filepath.Join(targetDir, filepath.Base(sourcePath))
|
||||
if info.IsDir() {
|
||||
if err := os.MkdirAll(destPath, info.Mode().Perm()); err != nil {
|
||||
return err
|
||||
}
|
||||
entries, err := os.ReadDir(sourcePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
if err := copyIntoDir(filepath.Join(sourcePath, entry.Name()), destPath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return os.Chmod(destPath, info.Mode().Perm())
|
||||
}
|
||||
data, err := os.ReadFile(sourcePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Dir(destPath), 0o755); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(destPath, data, info.Mode().Perm())
|
||||
}
|
||||
|
||||
// Run forwards plain (non-sudo) commands unchanged to the wrapped
// scriptedRunner; the process-killing behavior presumably lives in this
// type's RunSudo, which is defined elsewhere in the file.
func (r *processKillingRunner) Run(ctx context.Context, name string, args ...string) ([]byte, error) {
	return r.scriptedRunner.Run(ctx, name, args...)
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue