Add vsock-backed SSH session reminders

Remind users when a VM is still running after `banger vm ssh` exits instead of silently dropping them back to the host shell.

Attach a Firecracker vsock device to each VM, persist the host vsock path/CID, add a new guest-side banger-vsock-pingd responder to the runtime bundle and both image-build paths, and expose a vm.ping RPC that the CLI and TUI call after SSH returns. Doctor and start/build preflight now validate the helper plus /dev/vhost-vsock so the feature fails early and clearly.

Validated with go mod tidy, bash -n customize.sh, git diff --check, make build, and GOCACHE=/tmp/banger-gocache go test ./... outside the sandbox because the daemon tests need real Unix/UDP sockets. Rebuild the image/rootfs used for new VMs so the guest ping service is present.
This commit is contained in:
Thales Maciel 2026-03-18 20:14:51 -03:00
parent 4930d82cb9
commit 08ef706e3f
No known key found for this signature in database
GPG key ID: 33112E6833C34679
31 changed files with 912 additions and 75 deletions

View file

@ -323,6 +323,13 @@ func (d *Daemon) dispatch(ctx context.Context, req rpc.Request) rpc.Response {
return rpc.NewError("not_running", fmt.Sprintf("vm %s is not running", vm.Name))
}
return marshalResultOrError(api.VMSSHResult{Name: vm.Name, GuestIP: vm.Runtime.GuestIP}, nil)
case "vm.ping":
params, err := rpc.DecodeParams[api.VMRefParams](req)
if err != nil {
return rpc.NewError("bad_request", err.Error())
}
result, err := d.PingVM(ctx, params.IDOrName)
return marshalResultOrError(result, err)
case "image.list":
images, err := d.store.ListImages(ctx)
return marshalResultOrError(api.ImageListResult{Images: images}, err)

View file

@ -33,6 +33,7 @@ func (d *Daemon) doctorReport(ctx context.Context) system.Report {
report.AddPreflight("runtime bundle", d.runtimeBundleChecks(), runtimeBundleStatus(d.config))
report.AddPreflight("core vm lifecycle", d.coreVMLifecycleChecks(), "required host tools available")
report.AddPreflight("vsock ssh reminder", d.vsockChecks(), "vsock reminder prerequisites available")
d.addCapabilityDoctorChecks(ctx, &report)
report.AddPreflight("image build", d.imageBuildChecks(ctx), "image build prerequisites available")
@ -44,6 +45,7 @@ func (d *Daemon) runtimeBundleChecks() *system.Preflight {
hint := paths.RuntimeBundleHint()
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
checks.RequireFile(d.config.SSHKeyPath, "ssh private key", `set "ssh_key_path" or refresh the runtime bundle`)
checks.RequireExecutable(d.config.VSockPingHelperPath, "vsock ping helper", `run 'make build' or refresh the runtime bundle`)
checks.RequireFile(d.config.DefaultRootfs, "default rootfs image", `set "default_rootfs" or refresh the runtime bundle`)
checks.RequireFile(d.config.DefaultKernel, "kernel image", `set "default_kernel" or refresh the runtime bundle`)
if strings.TrimSpace(d.config.DefaultInitrd) != "" {
@ -75,6 +77,13 @@ func (d *Daemon) imageBuildChecks(ctx context.Context) *system.Preflight {
return checks
}
// vsockChecks assembles the preflight checks behind the "vsock ssh reminder"
// doctor section: the guest-side ping helper binary must be executable and
// the host's vhost-vsock device node must be present.
func (d *Daemon) vsockChecks() *system.Preflight {
	p := system.NewPreflight()
	p.RequireExecutable(d.config.VSockPingHelperPath, "vsock ping helper", `run 'make build' or refresh the runtime bundle`)
	p.RequireFile(vsockHostDevicePath, "vsock host device", "load the vhost_vsock kernel module on the host")
	return p
}
func runtimeBundleStatus(cfg model.DaemonConfig) string {
if strings.TrimSpace(cfg.RuntimeDir) == "" {
return "runtime dir not configured"

View file

@ -17,6 +17,7 @@ import (
"banger/internal/hostnat"
"banger/internal/model"
"banger/internal/system"
"banger/internal/vsockping"
)
const (
@ -103,6 +104,16 @@ func (d *Daemon) runImageBuildNative(ctx context.Context, spec imageBuildSpec) (
}
defer client.Close()
helperBytes, err := os.ReadFile(d.config.VSockPingHelperPath)
if err != nil {
return err
}
if err := writeBuildLog(spec.BuildLog, "installing vsock ping helper"); err != nil {
return err
}
if err := client.UploadFile(ctx, vsockping.GuestInstallPath, 0o755, helperBytes, spec.BuildLog); err != nil {
return err
}
if err := writeBuildLog(spec.BuildLog, "configuring guest"); err != nil {
return err
}
@ -207,7 +218,7 @@ func (d *Daemon) startImageBuildVM(ctx context.Context, spec imageBuildSpec) (im
return imageBuildVM{}, nil, err
}
vm.PID = d.resolveFirecrackerPID(firecrackerCtx, machine, vm.APISock)
if err := d.ensureSocketAccess(ctx, vm.APISock); err != nil {
if err := d.ensureSocketAccess(ctx, vm.APISock, "firecracker api socket"); err != nil {
_ = d.killVMProcess(context.Background(), vm.PID)
_ = hostnat.Ensure(ctx, d.runner, vm.GuestIP, vm.TapDevice, false)
_, _ = d.runner.RunSudo(ctx, "ip", "link", "del", vm.TapDevice)
@ -255,6 +266,7 @@ func buildProvisionScript(vmName, dnsServer string, packages []string, installDo
script.WriteString("DEBIAN_FRONTEND=noninteractive apt-get -y install \"${PACKAGES[@]}\"\n")
appendMiseSetup(&script)
appendTmuxSetup(&script)
appendVSockPingSetup(&script)
if installDocker {
script.WriteString("DEBIAN_FRONTEND=noninteractive apt-get -y remove containerd || true\n")
script.WriteString("if ! DEBIAN_FRONTEND=noninteractive apt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin; then\n")
@ -318,6 +330,19 @@ func appendTmuxSetup(script *bytes.Buffer) {
script.WriteString("chmod 0644 \"$TMUX_CONF\"\n")
}
// appendVSockPingSetup emits the provisioning shell fragment that installs
// the guest-side vsock ping responder: a modules-load.d entry so the vsock
// transport module is loaded at boot, the responder's systemd unit, and a
// best-effort enable/start when systemctl exists in the guest.
func appendVSockPingSetup(script *bytes.Buffer) {
	fmt.Fprint(script, "mkdir -p /etc/modules-load.d /etc/systemd/system\n")
	// Heredoc the module list so it lands verbatim in the guest filesystem.
	fmt.Fprintf(script, "cat > /etc/modules-load.d/banger-vsock.conf <<'EOF'\n%sEOF\n", vsockping.ModulesLoadConfig())
	fmt.Fprint(script, "chmod 0644 /etc/modules-load.d/banger-vsock.conf\n")
	fmt.Fprintf(script, "cat > /etc/systemd/system/%s <<'EOF'\n%sEOF\n", vsockping.ServiceName, vsockping.ServiceUnit())
	fmt.Fprintf(script, "chmod 0644 /etc/systemd/system/%s\n", vsockping.ServiceName)
	// systemctl may be absent in minimal chroots; all activation is best-effort.
	fmt.Fprintf(script, "if command -v systemctl >/dev/null 2>&1; then systemctl daemon-reload || true; systemctl enable --now %s || true; fi\n", vsockping.ServiceName)
}
func appendGitRepo(script *bytes.Buffer, dir, repo string) {
fmt.Fprintf(script, "if [[ -d \"%s/.git\" ]]; then\n", dir)
fmt.Fprintf(script, " git -C \"%s\" fetch --depth 1 origin\n", dir)

View file

@ -26,6 +26,11 @@ func TestBuildProvisionScriptInstallsDefaultTools(t *testing.T) {
"set -g @continuum-restore 'off'",
"set -g @resurrect-dir '/root/.tmux/resurrect'",
"run '~/.tmux/plugins/tpm/tpm'",
"cat > /etc/modules-load.d/banger-vsock.conf <<'EOF'",
"vmw_vsock_virtio_transport",
"cat > /etc/systemd/system/banger-vsock-pingd.service <<'EOF'",
"ExecStart=/usr/local/bin/banger-vsock-pingd",
"systemctl enable --now banger-vsock-pingd.service || true",
"rm -f /root/get-docker /root/get-docker.sh /tmp/get-docker /tmp/get-docker.sh",
} {
if !strings.Contains(script, snippet) {

View file

@ -43,6 +43,11 @@ func TestNewDaemonLoggerEmitsJSONAtConfiguredLevel(t *testing.T) {
func TestStartVMLockedLogsBridgeFailure(t *testing.T) {
ctx := context.Background()
origVsockHostDevicePath := vsockHostDevicePath
vsockHostDevicePath = filepath.Join(t.TempDir(), "vhost-vsock")
t.Cleanup(func() {
vsockHostDevicePath = origVsockHostDevicePath
})
binDir := t.TempDir()
for _, name := range []string{
"sudo", "ip", "dmsetup", "losetup", "blockdev", "truncate", "pgrep", "ps",
@ -54,9 +59,16 @@ func TestStartVMLockedLogsBridgeFailure(t *testing.T) {
t.Setenv("PATH", binDir)
firecrackerBin := filepath.Join(t.TempDir(), "firecracker")
vsockHelper := filepath.Join(t.TempDir(), "banger-vsock-pingd")
if err := os.WriteFile(firecrackerBin, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
t.Fatalf("write firecracker: %v", err)
}
if err := os.WriteFile(vsockHostDevicePath, []byte{}, 0o644); err != nil {
t.Fatalf("write vsock host device: %v", err)
}
if err := os.WriteFile(vsockHelper, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
t.Fatalf("write vsock helper: %v", err)
}
rootfsPath := filepath.Join(t.TempDir(), "rootfs.ext4")
kernelPath := filepath.Join(t.TempDir(), "vmlinux")
for _, path := range []string{rootfsPath, kernelPath} {
@ -93,11 +105,12 @@ func TestStartVMLockedLogsBridgeFailure(t *testing.T) {
d := &Daemon{
layout: paths.Layout{RuntimeDir: filepath.Join(t.TempDir(), "runtime")},
config: model.DaemonConfig{
BridgeName: "br-fc",
BridgeIP: model.DefaultBridgeIP,
DefaultDNS: model.DefaultDNS,
FirecrackerBin: firecrackerBin,
StatsPollInterval: model.DefaultStatsPollInterval,
BridgeName: "br-fc",
BridgeIP: model.DefaultBridgeIP,
DefaultDNS: model.DefaultDNS,
FirecrackerBin: firecrackerBin,
VSockPingHelperPath: vsockHelper,
StatsPollInterval: model.DefaultStatsPollInterval,
},
runner: runner,
logger: logger,
@ -138,11 +151,15 @@ func TestBuildImagePreservesBuildLogOnFailure(t *testing.T) {
packagesPath := filepath.Join(t.TempDir(), "packages.apt")
sshKeyPath := filepath.Join(t.TempDir(), "id_ed25519")
firecrackerBin := filepath.Join(t.TempDir(), "firecracker")
vsockHelper := filepath.Join(t.TempDir(), "banger-vsock-pingd")
for _, path := range []string{baseRootfs, kernelPath, packagesPath, sshKeyPath} {
if err := os.WriteFile(path, []byte("artifact"), 0o644); err != nil {
t.Fatalf("write %s: %v", path, err)
}
}
if err := os.WriteFile(vsockHelper, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
t.Fatalf("write %s: %v", vsockHelper, err)
}
if err := os.WriteFile(firecrackerBin, []byte("#!/bin/sh\nexit 0\n"), 0o755); err != nil {
t.Fatalf("write %s: %v", firecrackerBin, err)
}
@ -169,6 +186,7 @@ func TestBuildImagePreservesBuildLogOnFailure(t *testing.T) {
DefaultPackagesFile: packagesPath,
SSHKeyPath: sshKeyPath,
FirecrackerBin: firecrackerBin,
VSockPingHelperPath: vsockHelper,
},
store: store,
runner: runner,

View file

@ -9,6 +9,8 @@ import (
"banger/internal/system"
)
var vsockHostDevicePath = "/dev/vhost-vsock"
func (d *Daemon) validateStartPrereqs(ctx context.Context, vm model.VMRecord, image model.Image) error {
checks := system.NewPreflight()
d.addBaseStartPrereqs(checks, image)
@ -52,6 +54,8 @@ func (d *Daemon) addBaseStartPrereqs(checks *system.Preflight, image model.Image
d.addBaseStartCommandPrereqs(checks)
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
checks.RequireExecutable(d.config.VSockPingHelperPath, "vsock ping helper", `run 'make build' or refresh the runtime bundle`)
checks.RequireFile(vsockHostDevicePath, "vsock host device", "load the vhost_vsock kernel module on the host")
checks.RequireFile(image.RootfsPath, "rootfs image", "select a valid image or rebuild the runtime bundle")
checks.RequireFile(image.KernelPath, "kernel image", `set "default_kernel" or refresh the runtime bundle`)
if strings.TrimSpace(image.InitrdPath) != "" {
@ -73,6 +77,7 @@ func (d *Daemon) addImageBuildPrereqs(ctx context.Context, checks *system.Prefli
}
checks.RequireExecutable(d.config.FirecrackerBin, "firecracker binary", hint)
checks.RequireFile(d.config.SSHKeyPath, "ssh private key", `set "ssh_key_path" or refresh the runtime bundle`)
checks.RequireExecutable(d.config.VSockPingHelperPath, "vsock ping helper", `run 'make build' or refresh the runtime bundle`)
checks.RequireFile(baseRootfs, "base rootfs image", `pass --base-rootfs or set "default_base_rootfs"`)
checks.RequireFile(kernelPath, "kernel image", `pass --kernel or set "default_kernel"`)
checks.RequireFile(d.config.DefaultPackagesFile, "package manifest", `set "default_packages_file" or refresh the runtime bundle`)

View file

@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"net"
"os"
"path/filepath"
"strconv"
@ -75,6 +76,10 @@ func (d *Daemon) CreateVM(ctx context.Context, params api.VMCreateParams) (vm mo
if err := os.MkdirAll(vmDir, 0o755); err != nil {
return model.VMRecord{}, err
}
vsockCID, err := defaultVSockCID(guestIP)
if err != nil {
return model.VMRecord{}, err
}
systemOverlaySize := int64(model.DefaultSystemOverlaySize)
if params.SystemOverlaySize != "" {
systemOverlaySize, err = model.ParseSize(params.SystemOverlaySize)
@ -111,6 +116,8 @@ func (d *Daemon) CreateVM(ctx context.Context, params api.VMCreateParams) (vm mo
GuestIP: guestIP,
DNSName: vmdns.RecordName(name),
VMDir: vmDir,
VSockPath: defaultVSockPath(d.layout.RuntimeDir, id),
VSockCID: vsockCID,
SystemOverlay: filepath.Join(vmDir, "system.cow"),
WorkDiskPath: filepath.Join(vmDir, "root.ext4"),
LogPath: filepath.Join(vmDir, "firecracker.log"),
@ -183,9 +190,21 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
apiSock := filepath.Join(d.layout.RuntimeDir, "fc-"+shortID+".sock")
tap := "tap-fc-" + shortID
dmName := "fc-rootfs-" + shortID
if strings.TrimSpace(vm.Runtime.VSockPath) == "" {
vm.Runtime.VSockPath = defaultVSockPath(d.layout.RuntimeDir, vm.ID)
}
if vm.Runtime.VSockCID == 0 {
vm.Runtime.VSockCID, err = defaultVSockCID(vm.Runtime.GuestIP)
if err != nil {
return model.VMRecord{}, err
}
}
if err := os.RemoveAll(apiSock); err != nil && !os.IsNotExist(err) {
return model.VMRecord{}, err
}
if err := os.RemoveAll(vm.Runtime.VSockPath); err != nil && !os.IsNotExist(err) {
return model.VMRecord{}, err
}
op.stage("system_overlay", "overlay_path", vm.Runtime.SystemOverlay)
if err := d.ensureSystemOverlay(ctx, &vm); err != nil {
@ -260,6 +279,8 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
IsRoot: true,
}},
TapDevice: tap,
VSockPath: vm.Runtime.VSockPath,
VSockCID: vm.Runtime.VSockCID,
VCPUCount: vm.Spec.VCPUCount,
MemoryMiB: vm.Spec.MemoryMiB,
Logger: d.logger,
@ -276,7 +297,11 @@ func (d *Daemon) startVMLocked(ctx context.Context, vm model.VMRecord, image mod
vm.Runtime.PID = d.resolveFirecrackerPID(firecrackerCtx, machine, apiSock)
op.debugStage("firecracker_started", "pid", vm.Runtime.PID)
op.stage("socket_access", "api_socket", apiSock)
if err := d.ensureSocketAccess(ctx, apiSock); err != nil {
if err := d.ensureSocketAccess(ctx, apiSock, "firecracker api socket"); err != nil {
return cleanupOnErr(err)
}
op.stage("vsock_access", "vsock_path", vm.Runtime.VSockPath, "vsock_cid", vm.Runtime.VSockCID)
if err := d.ensureSocketAccess(ctx, vm.Runtime.VSockPath, "firecracker vsock socket"); err != nil {
return cleanupOnErr(err)
}
op.stage("post_start_features")
@ -556,6 +581,33 @@ func (d *Daemon) GetVMStats(ctx context.Context, idOrName string) (model.VMRecor
return vm, vm.Stats, nil
}
// PingVM probes the guest ping responder of the VM identified by idOrName
// over the Firecracker vsock socket and reports whether the guest answered.
// A VM that is not running (or whose firecracker process has exited) yields
// Alive=false with a nil error; missing vsock metadata or a failed probe is
// returned as an error. The check runs under the per-VM lock.
func (d *Daemon) PingVM(ctx context.Context, idOrName string) (result api.VMPingResult, err error) {
	_, err = d.withVMLockByRef(ctx, idOrName, func(vm model.VMRecord) (model.VMRecord, error) {
		result.Name = vm.Name
		// Not-running is a normal answer, not an error: report not alive.
		if vm.State != model.VMStateRunning || !system.ProcessRunning(vm.Runtime.PID, vm.Runtime.APISockPath) {
			result.Alive = false
			return vm, nil
		}
		// Older records may predate the vsock feature; fail clearly rather
		// than probing a nonexistent socket.
		if strings.TrimSpace(vm.Runtime.VSockPath) == "" {
			return model.VMRecord{}, errors.New("vm has no vsock path")
		}
		if vm.Runtime.VSockCID == 0 {
			return model.VMRecord{}, errors.New("vm has no vsock cid")
		}
		// Firecracker creates the socket as root; chown/chmod it to us first.
		if err := d.ensureSocketAccess(ctx, vm.Runtime.VSockPath, "firecracker vsock socket"); err != nil {
			return model.VMRecord{}, err
		}
		// Bound the round trip so a wedged guest cannot hang the RPC.
		pingCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
		defer cancel()
		if err := firecracker.PingVSock(pingCtx, d.logger, vm.Runtime.VSockPath); err != nil {
			return model.VMRecord{}, err
		}
		result.Alive = true
		return vm, nil
	})
	return result, err
}
func (d *Daemon) getVMStatsLocked(ctx context.Context, vm model.VMRecord) (model.VMRecord, error) {
stats, err := d.collectStats(ctx, vm)
if err == nil {
@ -812,11 +864,14 @@ func (d *Daemon) firecrackerBinary() (string, error) {
return path, nil
}
func (d *Daemon) ensureSocketAccess(ctx context.Context, apiSock string) error {
if _, err := d.runner.RunSudo(ctx, "chown", fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()), apiSock); err != nil {
func (d *Daemon) ensureSocketAccess(ctx context.Context, socketPath, label string) error {
if err := waitForPath(ctx, socketPath, 5*time.Second, label); err != nil {
return err
}
_, err := d.runner.RunSudo(ctx, "chmod", "600", apiSock)
if _, err := d.runner.RunSudo(ctx, "chown", fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()), socketPath); err != nil {
return err
}
_, err := d.runner.RunSudo(ctx, "chmod", "600", socketPath)
return err
}
@ -841,7 +896,7 @@ func (d *Daemon) resolveFirecrackerPID(ctx context.Context, machine *firecracker
}
func (d *Daemon) sendCtrlAltDel(ctx context.Context, vm model.VMRecord) error {
if err := d.ensureSocketAccess(ctx, vm.Runtime.APISockPath); err != nil {
if err := d.ensureSocketAccess(ctx, vm.Runtime.APISockPath, "firecracker api socket"); err != nil {
return err
}
client := firecracker.New(vm.Runtime.APISockPath, d.logger)
@ -887,6 +942,9 @@ func (d *Daemon) cleanupRuntime(ctx context.Context, vm model.VMRecord, preserve
if vm.Runtime.APISockPath != "" {
_ = os.Remove(vm.Runtime.APISockPath)
}
if vm.Runtime.VSockPath != "" {
_ = os.Remove(vm.Runtime.VSockPath)
}
snapshotErr := d.cleanupDMSnapshot(ctx, dmSnapshotHandles{
BaseLoop: vm.Runtime.BaseLoop,
COWLoop: vm.Runtime.COWLoop,
@ -910,6 +968,37 @@ func clearRuntimeHandles(vm *model.VMRecord) {
vm.Runtime.DMDev = ""
}
// defaultVSockPath returns the host-side vsock Unix socket location for a VM,
// placed in the runtime directory and keyed by the VM's short ID.
func defaultVSockPath(runtimeDir, vmID string) string {
	sockName := "fc-" + system.ShortID(vmID) + ".vsock"
	return filepath.Join(runtimeDir, sockName)
}
// defaultVSockCID derives a deterministic vsock context ID for a guest from
// its IPv4 address: the final octet offset into the 10000+ range, so CIDs are
// stable across restarts and distinct per guest IP within the /24.
func defaultVSockCID(guestIP string) (uint32, error) {
	parsed := net.ParseIP(strings.TrimSpace(guestIP))
	if v4 := parsed.To4(); v4 != nil {
		return 10000 + uint32(v4[3]), nil
	}
	return 0, fmt.Errorf("guest IP is not IPv4: %q", guestIP)
}
func waitForPath(ctx context.Context, path string, timeout time.Duration, label string) error {
deadline := time.Now().Add(timeout)
for {
if _, err := os.Stat(path); err == nil {
return nil
} else if err != nil && !os.IsNotExist(err) {
return err
}
if time.Now().After(deadline) {
return fmt.Errorf("%s not ready: %s: %w", label, path, context.DeadlineExceeded)
}
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(100 * time.Millisecond):
}
}
}
func (d *Daemon) setDNS(ctx context.Context, vmName, guestIP string) error {
if d.vmDNS == nil {
return nil

View file

@ -261,6 +261,114 @@ func TestSetVMRejectsStoppedOnlyChangesForRunningVM(t *testing.T) {
}
}
// TestPingVMReturnsAliveForRunningGuest exercises the full happy path of
// PingVM: a fake firecracker process backs the liveness check, and a local
// Unix socket stands in for the Firecracker hybrid-vsock endpoint, speaking
// the CONNECT/OK handshake followed by a PING/PONG exchange. The scripted
// runner pins the exact sudo chown/chmod calls made to claim the socket.
func TestPingVMReturnsAliveForRunningGuest(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	db := openDaemonStore(t)
	apiSock := filepath.Join(t.TempDir(), "fc.sock")
	// Fake VMM process so system.ProcessRunning sees a live PID.
	fake := startFakeFirecrackerProcess(t, apiSock)
	t.Cleanup(func() {
		_ = fake.Process.Kill()
		_ = fake.Wait()
	})
	vsockSock := filepath.Join(t.TempDir(), "fc.vsock")
	listener, err := net.Listen("unix", vsockSock)
	if err != nil {
		t.Fatalf("listen vsock: %v", err)
	}
	t.Cleanup(func() {
		_ = listener.Close()
		_ = os.Remove(vsockSock)
	})
	// Server goroutine emulating the guest responder behind the hybrid-vsock
	// proxy; any protocol deviation is reported through serverDone.
	serverDone := make(chan error, 1)
	go func() {
		conn, err := listener.Accept()
		if err != nil {
			serverDone <- err
			return
		}
		defer conn.Close()
		buf := make([]byte, 128)
		n, err := conn.Read(buf)
		if err != nil {
			serverDone <- err
			return
		}
		// Hybrid-vsock handshake: client asks for guest port 42070
		// (presumably the ping responder's port — defined by PingVSock).
		if got := string(buf[:n]); got != "CONNECT 42070\n" {
			serverDone <- fmt.Errorf("unexpected connect message %q", got)
			return
		}
		if _, err := conn.Write([]byte("OK 1\n")); err != nil {
			serverDone <- err
			return
		}
		n, err = conn.Read(buf)
		if err != nil {
			serverDone <- err
			return
		}
		// Application-level probe payload expected after the handshake.
		if got := string(buf[:n]); got != "PING\n" {
			serverDone <- fmt.Errorf("unexpected ping payload %q", got)
			return
		}
		_, err = conn.Write([]byte("PONG\n"))
		serverDone <- err
	}()
	// Persist a running VM record pointing at the fake process and socket.
	vm := testVM("alive", "image-alive", "172.16.0.41")
	vm.State = model.VMStateRunning
	vm.Runtime.State = model.VMStateRunning
	vm.Runtime.PID = fake.Process.Pid
	vm.Runtime.APISockPath = apiSock
	vm.Runtime.VSockPath = vsockSock
	vm.Runtime.VSockCID = 10041
	if err := db.UpsertVM(ctx, vm); err != nil {
		t.Fatalf("UpsertVM: %v", err)
	}
	// PingVM must claim the vsock socket via exactly these two sudo calls.
	runner := &scriptedRunner{
		t: t,
		steps: []runnerStep{
			sudoStep("", nil, "chown", fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()), vsockSock),
			sudoStep("", nil, "chmod", "600", vsockSock),
		},
	}
	d := &Daemon{store: db, runner: runner}
	result, err := d.PingVM(ctx, vm.Name)
	if err != nil {
		t.Fatalf("PingVM: %v", err)
	}
	if !result.Alive || result.Name != vm.Name {
		t.Fatalf("PingVM result = %+v, want alive %s", result, vm.Name)
	}
	runner.assertExhausted()
	// Surface any protocol error observed by the fake guest responder.
	if err := <-serverDone; err != nil {
		t.Fatalf("server: %v", err)
	}
}
// TestPingVMReturnsFalseForStoppedVM verifies that pinging a VM that was
// never started yields Alive=false with no error, rather than attempting a
// vsock probe.
func TestPingVMReturnsFalseForStoppedVM(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := openDaemonStore(t)
	stopped := testVM("stopped-ping", "image-stopped", "172.16.0.42")
	if err := store.UpsertVM(ctx, stopped); err != nil {
		t.Fatalf("UpsertVM: %v", err)
	}
	daemon := &Daemon{store: store}
	got, err := daemon.PingVM(ctx, stopped.Name)
	if err != nil {
		t.Fatalf("PingVM: %v", err)
	}
	if got.Alive {
		t.Fatalf("PingVM result = %+v, want not alive", got)
	}
}
func TestSetVMDiskResizeFailsPreflightWhenToolsMissing(t *testing.T) {
ctx := context.Background()
db := openDaemonStore(t)