Introduces three interconnected features for persistent VM workflows: 1. `banger vm exec <vm> -- <cmd>`: runs a command in the prepared workspace, automatically cd-ing into the guest path and wrapping via `mise exec --` so mise-managed tools are on PATH. Falls back to a plain exec when mise isn't available. Exit code propagates verbatim. 2. Workspace persistence: workspace.prepare now stores the guest path, host source path, and HEAD commit into a new `workspace_json` column on the vms table (migration 3). This state survives daemon restarts and informs both dirty-checking and auto-prepare. 3. Dirty detection: `vm exec` compares the stored HEAD commit against the current host repo HEAD. When stale it warns and, with --auto-prepare, re-syncs the workspace before running. Also: - WORKSPACE column added to `banger ps` / `vm list` - `banger vm` quick reference updated with `vm exec` entry
490 lines
14 KiB
Go
490 lines
14 KiB
Go
package store
|
|
|
|
import (
|
|
"context"
|
|
"database/sql"
|
|
"encoding/json"
|
|
"errors"
|
|
"fmt"
|
|
"net/url"
|
|
"path/filepath"
|
|
"sync"
|
|
"time"
|
|
|
|
_ "modernc.org/sqlite"
|
|
|
|
"banger/internal/model"
|
|
)
|
|
|
|
// Store is the SQLite-backed persistence layer for images and VMs.
// Reads go straight to db; every mutating method takes writeMu so
// writes from concurrent goroutines serialize in-process instead of
// relying solely on SQLite's busy handling.
type Store struct {
	db *sql.DB
	// writeMu serializes all INSERT/UPDATE/DELETE statements.
	writeMu sync.Mutex
}
|
|
|
|
func Open(path string) (*Store, error) {
|
|
dsn, err := sqliteDSN(path)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
db, err := sql.Open("sqlite", dsn)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
store := &Store{db: db}
|
|
if err := runMigrations(db); err != nil {
|
|
_ = db.Close()
|
|
return nil, err
|
|
}
|
|
return store, nil
|
|
}
|
|
|
|
// OpenReadOnly opens the state DB without running migrations and with
|
|
// SQLite's mode=ro flag so no write can slip through — the file and
|
|
// its WAL sidecar stay untouched. Used by `banger doctor`, which must
|
|
// be pure inspection: running it should never mutate user state, and
|
|
// it must not trigger a schema migration the user didn't ask for.
|
|
//
|
|
// Returns the usual sql.ErrNoRows-compatible errors from the read
|
|
// queries if the DB's schema is older than the current code expects;
|
|
// doctor surfaces those as failing checks rather than a hard crash.
|
|
func OpenReadOnly(path string) (*Store, error) {
|
|
dsn, err := sqliteReadOnlyDSN(path)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
db, err := sql.Open("sqlite", dsn)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
// Ping forces SQLite to actually open the file, so a missing or
|
|
// unreadable DB fails here rather than at first query. Match the
|
|
// existing Open contract: caller expects success to mean "ready
|
|
// to read."
|
|
if err := db.Ping(); err != nil {
|
|
_ = db.Close()
|
|
return nil, err
|
|
}
|
|
return &Store{db: db}, nil
|
|
}
|
|
|
|
// Close releases the underlying database handle; a direct delegate to
// sql.DB.Close.
func (s *Store) Close() error {
	return s.db.Close()
}
|
|
|
|
// sqliteDSN builds the read-write DSN for the state DB: an absolute
// file: URL carrying the per-connection pragmas understood by
// modernc.org/sqlite (WAL journaling, NORMAL sync, foreign keys on,
// 5s busy timeout, in-memory temp store, and WAL checkpoint/size
// limits).
func sqliteDSN(path string) (string, error) {
	absPath, err := filepath.Abs(path)
	if err != nil {
		return "", fmt.Errorf("resolve sqlite path: %w", err)
	}
	// Order matters only for readability; url.Values preserves the
	// insertion order of repeated "_pragma" values when encoding.
	pragmas := []string{
		"journal_mode(WAL)",
		"synchronous(NORMAL)",
		"foreign_keys(1)",
		"busy_timeout(5000)",
		"temp_store(MEMORY)",
		"wal_autocheckpoint(1000)",
		"journal_size_limit(67108864)",
	}
	params := url.Values{}
	for _, pragma := range pragmas {
		params.Add("_pragma", pragma)
	}
	u := url.URL{
		Scheme:   "file",
		Path:     filepath.ToSlash(absPath),
		RawQuery: params.Encode(),
	}
	return u.String(), nil
}
|
|
|
|
// sqliteReadOnlyDSN builds a DSN that opens the DB in SQLite's
// read-only mode. It deliberately omits journal_mode=WAL and the other
// write-adjacent pragmas from sqliteDSN — mode=ro refuses them anyway,
// and a minimal list means the open never touches the file. Only
// foreign_keys (semantics parity with the RW path) and busy_timeout
// (lock backoff) are worth keeping for read paths.
func sqliteReadOnlyDSN(path string) (string, error) {
	absPath, err := filepath.Abs(path)
	if err != nil {
		return "", fmt.Errorf("resolve sqlite path: %w", err)
	}
	params := url.Values{
		"mode":    {"ro"},
		"_pragma": {"foreign_keys(1)", "busy_timeout(5000)"},
	}
	u := url.URL{
		Scheme:   "file",
		Path:     filepath.ToSlash(absPath),
		RawQuery: params.Encode(),
	}
	return u.String(), nil
}
|
|
|
|
func (s *Store) UpsertImage(ctx context.Context, image model.Image) error {
|
|
s.writeMu.Lock()
|
|
defer s.writeMu.Unlock()
|
|
const query = `
|
|
INSERT INTO images (
|
|
id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path,
|
|
modules_dir, build_size, seeded_ssh_public_key_fingerprint, created_at, updated_at
|
|
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
|
ON CONFLICT(id) DO UPDATE SET
|
|
name=excluded.name,
|
|
managed=excluded.managed,
|
|
artifact_dir=excluded.artifact_dir,
|
|
rootfs_path=excluded.rootfs_path,
|
|
work_seed_path=excluded.work_seed_path,
|
|
kernel_path=excluded.kernel_path,
|
|
initrd_path=excluded.initrd_path,
|
|
modules_dir=excluded.modules_dir,
|
|
build_size=excluded.build_size,
|
|
seeded_ssh_public_key_fingerprint=excluded.seeded_ssh_public_key_fingerprint,
|
|
updated_at=excluded.updated_at`
|
|
_, err := s.db.ExecContext(ctx, query,
|
|
image.ID,
|
|
image.Name,
|
|
boolToInt(image.Managed),
|
|
image.ArtifactDir,
|
|
image.RootfsPath,
|
|
image.WorkSeedPath,
|
|
image.KernelPath,
|
|
image.InitrdPath,
|
|
image.ModulesDir,
|
|
image.BuildSize,
|
|
image.SeededSSHPublicKeyFingerprint,
|
|
image.CreatedAt.Format(time.RFC3339),
|
|
image.UpdatedAt.Format(time.RFC3339),
|
|
)
|
|
return err
|
|
}
|
|
|
|
// GetImageByName returns the image with the exact given name;
// sql.ErrNoRows when none matches.
func (s *Store) GetImageByName(ctx context.Context, name string) (model.Image, error) {
	return s.getImage(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path, modules_dir, build_size, seeded_ssh_public_key_fingerprint, created_at, updated_at FROM images WHERE name = ?", name)
}
|
|
|
|
// GetImageByID returns the image with the exact given id;
// sql.ErrNoRows when none matches.
func (s *Store) GetImageByID(ctx context.Context, id string) (model.Image, error) {
	return s.getImage(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path, modules_dir, build_size, seeded_ssh_public_key_fingerprint, created_at, updated_at FROM images WHERE id = ?", id)
}
|
|
|
|
func (s *Store) ListImages(ctx context.Context) ([]model.Image, error) {
|
|
rows, err := s.db.QueryContext(ctx, "SELECT id, name, managed, artifact_dir, rootfs_path, work_seed_path, kernel_path, initrd_path, modules_dir, build_size, seeded_ssh_public_key_fingerprint, created_at, updated_at FROM images ORDER BY created_at ASC")
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var images []model.Image
|
|
for rows.Next() {
|
|
image, err := scanImage(rows)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
images = append(images, image)
|
|
}
|
|
return images, rows.Err()
|
|
}
|
|
|
|
// DeleteImage removes the image row with the given id. Deleting an id
// that does not exist is a silent no-op (no error). Does not cascade
// to VMs; see FindVMsUsingImage for callers that need that check.
func (s *Store) DeleteImage(ctx context.Context, id string) error {
	s.writeMu.Lock()
	defer s.writeMu.Unlock()
	_, err := s.db.ExecContext(ctx, "DELETE FROM images WHERE id = ?", id)
	return err
}
|
|
|
|
// UpsertVM inserts or refreshes the VM row keyed by id. Note two
// deliberate omissions from the ON CONFLICT update list: created_at
// (the original creation time is preserved) and workspace_json (that
// column is written only by SetVMWorkspace, so a routine state upsert
// cannot clobber persisted workspace data).
//
// guest_ip is stored both inside runtime_json and as its own column;
// the denormalized column is what NextGuestIP scans.
func (s *Store) UpsertVM(ctx context.Context, vm model.VMRecord) error {
	s.writeMu.Lock()
	defer s.writeMu.Unlock()
	// Marshal all three JSON blobs up front so a serialization failure
	// aborts before any SQL executes.
	specJSON, err := json.Marshal(vm.Spec)
	if err != nil {
		return err
	}
	runtimeJSON, err := json.Marshal(vm.Runtime)
	if err != nil {
		return err
	}
	statsJSON, err := json.Marshal(vm.Stats)
	if err != nil {
		return err
	}
	const query = `
INSERT INTO vms (
	id, name, image_id, guest_ip, state, created_at, updated_at, last_touched_at,
	spec_json, runtime_json, stats_json
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
	name=excluded.name,
	image_id=excluded.image_id,
	guest_ip=excluded.guest_ip,
	state=excluded.state,
	updated_at=excluded.updated_at,
	last_touched_at=excluded.last_touched_at,
	spec_json=excluded.spec_json,
	runtime_json=excluded.runtime_json,
	stats_json=excluded.stats_json`
	// Argument order mirrors the INSERT column list exactly.
	_, err = s.db.ExecContext(ctx, query,
		vm.ID,
		vm.Name,
		vm.ImageID,
		vm.Runtime.GuestIP,
		string(vm.State),
		vm.CreatedAt.Format(time.RFC3339),
		vm.UpdatedAt.Format(time.RFC3339),
		vm.LastTouchedAt.Format(time.RFC3339),
		string(specJSON),
		string(runtimeJSON),
		string(statsJSON),
	)
	return err
}
|
|
|
|
func (s *Store) GetVM(ctx context.Context, idOrName string) (model.VMRecord, error) {
|
|
const query = `
|
|
SELECT id, name, image_id, guest_ip, state, created_at, updated_at, last_touched_at,
|
|
spec_json, runtime_json, stats_json, workspace_json
|
|
FROM vms
|
|
WHERE id = ? OR name = ?
|
|
`
|
|
row := s.db.QueryRowContext(ctx, query, idOrName, idOrName)
|
|
return scanVMRow(row)
|
|
}
|
|
|
|
func (s *Store) GetVMByID(ctx context.Context, id string) (model.VMRecord, error) {
|
|
row := s.db.QueryRowContext(ctx, `
|
|
SELECT id, name, image_id, guest_ip, state, created_at, updated_at, last_touched_at,
|
|
spec_json, runtime_json, stats_json, workspace_json
|
|
FROM vms WHERE id = ?`, id)
|
|
return scanVMRow(row)
|
|
}
|
|
|
|
// GetVMByName is the exact-name lookup used for creation-time
|
|
// uniqueness checks. Unlike GetVM (which matches id OR name) and
|
|
// Daemon.FindVM (which also falls back to prefix-matching), this
|
|
// returns sql.ErrNoRows for anything except a literal name hit, so
|
|
// a new VM can't be rejected just because its name prefixes an
|
|
// existing VM's id or an existing VM's name.
|
|
func (s *Store) GetVMByName(ctx context.Context, name string) (model.VMRecord, error) {
|
|
row := s.db.QueryRowContext(ctx, `
|
|
SELECT id, name, image_id, guest_ip, state, created_at, updated_at, last_touched_at,
|
|
spec_json, runtime_json, stats_json, workspace_json
|
|
FROM vms WHERE name = ?`, name)
|
|
return scanVMRow(row)
|
|
}
|
|
|
|
func (s *Store) ListVMs(ctx context.Context) ([]model.VMRecord, error) {
|
|
rows, err := s.db.QueryContext(ctx, `
|
|
SELECT id, name, image_id, guest_ip, state, created_at, updated_at, last_touched_at,
|
|
spec_json, runtime_json, stats_json, workspace_json
|
|
FROM vms ORDER BY created_at ASC`)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var vms []model.VMRecord
|
|
for rows.Next() {
|
|
vm, err := scanVMRows(rows)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
vms = append(vms, vm)
|
|
}
|
|
return vms, rows.Err()
|
|
}
|
|
|
|
// DeleteVM removes the VM row with the given id. Deleting an id that
// does not exist is a silent no-op (no error).
func (s *Store) DeleteVM(ctx context.Context, id string) error {
	s.writeMu.Lock()
	defer s.writeMu.Unlock()
	_, err := s.db.ExecContext(ctx, "DELETE FROM vms WHERE id = ?", id)
	return err
}
|
|
|
|
// SetVMWorkspace persists the workspace state from a workspace.prepare
|
|
// result onto the VM row. Called after a successful prepare so the
|
|
// guest path, host source path, and HEAD commit survive daemon
|
|
// restarts and are available to `vm exec` without re-stating them.
|
|
// Best-effort from the caller's perspective — a failure here does not
|
|
// roll back the prepare itself.
|
|
func (s *Store) SetVMWorkspace(ctx context.Context, vmID string, workspace model.VMWorkspace) error {
|
|
s.writeMu.Lock()
|
|
defer s.writeMu.Unlock()
|
|
data, err := json.Marshal(workspace)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
_, err = s.db.ExecContext(ctx, "UPDATE vms SET workspace_json = ? WHERE id = ?", string(data), vmID)
|
|
return err
|
|
}
|
|
|
|
func (s *Store) FindVMsUsingImage(ctx context.Context, imageID string) ([]model.VMRecord, error) {
|
|
rows, err := s.db.QueryContext(ctx, `
|
|
SELECT id, name, image_id, guest_ip, state, created_at, updated_at, last_touched_at,
|
|
spec_json, runtime_json, stats_json, workspace_json
|
|
FROM vms WHERE image_id = ?`, imageID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var vms []model.VMRecord
|
|
for rows.Next() {
|
|
vm, err := scanVMRows(rows)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
vms = append(vms, vm)
|
|
}
|
|
return vms, rows.Err()
|
|
}
|
|
|
|
func (s *Store) NextGuestIP(ctx context.Context, bridgeIPPrefix string) (string, error) {
|
|
used := map[string]struct{}{}
|
|
rows, err := s.db.QueryContext(ctx, "SELECT guest_ip FROM vms")
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
defer rows.Close()
|
|
for rows.Next() {
|
|
var ip string
|
|
if err := rows.Scan(&ip); err != nil {
|
|
return "", err
|
|
}
|
|
used[ip] = struct{}{}
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return "", err
|
|
}
|
|
for i := 2; i < 255; i++ {
|
|
candidate := fmt.Sprintf("%s.%d", bridgeIPPrefix, i)
|
|
if _, exists := used[candidate]; !exists {
|
|
return candidate, nil
|
|
}
|
|
}
|
|
return "", errors.New("no guest IPs available")
|
|
}
|
|
|
|
// getImage runs a single-row image query with one string argument and
// scans the result; shared by GetImageByName and GetImageByID.
func (s *Store) getImage(ctx context.Context, query string, arg string) (model.Image, error) {
	row := s.db.QueryRowContext(ctx, query, arg)
	return scanImageRow(row)
}
|
|
|
|
// scanImage adapts scanImageRow for *sql.Rows iteration; both types
// satisfy the scanner interface, so this is a pure alias.
func scanImage(rows scanner) (model.Image, error) {
	return scanImageRow(rows)
}
|
|
|
|
// scanner is the common subset of *sql.Row and *sql.Rows used by the
// scan helpers, letting one decoder serve both single-row and
// multi-row queries.
type scanner interface {
	Scan(dest ...any) error
}
|
|
|
|
func scanImageRow(row scanner) (model.Image, error) {
|
|
var image model.Image
|
|
var managed int
|
|
var workSeedPath sql.NullString
|
|
var seededSSHPublicKeyFingerprint sql.NullString
|
|
var createdAt, updatedAt string
|
|
err := row.Scan(
|
|
&image.ID,
|
|
&image.Name,
|
|
&managed,
|
|
&image.ArtifactDir,
|
|
&image.RootfsPath,
|
|
&workSeedPath,
|
|
&image.KernelPath,
|
|
&image.InitrdPath,
|
|
&image.ModulesDir,
|
|
&image.BuildSize,
|
|
&seededSSHPublicKeyFingerprint,
|
|
&createdAt,
|
|
&updatedAt,
|
|
)
|
|
if err != nil {
|
|
return image, err
|
|
}
|
|
image.Managed = managed == 1
|
|
image.WorkSeedPath = workSeedPath.String
|
|
image.SeededSSHPublicKeyFingerprint = seededSSHPublicKeyFingerprint.String
|
|
image.CreatedAt, err = time.Parse(time.RFC3339, createdAt)
|
|
if err != nil {
|
|
return image, err
|
|
}
|
|
image.UpdatedAt, err = time.Parse(time.RFC3339, updatedAt)
|
|
if err != nil {
|
|
return image, err
|
|
}
|
|
return image, nil
|
|
}
|
|
|
|
// scanVMRow decodes a single-row VM query result; alias over
// scanVMInto for *sql.Row callers.
func scanVMRow(row scanner) (model.VMRecord, error) {
	return scanVMInto(row)
}
|
|
|
|
// scanVMRows decodes the current row of a multi-row VM query; alias
// over scanVMInto for *sql.Rows callers.
func scanVMRows(rows scanner) (model.VMRecord, error) {
	return scanVMInto(rows)
}
|
|
|
|
func scanVMInto(row scanner) (model.VMRecord, error) {
|
|
var vm model.VMRecord
|
|
var state, createdAt, updatedAt, touchedAt, specJSON, runtimeJSON, statsJSON, workspaceJSON string
|
|
err := row.Scan(
|
|
&vm.ID,
|
|
&vm.Name,
|
|
&vm.ImageID,
|
|
&vm.Runtime.GuestIP,
|
|
&state,
|
|
&createdAt,
|
|
&updatedAt,
|
|
&touchedAt,
|
|
&specJSON,
|
|
&runtimeJSON,
|
|
&statsJSON,
|
|
&workspaceJSON,
|
|
)
|
|
if err != nil {
|
|
return vm, err
|
|
}
|
|
vm.State = model.VMState(state)
|
|
if err := json.Unmarshal([]byte(specJSON), &vm.Spec); err != nil {
|
|
return vm, err
|
|
}
|
|
if err := json.Unmarshal([]byte(runtimeJSON), &vm.Runtime); err != nil {
|
|
return vm, err
|
|
}
|
|
if statsJSON != "" {
|
|
if err := json.Unmarshal([]byte(statsJSON), &vm.Stats); err != nil {
|
|
return vm, err
|
|
}
|
|
}
|
|
if workspaceJSON != "" && workspaceJSON != "{}" {
|
|
if err := json.Unmarshal([]byte(workspaceJSON), &vm.Workspace); err != nil {
|
|
return vm, err
|
|
}
|
|
}
|
|
var parseErr error
|
|
vm.CreatedAt, parseErr = time.Parse(time.RFC3339, createdAt)
|
|
if parseErr != nil {
|
|
return vm, parseErr
|
|
}
|
|
vm.UpdatedAt, parseErr = time.Parse(time.RFC3339, updatedAt)
|
|
if parseErr != nil {
|
|
return vm, parseErr
|
|
}
|
|
vm.LastTouchedAt, parseErr = time.Parse(time.RFC3339, touchedAt)
|
|
if parseErr != nil {
|
|
return vm, parseErr
|
|
}
|
|
return vm, nil
|
|
}
|
|
|
|
// boolToInt maps true to 1 and false to 0 for storage in SQLite
// integer columns.
func boolToInt(value bool) int {
	var n int
	if value {
		n = 1
	}
	return n
}
|
|
|
|
func nullableTimeString(value time.Time) any {
|
|
if value.IsZero() {
|
|
return nil
|
|
}
|
|
return value.Format(time.RFC3339)
|
|
}
|
|
|
|
// nullableInt dereferences an optional int for storage, mapping a nil
// pointer to SQL NULL (a nil any).
func nullableInt(value *int) any {
	if value != nil {
		return *value
	}
	return nil
}
|