Prerequisite for `banger update`. Before swapping a staged binary
into place, the updater needs to confirm the new bangerd recognises
the running install's DB schema. Without this, an operator could end
up with a service that won't open its store after the binary swap +
restart.
* store.InspectSchemaState(path): opens the DB read-only (reusing
OpenReadOnly's mode=ro DSN), reads the schema_migrations table,
and classifies the relationship between applied and known IDs:
SchemaCompatible (lockstep), SchemaMigrationsNeeded (binary
newer, will auto-migrate on first Open), or SchemaIncompatible
(DB has applied IDs the binary doesn't know about).
Missing schema_migrations table is treated as "all migrations
pending" rather than an error — matches the fresh-install case.
* bangerd --check-migrations: opens the configured DB read-only,
prints a one-line classification, and exits 0/1/2. The exit
code is the contract:
0 — compatible
1 — migrations needed (binary newer; safe to swap)
2 — incompatible (binary older than DB; abort the swap)
Honours --system to pick between system StateDir and user mode.
* bangerdExit indirection so future tests can capture the exit
code without terminating the test process. Production points
at os.Exit.
Tests cover the four classifications: compatible (fully migrated
DB), migrations-needed (only baseline applied), incompatible
(synthetic id=99 inserted), and missing-table (fresh DB). Live
exercise on this dev host returned `migrations needed: pending [3]
(binary will apply on first Open)` and exit 1, matching the
contract.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
303 lines
8.6 KiB
Go
package store
|
|
|
|
import (
	"database/sql"
	"fmt"
	"sort"
	"strings"
	"time"
)
|
|
|
|
// migration is one ordered, atomic schema step. id must be unique and
// strictly increasing across the slice. name is a human-readable label
// stored alongside the id for debugging, and up receives a *sql.Tx so
// DDL + data backfills land atomically — either the migration fully
// applies and a schema_migrations row is written, or the whole thing
// rolls back and gets retried on next Open().
type migration struct {
	id   int                 // unique key; persisted in schema_migrations.id
	name string              // debugging label; persisted in schema_migrations.name
	up   func(*sql.Tx) error // migration body; runs inside a single transaction
}
|
|
|
|
// migrations is the canonical ordered history. Append new migrations
// at the bottom with the next id. Never edit or reorder existing
// entries — installed DBs key off the id column.
var migrations = []migration{
	{id: 1, name: "baseline", up: migrateBaseline},           // full initial schema (images, vms)
	{id: 2, name: "drop_images_docker", up: migrateDropImagesDocker}, // removes legacy images.docker column
	{id: 3, name: "add_vm_workspace", up: migrateAddVMWorkspace},     // adds vms.workspace_json
}
|
|
|
|
// runMigrations ensures schema_migrations exists, then applies every
|
|
// migration whose id hasn't been recorded yet, in id order.
|
|
func runMigrations(db *sql.DB) error {
|
|
if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS schema_migrations (
|
|
id INTEGER PRIMARY KEY,
|
|
name TEXT NOT NULL,
|
|
applied_at TEXT NOT NULL
|
|
)`); err != nil {
|
|
return fmt.Errorf("create schema_migrations: %w", err)
|
|
}
|
|
|
|
applied, err := loadAppliedMigrations(db)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
sorted := make([]migration, len(migrations))
|
|
copy(sorted, migrations)
|
|
sort.Slice(sorted, func(i, j int) bool { return sorted[i].id < sorted[j].id })
|
|
seen := map[int]bool{}
|
|
for _, m := range sorted {
|
|
if seen[m.id] {
|
|
return fmt.Errorf("duplicate migration id %d (%q)", m.id, m.name)
|
|
}
|
|
seen[m.id] = true
|
|
}
|
|
|
|
for _, m := range sorted {
|
|
if _, ok := applied[m.id]; ok {
|
|
continue
|
|
}
|
|
if err := applyMigration(db, m); err != nil {
|
|
return fmt.Errorf("migration %d (%s): %w", m.id, m.name, err)
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// SchemaCompatibility classifies the relationship between this
// binary's known migrations and a (possibly stale) DB's applied set.
// Produced by classifySchemaState; see SchemaState.Compatibility.
type SchemaCompatibility int

const (
	// SchemaCompatible: every applied id is known to this binary AND
	// every known id has been applied. Binary and DB are in lockstep.
	SchemaCompatible SchemaCompatibility = iota
	// SchemaMigrationsNeeded: binary knows ids the DB hasn't applied
	// yet. Open() would auto-migrate; safe.
	SchemaMigrationsNeeded
	// SchemaIncompatible: DB has applied ids this binary doesn't
	// know about. Binary is older than the running install. Refuse
	// the swap.
	SchemaIncompatible
)
|
|
|
|
// SchemaState describes the migration status of a DB relative to
// this binary's compiled-in `migrations` slice. Used by
// `bangerd --check-migrations` to gate `banger update`'s binary swap
// before service restart — a staged binary must not be allowed to
// take over a DB whose schema it doesn't know how to read.
type SchemaState struct {
	Compatibility SchemaCompatibility // three-way verdict; see the SchemaCompatibility constants
	AppliedIDs    []int               // ids read from schema_migrations (copy; order as returned by the query)
	KnownMaxID    int                 // highest id in this binary's migrations slice; 0 if none
	Pending       []int               // known IDs not yet applied
	Unknown       []int               // applied IDs the binary doesn't recognise
}
|
|
|
|
// InspectSchemaState opens path read-only and reports how the DB's
|
|
// applied-migration set compares to the binary's known set. Returns
|
|
// an error only on real I/O failures (file missing, permission
|
|
// denied, corrupt SQLite); a "DB ahead of binary" state is reported
|
|
// via Compatibility, not as an error.
|
|
func InspectSchemaState(path string) (SchemaState, error) {
|
|
dsn, err := sqliteReadOnlyDSN(path)
|
|
if err != nil {
|
|
return SchemaState{}, err
|
|
}
|
|
db, err := sql.Open("sqlite", dsn)
|
|
if err != nil {
|
|
return SchemaState{}, err
|
|
}
|
|
defer db.Close()
|
|
if err := db.Ping(); err != nil {
|
|
return SchemaState{}, err
|
|
}
|
|
// schema_migrations may not exist on a fresh install. Treat that
|
|
// as "applied = ∅" rather than an error — the equivalent of
|
|
// "the new binary will create the table on first Open".
|
|
rows, err := db.Query("SELECT id FROM schema_migrations")
|
|
if err != nil {
|
|
// modernc.org/sqlite doesn't expose a typed "no such table"
|
|
// error; sniff the message. Anything else bubbles.
|
|
if errMissingTable(err) {
|
|
return classifySchemaState(nil), nil
|
|
}
|
|
return SchemaState{}, err
|
|
}
|
|
defer rows.Close()
|
|
var applied []int
|
|
for rows.Next() {
|
|
var id int
|
|
if err := rows.Scan(&id); err != nil {
|
|
return SchemaState{}, err
|
|
}
|
|
applied = append(applied, id)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return SchemaState{}, err
|
|
}
|
|
return classifySchemaState(applied), nil
|
|
}
|
|
|
|
func classifySchemaState(applied []int) SchemaState {
|
|
known := map[int]struct{}{}
|
|
knownMax := 0
|
|
for _, m := range migrations {
|
|
known[m.id] = struct{}{}
|
|
if m.id > knownMax {
|
|
knownMax = m.id
|
|
}
|
|
}
|
|
appliedSet := map[int]struct{}{}
|
|
var unknown []int
|
|
for _, id := range applied {
|
|
appliedSet[id] = struct{}{}
|
|
if _, ok := known[id]; !ok {
|
|
unknown = append(unknown, id)
|
|
}
|
|
}
|
|
var pending []int
|
|
for _, m := range migrations {
|
|
if _, ok := appliedSet[m.id]; !ok {
|
|
pending = append(pending, m.id)
|
|
}
|
|
}
|
|
state := SchemaState{
|
|
AppliedIDs: append([]int(nil), applied...),
|
|
KnownMaxID: knownMax,
|
|
Pending: pending,
|
|
Unknown: unknown,
|
|
}
|
|
switch {
|
|
case len(unknown) > 0:
|
|
state.Compatibility = SchemaIncompatible
|
|
case len(pending) > 0:
|
|
state.Compatibility = SchemaMigrationsNeeded
|
|
default:
|
|
state.Compatibility = SchemaCompatible
|
|
}
|
|
return state
|
|
}
|
|
|
|
// errMissingTable reports whether err is the error modernc.org/sqlite
// produces when the schema_migrations table does not exist. The driver
// doesn't expose a typed "no such table" error, so the canonical
// message sub-string is the only reliable signal.
func errMissingTable(err error) bool {
	if err == nil {
		return false
	}
	return strings.Contains(err.Error(), "no such table: schema_migrations")
}
|
|
|
|
// contains reports whether sub occurs within s.
//
// Deprecated: this is exactly strings.Contains; it now delegates to the
// standard library instead of the former hand-rolled O(len(s)·len(sub))
// scan. Kept only for existing callers — prefer strings.Contains.
func contains(s, sub string) bool {
	return strings.Contains(s, sub)
}
|
|
|
|
// loadAppliedMigrations returns the set of migration ids already
// recorded in schema_migrations. Callers must ensure the table exists
// first (runMigrations creates it); a missing table is an error here.
// All failure paths now wrap with context, matching the Query wrap —
// previously Scan and Rows.Err errors were returned bare.
func loadAppliedMigrations(db *sql.DB) (map[int]struct{}, error) {
	rows, err := db.Query("SELECT id FROM schema_migrations")
	if err != nil {
		return nil, fmt.Errorf("load schema_migrations: %w", err)
	}
	defer rows.Close()
	applied := map[int]struct{}{}
	for rows.Next() {
		var id int
		if err := rows.Scan(&id); err != nil {
			return nil, fmt.Errorf("scan schema_migrations: %w", err)
		}
		applied[id] = struct{}{}
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate schema_migrations: %w", err)
	}
	return applied, nil
}
|
|
|
|
func applyMigration(db *sql.DB, m migration) error {
|
|
tx, err := db.Begin()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if err := m.up(tx); err != nil {
|
|
_ = tx.Rollback()
|
|
return err
|
|
}
|
|
if _, err := tx.Exec(
|
|
"INSERT INTO schema_migrations (id, name, applied_at) VALUES (?, ?, ?)",
|
|
m.id, m.name, time.Now().UTC().Format(time.RFC3339),
|
|
); err != nil {
|
|
_ = tx.Rollback()
|
|
return fmt.Errorf("record migration: %w", err)
|
|
}
|
|
return tx.Commit()
|
|
}
|
|
|
|
// migrateBaseline creates the full schema as of migration id 1. Fresh
// installs replay the whole history starting here; later migrations
// mutate this shape, so these statements must never be edited.
func migrateBaseline(tx *sql.Tx) error {
	// images first: vms carries a foreign key into it.
	ddl := []string{
		`CREATE TABLE IF NOT EXISTS images (
		id TEXT PRIMARY KEY,
		name TEXT NOT NULL UNIQUE,
		managed INTEGER NOT NULL DEFAULT 0,
		artifact_dir TEXT,
		rootfs_path TEXT NOT NULL,
		work_seed_path TEXT,
		kernel_path TEXT NOT NULL,
		initrd_path TEXT,
		modules_dir TEXT,
		build_size TEXT,
		seeded_ssh_public_key_fingerprint TEXT,
		docker INTEGER NOT NULL DEFAULT 0,
		created_at TEXT NOT NULL,
		updated_at TEXT NOT NULL
		);`,
		`CREATE TABLE IF NOT EXISTS vms (
		id TEXT PRIMARY KEY,
		name TEXT NOT NULL UNIQUE,
		image_id TEXT NOT NULL,
		guest_ip TEXT NOT NULL UNIQUE,
		state TEXT NOT NULL,
		created_at TEXT NOT NULL,
		updated_at TEXT NOT NULL,
		last_touched_at TEXT NOT NULL,
		spec_json TEXT NOT NULL,
		runtime_json TEXT NOT NULL,
		stats_json TEXT NOT NULL DEFAULT '{}',
		FOREIGN KEY(image_id) REFERENCES images(id) ON DELETE RESTRICT
		);`,
	}
	for _, q := range ddl {
		if _, err := tx.Exec(q); err != nil {
			return err
		}
	}
	return nil
}
|
|
|
|
// migrateDropImagesDocker removes the legacy images.docker column.
// SQLite supports ALTER TABLE ... DROP COLUMN since 3.35 (2021), and
// banger ships against modern SQLite, so one statement suffices.
// Existing values are discarded — the field never affected runtime
// behaviour.
func migrateDropImagesDocker(tx *sql.Tx) error {
	const drop = `ALTER TABLE images DROP COLUMN docker;`
	_, err := tx.Exec(drop)
	return err
}
|
|
|
|
// migrateAddVMWorkspace adds the workspace_json column recording the
// last workspace.prepare result (guest path, host source path, HEAD
// commit, timestamp) per VM. The '{}' default means "no workspace
// prepared yet". The column is written only via Store.SetVMWorkspace;
// lifecycle UpsertVM calls never touch it, so workspace state survives
// VM stop/start cycles.
func migrateAddVMWorkspace(tx *sql.Tx) error {
	const add = `ALTER TABLE vms ADD COLUMN workspace_json TEXT NOT NULL DEFAULT '{}'`
	_, err := tx.Exec(add)
	return err
}
|