Add daily-loop prepare and readiness checks

Make the local chat-host loop explicit and cheap so users can warm the machine once instead of rediscovering environment and guest setup on every session.

Add cache-backed daily-loop manifests plus the new `pyro prepare` flow, extend `pyro doctor --environment` with warm/cold/stale readiness reporting, and add `make smoke-daily-loop` to prove the warmed repro-fix reset path end to end.

Also fix `python -m pyro_mcp.cli` to invoke `main()` so the new smoke and `dist-check` actually exercise the CLI module, and update the docs/roadmap to present `doctor -> prepare -> connect host -> reset` as the recommended daily path.

Validation: `uv lock`, `UV_OFFLINE=1 UV_CACHE_DIR=.uv-cache make check`, `UV_OFFLINE=1 UV_CACHE_DIR=.uv-cache make dist-check`, and `UV_OFFLINE=1 UV_CACHE_DIR=.uv-cache make smoke-daily-loop`.
This commit is contained in:
Thales Maciel 2026-03-13 21:17:59 -03:00
parent d0cf6d8f21
commit 663241d5d2
26 changed files with 1592 additions and 199 deletions

View file

@ -13,6 +13,7 @@ from typing import Any, cast
from pyro_mcp import __version__
from pyro_mcp.api import McpToolProfile, Pyro, WorkspaceUseCaseMode
from pyro_mcp.contract import PUBLIC_MCP_MODES, PUBLIC_MCP_PROFILES
from pyro_mcp.daily_loop import DEFAULT_PREPARE_ENVIRONMENT
from pyro_mcp.demo import run_demo
from pyro_mcp.host_helpers import (
HostDoctorEntry,
@ -154,6 +155,7 @@ def _print_doctor_human(payload: dict[str, Any]) -> None:
)
runtime = payload.get("runtime")
if isinstance(runtime, dict):
print(f"Catalog version: {str(runtime.get('catalog_version', 'unknown'))}")
print(f"Environment cache: {str(runtime.get('cache_dir', 'unknown'))}")
capabilities = runtime.get("capabilities")
if isinstance(capabilities, dict):
@ -171,12 +173,51 @@ def _print_doctor_human(payload: dict[str, Any]) -> None:
f"tun={'yes' if bool(networking.get('tun_available')) else 'no'} "
f"ip_forward={'yes' if bool(networking.get('ip_forward_enabled')) else 'no'}"
)
daily_loop = payload.get("daily_loop")
if isinstance(daily_loop, dict):
status = str(daily_loop.get("status", "cold")).upper()
environment = str(daily_loop.get("environment", DEFAULT_PREPARE_ENVIRONMENT))
print(f"Daily loop: {status} ({environment})")
print(
" "
f"installed={'yes' if bool(daily_loop.get('installed')) else 'no'} "
f"network_prepared={'yes' if bool(daily_loop.get('network_prepared')) else 'no'}"
)
prepared_at = daily_loop.get("prepared_at")
if prepared_at is not None:
print(f" prepared_at={prepared_at}")
reason = daily_loop.get("reason")
if isinstance(reason, str) and reason != "":
print(f" reason={reason}")
if str(daily_loop.get("status", "cold")) != "warm":
print(f" Run: pyro prepare {environment}")
if isinstance(issues, list) and issues:
print("Issues:")
for issue in issues:
print(f"- {issue}")
def _print_prepare_human(payload: dict[str, Any]) -> None:
    """Render a `pyro prepare` result payload as human-readable lines."""
    env_name = str(payload.get("environment", DEFAULT_PREPARE_ENVIRONMENT))
    print(f"Prepare: {env_name}")
    print(f"Daily loop: {str(payload.get('status', 'cold')).upper()}")
    outcome = "reused" if bool(payload.get("reused")) else "prepared"
    network_flag = "yes" if bool(payload.get("network_prepared")) else "no"
    print(f"Result: {outcome} network_prepared={network_flag}")
    print(f"Cache dir: {str(payload.get('cache_dir', 'unknown'))}")
    print(f"Manifest: {str(payload.get('manifest_path', 'unknown'))}")
    when = payload.get("prepared_at")
    if when is not None:
        print(f"Prepared at: {when}")
    print(f"Duration: {int(payload.get('last_prepare_duration_ms', 0))} ms")
    why = payload.get("reason")
    if isinstance(why, str) and why != "":
        print(f"Reason: {why}")
def _build_host_server_config(args: argparse.Namespace) -> HostServerConfig:
return HostServerConfig(
installed_package=bool(getattr(args, "installed_package", False)),
@ -899,8 +940,7 @@ def _build_parser() -> argparse.ArgumentParser:
"""
Suggested zero-to-hero path:
pyro doctor
pyro env list
pyro env pull debian:12
pyro prepare debian:12
pyro run debian:12 -- git --version
pyro host connect claude-code
@ -909,6 +949,11 @@ def _build_parser() -> argparse.ArgumentParser:
pyro host connect codex
pyro host print-config opencode
Daily local loop after the first warmup:
pyro doctor --environment debian:12
pyro prepare debian:12
pyro workspace reset WORKSPACE_ID
If you want terminal-level visibility into the workspace model:
pyro workspace create debian:12 --seed-path ./repo --id-only
pyro workspace sync push WORKSPACE_ID ./changes
@ -928,6 +973,51 @@ def _build_parser() -> argparse.ArgumentParser:
parser.add_argument("--version", action="version", version=f"%(prog)s {__version__}")
subparsers = parser.add_subparsers(dest="command", required=True, metavar="COMMAND")
prepare_parser = subparsers.add_parser(
"prepare",
help="Warm the local machine for the daily workspace loop.",
description=(
"Warm the recommended guest-backed daily loop by ensuring the "
"environment is installed and proving create, exec, reset, and "
"delete on one throwaway workspace."
),
epilog=dedent(
f"""
Examples:
pyro prepare
pyro prepare {DEFAULT_PREPARE_ENVIRONMENT}
pyro prepare {DEFAULT_PREPARE_ENVIRONMENT} --network
pyro prepare {DEFAULT_PREPARE_ENVIRONMENT} --force
"""
),
formatter_class=_HelpFormatter,
)
prepare_parser.add_argument(
"environment",
nargs="?",
default=DEFAULT_PREPARE_ENVIRONMENT,
metavar="ENVIRONMENT",
help=(
"Curated environment to warm for the daily loop. Defaults to "
f"`{DEFAULT_PREPARE_ENVIRONMENT}`."
),
)
prepare_parser.add_argument(
"--network",
action="store_true",
help="Also warm guest networking by proving one egress-enabled workspace cycle.",
)
prepare_parser.add_argument(
"--force",
action="store_true",
help="Rerun warmup even when a compatible warm manifest already exists.",
)
prepare_parser.add_argument(
"--json",
action="store_true",
help="Print structured JSON instead of human-readable output.",
)
env_parser = subparsers.add_parser(
"env",
help="Inspect and manage curated environments.",
@ -1245,10 +1335,7 @@ def _build_parser() -> argparse.ArgumentParser:
mcp_serve_parser.add_argument(
"--no-project-source",
action="store_true",
help=(
"Disable automatic Git checkout detection from the current working "
"directory."
),
help=("Disable automatic Git checkout detection from the current working directory."),
)
run_parser = subparsers.add_parser(
@ -1306,8 +1393,7 @@ def _build_parser() -> argparse.ArgumentParser:
"--allow-host-compat",
action="store_true",
help=(
"Opt into host-side compatibility execution if guest boot or guest exec "
"is unavailable."
"Opt into host-side compatibility execution if guest boot or guest exec is unavailable."
),
)
run_parser.add_argument(
@ -1428,8 +1514,7 @@ def _build_parser() -> argparse.ArgumentParser:
"--allow-host-compat",
action="store_true",
help=(
"Opt into host-side compatibility execution if guest boot or guest exec "
"is unavailable."
"Opt into host-side compatibility execution if guest boot or guest exec is unavailable."
),
)
workspace_create_parser.add_argument(
@ -1479,8 +1564,7 @@ def _build_parser() -> argparse.ArgumentParser:
"exec",
help="Run one command inside an existing workspace.",
description=(
"Run one non-interactive command in the persistent `/workspace` "
"for a workspace."
"Run one non-interactive command in the persistent `/workspace` for a workspace."
),
epilog=dedent(
"""
@ -1716,8 +1800,7 @@ def _build_parser() -> argparse.ArgumentParser:
"created automatically."
),
epilog=(
"Example:\n"
" pyro workspace file write WORKSPACE_ID src/app.py --text-file ./app.py"
"Example:\n pyro workspace file write WORKSPACE_ID src/app.py --text-file ./app.py"
),
formatter_class=_HelpFormatter,
)
@ -1909,8 +1992,7 @@ def _build_parser() -> argparse.ArgumentParser:
"start",
help="Start one stopped workspace without resetting it.",
description=(
"Start a previously stopped workspace from its preserved rootfs and "
"workspace state."
"Start a previously stopped workspace from its preserved rootfs and workspace state."
),
epilog="Example:\n pyro workspace start WORKSPACE_ID",
formatter_class=_HelpFormatter,
@ -2036,8 +2118,7 @@ def _build_parser() -> argparse.ArgumentParser:
"shell",
help="Open and manage persistent interactive shells.",
description=(
"Open one or more persistent interactive PTY shell sessions inside a started "
"workspace."
"Open one or more persistent interactive PTY shell sessions inside a started workspace."
),
epilog=dedent(
"""
@ -2520,8 +2601,7 @@ while true; do sleep 60; done'
"logs",
help="Show command history for one workspace.",
description=(
"Show persisted command history, including stdout and stderr, "
"for one workspace."
"Show persisted command history, including stdout and stderr, for one workspace."
),
epilog="Example:\n pyro workspace logs WORKSPACE_ID",
formatter_class=_HelpFormatter,
@ -2557,11 +2637,16 @@ while true; do sleep 60; done'
doctor_parser = subparsers.add_parser(
"doctor",
help="Inspect runtime and host diagnostics.",
description="Check host prerequisites and embedded runtime health before your first run.",
description=(
"Check host prerequisites and embedded runtime health, plus "
"daily-loop warmth before your first run or before reconnecting a "
"chat host."
),
epilog=dedent(
"""
Examples:
pyro doctor
pyro doctor --environment debian:12
pyro doctor --json
"""
),
@ -2572,6 +2657,14 @@ while true; do sleep 60; done'
default=DEFAULT_PLATFORM,
help="Runtime platform to inspect.",
)
doctor_parser.add_argument(
"--environment",
default=DEFAULT_PREPARE_ENVIRONMENT,
help=(
"Environment to inspect for the daily-loop warm manifest. "
f"Defaults to `{DEFAULT_PREPARE_ENVIRONMENT}`."
),
)
doctor_parser.add_argument(
"--json",
action="store_true",
@ -2734,6 +2827,24 @@ def _parse_workspace_publish_options(values: list[str]) -> list[dict[str, int |
def main() -> None:
args = _build_parser().parse_args()
pyro = Pyro()
if args.command == "prepare":
try:
payload = pyro.manager.prepare_daily_loop(
args.environment,
network=bool(args.network),
force=bool(args.force),
)
except Exception as exc: # noqa: BLE001
if bool(args.json):
_print_json({"ok": False, "error": str(exc)})
else:
print(f"[error] {exc}", file=sys.stderr, flush=True)
raise SystemExit(1) from exc
if bool(args.json):
_print_json(payload)
else:
_print_prepare_human(payload)
return
if args.command == "env":
if args.env_command == "list":
list_payload: dict[str, Any] = {
@ -2881,10 +2992,7 @@ def main() -> None:
if args.command == "workspace":
if args.workspace_command == "create":
secrets = [
*(
_parse_workspace_secret_option(value)
for value in getattr(args, "secret", [])
),
*(_parse_workspace_secret_option(value) for value in getattr(args, "secret", [])),
*(
_parse_workspace_secret_file_option(value)
for value in getattr(args, "secret_file", [])
@ -2919,9 +3027,7 @@ def main() -> None:
return
if args.workspace_command == "update":
labels = _parse_workspace_label_options(getattr(args, "label", []))
clear_labels = _parse_workspace_clear_label_options(
getattr(args, "clear_label", [])
)
clear_labels = _parse_workspace_clear_label_options(getattr(args, "clear_label", []))
try:
payload = pyro.update_workspace(
args.workspace_id,
@ -3527,7 +3633,17 @@ def main() -> None:
print(f"Deleted workspace: {str(payload.get('workspace_id', 'unknown'))}")
return
if args.command == "doctor":
payload = doctor_report(platform=args.platform)
try:
payload = doctor_report(
platform=args.platform,
environment=args.environment,
)
except Exception as exc: # noqa: BLE001
if bool(args.json):
_print_json({"ok": False, "error": str(exc)})
else:
print(f"[error] {exc}", file=sys.stderr, flush=True)
raise SystemExit(1) from exc
if bool(args.json):
_print_json(payload)
else:
@ -3558,3 +3674,7 @@ def main() -> None:
return
result = run_demo(network=bool(args.network))
_print_json(result)
# Allow `python -m pyro_mcp.cli` to run the same entry point as the installed
# console script (this is what the smoke and dist-check flows exercise).
if __name__ == "__main__":
    main()

View file

@ -2,9 +2,10 @@
from __future__ import annotations
PUBLIC_CLI_COMMANDS = ("demo", "doctor", "env", "host", "mcp", "run", "workspace")
PUBLIC_CLI_COMMANDS = ("demo", "doctor", "env", "host", "mcp", "prepare", "run", "workspace")
PUBLIC_CLI_DEMO_SUBCOMMANDS = ("ollama",)
PUBLIC_CLI_ENV_SUBCOMMANDS = ("inspect", "list", "pull", "prune")
PUBLIC_CLI_DOCTOR_FLAGS = ("--platform", "--environment", "--json")
PUBLIC_CLI_HOST_SUBCOMMANDS = ("connect", "doctor", "print-config", "repair")
PUBLIC_CLI_HOST_COMMON_FLAGS = (
"--installed-package",
@ -28,6 +29,7 @@ PUBLIC_CLI_MCP_SERVE_FLAGS = (
"--repo-ref",
"--no-project-source",
)
PUBLIC_CLI_PREPARE_FLAGS = ("--network", "--force", "--json")
PUBLIC_CLI_WORKSPACE_SUBCOMMANDS = (
"create",
"delete",

152
src/pyro_mcp/daily_loop.py Normal file
View file

@ -0,0 +1,152 @@
"""Machine-level daily-loop warmup state for the CLI prepare flow."""
from __future__ import annotations
import json
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Literal
DEFAULT_PREPARE_ENVIRONMENT = "debian:12"
PREPARE_MANIFEST_LAYOUT_VERSION = 1
DailyLoopStatus = Literal["cold", "warm", "stale"]
def _environment_key(environment: str) -> str:
return environment.replace("/", "_").replace(":", "_")
@dataclass(frozen=True)
class DailyLoopManifest:
    """Persisted machine-readiness proof for one environment on one platform."""

    environment: str
    environment_version: str
    platform: str
    catalog_version: str
    bundle_version: str | None
    prepared_at: float
    network_prepared: bool
    last_prepare_duration_ms: int

    def to_payload(self) -> dict[str, Any]:
        """Serialize to the on-disk JSON shape, tagged with the layout version."""
        field_payload = {
            "environment": self.environment,
            "environment_version": self.environment_version,
            "platform": self.platform,
            "catalog_version": self.catalog_version,
            "bundle_version": self.bundle_version,
            "prepared_at": self.prepared_at,
            "network_prepared": self.network_prepared,
            "last_prepare_duration_ms": self.last_prepare_duration_ms,
        }
        return {"layout_version": PREPARE_MANIFEST_LAYOUT_VERSION, **field_payload}

    @classmethod
    def from_payload(cls, payload: dict[str, Any]) -> "DailyLoopManifest":
        """Rebuild a manifest from a JSON payload, coercing field types."""
        raw_bundle = payload.get("bundle_version")
        bundle = str(payload["bundle_version"]) if raw_bundle is not None else None
        return cls(
            environment=str(payload["environment"]),
            environment_version=str(payload["environment_version"]),
            platform=str(payload["platform"]),
            catalog_version=str(payload["catalog_version"]),
            bundle_version=bundle,
            prepared_at=float(payload["prepared_at"]),
            network_prepared=bool(payload.get("network_prepared", False)),
            last_prepare_duration_ms=int(payload.get("last_prepare_duration_ms", 0)),
        )
def prepare_manifest_path(cache_dir: Path, *, platform: str, environment: str) -> Path:
    """Return the warm-manifest path for one (platform, environment) pair."""
    # Same filesystem-safe mangling as `_environment_key`, inlined here.
    safe_name = environment.replace("/", "_").replace(":", "_")
    return cache_dir.joinpath(".prepare", platform, f"{safe_name}.json")
def load_prepare_manifest(path: Path) -> tuple[DailyLoopManifest | None, str | None]:
    """Load a prepare manifest from ``path``.

    Returns ``(manifest, None)`` on success, ``(None, None)`` when no manifest
    exists yet, and ``(None, reason)`` when a manifest is present but unusable.
    """
    # EAFP: read directly and treat a missing file as "never prepared" so a
    # concurrent delete between a stat and the read cannot surface as an error.
    try:
        raw = path.read_text(encoding="utf-8")
    except FileNotFoundError:
        return None, None
    except OSError as exc:
        return None, f"prepare manifest is unreadable: {exc}"
    try:
        payload = json.loads(raw)
    except json.JSONDecodeError as exc:
        return None, f"prepare manifest is unreadable: {exc}"
    if not isinstance(payload, dict):
        return None, "prepare manifest is not a JSON object"
    try:
        manifest = DailyLoopManifest.from_payload(payload)
    except (KeyError, TypeError, ValueError) as exc:
        return None, f"prepare manifest is invalid: {exc}"
    return manifest, None
def write_prepare_manifest(path: Path, manifest: DailyLoopManifest) -> None:
    """Atomically persist ``manifest`` as pretty-printed, key-sorted JSON."""
    path.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(manifest.to_payload(), indent=2, sort_keys=True)
    # Write to a sibling temp file and rename into place so a concurrent
    # reader (e.g. `pyro doctor`) never observes a partially written manifest.
    tmp_path = path.with_name(path.name + ".tmp")
    tmp_path.write_text(serialized, encoding="utf-8")
    tmp_path.replace(path)
def evaluate_daily_loop_status(
    *,
    environment: str,
    environment_version: str,
    platform: str,
    catalog_version: str,
    bundle_version: str | None,
    installed: bool,
    manifest: DailyLoopManifest | None,
    manifest_error: str | None = None,
) -> tuple[DailyLoopStatus, str | None]:
    """Classify daily-loop readiness as warm, cold, or stale with a reason."""
    # An unreadable/invalid manifest always wins: the state is stale.
    if manifest_error is not None:
        return "stale", manifest_error
    # No manifest at all means the loop was never prepared (cold).
    if manifest is None:
        if not installed:
            return "cold", "environment is not installed"
        return "cold", "daily loop has not been prepared yet"
    if not installed:
        return "stale", "environment install is missing"
    # Any drift between the manifest and the current world makes it stale.
    staleness_checks = (
        (
            manifest.environment != environment,
            "prepare manifest environment does not match the selected environment",
        ),
        (
            manifest.environment_version != environment_version,
            "environment version changed since the last prepare run",
        ),
        (
            manifest.platform != platform,
            "platform changed since the last prepare run",
        ),
        (
            manifest.catalog_version != catalog_version,
            "catalog version changed since the last prepare run",
        ),
        (
            manifest.bundle_version != bundle_version,
            "runtime bundle version changed since the last prepare run",
        ),
    )
    for drifted, stale_reason in staleness_checks:
        if drifted:
            return "stale", stale_reason
    return "warm", None
def prepare_request_is_satisfied(
    manifest: DailyLoopManifest | None,
    *,
    require_network: bool,
) -> bool:
    """Return True when an existing manifest already covers this request."""
    # A manifest must exist, and when networking is required it must have
    # been proven network-ready by an earlier prepare run.
    return manifest is not None and (manifest.network_prepared or not require_network)
def serialize_daily_loop_report(
    *,
    environment: str,
    status: DailyLoopStatus,
    installed: bool,
    cache_dir: Path,
    manifest_path: Path,
    reason: str | None,
    manifest: DailyLoopManifest | None,
) -> dict[str, Any]:
    """Build the JSON-safe daily-loop section shared by doctor and prepare."""
    # Start from the cold defaults, then overlay manifest-derived fields.
    report: dict[str, Any] = {
        "environment": environment,
        "status": status,
        "installed": installed,
        "network_prepared": False,
        "prepared_at": None,
        "manifest_path": str(manifest_path),
        "reason": reason,
        "cache_dir": str(cache_dir),
    }
    if manifest is not None:
        report["network_prepared"] = bool(manifest.network_prepared)
        report["prepared_at"] = manifest.prepared_at
    return report

View file

@ -0,0 +1,131 @@
"""Real guest-backed smoke for the daily local prepare and reset loop."""
from __future__ import annotations
import argparse
import json
import subprocess
import sys
import tempfile
from pathlib import Path
from pyro_mcp.api import Pyro
from pyro_mcp.daily_loop import DEFAULT_PREPARE_ENVIRONMENT
def _log(message: str) -> None:
print(f"[daily-loop] {message}", flush=True)
def _write_text(path: Path, text: str) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(text, encoding="utf-8")
def _run_prepare(environment: str) -> dict[str, object]:
    """Run `pyro prepare ENV --json` via the CLI module and return its payload."""
    command = [sys.executable, "-m", "pyro_mcp.cli", "prepare", environment, "--json"]
    completed = subprocess.run(  # noqa: S603
        command,
        text=True,
        capture_output=True,
        check=False,
    )
    if completed.returncode != 0:
        detail = completed.stderr.strip() or completed.stdout.strip()
        raise RuntimeError(detail or "pyro prepare failed")
    payload = json.loads(completed.stdout)
    if not isinstance(payload, dict):
        raise RuntimeError("pyro prepare did not return a JSON object")
    return payload
def run_daily_loop_smoke(*, environment: str = DEFAULT_PREPARE_ENVIRONMENT) -> None:
    """Exercise the warmed daily loop end to end against a real guest.

    Proves that `pyro prepare` warms then reuses the manifest, that a seeded
    workspace reproduces a failing check, that a patch fixes it and can be
    exported, and that `reset` restores the original seeded state.
    """
    _log(f"prepare environment={environment}")
    # First prepare must actually warm the machine ...
    first_prepare = _run_prepare(environment)
    assert bool(first_prepare["prepared"]) is True, first_prepare
    # ... and an immediate second prepare must reuse the warm manifest.
    second_prepare = _run_prepare(environment)
    assert bool(second_prepare["reused"]) is True, second_prepare
    pyro = Pyro()
    with tempfile.TemporaryDirectory(prefix="pyro-daily-loop-") as temp_dir:
        root = Path(temp_dir)
        seed_dir = root / "seed"
        export_dir = root / "export"
        # Seed a deliberately broken file plus a check script that only
        # succeeds once message.txt reads "fixed".
        _write_text(seed_dir / "message.txt", "broken\n")
        _write_text(
            seed_dir / "check.sh",
            "#!/bin/sh\n"
            "set -eu\n"
            "value=$(cat message.txt)\n"
            '[ "$value" = "fixed" ] || {\n'
            " printf 'expected fixed got %s\\n' \"$value\" >&2\n"
            " exit 1\n"
            "}\n"
            "printf '%s\\n' \"$value\"\n",
        )
        workspace_id: str | None = None
        try:
            created = pyro.create_workspace(
                environment=environment,
                seed_path=seed_dir,
                name="daily-loop",
                labels={"suite": "daily-loop-smoke"},
            )
            workspace_id = str(created["workspace_id"])
            _log(f"workspace_id={workspace_id}")
            # Repro: the seeded check must fail before the patch lands.
            failing = pyro.exec_workspace(workspace_id, command="sh check.sh")
            assert int(failing["exit_code"]) != 0, failing
            patched = pyro.apply_workspace_patch(
                workspace_id,
                patch=("--- a/message.txt\n+++ b/message.txt\n@@ -1 +1 @@\n-broken\n+fixed\n"),
            )
            assert bool(patched["changed"]) is True, patched
            # Fix: after patching, the same check passes and prints "fixed".
            passing = pyro.exec_workspace(workspace_id, command="sh check.sh")
            assert int(passing["exit_code"]) == 0, passing
            assert str(passing["stdout"]) == "fixed\n", passing
            # Export: the fixed file must round-trip out of the guest intact.
            export_path = export_dir / "message.txt"
            exported = pyro.export_workspace(
                workspace_id,
                "message.txt",
                output_path=export_path,
            )
            assert export_path.read_text(encoding="utf-8") == "fixed\n"
            assert str(exported["artifact_type"]) == "file", exported
            # Reset: one reset returns the workspace to the seeded state,
            # so the original failure reproduces again.
            reset = pyro.reset_workspace(workspace_id)
            assert int(reset["reset_count"]) == 1, reset
            rerun = pyro.exec_workspace(workspace_id, command="sh check.sh")
            assert int(rerun["exit_code"]) != 0, rerun
            reset_read = pyro.read_workspace_file(workspace_id, "message.txt")
            assert str(reset_read["content"]) == "broken\n", reset_read
        finally:
            # Best-effort cleanup; never mask the primary smoke failure.
            if workspace_id is not None:
                try:
                    pyro.delete_workspace(workspace_id)
                except Exception:
                    pass
def build_arg_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the daily-loop smoke entry point."""
    parser = argparse.ArgumentParser(
        description="Run the real guest-backed daily-loop prepare and reset smoke.",
    )
    environment_help = f"Environment to warm and test. Defaults to `{DEFAULT_PREPARE_ENVIRONMENT}`."
    parser.add_argument(
        "--environment",
        default=DEFAULT_PREPARE_ENVIRONMENT,
        help=environment_help,
    )
    return parser
def main() -> None:
    """CLI entry point: parse arguments and run the guest-backed smoke."""
    args = build_arg_parser().parse_args()
    run_daily_loop_smoke(environment=args.environment)
# Allow running the smoke directly as a script.
if __name__ == "__main__":
    main()

View file

@ -5,16 +5,18 @@ from __future__ import annotations
import argparse
import json
from pyro_mcp.daily_loop import DEFAULT_PREPARE_ENVIRONMENT
from pyro_mcp.runtime import DEFAULT_PLATFORM, doctor_report
def _build_parser() -> argparse.ArgumentParser:
    """Build the argument parser for the standalone doctor module."""
    parser = argparse.ArgumentParser(
        description="Inspect bundled runtime health for pyro-mcp.",
    )
    # Both flags simply default to the project-wide defaults.
    for flag, default in (
        ("--platform", DEFAULT_PLATFORM),
        ("--environment", DEFAULT_PREPARE_ENVIRONMENT),
    ):
        parser.add_argument(flag, default=default)
    return parser
def main() -> None:
    """Print the runtime doctor report for the requested platform and environment."""
    args = _build_parser().parse_args()
    report = doctor_report(platform=args.platform, environment=args.environment)
    serialized = json.dumps(report, indent=2, sort_keys=True)
    print(serialized)

View file

@ -11,6 +11,13 @@ from dataclasses import dataclass
from pathlib import Path
from typing import Any
from pyro_mcp.daily_loop import (
DEFAULT_PREPARE_ENVIRONMENT,
evaluate_daily_loop_status,
load_prepare_manifest,
prepare_manifest_path,
serialize_daily_loop_report,
)
from pyro_mcp.vm_network import TapNetworkManager
DEFAULT_PLATFORM = "linux-x86_64"
@ -200,7 +207,11 @@ def runtime_capabilities(paths: RuntimePaths) -> RuntimeCapabilities:
)
def doctor_report(*, platform: str = DEFAULT_PLATFORM) -> dict[str, Any]:
def doctor_report(
*,
platform: str = DEFAULT_PLATFORM,
environment: str = DEFAULT_PREPARE_ENVIRONMENT,
) -> dict[str, Any]:
"""Build a runtime diagnostics report."""
report: dict[str, Any] = {
"platform": platform,
@ -258,6 +269,36 @@ def doctor_report(*, platform: str = DEFAULT_PLATFORM) -> dict[str, Any]:
"cache_dir": str(environment_store.cache_dir),
"environments": environment_store.list_environments(),
}
environment_details = environment_store.inspect_environment(environment)
manifest_path = prepare_manifest_path(
environment_store.cache_dir,
platform=platform,
environment=environment,
)
manifest, manifest_error = load_prepare_manifest(manifest_path)
status, reason = evaluate_daily_loop_status(
environment=environment,
environment_version=str(environment_details["version"]),
platform=platform,
catalog_version=environment_store.catalog_version,
bundle_version=(
None
if paths.manifest.get("bundle_version") is None
else str(paths.manifest["bundle_version"])
),
installed=bool(environment_details["installed"]),
manifest=manifest,
manifest_error=manifest_error,
)
report["daily_loop"] = serialize_daily_loop_report(
environment=environment,
status=status,
installed=bool(environment_details["installed"]),
cache_dir=environment_store.cache_dir,
manifest_path=manifest_path,
reason=reason,
manifest=manifest,
)
if not report["kvm"]["exists"]:
report["issues"] = ["/dev/kvm is not available on this host"]
return report

View file

@ -19,7 +19,7 @@ from typing import Any
from pyro_mcp.runtime import DEFAULT_PLATFORM, RuntimePaths
DEFAULT_ENVIRONMENT_VERSION = "1.0.0"
DEFAULT_CATALOG_VERSION = "4.4.0"
DEFAULT_CATALOG_VERSION = "4.5.0"
OCI_MANIFEST_ACCEPT = ", ".join(
(
"application/vnd.oci.image.index.v1+json",
@ -48,7 +48,7 @@ class VmEnvironment:
oci_repository: str | None = None
oci_reference: str | None = None
source_digest: str | None = None
compatibility: str = ">=4.4.0,<5.0.0"
compatibility: str = ">=4.5.0,<5.0.0"
@dataclass(frozen=True)

View file

@ -24,6 +24,15 @@ from dataclasses import dataclass, field
from pathlib import Path, PurePosixPath
from typing import Any, Literal, cast
from pyro_mcp.daily_loop import (
DailyLoopManifest,
evaluate_daily_loop_status,
load_prepare_manifest,
prepare_manifest_path,
prepare_request_is_satisfied,
serialize_daily_loop_report,
write_prepare_manifest,
)
from pyro_mcp.runtime import (
RuntimeCapabilities,
RuntimePaths,
@ -288,9 +297,7 @@ class WorkspaceRecord:
network=_deserialize_network(payload.get("network")),
name=_normalize_workspace_name(_optional_str(payload.get("name")), allow_none=True),
labels=_normalize_workspace_labels(payload.get("labels")),
last_activity_at=float(
payload.get("last_activity_at", float(payload["created_at"]))
),
last_activity_at=float(payload.get("last_activity_at", float(payload["created_at"]))),
command_count=int(payload.get("command_count", 0)),
last_command=_optional_dict(payload.get("last_command")),
workspace_seed=_workspace_seed_dict(payload.get("workspace_seed")),
@ -544,9 +551,7 @@ class WorkspacePublishedPortRecord:
host=str(payload.get("host", DEFAULT_PUBLISHED_PORT_HOST)),
protocol=str(payload.get("protocol", "tcp")),
proxy_pid=(
None
if payload.get("proxy_pid") is None
else int(payload.get("proxy_pid", 0))
None if payload.get("proxy_pid") is None else int(payload.get("proxy_pid", 0))
),
)
@ -921,9 +926,7 @@ def _validate_workspace_file_read_max_bytes(max_bytes: int) -> int:
if max_bytes <= 0:
raise ValueError("max_bytes must be positive")
if max_bytes > WORKSPACE_FILE_MAX_BYTES:
raise ValueError(
f"max_bytes must be at most {WORKSPACE_FILE_MAX_BYTES} bytes"
)
raise ValueError(f"max_bytes must be at most {WORKSPACE_FILE_MAX_BYTES} bytes")
return max_bytes
@ -951,9 +954,7 @@ def _decode_workspace_patch_text(path: str, content_bytes: bytes) -> str:
try:
return content_bytes.decode("utf-8")
except UnicodeDecodeError as exc:
raise RuntimeError(
f"workspace patch only supports UTF-8 text files: {path}"
) from exc
raise RuntimeError(f"workspace patch only supports UTF-8 text files: {path}") from exc
def _normalize_archive_member_name(name: str) -> PurePosixPath:
@ -1043,9 +1044,7 @@ def _prepare_workspace_secrets(
has_value = "value" in item
has_file_path = "file_path" in item
if has_value == has_file_path:
raise ValueError(
f"secret {name!r} must provide exactly one of 'value' or 'file_path'"
)
raise ValueError(f"secret {name!r} must provide exactly one of 'value' or 'file_path'")
source_kind: WorkspaceSecretSourceKind
if has_value:
value = _validate_workspace_secret_value(name, str(item["value"]))
@ -1525,9 +1524,7 @@ def _normalize_workspace_published_port_specs(
)
dedupe_key = (spec.host_port, spec.guest_port)
if dedupe_key in seen_guest_ports:
raise ValueError(
"published ports must not repeat the same host/guest port mapping"
)
raise ValueError("published ports must not repeat the same host/guest port mapping")
seen_guest_ports.add(dedupe_key)
normalized.append(spec)
return normalized
@ -1790,7 +1787,7 @@ def _start_local_service(
),
"status=$?",
f"printf '%s' \"$status\" > {shlex.quote(str(status_path))}",
"exit \"$status\"",
'exit "$status"',
]
)
+ "\n",
@ -1973,9 +1970,7 @@ def _patch_rootfs_runtime_file(
) -> None:
debugfs_path = shutil.which("debugfs")
if debugfs_path is None:
raise RuntimeError(
"debugfs is required to seed workspaces on guest-backed runtimes"
)
raise RuntimeError("debugfs is required to seed workspaces on guest-backed runtimes")
with tempfile.TemporaryDirectory(prefix=f"pyro-{asset_label}-") as temp_dir:
staged_path = Path(temp_dir) / Path(destination_path).name
shutil.copy2(source_path, staged_path)
@ -3634,6 +3629,152 @@ class VmManager:
    def prune_environments(self) -> dict[str, object]:
        """Delegate environment pruning to the backing environment store."""
        return self._environment_store.prune_environments()
    def prepare_daily_loop(
        self,
        environment: str,
        *,
        network: bool = False,
        force: bool = False,
    ) -> dict[str, Any]:
        """Warm the daily workspace loop for ``environment`` and persist proof.

        Ensures the environment is installed, proves one create/exec/reset/
        delete cycle on a throwaway workspace, and writes a warm manifest so
        later ``prepare`` calls can be answered cheaply.

        Args:
            environment: Curated environment name to warm.
            network: Also prove an egress-enabled workspace cycle.
            force: Rerun warmup even when a compatible warm manifest exists.

        Returns:
            The serialized daily-loop report plus prepare-run metadata
            (``prepared``/``reused``/``executed``/``forced`` and timing).

        Raises:
            RuntimeError: When the runtime cannot support the guest-backed
                daily loop (or guest networking, when requested).
        """
        spec = get_environment(environment, runtime_paths=self._runtime_paths)
        # Prepare is only meaningful on the real guest-backed runtime; fail
        # loudly rather than silently warming a host-compat fallback.
        if self._backend_name != "firecracker":
            raise RuntimeError("pyro prepare requires a guest-backed runtime and is unavailable")
        if not self._runtime_capabilities.supports_vm_boot:
            reason = self._runtime_capabilities.reason or "runtime does not support guest boot"
            raise RuntimeError(
                f"pyro prepare requires guest-backed workspace boot and is unavailable: {reason}"
            )
        if not self._runtime_capabilities.supports_guest_exec:
            reason = self._runtime_capabilities.reason or (
                "runtime does not support guest command execution"
            )
            raise RuntimeError(
                f"pyro prepare requires guest command execution and is unavailable: {reason}"
            )
        # Networking is only validated when the caller asked for it.
        if network and not self._runtime_capabilities.supports_guest_network:
            reason = self._runtime_capabilities.reason or (
                "runtime does not support guest networking"
            )
            raise RuntimeError(
                f"pyro prepare --network requires guest networking and is unavailable: {reason}"
            )
        runtime_paths = self._runtime_paths
        if runtime_paths is None:
            raise RuntimeError("runtime paths are unavailable for pyro prepare")
        platform = str(runtime_paths.manifest.get("platform", "linux-x86_64"))
        bundle_version = cast(str | None, runtime_paths.manifest.get("bundle_version"))
        manifest_path = prepare_manifest_path(
            self._environment_store.cache_dir,
            platform=platform,
            environment=environment,
        )
        manifest, manifest_error = load_prepare_manifest(manifest_path)
        status, status_reason = evaluate_daily_loop_status(
            environment=environment,
            environment_version=spec.version,
            platform=platform,
            catalog_version=self._environment_store.catalog_version,
            bundle_version=bundle_version,
            installed=bool(self.inspect_environment(environment)["installed"]),
            manifest=manifest,
            manifest_error=manifest_error,
        )
        # Fast path: a compatible warm manifest already covers this request,
        # so skip the expensive workspace cycle entirely.
        if (
            not force
            and status == "warm"
            and prepare_request_is_satisfied(manifest, require_network=network)
        ):
            if manifest is None:
                raise AssertionError("warm prepare state requires a manifest")
            payload = serialize_daily_loop_report(
                environment=environment,
                status="warm",
                installed=True,
                cache_dir=self._environment_store.cache_dir,
                manifest_path=manifest_path,
                reason="reused existing warm manifest",
                manifest=manifest,
            )
            payload.update(
                {
                    "prepared": True,
                    "reused": True,
                    "executed": False,
                    "forced": force,
                    "network_requested": network,
                    "last_prepare_duration_ms": manifest.last_prepare_duration_ms,
                }
            )
            return payload
        self._environment_store.ensure_installed(environment)
        started = time.monotonic()
        workspace_id: str | None = None
        execution_mode = "pending"
        try:
            # Prove the full daily loop on a throwaway workspace:
            # create -> exec -> reset, with egress only when requested.
            created = self.create_workspace(
                environment=environment,
                network_policy="egress" if network else "off",
                allow_host_compat=False,
            )
            workspace_id = str(created["workspace_id"])
            exec_result = self.exec_workspace(
                workspace_id,
                command="pwd",
                timeout_seconds=DEFAULT_TIMEOUT_SECONDS,
            )
            execution_mode = str(exec_result.get("execution_mode", "unknown"))
            if int(exec_result.get("exit_code", 1)) != 0:
                raise RuntimeError("prepare guest exec failed")
            # `pwd` must land in the guest workspace mount, proving real
            # guest execution rather than a host-side fallback.
            if str(exec_result.get("stdout", "")) != f"{WORKSPACE_GUEST_PATH}\n":
                raise RuntimeError("prepare guest exec returned an unexpected working directory")
            self.reset_workspace(workspace_id)
        finally:
            # Always discard the probe workspace; cleanup failures must not
            # mask the primary prepare error.
            if workspace_id is not None:
                try:
                    self.delete_workspace(workspace_id, reason="prepare_cleanup")
                except Exception:
                    pass
        duration_ms = int((time.monotonic() - started) * 1000)
        prepared_at = time.time()
        # Keep an earlier network warmup valid when this run skipped --network.
        preserved_network_prepared = bool(
            manifest is not None and status == "warm" and manifest.network_prepared
        )
        prepared_manifest = DailyLoopManifest(
            environment=environment,
            environment_version=spec.version,
            platform=platform,
            catalog_version=self._environment_store.catalog_version,
            bundle_version=bundle_version,
            prepared_at=prepared_at,
            network_prepared=network or preserved_network_prepared,
            last_prepare_duration_ms=duration_ms,
        )
        write_prepare_manifest(manifest_path, prepared_manifest)
        payload = serialize_daily_loop_report(
            environment=environment,
            status="warm",
            installed=True,
            cache_dir=self._environment_store.cache_dir,
            manifest_path=manifest_path,
            reason=status_reason,
            manifest=prepared_manifest,
        )
        payload.update(
            {
                "prepared": True,
                "reused": False,
                "executed": True,
                "forced": force,
                "network_requested": network,
                "last_prepare_duration_ms": duration_ms,
                "execution_mode": execution_mode,
            }
        )
        return payload
def create_vm(
self,
*,
@@ -3859,9 +4000,7 @@ class VmManager:
raise RuntimeError(
f"max active VMs reached ({self._max_active_vms}); delete old VMs first"
)
self._require_workspace_network_policy_support(
network_policy=normalized_network_policy
)
self._require_workspace_network_policy_support(network_policy=normalized_network_policy)
self._backend.create(instance)
if self._runtime_capabilities.supports_guest_exec:
self._ensure_workspace_guest_bootstrap_support(instance)
@@ -3963,9 +4102,7 @@ class VmManager:
"destination": str(workspace_sync["destination"]),
"entry_count": int(workspace_sync["entry_count"]),
"bytes_written": int(workspace_sync["bytes_written"]),
"execution_mode": str(
instance.metadata.get("execution_mode", "pending")
),
"execution_mode": str(instance.metadata.get("execution_mode", "pending")),
},
)
self._save_workspace_locked(workspace)
@@ -4397,9 +4534,7 @@ class VmManager:
payload={
"summary": dict(summary),
"entries": [dict(entry) for entry in entries[:10]],
"execution_mode": str(
instance.metadata.get("execution_mode", "pending")
),
"execution_mode": str(instance.metadata.get("execution_mode", "pending")),
},
)
self._save_workspace_locked(workspace)
@@ -4568,9 +4703,7 @@ class VmManager:
recreated = workspace.to_instance(
workdir=self._workspace_runtime_dir(workspace.workspace_id)
)
self._require_workspace_network_policy_support(
network_policy=workspace.network_policy
)
self._require_workspace_network_policy_support(network_policy=workspace.network_policy)
self._backend.create(recreated)
if self._runtime_capabilities.supports_guest_exec:
self._ensure_workspace_guest_bootstrap_support(recreated)
@@ -4764,9 +4897,7 @@ class VmManager:
if wait_for_idle_ms is not None and (
wait_for_idle_ms <= 0 or wait_for_idle_ms > MAX_SHELL_WAIT_FOR_IDLE_MS
):
raise ValueError(
f"wait_for_idle_ms must be between 1 and {MAX_SHELL_WAIT_FOR_IDLE_MS}"
)
raise ValueError(f"wait_for_idle_ms must be between 1 and {MAX_SHELL_WAIT_FOR_IDLE_MS}")
with self._lock:
workspace = self._load_workspace_locked(workspace_id)
instance = self._workspace_instance_for_live_shell_locked(workspace)
@@ -5041,8 +5172,7 @@ class VmManager:
if normalized_published_ports:
if workspace.network_policy != "egress+published-ports":
raise RuntimeError(
"published ports require workspace network_policy "
"'egress+published-ports'"
"published ports require workspace network_policy 'egress+published-ports'"
)
if instance.network is None:
raise RuntimeError(
@@ -5447,11 +5577,7 @@ class VmManager:
if not isinstance(entry, dict):
continue
diff_entries.append(
{
key: value
for key, value in entry.items()
if key != "text_patch"
}
{key: value for key, value in entry.items() if key != "text_patch"}
)
payload["changes"] = {
"available": True,
@@ -5509,9 +5635,7 @@ class VmManager:
self._stop_workspace_services_locked(workspace, instance)
self._close_workspace_shells_locked(workspace, instance)
try:
self._require_workspace_network_policy_support(
network_policy=workspace.network_policy
)
self._require_workspace_network_policy_support(network_policy=workspace.network_policy)
if self._runtime_capabilities.supports_guest_exec:
self._ensure_workspace_guest_bootstrap_support(instance)
with self._lock:
@@ -5694,9 +5818,7 @@ class VmManager:
"execution_mode": workspace.metadata.get("execution_mode", "pending"),
"workspace_path": WORKSPACE_GUEST_PATH,
"workspace_seed": _workspace_seed_dict(workspace.workspace_seed),
"secrets": [
_serialize_workspace_secret_public(secret) for secret in workspace.secrets
],
"secrets": [_serialize_workspace_secret_public(secret) for secret in workspace.secrets],
"command_count": workspace.command_count,
"last_command": workspace.last_command,
"reset_count": workspace.reset_count,
@@ -5867,9 +5989,7 @@ class VmManager:
env_values: dict[str, str] = {}
for secret_name, env_name in secret_env.items():
if secret_name not in secret_values:
raise ValueError(
f"secret_env references unknown workspace secret {secret_name!r}"
)
raise ValueError(f"secret_env references unknown workspace secret {secret_name!r}")
env_values[env_name] = secret_values[secret_name]
return env_values
@@ -6019,13 +6139,10 @@ class VmManager:
bytes_written=bytes_written,
cleanup_dir=cleanup_dir,
)
if (
not resolved_source_path.is_file()
or not _is_supported_seed_archive(resolved_source_path)
if not resolved_source_path.is_file() or not _is_supported_seed_archive(
resolved_source_path
):
raise ValueError(
"seed_path must be a directory or a .tar/.tar.gz/.tgz archive"
)
raise ValueError("seed_path must be a directory or a .tar/.tar.gz/.tgz archive")
entry_count, bytes_written = _inspect_seed_archive(resolved_source_path)
return PreparedWorkspaceSeed(
mode="tar_archive",
@@ -6128,8 +6245,7 @@ class VmManager:
rootfs_path = Path(raw_rootfs_image)
if not rootfs_path.exists():
raise RuntimeError(
f"workspace {workspace.workspace_id!r} rootfs image is unavailable at "
f"{rootfs_path}"
f"workspace {workspace.workspace_id!r} rootfs image is unavailable at {rootfs_path}"
)
return rootfs_path
@@ -6146,9 +6262,7 @@ class VmManager:
f"workspace {workspace.workspace_id!r} must be stopped before {operation_name}"
)
if workspace.metadata.get("execution_mode") == "host_compat":
raise RuntimeError(
f"{operation_name} is unavailable for host_compat workspaces"
)
raise RuntimeError(f"{operation_name} is unavailable for host_compat workspaces")
return self._workspace_rootfs_image_path_locked(workspace)
def _scrub_workspace_runtime_state_locked(