Refactor public API around environments

This commit is contained in:
Thales Maciel 2026-03-08 16:02:02 -03:00
parent 57dae52cc2
commit 5d5243df23
41 changed files with 1301 additions and 459 deletions

View file

@ -1,11 +1,36 @@
"""Public package surface for pyro_mcp."""
from importlib.metadata import version
from __future__ import annotations
import tomllib
from importlib.metadata import PackageNotFoundError, version
from pathlib import Path
from pyro_mcp.api import Pyro
from pyro_mcp.server import create_server
from pyro_mcp.vm_manager import VmManager
__version__ = version("pyro-mcp")
def _resolve_version() -> str:
try:
installed_version = version("pyro-mcp")
except PackageNotFoundError:
installed_version = None
pyproject_path = Path(__file__).resolve().parents[2] / "pyproject.toml"
if pyproject_path.exists():
payload = tomllib.loads(pyproject_path.read_text(encoding="utf-8"))
project = payload.get("project")
if isinstance(project, dict):
raw_version = project.get("version")
if isinstance(raw_version, str) and raw_version != "":
return raw_version
if installed_version is None:
return "0+unknown"
return installed_version
__version__ = _resolve_version()
__all__ = ["Pyro", "VmManager", "__version__", "create_server"]

View file

@ -19,13 +19,13 @@ class Pyro:
*,
backend_name: str | None = None,
base_dir: Path | None = None,
artifacts_dir: Path | None = None,
cache_dir: Path | None = None,
max_active_vms: int = 4,
) -> None:
self._manager = manager or VmManager(
backend_name=backend_name,
base_dir=base_dir,
artifacts_dir=artifacts_dir,
cache_dir=cache_dir,
max_active_vms=max_active_vms,
)
@ -33,20 +33,29 @@ class Pyro:
def manager(self) -> VmManager:
return self._manager
def list_profiles(self) -> list[dict[str, object]]:
return self._manager.list_profiles()
def list_environments(self) -> list[dict[str, object]]:
return self._manager.list_environments()
def pull_environment(self, environment: str) -> dict[str, object]:
return self._manager.pull_environment(environment)
def inspect_environment(self, environment: str) -> dict[str, object]:
return self._manager.inspect_environment(environment)
def prune_environments(self) -> dict[str, object]:
return self._manager.prune_environments()
def create_vm(
self,
*,
profile: str,
environment: str,
vcpu_count: int,
mem_mib: int,
ttl_seconds: int = 600,
network: bool = False,
) -> dict[str, Any]:
return self._manager.create_vm(
profile=profile,
environment=environment,
vcpu_count=vcpu_count,
mem_mib=mem_mib,
ttl_seconds=ttl_seconds,
@ -77,7 +86,7 @@ class Pyro:
def run_in_vm(
self,
*,
profile: str,
environment: str,
command: str,
vcpu_count: int,
mem_mib: int,
@ -86,7 +95,7 @@ class Pyro:
network: bool = False,
) -> dict[str, Any]:
return self._manager.run_vm(
profile=profile,
environment=environment,
command=command,
vcpu_count=vcpu_count,
mem_mib=mem_mib,
@ -100,7 +109,7 @@ class Pyro:
@server.tool()
async def vm_run(
profile: str,
environment: str,
command: str,
vcpu_count: int,
mem_mib: int,
@ -110,7 +119,7 @@ class Pyro:
) -> dict[str, Any]:
"""Create, start, execute, and clean up an ephemeral VM."""
return self.run_in_vm(
profile=profile,
environment=environment,
command=command,
vcpu_count=vcpu_count,
mem_mib=mem_mib,
@ -120,21 +129,21 @@ class Pyro:
)
@server.tool()
async def vm_list_profiles() -> list[dict[str, object]]:
"""List standard environment profiles and package highlights."""
return self.list_profiles()
async def vm_list_environments() -> list[dict[str, object]]:
"""List curated Linux environments and installation status."""
return self.list_environments()
@server.tool()
async def vm_create(
profile: str,
environment: str,
vcpu_count: int,
mem_mib: int,
ttl_seconds: int = 600,
network: bool = False,
) -> dict[str, Any]:
"""Create an ephemeral VM record with profile and resource sizing."""
"""Create an ephemeral VM record with environment and resource sizing."""
return self.create_vm(
profile=profile,
environment=environment,
vcpu_count=vcpu_count,
mem_mib=mem_mib,
ttl_seconds=ttl_seconds,

View file

@ -11,6 +11,7 @@ from pyro_mcp.api import Pyro
from pyro_mcp.demo import run_demo
from pyro_mcp.ollama_demo import DEFAULT_OLLAMA_BASE_URL, DEFAULT_OLLAMA_MODEL, run_ollama_tool_demo
from pyro_mcp.runtime import DEFAULT_PLATFORM, doctor_report
from pyro_mcp.vm_environments import DEFAULT_CATALOG_VERSION
def _print_json(payload: dict[str, Any]) -> None:
@ -18,22 +19,36 @@ def _print_json(payload: dict[str, Any]) -> None:
def _build_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="pyro CLI for ephemeral Firecracker VMs.")
parser = argparse.ArgumentParser(
description="pyro CLI for curated ephemeral Linux environments."
)
parser.add_argument("--version", action="version", version=f"%(prog)s {__version__}")
subparsers = parser.add_subparsers(dest="command", required=True)
env_parser = subparsers.add_parser("env", help="Inspect and manage curated environments.")
env_subparsers = env_parser.add_subparsers(dest="env_command", required=True)
env_subparsers.add_parser("list", help="List official environments.")
pull_parser = env_subparsers.add_parser(
"pull",
help="Install an environment into the local cache.",
)
pull_parser.add_argument("environment")
inspect_parser = env_subparsers.add_parser("inspect", help="Inspect one environment.")
inspect_parser.add_argument("environment")
env_subparsers.add_parser("prune", help="Delete stale cached environments.")
mcp_parser = subparsers.add_parser("mcp", help="Run the MCP server.")
mcp_subparsers = mcp_parser.add_subparsers(dest="mcp_command", required=True)
mcp_subparsers.add_parser("serve", help="Run the MCP server over stdio.")
run_parser = subparsers.add_parser("run", help="Run one command inside an ephemeral VM.")
run_parser.add_argument("--profile", required=True)
run_parser.add_argument("environment")
run_parser.add_argument("--vcpu-count", type=int, required=True)
run_parser.add_argument("--mem-mib", type=int, required=True)
run_parser.add_argument("--timeout-seconds", type=int, default=30)
run_parser.add_argument("--ttl-seconds", type=int, default=600)
run_parser.add_argument("--network", action="store_true")
run_parser.add_argument("command_args", nargs=argparse.REMAINDER)
run_parser.add_argument("command_args", nargs="*")
doctor_parser = subparsers.add_parser("doctor", help="Inspect runtime and host diagnostics.")
doctor_parser.add_argument("--platform", default=DEFAULT_PLATFORM)
@ -59,13 +74,32 @@ def _require_command(command_args: list[str]) -> str:
def main() -> None:
args = _build_parser().parse_args()
pyro = Pyro()
if args.command == "env":
if args.env_command == "list":
_print_json(
{
"catalog_version": DEFAULT_CATALOG_VERSION,
"environments": pyro.list_environments(),
}
)
return
if args.env_command == "pull":
_print_json(dict(pyro.pull_environment(args.environment)))
return
if args.env_command == "inspect":
_print_json(dict(pyro.inspect_environment(args.environment)))
return
if args.env_command == "prune":
_print_json(dict(pyro.prune_environments()))
return
if args.command == "mcp":
Pyro().create_server().run(transport="stdio")
pyro.create_server().run(transport="stdio")
return
if args.command == "run":
command = _require_command(args.command_args)
result = Pyro().run_in_vm(
profile=args.profile,
result = pyro.run_in_vm(
environment=args.environment,
command=command,
vcpu_count=args.vcpu_count,
mem_mib=args.mem_mib,

View file

@ -2,10 +2,10 @@
from __future__ import annotations
PUBLIC_CLI_COMMANDS = ("mcp", "run", "doctor", "demo")
PUBLIC_CLI_COMMANDS = ("demo", "doctor", "env", "mcp", "run")
PUBLIC_CLI_DEMO_SUBCOMMANDS = ("ollama",)
PUBLIC_CLI_ENV_SUBCOMMANDS = ("inspect", "list", "pull", "prune")
PUBLIC_CLI_RUN_FLAGS = (
"--profile",
"--vcpu-count",
"--mem-mib",
"--timeout-seconds",
@ -18,8 +18,11 @@ PUBLIC_SDK_METHODS = (
"create_vm",
"delete_vm",
"exec_vm",
"list_profiles",
"inspect_environment",
"list_environments",
"network_info_vm",
"prune_environments",
"pull_environment",
"reap_expired",
"run_in_vm",
"start_vm",
@ -28,14 +31,14 @@ PUBLIC_SDK_METHODS = (
)
PUBLIC_MCP_TOOLS = (
"vm_run",
"vm_list_profiles",
"vm_create",
"vm_start",
"vm_exec",
"vm_stop",
"vm_delete",
"vm_status",
"vm_exec",
"vm_list_environments",
"vm_network_info",
"vm_reap_expired",
"vm_run",
"vm_start",
"vm_status",
"vm_stop",
)

View file

@ -28,7 +28,7 @@ def run_demo(*, network: bool = False) -> dict[str, Any]:
"execution_mode": "guest_vsock" if network else "host_compat",
}
return pyro.run_in_vm(
profile="debian-git",
environment="debian:12",
command=_demo_command(status),
vcpu_count=1,
mem_mib=512,

View file

@ -32,7 +32,7 @@ TOOL_SPECS: Final[list[dict[str, Any]]] = [
"parameters": {
"type": "object",
"properties": {
"profile": {"type": "string"},
"environment": {"type": "string"},
"command": {"type": "string"},
"vcpu_count": {"type": "integer"},
"mem_mib": {"type": "integer"},
@ -40,7 +40,7 @@ TOOL_SPECS: Final[list[dict[str, Any]]] = [
"ttl_seconds": {"type": "integer"},
"network": {"type": "boolean"},
},
"required": ["profile", "command", "vcpu_count", "mem_mib"],
"required": ["environment", "command", "vcpu_count", "mem_mib"],
"additionalProperties": False,
},
},
@ -48,8 +48,8 @@ TOOL_SPECS: Final[list[dict[str, Any]]] = [
{
"type": "function",
"function": {
"name": "vm_list_profiles",
"description": "List standard VM environment profiles.",
"name": "vm_list_environments",
"description": "List curated Linux environments and installation status.",
"parameters": {
"type": "object",
"properties": {},
@ -65,13 +65,13 @@ TOOL_SPECS: Final[list[dict[str, Any]]] = [
"parameters": {
"type": "object",
"properties": {
"profile": {"type": "string"},
"environment": {"type": "string"},
"vcpu_count": {"type": "integer"},
"mem_mib": {"type": "integer"},
"ttl_seconds": {"type": "integer"},
"network": {"type": "boolean"},
},
"required": ["profile", "vcpu_count", "mem_mib"],
"required": ["environment", "vcpu_count", "mem_mib"],
"additionalProperties": False,
},
},
@ -206,7 +206,7 @@ def _dispatch_tool_call(
ttl_seconds = arguments.get("ttl_seconds", 600)
timeout_seconds = arguments.get("timeout_seconds", 30)
return pyro.run_in_vm(
profile=_require_str(arguments, "profile"),
environment=_require_str(arguments, "environment"),
command=_require_str(arguments, "command"),
vcpu_count=_require_int(arguments, "vcpu_count"),
mem_mib=_require_int(arguments, "mem_mib"),
@ -214,12 +214,12 @@ def _dispatch_tool_call(
ttl_seconds=_require_int({"ttl_seconds": ttl_seconds}, "ttl_seconds"),
network=_require_bool(arguments, "network", default=False),
)
if tool_name == "vm_list_profiles":
return {"profiles": pyro.list_profiles()}
if tool_name == "vm_list_environments":
return {"environments": pyro.list_environments()}
if tool_name == "vm_create":
ttl_seconds = arguments.get("ttl_seconds", 600)
return pyro.create_vm(
profile=_require_str(arguments, "profile"),
environment=_require_str(arguments, "environment"),
vcpu_count=_require_int(arguments, "vcpu_count"),
mem_mib=_require_int(arguments, "mem_mib"),
ttl_seconds=_require_int({"ttl_seconds": ttl_seconds}, "ttl_seconds"),
@ -256,7 +256,7 @@ def _format_tool_error(tool_name: str, arguments: dict[str, Any], exc: Exception
def _run_direct_lifecycle_fallback(pyro: Pyro) -> dict[str, Any]:
return pyro.run_in_vm(
profile="debian-git",
environment="debian:12",
command=NETWORK_PROOF_COMMAND,
vcpu_count=1,
mem_mib=512,
@ -326,7 +326,7 @@ def run_ollama_tool_demo(
"content": (
"Use the VM tools to prove outbound internet access in an ephemeral VM.\n"
"Prefer `vm_run` unless a lower-level lifecycle step is strictly necessary.\n"
"Use profile `debian-git`, choose adequate vCPU/memory, "
"Use environment `debian:12`, choose adequate vCPU/memory, "
"and set `network` to true.\n"
f"Run this exact command: `{NETWORK_PROOF_COMMAND}`.\n"
f"Success means the clone completes and the command prints `true`.\n"

View file

@ -1,4 +1,4 @@
"""Bundled runtime resolver and diagnostics."""
"""Embedded runtime resolver and diagnostics."""
from __future__ import annotations
@ -64,7 +64,7 @@ def resolve_runtime_paths(
platform: str = DEFAULT_PLATFORM,
verify_checksums: bool = True,
) -> RuntimePaths:
"""Resolve and validate bundled runtime assets."""
"""Resolve and validate embedded runtime assets."""
bundle_parent = Path(os.environ.get("PYRO_RUNTIME_BUNDLE_DIR", _default_bundle_parent()))
bundle_root = bundle_parent / platform
manifest_path = bundle_root / "manifest.json"
@ -102,7 +102,7 @@ def resolve_runtime_paths(
guest_agent_path = bundle_root / raw_agent_path
artifacts_dir = bundle_root / "profiles"
required_paths = [firecracker_bin, jailer_bin, artifacts_dir]
required_paths = [firecracker_bin, jailer_bin]
if guest_agent_path is not None:
required_paths.append(guest_agent_path)
@ -139,30 +139,6 @@ def resolve_runtime_paths(
f"runtime checksum mismatch for {full_path}; "
f"expected {raw_hash}, got {actual}"
)
profiles = manifest.get("profiles")
if not isinstance(profiles, dict):
raise RuntimeError("runtime manifest is missing `profiles`")
for profile_name, profile_spec in profiles.items():
if not isinstance(profile_spec, dict):
raise RuntimeError(f"profile manifest entry for {profile_name!r} is malformed")
for kind in ("kernel", "rootfs"):
spec = profile_spec.get(kind)
if not isinstance(spec, dict):
raise RuntimeError(f"profile {profile_name!r} is missing {kind} spec")
raw_path = spec.get("path")
raw_hash = spec.get("sha256")
if not isinstance(raw_path, str) or not isinstance(raw_hash, str):
raise RuntimeError(f"profile {profile_name!r} {kind} spec is malformed")
full_path = bundle_root / raw_path
if not full_path.exists():
raise RuntimeError(f"profile asset missing: {full_path}")
actual = _sha256(full_path)
if actual != raw_hash:
raise RuntimeError(
f"profile checksum mismatch for {full_path}; "
f"expected {raw_hash}, got {actual}"
)
return RuntimePaths(
bundle_root=bundle_root,
manifest_path=manifest_path,
@ -241,9 +217,9 @@ def doctor_report(*, platform: str = DEFAULT_PLATFORM) -> dict[str, Any]:
return report
capabilities = runtime_capabilities(paths)
from pyro_mcp.vm_environments import EnvironmentStore
profiles = paths.manifest.get("profiles", {})
profile_names = sorted(profiles.keys()) if isinstance(profiles, dict) else []
environment_store = EnvironmentStore(runtime_paths=paths)
report["runtime_ok"] = True
report["runtime"] = {
"bundle_root": str(paths.bundle_root),
@ -252,16 +228,19 @@ def doctor_report(*, platform: str = DEFAULT_PLATFORM) -> dict[str, Any]:
"jailer_bin": str(paths.jailer_bin),
"guest_agent_path": str(paths.guest_agent_path) if paths.guest_agent_path else None,
"artifacts_dir": str(paths.artifacts_dir),
"artifacts_present": paths.artifacts_dir.exists(),
"notice_path": str(paths.notice_path),
"bundle_version": paths.manifest.get("bundle_version"),
"component_versions": paths.manifest.get("component_versions", {}),
"profiles": profile_names,
"capabilities": {
"supports_vm_boot": capabilities.supports_vm_boot,
"supports_guest_exec": capabilities.supports_guest_exec,
"supports_guest_network": capabilities.supports_guest_network,
"reason": capabilities.reason,
},
"catalog_version": environment_store.catalog_version,
"cache_dir": str(environment_store.cache_dir),
"environments": environment_store.list_environments(),
}
if not report["kvm"]["exists"]:
report["issues"] = ["/dev/kvm is not available on this host"]

View file

@ -1,4 +1,4 @@
"""Direct Firecracker boot validation for a bundled runtime profile."""
"""Direct Firecracker boot validation for a curated environment."""
from __future__ import annotations
@ -12,13 +12,13 @@ from pathlib import Path
from types import SimpleNamespace
from pyro_mcp.runtime import resolve_runtime_paths
from pyro_mcp.vm_environments import EnvironmentStore, get_environment
from pyro_mcp.vm_firecracker import build_launch_plan
from pyro_mcp.vm_profiles import get_profile
@dataclass(frozen=True)
class BootCheckResult:
profile: str
environment: str
workdir: Path
firecracker_started: bool
vm_alive_after_wait: bool
@ -49,30 +49,31 @@ def _classify_result(*, firecracker_log: str, serial_log: str, vm_alive: bool) -
def run_boot_check(
*,
profile: str = "debian-base",
environment: str = "debian:12-base",
vcpu_count: int = 1,
mem_mib: int = 1024,
wait_seconds: int = 8,
keep_workdir: bool = False,
) -> BootCheckResult: # pragma: no cover - integration helper
get_profile(profile)
get_environment(environment)
if wait_seconds <= 0:
raise ValueError("wait_seconds must be positive")
runtime_paths = resolve_runtime_paths()
profile_dir = runtime_paths.artifacts_dir / profile
environment_store = EnvironmentStore(runtime_paths=runtime_paths)
installed_environment = environment_store.ensure_installed(environment)
workdir = Path(tempfile.mkdtemp(prefix="pyro-boot-check-"))
try:
rootfs_copy = workdir / "rootfs.ext4"
shutil.copy2(profile_dir / "rootfs.ext4", rootfs_copy)
shutil.copy2(installed_environment.rootfs_image, rootfs_copy)
instance = SimpleNamespace(
vm_id="abcd00000001",
vcpu_count=vcpu_count,
mem_mib=mem_mib,
workdir=workdir,
metadata={
"kernel_image": str(profile_dir / "vmlinux"),
"kernel_image": str(installed_environment.kernel_image),
"rootfs_image": str(rootfs_copy),
},
network=None,
@ -114,7 +115,7 @@ def run_boot_check(
vm_alive=vm_alive,
)
return BootCheckResult(
profile=profile,
environment=environment,
workdir=workdir,
firecracker_started="Successfully started microvm" in firecracker_log,
vm_alive_after_wait=vm_alive,
@ -131,7 +132,7 @@ def run_boot_check(
def main() -> None: # pragma: no cover - CLI wiring
parser = argparse.ArgumentParser(description="Run a direct Firecracker boot check.")
parser.add_argument("--profile", default="debian-base")
parser.add_argument("--environment", default="debian:12-base")
parser.add_argument("--vcpu-count", type=int, default=1)
parser.add_argument("--mem-mib", type=int, default=1024)
parser.add_argument("--wait-seconds", type=int, default=8)
@ -140,13 +141,13 @@ def main() -> None: # pragma: no cover - CLI wiring
args = parser.parse_args()
result = run_boot_check(
profile=args.profile,
environment=args.environment,
vcpu_count=args.vcpu_count,
mem_mib=args.mem_mib,
wait_seconds=args.wait_seconds,
keep_workdir=args.keep_workdir,
)
print(f"[boot] profile={result.profile}")
print(f"[boot] environment={result.environment}")
print(f"[boot] firecracker_started={result.firecracker_started}")
print(f"[boot] vm_alive_after_wait={result.vm_alive_after_wait}")
print(f"[boot] process_returncode={result.process_returncode}")

View file

@ -9,7 +9,7 @@
"sha256": "86622337f91df329cca72bb21cd1324fb8b6fa47931601d65ee4b2c72ef2cae5"
}
},
"bundle_version": "0.1.0",
"bundle_version": "1.0.0",
"capabilities": {
"guest_exec": true,
"guest_network": true,

View file

@ -1,4 +1,4 @@
"""Direct guest-network validation for a bundled runtime profile."""
"""Direct guest-network validation for a curated environment."""
from __future__ import annotations
@ -28,7 +28,7 @@ class NetworkCheckResult:
def run_network_check(
*,
profile: str = "debian-git",
environment: str = "debian:12",
vcpu_count: int = 1,
mem_mib: int = 1024,
ttl_seconds: int = 600,
@ -37,7 +37,7 @@ def run_network_check(
) -> NetworkCheckResult: # pragma: no cover - integration helper
pyro = Pyro(base_dir=base_dir)
result = pyro.run_in_vm(
profile=profile,
environment=environment,
command=NETWORK_CHECK_COMMAND,
vcpu_count=vcpu_count,
mem_mib=mem_mib,
@ -58,7 +58,7 @@ def run_network_check(
def main() -> None: # pragma: no cover - CLI wiring
parser = argparse.ArgumentParser(description="Run a guest networking check.")
parser.add_argument("--profile", default="debian-git")
parser.add_argument("--environment", default="debian:12")
parser.add_argument("--vcpu-count", type=int, default=1)
parser.add_argument("--mem-mib", type=int, default=1024)
parser.add_argument("--ttl-seconds", type=int, default=600)
@ -66,7 +66,7 @@ def main() -> None: # pragma: no cover - CLI wiring
args = parser.parse_args()
result = run_network_check(
profile=args.profile,
environment=args.environment,
vcpu_count=args.vcpu_count,
mem_mib=args.mem_mib,
ttl_seconds=args.ttl_seconds,

View file

@ -0,0 +1,615 @@
"""Official environment catalog and local cache management."""
from __future__ import annotations
import json
import os
import shutil
import tarfile
import tempfile
import time
import urllib.error
import urllib.parse
import urllib.request
from dataclasses import dataclass, replace
from pathlib import Path
from typing import Any

from pyro_mcp.runtime import DEFAULT_PLATFORM, RuntimePaths
DEFAULT_ENVIRONMENT_VERSION = "1.0.0"
DEFAULT_CATALOG_VERSION = "1.0.0"
OCI_MANIFEST_ACCEPT = ", ".join(
(
"application/vnd.oci.image.index.v1+json",
"application/vnd.oci.image.manifest.v1+json",
"application/vnd.docker.distribution.manifest.list.v2+json",
"application/vnd.docker.distribution.manifest.v2+json",
)
)
@dataclass(frozen=True)
class VmEnvironment:
    """Catalog entry describing a curated Linux environment."""

    # Catalog key, e.g. "debian:12".
    name: str
    # Version of this environment's artifact set.
    version: str
    description: str
    # Packages expected to be present in the rootfs.
    default_packages: tuple[str, ...]
    distribution: str
    distribution_version: str
    # Name of the runtime-bundle profile this environment is sourced from.
    source_profile: str
    platform: str = DEFAULT_PLATFORM
    # Optional direct-download archive URL.
    source_url: str | None = None
    # OCI coordinates (registry / repository : reference) for remote pulls.
    oci_registry: str | None = None
    oci_repository: str | None = None
    oci_reference: str | None = None
    # sha256 of the source rootfs, when known from the runtime manifest.
    source_digest: str | None = None
    # Version-range constraint for compatibility with this catalog.
    compatibility: str = ">=1.0.0,<2.0.0"
@dataclass(frozen=True)
class InstalledEnvironment:
    """Resolved environment artifact locations."""

    name: str
    version: str
    # Cache directory holding the installed artifacts.
    install_dir: Path
    kernel_image: Path
    rootfs_image: Path
    # Where the artifacts came from: bundled runtime, archive URL, or "oci://...".
    source: str
    source_digest: str | None
    installed: bool
# Built-in catalog of officially supported environments, keyed by catalog name.
CATALOG: dict[str, VmEnvironment] = {
    "debian:12": VmEnvironment(
        name="debian:12",
        version=DEFAULT_ENVIRONMENT_VERSION,
        description="Debian 12 environment with Git preinstalled for common agent workflows.",
        default_packages=("bash", "coreutils", "git"),
        distribution="debian",
        distribution_version="12",
        source_profile="debian-git",
        oci_registry="ghcr.io",
        oci_repository="thaloco/pyro-environments/debian-12",
        oci_reference=DEFAULT_ENVIRONMENT_VERSION,
    ),
    "debian:12-base": VmEnvironment(
        name="debian:12-base",
        version=DEFAULT_ENVIRONMENT_VERSION,
        description="Minimal Debian 12 environment for shell and core Unix tooling.",
        default_packages=("bash", "coreutils"),
        distribution="debian",
        distribution_version="12",
        source_profile="debian-base",
        oci_registry="ghcr.io",
        oci_repository="thaloco/pyro-environments/debian-12-base",
        oci_reference=DEFAULT_ENVIRONMENT_VERSION,
    ),
    "debian:12-build": VmEnvironment(
        name="debian:12-build",
        version=DEFAULT_ENVIRONMENT_VERSION,
        description="Debian 12 environment with Git and common build tools preinstalled.",
        default_packages=("bash", "coreutils", "git", "gcc", "make", "cmake", "python3"),
        distribution="debian",
        distribution_version="12",
        source_profile="debian-build",
        oci_registry="ghcr.io",
        oci_repository="thaloco/pyro-environments/debian-12-build",
        oci_reference=DEFAULT_ENVIRONMENT_VERSION,
    ),
}
def _default_cache_dir() -> Path:
return Path(
os.environ.get(
"PYRO_ENVIRONMENT_CACHE_DIR",
str(Path.home() / ".cache" / "pyro-mcp" / "environments"),
)
)
def _manifest_profile_digest(runtime_paths: RuntimePaths, profile_name: str) -> str | None:
profiles = runtime_paths.manifest.get("profiles")
if not isinstance(profiles, dict):
return None
profile = profiles.get(profile_name)
if not isinstance(profile, dict):
return None
rootfs = profile.get("rootfs")
if not isinstance(rootfs, dict):
return None
raw_digest = rootfs.get("sha256")
return raw_digest if isinstance(raw_digest, str) else None
def get_environment(name: str, *, runtime_paths: RuntimePaths | None = None) -> VmEnvironment:
    """Resolve a curated environment by name.

    When *runtime_paths* is given, the returned spec carries the source
    digest recorded in the runtime manifest for the environment's profile.

    Raises:
        ValueError: if *name* is not present in the catalog.
    """
    try:
        spec = CATALOG[name]
    except KeyError as exc:
        known = ", ".join(sorted(CATALOG))
        raise ValueError(f"unknown environment {name!r}; expected one of: {known}") from exc
    if runtime_paths is None:
        return spec
    # dataclasses.replace copies every field, so a field added to
    # VmEnvironment later cannot be silently dropped the way the previous
    # hand-written field-by-field constructor call could drop it.
    return replace(
        spec,
        source_digest=_manifest_profile_digest(runtime_paths, spec.source_profile),
    )
def list_environments(*, runtime_paths: RuntimePaths | None = None) -> list[dict[str, object]]:
    """Return catalog metadata in a JSON-safe format."""
    entries: list[dict[str, object]] = []
    # Deterministic output: always iterate the catalog in name order.
    for env_name in sorted(CATALOG):
        resolved = get_environment(env_name, runtime_paths=runtime_paths)
        entries.append(_serialize_environment(resolved))
    return entries
def _serialize_environment(environment: VmEnvironment) -> dict[str, object]:
return {
"name": environment.name,
"version": environment.version,
"description": environment.description,
"default_packages": list(environment.default_packages),
"distribution": environment.distribution,
"distribution_version": environment.distribution_version,
"platform": environment.platform,
"oci_registry": environment.oci_registry,
"oci_repository": environment.oci_repository,
"oci_reference": environment.oci_reference,
"source_digest": environment.source_digest,
"compatibility": environment.compatibility,
}
class EnvironmentStore:
"""Install and inspect curated environments in a local cache."""
def __init__(
    self,
    *,
    runtime_paths: RuntimePaths,
    cache_dir: Path | None = None,
) -> None:
    """Bind the store to a runtime bundle and a local cache location."""
    self._runtime_paths = runtime_paths
    self._cache_dir = cache_dir if cache_dir is not None else _default_cache_dir()
    # Artifacts are cached per platform; fall back to the default platform
    # when the manifest's value is missing or not a string.
    manifest_platform = self._runtime_paths.manifest.get("platform", DEFAULT_PLATFORM)
    if not isinstance(manifest_platform, str):
        manifest_platform = DEFAULT_PLATFORM
    self._platform_dir = self._cache_dir / manifest_platform
@property
def cache_dir(self) -> Path:
    # Root directory of the local environment cache (all platforms).
    return self._cache_dir

@property
def catalog_version(self) -> str:
    # Version of the built-in catalog served by this store.
    return DEFAULT_CATALOG_VERSION
def list_environments(self) -> list[dict[str, object]]:
    """Return the inspection payload for every catalog entry, sorted by name."""
    # Comprehension instead of a manual append loop (ruff PERF401).
    return [self.inspect_environment(name) for name in sorted(CATALOG)]
def pull_environment(self, name: str) -> dict[str, object]:
    """Install *name* if needed and return its status plus resolved paths."""
    installed = self.ensure_installed(name)
    payload = dict(self.inspect_environment(name))
    # Augment the inspection payload with the concrete install locations.
    payload["install_dir"] = str(installed.install_dir)
    payload["kernel_image"] = str(installed.kernel_image)
    payload["rootfs_image"] = str(installed.rootfs_image)
    payload["source"] = installed.source
    return payload
def inspect_environment(self, name: str) -> dict[str, object]:
    """Describe one catalog environment plus its local install status."""
    spec = get_environment(name, runtime_paths=self._runtime_paths)
    install_dir = self._install_dir(spec)
    metadata_path = install_dir / "environment.json"
    # An install is only usable when the manifest AND both boot artifacts
    # exist. Previously only the kernel was checked, so an interrupted
    # install missing rootfs.ext4 was reported as installed, inconsistent
    # with ensure_installed (which requires all three).
    installed = (
        metadata_path.exists()
        and (install_dir / "vmlinux").exists()
        and (install_dir / "rootfs.ext4").exists()
    )
    payload = _serialize_environment(spec)
    payload.update(
        {
            "catalog_version": self.catalog_version,
            "installed": installed,
            "cache_dir": str(self._cache_dir),
            "install_dir": str(install_dir),
        }
    )
    if installed:
        payload["install_manifest"] = str(metadata_path)
    return payload
def ensure_installed(self, name: str) -> InstalledEnvironment:
    """Return the installed artifacts for *name*, installing on first use.

    Resolution order: reuse a complete cached install, then copy from the
    bundled runtime source, then pull from the configured OCI registry,
    then download a plain archive.

    Raises:
        RuntimeError: when not installed and no downloadable source exists.
    """
    spec = get_environment(name, runtime_paths=self._runtime_paths)
    self._platform_dir.mkdir(parents=True, exist_ok=True)
    install_dir = self._install_dir(spec)
    metadata_path = install_dir / "environment.json"
    # Fast path: a cached install counts only when the manifest and both
    # boot artifacts are all present.
    if metadata_path.exists():
        kernel_image = install_dir / "vmlinux"
        rootfs_image = install_dir / "rootfs.ext4"
        if kernel_image.exists() and rootfs_image.exists():
            metadata = json.loads(metadata_path.read_text(encoding="utf-8"))
            source = str(metadata.get("source", "cache"))
            raw_digest = metadata.get("source_digest")
            # Tolerate install manifests written without a digest.
            digest = raw_digest if isinstance(raw_digest, str) else None
            return InstalledEnvironment(
                name=spec.name,
                version=spec.version,
                install_dir=install_dir,
                kernel_image=kernel_image,
                rootfs_image=rootfs_image,
                source=source,
                source_digest=digest,
                installed=True,
            )
    # Prefer artifacts shipped with the runtime bundle: no network required.
    source_dir = self._runtime_paths.artifacts_dir / spec.source_profile
    if source_dir.exists():
        return self._install_from_local_source(spec, source_dir)
    if (
        spec.oci_registry is not None
        and spec.oci_repository is not None
        and spec.oci_reference is not None
    ):
        return self._install_from_oci(spec)
    if spec.source_url is not None:
        return self._install_from_archive(spec, spec.source_url)
    raise RuntimeError(
        f"environment {spec.name!r} is not installed and no downloadable source is configured"
    )
def prune_environments(self) -> dict[str, object]:
    """Delete stale cache entries and return a summary of what was removed.

    Removes interrupted ".partial-" staging directories, directories without
    a valid install manifest, installs of environments no longer in the
    catalog, and installs whose version no longer matches the catalog.
    """
    deleted: list[str] = []
    if not self._platform_dir.exists():
        return {"deleted_environment_dirs": [], "count": 0}
    for child in self._platform_dir.iterdir():
        # Leftovers from interrupted installs are always safe to drop.
        if child.name.startswith(".partial-"):
            shutil.rmtree(child, ignore_errors=True)
            deleted.append(child.name)
            continue
        if not child.is_dir():
            continue
        marker = child / "environment.json"
        if not marker.exists():
            shutil.rmtree(child, ignore_errors=True)
            deleted.append(child.name)
            continue
        # A corrupt or unreadable manifest (or non-object JSON) means the
        # install is unusable; treat it like any other stale directory
        # instead of letting the whole prune crash.
        try:
            metadata = json.loads(marker.read_text(encoding="utf-8"))
        except (OSError, ValueError):
            metadata = None
        raw_name = metadata.get("name") if isinstance(metadata, dict) else None
        raw_version = metadata.get("version") if isinstance(metadata, dict) else None
        if not isinstance(raw_name, str) or not isinstance(raw_version, str):
            shutil.rmtree(child, ignore_errors=True)
            deleted.append(child.name)
            continue
        try:
            spec = get_environment(raw_name, runtime_paths=self._runtime_paths)
        except ValueError:
            shutil.rmtree(child, ignore_errors=True)
            deleted.append(child.name)
            continue
        if spec.version != raw_version:
            shutil.rmtree(child, ignore_errors=True)
            deleted.append(child.name)
    return {"deleted_environment_dirs": sorted(deleted), "count": len(deleted)}
def _install_dir(self, spec: VmEnvironment) -> Path:
    # Colons are awkward in directory names; flatten "debian:12" -> "debian_12".
    safe_name = spec.name.replace(":", "_")
    return self._platform_dir / f"{safe_name}-{spec.version}"
def _install_from_local_source(
    self, spec: VmEnvironment, source_dir: Path
) -> InstalledEnvironment:
    """Install *spec* by copying artifacts from the bundled runtime source.

    The install is staged in a ".partial-" temp directory on the same
    filesystem and promoted with an atomic rename, so readers never
    observe a half-written install.
    """
    install_dir = self._install_dir(spec)
    temp_dir = Path(tempfile.mkdtemp(prefix=".partial-", dir=self._platform_dir))
    try:
        self._link_or_copy(source_dir / "vmlinux", temp_dir / "vmlinux")
        self._link_or_copy(source_dir / "rootfs.ext4", temp_dir / "rootfs.ext4")
        self._write_install_manifest(
            temp_dir,
            spec=spec,
            source="bundled-runtime-source",
            source_digest=spec.source_digest,
        )
        # Drop any stale install before promoting the staged directory.
        shutil.rmtree(install_dir, ignore_errors=True)
        temp_dir.replace(install_dir)
    except Exception:
        # Leave no ".partial-" litter behind on failure.
        shutil.rmtree(temp_dir, ignore_errors=True)
        raise
    return InstalledEnvironment(
        name=spec.name,
        version=spec.version,
        install_dir=install_dir,
        kernel_image=install_dir / "vmlinux",
        rootfs_image=install_dir / "rootfs.ext4",
        source="bundled-runtime-source",
        source_digest=spec.source_digest,
        installed=True,
    )
def _install_from_archive(self, spec: VmEnvironment, archive_url: str) -> InstalledEnvironment:
    """Install *spec* by downloading and unpacking a tarball from *archive_url*.

    Staged in a ".partial-" temp directory and promoted atomically,
    mirroring the local-source install path.
    """
    install_dir = self._install_dir(spec)
    temp_dir = Path(tempfile.mkdtemp(prefix=".partial-", dir=self._platform_dir))
    archive_path = temp_dir / "environment.tgz"
    try:
        # NOTE(review): the URL comes from the built-in catalog, hence the
        # S310 suppression — revisit if user-supplied URLs are ever allowed.
        urllib.request.urlretrieve(archive_url, archive_path)  # noqa: S310
        self._extract_archive(archive_path, temp_dir)
        kernel_image = self._locate_artifact(temp_dir, "vmlinux")
        rootfs_image = self._locate_artifact(temp_dir, "rootfs.ext4")
        # Normalize layout: the artifacts may be nested inside the archive.
        if kernel_image.parent != temp_dir:
            shutil.move(str(kernel_image), temp_dir / "vmlinux")
        if rootfs_image.parent != temp_dir:
            shutil.move(str(rootfs_image), temp_dir / "rootfs.ext4")
        self._write_install_manifest(
            temp_dir,
            spec=spec,
            source=archive_url,
            source_digest=spec.source_digest,
        )
        archive_path.unlink(missing_ok=True)
        # Drop any stale install before promoting the staged directory.
        shutil.rmtree(install_dir, ignore_errors=True)
        temp_dir.replace(install_dir)
    except Exception:
        shutil.rmtree(temp_dir, ignore_errors=True)
        raise
    return InstalledEnvironment(
        name=spec.name,
        version=spec.version,
        install_dir=install_dir,
        kernel_image=install_dir / "vmlinux",
        rootfs_image=install_dir / "rootfs.ext4",
        source=archive_url,
        source_digest=spec.source_digest,
        installed=True,
    )
def _install_from_oci(self, spec: VmEnvironment) -> InstalledEnvironment:
    """Install an environment by pulling and unpacking its OCI image layers.

    Resolves the manifest for *spec* (descending a multi-arch index if the
    registry returns one), downloads each layer blob, extracts the layers in
    order into a staging directory, then atomically swaps the staged tree
    into the install location.

    Raises:
        RuntimeError: if the manifest is malformed or contains no layers.
    """
    install_dir = self._install_dir(spec)
    temp_dir = Path(tempfile.mkdtemp(prefix=".partial-", dir=self._platform_dir))
    try:
        manifest, resolved_digest = self._fetch_oci_manifest(spec)
        layers = manifest.get("layers")
        if not isinstance(layers, list) or not layers:
            raise RuntimeError("OCI manifest did not contain any layers")
        # Apply layers in manifest order so later layers overwrite earlier ones.
        for index, layer in enumerate(layers):
            if not isinstance(layer, dict):
                raise RuntimeError("OCI manifest layer entry is malformed")
            raw_digest = layer.get("digest")
            if not isinstance(raw_digest, str):
                raise RuntimeError("OCI manifest layer is missing a digest")
            blob_path = temp_dir / f"layer-{index}.tar"
            self._download_oci_blob(spec, raw_digest, blob_path)
            self._extract_tar_archive(blob_path, temp_dir)
            # Drop the blob immediately to bound peak disk usage.
            blob_path.unlink(missing_ok=True)
        # Hoist the images to the staging root if a layer nested them.
        kernel_image = self._locate_artifact(temp_dir, "vmlinux")
        rootfs_image = self._locate_artifact(temp_dir, "rootfs.ext4")
        if kernel_image.parent != temp_dir:
            shutil.move(str(kernel_image), temp_dir / "vmlinux")
        if rootfs_image.parent != temp_dir:
            shutil.move(str(rootfs_image), temp_dir / "rootfs.ext4")
        # Record a human-readable provenance URL for the manifest/metadata.
        source = (
            f"oci://{spec.oci_registry}/{spec.oci_repository}:{spec.oci_reference}"
            if spec.oci_registry is not None
            and spec.oci_repository is not None
            and spec.oci_reference is not None
            else "oci://unknown"
        )
        self._write_install_manifest(
            temp_dir,
            spec=spec,
            source=source,
            source_digest=resolved_digest or spec.source_digest,
        )
        # Replace any previous install atomically with the staged tree.
        shutil.rmtree(install_dir, ignore_errors=True)
        temp_dir.replace(install_dir)
    except Exception:
        shutil.rmtree(temp_dir, ignore_errors=True)
        raise
    return InstalledEnvironment(
        name=spec.name,
        version=spec.version,
        install_dir=install_dir,
        kernel_image=install_dir / "vmlinux",
        rootfs_image=install_dir / "rootfs.ext4",
        source=source,
        source_digest=resolved_digest or spec.source_digest,
        installed=True,
    )
def _write_install_manifest(
    self,
    install_dir: Path,
    *,
    spec: VmEnvironment,
    source: str,
    source_digest: str | None,
) -> None:
    """Persist environment.json recording what was installed and from where."""
    manifest_doc: dict[str, object] = {
        "catalog_version": self.catalog_version,
        "name": spec.name,
        "version": spec.version,
        "source": source,
        "source_digest": source_digest,
        "installed_at": int(time.time()),
    }
    serialized = json.dumps(manifest_doc, indent=2, sort_keys=True)
    manifest_path = install_dir / "environment.json"
    manifest_path.write_text(serialized + "\n", encoding="utf-8")
def _extract_archive(self, archive_path: Path, dest_dir: Path) -> None:
    """Unpack a downloaded environment archive; delegates to the safe tar extractor."""
    self._extract_tar_archive(archive_path, dest_dir)
def _locate_artifact(self, root: Path, name: str) -> Path:
    """Find the first regular file named *name* anywhere under *root*."""
    regular_files = (entry for entry in root.rglob(name) if entry.is_file())
    found = next(regular_files, None)
    if found is None:
        raise RuntimeError(f"environment archive did not contain {name}")
    return found
def _link_or_copy(self, source: Path, dest: Path) -> None:
    """Place *source* at *dest*, preferring a relative symlink over a copy."""
    dest.parent.mkdir(parents=True, exist_ok=True)
    # Relative target keeps the link valid if the whole tree is relocated.
    link_target = os.path.relpath(source, start=dest.parent)
    try:
        dest.symlink_to(link_target)
    except OSError:
        # Symlinks may be unsupported on this filesystem; copy with metadata.
        shutil.copy2(source, dest)
def _fetch_oci_manifest(
    self, spec: VmEnvironment
) -> tuple[dict[str, Any], str | None]:
    """Resolve the (possibly multi-arch) OCI manifest for *spec*.

    Fetches the manifest for the configured reference; when the registry
    returns an image index / manifest list, selects the linux/amd64 child
    descriptor and fetches that child manifest instead.

    Returns:
        The manifest JSON object and the resolved content digest, or ``None``
        for the digest when the registry supplied no usable digest.

    Raises:
        RuntimeError: if the spec's OCI metadata is incomplete or a response
            is not a JSON object.
    """
    if spec.oci_registry is None or spec.oci_repository is None or spec.oci_reference is None:
        raise RuntimeError("OCI source metadata is incomplete")
    headers = {"Accept": OCI_MANIFEST_ACCEPT}
    payload, response_headers = self._request_bytes(
        self._oci_url(
            spec.oci_registry,
            spec.oci_repository,
            f"manifests/{spec.oci_reference}",
        ),
        headers=headers,
        repository=spec.oci_repository,
    )
    manifest = json.loads(payload.decode("utf-8"))
    if not isinstance(manifest, dict):
        raise RuntimeError("OCI manifest response was not a JSON object")
    resolved_digest = response_headers.get("Docker-Content-Digest")
    media_type = manifest.get("mediaType")
    # A fat manifest (index / manifest list) points at per-platform child
    # manifests; descend exactly one level to the linux/amd64 entry.
    if media_type in {
        "application/vnd.oci.image.index.v1+json",
        "application/vnd.docker.distribution.manifest.list.v2+json",
    }:
        manifests = manifest.get("manifests")
        if not isinstance(manifests, list):
            raise RuntimeError("OCI index did not contain manifests")
        selected = self._select_oci_manifest_descriptor(manifests)
        payload, response_headers = self._request_bytes(
            self._oci_url(
                spec.oci_registry,
                spec.oci_repository,
                f"manifests/{selected}",
            ),
            headers=headers,
            repository=spec.oci_repository,
        )
        manifest = json.loads(payload.decode("utf-8"))
        if not isinstance(manifest, dict):
            raise RuntimeError("OCI child manifest response was not a JSON object")
        # Prefer the digest header; fall back to the descriptor digest we used.
        resolved_digest = response_headers.get("Docker-Content-Digest") or selected
    return manifest, resolved_digest
def _download_oci_blob(self, spec: VmEnvironment, digest: str, dest: Path) -> None:
    """Fetch a single layer blob by content digest and write it to *dest*."""
    if spec.oci_registry is None or spec.oci_repository is None:
        raise RuntimeError("OCI source metadata is incomplete")
    blob_url = self._oci_url(
        spec.oci_registry,
        spec.oci_repository,
        f"blobs/{digest}",
    )
    content, _response_headers = self._request_bytes(
        blob_url,
        headers={},
        repository=spec.oci_repository,
    )
    dest.write_bytes(content)
def _request_bytes(
    self,
    url: str,
    *,
    headers: dict[str, str],
    repository: str,
) -> tuple[bytes, dict[str, str]]:
    """GET *url*, transparently answering a registry Bearer-token challenge.

    On a 401 the registry's WWW-Authenticate challenge is exchanged for a
    token via ``_fetch_registry_token`` and the request is retried once with
    an Authorization header.

    Returns:
        The response body and the response headers as a plain dict.

    Raises:
        RuntimeError: on any HTTP failure (all failures are normalized to a
            single exception type for callers).
    """
    request = urllib.request.Request(url, headers=headers, method="GET")
    try:
        with urllib.request.urlopen(request, timeout=90) as response:  # noqa: S310
            return response.read(), dict(response.headers.items())
    except urllib.error.HTTPError as exc:
        if exc.code != 401:
            raise RuntimeError(f"failed to fetch OCI resource {url}: {exc}") from exc
        authenticate = exc.headers.get("WWW-Authenticate")
        if authenticate is None:
            raise RuntimeError("OCI registry denied access without an auth challenge") from exc
        token = self._fetch_registry_token(authenticate, repository)
        authenticated_request = urllib.request.Request(
            url,
            headers={**headers, "Authorization": f"Bearer {token}"},
            method="GET",
        )
        # BUGFIX: previously a failure on this authenticated retry leaked a
        # raw HTTPError while every other failure path raised RuntimeError;
        # wrap it so callers see one consistent error type.
        try:
            with urllib.request.urlopen(authenticated_request, timeout=90) as response:  # noqa: S310
                return response.read(), dict(response.headers.items())
        except urllib.error.HTTPError as retry_exc:
            raise RuntimeError(f"failed to fetch OCI resource {url}: {retry_exc}") from retry_exc
def _fetch_registry_token(self, authenticate: str, repository: str) -> str:
    """Exchange a Bearer WWW-Authenticate challenge for a registry token."""
    scheme_prefix = "Bearer "
    if not authenticate.startswith(scheme_prefix):
        raise RuntimeError("unsupported OCI authentication scheme")
    challenge = self._parse_authenticate_parameters(authenticate[len(scheme_prefix) :])
    realm = challenge.get("realm")
    if realm is None:
        raise RuntimeError("OCI auth challenge did not include a token realm")
    # Default scope grants pull access to the repository we are fetching.
    token_query = urllib.parse.urlencode(
        {
            "service": challenge.get("service", ""),
            "scope": challenge.get("scope", f"repository:{repository}:pull"),
        }
    )
    with urllib.request.urlopen(f"{realm}?{token_query}", timeout=90) as response:  # noqa: S310
        token_doc = json.loads(response.read().decode("utf-8"))
    if not isinstance(token_doc, dict):
        raise RuntimeError("OCI auth token response was not a JSON object")
    # Some registries return "token", others "access_token".
    bearer = token_doc.get("token") or token_doc.get("access_token")
    if not isinstance(bearer, str) or bearer == "":
        raise RuntimeError("OCI auth token response did not include a bearer token")
    return bearer
def _parse_authenticate_parameters(self, raw: str) -> dict[str, str]:
    """Parse the auth-params of a Bearer challenge into a dict.

    BUGFIX: the previous implementation split on every comma, which broke
    quoted values that legally contain commas — e.g. a challenge with
    ``scope="repository:x:pull,push"`` lost everything after ``pull``.
    RFC 7235 auth-params are ``token "=" ( token / quoted-string )``, so we
    match each parameter with a quoted-string-aware pattern instead.
    """
    import re  # local import: only needed on the rare 401 auth path

    params: dict[str, str] = {}
    # key=value where value is either a quoted string (commas allowed inside)
    # or a bare token (terminated by comma/whitespace).
    for match in re.finditer(r'([A-Za-z0-9_-]+)\s*=\s*(?:"([^"]*)"|([^",\s]+))', raw):
        key = match.group(1)
        quoted, bare = match.group(2), match.group(3)
        params[key] = quoted if quoted is not None else bare
    return params
def _select_oci_manifest_descriptor(self, manifests: list[Any]) -> str:
    """Pick the linux/amd64 child digest out of an OCI index manifest list."""
    acceptable_arches = {"amd64", "x86_64"}
    for descriptor in manifests:
        if not isinstance(descriptor, dict):
            continue
        target = descriptor.get("platform")
        if not isinstance(target, dict):
            continue
        digest = descriptor.get("digest")
        if not isinstance(digest, str):
            continue
        # Equality/membership checks below also guarantee str-typed fields.
        if target.get("os") == "linux" and target.get("architecture") in acceptable_arches:
            return digest
    raise RuntimeError("OCI index did not contain a linux/amd64 manifest")
def _extract_tar_archive(self, archive_path: Path, dest_dir: Path) -> None:
    """Extract a tarball into *dest_dir*, rejecting path-traversal members.

    Every member is pre-validated to resolve inside *dest_dir*; extraction
    then additionally applies the stdlib "data" filter as a second guard.
    """
    dest_root = dest_dir.resolve()
    with tarfile.open(archive_path, "r:*") as archive:
        for entry in archive.getmembers():
            resolved = (dest_dir / entry.name).resolve()
            if not resolved.is_relative_to(dest_root):
                raise RuntimeError(f"unsafe archive member path: {entry.name}")
        archive.extractall(dest_dir, filter="data")
def _oci_url(self, registry: str, repository: str, suffix: str) -> str:
    """Build a v2 registry API URL for *repository* hosted on *registry*."""
    api_base = f"https://{registry}/v2/{repository}"
    return f"{api_base}/{suffix}"

View file

@ -19,10 +19,10 @@ from pyro_mcp.runtime import (
resolve_runtime_paths,
runtime_capabilities,
)
from pyro_mcp.vm_environments import EnvironmentStore, get_environment
from pyro_mcp.vm_firecracker import build_launch_plan
from pyro_mcp.vm_guest import VsockExecClient
from pyro_mcp.vm_network import NetworkConfig, TapNetworkManager
from pyro_mcp.vm_profiles import get_profile, list_profiles, resolve_artifacts
VmState = Literal["created", "started", "stopped"]
@ -32,7 +32,7 @@ class VmInstance:
"""In-memory VM lifecycle record."""
vm_id: str
profile: str
environment: str
vcpu_count: int
mem_mib: int
ttl_seconds: int
@ -85,6 +85,23 @@ def _run_host_command(workdir: Path, command: str, timeout_seconds: int) -> VmEx
)
def _copy_rootfs(source: Path, dest: Path) -> str:
    """Clone a rootfs image, preferring a copy-on-write reflink when possible.

    Returns a label describing which strategy produced the copy so callers
    can record it in instance metadata.
    """
    dest.parent.mkdir(parents=True, exist_ok=True)
    completed = None
    try:
        # --reflink=auto makes cp use a CoW clone on supporting filesystems
        # and silently fall back to a plain copy otherwise.
        completed = subprocess.run(  # noqa: S603
            ["cp", "--reflink=auto", str(source), str(dest)],
            text=True,
            capture_output=True,
            check=False,
        )
    except OSError:
        # cp itself is unavailable; use the pure-Python fallback below.
        completed = None
    if completed is not None and completed.returncode == 0:
        return "reflink_or_copy"
    shutil.copy2(source, dest)
    return "copy2"
class VmBackend:
"""Backend interface for lifecycle operations."""
@ -132,14 +149,14 @@ class FirecrackerBackend(VmBackend): # pragma: no cover
def __init__(
self,
artifacts_dir: Path,
environment_store: EnvironmentStore,
firecracker_bin: Path,
jailer_bin: Path,
runtime_capabilities: RuntimeCapabilities,
network_manager: TapNetworkManager | None = None,
guest_exec_client: VsockExecClient | None = None,
) -> None:
self._artifacts_dir = artifacts_dir
self._environment_store = environment_store
self._firecracker_bin = firecracker_bin
self._jailer_bin = jailer_bin
self._runtime_capabilities = runtime_capabilities
@ -156,15 +173,26 @@ class FirecrackerBackend(VmBackend): # pragma: no cover
def create(self, instance: VmInstance) -> None:
instance.workdir.mkdir(parents=True, exist_ok=False)
try:
artifacts = resolve_artifacts(self._artifacts_dir, instance.profile)
if not artifacts.kernel_image.exists() or not artifacts.rootfs_image.exists():
installed_environment = self._environment_store.ensure_installed(instance.environment)
if (
not installed_environment.kernel_image.exists()
or not installed_environment.rootfs_image.exists()
):
raise RuntimeError(
f"missing profile artifacts for {instance.profile}; expected "
f"{artifacts.kernel_image} and {artifacts.rootfs_image}"
f"missing environment artifacts for {instance.environment}; expected "
f"{installed_environment.kernel_image} and {installed_environment.rootfs_image}"
)
instance.metadata["kernel_image"] = str(artifacts.kernel_image)
instance.metadata["environment_version"] = installed_environment.version
instance.metadata["environment_source"] = installed_environment.source
if installed_environment.source_digest is not None:
instance.metadata["environment_digest"] = installed_environment.source_digest
instance.metadata["environment_install_dir"] = str(installed_environment.install_dir)
instance.metadata["kernel_image"] = str(installed_environment.kernel_image)
rootfs_copy = instance.workdir / "rootfs.ext4"
shutil.copy2(artifacts.rootfs_image, rootfs_copy)
instance.metadata["rootfs_clone_mode"] = _copy_rootfs(
installed_environment.rootfs_image,
rootfs_copy,
)
instance.metadata["rootfs_image"] = str(rootfs_copy)
if instance.network_requested:
network = self._network_manager.allocate(instance.vm_id)
@ -320,28 +348,35 @@ class VmManager:
*,
backend_name: str | None = None,
base_dir: Path | None = None,
artifacts_dir: Path | None = None,
cache_dir: Path | None = None,
max_active_vms: int = 4,
runtime_paths: RuntimePaths | None = None,
network_manager: TapNetworkManager | None = None,
) -> None:
self._backend_name = backend_name or "firecracker"
self._base_dir = base_dir or Path("/tmp/pyro-mcp")
resolved_cache_dir = cache_dir or self._base_dir / ".environment-cache"
self._runtime_paths = runtime_paths
if self._backend_name == "firecracker":
self._runtime_paths = self._runtime_paths or resolve_runtime_paths()
self._artifacts_dir = artifacts_dir or self._runtime_paths.artifacts_dir
self._runtime_capabilities = runtime_capabilities(self._runtime_paths)
else:
self._artifacts_dir = artifacts_dir or Path(
os.environ.get("PYRO_VM_ARTIFACTS_DIR", "/opt/pyro-mcp/artifacts")
self._environment_store = EnvironmentStore(
runtime_paths=self._runtime_paths,
cache_dir=resolved_cache_dir,
)
else:
self._runtime_capabilities = RuntimeCapabilities(
supports_vm_boot=False,
supports_guest_exec=False,
supports_guest_network=False,
reason="mock backend does not boot a guest",
)
if self._runtime_paths is None:
self._runtime_paths = resolve_runtime_paths(verify_checksums=False)
self._environment_store = EnvironmentStore(
runtime_paths=self._runtime_paths,
cache_dir=resolved_cache_dir,
)
self._max_active_vms = max_active_vms
if network_manager is not None:
self._network_manager = network_manager
@ -361,7 +396,7 @@ class VmManager:
if self._runtime_paths is None:
raise RuntimeError("runtime paths were not initialized for firecracker backend")
return FirecrackerBackend(
self._artifacts_dir,
self._environment_store,
firecracker_bin=self._runtime_paths.firecracker_bin,
jailer_bin=self._runtime_paths.jailer_bin,
runtime_capabilities=self._runtime_capabilities,
@ -369,20 +404,29 @@ class VmManager:
)
raise ValueError("invalid backend; expected one of: mock, firecracker")
def list_profiles(self) -> list[dict[str, object]]:
return list_profiles()
def list_environments(self) -> list[dict[str, object]]:
return self._environment_store.list_environments()
def pull_environment(self, environment: str) -> dict[str, object]:
return self._environment_store.pull_environment(environment)
def inspect_environment(self, environment: str) -> dict[str, object]:
return self._environment_store.inspect_environment(environment)
def prune_environments(self) -> dict[str, object]:
return self._environment_store.prune_environments()
def create_vm(
self,
*,
profile: str,
environment: str,
vcpu_count: int,
mem_mib: int,
ttl_seconds: int,
network: bool = False,
) -> dict[str, Any]:
self._validate_limits(vcpu_count=vcpu_count, mem_mib=mem_mib, ttl_seconds=ttl_seconds)
get_profile(profile)
get_environment(environment, runtime_paths=self._runtime_paths)
now = time.time()
with self._lock:
self._reap_expired_locked(now)
@ -394,7 +438,7 @@ class VmManager:
vm_id = uuid.uuid4().hex[:12]
instance = VmInstance(
vm_id=vm_id,
profile=profile,
environment=environment,
vcpu_count=vcpu_count,
mem_mib=mem_mib,
ttl_seconds=ttl_seconds,
@ -410,7 +454,7 @@ class VmManager:
def run_vm(
self,
*,
profile: str,
environment: str,
command: str,
vcpu_count: int,
mem_mib: int,
@ -419,7 +463,7 @@ class VmManager:
network: bool = False,
) -> dict[str, Any]:
created = self.create_vm(
profile=profile,
environment=environment,
vcpu_count=vcpu_count,
mem_mib=mem_mib,
ttl_seconds=ttl_seconds,
@ -459,6 +503,8 @@ class VmManager:
cleanup = self.delete_vm(vm_id, reason="post_exec_cleanup")
return {
"vm_id": vm_id,
"environment": instance.environment,
"environment_version": instance.metadata.get("environment_version"),
"command": command,
"stdout": exec_result.stdout,
"stderr": exec_result.stderr,
@ -532,7 +578,8 @@ class VmManager:
def _serialize(self, instance: VmInstance) -> dict[str, Any]:
return {
"vm_id": instance.vm_id,
"profile": instance.profile,
"environment": instance.environment,
"environment_version": instance.metadata.get("environment_version"),
"vcpu_count": instance.vcpu_count,
"mem_mib": instance.mem_mib,
"ttl_seconds": instance.ttl_seconds,

View file

@ -1,72 +0,0 @@
"""Standard VM environment profiles for ephemeral coding environments."""
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
@dataclass(frozen=True)
class VmProfile:
    """Profile metadata describing guest OS/tooling flavor."""

    name: str
    description: str
    default_packages: tuple[str, ...]


@dataclass(frozen=True)
class VmArtifacts:
    """Resolved artifact paths for a profile."""

    kernel_image: Path
    rootfs_image: Path


# Catalog keyed by profile name; insertion order is the presentation order.
PROFILE_CATALOG: dict[str, VmProfile] = {
    profile.name: profile
    for profile in (
        VmProfile(
            name="debian-base",
            description="Minimal Debian userspace for shell and core Unix tooling.",
            default_packages=("bash", "coreutils"),
        ),
        VmProfile(
            name="debian-git",
            description="Debian base environment with Git preinstalled.",
            default_packages=("bash", "coreutils", "git"),
        ),
        VmProfile(
            name="debian-build",
            description="Debian Git environment with common build tools for source builds.",
            default_packages=("bash", "coreutils", "git", "gcc", "make", "cmake", "python3"),
        ),
    )
}


def list_profiles() -> list[dict[str, object]]:
    """Return profile metadata in a JSON-safe format."""
    serialized: list[dict[str, object]] = []
    for profile in PROFILE_CATALOG.values():
        serialized.append(
            {
                "name": profile.name,
                "description": profile.description,
                "default_packages": list(profile.default_packages),
            }
        )
    return serialized


def get_profile(name: str) -> VmProfile:
    """Resolve a profile by name.

    Raises:
        ValueError: if *name* is not in the catalog.
    """
    try:
        return PROFILE_CATALOG[name]
    except KeyError as exc:
        known = ", ".join(sorted(PROFILE_CATALOG))
        raise ValueError(f"unknown profile {name!r}; expected one of: {known}") from exc


def resolve_artifacts(artifacts_dir: Path, profile_name: str) -> VmArtifacts:
    """Resolve kernel/rootfs file locations for a profile."""
    profile_root = artifacts_dir / profile_name
    return VmArtifacts(
        kernel_image=profile_root / "vmlinux",
        rootfs_image=profile_root / "rootfs.ext4",
    )