Pivot persistent APIs to workspaces

Replace the public persistent-sandbox contract with workspace-first naming across CLI, SDK, MCP, payloads, and on-disk state.

Rename the task surface to workspace equivalents, switch create-time seeding to `seed_path`, and store records under `workspaces/<workspace_id>/workspace.json` without carrying legacy task aliases or migrating old local task state.

Keep `pyro run` and `vm_*` unchanged. Validation covered `uv lock`, focused public-contract/API/CLI/manager tests, `UV_CACHE_DIR=.uv-cache make check`, and `UV_CACHE_DIR=.uv-cache make dist-check`.
This commit is contained in:
Thales Maciel 2026-03-12 01:21:49 -03:00
parent f57454bcb4
commit 48b82d8386
13 changed files with 743 additions and 618 deletions

View file

@@ -2,6 +2,15 @@
All notable user-visible changes to `pyro-mcp` are documented here. All notable user-visible changes to `pyro-mcp` are documented here.
## 2.4.0
- Replaced the public persistent-workspace surface from `task_*` to `workspace_*` across the CLI,
Python SDK, and MCP server in one clean cut with no compatibility aliases.
- Renamed create-time seeding from `source_path` to `seed_path` for workspace creation while keeping
later `workspace sync push` imports on `source_path`.
- Switched persisted local records from `tasks/*/task.json` to `workspaces/*/workspace.json` and
updated the main docs/examples to the workspace-first language.
## 2.3.0 ## 2.3.0
- Added `task sync push` across the CLI, Python SDK, and MCP server so started task workspaces can - Added `task sync push` across the CLI, Python SDK, and MCP server so started task workspaces can

View file

@@ -1,7 +1,7 @@
[project] [project]
name = "pyro-mcp" name = "pyro-mcp"
version = "2.3.0" version = "2.4.0"
description = "Curated Linux environments for ephemeral Firecracker-backed VM execution." description = "Ephemeral Firecracker sandboxes with curated environments, persistent workspaces, and MCP tools."
readme = "README.md" readme = "README.md"
license = { file = "LICENSE" } license = { file = "LICENSE" }
authors = [ authors = [

View file

@@ -77,7 +77,7 @@ class Pyro:
def exec_vm(self, vm_id: str, *, command: str, timeout_seconds: int = 30) -> dict[str, Any]: def exec_vm(self, vm_id: str, *, command: str, timeout_seconds: int = 30) -> dict[str, Any]:
return self._manager.exec_vm(vm_id, command=command, timeout_seconds=timeout_seconds) return self._manager.exec_vm(vm_id, command=command, timeout_seconds=timeout_seconds)
def create_task( def create_workspace(
self, self,
*, *,
environment: str, environment: str,
@@ -86,44 +86,52 @@
ttl_seconds: int = DEFAULT_TTL_SECONDS, ttl_seconds: int = DEFAULT_TTL_SECONDS,
network: bool = False, network: bool = False,
allow_host_compat: bool = DEFAULT_ALLOW_HOST_COMPAT, allow_host_compat: bool = DEFAULT_ALLOW_HOST_COMPAT,
source_path: str | Path | None = None, seed_path: str | Path | None = None,
) -> dict[str, Any]: ) -> dict[str, Any]:
return self._manager.create_task( return self._manager.create_workspace(
environment=environment, environment=environment,
vcpu_count=vcpu_count, vcpu_count=vcpu_count,
mem_mib=mem_mib, mem_mib=mem_mib,
ttl_seconds=ttl_seconds, ttl_seconds=ttl_seconds,
network=network, network=network,
allow_host_compat=allow_host_compat, allow_host_compat=allow_host_compat,
source_path=source_path, seed_path=seed_path,
) )
def exec_task( def exec_workspace(
self, self,
task_id: str, workspace_id: str,
*, *,
command: str, command: str,
timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS, timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS,
) -> dict[str, Any]: ) -> dict[str, Any]:
return self._manager.exec_task(task_id, command=command, timeout_seconds=timeout_seconds) return self._manager.exec_workspace(
workspace_id,
command=command,
timeout_seconds=timeout_seconds,
)
def status_task(self, task_id: str) -> dict[str, Any]: def status_workspace(self, workspace_id: str) -> dict[str, Any]:
return self._manager.status_task(task_id) return self._manager.status_workspace(workspace_id)
def push_task_sync( def push_workspace_sync(
self, self,
task_id: str, workspace_id: str,
source_path: str | Path, source_path: str | Path,
*, *,
dest: str = "/workspace", dest: str = "/workspace",
) -> dict[str, Any]: ) -> dict[str, Any]:
return self._manager.push_task_sync(task_id, source_path=source_path, dest=dest) return self._manager.push_workspace_sync(
workspace_id,
source_path=source_path,
dest=dest,
)
def logs_task(self, task_id: str) -> dict[str, Any]: def logs_workspace(self, workspace_id: str) -> dict[str, Any]:
return self._manager.logs_task(task_id) return self._manager.logs_workspace(workspace_id)
def delete_task(self, task_id: str) -> dict[str, Any]: def delete_workspace(self, workspace_id: str) -> dict[str, Any]:
return self._manager.delete_task(task_id) return self._manager.delete_workspace(workspace_id)
def stop_vm(self, vm_id: str) -> dict[str, Any]: def stop_vm(self, vm_id: str) -> dict[str, Any]:
return self._manager.stop_vm(vm_id) return self._manager.stop_vm(vm_id)
@@ -249,57 +257,61 @@ class Pyro:
return self.reap_expired() return self.reap_expired()
@server.tool() @server.tool()
async def task_create( async def workspace_create(
environment: str, environment: str,
vcpu_count: int = DEFAULT_VCPU_COUNT, vcpu_count: int = DEFAULT_VCPU_COUNT,
mem_mib: int = DEFAULT_MEM_MIB, mem_mib: int = DEFAULT_MEM_MIB,
ttl_seconds: int = DEFAULT_TTL_SECONDS, ttl_seconds: int = DEFAULT_TTL_SECONDS,
network: bool = False, network: bool = False,
allow_host_compat: bool = DEFAULT_ALLOW_HOST_COMPAT, allow_host_compat: bool = DEFAULT_ALLOW_HOST_COMPAT,
source_path: str | None = None, seed_path: str | None = None,
) -> dict[str, Any]: ) -> dict[str, Any]:
"""Create and start a persistent task workspace.""" """Create and start a persistent workspace."""
return self.create_task( return self.create_workspace(
environment=environment, environment=environment,
vcpu_count=vcpu_count, vcpu_count=vcpu_count,
mem_mib=mem_mib, mem_mib=mem_mib,
ttl_seconds=ttl_seconds, ttl_seconds=ttl_seconds,
network=network, network=network,
allow_host_compat=allow_host_compat, allow_host_compat=allow_host_compat,
source_path=source_path, seed_path=seed_path,
) )
@server.tool() @server.tool()
async def task_exec( async def workspace_exec(
task_id: str, workspace_id: str,
command: str, command: str,
timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS, timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS,
) -> dict[str, Any]: ) -> dict[str, Any]:
"""Run one command inside an existing task workspace.""" """Run one command inside an existing persistent workspace."""
return self.exec_task(task_id, command=command, timeout_seconds=timeout_seconds) return self.exec_workspace(
workspace_id,
command=command,
timeout_seconds=timeout_seconds,
)
@server.tool() @server.tool()
async def task_sync_push( async def workspace_sync_push(
task_id: str, workspace_id: str,
source_path: str, source_path: str,
dest: str = "/workspace", dest: str = "/workspace",
) -> dict[str, Any]: ) -> dict[str, Any]:
"""Push host content into the persistent workspace of a started task.""" """Push host content into the persistent `/workspace` of a started workspace."""
return self.push_task_sync(task_id, source_path=source_path, dest=dest) return self.push_workspace_sync(workspace_id, source_path=source_path, dest=dest)
@server.tool() @server.tool()
async def task_status(task_id: str) -> dict[str, Any]: async def workspace_status(workspace_id: str) -> dict[str, Any]:
"""Inspect task state and latest command metadata.""" """Inspect workspace state and latest command metadata."""
return self.status_task(task_id) return self.status_workspace(workspace_id)
@server.tool() @server.tool()
async def task_logs(task_id: str) -> dict[str, Any]: async def workspace_logs(workspace_id: str) -> dict[str, Any]:
"""Return persisted command history for one task.""" """Return persisted command history for one workspace."""
return self.logs_task(task_id) return self.logs_workspace(workspace_id)
@server.tool() @server.tool()
async def task_delete(task_id: str) -> dict[str, Any]: async def workspace_delete(workspace_id: str) -> dict[str, Any]:
"""Delete a task workspace and its backing sandbox.""" """Delete a persistent workspace and its backing sandbox."""
return self.delete_task(task_id) return self.delete_workspace(workspace_id)
return server return server

View file

@@ -18,7 +18,7 @@ from pyro_mcp.vm_environments import DEFAULT_CATALOG_VERSION
from pyro_mcp.vm_manager import ( from pyro_mcp.vm_manager import (
DEFAULT_MEM_MIB, DEFAULT_MEM_MIB,
DEFAULT_VCPU_COUNT, DEFAULT_VCPU_COUNT,
TASK_WORKSPACE_GUEST_PATH, WORKSPACE_GUEST_PATH,
) )
@@ -151,17 +151,17 @@ def _print_doctor_human(payload: dict[str, Any]) -> None:
print(f"- {issue}") print(f"- {issue}")
def _print_task_summary_human(payload: dict[str, Any], *, action: str) -> None: def _print_workspace_summary_human(payload: dict[str, Any], *, action: str) -> None:
print(f"{action}: {str(payload.get('task_id', 'unknown'))}") print(f"{action} ID: {str(payload.get('workspace_id', 'unknown'))}")
print(f"Environment: {str(payload.get('environment', 'unknown'))}") print(f"Environment: {str(payload.get('environment', 'unknown'))}")
print(f"State: {str(payload.get('state', 'unknown'))}") print(f"State: {str(payload.get('state', 'unknown'))}")
print(f"Workspace: {str(payload.get('workspace_path', '/workspace'))}") print(f"Workspace: {str(payload.get('workspace_path', '/workspace'))}")
workspace_seed = payload.get("workspace_seed") workspace_seed = payload.get("workspace_seed")
if isinstance(workspace_seed, dict): if isinstance(workspace_seed, dict):
mode = str(workspace_seed.get("mode", "empty")) mode = str(workspace_seed.get("mode", "empty"))
source_path = workspace_seed.get("source_path") seed_path = workspace_seed.get("seed_path")
if isinstance(source_path, str) and source_path != "": if isinstance(seed_path, str) and seed_path != "":
print(f"Workspace seed: {mode} from {source_path}") print(f"Workspace seed: {mode} from {seed_path}")
else: else:
print(f"Workspace seed: {mode}") print(f"Workspace seed: {mode}")
print(f"Execution mode: {str(payload.get('execution_mode', 'pending'))}") print(f"Execution mode: {str(payload.get('execution_mode', 'pending'))}")
@@ -179,16 +179,16 @@ def _print_task_summary_human(payload: dict[str, Any], *, action: str) -> None:
) )
def _print_task_exec_human(payload: dict[str, Any]) -> None: def _print_workspace_exec_human(payload: dict[str, Any]) -> None:
stdout = str(payload.get("stdout", "")) stdout = str(payload.get("stdout", ""))
stderr = str(payload.get("stderr", "")) stderr = str(payload.get("stderr", ""))
_write_stream(stdout, stream=sys.stdout) _write_stream(stdout, stream=sys.stdout)
_write_stream(stderr, stream=sys.stderr) _write_stream(stderr, stream=sys.stderr)
print( print(
"[task-exec] " "[workspace-exec] "
f"task_id={str(payload.get('task_id', 'unknown'))} " f"workspace_id={str(payload.get('workspace_id', 'unknown'))} "
f"sequence={int(payload.get('sequence', 0))} " f"sequence={int(payload.get('sequence', 0))} "
f"cwd={str(payload.get('cwd', TASK_WORKSPACE_GUEST_PATH))} " f"cwd={str(payload.get('cwd', WORKSPACE_GUEST_PATH))} "
f"execution_mode={str(payload.get('execution_mode', 'unknown'))} " f"execution_mode={str(payload.get('execution_mode', 'unknown'))} "
f"exit_code={int(payload.get('exit_code', 1))} " f"exit_code={int(payload.get('exit_code', 1))} "
f"duration_ms={int(payload.get('duration_ms', 0))}", f"duration_ms={int(payload.get('duration_ms', 0))}",
@@ -197,27 +197,27 @@ def _print_task_exec_human(payload: dict[str, Any]) -> None:
) )
def _print_task_sync_human(payload: dict[str, Any]) -> None: def _print_workspace_sync_human(payload: dict[str, Any]) -> None:
workspace_sync = payload.get("workspace_sync") workspace_sync = payload.get("workspace_sync")
if not isinstance(workspace_sync, dict): if not isinstance(workspace_sync, dict):
print(f"Synced task: {str(payload.get('task_id', 'unknown'))}") print(f"Synced workspace: {str(payload.get('workspace_id', 'unknown'))}")
return return
print( print(
"[task-sync] " "[workspace-sync] "
f"task_id={str(payload.get('task_id', 'unknown'))} " f"workspace_id={str(payload.get('workspace_id', 'unknown'))} "
f"mode={str(workspace_sync.get('mode', 'unknown'))} " f"mode={str(workspace_sync.get('mode', 'unknown'))} "
f"source={str(workspace_sync.get('source_path', 'unknown'))} " f"source={str(workspace_sync.get('source_path', 'unknown'))} "
f"destination={str(workspace_sync.get('destination', TASK_WORKSPACE_GUEST_PATH))} " f"destination={str(workspace_sync.get('destination', WORKSPACE_GUEST_PATH))} "
f"entry_count={int(workspace_sync.get('entry_count', 0))} " f"entry_count={int(workspace_sync.get('entry_count', 0))} "
f"bytes_written={int(workspace_sync.get('bytes_written', 0))} " f"bytes_written={int(workspace_sync.get('bytes_written', 0))} "
f"execution_mode={str(payload.get('execution_mode', 'unknown'))}" f"execution_mode={str(payload.get('execution_mode', 'unknown'))}"
) )
def _print_task_logs_human(payload: dict[str, Any]) -> None: def _print_workspace_logs_human(payload: dict[str, Any]) -> None:
entries = payload.get("entries") entries = payload.get("entries")
if not isinstance(entries, list) or not entries: if not isinstance(entries, list) or not entries:
print("No task logs found.") print("No workspace logs found.")
return return
for entry in entries: for entry in entries:
if not isinstance(entry, dict): if not isinstance(entry, dict):
@@ -226,7 +226,7 @@ def _print_task_logs_human(payload: dict[str, Any]) -> None:
f"#{int(entry.get('sequence', 0))} " f"#{int(entry.get('sequence', 0))} "
f"exit_code={int(entry.get('exit_code', -1))} " f"exit_code={int(entry.get('exit_code', -1))} "
f"duration_ms={int(entry.get('duration_ms', 0))} " f"duration_ms={int(entry.get('duration_ms', 0))} "
f"cwd={str(entry.get('cwd', TASK_WORKSPACE_GUEST_PATH))}" f"cwd={str(entry.get('cwd', WORKSPACE_GUEST_PATH))}"
) )
print(f"$ {str(entry.get('command', ''))}") print(f"$ {str(entry.get('command', ''))}")
stdout = str(entry.get("stdout", "")) stdout = str(entry.get("stdout", ""))
@@ -267,8 +267,8 @@ def _build_parser() -> argparse.ArgumentParser:
pyro run debian:12 -- git --version pyro run debian:12 -- git --version
Need repeated commands in one workspace after that? Need repeated commands in one workspace after that?
pyro task create debian:12 --source-path ./repo pyro workspace create debian:12 --seed-path ./repo
pyro task sync push TASK_ID ./changes pyro workspace sync push WORKSPACE_ID ./changes
Use `pyro mcp serve` only after the CLI validation path works. Use `pyro mcp serve` only after the CLI validation path works.
""" """
@@ -463,9 +463,9 @@ def _build_parser() -> argparse.ArgumentParser:
), ),
) )
task_parser = subparsers.add_parser( workspace_parser = subparsers.add_parser(
"task", "workspace",
help="Manage persistent task workspaces.", help="Manage persistent workspaces.",
description=( description=(
"Create a persistent workspace when you need repeated commands in one " "Create a persistent workspace when you need repeated commands in one "
"sandbox instead of one-shot `pyro run`." "sandbox instead of one-shot `pyro run`."
@@ -473,58 +473,62 @@
epilog=dedent( epilog=dedent(
""" """
Examples: Examples:
pyro task create debian:12 --source-path ./repo pyro workspace create debian:12 --seed-path ./repo
pyro task sync push TASK_ID ./repo --dest src pyro workspace sync push WORKSPACE_ID ./repo --dest src
pyro task exec TASK_ID -- sh -lc 'printf "hello\\n" > note.txt' pyro workspace exec WORKSPACE_ID -- sh -lc 'printf "hello\\n" > note.txt'
pyro task logs TASK_ID pyro workspace logs WORKSPACE_ID
""" """
), ),
formatter_class=_HelpFormatter, formatter_class=_HelpFormatter,
) )
task_subparsers = task_parser.add_subparsers(dest="task_command", required=True, metavar="TASK") workspace_subparsers = workspace_parser.add_subparsers(
task_create_parser = task_subparsers.add_parser( dest="workspace_command",
required=True,
metavar="WORKSPACE",
)
workspace_create_parser = workspace_subparsers.add_parser(
"create", "create",
help="Create and start a persistent task workspace.", help="Create and start a persistent workspace.",
description="Create a task workspace that stays alive across repeated exec calls.", description="Create a persistent workspace that stays alive across repeated exec calls.",
epilog=dedent( epilog=dedent(
""" """
Examples: Examples:
pyro task create debian:12 pyro workspace create debian:12
pyro task create debian:12 --source-path ./repo pyro workspace create debian:12 --seed-path ./repo
pyro task sync push TASK_ID ./changes pyro workspace sync push WORKSPACE_ID ./changes
""" """
), ),
formatter_class=_HelpFormatter, formatter_class=_HelpFormatter,
) )
task_create_parser.add_argument( workspace_create_parser.add_argument(
"environment", "environment",
metavar="ENVIRONMENT", metavar="ENVIRONMENT",
help="Curated environment to boot, for example `debian:12`.", help="Curated environment to boot, for example `debian:12`.",
) )
task_create_parser.add_argument( workspace_create_parser.add_argument(
"--vcpu-count", "--vcpu-count",
type=int, type=int,
default=DEFAULT_VCPU_COUNT, default=DEFAULT_VCPU_COUNT,
help="Number of virtual CPUs to allocate to the task guest.", help="Number of virtual CPUs to allocate to the guest.",
) )
task_create_parser.add_argument( workspace_create_parser.add_argument(
"--mem-mib", "--mem-mib",
type=int, type=int,
default=DEFAULT_MEM_MIB, default=DEFAULT_MEM_MIB,
help="Guest memory allocation in MiB.", help="Guest memory allocation in MiB.",
) )
task_create_parser.add_argument( workspace_create_parser.add_argument(
"--ttl-seconds", "--ttl-seconds",
type=int, type=int,
default=600, default=600,
help="Time-to-live for the task before automatic cleanup.", help="Time-to-live for the workspace before automatic cleanup.",
) )
task_create_parser.add_argument( workspace_create_parser.add_argument(
"--network", "--network",
action="store_true", action="store_true",
help="Enable outbound guest networking for the task guest.", help="Enable outbound guest networking for the workspace guest.",
) )
task_create_parser.add_argument( workspace_create_parser.add_argument(
"--allow-host-compat", "--allow-host-compat",
action="store_true", action="store_true",
help=( help=(
@@ -532,143 +536,153 @@
"is unavailable." "is unavailable."
), ),
) )
task_create_parser.add_argument( workspace_create_parser.add_argument(
"--source-path", "--seed-path",
help=( help=(
"Optional host directory or .tar/.tar.gz/.tgz archive to seed into `/workspace` " "Optional host directory or .tar/.tar.gz/.tgz archive to seed into `/workspace` "
"before the task is returned." "before the workspace is returned."
), ),
) )
task_create_parser.add_argument( workspace_create_parser.add_argument(
"--json", "--json",
action="store_true", action="store_true",
help="Print structured JSON instead of human-readable output.", help="Print structured JSON instead of human-readable output.",
) )
task_exec_parser = task_subparsers.add_parser( workspace_exec_parser = workspace_subparsers.add_parser(
"exec", "exec",
help="Run one command inside an existing task workspace.", help="Run one command inside an existing workspace.",
description="Run one non-interactive command in the persistent `/workspace` for a task.", description=(
epilog="Example:\n pyro task exec TASK_ID -- cat note.txt", "Run one non-interactive command in the persistent `/workspace` "
"for a workspace."
),
epilog="Example:\n pyro workspace exec WORKSPACE_ID -- cat note.txt",
formatter_class=_HelpFormatter, formatter_class=_HelpFormatter,
) )
task_exec_parser.add_argument("task_id", metavar="TASK_ID", help="Persistent task identifier.") workspace_exec_parser.add_argument(
task_exec_parser.add_argument( "workspace_id",
metavar="WORKSPACE_ID",
help="Persistent workspace identifier.",
)
workspace_exec_parser.add_argument(
"--timeout-seconds", "--timeout-seconds",
type=int, type=int,
default=30, default=30,
help="Maximum time allowed for the task command.", help="Maximum time allowed for the workspace command.",
) )
task_exec_parser.add_argument( workspace_exec_parser.add_argument(
"--json", "--json",
action="store_true", action="store_true",
help="Print structured JSON instead of human-readable output.", help="Print structured JSON instead of human-readable output.",
) )
task_exec_parser.add_argument( workspace_exec_parser.add_argument(
"command_args", "command_args",
nargs="*", nargs="*",
metavar="ARG", metavar="ARG",
help=( help=(
"Command and arguments to run inside the task workspace. Prefix them with `--`, " "Command and arguments to run inside the workspace. Prefix them with `--`, "
"for example `pyro task exec TASK_ID -- cat note.txt`." "for example `pyro workspace exec WORKSPACE_ID -- cat note.txt`."
), ),
) )
task_sync_parser = task_subparsers.add_parser( workspace_sync_parser = workspace_subparsers.add_parser(
"sync", "sync",
help="Push host content into a started task workspace.", help="Push host content into a started workspace.",
description=( description=(
"Push host directory or archive content into `/workspace` for an existing " "Push host directory or archive content into `/workspace` for an existing "
"started task." "started workspace."
), ),
epilog=dedent( epilog=dedent(
""" """
Examples: Examples:
pyro task sync push TASK_ID ./repo pyro workspace sync push WORKSPACE_ID ./repo
pyro task sync push TASK_ID ./patches --dest src pyro workspace sync push WORKSPACE_ID ./patches --dest src
Sync is non-atomic. If a sync fails partway through, delete and recreate the task. Sync is non-atomic. If a sync fails partway through, delete and recreate the workspace.
""" """
), ),
formatter_class=_HelpFormatter, formatter_class=_HelpFormatter,
) )
task_sync_subparsers = task_sync_parser.add_subparsers( workspace_sync_subparsers = workspace_sync_parser.add_subparsers(
dest="task_sync_command", dest="workspace_sync_command",
required=True, required=True,
metavar="SYNC", metavar="SYNC",
) )
task_sync_push_parser = task_sync_subparsers.add_parser( workspace_sync_push_parser = workspace_sync_subparsers.add_parser(
"push", "push",
help="Push one host directory or archive into a started task.", help="Push one host directory or archive into a started workspace.",
description="Import host content into `/workspace` or a subdirectory of it.", description="Import host content into `/workspace` or a subdirectory of it.",
epilog="Example:\n pyro task sync push TASK_ID ./repo --dest src", epilog="Example:\n pyro workspace sync push WORKSPACE_ID ./repo --dest src",
formatter_class=_HelpFormatter, formatter_class=_HelpFormatter,
) )
task_sync_push_parser.add_argument( workspace_sync_push_parser.add_argument(
"task_id", "workspace_id",
metavar="TASK_ID", metavar="WORKSPACE_ID",
help="Persistent task identifier.", help="Persistent workspace identifier.",
) )
task_sync_push_parser.add_argument( workspace_sync_push_parser.add_argument(
"source_path", "source_path",
metavar="SOURCE_PATH", metavar="SOURCE_PATH",
help="Host directory or .tar/.tar.gz/.tgz archive to push into the task workspace.", help="Host directory or .tar/.tar.gz/.tgz archive to push into the workspace.",
) )
task_sync_push_parser.add_argument( workspace_sync_push_parser.add_argument(
"--dest", "--dest",
default=TASK_WORKSPACE_GUEST_PATH, default=WORKSPACE_GUEST_PATH,
help="Workspace destination path. Relative values resolve inside `/workspace`.", help="Workspace destination path. Relative values resolve inside `/workspace`.",
) )
task_sync_push_parser.add_argument( workspace_sync_push_parser.add_argument(
"--json", "--json",
action="store_true", action="store_true",
help="Print structured JSON instead of human-readable output.", help="Print structured JSON instead of human-readable output.",
) )
task_status_parser = task_subparsers.add_parser( workspace_status_parser = workspace_subparsers.add_parser(
"status", "status",
help="Inspect one task workspace.", help="Inspect one workspace.",
description="Show task state, sizing, workspace path, and latest command metadata.", description="Show workspace state, sizing, workspace path, and latest command metadata.",
epilog="Example:\n pyro task status TASK_ID", epilog="Example:\n pyro workspace status WORKSPACE_ID",
formatter_class=_HelpFormatter, formatter_class=_HelpFormatter,
) )
task_status_parser.add_argument( workspace_status_parser.add_argument(
"task_id", "workspace_id",
metavar="TASK_ID", metavar="WORKSPACE_ID",
help="Persistent task identifier.", help="Persistent workspace identifier.",
) )
task_status_parser.add_argument( workspace_status_parser.add_argument(
"--json", "--json",
action="store_true", action="store_true",
help="Print structured JSON instead of human-readable output.", help="Print structured JSON instead of human-readable output.",
) )
task_logs_parser = task_subparsers.add_parser( workspace_logs_parser = workspace_subparsers.add_parser(
"logs", "logs",
help="Show command history for one task.", help="Show command history for one workspace.",
description="Show persisted command history, including stdout and stderr, for one task.", description=(
epilog="Example:\n pyro task logs TASK_ID", "Show persisted command history, including stdout and stderr, "
"for one workspace."
),
epilog="Example:\n pyro workspace logs WORKSPACE_ID",
formatter_class=_HelpFormatter, formatter_class=_HelpFormatter,
) )
task_logs_parser.add_argument( workspace_logs_parser.add_argument(
"task_id", "workspace_id",
metavar="TASK_ID", metavar="WORKSPACE_ID",
help="Persistent task identifier.", help="Persistent workspace identifier.",
) )
task_logs_parser.add_argument( workspace_logs_parser.add_argument(
"--json", "--json",
action="store_true", action="store_true",
help="Print structured JSON instead of human-readable output.", help="Print structured JSON instead of human-readable output.",
) )
task_delete_parser = task_subparsers.add_parser( workspace_delete_parser = workspace_subparsers.add_parser(
"delete", "delete",
help="Delete one task workspace.", help="Delete one workspace.",
description="Stop the backing sandbox if needed and remove the task workspace.", description="Stop the backing sandbox if needed and remove the workspace.",
epilog="Example:\n pyro task delete TASK_ID", epilog="Example:\n pyro workspace delete WORKSPACE_ID",
formatter_class=_HelpFormatter, formatter_class=_HelpFormatter,
) )
task_delete_parser.add_argument( workspace_delete_parser.add_argument(
"task_id", "workspace_id",
metavar="TASK_ID", metavar="WORKSPACE_ID",
help="Persistent task identifier.", help="Persistent workspace identifier.",
) )
task_delete_parser.add_argument( workspace_delete_parser.add_argument(
"--json", "--json",
action="store_true", action="store_true",
help="Print structured JSON instead of human-readable output.", help="Print structured JSON instead of human-readable output.",
@@ -847,28 +861,28 @@ def main() -> None:
if exit_code != 0: if exit_code != 0:
raise SystemExit(exit_code) raise SystemExit(exit_code)
return return
if args.command == "task": if args.command == "workspace":
if args.task_command == "create": if args.workspace_command == "create":
payload = pyro.create_task( payload = pyro.create_workspace(
environment=args.environment, environment=args.environment,
vcpu_count=args.vcpu_count, vcpu_count=args.vcpu_count,
mem_mib=args.mem_mib, mem_mib=args.mem_mib,
ttl_seconds=args.ttl_seconds, ttl_seconds=args.ttl_seconds,
network=args.network, network=args.network,
allow_host_compat=args.allow_host_compat, allow_host_compat=args.allow_host_compat,
source_path=args.source_path, seed_path=args.seed_path,
) )
if bool(args.json): if bool(args.json):
_print_json(payload) _print_json(payload)
else: else:
_print_task_summary_human(payload, action="Task") _print_workspace_summary_human(payload, action="Workspace")
return return
if args.task_command == "exec": if args.workspace_command == "exec":
command = _require_command(args.command_args) command = _require_command(args.command_args)
if bool(args.json): if bool(args.json):
try: try:
payload = pyro.exec_task( payload = pyro.exec_workspace(
args.task_id, args.workspace_id,
command=command, command=command,
timeout_seconds=args.timeout_seconds, timeout_seconds=args.timeout_seconds,
) )
@@ -878,24 +892,24 @@ def main() -> None:
_print_json(payload) _print_json(payload)
else: else:
try: try:
payload = pyro.exec_task( payload = pyro.exec_workspace(
args.task_id, args.workspace_id,
command=command, command=command,
timeout_seconds=args.timeout_seconds, timeout_seconds=args.timeout_seconds,
) )
except Exception as exc: # noqa: BLE001 except Exception as exc: # noqa: BLE001
print(f"[error] {exc}", file=sys.stderr, flush=True) print(f"[error] {exc}", file=sys.stderr, flush=True)
raise SystemExit(1) from exc raise SystemExit(1) from exc
_print_task_exec_human(payload) _print_workspace_exec_human(payload)
exit_code = int(payload.get("exit_code", 1)) exit_code = int(payload.get("exit_code", 1))
if exit_code != 0: if exit_code != 0:
raise SystemExit(exit_code) raise SystemExit(exit_code)
return return
if args.task_command == "sync" and args.task_sync_command == "push": if args.workspace_command == "sync" and args.workspace_sync_command == "push":
if bool(args.json): if bool(args.json):
try: try:
payload = pyro.push_task_sync( payload = pyro.push_workspace_sync(
args.task_id, args.workspace_id,
args.source_path, args.source_path,
dest=args.dest, dest=args.dest,
) )
@@ -905,36 +919,36 @@ def main() -> None:
_print_json(payload) _print_json(payload)
else: else:
try: try:
payload = pyro.push_task_sync( payload = pyro.push_workspace_sync(
args.task_id, args.workspace_id,
args.source_path, args.source_path,
dest=args.dest, dest=args.dest,
) )
except Exception as exc: # noqa: BLE001 except Exception as exc: # noqa: BLE001
print(f"[error] {exc}", file=sys.stderr, flush=True) print(f"[error] {exc}", file=sys.stderr, flush=True)
raise SystemExit(1) from exc raise SystemExit(1) from exc
_print_task_sync_human(payload) _print_workspace_sync_human(payload)
return return
if args.task_command == "status": if args.workspace_command == "status":
payload = pyro.status_task(args.task_id) payload = pyro.status_workspace(args.workspace_id)
if bool(args.json): if bool(args.json):
_print_json(payload) _print_json(payload)
else: else:
_print_task_summary_human(payload, action="Task") _print_workspace_summary_human(payload, action="Workspace")
return return
if args.task_command == "logs": if args.workspace_command == "logs":
payload = pyro.logs_task(args.task_id) payload = pyro.logs_workspace(args.workspace_id)
if bool(args.json): if bool(args.json):
_print_json(payload) _print_json(payload)
else: else:
_print_task_logs_human(payload) _print_workspace_logs_human(payload)
return return
if args.task_command == "delete": if args.workspace_command == "delete":
payload = pyro.delete_task(args.task_id) payload = pyro.delete_workspace(args.workspace_id)
if bool(args.json): if bool(args.json):
_print_json(payload) _print_json(payload)
else: else:
print(f"Deleted task: {str(payload.get('task_id', 'unknown'))}") print(f"Deleted workspace: {str(payload.get('workspace_id', 'unknown'))}")
return return
if args.command == "doctor": if args.command == "doctor":
payload = doctor_report(platform=args.platform) payload = doctor_report(platform=args.platform)

View file

@@ -2,21 +2,21 @@
from __future__ import annotations from __future__ import annotations
# Frozen public CLI surface: top-level commands plus the workspace
# subcommands and flags. NOTE(review): presumably pinned by the
# public-contract tests so any rename is an explicit change — confirm.
PUBLIC_CLI_COMMANDS = ("demo", "doctor", "env", "mcp", "run", "workspace")
PUBLIC_CLI_DEMO_SUBCOMMANDS = ("ollama",)
PUBLIC_CLI_ENV_SUBCOMMANDS = ("inspect", "list", "pull", "prune")
PUBLIC_CLI_WORKSPACE_SUBCOMMANDS = ("create", "delete", "exec", "logs", "status", "sync")
PUBLIC_CLI_WORKSPACE_SYNC_SUBCOMMANDS = ("push",)
# Flags accepted by `pyro workspace create`; create-time seeding is spelled
# --seed-path, while post-start imports go through `workspace sync push`.
PUBLIC_CLI_WORKSPACE_CREATE_FLAGS = (
    "--vcpu-count",
    "--mem-mib",
    "--ttl-seconds",
    "--network",
    "--allow-host-compat",
    "--seed-path",
    "--json",
)
PUBLIC_CLI_WORKSPACE_SYNC_PUSH_FLAGS = ("--dest", "--json")
PUBLIC_CLI_RUN_FLAGS = ( PUBLIC_CLI_RUN_FLAGS = (
"--vcpu-count", "--vcpu-count",
"--mem-mib", "--mem-mib",
@ -29,24 +29,24 @@ PUBLIC_CLI_RUN_FLAGS = (
# Frozen public Python SDK surface (alphabetically sorted method names).
# The persistent-sandbox API is workspace-first: workspace_* replaced the
# old task_* names with no compatibility aliases; vm_* methods and
# run_in_vm are unchanged.
PUBLIC_SDK_METHODS = (
    "create_server",
    "create_vm",
    "create_workspace",
    "delete_vm",
    "delete_workspace",
    "exec_vm",
    "exec_workspace",
    "inspect_environment",
    "list_environments",
    "logs_workspace",
    "network_info_vm",
    "prune_environments",
    "pull_environment",
    "push_workspace_sync",
    "reap_expired",
    "run_in_vm",
    "start_vm",
    "status_vm",
    "status_workspace",
    "stop_vm",
)
@ -61,10 +61,10 @@ PUBLIC_MCP_TOOLS = (
"vm_start", "vm_start",
"vm_status", "vm_status",
"vm_stop", "vm_stop",
"task_create", "workspace_create",
"task_delete", "workspace_delete",
"task_exec", "workspace_exec",
"task_logs", "workspace_logs",
"task_status", "workspace_status",
"task_sync_push", "workspace_sync_push",
) )

View file

@ -19,7 +19,7 @@ from typing import Any
from pyro_mcp.runtime import DEFAULT_PLATFORM, RuntimePaths from pyro_mcp.runtime import DEFAULT_PLATFORM, RuntimePaths
# Version assigned to environment definitions that do not declare one.
DEFAULT_ENVIRONMENT_VERSION = "1.0.0"
# Catalog version; kept in lockstep with the package version in pyproject.
DEFAULT_CATALOG_VERSION = "2.4.0"
OCI_MANIFEST_ACCEPT = ", ".join( OCI_MANIFEST_ACCEPT = ", ".join(
( (
"application/vnd.oci.image.index.v1+json", "application/vnd.oci.image.index.v1+json",

View file

@ -1,4 +1,4 @@
"""Lifecycle manager for ephemeral VM environments and persistent tasks.""" """Lifecycle manager for ephemeral VM environments and persistent workspaces."""
from __future__ import annotations from __future__ import annotations
@ -36,13 +36,13 @@ DEFAULT_TIMEOUT_SECONDS = 30
DEFAULT_TTL_SECONDS = 600 DEFAULT_TTL_SECONDS = 600
DEFAULT_ALLOW_HOST_COMPAT = False DEFAULT_ALLOW_HOST_COMPAT = False
# Schema version of the persisted workspaces/<workspace_id>/workspace.json
# record; bump when the on-disk layout changes.
WORKSPACE_LAYOUT_VERSION = 2
# Subdirectory names kept under each workspace's state directory.
WORKSPACE_DIRNAME = "workspace"
WORKSPACE_COMMANDS_DIRNAME = "commands"
WORKSPACE_RUNTIME_DIRNAME = "runtime"
# Guest-side mount point of the workspace, and the in-guest agent path used
# when seeding guest-backed runtimes.
WORKSPACE_GUEST_PATH = "/workspace"
WORKSPACE_GUEST_AGENT_PATH = "/opt/pyro/bin/pyro_guest_agent.py"
# Timeout for streaming a seed archive into the guest via the backend.
WORKSPACE_ARCHIVE_UPLOAD_TIMEOUT_SECONDS = 60

# How a workspace's /workspace content is initially populated.
WorkspaceSeedMode = Literal["empty", "directory", "tar_archive"]
@ -69,10 +69,10 @@ class VmInstance:
@dataclass @dataclass
class TaskRecord: class WorkspaceRecord:
"""Persistent task metadata stored on disk.""" """Persistent workspace metadata stored on disk."""
task_id: str workspace_id: str
environment: str environment: str
vcpu_count: int vcpu_count: int
mem_mib: int mem_mib: int
@ -98,9 +98,9 @@ class TaskRecord:
command_count: int = 0, command_count: int = 0,
last_command: dict[str, Any] | None = None, last_command: dict[str, Any] | None = None,
workspace_seed: dict[str, Any] | None = None, workspace_seed: dict[str, Any] | None = None,
) -> TaskRecord: ) -> WorkspaceRecord:
return cls( return cls(
task_id=instance.vm_id, workspace_id=instance.vm_id,
environment=instance.environment, environment=instance.environment,
vcpu_count=instance.vcpu_count, vcpu_count=instance.vcpu_count,
mem_mib=instance.mem_mib, mem_mib=instance.mem_mib,
@ -121,7 +121,7 @@ class TaskRecord:
def to_instance(self, *, workdir: Path) -> VmInstance: def to_instance(self, *, workdir: Path) -> VmInstance:
return VmInstance( return VmInstance(
vm_id=self.task_id, vm_id=self.workspace_id,
environment=self.environment, environment=self.environment,
vcpu_count=self.vcpu_count, vcpu_count=self.vcpu_count,
mem_mib=self.mem_mib, mem_mib=self.mem_mib,
@ -140,8 +140,8 @@ class TaskRecord:
def to_payload(self) -> dict[str, Any]: def to_payload(self) -> dict[str, Any]:
return { return {
"layout_version": TASK_LAYOUT_VERSION, "layout_version": WORKSPACE_LAYOUT_VERSION,
"task_id": self.task_id, "workspace_id": self.workspace_id,
"environment": self.environment, "environment": self.environment,
"vcpu_count": self.vcpu_count, "vcpu_count": self.vcpu_count,
"mem_mib": self.mem_mib, "mem_mib": self.mem_mib,
@ -161,9 +161,9 @@ class TaskRecord:
} }
@classmethod @classmethod
def from_payload(cls, payload: dict[str, Any]) -> TaskRecord: def from_payload(cls, payload: dict[str, Any]) -> WorkspaceRecord:
return cls( return cls(
task_id=str(payload["task_id"]), workspace_id=str(payload["workspace_id"]),
environment=str(payload["environment"]), environment=str(payload["environment"]),
vcpu_count=int(payload["vcpu_count"]), vcpu_count=int(payload["vcpu_count"]),
mem_mib=int(payload["mem_mib"]), mem_mib=int(payload["mem_mib"]),
@ -179,7 +179,7 @@ class TaskRecord:
network=_deserialize_network(payload.get("network")), network=_deserialize_network(payload.get("network")),
command_count=int(payload.get("command_count", 0)), command_count=int(payload.get("command_count", 0)),
last_command=_optional_dict(payload.get("last_command")), last_command=_optional_dict(payload.get("last_command")),
workspace_seed=_task_workspace_seed_dict(payload.get("workspace_seed")), workspace_seed=_workspace_seed_dict(payload.get("workspace_seed")),
) )
@ -194,10 +194,15 @@ class PreparedWorkspaceSeed:
bytes_written: int = 0 bytes_written: int = 0
cleanup_dir: Path | None = None cleanup_dir: Path | None = None
def to_payload(self, *, destination: str = TASK_WORKSPACE_GUEST_PATH) -> dict[str, Any]: def to_payload(
self,
*,
destination: str = WORKSPACE_GUEST_PATH,
path_key: str = "seed_path",
) -> dict[str, Any]:
return { return {
"mode": self.mode, "mode": self.mode,
"source_path": self.source_path, path_key: self.source_path,
"destination": destination, "destination": destination,
"entry_count": self.entry_count, "entry_count": self.entry_count,
"bytes_written": self.bytes_written, "bytes_written": self.bytes_written,
@ -255,21 +260,21 @@ def _string_dict(value: object) -> dict[str, str]:
def _empty_workspace_seed_payload() -> dict[str, Any]:
    """Seed metadata recorded for a workspace created without a seed path."""
    return dict(
        mode="empty",
        seed_path=None,
        destination=WORKSPACE_GUEST_PATH,
        entry_count=0,
        bytes_written=0,
    )
def _task_workspace_seed_dict(value: object) -> dict[str, Any]: def _workspace_seed_dict(value: object) -> dict[str, Any]:
if not isinstance(value, dict): if not isinstance(value, dict):
return _empty_workspace_seed_payload() return _empty_workspace_seed_payload()
payload = _empty_workspace_seed_payload() payload = _empty_workspace_seed_payload()
payload.update( payload.update(
{ {
"mode": str(value.get("mode", payload["mode"])), "mode": str(value.get("mode", payload["mode"])),
"source_path": _optional_str(value.get("source_path")), "seed_path": _optional_str(value.get("seed_path")),
"destination": str(value.get("destination", payload["destination"])), "destination": str(value.get("destination", payload["destination"])),
"entry_count": int(value.get("entry_count", payload["entry_count"])), "entry_count": int(value.get("entry_count", payload["entry_count"])),
"bytes_written": int(value.get("bytes_written", payload["bytes_written"])), "bytes_written": int(value.get("bytes_written", payload["bytes_written"])),
@ -374,7 +379,7 @@ def _normalize_workspace_destination(destination: str) -> tuple[str, PurePosixPa
destination_path = PurePosixPath(candidate) destination_path = PurePosixPath(candidate)
if any(part == ".." for part in destination_path.parts): if any(part == ".." for part in destination_path.parts):
raise ValueError("workspace destination must stay inside /workspace") raise ValueError("workspace destination must stay inside /workspace")
workspace_root = PurePosixPath(TASK_WORKSPACE_GUEST_PATH) workspace_root = PurePosixPath(WORKSPACE_GUEST_PATH)
if not destination_path.is_absolute(): if not destination_path.is_absolute():
destination_path = workspace_root / destination_path destination_path = workspace_root / destination_path
parts = [part for part in destination_path.parts if part not in {"", "."}] parts = [part for part in destination_path.parts if part not in {"", "."}]
@ -510,7 +515,7 @@ def _extract_seed_archive_to_host_workspace(
def _instance_workspace_host_dir(instance: VmInstance) -> Path: def _instance_workspace_host_dir(instance: VmInstance) -> Path:
raw_value = instance.metadata.get("workspace_host_dir") raw_value = instance.metadata.get("workspace_host_dir")
if raw_value is None or raw_value == "": if raw_value is None or raw_value == "":
raise RuntimeError("task workspace host directory is unavailable") raise RuntimeError("workspace host directory is unavailable")
return Path(raw_value) return Path(raw_value)
@ -518,13 +523,13 @@ def _patch_rootfs_guest_agent(rootfs_image: Path, guest_agent_path: Path) -> Non
debugfs_path = shutil.which("debugfs") debugfs_path = shutil.which("debugfs")
if debugfs_path is None: if debugfs_path is None:
raise RuntimeError( raise RuntimeError(
"debugfs is required to seed task workspaces on guest-backed runtimes" "debugfs is required to seed workspaces on guest-backed runtimes"
) )
with tempfile.TemporaryDirectory(prefix="pyro-guest-agent-") as temp_dir: with tempfile.TemporaryDirectory(prefix="pyro-guest-agent-") as temp_dir:
staged_agent_path = Path(temp_dir) / "pyro_guest_agent.py" staged_agent_path = Path(temp_dir) / "pyro_guest_agent.py"
shutil.copy2(guest_agent_path, staged_agent_path) shutil.copy2(guest_agent_path, staged_agent_path)
subprocess.run( # noqa: S603 subprocess.run( # noqa: S603
[debugfs_path, "-w", "-R", f"rm {TASK_GUEST_AGENT_PATH}", str(rootfs_image)], [debugfs_path, "-w", "-R", f"rm {WORKSPACE_GUEST_AGENT_PATH}", str(rootfs_image)],
text=True, text=True,
capture_output=True, capture_output=True,
check=False, check=False,
@ -534,7 +539,7 @@ def _patch_rootfs_guest_agent(rootfs_image: Path, guest_agent_path: Path) -> Non
debugfs_path, debugfs_path,
"-w", "-w",
"-R", "-R",
f"write {staged_agent_path} {TASK_GUEST_AGENT_PATH}", f"write {staged_agent_path} {WORKSPACE_GUEST_AGENT_PATH}",
str(rootfs_image), str(rootfs_image),
], ],
text=True, text=True,
@ -543,7 +548,7 @@ def _patch_rootfs_guest_agent(rootfs_image: Path, guest_agent_path: Path) -> Non
) )
if proc.returncode != 0: if proc.returncode != 0:
raise RuntimeError( raise RuntimeError(
"failed to patch guest agent into task rootfs: " "failed to patch guest agent into workspace rootfs: "
f"{proc.stderr.strip() or proc.stdout.strip()}" f"{proc.stderr.strip() or proc.stdout.strip()}"
) )
@ -862,7 +867,7 @@ class FirecrackerBackend(VmBackend): # pragma: no cover
port, port,
archive_path, archive_path,
destination=destination, destination=destination,
timeout_seconds=TASK_ARCHIVE_UPLOAD_TIMEOUT_SECONDS, timeout_seconds=WORKSPACE_ARCHIVE_UPLOAD_TIMEOUT_SECONDS,
uds_path=uds_path, uds_path=uds_path,
) )
return { return {
@ -885,7 +890,7 @@ class FirecrackerBackend(VmBackend): # pragma: no cover
class VmManager: class VmManager:
"""In-process lifecycle manager for ephemeral VM environments and tasks.""" """In-process lifecycle manager for ephemeral VM environments and workspaces."""
MIN_VCPUS = 1 MIN_VCPUS = 1
MAX_VCPUS = 8 MAX_VCPUS = 8
@ -911,7 +916,7 @@ class VmManager:
) -> None: ) -> None:
self._backend_name = backend_name or "firecracker" self._backend_name = backend_name or "firecracker"
self._base_dir = base_dir or Path("/tmp/pyro-mcp") self._base_dir = base_dir or Path("/tmp/pyro-mcp")
self._tasks_dir = self._base_dir / "tasks" self._workspaces_dir = self._base_dir / "workspaces"
resolved_cache_dir = cache_dir or default_cache_dir() resolved_cache_dir = cache_dir or default_cache_dir()
self._runtime_paths = runtime_paths self._runtime_paths = runtime_paths
if self._backend_name == "firecracker": if self._backend_name == "firecracker":
@ -944,7 +949,7 @@ class VmManager:
self._lock = threading.Lock() self._lock = threading.Lock()
self._instances: dict[str, VmInstance] = {} self._instances: dict[str, VmInstance] = {}
self._base_dir.mkdir(parents=True, exist_ok=True) self._base_dir.mkdir(parents=True, exist_ok=True)
self._tasks_dir.mkdir(parents=True, exist_ok=True) self._workspaces_dir.mkdir(parents=True, exist_ok=True)
self._backend = self._build_backend() self._backend = self._build_backend()
def _build_backend(self) -> VmBackend: def _build_backend(self) -> VmBackend:
@ -989,8 +994,8 @@ class VmManager:
now = time.time() now = time.time()
with self._lock: with self._lock:
self._reap_expired_locked(now) self._reap_expired_locked(now)
self._reap_expired_tasks_locked(now) self._reap_expired_workspaces_locked(now)
active_count = len(self._instances) + self._count_tasks_locked() active_count = len(self._instances) + self._count_workspaces_locked()
if active_count >= self._max_active_vms: if active_count >= self._max_active_vms:
raise RuntimeError( raise RuntimeError(
f"max active VMs reached ({self._max_active_vms}); delete old VMs first" f"max active VMs reached ({self._max_active_vms}); delete old VMs first"
@ -1126,7 +1131,7 @@ class VmManager:
del self._instances[vm_id] del self._instances[vm_id]
return {"deleted_vm_ids": expired_vm_ids, "count": len(expired_vm_ids)} return {"deleted_vm_ids": expired_vm_ids, "count": len(expired_vm_ids)}
def create_task( def create_workspace(
self, self,
*, *,
environment: str, environment: str,
@ -1135,22 +1140,22 @@ class VmManager:
ttl_seconds: int = DEFAULT_TTL_SECONDS, ttl_seconds: int = DEFAULT_TTL_SECONDS,
network: bool = False, network: bool = False,
allow_host_compat: bool = DEFAULT_ALLOW_HOST_COMPAT, allow_host_compat: bool = DEFAULT_ALLOW_HOST_COMPAT,
source_path: str | Path | None = None, seed_path: str | Path | None = None,
) -> dict[str, Any]: ) -> dict[str, Any]:
self._validate_limits(vcpu_count=vcpu_count, mem_mib=mem_mib, ttl_seconds=ttl_seconds) self._validate_limits(vcpu_count=vcpu_count, mem_mib=mem_mib, ttl_seconds=ttl_seconds)
get_environment(environment, runtime_paths=self._runtime_paths) get_environment(environment, runtime_paths=self._runtime_paths)
prepared_seed = self._prepare_workspace_seed(source_path) prepared_seed = self._prepare_workspace_seed(seed_path)
now = time.time() now = time.time()
task_id = uuid.uuid4().hex[:12] workspace_id = uuid.uuid4().hex[:12]
task_dir = self._task_dir(task_id) workspace_dir = self._workspace_dir(workspace_id)
runtime_dir = self._task_runtime_dir(task_id) runtime_dir = self._workspace_runtime_dir(workspace_id)
workspace_dir = self._task_workspace_dir(task_id) host_workspace_dir = self._workspace_host_dir(workspace_id)
commands_dir = self._task_commands_dir(task_id) commands_dir = self._workspace_commands_dir(workspace_id)
task_dir.mkdir(parents=True, exist_ok=False) workspace_dir.mkdir(parents=True, exist_ok=False)
workspace_dir.mkdir(parents=True, exist_ok=True) host_workspace_dir.mkdir(parents=True, exist_ok=True)
commands_dir.mkdir(parents=True, exist_ok=True) commands_dir.mkdir(parents=True, exist_ok=True)
instance = VmInstance( instance = VmInstance(
vm_id=task_id, vm_id=workspace_id,
environment=environment, environment=environment,
vcpu_count=vcpu_count, vcpu_count=vcpu_count,
mem_mib=mem_mib, mem_mib=mem_mib,
@ -1162,13 +1167,13 @@ class VmManager:
allow_host_compat=allow_host_compat, allow_host_compat=allow_host_compat,
) )
instance.metadata["allow_host_compat"] = str(allow_host_compat).lower() instance.metadata["allow_host_compat"] = str(allow_host_compat).lower()
instance.metadata["workspace_path"] = TASK_WORKSPACE_GUEST_PATH instance.metadata["workspace_path"] = WORKSPACE_GUEST_PATH
instance.metadata["workspace_host_dir"] = str(workspace_dir) instance.metadata["workspace_host_dir"] = str(host_workspace_dir)
try: try:
with self._lock: with self._lock:
self._reap_expired_locked(now) self._reap_expired_locked(now)
self._reap_expired_tasks_locked(now) self._reap_expired_workspaces_locked(now)
active_count = len(self._instances) + self._count_tasks_locked() active_count = len(self._instances) + self._count_workspaces_locked()
if active_count >= self._max_active_vms: if active_count >= self._max_active_vms:
raise RuntimeError( raise RuntimeError(
f"max active VMs reached ({self._max_active_vms}); delete old VMs first" f"max active VMs reached ({self._max_active_vms}); delete old VMs first"
@ -1178,7 +1183,7 @@ class VmManager:
prepared_seed.archive_path is not None prepared_seed.archive_path is not None
and self._runtime_capabilities.supports_guest_exec and self._runtime_capabilities.supports_guest_exec
): ):
self._ensure_task_guest_seed_support(instance) self._ensure_workspace_guest_seed_support(instance)
with self._lock: with self._lock:
self._start_instance_locked(instance) self._start_instance_locked(instance)
self._require_guest_exec_or_opt_in(instance) self._require_guest_exec_or_opt_in(instance)
@ -1187,7 +1192,7 @@ class VmManager:
import_summary = self._backend.import_archive( import_summary = self._backend.import_archive(
instance, instance,
archive_path=prepared_seed.archive_path, archive_path=prepared_seed.archive_path,
destination=TASK_WORKSPACE_GUEST_PATH, destination=WORKSPACE_GUEST_PATH,
) )
workspace_seed["entry_count"] = int(import_summary["entry_count"]) workspace_seed["entry_count"] = int(import_summary["entry_count"])
workspace_seed["bytes_written"] = int(import_summary["bytes_written"]) workspace_seed["bytes_written"] = int(import_summary["bytes_written"])
@ -1195,14 +1200,14 @@ class VmManager:
elif self._runtime_capabilities.supports_guest_exec: elif self._runtime_capabilities.supports_guest_exec:
self._backend.exec( self._backend.exec(
instance, instance,
f"mkdir -p {shlex.quote(TASK_WORKSPACE_GUEST_PATH)}", f"mkdir -p {shlex.quote(WORKSPACE_GUEST_PATH)}",
10, 10,
) )
else: else:
instance.metadata["execution_mode"] = "host_compat" instance.metadata["execution_mode"] = "host_compat"
task = TaskRecord.from_instance(instance, workspace_seed=workspace_seed) workspace = WorkspaceRecord.from_instance(instance, workspace_seed=workspace_seed)
self._save_task_locked(task) self._save_workspace_locked(workspace)
return self._serialize_task(task) return self._serialize_workspace(workspace)
except Exception: except Exception:
if runtime_dir.exists(): if runtime_dir.exists():
try: try:
@ -1215,17 +1220,17 @@ class VmManager:
self._backend.delete(instance) self._backend.delete(instance)
except Exception: except Exception:
pass pass
shutil.rmtree(task_dir, ignore_errors=True) shutil.rmtree(workspace_dir, ignore_errors=True)
raise raise
finally: finally:
prepared_seed.cleanup() prepared_seed.cleanup()
def push_task_sync( def push_workspace_sync(
self, self,
task_id: str, workspace_id: str,
*, *,
source_path: str | Path, source_path: str | Path,
dest: str = TASK_WORKSPACE_GUEST_PATH, dest: str = WORKSPACE_GUEST_PATH,
) -> dict[str, Any]: ) -> dict[str, Any]:
prepared_seed = self._prepare_workspace_seed(source_path) prepared_seed = self._prepare_workspace_seed(source_path)
if prepared_seed.archive_path is None: if prepared_seed.archive_path is None:
@ -1233,14 +1238,17 @@ class VmManager:
raise ValueError("source_path is required") raise ValueError("source_path is required")
normalized_destination, _ = _normalize_workspace_destination(dest) normalized_destination, _ = _normalize_workspace_destination(dest)
with self._lock: with self._lock:
task = self._load_task_locked(task_id) workspace = self._load_workspace_locked(workspace_id)
self._ensure_task_not_expired_locked(task, time.time()) self._ensure_workspace_not_expired_locked(workspace, time.time())
self._refresh_task_liveness_locked(task) self._refresh_workspace_liveness_locked(workspace)
if task.state != "started": if workspace.state != "started":
raise RuntimeError( raise RuntimeError(
f"task {task_id} must be in 'started' state before task_sync_push" f"workspace {workspace_id} must be in 'started' state "
"before workspace_sync_push"
) )
instance = task.to_instance(workdir=self._task_runtime_dir(task.task_id)) instance = workspace.to_instance(
workdir=self._workspace_runtime_dir(workspace.workspace_id)
)
try: try:
import_summary = self._backend.import_archive( import_summary = self._backend.import_archive(
instance, instance,
@ -1249,58 +1257,71 @@ class VmManager:
) )
finally: finally:
prepared_seed.cleanup() prepared_seed.cleanup()
workspace_sync = prepared_seed.to_payload(destination=normalized_destination) workspace_sync = prepared_seed.to_payload(
destination=normalized_destination,
path_key="source_path",
)
workspace_sync["entry_count"] = int(import_summary["entry_count"]) workspace_sync["entry_count"] = int(import_summary["entry_count"])
workspace_sync["bytes_written"] = int(import_summary["bytes_written"]) workspace_sync["bytes_written"] = int(import_summary["bytes_written"])
workspace_sync["destination"] = str(import_summary["destination"]) workspace_sync["destination"] = str(import_summary["destination"])
with self._lock: with self._lock:
task = self._load_task_locked(task_id) workspace = self._load_workspace_locked(workspace_id)
task.state = instance.state workspace.state = instance.state
task.firecracker_pid = instance.firecracker_pid workspace.firecracker_pid = instance.firecracker_pid
task.last_error = instance.last_error workspace.last_error = instance.last_error
task.metadata = dict(instance.metadata) workspace.metadata = dict(instance.metadata)
self._save_task_locked(task) self._save_workspace_locked(workspace)
return { return {
"task_id": task_id, "workspace_id": workspace_id,
"execution_mode": instance.metadata.get("execution_mode", "pending"), "execution_mode": instance.metadata.get("execution_mode", "pending"),
"workspace_sync": workspace_sync, "workspace_sync": workspace_sync,
} }
def exec_task(self, task_id: str, *, command: str, timeout_seconds: int = 30) -> dict[str, Any]: def exec_workspace(
self,
workspace_id: str,
*,
command: str,
timeout_seconds: int = 30,
) -> dict[str, Any]:
if timeout_seconds <= 0: if timeout_seconds <= 0:
raise ValueError("timeout_seconds must be positive") raise ValueError("timeout_seconds must be positive")
with self._lock: with self._lock:
task = self._load_task_locked(task_id) workspace = self._load_workspace_locked(workspace_id)
self._ensure_task_not_expired_locked(task, time.time()) self._ensure_workspace_not_expired_locked(workspace, time.time())
self._refresh_task_liveness_locked(task) self._refresh_workspace_liveness_locked(workspace)
if task.state != "started": if workspace.state != "started":
raise RuntimeError(f"task {task_id} must be in 'started' state before task_exec") raise RuntimeError(
instance = task.to_instance(workdir=self._task_runtime_dir(task.task_id)) f"workspace {workspace_id} must be in 'started' state before workspace_exec"
)
instance = workspace.to_instance(
workdir=self._workspace_runtime_dir(workspace.workspace_id)
)
exec_result, execution_mode = self._exec_instance( exec_result, execution_mode = self._exec_instance(
instance, instance,
command=command, command=command,
timeout_seconds=timeout_seconds, timeout_seconds=timeout_seconds,
host_workdir=self._task_workspace_dir(task.task_id), host_workdir=self._workspace_host_dir(workspace.workspace_id),
guest_cwd=TASK_WORKSPACE_GUEST_PATH, guest_cwd=WORKSPACE_GUEST_PATH,
) )
with self._lock: with self._lock:
task = self._load_task_locked(task_id) workspace = self._load_workspace_locked(workspace_id)
task.state = instance.state workspace.state = instance.state
task.firecracker_pid = instance.firecracker_pid workspace.firecracker_pid = instance.firecracker_pid
task.last_error = instance.last_error workspace.last_error = instance.last_error
task.metadata = dict(instance.metadata) workspace.metadata = dict(instance.metadata)
entry = self._record_task_command_locked( entry = self._record_workspace_command_locked(
task, workspace,
command=command, command=command,
exec_result=exec_result, exec_result=exec_result,
execution_mode=execution_mode, execution_mode=execution_mode,
cwd=TASK_WORKSPACE_GUEST_PATH, cwd=WORKSPACE_GUEST_PATH,
) )
self._save_task_locked(task) self._save_workspace_locked(workspace)
return { return {
"task_id": task_id, "workspace_id": workspace_id,
"environment": task.environment, "environment": workspace.environment,
"environment_version": task.metadata.get("environment_version"), "environment_version": workspace.metadata.get("environment_version"),
"command": command, "command": command,
"stdout": exec_result.stdout, "stdout": exec_result.stdout,
"stderr": exec_result.stderr, "stderr": exec_result.stderr,
@ -1308,36 +1329,47 @@ class VmManager:
"duration_ms": exec_result.duration_ms, "duration_ms": exec_result.duration_ms,
"execution_mode": execution_mode, "execution_mode": execution_mode,
"sequence": entry["sequence"], "sequence": entry["sequence"],
"cwd": TASK_WORKSPACE_GUEST_PATH, "cwd": WORKSPACE_GUEST_PATH,
} }
def status_task(self, task_id: str) -> dict[str, Any]: def status_workspace(self, workspace_id: str) -> dict[str, Any]:
with self._lock: with self._lock:
task = self._load_task_locked(task_id) workspace = self._load_workspace_locked(workspace_id)
self._ensure_task_not_expired_locked(task, time.time()) self._ensure_workspace_not_expired_locked(workspace, time.time())
self._refresh_task_liveness_locked(task) self._refresh_workspace_liveness_locked(workspace)
self._save_task_locked(task) self._save_workspace_locked(workspace)
return self._serialize_task(task) return self._serialize_workspace(workspace)
def logs_task(self, task_id: str) -> dict[str, Any]: def logs_workspace(self, workspace_id: str) -> dict[str, Any]:
with self._lock: with self._lock:
task = self._load_task_locked(task_id) workspace = self._load_workspace_locked(workspace_id)
self._ensure_task_not_expired_locked(task, time.time()) self._ensure_workspace_not_expired_locked(workspace, time.time())
self._refresh_task_liveness_locked(task) self._refresh_workspace_liveness_locked(workspace)
self._save_task_locked(task) self._save_workspace_locked(workspace)
entries = self._read_task_logs_locked(task.task_id) entries = self._read_workspace_logs_locked(workspace.workspace_id)
return {"task_id": task.task_id, "count": len(entries), "entries": entries} return {
"workspace_id": workspace.workspace_id,
"count": len(entries),
"entries": entries,
}
def delete_task(self, task_id: str, *, reason: str = "explicit_delete") -> dict[str, Any]: def delete_workspace(
self,
workspace_id: str,
*,
reason: str = "explicit_delete",
) -> dict[str, Any]:
with self._lock: with self._lock:
task = self._load_task_locked(task_id) workspace = self._load_workspace_locked(workspace_id)
instance = task.to_instance(workdir=self._task_runtime_dir(task.task_id)) instance = workspace.to_instance(
if task.state == "started": workdir=self._workspace_runtime_dir(workspace.workspace_id)
)
if workspace.state == "started":
self._backend.stop(instance) self._backend.stop(instance)
task.state = "stopped" workspace.state = "stopped"
self._backend.delete(instance) self._backend.delete(instance)
shutil.rmtree(self._task_dir(task_id), ignore_errors=True) shutil.rmtree(self._workspace_dir(workspace_id), ignore_errors=True)
return {"task_id": task_id, "deleted": True, "reason": reason} return {"workspace_id": workspace_id, "deleted": True, "reason": reason}
def _validate_limits(self, *, vcpu_count: int, mem_mib: int, ttl_seconds: int) -> None: def _validate_limits(self, *, vcpu_count: int, mem_mib: int, ttl_seconds: int) -> None:
if not self.MIN_VCPUS <= vcpu_count <= self.MAX_VCPUS: if not self.MIN_VCPUS <= vcpu_count <= self.MAX_VCPUS:
@ -1368,27 +1400,27 @@ class VmManager:
"metadata": instance.metadata, "metadata": instance.metadata,
} }
def _serialize_task(self, task: TaskRecord) -> dict[str, Any]: def _serialize_workspace(self, workspace: WorkspaceRecord) -> dict[str, Any]:
return { return {
"task_id": task.task_id, "workspace_id": workspace.workspace_id,
"environment": task.environment, "environment": workspace.environment,
"environment_version": task.metadata.get("environment_version"), "environment_version": workspace.metadata.get("environment_version"),
"vcpu_count": task.vcpu_count, "vcpu_count": workspace.vcpu_count,
"mem_mib": task.mem_mib, "mem_mib": workspace.mem_mib,
"ttl_seconds": task.ttl_seconds, "ttl_seconds": workspace.ttl_seconds,
"created_at": task.created_at, "created_at": workspace.created_at,
"expires_at": task.expires_at, "expires_at": workspace.expires_at,
"state": task.state, "state": workspace.state,
"network_enabled": task.network is not None, "network_enabled": workspace.network is not None,
"allow_host_compat": task.allow_host_compat, "allow_host_compat": workspace.allow_host_compat,
"guest_ip": task.network.guest_ip if task.network is not None else None, "guest_ip": workspace.network.guest_ip if workspace.network is not None else None,
"tap_name": task.network.tap_name if task.network is not None else None, "tap_name": workspace.network.tap_name if workspace.network is not None else None,
"execution_mode": task.metadata.get("execution_mode", "pending"), "execution_mode": workspace.metadata.get("execution_mode", "pending"),
"workspace_path": TASK_WORKSPACE_GUEST_PATH, "workspace_path": WORKSPACE_GUEST_PATH,
"workspace_seed": _task_workspace_seed_dict(task.workspace_seed), "workspace_seed": _workspace_seed_dict(workspace.workspace_seed),
"command_count": task.command_count, "command_count": workspace.command_count,
"last_command": task.last_command, "last_command": workspace.last_command,
"metadata": task.metadata, "metadata": workspace.metadata,
} }
def _require_guest_boot_or_opt_in(self, instance: VmInstance) -> None: def _require_guest_boot_or_opt_in(self, instance: VmInstance) -> None:
@ -1481,14 +1513,14 @@ class VmManager:
execution_mode = instance.metadata.get("execution_mode", "unknown") execution_mode = instance.metadata.get("execution_mode", "unknown")
return exec_result, execution_mode return exec_result, execution_mode
def _prepare_workspace_seed(self, source_path: str | Path | None) -> PreparedWorkspaceSeed: def _prepare_workspace_seed(self, seed_path: str | Path | None) -> PreparedWorkspaceSeed:
if source_path is None: if seed_path is None:
return PreparedWorkspaceSeed(mode="empty", source_path=None) return PreparedWorkspaceSeed(mode="empty", source_path=None)
resolved_source_path = Path(source_path).expanduser().resolve() resolved_source_path = Path(seed_path).expanduser().resolve()
if not resolved_source_path.exists(): if not resolved_source_path.exists():
raise ValueError(f"source_path {resolved_source_path} does not exist") raise ValueError(f"seed_path {resolved_source_path} does not exist")
if resolved_source_path.is_dir(): if resolved_source_path.is_dir():
cleanup_dir = Path(tempfile.mkdtemp(prefix="pyro-task-seed-")) cleanup_dir = Path(tempfile.mkdtemp(prefix="pyro-workspace-seed-"))
archive_path = cleanup_dir / "workspace-seed.tar" archive_path = cleanup_dir / "workspace-seed.tar"
try: try:
_write_directory_seed_archive(resolved_source_path, archive_path) _write_directory_seed_archive(resolved_source_path, archive_path)
@ -1509,7 +1541,7 @@ class VmManager:
or not _is_supported_seed_archive(resolved_source_path) or not _is_supported_seed_archive(resolved_source_path)
): ):
raise ValueError( raise ValueError(
"source_path must be a directory or a .tar/.tar.gz/.tgz archive" "seed_path must be a directory or a .tar/.tar.gz/.tgz archive"
) )
entry_count, bytes_written = _inspect_seed_archive(resolved_source_path) entry_count, bytes_written = _inspect_seed_archive(resolved_source_path)
return PreparedWorkspaceSeed( return PreparedWorkspaceSeed(
@ -1520,94 +1552,102 @@ class VmManager:
bytes_written=bytes_written, bytes_written=bytes_written,
) )
def _ensure_task_guest_seed_support(self, instance: VmInstance) -> None: def _ensure_workspace_guest_seed_support(self, instance: VmInstance) -> None:
if self._runtime_paths is None or self._runtime_paths.guest_agent_path is None: if self._runtime_paths is None or self._runtime_paths.guest_agent_path is None:
raise RuntimeError("runtime bundle does not provide a guest agent for task seeding") raise RuntimeError(
"runtime bundle does not provide a guest agent for workspace seeding"
)
rootfs_image = instance.metadata.get("rootfs_image") rootfs_image = instance.metadata.get("rootfs_image")
if rootfs_image is None or rootfs_image == "": if rootfs_image is None or rootfs_image == "":
raise RuntimeError("task rootfs image is unavailable for guest workspace seeding") raise RuntimeError("workspace rootfs image is unavailable for guest seeding")
_patch_rootfs_guest_agent(Path(rootfs_image), self._runtime_paths.guest_agent_path) _patch_rootfs_guest_agent(Path(rootfs_image), self._runtime_paths.guest_agent_path)
def _task_dir(self, task_id: str) -> Path: def _workspace_dir(self, workspace_id: str) -> Path:
return self._tasks_dir / task_id return self._workspaces_dir / workspace_id
def _task_runtime_dir(self, task_id: str) -> Path: def _workspace_runtime_dir(self, workspace_id: str) -> Path:
return self._task_dir(task_id) / TASK_RUNTIME_DIRNAME return self._workspace_dir(workspace_id) / WORKSPACE_RUNTIME_DIRNAME
def _task_workspace_dir(self, task_id: str) -> Path: def _workspace_host_dir(self, workspace_id: str) -> Path:
return self._task_dir(task_id) / TASK_WORKSPACE_DIRNAME return self._workspace_dir(workspace_id) / WORKSPACE_DIRNAME
def _task_commands_dir(self, task_id: str) -> Path: def _workspace_commands_dir(self, workspace_id: str) -> Path:
return self._task_dir(task_id) / TASK_COMMANDS_DIRNAME return self._workspace_dir(workspace_id) / WORKSPACE_COMMANDS_DIRNAME
def _task_metadata_path(self, task_id: str) -> Path: def _workspace_metadata_path(self, workspace_id: str) -> Path:
return self._task_dir(task_id) / "task.json" return self._workspace_dir(workspace_id) / "workspace.json"
def _count_tasks_locked(self) -> int: def _count_workspaces_locked(self) -> int:
return sum(1 for _ in self._tasks_dir.glob("*/task.json")) return sum(1 for _ in self._workspaces_dir.glob("*/workspace.json"))
def _load_task_locked(self, task_id: str) -> TaskRecord: def _load_workspace_locked(self, workspace_id: str) -> WorkspaceRecord:
metadata_path = self._task_metadata_path(task_id) metadata_path = self._workspace_metadata_path(workspace_id)
if not metadata_path.exists(): if not metadata_path.exists():
raise ValueError(f"task {task_id!r} does not exist") raise ValueError(f"workspace {workspace_id!r} does not exist")
payload = json.loads(metadata_path.read_text(encoding="utf-8")) payload = json.loads(metadata_path.read_text(encoding="utf-8"))
if not isinstance(payload, dict): if not isinstance(payload, dict):
raise RuntimeError(f"task record at {metadata_path} is invalid") raise RuntimeError(f"workspace record at {metadata_path} is invalid")
return TaskRecord.from_payload(payload) return WorkspaceRecord.from_payload(payload)
def _save_task_locked(self, task: TaskRecord) -> None: def _save_workspace_locked(self, workspace: WorkspaceRecord) -> None:
metadata_path = self._task_metadata_path(task.task_id) metadata_path = self._workspace_metadata_path(workspace.workspace_id)
metadata_path.parent.mkdir(parents=True, exist_ok=True) metadata_path.parent.mkdir(parents=True, exist_ok=True)
metadata_path.write_text( metadata_path.write_text(
json.dumps(task.to_payload(), indent=2, sort_keys=True), json.dumps(workspace.to_payload(), indent=2, sort_keys=True),
encoding="utf-8", encoding="utf-8",
) )
def _reap_expired_tasks_locked(self, now: float) -> None: def _reap_expired_workspaces_locked(self, now: float) -> None:
for metadata_path in list(self._tasks_dir.glob("*/task.json")): for metadata_path in list(self._workspaces_dir.glob("*/workspace.json")):
payload = json.loads(metadata_path.read_text(encoding="utf-8")) payload = json.loads(metadata_path.read_text(encoding="utf-8"))
if not isinstance(payload, dict): if not isinstance(payload, dict):
shutil.rmtree(metadata_path.parent, ignore_errors=True) shutil.rmtree(metadata_path.parent, ignore_errors=True)
continue continue
task = TaskRecord.from_payload(payload) workspace = WorkspaceRecord.from_payload(payload)
if task.expires_at > now: if workspace.expires_at > now:
continue continue
instance = task.to_instance(workdir=self._task_runtime_dir(task.task_id)) instance = workspace.to_instance(
if task.state == "started": workdir=self._workspace_runtime_dir(workspace.workspace_id)
)
if workspace.state == "started":
self._backend.stop(instance) self._backend.stop(instance)
task.state = "stopped" workspace.state = "stopped"
self._backend.delete(instance) self._backend.delete(instance)
shutil.rmtree(self._task_dir(task.task_id), ignore_errors=True) shutil.rmtree(self._workspace_dir(workspace.workspace_id), ignore_errors=True)
def _ensure_task_not_expired_locked(self, task: TaskRecord, now: float) -> None: def _ensure_workspace_not_expired_locked(
if task.expires_at <= now: self,
task_id = task.task_id workspace: WorkspaceRecord,
self._reap_expired_tasks_locked(now) now: float,
raise RuntimeError(f"task {task_id!r} expired and was automatically deleted") ) -> None:
if workspace.expires_at <= now:
workspace_id = workspace.workspace_id
self._reap_expired_workspaces_locked(now)
raise RuntimeError(f"workspace {workspace_id!r} expired and was automatically deleted")
def _refresh_task_liveness_locked(self, task: TaskRecord) -> None: def _refresh_workspace_liveness_locked(self, workspace: WorkspaceRecord) -> None:
if task.state != "started": if workspace.state != "started":
return return
execution_mode = task.metadata.get("execution_mode") execution_mode = workspace.metadata.get("execution_mode")
if execution_mode == "host_compat": if execution_mode == "host_compat":
return return
if _pid_is_running(task.firecracker_pid): if _pid_is_running(workspace.firecracker_pid):
return return
task.state = "stopped" workspace.state = "stopped"
task.firecracker_pid = None workspace.firecracker_pid = None
task.last_error = "backing guest process is no longer running" workspace.last_error = "backing guest process is no longer running"
def _record_task_command_locked( def _record_workspace_command_locked(
self, self,
task: TaskRecord, workspace: WorkspaceRecord,
*, *,
command: str, command: str,
exec_result: VmExecResult, exec_result: VmExecResult,
execution_mode: str, execution_mode: str,
cwd: str, cwd: str,
) -> dict[str, Any]: ) -> dict[str, Any]:
sequence = task.command_count + 1 sequence = workspace.command_count + 1
commands_dir = self._task_commands_dir(task.task_id) commands_dir = self._workspace_commands_dir(workspace.workspace_id)
commands_dir.mkdir(parents=True, exist_ok=True) commands_dir.mkdir(parents=True, exist_ok=True)
base_name = f"{sequence:06d}" base_name = f"{sequence:06d}"
stdout_path = commands_dir / f"{base_name}.stdout" stdout_path = commands_dir / f"{base_name}.stdout"
@ -1627,8 +1667,8 @@ class VmManager:
"recorded_at": time.time(), "recorded_at": time.time(),
} }
record_path.write_text(json.dumps(entry, indent=2, sort_keys=True), encoding="utf-8") record_path.write_text(json.dumps(entry, indent=2, sort_keys=True), encoding="utf-8")
task.command_count = sequence workspace.command_count = sequence
task.last_command = { workspace.last_command = {
"sequence": sequence, "sequence": sequence,
"command": command, "command": command,
"cwd": cwd, "cwd": cwd,
@ -1638,9 +1678,9 @@ class VmManager:
} }
return entry return entry
def _read_task_logs_locked(self, task_id: str) -> list[dict[str, Any]]: def _read_workspace_logs_locked(self, workspace_id: str) -> list[dict[str, Any]]:
entries: list[dict[str, Any]] = [] entries: list[dict[str, Any]] = []
commands_dir = self._task_commands_dir(task_id) commands_dir = self._workspace_commands_dir(workspace_id)
if not commands_dir.exists(): if not commands_dir.exists():
return entries return entries
for record_path in sorted(commands_dir.glob("*.json")): for record_path in sorted(commands_dir.glob("*.json")):

View file

@ -48,8 +48,8 @@ def test_pyro_create_server_registers_vm_run(tmp_path: Path) -> None:
tool_names = asyncio.run(_run()) tool_names = asyncio.run(_run())
assert "vm_run" in tool_names assert "vm_run" in tool_names
assert "vm_create" in tool_names assert "vm_create" in tool_names
assert "task_create" in tool_names assert "workspace_create" in tool_names
assert "task_sync_push" in tool_names assert "workspace_sync_push" in tool_names
def test_pyro_vm_run_tool_executes(tmp_path: Path) -> None: def test_pyro_vm_run_tool_executes(tmp_path: Path) -> None:
@ -106,7 +106,7 @@ def test_pyro_create_vm_defaults_sizing_and_host_compat(tmp_path: Path) -> None:
assert created["allow_host_compat"] is True assert created["allow_host_compat"] is True
def test_pyro_task_methods_delegate_to_manager(tmp_path: Path) -> None: def test_pyro_workspace_methods_delegate_to_manager(tmp_path: Path) -> None:
pyro = Pyro( pyro = Pyro(
manager=VmManager( manager=VmManager(
backend_name="mock", backend_name="mock",
@ -119,20 +119,20 @@ def test_pyro_task_methods_delegate_to_manager(tmp_path: Path) -> None:
source_dir.mkdir() source_dir.mkdir()
(source_dir / "note.txt").write_text("ok\n", encoding="utf-8") (source_dir / "note.txt").write_text("ok\n", encoding="utf-8")
created = pyro.create_task( created = pyro.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
source_path=source_dir, seed_path=source_dir,
) )
task_id = str(created["task_id"]) workspace_id = str(created["workspace_id"])
updated_dir = tmp_path / "updated" updated_dir = tmp_path / "updated"
updated_dir.mkdir() updated_dir.mkdir()
(updated_dir / "more.txt").write_text("more\n", encoding="utf-8") (updated_dir / "more.txt").write_text("more\n", encoding="utf-8")
synced = pyro.push_task_sync(task_id, updated_dir, dest="subdir") synced = pyro.push_workspace_sync(workspace_id, updated_dir, dest="subdir")
executed = pyro.exec_task(task_id, command="cat note.txt") executed = pyro.exec_workspace(workspace_id, command="cat note.txt")
status = pyro.status_task(task_id) status = pyro.status_workspace(workspace_id)
logs = pyro.logs_task(task_id) logs = pyro.logs_workspace(workspace_id)
deleted = pyro.delete_task(task_id) deleted = pyro.delete_workspace(workspace_id)
assert executed["stdout"] == "ok\n" assert executed["stdout"] == "ok\n"
assert created["workspace_seed"]["mode"] == "directory" assert created["workspace_seed"]["mode"] == "directory"

View file

@ -30,7 +30,7 @@ def test_cli_help_guides_first_run() -> None:
assert "pyro env list" in help_text assert "pyro env list" in help_text
assert "pyro env pull debian:12" in help_text assert "pyro env pull debian:12" in help_text
assert "pyro run debian:12 -- git --version" in help_text assert "pyro run debian:12 -- git --version" in help_text
assert "pyro task sync push TASK_ID ./changes" in help_text assert "pyro workspace sync push WORKSPACE_ID ./changes" in help_text
assert "Use `pyro mcp serve` only after the CLI validation path works." in help_text assert "Use `pyro mcp serve` only after the CLI validation path works." in help_text
@ -60,28 +60,37 @@ def test_cli_subcommand_help_includes_examples_and_guidance() -> None:
assert "Expose pyro tools over stdio for an MCP client." in mcp_help assert "Expose pyro tools over stdio for an MCP client." in mcp_help
assert "Use this from an MCP client config after the CLI evaluation path works." in mcp_help assert "Use this from an MCP client config after the CLI evaluation path works." in mcp_help
task_help = _subparser_choice(parser, "task").format_help() workspace_help = _subparser_choice(parser, "workspace").format_help()
assert "pyro task create debian:12 --source-path ./repo" in task_help assert "pyro workspace create debian:12 --seed-path ./repo" in workspace_help
assert "pyro task sync push TASK_ID ./repo --dest src" in task_help assert "pyro workspace sync push WORKSPACE_ID ./repo --dest src" in workspace_help
assert "pyro task exec TASK_ID" in task_help assert "pyro workspace exec WORKSPACE_ID" in workspace_help
task_create_help = _subparser_choice(_subparser_choice(parser, "task"), "create").format_help() workspace_create_help = _subparser_choice(
assert "--source-path" in task_create_help _subparser_choice(parser, "workspace"),
assert "seed into `/workspace`" in task_create_help "create",
task_exec_help = _subparser_choice(_subparser_choice(parser, "task"), "exec").format_help()
assert "persistent `/workspace`" in task_exec_help
assert "pyro task exec TASK_ID -- cat note.txt" in task_exec_help
task_sync_help = _subparser_choice(_subparser_choice(parser, "task"), "sync").format_help()
assert "Sync is non-atomic." in task_sync_help
assert "pyro task sync push TASK_ID ./repo" in task_sync_help
task_sync_push_help = _subparser_choice(
_subparser_choice(_subparser_choice(parser, "task"), "sync"), "push"
).format_help() ).format_help()
assert "--dest" in task_sync_push_help assert "--seed-path" in workspace_create_help
assert "Import host content into `/workspace`" in task_sync_push_help assert "seed into `/workspace`" in workspace_create_help
workspace_exec_help = _subparser_choice(
_subparser_choice(parser, "workspace"),
"exec",
).format_help()
assert "persistent `/workspace`" in workspace_exec_help
assert "pyro workspace exec WORKSPACE_ID -- cat note.txt" in workspace_exec_help
workspace_sync_help = _subparser_choice(
_subparser_choice(parser, "workspace"),
"sync",
).format_help()
assert "Sync is non-atomic." in workspace_sync_help
assert "pyro workspace sync push WORKSPACE_ID ./repo" in workspace_sync_help
workspace_sync_push_help = _subparser_choice(
_subparser_choice(_subparser_choice(parser, "workspace"), "sync"), "push"
).format_help()
assert "--dest" in workspace_sync_push_help
assert "Import host content into `/workspace`" in workspace_sync_push_help
def test_cli_run_prints_json( def test_cli_run_prints_json(
@ -344,32 +353,32 @@ def test_cli_requires_run_command() -> None:
def test_cli_requires_command_preserves_shell_argument_boundaries() -> None: def test_cli_requires_command_preserves_shell_argument_boundaries() -> None:
command = cli._require_command( command = cli._require_command(
["--", "sh", "-lc", 'printf "hello from task\\n" > note.txt'] ["--", "sh", "-lc", 'printf "hello from workspace\\n" > note.txt']
) )
assert command == 'sh -lc \'printf "hello from task\\n" > note.txt\'' assert command == 'sh -lc \'printf "hello from workspace\\n" > note.txt\''
def test_cli_task_create_prints_json( def test_cli_workspace_create_prints_json(
monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str] monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]
) -> None: ) -> None:
class StubPyro: class StubPyro:
def create_task(self, **kwargs: Any) -> dict[str, Any]: def create_workspace(self, **kwargs: Any) -> dict[str, Any]:
assert kwargs["environment"] == "debian:12" assert kwargs["environment"] == "debian:12"
assert kwargs["source_path"] == "./repo" assert kwargs["seed_path"] == "./repo"
return {"task_id": "task-123", "state": "started"} return {"workspace_id": "workspace-123", "state": "started"}
class StubParser: class StubParser:
def parse_args(self) -> argparse.Namespace: def parse_args(self) -> argparse.Namespace:
return argparse.Namespace( return argparse.Namespace(
command="task", command="workspace",
task_command="create", workspace_command="create",
environment="debian:12", environment="debian:12",
vcpu_count=1, vcpu_count=1,
mem_mib=1024, mem_mib=1024,
ttl_seconds=600, ttl_seconds=600,
network=False, network=False,
allow_host_compat=False, allow_host_compat=False,
source_path="./repo", seed_path="./repo",
json=True, json=True,
) )
@ -377,23 +386,23 @@ def test_cli_task_create_prints_json(
monkeypatch.setattr(cli, "Pyro", StubPyro) monkeypatch.setattr(cli, "Pyro", StubPyro)
cli.main() cli.main()
output = json.loads(capsys.readouterr().out) output = json.loads(capsys.readouterr().out)
assert output["task_id"] == "task-123" assert output["workspace_id"] == "workspace-123"
def test_cli_task_create_prints_human( def test_cli_workspace_create_prints_human(
monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str] monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]
) -> None: ) -> None:
class StubPyro: class StubPyro:
def create_task(self, **kwargs: Any) -> dict[str, Any]: def create_workspace(self, **kwargs: Any) -> dict[str, Any]:
del kwargs del kwargs
return { return {
"task_id": "task-123", "workspace_id": "workspace-123",
"environment": "debian:12", "environment": "debian:12",
"state": "started", "state": "started",
"workspace_path": "/workspace", "workspace_path": "/workspace",
"workspace_seed": { "workspace_seed": {
"mode": "directory", "mode": "directory",
"source_path": "/tmp/repo", "seed_path": "/tmp/repo",
"destination": "/workspace", "destination": "/workspace",
"entry_count": 1, "entry_count": 1,
"bytes_written": 6, "bytes_written": 6,
@ -408,15 +417,15 @@ def test_cli_task_create_prints_human(
class StubParser: class StubParser:
def parse_args(self) -> argparse.Namespace: def parse_args(self) -> argparse.Namespace:
return argparse.Namespace( return argparse.Namespace(
command="task", command="workspace",
task_command="create", workspace_command="create",
environment="debian:12", environment="debian:12",
vcpu_count=1, vcpu_count=1,
mem_mib=1024, mem_mib=1024,
ttl_seconds=600, ttl_seconds=600,
network=False, network=False,
allow_host_compat=False, allow_host_compat=False,
source_path="/tmp/repo", seed_path="/tmp/repo",
json=False, json=False,
) )
@ -424,22 +433,28 @@ def test_cli_task_create_prints_human(
monkeypatch.setattr(cli, "Pyro", StubPyro) monkeypatch.setattr(cli, "Pyro", StubPyro)
cli.main() cli.main()
output = capsys.readouterr().out output = capsys.readouterr().out
assert "Task: task-123" in output assert "Workspace ID: workspace-123" in output
assert "Workspace: /workspace" in output assert "Workspace: /workspace" in output
assert "Workspace seed: directory from /tmp/repo" in output assert "Workspace seed: directory from /tmp/repo" in output
def test_cli_task_exec_prints_human_output( def test_cli_workspace_exec_prints_human_output(
monkeypatch: pytest.MonkeyPatch, monkeypatch: pytest.MonkeyPatch,
capsys: pytest.CaptureFixture[str], capsys: pytest.CaptureFixture[str],
) -> None: ) -> None:
class StubPyro: class StubPyro:
def exec_task(self, task_id: str, *, command: str, timeout_seconds: int) -> dict[str, Any]: def exec_workspace(
assert task_id == "task-123" self,
workspace_id: str,
*,
command: str,
timeout_seconds: int,
) -> dict[str, Any]:
assert workspace_id == "workspace-123"
assert command == "cat note.txt" assert command == "cat note.txt"
assert timeout_seconds == 30 assert timeout_seconds == 30
return { return {
"task_id": task_id, "workspace_id": workspace_id,
"sequence": 2, "sequence": 2,
"cwd": "/workspace", "cwd": "/workspace",
"execution_mode": "guest_vsock", "execution_mode": "guest_vsock",
@ -452,9 +467,9 @@ def test_cli_task_exec_prints_human_output(
class StubParser: class StubParser:
def parse_args(self) -> argparse.Namespace: def parse_args(self) -> argparse.Namespace:
return argparse.Namespace( return argparse.Namespace(
command="task", command="workspace",
task_command="exec", workspace_command="exec",
task_id="task-123", workspace_id="workspace-123",
timeout_seconds=30, timeout_seconds=30,
json=False, json=False,
command_args=["--", "cat", "note.txt"], command_args=["--", "cat", "note.txt"],
@ -465,19 +480,28 @@ def test_cli_task_exec_prints_human_output(
cli.main() cli.main()
captured = capsys.readouterr() captured = capsys.readouterr()
assert captured.out == "hello\n" assert captured.out == "hello\n"
assert "[task-exec] task_id=task-123 sequence=2 cwd=/workspace" in captured.err assert (
"[workspace-exec] workspace_id=workspace-123 sequence=2 cwd=/workspace"
in captured.err
)
def test_cli_task_sync_push_prints_json( def test_cli_workspace_sync_push_prints_json(
monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str] monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]
) -> None: ) -> None:
class StubPyro: class StubPyro:
def push_task_sync(self, task_id: str, source_path: str, *, dest: str) -> dict[str, Any]: def push_workspace_sync(
assert task_id == "task-123" self,
workspace_id: str,
source_path: str,
*,
dest: str,
) -> dict[str, Any]:
assert workspace_id == "workspace-123"
assert source_path == "./repo" assert source_path == "./repo"
assert dest == "src" assert dest == "src"
return { return {
"task_id": task_id, "workspace_id": workspace_id,
"execution_mode": "guest_vsock", "execution_mode": "guest_vsock",
"workspace_sync": { "workspace_sync": {
"mode": "directory", "mode": "directory",
@ -491,10 +515,10 @@ def test_cli_task_sync_push_prints_json(
class StubParser: class StubParser:
def parse_args(self) -> argparse.Namespace: def parse_args(self) -> argparse.Namespace:
return argparse.Namespace( return argparse.Namespace(
command="task", command="workspace",
task_command="sync", workspace_command="sync",
task_sync_command="push", workspace_sync_command="push",
task_id="task-123", workspace_id="workspace-123",
source_path="./repo", source_path="./repo",
dest="src", dest="src",
json=True, json=True,
@ -507,17 +531,23 @@ def test_cli_task_sync_push_prints_json(
assert output["workspace_sync"]["destination"] == "/workspace/src" assert output["workspace_sync"]["destination"] == "/workspace/src"
def test_cli_task_sync_push_prints_human( def test_cli_workspace_sync_push_prints_human(
monkeypatch: pytest.MonkeyPatch, monkeypatch: pytest.MonkeyPatch,
capsys: pytest.CaptureFixture[str], capsys: pytest.CaptureFixture[str],
) -> None: ) -> None:
class StubPyro: class StubPyro:
def push_task_sync(self, task_id: str, source_path: str, *, dest: str) -> dict[str, Any]: def push_workspace_sync(
assert task_id == "task-123" self,
workspace_id: str,
source_path: str,
*,
dest: str,
) -> dict[str, Any]:
assert workspace_id == "workspace-123"
assert source_path == "./repo" assert source_path == "./repo"
assert dest == "/workspace" assert dest == "/workspace"
return { return {
"task_id": task_id, "workspace_id": workspace_id,
"execution_mode": "guest_vsock", "execution_mode": "guest_vsock",
"workspace_sync": { "workspace_sync": {
"mode": "directory", "mode": "directory",
@ -531,10 +561,10 @@ def test_cli_task_sync_push_prints_human(
class StubParser: class StubParser:
def parse_args(self) -> argparse.Namespace: def parse_args(self) -> argparse.Namespace:
return argparse.Namespace( return argparse.Namespace(
command="task", command="workspace",
task_command="sync", workspace_command="sync",
task_sync_command="push", workspace_sync_command="push",
task_id="task-123", workspace_id="workspace-123",
source_path="./repo", source_path="./repo",
dest="/workspace", dest="/workspace",
json=False, json=False,
@ -544,22 +574,22 @@ def test_cli_task_sync_push_prints_human(
monkeypatch.setattr(cli, "Pyro", StubPyro) monkeypatch.setattr(cli, "Pyro", StubPyro)
cli.main() cli.main()
output = capsys.readouterr().out output = capsys.readouterr().out
assert "[task-sync] task_id=task-123 mode=directory source=/tmp/repo" in output assert "[workspace-sync] workspace_id=workspace-123 mode=directory source=/tmp/repo" in output
assert ( assert (
"destination=/workspace entry_count=2 bytes_written=12 " "destination=/workspace entry_count=2 bytes_written=12 "
"execution_mode=guest_vsock" "execution_mode=guest_vsock"
) in output ) in output
def test_cli_task_logs_and_delete_print_human( def test_cli_workspace_logs_and_delete_print_human(
monkeypatch: pytest.MonkeyPatch, monkeypatch: pytest.MonkeyPatch,
capsys: pytest.CaptureFixture[str], capsys: pytest.CaptureFixture[str],
) -> None: ) -> None:
class StubPyro: class StubPyro:
def logs_task(self, task_id: str) -> dict[str, Any]: def logs_workspace(self, workspace_id: str) -> dict[str, Any]:
assert task_id == "task-123" assert workspace_id == "workspace-123"
return { return {
"task_id": task_id, "workspace_id": workspace_id,
"count": 1, "count": 1,
"entries": [ "entries": [
{ {
@ -574,16 +604,16 @@ def test_cli_task_logs_and_delete_print_human(
], ],
} }
def delete_task(self, task_id: str) -> dict[str, Any]: def delete_workspace(self, workspace_id: str) -> dict[str, Any]:
assert task_id == "task-123" assert workspace_id == "workspace-123"
return {"task_id": task_id, "deleted": True} return {"workspace_id": workspace_id, "deleted": True}
class LogsParser: class LogsParser:
def parse_args(self) -> argparse.Namespace: def parse_args(self) -> argparse.Namespace:
return argparse.Namespace( return argparse.Namespace(
command="task", command="workspace",
task_command="logs", workspace_command="logs",
task_id="task-123", workspace_id="workspace-123",
json=False, json=False,
) )
@ -594,9 +624,9 @@ def test_cli_task_logs_and_delete_print_human(
class DeleteParser: class DeleteParser:
def parse_args(self) -> argparse.Namespace: def parse_args(self) -> argparse.Namespace:
return argparse.Namespace( return argparse.Namespace(
command="task", command="workspace",
task_command="delete", workspace_command="delete",
task_id="task-123", workspace_id="workspace-123",
json=False, json=False,
) )
@ -605,28 +635,28 @@ def test_cli_task_logs_and_delete_print_human(
output = capsys.readouterr().out output = capsys.readouterr().out
assert "#1 exit_code=0 duration_ms=2 cwd=/workspace" in output assert "#1 exit_code=0 duration_ms=2 cwd=/workspace" in output
assert "Deleted task: task-123" in output assert "Deleted workspace: workspace-123" in output
def test_cli_task_status_and_delete_print_json( def test_cli_workspace_status_and_delete_print_json(
monkeypatch: pytest.MonkeyPatch, monkeypatch: pytest.MonkeyPatch,
capsys: pytest.CaptureFixture[str], capsys: pytest.CaptureFixture[str],
) -> None: ) -> None:
class StubPyro: class StubPyro:
def status_task(self, task_id: str) -> dict[str, Any]: def status_workspace(self, workspace_id: str) -> dict[str, Any]:
assert task_id == "task-123" assert workspace_id == "workspace-123"
return {"task_id": task_id, "state": "started"} return {"workspace_id": workspace_id, "state": "started"}
def delete_task(self, task_id: str) -> dict[str, Any]: def delete_workspace(self, workspace_id: str) -> dict[str, Any]:
assert task_id == "task-123" assert workspace_id == "workspace-123"
return {"task_id": task_id, "deleted": True} return {"workspace_id": workspace_id, "deleted": True}
class StatusParser: class StatusParser:
def parse_args(self) -> argparse.Namespace: def parse_args(self) -> argparse.Namespace:
return argparse.Namespace( return argparse.Namespace(
command="task", command="workspace",
task_command="status", workspace_command="status",
task_id="task-123", workspace_id="workspace-123",
json=True, json=True,
) )
@ -639,9 +669,9 @@ def test_cli_task_status_and_delete_print_json(
class DeleteParser: class DeleteParser:
def parse_args(self) -> argparse.Namespace: def parse_args(self) -> argparse.Namespace:
return argparse.Namespace( return argparse.Namespace(
command="task", command="workspace",
task_command="delete", workspace_command="delete",
task_id="task-123", workspace_id="workspace-123",
json=True, json=True,
) )
@ -651,20 +681,26 @@ def test_cli_task_status_and_delete_print_json(
assert deleted["deleted"] is True assert deleted["deleted"] is True
def test_cli_task_exec_json_error_exits_nonzero( def test_cli_workspace_exec_json_error_exits_nonzero(
monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str] monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]
) -> None: ) -> None:
class StubPyro: class StubPyro:
def exec_task(self, task_id: str, *, command: str, timeout_seconds: int) -> dict[str, Any]: def exec_workspace(
del task_id, command, timeout_seconds self,
raise RuntimeError("task is unavailable") workspace_id: str,
*,
command: str,
timeout_seconds: int,
) -> dict[str, Any]:
del workspace_id, command, timeout_seconds
raise RuntimeError("workspace is unavailable")
class StubParser: class StubParser:
def parse_args(self) -> argparse.Namespace: def parse_args(self) -> argparse.Namespace:
return argparse.Namespace( return argparse.Namespace(
command="task", command="workspace",
task_command="exec", workspace_command="exec",
task_id="task-123", workspace_id="workspace-123",
timeout_seconds=30, timeout_seconds=30,
json=True, json=True,
command_args=["--", "true"], command_args=["--", "true"],

View file

@ -17,10 +17,10 @@ from pyro_mcp.contract import (
PUBLIC_CLI_DEMO_SUBCOMMANDS, PUBLIC_CLI_DEMO_SUBCOMMANDS,
PUBLIC_CLI_ENV_SUBCOMMANDS, PUBLIC_CLI_ENV_SUBCOMMANDS,
PUBLIC_CLI_RUN_FLAGS, PUBLIC_CLI_RUN_FLAGS,
PUBLIC_CLI_TASK_CREATE_FLAGS, PUBLIC_CLI_WORKSPACE_CREATE_FLAGS,
PUBLIC_CLI_TASK_SUBCOMMANDS, PUBLIC_CLI_WORKSPACE_SUBCOMMANDS,
PUBLIC_CLI_TASK_SYNC_PUSH_FLAGS, PUBLIC_CLI_WORKSPACE_SYNC_PUSH_FLAGS,
PUBLIC_CLI_TASK_SYNC_SUBCOMMANDS, PUBLIC_CLI_WORKSPACE_SYNC_SUBCOMMANDS,
PUBLIC_MCP_TOOLS, PUBLIC_MCP_TOOLS,
PUBLIC_SDK_METHODS, PUBLIC_SDK_METHODS,
) )
@ -67,22 +67,25 @@ def test_public_cli_help_lists_commands_and_run_flags() -> None:
for subcommand_name in PUBLIC_CLI_ENV_SUBCOMMANDS: for subcommand_name in PUBLIC_CLI_ENV_SUBCOMMANDS:
assert subcommand_name in env_help_text assert subcommand_name in env_help_text
task_help_text = _subparser_choice(parser, "task").format_help() workspace_help_text = _subparser_choice(parser, "workspace").format_help()
for subcommand_name in PUBLIC_CLI_TASK_SUBCOMMANDS: for subcommand_name in PUBLIC_CLI_WORKSPACE_SUBCOMMANDS:
assert subcommand_name in task_help_text assert subcommand_name in workspace_help_text
task_create_help_text = _subparser_choice( workspace_create_help_text = _subparser_choice(
_subparser_choice(parser, "task"), "create" _subparser_choice(parser, "workspace"), "create"
).format_help() ).format_help()
for flag in PUBLIC_CLI_TASK_CREATE_FLAGS: for flag in PUBLIC_CLI_WORKSPACE_CREATE_FLAGS:
assert flag in task_create_help_text assert flag in workspace_create_help_text
task_sync_help_text = _subparser_choice(_subparser_choice(parser, "task"), "sync").format_help() workspace_sync_help_text = _subparser_choice(
for subcommand_name in PUBLIC_CLI_TASK_SYNC_SUBCOMMANDS: _subparser_choice(parser, "workspace"),
assert subcommand_name in task_sync_help_text "sync",
task_sync_push_help_text = _subparser_choice(
_subparser_choice(_subparser_choice(parser, "task"), "sync"), "push"
).format_help() ).format_help()
for flag in PUBLIC_CLI_TASK_SYNC_PUSH_FLAGS: for subcommand_name in PUBLIC_CLI_WORKSPACE_SYNC_SUBCOMMANDS:
assert flag in task_sync_push_help_text assert subcommand_name in workspace_sync_help_text
workspace_sync_push_help_text = _subparser_choice(
_subparser_choice(_subparser_choice(parser, "workspace"), "sync"), "push"
).format_help()
for flag in PUBLIC_CLI_WORKSPACE_SYNC_PUSH_FLAGS:
assert flag in workspace_sync_push_help_text
demo_help_text = _subparser_choice(parser, "demo").format_help() demo_help_text = _subparser_choice(parser, "demo").format_help()
for subcommand_name in PUBLIC_CLI_DEMO_SUBCOMMANDS: for subcommand_name in PUBLIC_CLI_DEMO_SUBCOMMANDS:

View file

@ -31,9 +31,9 @@ def test_create_server_registers_vm_tools(tmp_path: Path) -> None:
assert "vm_network_info" in tool_names assert "vm_network_info" in tool_names
assert "vm_run" in tool_names assert "vm_run" in tool_names
assert "vm_status" in tool_names assert "vm_status" in tool_names
assert "task_create" in tool_names assert "workspace_create" in tool_names
assert "task_logs" in tool_names assert "workspace_logs" in tool_names
assert "task_sync_push" in tool_names assert "workspace_sync_push" in tool_names
def test_vm_run_round_trip(tmp_path: Path) -> None: def test_vm_run_round_trip(tmp_path: Path) -> None:
@ -166,7 +166,7 @@ def test_server_main_runs_stdio_transport(monkeypatch: pytest.MonkeyPatch) -> No
assert called == {"transport": "stdio"} assert called == {"transport": "stdio"}
def test_task_tools_round_trip(tmp_path: Path) -> None: def test_workspace_tools_round_trip(tmp_path: Path) -> None:
manager = VmManager( manager = VmManager(
backend_name="mock", backend_name="mock",
base_dir=tmp_path / "vms", base_dir=tmp_path / "vms",
@ -194,23 +194,23 @@ def test_task_tools_round_trip(tmp_path: Path) -> None:
server = create_server(manager=manager) server = create_server(manager=manager)
created = _extract_structured( created = _extract_structured(
await server.call_tool( await server.call_tool(
"task_create", "workspace_create",
{ {
"environment": "debian:12-base", "environment": "debian:12-base",
"allow_host_compat": True, "allow_host_compat": True,
"source_path": str(source_dir), "seed_path": str(source_dir),
}, },
) )
) )
task_id = str(created["task_id"]) workspace_id = str(created["workspace_id"])
update_dir = tmp_path / "update" update_dir = tmp_path / "update"
update_dir.mkdir() update_dir.mkdir()
(update_dir / "more.txt").write_text("more\n", encoding="utf-8") (update_dir / "more.txt").write_text("more\n", encoding="utf-8")
synced = _extract_structured( synced = _extract_structured(
await server.call_tool( await server.call_tool(
"task_sync_push", "workspace_sync_push",
{ {
"task_id": task_id, "workspace_id": workspace_id,
"source_path": str(update_dir), "source_path": str(update_dir),
"dest": "subdir", "dest": "subdir",
}, },
@ -218,15 +218,19 @@ def test_task_tools_round_trip(tmp_path: Path) -> None:
) )
executed = _extract_structured( executed = _extract_structured(
await server.call_tool( await server.call_tool(
"task_exec", "workspace_exec",
{ {
"task_id": task_id, "workspace_id": workspace_id,
"command": "cat subdir/more.txt", "command": "cat subdir/more.txt",
}, },
) )
) )
logs = _extract_structured(await server.call_tool("task_logs", {"task_id": task_id})) logs = _extract_structured(
deleted = _extract_structured(await server.call_tool("task_delete", {"task_id": task_id})) await server.call_tool("workspace_logs", {"workspace_id": workspace_id})
)
deleted = _extract_structured(
await server.call_tool("workspace_delete", {"workspace_id": workspace_id})
)
return created, synced, executed, logs, deleted return created, synced, executed, logs, deleted
created, synced, executed, logs, deleted = asyncio.run(_run()) created, synced, executed, logs, deleted = asyncio.run(_run())

View file

@ -267,48 +267,48 @@ def test_vm_manager_run_vm(tmp_path: Path) -> None:
assert str(result["stdout"]) == "ok\n" assert str(result["stdout"]) == "ok\n"
def test_task_lifecycle_and_logs(tmp_path: Path) -> None: def test_workspace_lifecycle_and_logs(tmp_path: Path) -> None:
manager = VmManager( manager = VmManager(
backend_name="mock", backend_name="mock",
base_dir=tmp_path / "vms", base_dir=tmp_path / "vms",
network_manager=TapNetworkManager(enabled=False), network_manager=TapNetworkManager(enabled=False),
) )
created = manager.create_task( created = manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
) )
task_id = str(created["task_id"]) workspace_id = str(created["workspace_id"])
assert created["state"] == "started" assert created["state"] == "started"
assert created["workspace_path"] == "/workspace" assert created["workspace_path"] == "/workspace"
first = manager.exec_task( first = manager.exec_workspace(
task_id, workspace_id,
command="printf 'hello\\n' > note.txt", command="printf 'hello\\n' > note.txt",
timeout_seconds=30, timeout_seconds=30,
) )
second = manager.exec_task(task_id, command="cat note.txt", timeout_seconds=30) second = manager.exec_workspace(workspace_id, command="cat note.txt", timeout_seconds=30)
assert first["exit_code"] == 0 assert first["exit_code"] == 0
assert second["stdout"] == "hello\n" assert second["stdout"] == "hello\n"
status = manager.status_task(task_id) status = manager.status_workspace(workspace_id)
assert status["command_count"] == 2 assert status["command_count"] == 2
assert status["last_command"] is not None assert status["last_command"] is not None
logs = manager.logs_task(task_id) logs = manager.logs_workspace(workspace_id)
assert logs["count"] == 2 assert logs["count"] == 2
entries = logs["entries"] entries = logs["entries"]
assert isinstance(entries, list) assert isinstance(entries, list)
assert entries[1]["stdout"] == "hello\n" assert entries[1]["stdout"] == "hello\n"
deleted = manager.delete_task(task_id) deleted = manager.delete_workspace(workspace_id)
assert deleted["deleted"] is True assert deleted["deleted"] is True
with pytest.raises(ValueError, match="does not exist"): with pytest.raises(ValueError, match="does not exist"):
manager.status_task(task_id) manager.status_workspace(workspace_id)
def test_task_create_seeds_directory_source_into_workspace(tmp_path: Path) -> None: def test_workspace_create_seeds_directory_source_into_workspace(tmp_path: Path) -> None:
source_dir = tmp_path / "seed" source_dir = tmp_path / "seed"
source_dir.mkdir() source_dir.mkdir()
(source_dir / "note.txt").write_text("hello\n", encoding="utf-8") (source_dir / "note.txt").write_text("hello\n", encoding="utf-8")
@ -319,25 +319,25 @@ def test_task_create_seeds_directory_source_into_workspace(tmp_path: Path) -> No
network_manager=TapNetworkManager(enabled=False), network_manager=TapNetworkManager(enabled=False),
) )
created = manager.create_task( created = manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
source_path=source_dir, seed_path=source_dir,
) )
task_id = str(created["task_id"]) workspace_id = str(created["workspace_id"])
workspace_seed = created["workspace_seed"] workspace_seed = created["workspace_seed"]
assert workspace_seed["mode"] == "directory" assert workspace_seed["mode"] == "directory"
assert workspace_seed["source_path"] == str(source_dir.resolve()) assert workspace_seed["seed_path"] == str(source_dir.resolve())
executed = manager.exec_task(task_id, command="cat note.txt", timeout_seconds=30) executed = manager.exec_workspace(workspace_id, command="cat note.txt", timeout_seconds=30)
assert executed["stdout"] == "hello\n" assert executed["stdout"] == "hello\n"
status = manager.status_task(task_id) status = manager.status_workspace(workspace_id)
assert status["workspace_seed"]["mode"] == "directory" assert status["workspace_seed"]["mode"] == "directory"
assert status["workspace_seed"]["source_path"] == str(source_dir.resolve()) assert status["workspace_seed"]["seed_path"] == str(source_dir.resolve())
def test_task_create_seeds_tar_archive_into_workspace(tmp_path: Path) -> None: def test_workspace_create_seeds_tar_archive_into_workspace(tmp_path: Path) -> None:
archive_path = tmp_path / "seed.tgz" archive_path = tmp_path / "seed.tgz"
nested_dir = tmp_path / "src" nested_dir = tmp_path / "src"
nested_dir.mkdir() nested_dir.mkdir()
@ -351,19 +351,19 @@ def test_task_create_seeds_tar_archive_into_workspace(tmp_path: Path) -> None:
network_manager=TapNetworkManager(enabled=False), network_manager=TapNetworkManager(enabled=False),
) )
created = manager.create_task( created = manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
source_path=archive_path, seed_path=archive_path,
) )
task_id = str(created["task_id"]) workspace_id = str(created["workspace_id"])
assert created["workspace_seed"]["mode"] == "tar_archive" assert created["workspace_seed"]["mode"] == "tar_archive"
executed = manager.exec_task(task_id, command="cat note.txt", timeout_seconds=30) executed = manager.exec_workspace(workspace_id, command="cat note.txt", timeout_seconds=30)
assert executed["stdout"] == "archive\n" assert executed["stdout"] == "archive\n"
def test_task_sync_push_updates_started_workspace(tmp_path: Path) -> None: def test_workspace_sync_push_updates_started_workspace(tmp_path: Path) -> None:
source_dir = tmp_path / "seed" source_dir = tmp_path / "seed"
source_dir.mkdir() source_dir.mkdir()
(source_dir / "note.txt").write_text("hello\n", encoding="utf-8") (source_dir / "note.txt").write_text("hello\n", encoding="utf-8")
@ -377,26 +377,30 @@ def test_task_sync_push_updates_started_workspace(tmp_path: Path) -> None:
network_manager=TapNetworkManager(enabled=False), network_manager=TapNetworkManager(enabled=False),
) )
created = manager.create_task( created = manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
source_path=source_dir, seed_path=source_dir,
) )
task_id = str(created["task_id"]) workspace_id = str(created["workspace_id"])
synced = manager.push_task_sync(task_id, source_path=update_dir, dest="subdir") synced = manager.push_workspace_sync(workspace_id, source_path=update_dir, dest="subdir")
assert synced["workspace_sync"]["mode"] == "directory" assert synced["workspace_sync"]["mode"] == "directory"
assert synced["workspace_sync"]["destination"] == "/workspace/subdir" assert synced["workspace_sync"]["destination"] == "/workspace/subdir"
executed = manager.exec_task(task_id, command="cat subdir/more.txt", timeout_seconds=30) executed = manager.exec_workspace(
workspace_id,
command="cat subdir/more.txt",
timeout_seconds=30,
)
assert executed["stdout"] == "more\n" assert executed["stdout"] == "more\n"
status = manager.status_task(task_id) status = manager.status_workspace(workspace_id)
assert status["command_count"] == 1 assert status["command_count"] == 1
assert status["workspace_seed"]["mode"] == "directory" assert status["workspace_seed"]["mode"] == "directory"
def test_task_sync_push_requires_started_task(tmp_path: Path) -> None: def test_workspace_sync_push_requires_started_workspace(tmp_path: Path) -> None:
source_dir = tmp_path / "seed" source_dir = tmp_path / "seed"
source_dir.mkdir() source_dir.mkdir()
(source_dir / "note.txt").write_text("hello\n", encoding="utf-8") (source_dir / "note.txt").write_text("hello\n", encoding="utf-8")
@ -410,22 +414,25 @@ def test_task_sync_push_requires_started_task(tmp_path: Path) -> None:
network_manager=TapNetworkManager(enabled=False), network_manager=TapNetworkManager(enabled=False),
) )
created = manager.create_task( created = manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
source_path=source_dir, seed_path=source_dir,
) )
task_id = str(created["task_id"]) workspace_id = str(created["workspace_id"])
task_path = tmp_path / "vms" / "tasks" / task_id / "task.json" workspace_path = tmp_path / "vms" / "workspaces" / workspace_id / "workspace.json"
payload = json.loads(task_path.read_text(encoding="utf-8")) payload = json.loads(workspace_path.read_text(encoding="utf-8"))
payload["state"] = "stopped" payload["state"] = "stopped"
task_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8") workspace_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
with pytest.raises(RuntimeError, match="must be in 'started' state before task_sync_push"): with pytest.raises(
manager.push_task_sync(task_id, source_path=update_dir) RuntimeError,
match="must be in 'started' state before workspace_sync_push",
):
manager.push_workspace_sync(workspace_id, source_path=update_dir)
def test_task_sync_push_rejects_destination_outside_workspace(tmp_path: Path) -> None: def test_workspace_sync_push_rejects_destination_outside_workspace(tmp_path: Path) -> None:
source_dir = tmp_path / "seed" source_dir = tmp_path / "seed"
source_dir.mkdir() source_dir.mkdir()
(source_dir / "note.txt").write_text("hello\n", encoding="utf-8") (source_dir / "note.txt").write_text("hello\n", encoding="utf-8")
@ -436,18 +443,18 @@ def test_task_sync_push_rejects_destination_outside_workspace(tmp_path: Path) ->
network_manager=TapNetworkManager(enabled=False), network_manager=TapNetworkManager(enabled=False),
) )
task_id = str( workspace_id = str(
manager.create_task( manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
)["task_id"] )["workspace_id"]
) )
with pytest.raises(ValueError, match="workspace destination must stay inside /workspace"): with pytest.raises(ValueError, match="workspace destination must stay inside /workspace"):
manager.push_task_sync(task_id, source_path=source_dir, dest="../escape") manager.push_workspace_sync(workspace_id, source_path=source_dir, dest="../escape")
def test_task_create_rejects_unsafe_seed_archive(tmp_path: Path) -> None: def test_workspace_create_rejects_unsafe_seed_archive(tmp_path: Path) -> None:
archive_path = tmp_path / "bad.tgz" archive_path = tmp_path / "bad.tgz"
with tarfile.open(archive_path, "w:gz") as archive: with tarfile.open(archive_path, "w:gz") as archive:
payload = b"bad\n" payload = b"bad\n"
@ -462,15 +469,15 @@ def test_task_create_rejects_unsafe_seed_archive(tmp_path: Path) -> None:
) )
with pytest.raises(RuntimeError, match="unsafe archive member path"): with pytest.raises(RuntimeError, match="unsafe archive member path"):
manager.create_task( manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
source_path=archive_path, seed_path=archive_path,
) )
assert list((tmp_path / "vms" / "tasks").iterdir()) == [] assert list((tmp_path / "vms" / "workspaces").iterdir()) == []
def test_task_create_rejects_archive_that_writes_through_symlink(tmp_path: Path) -> None: def test_workspace_create_rejects_archive_that_writes_through_symlink(tmp_path: Path) -> None:
archive_path = tmp_path / "bad-symlink.tgz" archive_path = tmp_path / "bad-symlink.tgz"
with tarfile.open(archive_path, "w:gz") as archive: with tarfile.open(archive_path, "w:gz") as archive:
symlink_info = tarfile.TarInfo(name="linked") symlink_info = tarfile.TarInfo(name="linked")
@ -490,14 +497,14 @@ def test_task_create_rejects_archive_that_writes_through_symlink(tmp_path: Path)
) )
with pytest.raises(RuntimeError, match="traverse through a symlinked path"): with pytest.raises(RuntimeError, match="traverse through a symlinked path"):
manager.create_task( manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
source_path=archive_path, seed_path=archive_path,
) )
def test_task_create_cleans_up_on_seed_failure( def test_workspace_create_cleans_up_on_seed_failure(
tmp_path: Path, monkeypatch: pytest.MonkeyPatch tmp_path: Path, monkeypatch: pytest.MonkeyPatch
) -> None: ) -> None:
source_dir = tmp_path / "seed" source_dir = tmp_path / "seed"
@ -517,27 +524,27 @@ def test_task_create_cleans_up_on_seed_failure(
monkeypatch.setattr(manager._backend, "import_archive", _boom) # noqa: SLF001 monkeypatch.setattr(manager._backend, "import_archive", _boom) # noqa: SLF001
with pytest.raises(RuntimeError, match="seed import failed"): with pytest.raises(RuntimeError, match="seed import failed"):
manager.create_task( manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
source_path=source_dir, seed_path=source_dir,
) )
assert list((tmp_path / "vms" / "tasks").iterdir()) == [] assert list((tmp_path / "vms" / "workspaces").iterdir()) == []
def test_task_rehydrates_across_manager_processes(tmp_path: Path) -> None: def test_workspace_rehydrates_across_manager_processes(tmp_path: Path) -> None:
base_dir = tmp_path / "vms" base_dir = tmp_path / "vms"
manager = VmManager( manager = VmManager(
backend_name="mock", backend_name="mock",
base_dir=base_dir, base_dir=base_dir,
network_manager=TapNetworkManager(enabled=False), network_manager=TapNetworkManager(enabled=False),
) )
task_id = str( workspace_id = str(
manager.create_task( manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
)["task_id"] )["workspace_id"]
) )
other = VmManager( other = VmManager(
@ -545,33 +552,33 @@ def test_task_rehydrates_across_manager_processes(tmp_path: Path) -> None:
base_dir=base_dir, base_dir=base_dir,
network_manager=TapNetworkManager(enabled=False), network_manager=TapNetworkManager(enabled=False),
) )
executed = other.exec_task(task_id, command="printf 'ok\\n'", timeout_seconds=30) executed = other.exec_workspace(workspace_id, command="printf 'ok\\n'", timeout_seconds=30)
assert executed["exit_code"] == 0 assert executed["exit_code"] == 0
assert executed["stdout"] == "ok\n" assert executed["stdout"] == "ok\n"
logs = other.logs_task(task_id) logs = other.logs_workspace(workspace_id)
assert logs["count"] == 1 assert logs["count"] == 1
def test_task_requires_started_state(tmp_path: Path) -> None: def test_workspace_requires_started_state(tmp_path: Path) -> None:
manager = VmManager( manager = VmManager(
backend_name="mock", backend_name="mock",
base_dir=tmp_path / "vms", base_dir=tmp_path / "vms",
network_manager=TapNetworkManager(enabled=False), network_manager=TapNetworkManager(enabled=False),
) )
task_id = str( workspace_id = str(
manager.create_task( manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
)["task_id"] )["workspace_id"]
) )
task_dir = tmp_path / "vms" / "tasks" / task_id / "task.json" workspace_path = tmp_path / "vms" / "workspaces" / workspace_id / "workspace.json"
payload = json.loads(task_dir.read_text(encoding="utf-8")) payload = json.loads(workspace_path.read_text(encoding="utf-8"))
payload["state"] = "stopped" payload["state"] = "stopped"
task_dir.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8") workspace_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
with pytest.raises(RuntimeError, match="must be in 'started' state"): with pytest.raises(RuntimeError, match="must be in 'started' state"):
manager.exec_task(task_id, command="true", timeout_seconds=30) manager.exec_workspace(workspace_id, command="true", timeout_seconds=30)
def test_vm_manager_firecracker_backend_path( def test_vm_manager_firecracker_backend_path(
@ -708,7 +715,7 @@ def test_copy_rootfs_falls_back_to_copy2(
assert dest.read_text(encoding="utf-8") == "payload" assert dest.read_text(encoding="utf-8") == "payload"
def test_task_create_cleans_up_on_start_failure( def test_workspace_create_cleans_up_on_start_failure(
tmp_path: Path, monkeypatch: pytest.MonkeyPatch tmp_path: Path, monkeypatch: pytest.MonkeyPatch
) -> None: ) -> None:
manager = VmManager( manager = VmManager(
@ -724,9 +731,9 @@ def test_task_create_cleans_up_on_start_failure(
monkeypatch.setattr(manager._backend, "start", _boom) # noqa: SLF001 monkeypatch.setattr(manager._backend, "start", _boom) # noqa: SLF001
with pytest.raises(RuntimeError, match="boom"): with pytest.raises(RuntimeError, match="boom"):
manager.create_task(environment="debian:12-base", allow_host_compat=True) manager.create_workspace(environment="debian:12-base", allow_host_compat=True)
assert list((tmp_path / "vms" / "tasks").iterdir()) == [] assert list((tmp_path / "vms" / "workspaces").iterdir()) == []
def test_exec_instance_wraps_guest_workspace_command(tmp_path: Path) -> None: def test_exec_instance_wraps_guest_workspace_command(tmp_path: Path) -> None:
@ -786,53 +793,53 @@ def test_exec_instance_wraps_guest_workspace_command(tmp_path: Path) -> None:
assert captured["workdir"] is None assert captured["workdir"] is None
def test_status_task_marks_dead_backing_process_stopped(tmp_path: Path) -> None: def test_status_workspace_marks_dead_backing_process_stopped(tmp_path: Path) -> None:
manager = VmManager( manager = VmManager(
backend_name="mock", backend_name="mock",
base_dir=tmp_path / "vms", base_dir=tmp_path / "vms",
network_manager=TapNetworkManager(enabled=False), network_manager=TapNetworkManager(enabled=False),
) )
task_id = str( workspace_id = str(
manager.create_task( manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
)["task_id"] )["workspace_id"]
) )
task_path = tmp_path / "vms" / "tasks" / task_id / "task.json" workspace_path = tmp_path / "vms" / "workspaces" / workspace_id / "workspace.json"
payload = json.loads(task_path.read_text(encoding="utf-8")) payload = json.loads(workspace_path.read_text(encoding="utf-8"))
payload["metadata"]["execution_mode"] = "guest_vsock" payload["metadata"]["execution_mode"] = "guest_vsock"
payload["firecracker_pid"] = 999999 payload["firecracker_pid"] = 999999
task_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8") workspace_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
status = manager.status_task(task_id) status = manager.status_workspace(workspace_id)
assert status["state"] == "stopped" assert status["state"] == "stopped"
updated_payload = json.loads(task_path.read_text(encoding="utf-8")) updated_payload = json.loads(workspace_path.read_text(encoding="utf-8"))
assert "backing guest process" in str(updated_payload.get("last_error", "")) assert "backing guest process" in str(updated_payload.get("last_error", ""))
def test_reap_expired_tasks_removes_invalid_and_expired_records(tmp_path: Path) -> None: def test_reap_expired_workspaces_removes_invalid_and_expired_records(tmp_path: Path) -> None:
manager = VmManager( manager = VmManager(
backend_name="mock", backend_name="mock",
base_dir=tmp_path / "vms", base_dir=tmp_path / "vms",
network_manager=TapNetworkManager(enabled=False), network_manager=TapNetworkManager(enabled=False),
) )
invalid_dir = tmp_path / "vms" / "tasks" / "invalid" invalid_dir = tmp_path / "vms" / "workspaces" / "invalid"
invalid_dir.mkdir(parents=True) invalid_dir.mkdir(parents=True)
(invalid_dir / "task.json").write_text("[]", encoding="utf-8") (invalid_dir / "workspace.json").write_text("[]", encoding="utf-8")
task_id = str( workspace_id = str(
manager.create_task( manager.create_workspace(
environment="debian:12-base", environment="debian:12-base",
allow_host_compat=True, allow_host_compat=True,
)["task_id"] )["workspace_id"]
) )
task_path = tmp_path / "vms" / "tasks" / task_id / "task.json" workspace_path = tmp_path / "vms" / "workspaces" / workspace_id / "workspace.json"
payload = json.loads(task_path.read_text(encoding="utf-8")) payload = json.loads(workspace_path.read_text(encoding="utf-8"))
payload["expires_at"] = 0.0 payload["expires_at"] = 0.0
task_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8") workspace_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
with manager._lock: # noqa: SLF001 with manager._lock: # noqa: SLF001
manager._reap_expired_tasks_locked(time.time()) # noqa: SLF001 manager._reap_expired_workspaces_locked(time.time()) # noqa: SLF001
assert not invalid_dir.exists() assert not invalid_dir.exists()
assert not (tmp_path / "vms" / "tasks" / task_id).exists() assert not (tmp_path / "vms" / "workspaces" / workspace_id).exists()

2
uv.lock generated
View file

@ -706,7 +706,7 @@ crypto = [
[[package]] [[package]]
name = "pyro-mcp" name = "pyro-mcp"
version = "2.3.0" version = "2.4.0"
source = { editable = "." } source = { editable = "." }
dependencies = [ dependencies = [
{ name = "mcp" }, { name = "mcp" },