Pivot persistent APIs to workspaces
Replace the public persistent-sandbox contract with workspace-first naming across CLI, SDK, MCP, payloads, and on-disk state. Rename the task surface to workspace equivalents, switch create-time seeding to `seed_path`, and store records under `workspaces/<workspace_id>/workspace.json` without carrying legacy task aliases or migrating old local task state. Keep `pyro run` and `vm_*` unchanged. Validation covered `uv lock`, focused public-contract/API/CLI/manager tests, `UV_CACHE_DIR=.uv-cache make check`, and `UV_CACHE_DIR=.uv-cache make dist-check`.
This commit is contained in:
parent
f57454bcb4
commit
48b82d8386
13 changed files with 743 additions and 618 deletions
|
|
@ -2,6 +2,15 @@
|
|||
|
||||
All notable user-visible changes to `pyro-mcp` are documented here.
|
||||
|
||||
## 2.4.0
|
||||
|
||||
- Renamed the public persistent-workspace surface from `task_*` to `workspace_*` across the CLI,
|
||||
Python SDK, and MCP server in one clean cut with no compatibility aliases.
|
||||
- Renamed create-time seeding from `source_path` to `seed_path` for workspace creation while keeping
|
||||
later `workspace sync push` imports on `source_path`.
|
||||
- Switched persisted local records from `tasks/*/task.json` to `workspaces/*/workspace.json` and
|
||||
updated the main docs/examples to the workspace-first language.
|
||||
|
||||
## 2.3.0
|
||||
|
||||
- Added `task sync push` across the CLI, Python SDK, and MCP server so started task workspaces can
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
[project]
|
||||
name = "pyro-mcp"
|
||||
version = "2.3.0"
|
||||
description = "Curated Linux environments for ephemeral Firecracker-backed VM execution."
|
||||
version = "2.4.0"
|
||||
description = "Ephemeral Firecracker sandboxes with curated environments, persistent workspaces, and MCP tools."
|
||||
readme = "README.md"
|
||||
license = { file = "LICENSE" }
|
||||
authors = [
|
||||
|
|
|
|||
|
|
@ -77,7 +77,7 @@ class Pyro:
|
|||
def exec_vm(self, vm_id: str, *, command: str, timeout_seconds: int = 30) -> dict[str, Any]:
|
||||
return self._manager.exec_vm(vm_id, command=command, timeout_seconds=timeout_seconds)
|
||||
|
||||
def create_task(
|
||||
def create_workspace(
|
||||
self,
|
||||
*,
|
||||
environment: str,
|
||||
|
|
@ -86,44 +86,52 @@ class Pyro:
|
|||
ttl_seconds: int = DEFAULT_TTL_SECONDS,
|
||||
network: bool = False,
|
||||
allow_host_compat: bool = DEFAULT_ALLOW_HOST_COMPAT,
|
||||
source_path: str | Path | None = None,
|
||||
seed_path: str | Path | None = None,
|
||||
) -> dict[str, Any]:
|
||||
return self._manager.create_task(
|
||||
return self._manager.create_workspace(
|
||||
environment=environment,
|
||||
vcpu_count=vcpu_count,
|
||||
mem_mib=mem_mib,
|
||||
ttl_seconds=ttl_seconds,
|
||||
network=network,
|
||||
allow_host_compat=allow_host_compat,
|
||||
source_path=source_path,
|
||||
seed_path=seed_path,
|
||||
)
|
||||
|
||||
def exec_task(
|
||||
def exec_workspace(
|
||||
self,
|
||||
task_id: str,
|
||||
workspace_id: str,
|
||||
*,
|
||||
command: str,
|
||||
timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS,
|
||||
) -> dict[str, Any]:
|
||||
return self._manager.exec_task(task_id, command=command, timeout_seconds=timeout_seconds)
|
||||
return self._manager.exec_workspace(
|
||||
workspace_id,
|
||||
command=command,
|
||||
timeout_seconds=timeout_seconds,
|
||||
)
|
||||
|
||||
def status_task(self, task_id: str) -> dict[str, Any]:
|
||||
return self._manager.status_task(task_id)
|
||||
def status_workspace(self, workspace_id: str) -> dict[str, Any]:
|
||||
return self._manager.status_workspace(workspace_id)
|
||||
|
||||
def push_task_sync(
|
||||
def push_workspace_sync(
|
||||
self,
|
||||
task_id: str,
|
||||
workspace_id: str,
|
||||
source_path: str | Path,
|
||||
*,
|
||||
dest: str = "/workspace",
|
||||
) -> dict[str, Any]:
|
||||
return self._manager.push_task_sync(task_id, source_path=source_path, dest=dest)
|
||||
return self._manager.push_workspace_sync(
|
||||
workspace_id,
|
||||
source_path=source_path,
|
||||
dest=dest,
|
||||
)
|
||||
|
||||
def logs_task(self, task_id: str) -> dict[str, Any]:
|
||||
return self._manager.logs_task(task_id)
|
||||
def logs_workspace(self, workspace_id: str) -> dict[str, Any]:
|
||||
return self._manager.logs_workspace(workspace_id)
|
||||
|
||||
def delete_task(self, task_id: str) -> dict[str, Any]:
|
||||
return self._manager.delete_task(task_id)
|
||||
def delete_workspace(self, workspace_id: str) -> dict[str, Any]:
|
||||
return self._manager.delete_workspace(workspace_id)
|
||||
|
||||
def stop_vm(self, vm_id: str) -> dict[str, Any]:
|
||||
return self._manager.stop_vm(vm_id)
|
||||
|
|
@ -249,57 +257,61 @@ class Pyro:
|
|||
return self.reap_expired()
|
||||
|
||||
@server.tool()
|
||||
async def task_create(
|
||||
async def workspace_create(
|
||||
environment: str,
|
||||
vcpu_count: int = DEFAULT_VCPU_COUNT,
|
||||
mem_mib: int = DEFAULT_MEM_MIB,
|
||||
ttl_seconds: int = DEFAULT_TTL_SECONDS,
|
||||
network: bool = False,
|
||||
allow_host_compat: bool = DEFAULT_ALLOW_HOST_COMPAT,
|
||||
source_path: str | None = None,
|
||||
seed_path: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Create and start a persistent task workspace."""
|
||||
return self.create_task(
|
||||
"""Create and start a persistent workspace."""
|
||||
return self.create_workspace(
|
||||
environment=environment,
|
||||
vcpu_count=vcpu_count,
|
||||
mem_mib=mem_mib,
|
||||
ttl_seconds=ttl_seconds,
|
||||
network=network,
|
||||
allow_host_compat=allow_host_compat,
|
||||
source_path=source_path,
|
||||
seed_path=seed_path,
|
||||
)
|
||||
|
||||
@server.tool()
|
||||
async def task_exec(
|
||||
task_id: str,
|
||||
async def workspace_exec(
|
||||
workspace_id: str,
|
||||
command: str,
|
||||
timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS,
|
||||
) -> dict[str, Any]:
|
||||
"""Run one command inside an existing task workspace."""
|
||||
return self.exec_task(task_id, command=command, timeout_seconds=timeout_seconds)
|
||||
"""Run one command inside an existing persistent workspace."""
|
||||
return self.exec_workspace(
|
||||
workspace_id,
|
||||
command=command,
|
||||
timeout_seconds=timeout_seconds,
|
||||
)
|
||||
|
||||
@server.tool()
|
||||
async def task_sync_push(
|
||||
task_id: str,
|
||||
async def workspace_sync_push(
|
||||
workspace_id: str,
|
||||
source_path: str,
|
||||
dest: str = "/workspace",
|
||||
) -> dict[str, Any]:
|
||||
"""Push host content into the persistent workspace of a started task."""
|
||||
return self.push_task_sync(task_id, source_path=source_path, dest=dest)
|
||||
"""Push host content into the persistent `/workspace` of a started workspace."""
|
||||
return self.push_workspace_sync(workspace_id, source_path=source_path, dest=dest)
|
||||
|
||||
@server.tool()
|
||||
async def task_status(task_id: str) -> dict[str, Any]:
|
||||
"""Inspect task state and latest command metadata."""
|
||||
return self.status_task(task_id)
|
||||
async def workspace_status(workspace_id: str) -> dict[str, Any]:
|
||||
"""Inspect workspace state and latest command metadata."""
|
||||
return self.status_workspace(workspace_id)
|
||||
|
||||
@server.tool()
|
||||
async def task_logs(task_id: str) -> dict[str, Any]:
|
||||
"""Return persisted command history for one task."""
|
||||
return self.logs_task(task_id)
|
||||
async def workspace_logs(workspace_id: str) -> dict[str, Any]:
|
||||
"""Return persisted command history for one workspace."""
|
||||
return self.logs_workspace(workspace_id)
|
||||
|
||||
@server.tool()
|
||||
async def task_delete(task_id: str) -> dict[str, Any]:
|
||||
"""Delete a task workspace and its backing sandbox."""
|
||||
return self.delete_task(task_id)
|
||||
async def workspace_delete(workspace_id: str) -> dict[str, Any]:
|
||||
"""Delete a persistent workspace and its backing sandbox."""
|
||||
return self.delete_workspace(workspace_id)
|
||||
|
||||
return server
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ from pyro_mcp.vm_environments import DEFAULT_CATALOG_VERSION
|
|||
from pyro_mcp.vm_manager import (
|
||||
DEFAULT_MEM_MIB,
|
||||
DEFAULT_VCPU_COUNT,
|
||||
TASK_WORKSPACE_GUEST_PATH,
|
||||
WORKSPACE_GUEST_PATH,
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -151,17 +151,17 @@ def _print_doctor_human(payload: dict[str, Any]) -> None:
|
|||
print(f"- {issue}")
|
||||
|
||||
|
||||
def _print_task_summary_human(payload: dict[str, Any], *, action: str) -> None:
|
||||
print(f"{action}: {str(payload.get('task_id', 'unknown'))}")
|
||||
def _print_workspace_summary_human(payload: dict[str, Any], *, action: str) -> None:
|
||||
print(f"{action} ID: {str(payload.get('workspace_id', 'unknown'))}")
|
||||
print(f"Environment: {str(payload.get('environment', 'unknown'))}")
|
||||
print(f"State: {str(payload.get('state', 'unknown'))}")
|
||||
print(f"Workspace: {str(payload.get('workspace_path', '/workspace'))}")
|
||||
workspace_seed = payload.get("workspace_seed")
|
||||
if isinstance(workspace_seed, dict):
|
||||
mode = str(workspace_seed.get("mode", "empty"))
|
||||
source_path = workspace_seed.get("source_path")
|
||||
if isinstance(source_path, str) and source_path != "":
|
||||
print(f"Workspace seed: {mode} from {source_path}")
|
||||
seed_path = workspace_seed.get("seed_path")
|
||||
if isinstance(seed_path, str) and seed_path != "":
|
||||
print(f"Workspace seed: {mode} from {seed_path}")
|
||||
else:
|
||||
print(f"Workspace seed: {mode}")
|
||||
print(f"Execution mode: {str(payload.get('execution_mode', 'pending'))}")
|
||||
|
|
@ -179,16 +179,16 @@ def _print_task_summary_human(payload: dict[str, Any], *, action: str) -> None:
|
|||
)
|
||||
|
||||
|
||||
def _print_task_exec_human(payload: dict[str, Any]) -> None:
|
||||
def _print_workspace_exec_human(payload: dict[str, Any]) -> None:
|
||||
stdout = str(payload.get("stdout", ""))
|
||||
stderr = str(payload.get("stderr", ""))
|
||||
_write_stream(stdout, stream=sys.stdout)
|
||||
_write_stream(stderr, stream=sys.stderr)
|
||||
print(
|
||||
"[task-exec] "
|
||||
f"task_id={str(payload.get('task_id', 'unknown'))} "
|
||||
"[workspace-exec] "
|
||||
f"workspace_id={str(payload.get('workspace_id', 'unknown'))} "
|
||||
f"sequence={int(payload.get('sequence', 0))} "
|
||||
f"cwd={str(payload.get('cwd', TASK_WORKSPACE_GUEST_PATH))} "
|
||||
f"cwd={str(payload.get('cwd', WORKSPACE_GUEST_PATH))} "
|
||||
f"execution_mode={str(payload.get('execution_mode', 'unknown'))} "
|
||||
f"exit_code={int(payload.get('exit_code', 1))} "
|
||||
f"duration_ms={int(payload.get('duration_ms', 0))}",
|
||||
|
|
@ -197,27 +197,27 @@ def _print_task_exec_human(payload: dict[str, Any]) -> None:
|
|||
)
|
||||
|
||||
|
||||
def _print_task_sync_human(payload: dict[str, Any]) -> None:
|
||||
def _print_workspace_sync_human(payload: dict[str, Any]) -> None:
|
||||
workspace_sync = payload.get("workspace_sync")
|
||||
if not isinstance(workspace_sync, dict):
|
||||
print(f"Synced task: {str(payload.get('task_id', 'unknown'))}")
|
||||
print(f"Synced workspace: {str(payload.get('workspace_id', 'unknown'))}")
|
||||
return
|
||||
print(
|
||||
"[task-sync] "
|
||||
f"task_id={str(payload.get('task_id', 'unknown'))} "
|
||||
"[workspace-sync] "
|
||||
f"workspace_id={str(payload.get('workspace_id', 'unknown'))} "
|
||||
f"mode={str(workspace_sync.get('mode', 'unknown'))} "
|
||||
f"source={str(workspace_sync.get('source_path', 'unknown'))} "
|
||||
f"destination={str(workspace_sync.get('destination', TASK_WORKSPACE_GUEST_PATH))} "
|
||||
f"destination={str(workspace_sync.get('destination', WORKSPACE_GUEST_PATH))} "
|
||||
f"entry_count={int(workspace_sync.get('entry_count', 0))} "
|
||||
f"bytes_written={int(workspace_sync.get('bytes_written', 0))} "
|
||||
f"execution_mode={str(payload.get('execution_mode', 'unknown'))}"
|
||||
)
|
||||
|
||||
|
||||
def _print_task_logs_human(payload: dict[str, Any]) -> None:
|
||||
def _print_workspace_logs_human(payload: dict[str, Any]) -> None:
|
||||
entries = payload.get("entries")
|
||||
if not isinstance(entries, list) or not entries:
|
||||
print("No task logs found.")
|
||||
print("No workspace logs found.")
|
||||
return
|
||||
for entry in entries:
|
||||
if not isinstance(entry, dict):
|
||||
|
|
@ -226,7 +226,7 @@ def _print_task_logs_human(payload: dict[str, Any]) -> None:
|
|||
f"#{int(entry.get('sequence', 0))} "
|
||||
f"exit_code={int(entry.get('exit_code', -1))} "
|
||||
f"duration_ms={int(entry.get('duration_ms', 0))} "
|
||||
f"cwd={str(entry.get('cwd', TASK_WORKSPACE_GUEST_PATH))}"
|
||||
f"cwd={str(entry.get('cwd', WORKSPACE_GUEST_PATH))}"
|
||||
)
|
||||
print(f"$ {str(entry.get('command', ''))}")
|
||||
stdout = str(entry.get("stdout", ""))
|
||||
|
|
@ -267,8 +267,8 @@ def _build_parser() -> argparse.ArgumentParser:
|
|||
pyro run debian:12 -- git --version
|
||||
|
||||
Need repeated commands in one workspace after that?
|
||||
pyro task create debian:12 --source-path ./repo
|
||||
pyro task sync push TASK_ID ./changes
|
||||
pyro workspace create debian:12 --seed-path ./repo
|
||||
pyro workspace sync push WORKSPACE_ID ./changes
|
||||
|
||||
Use `pyro mcp serve` only after the CLI validation path works.
|
||||
"""
|
||||
|
|
@ -463,9 +463,9 @@ def _build_parser() -> argparse.ArgumentParser:
|
|||
),
|
||||
)
|
||||
|
||||
task_parser = subparsers.add_parser(
|
||||
"task",
|
||||
help="Manage persistent task workspaces.",
|
||||
workspace_parser = subparsers.add_parser(
|
||||
"workspace",
|
||||
help="Manage persistent workspaces.",
|
||||
description=(
|
||||
"Create a persistent workspace when you need repeated commands in one "
|
||||
"sandbox instead of one-shot `pyro run`."
|
||||
|
|
@ -473,58 +473,62 @@ def _build_parser() -> argparse.ArgumentParser:
|
|||
epilog=dedent(
|
||||
"""
|
||||
Examples:
|
||||
pyro task create debian:12 --source-path ./repo
|
||||
pyro task sync push TASK_ID ./repo --dest src
|
||||
pyro task exec TASK_ID -- sh -lc 'printf "hello\\n" > note.txt'
|
||||
pyro task logs TASK_ID
|
||||
pyro workspace create debian:12 --seed-path ./repo
|
||||
pyro workspace sync push WORKSPACE_ID ./repo --dest src
|
||||
pyro workspace exec WORKSPACE_ID -- sh -lc 'printf "hello\\n" > note.txt'
|
||||
pyro workspace logs WORKSPACE_ID
|
||||
"""
|
||||
),
|
||||
formatter_class=_HelpFormatter,
|
||||
)
|
||||
task_subparsers = task_parser.add_subparsers(dest="task_command", required=True, metavar="TASK")
|
||||
task_create_parser = task_subparsers.add_parser(
|
||||
workspace_subparsers = workspace_parser.add_subparsers(
|
||||
dest="workspace_command",
|
||||
required=True,
|
||||
metavar="WORKSPACE",
|
||||
)
|
||||
workspace_create_parser = workspace_subparsers.add_parser(
|
||||
"create",
|
||||
help="Create and start a persistent task workspace.",
|
||||
description="Create a task workspace that stays alive across repeated exec calls.",
|
||||
help="Create and start a persistent workspace.",
|
||||
description="Create a persistent workspace that stays alive across repeated exec calls.",
|
||||
epilog=dedent(
|
||||
"""
|
||||
Examples:
|
||||
pyro task create debian:12
|
||||
pyro task create debian:12 --source-path ./repo
|
||||
pyro task sync push TASK_ID ./changes
|
||||
pyro workspace create debian:12
|
||||
pyro workspace create debian:12 --seed-path ./repo
|
||||
pyro workspace sync push WORKSPACE_ID ./changes
|
||||
"""
|
||||
),
|
||||
formatter_class=_HelpFormatter,
|
||||
)
|
||||
task_create_parser.add_argument(
|
||||
workspace_create_parser.add_argument(
|
||||
"environment",
|
||||
metavar="ENVIRONMENT",
|
||||
help="Curated environment to boot, for example `debian:12`.",
|
||||
)
|
||||
task_create_parser.add_argument(
|
||||
workspace_create_parser.add_argument(
|
||||
"--vcpu-count",
|
||||
type=int,
|
||||
default=DEFAULT_VCPU_COUNT,
|
||||
help="Number of virtual CPUs to allocate to the task guest.",
|
||||
help="Number of virtual CPUs to allocate to the guest.",
|
||||
)
|
||||
task_create_parser.add_argument(
|
||||
workspace_create_parser.add_argument(
|
||||
"--mem-mib",
|
||||
type=int,
|
||||
default=DEFAULT_MEM_MIB,
|
||||
help="Guest memory allocation in MiB.",
|
||||
)
|
||||
task_create_parser.add_argument(
|
||||
workspace_create_parser.add_argument(
|
||||
"--ttl-seconds",
|
||||
type=int,
|
||||
default=600,
|
||||
help="Time-to-live for the task before automatic cleanup.",
|
||||
help="Time-to-live for the workspace before automatic cleanup.",
|
||||
)
|
||||
task_create_parser.add_argument(
|
||||
workspace_create_parser.add_argument(
|
||||
"--network",
|
||||
action="store_true",
|
||||
help="Enable outbound guest networking for the task guest.",
|
||||
help="Enable outbound guest networking for the workspace guest.",
|
||||
)
|
||||
task_create_parser.add_argument(
|
||||
workspace_create_parser.add_argument(
|
||||
"--allow-host-compat",
|
||||
action="store_true",
|
||||
help=(
|
||||
|
|
@ -532,143 +536,153 @@ def _build_parser() -> argparse.ArgumentParser:
|
|||
"is unavailable."
|
||||
),
|
||||
)
|
||||
task_create_parser.add_argument(
|
||||
"--source-path",
|
||||
workspace_create_parser.add_argument(
|
||||
"--seed-path",
|
||||
help=(
|
||||
"Optional host directory or .tar/.tar.gz/.tgz archive to seed into `/workspace` "
|
||||
"before the task is returned."
|
||||
"before the workspace is returned."
|
||||
),
|
||||
)
|
||||
task_create_parser.add_argument(
|
||||
workspace_create_parser.add_argument(
|
||||
"--json",
|
||||
action="store_true",
|
||||
help="Print structured JSON instead of human-readable output.",
|
||||
)
|
||||
task_exec_parser = task_subparsers.add_parser(
|
||||
workspace_exec_parser = workspace_subparsers.add_parser(
|
||||
"exec",
|
||||
help="Run one command inside an existing task workspace.",
|
||||
description="Run one non-interactive command in the persistent `/workspace` for a task.",
|
||||
epilog="Example:\n pyro task exec TASK_ID -- cat note.txt",
|
||||
help="Run one command inside an existing workspace.",
|
||||
description=(
|
||||
"Run one non-interactive command in the persistent `/workspace` "
|
||||
"for a workspace."
|
||||
),
|
||||
epilog="Example:\n pyro workspace exec WORKSPACE_ID -- cat note.txt",
|
||||
formatter_class=_HelpFormatter,
|
||||
)
|
||||
task_exec_parser.add_argument("task_id", metavar="TASK_ID", help="Persistent task identifier.")
|
||||
task_exec_parser.add_argument(
|
||||
workspace_exec_parser.add_argument(
|
||||
"workspace_id",
|
||||
metavar="WORKSPACE_ID",
|
||||
help="Persistent workspace identifier.",
|
||||
)
|
||||
workspace_exec_parser.add_argument(
|
||||
"--timeout-seconds",
|
||||
type=int,
|
||||
default=30,
|
||||
help="Maximum time allowed for the task command.",
|
||||
help="Maximum time allowed for the workspace command.",
|
||||
)
|
||||
task_exec_parser.add_argument(
|
||||
workspace_exec_parser.add_argument(
|
||||
"--json",
|
||||
action="store_true",
|
||||
help="Print structured JSON instead of human-readable output.",
|
||||
)
|
||||
task_exec_parser.add_argument(
|
||||
workspace_exec_parser.add_argument(
|
||||
"command_args",
|
||||
nargs="*",
|
||||
metavar="ARG",
|
||||
help=(
|
||||
"Command and arguments to run inside the task workspace. Prefix them with `--`, "
|
||||
"for example `pyro task exec TASK_ID -- cat note.txt`."
|
||||
"Command and arguments to run inside the workspace. Prefix them with `--`, "
|
||||
"for example `pyro workspace exec WORKSPACE_ID -- cat note.txt`."
|
||||
),
|
||||
)
|
||||
task_sync_parser = task_subparsers.add_parser(
|
||||
workspace_sync_parser = workspace_subparsers.add_parser(
|
||||
"sync",
|
||||
help="Push host content into a started task workspace.",
|
||||
help="Push host content into a started workspace.",
|
||||
description=(
|
||||
"Push host directory or archive content into `/workspace` for an existing "
|
||||
"started task."
|
||||
"started workspace."
|
||||
),
|
||||
epilog=dedent(
|
||||
"""
|
||||
Examples:
|
||||
pyro task sync push TASK_ID ./repo
|
||||
pyro task sync push TASK_ID ./patches --dest src
|
||||
pyro workspace sync push WORKSPACE_ID ./repo
|
||||
pyro workspace sync push WORKSPACE_ID ./patches --dest src
|
||||
|
||||
Sync is non-atomic. If a sync fails partway through, delete and recreate the task.
|
||||
Sync is non-atomic. If a sync fails partway through, delete and recreate the workspace.
|
||||
"""
|
||||
),
|
||||
formatter_class=_HelpFormatter,
|
||||
)
|
||||
task_sync_subparsers = task_sync_parser.add_subparsers(
|
||||
dest="task_sync_command",
|
||||
workspace_sync_subparsers = workspace_sync_parser.add_subparsers(
|
||||
dest="workspace_sync_command",
|
||||
required=True,
|
||||
metavar="SYNC",
|
||||
)
|
||||
task_sync_push_parser = task_sync_subparsers.add_parser(
|
||||
workspace_sync_push_parser = workspace_sync_subparsers.add_parser(
|
||||
"push",
|
||||
help="Push one host directory or archive into a started task.",
|
||||
help="Push one host directory or archive into a started workspace.",
|
||||
description="Import host content into `/workspace` or a subdirectory of it.",
|
||||
epilog="Example:\n pyro task sync push TASK_ID ./repo --dest src",
|
||||
epilog="Example:\n pyro workspace sync push WORKSPACE_ID ./repo --dest src",
|
||||
formatter_class=_HelpFormatter,
|
||||
)
|
||||
task_sync_push_parser.add_argument(
|
||||
"task_id",
|
||||
metavar="TASK_ID",
|
||||
help="Persistent task identifier.",
|
||||
workspace_sync_push_parser.add_argument(
|
||||
"workspace_id",
|
||||
metavar="WORKSPACE_ID",
|
||||
help="Persistent workspace identifier.",
|
||||
)
|
||||
task_sync_push_parser.add_argument(
|
||||
workspace_sync_push_parser.add_argument(
|
||||
"source_path",
|
||||
metavar="SOURCE_PATH",
|
||||
help="Host directory or .tar/.tar.gz/.tgz archive to push into the task workspace.",
|
||||
help="Host directory or .tar/.tar.gz/.tgz archive to push into the workspace.",
|
||||
)
|
||||
task_sync_push_parser.add_argument(
|
||||
workspace_sync_push_parser.add_argument(
|
||||
"--dest",
|
||||
default=TASK_WORKSPACE_GUEST_PATH,
|
||||
default=WORKSPACE_GUEST_PATH,
|
||||
help="Workspace destination path. Relative values resolve inside `/workspace`.",
|
||||
)
|
||||
task_sync_push_parser.add_argument(
|
||||
workspace_sync_push_parser.add_argument(
|
||||
"--json",
|
||||
action="store_true",
|
||||
help="Print structured JSON instead of human-readable output.",
|
||||
)
|
||||
task_status_parser = task_subparsers.add_parser(
|
||||
workspace_status_parser = workspace_subparsers.add_parser(
|
||||
"status",
|
||||
help="Inspect one task workspace.",
|
||||
description="Show task state, sizing, workspace path, and latest command metadata.",
|
||||
epilog="Example:\n pyro task status TASK_ID",
|
||||
help="Inspect one workspace.",
|
||||
description="Show workspace state, sizing, workspace path, and latest command metadata.",
|
||||
epilog="Example:\n pyro workspace status WORKSPACE_ID",
|
||||
formatter_class=_HelpFormatter,
|
||||
)
|
||||
task_status_parser.add_argument(
|
||||
"task_id",
|
||||
metavar="TASK_ID",
|
||||
help="Persistent task identifier.",
|
||||
workspace_status_parser.add_argument(
|
||||
"workspace_id",
|
||||
metavar="WORKSPACE_ID",
|
||||
help="Persistent workspace identifier.",
|
||||
)
|
||||
task_status_parser.add_argument(
|
||||
workspace_status_parser.add_argument(
|
||||
"--json",
|
||||
action="store_true",
|
||||
help="Print structured JSON instead of human-readable output.",
|
||||
)
|
||||
task_logs_parser = task_subparsers.add_parser(
|
||||
workspace_logs_parser = workspace_subparsers.add_parser(
|
||||
"logs",
|
||||
help="Show command history for one task.",
|
||||
description="Show persisted command history, including stdout and stderr, for one task.",
|
||||
epilog="Example:\n pyro task logs TASK_ID",
|
||||
help="Show command history for one workspace.",
|
||||
description=(
|
||||
"Show persisted command history, including stdout and stderr, "
|
||||
"for one workspace."
|
||||
),
|
||||
epilog="Example:\n pyro workspace logs WORKSPACE_ID",
|
||||
formatter_class=_HelpFormatter,
|
||||
)
|
||||
task_logs_parser.add_argument(
|
||||
"task_id",
|
||||
metavar="TASK_ID",
|
||||
help="Persistent task identifier.",
|
||||
workspace_logs_parser.add_argument(
|
||||
"workspace_id",
|
||||
metavar="WORKSPACE_ID",
|
||||
help="Persistent workspace identifier.",
|
||||
)
|
||||
task_logs_parser.add_argument(
|
||||
workspace_logs_parser.add_argument(
|
||||
"--json",
|
||||
action="store_true",
|
||||
help="Print structured JSON instead of human-readable output.",
|
||||
)
|
||||
task_delete_parser = task_subparsers.add_parser(
|
||||
workspace_delete_parser = workspace_subparsers.add_parser(
|
||||
"delete",
|
||||
help="Delete one task workspace.",
|
||||
description="Stop the backing sandbox if needed and remove the task workspace.",
|
||||
epilog="Example:\n pyro task delete TASK_ID",
|
||||
help="Delete one workspace.",
|
||||
description="Stop the backing sandbox if needed and remove the workspace.",
|
||||
epilog="Example:\n pyro workspace delete WORKSPACE_ID",
|
||||
formatter_class=_HelpFormatter,
|
||||
)
|
||||
task_delete_parser.add_argument(
|
||||
"task_id",
|
||||
metavar="TASK_ID",
|
||||
help="Persistent task identifier.",
|
||||
workspace_delete_parser.add_argument(
|
||||
"workspace_id",
|
||||
metavar="WORKSPACE_ID",
|
||||
help="Persistent workspace identifier.",
|
||||
)
|
||||
task_delete_parser.add_argument(
|
||||
workspace_delete_parser.add_argument(
|
||||
"--json",
|
||||
action="store_true",
|
||||
help="Print structured JSON instead of human-readable output.",
|
||||
|
|
@ -847,28 +861,28 @@ def main() -> None:
|
|||
if exit_code != 0:
|
||||
raise SystemExit(exit_code)
|
||||
return
|
||||
if args.command == "task":
|
||||
if args.task_command == "create":
|
||||
payload = pyro.create_task(
|
||||
if args.command == "workspace":
|
||||
if args.workspace_command == "create":
|
||||
payload = pyro.create_workspace(
|
||||
environment=args.environment,
|
||||
vcpu_count=args.vcpu_count,
|
||||
mem_mib=args.mem_mib,
|
||||
ttl_seconds=args.ttl_seconds,
|
||||
network=args.network,
|
||||
allow_host_compat=args.allow_host_compat,
|
||||
source_path=args.source_path,
|
||||
seed_path=args.seed_path,
|
||||
)
|
||||
if bool(args.json):
|
||||
_print_json(payload)
|
||||
else:
|
||||
_print_task_summary_human(payload, action="Task")
|
||||
_print_workspace_summary_human(payload, action="Workspace")
|
||||
return
|
||||
if args.task_command == "exec":
|
||||
if args.workspace_command == "exec":
|
||||
command = _require_command(args.command_args)
|
||||
if bool(args.json):
|
||||
try:
|
||||
payload = pyro.exec_task(
|
||||
args.task_id,
|
||||
payload = pyro.exec_workspace(
|
||||
args.workspace_id,
|
||||
command=command,
|
||||
timeout_seconds=args.timeout_seconds,
|
||||
)
|
||||
|
|
@ -878,24 +892,24 @@ def main() -> None:
|
|||
_print_json(payload)
|
||||
else:
|
||||
try:
|
||||
payload = pyro.exec_task(
|
||||
args.task_id,
|
||||
payload = pyro.exec_workspace(
|
||||
args.workspace_id,
|
||||
command=command,
|
||||
timeout_seconds=args.timeout_seconds,
|
||||
)
|
||||
except Exception as exc: # noqa: BLE001
|
||||
print(f"[error] {exc}", file=sys.stderr, flush=True)
|
||||
raise SystemExit(1) from exc
|
||||
_print_task_exec_human(payload)
|
||||
_print_workspace_exec_human(payload)
|
||||
exit_code = int(payload.get("exit_code", 1))
|
||||
if exit_code != 0:
|
||||
raise SystemExit(exit_code)
|
||||
return
|
||||
if args.task_command == "sync" and args.task_sync_command == "push":
|
||||
if args.workspace_command == "sync" and args.workspace_sync_command == "push":
|
||||
if bool(args.json):
|
||||
try:
|
||||
payload = pyro.push_task_sync(
|
||||
args.task_id,
|
||||
payload = pyro.push_workspace_sync(
|
||||
args.workspace_id,
|
||||
args.source_path,
|
||||
dest=args.dest,
|
||||
)
|
||||
|
|
@ -905,36 +919,36 @@ def main() -> None:
|
|||
_print_json(payload)
|
||||
else:
|
||||
try:
|
||||
payload = pyro.push_task_sync(
|
||||
args.task_id,
|
||||
payload = pyro.push_workspace_sync(
|
||||
args.workspace_id,
|
||||
args.source_path,
|
||||
dest=args.dest,
|
||||
)
|
||||
except Exception as exc: # noqa: BLE001
|
||||
print(f"[error] {exc}", file=sys.stderr, flush=True)
|
||||
raise SystemExit(1) from exc
|
||||
_print_task_sync_human(payload)
|
||||
_print_workspace_sync_human(payload)
|
||||
return
|
||||
if args.task_command == "status":
|
||||
payload = pyro.status_task(args.task_id)
|
||||
if args.workspace_command == "status":
|
||||
payload = pyro.status_workspace(args.workspace_id)
|
||||
if bool(args.json):
|
||||
_print_json(payload)
|
||||
else:
|
||||
_print_task_summary_human(payload, action="Task")
|
||||
_print_workspace_summary_human(payload, action="Workspace")
|
||||
return
|
||||
if args.task_command == "logs":
|
||||
payload = pyro.logs_task(args.task_id)
|
||||
if args.workspace_command == "logs":
|
||||
payload = pyro.logs_workspace(args.workspace_id)
|
||||
if bool(args.json):
|
||||
_print_json(payload)
|
||||
else:
|
||||
_print_task_logs_human(payload)
|
||||
_print_workspace_logs_human(payload)
|
||||
return
|
||||
if args.task_command == "delete":
|
||||
payload = pyro.delete_task(args.task_id)
|
||||
if args.workspace_command == "delete":
|
||||
payload = pyro.delete_workspace(args.workspace_id)
|
||||
if bool(args.json):
|
||||
_print_json(payload)
|
||||
else:
|
||||
print(f"Deleted task: {str(payload.get('task_id', 'unknown'))}")
|
||||
print(f"Deleted workspace: {str(payload.get('workspace_id', 'unknown'))}")
|
||||
return
|
||||
if args.command == "doctor":
|
||||
payload = doctor_report(platform=args.platform)
|
||||
|
|
|
|||
|
|
@ -2,21 +2,21 @@
|
|||
|
||||
from __future__ import annotations
|
||||
|
||||
PUBLIC_CLI_COMMANDS = ("demo", "doctor", "env", "mcp", "run", "task")
|
||||
PUBLIC_CLI_COMMANDS = ("demo", "doctor", "env", "mcp", "run", "workspace")
|
||||
PUBLIC_CLI_DEMO_SUBCOMMANDS = ("ollama",)
|
||||
PUBLIC_CLI_ENV_SUBCOMMANDS = ("inspect", "list", "pull", "prune")
|
||||
PUBLIC_CLI_TASK_SUBCOMMANDS = ("create", "delete", "exec", "logs", "status", "sync")
|
||||
PUBLIC_CLI_TASK_SYNC_SUBCOMMANDS = ("push",)
|
||||
PUBLIC_CLI_TASK_CREATE_FLAGS = (
|
||||
PUBLIC_CLI_WORKSPACE_SUBCOMMANDS = ("create", "delete", "exec", "logs", "status", "sync")
|
||||
PUBLIC_CLI_WORKSPACE_SYNC_SUBCOMMANDS = ("push",)
|
||||
PUBLIC_CLI_WORKSPACE_CREATE_FLAGS = (
|
||||
"--vcpu-count",
|
||||
"--mem-mib",
|
||||
"--ttl-seconds",
|
||||
"--network",
|
||||
"--allow-host-compat",
|
||||
"--source-path",
|
||||
"--seed-path",
|
||||
"--json",
|
||||
)
|
||||
PUBLIC_CLI_TASK_SYNC_PUSH_FLAGS = ("--dest", "--json")
|
||||
PUBLIC_CLI_WORKSPACE_SYNC_PUSH_FLAGS = ("--dest", "--json")
|
||||
PUBLIC_CLI_RUN_FLAGS = (
|
||||
"--vcpu-count",
|
||||
"--mem-mib",
|
||||
|
|
@ -29,24 +29,24 @@ PUBLIC_CLI_RUN_FLAGS = (
|
|||
|
||||
PUBLIC_SDK_METHODS = (
|
||||
"create_server",
|
||||
"create_task",
|
||||
"create_vm",
|
||||
"delete_task",
|
||||
"create_workspace",
|
||||
"delete_vm",
|
||||
"exec_task",
|
||||
"delete_workspace",
|
||||
"exec_vm",
|
||||
"exec_workspace",
|
||||
"inspect_environment",
|
||||
"list_environments",
|
||||
"logs_task",
|
||||
"logs_workspace",
|
||||
"network_info_vm",
|
||||
"prune_environments",
|
||||
"pull_environment",
|
||||
"push_task_sync",
|
||||
"push_workspace_sync",
|
||||
"reap_expired",
|
||||
"run_in_vm",
|
||||
"start_vm",
|
||||
"status_task",
|
||||
"status_vm",
|
||||
"status_workspace",
|
||||
"stop_vm",
|
||||
)
|
||||
|
||||
|
|
@ -61,10 +61,10 @@ PUBLIC_MCP_TOOLS = (
|
|||
"vm_start",
|
||||
"vm_status",
|
||||
"vm_stop",
|
||||
"task_create",
|
||||
"task_delete",
|
||||
"task_exec",
|
||||
"task_logs",
|
||||
"task_status",
|
||||
"task_sync_push",
|
||||
"workspace_create",
|
||||
"workspace_delete",
|
||||
"workspace_exec",
|
||||
"workspace_logs",
|
||||
"workspace_status",
|
||||
"workspace_sync_push",
|
||||
)
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ from typing import Any
|
|||
from pyro_mcp.runtime import DEFAULT_PLATFORM, RuntimePaths
|
||||
|
||||
DEFAULT_ENVIRONMENT_VERSION = "1.0.0"
|
||||
DEFAULT_CATALOG_VERSION = "2.3.0"
|
||||
DEFAULT_CATALOG_VERSION = "2.4.0"
|
||||
OCI_MANIFEST_ACCEPT = ", ".join(
|
||||
(
|
||||
"application/vnd.oci.image.index.v1+json",
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
"""Lifecycle manager for ephemeral VM environments and persistent tasks."""
|
||||
"""Lifecycle manager for ephemeral VM environments and persistent workspaces."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
|
@ -36,13 +36,13 @@ DEFAULT_TIMEOUT_SECONDS = 30
|
|||
DEFAULT_TTL_SECONDS = 600
|
||||
DEFAULT_ALLOW_HOST_COMPAT = False
|
||||
|
||||
TASK_LAYOUT_VERSION = 2
|
||||
TASK_WORKSPACE_DIRNAME = "workspace"
|
||||
TASK_COMMANDS_DIRNAME = "commands"
|
||||
TASK_RUNTIME_DIRNAME = "runtime"
|
||||
TASK_WORKSPACE_GUEST_PATH = "/workspace"
|
||||
TASK_GUEST_AGENT_PATH = "/opt/pyro/bin/pyro_guest_agent.py"
|
||||
TASK_ARCHIVE_UPLOAD_TIMEOUT_SECONDS = 60
|
||||
WORKSPACE_LAYOUT_VERSION = 2
|
||||
WORKSPACE_DIRNAME = "workspace"
|
||||
WORKSPACE_COMMANDS_DIRNAME = "commands"
|
||||
WORKSPACE_RUNTIME_DIRNAME = "runtime"
|
||||
WORKSPACE_GUEST_PATH = "/workspace"
|
||||
WORKSPACE_GUEST_AGENT_PATH = "/opt/pyro/bin/pyro_guest_agent.py"
|
||||
WORKSPACE_ARCHIVE_UPLOAD_TIMEOUT_SECONDS = 60
|
||||
|
||||
WorkspaceSeedMode = Literal["empty", "directory", "tar_archive"]
|
||||
|
||||
|
|
@ -69,10 +69,10 @@ class VmInstance:
|
|||
|
||||
|
||||
@dataclass
|
||||
class TaskRecord:
|
||||
"""Persistent task metadata stored on disk."""
|
||||
class WorkspaceRecord:
|
||||
"""Persistent workspace metadata stored on disk."""
|
||||
|
||||
task_id: str
|
||||
workspace_id: str
|
||||
environment: str
|
||||
vcpu_count: int
|
||||
mem_mib: int
|
||||
|
|
@ -98,9 +98,9 @@ class TaskRecord:
|
|||
command_count: int = 0,
|
||||
last_command: dict[str, Any] | None = None,
|
||||
workspace_seed: dict[str, Any] | None = None,
|
||||
) -> TaskRecord:
|
||||
) -> WorkspaceRecord:
|
||||
return cls(
|
||||
task_id=instance.vm_id,
|
||||
workspace_id=instance.vm_id,
|
||||
environment=instance.environment,
|
||||
vcpu_count=instance.vcpu_count,
|
||||
mem_mib=instance.mem_mib,
|
||||
|
|
@ -121,7 +121,7 @@ class TaskRecord:
|
|||
|
||||
def to_instance(self, *, workdir: Path) -> VmInstance:
|
||||
return VmInstance(
|
||||
vm_id=self.task_id,
|
||||
vm_id=self.workspace_id,
|
||||
environment=self.environment,
|
||||
vcpu_count=self.vcpu_count,
|
||||
mem_mib=self.mem_mib,
|
||||
|
|
@ -140,8 +140,8 @@ class TaskRecord:
|
|||
|
||||
def to_payload(self) -> dict[str, Any]:
|
||||
return {
|
||||
"layout_version": TASK_LAYOUT_VERSION,
|
||||
"task_id": self.task_id,
|
||||
"layout_version": WORKSPACE_LAYOUT_VERSION,
|
||||
"workspace_id": self.workspace_id,
|
||||
"environment": self.environment,
|
||||
"vcpu_count": self.vcpu_count,
|
||||
"mem_mib": self.mem_mib,
|
||||
|
|
@ -161,9 +161,9 @@ class TaskRecord:
|
|||
}
|
||||
|
||||
@classmethod
|
||||
def from_payload(cls, payload: dict[str, Any]) -> TaskRecord:
|
||||
def from_payload(cls, payload: dict[str, Any]) -> WorkspaceRecord:
|
||||
return cls(
|
||||
task_id=str(payload["task_id"]),
|
||||
workspace_id=str(payload["workspace_id"]),
|
||||
environment=str(payload["environment"]),
|
||||
vcpu_count=int(payload["vcpu_count"]),
|
||||
mem_mib=int(payload["mem_mib"]),
|
||||
|
|
@ -179,7 +179,7 @@ class TaskRecord:
|
|||
network=_deserialize_network(payload.get("network")),
|
||||
command_count=int(payload.get("command_count", 0)),
|
||||
last_command=_optional_dict(payload.get("last_command")),
|
||||
workspace_seed=_task_workspace_seed_dict(payload.get("workspace_seed")),
|
||||
workspace_seed=_workspace_seed_dict(payload.get("workspace_seed")),
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -194,10 +194,15 @@ class PreparedWorkspaceSeed:
|
|||
bytes_written: int = 0
|
||||
cleanup_dir: Path | None = None
|
||||
|
||||
def to_payload(self, *, destination: str = TASK_WORKSPACE_GUEST_PATH) -> dict[str, Any]:
|
||||
def to_payload(
|
||||
self,
|
||||
*,
|
||||
destination: str = WORKSPACE_GUEST_PATH,
|
||||
path_key: str = "seed_path",
|
||||
) -> dict[str, Any]:
|
||||
return {
|
||||
"mode": self.mode,
|
||||
"source_path": self.source_path,
|
||||
path_key: self.source_path,
|
||||
"destination": destination,
|
||||
"entry_count": self.entry_count,
|
||||
"bytes_written": self.bytes_written,
|
||||
|
|
@ -255,21 +260,21 @@ def _string_dict(value: object) -> dict[str, str]:
|
|||
def _empty_workspace_seed_payload() -> dict[str, Any]:
|
||||
return {
|
||||
"mode": "empty",
|
||||
"source_path": None,
|
||||
"destination": TASK_WORKSPACE_GUEST_PATH,
|
||||
"seed_path": None,
|
||||
"destination": WORKSPACE_GUEST_PATH,
|
||||
"entry_count": 0,
|
||||
"bytes_written": 0,
|
||||
}
|
||||
|
||||
|
||||
def _task_workspace_seed_dict(value: object) -> dict[str, Any]:
|
||||
def _workspace_seed_dict(value: object) -> dict[str, Any]:
|
||||
if not isinstance(value, dict):
|
||||
return _empty_workspace_seed_payload()
|
||||
payload = _empty_workspace_seed_payload()
|
||||
payload.update(
|
||||
{
|
||||
"mode": str(value.get("mode", payload["mode"])),
|
||||
"source_path": _optional_str(value.get("source_path")),
|
||||
"seed_path": _optional_str(value.get("seed_path")),
|
||||
"destination": str(value.get("destination", payload["destination"])),
|
||||
"entry_count": int(value.get("entry_count", payload["entry_count"])),
|
||||
"bytes_written": int(value.get("bytes_written", payload["bytes_written"])),
|
||||
|
|
@ -374,7 +379,7 @@ def _normalize_workspace_destination(destination: str) -> tuple[str, PurePosixPa
|
|||
destination_path = PurePosixPath(candidate)
|
||||
if any(part == ".." for part in destination_path.parts):
|
||||
raise ValueError("workspace destination must stay inside /workspace")
|
||||
workspace_root = PurePosixPath(TASK_WORKSPACE_GUEST_PATH)
|
||||
workspace_root = PurePosixPath(WORKSPACE_GUEST_PATH)
|
||||
if not destination_path.is_absolute():
|
||||
destination_path = workspace_root / destination_path
|
||||
parts = [part for part in destination_path.parts if part not in {"", "."}]
|
||||
|
|
@ -510,7 +515,7 @@ def _extract_seed_archive_to_host_workspace(
|
|||
def _instance_workspace_host_dir(instance: VmInstance) -> Path:
|
||||
raw_value = instance.metadata.get("workspace_host_dir")
|
||||
if raw_value is None or raw_value == "":
|
||||
raise RuntimeError("task workspace host directory is unavailable")
|
||||
raise RuntimeError("workspace host directory is unavailable")
|
||||
return Path(raw_value)
|
||||
|
||||
|
||||
|
|
@ -518,13 +523,13 @@ def _patch_rootfs_guest_agent(rootfs_image: Path, guest_agent_path: Path) -> Non
|
|||
debugfs_path = shutil.which("debugfs")
|
||||
if debugfs_path is None:
|
||||
raise RuntimeError(
|
||||
"debugfs is required to seed task workspaces on guest-backed runtimes"
|
||||
"debugfs is required to seed workspaces on guest-backed runtimes"
|
||||
)
|
||||
with tempfile.TemporaryDirectory(prefix="pyro-guest-agent-") as temp_dir:
|
||||
staged_agent_path = Path(temp_dir) / "pyro_guest_agent.py"
|
||||
shutil.copy2(guest_agent_path, staged_agent_path)
|
||||
subprocess.run( # noqa: S603
|
||||
[debugfs_path, "-w", "-R", f"rm {TASK_GUEST_AGENT_PATH}", str(rootfs_image)],
|
||||
[debugfs_path, "-w", "-R", f"rm {WORKSPACE_GUEST_AGENT_PATH}", str(rootfs_image)],
|
||||
text=True,
|
||||
capture_output=True,
|
||||
check=False,
|
||||
|
|
@ -534,7 +539,7 @@ def _patch_rootfs_guest_agent(rootfs_image: Path, guest_agent_path: Path) -> Non
|
|||
debugfs_path,
|
||||
"-w",
|
||||
"-R",
|
||||
f"write {staged_agent_path} {TASK_GUEST_AGENT_PATH}",
|
||||
f"write {staged_agent_path} {WORKSPACE_GUEST_AGENT_PATH}",
|
||||
str(rootfs_image),
|
||||
],
|
||||
text=True,
|
||||
|
|
@ -543,7 +548,7 @@ def _patch_rootfs_guest_agent(rootfs_image: Path, guest_agent_path: Path) -> Non
|
|||
)
|
||||
if proc.returncode != 0:
|
||||
raise RuntimeError(
|
||||
"failed to patch guest agent into task rootfs: "
|
||||
"failed to patch guest agent into workspace rootfs: "
|
||||
f"{proc.stderr.strip() or proc.stdout.strip()}"
|
||||
)
|
||||
|
||||
|
|
@ -862,7 +867,7 @@ class FirecrackerBackend(VmBackend): # pragma: no cover
|
|||
port,
|
||||
archive_path,
|
||||
destination=destination,
|
||||
timeout_seconds=TASK_ARCHIVE_UPLOAD_TIMEOUT_SECONDS,
|
||||
timeout_seconds=WORKSPACE_ARCHIVE_UPLOAD_TIMEOUT_SECONDS,
|
||||
uds_path=uds_path,
|
||||
)
|
||||
return {
|
||||
|
|
@ -885,7 +890,7 @@ class FirecrackerBackend(VmBackend): # pragma: no cover
|
|||
|
||||
|
||||
class VmManager:
|
||||
"""In-process lifecycle manager for ephemeral VM environments and tasks."""
|
||||
"""In-process lifecycle manager for ephemeral VM environments and workspaces."""
|
||||
|
||||
MIN_VCPUS = 1
|
||||
MAX_VCPUS = 8
|
||||
|
|
@ -911,7 +916,7 @@ class VmManager:
|
|||
) -> None:
|
||||
self._backend_name = backend_name or "firecracker"
|
||||
self._base_dir = base_dir or Path("/tmp/pyro-mcp")
|
||||
self._tasks_dir = self._base_dir / "tasks"
|
||||
self._workspaces_dir = self._base_dir / "workspaces"
|
||||
resolved_cache_dir = cache_dir or default_cache_dir()
|
||||
self._runtime_paths = runtime_paths
|
||||
if self._backend_name == "firecracker":
|
||||
|
|
@ -944,7 +949,7 @@ class VmManager:
|
|||
self._lock = threading.Lock()
|
||||
self._instances: dict[str, VmInstance] = {}
|
||||
self._base_dir.mkdir(parents=True, exist_ok=True)
|
||||
self._tasks_dir.mkdir(parents=True, exist_ok=True)
|
||||
self._workspaces_dir.mkdir(parents=True, exist_ok=True)
|
||||
self._backend = self._build_backend()
|
||||
|
||||
def _build_backend(self) -> VmBackend:
|
||||
|
|
@ -989,8 +994,8 @@ class VmManager:
|
|||
now = time.time()
|
||||
with self._lock:
|
||||
self._reap_expired_locked(now)
|
||||
self._reap_expired_tasks_locked(now)
|
||||
active_count = len(self._instances) + self._count_tasks_locked()
|
||||
self._reap_expired_workspaces_locked(now)
|
||||
active_count = len(self._instances) + self._count_workspaces_locked()
|
||||
if active_count >= self._max_active_vms:
|
||||
raise RuntimeError(
|
||||
f"max active VMs reached ({self._max_active_vms}); delete old VMs first"
|
||||
|
|
@ -1126,7 +1131,7 @@ class VmManager:
|
|||
del self._instances[vm_id]
|
||||
return {"deleted_vm_ids": expired_vm_ids, "count": len(expired_vm_ids)}
|
||||
|
||||
def create_task(
|
||||
def create_workspace(
|
||||
self,
|
||||
*,
|
||||
environment: str,
|
||||
|
|
@ -1135,22 +1140,22 @@ class VmManager:
|
|||
ttl_seconds: int = DEFAULT_TTL_SECONDS,
|
||||
network: bool = False,
|
||||
allow_host_compat: bool = DEFAULT_ALLOW_HOST_COMPAT,
|
||||
source_path: str | Path | None = None,
|
||||
seed_path: str | Path | None = None,
|
||||
) -> dict[str, Any]:
|
||||
self._validate_limits(vcpu_count=vcpu_count, mem_mib=mem_mib, ttl_seconds=ttl_seconds)
|
||||
get_environment(environment, runtime_paths=self._runtime_paths)
|
||||
prepared_seed = self._prepare_workspace_seed(source_path)
|
||||
prepared_seed = self._prepare_workspace_seed(seed_path)
|
||||
now = time.time()
|
||||
task_id = uuid.uuid4().hex[:12]
|
||||
task_dir = self._task_dir(task_id)
|
||||
runtime_dir = self._task_runtime_dir(task_id)
|
||||
workspace_dir = self._task_workspace_dir(task_id)
|
||||
commands_dir = self._task_commands_dir(task_id)
|
||||
task_dir.mkdir(parents=True, exist_ok=False)
|
||||
workspace_dir.mkdir(parents=True, exist_ok=True)
|
||||
workspace_id = uuid.uuid4().hex[:12]
|
||||
workspace_dir = self._workspace_dir(workspace_id)
|
||||
runtime_dir = self._workspace_runtime_dir(workspace_id)
|
||||
host_workspace_dir = self._workspace_host_dir(workspace_id)
|
||||
commands_dir = self._workspace_commands_dir(workspace_id)
|
||||
workspace_dir.mkdir(parents=True, exist_ok=False)
|
||||
host_workspace_dir.mkdir(parents=True, exist_ok=True)
|
||||
commands_dir.mkdir(parents=True, exist_ok=True)
|
||||
instance = VmInstance(
|
||||
vm_id=task_id,
|
||||
vm_id=workspace_id,
|
||||
environment=environment,
|
||||
vcpu_count=vcpu_count,
|
||||
mem_mib=mem_mib,
|
||||
|
|
@ -1162,13 +1167,13 @@ class VmManager:
|
|||
allow_host_compat=allow_host_compat,
|
||||
)
|
||||
instance.metadata["allow_host_compat"] = str(allow_host_compat).lower()
|
||||
instance.metadata["workspace_path"] = TASK_WORKSPACE_GUEST_PATH
|
||||
instance.metadata["workspace_host_dir"] = str(workspace_dir)
|
||||
instance.metadata["workspace_path"] = WORKSPACE_GUEST_PATH
|
||||
instance.metadata["workspace_host_dir"] = str(host_workspace_dir)
|
||||
try:
|
||||
with self._lock:
|
||||
self._reap_expired_locked(now)
|
||||
self._reap_expired_tasks_locked(now)
|
||||
active_count = len(self._instances) + self._count_tasks_locked()
|
||||
self._reap_expired_workspaces_locked(now)
|
||||
active_count = len(self._instances) + self._count_workspaces_locked()
|
||||
if active_count >= self._max_active_vms:
|
||||
raise RuntimeError(
|
||||
f"max active VMs reached ({self._max_active_vms}); delete old VMs first"
|
||||
|
|
@ -1178,7 +1183,7 @@ class VmManager:
|
|||
prepared_seed.archive_path is not None
|
||||
and self._runtime_capabilities.supports_guest_exec
|
||||
):
|
||||
self._ensure_task_guest_seed_support(instance)
|
||||
self._ensure_workspace_guest_seed_support(instance)
|
||||
with self._lock:
|
||||
self._start_instance_locked(instance)
|
||||
self._require_guest_exec_or_opt_in(instance)
|
||||
|
|
@ -1187,7 +1192,7 @@ class VmManager:
|
|||
import_summary = self._backend.import_archive(
|
||||
instance,
|
||||
archive_path=prepared_seed.archive_path,
|
||||
destination=TASK_WORKSPACE_GUEST_PATH,
|
||||
destination=WORKSPACE_GUEST_PATH,
|
||||
)
|
||||
workspace_seed["entry_count"] = int(import_summary["entry_count"])
|
||||
workspace_seed["bytes_written"] = int(import_summary["bytes_written"])
|
||||
|
|
@ -1195,14 +1200,14 @@ class VmManager:
|
|||
elif self._runtime_capabilities.supports_guest_exec:
|
||||
self._backend.exec(
|
||||
instance,
|
||||
f"mkdir -p {shlex.quote(TASK_WORKSPACE_GUEST_PATH)}",
|
||||
f"mkdir -p {shlex.quote(WORKSPACE_GUEST_PATH)}",
|
||||
10,
|
||||
)
|
||||
else:
|
||||
instance.metadata["execution_mode"] = "host_compat"
|
||||
task = TaskRecord.from_instance(instance, workspace_seed=workspace_seed)
|
||||
self._save_task_locked(task)
|
||||
return self._serialize_task(task)
|
||||
workspace = WorkspaceRecord.from_instance(instance, workspace_seed=workspace_seed)
|
||||
self._save_workspace_locked(workspace)
|
||||
return self._serialize_workspace(workspace)
|
||||
except Exception:
|
||||
if runtime_dir.exists():
|
||||
try:
|
||||
|
|
@ -1215,17 +1220,17 @@ class VmManager:
|
|||
self._backend.delete(instance)
|
||||
except Exception:
|
||||
pass
|
||||
shutil.rmtree(task_dir, ignore_errors=True)
|
||||
shutil.rmtree(workspace_dir, ignore_errors=True)
|
||||
raise
|
||||
finally:
|
||||
prepared_seed.cleanup()
|
||||
|
||||
def push_task_sync(
|
||||
def push_workspace_sync(
|
||||
self,
|
||||
task_id: str,
|
||||
workspace_id: str,
|
||||
*,
|
||||
source_path: str | Path,
|
||||
dest: str = TASK_WORKSPACE_GUEST_PATH,
|
||||
dest: str = WORKSPACE_GUEST_PATH,
|
||||
) -> dict[str, Any]:
|
||||
prepared_seed = self._prepare_workspace_seed(source_path)
|
||||
if prepared_seed.archive_path is None:
|
||||
|
|
@ -1233,14 +1238,17 @@ class VmManager:
|
|||
raise ValueError("source_path is required")
|
||||
normalized_destination, _ = _normalize_workspace_destination(dest)
|
||||
with self._lock:
|
||||
task = self._load_task_locked(task_id)
|
||||
self._ensure_task_not_expired_locked(task, time.time())
|
||||
self._refresh_task_liveness_locked(task)
|
||||
if task.state != "started":
|
||||
workspace = self._load_workspace_locked(workspace_id)
|
||||
self._ensure_workspace_not_expired_locked(workspace, time.time())
|
||||
self._refresh_workspace_liveness_locked(workspace)
|
||||
if workspace.state != "started":
|
||||
raise RuntimeError(
|
||||
f"task {task_id} must be in 'started' state before task_sync_push"
|
||||
f"workspace {workspace_id} must be in 'started' state "
|
||||
"before workspace_sync_push"
|
||||
)
|
||||
instance = task.to_instance(workdir=self._task_runtime_dir(task.task_id))
|
||||
instance = workspace.to_instance(
|
||||
workdir=self._workspace_runtime_dir(workspace.workspace_id)
|
||||
)
|
||||
try:
|
||||
import_summary = self._backend.import_archive(
|
||||
instance,
|
||||
|
|
@ -1249,58 +1257,71 @@ class VmManager:
|
|||
)
|
||||
finally:
|
||||
prepared_seed.cleanup()
|
||||
workspace_sync = prepared_seed.to_payload(destination=normalized_destination)
|
||||
workspace_sync = prepared_seed.to_payload(
|
||||
destination=normalized_destination,
|
||||
path_key="source_path",
|
||||
)
|
||||
workspace_sync["entry_count"] = int(import_summary["entry_count"])
|
||||
workspace_sync["bytes_written"] = int(import_summary["bytes_written"])
|
||||
workspace_sync["destination"] = str(import_summary["destination"])
|
||||
with self._lock:
|
||||
task = self._load_task_locked(task_id)
|
||||
task.state = instance.state
|
||||
task.firecracker_pid = instance.firecracker_pid
|
||||
task.last_error = instance.last_error
|
||||
task.metadata = dict(instance.metadata)
|
||||
self._save_task_locked(task)
|
||||
workspace = self._load_workspace_locked(workspace_id)
|
||||
workspace.state = instance.state
|
||||
workspace.firecracker_pid = instance.firecracker_pid
|
||||
workspace.last_error = instance.last_error
|
||||
workspace.metadata = dict(instance.metadata)
|
||||
self._save_workspace_locked(workspace)
|
||||
return {
|
||||
"task_id": task_id,
|
||||
"workspace_id": workspace_id,
|
||||
"execution_mode": instance.metadata.get("execution_mode", "pending"),
|
||||
"workspace_sync": workspace_sync,
|
||||
}
|
||||
|
||||
def exec_task(self, task_id: str, *, command: str, timeout_seconds: int = 30) -> dict[str, Any]:
|
||||
def exec_workspace(
|
||||
self,
|
||||
workspace_id: str,
|
||||
*,
|
||||
command: str,
|
||||
timeout_seconds: int = 30,
|
||||
) -> dict[str, Any]:
|
||||
if timeout_seconds <= 0:
|
||||
raise ValueError("timeout_seconds must be positive")
|
||||
with self._lock:
|
||||
task = self._load_task_locked(task_id)
|
||||
self._ensure_task_not_expired_locked(task, time.time())
|
||||
self._refresh_task_liveness_locked(task)
|
||||
if task.state != "started":
|
||||
raise RuntimeError(f"task {task_id} must be in 'started' state before task_exec")
|
||||
instance = task.to_instance(workdir=self._task_runtime_dir(task.task_id))
|
||||
workspace = self._load_workspace_locked(workspace_id)
|
||||
self._ensure_workspace_not_expired_locked(workspace, time.time())
|
||||
self._refresh_workspace_liveness_locked(workspace)
|
||||
if workspace.state != "started":
|
||||
raise RuntimeError(
|
||||
f"workspace {workspace_id} must be in 'started' state before workspace_exec"
|
||||
)
|
||||
instance = workspace.to_instance(
|
||||
workdir=self._workspace_runtime_dir(workspace.workspace_id)
|
||||
)
|
||||
exec_result, execution_mode = self._exec_instance(
|
||||
instance,
|
||||
command=command,
|
||||
timeout_seconds=timeout_seconds,
|
||||
host_workdir=self._task_workspace_dir(task.task_id),
|
||||
guest_cwd=TASK_WORKSPACE_GUEST_PATH,
|
||||
host_workdir=self._workspace_host_dir(workspace.workspace_id),
|
||||
guest_cwd=WORKSPACE_GUEST_PATH,
|
||||
)
|
||||
with self._lock:
|
||||
task = self._load_task_locked(task_id)
|
||||
task.state = instance.state
|
||||
task.firecracker_pid = instance.firecracker_pid
|
||||
task.last_error = instance.last_error
|
||||
task.metadata = dict(instance.metadata)
|
||||
entry = self._record_task_command_locked(
|
||||
task,
|
||||
workspace = self._load_workspace_locked(workspace_id)
|
||||
workspace.state = instance.state
|
||||
workspace.firecracker_pid = instance.firecracker_pid
|
||||
workspace.last_error = instance.last_error
|
||||
workspace.metadata = dict(instance.metadata)
|
||||
entry = self._record_workspace_command_locked(
|
||||
workspace,
|
||||
command=command,
|
||||
exec_result=exec_result,
|
||||
execution_mode=execution_mode,
|
||||
cwd=TASK_WORKSPACE_GUEST_PATH,
|
||||
cwd=WORKSPACE_GUEST_PATH,
|
||||
)
|
||||
self._save_task_locked(task)
|
||||
self._save_workspace_locked(workspace)
|
||||
return {
|
||||
"task_id": task_id,
|
||||
"environment": task.environment,
|
||||
"environment_version": task.metadata.get("environment_version"),
|
||||
"workspace_id": workspace_id,
|
||||
"environment": workspace.environment,
|
||||
"environment_version": workspace.metadata.get("environment_version"),
|
||||
"command": command,
|
||||
"stdout": exec_result.stdout,
|
||||
"stderr": exec_result.stderr,
|
||||
|
|
@ -1308,36 +1329,47 @@ class VmManager:
|
|||
"duration_ms": exec_result.duration_ms,
|
||||
"execution_mode": execution_mode,
|
||||
"sequence": entry["sequence"],
|
||||
"cwd": TASK_WORKSPACE_GUEST_PATH,
|
||||
"cwd": WORKSPACE_GUEST_PATH,
|
||||
}
|
||||
|
||||
def status_task(self, task_id: str) -> dict[str, Any]:
|
||||
def status_workspace(self, workspace_id: str) -> dict[str, Any]:
|
||||
with self._lock:
|
||||
task = self._load_task_locked(task_id)
|
||||
self._ensure_task_not_expired_locked(task, time.time())
|
||||
self._refresh_task_liveness_locked(task)
|
||||
self._save_task_locked(task)
|
||||
return self._serialize_task(task)
|
||||
workspace = self._load_workspace_locked(workspace_id)
|
||||
self._ensure_workspace_not_expired_locked(workspace, time.time())
|
||||
self._refresh_workspace_liveness_locked(workspace)
|
||||
self._save_workspace_locked(workspace)
|
||||
return self._serialize_workspace(workspace)
|
||||
|
||||
def logs_task(self, task_id: str) -> dict[str, Any]:
|
||||
def logs_workspace(self, workspace_id: str) -> dict[str, Any]:
|
||||
with self._lock:
|
||||
task = self._load_task_locked(task_id)
|
||||
self._ensure_task_not_expired_locked(task, time.time())
|
||||
self._refresh_task_liveness_locked(task)
|
||||
self._save_task_locked(task)
|
||||
entries = self._read_task_logs_locked(task.task_id)
|
||||
return {"task_id": task.task_id, "count": len(entries), "entries": entries}
|
||||
workspace = self._load_workspace_locked(workspace_id)
|
||||
self._ensure_workspace_not_expired_locked(workspace, time.time())
|
||||
self._refresh_workspace_liveness_locked(workspace)
|
||||
self._save_workspace_locked(workspace)
|
||||
entries = self._read_workspace_logs_locked(workspace.workspace_id)
|
||||
return {
|
||||
"workspace_id": workspace.workspace_id,
|
||||
"count": len(entries),
|
||||
"entries": entries,
|
||||
}
|
||||
|
||||
def delete_task(self, task_id: str, *, reason: str = "explicit_delete") -> dict[str, Any]:
|
||||
def delete_workspace(
|
||||
self,
|
||||
workspace_id: str,
|
||||
*,
|
||||
reason: str = "explicit_delete",
|
||||
) -> dict[str, Any]:
|
||||
with self._lock:
|
||||
task = self._load_task_locked(task_id)
|
||||
instance = task.to_instance(workdir=self._task_runtime_dir(task.task_id))
|
||||
if task.state == "started":
|
||||
workspace = self._load_workspace_locked(workspace_id)
|
||||
instance = workspace.to_instance(
|
||||
workdir=self._workspace_runtime_dir(workspace.workspace_id)
|
||||
)
|
||||
if workspace.state == "started":
|
||||
self._backend.stop(instance)
|
||||
task.state = "stopped"
|
||||
workspace.state = "stopped"
|
||||
self._backend.delete(instance)
|
||||
shutil.rmtree(self._task_dir(task_id), ignore_errors=True)
|
||||
return {"task_id": task_id, "deleted": True, "reason": reason}
|
||||
shutil.rmtree(self._workspace_dir(workspace_id), ignore_errors=True)
|
||||
return {"workspace_id": workspace_id, "deleted": True, "reason": reason}
|
||||
|
||||
def _validate_limits(self, *, vcpu_count: int, mem_mib: int, ttl_seconds: int) -> None:
|
||||
if not self.MIN_VCPUS <= vcpu_count <= self.MAX_VCPUS:
|
||||
|
|
@ -1368,27 +1400,27 @@ class VmManager:
|
|||
"metadata": instance.metadata,
|
||||
}
|
||||
|
||||
def _serialize_task(self, task: TaskRecord) -> dict[str, Any]:
|
||||
def _serialize_workspace(self, workspace: WorkspaceRecord) -> dict[str, Any]:
|
||||
return {
|
||||
"task_id": task.task_id,
|
||||
"environment": task.environment,
|
||||
"environment_version": task.metadata.get("environment_version"),
|
||||
"vcpu_count": task.vcpu_count,
|
||||
"mem_mib": task.mem_mib,
|
||||
"ttl_seconds": task.ttl_seconds,
|
||||
"created_at": task.created_at,
|
||||
"expires_at": task.expires_at,
|
||||
"state": task.state,
|
||||
"network_enabled": task.network is not None,
|
||||
"allow_host_compat": task.allow_host_compat,
|
||||
"guest_ip": task.network.guest_ip if task.network is not None else None,
|
||||
"tap_name": task.network.tap_name if task.network is not None else None,
|
||||
"execution_mode": task.metadata.get("execution_mode", "pending"),
|
||||
"workspace_path": TASK_WORKSPACE_GUEST_PATH,
|
||||
"workspace_seed": _task_workspace_seed_dict(task.workspace_seed),
|
||||
"command_count": task.command_count,
|
||||
"last_command": task.last_command,
|
||||
"metadata": task.metadata,
|
||||
"workspace_id": workspace.workspace_id,
|
||||
"environment": workspace.environment,
|
||||
"environment_version": workspace.metadata.get("environment_version"),
|
||||
"vcpu_count": workspace.vcpu_count,
|
||||
"mem_mib": workspace.mem_mib,
|
||||
"ttl_seconds": workspace.ttl_seconds,
|
||||
"created_at": workspace.created_at,
|
||||
"expires_at": workspace.expires_at,
|
||||
"state": workspace.state,
|
||||
"network_enabled": workspace.network is not None,
|
||||
"allow_host_compat": workspace.allow_host_compat,
|
||||
"guest_ip": workspace.network.guest_ip if workspace.network is not None else None,
|
||||
"tap_name": workspace.network.tap_name if workspace.network is not None else None,
|
||||
"execution_mode": workspace.metadata.get("execution_mode", "pending"),
|
||||
"workspace_path": WORKSPACE_GUEST_PATH,
|
||||
"workspace_seed": _workspace_seed_dict(workspace.workspace_seed),
|
||||
"command_count": workspace.command_count,
|
||||
"last_command": workspace.last_command,
|
||||
"metadata": workspace.metadata,
|
||||
}
|
||||
|
||||
def _require_guest_boot_or_opt_in(self, instance: VmInstance) -> None:
|
||||
|
|
@ -1481,14 +1513,14 @@ class VmManager:
|
|||
execution_mode = instance.metadata.get("execution_mode", "unknown")
|
||||
return exec_result, execution_mode
|
||||
|
||||
def _prepare_workspace_seed(self, source_path: str | Path | None) -> PreparedWorkspaceSeed:
|
||||
if source_path is None:
|
||||
def _prepare_workspace_seed(self, seed_path: str | Path | None) -> PreparedWorkspaceSeed:
|
||||
if seed_path is None:
|
||||
return PreparedWorkspaceSeed(mode="empty", source_path=None)
|
||||
resolved_source_path = Path(source_path).expanduser().resolve()
|
||||
resolved_source_path = Path(seed_path).expanduser().resolve()
|
||||
if not resolved_source_path.exists():
|
||||
raise ValueError(f"source_path {resolved_source_path} does not exist")
|
||||
raise ValueError(f"seed_path {resolved_source_path} does not exist")
|
||||
if resolved_source_path.is_dir():
|
||||
cleanup_dir = Path(tempfile.mkdtemp(prefix="pyro-task-seed-"))
|
||||
cleanup_dir = Path(tempfile.mkdtemp(prefix="pyro-workspace-seed-"))
|
||||
archive_path = cleanup_dir / "workspace-seed.tar"
|
||||
try:
|
||||
_write_directory_seed_archive(resolved_source_path, archive_path)
|
||||
|
|
@ -1509,7 +1541,7 @@ class VmManager:
|
|||
or not _is_supported_seed_archive(resolved_source_path)
|
||||
):
|
||||
raise ValueError(
|
||||
"source_path must be a directory or a .tar/.tar.gz/.tgz archive"
|
||||
"seed_path must be a directory or a .tar/.tar.gz/.tgz archive"
|
||||
)
|
||||
entry_count, bytes_written = _inspect_seed_archive(resolved_source_path)
|
||||
return PreparedWorkspaceSeed(
|
||||
|
|
@ -1520,94 +1552,102 @@ class VmManager:
|
|||
bytes_written=bytes_written,
|
||||
)
|
||||
|
||||
def _ensure_task_guest_seed_support(self, instance: VmInstance) -> None:
|
||||
def _ensure_workspace_guest_seed_support(self, instance: VmInstance) -> None:
|
||||
if self._runtime_paths is None or self._runtime_paths.guest_agent_path is None:
|
||||
raise RuntimeError("runtime bundle does not provide a guest agent for task seeding")
|
||||
raise RuntimeError(
|
||||
"runtime bundle does not provide a guest agent for workspace seeding"
|
||||
)
|
||||
rootfs_image = instance.metadata.get("rootfs_image")
|
||||
if rootfs_image is None or rootfs_image == "":
|
||||
raise RuntimeError("task rootfs image is unavailable for guest workspace seeding")
|
||||
raise RuntimeError("workspace rootfs image is unavailable for guest seeding")
|
||||
_patch_rootfs_guest_agent(Path(rootfs_image), self._runtime_paths.guest_agent_path)
|
||||
|
||||
def _task_dir(self, task_id: str) -> Path:
|
||||
return self._tasks_dir / task_id
|
||||
def _workspace_dir(self, workspace_id: str) -> Path:
|
||||
return self._workspaces_dir / workspace_id
|
||||
|
||||
def _task_runtime_dir(self, task_id: str) -> Path:
|
||||
return self._task_dir(task_id) / TASK_RUNTIME_DIRNAME
|
||||
def _workspace_runtime_dir(self, workspace_id: str) -> Path:
|
||||
return self._workspace_dir(workspace_id) / WORKSPACE_RUNTIME_DIRNAME
|
||||
|
||||
def _task_workspace_dir(self, task_id: str) -> Path:
|
||||
return self._task_dir(task_id) / TASK_WORKSPACE_DIRNAME
|
||||
def _workspace_host_dir(self, workspace_id: str) -> Path:
|
||||
return self._workspace_dir(workspace_id) / WORKSPACE_DIRNAME
|
||||
|
||||
def _task_commands_dir(self, task_id: str) -> Path:
|
||||
return self._task_dir(task_id) / TASK_COMMANDS_DIRNAME
|
||||
def _workspace_commands_dir(self, workspace_id: str) -> Path:
|
||||
return self._workspace_dir(workspace_id) / WORKSPACE_COMMANDS_DIRNAME
|
||||
|
||||
def _task_metadata_path(self, task_id: str) -> Path:
|
||||
return self._task_dir(task_id) / "task.json"
|
||||
def _workspace_metadata_path(self, workspace_id: str) -> Path:
|
||||
return self._workspace_dir(workspace_id) / "workspace.json"
|
||||
|
||||
def _count_tasks_locked(self) -> int:
|
||||
return sum(1 for _ in self._tasks_dir.glob("*/task.json"))
|
||||
def _count_workspaces_locked(self) -> int:
|
||||
return sum(1 for _ in self._workspaces_dir.glob("*/workspace.json"))
|
||||
|
||||
def _load_task_locked(self, task_id: str) -> TaskRecord:
|
||||
metadata_path = self._task_metadata_path(task_id)
|
||||
def _load_workspace_locked(self, workspace_id: str) -> WorkspaceRecord:
|
||||
metadata_path = self._workspace_metadata_path(workspace_id)
|
||||
if not metadata_path.exists():
|
||||
raise ValueError(f"task {task_id!r} does not exist")
|
||||
raise ValueError(f"workspace {workspace_id!r} does not exist")
|
||||
payload = json.loads(metadata_path.read_text(encoding="utf-8"))
|
||||
if not isinstance(payload, dict):
|
||||
raise RuntimeError(f"task record at {metadata_path} is invalid")
|
||||
return TaskRecord.from_payload(payload)
|
||||
raise RuntimeError(f"workspace record at {metadata_path} is invalid")
|
||||
return WorkspaceRecord.from_payload(payload)
|
||||
|
||||
def _save_task_locked(self, task: TaskRecord) -> None:
|
||||
metadata_path = self._task_metadata_path(task.task_id)
|
||||
def _save_workspace_locked(self, workspace: WorkspaceRecord) -> None:
|
||||
metadata_path = self._workspace_metadata_path(workspace.workspace_id)
|
||||
metadata_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
metadata_path.write_text(
|
||||
json.dumps(task.to_payload(), indent=2, sort_keys=True),
|
||||
json.dumps(workspace.to_payload(), indent=2, sort_keys=True),
|
||||
encoding="utf-8",
|
||||
)
|
||||
|
||||
def _reap_expired_tasks_locked(self, now: float) -> None:
|
||||
for metadata_path in list(self._tasks_dir.glob("*/task.json")):
|
||||
def _reap_expired_workspaces_locked(self, now: float) -> None:
|
||||
for metadata_path in list(self._workspaces_dir.glob("*/workspace.json")):
|
||||
payload = json.loads(metadata_path.read_text(encoding="utf-8"))
|
||||
if not isinstance(payload, dict):
|
||||
shutil.rmtree(metadata_path.parent, ignore_errors=True)
|
||||
continue
|
||||
task = TaskRecord.from_payload(payload)
|
||||
if task.expires_at > now:
|
||||
workspace = WorkspaceRecord.from_payload(payload)
|
||||
if workspace.expires_at > now:
|
||||
continue
|
||||
instance = task.to_instance(workdir=self._task_runtime_dir(task.task_id))
|
||||
if task.state == "started":
|
||||
instance = workspace.to_instance(
|
||||
workdir=self._workspace_runtime_dir(workspace.workspace_id)
|
||||
)
|
||||
if workspace.state == "started":
|
||||
self._backend.stop(instance)
|
||||
task.state = "stopped"
|
||||
workspace.state = "stopped"
|
||||
self._backend.delete(instance)
|
||||
shutil.rmtree(self._task_dir(task.task_id), ignore_errors=True)
|
||||
shutil.rmtree(self._workspace_dir(workspace.workspace_id), ignore_errors=True)
|
||||
|
||||
def _ensure_task_not_expired_locked(self, task: TaskRecord, now: float) -> None:
|
||||
if task.expires_at <= now:
|
||||
task_id = task.task_id
|
||||
self._reap_expired_tasks_locked(now)
|
||||
raise RuntimeError(f"task {task_id!r} expired and was automatically deleted")
|
||||
def _ensure_workspace_not_expired_locked(
|
||||
self,
|
||||
workspace: WorkspaceRecord,
|
||||
now: float,
|
||||
) -> None:
|
||||
if workspace.expires_at <= now:
|
||||
workspace_id = workspace.workspace_id
|
||||
self._reap_expired_workspaces_locked(now)
|
||||
raise RuntimeError(f"workspace {workspace_id!r} expired and was automatically deleted")
|
||||
|
||||
def _refresh_task_liveness_locked(self, task: TaskRecord) -> None:
|
||||
if task.state != "started":
|
||||
def _refresh_workspace_liveness_locked(self, workspace: WorkspaceRecord) -> None:
|
||||
if workspace.state != "started":
|
||||
return
|
||||
execution_mode = task.metadata.get("execution_mode")
|
||||
execution_mode = workspace.metadata.get("execution_mode")
|
||||
if execution_mode == "host_compat":
|
||||
return
|
||||
if _pid_is_running(task.firecracker_pid):
|
||||
if _pid_is_running(workspace.firecracker_pid):
|
||||
return
|
||||
task.state = "stopped"
|
||||
task.firecracker_pid = None
|
||||
task.last_error = "backing guest process is no longer running"
|
||||
workspace.state = "stopped"
|
||||
workspace.firecracker_pid = None
|
||||
workspace.last_error = "backing guest process is no longer running"
|
||||
|
||||
def _record_task_command_locked(
|
||||
def _record_workspace_command_locked(
|
||||
self,
|
||||
task: TaskRecord,
|
||||
workspace: WorkspaceRecord,
|
||||
*,
|
||||
command: str,
|
||||
exec_result: VmExecResult,
|
||||
execution_mode: str,
|
||||
cwd: str,
|
||||
) -> dict[str, Any]:
|
||||
sequence = task.command_count + 1
|
||||
commands_dir = self._task_commands_dir(task.task_id)
|
||||
sequence = workspace.command_count + 1
|
||||
commands_dir = self._workspace_commands_dir(workspace.workspace_id)
|
||||
commands_dir.mkdir(parents=True, exist_ok=True)
|
||||
base_name = f"{sequence:06d}"
|
||||
stdout_path = commands_dir / f"{base_name}.stdout"
|
||||
|
|
@ -1627,8 +1667,8 @@ class VmManager:
|
|||
"recorded_at": time.time(),
|
||||
}
|
||||
record_path.write_text(json.dumps(entry, indent=2, sort_keys=True), encoding="utf-8")
|
||||
task.command_count = sequence
|
||||
task.last_command = {
|
||||
workspace.command_count = sequence
|
||||
workspace.last_command = {
|
||||
"sequence": sequence,
|
||||
"command": command,
|
||||
"cwd": cwd,
|
||||
|
|
@ -1638,9 +1678,9 @@ class VmManager:
|
|||
}
|
||||
return entry
|
||||
|
||||
def _read_task_logs_locked(self, task_id: str) -> list[dict[str, Any]]:
|
||||
def _read_workspace_logs_locked(self, workspace_id: str) -> list[dict[str, Any]]:
|
||||
entries: list[dict[str, Any]] = []
|
||||
commands_dir = self._task_commands_dir(task_id)
|
||||
commands_dir = self._workspace_commands_dir(workspace_id)
|
||||
if not commands_dir.exists():
|
||||
return entries
|
||||
for record_path in sorted(commands_dir.glob("*.json")):
|
||||
|
|
|
|||
|
|
@ -48,8 +48,8 @@ def test_pyro_create_server_registers_vm_run(tmp_path: Path) -> None:
|
|||
tool_names = asyncio.run(_run())
|
||||
assert "vm_run" in tool_names
|
||||
assert "vm_create" in tool_names
|
||||
assert "task_create" in tool_names
|
||||
assert "task_sync_push" in tool_names
|
||||
assert "workspace_create" in tool_names
|
||||
assert "workspace_sync_push" in tool_names
|
||||
|
||||
|
||||
def test_pyro_vm_run_tool_executes(tmp_path: Path) -> None:
|
||||
|
|
@ -106,7 +106,7 @@ def test_pyro_create_vm_defaults_sizing_and_host_compat(tmp_path: Path) -> None:
|
|||
assert created["allow_host_compat"] is True
|
||||
|
||||
|
||||
def test_pyro_task_methods_delegate_to_manager(tmp_path: Path) -> None:
|
||||
def test_pyro_workspace_methods_delegate_to_manager(tmp_path: Path) -> None:
|
||||
pyro = Pyro(
|
||||
manager=VmManager(
|
||||
backend_name="mock",
|
||||
|
|
@ -119,20 +119,20 @@ def test_pyro_task_methods_delegate_to_manager(tmp_path: Path) -> None:
|
|||
source_dir.mkdir()
|
||||
(source_dir / "note.txt").write_text("ok\n", encoding="utf-8")
|
||||
|
||||
created = pyro.create_task(
|
||||
created = pyro.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
source_path=source_dir,
|
||||
seed_path=source_dir,
|
||||
)
|
||||
task_id = str(created["task_id"])
|
||||
workspace_id = str(created["workspace_id"])
|
||||
updated_dir = tmp_path / "updated"
|
||||
updated_dir.mkdir()
|
||||
(updated_dir / "more.txt").write_text("more\n", encoding="utf-8")
|
||||
synced = pyro.push_task_sync(task_id, updated_dir, dest="subdir")
|
||||
executed = pyro.exec_task(task_id, command="cat note.txt")
|
||||
status = pyro.status_task(task_id)
|
||||
logs = pyro.logs_task(task_id)
|
||||
deleted = pyro.delete_task(task_id)
|
||||
synced = pyro.push_workspace_sync(workspace_id, updated_dir, dest="subdir")
|
||||
executed = pyro.exec_workspace(workspace_id, command="cat note.txt")
|
||||
status = pyro.status_workspace(workspace_id)
|
||||
logs = pyro.logs_workspace(workspace_id)
|
||||
deleted = pyro.delete_workspace(workspace_id)
|
||||
|
||||
assert executed["stdout"] == "ok\n"
|
||||
assert created["workspace_seed"]["mode"] == "directory"
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ def test_cli_help_guides_first_run() -> None:
|
|||
assert "pyro env list" in help_text
|
||||
assert "pyro env pull debian:12" in help_text
|
||||
assert "pyro run debian:12 -- git --version" in help_text
|
||||
assert "pyro task sync push TASK_ID ./changes" in help_text
|
||||
assert "pyro workspace sync push WORKSPACE_ID ./changes" in help_text
|
||||
assert "Use `pyro mcp serve` only after the CLI validation path works." in help_text
|
||||
|
||||
|
||||
|
|
@ -60,28 +60,37 @@ def test_cli_subcommand_help_includes_examples_and_guidance() -> None:
|
|||
assert "Expose pyro tools over stdio for an MCP client." in mcp_help
|
||||
assert "Use this from an MCP client config after the CLI evaluation path works." in mcp_help
|
||||
|
||||
task_help = _subparser_choice(parser, "task").format_help()
|
||||
assert "pyro task create debian:12 --source-path ./repo" in task_help
|
||||
assert "pyro task sync push TASK_ID ./repo --dest src" in task_help
|
||||
assert "pyro task exec TASK_ID" in task_help
|
||||
workspace_help = _subparser_choice(parser, "workspace").format_help()
|
||||
assert "pyro workspace create debian:12 --seed-path ./repo" in workspace_help
|
||||
assert "pyro workspace sync push WORKSPACE_ID ./repo --dest src" in workspace_help
|
||||
assert "pyro workspace exec WORKSPACE_ID" in workspace_help
|
||||
|
||||
task_create_help = _subparser_choice(_subparser_choice(parser, "task"), "create").format_help()
|
||||
assert "--source-path" in task_create_help
|
||||
assert "seed into `/workspace`" in task_create_help
|
||||
|
||||
task_exec_help = _subparser_choice(_subparser_choice(parser, "task"), "exec").format_help()
|
||||
assert "persistent `/workspace`" in task_exec_help
|
||||
assert "pyro task exec TASK_ID -- cat note.txt" in task_exec_help
|
||||
|
||||
task_sync_help = _subparser_choice(_subparser_choice(parser, "task"), "sync").format_help()
|
||||
assert "Sync is non-atomic." in task_sync_help
|
||||
assert "pyro task sync push TASK_ID ./repo" in task_sync_help
|
||||
|
||||
task_sync_push_help = _subparser_choice(
|
||||
_subparser_choice(_subparser_choice(parser, "task"), "sync"), "push"
|
||||
workspace_create_help = _subparser_choice(
|
||||
_subparser_choice(parser, "workspace"),
|
||||
"create",
|
||||
).format_help()
|
||||
assert "--dest" in task_sync_push_help
|
||||
assert "Import host content into `/workspace`" in task_sync_push_help
|
||||
assert "--seed-path" in workspace_create_help
|
||||
assert "seed into `/workspace`" in workspace_create_help
|
||||
|
||||
workspace_exec_help = _subparser_choice(
|
||||
_subparser_choice(parser, "workspace"),
|
||||
"exec",
|
||||
).format_help()
|
||||
assert "persistent `/workspace`" in workspace_exec_help
|
||||
assert "pyro workspace exec WORKSPACE_ID -- cat note.txt" in workspace_exec_help
|
||||
|
||||
workspace_sync_help = _subparser_choice(
|
||||
_subparser_choice(parser, "workspace"),
|
||||
"sync",
|
||||
).format_help()
|
||||
assert "Sync is non-atomic." in workspace_sync_help
|
||||
assert "pyro workspace sync push WORKSPACE_ID ./repo" in workspace_sync_help
|
||||
|
||||
workspace_sync_push_help = _subparser_choice(
|
||||
_subparser_choice(_subparser_choice(parser, "workspace"), "sync"), "push"
|
||||
).format_help()
|
||||
assert "--dest" in workspace_sync_push_help
|
||||
assert "Import host content into `/workspace`" in workspace_sync_push_help
|
||||
|
||||
|
||||
def test_cli_run_prints_json(
|
||||
|
|
@ -344,32 +353,32 @@ def test_cli_requires_run_command() -> None:
|
|||
|
||||
def test_cli_requires_command_preserves_shell_argument_boundaries() -> None:
|
||||
command = cli._require_command(
|
||||
["--", "sh", "-lc", 'printf "hello from task\\n" > note.txt']
|
||||
["--", "sh", "-lc", 'printf "hello from workspace\\n" > note.txt']
|
||||
)
|
||||
assert command == 'sh -lc \'printf "hello from task\\n" > note.txt\''
|
||||
assert command == 'sh -lc \'printf "hello from workspace\\n" > note.txt\''
|
||||
|
||||
|
||||
def test_cli_task_create_prints_json(
|
||||
def test_cli_workspace_create_prints_json(
|
||||
monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
class StubPyro:
|
||||
def create_task(self, **kwargs: Any) -> dict[str, Any]:
|
||||
def create_workspace(self, **kwargs: Any) -> dict[str, Any]:
|
||||
assert kwargs["environment"] == "debian:12"
|
||||
assert kwargs["source_path"] == "./repo"
|
||||
return {"task_id": "task-123", "state": "started"}
|
||||
assert kwargs["seed_path"] == "./repo"
|
||||
return {"workspace_id": "workspace-123", "state": "started"}
|
||||
|
||||
class StubParser:
|
||||
def parse_args(self) -> argparse.Namespace:
|
||||
return argparse.Namespace(
|
||||
command="task",
|
||||
task_command="create",
|
||||
command="workspace",
|
||||
workspace_command="create",
|
||||
environment="debian:12",
|
||||
vcpu_count=1,
|
||||
mem_mib=1024,
|
||||
ttl_seconds=600,
|
||||
network=False,
|
||||
allow_host_compat=False,
|
||||
source_path="./repo",
|
||||
seed_path="./repo",
|
||||
json=True,
|
||||
)
|
||||
|
||||
|
|
@ -377,23 +386,23 @@ def test_cli_task_create_prints_json(
|
|||
monkeypatch.setattr(cli, "Pyro", StubPyro)
|
||||
cli.main()
|
||||
output = json.loads(capsys.readouterr().out)
|
||||
assert output["task_id"] == "task-123"
|
||||
assert output["workspace_id"] == "workspace-123"
|
||||
|
||||
|
||||
def test_cli_task_create_prints_human(
|
||||
def test_cli_workspace_create_prints_human(
|
||||
monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
class StubPyro:
|
||||
def create_task(self, **kwargs: Any) -> dict[str, Any]:
|
||||
def create_workspace(self, **kwargs: Any) -> dict[str, Any]:
|
||||
del kwargs
|
||||
return {
|
||||
"task_id": "task-123",
|
||||
"workspace_id": "workspace-123",
|
||||
"environment": "debian:12",
|
||||
"state": "started",
|
||||
"workspace_path": "/workspace",
|
||||
"workspace_seed": {
|
||||
"mode": "directory",
|
||||
"source_path": "/tmp/repo",
|
||||
"seed_path": "/tmp/repo",
|
||||
"destination": "/workspace",
|
||||
"entry_count": 1,
|
||||
"bytes_written": 6,
|
||||
|
|
@ -408,15 +417,15 @@ def test_cli_task_create_prints_human(
|
|||
class StubParser:
|
||||
def parse_args(self) -> argparse.Namespace:
|
||||
return argparse.Namespace(
|
||||
command="task",
|
||||
task_command="create",
|
||||
command="workspace",
|
||||
workspace_command="create",
|
||||
environment="debian:12",
|
||||
vcpu_count=1,
|
||||
mem_mib=1024,
|
||||
ttl_seconds=600,
|
||||
network=False,
|
||||
allow_host_compat=False,
|
||||
source_path="/tmp/repo",
|
||||
seed_path="/tmp/repo",
|
||||
json=False,
|
||||
)
|
||||
|
||||
|
|
@ -424,22 +433,28 @@ def test_cli_task_create_prints_human(
|
|||
monkeypatch.setattr(cli, "Pyro", StubPyro)
|
||||
cli.main()
|
||||
output = capsys.readouterr().out
|
||||
assert "Task: task-123" in output
|
||||
assert "Workspace ID: workspace-123" in output
|
||||
assert "Workspace: /workspace" in output
|
||||
assert "Workspace seed: directory from /tmp/repo" in output
|
||||
|
||||
|
||||
def test_cli_task_exec_prints_human_output(
|
||||
def test_cli_workspace_exec_prints_human_output(
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
capsys: pytest.CaptureFixture[str],
|
||||
) -> None:
|
||||
class StubPyro:
|
||||
def exec_task(self, task_id: str, *, command: str, timeout_seconds: int) -> dict[str, Any]:
|
||||
assert task_id == "task-123"
|
||||
def exec_workspace(
|
||||
self,
|
||||
workspace_id: str,
|
||||
*,
|
||||
command: str,
|
||||
timeout_seconds: int,
|
||||
) -> dict[str, Any]:
|
||||
assert workspace_id == "workspace-123"
|
||||
assert command == "cat note.txt"
|
||||
assert timeout_seconds == 30
|
||||
return {
|
||||
"task_id": task_id,
|
||||
"workspace_id": workspace_id,
|
||||
"sequence": 2,
|
||||
"cwd": "/workspace",
|
||||
"execution_mode": "guest_vsock",
|
||||
|
|
@ -452,9 +467,9 @@ def test_cli_task_exec_prints_human_output(
|
|||
class StubParser:
|
||||
def parse_args(self) -> argparse.Namespace:
|
||||
return argparse.Namespace(
|
||||
command="task",
|
||||
task_command="exec",
|
||||
task_id="task-123",
|
||||
command="workspace",
|
||||
workspace_command="exec",
|
||||
workspace_id="workspace-123",
|
||||
timeout_seconds=30,
|
||||
json=False,
|
||||
command_args=["--", "cat", "note.txt"],
|
||||
|
|
@ -465,19 +480,28 @@ def test_cli_task_exec_prints_human_output(
|
|||
cli.main()
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "hello\n"
|
||||
assert "[task-exec] task_id=task-123 sequence=2 cwd=/workspace" in captured.err
|
||||
assert (
|
||||
"[workspace-exec] workspace_id=workspace-123 sequence=2 cwd=/workspace"
|
||||
in captured.err
|
||||
)
|
||||
|
||||
|
||||
def test_cli_task_sync_push_prints_json(
|
||||
def test_cli_workspace_sync_push_prints_json(
|
||||
monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
class StubPyro:
|
||||
def push_task_sync(self, task_id: str, source_path: str, *, dest: str) -> dict[str, Any]:
|
||||
assert task_id == "task-123"
|
||||
def push_workspace_sync(
|
||||
self,
|
||||
workspace_id: str,
|
||||
source_path: str,
|
||||
*,
|
||||
dest: str,
|
||||
) -> dict[str, Any]:
|
||||
assert workspace_id == "workspace-123"
|
||||
assert source_path == "./repo"
|
||||
assert dest == "src"
|
||||
return {
|
||||
"task_id": task_id,
|
||||
"workspace_id": workspace_id,
|
||||
"execution_mode": "guest_vsock",
|
||||
"workspace_sync": {
|
||||
"mode": "directory",
|
||||
|
|
@ -491,10 +515,10 @@ def test_cli_task_sync_push_prints_json(
|
|||
class StubParser:
|
||||
def parse_args(self) -> argparse.Namespace:
|
||||
return argparse.Namespace(
|
||||
command="task",
|
||||
task_command="sync",
|
||||
task_sync_command="push",
|
||||
task_id="task-123",
|
||||
command="workspace",
|
||||
workspace_command="sync",
|
||||
workspace_sync_command="push",
|
||||
workspace_id="workspace-123",
|
||||
source_path="./repo",
|
||||
dest="src",
|
||||
json=True,
|
||||
|
|
@ -507,17 +531,23 @@ def test_cli_task_sync_push_prints_json(
|
|||
assert output["workspace_sync"]["destination"] == "/workspace/src"
|
||||
|
||||
|
||||
def test_cli_task_sync_push_prints_human(
|
||||
def test_cli_workspace_sync_push_prints_human(
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
capsys: pytest.CaptureFixture[str],
|
||||
) -> None:
|
||||
class StubPyro:
|
||||
def push_task_sync(self, task_id: str, source_path: str, *, dest: str) -> dict[str, Any]:
|
||||
assert task_id == "task-123"
|
||||
def push_workspace_sync(
|
||||
self,
|
||||
workspace_id: str,
|
||||
source_path: str,
|
||||
*,
|
||||
dest: str,
|
||||
) -> dict[str, Any]:
|
||||
assert workspace_id == "workspace-123"
|
||||
assert source_path == "./repo"
|
||||
assert dest == "/workspace"
|
||||
return {
|
||||
"task_id": task_id,
|
||||
"workspace_id": workspace_id,
|
||||
"execution_mode": "guest_vsock",
|
||||
"workspace_sync": {
|
||||
"mode": "directory",
|
||||
|
|
@ -531,10 +561,10 @@ def test_cli_task_sync_push_prints_human(
|
|||
class StubParser:
|
||||
def parse_args(self) -> argparse.Namespace:
|
||||
return argparse.Namespace(
|
||||
command="task",
|
||||
task_command="sync",
|
||||
task_sync_command="push",
|
||||
task_id="task-123",
|
||||
command="workspace",
|
||||
workspace_command="sync",
|
||||
workspace_sync_command="push",
|
||||
workspace_id="workspace-123",
|
||||
source_path="./repo",
|
||||
dest="/workspace",
|
||||
json=False,
|
||||
|
|
@ -544,22 +574,22 @@ def test_cli_task_sync_push_prints_human(
|
|||
monkeypatch.setattr(cli, "Pyro", StubPyro)
|
||||
cli.main()
|
||||
output = capsys.readouterr().out
|
||||
assert "[task-sync] task_id=task-123 mode=directory source=/tmp/repo" in output
|
||||
assert "[workspace-sync] workspace_id=workspace-123 mode=directory source=/tmp/repo" in output
|
||||
assert (
|
||||
"destination=/workspace entry_count=2 bytes_written=12 "
|
||||
"execution_mode=guest_vsock"
|
||||
) in output
|
||||
|
||||
|
||||
def test_cli_task_logs_and_delete_print_human(
|
||||
def test_cli_workspace_logs_and_delete_print_human(
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
capsys: pytest.CaptureFixture[str],
|
||||
) -> None:
|
||||
class StubPyro:
|
||||
def logs_task(self, task_id: str) -> dict[str, Any]:
|
||||
assert task_id == "task-123"
|
||||
def logs_workspace(self, workspace_id: str) -> dict[str, Any]:
|
||||
assert workspace_id == "workspace-123"
|
||||
return {
|
||||
"task_id": task_id,
|
||||
"workspace_id": workspace_id,
|
||||
"count": 1,
|
||||
"entries": [
|
||||
{
|
||||
|
|
@ -574,16 +604,16 @@ def test_cli_task_logs_and_delete_print_human(
|
|||
],
|
||||
}
|
||||
|
||||
def delete_task(self, task_id: str) -> dict[str, Any]:
|
||||
assert task_id == "task-123"
|
||||
return {"task_id": task_id, "deleted": True}
|
||||
def delete_workspace(self, workspace_id: str) -> dict[str, Any]:
|
||||
assert workspace_id == "workspace-123"
|
||||
return {"workspace_id": workspace_id, "deleted": True}
|
||||
|
||||
class LogsParser:
|
||||
def parse_args(self) -> argparse.Namespace:
|
||||
return argparse.Namespace(
|
||||
command="task",
|
||||
task_command="logs",
|
||||
task_id="task-123",
|
||||
command="workspace",
|
||||
workspace_command="logs",
|
||||
workspace_id="workspace-123",
|
||||
json=False,
|
||||
)
|
||||
|
||||
|
|
@ -594,9 +624,9 @@ def test_cli_task_logs_and_delete_print_human(
|
|||
class DeleteParser:
|
||||
def parse_args(self) -> argparse.Namespace:
|
||||
return argparse.Namespace(
|
||||
command="task",
|
||||
task_command="delete",
|
||||
task_id="task-123",
|
||||
command="workspace",
|
||||
workspace_command="delete",
|
||||
workspace_id="workspace-123",
|
||||
json=False,
|
||||
)
|
||||
|
||||
|
|
@ -605,28 +635,28 @@ def test_cli_task_logs_and_delete_print_human(
|
|||
|
||||
output = capsys.readouterr().out
|
||||
assert "#1 exit_code=0 duration_ms=2 cwd=/workspace" in output
|
||||
assert "Deleted task: task-123" in output
|
||||
assert "Deleted workspace: workspace-123" in output
|
||||
|
||||
|
||||
def test_cli_task_status_and_delete_print_json(
|
||||
def test_cli_workspace_status_and_delete_print_json(
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
capsys: pytest.CaptureFixture[str],
|
||||
) -> None:
|
||||
class StubPyro:
|
||||
def status_task(self, task_id: str) -> dict[str, Any]:
|
||||
assert task_id == "task-123"
|
||||
return {"task_id": task_id, "state": "started"}
|
||||
def status_workspace(self, workspace_id: str) -> dict[str, Any]:
|
||||
assert workspace_id == "workspace-123"
|
||||
return {"workspace_id": workspace_id, "state": "started"}
|
||||
|
||||
def delete_task(self, task_id: str) -> dict[str, Any]:
|
||||
assert task_id == "task-123"
|
||||
return {"task_id": task_id, "deleted": True}
|
||||
def delete_workspace(self, workspace_id: str) -> dict[str, Any]:
|
||||
assert workspace_id == "workspace-123"
|
||||
return {"workspace_id": workspace_id, "deleted": True}
|
||||
|
||||
class StatusParser:
|
||||
def parse_args(self) -> argparse.Namespace:
|
||||
return argparse.Namespace(
|
||||
command="task",
|
||||
task_command="status",
|
||||
task_id="task-123",
|
||||
command="workspace",
|
||||
workspace_command="status",
|
||||
workspace_id="workspace-123",
|
||||
json=True,
|
||||
)
|
||||
|
||||
|
|
@ -639,9 +669,9 @@ def test_cli_task_status_and_delete_print_json(
|
|||
class DeleteParser:
|
||||
def parse_args(self) -> argparse.Namespace:
|
||||
return argparse.Namespace(
|
||||
command="task",
|
||||
task_command="delete",
|
||||
task_id="task-123",
|
||||
command="workspace",
|
||||
workspace_command="delete",
|
||||
workspace_id="workspace-123",
|
||||
json=True,
|
||||
)
|
||||
|
||||
|
|
@ -651,20 +681,26 @@ def test_cli_task_status_and_delete_print_json(
|
|||
assert deleted["deleted"] is True
|
||||
|
||||
|
||||
def test_cli_task_exec_json_error_exits_nonzero(
|
||||
def test_cli_workspace_exec_json_error_exits_nonzero(
|
||||
monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
class StubPyro:
|
||||
def exec_task(self, task_id: str, *, command: str, timeout_seconds: int) -> dict[str, Any]:
|
||||
del task_id, command, timeout_seconds
|
||||
raise RuntimeError("task is unavailable")
|
||||
def exec_workspace(
|
||||
self,
|
||||
workspace_id: str,
|
||||
*,
|
||||
command: str,
|
||||
timeout_seconds: int,
|
||||
) -> dict[str, Any]:
|
||||
del workspace_id, command, timeout_seconds
|
||||
raise RuntimeError("workspace is unavailable")
|
||||
|
||||
class StubParser:
|
||||
def parse_args(self) -> argparse.Namespace:
|
||||
return argparse.Namespace(
|
||||
command="task",
|
||||
task_command="exec",
|
||||
task_id="task-123",
|
||||
command="workspace",
|
||||
workspace_command="exec",
|
||||
workspace_id="workspace-123",
|
||||
timeout_seconds=30,
|
||||
json=True,
|
||||
command_args=["--", "true"],
|
||||
|
|
|
|||
|
|
@ -17,10 +17,10 @@ from pyro_mcp.contract import (
|
|||
PUBLIC_CLI_DEMO_SUBCOMMANDS,
|
||||
PUBLIC_CLI_ENV_SUBCOMMANDS,
|
||||
PUBLIC_CLI_RUN_FLAGS,
|
||||
PUBLIC_CLI_TASK_CREATE_FLAGS,
|
||||
PUBLIC_CLI_TASK_SUBCOMMANDS,
|
||||
PUBLIC_CLI_TASK_SYNC_PUSH_FLAGS,
|
||||
PUBLIC_CLI_TASK_SYNC_SUBCOMMANDS,
|
||||
PUBLIC_CLI_WORKSPACE_CREATE_FLAGS,
|
||||
PUBLIC_CLI_WORKSPACE_SUBCOMMANDS,
|
||||
PUBLIC_CLI_WORKSPACE_SYNC_PUSH_FLAGS,
|
||||
PUBLIC_CLI_WORKSPACE_SYNC_SUBCOMMANDS,
|
||||
PUBLIC_MCP_TOOLS,
|
||||
PUBLIC_SDK_METHODS,
|
||||
)
|
||||
|
|
@ -67,22 +67,25 @@ def test_public_cli_help_lists_commands_and_run_flags() -> None:
|
|||
for subcommand_name in PUBLIC_CLI_ENV_SUBCOMMANDS:
|
||||
assert subcommand_name in env_help_text
|
||||
|
||||
task_help_text = _subparser_choice(parser, "task").format_help()
|
||||
for subcommand_name in PUBLIC_CLI_TASK_SUBCOMMANDS:
|
||||
assert subcommand_name in task_help_text
|
||||
task_create_help_text = _subparser_choice(
|
||||
_subparser_choice(parser, "task"), "create"
|
||||
workspace_help_text = _subparser_choice(parser, "workspace").format_help()
|
||||
for subcommand_name in PUBLIC_CLI_WORKSPACE_SUBCOMMANDS:
|
||||
assert subcommand_name in workspace_help_text
|
||||
workspace_create_help_text = _subparser_choice(
|
||||
_subparser_choice(parser, "workspace"), "create"
|
||||
).format_help()
|
||||
for flag in PUBLIC_CLI_TASK_CREATE_FLAGS:
|
||||
assert flag in task_create_help_text
|
||||
task_sync_help_text = _subparser_choice(_subparser_choice(parser, "task"), "sync").format_help()
|
||||
for subcommand_name in PUBLIC_CLI_TASK_SYNC_SUBCOMMANDS:
|
||||
assert subcommand_name in task_sync_help_text
|
||||
task_sync_push_help_text = _subparser_choice(
|
||||
_subparser_choice(_subparser_choice(parser, "task"), "sync"), "push"
|
||||
for flag in PUBLIC_CLI_WORKSPACE_CREATE_FLAGS:
|
||||
assert flag in workspace_create_help_text
|
||||
workspace_sync_help_text = _subparser_choice(
|
||||
_subparser_choice(parser, "workspace"),
|
||||
"sync",
|
||||
).format_help()
|
||||
for flag in PUBLIC_CLI_TASK_SYNC_PUSH_FLAGS:
|
||||
assert flag in task_sync_push_help_text
|
||||
for subcommand_name in PUBLIC_CLI_WORKSPACE_SYNC_SUBCOMMANDS:
|
||||
assert subcommand_name in workspace_sync_help_text
|
||||
workspace_sync_push_help_text = _subparser_choice(
|
||||
_subparser_choice(_subparser_choice(parser, "workspace"), "sync"), "push"
|
||||
).format_help()
|
||||
for flag in PUBLIC_CLI_WORKSPACE_SYNC_PUSH_FLAGS:
|
||||
assert flag in workspace_sync_push_help_text
|
||||
|
||||
demo_help_text = _subparser_choice(parser, "demo").format_help()
|
||||
for subcommand_name in PUBLIC_CLI_DEMO_SUBCOMMANDS:
|
||||
|
|
|
|||
|
|
@ -31,9 +31,9 @@ def test_create_server_registers_vm_tools(tmp_path: Path) -> None:
|
|||
assert "vm_network_info" in tool_names
|
||||
assert "vm_run" in tool_names
|
||||
assert "vm_status" in tool_names
|
||||
assert "task_create" in tool_names
|
||||
assert "task_logs" in tool_names
|
||||
assert "task_sync_push" in tool_names
|
||||
assert "workspace_create" in tool_names
|
||||
assert "workspace_logs" in tool_names
|
||||
assert "workspace_sync_push" in tool_names
|
||||
|
||||
|
||||
def test_vm_run_round_trip(tmp_path: Path) -> None:
|
||||
|
|
@ -166,7 +166,7 @@ def test_server_main_runs_stdio_transport(monkeypatch: pytest.MonkeyPatch) -> No
|
|||
assert called == {"transport": "stdio"}
|
||||
|
||||
|
||||
def test_task_tools_round_trip(tmp_path: Path) -> None:
|
||||
def test_workspace_tools_round_trip(tmp_path: Path) -> None:
|
||||
manager = VmManager(
|
||||
backend_name="mock",
|
||||
base_dir=tmp_path / "vms",
|
||||
|
|
@ -194,23 +194,23 @@ def test_task_tools_round_trip(tmp_path: Path) -> None:
|
|||
server = create_server(manager=manager)
|
||||
created = _extract_structured(
|
||||
await server.call_tool(
|
||||
"task_create",
|
||||
"workspace_create",
|
||||
{
|
||||
"environment": "debian:12-base",
|
||||
"allow_host_compat": True,
|
||||
"source_path": str(source_dir),
|
||||
"seed_path": str(source_dir),
|
||||
},
|
||||
)
|
||||
)
|
||||
task_id = str(created["task_id"])
|
||||
workspace_id = str(created["workspace_id"])
|
||||
update_dir = tmp_path / "update"
|
||||
update_dir.mkdir()
|
||||
(update_dir / "more.txt").write_text("more\n", encoding="utf-8")
|
||||
synced = _extract_structured(
|
||||
await server.call_tool(
|
||||
"task_sync_push",
|
||||
"workspace_sync_push",
|
||||
{
|
||||
"task_id": task_id,
|
||||
"workspace_id": workspace_id,
|
||||
"source_path": str(update_dir),
|
||||
"dest": "subdir",
|
||||
},
|
||||
|
|
@ -218,15 +218,19 @@ def test_task_tools_round_trip(tmp_path: Path) -> None:
|
|||
)
|
||||
executed = _extract_structured(
|
||||
await server.call_tool(
|
||||
"task_exec",
|
||||
"workspace_exec",
|
||||
{
|
||||
"task_id": task_id,
|
||||
"workspace_id": workspace_id,
|
||||
"command": "cat subdir/more.txt",
|
||||
},
|
||||
)
|
||||
)
|
||||
logs = _extract_structured(await server.call_tool("task_logs", {"task_id": task_id}))
|
||||
deleted = _extract_structured(await server.call_tool("task_delete", {"task_id": task_id}))
|
||||
logs = _extract_structured(
|
||||
await server.call_tool("workspace_logs", {"workspace_id": workspace_id})
|
||||
)
|
||||
deleted = _extract_structured(
|
||||
await server.call_tool("workspace_delete", {"workspace_id": workspace_id})
|
||||
)
|
||||
return created, synced, executed, logs, deleted
|
||||
|
||||
created, synced, executed, logs, deleted = asyncio.run(_run())
|
||||
|
|
|
|||
|
|
@ -267,48 +267,48 @@ def test_vm_manager_run_vm(tmp_path: Path) -> None:
|
|||
assert str(result["stdout"]) == "ok\n"
|
||||
|
||||
|
||||
def test_task_lifecycle_and_logs(tmp_path: Path) -> None:
|
||||
def test_workspace_lifecycle_and_logs(tmp_path: Path) -> None:
|
||||
manager = VmManager(
|
||||
backend_name="mock",
|
||||
base_dir=tmp_path / "vms",
|
||||
network_manager=TapNetworkManager(enabled=False),
|
||||
)
|
||||
|
||||
created = manager.create_task(
|
||||
created = manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
)
|
||||
task_id = str(created["task_id"])
|
||||
workspace_id = str(created["workspace_id"])
|
||||
assert created["state"] == "started"
|
||||
assert created["workspace_path"] == "/workspace"
|
||||
|
||||
first = manager.exec_task(
|
||||
task_id,
|
||||
first = manager.exec_workspace(
|
||||
workspace_id,
|
||||
command="printf 'hello\\n' > note.txt",
|
||||
timeout_seconds=30,
|
||||
)
|
||||
second = manager.exec_task(task_id, command="cat note.txt", timeout_seconds=30)
|
||||
second = manager.exec_workspace(workspace_id, command="cat note.txt", timeout_seconds=30)
|
||||
|
||||
assert first["exit_code"] == 0
|
||||
assert second["stdout"] == "hello\n"
|
||||
|
||||
status = manager.status_task(task_id)
|
||||
status = manager.status_workspace(workspace_id)
|
||||
assert status["command_count"] == 2
|
||||
assert status["last_command"] is not None
|
||||
|
||||
logs = manager.logs_task(task_id)
|
||||
logs = manager.logs_workspace(workspace_id)
|
||||
assert logs["count"] == 2
|
||||
entries = logs["entries"]
|
||||
assert isinstance(entries, list)
|
||||
assert entries[1]["stdout"] == "hello\n"
|
||||
|
||||
deleted = manager.delete_task(task_id)
|
||||
deleted = manager.delete_workspace(workspace_id)
|
||||
assert deleted["deleted"] is True
|
||||
with pytest.raises(ValueError, match="does not exist"):
|
||||
manager.status_task(task_id)
|
||||
manager.status_workspace(workspace_id)
|
||||
|
||||
|
||||
def test_task_create_seeds_directory_source_into_workspace(tmp_path: Path) -> None:
|
||||
def test_workspace_create_seeds_directory_source_into_workspace(tmp_path: Path) -> None:
|
||||
source_dir = tmp_path / "seed"
|
||||
source_dir.mkdir()
|
||||
(source_dir / "note.txt").write_text("hello\n", encoding="utf-8")
|
||||
|
|
@ -319,25 +319,25 @@ def test_task_create_seeds_directory_source_into_workspace(tmp_path: Path) -> No
|
|||
network_manager=TapNetworkManager(enabled=False),
|
||||
)
|
||||
|
||||
created = manager.create_task(
|
||||
created = manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
source_path=source_dir,
|
||||
seed_path=source_dir,
|
||||
)
|
||||
task_id = str(created["task_id"])
|
||||
workspace_id = str(created["workspace_id"])
|
||||
|
||||
workspace_seed = created["workspace_seed"]
|
||||
assert workspace_seed["mode"] == "directory"
|
||||
assert workspace_seed["source_path"] == str(source_dir.resolve())
|
||||
executed = manager.exec_task(task_id, command="cat note.txt", timeout_seconds=30)
|
||||
assert workspace_seed["seed_path"] == str(source_dir.resolve())
|
||||
executed = manager.exec_workspace(workspace_id, command="cat note.txt", timeout_seconds=30)
|
||||
assert executed["stdout"] == "hello\n"
|
||||
|
||||
status = manager.status_task(task_id)
|
||||
status = manager.status_workspace(workspace_id)
|
||||
assert status["workspace_seed"]["mode"] == "directory"
|
||||
assert status["workspace_seed"]["source_path"] == str(source_dir.resolve())
|
||||
assert status["workspace_seed"]["seed_path"] == str(source_dir.resolve())
|
||||
|
||||
|
||||
def test_task_create_seeds_tar_archive_into_workspace(tmp_path: Path) -> None:
|
||||
def test_workspace_create_seeds_tar_archive_into_workspace(tmp_path: Path) -> None:
|
||||
archive_path = tmp_path / "seed.tgz"
|
||||
nested_dir = tmp_path / "src"
|
||||
nested_dir.mkdir()
|
||||
|
|
@ -351,19 +351,19 @@ def test_task_create_seeds_tar_archive_into_workspace(tmp_path: Path) -> None:
|
|||
network_manager=TapNetworkManager(enabled=False),
|
||||
)
|
||||
|
||||
created = manager.create_task(
|
||||
created = manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
source_path=archive_path,
|
||||
seed_path=archive_path,
|
||||
)
|
||||
task_id = str(created["task_id"])
|
||||
workspace_id = str(created["workspace_id"])
|
||||
|
||||
assert created["workspace_seed"]["mode"] == "tar_archive"
|
||||
executed = manager.exec_task(task_id, command="cat note.txt", timeout_seconds=30)
|
||||
executed = manager.exec_workspace(workspace_id, command="cat note.txt", timeout_seconds=30)
|
||||
assert executed["stdout"] == "archive\n"
|
||||
|
||||
|
||||
def test_task_sync_push_updates_started_workspace(tmp_path: Path) -> None:
|
||||
def test_workspace_sync_push_updates_started_workspace(tmp_path: Path) -> None:
|
||||
source_dir = tmp_path / "seed"
|
||||
source_dir.mkdir()
|
||||
(source_dir / "note.txt").write_text("hello\n", encoding="utf-8")
|
||||
|
|
@ -377,26 +377,30 @@ def test_task_sync_push_updates_started_workspace(tmp_path: Path) -> None:
|
|||
network_manager=TapNetworkManager(enabled=False),
|
||||
)
|
||||
|
||||
created = manager.create_task(
|
||||
created = manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
source_path=source_dir,
|
||||
seed_path=source_dir,
|
||||
)
|
||||
task_id = str(created["task_id"])
|
||||
synced = manager.push_task_sync(task_id, source_path=update_dir, dest="subdir")
|
||||
workspace_id = str(created["workspace_id"])
|
||||
synced = manager.push_workspace_sync(workspace_id, source_path=update_dir, dest="subdir")
|
||||
|
||||
assert synced["workspace_sync"]["mode"] == "directory"
|
||||
assert synced["workspace_sync"]["destination"] == "/workspace/subdir"
|
||||
|
||||
executed = manager.exec_task(task_id, command="cat subdir/more.txt", timeout_seconds=30)
|
||||
executed = manager.exec_workspace(
|
||||
workspace_id,
|
||||
command="cat subdir/more.txt",
|
||||
timeout_seconds=30,
|
||||
)
|
||||
assert executed["stdout"] == "more\n"
|
||||
|
||||
status = manager.status_task(task_id)
|
||||
status = manager.status_workspace(workspace_id)
|
||||
assert status["command_count"] == 1
|
||||
assert status["workspace_seed"]["mode"] == "directory"
|
||||
|
||||
|
||||
def test_task_sync_push_requires_started_task(tmp_path: Path) -> None:
|
||||
def test_workspace_sync_push_requires_started_workspace(tmp_path: Path) -> None:
|
||||
source_dir = tmp_path / "seed"
|
||||
source_dir.mkdir()
|
||||
(source_dir / "note.txt").write_text("hello\n", encoding="utf-8")
|
||||
|
|
@ -410,22 +414,25 @@ def test_task_sync_push_requires_started_task(tmp_path: Path) -> None:
|
|||
network_manager=TapNetworkManager(enabled=False),
|
||||
)
|
||||
|
||||
created = manager.create_task(
|
||||
created = manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
source_path=source_dir,
|
||||
seed_path=source_dir,
|
||||
)
|
||||
task_id = str(created["task_id"])
|
||||
task_path = tmp_path / "vms" / "tasks" / task_id / "task.json"
|
||||
payload = json.loads(task_path.read_text(encoding="utf-8"))
|
||||
workspace_id = str(created["workspace_id"])
|
||||
workspace_path = tmp_path / "vms" / "workspaces" / workspace_id / "workspace.json"
|
||||
payload = json.loads(workspace_path.read_text(encoding="utf-8"))
|
||||
payload["state"] = "stopped"
|
||||
task_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
|
||||
workspace_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
|
||||
|
||||
with pytest.raises(RuntimeError, match="must be in 'started' state before task_sync_push"):
|
||||
manager.push_task_sync(task_id, source_path=update_dir)
|
||||
with pytest.raises(
|
||||
RuntimeError,
|
||||
match="must be in 'started' state before workspace_sync_push",
|
||||
):
|
||||
manager.push_workspace_sync(workspace_id, source_path=update_dir)
|
||||
|
||||
|
||||
def test_task_sync_push_rejects_destination_outside_workspace(tmp_path: Path) -> None:
|
||||
def test_workspace_sync_push_rejects_destination_outside_workspace(tmp_path: Path) -> None:
|
||||
source_dir = tmp_path / "seed"
|
||||
source_dir.mkdir()
|
||||
(source_dir / "note.txt").write_text("hello\n", encoding="utf-8")
|
||||
|
|
@ -436,18 +443,18 @@ def test_task_sync_push_rejects_destination_outside_workspace(tmp_path: Path) ->
|
|||
network_manager=TapNetworkManager(enabled=False),
|
||||
)
|
||||
|
||||
task_id = str(
|
||||
manager.create_task(
|
||||
workspace_id = str(
|
||||
manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
)["task_id"]
|
||||
)["workspace_id"]
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match="workspace destination must stay inside /workspace"):
|
||||
manager.push_task_sync(task_id, source_path=source_dir, dest="../escape")
|
||||
manager.push_workspace_sync(workspace_id, source_path=source_dir, dest="../escape")
|
||||
|
||||
|
||||
def test_task_create_rejects_unsafe_seed_archive(tmp_path: Path) -> None:
|
||||
def test_workspace_create_rejects_unsafe_seed_archive(tmp_path: Path) -> None:
|
||||
archive_path = tmp_path / "bad.tgz"
|
||||
with tarfile.open(archive_path, "w:gz") as archive:
|
||||
payload = b"bad\n"
|
||||
|
|
@ -462,15 +469,15 @@ def test_task_create_rejects_unsafe_seed_archive(tmp_path: Path) -> None:
|
|||
)
|
||||
|
||||
with pytest.raises(RuntimeError, match="unsafe archive member path"):
|
||||
manager.create_task(
|
||||
manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
source_path=archive_path,
|
||||
seed_path=archive_path,
|
||||
)
|
||||
assert list((tmp_path / "vms" / "tasks").iterdir()) == []
|
||||
assert list((tmp_path / "vms" / "workspaces").iterdir()) == []
|
||||
|
||||
|
||||
def test_task_create_rejects_archive_that_writes_through_symlink(tmp_path: Path) -> None:
|
||||
def test_workspace_create_rejects_archive_that_writes_through_symlink(tmp_path: Path) -> None:
|
||||
archive_path = tmp_path / "bad-symlink.tgz"
|
||||
with tarfile.open(archive_path, "w:gz") as archive:
|
||||
symlink_info = tarfile.TarInfo(name="linked")
|
||||
|
|
@ -490,14 +497,14 @@ def test_task_create_rejects_archive_that_writes_through_symlink(tmp_path: Path)
|
|||
)
|
||||
|
||||
with pytest.raises(RuntimeError, match="traverse through a symlinked path"):
|
||||
manager.create_task(
|
||||
manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
source_path=archive_path,
|
||||
seed_path=archive_path,
|
||||
)
|
||||
|
||||
|
||||
def test_task_create_cleans_up_on_seed_failure(
|
||||
def test_workspace_create_cleans_up_on_seed_failure(
|
||||
tmp_path: Path, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
source_dir = tmp_path / "seed"
|
||||
|
|
@ -517,27 +524,27 @@ def test_task_create_cleans_up_on_seed_failure(
|
|||
monkeypatch.setattr(manager._backend, "import_archive", _boom) # noqa: SLF001
|
||||
|
||||
with pytest.raises(RuntimeError, match="seed import failed"):
|
||||
manager.create_task(
|
||||
manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
source_path=source_dir,
|
||||
seed_path=source_dir,
|
||||
)
|
||||
|
||||
assert list((tmp_path / "vms" / "tasks").iterdir()) == []
|
||||
assert list((tmp_path / "vms" / "workspaces").iterdir()) == []
|
||||
|
||||
|
||||
def test_task_rehydrates_across_manager_processes(tmp_path: Path) -> None:
|
||||
def test_workspace_rehydrates_across_manager_processes(tmp_path: Path) -> None:
|
||||
base_dir = tmp_path / "vms"
|
||||
manager = VmManager(
|
||||
backend_name="mock",
|
||||
base_dir=base_dir,
|
||||
network_manager=TapNetworkManager(enabled=False),
|
||||
)
|
||||
task_id = str(
|
||||
manager.create_task(
|
||||
workspace_id = str(
|
||||
manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
)["task_id"]
|
||||
)["workspace_id"]
|
||||
)
|
||||
|
||||
other = VmManager(
|
||||
|
|
@ -545,33 +552,33 @@ def test_task_rehydrates_across_manager_processes(tmp_path: Path) -> None:
|
|||
base_dir=base_dir,
|
||||
network_manager=TapNetworkManager(enabled=False),
|
||||
)
|
||||
executed = other.exec_task(task_id, command="printf 'ok\\n'", timeout_seconds=30)
|
||||
executed = other.exec_workspace(workspace_id, command="printf 'ok\\n'", timeout_seconds=30)
|
||||
assert executed["exit_code"] == 0
|
||||
assert executed["stdout"] == "ok\n"
|
||||
|
||||
logs = other.logs_task(task_id)
|
||||
logs = other.logs_workspace(workspace_id)
|
||||
assert logs["count"] == 1
|
||||
|
||||
|
||||
def test_task_requires_started_state(tmp_path: Path) -> None:
|
||||
def test_workspace_requires_started_state(tmp_path: Path) -> None:
|
||||
manager = VmManager(
|
||||
backend_name="mock",
|
||||
base_dir=tmp_path / "vms",
|
||||
network_manager=TapNetworkManager(enabled=False),
|
||||
)
|
||||
task_id = str(
|
||||
manager.create_task(
|
||||
workspace_id = str(
|
||||
manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
)["task_id"]
|
||||
)["workspace_id"]
|
||||
)
|
||||
task_dir = tmp_path / "vms" / "tasks" / task_id / "task.json"
|
||||
payload = json.loads(task_dir.read_text(encoding="utf-8"))
|
||||
workspace_path = tmp_path / "vms" / "workspaces" / workspace_id / "workspace.json"
|
||||
payload = json.loads(workspace_path.read_text(encoding="utf-8"))
|
||||
payload["state"] = "stopped"
|
||||
task_dir.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
|
||||
workspace_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
|
||||
|
||||
with pytest.raises(RuntimeError, match="must be in 'started' state"):
|
||||
manager.exec_task(task_id, command="true", timeout_seconds=30)
|
||||
manager.exec_workspace(workspace_id, command="true", timeout_seconds=30)
|
||||
|
||||
|
||||
def test_vm_manager_firecracker_backend_path(
|
||||
|
|
@ -708,7 +715,7 @@ def test_copy_rootfs_falls_back_to_copy2(
|
|||
assert dest.read_text(encoding="utf-8") == "payload"
|
||||
|
||||
|
||||
def test_task_create_cleans_up_on_start_failure(
|
||||
def test_workspace_create_cleans_up_on_start_failure(
|
||||
tmp_path: Path, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
manager = VmManager(
|
||||
|
|
@ -724,9 +731,9 @@ def test_task_create_cleans_up_on_start_failure(
|
|||
monkeypatch.setattr(manager._backend, "start", _boom) # noqa: SLF001
|
||||
|
||||
with pytest.raises(RuntimeError, match="boom"):
|
||||
manager.create_task(environment="debian:12-base", allow_host_compat=True)
|
||||
manager.create_workspace(environment="debian:12-base", allow_host_compat=True)
|
||||
|
||||
assert list((tmp_path / "vms" / "tasks").iterdir()) == []
|
||||
assert list((tmp_path / "vms" / "workspaces").iterdir()) == []
|
||||
|
||||
|
||||
def test_exec_instance_wraps_guest_workspace_command(tmp_path: Path) -> None:
|
||||
|
|
@ -786,53 +793,53 @@ def test_exec_instance_wraps_guest_workspace_command(tmp_path: Path) -> None:
|
|||
assert captured["workdir"] is None
|
||||
|
||||
|
||||
def test_status_task_marks_dead_backing_process_stopped(tmp_path: Path) -> None:
|
||||
def test_status_workspace_marks_dead_backing_process_stopped(tmp_path: Path) -> None:
|
||||
manager = VmManager(
|
||||
backend_name="mock",
|
||||
base_dir=tmp_path / "vms",
|
||||
network_manager=TapNetworkManager(enabled=False),
|
||||
)
|
||||
task_id = str(
|
||||
manager.create_task(
|
||||
workspace_id = str(
|
||||
manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
)["task_id"]
|
||||
)["workspace_id"]
|
||||
)
|
||||
task_path = tmp_path / "vms" / "tasks" / task_id / "task.json"
|
||||
payload = json.loads(task_path.read_text(encoding="utf-8"))
|
||||
workspace_path = tmp_path / "vms" / "workspaces" / workspace_id / "workspace.json"
|
||||
payload = json.loads(workspace_path.read_text(encoding="utf-8"))
|
||||
payload["metadata"]["execution_mode"] = "guest_vsock"
|
||||
payload["firecracker_pid"] = 999999
|
||||
task_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
|
||||
workspace_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
|
||||
|
||||
status = manager.status_task(task_id)
|
||||
status = manager.status_workspace(workspace_id)
|
||||
assert status["state"] == "stopped"
|
||||
updated_payload = json.loads(task_path.read_text(encoding="utf-8"))
|
||||
updated_payload = json.loads(workspace_path.read_text(encoding="utf-8"))
|
||||
assert "backing guest process" in str(updated_payload.get("last_error", ""))
|
||||
|
||||
|
||||
def test_reap_expired_tasks_removes_invalid_and_expired_records(tmp_path: Path) -> None:
|
||||
def test_reap_expired_workspaces_removes_invalid_and_expired_records(tmp_path: Path) -> None:
|
||||
manager = VmManager(
|
||||
backend_name="mock",
|
||||
base_dir=tmp_path / "vms",
|
||||
network_manager=TapNetworkManager(enabled=False),
|
||||
)
|
||||
invalid_dir = tmp_path / "vms" / "tasks" / "invalid"
|
||||
invalid_dir = tmp_path / "vms" / "workspaces" / "invalid"
|
||||
invalid_dir.mkdir(parents=True)
|
||||
(invalid_dir / "task.json").write_text("[]", encoding="utf-8")
|
||||
(invalid_dir / "workspace.json").write_text("[]", encoding="utf-8")
|
||||
|
||||
task_id = str(
|
||||
manager.create_task(
|
||||
workspace_id = str(
|
||||
manager.create_workspace(
|
||||
environment="debian:12-base",
|
||||
allow_host_compat=True,
|
||||
)["task_id"]
|
||||
)["workspace_id"]
|
||||
)
|
||||
task_path = tmp_path / "vms" / "tasks" / task_id / "task.json"
|
||||
payload = json.loads(task_path.read_text(encoding="utf-8"))
|
||||
workspace_path = tmp_path / "vms" / "workspaces" / workspace_id / "workspace.json"
|
||||
payload = json.loads(workspace_path.read_text(encoding="utf-8"))
|
||||
payload["expires_at"] = 0.0
|
||||
task_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
|
||||
workspace_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
|
||||
|
||||
with manager._lock: # noqa: SLF001
|
||||
manager._reap_expired_tasks_locked(time.time()) # noqa: SLF001
|
||||
manager._reap_expired_workspaces_locked(time.time()) # noqa: SLF001
|
||||
|
||||
assert not invalid_dir.exists()
|
||||
assert not (tmp_path / "vms" / "tasks" / task_id).exists()
|
||||
assert not (tmp_path / "vms" / "workspaces" / workspace_id).exists()
|
||||
|
|
|
|||
2
uv.lock
generated
2
uv.lock
generated
|
|
@ -706,7 +706,7 @@ crypto = [
|
|||
|
||||
[[package]]
|
||||
name = "pyro-mcp"
|
||||
version = "2.3.0"
|
||||
version = "2.4.0"
|
||||
source = { editable = "." }
|
||||
dependencies = [
|
||||
{ name = "mcp" },
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue