Add MCP tool profiles for workspace chat flows
Expose stable MCP/server tool profiles so chat hosts can start narrow and widen only when needed. This adds vm-run, workspace-core, and workspace-full across the CLI serve path, Pyro.create_server(), and the package-level create_server() factory while keeping workspace-full as the default. Register profile-specific tool sets from one shared contract mapping, and narrow the workspace-core schemas so secrets, network policy, shells, services, snapshots, and disk tools do not leak into the default persistent chat profile. The full surface remains available unchanged under workspace-full. Refresh the public docs and examples around the profile progression, add a canonical OpenAI Responses workspace-core example, mark the 3.4.0 roadmap milestone done, and verify with uv lock, UV_CACHE_DIR=.uv-cache make check, UV_CACHE_DIR=.uv-cache make dist-check, and a real guest-backed workspace-core smoke test covering create, file write, exec, diff, export, reset, and delete.
This commit is contained in:
parent
446f7fce04
commit
eecfd7a7d7
23 changed files with 984 additions and 511 deletions
|
|
@ -2,7 +2,7 @@
|
|||
"mcpServers": {
|
||||
"pyro": {
|
||||
"command": "uvx",
|
||||
"args": ["--from", "pyro-mcp", "pyro", "mcp", "serve"]
|
||||
"args": ["--from", "pyro-mcp", "pyro", "mcp", "serve", "--profile", "workspace-core"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
"mcpServers": {
|
||||
"pyro": {
|
||||
"command": "uvx",
|
||||
"args": ["--from", "pyro-mcp", "pyro", "mcp", "serve"]
|
||||
"args": ["--from", "pyro-mcp", "pyro", "mcp", "serve", "--profile", "workspace-core"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ Generic stdio MCP configuration using `uvx`:
|
|||
"mcpServers": {
|
||||
"pyro": {
|
||||
"command": "uvx",
|
||||
"args": ["--from", "pyro-mcp", "pyro", "mcp", "serve"]
|
||||
"args": ["--from", "pyro-mcp", "pyro", "mcp", "serve", "--profile", "workspace-core"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -22,15 +22,21 @@ If `pyro-mcp` is already installed locally, the same server can be configured wi
|
|||
"mcpServers": {
|
||||
"pyro": {
|
||||
"command": "pyro",
|
||||
"args": ["mcp", "serve"]
|
||||
"args": ["mcp", "serve", "--profile", "workspace-core"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Primary tool for most agents:
|
||||
Profile progression:
|
||||
|
||||
- `vm_run`
|
||||
- `vm-run`: expose only `vm_run`
|
||||
- `workspace-core`: the default persistent chat profile
|
||||
- `workspace-full`: shells, services, snapshots, secrets, network policy, and disk tools
|
||||
|
||||
Primary profile for most agents:
|
||||
|
||||
- `workspace-core`
|
||||
|
||||
Use lifecycle tools only when the agent needs persistent VM state across multiple tool calls.
|
||||
|
||||
|
|
|
|||
89
examples/openai_responses_workspace_core.py
Normal file
89
examples/openai_responses_workspace_core.py
Normal file
|
|
@ -0,0 +1,89 @@
|
|||
"""Canonical OpenAI Responses API integration centered on workspace-core.
|
||||
|
||||
Requirements:
|
||||
- `pip install openai` or `uv add openai`
|
||||
- `OPENAI_API_KEY`
|
||||
|
||||
This example mirrors the `workspace-core` MCP profile by deriving tool schemas
|
||||
from `Pyro.create_server(profile="workspace-core")` and dispatching tool calls
|
||||
back through that same profiled server.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
from typing import Any, cast
|
||||
|
||||
from pyro_mcp import Pyro
|
||||
|
||||
# Default Responses API model; callers can override per call, and main()
# reads the OPENAI_MODEL environment variable to replace it.
DEFAULT_MODEL = "gpt-5"
|
||||
|
||||
|
||||
def _tool_to_openai(tool: Any) -> dict[str, Any]:
|
||||
return {
|
||||
"type": "function",
|
||||
"name": str(tool.name),
|
||||
"description": str(getattr(tool, "description", "") or ""),
|
||||
"strict": True,
|
||||
"parameters": dict(tool.inputSchema),
|
||||
}
|
||||
|
||||
|
||||
def _extract_structured(raw_result: object) -> dict[str, Any]:
|
||||
if not isinstance(raw_result, tuple) or len(raw_result) != 2:
|
||||
raise TypeError("unexpected call_tool result shape")
|
||||
_, structured = raw_result
|
||||
if not isinstance(structured, dict):
|
||||
raise TypeError("expected structured dictionary result")
|
||||
return cast(dict[str, Any], structured)
|
||||
|
||||
|
||||
async def run_openai_workspace_core_example(*, prompt: str, model: str = DEFAULT_MODEL) -> str:
    """Drive an OpenAI Responses tool-use loop against the workspace-core profile.

    Tool schemas are derived from the same profiled server that later executes
    the calls, so the advertised schemas and the dispatch path cannot drift.

    Parameters:
        prompt: The user request forwarded to the model.
        model: Responses API model name (defaults to ``DEFAULT_MODEL``).

    Returns:
        The model's final text output once it stops issuing tool calls.

    Raises:
        TypeError: If a tool call returns an unexpected result shape
            (see ``_extract_structured``).
    """
    from openai import OpenAI  # type: ignore[import-not-found]

    pyro = Pyro()
    server = pyro.create_server(profile="workspace-core")
    tools = [_tool_to_openai(tool) for tool in await server.list_tools()]
    client = OpenAI()
    # This list holds both SDK response-output items and plain
    # function_call_output dicts, so its element type must be Any — the
    # original dict[str, Any] annotation was violated by the extend() below.
    input_items: list[Any] = [{"role": "user", "content": prompt}]

    while True:
        # NOTE(review): this SDK call is synchronous and blocks the event
        # loop; acceptable for a single-task example, but wrap it in
        # asyncio.to_thread() if this coroutine ever runs alongside others.
        response = client.responses.create(
            model=model,
            input=input_items,
            tools=tools,
        )
        input_items.extend(response.output)

        tool_calls = [item for item in response.output if item.type == "function_call"]
        if not tool_calls:
            # No further tool requests: the conversation is complete.
            return str(response.output_text)

        for tool_call in tool_calls:
            result = _extract_structured(
                await server.call_tool(tool_call.name, json.loads(tool_call.arguments))
            )
            input_items.append(
                {
                    "type": "function_call_output",
                    "call_id": tool_call.call_id,
                    # sort_keys keeps the serialized tool output deterministic.
                    "output": json.dumps(result, sort_keys=True),
                }
            )
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: run the canned workspace-core demo prompt and print the result."""
    chosen_model = os.environ.get("OPENAI_MODEL", DEFAULT_MODEL)
    demo_prompt = (
        "Use the workspace-core tools to create a Debian 12 workspace named "
        "`chat-fix`, write `app.py` with `print(\"fixed\")`, run it with "
        "`python3 app.py`, export the file to `./app.py`, then delete the workspace. "
        "Do not use one-shot vm_run for this request."
    )
    final_text = asyncio.run(
        run_openai_workspace_core_example(prompt=demo_prompt, model=chosen_model)
    )
    print(final_text)


if __name__ == "__main__":
    main()
|
||||
Loading…
Add table
Add a link
Reference in a new issue