Make the recommended MCP profile visible from the first help and docs pass without changing 3.x behavior. Rework help, top-level docs, public-contract wording, and shipped MCP/OpenAI examples so `workspace-core` is the recommended first profile while the full profile stays the compatibility default for full-surface hosts. Bump the package and catalog to 3.8.0, mark the roadmap milestone done, and add regression coverage for the new MCP help and docs alignment. Validation included `uv lock`, targeted profile/help tests, `make check`, `make dist-check`, and a real guest-backed server smoke test.
90 lines
2.8 KiB
Python
"""Canonical OpenAI Responses API integration centered on workspace-core.
|
|
|
|
Requirements:
|
|
- `pip install openai` or `uv add openai`
|
|
- `OPENAI_API_KEY`
|
|
|
|
This is the recommended persistent-chat example. It mirrors the
|
|
`workspace-core` MCP profile by deriving tool schemas from
|
|
`Pyro.create_server(profile="workspace-core")` and dispatching tool calls back
|
|
through that same profiled server.
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import asyncio
|
|
import json
|
|
import os
|
|
from typing import Any, cast
|
|
|
|
from pyro_mcp import Pyro
|
|
|
|
DEFAULT_MODEL = "gpt-5"
|
|
|
|
|
|
def _tool_to_openai(tool: Any) -> dict[str, Any]:
|
|
return {
|
|
"type": "function",
|
|
"name": str(tool.name),
|
|
"description": str(getattr(tool, "description", "") or ""),
|
|
"strict": True,
|
|
"parameters": dict(tool.inputSchema),
|
|
}
|
|
|
|
|
|
def _extract_structured(raw_result: object) -> dict[str, Any]:
|
|
if not isinstance(raw_result, tuple) or len(raw_result) != 2:
|
|
raise TypeError("unexpected call_tool result shape")
|
|
_, structured = raw_result
|
|
if not isinstance(structured, dict):
|
|
raise TypeError("expected structured dictionary result")
|
|
return cast(dict[str, Any], structured)
|
|
|
|
|
|
async def run_openai_workspace_core_example(*, prompt: str, model: str = DEFAULT_MODEL) -> str:
    """Run a Responses-API tool-calling loop against the workspace-core profile.

    Builds the tool list from ``Pyro.create_server(profile="workspace-core")``,
    then alternates between model turns and local tool dispatch until the model
    produces a turn with no function calls, whose text is returned.

    Args:
        prompt: The user's request, sent as the first conversation item.
        model: OpenAI model name; defaults to ``DEFAULT_MODEL``.

    Returns:
        The model's final text output.
    """
    # Imported lazily so importing this module never requires the openai package.
    from openai import OpenAI  # type: ignore[import-not-found]

    server = Pyro().create_server(profile="workspace-core")
    tools = [_tool_to_openai(t) for t in await server.list_tools()]
    client = OpenAI()
    conversation: list[dict[str, Any]] = [{"role": "user", "content": prompt}]

    while True:
        response = client.responses.create(model=model, input=conversation, tools=tools)
        # Keep the model's own output items in the transcript for the next turn.
        conversation.extend(response.output)

        calls = [item for item in response.output if item.type == "function_call"]
        if not calls:
            return str(response.output_text)

        for call in calls:
            structured = _extract_structured(
                await server.call_tool(call.name, json.loads(call.arguments))
            )
            # sort_keys keeps the serialized tool output deterministic.
            conversation.append(
                {
                    "type": "function_call_output",
                    "call_id": call.call_id,
                    "output": json.dumps(structured, sort_keys=True),
                }
            )
|
|
|
|
|
|
def main() -> None:
    """CLI entry point: run the canned workspace-core chat demo and print the answer.

    The model is taken from the ``OPENAI_MODEL`` environment variable when set,
    falling back to ``DEFAULT_MODEL``.
    """
    chosen_model = os.environ.get("OPENAI_MODEL", DEFAULT_MODEL)
    request = (
        "Use the workspace-core tools to create a Debian 12 workspace named "
        "`chat-fix`, write `app.py` with `print(\"fixed\")`, run it with "
        "`python3 app.py`, export the file to `./app.py`, then delete the workspace. "
        "Do not use one-shot vm_run for this request."
    )
    answer = asyncio.run(run_openai_workspace_core_example(prompt=request, model=chosen_model))
    print(answer)
|
|
|
|
|
|
if __name__ == "__main__":
|
|
main()
|