# pyro-mcp/examples/openai_responses_vm_run.py
"""Canonical OpenAI Responses API integration centered on vm_run.
Requirements:
- `pip install openai` or `uv add openai`
- `OPENAI_API_KEY`
This example keeps the model-facing contract intentionally small: one `vm_run`
tool that creates an ephemeral VM, runs one command, and cleans up.
"""
from __future__ import annotations
import json
import os
from typing import Any
from pyro_mcp import Pyro
from pyro_mcp.vm_manager import (
DEFAULT_ALLOW_HOST_COMPAT,
DEFAULT_MEM_MIB,
DEFAULT_TIMEOUT_SECONDS,
DEFAULT_TTL_SECONDS,
DEFAULT_VCPU_COUNT,
)
# Default model used when OPENAI_MODEL is not set in the environment.
DEFAULT_MODEL = "gpt-5"

# Tool schema for the Responses API.
#
# NOTE: with `"strict": True`, OpenAI structured outputs require EVERY key in
# `properties` to also appear in `required`; truly-optional fields must be
# expressed as required-but-nullable instead. The original schema listed only
# `environment` and `command` in `required`, which the API rejects under
# strict mode. The executor treats an explicit null as "use the default".
OPENAI_VM_RUN_TOOL: dict[str, Any] = {
    "type": "function",
    "name": "vm_run",
    "description": "Run one command in an ephemeral Firecracker VM and clean it up.",
    "strict": True,
    "parameters": {
        "type": "object",
        "properties": {
            "environment": {"type": "string"},
            "command": {"type": "string"},
            "vcpu_count": {"type": ["integer", "null"]},
            "mem_mib": {"type": ["integer", "null"]},
            "timeout_seconds": {"type": ["integer", "null"]},
            "ttl_seconds": {"type": ["integer", "null"]},
            "network": {"type": ["boolean", "null"]},
            "allow_host_compat": {"type": ["boolean", "null"]},
        },
        "required": [
            "environment",
            "command",
            "vcpu_count",
            "mem_mib",
            "timeout_seconds",
            "ttl_seconds",
            "network",
            "allow_host_compat",
        ],
        "additionalProperties": False,
    },
}
def call_vm_run(arguments: dict[str, Any]) -> dict[str, Any]:
    """Execute one `vm_run` tool call and return its JSON-serializable result.

    Args:
        arguments: Parsed tool-call arguments from the model.

    Returns:
        Whatever `Pyro.run_in_vm` returns (a JSON-serializable dict).

    Both a missing key and an explicit ``None`` value fall back to the library
    default. The original used ``arguments.get(key, default)``, which returns
    ``None`` (not the default) when the key is present with a null value — as
    strict tool schemas make the model emit — and then ``int(None)`` raised
    ``TypeError``.
    """

    def _or_default(key: str, default: Any) -> Any:
        # Coalesce both "absent" and "present but null" to the default.
        value = arguments.get(key)
        return default if value is None else value

    pyro = Pyro()
    return pyro.run_in_vm(
        environment=str(arguments["environment"]),
        command=str(arguments["command"]),
        vcpu_count=int(_or_default("vcpu_count", DEFAULT_VCPU_COUNT)),
        mem_mib=int(_or_default("mem_mib", DEFAULT_MEM_MIB)),
        timeout_seconds=int(_or_default("timeout_seconds", DEFAULT_TIMEOUT_SECONDS)),
        ttl_seconds=int(_or_default("ttl_seconds", DEFAULT_TTL_SECONDS)),
        network=bool(_or_default("network", False)),
        allow_host_compat=bool(_or_default("allow_host_compat", DEFAULT_ALLOW_HOST_COMPAT)),
    )
def run_openai_vm_run_example(
    *, prompt: str, model: str = DEFAULT_MODEL, max_turns: int = 10
) -> str:
    """Drive one Responses API conversation until the model stops calling tools.

    Args:
        prompt: User request sent as the first input item.
        model: Responses API model name.
        max_turns: Upper bound on model round-trips. Backward-compatible,
            keyword-only addition; the original looped forever if the model
            kept requesting tool calls.

    Returns:
        The model's final text output.

    Raises:
        RuntimeError: If the model requests a tool other than ``vm_run``, or
            ``max_turns`` round-trips pass without a final answer.
    """
    from openai import OpenAI  # type: ignore[import-not-found]

    client = OpenAI()
    input_items: list[dict[str, Any]] = [{"role": "user", "content": prompt}]
    for _ in range(max_turns):
        response = client.responses.create(
            model=model,
            input=input_items,
            tools=[OPENAI_VM_RUN_TOOL],
        )
        # Feed the full model output (reasoning items, tool calls, text)
        # back into the next request so the model sees its own turn.
        input_items.extend(response.output)
        tool_calls = [item for item in response.output if item.type == "function_call"]
        if not tool_calls:
            # No tool requested: the model has produced its final answer.
            return str(response.output_text)
        for tool_call in tool_calls:
            if tool_call.name != "vm_run":
                raise RuntimeError(f"unexpected tool requested: {tool_call.name}")
            result = call_vm_run(json.loads(tool_call.arguments))
            input_items.append(
                {
                    "type": "function_call_output",
                    "call_id": tool_call.call_id,
                    # sort_keys keeps the transcript deterministic.
                    "output": json.dumps(result, sort_keys=True),
                }
            )
    raise RuntimeError(f"no final answer after {max_turns} turns")
def main() -> None:
    """CLI entry point: run the canonical vm_run demo and print the answer."""
    chosen_model = os.environ.get("OPENAI_MODEL", DEFAULT_MODEL)
    # Assemble the demo prompt from its three sentences.
    request = "".join(
        (
            "Use the vm_run tool to run `git --version` in an ephemeral VM. ",
            "Use the `debian:12` environment. ",
            "Do not use networking for this request.",
        )
    )
    print(run_openai_vm_run_example(prompt=request, model=chosen_model))
# Script entry-point guard: run the demo only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()