Add OpenAI Responses API vm_run integration example
This commit is contained in:
parent
0aa5e25dc1
commit
f7c8a4366b
4 changed files with 263 additions and 0 deletions
98
examples/openai_responses_vm_run.py
Normal file
98
examples/openai_responses_vm_run.py
Normal file
|
|
@ -0,0 +1,98 @@
|
|||
"""Canonical OpenAI Responses API integration centered on vm_run.
|
||||
|
||||
Requirements:
|
||||
- `pip install openai` or `uv add openai`
|
||||
- `OPENAI_API_KEY`
|
||||
|
||||
This example keeps the model-facing contract intentionally small: one `vm_run`
|
||||
tool that creates an ephemeral VM, runs one command, and cleans up.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
from typing import Any
|
||||
|
||||
from pyro_mcp import Pyro
|
||||
|
||||
DEFAULT_MODEL = "gpt-5"

# Tool definition for the OpenAI Responses API.
#
# NOTE: with `"strict": True`, OpenAI's structured-outputs validator requires
# EVERY key in `properties` to be listed in `required`; optional fields must
# instead be declared nullable (`["integer", "null"]`). The previous schema
# omitted the optional keys from `required` and was rejected by the API in
# strict mode. The local executor treats an explicit `null` as "use the
# default".
OPENAI_VM_RUN_TOOL: dict[str, Any] = {
    "type": "function",
    "name": "vm_run",
    "description": "Run one command in an ephemeral Firecracker VM and clean it up.",
    "strict": True,
    "parameters": {
        "type": "object",
        "properties": {
            "profile": {"type": "string"},
            "command": {"type": "string"},
            "vcpu_count": {"type": "integer"},
            "mem_mib": {"type": "integer"},
            # Optional knobs: nullable so the model can decline to set them
            # while still satisfying strict mode's all-keys-required rule.
            "timeout_seconds": {"type": ["integer", "null"]},
            "ttl_seconds": {"type": ["integer", "null"]},
            "network": {"type": ["boolean", "null"]},
        },
        "required": [
            "profile",
            "command",
            "vcpu_count",
            "mem_mib",
            "timeout_seconds",
            "ttl_seconds",
            "network",
        ],
        "additionalProperties": False,
    },
}
|
||||
|
||||
|
||||
def call_vm_run(arguments: dict[str, Any]) -> dict[str, Any]:
    """Execute one `vm_run` tool call and return the VM result payload.

    Args:
        arguments: Decoded tool-call arguments. `profile`, `command`,
            `vcpu_count`, and `mem_mib` are required; the remaining keys may
            be absent *or* explicitly `None` (strict tool schemas make the
            model send `null` for optionals it does not set), and both cases
            fall back to the defaults below.

    Returns:
        Whatever `Pyro.run_in_vm` returns for the completed run.
    """

    def _opt(key: str, default: Any) -> Any:
        # Treat a missing key and an explicit null identically.
        value = arguments.get(key)
        return default if value is None else value

    pyro = Pyro()
    return pyro.run_in_vm(
        profile=str(arguments["profile"]),
        command=str(arguments["command"]),
        vcpu_count=int(arguments["vcpu_count"]),
        mem_mib=int(arguments["mem_mib"]),
        timeout_seconds=int(_opt("timeout_seconds", 30)),
        ttl_seconds=int(_opt("ttl_seconds", 600)),
        network=bool(_opt("network", False)),
    )
|
||||
|
||||
|
||||
def run_openai_vm_run_example(*, prompt: str, model: str = DEFAULT_MODEL) -> str:
    """Drive one Responses API conversation until the model answers in text.

    Every turn's output items are appended back onto the conversation; each
    `vm_run` tool request is executed locally and its JSON-encoded result is
    returned to the model as a `function_call_output` item.

    Args:
        prompt: The user message that starts the conversation.
        model: Responses API model name (defaults to `DEFAULT_MODEL`).

    Returns:
        The model's final text answer.

    Raises:
        RuntimeError: If the model requests any tool other than `vm_run`.
    """
    from openai import OpenAI  # type: ignore[import-not-found]

    client = OpenAI()
    conversation: list[dict[str, Any]] = [{"role": "user", "content": prompt}]

    while True:
        response = client.responses.create(
            model=model,
            input=conversation,
            tools=[OPENAI_VM_RUN_TOOL],
        )
        # Keep the model's own output in the transcript for the next turn.
        conversation.extend(response.output)

        pending = [entry for entry in response.output if entry.type == "function_call"]
        if not pending:
            # No tool requests left: the model has produced its final answer.
            return str(response.output_text)

        for call in pending:
            if call.name != "vm_run":
                raise RuntimeError(f"unexpected tool requested: {call.name}")
            outcome = call_vm_run(json.loads(call.arguments))
            conversation.append(
                {
                    "type": "function_call_output",
                    "call_id": call.call_id,
                    "output": json.dumps(outcome, sort_keys=True),
                }
            )
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: run the canned git-version demo and print the answer.

    The model can be overridden via the `OPENAI_MODEL` environment variable.
    """
    chosen_model = os.environ.get("OPENAI_MODEL", DEFAULT_MODEL)
    demo_prompt = (
        "Use the vm_run tool to run `git --version` in an ephemeral VM. "
        "Use the debian-git profile with 1 vCPU and 1024 MiB of memory. "
        "Do not use networking for this request."
    )
    answer = run_openai_vm_run_example(prompt=demo_prompt, model=chosen_model)
    print(answer)


if __name__ == "__main__":
    main()
|
||||
Loading…
Add table
Add a link
Reference in a new issue