Inline STT and tray

This commit is contained in:
Thales Maciel 2026-02-24 11:27:22 -03:00
parent 8c68719041
commit 4e8edc3e40
No known key found for this signature in database
GPG key ID: 33112E6833C34679
8 changed files with 109 additions and 171 deletions

View file

@@ -5,21 +5,27 @@ import json
import logging
import sys
import urllib.request
from dataclasses import dataclass
from pathlib import Path
from dataclasses import dataclass
def load_system_prompt(path: str | None) -> str:
if path:
return Path(path).read_text(encoding="utf-8").strip()
return (Path(__file__).parent / "system_prompt.txt").read_text(encoding="utf-8").strip()
# Inlined default system prompt (replaces the former system_prompt.txt file):
# instructs the model to clean up dictated text without adding content.
SYSTEM_PROMPT = (
    "You are an amanuensis. Rewrite the user's dictated text into clean, grammatical prose.\n\n"
    "Rules:\n"
    "- Remove filler words (um/uh/like), false starts, and self-corrections.\n"
    "- Keep meaning, facts, and intent.\n"
    "- Prefer concise sentences.\n"
    "- Do not add new info.\n"
    "- Output ONLY the cleaned text, no commentary.\n\n"
    "Examples:\n"
    " - \"schedule that for 5 PM, I mean 4 PM\" -> \"schedule that for 4 PM\"\n"
    " - \"let's ask Bob, I mean Janice, let's ask Janice\" -> \"let's ask Janice\"\n"
)
@dataclass
class AIConfig:
model: str
temperature: float
system_prompt_file: str
base_url: str
api_key: str
timeout_sec: int
@@ -30,7 +36,7 @@ class AIConfig:
class GenericAPIProcessor:
def __init__(self, cfg: AIConfig):
self.cfg = cfg
self.system = load_system_prompt(cfg.system_prompt_file)
self.system = SYSTEM_PROMPT
def process(self, text: str) -> str:
language = self.cfg.language_hint or ""
@@ -46,7 +52,7 @@ class GenericAPIProcessor:
{"role": "system", "content": self.system},
{"role": "user", "content": user_content},
],
"temperature": self.cfg.temperature,
"temperature": 0.0,
}
data = json.dumps(payload).encode("utf-8")
url = _chat_completions_url(self.cfg.base_url)
@@ -101,6 +107,10 @@ def list_models(base_url: str, api_key: str = "", timeout_sec: int = 10) -> list
return []
def load_system_prompt(_path: str | None = None) -> str:
    """Return the inlined ``SYSTEM_PROMPT`` constant.

    Backwards-compatible shim: the prompt used to be loaded from a file, so
    callers may still pass a path. The argument is now accepted and ignored.
    """
    return SYSTEM_PROMPT
def _models_url(base_url: str) -> str:
    """Build the OpenAI-compatible ``/v1/models`` endpoint URL for *base_url*."""
    return _root_url(base_url).rstrip("/") + "/v1/models"
@@ -149,14 +159,11 @@ def main() -> int:
json.dumps(redacted_dict(cfg), indent=2),
)
prompt = load_system_prompt("")
logging.info("system prompt:\n%s", prompt)
logging.info("system prompt:\n%s", SYSTEM_PROMPT)
processor = build_processor(
AIConfig(
model=cfg.ai_cleanup.get("model", ""),
temperature=cfg.ai_cleanup.get("temperature", 0.0),
system_prompt_file="",
base_url=cfg.ai_cleanup.get("base_url", ""),
api_key=cfg.ai_cleanup.get("api_key", ""),
timeout_sec=25,