Add benchmark-driven model promotion workflow and pipeline stages
Some checks failed
ci / test-and-build (push) Has been cancelled

This commit is contained in:
Thales Maciel 2026-02-28 15:12:33 -03:00
parent 98b13d1069
commit 8c1f7c1e13
38 changed files with 5300 additions and 503 deletions

View file

@ -15,18 +15,9 @@ DEFAULT_HOTKEY = "Cmd+m"
# Speech-to-text defaults.
DEFAULT_STT_PROVIDER = "local_whisper"
DEFAULT_STT_MODEL = "base"
DEFAULT_STT_DEVICE = "cpu"
# Default LLM backend; see ALLOWED_LLM_PROVIDERS below for the accepted set.
DEFAULT_LLM_PROVIDER = "local_llama"
# Hosted-API defaults, used when llm.provider == "external_api".
DEFAULT_EXTERNAL_API_PROVIDER = "openai"
DEFAULT_EXTERNAL_API_BASE_URL = "https://api.openai.com/v1"
DEFAULT_EXTERNAL_API_MODEL = "gpt-4o-mini"
DEFAULT_EXTERNAL_API_TIMEOUT_MS = 15000
DEFAULT_EXTERNAL_API_MAX_RETRIES = 2
# Name of the environment variable consulted for the external API key.
DEFAULT_EXTERNAL_API_KEY_ENV_VAR = "AMAN_EXTERNAL_API_KEY"
DEFAULT_INJECTION_BACKEND = "clipboard"
DEFAULT_UX_PROFILE = "default"
# Closed value sets; validate() rejects any config value outside these.
ALLOWED_STT_PROVIDERS = {"local_whisper"}
ALLOWED_LLM_PROVIDERS = {"local_llama", "external_api"}
ALLOWED_EXTERNAL_API_PROVIDERS = {"openai"}
ALLOWED_INJECTION_BACKENDS = {"clipboard", "injection"}
ALLOWED_UX_PROFILES = {"default", "fast", "polished"}
# Characters whose presence marks a string as a glob/wildcard pattern.
WILDCARD_CHARS = set("*?[]{}")
@ -66,27 +57,10 @@ class SttConfig:
language: str = DEFAULT_STT_LANGUAGE
@dataclass
class LlmConfig:
    """Selects which LLM backend to use.

    The value is checked against ALLOWED_LLM_PROVIDERS by validate().
    """

    provider: str = DEFAULT_LLM_PROVIDER
@dataclass
class ModelsConfig:
    """Paths to user-supplied model files.

    Custom paths are opt-in: validate() rejects a non-empty
    *_model_path unless allow_custom_models is True.
    """

    # Gate for the two path fields below.
    allow_custom_models: bool = False
    # Empty string means "use the bundled/default model".
    whisper_model_path: str = ""
    llm_model_path: str = ""
@dataclass
class ExternalApiConfig:
    """Connection settings for a hosted LLM API; disabled by default."""

    enabled: bool = False
    provider: str = DEFAULT_EXTERNAL_API_PROVIDER
    base_url: str = DEFAULT_EXTERNAL_API_BASE_URL
    model: str = DEFAULT_EXTERNAL_API_MODEL
    # Request timeout in milliseconds; validate() requires it to be > 0.
    timeout_ms: int = DEFAULT_EXTERNAL_API_TIMEOUT_MS
    # Retry budget; validate() requires it to be >= 0.
    max_retries: int = DEFAULT_EXTERNAL_API_MAX_RETRIES
    # Name of the environment variable that holds the API key.
    api_key_env_var: str = DEFAULT_EXTERNAL_API_KEY_ENV_VAR
@dataclass
@ -95,6 +69,12 @@ class InjectionConfig:
remove_transcription_from_clipboard: bool = False
@dataclass
class SafetyConfig:
    """Safety feature flags: on by default, non-strict by default.

    NOTE(review): the exact effect of ``strict`` is not visible in this
    module — confirm against the consuming code.
    """

    enabled: bool = True
    strict: bool = False
@dataclass
class UxConfig:
profile: str = DEFAULT_UX_PROFILE
@ -124,10 +104,9 @@ class Config:
daemon: DaemonConfig = field(default_factory=DaemonConfig)
recording: RecordingConfig = field(default_factory=RecordingConfig)
stt: SttConfig = field(default_factory=SttConfig)
llm: LlmConfig = field(default_factory=LlmConfig)
models: ModelsConfig = field(default_factory=ModelsConfig)
external_api: ExternalApiConfig = field(default_factory=ExternalApiConfig)
injection: InjectionConfig = field(default_factory=InjectionConfig)
safety: SafetyConfig = field(default_factory=SafetyConfig)
ux: UxConfig = field(default_factory=UxConfig)
advanced: AdvancedConfig = field(default_factory=AdvancedConfig)
vocabulary: VocabularyConfig = field(default_factory=VocabularyConfig)
@ -225,16 +204,6 @@ def validate(cfg: Config) -> None:
'{"stt":{"language":"auto"}}',
)
llm_provider = cfg.llm.provider.strip().lower()
if llm_provider not in ALLOWED_LLM_PROVIDERS:
allowed = ", ".join(sorted(ALLOWED_LLM_PROVIDERS))
_raise_cfg_error(
"llm.provider",
f"must be one of: {allowed}",
'{"llm":{"provider":"local_llama"}}',
)
cfg.llm.provider = llm_provider
if not isinstance(cfg.models.allow_custom_models, bool):
_raise_cfg_error(
"models.allow_custom_models",
@ -247,14 +216,7 @@ def validate(cfg: Config) -> None:
"must be string",
'{"models":{"whisper_model_path":""}}',
)
if not isinstance(cfg.models.llm_model_path, str):
_raise_cfg_error(
"models.llm_model_path",
"must be string",
'{"models":{"llm_model_path":""}}',
)
cfg.models.whisper_model_path = cfg.models.whisper_model_path.strip()
cfg.models.llm_model_path = cfg.models.llm_model_path.strip()
if not cfg.models.allow_custom_models:
if cfg.models.whisper_model_path:
_raise_cfg_error(
@ -262,65 +224,6 @@ def validate(cfg: Config) -> None:
"requires models.allow_custom_models=true",
'{"models":{"allow_custom_models":true,"whisper_model_path":"/path/model.bin"}}',
)
if cfg.models.llm_model_path:
_raise_cfg_error(
"models.llm_model_path",
"requires models.allow_custom_models=true",
'{"models":{"allow_custom_models":true,"llm_model_path":"/path/model.gguf"}}',
)
if not isinstance(cfg.external_api.enabled, bool):
_raise_cfg_error(
"external_api.enabled",
"must be boolean",
'{"external_api":{"enabled":false}}',
)
external_provider = cfg.external_api.provider.strip().lower()
if external_provider not in ALLOWED_EXTERNAL_API_PROVIDERS:
allowed = ", ".join(sorted(ALLOWED_EXTERNAL_API_PROVIDERS))
_raise_cfg_error(
"external_api.provider",
f"must be one of: {allowed}",
'{"external_api":{"provider":"openai"}}',
)
cfg.external_api.provider = external_provider
if not cfg.external_api.base_url.strip():
_raise_cfg_error(
"external_api.base_url",
"cannot be empty",
'{"external_api":{"base_url":"https://api.openai.com/v1"}}',
)
if not cfg.external_api.model.strip():
_raise_cfg_error(
"external_api.model",
"cannot be empty",
'{"external_api":{"model":"gpt-4o-mini"}}',
)
if not isinstance(cfg.external_api.timeout_ms, int) or cfg.external_api.timeout_ms <= 0:
_raise_cfg_error(
"external_api.timeout_ms",
"must be a positive integer",
'{"external_api":{"timeout_ms":15000}}',
)
if not isinstance(cfg.external_api.max_retries, int) or cfg.external_api.max_retries < 0:
_raise_cfg_error(
"external_api.max_retries",
"must be a non-negative integer",
'{"external_api":{"max_retries":2}}',
)
if not cfg.external_api.api_key_env_var.strip():
_raise_cfg_error(
"external_api.api_key_env_var",
"cannot be empty",
'{"external_api":{"api_key_env_var":"AMAN_EXTERNAL_API_KEY"}}',
)
if cfg.llm.provider == "external_api" and not cfg.external_api.enabled:
_raise_cfg_error(
"llm.provider",
"external_api provider requires external_api.enabled=true",
'{"llm":{"provider":"external_api"},"external_api":{"enabled":true}}',
)
backend = cfg.injection.backend.strip().lower()
if backend not in ALLOWED_INJECTION_BACKENDS:
@ -337,6 +240,18 @@ def validate(cfg: Config) -> None:
"must be boolean",
'{"injection":{"remove_transcription_from_clipboard":false}}',
)
if not isinstance(cfg.safety.enabled, bool):
_raise_cfg_error(
"safety.enabled",
"must be boolean",
'{"safety":{"enabled":true}}',
)
if not isinstance(cfg.safety.strict, bool):
_raise_cfg_error(
"safety.strict",
"must be boolean",
'{"safety":{"strict":false}}',
)
profile = cfg.ux.profile.strip().lower()
if profile not in ALLOWED_UX_PROFILES:
@ -371,10 +286,9 @@ def _from_dict(data: dict[str, Any], cfg: Config) -> Config:
"daemon",
"recording",
"stt",
"llm",
"models",
"external_api",
"injection",
"safety",
"vocabulary",
"ux",
"advanced",
@ -384,10 +298,9 @@ def _from_dict(data: dict[str, Any], cfg: Config) -> Config:
daemon = _ensure_dict(data.get("daemon"), "daemon")
recording = _ensure_dict(data.get("recording"), "recording")
stt = _ensure_dict(data.get("stt"), "stt")
llm = _ensure_dict(data.get("llm"), "llm")
models = _ensure_dict(data.get("models"), "models")
external_api = _ensure_dict(data.get("external_api"), "external_api")
injection = _ensure_dict(data.get("injection"), "injection")
safety = _ensure_dict(data.get("safety"), "safety")
vocabulary = _ensure_dict(data.get("vocabulary"), "vocabulary")
ux = _ensure_dict(data.get("ux"), "ux")
advanced = _ensure_dict(data.get("advanced"), "advanced")
@ -395,22 +308,17 @@ def _from_dict(data: dict[str, Any], cfg: Config) -> Config:
_reject_unknown_keys(daemon, {"hotkey"}, parent="daemon")
_reject_unknown_keys(recording, {"input"}, parent="recording")
_reject_unknown_keys(stt, {"provider", "model", "device", "language"}, parent="stt")
_reject_unknown_keys(llm, {"provider"}, parent="llm")
_reject_unknown_keys(
models,
{"allow_custom_models", "whisper_model_path", "llm_model_path"},
{"allow_custom_models", "whisper_model_path"},
parent="models",
)
_reject_unknown_keys(
external_api,
{"enabled", "provider", "base_url", "model", "timeout_ms", "max_retries", "api_key_env_var"},
parent="external_api",
)
_reject_unknown_keys(
injection,
{"backend", "remove_transcription_from_clipboard"},
parent="injection",
)
_reject_unknown_keys(safety, {"enabled", "strict"}, parent="safety")
_reject_unknown_keys(vocabulary, {"replacements", "terms"}, parent="vocabulary")
_reject_unknown_keys(ux, {"profile", "show_notifications"}, parent="ux")
_reject_unknown_keys(advanced, {"strict_startup"}, parent="advanced")
@ -429,30 +337,10 @@ def _from_dict(data: dict[str, Any], cfg: Config) -> Config:
cfg.stt.device = _as_nonempty_str(stt["device"], "stt.device")
if "language" in stt:
cfg.stt.language = _as_nonempty_str(stt["language"], "stt.language")
if "provider" in llm:
cfg.llm.provider = _as_nonempty_str(llm["provider"], "llm.provider")
if "allow_custom_models" in models:
cfg.models.allow_custom_models = _as_bool(models["allow_custom_models"], "models.allow_custom_models")
if "whisper_model_path" in models:
cfg.models.whisper_model_path = _as_str(models["whisper_model_path"], "models.whisper_model_path")
if "llm_model_path" in models:
cfg.models.llm_model_path = _as_str(models["llm_model_path"], "models.llm_model_path")
if "enabled" in external_api:
cfg.external_api.enabled = _as_bool(external_api["enabled"], "external_api.enabled")
if "provider" in external_api:
cfg.external_api.provider = _as_nonempty_str(external_api["provider"], "external_api.provider")
if "base_url" in external_api:
cfg.external_api.base_url = _as_nonempty_str(external_api["base_url"], "external_api.base_url")
if "model" in external_api:
cfg.external_api.model = _as_nonempty_str(external_api["model"], "external_api.model")
if "timeout_ms" in external_api:
cfg.external_api.timeout_ms = _as_int(external_api["timeout_ms"], "external_api.timeout_ms")
if "max_retries" in external_api:
cfg.external_api.max_retries = _as_int(external_api["max_retries"], "external_api.max_retries")
if "api_key_env_var" in external_api:
cfg.external_api.api_key_env_var = _as_nonempty_str(
external_api["api_key_env_var"], "external_api.api_key_env_var"
)
if "backend" in injection:
cfg.injection.backend = _as_nonempty_str(injection["backend"], "injection.backend")
if "remove_transcription_from_clipboard" in injection:
@ -460,6 +348,10 @@ def _from_dict(data: dict[str, Any], cfg: Config) -> Config:
injection["remove_transcription_from_clipboard"],
"injection.remove_transcription_from_clipboard",
)
if "enabled" in safety:
cfg.safety.enabled = _as_bool(safety["enabled"], "safety.enabled")
if "strict" in safety:
cfg.safety.strict = _as_bool(safety["strict"], "safety.strict")
if "replacements" in vocabulary:
cfg.vocabulary.replacements = _as_replacements(vocabulary["replacements"])
if "terms" in vocabulary: