Add benchmark-driven model promotion workflow and pipeline stages
Some checks failed
ci / test-and-build (push) Has been cancelled
This commit is contained in:
parent
98b13d1069
commit
8c1f7c1e13
38 changed files with 5300 additions and 503 deletions
|
|
@@ -1,7 +1,6 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
from dataclasses import asdict, dataclass
|
||||
from pathlib import Path
|
||||
|
||||
|
|
@@ -153,22 +152,11 @@ def _provider_check(cfg: Config | None) -> list[DiagnosticCheck]:
|
|||
hint="fix config.load first",
|
||||
)
|
||||
]
|
||||
if cfg.llm.provider == "external_api":
|
||||
key_name = cfg.external_api.api_key_env_var
|
||||
if not os.getenv(key_name, "").strip():
|
||||
return [
|
||||
DiagnosticCheck(
|
||||
id="provider.runtime",
|
||||
ok=False,
|
||||
message=f"external api provider enabled but {key_name} is missing",
|
||||
hint=f"export {key_name} before starting aman",
|
||||
)
|
||||
]
|
||||
return [
|
||||
DiagnosticCheck(
|
||||
id="provider.runtime",
|
||||
ok=True,
|
||||
message=f"stt={cfg.stt.provider}, llm={cfg.llm.provider}",
|
||||
message=f"stt={cfg.stt.provider}, editor=local_llama_builtin",
|
||||
)
|
||||
]
|
||||
|
||||
|
|
@@ -183,35 +171,20 @@ def _model_check(cfg: Config | None) -> list[DiagnosticCheck]:
|
|||
hint="fix config.load first",
|
||||
)
|
||||
]
|
||||
if cfg.llm.provider == "external_api":
|
||||
return [
|
||||
DiagnosticCheck(
|
||||
id="model.cache",
|
||||
ok=True,
|
||||
message="local llm model cache check skipped (external_api provider)",
|
||||
)
|
||||
]
|
||||
if cfg.models.allow_custom_models and cfg.models.llm_model_path.strip():
|
||||
path = Path(cfg.models.llm_model_path)
|
||||
if cfg.models.allow_custom_models and cfg.models.whisper_model_path.strip():
|
||||
path = Path(cfg.models.whisper_model_path)
|
||||
if not path.exists():
|
||||
return [
|
||||
DiagnosticCheck(
|
||||
id="model.cache",
|
||||
ok=False,
|
||||
message=f"custom llm model path does not exist: {path}",
|
||||
hint="fix models.llm_model_path or disable custom model paths",
|
||||
message=f"custom whisper model path does not exist: {path}",
|
||||
hint="fix models.whisper_model_path or disable custom model paths",
|
||||
)
|
||||
]
|
||||
return [
|
||||
DiagnosticCheck(
|
||||
id="model.cache",
|
||||
ok=True,
|
||||
message=f"custom llm model path is ready at {path}",
|
||||
)
|
||||
]
|
||||
try:
|
||||
model_path = ensure_model()
|
||||
return [DiagnosticCheck(id="model.cache", ok=True, message=f"model is ready at {model_path}")]
|
||||
return [DiagnosticCheck(id="model.cache", ok=True, message=f"editor model is ready at {model_path}")]
|
||||
except Exception as exc:
|
||||
return [
|
||||
DiagnosticCheck(
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue