Split aman.py into focused CLI and runtime modules
Break the old god module into flat siblings for CLI parsing, run lifecycle, daemon state, shared processing helpers, benchmark tooling, and maintainer-only model sync so changes stop sharing one giant import graph. Keep aman as a thin shim over aman_cli, move sync-default-model behind the hidden aman-maint entrypoint plus Make wrappers, and update packaging metadata plus maintainer docs to reflect the new surface. Retarget the tests to the new seams with dedicated runtime, run, benchmark, maintainer, and entrypoint suites, and verify with python3 -m unittest discover -s tests -p "test_*.py", python3 -m py_compile src/*.py tests/*.py, PYTHONPATH=src python3 -m aman --help, PYTHONPATH=src python3 -m aman version, and PYTHONPATH=src python3 -m aman_maint --help.
This commit is contained in:
parent
721248ca26
commit
4d0081d1d0
18 changed files with 2838 additions and 2427 deletions
191
tests/test_aman_benchmarks.py
Normal file
191
tests/test_aman_benchmarks.py
Normal file
|
|
@ -0,0 +1,191 @@
|
|||
import io
import json
import sys
import tempfile
import unittest
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import patch

# Make the project's src/ directory importable when the tests are run
# straight from a checkout, without an installation step.
ROOT = Path(__file__).resolve().parents[1]
SRC = ROOT / "src"
if str(SRC) not in sys.path:
    sys.path.insert(0, str(SRC))

# Imported after the sys.path shim so the flat src/ modules resolve.
import aman_benchmarks
import aman_cli
from config import Config
|
||||
|
||||
|
||||
class _FakeBenchEditorStage:
|
||||
def warmup(self):
|
||||
return
|
||||
|
||||
def rewrite(self, transcript, *, language, dictionary_context):
|
||||
_ = dictionary_context
|
||||
return SimpleNamespace(
|
||||
final_text=f"[{language}] {transcript.strip()}",
|
||||
latency_ms=1.0,
|
||||
pass1_ms=0.5,
|
||||
pass2_ms=0.5,
|
||||
)
|
||||
|
||||
|
||||
class AmanBenchmarksTests(unittest.TestCase):
    """Tests for the bench / eval-models / build-heuristic-dataset commands."""

    def test_bench_command_json_output(self):
        """bench --json emits a machine-readable payload covering all runs."""
        args = aman_cli.parse_cli_args(
            ["bench", "--text", "hello", "--repeat", "2", "--warmup", "0", "--json"]
        )
        out = io.StringIO()
        # Patch config loading and the editor stage so no model is loaded.
        with patch("aman_benchmarks.load", return_value=Config()), patch(
            "aman_benchmarks.build_editor_stage", return_value=_FakeBenchEditorStage()
        ), patch("sys.stdout", out):
            exit_code = aman_benchmarks.bench_command(args)

        self.assertEqual(exit_code, 0)
        payload = json.loads(out.getvalue())
        self.assertEqual(payload["measured_runs"], 2)
        self.assertEqual(payload["summary"]["runs"], 2)
        self.assertEqual(len(payload["runs"]), 2)
        self.assertEqual(payload["editor_backend"], "local_llama_builtin")
        self.assertIn("avg_alignment_ms", payload["summary"])
        self.assertIn("avg_fact_guard_ms", payload["summary"])
        self.assertIn("alignment_applied", payload["runs"][0])
        self.assertIn("fact_guard_action", payload["runs"][0])

    def test_bench_command_supports_text_file_input(self):
        """bench --text-file reads the transcript from disk."""
        with tempfile.TemporaryDirectory() as td:
            text_file = Path(td) / "input.txt"
            text_file.write_text("hello from file", encoding="utf-8")
            args = aman_cli.parse_cli_args(
                ["bench", "--text-file", str(text_file), "--repeat", "1", "--warmup", "0", "--print-output"]
            )
            out = io.StringIO()
            with patch("aman_benchmarks.load", return_value=Config()), patch(
                "aman_benchmarks.build_editor_stage", return_value=_FakeBenchEditorStage()
            ), patch("sys.stdout", out):
                exit_code = aman_benchmarks.bench_command(args)

            self.assertEqual(exit_code, 0)
            # The fake stage tags output with "[auto]" (default language).
            self.assertIn("[auto] hello from file", out.getvalue())

    def test_bench_command_rejects_empty_input(self):
        """Whitespace-only --text is treated as missing input (exit 1)."""
        args = aman_cli.parse_cli_args(["bench", "--text", " "])
        with patch("aman_benchmarks.load", return_value=Config()), patch(
            "aman_benchmarks.build_editor_stage", return_value=_FakeBenchEditorStage()
        ):
            exit_code = aman_benchmarks.bench_command(args)

        self.assertEqual(exit_code, 1)

    def test_bench_command_rejects_non_positive_repeat(self):
        """--repeat 0 is invalid and fails with exit code 1."""
        args = aman_cli.parse_cli_args(["bench", "--text", "hello", "--repeat", "0"])
        with patch("aman_benchmarks.load", return_value=Config()), patch(
            "aman_benchmarks.build_editor_stage", return_value=_FakeBenchEditorStage()
        ):
            exit_code = aman_benchmarks.bench_command(args)

        self.assertEqual(exit_code, 1)

    def test_eval_models_command_writes_report(self):
        """eval-models --output writes the (mocked) report JSON to disk."""
        with tempfile.TemporaryDirectory() as td:
            output_path = Path(td) / "report.json"
            args = aman_cli.parse_cli_args(
                [
                    "eval-models",
                    "--dataset",
                    "benchmarks/cleanup_dataset.jsonl",
                    "--matrix",
                    "benchmarks/model_matrix.small_first.json",
                    "--output",
                    str(output_path),
                    "--json",
                ]
            )
            out = io.StringIO()
            # Minimal report shape the command's summary rendering expects.
            fake_report = {
                "models": [
                    {
                        "name": "base",
                        "best_param_set": {
                            "latency_ms": {"p50": 1000.0},
                            "quality": {"hybrid_score_avg": 0.8, "parse_valid_rate": 1.0},
                        },
                    }
                ],
                "winner_recommendation": {"name": "base", "reason": "test"},
            }
            with patch("aman_benchmarks.run_model_eval", return_value=fake_report), patch(
                "sys.stdout", out
            ):
                exit_code = aman_benchmarks.eval_models_command(args)
            self.assertEqual(exit_code, 0)
            self.assertTrue(output_path.exists())
            payload = json.loads(output_path.read_text(encoding="utf-8"))
            self.assertEqual(payload["winner_recommendation"]["name"], "base")

    def test_eval_models_command_forwards_heuristic_arguments(self):
        """Heuristic CLI flags are passed through to run_model_eval verbatim."""
        args = aman_cli.parse_cli_args(
            [
                "eval-models",
                "--dataset",
                "benchmarks/cleanup_dataset.jsonl",
                "--matrix",
                "benchmarks/model_matrix.small_first.json",
                "--heuristic-dataset",
                "benchmarks/heuristics_dataset.jsonl",
                "--heuristic-weight",
                "0.35",
                "--report-version",
                "2",
                "--json",
            ]
        )
        out = io.StringIO()
        fake_report = {
            "models": [{"name": "base", "best_param_set": {}}],
            "winner_recommendation": {"name": "base", "reason": "ok"},
        }
        with patch("aman_benchmarks.run_model_eval", return_value=fake_report) as run_eval_mock, patch(
            "sys.stdout", out
        ):
            exit_code = aman_benchmarks.eval_models_command(args)
        self.assertEqual(exit_code, 0)
        # Positional paths plus keyword heuristic options, exactly once.
        run_eval_mock.assert_called_once_with(
            "benchmarks/cleanup_dataset.jsonl",
            "benchmarks/model_matrix.small_first.json",
            heuristic_dataset_path="benchmarks/heuristics_dataset.jsonl",
            heuristic_weight=0.35,
            report_version=2,
            verbose=False,
        )

    def test_build_heuristic_dataset_command_json_output(self):
        """build-heuristic-dataset --json prints the builder summary as JSON."""
        args = aman_cli.parse_cli_args(
            [
                "build-heuristic-dataset",
                "--input",
                "benchmarks/heuristics_dataset.raw.jsonl",
                "--output",
                "benchmarks/heuristics_dataset.jsonl",
                "--json",
            ]
        )
        out = io.StringIO()
        summary = {
            "raw_rows": 4,
            "written_rows": 4,
            "generated_word_rows": 2,
            "output_path": "benchmarks/heuristics_dataset.jsonl",
        }
        with patch("aman_benchmarks.build_heuristic_dataset", return_value=summary), patch(
            "sys.stdout", out
        ):
            exit_code = aman_benchmarks.build_heuristic_dataset_command(args)
        self.assertEqual(exit_code, 0)
        payload = json.loads(out.getvalue())
        self.assertEqual(payload["written_rows"], 4)
|
||||
|
||||
|
||||
# Allow running this file directly: python tests/test_aman_benchmarks.py
if __name__ == "__main__":
    unittest.main()
|
||||
|
|
@ -1,11 +1,9 @@
|
|||
import io
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import patch
|
||||
|
||||
ROOT = Path(__file__).resolve().parents[1]
|
||||
|
|
@ -13,114 +11,16 @@ SRC = ROOT / "src"
|
|||
if str(SRC) not in sys.path:
|
||||
sys.path.insert(0, str(SRC))
|
||||
|
||||
import aman
|
||||
from config import Config
|
||||
from config_ui import ConfigUiResult
|
||||
import aman_cli
|
||||
from diagnostics import DiagnosticCheck, DiagnosticReport
|
||||
|
||||
|
||||
class _FakeDesktop:
|
||||
def __init__(self):
|
||||
self.hotkey = None
|
||||
self.hotkey_callback = None
|
||||
|
||||
def start_hotkey_listener(self, hotkey, callback):
|
||||
self.hotkey = hotkey
|
||||
self.hotkey_callback = callback
|
||||
|
||||
def stop_hotkey_listener(self):
|
||||
return
|
||||
|
||||
def start_cancel_listener(self, callback):
|
||||
_ = callback
|
||||
return
|
||||
|
||||
def stop_cancel_listener(self):
|
||||
return
|
||||
|
||||
def validate_hotkey(self, hotkey):
|
||||
_ = hotkey
|
||||
return
|
||||
|
||||
def inject_text(self, text, backend, *, remove_transcription_from_clipboard=False):
|
||||
_ = (text, backend, remove_transcription_from_clipboard)
|
||||
return
|
||||
|
||||
def run_tray(self, _state_getter, on_quit, **_kwargs):
|
||||
on_quit()
|
||||
|
||||
def request_quit(self):
|
||||
return
|
||||
|
||||
|
||||
class _HotkeyFailDesktop(_FakeDesktop):
    """Desktop double whose hotkey registration always fails."""

    def start_hotkey_listener(self, hotkey, callback):
        """Simulate an OS-level binding conflict for any requested hotkey."""
        del hotkey, callback
        raise RuntimeError("already in use")
|
||||
|
||||
|
||||
class _FakeDaemon:
|
||||
def __init__(self, cfg, _desktop, *, verbose=False, config_path=None):
|
||||
self.cfg = cfg
|
||||
self.verbose = verbose
|
||||
self.config_path = config_path
|
||||
self._paused = False
|
||||
|
||||
def get_state(self):
|
||||
return "idle"
|
||||
|
||||
def is_paused(self):
|
||||
return self._paused
|
||||
|
||||
def toggle_paused(self):
|
||||
self._paused = not self._paused
|
||||
return self._paused
|
||||
|
||||
def apply_config(self, cfg):
|
||||
self.cfg = cfg
|
||||
|
||||
def toggle(self):
|
||||
return
|
||||
|
||||
def shutdown(self, timeout=1.0):
|
||||
_ = timeout
|
||||
return True
|
||||
|
||||
|
||||
class _RetrySetupDesktop(_FakeDesktop):
    """Desktop double whose tray opens settings once before quitting."""

    def __init__(self):
        super().__init__()
        # Counts how often the settings dialog was opened from the tray.
        self.settings_invocations = 0

    def run_tray(self, _state_getter, on_quit, **kwargs):
        """On the first call invoke on_open_settings (if given); then quit."""
        open_settings = kwargs.get("on_open_settings")
        first_call = self.settings_invocations == 0
        if open_settings is not None and first_call:
            self.settings_invocations += 1
            open_settings()
            return
        on_quit()
|
||||
|
||||
|
||||
class _FakeBenchEditorStage:
|
||||
def warmup(self):
|
||||
return
|
||||
|
||||
def rewrite(self, transcript, *, language, dictionary_context):
|
||||
_ = dictionary_context
|
||||
return SimpleNamespace(
|
||||
final_text=f"[{language}] {transcript.strip()}",
|
||||
latency_ms=1.0,
|
||||
pass1_ms=0.5,
|
||||
pass2_ms=0.5,
|
||||
)
|
||||
|
||||
|
||||
class AmanCliTests(unittest.TestCase):
|
||||
def test_parse_cli_args_help_flag_uses_top_level_parser(self):
|
||||
out = io.StringIO()
|
||||
|
||||
with patch("sys.stdout", out), self.assertRaises(SystemExit) as exc:
|
||||
aman._parse_cli_args(["--help"])
|
||||
aman_cli.parse_cli_args(["--help"])
|
||||
|
||||
self.assertEqual(exc.exception.code, 0)
|
||||
rendered = out.getvalue()
|
||||
|
|
@ -133,31 +33,31 @@ class AmanCliTests(unittest.TestCase):
|
|||
out = io.StringIO()
|
||||
|
||||
with patch("sys.stdout", out), self.assertRaises(SystemExit) as exc:
|
||||
aman._parse_cli_args(["-h"])
|
||||
aman_cli.parse_cli_args(["-h"])
|
||||
|
||||
self.assertEqual(exc.exception.code, 0)
|
||||
self.assertIn("self-check", out.getvalue())
|
||||
|
||||
def test_parse_cli_args_defaults_to_run_command(self):
|
||||
args = aman._parse_cli_args(["--dry-run"])
|
||||
args = aman_cli.parse_cli_args(["--dry-run"])
|
||||
|
||||
self.assertEqual(args.command, "run")
|
||||
self.assertTrue(args.dry_run)
|
||||
|
||||
def test_parse_cli_args_doctor_command(self):
|
||||
args = aman._parse_cli_args(["doctor", "--json"])
|
||||
args = aman_cli.parse_cli_args(["doctor", "--json"])
|
||||
|
||||
self.assertEqual(args.command, "doctor")
|
||||
self.assertTrue(args.json)
|
||||
|
||||
def test_parse_cli_args_self_check_command(self):
|
||||
args = aman._parse_cli_args(["self-check", "--json"])
|
||||
args = aman_cli.parse_cli_args(["self-check", "--json"])
|
||||
|
||||
self.assertEqual(args.command, "self-check")
|
||||
self.assertTrue(args.json)
|
||||
|
||||
def test_parse_cli_args_bench_command(self):
|
||||
args = aman._parse_cli_args(
|
||||
args = aman_cli.parse_cli_args(
|
||||
["bench", "--text", "hello", "--repeat", "2", "--warmup", "0", "--json"]
|
||||
)
|
||||
|
||||
|
|
@ -169,11 +69,17 @@ class AmanCliTests(unittest.TestCase):
|
|||
|
||||
def test_parse_cli_args_bench_requires_input(self):
|
||||
with self.assertRaises(SystemExit):
|
||||
aman._parse_cli_args(["bench"])
|
||||
aman_cli.parse_cli_args(["bench"])
|
||||
|
||||
def test_parse_cli_args_eval_models_command(self):
|
||||
args = aman._parse_cli_args(
|
||||
["eval-models", "--dataset", "benchmarks/cleanup_dataset.jsonl", "--matrix", "benchmarks/model_matrix.small_first.json"]
|
||||
args = aman_cli.parse_cli_args(
|
||||
[
|
||||
"eval-models",
|
||||
"--dataset",
|
||||
"benchmarks/cleanup_dataset.jsonl",
|
||||
"--matrix",
|
||||
"benchmarks/model_matrix.small_first.json",
|
||||
]
|
||||
)
|
||||
self.assertEqual(args.command, "eval-models")
|
||||
self.assertEqual(args.dataset, "benchmarks/cleanup_dataset.jsonl")
|
||||
|
|
@ -183,7 +89,7 @@ class AmanCliTests(unittest.TestCase):
|
|||
self.assertEqual(args.report_version, 2)
|
||||
|
||||
def test_parse_cli_args_eval_models_with_heuristic_options(self):
|
||||
args = aman._parse_cli_args(
|
||||
args = aman_cli.parse_cli_args(
|
||||
[
|
||||
"eval-models",
|
||||
"--dataset",
|
||||
|
|
@ -203,7 +109,7 @@ class AmanCliTests(unittest.TestCase):
|
|||
self.assertEqual(args.report_version, 2)
|
||||
|
||||
def test_parse_cli_args_build_heuristic_dataset_command(self):
|
||||
args = aman._parse_cli_args(
|
||||
args = aman_cli.parse_cli_args(
|
||||
[
|
||||
"build-heuristic-dataset",
|
||||
"--input",
|
||||
|
|
@ -216,79 +122,40 @@ class AmanCliTests(unittest.TestCase):
|
|||
self.assertEqual(args.input, "benchmarks/heuristics_dataset.raw.jsonl")
|
||||
self.assertEqual(args.output, "benchmarks/heuristics_dataset.jsonl")
|
||||
|
||||
def test_parse_cli_args_sync_default_model_command(self):
|
||||
args = aman._parse_cli_args(
|
||||
[
|
||||
"sync-default-model",
|
||||
"--report",
|
||||
"benchmarks/results/latest.json",
|
||||
"--artifacts",
|
||||
"benchmarks/model_artifacts.json",
|
||||
"--constants",
|
||||
"src/constants.py",
|
||||
"--check",
|
||||
]
|
||||
)
|
||||
self.assertEqual(args.command, "sync-default-model")
|
||||
self.assertEqual(args.report, "benchmarks/results/latest.json")
|
||||
self.assertEqual(args.artifacts, "benchmarks/model_artifacts.json")
|
||||
self.assertEqual(args.constants, "src/constants.py")
|
||||
self.assertTrue(args.check)
|
||||
def test_parse_cli_args_legacy_maint_command_errors_with_migration_hint(self):
|
||||
err = io.StringIO()
|
||||
|
||||
with patch("sys.stderr", err), self.assertRaises(SystemExit) as exc:
|
||||
aman_cli.parse_cli_args(["sync-default-model"])
|
||||
|
||||
self.assertEqual(exc.exception.code, 2)
|
||||
self.assertIn("aman-maint sync-default-model", err.getvalue())
|
||||
self.assertIn("make sync-default-model", err.getvalue())
|
||||
|
||||
def test_version_command_prints_version(self):
|
||||
out = io.StringIO()
|
||||
args = aman._parse_cli_args(["version"])
|
||||
with patch("aman._app_version", return_value="1.2.3"), patch("sys.stdout", out):
|
||||
exit_code = aman._version_command(args)
|
||||
args = aman_cli.parse_cli_args(["version"])
|
||||
with patch("aman_cli.app_version", return_value="1.2.3"), patch("sys.stdout", out):
|
||||
exit_code = aman_cli.version_command(args)
|
||||
self.assertEqual(exit_code, 0)
|
||||
self.assertEqual(out.getvalue().strip(), "1.2.3")
|
||||
|
||||
def test_version_command_does_not_import_config_ui(self):
|
||||
script = f"""
|
||||
import builtins
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
sys.path.insert(0, {str(SRC)!r})
|
||||
real_import = builtins.__import__
|
||||
|
||||
def blocked(name, globals=None, locals=None, fromlist=(), level=0):
|
||||
if name == "config_ui":
|
||||
raise ModuleNotFoundError("blocked config_ui")
|
||||
return real_import(name, globals, locals, fromlist, level)
|
||||
|
||||
builtins.__import__ = blocked
|
||||
import aman
|
||||
args = aman._parse_cli_args(["version"])
|
||||
raise SystemExit(aman._version_command(args))
|
||||
"""
|
||||
result = subprocess.run(
|
||||
[sys.executable, "-c", script],
|
||||
cwd=ROOT,
|
||||
text=True,
|
||||
capture_output=True,
|
||||
check=False,
|
||||
)
|
||||
|
||||
self.assertEqual(result.returncode, 0, result.stderr)
|
||||
self.assertRegex(result.stdout.strip(), r"\S+")
|
||||
|
||||
def test_app_version_prefers_local_pyproject_version(self):
|
||||
pyproject_text = '[project]\nversion = "9.9.9"\n'
|
||||
|
||||
with patch.object(aman.Path, "exists", return_value=True), patch.object(
|
||||
aman.Path, "read_text", return_value=pyproject_text
|
||||
), patch("aman.importlib.metadata.version", return_value="1.0.0"):
|
||||
self.assertEqual(aman._app_version(), "9.9.9")
|
||||
with patch.object(aman_cli.Path, "exists", return_value=True), patch.object(
|
||||
aman_cli.Path, "read_text", return_value=pyproject_text
|
||||
), patch("aman_cli.importlib.metadata.version", return_value="1.0.0"):
|
||||
self.assertEqual(aman_cli.app_version(), "9.9.9")
|
||||
|
||||
def test_doctor_command_json_output_and_exit_code(self):
|
||||
report = DiagnosticReport(
|
||||
checks=[DiagnosticCheck(id="config.load", status="ok", message="ok", next_step="")]
|
||||
)
|
||||
args = aman._parse_cli_args(["doctor", "--json"])
|
||||
args = aman_cli.parse_cli_args(["doctor", "--json"])
|
||||
out = io.StringIO()
|
||||
with patch("aman.run_doctor", return_value=report), patch("sys.stdout", out):
|
||||
exit_code = aman._doctor_command(args)
|
||||
with patch("aman_cli.run_doctor", return_value=report), patch("sys.stdout", out):
|
||||
exit_code = aman_cli.doctor_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 0)
|
||||
payload = json.loads(out.getvalue())
|
||||
|
|
@ -300,10 +167,10 @@ raise SystemExit(aman._version_command(args))
|
|||
report = DiagnosticReport(
|
||||
checks=[DiagnosticCheck(id="config.load", status="fail", message="broken", next_step="fix")]
|
||||
)
|
||||
args = aman._parse_cli_args(["doctor"])
|
||||
args = aman_cli.parse_cli_args(["doctor"])
|
||||
out = io.StringIO()
|
||||
with patch("aman.run_doctor", return_value=report), patch("sys.stdout", out):
|
||||
exit_code = aman._doctor_command(args)
|
||||
with patch("aman_cli.run_doctor", return_value=report), patch("sys.stdout", out):
|
||||
exit_code = aman_cli.doctor_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 2)
|
||||
self.assertIn("[FAIL] config.load", out.getvalue())
|
||||
|
|
@ -313,10 +180,10 @@ raise SystemExit(aman._version_command(args))
|
|||
report = DiagnosticReport(
|
||||
checks=[DiagnosticCheck(id="model.cache", status="warn", message="missing", next_step="run aman once")]
|
||||
)
|
||||
args = aman._parse_cli_args(["doctor"])
|
||||
args = aman_cli.parse_cli_args(["doctor"])
|
||||
out = io.StringIO()
|
||||
with patch("aman.run_doctor", return_value=report), patch("sys.stdout", out):
|
||||
exit_code = aman._doctor_command(args)
|
||||
with patch("aman_cli.run_doctor", return_value=report), patch("sys.stdout", out):
|
||||
exit_code = aman_cli.doctor_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 0)
|
||||
self.assertIn("[WARN] model.cache", out.getvalue())
|
||||
|
|
@ -326,275 +193,22 @@ raise SystemExit(aman._version_command(args))
|
|||
report = DiagnosticReport(
|
||||
checks=[DiagnosticCheck(id="startup.readiness", status="ok", message="ready", next_step="")]
|
||||
)
|
||||
args = aman._parse_cli_args(["self-check", "--json"])
|
||||
args = aman_cli.parse_cli_args(["self-check", "--json"])
|
||||
out = io.StringIO()
|
||||
with patch("aman.run_self_check", return_value=report) as runner, patch("sys.stdout", out):
|
||||
exit_code = aman._self_check_command(args)
|
||||
with patch("aman_cli.run_self_check", return_value=report) as runner, patch("sys.stdout", out):
|
||||
exit_code = aman_cli.self_check_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 0)
|
||||
runner.assert_called_once_with("")
|
||||
payload = json.loads(out.getvalue())
|
||||
self.assertEqual(payload["status"], "ok")
|
||||
|
||||
def test_bench_command_json_output(self):
|
||||
args = aman._parse_cli_args(["bench", "--text", "hello", "--repeat", "2", "--warmup", "0", "--json"])
|
||||
out = io.StringIO()
|
||||
with patch("aman.load", return_value=Config()), patch(
|
||||
"aman._build_editor_stage", return_value=_FakeBenchEditorStage()
|
||||
), patch("sys.stdout", out):
|
||||
exit_code = aman._bench_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 0)
|
||||
payload = json.loads(out.getvalue())
|
||||
self.assertEqual(payload["measured_runs"], 2)
|
||||
self.assertEqual(payload["summary"]["runs"], 2)
|
||||
self.assertEqual(len(payload["runs"]), 2)
|
||||
self.assertEqual(payload["editor_backend"], "local_llama_builtin")
|
||||
self.assertIn("avg_alignment_ms", payload["summary"])
|
||||
self.assertIn("avg_fact_guard_ms", payload["summary"])
|
||||
self.assertIn("alignment_applied", payload["runs"][0])
|
||||
self.assertIn("fact_guard_action", payload["runs"][0])
|
||||
|
||||
def test_bench_command_supports_text_file_input(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
text_file = Path(td) / "input.txt"
|
||||
text_file.write_text("hello from file", encoding="utf-8")
|
||||
args = aman._parse_cli_args(
|
||||
["bench", "--text-file", str(text_file), "--repeat", "1", "--warmup", "0", "--print-output"]
|
||||
)
|
||||
out = io.StringIO()
|
||||
with patch("aman.load", return_value=Config()), patch(
|
||||
"aman._build_editor_stage", return_value=_FakeBenchEditorStage()
|
||||
), patch("sys.stdout", out):
|
||||
exit_code = aman._bench_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 0)
|
||||
self.assertIn("[auto] hello from file", out.getvalue())
|
||||
|
||||
def test_bench_command_rejects_empty_input(self):
|
||||
args = aman._parse_cli_args(["bench", "--text", " "])
|
||||
with patch("aman.load", return_value=Config()), patch(
|
||||
"aman._build_editor_stage", return_value=_FakeBenchEditorStage()
|
||||
):
|
||||
exit_code = aman._bench_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 1)
|
||||
|
||||
def test_bench_command_rejects_non_positive_repeat(self):
|
||||
args = aman._parse_cli_args(["bench", "--text", "hello", "--repeat", "0"])
|
||||
with patch("aman.load", return_value=Config()), patch(
|
||||
"aman._build_editor_stage", return_value=_FakeBenchEditorStage()
|
||||
):
|
||||
exit_code = aman._bench_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 1)
|
||||
|
||||
def test_eval_models_command_writes_report(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
output_path = Path(td) / "report.json"
|
||||
args = aman._parse_cli_args(
|
||||
[
|
||||
"eval-models",
|
||||
"--dataset",
|
||||
"benchmarks/cleanup_dataset.jsonl",
|
||||
"--matrix",
|
||||
"benchmarks/model_matrix.small_first.json",
|
||||
"--output",
|
||||
str(output_path),
|
||||
"--json",
|
||||
]
|
||||
)
|
||||
out = io.StringIO()
|
||||
fake_report = {
|
||||
"models": [{"name": "base", "best_param_set": {"latency_ms": {"p50": 1000.0}, "quality": {"hybrid_score_avg": 0.8, "parse_valid_rate": 1.0}}}],
|
||||
"winner_recommendation": {"name": "base", "reason": "test"},
|
||||
}
|
||||
with patch("aman.run_model_eval", return_value=fake_report), patch("sys.stdout", out):
|
||||
exit_code = aman._eval_models_command(args)
|
||||
self.assertEqual(exit_code, 0)
|
||||
self.assertTrue(output_path.exists())
|
||||
payload = json.loads(output_path.read_text(encoding="utf-8"))
|
||||
self.assertEqual(payload["winner_recommendation"]["name"], "base")
|
||||
|
||||
def test_eval_models_command_forwards_heuristic_arguments(self):
|
||||
args = aman._parse_cli_args(
|
||||
[
|
||||
"eval-models",
|
||||
"--dataset",
|
||||
"benchmarks/cleanup_dataset.jsonl",
|
||||
"--matrix",
|
||||
"benchmarks/model_matrix.small_first.json",
|
||||
"--heuristic-dataset",
|
||||
"benchmarks/heuristics_dataset.jsonl",
|
||||
"--heuristic-weight",
|
||||
"0.35",
|
||||
"--report-version",
|
||||
"2",
|
||||
"--json",
|
||||
]
|
||||
)
|
||||
out = io.StringIO()
|
||||
fake_report = {
|
||||
"models": [{"name": "base", "best_param_set": {}}],
|
||||
"winner_recommendation": {"name": "base", "reason": "ok"},
|
||||
}
|
||||
with patch("aman.run_model_eval", return_value=fake_report) as run_eval_mock, patch(
|
||||
"sys.stdout", out
|
||||
):
|
||||
exit_code = aman._eval_models_command(args)
|
||||
self.assertEqual(exit_code, 0)
|
||||
run_eval_mock.assert_called_once_with(
|
||||
"benchmarks/cleanup_dataset.jsonl",
|
||||
"benchmarks/model_matrix.small_first.json",
|
||||
heuristic_dataset_path="benchmarks/heuristics_dataset.jsonl",
|
||||
heuristic_weight=0.35,
|
||||
report_version=2,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
def test_build_heuristic_dataset_command_json_output(self):
|
||||
args = aman._parse_cli_args(
|
||||
[
|
||||
"build-heuristic-dataset",
|
||||
"--input",
|
||||
"benchmarks/heuristics_dataset.raw.jsonl",
|
||||
"--output",
|
||||
"benchmarks/heuristics_dataset.jsonl",
|
||||
"--json",
|
||||
]
|
||||
)
|
||||
out = io.StringIO()
|
||||
summary = {
|
||||
"raw_rows": 4,
|
||||
"written_rows": 4,
|
||||
"generated_word_rows": 2,
|
||||
"output_path": "benchmarks/heuristics_dataset.jsonl",
|
||||
}
|
||||
with patch("aman.build_heuristic_dataset", return_value=summary), patch("sys.stdout", out):
|
||||
exit_code = aman._build_heuristic_dataset_command(args)
|
||||
self.assertEqual(exit_code, 0)
|
||||
payload = json.loads(out.getvalue())
|
||||
self.assertEqual(payload["written_rows"], 4)
|
||||
|
||||
def test_sync_default_model_command_updates_constants(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
report_path = Path(td) / "latest.json"
|
||||
artifacts_path = Path(td) / "artifacts.json"
|
||||
constants_path = Path(td) / "constants.py"
|
||||
report_path.write_text(
|
||||
json.dumps(
|
||||
{
|
||||
"winner_recommendation": {
|
||||
"name": "test-model",
|
||||
}
|
||||
}
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
artifacts_path.write_text(
|
||||
json.dumps(
|
||||
{
|
||||
"models": [
|
||||
{
|
||||
"name": "test-model",
|
||||
"filename": "winner.gguf",
|
||||
"url": "https://example.invalid/winner.gguf",
|
||||
"sha256": "a" * 64,
|
||||
}
|
||||
]
|
||||
}
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
constants_path.write_text(
|
||||
(
|
||||
'MODEL_NAME = "old.gguf"\n'
|
||||
'MODEL_URL = "https://example.invalid/old.gguf"\n'
|
||||
'MODEL_SHA256 = "' + ("b" * 64) + '"\n'
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
|
||||
args = aman._parse_cli_args(
|
||||
[
|
||||
"sync-default-model",
|
||||
"--report",
|
||||
str(report_path),
|
||||
"--artifacts",
|
||||
str(artifacts_path),
|
||||
"--constants",
|
||||
str(constants_path),
|
||||
]
|
||||
)
|
||||
exit_code = aman._sync_default_model_command(args)
|
||||
self.assertEqual(exit_code, 0)
|
||||
updated = constants_path.read_text(encoding="utf-8")
|
||||
self.assertIn('MODEL_NAME = "winner.gguf"', updated)
|
||||
self.assertIn('MODEL_URL = "https://example.invalid/winner.gguf"', updated)
|
||||
self.assertIn('MODEL_SHA256 = "' + ("a" * 64) + '"', updated)
|
||||
|
||||
def test_sync_default_model_command_check_mode_returns_2_on_drift(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
report_path = Path(td) / "latest.json"
|
||||
artifacts_path = Path(td) / "artifacts.json"
|
||||
constants_path = Path(td) / "constants.py"
|
||||
report_path.write_text(
|
||||
json.dumps(
|
||||
{
|
||||
"winner_recommendation": {
|
||||
"name": "test-model",
|
||||
}
|
||||
}
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
artifacts_path.write_text(
|
||||
json.dumps(
|
||||
{
|
||||
"models": [
|
||||
{
|
||||
"name": "test-model",
|
||||
"filename": "winner.gguf",
|
||||
"url": "https://example.invalid/winner.gguf",
|
||||
"sha256": "a" * 64,
|
||||
}
|
||||
]
|
||||
}
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
constants_path.write_text(
|
||||
(
|
||||
'MODEL_NAME = "old.gguf"\n'
|
||||
'MODEL_URL = "https://example.invalid/old.gguf"\n'
|
||||
'MODEL_SHA256 = "' + ("b" * 64) + '"\n'
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
|
||||
args = aman._parse_cli_args(
|
||||
[
|
||||
"sync-default-model",
|
||||
"--report",
|
||||
str(report_path),
|
||||
"--artifacts",
|
||||
str(artifacts_path),
|
||||
"--constants",
|
||||
str(constants_path),
|
||||
"--check",
|
||||
]
|
||||
)
|
||||
exit_code = aman._sync_default_model_command(args)
|
||||
self.assertEqual(exit_code, 2)
|
||||
updated = constants_path.read_text(encoding="utf-8")
|
||||
self.assertIn('MODEL_NAME = "old.gguf"', updated)
|
||||
|
||||
def test_init_command_creates_default_config(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
args = aman._parse_cli_args(["init", "--config", str(path)])
|
||||
args = aman_cli.parse_cli_args(["init", "--config", str(path)])
|
||||
|
||||
exit_code = aman._init_command(args)
|
||||
exit_code = aman_cli.init_command(args)
|
||||
self.assertEqual(exit_code, 0)
|
||||
self.assertTrue(path.exists())
|
||||
payload = json.loads(path.read_text(encoding="utf-8"))
|
||||
|
|
@ -604,9 +218,9 @@ raise SystemExit(aman._version_command(args))
|
|||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
path.write_text('{"daemon":{"hotkey":"Super+m"}}\n', encoding="utf-8")
|
||||
args = aman._parse_cli_args(["init", "--config", str(path)])
|
||||
args = aman_cli.parse_cli_args(["init", "--config", str(path)])
|
||||
|
||||
exit_code = aman._init_command(args)
|
||||
exit_code = aman_cli.init_command(args)
|
||||
self.assertEqual(exit_code, 1)
|
||||
self.assertIn("Super+m", path.read_text(encoding="utf-8"))
|
||||
|
||||
|
|
@ -614,109 +228,13 @@ raise SystemExit(aman._version_command(args))
|
|||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
path.write_text('{"daemon":{"hotkey":"Super+m"}}\n', encoding="utf-8")
|
||||
args = aman._parse_cli_args(["init", "--config", str(path), "--force"])
|
||||
args = aman_cli.parse_cli_args(["init", "--config", str(path), "--force"])
|
||||
|
||||
exit_code = aman._init_command(args)
|
||||
exit_code = aman_cli.init_command(args)
|
||||
self.assertEqual(exit_code, 0)
|
||||
payload = json.loads(path.read_text(encoding="utf-8"))
|
||||
self.assertEqual(payload["daemon"]["hotkey"], "Cmd+m")
|
||||
|
||||
def test_run_command_missing_config_uses_settings_ui_and_writes_file(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
args = aman._parse_cli_args(["run", "--config", str(path)])
|
||||
desktop = _FakeDesktop()
|
||||
onboard_cfg = Config()
|
||||
onboard_cfg.daemon.hotkey = "Super+m"
|
||||
with patch("aman._lock_single_instance", return_value=object()), patch(
|
||||
"aman.get_desktop_adapter", return_value=desktop
|
||||
), patch(
|
||||
"aman._run_config_ui",
|
||||
return_value=ConfigUiResult(saved=True, config=onboard_cfg, closed_reason="saved"),
|
||||
) as config_ui_mock, patch("aman.Daemon", _FakeDaemon):
|
||||
exit_code = aman._run_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 0)
|
||||
self.assertTrue(path.exists())
|
||||
self.assertEqual(desktop.hotkey, "Super+m")
|
||||
config_ui_mock.assert_called_once()
|
||||
|
||||
def test_run_command_missing_config_cancel_returns_without_starting_daemon(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
args = aman._parse_cli_args(["run", "--config", str(path)])
|
||||
desktop = _FakeDesktop()
|
||||
with patch("aman._lock_single_instance", return_value=object()), patch(
|
||||
"aman.get_desktop_adapter", return_value=desktop
|
||||
), patch(
|
||||
"aman._run_config_ui",
|
||||
return_value=ConfigUiResult(saved=False, config=None, closed_reason="cancelled"),
|
||||
), patch("aman.Daemon") as daemon_cls:
|
||||
exit_code = aman._run_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 0)
|
||||
self.assertFalse(path.exists())
|
||||
daemon_cls.assert_not_called()
|
||||
|
||||
def test_run_command_missing_config_cancel_then_retry_settings(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
args = aman._parse_cli_args(["run", "--config", str(path)])
|
||||
desktop = _RetrySetupDesktop()
|
||||
onboard_cfg = Config()
|
||||
config_ui_results = [
|
||||
ConfigUiResult(saved=False, config=None, closed_reason="cancelled"),
|
||||
ConfigUiResult(saved=True, config=onboard_cfg, closed_reason="saved"),
|
||||
]
|
||||
with patch("aman._lock_single_instance", return_value=object()), patch(
|
||||
"aman.get_desktop_adapter", return_value=desktop
|
||||
), patch(
|
||||
"aman._run_config_ui",
|
||||
side_effect=config_ui_results,
|
||||
), patch("aman.Daemon", _FakeDaemon):
|
||||
exit_code = aman._run_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 0)
|
||||
self.assertTrue(path.exists())
|
||||
self.assertEqual(desktop.settings_invocations, 1)
|
||||
|
||||
def test_run_command_hotkey_failure_logs_actionable_issue(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
path.write_text(json.dumps({"config_version": 1}) + "\n", encoding="utf-8")
|
||||
args = aman._parse_cli_args(["run", "--config", str(path)])
|
||||
desktop = _HotkeyFailDesktop()
|
||||
with patch("aman._lock_single_instance", return_value=object()), patch(
|
||||
"aman.get_desktop_adapter", return_value=desktop
|
||||
), patch("aman.load", return_value=Config()), patch("aman.Daemon", _FakeDaemon), self.assertLogs(
|
||||
level="ERROR"
|
||||
) as logs:
|
||||
exit_code = aman._run_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 1)
|
||||
rendered = "\n".join(logs.output)
|
||||
self.assertIn("hotkey.parse: hotkey setup failed: already in use", rendered)
|
||||
self.assertIn("next_step: run `aman doctor --config", rendered)
|
||||
|
||||
def test_run_command_daemon_init_failure_logs_self_check_next_step(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
path.write_text(json.dumps({"config_version": 1}) + "\n", encoding="utf-8")
|
||||
args = aman._parse_cli_args(["run", "--config", str(path)])
|
||||
desktop = _FakeDesktop()
|
||||
with patch("aman._lock_single_instance", return_value=object()), patch(
|
||||
"aman.get_desktop_adapter", return_value=desktop
|
||||
), patch("aman.load", return_value=Config()), patch(
|
||||
"aman.Daemon", side_effect=RuntimeError("warmup boom")
|
||||
), self.assertLogs(level="ERROR") as logs:
|
||||
exit_code = aman._run_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 1)
|
||||
rendered = "\n".join(logs.output)
|
||||
self.assertIn("startup.readiness: startup failed: warmup boom", rendered)
|
||||
self.assertIn("next_step: run `aman self-check --config", rendered)
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
|
|||
51
tests/test_aman_entrypoint.py
Normal file
51
tests/test_aman_entrypoint.py
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
|
||||
ROOT = Path(__file__).resolve().parents[1]
|
||||
SRC = ROOT / "src"
|
||||
if str(SRC) not in sys.path:
|
||||
sys.path.insert(0, str(SRC))
|
||||
|
||||
import aman
|
||||
import aman_cli
|
||||
|
||||
|
||||
class AmanEntrypointTests(unittest.TestCase):
|
||||
def test_aman_module_only_reexports_main(self):
|
||||
self.assertIs(aman.main, aman_cli.main)
|
||||
self.assertFalse(hasattr(aman, "Daemon"))
|
||||
|
||||
def test_python_m_aman_version_succeeds_without_config_ui(self):
|
||||
script = f"""
|
||||
import builtins
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, {str(SRC)!r})
|
||||
real_import = builtins.__import__
|
||||
|
||||
def blocked(name, globals=None, locals=None, fromlist=(), level=0):
|
||||
if name == "config_ui":
|
||||
raise ModuleNotFoundError("blocked config_ui")
|
||||
return real_import(name, globals, locals, fromlist, level)
|
||||
|
||||
builtins.__import__ = blocked
|
||||
import aman
|
||||
raise SystemExit(aman.main(["version"]))
|
||||
"""
|
||||
result = subprocess.run(
|
||||
[sys.executable, "-c", script],
|
||||
cwd=ROOT,
|
||||
text=True,
|
||||
capture_output=True,
|
||||
check=False,
|
||||
)
|
||||
|
||||
self.assertEqual(result.returncode, 0, result.stderr)
|
||||
self.assertRegex(result.stdout.strip(), re.compile(r"\S+"))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
148
tests/test_aman_maint.py
Normal file
148
tests/test_aman_maint.py
Normal file
|
|
@ -0,0 +1,148 @@
|
|||
import json
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
ROOT = Path(__file__).resolve().parents[1]
|
||||
SRC = ROOT / "src"
|
||||
if str(SRC) not in sys.path:
|
||||
sys.path.insert(0, str(SRC))
|
||||
|
||||
import aman_maint
|
||||
import aman_model_sync
|
||||
|
||||
|
||||
class AmanMaintTests(unittest.TestCase):
|
||||
def test_parse_args_sync_default_model_command(self):
|
||||
args = aman_maint.parse_args(
|
||||
[
|
||||
"sync-default-model",
|
||||
"--report",
|
||||
"benchmarks/results/latest.json",
|
||||
"--artifacts",
|
||||
"benchmarks/model_artifacts.json",
|
||||
"--constants",
|
||||
"src/constants.py",
|
||||
"--check",
|
||||
]
|
||||
)
|
||||
|
||||
self.assertEqual(args.command, "sync-default-model")
|
||||
self.assertEqual(args.report, "benchmarks/results/latest.json")
|
||||
self.assertEqual(args.artifacts, "benchmarks/model_artifacts.json")
|
||||
self.assertEqual(args.constants, "src/constants.py")
|
||||
self.assertTrue(args.check)
|
||||
|
||||
def test_main_dispatches_sync_default_model_command(self):
|
||||
with patch("aman_model_sync.sync_default_model_command", return_value=7) as handler:
|
||||
exit_code = aman_maint.main(["sync-default-model"])
|
||||
|
||||
self.assertEqual(exit_code, 7)
|
||||
handler.assert_called_once()
|
||||
|
||||
def test_sync_default_model_command_updates_constants(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
report_path = Path(td) / "latest.json"
|
||||
artifacts_path = Path(td) / "artifacts.json"
|
||||
constants_path = Path(td) / "constants.py"
|
||||
report_path.write_text(
|
||||
json.dumps({"winner_recommendation": {"name": "test-model"}}),
|
||||
encoding="utf-8",
|
||||
)
|
||||
artifacts_path.write_text(
|
||||
json.dumps(
|
||||
{
|
||||
"models": [
|
||||
{
|
||||
"name": "test-model",
|
||||
"filename": "winner.gguf",
|
||||
"url": "https://example.invalid/winner.gguf",
|
||||
"sha256": "a" * 64,
|
||||
}
|
||||
]
|
||||
}
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
constants_path.write_text(
|
||||
(
|
||||
'MODEL_NAME = "old.gguf"\n'
|
||||
'MODEL_URL = "https://example.invalid/old.gguf"\n'
|
||||
'MODEL_SHA256 = "' + ("b" * 64) + '"\n'
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
|
||||
args = aman_maint.parse_args(
|
||||
[
|
||||
"sync-default-model",
|
||||
"--report",
|
||||
str(report_path),
|
||||
"--artifacts",
|
||||
str(artifacts_path),
|
||||
"--constants",
|
||||
str(constants_path),
|
||||
]
|
||||
)
|
||||
exit_code = aman_model_sync.sync_default_model_command(args)
|
||||
self.assertEqual(exit_code, 0)
|
||||
updated = constants_path.read_text(encoding="utf-8")
|
||||
self.assertIn('MODEL_NAME = "winner.gguf"', updated)
|
||||
self.assertIn('MODEL_URL = "https://example.invalid/winner.gguf"', updated)
|
||||
self.assertIn('MODEL_SHA256 = "' + ("a" * 64) + '"', updated)
|
||||
|
||||
def test_sync_default_model_command_check_mode_returns_2_on_drift(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
report_path = Path(td) / "latest.json"
|
||||
artifacts_path = Path(td) / "artifacts.json"
|
||||
constants_path = Path(td) / "constants.py"
|
||||
report_path.write_text(
|
||||
json.dumps({"winner_recommendation": {"name": "test-model"}}),
|
||||
encoding="utf-8",
|
||||
)
|
||||
artifacts_path.write_text(
|
||||
json.dumps(
|
||||
{
|
||||
"models": [
|
||||
{
|
||||
"name": "test-model",
|
||||
"filename": "winner.gguf",
|
||||
"url": "https://example.invalid/winner.gguf",
|
||||
"sha256": "a" * 64,
|
||||
}
|
||||
]
|
||||
}
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
constants_path.write_text(
|
||||
(
|
||||
'MODEL_NAME = "old.gguf"\n'
|
||||
'MODEL_URL = "https://example.invalid/old.gguf"\n'
|
||||
'MODEL_SHA256 = "' + ("b" * 64) + '"\n'
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
|
||||
args = aman_maint.parse_args(
|
||||
[
|
||||
"sync-default-model",
|
||||
"--report",
|
||||
str(report_path),
|
||||
"--artifacts",
|
||||
str(artifacts_path),
|
||||
"--constants",
|
||||
str(constants_path),
|
||||
"--check",
|
||||
]
|
||||
)
|
||||
exit_code = aman_model_sync.sync_default_model_command(args)
|
||||
self.assertEqual(exit_code, 2)
|
||||
updated = constants_path.read_text(encoding="utf-8")
|
||||
self.assertIn('MODEL_NAME = "old.gguf"', updated)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
210
tests/test_aman_run.py
Normal file
210
tests/test_aman_run.py
Normal file
|
|
@ -0,0 +1,210 @@
|
|||
import json
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import patch
|
||||
|
||||
ROOT = Path(__file__).resolve().parents[1]
|
||||
SRC = ROOT / "src"
|
||||
if str(SRC) not in sys.path:
|
||||
sys.path.insert(0, str(SRC))
|
||||
|
||||
import aman_cli
|
||||
import aman_run
|
||||
from config import Config
|
||||
|
||||
|
||||
class _FakeDesktop:
|
||||
def __init__(self):
|
||||
self.hotkey = None
|
||||
self.hotkey_callback = None
|
||||
|
||||
def start_hotkey_listener(self, hotkey, callback):
|
||||
self.hotkey = hotkey
|
||||
self.hotkey_callback = callback
|
||||
|
||||
def stop_hotkey_listener(self):
|
||||
return
|
||||
|
||||
def start_cancel_listener(self, callback):
|
||||
_ = callback
|
||||
return
|
||||
|
||||
def stop_cancel_listener(self):
|
||||
return
|
||||
|
||||
def validate_hotkey(self, hotkey):
|
||||
_ = hotkey
|
||||
return
|
||||
|
||||
def inject_text(self, text, backend, *, remove_transcription_from_clipboard=False):
|
||||
_ = (text, backend, remove_transcription_from_clipboard)
|
||||
return
|
||||
|
||||
def run_tray(self, _state_getter, on_quit, **_kwargs):
|
||||
on_quit()
|
||||
|
||||
def request_quit(self):
|
||||
return
|
||||
|
||||
|
||||
class _HotkeyFailDesktop(_FakeDesktop):
|
||||
def start_hotkey_listener(self, hotkey, callback):
|
||||
_ = (hotkey, callback)
|
||||
raise RuntimeError("already in use")
|
||||
|
||||
|
||||
class _FakeDaemon:
|
||||
def __init__(self, cfg, _desktop, *, verbose=False, config_path=None):
|
||||
self.cfg = cfg
|
||||
self.verbose = verbose
|
||||
self.config_path = config_path
|
||||
self._paused = False
|
||||
|
||||
def get_state(self):
|
||||
return "idle"
|
||||
|
||||
def is_paused(self):
|
||||
return self._paused
|
||||
|
||||
def toggle_paused(self):
|
||||
self._paused = not self._paused
|
||||
return self._paused
|
||||
|
||||
def apply_config(self, cfg):
|
||||
self.cfg = cfg
|
||||
|
||||
def toggle(self):
|
||||
return
|
||||
|
||||
def shutdown(self, timeout=1.0):
|
||||
_ = timeout
|
||||
return True
|
||||
|
||||
|
||||
class _RetrySetupDesktop(_FakeDesktop):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.settings_invocations = 0
|
||||
|
||||
def run_tray(self, _state_getter, on_quit, **kwargs):
|
||||
settings_cb = kwargs.get("on_open_settings")
|
||||
if settings_cb is not None and self.settings_invocations == 0:
|
||||
self.settings_invocations += 1
|
||||
settings_cb()
|
||||
return
|
||||
on_quit()
|
||||
|
||||
|
||||
class AmanRunTests(unittest.TestCase):
|
||||
def test_lock_rejects_second_instance(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
with patch.dict(os.environ, {"XDG_RUNTIME_DIR": td}, clear=False):
|
||||
first = aman_run.lock_single_instance()
|
||||
try:
|
||||
with self.assertRaises(SystemExit) as ctx:
|
||||
aman_run.lock_single_instance()
|
||||
self.assertIn("already running", str(ctx.exception))
|
||||
finally:
|
||||
first.close()
|
||||
|
||||
def test_run_command_missing_config_uses_settings_ui_and_writes_file(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
args = aman_cli.parse_cli_args(["run", "--config", str(path)])
|
||||
desktop = _FakeDesktop()
|
||||
onboard_cfg = Config()
|
||||
onboard_cfg.daemon.hotkey = "Super+m"
|
||||
result = SimpleNamespace(saved=True, config=onboard_cfg, closed_reason="saved")
|
||||
with patch("aman_run.lock_single_instance", return_value=object()), patch(
|
||||
"aman_run.get_desktop_adapter", return_value=desktop
|
||||
), patch("aman_run.run_config_ui", return_value=result) as config_ui_mock, patch(
|
||||
"aman_run.Daemon", _FakeDaemon
|
||||
):
|
||||
exit_code = aman_run.run_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 0)
|
||||
self.assertTrue(path.exists())
|
||||
self.assertEqual(desktop.hotkey, "Super+m")
|
||||
config_ui_mock.assert_called_once()
|
||||
|
||||
def test_run_command_missing_config_cancel_returns_without_starting_daemon(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
args = aman_cli.parse_cli_args(["run", "--config", str(path)])
|
||||
desktop = _FakeDesktop()
|
||||
result = SimpleNamespace(saved=False, config=None, closed_reason="cancelled")
|
||||
with patch("aman_run.lock_single_instance", return_value=object()), patch(
|
||||
"aman_run.get_desktop_adapter", return_value=desktop
|
||||
), patch("aman_run.run_config_ui", return_value=result), patch(
|
||||
"aman_run.Daemon"
|
||||
) as daemon_cls:
|
||||
exit_code = aman_run.run_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 0)
|
||||
self.assertFalse(path.exists())
|
||||
daemon_cls.assert_not_called()
|
||||
|
||||
def test_run_command_missing_config_cancel_then_retry_settings(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
args = aman_cli.parse_cli_args(["run", "--config", str(path)])
|
||||
desktop = _RetrySetupDesktop()
|
||||
onboard_cfg = Config()
|
||||
config_ui_results = [
|
||||
SimpleNamespace(saved=False, config=None, closed_reason="cancelled"),
|
||||
SimpleNamespace(saved=True, config=onboard_cfg, closed_reason="saved"),
|
||||
]
|
||||
with patch("aman_run.lock_single_instance", return_value=object()), patch(
|
||||
"aman_run.get_desktop_adapter", return_value=desktop
|
||||
), patch("aman_run.run_config_ui", side_effect=config_ui_results), patch(
|
||||
"aman_run.Daemon", _FakeDaemon
|
||||
):
|
||||
exit_code = aman_run.run_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 0)
|
||||
self.assertTrue(path.exists())
|
||||
self.assertEqual(desktop.settings_invocations, 1)
|
||||
|
||||
def test_run_command_hotkey_failure_logs_actionable_issue(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
path.write_text(json.dumps({"config_version": 1}) + "\n", encoding="utf-8")
|
||||
args = aman_cli.parse_cli_args(["run", "--config", str(path)])
|
||||
desktop = _HotkeyFailDesktop()
|
||||
with patch("aman_run.lock_single_instance", return_value=object()), patch(
|
||||
"aman_run.get_desktop_adapter", return_value=desktop
|
||||
), patch("aman_run.load", return_value=Config()), patch(
|
||||
"aman_run.Daemon", _FakeDaemon
|
||||
), self.assertLogs(level="ERROR") as logs:
|
||||
exit_code = aman_run.run_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 1)
|
||||
rendered = "\n".join(logs.output)
|
||||
self.assertIn("hotkey.parse: hotkey setup failed: already in use", rendered)
|
||||
self.assertIn("next_step: run `aman doctor --config", rendered)
|
||||
|
||||
def test_run_command_daemon_init_failure_logs_self_check_next_step(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
path = Path(td) / "config.json"
|
||||
path.write_text(json.dumps({"config_version": 1}) + "\n", encoding="utf-8")
|
||||
args = aman_cli.parse_cli_args(["run", "--config", str(path)])
|
||||
desktop = _FakeDesktop()
|
||||
with patch("aman_run.lock_single_instance", return_value=object()), patch(
|
||||
"aman_run.get_desktop_adapter", return_value=desktop
|
||||
), patch("aman_run.load", return_value=Config()), patch(
|
||||
"aman_run.Daemon", side_effect=RuntimeError("warmup boom")
|
||||
), self.assertLogs(level="ERROR") as logs:
|
||||
exit_code = aman_run.run_command(args)
|
||||
|
||||
self.assertEqual(exit_code, 1)
|
||||
rendered = "\n".join(logs.output)
|
||||
self.assertIn("startup.readiness: startup failed: warmup boom", rendered)
|
||||
self.assertIn("next_step: run `aman self-check --config", rendered)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -1,6 +1,4 @@
|
|||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
|
@ -10,7 +8,7 @@ SRC = ROOT / "src"
|
|||
if str(SRC) not in sys.path:
|
||||
sys.path.insert(0, str(SRC))
|
||||
|
||||
import aman
|
||||
import aman_runtime
|
||||
from config import Config, VocabularyReplacement
|
||||
from stages.asr_whisper import AsrResult, AsrSegment, AsrWord
|
||||
|
||||
|
|
@ -128,10 +126,10 @@ class FakeAIProcessor:
|
|||
self.warmup_error = None
|
||||
self.process_error = None
|
||||
|
||||
def process(self, text, lang="auto", **_kwargs):
|
||||
def process(self, text, lang="auto", **kwargs):
|
||||
if self.process_error is not None:
|
||||
raise self.process_error
|
||||
self.last_kwargs = {"lang": lang, **_kwargs}
|
||||
self.last_kwargs = {"lang": lang, **kwargs}
|
||||
return text
|
||||
|
||||
def warmup(self, profile="default"):
|
||||
|
|
@ -174,8 +172,7 @@ def _asr_result(text: str, words: list[str], *, language: str = "auto") -> AsrRe
|
|||
|
||||
class DaemonTests(unittest.TestCase):
|
||||
def _config(self) -> Config:
|
||||
cfg = Config()
|
||||
return cfg
|
||||
return Config()
|
||||
|
||||
def _build_daemon(
|
||||
self,
|
||||
|
|
@ -185,16 +182,16 @@ class DaemonTests(unittest.TestCase):
|
|||
cfg: Config | None = None,
|
||||
verbose: bool = False,
|
||||
ai_processor: FakeAIProcessor | None = None,
|
||||
) -> aman.Daemon:
|
||||
) -> aman_runtime.Daemon:
|
||||
active_cfg = cfg if cfg is not None else self._config()
|
||||
active_ai_processor = ai_processor or FakeAIProcessor()
|
||||
with patch("aman._build_whisper_model", return_value=model), patch(
|
||||
"aman.LlamaProcessor", return_value=active_ai_processor
|
||||
with patch("aman_runtime.build_whisper_model", return_value=model), patch(
|
||||
"aman_processing.LlamaProcessor", return_value=active_ai_processor
|
||||
):
|
||||
return aman.Daemon(active_cfg, desktop, verbose=verbose)
|
||||
return aman_runtime.Daemon(active_cfg, desktop, verbose=verbose)
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
@patch("aman_runtime.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman_runtime.start_audio_recording", return_value=(object(), object()))
|
||||
def test_toggle_start_stop_injects_text(self, _start_mock, _stop_mock):
|
||||
desktop = FakeDesktop()
|
||||
daemon = self._build_daemon(desktop, FakeModel(), verbose=False)
|
||||
|
|
@ -205,15 +202,15 @@ class DaemonTests(unittest.TestCase):
|
|||
)
|
||||
|
||||
daemon.toggle()
|
||||
self.assertEqual(daemon.get_state(), aman.State.RECORDING)
|
||||
self.assertEqual(daemon.get_state(), aman_runtime.State.RECORDING)
|
||||
|
||||
daemon.toggle()
|
||||
|
||||
self.assertEqual(daemon.get_state(), aman.State.IDLE)
|
||||
self.assertEqual(daemon.get_state(), aman_runtime.State.IDLE)
|
||||
self.assertEqual(desktop.inject_calls, [("hello world", "clipboard", False)])
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
@patch("aman_runtime.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman_runtime.start_audio_recording", return_value=(object(), object()))
|
||||
def test_shutdown_stops_recording_without_injection(self, _start_mock, _stop_mock):
|
||||
desktop = FakeDesktop()
|
||||
daemon = self._build_daemon(desktop, FakeModel(), verbose=False)
|
||||
|
|
@ -224,14 +221,14 @@ class DaemonTests(unittest.TestCase):
|
|||
)
|
||||
|
||||
daemon.toggle()
|
||||
self.assertEqual(daemon.get_state(), aman.State.RECORDING)
|
||||
self.assertEqual(daemon.get_state(), aman_runtime.State.RECORDING)
|
||||
|
||||
self.assertTrue(daemon.shutdown(timeout=0.2))
|
||||
self.assertEqual(daemon.get_state(), aman.State.IDLE)
|
||||
self.assertEqual(daemon.get_state(), aman_runtime.State.IDLE)
|
||||
self.assertEqual(desktop.inject_calls, [])
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
@patch("aman_runtime.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman_runtime.start_audio_recording", return_value=(object(), object()))
|
||||
def test_dictionary_replacement_applies_after_ai(self, _start_mock, _stop_mock):
|
||||
desktop = FakeDesktop()
|
||||
model = FakeModel(text="good morning martha")
|
||||
|
|
@ -250,8 +247,8 @@ class DaemonTests(unittest.TestCase):
|
|||
|
||||
self.assertEqual(desktop.inject_calls, [("good morning Marta", "clipboard", False)])
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
@patch("aman_runtime.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman_runtime.start_audio_recording", return_value=(object(), object()))
|
||||
def test_editor_failure_aborts_output_injection(self, _start_mock, _stop_mock):
|
||||
desktop = FakeDesktop()
|
||||
model = FakeModel(text="hello world")
|
||||
|
|
@ -274,10 +271,10 @@ class DaemonTests(unittest.TestCase):
|
|||
daemon.toggle()
|
||||
|
||||
self.assertEqual(desktop.inject_calls, [])
|
||||
self.assertEqual(daemon.get_state(), aman.State.IDLE)
|
||||
self.assertEqual(daemon.get_state(), aman_runtime.State.IDLE)
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
@patch("aman_runtime.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman_runtime.start_audio_recording", return_value=(object(), object()))
|
||||
def test_live_path_uses_asr_words_for_alignment_correction(self, _start_mock, _stop_mock):
|
||||
desktop = FakeDesktop()
|
||||
ai_processor = FakeAIProcessor()
|
||||
|
|
@ -299,8 +296,8 @@ class DaemonTests(unittest.TestCase):
|
|||
self.assertEqual(desktop.inject_calls, [("set alarm for 7", "clipboard", False)])
|
||||
self.assertEqual(ai_processor.last_kwargs.get("lang"), "en")
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
@patch("aman_runtime.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman_runtime.start_audio_recording", return_value=(object(), object()))
|
||||
def test_live_path_calls_word_aware_pipeline_entrypoint(self, _start_mock, _stop_mock):
|
||||
desktop = FakeDesktop()
|
||||
daemon = self._build_daemon(desktop, FakeModel(), verbose=False)
|
||||
|
|
@ -413,10 +410,10 @@ class DaemonTests(unittest.TestCase):
|
|||
|
||||
def test_editor_stage_is_initialized_during_daemon_init(self):
|
||||
desktop = FakeDesktop()
|
||||
with patch("aman._build_whisper_model", return_value=FakeModel()), patch(
|
||||
"aman.LlamaProcessor", return_value=FakeAIProcessor()
|
||||
with patch("aman_runtime.build_whisper_model", return_value=FakeModel()), patch(
|
||||
"aman_processing.LlamaProcessor", return_value=FakeAIProcessor()
|
||||
) as processor_cls:
|
||||
daemon = aman.Daemon(self._config(), desktop, verbose=True)
|
||||
daemon = aman_runtime.Daemon(self._config(), desktop, verbose=True)
|
||||
|
||||
processor_cls.assert_called_once_with(verbose=True, model_path=None)
|
||||
self.assertIsNotNone(daemon.editor_stage)
|
||||
|
|
@ -424,10 +421,10 @@ class DaemonTests(unittest.TestCase):
|
|||
def test_editor_stage_is_warmed_up_during_daemon_init(self):
|
||||
desktop = FakeDesktop()
|
||||
ai_processor = FakeAIProcessor()
|
||||
with patch("aman._build_whisper_model", return_value=FakeModel()), patch(
|
||||
"aman.LlamaProcessor", return_value=ai_processor
|
||||
with patch("aman_runtime.build_whisper_model", return_value=FakeModel()), patch(
|
||||
"aman_processing.LlamaProcessor", return_value=ai_processor
|
||||
):
|
||||
daemon = aman.Daemon(self._config(), desktop, verbose=False)
|
||||
daemon = aman_runtime.Daemon(self._config(), desktop, verbose=False)
|
||||
|
||||
self.assertIs(daemon.editor_stage._processor, ai_processor)
|
||||
self.assertEqual(ai_processor.warmup_calls, ["default"])
|
||||
|
|
@ -438,11 +435,11 @@ class DaemonTests(unittest.TestCase):
|
|||
cfg.advanced.strict_startup = True
|
||||
ai_processor = FakeAIProcessor()
|
||||
ai_processor.warmup_error = RuntimeError("warmup boom")
|
||||
with patch("aman._build_whisper_model", return_value=FakeModel()), patch(
|
||||
"aman.LlamaProcessor", return_value=ai_processor
|
||||
with patch("aman_runtime.build_whisper_model", return_value=FakeModel()), patch(
|
||||
"aman_processing.LlamaProcessor", return_value=ai_processor
|
||||
):
|
||||
with self.assertRaisesRegex(RuntimeError, "editor stage warmup failed"):
|
||||
aman.Daemon(cfg, desktop, verbose=False)
|
||||
aman_runtime.Daemon(cfg, desktop, verbose=False)
|
||||
|
||||
def test_editor_stage_warmup_failure_is_non_fatal_without_strict_startup(self):
|
||||
desktop = FakeDesktop()
|
||||
|
|
@ -450,19 +447,19 @@ class DaemonTests(unittest.TestCase):
|
|||
cfg.advanced.strict_startup = False
|
||||
ai_processor = FakeAIProcessor()
|
||||
ai_processor.warmup_error = RuntimeError("warmup boom")
|
||||
with patch("aman._build_whisper_model", return_value=FakeModel()), patch(
|
||||
"aman.LlamaProcessor", return_value=ai_processor
|
||||
with patch("aman_runtime.build_whisper_model", return_value=FakeModel()), patch(
|
||||
"aman_processing.LlamaProcessor", return_value=ai_processor
|
||||
):
|
||||
with self.assertLogs(level="WARNING") as logs:
|
||||
daemon = aman.Daemon(cfg, desktop, verbose=False)
|
||||
daemon = aman_runtime.Daemon(cfg, desktop, verbose=False)
|
||||
|
||||
self.assertIs(daemon.editor_stage._processor, ai_processor)
|
||||
self.assertTrue(
|
||||
any("continuing because advanced.strict_startup=false" in line for line in logs.output)
|
||||
)
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
@patch("aman_runtime.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman_runtime.start_audio_recording", return_value=(object(), object()))
|
||||
def test_passes_clipboard_remove_option_to_desktop(self, _start_mock, _stop_mock):
|
||||
desktop = FakeDesktop()
|
||||
model = FakeModel(text="hello world")
|
||||
|
|
@ -486,14 +483,12 @@ class DaemonTests(unittest.TestCase):
|
|||
daemon = self._build_daemon(desktop, FakeModel(), verbose=False)
|
||||
|
||||
with self.assertLogs(level="DEBUG") as logs:
|
||||
daemon.set_state(aman.State.RECORDING)
|
||||
daemon.set_state(aman_runtime.State.RECORDING)
|
||||
|
||||
self.assertTrue(
|
||||
any("DEBUG:root:state: idle -> recording" in line for line in logs.output)
|
||||
)
|
||||
self.assertTrue(any("DEBUG:root:state: idle -> recording" in line for line in logs.output))
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
@patch("aman_runtime.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman_runtime.start_audio_recording", return_value=(object(), object()))
|
||||
def test_cancel_listener_armed_only_while_recording(self, _start_mock, _stop_mock):
|
||||
desktop = FakeDesktop()
|
||||
daemon = self._build_daemon(desktop, FakeModel(), verbose=False)
|
||||
|
|
@ -514,7 +509,7 @@ class DaemonTests(unittest.TestCase):
|
|||
self.assertEqual(desktop.cancel_listener_stop_calls, 1)
|
||||
self.assertIsNone(desktop.cancel_listener_callback)
|
||||
|
||||
@patch("aman.start_audio_recording")
|
||||
@patch("aman_runtime.start_audio_recording")
|
||||
def test_recording_does_not_start_when_cancel_listener_fails(self, start_mock):
|
||||
stream = FakeStream()
|
||||
start_mock.return_value = (stream, object())
|
||||
|
|
@ -523,13 +518,13 @@ class DaemonTests(unittest.TestCase):
|
|||
|
||||
daemon.toggle()
|
||||
|
||||
self.assertEqual(daemon.get_state(), aman.State.IDLE)
|
||||
self.assertEqual(daemon.get_state(), aman_runtime.State.IDLE)
|
||||
self.assertIsNone(daemon.stream)
|
||||
self.assertIsNone(daemon.record)
|
||||
self.assertEqual(stream.stop_calls, 1)
|
||||
self.assertEqual(stream.close_calls, 1)
|
||||
|
||||
@patch("aman.start_audio_recording", side_effect=RuntimeError("device missing"))
|
||||
@patch("aman_runtime.start_audio_recording", side_effect=RuntimeError("device missing"))
|
||||
def test_record_start_failure_logs_actionable_issue(self, _start_mock):
|
||||
desktop = FakeDesktop()
|
||||
daemon = self._build_daemon(desktop, FakeModel(), verbose=False)
|
||||
|
|
@ -541,8 +536,8 @@ class DaemonTests(unittest.TestCase):
|
|||
self.assertIn("audio.input: record start failed: device missing", rendered)
|
||||
self.assertIn("next_step: run `aman doctor --config", rendered)
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
@patch("aman_runtime.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman_runtime.start_audio_recording", return_value=(object(), object()))
|
||||
def test_output_failure_logs_actionable_issue(self, _start_mock, _stop_mock):
|
||||
desktop = FailingInjectDesktop()
|
||||
daemon = self._build_daemon(desktop, FakeModel(), verbose=False)
|
||||
|
|
@ -560,8 +555,8 @@ class DaemonTests(unittest.TestCase):
|
|||
self.assertIn("injection.backend: output failed: xtest unavailable", rendered)
|
||||
self.assertIn("next_step: run `aman doctor --config", rendered)
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
@patch("aman_runtime.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman_runtime.start_audio_recording", return_value=(object(), object()))
|
||||
def test_ai_processor_receives_active_profile(self, _start_mock, _stop_mock):
|
||||
desktop = FakeDesktop()
|
||||
cfg = self._config()
|
||||
|
|
@ -585,8 +580,8 @@ class DaemonTests(unittest.TestCase):
|
|||
|
||||
self.assertEqual(ai_processor.last_kwargs.get("profile"), "fast")
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
@patch("aman_runtime.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman_runtime.start_audio_recording", return_value=(object(), object()))
|
||||
def test_ai_processor_receives_effective_language(self, _start_mock, _stop_mock):
|
||||
desktop = FakeDesktop()
|
||||
cfg = self._config()
|
||||
|
|
@ -610,7 +605,7 @@ class DaemonTests(unittest.TestCase):
|
|||
|
||||
self.assertEqual(ai_processor.last_kwargs.get("lang"), "es")
|
||||
|
||||
@patch("aman.start_audio_recording")
|
||||
@patch("aman_runtime.start_audio_recording")
|
||||
def test_paused_state_blocks_recording_start(self, start_mock):
|
||||
desktop = FakeDesktop()
|
||||
daemon = self._build_daemon(desktop, FakeModel(), verbose=False)
|
||||
|
|
@ -619,22 +614,9 @@ class DaemonTests(unittest.TestCase):
|
|||
daemon.toggle()
|
||||
|
||||
start_mock.assert_not_called()
|
||||
self.assertEqual(daemon.get_state(), aman.State.IDLE)
|
||||
self.assertEqual(daemon.get_state(), aman_runtime.State.IDLE)
|
||||
self.assertEqual(desktop.cancel_listener_start_calls, 0)
|
||||
|
||||
|
||||
class LockTests(unittest.TestCase):
|
||||
def test_lock_rejects_second_instance(self):
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
with patch.dict(os.environ, {"XDG_RUNTIME_DIR": td}, clear=False):
|
||||
first = aman._lock_single_instance()
|
||||
try:
|
||||
with self.assertRaises(SystemExit) as ctx:
|
||||
aman._lock_single_instance()
|
||||
self.assertIn("already running", str(ctx.exception))
|
||||
finally:
|
||||
first.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
Loading…
Add table
Add a link
Reference in a new issue