Split aman.py into focused CLI and runtime modules

Break the old god module into flat sibling modules for CLI parsing, run lifecycle, daemon state, shared processing helpers, benchmark tooling, and maintainer-only model sync, so unrelated changes no longer share one giant import graph.

Keep aman as a thin shim over aman_cli, move sync-default-model behind the hidden aman-maint entrypoint (with Make wrappers), and update the packaging metadata and maintainer docs to reflect the new surface.
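
A minimal sketch of how thin the shim could be, assuming aman_cli exposes a main(argv=None) entrypoint and that the console scripts point at aman_cli:main and aman_maint:main; none of those names are confirmed by this diff.

    # src/aman.py -- hypothetical shape of the thin shim (names assumed).
    from aman_cli import main

    if __name__ == "__main__":
        raise SystemExit(main())

    # The hidden maintainer surface would get its own console script,
    # e.g. aman-maint -> aman_maint:main (also assumed).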

Retarget the tests to the new seams with dedicated runtime, run, benchmark, maintainer, and entrypoint suites. Verified with:

    python3 -m unittest discover -s tests -p "test_*.py"
    python3 -m py_compile src/*.py tests/*.py
    PYTHONPATH=src python3 -m aman --help
    PYTHONPATH=src python3 -m aman version
    PYTHONPATH=src python3 -m aman_maint --help
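
For illustration, one of the new entrypoint tests might look like the sketch below, driving the module entrypoint through subprocess the same way the verification commands above do; the suite name, file location, and asserted output are assumptions, not taken from this diff.

    # Hypothetical entrypoint smoke test (names and layout assumed).
    import os
    import subprocess
    import sys
    import unittest
    from pathlib import Path

    ROOT = Path(__file__).resolve().parents[1]

    class EntrypointSmokeTests(unittest.TestCase):
        def test_aman_module_help_exits_zero(self):
            # Mirrors: PYTHONPATH=src python3 -m aman --help
            env = dict(os.environ, PYTHONPATH=str(ROOT / "src"))
            result = subprocess.run(
                [sys.executable, "-m", "aman", "--help"],
                cwd=ROOT,
                env=env,
                text=True,
                capture_output=True,
                check=False,
            )
            self.assertEqual(result.returncode, 0, result.stderr)
            self.assertIn("usage", result.stdout.lower())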
Thales Maciel 2026-03-14 14:54:57 -03:00
parent 721248ca26
commit 4d0081d1d0
18 changed files with 2838 additions and 2427 deletions


@@ -1,11 +1,9 @@
import io
import json
import subprocess
import sys
import tempfile
import unittest
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import patch
ROOT = Path(__file__).resolve().parents[1]
@@ -13,114 +11,16 @@ SRC = ROOT / "src"
if str(SRC) not in sys.path:
sys.path.insert(0, str(SRC))
import aman
from config import Config
from config_ui import ConfigUiResult
import aman_cli
from diagnostics import DiagnosticCheck, DiagnosticReport
class _FakeDesktop:
def __init__(self):
self.hotkey = None
self.hotkey_callback = None
def start_hotkey_listener(self, hotkey, callback):
self.hotkey = hotkey
self.hotkey_callback = callback
def stop_hotkey_listener(self):
return
def start_cancel_listener(self, callback):
_ = callback
return
def stop_cancel_listener(self):
return
def validate_hotkey(self, hotkey):
_ = hotkey
return
def inject_text(self, text, backend, *, remove_transcription_from_clipboard=False):
_ = (text, backend, remove_transcription_from_clipboard)
return
def run_tray(self, _state_getter, on_quit, **_kwargs):
on_quit()
def request_quit(self):
return
class _HotkeyFailDesktop(_FakeDesktop):
def start_hotkey_listener(self, hotkey, callback):
_ = (hotkey, callback)
raise RuntimeError("already in use")
class _FakeDaemon:
def __init__(self, cfg, _desktop, *, verbose=False, config_path=None):
self.cfg = cfg
self.verbose = verbose
self.config_path = config_path
self._paused = False
def get_state(self):
return "idle"
def is_paused(self):
return self._paused
def toggle_paused(self):
self._paused = not self._paused
return self._paused
def apply_config(self, cfg):
self.cfg = cfg
def toggle(self):
return
def shutdown(self, timeout=1.0):
_ = timeout
return True
class _RetrySetupDesktop(_FakeDesktop):
def __init__(self):
super().__init__()
self.settings_invocations = 0
def run_tray(self, _state_getter, on_quit, **kwargs):
settings_cb = kwargs.get("on_open_settings")
if settings_cb is not None and self.settings_invocations == 0:
self.settings_invocations += 1
settings_cb()
return
on_quit()
class _FakeBenchEditorStage:
def warmup(self):
return
def rewrite(self, transcript, *, language, dictionary_context):
_ = dictionary_context
return SimpleNamespace(
final_text=f"[{language}] {transcript.strip()}",
latency_ms=1.0,
pass1_ms=0.5,
pass2_ms=0.5,
)
class AmanCliTests(unittest.TestCase):
def test_parse_cli_args_help_flag_uses_top_level_parser(self):
out = io.StringIO()
with patch("sys.stdout", out), self.assertRaises(SystemExit) as exc:
aman._parse_cli_args(["--help"])
aman_cli.parse_cli_args(["--help"])
self.assertEqual(exc.exception.code, 0)
rendered = out.getvalue()
@@ -133,31 +33,31 @@ class AmanCliTests(unittest.TestCase):
out = io.StringIO()
with patch("sys.stdout", out), self.assertRaises(SystemExit) as exc:
aman._parse_cli_args(["-h"])
aman_cli.parse_cli_args(["-h"])
self.assertEqual(exc.exception.code, 0)
self.assertIn("self-check", out.getvalue())
def test_parse_cli_args_defaults_to_run_command(self):
args = aman._parse_cli_args(["--dry-run"])
args = aman_cli.parse_cli_args(["--dry-run"])
self.assertEqual(args.command, "run")
self.assertTrue(args.dry_run)
def test_parse_cli_args_doctor_command(self):
args = aman._parse_cli_args(["doctor", "--json"])
args = aman_cli.parse_cli_args(["doctor", "--json"])
self.assertEqual(args.command, "doctor")
self.assertTrue(args.json)
def test_parse_cli_args_self_check_command(self):
args = aman._parse_cli_args(["self-check", "--json"])
args = aman_cli.parse_cli_args(["self-check", "--json"])
self.assertEqual(args.command, "self-check")
self.assertTrue(args.json)
def test_parse_cli_args_bench_command(self):
args = aman._parse_cli_args(
args = aman_cli.parse_cli_args(
["bench", "--text", "hello", "--repeat", "2", "--warmup", "0", "--json"]
)
@@ -169,11 +69,17 @@ class AmanCliTests(unittest.TestCase):
def test_parse_cli_args_bench_requires_input(self):
with self.assertRaises(SystemExit):
aman._parse_cli_args(["bench"])
aman_cli.parse_cli_args(["bench"])
def test_parse_cli_args_eval_models_command(self):
args = aman._parse_cli_args(
["eval-models", "--dataset", "benchmarks/cleanup_dataset.jsonl", "--matrix", "benchmarks/model_matrix.small_first.json"]
args = aman_cli.parse_cli_args(
[
"eval-models",
"--dataset",
"benchmarks/cleanup_dataset.jsonl",
"--matrix",
"benchmarks/model_matrix.small_first.json",
]
)
self.assertEqual(args.command, "eval-models")
self.assertEqual(args.dataset, "benchmarks/cleanup_dataset.jsonl")
@@ -183,7 +89,7 @@ class AmanCliTests(unittest.TestCase):
self.assertEqual(args.report_version, 2)
def test_parse_cli_args_eval_models_with_heuristic_options(self):
args = aman._parse_cli_args(
args = aman_cli.parse_cli_args(
[
"eval-models",
"--dataset",
@@ -203,7 +109,7 @@ class AmanCliTests(unittest.TestCase):
self.assertEqual(args.report_version, 2)
def test_parse_cli_args_build_heuristic_dataset_command(self):
args = aman._parse_cli_args(
args = aman_cli.parse_cli_args(
[
"build-heuristic-dataset",
"--input",
@@ -216,79 +122,40 @@ class AmanCliTests(unittest.TestCase):
self.assertEqual(args.input, "benchmarks/heuristics_dataset.raw.jsonl")
self.assertEqual(args.output, "benchmarks/heuristics_dataset.jsonl")
def test_parse_cli_args_sync_default_model_command(self):
args = aman._parse_cli_args(
[
"sync-default-model",
"--report",
"benchmarks/results/latest.json",
"--artifacts",
"benchmarks/model_artifacts.json",
"--constants",
"src/constants.py",
"--check",
]
)
self.assertEqual(args.command, "sync-default-model")
self.assertEqual(args.report, "benchmarks/results/latest.json")
self.assertEqual(args.artifacts, "benchmarks/model_artifacts.json")
self.assertEqual(args.constants, "src/constants.py")
self.assertTrue(args.check)
def test_parse_cli_args_legacy_maint_command_errors_with_migration_hint(self):
err = io.StringIO()
with patch("sys.stderr", err), self.assertRaises(SystemExit) as exc:
aman_cli.parse_cli_args(["sync-default-model"])
self.assertEqual(exc.exception.code, 2)
self.assertIn("aman-maint sync-default-model", err.getvalue())
self.assertIn("make sync-default-model", err.getvalue())
def test_version_command_prints_version(self):
out = io.StringIO()
args = aman._parse_cli_args(["version"])
with patch("aman._app_version", return_value="1.2.3"), patch("sys.stdout", out):
exit_code = aman._version_command(args)
args = aman_cli.parse_cli_args(["version"])
with patch("aman_cli.app_version", return_value="1.2.3"), patch("sys.stdout", out):
exit_code = aman_cli.version_command(args)
self.assertEqual(exit_code, 0)
self.assertEqual(out.getvalue().strip(), "1.2.3")
def test_version_command_does_not_import_config_ui(self):
script = f"""
import builtins
import sys
from pathlib import Path
sys.path.insert(0, {str(SRC)!r})
real_import = builtins.__import__
def blocked(name, globals=None, locals=None, fromlist=(), level=0):
if name == "config_ui":
raise ModuleNotFoundError("blocked config_ui")
return real_import(name, globals, locals, fromlist, level)
builtins.__import__ = blocked
import aman
args = aman._parse_cli_args(["version"])
raise SystemExit(aman._version_command(args))
"""
result = subprocess.run(
[sys.executable, "-c", script],
cwd=ROOT,
text=True,
capture_output=True,
check=False,
)
self.assertEqual(result.returncode, 0, result.stderr)
self.assertRegex(result.stdout.strip(), r"\S+")
def test_app_version_prefers_local_pyproject_version(self):
pyproject_text = '[project]\nversion = "9.9.9"\n'
with patch.object(aman.Path, "exists", return_value=True), patch.object(
aman.Path, "read_text", return_value=pyproject_text
), patch("aman.importlib.metadata.version", return_value="1.0.0"):
self.assertEqual(aman._app_version(), "9.9.9")
with patch.object(aman_cli.Path, "exists", return_value=True), patch.object(
aman_cli.Path, "read_text", return_value=pyproject_text
), patch("aman_cli.importlib.metadata.version", return_value="1.0.0"):
self.assertEqual(aman_cli.app_version(), "9.9.9")
def test_doctor_command_json_output_and_exit_code(self):
report = DiagnosticReport(
checks=[DiagnosticCheck(id="config.load", status="ok", message="ok", next_step="")]
)
args = aman._parse_cli_args(["doctor", "--json"])
args = aman_cli.parse_cli_args(["doctor", "--json"])
out = io.StringIO()
with patch("aman.run_doctor", return_value=report), patch("sys.stdout", out):
exit_code = aman._doctor_command(args)
with patch("aman_cli.run_doctor", return_value=report), patch("sys.stdout", out):
exit_code = aman_cli.doctor_command(args)
self.assertEqual(exit_code, 0)
payload = json.loads(out.getvalue())
@@ -300,10 +167,10 @@ raise SystemExit(aman._version_command(args))
report = DiagnosticReport(
checks=[DiagnosticCheck(id="config.load", status="fail", message="broken", next_step="fix")]
)
args = aman._parse_cli_args(["doctor"])
args = aman_cli.parse_cli_args(["doctor"])
out = io.StringIO()
with patch("aman.run_doctor", return_value=report), patch("sys.stdout", out):
exit_code = aman._doctor_command(args)
with patch("aman_cli.run_doctor", return_value=report), patch("sys.stdout", out):
exit_code = aman_cli.doctor_command(args)
self.assertEqual(exit_code, 2)
self.assertIn("[FAIL] config.load", out.getvalue())
@@ -313,10 +180,10 @@ raise SystemExit(aman._version_command(args))
report = DiagnosticReport(
checks=[DiagnosticCheck(id="model.cache", status="warn", message="missing", next_step="run aman once")]
)
args = aman._parse_cli_args(["doctor"])
args = aman_cli.parse_cli_args(["doctor"])
out = io.StringIO()
with patch("aman.run_doctor", return_value=report), patch("sys.stdout", out):
exit_code = aman._doctor_command(args)
with patch("aman_cli.run_doctor", return_value=report), patch("sys.stdout", out):
exit_code = aman_cli.doctor_command(args)
self.assertEqual(exit_code, 0)
self.assertIn("[WARN] model.cache", out.getvalue())
@@ -326,275 +193,22 @@ raise SystemExit(aman._version_command(args))
report = DiagnosticReport(
checks=[DiagnosticCheck(id="startup.readiness", status="ok", message="ready", next_step="")]
)
args = aman._parse_cli_args(["self-check", "--json"])
args = aman_cli.parse_cli_args(["self-check", "--json"])
out = io.StringIO()
with patch("aman.run_self_check", return_value=report) as runner, patch("sys.stdout", out):
exit_code = aman._self_check_command(args)
with patch("aman_cli.run_self_check", return_value=report) as runner, patch("sys.stdout", out):
exit_code = aman_cli.self_check_command(args)
self.assertEqual(exit_code, 0)
runner.assert_called_once_with("")
payload = json.loads(out.getvalue())
self.assertEqual(payload["status"], "ok")
def test_bench_command_json_output(self):
args = aman._parse_cli_args(["bench", "--text", "hello", "--repeat", "2", "--warmup", "0", "--json"])
out = io.StringIO()
with patch("aman.load", return_value=Config()), patch(
"aman._build_editor_stage", return_value=_FakeBenchEditorStage()
), patch("sys.stdout", out):
exit_code = aman._bench_command(args)
self.assertEqual(exit_code, 0)
payload = json.loads(out.getvalue())
self.assertEqual(payload["measured_runs"], 2)
self.assertEqual(payload["summary"]["runs"], 2)
self.assertEqual(len(payload["runs"]), 2)
self.assertEqual(payload["editor_backend"], "local_llama_builtin")
self.assertIn("avg_alignment_ms", payload["summary"])
self.assertIn("avg_fact_guard_ms", payload["summary"])
self.assertIn("alignment_applied", payload["runs"][0])
self.assertIn("fact_guard_action", payload["runs"][0])
def test_bench_command_supports_text_file_input(self):
with tempfile.TemporaryDirectory() as td:
text_file = Path(td) / "input.txt"
text_file.write_text("hello from file", encoding="utf-8")
args = aman._parse_cli_args(
["bench", "--text-file", str(text_file), "--repeat", "1", "--warmup", "0", "--print-output"]
)
out = io.StringIO()
with patch("aman.load", return_value=Config()), patch(
"aman._build_editor_stage", return_value=_FakeBenchEditorStage()
), patch("sys.stdout", out):
exit_code = aman._bench_command(args)
self.assertEqual(exit_code, 0)
self.assertIn("[auto] hello from file", out.getvalue())
def test_bench_command_rejects_empty_input(self):
args = aman._parse_cli_args(["bench", "--text", " "])
with patch("aman.load", return_value=Config()), patch(
"aman._build_editor_stage", return_value=_FakeBenchEditorStage()
):
exit_code = aman._bench_command(args)
self.assertEqual(exit_code, 1)
def test_bench_command_rejects_non_positive_repeat(self):
args = aman._parse_cli_args(["bench", "--text", "hello", "--repeat", "0"])
with patch("aman.load", return_value=Config()), patch(
"aman._build_editor_stage", return_value=_FakeBenchEditorStage()
):
exit_code = aman._bench_command(args)
self.assertEqual(exit_code, 1)
def test_eval_models_command_writes_report(self):
with tempfile.TemporaryDirectory() as td:
output_path = Path(td) / "report.json"
args = aman._parse_cli_args(
[
"eval-models",
"--dataset",
"benchmarks/cleanup_dataset.jsonl",
"--matrix",
"benchmarks/model_matrix.small_first.json",
"--output",
str(output_path),
"--json",
]
)
out = io.StringIO()
fake_report = {
"models": [{"name": "base", "best_param_set": {"latency_ms": {"p50": 1000.0}, "quality": {"hybrid_score_avg": 0.8, "parse_valid_rate": 1.0}}}],
"winner_recommendation": {"name": "base", "reason": "test"},
}
with patch("aman.run_model_eval", return_value=fake_report), patch("sys.stdout", out):
exit_code = aman._eval_models_command(args)
self.assertEqual(exit_code, 0)
self.assertTrue(output_path.exists())
payload = json.loads(output_path.read_text(encoding="utf-8"))
self.assertEqual(payload["winner_recommendation"]["name"], "base")
def test_eval_models_command_forwards_heuristic_arguments(self):
args = aman._parse_cli_args(
[
"eval-models",
"--dataset",
"benchmarks/cleanup_dataset.jsonl",
"--matrix",
"benchmarks/model_matrix.small_first.json",
"--heuristic-dataset",
"benchmarks/heuristics_dataset.jsonl",
"--heuristic-weight",
"0.35",
"--report-version",
"2",
"--json",
]
)
out = io.StringIO()
fake_report = {
"models": [{"name": "base", "best_param_set": {}}],
"winner_recommendation": {"name": "base", "reason": "ok"},
}
with patch("aman.run_model_eval", return_value=fake_report) as run_eval_mock, patch(
"sys.stdout", out
):
exit_code = aman._eval_models_command(args)
self.assertEqual(exit_code, 0)
run_eval_mock.assert_called_once_with(
"benchmarks/cleanup_dataset.jsonl",
"benchmarks/model_matrix.small_first.json",
heuristic_dataset_path="benchmarks/heuristics_dataset.jsonl",
heuristic_weight=0.35,
report_version=2,
verbose=False,
)
def test_build_heuristic_dataset_command_json_output(self):
args = aman._parse_cli_args(
[
"build-heuristic-dataset",
"--input",
"benchmarks/heuristics_dataset.raw.jsonl",
"--output",
"benchmarks/heuristics_dataset.jsonl",
"--json",
]
)
out = io.StringIO()
summary = {
"raw_rows": 4,
"written_rows": 4,
"generated_word_rows": 2,
"output_path": "benchmarks/heuristics_dataset.jsonl",
}
with patch("aman.build_heuristic_dataset", return_value=summary), patch("sys.stdout", out):
exit_code = aman._build_heuristic_dataset_command(args)
self.assertEqual(exit_code, 0)
payload = json.loads(out.getvalue())
self.assertEqual(payload["written_rows"], 4)
def test_sync_default_model_command_updates_constants(self):
with tempfile.TemporaryDirectory() as td:
report_path = Path(td) / "latest.json"
artifacts_path = Path(td) / "artifacts.json"
constants_path = Path(td) / "constants.py"
report_path.write_text(
json.dumps(
{
"winner_recommendation": {
"name": "test-model",
}
}
),
encoding="utf-8",
)
artifacts_path.write_text(
json.dumps(
{
"models": [
{
"name": "test-model",
"filename": "winner.gguf",
"url": "https://example.invalid/winner.gguf",
"sha256": "a" * 64,
}
]
}
),
encoding="utf-8",
)
constants_path.write_text(
(
'MODEL_NAME = "old.gguf"\n'
'MODEL_URL = "https://example.invalid/old.gguf"\n'
'MODEL_SHA256 = "' + ("b" * 64) + '"\n'
),
encoding="utf-8",
)
args = aman._parse_cli_args(
[
"sync-default-model",
"--report",
str(report_path),
"--artifacts",
str(artifacts_path),
"--constants",
str(constants_path),
]
)
exit_code = aman._sync_default_model_command(args)
self.assertEqual(exit_code, 0)
updated = constants_path.read_text(encoding="utf-8")
self.assertIn('MODEL_NAME = "winner.gguf"', updated)
self.assertIn('MODEL_URL = "https://example.invalid/winner.gguf"', updated)
self.assertIn('MODEL_SHA256 = "' + ("a" * 64) + '"', updated)
def test_sync_default_model_command_check_mode_returns_2_on_drift(self):
with tempfile.TemporaryDirectory() as td:
report_path = Path(td) / "latest.json"
artifacts_path = Path(td) / "artifacts.json"
constants_path = Path(td) / "constants.py"
report_path.write_text(
json.dumps(
{
"winner_recommendation": {
"name": "test-model",
}
}
),
encoding="utf-8",
)
artifacts_path.write_text(
json.dumps(
{
"models": [
{
"name": "test-model",
"filename": "winner.gguf",
"url": "https://example.invalid/winner.gguf",
"sha256": "a" * 64,
}
]
}
),
encoding="utf-8",
)
constants_path.write_text(
(
'MODEL_NAME = "old.gguf"\n'
'MODEL_URL = "https://example.invalid/old.gguf"\n'
'MODEL_SHA256 = "' + ("b" * 64) + '"\n'
),
encoding="utf-8",
)
args = aman._parse_cli_args(
[
"sync-default-model",
"--report",
str(report_path),
"--artifacts",
str(artifacts_path),
"--constants",
str(constants_path),
"--check",
]
)
exit_code = aman._sync_default_model_command(args)
self.assertEqual(exit_code, 2)
updated = constants_path.read_text(encoding="utf-8")
self.assertIn('MODEL_NAME = "old.gguf"', updated)
def test_init_command_creates_default_config(self):
with tempfile.TemporaryDirectory() as td:
path = Path(td) / "config.json"
args = aman._parse_cli_args(["init", "--config", str(path)])
args = aman_cli.parse_cli_args(["init", "--config", str(path)])
exit_code = aman._init_command(args)
exit_code = aman_cli.init_command(args)
self.assertEqual(exit_code, 0)
self.assertTrue(path.exists())
payload = json.loads(path.read_text(encoding="utf-8"))
@@ -604,9 +218,9 @@ raise SystemExit(aman._version_command(args))
with tempfile.TemporaryDirectory() as td:
path = Path(td) / "config.json"
path.write_text('{"daemon":{"hotkey":"Super+m"}}\n', encoding="utf-8")
args = aman._parse_cli_args(["init", "--config", str(path)])
args = aman_cli.parse_cli_args(["init", "--config", str(path)])
exit_code = aman._init_command(args)
exit_code = aman_cli.init_command(args)
self.assertEqual(exit_code, 1)
self.assertIn("Super+m", path.read_text(encoding="utf-8"))
@@ -614,109 +228,13 @@ raise SystemExit(aman._version_command(args))
with tempfile.TemporaryDirectory() as td:
path = Path(td) / "config.json"
path.write_text('{"daemon":{"hotkey":"Super+m"}}\n', encoding="utf-8")
args = aman._parse_cli_args(["init", "--config", str(path), "--force"])
args = aman_cli.parse_cli_args(["init", "--config", str(path), "--force"])
exit_code = aman._init_command(args)
exit_code = aman_cli.init_command(args)
self.assertEqual(exit_code, 0)
payload = json.loads(path.read_text(encoding="utf-8"))
self.assertEqual(payload["daemon"]["hotkey"], "Cmd+m")
def test_run_command_missing_config_uses_settings_ui_and_writes_file(self):
with tempfile.TemporaryDirectory() as td:
path = Path(td) / "config.json"
args = aman._parse_cli_args(["run", "--config", str(path)])
desktop = _FakeDesktop()
onboard_cfg = Config()
onboard_cfg.daemon.hotkey = "Super+m"
with patch("aman._lock_single_instance", return_value=object()), patch(
"aman.get_desktop_adapter", return_value=desktop
), patch(
"aman._run_config_ui",
return_value=ConfigUiResult(saved=True, config=onboard_cfg, closed_reason="saved"),
) as config_ui_mock, patch("aman.Daemon", _FakeDaemon):
exit_code = aman._run_command(args)
self.assertEqual(exit_code, 0)
self.assertTrue(path.exists())
self.assertEqual(desktop.hotkey, "Super+m")
config_ui_mock.assert_called_once()
def test_run_command_missing_config_cancel_returns_without_starting_daemon(self):
with tempfile.TemporaryDirectory() as td:
path = Path(td) / "config.json"
args = aman._parse_cli_args(["run", "--config", str(path)])
desktop = _FakeDesktop()
with patch("aman._lock_single_instance", return_value=object()), patch(
"aman.get_desktop_adapter", return_value=desktop
), patch(
"aman._run_config_ui",
return_value=ConfigUiResult(saved=False, config=None, closed_reason="cancelled"),
), patch("aman.Daemon") as daemon_cls:
exit_code = aman._run_command(args)
self.assertEqual(exit_code, 0)
self.assertFalse(path.exists())
daemon_cls.assert_not_called()
def test_run_command_missing_config_cancel_then_retry_settings(self):
with tempfile.TemporaryDirectory() as td:
path = Path(td) / "config.json"
args = aman._parse_cli_args(["run", "--config", str(path)])
desktop = _RetrySetupDesktop()
onboard_cfg = Config()
config_ui_results = [
ConfigUiResult(saved=False, config=None, closed_reason="cancelled"),
ConfigUiResult(saved=True, config=onboard_cfg, closed_reason="saved"),
]
with patch("aman._lock_single_instance", return_value=object()), patch(
"aman.get_desktop_adapter", return_value=desktop
), patch(
"aman._run_config_ui",
side_effect=config_ui_results,
), patch("aman.Daemon", _FakeDaemon):
exit_code = aman._run_command(args)
self.assertEqual(exit_code, 0)
self.assertTrue(path.exists())
self.assertEqual(desktop.settings_invocations, 1)
def test_run_command_hotkey_failure_logs_actionable_issue(self):
with tempfile.TemporaryDirectory() as td:
path = Path(td) / "config.json"
path.write_text(json.dumps({"config_version": 1}) + "\n", encoding="utf-8")
args = aman._parse_cli_args(["run", "--config", str(path)])
desktop = _HotkeyFailDesktop()
with patch("aman._lock_single_instance", return_value=object()), patch(
"aman.get_desktop_adapter", return_value=desktop
), patch("aman.load", return_value=Config()), patch("aman.Daemon", _FakeDaemon), self.assertLogs(
level="ERROR"
) as logs:
exit_code = aman._run_command(args)
self.assertEqual(exit_code, 1)
rendered = "\n".join(logs.output)
self.assertIn("hotkey.parse: hotkey setup failed: already in use", rendered)
self.assertIn("next_step: run `aman doctor --config", rendered)
def test_run_command_daemon_init_failure_logs_self_check_next_step(self):
with tempfile.TemporaryDirectory() as td:
path = Path(td) / "config.json"
path.write_text(json.dumps({"config_version": 1}) + "\n", encoding="utf-8")
args = aman._parse_cli_args(["run", "--config", str(path)])
desktop = _FakeDesktop()
with patch("aman._lock_single_instance", return_value=object()), patch(
"aman.get_desktop_adapter", return_value=desktop
), patch("aman.load", return_value=Config()), patch(
"aman.Daemon", side_effect=RuntimeError("warmup boom")
), self.assertLogs(level="ERROR") as logs:
exit_code = aman._run_command(args)
self.assertEqual(exit_code, 1)
rendered = "\n".join(logs.output)
self.assertIn("startup.readiness: startup failed: warmup boom", rendered)
self.assertIn("next_step: run `aman self-check --config", rendered)
if __name__ == "__main__":
unittest.main()