Harden runtime diagnostics for milestone 3
Make the milestone 3 runtime story predictable instead of treating doctor, self-check, and startup failures as loosely related surfaces. Split doctor and self-check into distinct read-only flows, add tri-state diagnostic status with stable IDs and next steps, and reuse that wording in CLI output, service logs, and tray-triggered diagnostics. Add non-mutating config/model probes, a make runtime-check gate, and public recovery/validation docs for the X11 GA roadmap. Validation: make runtime-check; PYTHONPATH=src python3 -m unittest discover -s tests -p 'test_*.py'; python3 -m py_compile src/*.py tests/*.py; PYTHONPATH=src python3 -m aman doctor --help; PYTHONPATH=src python3 -m aman self-check --help. Leave milestone 3 open in the roadmap until the manual X11 validation rows are filled.
This commit is contained in:
parent
a3368056ff
commit
ed1b59240b
16 changed files with 1298 additions and 248 deletions
|
|
@ -47,6 +47,18 @@ class FakeDesktop:
|
|||
self.quit_calls += 1
|
||||
|
||||
|
||||
class FailingInjectDesktop(FakeDesktop):
    """Fake desktop whose text injection always fails.

    Drives the daemon's output-error handling in tests by simulating an
    injection backend (e.g. XTEST) that is unavailable at runtime.
    """

    def inject_text(
        self,
        text: str,
        backend: str,
        *,
        remove_transcription_from_clipboard: bool = False,
    ) -> None:
        """Unconditionally raise to simulate a broken injection backend."""
        # Arguments are intentionally unused; discard them explicitly.
        del text, backend, remove_transcription_from_clipboard
        raise RuntimeError("xtest unavailable")
|
||||
|
||||
|
||||
class FakeSegment:
    """Minimal stand-in for a transcription segment.

    Carries only the transcribed text, which is all the daemon tests
    need from a segment object.
    """

    def __init__(self, text: str):
        # Keep the supplied text verbatim, mirroring a real segment's API.
        self.text = text
|
||||
|
|
@ -517,6 +529,37 @@ class DaemonTests(unittest.TestCase):
|
|||
self.assertEqual(stream.stop_calls, 1)
|
||||
self.assertEqual(stream.close_calls, 1)
|
||||
|
||||
@patch("aman.start_audio_recording", side_effect=RuntimeError("device missing"))
|
||||
def test_record_start_failure_logs_actionable_issue(self, _start_mock):
|
||||
desktop = FakeDesktop()
|
||||
daemon = self._build_daemon(desktop, FakeModel(), verbose=False)
|
||||
|
||||
with self.assertLogs(level="ERROR") as logs:
|
||||
daemon.toggle()
|
||||
|
||||
rendered = "\n".join(logs.output)
|
||||
self.assertIn("audio.input: record start failed: device missing", rendered)
|
||||
self.assertIn("next_step: run `aman doctor --config", rendered)
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
def test_output_failure_logs_actionable_issue(self, _start_mock, _stop_mock):
|
||||
desktop = FailingInjectDesktop()
|
||||
daemon = self._build_daemon(desktop, FakeModel(), verbose=False)
|
||||
daemon._start_stop_worker = (
|
||||
lambda stream, record, trigger, process_audio: daemon._stop_and_process(
|
||||
stream, record, trigger, process_audio
|
||||
)
|
||||
)
|
||||
|
||||
with self.assertLogs(level="ERROR") as logs:
|
||||
daemon.toggle()
|
||||
daemon.toggle()
|
||||
|
||||
rendered = "\n".join(logs.output)
|
||||
self.assertIn("injection.backend: output failed: xtest unavailable", rendered)
|
||||
self.assertIn("next_step: run `aman doctor --config", rendered)
|
||||
|
||||
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("aman.start_audio_recording", return_value=(object(), object()))
|
||||
def test_ai_processor_receives_active_profile(self, _start_mock, _stop_mock):
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue