Add vocabulary correction pipeline and example config
This commit is contained in:
parent
f9224621fa
commit
c3503fbbde
9 changed files with 865 additions and 23 deletions
|
|
@ -11,7 +11,7 @@ if str(SRC) not in sys.path:
|
|||
sys.path.insert(0, str(SRC))
|
||||
|
||||
import leld
|
||||
from config import Config
|
||||
from config import Config, VocabularyReplacement
|
||||
|
||||
|
||||
class FakeDesktop:
|
||||
|
|
@ -32,8 +32,43 @@ class FakeSegment:
|
|||
|
||||
|
||||
class FakeModel:
    """Minimal stand-in for a whisper model WITHOUT vocabulary-hint support.

    transcribe() accepts only the base kwargs (language, vad_filter) so the
    daemon's feature detection should skip hotwords/initial_prompt for it.
    The kwargs of the most recent call are captured for assertions.
    """

    def __init__(self, text: str = "hello world"):
        # Text carried by every fake segment this model emits.
        self.text = text
        # Kwargs seen by the most recent transcribe() call.
        self.last_kwargs = {}

    def transcribe(self, _audio, language=None, vad_filter=None):
        """Return one fake segment plus the captured kwargs.

        BUG FIX: the previous version returned a hard-coded
        ``[FakeSegment("hello world")]`` immediately, which ignored
        ``self.text`` and left the ``self.last_kwargs`` recording below it
        unreachable. Record first, then return.
        """
        self.last_kwargs = {
            "language": language,
            "vad_filter": vad_filter,
        }
        return [FakeSegment(self.text)], self.last_kwargs
|
||||
|
||||
|
||||
class FakeHintModel:
    """Fake whisper model whose transcribe() DOES accept vocabulary hints.

    Unlike FakeModel, the signature advertises ``hotwords`` and
    ``initial_prompt``, so the daemon's introspection should pass them.
    Every call's kwargs are captured in ``last_kwargs`` for assertions.
    """

    def __init__(self, text: str = "hello world"):
        self.text = text  # payload for the emitted fake segment
        self.last_kwargs = {}  # kwargs captured by the latest call

    def transcribe(
        self,
        _audio,
        language=None,
        vad_filter=None,
        hotwords=None,
        initial_prompt=None,
    ):
        """Capture all kwargs, then return one segment and the capture."""
        captured = dict(
            language=language,
            vad_filter=vad_filter,
            hotwords=hotwords,
            initial_prompt=initial_prompt,
        )
        self.last_kwargs = captured
        return [FakeSegment(self.text)], captured
|
||||
|
||||
|
||||
class FakeAIProcessor:
    """AI post-processing stub: returns the input text unchanged.

    Tests that wire this in only care that the pipeline invoked the
    processor, not what it produces, so an identity transform suffices.
    """

    def process(self, text, lang="en", **_kwargs):
        """Identity pass-through; extra kwargs are accepted and ignored."""
        return text
|
||||
|
||||
|
||||
class FakeAudio:
|
||||
|
|
@ -48,12 +83,13 @@ class DaemonTests(unittest.TestCase):
|
|||
cfg.logging.log_transcript = False
|
||||
return cfg
|
||||
|
||||
@patch("leld._build_whisper_model", return_value=FakeModel())
|
||||
@patch("leld.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("leld.start_audio_recording", return_value=(object(), object()))
|
||||
def test_toggle_start_stop_injects_text(self, _start_mock, _stop_mock, _model_mock):
|
||||
def test_toggle_start_stop_injects_text(self, _start_mock, _stop_mock):
|
||||
desktop = FakeDesktop()
|
||||
daemon = leld.Daemon(self._config(), desktop, verbose=False)
|
||||
with patch("leld._build_whisper_model", return_value=FakeModel()):
|
||||
daemon = leld.Daemon(self._config(), desktop, verbose=False)
|
||||
daemon.ai_processor = FakeAIProcessor()
|
||||
daemon._start_stop_worker = (
|
||||
lambda stream, record, trigger, process_audio: daemon._stop_and_process(
|
||||
stream, record, trigger, process_audio
|
||||
|
|
@ -68,12 +104,13 @@ class DaemonTests(unittest.TestCase):
|
|||
self.assertEqual(daemon.get_state(), leld.State.IDLE)
|
||||
self.assertEqual(desktop.inject_calls, [("hello world", "clipboard")])
|
||||
|
||||
@patch("leld._build_whisper_model", return_value=FakeModel())
|
||||
@patch("leld.stop_audio_recording", return_value=FakeAudio(8))
|
||||
@patch("leld.start_audio_recording", return_value=(object(), object()))
|
||||
def test_shutdown_stops_recording_without_injection(self, _start_mock, _stop_mock, _model_mock):
|
||||
def test_shutdown_stops_recording_without_injection(self, _start_mock, _stop_mock):
|
||||
desktop = FakeDesktop()
|
||||
daemon = leld.Daemon(self._config(), desktop, verbose=False)
|
||||
with patch("leld._build_whisper_model", return_value=FakeModel()):
|
||||
daemon = leld.Daemon(self._config(), desktop, verbose=False)
|
||||
daemon.ai_processor = FakeAIProcessor()
|
||||
daemon._start_stop_worker = (
|
||||
lambda stream, record, trigger, process_audio: daemon._stop_and_process(
|
||||
stream, record, trigger, process_audio
|
||||
|
|
@ -87,6 +124,60 @@ class DaemonTests(unittest.TestCase):
|
|||
self.assertEqual(daemon.get_state(), leld.State.IDLE)
|
||||
self.assertEqual(desktop.inject_calls, [])
|
||||
|
||||
@patch("leld.stop_audio_recording", return_value=FakeAudio(8))
@patch("leld.start_audio_recording", return_value=(object(), object()))
def test_dictionary_replacement_applies_after_ai(self, _start_mock, _stop_mock):
    """Configured vocabulary replacements rewrite the final injected text."""
    desktop = FakeDesktop()
    model = FakeModel(text="good morning martha")
    cfg = self._config()
    cfg.vocabulary.replacements = [VocabularyReplacement(source="Martha", target="Marta")]

    with patch("leld._build_whisper_model", return_value=model):
        daemon = leld.Daemon(cfg, desktop, verbose=False)
        daemon.ai_processor = FakeAIProcessor()
        # Run the worker synchronously so toggle() completes inline.
        daemon._start_stop_worker = (
            lambda stream, record, trigger, process_audio: daemon._stop_and_process(
                stream, record, trigger, process_audio
            )
        )

    daemon.toggle()
    daemon.toggle()

    self.assertEqual(desktop.inject_calls, [("good morning Marta", "clipboard")])
|
||||
|
||||
def test_transcribe_skips_hints_when_model_does_not_support_them(self):
    """Hint kwargs are omitted when transcribe() does not accept them."""
    desktop = FakeDesktop()
    plain_model = FakeModel(text="hello")
    cfg = self._config()
    cfg.vocabulary.terms = ["Docker", "Systemd"]

    with patch("leld._build_whisper_model", return_value=plain_model):
        daemon = leld.Daemon(cfg, desktop, verbose=False)

    transcript = daemon._transcribe(object())

    self.assertEqual(transcript, "hello")
    # FakeModel's signature lacks these, so the daemon must not pass them.
    self.assertNotIn("hotwords", plain_model.last_kwargs)
    self.assertNotIn("initial_prompt", plain_model.last_kwargs)
|
||||
|
||||
def test_transcribe_applies_hints_when_model_supports_them(self):
    """Vocabulary terms and replacement targets flow into the hint kwargs."""
    desktop = FakeDesktop()
    hint_model = FakeHintModel(text="hello")
    cfg = self._config()
    cfg.vocabulary.terms = ["Systemd"]
    cfg.vocabulary.replacements = [VocabularyReplacement(source="docker", target="Docker")]

    with patch("leld._build_whisper_model", return_value=hint_model):
        daemon = leld.Daemon(cfg, desktop, verbose=False)

    transcript = daemon._transcribe(object())

    self.assertEqual(transcript, "hello")
    # Both explicit terms and replacement targets should appear as hotwords.
    self.assertIn("Docker", hint_model.last_kwargs["hotwords"])
    self.assertIn("Systemd", hint_model.last_kwargs["hotwords"])
    self.assertIn("Preferred vocabulary", hint_model.last_kwargs["initial_prompt"])
|
||||
|
||||
|
||||
class LockTests(unittest.TestCase):
|
||||
def test_lock_rejects_second_instance(self):
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue