Rename project from lel to aman

This commit is contained in:
Thales Maciel 2026-02-25 11:11:10 -03:00
parent 2c570c7a87
commit 09090102a2
12 changed files with 77 additions and 77 deletions

View file

@@ -2,9 +2,9 @@
## Project Structure & Module Organization
- `src/leld.py` is the primary entrypoint (X11 STT daemon).
- `src/aman.py` is the primary entrypoint (X11 STT daemon).
- `src/recorder.py` handles audio capture using PortAudio via `sounddevice`.
- `src/leld.py` owns Whisper setup and transcription.
- `src/aman.py` owns Whisper setup and transcription.
- `src/aiprocess.py` runs the in-process Llama-3.2-3B cleanup.
- `src/desktop_x11.py` encapsulates X11 hotkeys, tray, and injection.
- `src/desktop_wayland.py` scaffolds Wayland support (exits with a message).
@@ -13,7 +13,7 @@
- Install deps (X11): `uv sync --extra x11`.
- Install deps (Wayland scaffold): `uv sync --extra wayland`.
- Run daemon: `uv run python3 src/leld.py --config ~/.config/lel/config.json`.
- Run daemon: `uv run python3 src/aman.py --config ~/.config/aman/config.json`.
System packages (example names):
@@ -39,6 +39,6 @@ System packages (example names):
- Audio input is configured via the `recording.input` field in `config.json`.
- STT model and device are configured via the `stt` section in `config.json`.
- LLM model settings are locked; model downloads to `~/.cache/lel/models/`.
- LLM model settings are locked; model downloads to `~/.cache/aman/models/`.
- `-v/--verbose` enables verbose logs (including llama.cpp) with `llama::` prefix.
- Press `Esc` while recording to cancel without processing.

View file

@@ -1,9 +1,9 @@
CONFIG := $(HOME)/.config/lel/config.json
CONFIG := $(HOME)/.config/aman/config.json
.PHONY: run install sync test check
run:
uv run python3 src/leld.py --config $(CONFIG)
uv run python3 src/aman.py --config $(CONFIG)
sync:
uv sync
@@ -16,9 +16,9 @@ check:
$(MAKE) test
install:
mkdir -p $(HOME)/.local/share/lel/src/assets
cp src/*.py $(HOME)/.local/share/lel/src/
cp src/assets/*.png $(HOME)/.local/share/lel/src/assets/
cp systemd/lel.service $(HOME)/.config/systemd/user/lel.service
mkdir -p $(HOME)/.local/share/aman/src/assets
cp src/*.py $(HOME)/.local/share/aman/src/
cp src/assets/*.png $(HOME)/.local/share/aman/src/assets/
cp systemd/aman.service $(HOME)/.config/systemd/user/aman.service
systemctl --user daemon-reload
systemctl --user enable --now lel
systemctl --user enable --now aman

View file

@@ -1,4 +1,4 @@
# lel
# aman
Python X11 STT daemon that records audio, runs Whisper, applies local AI cleanup, and injects text.
@@ -78,12 +78,12 @@ uv sync --extra wayland
Run:
```bash
uv run python3 src/leld.py --config ~/.config/lel/config.json
uv run python3 src/aman.py --config ~/.config/aman/config.json
```
## Config
Create `~/.config/lel/config.json`:
Create `~/.config/aman/config.json`:
```json
{
@@ -111,7 +111,7 @@ Recording input can be a device index (preferred) or a substring of the device
name.
AI cleanup is always enabled and uses the locked local Llama-3.2-3B GGUF model
downloaded to `~/.cache/lel/models/` on first use.
downloaded to `~/.cache/aman/models/` on first use.
Use `-v/--verbose` to enable DEBUG logs, including recognized/processed
transcript text and llama.cpp logs (`llama::` prefix). Without `-v`, logs are
@@ -139,12 +139,12 @@ STT hinting:
## systemd user service
```bash
mkdir -p ~/.local/share/lel/src/assets
cp src/*.py ~/.local/share/lel/src/
cp src/assets/*.png ~/.local/share/lel/src/assets/
cp systemd/lel.service ~/.config/systemd/user/lel.service
mkdir -p ~/.local/share/aman/src/assets
cp src/*.py ~/.local/share/aman/src/
cp src/assets/*.png ~/.local/share/aman/src/assets/
cp systemd/aman.service ~/.config/systemd/user/aman.service
systemctl --user daemon-reload
systemctl --user enable --now lel
systemctl --user enable --now aman
```
## Usage

View file

@@ -1,5 +1,5 @@
[project]
name = "lel"
name = "aman"
version = "0.0.0"
description = "X11 STT daemon with faster-whisper and optional AI cleanup"
readme = "README.md"

View file

@@ -319,9 +319,9 @@ def _read_lock_pid(lock_file) -> str:
def _lock_single_instance():
runtime_dir = Path(os.getenv("XDG_RUNTIME_DIR", "/tmp")) / "lel"
runtime_dir = Path(os.getenv("XDG_RUNTIME_DIR", "/tmp")) / "aman"
runtime_dir.mkdir(parents=True, exist_ok=True)
lock_path = runtime_dir / "lel.lock"
lock_path = runtime_dir / "aman.lock"
lock_file = open(lock_path, "a+", encoding="utf-8")
try:
import fcntl
@@ -360,7 +360,7 @@ def main():
logging.basicConfig(
stream=sys.stderr,
level=logging.DEBUG if args.verbose else logging.INFO,
format="lel: %(asctime)s %(levelname)s %(message)s",
format="aman: %(asctime)s %(levelname)s %(message)s",
)
cfg = load(args.config)
_LOCK_HANDLE = _lock_single_instance()
@@ -368,7 +368,7 @@ def main():
logging.info("hotkey: %s", cfg.daemon.hotkey)
logging.info(
"config (%s):\n%s",
args.config or str(Path.home() / ".config" / "lel" / "config.json"),
args.config or str(Path.home() / ".config" / "aman" / "config.json"),
json.dumps(redacted_dict(cfg), indent=2),
)

View file

@@ -1,7 +1,7 @@
from pathlib import Path
DEFAULT_CONFIG_PATH = Path.home() / ".config" / "lel" / "config.json"
DEFAULT_CONFIG_PATH = Path.home() / ".config" / "aman" / "config.json"
RECORD_TIMEOUT_SEC = 300
STT_LANGUAGE = "en"
TRAY_UPDATE_MS = 250
@@ -12,5 +12,5 @@ MODEL_URL = (
"https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/"
"Llama-3.2-3B-Instruct-Q4_K_M.gguf"
)
MODEL_DIR = Path.home() / ".cache" / "lel" / "models"
MODEL_DIR = Path.home() / ".cache" / "aman" / "models"
MODEL_PATH = MODEL_DIR / MODEL_NAME

View file

@@ -31,7 +31,7 @@ def get_desktop_adapter() -> DesktopAdapter:
session_type = os.getenv("XDG_SESSION_TYPE", "").lower()
if session_type == "wayland" or os.getenv("WAYLAND_DISPLAY"):
raise SystemExit(
"Wayland is not supported yet. Run under X11 (XDG_SESSION_TYPE=x11) to use lel."
"Wayland is not supported yet. Run under X11 (XDG_SESSION_TYPE=x11) to use aman."
)
from desktop_x11 import X11Adapter

View file

@@ -44,7 +44,7 @@ class X11Adapter:
self.menu = None
if AppIndicator3 is not None:
self.indicator = AppIndicator3.Indicator.new(
"lel",
"aman",
self._icon_path("idle"),
AppIndicator3.IndicatorCategory.APPLICATION_STATUS,
)

13
systemd/aman.service Normal file
View file

@@ -0,0 +1,13 @@
[Unit]
Description=aman X11 STT daemon
After=default.target
[Service]
Type=simple
WorkingDirectory=%h/.local/share/aman
ExecStart=/usr/bin/uv run python3 %h/.local/share/aman/src/aman.py --config %h/.config/aman/config.json
Restart=on-failure
RestartSec=2
[Install]
WantedBy=default.target

View file

@@ -1,13 +0,0 @@
[Unit]
Description=lel X11 STT daemon
After=default.target
[Service]
Type=simple
WorkingDirectory=%h/.local/share/lel
ExecStart=/usr/bin/uv run python3 %h/.local/share/lel/src/leld.py --config %h/.config/lel/config.json
Restart=on-failure
RestartSec=2
[Install]
WantedBy=default.target

View file

@@ -10,7 +10,7 @@ SRC = ROOT / "src"
if str(SRC) not in sys.path:
sys.path.insert(0, str(SRC))
import leld
import aman
from config import Config, VocabularyReplacement
@@ -87,12 +87,12 @@ class DaemonTests(unittest.TestCase):
cfg = Config()
return cfg
@patch("leld.stop_audio_recording", return_value=FakeAudio(8))
@patch("leld.start_audio_recording", return_value=(object(), object()))
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
@patch("aman.start_audio_recording", return_value=(object(), object()))
def test_toggle_start_stop_injects_text(self, _start_mock, _stop_mock):
desktop = FakeDesktop()
with patch("leld._build_whisper_model", return_value=FakeModel()):
daemon = leld.Daemon(self._config(), desktop, verbose=False)
with patch("aman._build_whisper_model", return_value=FakeModel()):
daemon = aman.Daemon(self._config(), desktop, verbose=False)
daemon.ai_processor = FakeAIProcessor()
daemon._start_stop_worker = (
lambda stream, record, trigger, process_audio: daemon._stop_and_process(
@@ -101,19 +101,19 @@
)
daemon.toggle()
self.assertEqual(daemon.get_state(), leld.State.RECORDING)
self.assertEqual(daemon.get_state(), aman.State.RECORDING)
daemon.toggle()
self.assertEqual(daemon.get_state(), leld.State.IDLE)
self.assertEqual(daemon.get_state(), aman.State.IDLE)
self.assertEqual(desktop.inject_calls, [("hello world", "clipboard", False)])
@patch("leld.stop_audio_recording", return_value=FakeAudio(8))
@patch("leld.start_audio_recording", return_value=(object(), object()))
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
@patch("aman.start_audio_recording", return_value=(object(), object()))
def test_shutdown_stops_recording_without_injection(self, _start_mock, _stop_mock):
desktop = FakeDesktop()
with patch("leld._build_whisper_model", return_value=FakeModel()):
daemon = leld.Daemon(self._config(), desktop, verbose=False)
with patch("aman._build_whisper_model", return_value=FakeModel()):
daemon = aman.Daemon(self._config(), desktop, verbose=False)
daemon.ai_processor = FakeAIProcessor()
daemon._start_stop_worker = (
lambda stream, record, trigger, process_audio: daemon._stop_and_process(
@@ -122,22 +122,22 @@
)
daemon.toggle()
self.assertEqual(daemon.get_state(), leld.State.RECORDING)
self.assertEqual(daemon.get_state(), aman.State.RECORDING)
self.assertTrue(daemon.shutdown(timeout=0.2))
self.assertEqual(daemon.get_state(), leld.State.IDLE)
self.assertEqual(daemon.get_state(), aman.State.IDLE)
self.assertEqual(desktop.inject_calls, [])
@patch("leld.stop_audio_recording", return_value=FakeAudio(8))
@patch("leld.start_audio_recording", return_value=(object(), object()))
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
@patch("aman.start_audio_recording", return_value=(object(), object()))
def test_dictionary_replacement_applies_after_ai(self, _start_mock, _stop_mock):
desktop = FakeDesktop()
model = FakeModel(text="good morning martha")
cfg = self._config()
cfg.vocabulary.replacements = [VocabularyReplacement(source="Martha", target="Marta")]
with patch("leld._build_whisper_model", return_value=model):
daemon = leld.Daemon(cfg, desktop, verbose=False)
with patch("aman._build_whisper_model", return_value=model):
daemon = aman.Daemon(cfg, desktop, verbose=False)
daemon.ai_processor = FakeAIProcessor()
daemon._start_stop_worker = (
lambda stream, record, trigger, process_audio: daemon._stop_and_process(
@@ -156,8 +156,8 @@ class DaemonTests(unittest.TestCase):
cfg = self._config()
cfg.vocabulary.terms = ["Docker", "Systemd"]
with patch("leld._build_whisper_model", return_value=model):
daemon = leld.Daemon(cfg, desktop, verbose=False)
with patch("aman._build_whisper_model", return_value=model):
daemon = aman.Daemon(cfg, desktop, verbose=False)
result = daemon._transcribe(object())
@@ -172,8 +172,8 @@ class DaemonTests(unittest.TestCase):
cfg.vocabulary.terms = ["Systemd"]
cfg.vocabulary.replacements = [VocabularyReplacement(source="docker", target="Docker")]
with patch("leld._build_whisper_model", return_value=model):
daemon = leld.Daemon(cfg, desktop, verbose=False)
with patch("aman._build_whisper_model", return_value=model):
daemon = aman.Daemon(cfg, desktop, verbose=False)
result = daemon._transcribe(object())
@@ -186,24 +186,24 @@ class DaemonTests(unittest.TestCase):
desktop = FakeDesktop()
cfg = self._config()
with patch("leld._build_whisper_model", return_value=FakeModel()):
daemon = leld.Daemon(cfg, desktop, verbose=False)
with patch("aman._build_whisper_model", return_value=FakeModel()):
daemon = aman.Daemon(cfg, desktop, verbose=False)
self.assertFalse(daemon.log_transcript)
with patch("leld._build_whisper_model", return_value=FakeModel()):
daemon_verbose = leld.Daemon(cfg, desktop, verbose=True)
with patch("aman._build_whisper_model", return_value=FakeModel()):
daemon_verbose = aman.Daemon(cfg, desktop, verbose=True)
self.assertTrue(daemon_verbose.log_transcript)
@patch("leld.stop_audio_recording", return_value=FakeAudio(8))
@patch("leld.start_audio_recording", return_value=(object(), object()))
@patch("aman.stop_audio_recording", return_value=FakeAudio(8))
@patch("aman.start_audio_recording", return_value=(object(), object()))
def test_passes_clipboard_remove_option_to_desktop(self, _start_mock, _stop_mock):
desktop = FakeDesktop()
model = FakeModel(text="hello world")
cfg = self._config()
cfg.injection.remove_transcription_from_clipboard = True
with patch("leld._build_whisper_model", return_value=model):
daemon = leld.Daemon(cfg, desktop, verbose=False)
with patch("aman._build_whisper_model", return_value=model):
daemon = aman.Daemon(cfg, desktop, verbose=False)
daemon.ai_processor = FakeAIProcessor()
daemon._start_stop_worker = (
lambda stream, record, trigger, process_audio: daemon._stop_and_process(
@@ -218,11 +218,11 @@
def test_state_changes_are_debug_level(self):
desktop = FakeDesktop()
with patch("leld._build_whisper_model", return_value=FakeModel()):
daemon = leld.Daemon(self._config(), desktop, verbose=False)
with patch("aman._build_whisper_model", return_value=FakeModel()):
daemon = aman.Daemon(self._config(), desktop, verbose=False)
with self.assertLogs(level="DEBUG") as logs:
daemon.set_state(leld.State.RECORDING)
daemon.set_state(aman.State.RECORDING)
self.assertTrue(
any("DEBUG:root:state: idle -> recording" in line for line in logs.output)
@@ -233,10 +233,10 @@ class LockTests(unittest.TestCase):
def test_lock_rejects_second_instance(self):
with tempfile.TemporaryDirectory() as td:
with patch.dict(os.environ, {"XDG_RUNTIME_DIR": td}, clear=False):
first = leld._lock_single_instance()
first = aman._lock_single_instance()
try:
with self.assertRaises(SystemExit) as ctx:
leld._lock_single_instance()
aman._lock_single_instance()
self.assertIn("already running", str(ctx.exception))
finally:
first.close()

2
uv.lock generated
View file

@@ -404,7 +404,7 @@ wheels = [
]
[[package]]
name = "lel"
name = "aman"
version = "0.0.0"
source = { virtual = "." }
dependencies = [