Centralize constants

This commit is contained in:
Thales Maciel 2026-02-24 13:45:31 -03:00
parent 72ad571ff2
commit a4da1ddd73
No known key found for this signature in database
GPG key ID: 33112E6833C34679
5 changed files with 24 additions and 19 deletions

View file

@@ -6,11 +6,12 @@ import os
import sys
import urllib.request
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Callable, cast
from llama_cpp import Llama, llama_cpp as llama_cpp_lib # type: ignore[import-not-found]
from constants import LLM_LANGUAGE, MODEL_DIR, MODEL_NAME, MODEL_PATH, MODEL_URL
SYSTEM_PROMPT = (
"You are an amanuensis. Rewrite the user's dictated text into clean, grammatical prose.\n\n"
@@ -26,14 +27,6 @@ SYSTEM_PROMPT = (
" - \"let's ask Bob, I mean Janice, let's ask Janice\" -> \"let's ask Janice\"\n"
)
MODEL_NAME = "Llama-3.2-3B-Instruct-Q4_K_M.gguf"
MODEL_URL = (
"https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/"
"Llama-3.2-3B-Instruct-Q4_K_M.gguf"
)
MODEL_DIR = Path.home() / ".cache" / "lel" / "models"
MODEL_PATH = MODEL_DIR / MODEL_NAME
class LlamaProcessor:
def __init__(self, verbose=False):

View file

@@ -2,6 +2,8 @@ import json
from dataclasses import dataclass, field
from pathlib import Path
from constants import DEFAULT_CONFIG_PATH
@dataclass
class Config:
@@ -11,13 +13,9 @@ class Config:
injection: dict = field(default_factory=lambda: {"backend": "clipboard"})
def default_path() -> Path:
    """Return the default config file location: ``~/.config/lel/config.json``."""
    return Path.home().joinpath(".config", "lel", "config.json")
def load(path: str | None) -> Config:
cfg = Config()
p = Path(path) if path else default_path()
p = Path(path) if path else DEFAULT_CONFIG_PATH
if p.exists():
data = json.loads(p.read_text(encoding="utf-8"))
if any(k in data for k in ("daemon", "recording", "stt", "injection")):

17
src/constants.py Normal file
View file

@@ -0,0 +1,17 @@
from pathlib import Path

# Path of the user configuration file read by config.load().
DEFAULT_CONFIG_PATH = Path.home() / ".config" / "lel" / "config.json"

# Hard stop for a single dictation recording, in seconds.
RECORD_TIMEOUT_SEC = 300

# Language hint passed to the speech-to-text (faster-whisper) model.
STT_LANGUAGE = "en"

# Tray icon refresh interval, in milliseconds (GLib timeout).
TRAY_UPDATE_MS = 250

# Static assets (icons) shipped next to this module.
ASSETS_DIR = Path(__file__).parent / "assets"

# GGUF model file fetched from Hugging Face for the LLM post-processor.
MODEL_NAME = "Llama-3.2-3B-Instruct-Q4_K_M.gguf"
# Download URL is derived from MODEL_NAME so the two can never drift apart.
MODEL_URL = (
    "https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/"
    f"{MODEL_NAME}"
)
# Local cache directory and full on-disk path for the downloaded model.
MODEL_DIR = Path.home() / ".cache" / "lel" / "models"
MODEL_PATH = MODEL_DIR / MODEL_NAME

# Language hint passed to the LLM rewriting stage.
LLM_LANGUAGE = "en"

View file

@@ -3,7 +3,6 @@ from __future__ import annotations
import logging
import threading
import warnings
from pathlib import Path
from typing import Callable, Iterable
import gi
@@ -20,9 +19,8 @@ except ValueError:
from gi.repository import GLib, Gdk, Gtk # type: ignore[import-not-found]
from constants import ASSETS_DIR, TRAY_UPDATE_MS
ASSETS_DIR = Path(__file__).parent / "assets"
TRAY_UPDATE_MS = 250
MOD_MAP = {
"shift": X.ShiftMask,

View file

@@ -13,6 +13,7 @@ import gi
from faster_whisper import WhisperModel
from config import Config, load, redacted_dict
from constants import RECORD_TIMEOUT_SEC, STT_LANGUAGE
from recorder import start_recording, stop_recording
from aiprocess import LlamaProcessor
from desktop import get_desktop_adapter
@@ -26,8 +27,6 @@ class State:
OUTPUTTING = "outputting"
RECORD_TIMEOUT_SEC = 300
STT_LANGUAGE = "en"
def _compute_type(device: str) -> str: