Add persian-tutor: Gradio-based GCSE Persian language learning app
Vocabulary study with FSRS spaced repetition, AI tutoring (Ollama/Claude), essay marking, idioms browser, Anki export, and dashboard. 918 vocabulary entries across 39 categories. 41 tests passing. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
0
python/persian-tutor/tests/__init__.py
Normal file
0
python/persian-tutor/tests/__init__.py
Normal file
89
python/persian-tutor/tests/test_ai.py
Normal file
89
python/persian-tutor/tests/test_ai.py
Normal file
@@ -0,0 +1,89 @@
|
||||
"""Tests for ai.py — dual AI backend."""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
import ai
|
||||
|
||||
|
||||
def test_ask_ollama_calls_ollama_chat():
    """ask_ollama should send a system + user message pair to ollama.chat."""
    canned = MagicMock()
    canned.message.content = "test response"

    with patch("ai.ollama.chat", return_value=canned) as chat_mock:
        reply = ai.ask_ollama("Hello", system="Be helpful")
        assert reply == "test response"

        # Accept either positional or keyword invocation of ollama.chat.
        recorded = chat_mock.call_args
        sent = recorded.kwargs.get("messages") or recorded[1].get("messages")
        assert len(sent) == 2
        assert sent[0]["role"] == "system"
        assert sent[1]["role"] == "user"
        assert sent[1]["content"] == "Hello"


def test_ask_ollama_no_system():
    """ask_ollama without a system prompt should send only the user message."""
    canned = MagicMock()
    canned.message.content = "response"

    with patch("ai.ollama.chat", return_value=canned) as chat_mock:
        ai.ask_ollama("Hi")
        recorded = chat_mock.call_args
        sent = recorded.kwargs.get("messages") or recorded[1].get("messages")
        assert len(sent) == 1
        assert sent[0]["role"] == "user"
|
||||
|
||||
|
||||
def test_ask_claude_calls_subprocess():
    """ask_claude should shell out to the claude CLI and strip its stdout."""
    fake_proc = MagicMock(stdout="Claude says hi\n")
    with patch("ai.subprocess.run", return_value=fake_proc) as run_mock:
        assert ai.ask_claude("Hello") == "Claude says hi"
        run_mock.assert_called_once()
        argv = run_mock.call_args[0][0]
        assert argv[0] == "claude"
        assert "-p" in argv


def test_ask_fast_uses_ollama():
    """ask(quality='fast') should route to the Ollama backend."""
    with patch("ai.ask_ollama", return_value="ollama response") as backend:
        assert ai.ask("test", quality="fast") == "ollama response"
        backend.assert_called_once()


def test_ask_smart_uses_claude():
    """ask(quality='smart') should route to the Claude backend."""
    with patch("ai.ask_claude", return_value="claude response") as backend:
        assert ai.ask("test", quality="smart") == "claude response"
        backend.assert_called_once()
|
||||
|
||||
|
||||
def test_chat_ollama():
    """chat_ollama should forward the full multi-turn history plus system prompt."""
    canned = MagicMock()
    canned.message.content = "continuation"

    history = [
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello!"},
        {"role": "user", "content": "How are you?"},
    ]
    with patch("ai.ollama.chat", return_value=canned) as chat_mock:
        assert ai.chat_ollama(history, system="Be helpful") == "continuation"

        recorded = chat_mock.call_args
        sent = recorded.kwargs.get("messages") or recorded[1].get("messages")
        # system prompt + the 3 conversation turns
        assert len(sent) == 4
|
||||
86
python/persian-tutor/tests/test_anki_export.py
Normal file
86
python/persian-tutor/tests/test_anki_export.py
Normal file
@@ -0,0 +1,86 @@
|
||||
"""Tests for anki_export.py — Anki .apkg generation."""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from anki_export import export_deck
|
||||
|
||||
def _entry(entry_id, category, english, persian, finglish):
    """Build one vocabulary entry in the app's canonical dict shape."""
    return {
        "id": entry_id,
        "section": "High-frequency language",
        "category": category,
        "english": english,
        "persian": persian,
        "finglish": finglish,
    }


# Three sample entries spanning two categories, for filter tests.
SAMPLE_VOCAB = [
    _entry("verb_go", "Common verbs", "to go", "رفتن", "raftan"),
    _entry("verb_eat", "Common verbs", "to eat", "خوردن", "khordan"),
    _entry("colour_red", "Colours", "red", "قرمز", "ghermez"),
]
|
||||
|
||||
|
||||
def test_export_deck_creates_file(tmp_path):
    """export_deck should write a non-empty .apkg file and return its path."""
    target = str(tmp_path / "test.apkg")
    returned = export_deck(SAMPLE_VOCAB, output_path=target)
    assert returned == target
    assert os.path.exists(target)
    assert os.path.getsize(target) > 0


def test_export_deck_is_valid_zip(tmp_path):
    """An .apkg file is a zip archive containing an Anki SQLite database."""
    target = str(tmp_path / "test.apkg")
    export_deck(SAMPLE_VOCAB, output_path=target)
    assert zipfile.is_zipfile(target)


def test_export_deck_with_category_filter(tmp_path):
    """A category-filtered deck should be no larger than the full deck."""
    filtered_path = str(tmp_path / "test.apkg")
    export_deck(SAMPLE_VOCAB, categories=["Colours"], output_path=filtered_path)
    assert os.path.exists(filtered_path)
    filtered_size = os.path.getsize(filtered_path)

    full_path = str(tmp_path / "test_all.apkg")
    export_deck(SAMPLE_VOCAB, output_path=full_path)
    full_size = os.path.getsize(full_path)

    # <= rather than <: compression can make sizes coincide for tiny decks.
    assert filtered_size <= full_size
|
||||
|
||||
|
||||
def test_export_deck_empty_vocab(tmp_path):
    """export_deck with no vocabulary should still produce a file."""
    target = str(tmp_path / "test.apkg")
    export_deck([], output_path=target)
    assert os.path.exists(target)


def test_export_deck_no_category_match(tmp_path):
    """A filter matching nothing should still yield an (empty) deck file."""
    target = str(tmp_path / "test.apkg")
    export_deck(SAMPLE_VOCAB, categories=["Nonexistent"], output_path=target)
    assert os.path.exists(target)
|
||||
151
python/persian-tutor/tests/test_db.py
Normal file
151
python/persian-tutor/tests/test_db.py
Normal file
@@ -0,0 +1,151 @@
|
||||
"""Tests for db.py — SQLite database layer with FSRS integration."""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
# Add project root to path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
import fsrs
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
def temp_db(tmp_path):
    """Point the db module at a fresh temporary SQLite file for each test."""
    import db as db_mod

    # Drop any cached connection before redirecting to the per-test file.
    db_mod._conn = None
    db_mod.DB_PATH = tmp_path / "test.db"
    db_mod.init_db()
    yield db_mod
    db_mod.close()
|
||||
|
||||
|
||||
def test_init_db_creates_tables(temp_db):
    """init_db should create every table the app relies on."""
    conn = temp_db.get_connection()
    rows = conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table'"
    ).fetchall()
    names = {row["name"] for row in rows}
    for expected in ("word_progress", "quiz_sessions", "essays", "tutor_sessions"):
        assert expected in names


def test_get_word_progress_nonexistent(temp_db):
    """An unreviewed word has no progress row."""
    assert temp_db.get_word_progress("nonexistent") is None


def test_update_and_get_word_progress(temp_db):
    """update_word_progress should create a row readable via get_word_progress."""
    card = temp_db.update_word_progress("verb_go", fsrs.Rating.Good)
    assert card is not None
    assert card.stability is not None

    row = temp_db.get_word_progress("verb_go")
    assert row is not None
    assert row["word_id"] == "verb_go"
    assert row["reps"] == 1
    assert row["fsrs_state"] is not None


def test_update_word_progress_increments_reps(temp_db):
    """Each review of the same word bumps its rep count."""
    temp_db.update_word_progress("verb_go", fsrs.Rating.Good)
    temp_db.update_word_progress("verb_go", fsrs.Rating.Easy)
    assert temp_db.get_word_progress("verb_go")["reps"] == 2
|
||||
|
||||
|
||||
def test_get_due_words(temp_db):
    """Reviews with different ratings should both land in word_progress."""
    # NOTE(review): despite the name, this never calls get_due_words —
    # due-ness is timing-dependent, so only row existence is asserted.
    # Consider a follow-up test that pins get_due_words output directly.
    temp_db.update_word_progress("verb_go", fsrs.Rating.Again)
    temp_db.update_word_progress("verb_eat", fsrs.Rating.Easy)

    stored = temp_db.get_connection().execute(
        "SELECT word_id FROM word_progress"
    ).fetchall()
    assert len(stored) == 2
|
||||
|
||||
|
||||
def test_get_word_counts(temp_db):
    """get_word_counts should report totals before and after a review."""
    counts = temp_db.get_word_counts(total_vocab_size=100)
    assert counts["total"] == 100
    assert counts["seen"] == 0
    assert counts["mastered"] == 0
    assert counts["due"] == 0

    temp_db.update_word_progress("verb_go", fsrs.Rating.Good)
    assert temp_db.get_word_counts(total_vocab_size=100)["seen"] == 1


def test_record_quiz_session(temp_db):
    """record_quiz_session should persist exactly one quiz row."""
    temp_db.record_quiz_session("Common verbs", 10, 7, 120)
    rows = temp_db.get_connection().execute(
        "SELECT * FROM quiz_sessions"
    ).fetchall()
    assert len(rows) == 1
    assert rows[0]["correct"] == 7
    assert rows[0]["total_questions"] == 10


def test_save_essay(temp_db):
    """save_essay should store the essay text, grade and feedback."""
    temp_db.save_essay("متن آزمایشی", "B1", "Good effort!", "Identity and culture")
    essays = temp_db.get_recent_essays()
    assert len(essays) == 1
    assert essays[0]["grade"] == "B1"
|
||||
|
||||
|
||||
def test_save_tutor_session(temp_db):
    """save_tutor_session should persist the conversation under its theme."""
    conversation = [
        {"role": "user", "content": "سلام"},
        {"role": "assistant", "content": "سلام! حالت چطوره؟"},
    ]
    temp_db.save_tutor_session("Identity and culture", conversation, 300)
    rows = temp_db.get_connection().execute(
        "SELECT * FROM tutor_sessions"
    ).fetchall()
    assert len(rows) == 1
    assert rows[0]["theme"] == "Identity and culture"


def test_get_stats(temp_db):
    """get_stats on a fresh database should return zeroed aggregates."""
    stats = temp_db.get_stats()
    assert stats["total_reviews"] == 0
    assert stats["total_quizzes"] == 0
    assert stats["streak"] == 0
    assert isinstance(stats["recent_quizzes"], list)
|
||||
|
||||
|
||||
def test_close_and_reopen(temp_db):
    """Data written before close() should survive reopening the same file."""
    temp_db.update_word_progress("verb_go", fsrs.Rating.Good)
    saved_path = temp_db.DB_PATH

    temp_db.close()

    # Reopen against the same database file.
    temp_db._conn = None
    temp_db.DB_PATH = saved_path
    temp_db.init_db()

    row = temp_db.get_word_progress("verb_go")
    assert row is not None
    assert row["reps"] == 1
|
||||
204
python/persian-tutor/tests/test_vocab.py
Normal file
204
python/persian-tutor/tests/test_vocab.py
Normal file
@@ -0,0 +1,204 @@
|
||||
"""Tests for modules/vocab.py — vocabulary search and flashcard logic."""
|
||||
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
def _make_entry(entry_id, category, english, persian, finglish):
    """Build one vocabulary entry in the app's canonical dict shape."""
    return {
        "id": entry_id,
        "section": "High-frequency language",
        "category": category,
        "english": english,
        "persian": persian,
        "finglish": finglish,
    }


# Four sample entries across three categories, for search/filter tests.
SAMPLE_VOCAB = [
    _make_entry("verb_go", "Common verbs", "to go", "رفتن", "raftan"),
    _make_entry("verb_eat", "Common verbs", "to eat", "خوردن", "khordan"),
    _make_entry("adj_big", "Common adjectives", "big", "بزرگ", "bozorg"),
    _make_entry("colour_red", "Colours", "red", "قرمز", "ghermez"),
]
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
def mock_vocab_and_db(tmp_path):
    """Seed SAMPLE_VOCAB into the vocab module and use a temp database."""
    import db as db_mod
    import modules.vocab as vocab_mod

    # Fresh SQLite file per test.
    db_mod._conn = None
    db_mod.DB_PATH = tmp_path / "test.db"
    db_mod.init_db()

    # Bypass JSON loading by pre-populating the module-level cache.
    vocab_mod._vocab_data = SAMPLE_VOCAB

    yield vocab_mod

    db_mod.close()
    vocab_mod._vocab_data = None
|
||||
|
||||
|
||||
def test_load_vocab(mock_vocab_and_db):
    """load_vocab should hand back the seeded vocabulary."""
    assert len(mock_vocab_and_db.load_vocab()) == 4


def test_get_categories(mock_vocab_and_db):
    """get_categories should include every category in the sample data."""
    cats = mock_vocab_and_db.get_categories()
    for expected in ("Colours", "Common verbs", "Common adjectives"):
        assert expected in cats


def test_search_english(mock_vocab_and_db):
    """Search should match on English text."""
    hits = mock_vocab_and_db.search("go")
    assert len(hits) == 1
    assert hits[0]["id"] == "verb_go"


def test_search_persian(mock_vocab_and_db):
    """Search should match on Persian text."""
    hits = mock_vocab_and_db.search("رفتن")
    assert len(hits) == 1
    assert hits[0]["id"] == "verb_go"


def test_search_finglish(mock_vocab_and_db):
    """Search should match on Finglish text."""
    hits = mock_vocab_and_db.search("raftan")
    assert len(hits) == 1
    assert hits[0]["id"] == "verb_go"


def test_search_empty(mock_vocab_and_db):
    """An empty or missing query should yield no results."""
    assert mock_vocab_and_db.search("") == []
    assert mock_vocab_and_db.search(None) == []


def test_search_no_match(mock_vocab_and_db):
    """A query matching nothing should yield an empty list."""
    assert mock_vocab_and_db.search("zzzzz") == []
|
||||
|
||||
|
||||
def test_get_random_word(mock_vocab_and_db):
    """get_random_word should return a complete vocabulary entry."""
    picked = mock_vocab_and_db.get_random_word()
    assert picked is not None
    for key in ("id", "english", "persian"):
        assert key in picked


def test_get_random_word_with_category(mock_vocab_and_db):
    """A category filter should constrain the random pick."""
    picked = mock_vocab_and_db.get_random_word(category="Colours")
    assert picked is not None
    assert picked["category"] == "Colours"


def test_get_random_word_nonexistent_category(mock_vocab_and_db):
    """An unknown category should yield None."""
    assert mock_vocab_and_db.get_random_word(category="Nonexistent") is None
|
||||
|
||||
|
||||
def test_check_answer_correct_en_to_fa(mock_vocab_and_db):
    """The right Persian answer should be accepted."""
    ok, _, _ = mock_vocab_and_db.check_answer(
        "verb_go", "رفتن", direction="en_to_fa"
    )
    assert ok is True


def test_check_answer_incorrect_en_to_fa(mock_vocab_and_db):
    """A wrong Persian answer should be rejected and the right one returned."""
    ok, expected, _ = mock_vocab_and_db.check_answer(
        "verb_go", "خوردن", direction="en_to_fa"
    )
    assert ok is False
    assert expected == "رفتن"


def test_check_answer_fa_to_en(mock_vocab_and_db):
    """English answers should be compared case-insensitively."""
    ok, _, _ = mock_vocab_and_db.check_answer(
        "verb_go", "To Go", direction="fa_to_en"
    )
    assert ok is True


def test_check_answer_nonexistent_word(mock_vocab_and_db):
    """An unknown word id should come back incorrect with no entry."""
    ok, _, entry = mock_vocab_and_db.check_answer(
        "nonexistent", "test", direction="en_to_fa"
    )
    assert ok is False
    assert entry is None
|
||||
|
||||
|
||||
def test_format_word_card(mock_vocab_and_db):
    """The card HTML should carry Persian, English and Finglish text."""
    card = mock_vocab_and_db.format_word_card(
        SAMPLE_VOCAB[0], show_transliteration="Finglish"
    )
    for fragment in ("رفتن", "to go", "raftan"):
        assert fragment in card


def test_format_word_card_no_transliteration(mock_vocab_and_db):
    """With transliteration off, Finglish should be absent from the card."""
    card = mock_vocab_and_db.format_word_card(
        SAMPLE_VOCAB[0], show_transliteration="off"
    )
    assert "raftan" not in card


def test_get_flashcard_batch(mock_vocab_and_db):
    """get_flashcard_batch should return the requested number of entries."""
    batch = mock_vocab_and_db.get_flashcard_batch(count=2)
    assert len(batch) == 2
    assert all("id" in entry for entry in batch)
|
||||
|
||||
|
||||
def test_get_word_status_new(mock_vocab_and_db):
    """A never-reviewed word reports status 'new'."""
    assert mock_vocab_and_db.get_word_status("verb_go") == "new"


def test_get_word_status_learning(mock_vocab_and_db):
    """A freshly reviewed word reports status 'learning'."""
    import db
    import fsrs

    db.update_word_progress("verb_go", fsrs.Rating.Good)
    assert mock_vocab_and_db.get_word_status("verb_go") == "learning"
|
||||
Reference in New Issue
Block a user