Add persian-tutor: Gradio-based GCSE Persian language learning app
Vocabulary study with FSRS spaced repetition, AI tutoring (Ollama/Claude), essay marking, idioms browser, Anki export, and dashboard. 918 vocabulary entries across 39 categories. 41 tests passing. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
89
python/persian-tutor/tests/test_ai.py
Normal file
89
python/persian-tutor/tests/test_ai.py
Normal file
@@ -0,0 +1,89 @@
|
||||
"""Tests for ai.py — dual AI backend."""

import sys
from pathlib import Path
from unittest.mock import patch, MagicMock

import pytest

# Make the application package importable when pytest is invoked from the
# repository root (tests/ sits one level below the app modules).
sys.path.insert(0, str(Path(__file__).parent.parent))

import ai
||||
def test_ask_ollama_calls_ollama_chat():
    """ask_ollama should send a system + user message pair to ollama.chat."""
    fake_reply = MagicMock()
    fake_reply.message.content = "test response"

    with patch("ai.ollama.chat", return_value=fake_reply) as chat_spy:
        answer = ai.ask_ollama("Hello", system="Be helpful")
        assert answer == "test response"

        # messages may arrive positionally or as a keyword argument.
        captured = chat_spy.call_args
        sent = captured.kwargs.get("messages") or captured[1].get("messages")
        assert len(sent) == 2
        assert sent[0]["role"] == "system"
        assert sent[1]["role"] == "user"
        assert sent[1]["content"] == "Hello"
def test_ask_ollama_no_system():
    """Without a system prompt, ask_ollama sends just the user message."""
    fake_reply = MagicMock()
    fake_reply.message.content = "response"

    with patch("ai.ollama.chat", return_value=fake_reply) as chat_spy:
        ai.ask_ollama("Hi")

        captured = chat_spy.call_args
        sent = captured.kwargs.get("messages") or captured[1].get("messages")
        assert len(sent) == 1
        assert sent[0]["role"] == "user"
def test_ask_claude_calls_subprocess():
    """ask_claude should shell out to the claude CLI and strip its output."""
    completed = MagicMock(stdout="Claude says hi\n")

    with patch("ai.subprocess.run", return_value=completed) as run_spy:
        answer = ai.ask_claude("Hello")

        # Trailing newline from the CLI must be stripped.
        assert answer == "Claude says hi"
        run_spy.assert_called_once()

        # First positional argument is the argv list for subprocess.run.
        argv = run_spy.call_args[0][0]
        assert argv[0] == "claude"
        assert "-p" in argv
def test_ask_fast_uses_ollama():
    """quality='fast' should route ask() through the Ollama backend."""
    with patch("ai.ask_ollama", return_value="ollama response") as backend:
        answer = ai.ask("test", quality="fast")

    assert answer == "ollama response"
    backend.assert_called_once()
def test_ask_smart_uses_claude():
    """quality='smart' should route ask() through the Claude backend."""
    with patch("ai.ask_claude", return_value="claude response") as backend:
        answer = ai.ask("test", quality="smart")

    assert answer == "claude response"
    backend.assert_called_once()
def test_chat_ollama():
    """chat_ollama should prepend the system prompt to multi-turn history."""
    fake_reply = MagicMock()
    fake_reply.message.content = "continuation"

    history = [
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello!"},
        {"role": "user", "content": "How are you?"},
    ]

    with patch("ai.ollama.chat", return_value=fake_reply) as chat_spy:
        answer = ai.chat_ollama(history, system="Be helpful")
        assert answer == "continuation"

        captured = chat_spy.call_args
        sent = captured.kwargs.get("messages") or captured[1].get("messages")
        # One system message plus the three conversation turns.
        assert len(sent) == 4
Reference in New Issue
Block a user