Vocabulary study with FSRS spaced repetition, AI tutoring (Ollama/Claude), essay marking, idioms browser, Anki export, and dashboard. 918 vocabulary entries across 39 categories. 41 tests passing. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
45 lines
1.3 KiB
Python
45 lines
1.3 KiB
Python
"""Dual AI backend: Ollama (fast/local) and Claude CLI (smart)."""
|
|
|
|
import subprocess
|
|
|
|
import ollama
|
|
|
|
DEFAULT_OLLAMA_MODEL = "qwen2.5:7b"
|
|
|
|
|
|
def ask_ollama(prompt, system=None, model=DEFAULT_OLLAMA_MODEL):
    """Send a single prompt to a local Ollama model and return its reply.

    Args:
        prompt: The user message to send.
        system: Optional system prompt prepended to the conversation.
        model: Ollama model tag; defaults to DEFAULT_OLLAMA_MODEL.

    Returns:
        The assistant's reply text.
    """
    # Conversation starts with the system prompt, if one was given.
    conversation = [{"role": "system", "content": system}] if system else []
    conversation.append({"role": "user", "content": prompt})
    reply = ollama.chat(model=model, messages=conversation)
    return reply.message.content
|
|
|
|
|
|
def ask_claude(prompt):
    """Query Claude via the `claude` CLI in non-interactive (-p) mode.

    Args:
        prompt: The prompt text passed to the CLI.

    Returns:
        Claude's reply with surrounding whitespace stripped.

    Raises:
        RuntimeError: If the CLI exits with a non-zero status. (Previously a
            failed invocation was silently reported as an empty string,
            discarding the CLI's stderr diagnostics.)
    """
    # List-form argv (shell=False) keeps the prompt safe from shell injection.
    result = subprocess.run(
        ["claude", "-p", prompt],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        # Surface the CLI's own error output instead of swallowing it.
        raise RuntimeError(
            f"claude CLI failed (exit {result.returncode}): "
            f"{result.stderr.strip()}"
        )
    return result.stdout.strip()
|
|
|
|
|
|
def ask(prompt, system=None, quality="fast"):
    """Route a prompt to the appropriate backend by requested quality.

    Args:
        prompt: The user prompt.
        system: Optional system prompt (honored only on the Ollama path).
        quality: "fast" (default) uses local Ollama; "smart" uses Claude.

    Returns:
        The chosen backend's reply text.
    """
    wants_claude = quality == "smart"
    if wants_claude:
        # The Claude CLI path does not take a system prompt.
        return ask_claude(prompt)
    return ask_ollama(prompt, system=system)
|
|
|
|
|
|
def chat_ollama(messages, system=None, model=DEFAULT_OLLAMA_MODEL):
    """Run a multi-turn conversation through a local Ollama model.

    Args:
        messages: Sequence of {"role": ..., "content": ...} message dicts.
        system: Optional system prompt inserted before the history.
        model: Ollama model tag; defaults to DEFAULT_OLLAMA_MODEL.

    Returns:
        The assistant's reply text for the latest turn.
    """
    # Prepend the system prompt (when present) to the caller's history.
    prefix = [{"role": "system", "content": system}] if system else []
    conversation = prefix + list(messages)
    reply = ollama.chat(model=model, messages=conversation)
    return reply.message.content
|