# Filoma Filaraki Configuration Template
# Copy this file to '.env' and fill in the values for ONE scenario only.
# Comment out all other scenarios to avoid conflicts.
#
# IMPORTANT: When using `uvx filoma filaraki chat`, this file is NOT auto-loaded.
# Export variables in your shell, or run: uv run --env-file .env filoma filaraki chat
#
# Provider selection priority (automatic, first match wins):
#   1. Ollama    — auto-detected if running on localhost:11434 (no keys needed)
#   2. Mistral   — if MISTRAL_API_KEY is set
#   3. Gemini    — if GEMINI_API_KEY is set
#   4. OpenAI-compatible — if FILOMA_FILARAKI_BASE_URL is set (e.g. OpenAI, OpenRouter, etc.)

# ==============================================================================
# SCENARIO A: Ollama (Local - Privacy First, Default)
# ==============================================================================
# Use this for zero-cost, 100% private analysis.
# Requires the Ollama app to be running (`ollama serve`) with the model pulled.
# Default model: qwen2.5:14b (recommended for tool calling)
# Other good options: dolphincoder, codellama, deepseek-coder
#
# Setup:
#   ollama pull qwen2.5:14b
#   ollama serve
#
# FILOMA_FILARAKI_BASE_URL is optional - filoma will auto-detect Ollama on localhost:11434
FILOMA_FILARAKI_MODEL=qwen2.5:14b
# FILOMA_FILARAKI_BASE_URL=http://localhost:11434/v1

# ==============================================================================
# SCENARIO B: Mistral AI (Cloud)
# ==============================================================================
# Use this for a "plug and play" European cloud experience.
# Recommended for best tool-calling performance without local setup.
# Get a key at https://console.mistral.ai/
# MISTRAL_API_KEY=your_api_key_here
# FILOMA_FILARAKI_MODEL=mistral:mistral-small-latest  # Optional override

# ==============================================================================
# SCENARIO C: Google Gemini (Cloud)
# ==============================================================================
# Use this for Google's Gemini models with a generous free tier.
# Get a key at https://aistudio.google.com/
# GEMINI_API_KEY=your_api_key_here
# FILOMA_FILARAKI_MODEL=gemini-1.5-flash  # Optional override

# ==============================================================================
# SCENARIO D: OpenAI-Compatible (Generic)
# ==============================================================================
# Use this for any OpenAI-compatible API endpoint.
# Examples: OpenAI, OpenRouter, Together AI, Azure OpenAI, etc.
# Requires both base URL and API key.
#
# OpenAI:
#   FILOMA_FILARAKI_BASE_URL=https://api.openai.com/v1
#   OPENAI_API_KEY=your_openai_key_here
#   FILOMA_FILARAKI_MODEL=gpt-4o-mini
#
# OpenRouter (access to Claude, GPT-4, Llama, Gemini, etc.):
#   Get a key at https://openrouter.ai/keys
#   FILOMA_FILARAKI_BASE_URL=https://openrouter.ai/api/v1
#   OPENAI_API_KEY=your_openrouter_key_here
#   FILOMA_FILARAKI_MODEL=anthropic/claude-3.5-sonnet
#
# Together AI:
#   FILOMA_FILARAKI_BASE_URL=https://api.together.xyz/v1
#   OPENAI_API_KEY=your_together_key_here
#   FILOMA_FILARAKI_MODEL=meta-llama/Llama-3-8b-chat-hf
#
# Uncomment and fill in ONE of the above blocks to activate:
# FILOMA_FILARAKI_BASE_URL=https://api.openai.com/v1
# OPENAI_API_KEY=your_api_key_here
# FILOMA_FILARAKI_MODEL=gpt-4o-mini
