# ==========================================
# Mermaid + LiteLLM Configuration
# ==========================================
# Copy this file to .env and add your API keys.
# IMPORTANT: never commit the populated .env to version control!
# ------------------------------------------
# LiteLLM Proxy Configuration
# ------------------------------------------
# Master key for LiteLLM admin access (REQUIRED - generate a strong random key!)
# Generate with: openssl rand -base64 32
LITELLM_MASTER_KEY=
# LiteLLM Proxy URL (default: http://localhost:4000)
LITELLM_PROXY_URL=http://localhost:4000
# Database password (REQUIRED - use a strong password!)
# Generate with: openssl rand -base64 24
POSTGRES_PASSWORD=
# Database connection URL (auto-configured by docker-compose; uncomment and
# set it manually only when running LiteLLM outside Docker — note that the
# ${POSTGRES_PASSWORD} reference below is expanded by docker-compose, not by
# plain dotenv loaders)
# DATABASE_URL=postgresql://mermaid:${POSTGRES_PASSWORD}@localhost:5433/litellm
# ------------------------------------------
# API Keys for LLM Providers
# ------------------------------------------
# Uncomment and add your keys to enable providers
# OpenAI
# Get your key at: https://platform.openai.com/api-keys
# OPENAI_API_KEY=sk-...
# Anthropic (Claude)
# Get your key at: https://console.anthropic.com/
# ANTHROPIC_API_KEY=sk-ant-api03-...
# Groq (Fast inference for Llama, Mixtral)
# Get your key at: https://console.groq.com/keys
# GROQ_API_KEY=gsk_...
# Google (Gemini)
# Get your key at: https://aistudio.google.com/app/apikey
# GOOGLE_API_KEY=...
# Azure OpenAI
# AZURE_API_KEY=...
# AZURE_API_BASE=https://your-resource.openai.azure.com/
# AZURE_API_VERSION=2024-02-15-preview
# Cohere
# Get your key at: https://dashboard.cohere.com/api-keys
# COHERE_API_KEY=...
# Mistral AI
# Get your key at: https://console.mistral.ai/
# MISTRAL_API_KEY=...
# Perplexity
# Get your key at: https://www.perplexity.ai/settings/api
# PERPLEXITYAI_API_KEY=pplx-...
# DeepInfra
# Get your key at: https://deepinfra.com/
# DEEPINFRA_API_KEY=...
# Together AI
# Get your key at: https://api.together.xyz/
# TOGETHERAI_API_KEY=...
# Replicate
# Get your key at: https://replicate.com/account/api-tokens
# REPLICATE_API_KEY=...
# Hugging Face
# Get your key at: https://huggingface.co/settings/tokens
# HUGGINGFACE_API_KEY=hf_...
# ------------------------------------------
# Logging and Monitoring (Optional)
# ------------------------------------------
# Langfuse (LLM observability)
# LANGFUSE_PUBLIC_KEY=...
# LANGFUSE_SECRET_KEY=...
# LANGFUSE_HOST=https://cloud.langfuse.com
# Helicone (LLM analytics)
# HELICONE_API_KEY=...
# Weights & Biases (experiment tracking)
# WANDB_API_KEY=...
# ------------------------------------------
# Mermaid-specific Configuration
# ------------------------------------------
# Default model to use if not specified
MERMAID_DEFAULT_MODEL=ollama/tinyllama
# Enable debug logging
# RUST_LOG=debug
# Maximum context tokens (for large projects)
MERMAID_MAX_CONTEXT_TOKENS=75000
# ------------------------------------------
# Ollama Configuration (for local models)
# ------------------------------------------
# Ollama host (if not using default)
# OLLAMA_HOST=http://localhost:11434
# Ollama models to auto-pull on startup
# OLLAMA_MODELS=tinyllama,codellama:7b,mistral
# ------------------------------------------
# Security Settings
# ------------------------------------------
# Enable CORS (for web frontends)
# LITELLM_CORS_ALLOW_ORIGINS=*
# Rate limiting (requests per minute)
# LITELLM_DEFAULT_RPM_LIMIT=100
# ------------------------------------------
# Performance Tuning
# ------------------------------------------
# Connection pool size
# LITELLM_CONNECTION_POOL_SIZE=10
# Request timeout (seconds)
# LITELLM_REQUEST_TIMEOUT=600
# Cache TTL (seconds)
# LITELLM_CACHE_TTL=3600