# Server Configuration
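# SERVER_HOST is the bind address: 127.0.0.1 accepts connections from the local
# machine only; set it to 0.0.0.0 if the server should listen on all interfaces.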
SERVER_HOST=127.0.0.1
SERVER_PORT=8081
# Database Configuration
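# DB_PATH points at the on-disk cache database; a relative path such as cache.db
# is presumably resolved against the server's working directory.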
DB_PATH=cache.db
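# DB_JOURNAL_MODE appears to map to SQLite's journal_mode pragma; wal
# (write-ahead logging) generally lets readers and a writer proceed
# concurrently, unlike the default rollback journal.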
DB_JOURNAL_MODE=wal
# Embedding Models (comma-separated list)
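# These identifiers are assumed to be the embedding library's model names,
# e.g. AllMiniLML6V2 -> sentence-transformers/all-MiniLM-L6-v2 and
# BGESmallENV15 -> BAAI/bge-small-en-v1.5 (both 384-dimensional).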
ENABLED_MODELS=AllMiniLML6V2,BGESmallENV15
# LLM Configuration (optional; only needed for LLM-based chunking)
# Supported providers: ollama, openai, anthropic
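# The commented defaults below sketch a local Ollama setup (11434 is Ollama's
# default port). LLM_API_KEY is typically only needed for hosted providers such
# as openai or anthropic, and LLM_TIMEOUT is assumed to be in seconds.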
# LLM_PROVIDER=ollama
# LLM_MODEL=llama3
# LLM_BASE_URL=http://localhost:11434
# LLM_API_KEY=
# LLM_TIMEOUT=60
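# A purely illustrative hosted-provider variant (model name and key are
# placeholders, not defaults shipped with this project):
# LLM_PROVIDER=openai
# LLM_MODEL=gpt-4o-mini
# LLM_API_KEY=your-api-key-here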