embedcache 0.1.1

High-performance text embedding service with caching capabilities
Documentation

The service is configured through environment variables, typically supplied via a `.env` file. An example configuration:
# Server Configuration
SERVER_HOST=127.0.0.1
SERVER_PORT=8081

# Database Configuration
DB_PATH=cache.db
DB_JOURNAL_MODE=wal

# Embedding Models (comma-separated list)
ENABLED_MODELS=AllMiniLML6V2,BGESmallENV15

# LLM Configuration (optional - for LLM-based chunking)
# Supported providers: ollama, openai, anthropic
# LLM_PROVIDER=ollama
# LLM_MODEL=llama3
# LLM_BASE_URL=http://localhost:11434
# LLM_API_KEY=
# LLM_TIMEOUT=60
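
For reference, below is a minimal sketch of how these variables might be read at startup, using only the Rust standard library. The `Config` struct, its field names, and the fallback defaults mirror the example above but are illustrative assumptions, not embedcache's actual API; in practice a loader such as `dotenvy` would typically populate the process environment from the `.env` file before this runs.

```rust
use std::env;

/// Hypothetical configuration holder; fields mirror the variables in the
/// example `.env` above, not embedcache's internal types.
#[derive(Debug)]
struct Config {
    server_host: String,
    server_port: u16,
    db_path: String,
    db_journal_mode: String,
    enabled_models: Vec<String>,
    llm_provider: Option<String>,
}

impl Config {
    /// Read settings from the process environment, falling back to the
    /// defaults shown in the example configuration.
    fn from_env() -> Config {
        Config {
            server_host: env::var("SERVER_HOST").unwrap_or_else(|_| "127.0.0.1".into()),
            server_port: env::var("SERVER_PORT")
                .ok()
                .and_then(|p| p.parse().ok())
                .unwrap_or(8081),
            db_path: env::var("DB_PATH").unwrap_or_else(|_| "cache.db".into()),
            db_journal_mode: env::var("DB_JOURNAL_MODE").unwrap_or_else(|_| "wal".into()),
            // ENABLED_MODELS is a comma-separated list of model names.
            enabled_models: env::var("ENABLED_MODELS")
                .unwrap_or_else(|_| "AllMiniLML6V2".into())
                .split(',')
                .map(|m| m.trim().to_string())
                .collect(),
            // LLM settings are optional; leaving them unset disables LLM-based chunking.
            llm_provider: env::var("LLM_PROVIDER").ok(),
        }
    }
}

fn main() {
    let cfg = Config::from_env();
    println!("{cfg:?}");
}
```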