# Symbiont OSS Environment Configuration
# Copy this file to .env and replace the placeholder values below with your actual credentials and settings
# ==============================================================================
# API KEYS (REQUIRED)
# ==============================================================================
# OpenRouter API Key - Get from https://openrouter.ai/keys
OPENROUTER_API_KEY=your_openrouter_api_key_here
# OpenAI API Key - Required for RAG functionality (leave empty if using local models)
OPENAI_API_KEY=your_openai_api_key_here
# OpenAI API Base URL - Use for OpenAI-compatible endpoints like Ollama
# Examples:
# OpenAI: https://api.openai.com/v1
# Ollama: http://localhost:11434/v1
# LocalAI: http://localhost:8080/v1
OPENAI_API_BASE_URL=https://api.openai.com/v1
# ==============================================================================
# LLM & EMBEDDING MODEL CONFIGURATION
# ==============================================================================
# Chat model for general AI operations
# Examples:
# OpenAI: gpt-4, gpt-3.5-turbo
# Ollama: llama2, codellama, mistral
# LocalAI: your-model-name
CHAT_MODEL=gpt-3.5-turbo
# Embedding provider: "ollama" (local) or "openai" (cloud)
# Auto-detected from URL/API key if not set
EMBEDDING_PROVIDER=
# Embedding model for RAG functionality
# Examples:
# OpenAI: text-embedding-3-small, text-embedding-3-large
# Ollama: nomic-embed-text, all-minilm
EMBEDDING_MODEL=text-embedding-3-small
# Embedding API Base URL (can be different from chat API)
# Leave empty to use same as OPENAI_API_BASE_URL
EMBEDDING_API_BASE_URL=
# Embedding API Key (can be different from OpenAI API key)
# Leave empty to use OPENAI_API_KEY
EMBEDDING_API_KEY=
# ==============================================================================
# DATABASE CONFIGURATION
# ==============================================================================
# PostgreSQL connection string for the main database
# SECURITY: change the default "password" credential before using this outside local development
DATABASE_URL=postgresql://symbiont:password@localhost:5432/symbiont
# Redis connection string for caching and session management
REDIS_URL=redis://localhost:6379
# Qdrant vector database URL for RAG functionality
QDRANT_URL=http://localhost:6333
# ==============================================================================
# APPLICATION CONFIGURATION
# ==============================================================================
# Logging level: error, warn, info, debug, trace
LOG_LEVEL=info
# API server port
API_PORT=8080
# Application environment: development, staging, production
ENVIRONMENT=development
# ==============================================================================
# STORAGE PATHS
# ==============================================================================
# Directory for agent context and knowledge storage
CONTEXT_STORAGE_PATH=./agent_storage
# Git repository clone base path
GIT_CLONE_BASE_PATH=./temp_repos
# Backup directory for workflow operations
BACKUP_DIRECTORY=./backups
# ==============================================================================
# OPTIONAL CONFIGURATION OVERRIDES
# ==============================================================================
# Qdrant collection name for vector storage
QDRANT_COLLECTION_NAME=agent_knowledge
# Vector dimension for embeddings — must match EMBEDDING_MODEL:
# 1536 for text-embedding-3-small, 3072 for text-embedding-3-large, 768 for nomic-embed-text
VECTOR_DIMENSION=1536
# Maximum context size in MB
MAX_CONTEXT_SIZE_MB=100
# API request timeout in seconds
API_TIMEOUT_SECONDS=60
# Maximum tokens for AI responses
MAX_TOKENS=4000
# AI model temperature (0.0-1.0): lower values give more deterministic output, higher values more creative
TEMPERATURE=0.1
# Default AI model for OpenRouter
OPENROUTER_MODEL=anthropic/claude-3.5-sonnet
# Feature flags — enable/disable optional behaviors (true/false)
ENABLE_COMPRESSION=true
ENABLE_BACKUPS=true
ENABLE_SAFETY_CHECKS=true