# Me And My Friends - Configuration Example
# Copy this to ~/.config/mamf/config.yaml or ./mamf.yaml
# =============================================================================
# PROVIDER CONFIGURATIONS
# =============================================================================
providers:
  # Ollama - Local or remote LLM server
  ollama:
    base_url: "http://localhost:11434"
    # For remote Ollama behind Cloudflare Access:
    # base_url: "https://ollama.example.com/v1"
    # headers:
    #   CF-Access-Client-Id: "your-client-id"
    #   CF-Access-Client-Secret: "your-client-secret"

  # OpenAI - Uses OPENAI_API_KEY env var by default
  openai:
    # api_key: "sk-..."                       # Or set explicitly
    # base_url: "https://api.openai.com/v1"   # For OpenAI-compatible APIs
  # Anthropic - Uses ANTHROPIC_API_KEY env var
  anthropic:
    # api_key: "sk-ant-..."

  # Google Gemini - Uses GOOGLE_API_KEY env var
  google:
    # api_key: "..."

  # Claude CLI - Uses the installed `claude` command (npm install -g @anthropic-ai/claude-code)
  claude_cli:
    timeout_secs: 600  # 10 minutes for deep research
    model: "claude-sonnet-4-5-20250929"
# =============================================================================
# ADVISOR CONFIGURATIONS
# =============================================================================
# Each advisor has: model, provider, temperature, order, and an optional system_prompt
# (see the example entry below)
#
# Tiered Architecture:
# Light: Ollama models (fast, local/remote)
# Medium: Gemini (synthesis, balanced)
# Heavy: Claude CLI (deep research with MCP tools)
#
# VRAM Optimization: Group advisors by model to minimize model swaps
# Order determines execution sequence (lower = earlier)
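#
# Example advisor entry (illustrative — the name, prompt text, and values below
# are placeholders; system_prompt is the optional field mentioned above):
#
#   my_advisor:
#     model: "phi4:14b"
#     provider: ollama
#     temperature: 0.5
#     order: 8
#     system_prompt: "You are a pragmatic strategy advisor. Be concise."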
advisors:
  # --- phi4:14b group (analytical reasoning) ---
  cfo:
    model: "phi4:14b"
    provider: ollama
    temperature: 0.3
    order: 1

  investor:
    model: "phi4:14b"
    provider: ollama
    temperature: 0.6
    order: 2

  # --- deepseek-r1:7b (technical reasoning) ---
  cto:
    model: "deepseek-r1:7b"
    provider: ollama
    temperature: 0.4
    order: 3

  # --- qwen2.5:7b (creative) ---
  cmo:
    model: "qwen2.5:7b"
    provider: ollama
    temperature: 0.7
    order: 4

  # --- mistral:7b-instruct (structured, practical) ---
  coo:
    model: "mistral:7b-instruct"
    provider: ollama
    temperature: 0.4
    order: 5

  legal:
    model: "mistral:7b-instruct"
    provider: ollama
    temperature: 0.2
    order: 6

  # --- dolphin-llama3:8b (uncensored, contrarian) ---
  wildcard:
    model: "dolphin-llama3:8b"
    provider: ollama
    temperature: 0.9
    order: 7

  # --- Chairman: Synthesizes all responses (Gemini for quality) ---
  chairman:
    model: "gemini-2.5-flash"
    provider: google
    temperature: 0.5
    order: 100
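
  # --- Heavy tier (illustrative, commented out) ---
  # The tier overview above lists Claude CLI for deep research; an advisor
  # routed through it could look like this (name and values are placeholders):
  # researcher:
  #   model: "claude-sonnet-4-5-20250929"
  #   provider: claude_cli
  #   temperature: 0.3
  #   order: 50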
# =============================================================================
# DEFAULT SETTINGS
# =============================================================================
defaults:
  timeout_secs: 300  # 5 minutes (increase for remote/slow models)
  max_tokens: 4096
  stream: true
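  # (Assumption) provider-level settings override these defaults — e.g. the
  # claude_cli timeout_secs above takes precedence over timeout_secs here.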
# =============================================================================
# RAG (KNOWLEDGE BASE) CONFIGURATION
# =============================================================================
# Requires:
#   - Qdrant running (docker run -p 6333:6333 qdrant/qdrant)
#   - Ollama with an embedding model (ollama pull nomic-embed-text)
rag:
  qdrant_url: "http://localhost:6333"
  collection: "mamf_docs"
  embedding_model: "nomic-embed-text"
  chunk_size: 1000
  chunk_overlap: 200
  top_k: 5
  docs_dir: "./docs"
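  # (Illustrative notes, not authoritative — check the project docs for exact
  # semantics) chunk_size/chunk_overlap control how files in docs_dir are split
  # before embedding; top_k is the number of chunks retrieved per query.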
# =============================================================================
# COMPANY CONTEXT (Optional)
# =============================================================================
# Personalizes advisor responses with your company's context
# company:
#   name: "Your Company"
#   product: "Your Product"
#   stage: "Seed"          # Pre-seed, Seed, Series A, etc.
#   industry: "Technology"