# Swink Agent — Environment Variables
#
# Copy this file to .env and fill in the values for the providers you want to use.
# The TUI selects the first available provider in priority order:
# 1. Custom Proxy (LLM_BASE_URL set)
# 2. OpenAI (OPENAI_API_KEY set)
# 3. Anthropic (ANTHROPIC_API_KEY set)
# 4. Local (on-device inference, when `local` feature is enabled)
# 5. Ollama (always available as fallback)
#
# Keys can also be stored in the OS keychain, either via the first-run wizard
# or with the `#key` command inside the TUI.
# ── Live Tests ──────────────────────────────────────────────────────
# Set these keys to run the live adapter tests:
#   cargo test -p swink-agent-adapters -- --ignored
# ANTHROPIC_API_KEY, OPENAI_API_KEY, GEMINI_API_KEY, and the provider-specific
# keys below are also used by examples that opt into those providers.
# ── Anthropic ────────────────────────────────────────────────────────
ANTHROPIC_API_KEY=sk-ant-...
# ANTHROPIC_BASE_URL=https://api.anthropic.com
# ANTHROPIC_MODEL=claude-sonnet-4-6
# ── OpenAI ───────────────────────────────────────────────────────────
OPENAI_API_KEY=sk-...
# OPENAI_BASE_URL=https://api.openai.com
# OPENAI_MODEL=gpt-4o
# ── Google Gemini ─────────────────────────────────────────────────────
GEMINI_API_KEY=AIza...
# GEMINI_BASE_URL=https://generativelanguage.googleapis.com
# GEMINI_MODEL=gemini-3-flash-preview
# ── Azure OpenAI / Azure AI Foundry ───────────────────────────────────
AZURE_API_KEY=...
# Azure OpenAI example:
# AZURE_BASE_URL=https://your-resource.openai.azure.com/openai/v1
# Azure AI Foundry example:
# AZURE_BASE_URL=https://your-project.services.ai.azure.com/openai/v1
# AZURE_MODEL=gpt-5.4
# ── xAI / Grok ────────────────────────────────────────────────────────
XAI_API_KEY=...
# XAI_BASE_URL=https://api.x.ai
# XAI_MODEL=grok-3
# ── Mistral ───────────────────────────────────────────────────────────
MISTRAL_API_KEY=...
# MISTRAL_BASE_URL=https://api.mistral.ai
# MISTRAL_MODEL=mistral-medium-latest
# ── AWS Bedrock ───────────────────────────────────────────────────────
AWS_ACCESS_KEY_ID=...
AWS_SECRET_ACCESS_KEY=...
# AWS_SESSION_TOKEN=...
AWS_REGION=us-east-1
# BEDROCK_MODEL=amazon.nova-pro-v1:0
# ── Custom SSE Proxy ─────────────────────────────────────────────────
# Set LLM_BASE_URL to enable proxy mode (highest priority).
# LLM_BASE_URL=http://localhost:8080
# LLM_API_KEY=your-proxy-bearer-token
# LLM_MODEL=claude-sonnet-4-6
# ── Ollama (local, no key required) ──────────────────────────────────
# OLLAMA_HOST=http://localhost:11434
# OLLAMA_MODEL=llama3.2
# ── Local LLM (swink-agent-local-llm) ────────────────────────────────
# Override the HuggingFace model cache directory (default: ~/.cache/huggingface/hub/).
# Applies to hf-hub downloads for GGUF model files.
# HF_HUB_CACHE=E:\models
# Optional: HuggingFace token for gated models (e.g. gemma-embedding-300m).
# HF_TOKEN=hf_...
# Model repo / file overrides (defaults come from the model catalog):
# LOCAL_MODEL_REPO=unsloth/SmolLM3-3B-GGUF
# LOCAL_MODEL_FILE=SmolLM3-3B-Q4_K_M.gguf
# LOCAL_GPU_LAYERS=0
# LOCAL_CONTEXT_LENGTH=8192
# Embedding model overrides (used by memory/RAG features):
# LOCAL_EMBED_REPO=Snowflake/snowflake-arctic-embed-xs
# LOCAL_EMBED_FILE=snowflake-arctic-embed-xs-F32.gguf
# LOCAL_EMBED_DIMS=384
# ── System Prompt (TUI only) ────────────────────────────────────────
# Used by the TUI binary, not the core library. The core library accepts
# the system prompt as a parameter to AgentOptions::new().
# Resolution order: explicit parameter > env var > tui.toml > default.
# LLM_SYSTEM_PROMPT=You are a helpful assistant.