# LocalGPT Configuration
# Copy to ~/.localgpt/config.toml
[agent]
# Default model to use for chat
#
# Format: "provider/model-id" (OpenClaw-compatible)
#
# Anthropic API (recommended, requires ANTHROPIC_API_KEY):
# - "anthropic/claude-opus-4-5" (recommended)
# - "anthropic/claude-sonnet-4-5"
#
# Short aliases (auto-resolved to latest 4.5 models):
# - "opus" → anthropic/claude-opus-4-5
# - "sonnet" → anthropic/claude-sonnet-4-5
# - "gpt" → openai/gpt-4o
#
# OpenAI API (requires OPENAI_API_KEY):
# - "openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-4-turbo"
#
# Claude CLI (local, no API key needed):
# - "claude-cli/opus", "claude-cli/sonnet", "claude-cli/haiku"
#
# Ollama (local):
# - "ollama/llama3", "ollama/mistral", etc.
#
default_model = "claude-cli/opus"
# Context window size (in tokens)
context_window = 128000
# Reserve tokens for response
reserve_tokens = 8000
# Anthropic configuration (REQUIRED for default model)
# Get your API key at: https://console.anthropic.com/
[providers.anthropic]
api_key = "${ANTHROPIC_API_KEY}" # Set: export ANTHROPIC_API_KEY="sk-ant-..."
base_url = "https://api.anthropic.com"
# OpenAI configuration (optional)
# [providers.openai]
# api_key = "${OPENAI_API_KEY}"
# base_url = "https://api.openai.com/v1"
# Ollama configuration (for local models)
# [providers.ollama]
# endpoint = "http://localhost:11434"
# model = "llama3"
# Claude CLI configuration (uses local claude CLI command)
# Requires claude CLI to be installed: https://github.com/anthropics/claude-code
# [providers.claude_cli]
# command = "claude"
# model = "opus" # opus, sonnet, or haiku
[heartbeat]
# Enable automatic heartbeat
enabled = true
# How often to check HEARTBEAT.md
interval = "30m"
# Only run during these hours (optional)
# [heartbeat.active_hours]
# start = "09:00"
# end = "22:00"
[memory]
# Where to store memory files
workspace_dir = "~/.localgpt/workspace"
# Embedding provider for semantic search: "local" (default), "openai", or "none"
# - "local": Uses FastEmbed (all-MiniLM-L6-v2), no API key needed
# - "openai": Uses OpenAI embeddings (requires providers.openai config)
# - "none": FTS-only search, no vector embeddings
embedding_provider = "local"
# Embedding model for local provider (FastEmbed):
#
# English:
# - all-MiniLM-L6-v2 (default, ~80MB, fastest)
# - bge-base-en-v1.5 (~430MB, higher quality)
#
# Chinese:
# - bge-small-zh-v1.5 (~95MB, Chinese-specific)
#
# Multilingual (Chinese, Japanese, Korean, 100+ languages):
# - multilingual-e5-small (~470MB, compact)
# - multilingual-e5-base (~1.1GB, recommended for Chinese)
# - bge-m3 (~2.2GB, best quality)
#
# For OpenAI provider: text-embedding-3-small, text-embedding-3-large
embedding_model = "all-MiniLM-L6-v2"
# Cache directory for local embedding models (ONNX format)
# Models are downloaded from HuggingFace on first use
# Default: ~/.cache/localgpt/models
# Can also be set via FASTEMBED_CACHE_DIR environment variable
# embedding_cache_dir = "~/.cache/localgpt/models"
# Chunk size for indexing (tokens)
chunk_size = 400
# Overlap between chunks (tokens)
chunk_overlap = 80
[server]
# Enable HTTP server
enabled = true
# Port to listen on
port = 31327
# Bind address (127.0.0.1 for localhost only)
host = "127.0.0.1"
[logging]
# Log level: trace, debug, info, warn, error
level = "info"
# Log file path
file = "~/.localgpt/logs/agent.log"