1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
# LocalGPT Configuration
# Copy to ~/.localgpt/config.toml
# NOTE(review): the table headers and key names below were reconstructed from
# the surrounding comments (the originals were stripped) — verify against
# LocalGPT's documented configuration schema.
[agent]
# Default model to use for chat
#
# Format: "provider/model-id" (OpenClaw-compatible)
#
# Anthropic API (recommended, requires ANTHROPIC_API_KEY):
# - "anthropic/claude-opus-4-5" (recommended)
# - "anthropic/claude-sonnet-4-5"
#
# Short aliases (auto-resolved to latest 4.5 models):
# - "opus" → anthropic/claude-opus-4-5
# - "sonnet" → anthropic/claude-sonnet-4-5
# - "gpt" → openai/gpt-4o
#
# OpenAI API (requires OPENAI_API_KEY):
# - "openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-4-turbo"
#
# GLM / Z.AI API (requires GLM API key):
# - "glm/glm-4.7" (or use alias "glm")
#
# Claude CLI (local, no API key needed):
# - "claude-cli/opus", "claude-cli/sonnet", "claude-cli/haiku"
#
# Ollama (local):
# - "ollama/llama3", "ollama/mistral", etc.
#
default_model = "claude-cli/opus"
# Context window size (in tokens)
context_window = 128000
# Reserve tokens for response
reserve_tokens = 8000
# Anthropic configuration (REQUIRED for default model)
# Get your API key at: https://console.anthropic.com/
[providers.anthropic]
api_key = "${ANTHROPIC_API_KEY}" # Set: export ANTHROPIC_API_KEY="sk-ant-..."
base_url = "https://api.anthropic.com"
# OpenAI configuration (optional)
# [providers.openai]
# api_key = "${OPENAI_API_KEY}"
# base_url = "https://api.openai.com/v1"
# Ollama configuration (for local models)
# [providers.ollama]
# endpoint = "http://localhost:11434"
# model = "llama3"
# Claude CLI configuration (uses local claude CLI command)
# Requires claude CLI to be installed: https://github.com/anthropics/claude-code
# [providers.claude_cli]
# command = "claude"
# model = "opus" # opus, sonnet, or haiku
# GLM / Z.AI configuration (optional)
# Get your API key at: https://z.ai/manage-apikey/apikey-list
# [providers.glm]
# api_key = "${GLM_API_KEY}"
# base_url = "https://api.z.ai/api/coding/paas/v4"
[heartbeat]
# Enable automatic heartbeat
enabled = true
# How often to check HEARTBEAT.md
interval = "30m"
# Only run during these hours (optional)
# [heartbeat.active_hours]
# start = "09:00"
# end = "22:00"
[memory]
# Where to store memory files
workspace_dir = "~/.localgpt/workspace"
# Embedding provider for semantic search: "local" (default), "gguf", "openai", or "none"
# - "local": Uses FastEmbed/ONNX (all-MiniLM-L6-v2), no API key needed
# - "gguf": Uses llama.cpp for GGUF models (requires --features gguf build)
# - "openai": Uses OpenAI embeddings (requires providers.openai config)
# - "none": FTS-only search, no vector embeddings
embedding_provider = "local"
# Embedding model for local provider (FastEmbed):
#
# English:
# - all-MiniLM-L6-v2 (default, ~80MB, fastest)
# - bge-base-en-v1.5 (~430MB, higher quality)
#
# Chinese:
# - bge-small-zh-v1.5 (~95MB, Chinese-specific)
#
# Multilingual (Chinese, Japanese, Korean, 100+ languages):
# - multilingual-e5-small (~470MB, compact)
# - multilingual-e5-base (~1.1GB, recommended for Chinese)
# - bge-m3 (~2.2GB, best quality)
#
# For OpenAI provider: text-embedding-3-small, text-embedding-3-large
#
# For GGUF provider (requires --features gguf):
# - embeddinggemma-300M-Q8_0.gguf (~320MB, 1024 dims, multilingual)
# - nomic-embed-text-v1.5.Q8_0.gguf (~270MB, 768 dims)
# Note: Download models from HuggingFace and place in embedding_cache_dir
embedding_model = "all-MiniLM-L6-v2"
# Cache directory for local embedding models (ONNX format)
# Models are downloaded from HuggingFace on first use
# Default: ~/.cache/localgpt/models
# Can also be set via FASTEMBED_CACHE_DIR environment variable
# embedding_cache_dir = "~/.cache/localgpt/models"
# Chunk size for indexing (tokens)
chunk_size = 400
# Overlap between chunks (tokens)
chunk_overlap = 80
[server]
# Enable HTTP server
enabled = true
# Port to listen on
port = 31327
# Bind address (127.0.0.1 for localhost only)
bind_address = "127.0.0.1"
# Telegram bot (optional)
# Create a bot via @BotFather on Telegram to get an API token
# [telegram]
# enabled = true
# api_token = "${TELEGRAM_BOT_TOKEN}"
[security]
# Abort on tamper or suspicious content in LocalGPT.md (default: false)
# strict_policy = false
# Skip loading the LocalGPT.md workspace security policy (default: false)
# disable_policy = false
# Skip the hardcoded security suffix injected at end of context (default: false)
# disable_suffix = false
[logging]
# Log level: trace, debug, info, warn, error
level = "info"
# Log file path
file = "~/.localgpt/logs/agent.log"