# VT Code Minimal Configuration File
# Essential configuration options only
[agent]
# Primary LLM provider to use (e.g., "openai", "gemini", "anthropic", "openrouter")
provider = "ollama"
# Environment variable containing the API key for the provider
api_key_env = "OLLAMA_API_KEY"
# Default model to use when no specific model is specified
default_model = "nemotron-3-nano:30b-cloud"
# Visual theme for the terminal interface
theme = "vitesse-dark"
# Temperature for main LLM responses (0.0-1.0)
# Lower values = more deterministic, higher values = more creative
temperature = 0.7
# UI surface to use ("auto", "alternate", "inline")
ui_surface = "auto"
# Maximum number of conversation turns before rotating context (affects memory usage)
max_conversation_turns = 80
# Reasoning effort level ("none", "minimal", "low", "medium", "high") - affects model usage and response speed
reasoning_effort = "medium"
# Tool security configuration
[tools]
# Default policy when no specific policy is defined ("allow", "prompt", "deny")
# "allow"  - Execute without confirmation
# "prompt" - Ask for confirmation
# "deny"   - Block the tool
default_policy = "prompt"
# Maximum number of tool loops allowed per turn (prevents infinite loops)
max_tool_loops = 50
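# Per-tool overrides can tighten or relax the default policy. The table name
# and tool identifiers below are illustrative assumptions; check your
# version's reference configuration for the exact names.
# [tools.policies]
# read_file = "allow"
# delete_file = "deny"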
# Security configuration
[security]
# Require human confirmation for potentially dangerous actions
human_in_the_loop = true
# UI configuration
[ui]
# Tool output display mode
tool_output_mode = "compact"
# Maximum number of lines to display in tool output (prevents transcript flooding)
max_tool_output_lines = 50
# Status line configuration
[ui.statusline]
# Status line display mode
mode = "auto"
# PTY (Pseudo Terminal) configuration
[pty]
# Enable PTY-backed command execution
enabled = true
# Default terminal size for PTY sessions
default_rows = 24
default_cols = 120
# Maximum number of concurrent PTY sessions
max_sessions = 10
# PTY session timeout in seconds
session_timeout_seconds = 3600
# Timeouts
[timeouts]
# Maximum duration for standard (non-PTY) tools in seconds
tool_timeout_seconds = 180
# Maximum duration for PTY-backed commands in seconds
pty_timeout_seconds = 300
# Maximum duration for streaming API responses in seconds
streaming_timeout_seconds = 600
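# Long-running workflows (e.g., a full test suite inside a PTY session) may
# need a larger limit; the override below is an illustrative example, not a
# recommended value.
# pty_timeout_seconds = 1800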