# VT Code Minimal Configuration File
# Essential configuration options only

[agent]
# Primary LLM provider to use (e.g., "openai", "gemini", "anthropic", "openrouter", "ollama")
provider = "ollama"
# Environment variable containing the API key for the provider
api_key_env = "OLLAMA_API_KEY"
# Model to use when none is specified explicitly
default_model = "nemotron-3-nano:30b-cloud"
# Visual theme for the terminal interface
theme = "vitesse-dark"
# Temperature for main LLM responses (0.0-1.0)
# Lower values = more deterministic, higher values = more creative
temperature = 0.7
# UI surface to use ("auto", "alternate", "inline")
ui_surface = "auto"
# Maximum number of conversation turns before rotating context (affects memory usage)
max_conversation_turns = 80
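
# Example: pointing the agent at a hosted provider means changing the
# provider, key variable, and model together. The model name below is
# illustrative; substitute whatever your provider currently offers.
# provider = "openai"
# api_key_env = "OPENAI_API_KEY"
# default_model = "gpt-4o"
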
[tools]
# Default policy applied when no tool-specific policy is defined ("allow", "prompt", "deny")
default_policy = "prompt"
# Maximum number of tool loops allowed per turn (prevents infinite loops)
max_tool_loops = 50
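
# Per-tool overrides are sketched below under an assumed [tools.policies]
# table; both the table name and the tool names are illustrative, so verify
# them against the full VT Code reference configuration before relying on them.
# [tools.policies]
# read_file = "allow"
# delete_file = "deny"
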
# Security configuration
[security]
# Require human confirmation for potentially dangerous actions
human_in_the_loop = true

# UI configuration
[ui]
# Tool output display mode
tool_output_mode = "compact"
# Maximum number of lines to display in tool output (prevents transcript flooding)
tool_output_max_lines = 50

# Status line configuration
[ui.status_line]
# Status line display mode; "auto" selects a sensible default
mode = "auto"

# PTY (pseudo-terminal) configuration
[pty]
# Enable PTY-backed command execution
enabled = true
# Default terminal dimensions (rows and columns) for new PTY sessions
default_rows = 24
default_cols = 120
# Maximum number of concurrent PTY sessions
max_sessions = 10
# Command timeout in seconds (3600 = 1 hour)
command_timeout_seconds = 3600

# Timeout ceilings
[timeouts]
# Maximum duration for standard (non-PTY) tools in seconds
default_ceiling_seconds = 180
# Maximum duration for PTY-backed commands in seconds
pty_ceiling_seconds = 300
# Maximum duration for streaming API responses in seconds
streaming_ceiling_seconds = 600
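
# With the ceilings above, a standard tool call is cut off after 180 s
# (3 minutes), a PTY-backed command after 300 s (5 minutes), and a streaming
# response after 600 s (10 minutes).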