pub struct AgentConfig {Show 36 fields
pub provider: String,
pub api_key_env: String,
pub default_model: String,
pub theme: String,
pub system_prompt_mode: SystemPromptMode,
pub tool_documentation_mode: ToolDocumentationMode,
pub enable_split_tool_results: bool,
pub todo_planning_mode: bool,
pub ui_surface: UiSurfacePreference,
pub max_conversation_turns: usize,
pub reasoning_effort: ReasoningEffortLevel,
pub verbosity: VerbosityLevel,
pub temperature: f32,
pub refine_temperature: f32,
pub enable_self_review: bool,
pub max_review_passes: usize,
pub refine_prompts_enabled: bool,
pub refine_prompts_max_passes: usize,
pub refine_prompts_model: String,
pub small_model: AgentSmallModelConfig,
pub onboarding: AgentOnboardingConfig,
pub project_doc_max_bytes: usize,
pub instruction_max_bytes: usize,
pub instruction_files: Vec<String>,
pub custom_prompts: AgentCustomPromptsConfig,
pub custom_slash_commands: AgentCustomSlashCommandsConfig,
pub custom_api_keys: BTreeMap<String, String>,
pub checkpointing: AgentCheckpointingConfig,
pub vibe_coding: AgentVibeCodingConfig,
pub max_task_retries: u32,
pub include_temporal_context: bool,
pub temporal_context_use_utc: bool,
pub include_working_directory: bool,
pub user_instructions: Option<String>,
pub default_editing_mode: EditingMode,
pub require_plan_confirmation: bool,
}Expand description
Agent-wide configuration
Fields§
provider: String — AI provider for single agent mode (gemini, openai, anthropic, openrouter, xai, zai)
api_key_env: StringEnvironment variable that stores the API key for the active provider
default_model: StringDefault model to use
theme: StringUI theme identifier controlling ANSI styling
system_prompt_mode: SystemPromptModeSystem prompt mode controlling verbosity and token overhead Options: minimal (~500-800 tokens), lightweight (~1-2k), default (~6-7k), specialized (~7-8k) Inspired by pi-coding-agent: modern models often perform well with minimal prompts
tool_documentation_mode: ToolDocumentationModeTool documentation mode controlling token overhead for tool definitions Options: minimal (~800 tokens), progressive (~1.2k), full (~3k current) Progressive: signatures upfront, detailed docs on-demand (recommended) Minimal: signatures only, pi-coding-agent style (power users) Full: all documentation upfront (current behavior, default)
enable_split_tool_results: boolEnable split tool results for massive token savings (Phase 4) When enabled, tools return dual-channel output:
- llm_content: Concise summary sent to LLM (token-optimized, 53-95% reduction)
- ui_content: Rich output displayed to user (full details preserved) Applies to: grep_file, list_files, read_file, run_pty_cmd, write_file, edit_file Default: true (opt-out for compatibility), recommended for production use
todo_planning_mode: bool (field description missing in this extraction — verify against source)
ui_surface: UiSurfacePreference — Preferred rendering surface for the interactive chat UI (auto, alternate, inline)
max_conversation_turns: usizeMaximum number of conversation turns before auto-termination
reasoning_effort: ReasoningEffortLevelReasoning effort level for models that support it (none, low, medium, high) Applies to: Claude, GPT-5, GPT-5.1, Gemini, Qwen3, DeepSeek with reasoning capability
verbosity: VerbosityLevelVerbosity level for output text (low, medium, high) Applies to: GPT-5.1 and other models that support verbosity control
temperature: f32Temperature for main LLM responses (0.0-1.0) Lower values = more deterministic, higher values = more creative Recommended: 0.7 for balanced creativity and consistency Range: 0.0 (deterministic) to 1.0 (maximum randomness)
refine_temperature: f32Temperature for prompt refinement (0.0-1.0, default: 0.3) Lower values ensure prompt refinement is more deterministic/consistent Keep lower than main temperature for stable prompt improvement
enable_self_review: boolEnable an extra self-review pass to refine final responses
max_review_passes: usizeMaximum number of self-review passes
refine_prompts_enabled: boolEnable prompt refinement pass before sending to LLM
refine_prompts_max_passes: usizeMax refinement passes for prompt writing
refine_prompts_model: StringOptional model override for the refiner (empty = auto pick efficient sibling)
small_model: AgentSmallModelConfigSmall/lightweight model configuration for efficient operations Used for tasks like large file reads, parsing, git history, conversation summarization Typically 70-80% cheaper than main model; ~50% of VT Code’s calls use this tier
onboarding: AgentOnboardingConfigSession onboarding and welcome message configuration
project_doc_max_bytes: usizeMaximum bytes of AGENTS.md content to load from project hierarchy
instruction_max_bytes: usizeMaximum bytes of instruction content to load from AGENTS.md hierarchy
instruction_files: Vec<String>Additional instruction files or globs to merge into the hierarchy
custom_prompts: AgentCustomPromptsConfigCustom prompt configuration for slash command shortcuts
custom_slash_commands: AgentCustomSlashCommandsConfigConfiguration for custom slash commands
custom_api_keys: BTreeMap<String, String>Provider-specific API keys captured from interactive configuration flows
checkpointing: AgentCheckpointingConfigCheckpointing configuration for automatic turn snapshots
vibe_coding: AgentVibeCodingConfigVibe coding configuration for lazy/vague request support
max_task_retries: u32Maximum number of retries for agent task execution (default: 2) When an agent task fails due to retryable errors (timeout, network, 503, etc.), it will be retried up to this many times with exponential backoff
include_temporal_context: boolInclude current date/time in system prompt for temporal awareness Helps LLM understand context for time-sensitive tasks (default: true)
temporal_context_use_utc: boolUse UTC instead of local time for temporal context (default: false)
include_working_directory: boolInclude current working directory in system prompt (default: true)
user_instructions: Option<String>Custom instructions provided by the user via configuration
default_editing_mode: EditingModeDefault editing mode: “edit” (default) or “plan” In “plan” mode, the agent is read-only and produces implementation plans. In “edit” mode, the agent can modify files and execute commands. Toggle with Shift+Tab or /plan command during a session. Codex-inspired: Encourages structured planning before execution.
require_plan_confirmation: boolRequire user confirmation before executing a plan (HITL pattern) When true, exiting plan mode shows the implementation blueprint and requires explicit user approval before enabling edit tools. Options in confirmation dialog: Execute, Edit Plan, Cancel
Implementations§
Source§impl AgentConfig
impl AgentConfig
Sourcepub fn validate_llm_params(&self) -> Result<(), String>
pub fn validate_llm_params(&self) -> Result<(), String>
Validate LLM generation parameters
Trait Implementations§
Source§impl Clone for AgentConfig
impl Clone for AgentConfig
Source§fn clone(&self) -> AgentConfig
fn clone(&self) -> AgentConfig
1.0.0 · fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
impl Debug for AgentConfig
impl Debug for AgentConfig
Source§impl Default for AgentConfig
impl Default for AgentConfig
Source§impl<'de> Deserialize<'de> for AgentConfig
impl<'de> Deserialize<'de> for AgentConfig
Source§fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>where
__D: Deserializer<'de>,
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>where
__D: Deserializer<'de>,
Source§impl JsonSchema for AgentConfig
impl JsonSchema for AgentConfig
Source§fn schema_name() -> String
fn schema_name() -> String
Source§fn schema_id() -> Cow<'static, str>
fn schema_id() -> Cow<'static, str>
Source§fn json_schema(generator: &mut SchemaGenerator) -> Schema
fn json_schema(generator: &mut SchemaGenerator) -> Schema
Source§fn is_referenceable() -> bool
fn is_referenceable() -> bool
$ref keyword. Read more