pub struct AgentConfig {Show 39 fields
pub provider: String,
pub api_key_env: String,
pub default_model: String,
pub theme: String,
pub system_prompt_mode: SystemPromptMode,
pub tool_documentation_mode: ToolDocumentationMode,
pub enable_split_tool_results: bool,
pub todo_planning_mode: bool,
pub ui_surface: UiSurfacePreference,
pub max_conversation_turns: usize,
pub reasoning_effort: ReasoningEffortLevel,
pub verbosity: VerbosityLevel,
pub temperature: f32,
pub refine_temperature: f32,
pub enable_self_review: bool,
pub max_review_passes: usize,
pub refine_prompts_enabled: bool,
pub refine_prompts_max_passes: usize,
pub refine_prompts_model: String,
pub small_model: AgentSmallModelConfig,
pub onboarding: AgentOnboardingConfig,
pub project_doc_max_bytes: usize,
pub instruction_max_bytes: usize,
pub instruction_files: Vec<String>,
pub custom_api_keys: BTreeMap<String, String>,
pub credential_storage_mode: AuthCredentialsStoreMode,
pub checkpointing: AgentCheckpointingConfig,
pub vibe_coding: AgentVibeCodingConfig,
pub max_task_retries: u32,
pub harness: AgentHarnessConfig,
pub include_temporal_context: bool,
pub temporal_context_use_utc: bool,
pub include_working_directory: bool,
pub user_instructions: Option<String>,
pub default_editing_mode: EditingMode,
pub require_plan_confirmation: bool,
pub autonomous_mode: bool,
pub circuit_breaker: CircuitBreakerConfig,
pub open_responses: OpenResponsesConfig,
}Expand description
Agent-wide configuration
Fields§

`provider: String` — AI provider for single agent mode (gemini, openai, anthropic, openrouter, xai, zai)

`api_key_env: String` — Environment variable that stores the API key for the active provider

`default_model: String` — Default model to use

`theme: String` — UI theme identifier controlling ANSI styling

`system_prompt_mode: SystemPromptMode` — System prompt mode controlling verbosity and token overhead. Options: minimal (~500-800 tokens), lightweight (~1-2k), default (~6-7k), specialized (~7-8k). Inspired by pi-coding-agent: modern models often perform well with minimal prompts

`tool_documentation_mode: ToolDocumentationMode` — Tool documentation mode controlling token overhead for tool definitions. Options: minimal (~800 tokens), progressive (~1.2k), full (~3k current). Progressive: signatures upfront, detailed docs on-demand (recommended). Minimal: signatures only, pi-coding-agent style (power users). Full: all documentation upfront (current behavior, default)

`enable_split_tool_results: bool` — Enable split tool results for massive token savings (Phase 4). When enabled, tools return dual-channel output:
- llm_content: Concise summary sent to LLM (token-optimized, 53-95% reduction)
- ui_content: Rich output displayed to user (full details preserved)
Applies to: grep_file, list_files, read_file, run_pty_cmd, write_file, edit_file. Default: true (opt-out for compatibility), recommended for production use
`todo_planning_mode: bool` — Enable TODO planning helper mode for structured task management

`ui_surface: UiSurfacePreference` — Preferred rendering surface for the interactive chat UI (auto, alternate, inline)

`max_conversation_turns: usize` — Maximum number of conversation turns before auto-termination

`reasoning_effort: ReasoningEffortLevel` — Reasoning effort level for models that support it (none, low, medium, high). Applies to: Claude, GPT-5, GPT-5.1, Gemini, Qwen3, DeepSeek with reasoning capability

`verbosity: VerbosityLevel` — Verbosity level for output text (low, medium, high). Applies to: GPT-5.1 and other models that support verbosity control

`temperature: f32` — Temperature for main LLM responses (0.0-1.0). Lower values = more deterministic, higher values = more creative. Recommended: 0.7 for balanced creativity and consistency. Range: 0.0 (deterministic) to 1.0 (maximum randomness)

`refine_temperature: f32` — Temperature for prompt refinement (0.0-1.0, default: 0.3). Lower values ensure prompt refinement is more deterministic/consistent. Keep lower than main temperature for stable prompt improvement

`enable_self_review: bool` — Enable an extra self-review pass to refine final responses

`max_review_passes: usize` — Maximum number of self-review passes

`refine_prompts_enabled: bool` — Enable prompt refinement pass before sending to LLM

`refine_prompts_max_passes: usize` — Max refinement passes for prompt writing

`refine_prompts_model: String` — Optional model override for the refiner (empty = auto pick efficient sibling)
`small_model: AgentSmallModelConfig` — Small/lightweight model configuration for efficient operations. Used for tasks like large file reads, parsing, git history, conversation summarization. Typically 70-80% cheaper than main model; ~50% of VT Code's calls use this tier

`onboarding: AgentOnboardingConfig` — Session onboarding and welcome message configuration

`project_doc_max_bytes: usize` — Maximum bytes of AGENTS.md content to load from project hierarchy

`instruction_max_bytes: usize` — Maximum bytes of instruction content to load from AGENTS.md hierarchy

`instruction_files: Vec<String>` — Additional instruction files or globs to merge into the hierarchy

`custom_api_keys: BTreeMap<String, String>` — Provider-specific API keys captured from interactive configuration flows

`credential_storage_mode: AuthCredentialsStoreMode` — Preferred storage backend for credentials (OAuth tokens, API keys, etc.):
- keyring: Use OS-specific secure storage (macOS Keychain, Windows Credential Manager, Linux Secret Service). This is the default as it's the most secure.
- file: Use AES-256-GCM encrypted file with machine-derived key.
- auto: Try keyring first, fall back to file if unavailable.

`checkpointing: AgentCheckpointingConfig` — Checkpointing configuration for automatic turn snapshots

`vibe_coding: AgentVibeCodingConfig` — Vibe coding configuration for lazy or vague request support

`max_task_retries: u32` — Maximum number of retries for agent task execution (default: 2). When an agent task fails due to retryable errors (timeout, network, 503, etc.), it will be retried up to this many times with exponential backoff
`harness: AgentHarnessConfig` — Harness configuration for turn-level budgets, telemetry, and execution limits

`include_temporal_context: bool` — Include current date/time in system prompt for temporal awareness. Helps LLM understand context for time-sensitive tasks (default: true)

`temporal_context_use_utc: bool` — Use UTC instead of local time for temporal context in system prompts

`include_working_directory: bool` — Include current working directory in system prompt (default: true)

`user_instructions: Option<String>` — Custom instructions provided by the user via configuration to guide agent behavior

`default_editing_mode: EditingMode` — Default editing mode on startup: "edit" (default) or "plan". Codex-inspired: encourages structured planning before execution

`require_plan_confirmation: bool` — Require user confirmation before executing a plan generated in plan mode. When true, exiting plan mode shows the implementation blueprint and requires explicit user approval before enabling edit tools

`autonomous_mode: bool` — Enable autonomous mode: auto-approve safe tools with reduced HITL prompts. When true, the agent operates with fewer confirmation prompts for safe tools

`circuit_breaker: CircuitBreakerConfig` — Circuit breaker configuration for resilient tool execution. Controls when the agent should pause and ask for user guidance due to repeated failures

`open_responses: OpenResponsesConfig` — Open Responses specification compliance configuration. Enables vendor-neutral LLM API format for interoperable workflows
Implementations§
Source§impl AgentConfig
impl AgentConfig
Sourcepub fn validate_llm_params(&self) -> Result<(), String>
pub fn validate_llm_params(&self) -> Result<(), String>
Validate LLM generation parameters
Trait Implementations§
Source§impl Clone for AgentConfig
impl Clone for AgentConfig
Source§fn clone(&self) -> AgentConfig
fn clone(&self) -> AgentConfig
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more

Source§impl Debug for AgentConfig
impl Debug for AgentConfig
Source§impl Default for AgentConfig
impl Default for AgentConfig
Source§impl<'de> Deserialize<'de> for AgentConfig
impl<'de> Deserialize<'de> for AgentConfig
Source§fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>where
__D: Deserializer<'de>,
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>where
__D: Deserializer<'de>,
Source§impl JsonSchema for AgentConfig
impl JsonSchema for AgentConfig
Source§fn schema_id() -> Cow<'static, str>
fn schema_id() -> Cow<'static, str>
Source§fn json_schema(generator: &mut SchemaGenerator) -> Schema
fn json_schema(generator: &mut SchemaGenerator) -> Schema
Source§fn inline_schema() -> bool
fn inline_schema() -> bool
$ref keyword. Read more

Auto Trait Implementations§
impl Freeze for AgentConfig
impl RefUnwindSafe for AgentConfig
impl Send for AgentConfig
impl Sync for AgentConfig
impl Unpin for AgentConfig
impl UnsafeUnpin for AgentConfig
impl UnwindSafe for AgentConfig
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for Twhere
T: Clone,
impl<T> CloneToUninit for Twhere
T: Clone,
Source§impl<T> Instrument for T
impl<T> Instrument for T
Source§fn instrument(self, span: Span) -> Instrumented<Self>
fn instrument(self, span: Span) -> Instrumented<Self>
Source§fn in_current_span(self) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more

Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more