pub struct AgentConfig {Show 26 fields
pub provider: String,
pub api_key_env: String,
pub default_model: String,
pub theme: String,
pub todo_planning_mode: bool,
pub ui_surface: UiSurfacePreference,
pub max_conversation_turns: usize,
pub reasoning_effort: ReasoningEffortLevel,
pub verbosity: VerbosityLevel,
pub temperature: f32,
pub max_tokens: u32,
pub refine_temperature: f32,
pub refine_max_tokens: u32,
pub enable_self_review: bool,
pub max_review_passes: usize,
pub refine_prompts_enabled: bool,
pub refine_prompts_max_passes: usize,
pub refine_prompts_model: String,
pub small_model: AgentSmallModelConfig,
pub onboarding: AgentOnboardingConfig,
pub project_doc_max_bytes: usize,
pub instruction_max_bytes: usize,
pub instruction_files: Vec<String>,
pub custom_prompts: AgentCustomPromptsConfig,
pub custom_api_keys: BTreeMap<String, String>,
pub checkpointing: AgentCheckpointingConfig,
}Expand description
Agent-wide configuration
Fields

provider: String — AI provider for single agent mode (gemini, openai, anthropic, openrouter, xai, zai)
api_key_env: String — Environment variable that stores the API key for the active provider
default_model: String — Default model to use
theme: String — UI theme identifier controlling ANSI styling
todo_planning_mode: bool — Enable TODO planning workflow integrations (update_plan tool, onboarding hints)
ui_surface: UiSurfacePreference — Preferred rendering surface for the interactive chat UI (auto, alternate, inline)
max_conversation_turns: usize — Maximum number of conversation turns before auto-termination
reasoning_effort: ReasoningEffortLevel — Reasoning effort level for models that support it (none, low, medium, high). Applies to: Claude, GPT-5, GPT-5.1, Gemini, Qwen3, DeepSeek with reasoning capability
verbosity: VerbosityLevel — Verbosity level for output text (low, medium, high). Applies to: GPT-5.1 and other models that support verbosity control
temperature: f32 — Temperature for main LLM responses (0.0-1.0). Lower values = more deterministic, higher values = more creative. Recommended: 0.7 for balanced creativity and consistency. Range: 0.0 (deterministic) to 1.0 (maximum randomness)
max_tokens: u32 — Maximum tokens for main LLM generation responses (default: 2000). Adjust based on model context window size:
- 2000 for standard tasks
- 16384 for models with 128k context
- 32768 for models with 256k context
refine_temperature: f32 — Temperature for prompt refinement (0.0-1.0, default: 0.3). Lower values ensure prompt refinement is more deterministic/consistent. Keep lower than main temperature for stable prompt improvement
refine_max_tokens: u32 — Maximum tokens for prompt refinement (default: 800). Prompts are typically shorter, so 800 tokens is usually sufficient
enable_self_review: bool — Enable an extra self-review pass to refine final responses
max_review_passes: usize — Maximum number of self-review passes
refine_prompts_enabled: bool — Enable prompt refinement pass before sending to LLM
refine_prompts_max_passes: usize — Max refinement passes for prompt writing
refine_prompts_model: String — Optional model override for the refiner (empty = auto pick efficient sibling)
small_model: AgentSmallModelConfig — Small/lightweight model configuration for efficient operations. Used for tasks like large file reads, parsing, git history, conversation summarization. Typically 70-80% cheaper than main model; ~50% of Claude Code's calls use this tier
onboarding: AgentOnboardingConfig — Session onboarding and welcome message configuration
project_doc_max_bytes: usize — Maximum bytes of AGENTS.md content to load from project hierarchy
instruction_max_bytes: usize — Maximum bytes of instruction content to load from AGENTS.md hierarchy
instruction_files: Vec<String> — Additional instruction files or globs to merge into the hierarchy
custom_prompts: AgentCustomPromptsConfig — Custom prompt configuration for slash command shortcuts
custom_api_keys: BTreeMap<String, String> — Provider-specific API keys captured from interactive configuration flows
checkpointing: AgentCheckpointingConfig — Checkpointing configuration for automatic turn snapshots
Implementations

impl AgentConfig
    pub fn validate_llm_params(&self) -> Result<(), String>
        Validate LLM generation parameters

Trait Implementations

impl Clone for AgentConfig
    fn clone(&self) -> AgentConfig
    fn clone_from(&mut self, source: &Self) (stable since 1.0.0)
        Performs copy-assignment from source. Read more

impl Debug for AgentConfig

impl Default for AgentConfig

impl<'de> Deserialize<'de> for AgentConfig
    fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
    where
        __D: Deserializer<'de>,

impl JsonSchema for AgentConfig
    fn schema_name() -> String
    fn schema_id() -> Cow<'static, str>
    fn json_schema(generator: &mut SchemaGenerator) -> Schema
    fn is_referenceable() -> bool
        Whether JSON Schemas generated for this type should be reused where possible using the $ref keyword. Read more