//! Agent-wide configuration (`vtcode_config/core/agent.rs`).
1use crate::constants::{defaults, llm_generation, prompt_budget};
2use crate::types::{
3    ReasoningEffortLevel, SystemPromptMode, ToolDocumentationMode, UiSurfacePreference,
4    VerbosityLevel,
5};
6use serde::{Deserialize, Serialize};
7use std::collections::BTreeMap;
8
// Checkpointing retention fallbacks — presumably consumed by the
// `default_checkpointing_*` helpers referenced by `AgentCheckpointingConfig`
// (defined past this excerpt); confirm against the rest of the file.
const DEFAULT_CHECKPOINTS_ENABLED: bool = true;
const DEFAULT_MAX_SNAPSHOTS: usize = 50;
const DEFAULT_MAX_AGE_DAYS: u64 = 30;
12
/// Agent-wide configuration
///
/// Every field carries a serde default, so a partial (or empty) config file
/// deserializes successfully; the concrete defaults live in the `default_*`
/// helper functions at the bottom of this module.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentConfig {
    /// AI provider for single agent mode (gemini, openai, anthropic, openrouter, zai)
    #[serde(default = "default_provider")]
    pub provider: String,

    /// Environment variable that stores the API key for the active provider
    #[serde(default = "default_api_key_env")]
    pub api_key_env: String,

    /// Default model to use
    #[serde(default = "default_model")]
    pub default_model: String,

    /// UI theme identifier controlling ANSI styling
    #[serde(default = "default_theme")]
    pub theme: String,

    /// System prompt mode controlling prompt verbosity and token overhead.
    /// Options target lean base prompts: minimal (~150-250 tokens), lightweight/default
    /// (~250-350 tokens), specialized (~350-500 tokens) before dynamic runtime addenda.
    #[serde(default)]
    pub system_prompt_mode: SystemPromptMode,

    /// Tool documentation mode controlling token overhead for tool definitions
    /// Options: minimal (~800 tokens), progressive (~1.2k), full (~3k current)
    /// Progressive: signatures upfront, detailed docs on-demand (recommended)
    /// Minimal: signatures only, pi-coding-agent style (power users)
    /// Full: all documentation upfront (current behavior, default)
    #[serde(default)]
    pub tool_documentation_mode: ToolDocumentationMode,

    /// Enable split tool results for massive token savings (Phase 4)
    /// When enabled, tools return dual-channel output:
    /// - llm_content: Concise summary sent to LLM (token-optimized, 53-95% reduction)
    /// - ui_content: Rich output displayed to user (full details preserved)
    ///   Applies to: unified_search, unified_file, unified_exec
    ///   Default: true (opt-out for compatibility), recommended for production use
    #[serde(default = "default_enable_split_tool_results")]
    pub enable_split_tool_results: bool,

    /// Enable TODO planning helper mode for structured task management
    #[serde(default = "default_todo_planning_mode")]
    pub todo_planning_mode: bool,

    /// Preferred rendering surface for the interactive chat UI (auto, alternate, inline)
    #[serde(default)]
    pub ui_surface: UiSurfacePreference,

    /// Maximum number of conversation turns before auto-termination
    #[serde(default = "default_max_conversation_turns")]
    pub max_conversation_turns: usize,

    /// Reasoning effort level for models that support it (none, minimal, low, medium, high, xhigh)
    /// Applies to: Claude, GPT-5 family, Gemini, Qwen3, DeepSeek with reasoning capability
    #[serde(default = "default_reasoning_effort")]
    pub reasoning_effort: ReasoningEffortLevel,

    /// Verbosity level for output text (low, medium, high)
    /// Applies to: GPT-5.4-family Responses workflows and other models that support verbosity control
    #[serde(default = "default_verbosity")]
    pub verbosity: VerbosityLevel,

    /// Temperature for main LLM responses (0.0-1.0)
    /// Lower values = more deterministic, higher values = more creative
    /// Recommended: 0.7 for balanced creativity and consistency
    /// Range: 0.0 (deterministic) to 1.0 (maximum randomness)
    /// Enforced by `validate_llm_params`.
    #[serde(default = "default_temperature")]
    pub temperature: f32,

    /// Temperature for prompt refinement (0.0-1.0, default: 0.3)
    /// Lower values ensure prompt refinement is more deterministic/consistent
    /// Keep lower than main temperature for stable prompt improvement
    /// Enforced by `validate_llm_params`.
    #[serde(default = "default_refine_temperature")]
    pub refine_temperature: f32,

    /// Enable an extra self-review pass to refine final responses
    #[serde(default = "default_enable_self_review")]
    pub enable_self_review: bool,

    /// Maximum number of self-review passes
    #[serde(default = "default_max_review_passes")]
    pub max_review_passes: usize,

    /// Enable prompt refinement pass before sending to LLM
    #[serde(default = "default_refine_prompts_enabled")]
    pub refine_prompts_enabled: bool,

    /// Max refinement passes for prompt writing
    #[serde(default = "default_refine_max_passes")]
    pub refine_prompts_max_passes: usize,

    /// Optional model override for the refiner (empty = auto pick efficient sibling)
    #[serde(default)]
    pub refine_prompts_model: String,

    /// Small/lightweight model configuration for efficient operations
    /// Used for tasks like large file reads, parsing, git history, conversation summarization
    /// Typically 70-80% cheaper than main model; ~50% of VT Code's calls use this tier
    #[serde(default)]
    pub small_model: AgentSmallModelConfig,

    /// Inline prompt suggestion configuration for the chat composer
    #[serde(default)]
    pub prompt_suggestions: AgentPromptSuggestionsConfig,

    /// Session onboarding and welcome message configuration
    #[serde(default)]
    pub onboarding: AgentOnboardingConfig,

    /// Maximum bytes of AGENTS.md content to load from project hierarchy
    #[serde(default = "default_project_doc_max_bytes")]
    pub project_doc_max_bytes: usize,

    /// Additional filenames to check when AGENTS.md is absent at a directory level.
    #[serde(default)]
    pub project_doc_fallback_filenames: Vec<String>,

    /// Maximum bytes of instruction content to load from AGENTS.md hierarchy
    /// (`rule_doc_max_bytes` is accepted as a legacy alias).
    #[serde(
        default = "default_instruction_max_bytes",
        alias = "rule_doc_max_bytes"
    )]
    pub instruction_max_bytes: usize,

    /// Additional instruction files or globs to merge into the hierarchy
    /// (`instruction_paths` and `instructions` are accepted as aliases).
    #[serde(default, alias = "instruction_paths", alias = "instructions")]
    pub instruction_files: Vec<String>,

    /// Instruction files or globs to exclude from AGENTS.md and rules discovery
    #[serde(default)]
    pub instruction_excludes: Vec<String>,

    /// Maximum recursive `@path` import depth for instruction and rule files
    /// (must be > 0; enforced by `validate_llm_params`).
    #[serde(default = "default_instruction_import_max_depth")]
    pub instruction_import_max_depth: usize,

    /// Durable per-repository memory for main sessions
    #[serde(default)]
    pub persistent_memory: PersistentMemoryConfig,

    /// Provider-specific API keys captured from interactive configuration flows
    ///
    /// Note: Actual API keys are stored securely in the OS keyring.
    /// This field only tracks which providers have keys stored (for UI/migration purposes).
    /// The keys themselves are NOT serialized to the config file for security
    /// (`skip_serializing` below).
    #[serde(default, skip_serializing)]
    pub custom_api_keys: BTreeMap<String, String>,

    /// Preferred storage backend for credentials (OAuth tokens, API keys, etc.)
    ///
    /// - `keyring`: Use OS-specific secure storage (macOS Keychain, Windows Credential
    ///   Manager, Linux Secret Service). This is the default as it's the most secure.
    /// - `file`: Use AES-256-GCM encrypted file with machine-derived key
    /// - `auto`: Try keyring first, fall back to file if unavailable
    #[serde(default)]
    pub credential_storage_mode: crate::auth::AuthCredentialsStoreMode,

    /// Checkpointing configuration for automatic turn snapshots
    #[serde(default)]
    pub checkpointing: AgentCheckpointingConfig,

    /// Vibe coding configuration for lazy or vague request support
    #[serde(default)]
    pub vibe_coding: AgentVibeCodingConfig,

    /// Maximum number of retries for agent task execution (default: 2)
    /// When an agent task fails due to retryable errors (timeout, network, 503, etc.),
    /// it will be retried up to this many times with exponential backoff
    #[serde(default = "default_max_task_retries")]
    pub max_task_retries: u32,

    /// Harness configuration for turn-level budgets, telemetry, and execution limits
    #[serde(default)]
    pub harness: AgentHarnessConfig,

    /// Experimental Codex app-server sidecar configuration.
    #[serde(default)]
    pub codex_app_server: AgentCodexAppServerConfig,

    /// Include current date/time in system prompt for temporal awareness
    /// Helps LLM understand context for time-sensitive tasks (default: true)
    #[serde(default = "default_include_temporal_context")]
    pub include_temporal_context: bool,

    /// Use UTC instead of local time for temporal context in system prompts
    #[serde(default)]
    pub temporal_context_use_utc: bool,

    /// Include current working directory in system prompt (default: true)
    #[serde(default = "default_include_working_directory")]
    pub include_working_directory: bool,

    /// Controls inclusion of the structured reasoning tag instructions block.
    ///
    /// Behavior:
    /// - `Some(true)`: always include structured reasoning instructions.
    /// - `Some(false)`: never include structured reasoning instructions.
    /// - `None` (default): include only for `default` and `specialized` prompt modes.
    ///
    /// This keeps lightweight/minimal prompts smaller by default while allowing
    /// explicit opt-in when users want tag-based reasoning guidance.
    ///
    /// See `should_include_structured_reasoning_tags` for the resolution logic.
    #[serde(default)]
    pub include_structured_reasoning_tags: Option<bool>,

    /// Custom instructions provided by the user via configuration to guide agent behavior
    #[serde(default)]
    pub user_instructions: Option<String>,

    /// Require user confirmation before executing a plan generated in plan mode
    /// When true, exiting plan mode shows the implementation blueprint and
    /// requires explicit user approval before enabling edit tools.
    #[serde(default = "default_require_plan_confirmation")]
    pub require_plan_confirmation: bool,

    /// Circuit breaker configuration for resilient tool execution
    /// Controls when the agent should pause and ask for user guidance due to repeated failures
    #[serde(default)]
    pub circuit_breaker: CircuitBreakerConfig,

    /// Open Responses specification compliance configuration
    /// Enables vendor-neutral LLM API format for interoperable workflows
    #[serde(default)]
    pub open_responses: OpenResponsesConfig,
}
240
/// Policy controlling which harness-managed continuation loops may run.
///
/// Serialized in snake_case; deserialization is handled by the lenient
/// manual `Deserialize` impl below.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "schema", schemars(rename_all = "snake_case"))]
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ContinuationPolicy {
    /// No continuation loops.
    Off,
    /// Continuation restricted to exec flows only.
    ExecOnly,
    /// Continuation for all flows (the default).
    #[default]
    All,
}
251
252impl ContinuationPolicy {
253    pub fn as_str(&self) -> &'static str {
254        match self {
255            Self::Off => "off",
256            Self::ExecOnly => "exec_only",
257            Self::All => "all",
258        }
259    }
260
261    pub fn parse(value: &str) -> Option<Self> {
262        let normalized = value.trim();
263        if normalized.eq_ignore_ascii_case("off") {
264            Some(Self::Off)
265        } else if normalized.eq_ignore_ascii_case("exec_only")
266            || normalized.eq_ignore_ascii_case("exec-only")
267        {
268            Some(Self::ExecOnly)
269        } else if normalized.eq_ignore_ascii_case("all") {
270            Some(Self::All)
271        } else {
272            None
273        }
274    }
275}
276
/// Lenient string-based deserialization.
///
/// NOTE(review): any unrecognized value silently falls back to
/// `ContinuationPolicy::default()` (`All`, the most permissive policy)
/// instead of surfacing a deserialization error — confirm this leniency is
/// intentional (the same pattern is used for `HarnessOrchestrationMode`).
impl<'de> Deserialize<'de> for ContinuationPolicy {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Accept any string; unknown values degrade to the default policy.
        let raw = String::deserialize(deserializer)?;
        Ok(Self::parse(&raw).unwrap_or_default())
    }
}
286
/// Orchestration path for the exec/full-auto harness.
///
/// Serialized in snake_case; deserialization is handled by the lenient
/// manual `Deserialize` impl below.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "schema", schemars(rename_all = "snake_case"))]
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum HarnessOrchestrationMode {
    /// Single-agent orchestration (the default).
    #[default]
    Single,
    /// Plan / build / evaluate multi-phase orchestration.
    PlanBuildEvaluate,
}
296
297impl HarnessOrchestrationMode {
298    pub fn as_str(&self) -> &'static str {
299        match self {
300            Self::Single => "single",
301            Self::PlanBuildEvaluate => "plan_build_evaluate",
302        }
303    }
304
305    pub fn parse(value: &str) -> Option<Self> {
306        let normalized = value.trim();
307        if normalized.eq_ignore_ascii_case("single") {
308            Some(Self::Single)
309        } else if normalized.eq_ignore_ascii_case("plan_build_evaluate")
310            || normalized.eq_ignore_ascii_case("plan-build-evaluate")
311            || normalized.eq_ignore_ascii_case("planner_generator_evaluator")
312            || normalized.eq_ignore_ascii_case("planner-generator-evaluator")
313        {
314            Some(Self::PlanBuildEvaluate)
315        } else {
316            None
317        }
318    }
319}
320
/// Lenient string-based deserialization.
///
/// NOTE(review): unrecognized values silently fall back to
/// `HarnessOrchestrationMode::default()` (`Single`) instead of raising a
/// deserialization error — confirm this leniency is intentional.
impl<'de> Deserialize<'de> for HarnessOrchestrationMode {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Accept any string; unknown values degrade to the default mode.
        let raw = String::deserialize(deserializer)?;
        Ok(Self::parse(&raw).unwrap_or_default())
    }
}
330
/// Harness-level execution limits: per-turn tool budgets, context
/// compaction, tool-result clearing, cost caps, continuation policy,
/// event logging, and orchestration mode.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentHarnessConfig {
    /// Maximum number of tool calls allowed per turn. Set to `0` to disable the cap.
    #[serde(default = "default_harness_max_tool_calls_per_turn")]
    pub max_tool_calls_per_turn: usize,
    /// Maximum wall clock time (seconds) for tool execution in a turn
    #[serde(default = "default_harness_max_tool_wall_clock_secs")]
    pub max_tool_wall_clock_secs: u64,
    /// Maximum retries for retryable tool errors
    #[serde(default = "default_harness_max_tool_retries")]
    pub max_tool_retries: u32,
    /// Enable automatic context compaction when token pressure crosses threshold.
    ///
    /// Disabled by default. When disabled, no automatic compaction is triggered.
    #[serde(default = "default_harness_auto_compaction_enabled")]
    pub auto_compaction_enabled: bool,
    /// Optional absolute compact threshold (tokens) for Responses server-side compaction.
    ///
    /// When unset, VT Code derives a threshold from the provider context window.
    #[serde(default)]
    pub auto_compaction_threshold_tokens: Option<u64>,
    /// Provider-native tool-result clearing policy.
    #[serde(default)]
    pub tool_result_clearing: ToolResultClearingConfig,
    /// Optional maximum estimated API cost in USD before VT Code stops the session.
    /// `None` means no cost cap.
    #[serde(default)]
    pub max_budget_usd: Option<f64>,
    /// Controls whether harness-managed continuation loops are enabled.
    #[serde(default)]
    pub continuation_policy: ContinuationPolicy,
    /// Optional JSONL event log path for harness events.
    /// Defaults to `~/.vtcode/sessions/` when unset.
    #[serde(default)]
    pub event_log_path: Option<String>,
    /// Select the exec/full-auto harness orchestration path.
    #[serde(default)]
    pub orchestration_mode: HarnessOrchestrationMode,
    /// Maximum generator revision rounds after evaluator rejection.
    #[serde(default = "default_harness_max_revision_rounds")]
    pub max_revision_rounds: usize,
}
373
impl Default for AgentHarnessConfig {
    /// Mirrors the per-field serde defaults so programmatic construction and
    /// deserialization of an empty table agree.
    fn default() -> Self {
        Self {
            max_tool_calls_per_turn: default_harness_max_tool_calls_per_turn(),
            max_tool_wall_clock_secs: default_harness_max_tool_wall_clock_secs(),
            max_tool_retries: default_harness_max_tool_retries(),
            auto_compaction_enabled: default_harness_auto_compaction_enabled(),
            auto_compaction_threshold_tokens: None,
            tool_result_clearing: ToolResultClearingConfig::default(),
            max_budget_usd: None,
            continuation_policy: ContinuationPolicy::default(),
            event_log_path: None,
            orchestration_mode: HarnessOrchestrationMode::default(),
            max_revision_rounds: default_harness_max_revision_rounds(),
        }
    }
}
391
/// Provider-native tool-result clearing policy (see
/// `AgentHarnessConfig::tool_result_clearing`).
///
/// Numeric thresholds are checked by [`ToolResultClearingConfig::validate`].
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ToolResultClearingConfig {
    /// Master switch; clearing is disabled by default.
    #[serde(default = "default_tool_result_clearing_enabled")]
    pub enabled: bool,
    /// Token count that triggers a clearing pass (default: 100_000; must be > 0).
    #[serde(default = "default_tool_result_clearing_trigger_tokens")]
    pub trigger_tokens: u64,
    /// Number of tool uses left intact by a clearing pass (default: 3; must be > 0).
    /// Presumably the most recent uses — confirm against the harness implementation.
    #[serde(default = "default_tool_result_clearing_keep_tool_uses")]
    pub keep_tool_uses: u32,
    /// Minimum tokens a clearing pass should reclaim (default: 30_000; must be > 0).
    #[serde(default = "default_tool_result_clearing_clear_at_least_tokens")]
    pub clear_at_least_tokens: u64,
    /// Also clear tool call inputs, not just results (default: false).
    #[serde(default)]
    pub clear_tool_inputs: bool,
}
406
impl Default for ToolResultClearingConfig {
    /// Mirrors the per-field serde defaults; `clear_tool_inputs` defaults to
    /// `false` (matching the bare `#[serde(default)]` on the field).
    fn default() -> Self {
        Self {
            enabled: default_tool_result_clearing_enabled(),
            trigger_tokens: default_tool_result_clearing_trigger_tokens(),
            keep_tool_uses: default_tool_result_clearing_keep_tool_uses(),
            clear_at_least_tokens: default_tool_result_clearing_clear_at_least_tokens(),
            clear_tool_inputs: false,
        }
    }
}
418
419impl ToolResultClearingConfig {
420    pub fn validate(&self) -> Result<(), String> {
421        if self.trigger_tokens == 0 {
422            return Err("tool_result_clearing.trigger_tokens must be greater than 0".to_string());
423        }
424        if self.keep_tool_uses == 0 {
425            return Err("tool_result_clearing.keep_tool_uses must be greater than 0".to_string());
426        }
427        if self.clear_at_least_tokens == 0 {
428            return Err(
429                "tool_result_clearing.clear_at_least_tokens must be greater than 0".to_string(),
430            );
431        }
432        Ok(())
433    }
434}
435
/// Configuration for the experimental Codex app-server sidecar process.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentCodexAppServerConfig {
    /// Executable used to launch the official Codex app-server sidecar.
    /// Default: "codex".
    #[serde(default = "default_codex_app_server_command")]
    pub command: String,
    /// Arguments passed before VT Code appends `--listen stdio://`.
    /// Default: ["app-server"].
    #[serde(default = "default_codex_app_server_args")]
    pub args: Vec<String>,
    /// Maximum startup handshake time when launching the sidecar (default: 10s).
    #[serde(default = "default_codex_app_server_startup_timeout_secs")]
    pub startup_timeout_secs: u64,
    /// Enable experimental Codex app-server features such as collaboration modes
    /// and native review routing. Default: false.
    #[serde(default = "default_codex_app_server_experimental_features")]
    pub experimental_features: bool,
}
453
impl Default for AgentCodexAppServerConfig {
    /// Mirrors the per-field serde defaults.
    fn default() -> Self {
        Self {
            command: default_codex_app_server_command(),
            args: default_codex_app_server_args(),
            startup_timeout_secs: default_codex_app_server_startup_timeout_secs(),
            experimental_features: default_codex_app_server_experimental_features(),
        }
    }
}
464
/// Circuit breaker settings for resilient tool execution: decides when
/// repeated tool failures should pause the agent and ask for user guidance.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CircuitBreakerConfig {
    /// Enable circuit breaker functionality (default: true)
    #[serde(default = "default_circuit_breaker_enabled")]
    pub enabled: bool,

    /// Number of consecutive failures before opening circuit (default: 7)
    #[serde(default = "default_failure_threshold")]
    pub failure_threshold: u32,

    /// Pause and ask user when circuit opens (vs auto-backoff; default: true)
    #[serde(default = "default_pause_on_open")]
    pub pause_on_open: bool,

    /// Number of open circuits before triggering pause (default: 3)
    #[serde(default = "default_max_open_circuits")]
    pub max_open_circuits: usize,

    /// Cooldown period between recovery prompts (seconds; default: 60)
    #[serde(default = "default_recovery_cooldown")]
    pub recovery_cooldown: u64,
}
488
impl Default for CircuitBreakerConfig {
    /// Mirrors the per-field serde defaults.
    fn default() -> Self {
        Self {
            enabled: default_circuit_breaker_enabled(),
            failure_threshold: default_failure_threshold(),
            pause_on_open: default_pause_on_open(),
            max_open_circuits: default_max_open_circuits(),
            recovery_cooldown: default_recovery_cooldown(),
        }
    }
}
500
/// Open Responses specification compliance configuration
///
/// Enables vendor-neutral LLM API format per the Open Responses specification
/// (<https://www.openresponses.org/>). When enabled, VT Code emits semantic
/// streaming events and uses standardized response/item structures.
///
/// The sub-feature flags below only take effect when `enabled` is true.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct OpenResponsesConfig {
    /// Enable Open Responses specification compliance layer
    /// When true, VT Code emits semantic streaming events alongside internal events
    /// Default: false (opt-in feature)
    #[serde(default)]
    pub enabled: bool,

    /// Emit Open Responses events to the event sink
    /// When true, streaming events follow Open Responses format
    /// (response.created, response.output_item.added, response.output_text.delta, etc.)
    #[serde(default = "default_open_responses_emit_events")]
    pub emit_events: bool,

    /// Include VT Code extension items (vtcode:file_change, vtcode:web_search, etc.)
    /// When false, extension items are omitted from the Open Responses output
    #[serde(default = "default_open_responses_include_extensions")]
    pub include_extensions: bool,

    /// Map internal tool calls to Open Responses function_call items
    /// When true, command executions and MCP tool calls are represented as function_call items
    #[serde(default = "default_open_responses_map_tool_calls")]
    pub map_tool_calls: bool,

    /// Include reasoning items in Open Responses output
    /// When true, model reasoning/thinking is exposed as reasoning items
    #[serde(default = "default_open_responses_include_reasoning")]
    pub include_reasoning: bool,
}
536
impl Default for OpenResponsesConfig {
    /// Compliance layer is opt-in; every sub-feature defaults to on so that
    /// flipping `enabled` alone yields full Open Responses output.
    fn default() -> Self {
        Self {
            enabled: false, // Opt-in by default
            emit_events: default_open_responses_emit_events(),
            include_extensions: default_open_responses_include_extensions(),
            map_tool_calls: default_open_responses_map_tool_calls(),
            include_reasoning: default_open_responses_include_reasoning(),
        }
    }
}
548
// --- Open Responses sub-feature defaults (all on once `enabled` is set) ---

#[inline]
const fn default_open_responses_emit_events() -> bool {
    true // When enabled, emit events by default
}

#[inline]
const fn default_open_responses_include_extensions() -> bool {
    true // Include VT Code-specific extensions by default
}

#[inline]
const fn default_open_responses_map_tool_calls() -> bool {
    true // Map tool calls to function_call items by default
}

#[inline]
const fn default_open_responses_include_reasoning() -> bool {
    true // Include reasoning items by default
}
568
#[inline]
fn default_codex_app_server_command() -> String {
    // Launch the `codex` binary resolved from PATH by default.
    String::from("codex")
}
573
#[inline]
fn default_codex_app_server_args() -> Vec<String> {
    // Single default argument: the `app-server` subcommand.
    ["app-server"].map(String::from).to_vec()
}
578
#[inline]
const fn default_codex_app_server_startup_timeout_secs() -> u64 {
    10 // Seconds allowed for the sidecar startup handshake
}

#[inline]
const fn default_codex_app_server_experimental_features() -> bool {
    false // Experimental sidecar features are opt-in
}
588
impl Default for AgentConfig {
    /// Mirrors the per-field serde defaults so a programmatically constructed
    /// config matches one deserialized from an empty file.
    fn default() -> Self {
        Self {
            provider: default_provider(),
            api_key_env: default_api_key_env(),
            default_model: default_model(),
            theme: default_theme(),
            system_prompt_mode: SystemPromptMode::default(),
            tool_documentation_mode: ToolDocumentationMode::default(),
            enable_split_tool_results: default_enable_split_tool_results(),
            todo_planning_mode: default_todo_planning_mode(),
            ui_surface: UiSurfacePreference::default(),
            max_conversation_turns: default_max_conversation_turns(),
            reasoning_effort: default_reasoning_effort(),
            verbosity: default_verbosity(),
            temperature: default_temperature(),
            refine_temperature: default_refine_temperature(),
            enable_self_review: default_enable_self_review(),
            max_review_passes: default_max_review_passes(),
            refine_prompts_enabled: default_refine_prompts_enabled(),
            refine_prompts_max_passes: default_refine_max_passes(),
            refine_prompts_model: String::new(), // empty = auto-pick refiner model
            small_model: AgentSmallModelConfig::default(),
            prompt_suggestions: AgentPromptSuggestionsConfig::default(),
            onboarding: AgentOnboardingConfig::default(),
            project_doc_max_bytes: default_project_doc_max_bytes(),
            project_doc_fallback_filenames: Vec::new(),
            instruction_max_bytes: default_instruction_max_bytes(),
            instruction_files: Vec::new(),
            instruction_excludes: Vec::new(),
            instruction_import_max_depth: default_instruction_import_max_depth(),
            persistent_memory: PersistentMemoryConfig::default(),
            custom_api_keys: BTreeMap::new(),
            credential_storage_mode: crate::auth::AuthCredentialsStoreMode::default(),
            checkpointing: AgentCheckpointingConfig::default(),
            vibe_coding: AgentVibeCodingConfig::default(),
            max_task_retries: default_max_task_retries(),
            harness: AgentHarnessConfig::default(),
            codex_app_server: AgentCodexAppServerConfig::default(),
            include_temporal_context: default_include_temporal_context(),
            temporal_context_use_utc: false, // Default to local time
            include_working_directory: default_include_working_directory(),
            include_structured_reasoning_tags: None,
            user_instructions: None,
            require_plan_confirmation: default_require_plan_confirmation(),
            circuit_breaker: CircuitBreakerConfig::default(),
            open_responses: OpenResponsesConfig::default(),
        }
    }
}
639
impl AgentConfig {
    /// Determine whether structured reasoning tag instructions should be included.
    ///
    /// An explicit `Some(true)` / `Some(false)` always wins; when unset the
    /// decision is derived from the system prompt mode.
    ///
    /// NOTE(review): the field documentation on
    /// `include_structured_reasoning_tags` states that the `None` default
    /// includes the block for both `default` and `specialized` prompt modes,
    /// yet this implementation matches only `SystemPromptMode::Specialized`.
    /// Confirm which behavior is intended and align the doc or the match arm.
    pub fn should_include_structured_reasoning_tags(&self) -> bool {
        self.include_structured_reasoning_tags.unwrap_or(matches!(
            self.system_prompt_mode,
            SystemPromptMode::Specialized
        ))
    }

    /// Validate LLM generation parameters
    ///
    /// Checks temperature ranges and the instruction import depth, then
    /// delegates to the nested `persistent_memory` and
    /// `harness.tool_result_clearing` validators. Returns the first failure
    /// as a human-readable message.
    pub fn validate_llm_params(&self) -> Result<(), String> {
        // Validate temperature range
        if !(0.0..=1.0).contains(&self.temperature) {
            return Err(format!(
                "temperature must be between 0.0 and 1.0, got {}",
                self.temperature
            ));
        }

        if !(0.0..=1.0).contains(&self.refine_temperature) {
            return Err(format!(
                "refine_temperature must be between 0.0 and 1.0, got {}",
                self.refine_temperature
            ));
        }

        // Depth 0 would disable `@path` import resolution entirely.
        if self.instruction_import_max_depth == 0 {
            return Err("instruction_import_max_depth must be greater than 0".to_string());
        }

        self.persistent_memory.validate()?;
        self.harness.tool_result_clearing.validate()?;

        Ok(())
    }
}
676
677// Optimized: Use inline defaults with constants to reduce function call overhead
678#[inline]
679fn default_provider() -> String {
680    defaults::DEFAULT_PROVIDER.into()
681}
682
683#[inline]
684fn default_api_key_env() -> String {
685    defaults::DEFAULT_API_KEY_ENV.into()
686}
687
688#[inline]
689fn default_model() -> String {
690    defaults::DEFAULT_MODEL.into()
691}
692
693#[inline]
694fn default_theme() -> String {
695    defaults::DEFAULT_THEME.into()
696}
697
698#[inline]
699const fn default_todo_planning_mode() -> bool {
700    true
701}
702
703#[inline]
704const fn default_enable_split_tool_results() -> bool {
705    true // Default: enabled for production use (84% token savings)
706}
707
708#[inline]
709const fn default_max_conversation_turns() -> usize {
710    defaults::DEFAULT_MAX_CONVERSATION_TURNS
711}
712
713#[inline]
714fn default_reasoning_effort() -> ReasoningEffortLevel {
715    ReasoningEffortLevel::None
716}
717
718#[inline]
719fn default_verbosity() -> VerbosityLevel {
720    VerbosityLevel::default()
721}
722
723#[inline]
724const fn default_temperature() -> f32 {
725    llm_generation::DEFAULT_TEMPERATURE
726}
727
728#[inline]
729const fn default_refine_temperature() -> f32 {
730    llm_generation::DEFAULT_REFINE_TEMPERATURE
731}
732
733#[inline]
734const fn default_enable_self_review() -> bool {
735    false
736}
737
738#[inline]
739const fn default_max_review_passes() -> usize {
740    1
741}
742
743#[inline]
744const fn default_refine_prompts_enabled() -> bool {
745    false
746}
747
748#[inline]
749const fn default_refine_max_passes() -> usize {
750    1
751}
752
753#[inline]
754const fn default_project_doc_max_bytes() -> usize {
755    prompt_budget::DEFAULT_MAX_BYTES
756}
757
758#[inline]
759const fn default_instruction_max_bytes() -> usize {
760    prompt_budget::DEFAULT_MAX_BYTES
761}
762
763#[inline]
764const fn default_instruction_import_max_depth() -> usize {
765    5
766}
767
768#[inline]
769const fn default_max_task_retries() -> u32 {
770    2 // Retry twice on transient failures
771}
772
773#[inline]
774const fn default_harness_max_tool_calls_per_turn() -> usize {
775    defaults::DEFAULT_MAX_TOOL_CALLS_PER_TURN
776}
777
778#[inline]
779const fn default_harness_max_tool_wall_clock_secs() -> u64 {
780    defaults::DEFAULT_MAX_TOOL_WALL_CLOCK_SECS
781}
782
783#[inline]
784const fn default_harness_max_tool_retries() -> u32 {
785    defaults::DEFAULT_MAX_TOOL_RETRIES
786}
787
/// Serde default: automatic context compaction is opt-in.
#[inline]
const fn default_harness_auto_compaction_enabled() -> bool {
    false
}

/// Serde default: tool-result clearing is opt-in.
#[inline]
const fn default_tool_result_clearing_enabled() -> bool {
    false
}

/// Serde default: token count at which tool-result clearing triggers.
#[inline]
const fn default_tool_result_clearing_trigger_tokens() -> u64 {
    100_000
}

/// Serde default: number of most-recent tool uses preserved when clearing.
#[inline]
const fn default_tool_result_clearing_keep_tool_uses() -> u32 {
    3
}

/// Serde default: minimum token savings a clearing pass must achieve.
#[inline]
const fn default_tool_result_clearing_clear_at_least_tokens() -> u64 {
    30_000
}

/// Serde default: revision rounds allowed in multi-stage orchestration.
#[inline]
const fn default_harness_max_revision_rounds() -> usize {
    2
}
817
/// Serde default: inject current date/time context into the prompt.
#[inline]
const fn default_include_temporal_context() -> bool {
    true // Enable by default - minimal overhead (~20 tokens)
}

/// Serde default: inject the working directory into the prompt.
#[inline]
const fn default_include_working_directory() -> bool {
    true // Enable by default - minimal overhead (~10 tokens)
}

/// Serde default: plans need explicit user confirmation before execution.
#[inline]
const fn default_require_plan_confirmation() -> bool {
    true // Default: require confirmation (HITL pattern)
}

/// Serde default: per-tool circuit breaker is on.
#[inline]
const fn default_circuit_breaker_enabled() -> bool {
    true // Default: enabled for resilient execution
}

/// Serde default: consecutive failures before a tool's circuit opens.
#[inline]
const fn default_failure_threshold() -> u32 {
    7 // Open circuit after 7 consecutive failures
}

/// Serde default: pause and ask the user when a circuit opens.
#[inline]
const fn default_pause_on_open() -> bool {
    true // Default: ask user for guidance on circuit breaker
}

/// Serde default: number of simultaneously open circuits that forces a pause.
#[inline]
const fn default_max_open_circuits() -> usize {
    3 // Pause when 3+ tools have open circuits
}

/// Serde default: seconds between recovery prompts after a circuit opens.
#[inline]
const fn default_recovery_cooldown() -> u64 {
    60 // Cooldown between recovery prompts (seconds)
}
857
/// Checkpointing settings: automatic agent-state snapshots with retention limits.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentCheckpointingConfig {
    /// Enable automatic checkpoints after each successful turn
    #[serde(default = "default_checkpointing_enabled")]
    pub enabled: bool,

    /// Optional custom directory for storing checkpoints (relative to workspace or absolute)
    #[serde(default)]
    pub storage_dir: Option<String>,

    /// Maximum number of checkpoints to retain on disk
    #[serde(default = "default_checkpointing_max_snapshots")]
    pub max_snapshots: usize,

    /// Maximum age in days before checkpoints are removed automatically (None disables)
    #[serde(default = "default_checkpointing_max_age_days")]
    pub max_age_days: Option<u64>,
}

impl Default for AgentCheckpointingConfig {
    // Mirrors the serde field defaults so programmatic construction matches
    // deserializing an empty `[checkpointing]` table.
    fn default() -> Self {
        Self {
            enabled: default_checkpointing_enabled(),
            storage_dir: None,
            max_snapshots: default_checkpointing_max_snapshots(),
            max_age_days: default_checkpointing_max_age_days(),
        }
    }
}

/// Serde default: checkpointing enabled (see `DEFAULT_CHECKPOINTS_ENABLED`).
#[inline]
const fn default_checkpointing_enabled() -> bool {
    DEFAULT_CHECKPOINTS_ENABLED
}

/// Serde default: snapshot retention cap (see `DEFAULT_MAX_SNAPSHOTS`).
#[inline]
const fn default_checkpointing_max_snapshots() -> usize {
    DEFAULT_MAX_SNAPSHOTS
}

/// Serde default: age-based expiry enabled at `DEFAULT_MAX_AGE_DAYS` days.
#[inline]
const fn default_checkpointing_max_age_days() -> Option<u64> {
    Some(DEFAULT_MAX_AGE_DAYS)
}
903
/// Durable cross-session memory settings scoped to the current repository.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct PersistentMemoryConfig {
    /// Toggle main-session persistent memory for this repository
    #[serde(default = "default_persistent_memory_enabled")]
    pub enabled: bool,

    /// Write durable memory after completed turns and session finalization
    #[serde(default = "default_persistent_memory_auto_write")]
    pub auto_write: bool,

    /// Optional user-local directory override for persistent memory storage
    #[serde(default)]
    pub directory_override: Option<String>,

    /// Startup line budget scanned from memory_summary.md before VT Code renders a compact startup summary
    #[serde(default = "default_persistent_memory_startup_line_limit")]
    pub startup_line_limit: usize,

    /// Startup byte budget scanned from memory_summary.md before VT Code renders a compact startup summary
    #[serde(default = "default_persistent_memory_startup_byte_limit")]
    pub startup_byte_limit: usize,
}

impl Default for PersistentMemoryConfig {
    // Mirrors the serde field defaults so programmatic construction matches
    // deserializing an empty table.
    fn default() -> Self {
        Self {
            enabled: default_persistent_memory_enabled(),
            auto_write: default_persistent_memory_auto_write(),
            directory_override: None,
            startup_line_limit: default_persistent_memory_startup_line_limit(),
            startup_byte_limit: default_persistent_memory_startup_byte_limit(),
        }
    }
}
939
940impl PersistentMemoryConfig {
941    pub fn validate(&self) -> Result<(), String> {
942        if self.startup_line_limit == 0 {
943            return Err("persistent_memory.startup_line_limit must be greater than 0".to_string());
944        }
945
946        if self.startup_byte_limit == 0 {
947            return Err("persistent_memory.startup_byte_limit must be greater than 0".to_string());
948        }
949
950        Ok(())
951    }
952}
953
/// Serde default: persistent memory is opt-in per repository.
#[inline]
const fn default_persistent_memory_enabled() -> bool {
    false
}

/// Serde default: write memory automatically once the feature is enabled.
#[inline]
const fn default_persistent_memory_auto_write() -> bool {
    true
}

/// Serde default: line budget scanned from memory_summary.md at startup.
#[inline]
const fn default_persistent_memory_startup_line_limit() -> usize {
    200
}

/// Serde default: byte budget (25 KiB) scanned from memory_summary.md at startup.
#[inline]
const fn default_persistent_memory_startup_byte_limit() -> usize {
    25 * 1024
}
973
/// Configuration for the onboarding banner rendered at session start.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentOnboardingConfig {
    /// Toggle onboarding message rendering
    #[serde(default = "default_onboarding_enabled")]
    pub enabled: bool,

    /// Introductory text shown at session start
    #[serde(default = "default_intro_text")]
    pub intro_text: String,

    /// Whether to include project overview in onboarding message
    #[serde(default = "default_show_project_overview")]
    pub include_project_overview: bool,

    /// Whether to include language summary in onboarding message
    #[serde(default = "default_show_language_summary")]
    pub include_language_summary: bool,

    /// Whether to include AGENTS.md highlights in onboarding message
    #[serde(default = "default_show_guideline_highlights")]
    pub include_guideline_highlights: bool,

    /// Whether to surface usage tips inside the welcome text banner
    #[serde(default = "default_show_usage_tips_in_welcome")]
    pub include_usage_tips_in_welcome: bool,

    /// Whether to surface suggested actions inside the welcome text banner
    #[serde(default = "default_show_recommended_actions_in_welcome")]
    pub include_recommended_actions_in_welcome: bool,

    /// Maximum number of guideline bullets to surface
    #[serde(default = "default_guideline_highlight_limit")]
    pub guideline_highlight_limit: usize,

    /// Tips for collaborating with the agent effectively
    #[serde(default = "default_usage_tips")]
    pub usage_tips: Vec<String>,

    /// Recommended follow-up actions to display
    #[serde(default = "default_recommended_actions")]
    pub recommended_actions: Vec<String>,

    /// Placeholder suggestion for the chat input bar
    #[serde(default)]
    pub chat_placeholder: Option<String>,
}

impl Default for AgentOnboardingConfig {
    // Mirrors the serde field defaults so programmatic construction matches
    // deserializing an empty table.
    fn default() -> Self {
        Self {
            enabled: default_onboarding_enabled(),
            intro_text: default_intro_text(),
            include_project_overview: default_show_project_overview(),
            include_language_summary: default_show_language_summary(),
            include_guideline_highlights: default_show_guideline_highlights(),
            include_usage_tips_in_welcome: default_show_usage_tips_in_welcome(),
            include_recommended_actions_in_welcome: default_show_recommended_actions_in_welcome(),
            guideline_highlight_limit: default_guideline_highlight_limit(),
            usage_tips: default_usage_tips(),
            recommended_actions: default_recommended_actions(),
            chat_placeholder: None,
        }
    }
}
1039
/// Serde default: onboarding banner is shown.
#[inline]
const fn default_onboarding_enabled() -> bool {
    true
}

// Default introductory sentence for the onboarding banner.
const DEFAULT_INTRO_TEXT: &str =
    "Let's get oriented. I preloaded workspace context so we can move fast.";

/// Serde default: owned copy of the default intro text.
#[inline]
fn default_intro_text() -> String {
    DEFAULT_INTRO_TEXT.into()
}

/// Serde default: include the project overview section.
#[inline]
const fn default_show_project_overview() -> bool {
    true
}

/// Serde default: language summary is omitted.
#[inline]
const fn default_show_language_summary() -> bool {
    false
}

/// Serde default: include AGENTS.md guideline highlights.
#[inline]
const fn default_show_guideline_highlights() -> bool {
    true
}

/// Serde default: usage tips are kept out of the welcome banner.
#[inline]
const fn default_show_usage_tips_in_welcome() -> bool {
    false
}

/// Serde default: recommended actions are kept out of the welcome banner.
#[inline]
const fn default_show_recommended_actions_in_welcome() -> bool {
    false
}

/// Serde default: cap on guideline bullets surfaced.
#[inline]
const fn default_guideline_highlight_limit() -> usize {
    3
}
1082
// Canonical onboarding tips; kept as static slices so the owned defaults
// below are built on demand.
const DEFAULT_USAGE_TIPS: &[&str] = &[
    "Describe your current coding goal or ask for a quick status overview.",
    "Reference AGENTS.md guidelines when proposing changes.",
    "Prefer asking for targeted file reads or diffs before editing.",
];

const DEFAULT_RECOMMENDED_ACTIONS: &[&str] = &[
    "Review the highlighted guidelines and share the task you want to tackle.",
    "Ask for a workspace tour if you need more context.",
];

/// Serde default: the canonical usage tips as owned strings.
fn default_usage_tips() -> Vec<String> {
    DEFAULT_USAGE_TIPS
        .iter()
        .map(|tip| tip.to_string())
        .collect()
}

/// Serde default: the canonical recommended actions as owned strings.
fn default_recommended_actions() -> Vec<String> {
    DEFAULT_RECOMMENDED_ACTIONS
        .iter()
        .map(|action| action.to_string())
        .collect()
}
1104
/// Small/lightweight model configuration for efficient operations
///
/// Following VT Code's pattern, use a smaller model (e.g., Haiku, GPT-4 Mini) for 50%+ of calls:
/// - Large file reads and parsing (>50KB)
/// - Web page summarization and analysis
/// - Git history and commit message processing
/// - One-word processing labels and simple classifications
///
/// Typically 70-80% cheaper than the main model while maintaining quality for these tasks.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentSmallModelConfig {
    /// Enable small model tier for efficient operations
    #[serde(default = "default_small_model_enabled")]
    pub enabled: bool,

    /// Small model to use (e.g., "claude-4-5-haiku", "gpt-4-mini", "gemini-2.0-flash")
    /// Leave empty to auto-select a lightweight sibling of the main model
    #[serde(default)]
    pub model: String,

    /// Temperature for small model responses
    #[serde(default = "default_small_model_temperature")]
    pub temperature: f32,

    /// Enable small model for large file reads (>50KB)
    #[serde(default = "default_small_model_for_large_reads")]
    pub use_for_large_reads: bool,

    /// Enable small model for web content summarization
    #[serde(default = "default_small_model_for_web_summary")]
    pub use_for_web_summary: bool,

    /// Enable small model for git history processing
    #[serde(default = "default_small_model_for_git_history")]
    pub use_for_git_history: bool,

    /// Enable small model for persistent memory classification and summary refresh
    #[serde(default = "default_small_model_for_memory")]
    pub use_for_memory: bool,
}

impl Default for AgentSmallModelConfig {
    // Mirrors the serde field defaults so programmatic construction matches
    // deserializing an empty table.
    fn default() -> Self {
        Self {
            enabled: default_small_model_enabled(),
            model: String::new(),
            temperature: default_small_model_temperature(),
            use_for_large_reads: default_small_model_for_large_reads(),
            use_for_web_summary: default_small_model_for_web_summary(),
            use_for_git_history: default_small_model_for_git_history(),
            use_for_memory: default_small_model_for_memory(),
        }
    }
}

/// Serde default: small-model tier is on.
#[inline]
const fn default_small_model_enabled() -> bool {
    true // Enable by default following VT Code pattern
}

/// Serde default: small-model sampling temperature.
// NOTE(review): hard-coded unlike the main temperatures, which come from
// `llm_generation` constants — presumably intentional; confirm.
#[inline]
const fn default_small_model_temperature() -> f32 {
    0.3 // More deterministic for parsing/summarization
}

/// Serde default: route large file reads to the small model.
#[inline]
const fn default_small_model_for_large_reads() -> bool {
    true
}

/// Serde default: route web summarization to the small model.
#[inline]
const fn default_small_model_for_web_summary() -> bool {
    true
}

/// Serde default: route git history processing to the small model.
#[inline]
const fn default_small_model_for_git_history() -> bool {
    true
}

/// Serde default: route persistent-memory work to the small model.
#[inline]
const fn default_small_model_for_memory() -> bool {
    true
}
1190
/// Inline prompt suggestion configuration for the chat composer.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentPromptSuggestionsConfig {
    /// Enable inline prompt suggestions in the chat composer.
    #[serde(default = "default_prompt_suggestions_enabled")]
    pub enabled: bool,

    /// Lightweight model to use for suggestions.
    /// Leave empty to auto-select an efficient sibling of the main model.
    #[serde(default)]
    pub model: String,

    /// Temperature for inline prompt suggestion generation.
    #[serde(default = "default_prompt_suggestions_temperature")]
    pub temperature: f32,

    /// Whether VT Code should remind users that LLM-backed suggestions consume tokens.
    #[serde(default = "default_prompt_suggestions_show_cost_notice")]
    pub show_cost_notice: bool,
}

impl Default for AgentPromptSuggestionsConfig {
    // Mirrors the serde field defaults so programmatic construction matches
    // deserializing an empty table.
    fn default() -> Self {
        Self {
            enabled: default_prompt_suggestions_enabled(),
            model: String::new(),
            temperature: default_prompt_suggestions_temperature(),
            show_cost_notice: default_prompt_suggestions_show_cost_notice(),
        }
    }
}

/// Serde default: inline suggestions are on.
#[inline]
const fn default_prompt_suggestions_enabled() -> bool {
    true
}

/// Serde default: suggestion generation temperature.
#[inline]
const fn default_prompt_suggestions_temperature() -> f32 {
    0.3
}

/// Serde default: surface the token-cost reminder.
#[inline]
const fn default_prompt_suggestions_show_cost_notice() -> bool {
    true
}
1238
/// Vibe coding configuration for lazy/vague request support
///
/// Enables intelligent context gathering and entity resolution to support
/// casual, imprecise requests like "make it blue" or "decrease by half".
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentVibeCodingConfig {
    /// Enable vibe coding support
    #[serde(default = "default_vibe_coding_enabled")]
    pub enabled: bool,

    /// Minimum prompt length for refinement (default: 5 chars)
    #[serde(default = "default_vibe_min_prompt_length")]
    pub min_prompt_length: usize,

    /// Minimum prompt words for refinement (default: 2 words)
    #[serde(default = "default_vibe_min_prompt_words")]
    pub min_prompt_words: usize,

    /// Enable fuzzy entity resolution
    #[serde(default = "default_vibe_entity_resolution")]
    pub enable_entity_resolution: bool,

    /// Entity index cache file path (relative to workspace)
    #[serde(default = "default_vibe_entity_cache")]
    pub entity_index_cache: String,

    /// Maximum entity matches to return (default: 5)
    #[serde(default = "default_vibe_max_entity_matches")]
    pub max_entity_matches: usize,

    /// Track workspace state (file activity, value changes)
    #[serde(default = "default_vibe_track_workspace")]
    pub track_workspace_state: bool,

    /// Maximum recent files to track (default: 20)
    #[serde(default = "default_vibe_max_recent_files")]
    pub max_recent_files: usize,

    /// Track value history for inference
    #[serde(default = "default_vibe_track_values")]
    pub track_value_history: bool,

    /// Enable conversation memory for pronoun resolution
    #[serde(default = "default_vibe_conversation_memory")]
    pub enable_conversation_memory: bool,

    /// Maximum conversation turns to remember (default: 50)
    #[serde(default = "default_vibe_max_memory_turns")]
    pub max_memory_turns: usize,

    /// Enable pronoun resolution (it, that, this)
    #[serde(default = "default_vibe_pronoun_resolution")]
    pub enable_pronoun_resolution: bool,

    /// Enable proactive context gathering
    #[serde(default = "default_vibe_proactive_context")]
    pub enable_proactive_context: bool,

    /// Maximum files to gather for context (default: 3)
    #[serde(default = "default_vibe_max_context_files")]
    pub max_context_files: usize,

    /// Maximum code snippets per file (default: 20 lines)
    #[serde(default = "default_vibe_max_snippets_per_file")]
    pub max_context_snippets_per_file: usize,

    /// Maximum search results to include (default: 5)
    #[serde(default = "default_vibe_max_search_results")]
    pub max_search_results: usize,

    /// Enable relative value inference (by half, double, etc.)
    #[serde(default = "default_vibe_value_inference")]
    pub enable_relative_value_inference: bool,
}

impl Default for AgentVibeCodingConfig {
    // Mirrors the serde field defaults so programmatic construction matches
    // deserializing an empty table.
    fn default() -> Self {
        Self {
            enabled: default_vibe_coding_enabled(),
            min_prompt_length: default_vibe_min_prompt_length(),
            min_prompt_words: default_vibe_min_prompt_words(),
            enable_entity_resolution: default_vibe_entity_resolution(),
            entity_index_cache: default_vibe_entity_cache(),
            max_entity_matches: default_vibe_max_entity_matches(),
            track_workspace_state: default_vibe_track_workspace(),
            max_recent_files: default_vibe_max_recent_files(),
            track_value_history: default_vibe_track_values(),
            enable_conversation_memory: default_vibe_conversation_memory(),
            max_memory_turns: default_vibe_max_memory_turns(),
            enable_pronoun_resolution: default_vibe_pronoun_resolution(),
            enable_proactive_context: default_vibe_proactive_context(),
            max_context_files: default_vibe_max_context_files(),
            max_context_snippets_per_file: default_vibe_max_snippets_per_file(),
            max_search_results: default_vibe_max_search_results(),
            enable_relative_value_inference: default_vibe_value_inference(),
        }
    }
}
1338
// Vibe coding default functions
/// Serde default: vibe coding is off.
#[inline]
const fn default_vibe_coding_enabled() -> bool {
    false // Conservative default, opt-in
}

/// Serde default: minimum prompt length (chars) before refinement applies.
#[inline]
const fn default_vibe_min_prompt_length() -> usize {
    5
}

/// Serde default: minimum prompt word count before refinement applies.
#[inline]
const fn default_vibe_min_prompt_words() -> usize {
    2
}

/// Serde default: fuzzy entity resolution is on (when vibe coding is enabled).
#[inline]
const fn default_vibe_entity_resolution() -> bool {
    true
}

/// Serde default: workspace-relative path for the entity index cache.
#[inline]
fn default_vibe_entity_cache() -> String {
    ".vtcode/entity_index.json".into()
}

/// Serde default: cap on entity matches returned.
#[inline]
const fn default_vibe_max_entity_matches() -> usize {
    5
}

/// Serde default: workspace state tracking is on.
#[inline]
const fn default_vibe_track_workspace() -> bool {
    true
}

/// Serde default: cap on recently-touched files tracked.
#[inline]
const fn default_vibe_max_recent_files() -> usize {
    20
}

/// Serde default: value-history tracking is on.
#[inline]
const fn default_vibe_track_values() -> bool {
    true
}

/// Serde default: conversation memory is on.
#[inline]
const fn default_vibe_conversation_memory() -> bool {
    true
}

/// Serde default: cap on conversation turns remembered.
#[inline]
const fn default_vibe_max_memory_turns() -> usize {
    50
}

/// Serde default: pronoun resolution ("it", "that", "this") is on.
#[inline]
const fn default_vibe_pronoun_resolution() -> bool {
    true
}

/// Serde default: proactive context gathering is on.
#[inline]
const fn default_vibe_proactive_context() -> bool {
    true
}

/// Serde default: cap on files gathered for context.
#[inline]
const fn default_vibe_max_context_files() -> usize {
    3
}

/// Serde default: cap on snippet lines per file.
#[inline]
const fn default_vibe_max_snippets_per_file() -> usize {
    20
}

/// Serde default: cap on search results included.
#[inline]
const fn default_vibe_max_search_results() -> usize {
    5
}

/// Serde default: relative value inference ("by half", "double") is on.
#[inline]
const fn default_vibe_value_inference() -> bool {
    true
}
1424
// Unit tests covering serde fallbacks, default values, and prompt-mode logic
// for the agent configuration types.
#[cfg(test)]
mod tests {
    use super::*;

    // Enum parsing round-trips plus rejection of unknown variants.
    #[test]
    fn test_continuation_policy_defaults_and_parses() {
        assert_eq!(ContinuationPolicy::default(), ContinuationPolicy::All);
        assert_eq!(
            ContinuationPolicy::parse("off"),
            Some(ContinuationPolicy::Off)
        );
        assert_eq!(
            ContinuationPolicy::parse("exec-only"),
            Some(ContinuationPolicy::ExecOnly)
        );
        assert_eq!(
            ContinuationPolicy::parse("all"),
            Some(ContinuationPolicy::All)
        );
        assert_eq!(ContinuationPolicy::parse("invalid"), None);
    }

    // Unknown TOML values must fall back to the default policy, not error.
    #[test]
    fn test_harness_config_continuation_policy_deserializes_with_fallback() {
        let parsed: AgentHarnessConfig =
            toml::from_str("continuation_policy = \"all\"").expect("valid harness config");
        assert_eq!(parsed.continuation_policy, ContinuationPolicy::All);

        let fallback: AgentHarnessConfig =
            toml::from_str("continuation_policy = \"unexpected\"").expect("fallback config");
        assert_eq!(fallback.continuation_policy, ContinuationPolicy::All);
    }

    // Orchestration mode accepts both snake_case and hyphenated aliases.
    #[test]
    fn test_harness_orchestration_mode_defaults_and_parses() {
        assert_eq!(
            HarnessOrchestrationMode::default(),
            HarnessOrchestrationMode::Single
        );
        assert_eq!(
            HarnessOrchestrationMode::parse("single"),
            Some(HarnessOrchestrationMode::Single)
        );
        assert_eq!(
            HarnessOrchestrationMode::parse("plan_build_evaluate"),
            Some(HarnessOrchestrationMode::PlanBuildEvaluate)
        );
        assert_eq!(
            HarnessOrchestrationMode::parse("planner-generator-evaluator"),
            Some(HarnessOrchestrationMode::PlanBuildEvaluate)
        );
        assert_eq!(HarnessOrchestrationMode::parse("unexpected"), None);
    }

    // Unknown orchestration values fall back to Single rather than erroring.
    #[test]
    fn test_harness_config_orchestration_deserializes_with_fallback() {
        let parsed: AgentHarnessConfig =
            toml::from_str("orchestration_mode = \"plan_build_evaluate\"")
                .expect("valid harness config");
        assert_eq!(
            parsed.orchestration_mode,
            HarnessOrchestrationMode::PlanBuildEvaluate
        );
        assert_eq!(parsed.max_revision_rounds, 2);

        let fallback: AgentHarnessConfig =
            toml::from_str("orchestration_mode = \"unexpected\"").expect("fallback config");
        assert_eq!(
            fallback.orchestration_mode,
            HarnessOrchestrationMode::Single
        );
    }

    #[test]
    fn test_plan_confirmation_config_default() {
        let config = AgentConfig::default();
        assert!(config.require_plan_confirmation);
    }

    // Persistent memory ships disabled, but auto-write is on once enabled.
    #[test]
    fn test_persistent_memory_is_disabled_by_default() {
        let config = AgentConfig::default();
        assert!(!config.persistent_memory.enabled);
        assert!(config.persistent_memory.auto_write);
    }

    // Pins the documented tool-result clearing defaults.
    #[test]
    fn test_tool_result_clearing_defaults() {
        let config = AgentConfig::default();
        let clearing = config.harness.tool_result_clearing;

        assert!(!clearing.enabled);
        assert_eq!(clearing.trigger_tokens, 100_000);
        assert_eq!(clearing.keep_tool_uses, 3);
        assert_eq!(clearing.clear_at_least_tokens, 30_000);
        assert!(!clearing.clear_tool_inputs);
    }

    #[test]
    fn test_codex_app_server_experimental_features_default_to_disabled() {
        let config = AgentConfig::default();

        assert!(!config.codex_app_server.experimental_features);
    }

    #[test]
    fn test_codex_app_server_experimental_features_parse_from_toml() {
        let parsed: AgentCodexAppServerConfig = toml::from_str(
            r#"
                command = "codex"
                args = ["app-server"]
                startup_timeout_secs = 15
                experimental_features = true
            "#,
        )
        .expect("valid codex app-server config");

        assert!(parsed.experimental_features);
        assert_eq!(parsed.startup_timeout_secs, 15);
    }

    // Full round-trip: TOML table -> config struct -> validation passes.
    #[test]
    fn test_tool_result_clearing_parses_and_validates() {
        let parsed: AgentHarnessConfig = toml::from_str(
            r#"
                [tool_result_clearing]
                enabled = true
                trigger_tokens = 123456
                keep_tool_uses = 6
                clear_at_least_tokens = 4096
                clear_tool_inputs = true
            "#,
        )
        .expect("valid harness config");

        assert!(parsed.tool_result_clearing.enabled);
        assert_eq!(parsed.tool_result_clearing.trigger_tokens, 123_456);
        assert_eq!(parsed.tool_result_clearing.keep_tool_uses, 6);
        assert_eq!(parsed.tool_result_clearing.clear_at_least_tokens, 4_096);
        assert!(parsed.tool_result_clearing.clear_tool_inputs);
        assert!(parsed.tool_result_clearing.validate().is_ok());
    }

    // Each numeric knob individually rejects zero.
    #[test]
    fn test_tool_result_clearing_rejects_zero_values() {
        let clearing = ToolResultClearingConfig {
            trigger_tokens: 0,
            ..ToolResultClearingConfig::default()
        };
        assert!(clearing.validate().is_err());

        let clearing = ToolResultClearingConfig {
            keep_tool_uses: 0,
            ..ToolResultClearingConfig::default()
        };
        assert!(clearing.validate().is_err());

        let clearing = ToolResultClearingConfig {
            clear_at_least_tokens: 0,
            ..ToolResultClearingConfig::default()
        };
        assert!(clearing.validate().is_err());
    }

    // Only the Specialized prompt mode opts into structured reasoning tags.
    #[test]
    fn test_structured_reasoning_defaults_follow_prompt_mode() {
        let default_mode = AgentConfig {
            system_prompt_mode: SystemPromptMode::Default,
            ..Default::default()
        };
        assert!(!default_mode.should_include_structured_reasoning_tags());

        let specialized_mode = AgentConfig {
            system_prompt_mode: SystemPromptMode::Specialized,
            ..Default::default()
        };
        assert!(specialized_mode.should_include_structured_reasoning_tags());

        let minimal_mode = AgentConfig {
            system_prompt_mode: SystemPromptMode::Minimal,
            ..Default::default()
        };
        assert!(!minimal_mode.should_include_structured_reasoning_tags());

        let lightweight_mode = AgentConfig {
            system_prompt_mode: SystemPromptMode::Lightweight,
            ..Default::default()
        };
        assert!(!lightweight_mode.should_include_structured_reasoning_tags());
    }

    // An explicit Some(..) override beats the prompt-mode heuristic.
    #[test]
    fn test_structured_reasoning_explicit_override() {
        let mut config = AgentConfig {
            system_prompt_mode: SystemPromptMode::Minimal,
            include_structured_reasoning_tags: Some(true),
            ..AgentConfig::default()
        };
        assert!(config.should_include_structured_reasoning_tags());

        config.include_structured_reasoning_tags = Some(false);
        assert!(!config.should_include_structured_reasoning_tags());
    }
}