Skip to main content

vtcode_config/core/
agent.rs

1use crate::constants::{defaults, llm_generation, prompt_budget};
2use crate::types::{
3    EditingMode, ReasoningEffortLevel, SystemPromptMode, ToolDocumentationMode,
4    UiSurfacePreference, VerbosityLevel,
5};
6use serde::{Deserialize, Serialize};
7use std::collections::BTreeMap;
8
// Retention defaults for automatic turn checkpoints; consumed by the
// `AgentCheckpointingConfig` serde default helpers later in this file.
const DEFAULT_CHECKPOINTS_ENABLED: bool = true;
const DEFAULT_MAX_SNAPSHOTS: usize = 50;
const DEFAULT_MAX_AGE_DAYS: u64 = 30;
12
/// Agent-wide configuration
///
/// Every field carries a serde default, so a partially specified user config
/// deserializes cleanly; the hand-written `Default` impl further down must be
/// kept in sync with these serde defaults.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentConfig {
    /// AI provider for single agent mode (gemini, openai, anthropic, openrouter, zai)
    #[serde(default = "default_provider")]
    pub provider: String,

    /// Environment variable that stores the API key for the active provider
    #[serde(default = "default_api_key_env")]
    pub api_key_env: String,

    /// Default model to use
    #[serde(default = "default_model")]
    pub default_model: String,

    /// UI theme identifier controlling ANSI styling
    #[serde(default = "default_theme")]
    pub theme: String,

    /// System prompt mode controlling verbosity and token overhead
    /// Options: minimal (~500-800 tokens), lightweight (~1-2k), default (~6-7k), specialized (~7-8k)
    /// Inspired by pi-coding-agent: modern models often perform well with minimal prompts
    #[serde(default)]
    pub system_prompt_mode: SystemPromptMode,

    /// Tool documentation mode controlling token overhead for tool definitions
    /// Options: minimal (~800 tokens), progressive (~1.2k), full (~3k current)
    /// Progressive: signatures upfront, detailed docs on-demand (recommended)
    /// Minimal: signatures only, pi-coding-agent style (power users)
    /// Full: all documentation upfront (current behavior, default)
    #[serde(default)]
    pub tool_documentation_mode: ToolDocumentationMode,

    /// Enable split tool results for massive token savings (Phase 4)
    /// When enabled, tools return dual-channel output:
    /// - llm_content: Concise summary sent to LLM (token-optimized, 53-95% reduction)
    /// - ui_content: Rich output displayed to user (full details preserved)
    ///   Applies to: unified_search, unified_file, unified_exec
    ///   Default: true (opt-out for compatibility), recommended for production use
    #[serde(default = "default_enable_split_tool_results")]
    pub enable_split_tool_results: bool,

    /// Enable TODO planning helper mode for structured task management
    #[serde(default = "default_todo_planning_mode")]
    pub todo_planning_mode: bool,

    /// Preferred rendering surface for the interactive chat UI (auto, alternate, inline)
    #[serde(default)]
    pub ui_surface: UiSurfacePreference,

    /// Maximum number of conversation turns before auto-termination
    #[serde(default = "default_max_conversation_turns")]
    pub max_conversation_turns: usize,

    /// Reasoning effort level for models that support it (none, minimal, low, medium, high, xhigh)
    /// Applies to: Claude, GPT-5 family, Gemini, Qwen3, DeepSeek with reasoning capability
    #[serde(default = "default_reasoning_effort")]
    pub reasoning_effort: ReasoningEffortLevel,

    /// Verbosity level for output text (low, medium, high)
    /// Applies to: GPT-5.4-family Responses workflows and other models that support verbosity control
    #[serde(default = "default_verbosity")]
    pub verbosity: VerbosityLevel,

    /// Temperature for main LLM responses (0.0-1.0)
    /// Lower values = more deterministic, higher values = more creative
    /// Recommended: 0.7 for balanced creativity and consistency
    /// Range: 0.0 (deterministic) to 1.0 (maximum randomness)
    #[serde(default = "default_temperature")]
    pub temperature: f32,

    /// Temperature for prompt refinement (0.0-1.0, default: 0.3)
    /// Lower values ensure prompt refinement is more deterministic/consistent
    /// Keep lower than main temperature for stable prompt improvement
    #[serde(default = "default_refine_temperature")]
    pub refine_temperature: f32,

    /// Enable an extra self-review pass to refine final responses
    #[serde(default = "default_enable_self_review")]
    pub enable_self_review: bool,

    /// Maximum number of self-review passes
    #[serde(default = "default_max_review_passes")]
    pub max_review_passes: usize,

    /// Enable prompt refinement pass before sending to LLM
    #[serde(default = "default_refine_prompts_enabled")]
    pub refine_prompts_enabled: bool,

    /// Max refinement passes for prompt writing
    #[serde(default = "default_refine_max_passes")]
    pub refine_prompts_max_passes: usize,

    /// Optional model override for the refiner (empty = auto pick efficient sibling)
    #[serde(default)]
    pub refine_prompts_model: String,

    /// Small/lightweight model configuration for efficient operations
    /// Used for tasks like large file reads, parsing, git history, conversation summarization
    /// Typically 70-80% cheaper than main model; ~50% of VT Code's calls use this tier
    #[serde(default)]
    pub small_model: AgentSmallModelConfig,

    /// Inline prompt suggestion configuration for the chat composer
    #[serde(default)]
    pub prompt_suggestions: AgentPromptSuggestionsConfig,

    /// Session onboarding and welcome message configuration
    #[serde(default)]
    pub onboarding: AgentOnboardingConfig,

    /// Maximum bytes of AGENTS.md content to load from project hierarchy
    #[serde(default = "default_project_doc_max_bytes")]
    pub project_doc_max_bytes: usize,

    /// Additional filenames to check when AGENTS.md is absent at a directory level.
    #[serde(default)]
    pub project_doc_fallback_filenames: Vec<String>,

    /// Maximum bytes of instruction content to load from AGENTS.md hierarchy
    // `rule_doc_max_bytes` is accepted as a legacy alias for older configs.
    #[serde(
        default = "default_instruction_max_bytes",
        alias = "rule_doc_max_bytes"
    )]
    pub instruction_max_bytes: usize,

    /// Additional instruction files or globs to merge into the hierarchy
    #[serde(default, alias = "instruction_paths", alias = "instructions")]
    pub instruction_files: Vec<String>,

    /// Instruction files or globs to exclude from AGENTS.md and rules discovery
    #[serde(default)]
    pub instruction_excludes: Vec<String>,

    /// Maximum recursive `@path` import depth for instruction and rule files
    #[serde(default = "default_instruction_import_max_depth")]
    pub instruction_import_max_depth: usize,

    /// Durable per-repository memory for main sessions
    #[serde(default)]
    pub persistent_memory: PersistentMemoryConfig,

    /// Provider-specific API keys captured from interactive configuration flows
    ///
    /// Note: Actual API keys are stored securely in the OS keyring.
    /// This field only tracks which providers have keys stored (for UI/migration purposes).
    /// The keys themselves are NOT serialized to the config file for security.
    #[serde(default, skip_serializing)]
    pub custom_api_keys: BTreeMap<String, String>,

    /// Preferred storage backend for credentials (OAuth tokens, API keys, etc.)
    ///
    /// - `keyring`: Use OS-specific secure storage (macOS Keychain, Windows Credential
    ///   Manager, Linux Secret Service). This is the default as it's the most secure.
    /// - `file`: Use AES-256-GCM encrypted file with machine-derived key
    /// - `auto`: Try keyring first, fall back to file if unavailable
    #[serde(default)]
    pub credential_storage_mode: crate::auth::AuthCredentialsStoreMode,

    /// Checkpointing configuration for automatic turn snapshots
    #[serde(default)]
    pub checkpointing: AgentCheckpointingConfig,

    /// Vibe coding configuration for lazy or vague request support
    #[serde(default)]
    pub vibe_coding: AgentVibeCodingConfig,

    /// Maximum number of retries for agent task execution (default: 2)
    /// When an agent task fails due to retryable errors (timeout, network, 503, etc.),
    /// it will be retried up to this many times with exponential backoff
    #[serde(default = "default_max_task_retries")]
    pub max_task_retries: u32,

    /// Harness configuration for turn-level budgets, telemetry, and execution limits
    #[serde(default)]
    pub harness: AgentHarnessConfig,

    /// Include current date/time in system prompt for temporal awareness
    /// Helps LLM understand context for time-sensitive tasks (default: true)
    #[serde(default = "default_include_temporal_context")]
    pub include_temporal_context: bool,

    /// Use UTC instead of local time for temporal context in system prompts
    #[serde(default)]
    pub temporal_context_use_utc: bool,

    /// Include current working directory in system prompt (default: true)
    #[serde(default = "default_include_working_directory")]
    pub include_working_directory: bool,

    /// Controls inclusion of the structured reasoning tag instructions block.
    ///
    /// Behavior:
    /// - `Some(true)`: always include structured reasoning instructions.
    /// - `Some(false)`: never include structured reasoning instructions.
    /// - `None` (default): include only for `default` and `specialized` prompt modes.
    ///
    /// This keeps lightweight/minimal prompts smaller by default while allowing
    /// explicit opt-in when users want tag-based reasoning guidance.
    #[serde(default)]
    pub include_structured_reasoning_tags: Option<bool>,

    /// Custom instructions provided by the user via configuration to guide agent behavior
    #[serde(default)]
    pub user_instructions: Option<String>,

    /// Default editing mode on startup: "edit" (default) or "plan"
    /// Codex-inspired: Encourages structured planning before execution.
    #[serde(default)]
    pub default_editing_mode: EditingMode,

    /// Require user confirmation before executing a plan generated in plan mode
    /// When true, exiting plan mode shows the implementation blueprint and
    /// requires explicit user approval before enabling edit tools.
    #[serde(default = "default_require_plan_confirmation")]
    pub require_plan_confirmation: bool,

    /// Deprecated compatibility flag for pre-classifier autonomous mode settings.
    /// When true and `[permissions].default_mode` is not explicitly set, VT Code maps
    /// the session to `permissions.default_mode = "auto"`.
    #[serde(default = "default_autonomous_mode")]
    pub autonomous_mode: bool,

    /// Circuit breaker configuration for resilient tool execution
    /// Controls when the agent should pause and ask for user guidance due to repeated failures
    #[serde(default)]
    pub circuit_breaker: CircuitBreakerConfig,

    /// Open Responses specification compliance configuration
    /// Enables vendor-neutral LLM API format for interoperable workflows
    #[serde(default)]
    pub open_responses: OpenResponsesConfig,
}
247
/// Policy controlling harness-managed continuation loops.
///
/// Serialized in `snake_case`. Deserialization is lenient: unrecognized
/// strings fall back to the default variant (`All`) via the manual
/// `Deserialize` impl in this file.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "schema", schemars(rename_all = "snake_case"))]
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ContinuationPolicy {
    /// Disable continuation loops entirely.
    Off,
    /// Restrict continuation loops to exec flows (per the variant name;
    /// confirm exact scope at the harness call sites).
    ExecOnly,
    /// Enable continuation loops everywhere (default).
    #[default]
    All,
}
258
259impl ContinuationPolicy {
260    pub fn as_str(&self) -> &'static str {
261        match self {
262            Self::Off => "off",
263            Self::ExecOnly => "exec_only",
264            Self::All => "all",
265        }
266    }
267
268    pub fn parse(value: &str) -> Option<Self> {
269        let normalized = value.trim();
270        if normalized.eq_ignore_ascii_case("off") {
271            Some(Self::Off)
272        } else if normalized.eq_ignore_ascii_case("exec_only")
273            || normalized.eq_ignore_ascii_case("exec-only")
274        {
275            Some(Self::ExecOnly)
276        } else if normalized.eq_ignore_ascii_case("all") {
277            Some(Self::All)
278        } else {
279            None
280        }
281    }
282}
283
impl<'de> Deserialize<'de> for ContinuationPolicy {
    /// Lenient string deserialization: any unrecognized value maps to the
    /// default policy (`All`) instead of failing the whole config load.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let raw = String::deserialize(deserializer)?;
        // NOTE(review): typos are silently coerced to `All` here. If strict
        // validation is preferred, return a `D::Error` instead — confirm intent.
        Ok(Self::parse(&raw).unwrap_or_default())
    }
}
293
/// Orchestration path for the exec/full-auto harness.
///
/// Serialized in `snake_case`. Deserialization is lenient: unrecognized
/// strings fall back to the default variant (`Single`) via the manual
/// `Deserialize` impl in this file.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "schema", schemars(rename_all = "snake_case"))]
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum HarnessOrchestrationMode {
    /// Single-agent orchestration (default).
    #[default]
    Single,
    /// Plan → build → evaluate multi-phase orchestration.
    PlanBuildEvaluate,
}
303
304impl HarnessOrchestrationMode {
305    pub fn as_str(&self) -> &'static str {
306        match self {
307            Self::Single => "single",
308            Self::PlanBuildEvaluate => "plan_build_evaluate",
309        }
310    }
311
312    pub fn parse(value: &str) -> Option<Self> {
313        let normalized = value.trim();
314        if normalized.eq_ignore_ascii_case("single") {
315            Some(Self::Single)
316        } else if normalized.eq_ignore_ascii_case("plan_build_evaluate")
317            || normalized.eq_ignore_ascii_case("plan-build-evaluate")
318            || normalized.eq_ignore_ascii_case("planner_generator_evaluator")
319            || normalized.eq_ignore_ascii_case("planner-generator-evaluator")
320        {
321            Some(Self::PlanBuildEvaluate)
322        } else {
323            None
324        }
325    }
326}
327
impl<'de> Deserialize<'de> for HarnessOrchestrationMode {
    /// Lenient string deserialization: any unrecognized value maps to the
    /// default mode (`Single`) instead of failing the whole config load.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let raw = String::deserialize(deserializer)?;
        // NOTE(review): invalid input silently becomes `Single`; surface a
        // `D::Error` here if strict validation is desired — confirm intent.
        Ok(Self::parse(&raw).unwrap_or_default())
    }
}
337
/// Harness configuration for turn-level budgets, telemetry, and execution limits.
///
/// Every field has a serde default; keep the hand-written `Default` impl below
/// in sync with those defaults.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentHarnessConfig {
    /// Maximum number of tool calls allowed per turn. Set to `0` to disable the cap.
    #[serde(default = "default_harness_max_tool_calls_per_turn")]
    pub max_tool_calls_per_turn: usize,
    /// Maximum wall clock time (seconds) for tool execution in a turn
    #[serde(default = "default_harness_max_tool_wall_clock_secs")]
    pub max_tool_wall_clock_secs: u64,
    /// Maximum retries for retryable tool errors
    #[serde(default = "default_harness_max_tool_retries")]
    pub max_tool_retries: u32,
    /// Enable automatic context compaction when token pressure crosses threshold.
    ///
    /// Disabled by default. When disabled, no automatic compaction is triggered.
    #[serde(default = "default_harness_auto_compaction_enabled")]
    pub auto_compaction_enabled: bool,
    /// Optional absolute compact threshold (tokens) for Responses server-side compaction.
    ///
    /// When unset, VT Code derives a threshold from the provider context window.
    #[serde(default)]
    pub auto_compaction_threshold_tokens: Option<u64>,
    /// Optional maximum estimated API cost in USD before VT Code stops the session.
    #[serde(default)]
    pub max_budget_usd: Option<f64>,
    /// Controls whether harness-managed continuation loops are enabled.
    #[serde(default)]
    pub continuation_policy: ContinuationPolicy,
    /// Optional JSONL event log path for harness events.
    /// Defaults to `~/.vtcode/sessions/` when unset.
    #[serde(default)]
    pub event_log_path: Option<String>,
    /// Select the exec/full-auto harness orchestration path.
    #[serde(default)]
    pub orchestration_mode: HarnessOrchestrationMode,
    /// Maximum generator revision rounds after evaluator rejection.
    #[serde(default = "default_harness_max_revision_rounds")]
    pub max_revision_rounds: usize,
}
377
impl Default for AgentHarnessConfig {
    /// Mirror the serde field defaults so a programmatically constructed
    /// config matches one deserialized from an empty table.
    fn default() -> Self {
        Self {
            max_tool_calls_per_turn: default_harness_max_tool_calls_per_turn(),
            max_tool_wall_clock_secs: default_harness_max_tool_wall_clock_secs(),
            max_tool_retries: default_harness_max_tool_retries(),
            auto_compaction_enabled: default_harness_auto_compaction_enabled(),
            auto_compaction_threshold_tokens: None,
            max_budget_usd: None,
            continuation_policy: ContinuationPolicy::default(),
            event_log_path: None,
            orchestration_mode: HarnessOrchestrationMode::default(),
            max_revision_rounds: default_harness_max_revision_rounds(),
        }
    }
}
394
/// Circuit breaker configuration for resilient tool execution.
///
/// Controls when the agent pauses and asks for user guidance after repeated
/// tool failures. Every field has a serde default; keep the `Default` impl
/// below in sync.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CircuitBreakerConfig {
    /// Enable circuit breaker functionality
    #[serde(default = "default_circuit_breaker_enabled")]
    pub enabled: bool,

    /// Number of consecutive failures before opening circuit
    #[serde(default = "default_failure_threshold")]
    pub failure_threshold: u32,

    /// Pause and ask user when circuit opens (vs auto-backoff)
    #[serde(default = "default_pause_on_open")]
    pub pause_on_open: bool,

    /// Number of open circuits before triggering pause
    #[serde(default = "default_max_open_circuits")]
    pub max_open_circuits: usize,

    /// Cooldown period between recovery prompts (seconds)
    #[serde(default = "default_recovery_cooldown")]
    pub recovery_cooldown: u64,
}
418
impl Default for CircuitBreakerConfig {
    /// Mirror the serde field defaults so a programmatically constructed
    /// config matches one deserialized from an empty table.
    fn default() -> Self {
        Self {
            enabled: default_circuit_breaker_enabled(),
            failure_threshold: default_failure_threshold(),
            pause_on_open: default_pause_on_open(),
            max_open_circuits: default_max_open_circuits(),
            recovery_cooldown: default_recovery_cooldown(),
        }
    }
}
430
/// Open Responses specification compliance configuration
///
/// Enables vendor-neutral LLM API format per the Open Responses specification
/// (<https://www.openresponses.org/>). When enabled, VT Code emits semantic
/// streaming events and uses standardized response/item structures.
///
/// The top-level `enabled` flag is opt-in; the remaining toggles default to
/// `true` and only take effect once the layer is enabled.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct OpenResponsesConfig {
    /// Enable Open Responses specification compliance layer
    /// When true, VT Code emits semantic streaming events alongside internal events
    /// Default: false (opt-in feature)
    #[serde(default)]
    pub enabled: bool,

    /// Emit Open Responses events to the event sink
    /// When true, streaming events follow Open Responses format
    /// (response.created, response.output_item.added, response.output_text.delta, etc.)
    #[serde(default = "default_open_responses_emit_events")]
    pub emit_events: bool,

    /// Include VT Code extension items (vtcode:file_change, vtcode:web_search, etc.)
    /// When false, extension items are omitted from the Open Responses output
    #[serde(default = "default_open_responses_include_extensions")]
    pub include_extensions: bool,

    /// Map internal tool calls to Open Responses function_call items
    /// When true, command executions and MCP tool calls are represented as function_call items
    #[serde(default = "default_open_responses_map_tool_calls")]
    pub map_tool_calls: bool,

    /// Include reasoning items in Open Responses output
    /// When true, model reasoning/thinking is exposed as reasoning items
    #[serde(default = "default_open_responses_include_reasoning")]
    pub include_reasoning: bool,
}
466
impl Default for OpenResponsesConfig {
    /// Mirror the serde field defaults so a programmatically constructed
    /// config matches one deserialized from an empty table.
    fn default() -> Self {
        Self {
            enabled: false, // Opt-in by default
            emit_events: default_open_responses_emit_events(),
            include_extensions: default_open_responses_include_extensions(),
            map_tool_calls: default_open_responses_map_tool_calls(),
            include_reasoning: default_open_responses_include_reasoning(),
        }
    }
}
478
// Serde default helpers for `OpenResponsesConfig`.
#[inline]
const fn default_open_responses_emit_events() -> bool {
    true // When enabled, emit events by default
}

#[inline]
const fn default_open_responses_include_extensions() -> bool {
    true // Include VT Code-specific extensions by default
}

#[inline]
const fn default_open_responses_map_tool_calls() -> bool {
    true // Map tool calls to function_call items by default
}

#[inline]
const fn default_open_responses_include_reasoning() -> bool {
    true // Include reasoning items by default
}
498
impl Default for AgentConfig {
    /// Mirror every serde field default so a programmatically constructed
    /// `AgentConfig` is indistinguishable from one deserialized from an empty
    /// config. Keep this in sync when adding fields to the struct.
    fn default() -> Self {
        Self {
            provider: default_provider(),
            api_key_env: default_api_key_env(),
            default_model: default_model(),
            theme: default_theme(),
            system_prompt_mode: SystemPromptMode::default(),
            tool_documentation_mode: ToolDocumentationMode::default(),
            enable_split_tool_results: default_enable_split_tool_results(),
            todo_planning_mode: default_todo_planning_mode(),
            ui_surface: UiSurfacePreference::default(),
            max_conversation_turns: default_max_conversation_turns(),
            reasoning_effort: default_reasoning_effort(),
            verbosity: default_verbosity(),
            temperature: default_temperature(),
            refine_temperature: default_refine_temperature(),
            enable_self_review: default_enable_self_review(),
            max_review_passes: default_max_review_passes(),
            refine_prompts_enabled: default_refine_prompts_enabled(),
            refine_prompts_max_passes: default_refine_max_passes(),
            refine_prompts_model: String::new(),
            small_model: AgentSmallModelConfig::default(),
            prompt_suggestions: AgentPromptSuggestionsConfig::default(),
            onboarding: AgentOnboardingConfig::default(),
            project_doc_max_bytes: default_project_doc_max_bytes(),
            project_doc_fallback_filenames: Vec::new(),
            instruction_max_bytes: default_instruction_max_bytes(),
            instruction_files: Vec::new(),
            instruction_excludes: Vec::new(),
            instruction_import_max_depth: default_instruction_import_max_depth(),
            persistent_memory: PersistentMemoryConfig::default(),
            custom_api_keys: BTreeMap::new(),
            credential_storage_mode: crate::auth::AuthCredentialsStoreMode::default(),
            checkpointing: AgentCheckpointingConfig::default(),
            vibe_coding: AgentVibeCodingConfig::default(),
            max_task_retries: default_max_task_retries(),
            harness: AgentHarnessConfig::default(),
            include_temporal_context: default_include_temporal_context(),
            temporal_context_use_utc: false, // Default to local time
            include_working_directory: default_include_working_directory(),
            include_structured_reasoning_tags: None,
            user_instructions: None,
            default_editing_mode: EditingMode::default(),
            require_plan_confirmation: default_require_plan_confirmation(),
            autonomous_mode: default_autonomous_mode(),
            circuit_breaker: CircuitBreakerConfig::default(),
            open_responses: OpenResponsesConfig::default(),
        }
    }
}
550
551impl AgentConfig {
552    /// Determine whether structured reasoning tag instructions should be included.
553    pub fn should_include_structured_reasoning_tags(&self) -> bool {
554        self.include_structured_reasoning_tags.unwrap_or(matches!(
555            self.system_prompt_mode,
556            SystemPromptMode::Default | SystemPromptMode::Specialized
557        ))
558    }
559
560    /// Validate LLM generation parameters
561    pub fn validate_llm_params(&self) -> Result<(), String> {
562        // Validate temperature range
563        if !(0.0..=1.0).contains(&self.temperature) {
564            return Err(format!(
565                "temperature must be between 0.0 and 1.0, got {}",
566                self.temperature
567            ));
568        }
569
570        if !(0.0..=1.0).contains(&self.refine_temperature) {
571            return Err(format!(
572                "refine_temperature must be between 0.0 and 1.0, got {}",
573                self.refine_temperature
574            ));
575        }
576
577        if self.instruction_import_max_depth == 0 {
578            return Err("instruction_import_max_depth must be greater than 0".to_string());
579        }
580
581        self.persistent_memory.validate()?;
582
583        Ok(())
584    }
585}
586
// Optimized: Use inline defaults with constants to reduce function call overhead
// Serde default helpers for the string-valued `AgentConfig` fields.
#[inline]
fn default_provider() -> String {
    defaults::DEFAULT_PROVIDER.into()
}

#[inline]
fn default_api_key_env() -> String {
    defaults::DEFAULT_API_KEY_ENV.into()
}

#[inline]
fn default_model() -> String {
    defaults::DEFAULT_MODEL.into()
}

#[inline]
fn default_theme() -> String {
    defaults::DEFAULT_THEME.into()
}
607
// Serde default helpers for scalar `AgentConfig` fields.
#[inline]
const fn default_todo_planning_mode() -> bool {
    true
}

#[inline]
const fn default_enable_split_tool_results() -> bool {
    true // Default: enabled for production use (84% token savings)
}

#[inline]
const fn default_max_conversation_turns() -> usize {
    defaults::DEFAULT_MAX_CONVERSATION_TURNS
}

// Explicitly `None` rather than `ReasoningEffortLevel::default()`; reasoning
// effort is opt-in per model.
#[inline]
fn default_reasoning_effort() -> ReasoningEffortLevel {
    ReasoningEffortLevel::None
}

#[inline]
fn default_verbosity() -> VerbosityLevel {
    VerbosityLevel::default()
}

#[inline]
const fn default_temperature() -> f32 {
    llm_generation::DEFAULT_TEMPERATURE
}

#[inline]
const fn default_refine_temperature() -> f32 {
    llm_generation::DEFAULT_REFINE_TEMPERATURE
}

#[inline]
const fn default_enable_self_review() -> bool {
    false
}

#[inline]
const fn default_max_review_passes() -> usize {
    1
}

#[inline]
const fn default_refine_prompts_enabled() -> bool {
    false
}

#[inline]
const fn default_refine_max_passes() -> usize {
    1
}

// Both project docs and instruction docs share the same byte budget.
#[inline]
const fn default_project_doc_max_bytes() -> usize {
    prompt_budget::DEFAULT_MAX_BYTES
}

#[inline]
const fn default_instruction_max_bytes() -> usize {
    prompt_budget::DEFAULT_MAX_BYTES
}

#[inline]
const fn default_instruction_import_max_depth() -> usize {
    5
}

#[inline]
const fn default_max_task_retries() -> u32 {
    2 // Retry twice on transient failures
}
682
// Serde default helpers for `AgentHarnessConfig`.
#[inline]
const fn default_harness_max_tool_calls_per_turn() -> usize {
    defaults::DEFAULT_MAX_TOOL_CALLS_PER_TURN
}

#[inline]
const fn default_harness_max_tool_wall_clock_secs() -> u64 {
    defaults::DEFAULT_MAX_TOOL_WALL_CLOCK_SECS
}

#[inline]
const fn default_harness_max_tool_retries() -> u32 {
    defaults::DEFAULT_MAX_TOOL_RETRIES
}

#[inline]
const fn default_harness_auto_compaction_enabled() -> bool {
    false // Auto compaction is opt-in
}

#[inline]
const fn default_harness_max_revision_rounds() -> usize {
    2
}
707
// Serde default helpers for prompt-context, plan-mode, and circuit-breaker fields.
#[inline]
const fn default_include_temporal_context() -> bool {
    true // Enable by default - minimal overhead (~20 tokens)
}

#[inline]
const fn default_include_working_directory() -> bool {
    true // Enable by default - minimal overhead (~10 tokens)
}

#[inline]
const fn default_require_plan_confirmation() -> bool {
    true // Default: require confirmation (HITL pattern)
}

#[inline]
const fn default_autonomous_mode() -> bool {
    false // Default: interactive mode with full HITL
}

#[inline]
const fn default_circuit_breaker_enabled() -> bool {
    true // Default: enabled for resilient execution
}

#[inline]
const fn default_failure_threshold() -> u32 {
    7 // Open circuit after 7 consecutive failures
}

#[inline]
const fn default_pause_on_open() -> bool {
    true // Default: ask user for guidance on circuit breaker
}

#[inline]
const fn default_max_open_circuits() -> usize {
    3 // Pause when 3+ tools have open circuits
}

#[inline]
const fn default_recovery_cooldown() -> u64 {
    60 // Cooldown between recovery prompts (seconds)
}
752
/// Checkpointing configuration for automatic turn snapshots.
///
/// Every field has a serde default; keep the `Default` impl below in sync.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentCheckpointingConfig {
    /// Enable automatic checkpoints after each successful turn
    #[serde(default = "default_checkpointing_enabled")]
    pub enabled: bool,

    /// Optional custom directory for storing checkpoints (relative to workspace or absolute)
    #[serde(default)]
    pub storage_dir: Option<String>,

    /// Maximum number of checkpoints to retain on disk
    #[serde(default = "default_checkpointing_max_snapshots")]
    pub max_snapshots: usize,

    /// Maximum age in days before checkpoints are removed automatically (None disables)
    #[serde(default = "default_checkpointing_max_age_days")]
    pub max_age_days: Option<u64>,
}
772
impl Default for AgentCheckpointingConfig {
    /// Mirror the serde field defaults so a programmatically constructed
    /// config matches one deserialized from an empty table.
    fn default() -> Self {
        Self {
            enabled: default_checkpointing_enabled(),
            storage_dir: None,
            max_snapshots: default_checkpointing_max_snapshots(),
            max_age_days: default_checkpointing_max_age_days(),
        }
    }
}
783
// Serde default helpers for `AgentCheckpointingConfig`, backed by the
// file-level retention constants.
#[inline]
const fn default_checkpointing_enabled() -> bool {
    DEFAULT_CHECKPOINTS_ENABLED
}

#[inline]
const fn default_checkpointing_max_snapshots() -> usize {
    DEFAULT_MAX_SNAPSHOTS
}

#[inline]
const fn default_checkpointing_max_age_days() -> Option<u64> {
    Some(DEFAULT_MAX_AGE_DAYS)
}
798
/// Durable per-repository memory configuration for main sessions.
///
/// Every field has a serde default; keep the `Default` impl below in sync.
/// Budgets are checked by [`PersistentMemoryConfig::validate`].
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct PersistentMemoryConfig {
    /// Toggle main-session persistent memory for this repository
    #[serde(default = "default_persistent_memory_enabled")]
    pub enabled: bool,

    /// Write durable memory after completed turns and session finalization
    #[serde(default = "default_persistent_memory_auto_write")]
    pub auto_write: bool,

    /// Optional user-local directory override for persistent memory storage
    #[serde(default)]
    pub directory_override: Option<String>,

    /// Startup line budget loaded from memory_summary.md
    #[serde(default = "default_persistent_memory_startup_line_limit")]
    pub startup_line_limit: usize,

    /// Startup byte budget loaded from memory_summary.md
    #[serde(default = "default_persistent_memory_startup_byte_limit")]
    pub startup_byte_limit: usize,
}
822
823impl Default for PersistentMemoryConfig {
824    fn default() -> Self {
825        Self {
826            enabled: default_persistent_memory_enabled(),
827            auto_write: default_persistent_memory_auto_write(),
828            directory_override: None,
829            startup_line_limit: default_persistent_memory_startup_line_limit(),
830            startup_byte_limit: default_persistent_memory_startup_byte_limit(),
831        }
832    }
833}
834
835impl PersistentMemoryConfig {
836    pub fn validate(&self) -> Result<(), String> {
837        if self.startup_line_limit == 0 {
838            return Err("persistent_memory.startup_line_limit must be greater than 0".to_string());
839        }
840
841        if self.startup_byte_limit == 0 {
842            return Err("persistent_memory.startup_byte_limit must be greater than 0".to_string());
843        }
844
845        Ok(())
846    }
847}
848
/// Serde default: persistent memory is opt-in per repository.
#[inline]
const fn default_persistent_memory_enabled() -> bool {
    false
}

/// Serde default: when memory is enabled, write it automatically after turns.
#[inline]
const fn default_persistent_memory_auto_write() -> bool {
    true
}

/// Serde default: load at most this many lines of memory_summary.md at startup.
#[inline]
const fn default_persistent_memory_startup_line_limit() -> usize {
    200
}

/// Serde default: load at most 25 KiB of memory_summary.md at startup.
#[inline]
const fn default_persistent_memory_startup_byte_limit() -> usize {
    25 * 1024
}
868
/// Session-start onboarding banner configuration.
///
/// Controls which sections (project overview, guideline highlights, tips,
/// suggested actions) are rendered when a session begins.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentOnboardingConfig {
    /// Toggle onboarding message rendering
    #[serde(default = "default_onboarding_enabled")]
    pub enabled: bool,

    /// Introductory text shown at session start
    #[serde(default = "default_intro_text")]
    pub intro_text: String,

    /// Whether to include project overview in onboarding message
    #[serde(default = "default_show_project_overview")]
    pub include_project_overview: bool,

    /// Whether to include language summary in onboarding message
    #[serde(default = "default_show_language_summary")]
    pub include_language_summary: bool,

    /// Whether to include AGENTS.md highlights in onboarding message
    #[serde(default = "default_show_guideline_highlights")]
    pub include_guideline_highlights: bool,

    /// Whether to surface usage tips inside the welcome text banner
    #[serde(default = "default_show_usage_tips_in_welcome")]
    pub include_usage_tips_in_welcome: bool,

    /// Whether to surface suggested actions inside the welcome text banner
    #[serde(default = "default_show_recommended_actions_in_welcome")]
    pub include_recommended_actions_in_welcome: bool,

    /// Maximum number of guideline bullets to surface
    #[serde(default = "default_guideline_highlight_limit")]
    pub guideline_highlight_limit: usize,

    /// Tips for collaborating with the agent effectively
    #[serde(default = "default_usage_tips")]
    pub usage_tips: Vec<String>,

    /// Recommended follow-up actions to display
    #[serde(default = "default_recommended_actions")]
    pub recommended_actions: Vec<String>,

    /// Placeholder suggestion for the chat input bar (None uses the built-in placeholder)
    #[serde(default)]
    pub chat_placeholder: Option<String>,
}
916
917impl Default for AgentOnboardingConfig {
918    fn default() -> Self {
919        Self {
920            enabled: default_onboarding_enabled(),
921            intro_text: default_intro_text(),
922            include_project_overview: default_show_project_overview(),
923            include_language_summary: default_show_language_summary(),
924            include_guideline_highlights: default_show_guideline_highlights(),
925            include_usage_tips_in_welcome: default_show_usage_tips_in_welcome(),
926            include_recommended_actions_in_welcome: default_show_recommended_actions_in_welcome(),
927            guideline_highlight_limit: default_guideline_highlight_limit(),
928            usage_tips: default_usage_tips(),
929            recommended_actions: default_recommended_actions(),
930            chat_placeholder: None,
931        }
932    }
933}
934
/// Serde default: the onboarding banner is rendered unless disabled.
#[inline]
const fn default_onboarding_enabled() -> bool {
    true
}
939
/// Default introductory line for the onboarding banner.
const DEFAULT_INTRO_TEXT: &str =
    "Let's get oriented. I preloaded workspace context so we can move fast.";

/// Serde default: owned copy of [`DEFAULT_INTRO_TEXT`].
#[inline]
fn default_intro_text() -> String {
    DEFAULT_INTRO_TEXT.to_string()
}
947
/// Serde default: include the project overview section in the banner.
#[inline]
const fn default_show_project_overview() -> bool {
    true
}

/// Serde default: the language summary section is off.
#[inline]
const fn default_show_language_summary() -> bool {
    false
}

/// Serde default: include AGENTS.md guideline highlights.
#[inline]
const fn default_show_guideline_highlights() -> bool {
    true
}

/// Serde default: usage tips are not embedded in the welcome banner.
#[inline]
const fn default_show_usage_tips_in_welcome() -> bool {
    false
}

/// Serde default: recommended actions are not embedded in the welcome banner.
#[inline]
const fn default_show_recommended_actions_in_welcome() -> bool {
    false
}

/// Serde default: surface at most three guideline bullets.
#[inline]
const fn default_guideline_highlight_limit() -> usize {
    3
}
977
/// Default collaboration tips surfaced during onboarding.
const DEFAULT_USAGE_TIPS: &[&str] = &[
    "Describe your current coding goal or ask for a quick status overview.",
    "Reference AGENTS.md guidelines when proposing changes.",
    "Prefer asking for targeted file reads or diffs before editing.",
];

/// Default follow-up actions suggested during onboarding.
const DEFAULT_RECOMMENDED_ACTIONS: &[&str] = &[
    "Review the highlighted guidelines and share the task you want to tackle.",
    "Ask for a workspace tour if you need more context.",
];

/// Serde default: owned copies of [`DEFAULT_USAGE_TIPS`].
// `#[inline]` added for consistency with every other serde-default helper in
// this file; `.copied()` + `String::from` replaces the manual `(*s).into()`.
#[inline]
fn default_usage_tips() -> Vec<String> {
    DEFAULT_USAGE_TIPS.iter().copied().map(String::from).collect()
}

/// Serde default: owned copies of [`DEFAULT_RECOMMENDED_ACTIONS`].
#[inline]
fn default_recommended_actions() -> Vec<String> {
    DEFAULT_RECOMMENDED_ACTIONS
        .iter()
        .copied()
        .map(String::from)
        .collect()
}
999
/// Small/lightweight model configuration for efficient operations
///
/// Following VT Code's pattern, use a smaller model (e.g., Haiku, GPT-4 Mini) for 50%+ of calls:
/// - Large file reads and parsing (>50KB)
/// - Web page summarization and analysis
/// - Git history and commit message processing
/// - One-word processing labels and simple classifications
///
/// Typically 70-80% cheaper than the main model while maintaining quality for these tasks.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentSmallModelConfig {
    /// Enable small model tier for efficient operations
    #[serde(default = "default_small_model_enabled")]
    pub enabled: bool,

    /// Small model to use (e.g., "claude-4-5-haiku", "gpt-4-mini", "gemini-2.0-flash")
    /// Leave empty to auto-select a lightweight sibling of the main model
    #[serde(default)]
    pub model: String,

    /// Temperature for small model responses (low by default for deterministic parsing)
    #[serde(default = "default_small_model_temperature")]
    pub temperature: f32,

    /// Enable small model for large file reads (>50KB)
    #[serde(default = "default_small_model_for_large_reads")]
    pub use_for_large_reads: bool,

    /// Enable small model for web content summarization
    #[serde(default = "default_small_model_for_web_summary")]
    pub use_for_web_summary: bool,

    /// Enable small model for git history processing
    #[serde(default = "default_small_model_for_git_history")]
    pub use_for_git_history: bool,

    /// Enable small model for persistent memory classification and summary refresh
    #[serde(default = "default_small_model_for_memory")]
    pub use_for_memory: bool,
}
1041
1042impl Default for AgentSmallModelConfig {
1043    fn default() -> Self {
1044        Self {
1045            enabled: default_small_model_enabled(),
1046            model: String::new(),
1047            temperature: default_small_model_temperature(),
1048            use_for_large_reads: default_small_model_for_large_reads(),
1049            use_for_web_summary: default_small_model_for_web_summary(),
1050            use_for_git_history: default_small_model_for_git_history(),
1051            use_for_memory: default_small_model_for_memory(),
1052        }
1053    }
1054}
1055
/// Serde default: small-model tier is on, following the VT Code routing pattern.
#[inline]
const fn default_small_model_enabled() -> bool {
    true // Enable by default following VT Code pattern
}

/// Serde default: low temperature for deterministic parsing/summarization.
#[inline]
const fn default_small_model_temperature() -> f32 {
    0.3 // More deterministic for parsing/summarization
}

/// Serde default: route large (>50KB) file reads to the small model.
#[inline]
const fn default_small_model_for_large_reads() -> bool {
    true
}

/// Serde default: route web content summarization to the small model.
#[inline]
const fn default_small_model_for_web_summary() -> bool {
    true
}

/// Serde default: route git history processing to the small model.
#[inline]
const fn default_small_model_for_git_history() -> bool {
    true
}

/// Serde default: route persistent-memory classification to the small model.
#[inline]
const fn default_small_model_for_memory() -> bool {
    true
}
1085
/// Inline prompt suggestion configuration for the chat composer.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentPromptSuggestionsConfig {
    /// Enable inline prompt suggestions in the chat composer.
    #[serde(default = "default_prompt_suggestions_enabled")]
    pub enabled: bool,

    /// Lightweight model to use for suggestions.
    /// Leave empty to auto-select an efficient sibling of the main model.
    #[serde(default)]
    pub model: String,

    /// Temperature for inline prompt suggestion generation.
    #[serde(default = "default_prompt_suggestions_temperature")]
    pub temperature: f32,

    /// Whether VT Code should remind users that LLM-backed suggestions consume tokens.
    #[serde(default = "default_prompt_suggestions_show_cost_notice")]
    pub show_cost_notice: bool,
}
1107
1108impl Default for AgentPromptSuggestionsConfig {
1109    fn default() -> Self {
1110        Self {
1111            enabled: default_prompt_suggestions_enabled(),
1112            model: String::new(),
1113            temperature: default_prompt_suggestions_temperature(),
1114            show_cost_notice: default_prompt_suggestions_show_cost_notice(),
1115        }
1116    }
1117}
1118
/// Serde default: inline prompt suggestions are enabled.
#[inline]
const fn default_prompt_suggestions_enabled() -> bool {
    true
}

/// Serde default: low temperature keeps suggestions stable between keystrokes.
#[inline]
const fn default_prompt_suggestions_temperature() -> f32 {
    0.3
}

/// Serde default: remind users that LLM-backed suggestions consume tokens.
#[inline]
const fn default_prompt_suggestions_show_cost_notice() -> bool {
    true
}
1133
/// Vibe coding configuration for lazy/vague request support
///
/// Enables intelligent context gathering and entity resolution to support
/// casual, imprecise requests like "make it blue" or "decrease by half".
/// Disabled by default; see `default_vibe_coding_enabled`.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentVibeCodingConfig {
    /// Enable vibe coding support (opt-in)
    #[serde(default = "default_vibe_coding_enabled")]
    pub enabled: bool,

    /// Minimum prompt length for refinement (default: 5 chars)
    #[serde(default = "default_vibe_min_prompt_length")]
    pub min_prompt_length: usize,

    /// Minimum prompt words for refinement (default: 2 words)
    #[serde(default = "default_vibe_min_prompt_words")]
    pub min_prompt_words: usize,

    /// Enable fuzzy entity resolution
    #[serde(default = "default_vibe_entity_resolution")]
    pub enable_entity_resolution: bool,

    /// Entity index cache file path (relative to workspace)
    #[serde(default = "default_vibe_entity_cache")]
    pub entity_index_cache: String,

    /// Maximum entity matches to return (default: 5)
    #[serde(default = "default_vibe_max_entity_matches")]
    pub max_entity_matches: usize,

    /// Track workspace state (file activity, value changes)
    #[serde(default = "default_vibe_track_workspace")]
    pub track_workspace_state: bool,

    /// Maximum recent files to track (default: 20)
    #[serde(default = "default_vibe_max_recent_files")]
    pub max_recent_files: usize,

    /// Track value history for inference
    #[serde(default = "default_vibe_track_values")]
    pub track_value_history: bool,

    /// Enable conversation memory for pronoun resolution
    #[serde(default = "default_vibe_conversation_memory")]
    pub enable_conversation_memory: bool,

    /// Maximum conversation turns to remember (default: 50)
    #[serde(default = "default_vibe_max_memory_turns")]
    pub max_memory_turns: usize,

    /// Enable pronoun resolution (it, that, this)
    #[serde(default = "default_vibe_pronoun_resolution")]
    pub enable_pronoun_resolution: bool,

    /// Enable proactive context gathering
    #[serde(default = "default_vibe_proactive_context")]
    pub enable_proactive_context: bool,

    /// Maximum files to gather for context (default: 3)
    #[serde(default = "default_vibe_max_context_files")]
    pub max_context_files: usize,

    /// Maximum code snippets per file (default: 20 lines)
    #[serde(default = "default_vibe_max_snippets_per_file")]
    pub max_context_snippets_per_file: usize,

    /// Maximum search results to include (default: 5)
    #[serde(default = "default_vibe_max_search_results")]
    pub max_search_results: usize,

    /// Enable relative value inference (by half, double, etc.)
    #[serde(default = "default_vibe_value_inference")]
    pub enable_relative_value_inference: bool,
}
1209
1210impl Default for AgentVibeCodingConfig {
1211    fn default() -> Self {
1212        Self {
1213            enabled: default_vibe_coding_enabled(),
1214            min_prompt_length: default_vibe_min_prompt_length(),
1215            min_prompt_words: default_vibe_min_prompt_words(),
1216            enable_entity_resolution: default_vibe_entity_resolution(),
1217            entity_index_cache: default_vibe_entity_cache(),
1218            max_entity_matches: default_vibe_max_entity_matches(),
1219            track_workspace_state: default_vibe_track_workspace(),
1220            max_recent_files: default_vibe_max_recent_files(),
1221            track_value_history: default_vibe_track_values(),
1222            enable_conversation_memory: default_vibe_conversation_memory(),
1223            max_memory_turns: default_vibe_max_memory_turns(),
1224            enable_pronoun_resolution: default_vibe_pronoun_resolution(),
1225            enable_proactive_context: default_vibe_proactive_context(),
1226            max_context_files: default_vibe_max_context_files(),
1227            max_context_snippets_per_file: default_vibe_max_snippets_per_file(),
1228            max_search_results: default_vibe_max_search_results(),
1229            enable_relative_value_inference: default_vibe_value_inference(),
1230        }
1231    }
1232}
1233
// Serde-default helpers for AgentVibeCodingConfig.

/// Serde default: vibe coding is opt-in (conservative default).
#[inline]
const fn default_vibe_coding_enabled() -> bool {
    false // Conservative default, opt-in
}

/// Serde default: prompts shorter than 5 chars are not refined.
#[inline]
const fn default_vibe_min_prompt_length() -> usize {
    5
}

/// Serde default: prompts with fewer than 2 words are not refined.
#[inline]
const fn default_vibe_min_prompt_words() -> usize {
    2
}

/// Serde default: fuzzy entity resolution is on once vibe coding is enabled.
#[inline]
const fn default_vibe_entity_resolution() -> bool {
    true
}
1254
/// Serde default: workspace-relative cache path for the entity index.
#[inline]
fn default_vibe_entity_cache() -> String {
    String::from(".vtcode/entity_index.json")
}
1259
/// Serde default: return at most 5 fuzzy entity matches.
#[inline]
const fn default_vibe_max_entity_matches() -> usize {
    5
}

/// Serde default: track workspace state (file activity, value changes).
#[inline]
const fn default_vibe_track_workspace() -> bool {
    true
}

/// Serde default: track at most the 20 most recent files.
#[inline]
const fn default_vibe_max_recent_files() -> usize {
    20
}

/// Serde default: keep value history for relative-value inference.
#[inline]
const fn default_vibe_track_values() -> bool {
    true
}

/// Serde default: conversation memory (for pronoun resolution) is on.
#[inline]
const fn default_vibe_conversation_memory() -> bool {
    true
}

/// Serde default: remember at most 50 conversation turns.
#[inline]
const fn default_vibe_max_memory_turns() -> usize {
    50
}

/// Serde default: resolve pronouns like "it", "that", "this".
#[inline]
const fn default_vibe_pronoun_resolution() -> bool {
    true
}

/// Serde default: gather context proactively for vague requests.
#[inline]
const fn default_vibe_proactive_context() -> bool {
    true
}

/// Serde default: pull context from at most 3 files.
#[inline]
const fn default_vibe_max_context_files() -> usize {
    3
}

/// Serde default: cap context snippets at 20 lines per file.
#[inline]
const fn default_vibe_max_snippets_per_file() -> usize {
    20
}

/// Serde default: include at most 5 search results as context.
#[inline]
const fn default_vibe_max_search_results() -> usize {
    5
}

/// Serde default: infer relative values ("by half", "double").
#[inline]
const fn default_vibe_value_inference() -> bool {
    true
}
1319
#[cfg(test)]
mod tests {
    use super::*;

    // ContinuationPolicy: default is All; parse accepts the three kebab-case
    // spellings and rejects anything else.
    #[test]
    fn test_continuation_policy_defaults_and_parses() {
        assert_eq!(ContinuationPolicy::default(), ContinuationPolicy::All);
        assert_eq!(
            ContinuationPolicy::parse("off"),
            Some(ContinuationPolicy::Off)
        );
        assert_eq!(
            ContinuationPolicy::parse("exec-only"),
            Some(ContinuationPolicy::ExecOnly)
        );
        assert_eq!(
            ContinuationPolicy::parse("all"),
            Some(ContinuationPolicy::All)
        );
        assert_eq!(ContinuationPolicy::parse("invalid"), None);
    }

    // Deserializing an unknown continuation_policy string falls back to the
    // default (All) instead of erroring.
    #[test]
    fn test_harness_config_continuation_policy_deserializes_with_fallback() {
        let parsed: AgentHarnessConfig =
            toml::from_str("continuation_policy = \"all\"").expect("valid harness config");
        assert_eq!(parsed.continuation_policy, ContinuationPolicy::All);

        let fallback: AgentHarnessConfig =
            toml::from_str("continuation_policy = \"unexpected\"").expect("fallback config");
        assert_eq!(fallback.continuation_policy, ContinuationPolicy::All);
    }

    // HarnessOrchestrationMode: default is Single; parse accepts both the
    // snake_case and the hyphenated alias for PlanBuildEvaluate.
    #[test]
    fn test_harness_orchestration_mode_defaults_and_parses() {
        assert_eq!(
            HarnessOrchestrationMode::default(),
            HarnessOrchestrationMode::Single
        );
        assert_eq!(
            HarnessOrchestrationMode::parse("single"),
            Some(HarnessOrchestrationMode::Single)
        );
        assert_eq!(
            HarnessOrchestrationMode::parse("plan_build_evaluate"),
            Some(HarnessOrchestrationMode::PlanBuildEvaluate)
        );
        assert_eq!(
            HarnessOrchestrationMode::parse("planner-generator-evaluator"),
            Some(HarnessOrchestrationMode::PlanBuildEvaluate)
        );
        assert_eq!(HarnessOrchestrationMode::parse("unexpected"), None);
    }

    // Unknown orchestration_mode strings also fall back to the default
    // (Single); a valid value round-trips and keeps max_revision_rounds = 2.
    #[test]
    fn test_harness_config_orchestration_deserializes_with_fallback() {
        let parsed: AgentHarnessConfig =
            toml::from_str("orchestration_mode = \"plan_build_evaluate\"")
                .expect("valid harness config");
        assert_eq!(
            parsed.orchestration_mode,
            HarnessOrchestrationMode::PlanBuildEvaluate
        );
        assert_eq!(parsed.max_revision_rounds, 2);

        let fallback: AgentHarnessConfig =
            toml::from_str("orchestration_mode = \"unexpected\"").expect("fallback config");
        assert_eq!(
            fallback.orchestration_mode,
            HarnessOrchestrationMode::Single
        );
    }

    // Default editing posture: Edit mode, plan confirmation required,
    // autonomous mode off.
    #[test]
    fn test_editing_mode_config_default() {
        let config = AgentConfig::default();
        assert_eq!(config.default_editing_mode, EditingMode::Edit);
        assert!(config.require_plan_confirmation);
        assert!(!config.autonomous_mode);
    }

    // Persistent memory is opt-in, but auto_write defaults to true for when
    // it is enabled.
    #[test]
    fn test_persistent_memory_is_disabled_by_default() {
        let config = AgentConfig::default();
        assert!(!config.persistent_memory.enabled);
        assert!(config.persistent_memory.auto_write);
    }

    // Structured reasoning tags follow the system prompt mode: on for
    // Default/Specialized, off for Minimal/Lightweight.
    #[test]
    fn test_structured_reasoning_defaults_follow_prompt_mode() {
        let default_mode = AgentConfig {
            system_prompt_mode: SystemPromptMode::Default,
            ..Default::default()
        };
        assert!(default_mode.should_include_structured_reasoning_tags());

        let specialized_mode = AgentConfig {
            system_prompt_mode: SystemPromptMode::Specialized,
            ..Default::default()
        };
        assert!(specialized_mode.should_include_structured_reasoning_tags());

        let minimal_mode = AgentConfig {
            system_prompt_mode: SystemPromptMode::Minimal,
            ..Default::default()
        };
        assert!(!minimal_mode.should_include_structured_reasoning_tags());

        let lightweight_mode = AgentConfig {
            system_prompt_mode: SystemPromptMode::Lightweight,
            ..Default::default()
        };
        assert!(!lightweight_mode.should_include_structured_reasoning_tags());
    }

    // An explicit include_structured_reasoning_tags override beats the
    // prompt-mode-derived behavior in both directions.
    #[test]
    fn test_structured_reasoning_explicit_override() {
        let mut config = AgentConfig {
            system_prompt_mode: SystemPromptMode::Minimal,
            include_structured_reasoning_tags: Some(true),
            ..AgentConfig::default()
        };
        assert!(config.should_include_structured_reasoning_tags());

        config.include_structured_reasoning_tags = Some(false);
        assert!(!config.should_include_structured_reasoning_tags());
    }
}