//! Agent-wide configuration types (`vtcode_config/core/agent.rs`).
1use crate::constants::{defaults, llm_generation, prompt_budget};
2use crate::types::{
3    EditingMode, ReasoningEffortLevel, SystemPromptMode, ToolDocumentationMode,
4    UiSurfacePreference, VerbosityLevel,
5};
6use serde::{Deserialize, Serialize};
7use std::collections::BTreeMap;
8
// Checkpointing retention defaults shared by `AgentCheckpointingConfig`.
const DEFAULT_CHECKPOINTS_ENABLED: bool = true;
// Maximum number of snapshots kept on disk before the oldest are pruned.
const DEFAULT_MAX_SNAPSHOTS: usize = 50;
// Snapshot age (in days) after which checkpoints are removed automatically.
const DEFAULT_MAX_AGE_DAYS: u64 = 30;
12
/// Agent-wide configuration
///
/// Every field carries a `#[serde(default = ...)]` attribute so partial
/// configuration files deserialize cleanly; `AgentConfig::default()` mirrors
/// those same defaults.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentConfig {
    /// AI provider for single agent mode (gemini, openai, anthropic, openrouter, zai)
    #[serde(default = "default_provider")]
    pub provider: String,

    /// Environment variable that stores the API key for the active provider
    #[serde(default = "default_api_key_env")]
    pub api_key_env: String,

    /// Default model to use
    #[serde(default = "default_model")]
    pub default_model: String,

    /// UI theme identifier controlling ANSI styling
    #[serde(default = "default_theme")]
    pub theme: String,

    /// System prompt mode controlling verbosity and token overhead
    /// Options: minimal (~500-800 tokens), lightweight (~1-2k), default (~6-7k), specialized (~7-8k)
    /// Inspired by pi-coding-agent: modern models often perform well with minimal prompts
    #[serde(default)]
    pub system_prompt_mode: SystemPromptMode,

    /// Tool documentation mode controlling token overhead for tool definitions
    /// Options: minimal (~800 tokens), progressive (~1.2k), full (~3k current)
    /// Progressive: signatures upfront, detailed docs on-demand (recommended)
    /// Minimal: signatures only, pi-coding-agent style (power users)
    /// Full: all documentation upfront (current behavior, default)
    #[serde(default)]
    pub tool_documentation_mode: ToolDocumentationMode,

    /// Enable split tool results for massive token savings (Phase 4)
    /// When enabled, tools return dual-channel output:
    /// - llm_content: Concise summary sent to LLM (token-optimized, 53-95% reduction)
    /// - ui_content: Rich output displayed to user (full details preserved)
    ///   Applies to: unified_search, unified_file, unified_exec
    ///   Default: true (opt-out for compatibility), recommended for production use
    #[serde(default = "default_enable_split_tool_results")]
    pub enable_split_tool_results: bool,

    /// Enable TODO planning helper mode for structured task management
    #[serde(default = "default_todo_planning_mode")]
    pub todo_planning_mode: bool,

    /// Preferred rendering surface for the interactive chat UI (auto, alternate, inline)
    #[serde(default)]
    pub ui_surface: UiSurfacePreference,

    /// Maximum number of conversation turns before auto-termination
    #[serde(default = "default_max_conversation_turns")]
    pub max_conversation_turns: usize,

    /// Reasoning effort level for models that support it (none, minimal, low, medium, high, xhigh)
    /// Applies to: Claude, GPT-5 family, Gemini, Qwen3, DeepSeek with reasoning capability
    #[serde(default = "default_reasoning_effort")]
    pub reasoning_effort: ReasoningEffortLevel,

    /// Verbosity level for output text (low, medium, high)
    /// Applies to: GPT-5.4-family Responses workflows and other models that support verbosity control
    #[serde(default = "default_verbosity")]
    pub verbosity: VerbosityLevel,

    /// Temperature for main LLM responses (0.0-1.0)
    /// Lower values = more deterministic, higher values = more creative
    /// Recommended: 0.7 for balanced creativity and consistency
    /// Range: 0.0 (deterministic) to 1.0 (maximum randomness)
    /// Validated by `validate_llm_params`.
    #[serde(default = "default_temperature")]
    pub temperature: f32,

    /// Temperature for prompt refinement (0.0-1.0, default: 0.3)
    /// Lower values ensure prompt refinement is more deterministic/consistent
    /// Keep lower than main temperature for stable prompt improvement
    /// Validated by `validate_llm_params`.
    #[serde(default = "default_refine_temperature")]
    pub refine_temperature: f32,

    /// Enable an extra self-review pass to refine final responses
    #[serde(default = "default_enable_self_review")]
    pub enable_self_review: bool,

    /// Maximum number of self-review passes
    #[serde(default = "default_max_review_passes")]
    pub max_review_passes: usize,

    /// Enable prompt refinement pass before sending to LLM
    #[serde(default = "default_refine_prompts_enabled")]
    pub refine_prompts_enabled: bool,

    /// Max refinement passes for prompt writing
    #[serde(default = "default_refine_max_passes")]
    pub refine_prompts_max_passes: usize,

    /// Optional model override for the refiner (empty = auto pick efficient sibling)
    #[serde(default)]
    pub refine_prompts_model: String,

    /// Small/lightweight model configuration for efficient operations
    /// Used for tasks like large file reads, parsing, git history, conversation summarization
    /// Typically 70-80% cheaper than main model; ~50% of VT Code's calls use this tier
    #[serde(default)]
    pub small_model: AgentSmallModelConfig,

    /// Inline prompt suggestion configuration for the chat composer
    #[serde(default)]
    pub prompt_suggestions: AgentPromptSuggestionsConfig,

    /// Session onboarding and welcome message configuration
    #[serde(default)]
    pub onboarding: AgentOnboardingConfig,

    /// Maximum bytes of AGENTS.md content to load from project hierarchy
    #[serde(default = "default_project_doc_max_bytes")]
    pub project_doc_max_bytes: usize,

    /// Additional filenames to check when AGENTS.md is absent at a directory level.
    #[serde(default)]
    pub project_doc_fallback_filenames: Vec<String>,

    /// Maximum bytes of instruction content to load from AGENTS.md hierarchy
    /// (accepts the legacy `rule_doc_max_bytes` key for compatibility)
    #[serde(
        default = "default_instruction_max_bytes",
        alias = "rule_doc_max_bytes"
    )]
    pub instruction_max_bytes: usize,

    /// Additional instruction files or globs to merge into the hierarchy
    /// (also accepted under the legacy `instruction_paths` / `instructions` keys)
    #[serde(default, alias = "instruction_paths", alias = "instructions")]
    pub instruction_files: Vec<String>,

    /// Instruction files or globs to exclude from AGENTS.md and rules discovery
    #[serde(default)]
    pub instruction_excludes: Vec<String>,

    /// Maximum recursive `@path` import depth for instruction and rule files
    /// Must be greater than 0 (enforced by `validate_llm_params`).
    #[serde(default = "default_instruction_import_max_depth")]
    pub instruction_import_max_depth: usize,

    /// Durable per-repository memory for main sessions
    #[serde(default)]
    pub persistent_memory: PersistentMemoryConfig,

    /// Provider-specific API keys captured from interactive configuration flows
    ///
    /// Note: Actual API keys are stored securely in the OS keyring.
    /// This field only tracks which providers have keys stored (for UI/migration purposes).
    /// The keys themselves are NOT serialized to the config file for security.
    #[serde(default, skip_serializing)]
    pub custom_api_keys: BTreeMap<String, String>,

    /// Preferred storage backend for credentials (OAuth tokens, API keys, etc.)
    ///
    /// - `keyring`: Use OS-specific secure storage (macOS Keychain, Windows Credential
    ///   Manager, Linux Secret Service). This is the default as it's the most secure.
    /// - `file`: Use AES-256-GCM encrypted file with machine-derived key
    /// - `auto`: Try keyring first, fall back to file if unavailable
    #[serde(default)]
    pub credential_storage_mode: crate::auth::AuthCredentialsStoreMode,

    /// Checkpointing configuration for automatic turn snapshots
    #[serde(default)]
    pub checkpointing: AgentCheckpointingConfig,

    /// Vibe coding configuration for lazy or vague request support
    #[serde(default)]
    pub vibe_coding: AgentVibeCodingConfig,

    /// Maximum number of retries for agent task execution (default: 2)
    /// When an agent task fails due to retryable errors (timeout, network, 503, etc.),
    /// it will be retried up to this many times with exponential backoff
    #[serde(default = "default_max_task_retries")]
    pub max_task_retries: u32,

    /// Harness configuration for turn-level budgets, telemetry, and execution limits
    #[serde(default)]
    pub harness: AgentHarnessConfig,

    /// Experimental Codex app-server sidecar configuration.
    #[serde(default)]
    pub codex_app_server: AgentCodexAppServerConfig,

    /// Include current date/time in system prompt for temporal awareness
    /// Helps LLM understand context for time-sensitive tasks (default: true)
    #[serde(default = "default_include_temporal_context")]
    pub include_temporal_context: bool,

    /// Use UTC instead of local time for temporal context in system prompts
    #[serde(default)]
    pub temporal_context_use_utc: bool,

    /// Include current working directory in system prompt (default: true)
    #[serde(default = "default_include_working_directory")]
    pub include_working_directory: bool,

    /// Controls inclusion of the structured reasoning tag instructions block.
    ///
    /// Behavior (see `should_include_structured_reasoning_tags`):
    /// - `Some(true)`: always include structured reasoning instructions.
    /// - `Some(false)`: never include structured reasoning instructions.
    /// - `None` (default): include only for `default` and `specialized` prompt modes.
    ///
    /// This keeps lightweight/minimal prompts smaller by default while allowing
    /// explicit opt-in when users want tag-based reasoning guidance.
    #[serde(default)]
    pub include_structured_reasoning_tags: Option<bool>,

    /// Custom instructions provided by the user via configuration to guide agent behavior
    #[serde(default)]
    pub user_instructions: Option<String>,

    /// Default editing mode on startup: "edit" (default) or "plan"
    /// Codex-inspired: Encourages structured planning before execution.
    #[serde(default)]
    pub default_editing_mode: EditingMode,

    /// Require user confirmation before executing a plan generated in plan mode
    /// When true, exiting plan mode shows the implementation blueprint and
    /// requires explicit user approval before enabling edit tools.
    #[serde(default = "default_require_plan_confirmation")]
    pub require_plan_confirmation: bool,

    /// Deprecated compatibility flag for pre-classifier autonomous mode settings.
    /// When true and `[permissions].default_mode` is not explicitly set, VT Code maps
    /// the session to `permissions.default_mode = "auto"`.
    #[serde(default = "default_autonomous_mode")]
    pub autonomous_mode: bool,

    /// Circuit breaker configuration for resilient tool execution
    /// Controls when the agent should pause and ask for user guidance due to repeated failures
    #[serde(default)]
    pub circuit_breaker: CircuitBreakerConfig,

    /// Open Responses specification compliance configuration
    /// Enables vendor-neutral LLM API format for interoperable workflows
    #[serde(default)]
    pub open_responses: OpenResponsesConfig,
}
251
/// Policy controlling which harness-managed continuation loops are enabled.
///
/// Serialized as a snake_case string; see [`ContinuationPolicy::parse`] for
/// the accepted spellings.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "schema", schemars(rename_all = "snake_case"))]
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ContinuationPolicy {
    /// No continuation loops.
    Off,
    /// Continuation only for exec-style tool flows.
    ExecOnly,
    /// Continuation for all flows (default).
    #[default]
    All,
}
262
263impl ContinuationPolicy {
264    pub fn as_str(&self) -> &'static str {
265        match self {
266            Self::Off => "off",
267            Self::ExecOnly => "exec_only",
268            Self::All => "all",
269        }
270    }
271
272    pub fn parse(value: &str) -> Option<Self> {
273        let normalized = value.trim();
274        if normalized.eq_ignore_ascii_case("off") {
275            Some(Self::Off)
276        } else if normalized.eq_ignore_ascii_case("exec_only")
277            || normalized.eq_ignore_ascii_case("exec-only")
278        {
279            Some(Self::ExecOnly)
280        } else if normalized.eq_ignore_ascii_case("all") {
281            Some(Self::All)
282        } else {
283            None
284        }
285    }
286}
287
288impl<'de> Deserialize<'de> for ContinuationPolicy {
289    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
290    where
291        D: serde::Deserializer<'de>,
292    {
293        let raw = String::deserialize(deserializer)?;
294        Ok(Self::parse(&raw).unwrap_or_default())
295    }
296}
297
/// Selects the exec/full-auto harness orchestration path.
///
/// Serialized as a snake_case string; see [`HarnessOrchestrationMode::parse`]
/// for the accepted spellings and legacy aliases.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "schema", schemars(rename_all = "snake_case"))]
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum HarnessOrchestrationMode {
    /// Single-agent orchestration (default).
    #[default]
    Single,
    /// Plan → build → evaluate multi-stage orchestration.
    PlanBuildEvaluate,
}
307
308impl HarnessOrchestrationMode {
309    pub fn as_str(&self) -> &'static str {
310        match self {
311            Self::Single => "single",
312            Self::PlanBuildEvaluate => "plan_build_evaluate",
313        }
314    }
315
316    pub fn parse(value: &str) -> Option<Self> {
317        let normalized = value.trim();
318        if normalized.eq_ignore_ascii_case("single") {
319            Some(Self::Single)
320        } else if normalized.eq_ignore_ascii_case("plan_build_evaluate")
321            || normalized.eq_ignore_ascii_case("plan-build-evaluate")
322            || normalized.eq_ignore_ascii_case("planner_generator_evaluator")
323            || normalized.eq_ignore_ascii_case("planner-generator-evaluator")
324        {
325            Some(Self::PlanBuildEvaluate)
326        } else {
327            None
328        }
329    }
330}
331
332impl<'de> Deserialize<'de> for HarnessOrchestrationMode {
333    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
334    where
335        D: serde::Deserializer<'de>,
336    {
337        let raw = String::deserialize(deserializer)?;
338        Ok(Self::parse(&raw).unwrap_or_default())
339    }
340}
341
/// Harness configuration for turn-level budgets, telemetry, and execution limits.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentHarnessConfig {
    /// Maximum number of tool calls allowed per turn. Set to `0` to disable the cap.
    #[serde(default = "default_harness_max_tool_calls_per_turn")]
    pub max_tool_calls_per_turn: usize,
    /// Maximum wall clock time (seconds) for tool execution in a turn
    #[serde(default = "default_harness_max_tool_wall_clock_secs")]
    pub max_tool_wall_clock_secs: u64,
    /// Maximum retries for retryable tool errors
    #[serde(default = "default_harness_max_tool_retries")]
    pub max_tool_retries: u32,
    /// Enable automatic context compaction when token pressure crosses threshold.
    ///
    /// Disabled by default. When disabled, no automatic compaction is triggered.
    #[serde(default = "default_harness_auto_compaction_enabled")]
    pub auto_compaction_enabled: bool,
    /// Optional absolute compact threshold (tokens) for Responses server-side compaction.
    ///
    /// When unset, VT Code derives a threshold from the provider context window.
    #[serde(default)]
    pub auto_compaction_threshold_tokens: Option<u64>,
    /// Optional maximum estimated API cost in USD before VT Code stops the session.
    #[serde(default)]
    pub max_budget_usd: Option<f64>,
    /// Controls whether harness-managed continuation loops are enabled.
    #[serde(default)]
    pub continuation_policy: ContinuationPolicy,
    /// Optional JSONL event log path for harness events.
    /// Defaults to `~/.vtcode/sessions/` when unset.
    #[serde(default)]
    pub event_log_path: Option<String>,
    /// Select the exec/full-auto harness orchestration path.
    #[serde(default)]
    pub orchestration_mode: HarnessOrchestrationMode,
    /// Maximum generator revision rounds after evaluator rejection.
    #[serde(default = "default_harness_max_revision_rounds")]
    pub max_revision_rounds: usize,
}
381
382impl Default for AgentHarnessConfig {
383    fn default() -> Self {
384        Self {
385            max_tool_calls_per_turn: default_harness_max_tool_calls_per_turn(),
386            max_tool_wall_clock_secs: default_harness_max_tool_wall_clock_secs(),
387            max_tool_retries: default_harness_max_tool_retries(),
388            auto_compaction_enabled: default_harness_auto_compaction_enabled(),
389            auto_compaction_threshold_tokens: None,
390            max_budget_usd: None,
391            continuation_policy: ContinuationPolicy::default(),
392            event_log_path: None,
393            orchestration_mode: HarnessOrchestrationMode::default(),
394            max_revision_rounds: default_harness_max_revision_rounds(),
395        }
396    }
397}
398
/// Experimental Codex app-server sidecar configuration.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentCodexAppServerConfig {
    /// Executable used to launch the official Codex app-server sidecar.
    #[serde(default = "default_codex_app_server_command")]
    pub command: String,
    /// Arguments passed before VT Code appends `--listen stdio://`.
    #[serde(default = "default_codex_app_server_args")]
    pub args: Vec<String>,
    /// Maximum startup handshake time when launching the sidecar.
    #[serde(default = "default_codex_app_server_startup_timeout_secs")]
    pub startup_timeout_secs: u64,
}
412
413impl Default for AgentCodexAppServerConfig {
414    fn default() -> Self {
415        Self {
416            command: default_codex_app_server_command(),
417            args: default_codex_app_server_args(),
418            startup_timeout_secs: default_codex_app_server_startup_timeout_secs(),
419        }
420    }
421}
422
/// Circuit breaker configuration for resilient tool execution.
///
/// Controls when the agent pauses and asks for user guidance after
/// repeated tool failures.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CircuitBreakerConfig {
    /// Enable circuit breaker functionality
    #[serde(default = "default_circuit_breaker_enabled")]
    pub enabled: bool,

    /// Number of consecutive failures before opening circuit
    #[serde(default = "default_failure_threshold")]
    pub failure_threshold: u32,

    /// Pause and ask user when circuit opens (vs auto-backoff)
    #[serde(default = "default_pause_on_open")]
    pub pause_on_open: bool,

    /// Number of open circuits before triggering pause
    #[serde(default = "default_max_open_circuits")]
    pub max_open_circuits: usize,

    /// Cooldown period between recovery prompts (seconds)
    #[serde(default = "default_recovery_cooldown")]
    pub recovery_cooldown: u64,
}
446
447impl Default for CircuitBreakerConfig {
448    fn default() -> Self {
449        Self {
450            enabled: default_circuit_breaker_enabled(),
451            failure_threshold: default_failure_threshold(),
452            pause_on_open: default_pause_on_open(),
453            max_open_circuits: default_max_open_circuits(),
454            recovery_cooldown: default_recovery_cooldown(),
455        }
456    }
457}
458
/// Open Responses specification compliance configuration
///
/// Enables vendor-neutral LLM API format per the Open Responses specification
/// (<https://www.openresponses.org/>). When enabled, VT Code emits semantic
/// streaming events and uses standardized response/item structures.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct OpenResponsesConfig {
    /// Enable Open Responses specification compliance layer
    /// When true, VT Code emits semantic streaming events alongside internal events
    /// Default: false (opt-in feature)
    #[serde(default)]
    pub enabled: bool,

    /// Emit Open Responses events to the event sink
    /// When true, streaming events follow Open Responses format
    /// (response.created, response.output_item.added, response.output_text.delta, etc.)
    #[serde(default = "default_open_responses_emit_events")]
    pub emit_events: bool,

    /// Include VT Code extension items (vtcode:file_change, vtcode:web_search, etc.)
    /// When false, extension items are omitted from the Open Responses output
    #[serde(default = "default_open_responses_include_extensions")]
    pub include_extensions: bool,

    /// Map internal tool calls to Open Responses function_call items
    /// When true, command executions and MCP tool calls are represented as function_call items
    #[serde(default = "default_open_responses_map_tool_calls")]
    pub map_tool_calls: bool,

    /// Include reasoning items in Open Responses output
    /// When true, model reasoning/thinking is exposed as reasoning items
    #[serde(default = "default_open_responses_include_reasoning")]
    pub include_reasoning: bool,
}
494
495impl Default for OpenResponsesConfig {
496    fn default() -> Self {
497        Self {
498            enabled: false, // Opt-in by default
499            emit_events: default_open_responses_emit_events(),
500            include_extensions: default_open_responses_include_extensions(),
501            map_tool_calls: default_open_responses_map_tool_calls(),
502            include_reasoning: default_open_responses_include_reasoning(),
503        }
504    }
505}
506
// Serde default helpers for `OpenResponsesConfig`: once the compliance
// layer is enabled, every sub-feature is on by default.

#[inline]
const fn default_open_responses_emit_events() -> bool {
    true
}

#[inline]
const fn default_open_responses_include_extensions() -> bool {
    true
}

#[inline]
const fn default_open_responses_map_tool_calls() -> bool {
    true
}

#[inline]
const fn default_open_responses_include_reasoning() -> bool {
    true
}

// Serde default helpers for `AgentCodexAppServerConfig`.

/// Default sidecar executable name.
#[inline]
fn default_codex_app_server_command() -> String {
    String::from("codex")
}

/// Default arguments passed before VT Code appends `--listen stdio://`.
#[inline]
fn default_codex_app_server_args() -> Vec<String> {
    vec![String::from("app-server")]
}

/// Default startup handshake budget (seconds).
#[inline]
const fn default_codex_app_server_startup_timeout_secs() -> u64 {
    10
}
541
542impl Default for AgentConfig {
543    fn default() -> Self {
544        Self {
545            provider: default_provider(),
546            api_key_env: default_api_key_env(),
547            default_model: default_model(),
548            theme: default_theme(),
549            system_prompt_mode: SystemPromptMode::default(),
550            tool_documentation_mode: ToolDocumentationMode::default(),
551            enable_split_tool_results: default_enable_split_tool_results(),
552            todo_planning_mode: default_todo_planning_mode(),
553            ui_surface: UiSurfacePreference::default(),
554            max_conversation_turns: default_max_conversation_turns(),
555            reasoning_effort: default_reasoning_effort(),
556            verbosity: default_verbosity(),
557            temperature: default_temperature(),
558            refine_temperature: default_refine_temperature(),
559            enable_self_review: default_enable_self_review(),
560            max_review_passes: default_max_review_passes(),
561            refine_prompts_enabled: default_refine_prompts_enabled(),
562            refine_prompts_max_passes: default_refine_max_passes(),
563            refine_prompts_model: String::new(),
564            small_model: AgentSmallModelConfig::default(),
565            prompt_suggestions: AgentPromptSuggestionsConfig::default(),
566            onboarding: AgentOnboardingConfig::default(),
567            project_doc_max_bytes: default_project_doc_max_bytes(),
568            project_doc_fallback_filenames: Vec::new(),
569            instruction_max_bytes: default_instruction_max_bytes(),
570            instruction_files: Vec::new(),
571            instruction_excludes: Vec::new(),
572            instruction_import_max_depth: default_instruction_import_max_depth(),
573            persistent_memory: PersistentMemoryConfig::default(),
574            custom_api_keys: BTreeMap::new(),
575            credential_storage_mode: crate::auth::AuthCredentialsStoreMode::default(),
576            checkpointing: AgentCheckpointingConfig::default(),
577            vibe_coding: AgentVibeCodingConfig::default(),
578            max_task_retries: default_max_task_retries(),
579            harness: AgentHarnessConfig::default(),
580            codex_app_server: AgentCodexAppServerConfig::default(),
581            include_temporal_context: default_include_temporal_context(),
582            temporal_context_use_utc: false, // Default to local time
583            include_working_directory: default_include_working_directory(),
584            include_structured_reasoning_tags: None,
585            user_instructions: None,
586            default_editing_mode: EditingMode::default(),
587            require_plan_confirmation: default_require_plan_confirmation(),
588            autonomous_mode: default_autonomous_mode(),
589            circuit_breaker: CircuitBreakerConfig::default(),
590            open_responses: OpenResponsesConfig::default(),
591        }
592    }
593}
594
595impl AgentConfig {
596    /// Determine whether structured reasoning tag instructions should be included.
597    pub fn should_include_structured_reasoning_tags(&self) -> bool {
598        self.include_structured_reasoning_tags.unwrap_or(matches!(
599            self.system_prompt_mode,
600            SystemPromptMode::Default | SystemPromptMode::Specialized
601        ))
602    }
603
604    /// Validate LLM generation parameters
605    pub fn validate_llm_params(&self) -> Result<(), String> {
606        // Validate temperature range
607        if !(0.0..=1.0).contains(&self.temperature) {
608            return Err(format!(
609                "temperature must be between 0.0 and 1.0, got {}",
610                self.temperature
611            ));
612        }
613
614        if !(0.0..=1.0).contains(&self.refine_temperature) {
615            return Err(format!(
616                "refine_temperature must be between 0.0 and 1.0, got {}",
617                self.refine_temperature
618            ));
619        }
620
621        if self.instruction_import_max_depth == 0 {
622            return Err("instruction_import_max_depth must be greater than 0".to_string());
623        }
624
625        self.persistent_memory.validate()?;
626
627        Ok(())
628    }
629}
630
// Serde default helpers. Kept as `#[inline]` free functions because serde's
// `default = "..."` attribute requires a function path; values come from the
// crate-wide `constants::defaults` module.
#[inline]
fn default_provider() -> String {
    defaults::DEFAULT_PROVIDER.into()
}

#[inline]
fn default_api_key_env() -> String {
    defaults::DEFAULT_API_KEY_ENV.into()
}

#[inline]
fn default_model() -> String {
    defaults::DEFAULT_MODEL.into()
}

#[inline]
fn default_theme() -> String {
    defaults::DEFAULT_THEME.into()
}
651
// TODO planning helper mode is on by default.
#[inline]
const fn default_todo_planning_mode() -> bool {
    true
}

// Split tool results enabled by default for production use (84% token savings).
#[inline]
const fn default_enable_split_tool_results() -> bool {
    true
}

#[inline]
const fn default_max_conversation_turns() -> usize {
    defaults::DEFAULT_MAX_CONVERSATION_TURNS
}

// Reasoning effort is off unless the user opts in.
#[inline]
fn default_reasoning_effort() -> ReasoningEffortLevel {
    ReasoningEffortLevel::None
}

#[inline]
fn default_verbosity() -> VerbosityLevel {
    VerbosityLevel::default()
}

#[inline]
const fn default_temperature() -> f32 {
    llm_generation::DEFAULT_TEMPERATURE
}

#[inline]
const fn default_refine_temperature() -> f32 {
    llm_generation::DEFAULT_REFINE_TEMPERATURE
}

// Self-review and prompt refinement are opt-in, each with a single pass.
#[inline]
const fn default_enable_self_review() -> bool {
    false
}

#[inline]
const fn default_max_review_passes() -> usize {
    1
}

#[inline]
const fn default_refine_prompts_enabled() -> bool {
    false
}

#[inline]
const fn default_refine_max_passes() -> usize {
    1
}

// AGENTS.md / instruction budgets share the crate-wide prompt byte budget.
#[inline]
const fn default_project_doc_max_bytes() -> usize {
    prompt_budget::DEFAULT_MAX_BYTES
}

#[inline]
const fn default_instruction_max_bytes() -> usize {
    prompt_budget::DEFAULT_MAX_BYTES
}

// Maximum recursive `@path` import depth (must stay > 0; see validate_llm_params).
#[inline]
const fn default_instruction_import_max_depth() -> usize {
    5
}

// Retry failed agent tasks twice on transient failures.
#[inline]
const fn default_max_task_retries() -> u32 {
    2
}
726
// Serde default helpers for `AgentHarnessConfig`; limits come from the
// crate-wide `constants::defaults` module.
#[inline]
const fn default_harness_max_tool_calls_per_turn() -> usize {
    defaults::DEFAULT_MAX_TOOL_CALLS_PER_TURN
}

#[inline]
const fn default_harness_max_tool_wall_clock_secs() -> u64 {
    defaults::DEFAULT_MAX_TOOL_WALL_CLOCK_SECS
}

#[inline]
const fn default_harness_max_tool_retries() -> u32 {
    defaults::DEFAULT_MAX_TOOL_RETRIES
}

// Automatic context compaction is opt-in.
#[inline]
const fn default_harness_auto_compaction_enabled() -> bool {
    false
}

// Up to two generator revision rounds after evaluator rejection.
#[inline]
const fn default_harness_max_revision_rounds() -> usize {
    2
}
751
// System-prompt context defaults: both are on because their overhead is tiny
// (~20 tokens for the timestamp, ~10 for the working directory).
#[inline]
const fn default_include_temporal_context() -> bool {
    true
}

#[inline]
const fn default_include_working_directory() -> bool {
    true
}

// Require plan confirmation by default (human-in-the-loop pattern).
#[inline]
const fn default_require_plan_confirmation() -> bool {
    true
}

// Interactive mode with full human-in-the-loop by default; `true` is only a
// deprecated compatibility mapping (see `AgentConfig::autonomous_mode`).
#[inline]
const fn default_autonomous_mode() -> bool {
    false
}

// Circuit breaker defaults: enabled, opening after 7 consecutive failures,
// pausing for user guidance once 3+ tools have open circuits, with a
// 60-second cooldown between recovery prompts.
#[inline]
const fn default_circuit_breaker_enabled() -> bool {
    true
}

#[inline]
const fn default_failure_threshold() -> u32 {
    7
}

#[inline]
const fn default_pause_on_open() -> bool {
    true
}

#[inline]
const fn default_max_open_circuits() -> usize {
    3
}

#[inline]
const fn default_recovery_cooldown() -> u64 {
    60
}
796
/// Checkpointing configuration for automatic turn snapshots.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentCheckpointingConfig {
    /// Enable automatic checkpoints after each successful turn
    #[serde(default = "default_checkpointing_enabled")]
    pub enabled: bool,

    /// Optional custom directory for storing checkpoints (relative to workspace or absolute)
    #[serde(default)]
    pub storage_dir: Option<String>,

    /// Maximum number of checkpoints to retain on disk
    #[serde(default = "default_checkpointing_max_snapshots")]
    pub max_snapshots: usize,

    /// Maximum age in days before checkpoints are removed automatically (None disables)
    #[serde(default = "default_checkpointing_max_age_days")]
    pub max_age_days: Option<u64>,
}
816
817impl Default for AgentCheckpointingConfig {
818    fn default() -> Self {
819        Self {
820            enabled: default_checkpointing_enabled(),
821            storage_dir: None,
822            max_snapshots: default_checkpointing_max_snapshots(),
823            max_age_days: default_checkpointing_max_age_days(),
824        }
825    }
826}
827
828#[inline]
829const fn default_checkpointing_enabled() -> bool {
830    DEFAULT_CHECKPOINTS_ENABLED
831}
832
833#[inline]
834const fn default_checkpointing_max_snapshots() -> usize {
835    DEFAULT_MAX_SNAPSHOTS
836}
837
838#[inline]
839const fn default_checkpointing_max_age_days() -> Option<u64> {
840    Some(DEFAULT_MAX_AGE_DAYS)
841}
842
/// Durable cross-session memory configuration for the main agent session.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct PersistentMemoryConfig {
    /// Toggle main-session persistent memory for this repository
    #[serde(default = "default_persistent_memory_enabled")]
    pub enabled: bool,

    /// Write durable memory after completed turns and session finalization
    #[serde(default = "default_persistent_memory_auto_write")]
    pub auto_write: bool,

    /// Optional user-local directory override for persistent memory storage
    #[serde(default)]
    pub directory_override: Option<String>,

    /// Startup line budget loaded from memory_summary.md (must be > 0, see `validate`)
    #[serde(default = "default_persistent_memory_startup_line_limit")]
    pub startup_line_limit: usize,

    /// Startup byte budget loaded from memory_summary.md (must be > 0, see `validate`)
    #[serde(default = "default_persistent_memory_startup_byte_limit")]
    pub startup_byte_limit: usize,
}

impl Default for PersistentMemoryConfig {
    // Mirror the per-field serde defaults so a programmatically constructed
    // value matches deserializing a config file with the section omitted.
    fn default() -> Self {
        Self {
            enabled: default_persistent_memory_enabled(),
            auto_write: default_persistent_memory_auto_write(),
            directory_override: None,
            startup_line_limit: default_persistent_memory_startup_line_limit(),
            startup_byte_limit: default_persistent_memory_startup_byte_limit(),
        }
    }
}
878
879impl PersistentMemoryConfig {
880    pub fn validate(&self) -> Result<(), String> {
881        if self.startup_line_limit == 0 {
882            return Err("persistent_memory.startup_line_limit must be greater than 0".to_string());
883        }
884
885        if self.startup_byte_limit == 0 {
886            return Err("persistent_memory.startup_byte_limit must be greater than 0".to_string());
887        }
888
889        Ok(())
890    }
891}
892
/// Serde default: persistent memory is opt-in per repository.
#[inline]
const fn default_persistent_memory_enabled() -> bool {
    false
}

/// Serde default: write memory automatically once the feature is enabled.
#[inline]
const fn default_persistent_memory_auto_write() -> bool {
    true
}

/// Serde default: load at most 200 lines of memory_summary.md at startup.
#[inline]
const fn default_persistent_memory_startup_line_limit() -> usize {
    200
}

/// Serde default: load at most 25 KiB of memory_summary.md at startup.
#[inline]
const fn default_persistent_memory_startup_byte_limit() -> usize {
    25 * 1024
}
912
/// Configuration for the onboarding/welcome message shown at session start.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentOnboardingConfig {
    /// Toggle onboarding message rendering
    #[serde(default = "default_onboarding_enabled")]
    pub enabled: bool,

    /// Introductory text shown at session start
    #[serde(default = "default_intro_text")]
    pub intro_text: String,

    /// Whether to include project overview in onboarding message
    #[serde(default = "default_show_project_overview")]
    pub include_project_overview: bool,

    /// Whether to include language summary in onboarding message
    #[serde(default = "default_show_language_summary")]
    pub include_language_summary: bool,

    /// Whether to include AGENTS.md highlights in onboarding message
    #[serde(default = "default_show_guideline_highlights")]
    pub include_guideline_highlights: bool,

    /// Whether to surface usage tips inside the welcome text banner
    #[serde(default = "default_show_usage_tips_in_welcome")]
    pub include_usage_tips_in_welcome: bool,

    /// Whether to surface suggested actions inside the welcome text banner
    #[serde(default = "default_show_recommended_actions_in_welcome")]
    pub include_recommended_actions_in_welcome: bool,

    /// Maximum number of guideline bullets to surface
    #[serde(default = "default_guideline_highlight_limit")]
    pub guideline_highlight_limit: usize,

    /// Tips for collaborating with the agent effectively
    #[serde(default = "default_usage_tips")]
    pub usage_tips: Vec<String>,

    /// Recommended follow-up actions to display
    #[serde(default = "default_recommended_actions")]
    pub recommended_actions: Vec<String>,

    /// Placeholder suggestion for the chat input bar (None uses the built-in placeholder)
    #[serde(default)]
    pub chat_placeholder: Option<String>,
}

impl Default for AgentOnboardingConfig {
    // Mirror the per-field serde defaults so a programmatically constructed
    // value matches deserializing a config file with the section omitted.
    fn default() -> Self {
        Self {
            enabled: default_onboarding_enabled(),
            intro_text: default_intro_text(),
            include_project_overview: default_show_project_overview(),
            include_language_summary: default_show_language_summary(),
            include_guideline_highlights: default_show_guideline_highlights(),
            include_usage_tips_in_welcome: default_show_usage_tips_in_welcome(),
            include_recommended_actions_in_welcome: default_show_recommended_actions_in_welcome(),
            guideline_highlight_limit: default_guideline_highlight_limit(),
            usage_tips: default_usage_tips(),
            recommended_actions: default_recommended_actions(),
            chat_placeholder: None,
        }
    }
}

/// Serde default: onboarding message rendering is on.
#[inline]
const fn default_onboarding_enabled() -> bool {
    true
}
983
/// Canonical onboarding introduction used when no custom text is configured.
const DEFAULT_INTRO_TEXT: &str =
    "Let's get oriented. I preloaded workspace context so we can move fast.";

/// Serde default: an owned copy of [`DEFAULT_INTRO_TEXT`].
#[inline]
fn default_intro_text() -> String {
    String::from(DEFAULT_INTRO_TEXT)
}
991
/// Serde default: include the project overview in the onboarding message.
#[inline]
const fn default_show_project_overview() -> bool {
    true
}

/// Serde default: omit the language summary from the onboarding message.
#[inline]
const fn default_show_language_summary() -> bool {
    false
}

/// Serde default: include AGENTS.md guideline highlights.
#[inline]
const fn default_show_guideline_highlights() -> bool {
    true
}

/// Serde default: keep usage tips out of the welcome banner.
#[inline]
const fn default_show_usage_tips_in_welcome() -> bool {
    false
}

/// Serde default: keep recommended actions out of the welcome banner.
#[inline]
const fn default_show_recommended_actions_in_welcome() -> bool {
    false
}

/// Serde default: surface at most 3 guideline bullets.
#[inline]
const fn default_guideline_highlight_limit() -> usize {
    3
}
1021
/// Default collaboration tips surfaced during onboarding.
const DEFAULT_USAGE_TIPS: &[&str] = &[
    "Describe your current coding goal or ask for a quick status overview.",
    "Reference AGENTS.md guidelines when proposing changes.",
    "Prefer asking for targeted file reads or diffs before editing.",
];

/// Default follow-up actions suggested during onboarding.
const DEFAULT_RECOMMENDED_ACTIONS: &[&str] = &[
    "Review the highlighted guidelines and share the task you want to tackle.",
    "Ask for a workspace tour if you need more context.",
];

/// Serde default: owned copies of [`DEFAULT_USAGE_TIPS`].
fn default_usage_tips() -> Vec<String> {
    DEFAULT_USAGE_TIPS
        .iter()
        .copied()
        .map(String::from)
        .collect()
}

/// Serde default: owned copies of [`DEFAULT_RECOMMENDED_ACTIONS`].
fn default_recommended_actions() -> Vec<String> {
    DEFAULT_RECOMMENDED_ACTIONS
        .iter()
        .copied()
        .map(String::from)
        .collect()
}
1043
/// Small/lightweight model configuration for efficient operations
///
/// Following VT Code's pattern, use a smaller model (e.g., Haiku, GPT-4 Mini) for 50%+ of calls:
/// - Large file reads and parsing (>50KB)
/// - Web page summarization and analysis
/// - Git history and commit message processing
/// - One-word processing labels and simple classifications
///
/// Typically 70-80% cheaper than the main model while maintaining quality for these tasks.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentSmallModelConfig {
    /// Enable small model tier for efficient operations
    #[serde(default = "default_small_model_enabled")]
    pub enabled: bool,

    /// Small model to use (e.g., "claude-4-5-haiku", "gpt-4-mini", "gemini-2.0-flash")
    /// Leave empty to auto-select a lightweight sibling of the main model
    #[serde(default)]
    pub model: String,

    /// Temperature for small model responses
    #[serde(default = "default_small_model_temperature")]
    pub temperature: f32,

    /// Enable small model for large file reads (>50KB)
    #[serde(default = "default_small_model_for_large_reads")]
    pub use_for_large_reads: bool,

    /// Enable small model for web content summarization
    #[serde(default = "default_small_model_for_web_summary")]
    pub use_for_web_summary: bool,

    /// Enable small model for git history processing
    #[serde(default = "default_small_model_for_git_history")]
    pub use_for_git_history: bool,

    /// Enable small model for persistent memory classification and summary refresh
    #[serde(default = "default_small_model_for_memory")]
    pub use_for_memory: bool,
}

impl Default for AgentSmallModelConfig {
    // Mirror the per-field serde defaults; `model` stays empty so the
    // lightweight sibling of the main model is auto-selected.
    fn default() -> Self {
        Self {
            enabled: default_small_model_enabled(),
            model: String::new(),
            temperature: default_small_model_temperature(),
            use_for_large_reads: default_small_model_for_large_reads(),
            use_for_web_summary: default_small_model_for_web_summary(),
            use_for_git_history: default_small_model_for_git_history(),
            use_for_memory: default_small_model_for_memory(),
        }
    }
}
1099
/// Serde default: the small model tier is on.
#[inline]
const fn default_small_model_enabled() -> bool {
    true // Enable by default following VT Code pattern
}

/// Serde default: low temperature for small-model calls.
#[inline]
const fn default_small_model_temperature() -> f32 {
    0.3 // More deterministic for parsing/summarization
}

/// Serde default: route large file reads (>50KB) to the small model.
#[inline]
const fn default_small_model_for_large_reads() -> bool {
    true
}

/// Serde default: route web content summarization to the small model.
#[inline]
const fn default_small_model_for_web_summary() -> bool {
    true
}

/// Serde default: route git history processing to the small model.
#[inline]
const fn default_small_model_for_git_history() -> bool {
    true
}

/// Serde default: route persistent memory classification to the small model.
#[inline]
const fn default_small_model_for_memory() -> bool {
    true
}
1129
/// Inline prompt suggestion configuration for the chat composer.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentPromptSuggestionsConfig {
    /// Enable inline prompt suggestions in the chat composer.
    #[serde(default = "default_prompt_suggestions_enabled")]
    pub enabled: bool,

    /// Lightweight model to use for suggestions.
    /// Leave empty to auto-select an efficient sibling of the main model.
    #[serde(default)]
    pub model: String,

    /// Temperature for inline prompt suggestion generation.
    #[serde(default = "default_prompt_suggestions_temperature")]
    pub temperature: f32,

    /// Whether VT Code should remind users that LLM-backed suggestions consume tokens.
    #[serde(default = "default_prompt_suggestions_show_cost_notice")]
    pub show_cost_notice: bool,
}

impl Default for AgentPromptSuggestionsConfig {
    // Mirror the per-field serde defaults; `model` stays empty so an
    // efficient sibling of the main model is auto-selected.
    fn default() -> Self {
        Self {
            enabled: default_prompt_suggestions_enabled(),
            model: String::new(),
            temperature: default_prompt_suggestions_temperature(),
            show_cost_notice: default_prompt_suggestions_show_cost_notice(),
        }
    }
}
1162
/// Serde default: inline prompt suggestions are on.
#[inline]
const fn default_prompt_suggestions_enabled() -> bool {
    true
}

/// Serde default: low temperature for suggestion generation.
#[inline]
const fn default_prompt_suggestions_temperature() -> f32 {
    0.3
}

/// Serde default: show the token-cost reminder.
#[inline]
const fn default_prompt_suggestions_show_cost_notice() -> bool {
    true
}
1177
/// Vibe coding configuration for lazy/vague request support
///
/// Enables intelligent context gathering and entity resolution to support
/// casual, imprecise requests like "make it blue" or "decrease by half".
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentVibeCodingConfig {
    /// Enable vibe coding support (off by default; opt-in)
    #[serde(default = "default_vibe_coding_enabled")]
    pub enabled: bool,

    /// Minimum prompt length in characters for refinement (default: 5 chars)
    #[serde(default = "default_vibe_min_prompt_length")]
    pub min_prompt_length: usize,

    /// Minimum prompt words for refinement (default: 2 words)
    #[serde(default = "default_vibe_min_prompt_words")]
    pub min_prompt_words: usize,

    /// Enable fuzzy entity resolution
    #[serde(default = "default_vibe_entity_resolution")]
    pub enable_entity_resolution: bool,

    /// Entity index cache file path (relative to workspace)
    #[serde(default = "default_vibe_entity_cache")]
    pub entity_index_cache: String,

    /// Maximum entity matches to return (default: 5)
    #[serde(default = "default_vibe_max_entity_matches")]
    pub max_entity_matches: usize,

    /// Track workspace state (file activity, value changes)
    #[serde(default = "default_vibe_track_workspace")]
    pub track_workspace_state: bool,

    /// Maximum recent files to track (default: 20)
    #[serde(default = "default_vibe_max_recent_files")]
    pub max_recent_files: usize,

    /// Track value history for inference
    #[serde(default = "default_vibe_track_values")]
    pub track_value_history: bool,

    /// Enable conversation memory for pronoun resolution
    #[serde(default = "default_vibe_conversation_memory")]
    pub enable_conversation_memory: bool,

    /// Maximum conversation turns to remember (default: 50)
    #[serde(default = "default_vibe_max_memory_turns")]
    pub max_memory_turns: usize,

    /// Enable pronoun resolution (it, that, this)
    #[serde(default = "default_vibe_pronoun_resolution")]
    pub enable_pronoun_resolution: bool,

    /// Enable proactive context gathering
    #[serde(default = "default_vibe_proactive_context")]
    pub enable_proactive_context: bool,

    /// Maximum files to gather for context (default: 3)
    #[serde(default = "default_vibe_max_context_files")]
    pub max_context_files: usize,

    /// Maximum code snippet lines per file (default: 20 lines)
    #[serde(default = "default_vibe_max_snippets_per_file")]
    pub max_context_snippets_per_file: usize,

    /// Maximum search results to include (default: 5)
    #[serde(default = "default_vibe_max_search_results")]
    pub max_search_results: usize,

    /// Enable relative value inference (by half, double, etc.)
    #[serde(default = "default_vibe_value_inference")]
    pub enable_relative_value_inference: bool,
}

impl Default for AgentVibeCodingConfig {
    // Mirror the per-field serde defaults so a programmatically constructed
    // value matches deserializing a config file with the section omitted.
    fn default() -> Self {
        Self {
            enabled: default_vibe_coding_enabled(),
            min_prompt_length: default_vibe_min_prompt_length(),
            min_prompt_words: default_vibe_min_prompt_words(),
            enable_entity_resolution: default_vibe_entity_resolution(),
            entity_index_cache: default_vibe_entity_cache(),
            max_entity_matches: default_vibe_max_entity_matches(),
            track_workspace_state: default_vibe_track_workspace(),
            max_recent_files: default_vibe_max_recent_files(),
            track_value_history: default_vibe_track_values(),
            enable_conversation_memory: default_vibe_conversation_memory(),
            max_memory_turns: default_vibe_max_memory_turns(),
            enable_pronoun_resolution: default_vibe_pronoun_resolution(),
            enable_proactive_context: default_vibe_proactive_context(),
            max_context_files: default_vibe_max_context_files(),
            max_context_snippets_per_file: default_vibe_max_snippets_per_file(),
            max_search_results: default_vibe_max_search_results(),
            enable_relative_value_inference: default_vibe_value_inference(),
        }
    }
}
1277
// Vibe coding default functions
/// Serde default: vibe coding support is opt-in.
#[inline]
const fn default_vibe_coding_enabled() -> bool {
    false // Conservative default, opt-in
}

/// Serde default: refine prompts of at least 5 characters.
#[inline]
const fn default_vibe_min_prompt_length() -> usize {
    5
}

/// Serde default: refine prompts of at least 2 words.
#[inline]
const fn default_vibe_min_prompt_words() -> usize {
    2
}

/// Serde default: fuzzy entity resolution is on.
#[inline]
const fn default_vibe_entity_resolution() -> bool {
    true
}

/// Serde default: workspace-relative path for the entity index cache.
#[inline]
fn default_vibe_entity_cache() -> String {
    ".vtcode/entity_index.json".into()
}

/// Serde default: return at most 5 entity matches.
#[inline]
const fn default_vibe_max_entity_matches() -> usize {
    5
}

/// Serde default: workspace state tracking is on.
#[inline]
const fn default_vibe_track_workspace() -> bool {
    true
}

/// Serde default: track up to 20 recent files.
#[inline]
const fn default_vibe_max_recent_files() -> usize {
    20
}

/// Serde default: value history tracking is on.
#[inline]
const fn default_vibe_track_values() -> bool {
    true
}

/// Serde default: conversation memory is on.
#[inline]
const fn default_vibe_conversation_memory() -> bool {
    true
}

/// Serde default: remember up to 50 conversation turns.
#[inline]
const fn default_vibe_max_memory_turns() -> usize {
    50
}

/// Serde default: pronoun resolution is on.
#[inline]
const fn default_vibe_pronoun_resolution() -> bool {
    true
}

/// Serde default: proactive context gathering is on.
#[inline]
const fn default_vibe_proactive_context() -> bool {
    true
}

/// Serde default: gather context from at most 3 files.
#[inline]
const fn default_vibe_max_context_files() -> usize {
    3
}

/// Serde default: include at most 20 snippet lines per file.
#[inline]
const fn default_vibe_max_snippets_per_file() -> usize {
    20
}

/// Serde default: include at most 5 search results.
#[inline]
const fn default_vibe_max_search_results() -> usize {
    5
}

/// Serde default: relative value inference is on.
#[inline]
const fn default_vibe_value_inference() -> bool {
    true
}
1363
#[cfg(test)]
mod tests {
    use super::*;

    // ContinuationPolicy: default variant and accepted string forms.
    #[test]
    fn test_continuation_policy_defaults_and_parses() {
        assert_eq!(ContinuationPolicy::default(), ContinuationPolicy::All);
        assert_eq!(
            ContinuationPolicy::parse("off"),
            Some(ContinuationPolicy::Off)
        );
        assert_eq!(
            ContinuationPolicy::parse("exec-only"),
            Some(ContinuationPolicy::ExecOnly)
        );
        assert_eq!(
            ContinuationPolicy::parse("all"),
            Some(ContinuationPolicy::All)
        );
        assert_eq!(ContinuationPolicy::parse("invalid"), None);
    }

    // Unknown policy strings in TOML fall back to the default rather than
    // failing deserialization.
    #[test]
    fn test_harness_config_continuation_policy_deserializes_with_fallback() {
        let parsed: AgentHarnessConfig =
            toml::from_str("continuation_policy = \"all\"").expect("valid harness config");
        assert_eq!(parsed.continuation_policy, ContinuationPolicy::All);

        let fallback: AgentHarnessConfig =
            toml::from_str("continuation_policy = \"unexpected\"").expect("fallback config");
        assert_eq!(fallback.continuation_policy, ContinuationPolicy::All);
    }

    // HarnessOrchestrationMode: default variant and accepted string forms,
    // including the planner-generator-evaluator alias.
    #[test]
    fn test_harness_orchestration_mode_defaults_and_parses() {
        assert_eq!(
            HarnessOrchestrationMode::default(),
            HarnessOrchestrationMode::Single
        );
        assert_eq!(
            HarnessOrchestrationMode::parse("single"),
            Some(HarnessOrchestrationMode::Single)
        );
        assert_eq!(
            HarnessOrchestrationMode::parse("plan_build_evaluate"),
            Some(HarnessOrchestrationMode::PlanBuildEvaluate)
        );
        assert_eq!(
            HarnessOrchestrationMode::parse("planner-generator-evaluator"),
            Some(HarnessOrchestrationMode::PlanBuildEvaluate)
        );
        assert_eq!(HarnessOrchestrationMode::parse("unexpected"), None);
    }

    // Unknown orchestration modes in TOML fall back to Single rather than
    // failing deserialization.
    #[test]
    fn test_harness_config_orchestration_deserializes_with_fallback() {
        let parsed: AgentHarnessConfig =
            toml::from_str("orchestration_mode = \"plan_build_evaluate\"")
                .expect("valid harness config");
        assert_eq!(
            parsed.orchestration_mode,
            HarnessOrchestrationMode::PlanBuildEvaluate
        );
        assert_eq!(parsed.max_revision_rounds, 2);

        let fallback: AgentHarnessConfig =
            toml::from_str("orchestration_mode = \"unexpected\"").expect("fallback config");
        assert_eq!(
            fallback.orchestration_mode,
            HarnessOrchestrationMode::Single
        );
    }

    // Default editing posture: Edit mode, plan confirmation required,
    // autonomous mode off (matches the serde defaults above).
    #[test]
    fn test_editing_mode_config_default() {
        let config = AgentConfig::default();
        assert_eq!(config.default_editing_mode, EditingMode::Edit);
        assert!(config.require_plan_confirmation);
        assert!(!config.autonomous_mode);
    }

    // Persistent memory is opt-in, but auto-write defaults on once enabled.
    #[test]
    fn test_persistent_memory_is_disabled_by_default() {
        let config = AgentConfig::default();
        assert!(!config.persistent_memory.enabled);
        assert!(config.persistent_memory.auto_write);
    }

    // Structured reasoning tags follow the prompt mode when no explicit
    // override is set: on for Default/Specialized, off for Minimal/Lightweight.
    #[test]
    fn test_structured_reasoning_defaults_follow_prompt_mode() {
        let default_mode = AgentConfig {
            system_prompt_mode: SystemPromptMode::Default,
            ..Default::default()
        };
        assert!(default_mode.should_include_structured_reasoning_tags());

        let specialized_mode = AgentConfig {
            system_prompt_mode: SystemPromptMode::Specialized,
            ..Default::default()
        };
        assert!(specialized_mode.should_include_structured_reasoning_tags());

        let minimal_mode = AgentConfig {
            system_prompt_mode: SystemPromptMode::Minimal,
            ..Default::default()
        };
        assert!(!minimal_mode.should_include_structured_reasoning_tags());

        let lightweight_mode = AgentConfig {
            system_prompt_mode: SystemPromptMode::Lightweight,
            ..Default::default()
        };
        assert!(!lightweight_mode.should_include_structured_reasoning_tags());
    }

    // An explicit Some(...) override beats the prompt-mode-derived default.
    #[test]
    fn test_structured_reasoning_explicit_override() {
        let mut config = AgentConfig {
            system_prompt_mode: SystemPromptMode::Minimal,
            include_structured_reasoning_tags: Some(true),
            ..AgentConfig::default()
        };
        assert!(config.should_include_structured_reasoning_tags());

        config.include_structured_reasoning_tags = Some(false);
        assert!(!config.should_include_structured_reasoning_tags());
    }
}