// vtcode_config/core/agent.rs
1use crate::constants::{defaults, instructions, llm_generation, project_doc};
2use crate::types::{
3    EditingMode, ReasoningEffortLevel, SystemPromptMode, ToolDocumentationMode,
4    UiSurfacePreference, VerbosityLevel,
5};
6use serde::{Deserialize, Serialize};
7use std::collections::BTreeMap;
8
/// Default for `AgentCheckpointingConfig::enabled` — checkpoints are on by default.
const DEFAULT_CHECKPOINTS_ENABLED: bool = true;
/// Default for `AgentCheckpointingConfig::max_snapshots` retained on disk.
const DEFAULT_MAX_SNAPSHOTS: usize = 50;
/// Default for `AgentCheckpointingConfig::max_age_days` before automatic removal.
const DEFAULT_MAX_AGE_DAYS: u64 = 30;
12
/// Agent-wide configuration
///
/// Every field carries a serde default, so any subset may be specified in the
/// configuration file; omitted fields fall back to the helpers referenced in
/// the `#[serde(default = "...")]` attributes below.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentConfig {
    /// AI provider for single agent mode (gemini, openai, anthropic, openrouter, zai)
    #[serde(default = "default_provider")]
    pub provider: String,

    /// Environment variable that stores the API key for the active provider
    #[serde(default = "default_api_key_env")]
    pub api_key_env: String,

    /// Default model to use
    #[serde(default = "default_model")]
    pub default_model: String,

    /// UI theme identifier controlling ANSI styling
    #[serde(default = "default_theme")]
    pub theme: String,

    /// System prompt mode controlling verbosity and token overhead
    /// Options: minimal (~500-800 tokens), lightweight (~1-2k), default (~6-7k), specialized (~7-8k)
    /// Inspired by pi-coding-agent: modern models often perform well with minimal prompts
    #[serde(default)]
    pub system_prompt_mode: SystemPromptMode,

    /// Tool documentation mode controlling token overhead for tool definitions
    /// Options: minimal (~800 tokens), progressive (~1.2k), full (~3k current)
    /// Progressive: signatures upfront, detailed docs on-demand (recommended)
    /// Minimal: signatures only, pi-coding-agent style (power users)
    /// Full: all documentation upfront (current behavior, default)
    #[serde(default)]
    pub tool_documentation_mode: ToolDocumentationMode,

    /// Enable split tool results for massive token savings (Phase 4)
    /// When enabled, tools return dual-channel output:
    /// - llm_content: Concise summary sent to LLM (token-optimized, 53-95% reduction)
    /// - ui_content: Rich output displayed to user (full details preserved)
    ///   Applies to: grep_file, list_files, read_file, run_pty_cmd, write_file, edit_file
    ///   Default: true (opt-out for compatibility), recommended for production use
    #[serde(default = "default_enable_split_tool_results")]
    pub enable_split_tool_results: bool,

    /// Enable TODO planning helper mode for structured task management
    #[serde(default = "default_todo_planning_mode")]
    pub todo_planning_mode: bool,

    /// Preferred rendering surface for the interactive chat UI (auto, alternate, inline)
    #[serde(default)]
    pub ui_surface: UiSurfacePreference,

    /// Maximum number of conversation turns before auto-termination
    #[serde(default = "default_max_conversation_turns")]
    pub max_conversation_turns: usize,

    /// Reasoning effort level for models that support it (none, low, medium, high)
    /// Applies to: Claude, GPT-5, GPT-5.3, Gemini, Qwen3, DeepSeek with reasoning capability
    #[serde(default = "default_reasoning_effort")]
    pub reasoning_effort: ReasoningEffortLevel,

    /// Verbosity level for output text (low, medium, high)
    /// Applies to: GPT-5.3 and other models that support verbosity control
    #[serde(default = "default_verbosity")]
    pub verbosity: VerbosityLevel,

    /// Temperature for main LLM responses (0.0-1.0)
    /// Lower values = more deterministic, higher values = more creative
    /// Recommended: 0.7 for balanced creativity and consistency
    /// Range: 0.0 (deterministic) to 1.0 (maximum randomness)
    /// Validated by [`AgentConfig::validate_llm_params`].
    #[serde(default = "default_temperature")]
    pub temperature: f32,

    /// Temperature for prompt refinement (0.0-1.0, default: 0.3)
    /// Lower values ensure prompt refinement is more deterministic/consistent
    /// Keep lower than main temperature for stable prompt improvement
    /// Validated by [`AgentConfig::validate_llm_params`].
    #[serde(default = "default_refine_temperature")]
    pub refine_temperature: f32,

    /// Enable an extra self-review pass to refine final responses
    #[serde(default = "default_enable_self_review")]
    pub enable_self_review: bool,

    /// Maximum number of self-review passes
    #[serde(default = "default_max_review_passes")]
    pub max_review_passes: usize,

    /// Enable prompt refinement pass before sending to LLM
    #[serde(default = "default_refine_prompts_enabled")]
    pub refine_prompts_enabled: bool,

    /// Max refinement passes for prompt writing
    #[serde(default = "default_refine_max_passes")]
    pub refine_prompts_max_passes: usize,

    /// Optional model override for the refiner (empty = auto pick efficient sibling)
    #[serde(default)]
    pub refine_prompts_model: String,

    /// Small/lightweight model configuration for efficient operations
    /// Used for tasks like large file reads, parsing, git history, conversation summarization
    /// Typically 70-80% cheaper than main model; ~50% of VT Code's calls use this tier
    #[serde(default)]
    pub small_model: AgentSmallModelConfig,

    /// Session onboarding and welcome message configuration
    #[serde(default)]
    pub onboarding: AgentOnboardingConfig,

    /// Maximum bytes of AGENTS.md content to load from project hierarchy
    #[serde(default = "default_project_doc_max_bytes")]
    pub project_doc_max_bytes: usize,

    /// Maximum bytes of instruction content to load from AGENTS.md hierarchy
    /// (accepts the legacy `rule_doc_max_bytes` key via serde alias)
    #[serde(
        default = "default_instruction_max_bytes",
        alias = "rule_doc_max_bytes"
    )]
    pub instruction_max_bytes: usize,

    /// Additional instruction files or globs to merge into the hierarchy
    /// (also accepted under the `instruction_paths` / `instructions` keys)
    #[serde(default, alias = "instruction_paths", alias = "instructions")]
    pub instruction_files: Vec<String>,

    /// Provider-specific API keys captured from interactive configuration flows
    ///
    /// Note: Actual API keys are stored securely in the OS keyring.
    /// This field only tracks which providers have keys stored (for UI/migration purposes).
    /// The keys themselves are NOT serialized to the config file for security.
    #[serde(default, skip_serializing)]
    pub custom_api_keys: BTreeMap<String, String>,

    /// Preferred storage backend for credentials (OAuth tokens, API keys, etc.)
    ///
    /// - `keyring`: Use OS-specific secure storage (macOS Keychain, Windows Credential
    ///   Manager, Linux Secret Service). This is the default as it's the most secure.
    /// - `file`: Use AES-256-GCM encrypted file with machine-derived key
    /// - `auto`: Try keyring first, fall back to file if unavailable
    #[serde(default)]
    pub credential_storage_mode: crate::auth::AuthCredentialsStoreMode,

    /// Checkpointing configuration for automatic turn snapshots
    #[serde(default)]
    pub checkpointing: AgentCheckpointingConfig,

    /// Vibe coding configuration for lazy or vague request support
    #[serde(default)]
    pub vibe_coding: AgentVibeCodingConfig,

    /// Maximum number of retries for agent task execution (default: 2)
    /// When an agent task fails due to retryable errors (timeout, network, 503, etc.),
    /// it will be retried up to this many times with exponential backoff
    #[serde(default = "default_max_task_retries")]
    pub max_task_retries: u32,

    /// Harness configuration for turn-level budgets, telemetry, and execution limits
    #[serde(default)]
    pub harness: AgentHarnessConfig,

    /// Include current date/time in system prompt for temporal awareness
    /// Helps LLM understand context for time-sensitive tasks (default: true)
    #[serde(default = "default_include_temporal_context")]
    pub include_temporal_context: bool,

    /// Use UTC instead of local time for temporal context in system prompts
    #[serde(default)]
    pub temporal_context_use_utc: bool,

    /// Include current working directory in system prompt (default: true)
    #[serde(default = "default_include_working_directory")]
    pub include_working_directory: bool,

    /// Controls inclusion of the structured reasoning tag instructions block.
    ///
    /// Behavior:
    /// - `Some(true)`: always include structured reasoning instructions.
    /// - `Some(false)`: never include structured reasoning instructions.
    /// - `None` (default): include only for `default` and `specialized` prompt modes.
    ///
    /// This keeps lightweight/minimal prompts smaller by default while allowing
    /// explicit opt-in when users want tag-based reasoning guidance.
    /// See [`AgentConfig::should_include_structured_reasoning_tags`].
    #[serde(default)]
    pub include_structured_reasoning_tags: Option<bool>,

    /// Custom instructions provided by the user via configuration to guide agent behavior
    #[serde(default)]
    pub user_instructions: Option<String>,

    /// Default editing mode on startup: "edit" (default) or "plan"
    /// Codex-inspired: Encourages structured planning before execution.
    #[serde(default)]
    pub default_editing_mode: EditingMode,

    /// Require user confirmation before executing a plan generated in plan mode
    /// When true, exiting plan mode shows the implementation blueprint and
    /// requires explicit user approval before enabling edit tools.
    #[serde(default = "default_require_plan_confirmation")]
    pub require_plan_confirmation: bool,

    /// Enable autonomous mode - auto-approve safe tools with reduced HITL prompts
    /// When true, the agent operates with fewer confirmation prompts for safe tools.
    #[serde(default = "default_autonomous_mode")]
    pub autonomous_mode: bool,

    /// Circuit breaker configuration for resilient tool execution
    /// Controls when the agent should pause and ask for user guidance due to repeated failures
    #[serde(default)]
    pub circuit_breaker: CircuitBreakerConfig,

    /// Open Responses specification compliance configuration
    /// Enables vendor-neutral LLM API format for interoperable workflows
    #[serde(default)]
    pub open_responses: OpenResponsesConfig,
}
226
/// Harness configuration for turn-level budgets, telemetry, and execution limits.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentHarnessConfig {
    /// Maximum number of tool calls allowed per turn
    #[serde(default = "default_harness_max_tool_calls_per_turn")]
    pub max_tool_calls_per_turn: usize,
    /// Maximum wall clock time (seconds) for tool execution in a turn
    #[serde(default = "default_harness_max_tool_wall_clock_secs")]
    pub max_tool_wall_clock_secs: u64,
    /// Maximum retries for retryable tool errors
    #[serde(default = "default_harness_max_tool_retries")]
    pub max_tool_retries: u32,
    /// Enable automatic context compaction when token pressure crosses threshold.
    ///
    /// Disabled by default. When disabled, no automatic compaction is triggered.
    #[serde(default = "default_harness_auto_compaction_enabled")]
    pub auto_compaction_enabled: bool,
    /// Optional absolute compact threshold (tokens) for Responses server-side compaction.
    ///
    /// When unset, VT Code derives a threshold from the provider context window.
    #[serde(default)]
    pub auto_compaction_threshold_tokens: Option<u64>,
    /// Optional JSONL event log path for harness events
    #[serde(default)]
    pub event_log_path: Option<String>,
}
253
254impl Default for AgentHarnessConfig {
255    fn default() -> Self {
256        Self {
257            max_tool_calls_per_turn: default_harness_max_tool_calls_per_turn(),
258            max_tool_wall_clock_secs: default_harness_max_tool_wall_clock_secs(),
259            max_tool_retries: default_harness_max_tool_retries(),
260            auto_compaction_enabled: default_harness_auto_compaction_enabled(),
261            auto_compaction_threshold_tokens: None,
262            event_log_path: None,
263        }
264    }
265}
266
/// Circuit breaker configuration for resilient tool execution.
///
/// Controls when the agent should pause and ask for user guidance due to
/// repeated tool failures.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CircuitBreakerConfig {
    /// Enable circuit breaker functionality
    #[serde(default = "default_circuit_breaker_enabled")]
    pub enabled: bool,

    /// Number of consecutive failures before opening circuit
    #[serde(default = "default_failure_threshold")]
    pub failure_threshold: u32,

    /// Pause and ask user when circuit opens (vs auto-backoff)
    #[serde(default = "default_pause_on_open")]
    pub pause_on_open: bool,

    /// Number of open circuits before triggering pause
    #[serde(default = "default_max_open_circuits")]
    pub max_open_circuits: usize,

    /// Cooldown period between recovery prompts (seconds)
    #[serde(default = "default_recovery_cooldown")]
    pub recovery_cooldown: u64,
}
290
291impl Default for CircuitBreakerConfig {
292    fn default() -> Self {
293        Self {
294            enabled: default_circuit_breaker_enabled(),
295            failure_threshold: default_failure_threshold(),
296            pause_on_open: default_pause_on_open(),
297            max_open_circuits: default_max_open_circuits(),
298            recovery_cooldown: default_recovery_cooldown(),
299        }
300    }
301}
302
/// Open Responses specification compliance configuration
///
/// Enables vendor-neutral LLM API format per the Open Responses specification
/// (<https://www.openresponses.org/>). When enabled, VT Code emits semantic
/// streaming events and uses standardized response/item structures.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct OpenResponsesConfig {
    /// Enable Open Responses specification compliance layer
    /// When true, VT Code emits semantic streaming events alongside internal events
    /// Default: false (opt-in feature)
    #[serde(default)]
    pub enabled: bool,

    /// Emit Open Responses events to the event sink
    /// When true, streaming events follow Open Responses format
    /// (response.created, response.output_item.added, response.output_text.delta, etc.)
    /// Default: true
    #[serde(default = "default_open_responses_emit_events")]
    pub emit_events: bool,

    /// Include VT Code extension items (vtcode:file_change, vtcode:web_search, etc.)
    /// When false, extension items are omitted from the Open Responses output
    /// Default: true
    #[serde(default = "default_open_responses_include_extensions")]
    pub include_extensions: bool,

    /// Map internal tool calls to Open Responses function_call items
    /// When true, command executions and MCP tool calls are represented as function_call items
    /// Default: true
    #[serde(default = "default_open_responses_map_tool_calls")]
    pub map_tool_calls: bool,

    /// Include reasoning items in Open Responses output
    /// When true, model reasoning/thinking is exposed as reasoning items
    /// Default: true
    #[serde(default = "default_open_responses_include_reasoning")]
    pub include_reasoning: bool,
}
338
339impl Default for OpenResponsesConfig {
340    fn default() -> Self {
341        Self {
342            enabled: false, // Opt-in by default
343            emit_events: default_open_responses_emit_events(),
344            include_extensions: default_open_responses_include_extensions(),
345            map_tool_calls: default_open_responses_map_tool_calls(),
346            include_reasoning: default_open_responses_include_reasoning(),
347        }
348    }
349}
350
/// Serde default for `OpenResponsesConfig::emit_events`: emit events once
/// the compliance layer is enabled.
#[inline]
const fn default_open_responses_emit_events() -> bool {
    true
}

/// Serde default for `OpenResponsesConfig::include_extensions`: include the
/// VT Code-specific extension items.
#[inline]
const fn default_open_responses_include_extensions() -> bool {
    true
}

/// Serde default for `OpenResponsesConfig::map_tool_calls`: represent tool
/// calls as function_call items.
#[inline]
const fn default_open_responses_map_tool_calls() -> bool {
    true
}

/// Serde default for `OpenResponsesConfig::include_reasoning`: expose
/// reasoning items.
#[inline]
const fn default_open_responses_include_reasoning() -> bool {
    true
}
370
impl Default for AgentConfig {
    /// Construct the configuration with every field set to its serde default,
    /// so `AgentConfig::default()` matches deserializing an empty config table.
    fn default() -> Self {
        Self {
            provider: default_provider(),
            api_key_env: default_api_key_env(),
            default_model: default_model(),
            theme: default_theme(),
            system_prompt_mode: SystemPromptMode::default(),
            tool_documentation_mode: ToolDocumentationMode::default(),
            enable_split_tool_results: default_enable_split_tool_results(),
            todo_planning_mode: default_todo_planning_mode(),
            ui_surface: UiSurfacePreference::default(),
            max_conversation_turns: default_max_conversation_turns(),
            reasoning_effort: default_reasoning_effort(),
            verbosity: default_verbosity(),
            temperature: default_temperature(),
            refine_temperature: default_refine_temperature(),
            enable_self_review: default_enable_self_review(),
            max_review_passes: default_max_review_passes(),
            refine_prompts_enabled: default_refine_prompts_enabled(),
            refine_prompts_max_passes: default_refine_max_passes(),
            refine_prompts_model: String::new(),
            small_model: AgentSmallModelConfig::default(),
            onboarding: AgentOnboardingConfig::default(),
            project_doc_max_bytes: default_project_doc_max_bytes(),
            instruction_max_bytes: default_instruction_max_bytes(),
            instruction_files: Vec::new(),
            custom_api_keys: BTreeMap::new(),
            credential_storage_mode: crate::auth::AuthCredentialsStoreMode::default(),
            checkpointing: AgentCheckpointingConfig::default(),
            vibe_coding: AgentVibeCodingConfig::default(),
            max_task_retries: default_max_task_retries(),
            harness: AgentHarnessConfig::default(),
            include_temporal_context: default_include_temporal_context(),
            temporal_context_use_utc: false, // Default to local time
            include_working_directory: default_include_working_directory(),
            include_structured_reasoning_tags: None,
            user_instructions: None,
            default_editing_mode: EditingMode::default(),
            require_plan_confirmation: default_require_plan_confirmation(),
            autonomous_mode: default_autonomous_mode(),
            circuit_breaker: CircuitBreakerConfig::default(),
            open_responses: OpenResponsesConfig::default(),
        }
    }
}
417
418impl AgentConfig {
419    /// Determine whether structured reasoning tag instructions should be included.
420    pub fn should_include_structured_reasoning_tags(&self) -> bool {
421        self.include_structured_reasoning_tags.unwrap_or(matches!(
422            self.system_prompt_mode,
423            SystemPromptMode::Default | SystemPromptMode::Specialized
424        ))
425    }
426
427    /// Validate LLM generation parameters
428    pub fn validate_llm_params(&self) -> Result<(), String> {
429        // Validate temperature range
430        if !(0.0..=1.0).contains(&self.temperature) {
431            return Err(format!(
432                "temperature must be between 0.0 and 1.0, got {}",
433                self.temperature
434            ));
435        }
436
437        if !(0.0..=1.0).contains(&self.refine_temperature) {
438            return Err(format!(
439                "refine_temperature must be between 0.0 and 1.0, got {}",
440                self.refine_temperature
441            ));
442        }
443
444        Ok(())
445    }
446}
447
448// Optimized: Use inline defaults with constants to reduce function call overhead
449#[inline]
450fn default_provider() -> String {
451    defaults::DEFAULT_PROVIDER.into()
452}
453
454#[inline]
455fn default_api_key_env() -> String {
456    defaults::DEFAULT_API_KEY_ENV.into()
457}
458
459#[inline]
460fn default_model() -> String {
461    defaults::DEFAULT_MODEL.into()
462}
463
464#[inline]
465fn default_theme() -> String {
466    defaults::DEFAULT_THEME.into()
467}
468
/// Serde default for `AgentConfig::todo_planning_mode`: the TODO planning
/// helper ships enabled.
#[inline]
const fn default_todo_planning_mode() -> bool {
    true
}

/// Serde default for `AgentConfig::enable_split_tool_results`: split
/// (LLM/UI) tool results ship enabled for the token savings.
#[inline]
const fn default_enable_split_tool_results() -> bool {
    true
}
478
/// Serde default for `AgentConfig::max_conversation_turns`.
#[inline]
const fn default_max_conversation_turns() -> usize {
    defaults::DEFAULT_MAX_CONVERSATION_TURNS
}
483
/// Serde default for `AgentConfig::reasoning_effort`.
#[inline]
fn default_reasoning_effort() -> ReasoningEffortLevel {
    ReasoningEffortLevel::default()
}

/// Serde default for `AgentConfig::verbosity`.
#[inline]
fn default_verbosity() -> VerbosityLevel {
    VerbosityLevel::default()
}

/// Serde default for `AgentConfig::temperature`.
#[inline]
const fn default_temperature() -> f32 {
    llm_generation::DEFAULT_TEMPERATURE
}

/// Serde default for `AgentConfig::refine_temperature`.
#[inline]
const fn default_refine_temperature() -> f32 {
    llm_generation::DEFAULT_REFINE_TEMPERATURE
}
503
/// Serde default for `AgentConfig::enable_self_review`: off.
#[inline]
const fn default_enable_self_review() -> bool {
    false
}

/// Serde default for `AgentConfig::max_review_passes`: a single pass.
#[inline]
const fn default_max_review_passes() -> usize {
    1
}

/// Serde default for `AgentConfig::refine_prompts_enabled`: off.
#[inline]
const fn default_refine_prompts_enabled() -> bool {
    false
}

/// Serde default for `AgentConfig::refine_prompts_max_passes`: a single pass.
#[inline]
const fn default_refine_max_passes() -> usize {
    1
}
523
/// Serde default for `AgentConfig::project_doc_max_bytes`.
#[inline]
const fn default_project_doc_max_bytes() -> usize {
    project_doc::DEFAULT_MAX_BYTES
}

/// Serde default for `AgentConfig::instruction_max_bytes`.
#[inline]
const fn default_instruction_max_bytes() -> usize {
    instructions::DEFAULT_MAX_BYTES
}
533
/// Serde default for `AgentConfig::max_task_retries`: retry twice on
/// transient failures before giving up.
#[inline]
const fn default_max_task_retries() -> u32 {
    2
}
538
/// Serde default for `AgentHarnessConfig::max_tool_calls_per_turn`.
#[inline]
const fn default_harness_max_tool_calls_per_turn() -> usize {
    defaults::DEFAULT_MAX_TOOL_CALLS_PER_TURN
}

/// Serde default for `AgentHarnessConfig::max_tool_wall_clock_secs`.
#[inline]
const fn default_harness_max_tool_wall_clock_secs() -> u64 {
    defaults::DEFAULT_MAX_TOOL_WALL_CLOCK_SECS
}

/// Serde default for `AgentHarnessConfig::max_tool_retries`.
#[inline]
const fn default_harness_max_tool_retries() -> u32 {
    defaults::DEFAULT_MAX_TOOL_RETRIES
}
553
/// Serde default for `AgentHarnessConfig::auto_compaction_enabled`: off.
#[inline]
const fn default_harness_auto_compaction_enabled() -> bool {
    false
}

/// Serde default for `AgentConfig::include_temporal_context`: on
/// (adds only ~20 tokens to the system prompt).
#[inline]
const fn default_include_temporal_context() -> bool {
    true
}

/// Serde default for `AgentConfig::include_working_directory`: on
/// (adds only ~10 tokens to the system prompt).
#[inline]
const fn default_include_working_directory() -> bool {
    true
}

/// Serde default for `AgentConfig::require_plan_confirmation`: on
/// (human-in-the-loop pattern).
#[inline]
const fn default_require_plan_confirmation() -> bool {
    true
}

/// Serde default for `AgentConfig::autonomous_mode`: off — interactive
/// mode with full human-in-the-loop confirmation.
#[inline]
const fn default_autonomous_mode() -> bool {
    false
}

/// Serde default for `CircuitBreakerConfig::enabled`: on, for resilient
/// tool execution.
#[inline]
const fn default_circuit_breaker_enabled() -> bool {
    true
}

/// Serde default for `CircuitBreakerConfig::failure_threshold`: open the
/// circuit after five consecutive failures.
#[inline]
const fn default_failure_threshold() -> u32 {
    5
}

/// Serde default for `CircuitBreakerConfig::pause_on_open`: ask the user
/// for guidance when a circuit opens.
#[inline]
const fn default_pause_on_open() -> bool {
    true
}

/// Serde default for `CircuitBreakerConfig::max_open_circuits`: pause once
/// three or more tools have open circuits.
#[inline]
const fn default_max_open_circuits() -> usize {
    3
}

/// Serde default for `CircuitBreakerConfig::recovery_cooldown`: sixty
/// seconds between recovery prompts.
#[inline]
const fn default_recovery_cooldown() -> u64 {
    60
}
603
/// Checkpointing configuration for automatic turn snapshots.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentCheckpointingConfig {
    /// Enable automatic checkpoints after each successful turn
    #[serde(default = "default_checkpointing_enabled")]
    pub enabled: bool,

    /// Optional custom directory for storing checkpoints (relative to workspace or absolute)
    #[serde(default)]
    pub storage_dir: Option<String>,

    /// Maximum number of checkpoints to retain on disk
    #[serde(default = "default_checkpointing_max_snapshots")]
    pub max_snapshots: usize,

    /// Maximum age in days before checkpoints are removed automatically (None disables)
    #[serde(default = "default_checkpointing_max_age_days")]
    pub max_age_days: Option<u64>,
}
623
624impl Default for AgentCheckpointingConfig {
625    fn default() -> Self {
626        Self {
627            enabled: default_checkpointing_enabled(),
628            storage_dir: None,
629            max_snapshots: default_checkpointing_max_snapshots(),
630            max_age_days: default_checkpointing_max_age_days(),
631        }
632    }
633}
634
/// Serde default for `AgentCheckpointingConfig::enabled`.
#[inline]
const fn default_checkpointing_enabled() -> bool {
    DEFAULT_CHECKPOINTS_ENABLED
}

/// Serde default for `AgentCheckpointingConfig::max_snapshots`.
#[inline]
const fn default_checkpointing_max_snapshots() -> usize {
    DEFAULT_MAX_SNAPSHOTS
}

/// Serde default for `AgentCheckpointingConfig::max_age_days` (`Some` means
/// age-based pruning is enabled by default).
#[inline]
const fn default_checkpointing_max_age_days() -> Option<u64> {
    Some(DEFAULT_MAX_AGE_DAYS)
}
649
/// Session onboarding and welcome message configuration.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentOnboardingConfig {
    /// Toggle onboarding message rendering
    #[serde(default = "default_onboarding_enabled")]
    pub enabled: bool,

    /// Introductory text shown at session start
    #[serde(default = "default_intro_text")]
    pub intro_text: String,

    /// Whether to include project overview in onboarding message
    #[serde(default = "default_show_project_overview")]
    pub include_project_overview: bool,

    /// Whether to include language summary in onboarding message
    #[serde(default = "default_show_language_summary")]
    pub include_language_summary: bool,

    /// Whether to include AGENTS.md highlights in onboarding message
    #[serde(default = "default_show_guideline_highlights")]
    pub include_guideline_highlights: bool,

    /// Whether to surface usage tips inside the welcome text banner
    #[serde(default = "default_show_usage_tips_in_welcome")]
    pub include_usage_tips_in_welcome: bool,

    /// Whether to surface suggested actions inside the welcome text banner
    #[serde(default = "default_show_recommended_actions_in_welcome")]
    pub include_recommended_actions_in_welcome: bool,

    /// Maximum number of guideline bullets to surface
    #[serde(default = "default_guideline_highlight_limit")]
    pub guideline_highlight_limit: usize,

    /// Tips for collaborating with the agent effectively
    #[serde(default = "default_usage_tips")]
    pub usage_tips: Vec<String>,

    /// Recommended follow-up actions to display
    #[serde(default = "default_recommended_actions")]
    pub recommended_actions: Vec<String>,

    /// Placeholder suggestion for the chat input bar
    #[serde(default)]
    pub chat_placeholder: Option<String>,
}
697
698impl Default for AgentOnboardingConfig {
699    fn default() -> Self {
700        Self {
701            enabled: default_onboarding_enabled(),
702            intro_text: default_intro_text(),
703            include_project_overview: default_show_project_overview(),
704            include_language_summary: default_show_language_summary(),
705            include_guideline_highlights: default_show_guideline_highlights(),
706            include_usage_tips_in_welcome: default_show_usage_tips_in_welcome(),
707            include_recommended_actions_in_welcome: default_show_recommended_actions_in_welcome(),
708            guideline_highlight_limit: default_guideline_highlight_limit(),
709            usage_tips: default_usage_tips(),
710            recommended_actions: default_recommended_actions(),
711            chat_placeholder: None,
712        }
713    }
714}
715
/// Serde default for `AgentOnboardingConfig::enabled`: onboarding is shown.
#[inline]
const fn default_onboarding_enabled() -> bool {
    true
}

/// Intro line shown at session start when none is configured.
const DEFAULT_INTRO_TEXT: &str =
    "Let's get oriented. I preloaded workspace context so we can move fast.";

/// Serde default for `AgentOnboardingConfig::intro_text`.
#[inline]
fn default_intro_text() -> String {
    String::from(DEFAULT_INTRO_TEXT)
}
728
/// Serde default for `AgentOnboardingConfig::include_project_overview`: on.
#[inline]
const fn default_show_project_overview() -> bool {
    true
}

/// Serde default for `AgentOnboardingConfig::include_language_summary`: off.
#[inline]
const fn default_show_language_summary() -> bool {
    false
}

/// Serde default for `AgentOnboardingConfig::include_guideline_highlights`: on.
#[inline]
const fn default_show_guideline_highlights() -> bool {
    true
}

/// Serde default for `AgentOnboardingConfig::include_usage_tips_in_welcome`: off.
#[inline]
const fn default_show_usage_tips_in_welcome() -> bool {
    false
}

/// Serde default for
/// `AgentOnboardingConfig::include_recommended_actions_in_welcome`: off.
#[inline]
const fn default_show_recommended_actions_in_welcome() -> bool {
    false
}

/// Serde default for `AgentOnboardingConfig::guideline_highlight_limit`:
/// surface at most three guideline bullets.
#[inline]
const fn default_guideline_highlight_limit() -> usize {
    3
}
758
/// Canonical usage tips surfaced during onboarding.
const DEFAULT_USAGE_TIPS: &[&str] = &[
    "Describe your current coding goal or ask for a quick status overview.",
    "Reference AGENTS.md guidelines when proposing changes.",
    "Prefer asking for targeted file reads or diffs before editing.",
];

/// Canonical follow-up suggestions surfaced during onboarding.
const DEFAULT_RECOMMENDED_ACTIONS: &[&str] = &[
    "Review the highlighted guidelines and share the task you want to tackle.",
    "Ask for a workspace tour if you need more context.",
];

/// Serde default for `AgentOnboardingConfig::usage_tips`: owned copies of
/// [`DEFAULT_USAGE_TIPS`].
fn default_usage_tips() -> Vec<String> {
    DEFAULT_USAGE_TIPS.iter().copied().map(String::from).collect()
}

/// Serde default for `AgentOnboardingConfig::recommended_actions`: owned
/// copies of [`DEFAULT_RECOMMENDED_ACTIONS`].
fn default_recommended_actions() -> Vec<String> {
    DEFAULT_RECOMMENDED_ACTIONS
        .iter()
        .copied()
        .map(String::from)
        .collect()
}
780
/// Small/lightweight model configuration for efficient operations
///
/// Following VT Code's pattern, use a smaller model (e.g., Haiku, GPT-4 Mini) for 50%+ of calls:
/// - Large file reads and parsing (>50KB)
/// - Web page summarization and analysis
/// - Git history and commit message processing
/// - One-word processing labels and simple classifications
///
/// Typically 70-80% cheaper than the main model while maintaining quality for these tasks.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentSmallModelConfig {
    /// Enable small model tier for efficient operations
    #[serde(default = "default_small_model_enabled")]
    pub enabled: bool,

    /// Small model to use (e.g., claude-4-5-haiku, "gpt-4-mini", "gemini-2.0-flash")
    /// Leave empty to auto-select a lightweight sibling of the main model
    #[serde(default)]
    pub model: String,

    /// Temperature for small model responses
    /// Default: 0.3 (more deterministic, suited to parsing/summarization)
    #[serde(default = "default_small_model_temperature")]
    pub temperature: f32,

    /// Enable small model for large file reads (>50KB)
    #[serde(default = "default_small_model_for_large_reads")]
    pub use_for_large_reads: bool,

    /// Enable small model for web content summarization
    #[serde(default = "default_small_model_for_web_summary")]
    pub use_for_web_summary: bool,

    /// Enable small model for git history processing
    #[serde(default = "default_small_model_for_git_history")]
    pub use_for_git_history: bool,
}
818
819impl Default for AgentSmallModelConfig {
820    fn default() -> Self {
821        Self {
822            enabled: default_small_model_enabled(),
823            model: String::new(),
824            temperature: default_small_model_temperature(),
825            use_for_large_reads: default_small_model_for_large_reads(),
826            use_for_web_summary: default_small_model_for_web_summary(),
827            use_for_git_history: default_small_model_for_git_history(),
828        }
829    }
830}
831
// --- serde default helpers for `AgentSmallModelConfig` ---

/// Small-model tier is on by default, following the VT Code pattern.
#[inline]
const fn default_small_model_enabled() -> bool {
    true
}

/// Low temperature keeps parsing/summarization output near-deterministic.
#[inline]
const fn default_small_model_temperature() -> f32 {
    0.3
}

/// Route large file reads (>50KB) through the small model by default.
#[inline]
const fn default_small_model_for_large_reads() -> bool {
    true
}

/// Route web content summarization through the small model by default.
#[inline]
const fn default_small_model_for_web_summary() -> bool {
    true
}

/// Route git history processing through the small model by default.
#[inline]
const fn default_small_model_for_git_history() -> bool {
    true
}
856
/// Vibe coding configuration for lazy/vague request support
///
/// Enables intelligent context gathering and entity resolution to support
/// casual, imprecise requests like "make it blue" or "decrease by half".
///
/// NOTE: every field has a serde default so the config file may omit this
/// table entirely; the manual `Default` impl below must stay in sync with the
/// per-field default functions.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentVibeCodingConfig {
    /// Enable vibe coding support (default: false — conservative, opt-in)
    #[serde(default = "default_vibe_coding_enabled")]
    pub enabled: bool,

    /// Minimum prompt length (in characters) for refinement (default: 5)
    #[serde(default = "default_vibe_min_prompt_length")]
    pub min_prompt_length: usize,

    /// Minimum prompt words for refinement (default: 2)
    #[serde(default = "default_vibe_min_prompt_words")]
    pub min_prompt_words: usize,

    /// Enable fuzzy entity resolution (default: true)
    #[serde(default = "default_vibe_entity_resolution")]
    pub enable_entity_resolution: bool,

    /// Entity index cache file path, relative to the workspace root
    /// (default: ".vtcode/entity_index.json")
    #[serde(default = "default_vibe_entity_cache")]
    pub entity_index_cache: String,

    /// Maximum entity matches to return (default: 5)
    #[serde(default = "default_vibe_max_entity_matches")]
    pub max_entity_matches: usize,

    /// Track workspace state such as file activity and value changes (default: true)
    #[serde(default = "default_vibe_track_workspace")]
    pub track_workspace_state: bool,

    /// Maximum recent files to track (default: 20)
    #[serde(default = "default_vibe_max_recent_files")]
    pub max_recent_files: usize,

    /// Track value history for inference (default: true)
    #[serde(default = "default_vibe_track_values")]
    pub track_value_history: bool,

    /// Enable conversation memory for pronoun resolution (default: true)
    #[serde(default = "default_vibe_conversation_memory")]
    pub enable_conversation_memory: bool,

    /// Maximum conversation turns to remember (default: 50)
    #[serde(default = "default_vibe_max_memory_turns")]
    pub max_memory_turns: usize,

    /// Enable resolution of pronouns such as "it", "that", "this" (default: true)
    #[serde(default = "default_vibe_pronoun_resolution")]
    pub enable_pronoun_resolution: bool,

    /// Enable proactive context gathering (default: true)
    #[serde(default = "default_vibe_proactive_context")]
    pub enable_proactive_context: bool,

    /// Maximum files to gather for context (default: 3)
    #[serde(default = "default_vibe_max_context_files")]
    pub max_context_files: usize,

    /// Maximum code snippet lines per file (default: 20)
    #[serde(default = "default_vibe_max_snippets_per_file")]
    pub max_context_snippets_per_file: usize,

    /// Maximum search results to include (default: 5)
    #[serde(default = "default_vibe_max_search_results")]
    pub max_search_results: usize,

    /// Enable relative value inference for phrases like "by half" or "double"
    /// (default: true)
    #[serde(default = "default_vibe_value_inference")]
    pub enable_relative_value_inference: bool,
}
932
933impl Default for AgentVibeCodingConfig {
934    fn default() -> Self {
935        Self {
936            enabled: default_vibe_coding_enabled(),
937            min_prompt_length: default_vibe_min_prompt_length(),
938            min_prompt_words: default_vibe_min_prompt_words(),
939            enable_entity_resolution: default_vibe_entity_resolution(),
940            entity_index_cache: default_vibe_entity_cache(),
941            max_entity_matches: default_vibe_max_entity_matches(),
942            track_workspace_state: default_vibe_track_workspace(),
943            max_recent_files: default_vibe_max_recent_files(),
944            track_value_history: default_vibe_track_values(),
945            enable_conversation_memory: default_vibe_conversation_memory(),
946            max_memory_turns: default_vibe_max_memory_turns(),
947            enable_pronoun_resolution: default_vibe_pronoun_resolution(),
948            enable_proactive_context: default_vibe_proactive_context(),
949            max_context_files: default_vibe_max_context_files(),
950            max_context_snippets_per_file: default_vibe_max_snippets_per_file(),
951            max_search_results: default_vibe_max_search_results(),
952            enable_relative_value_inference: default_vibe_value_inference(),
953        }
954    }
955}
956
// --- serde default helpers for `AgentVibeCodingConfig` ---

/// Vibe coding is opt-in; the conservative default is off.
#[inline]
const fn default_vibe_coding_enabled() -> bool {
    false
}

/// Prompts shorter than this many characters skip refinement.
#[inline]
const fn default_vibe_min_prompt_length() -> usize {
    5
}

/// Prompts with fewer words than this skip refinement.
#[inline]
const fn default_vibe_min_prompt_words() -> usize {
    2
}

/// Fuzzy entity resolution is on by default.
#[inline]
const fn default_vibe_entity_resolution() -> bool {
    true
}

/// Default cache location for the entity index, relative to the workspace.
#[inline]
fn default_vibe_entity_cache() -> String {
    ".vtcode/entity_index.json".into()
}

/// Cap on entity matches returned per lookup.
#[inline]
const fn default_vibe_max_entity_matches() -> usize {
    5
}

/// Workspace state tracking is on by default.
#[inline]
const fn default_vibe_track_workspace() -> bool {
    true
}

/// Cap on recently-touched files kept in the tracker.
#[inline]
const fn default_vibe_max_recent_files() -> usize {
    20
}

/// Value-history tracking is on by default.
#[inline]
const fn default_vibe_track_values() -> bool {
    true
}

/// Conversation memory is on by default.
#[inline]
const fn default_vibe_conversation_memory() -> bool {
    true
}

/// Cap on conversation turns retained in memory.
#[inline]
const fn default_vibe_max_memory_turns() -> usize {
    50
}

/// Pronoun resolution ("it", "that", "this") is on by default.
#[inline]
const fn default_vibe_pronoun_resolution() -> bool {
    true
}

/// Proactive context gathering is on by default.
#[inline]
const fn default_vibe_proactive_context() -> bool {
    true
}

/// Cap on files gathered for context.
#[inline]
const fn default_vibe_max_context_files() -> usize {
    3
}

/// Cap on code snippet lines taken per file.
#[inline]
const fn default_vibe_max_snippets_per_file() -> usize {
    20
}

/// Cap on search results included in gathered context.
#[inline]
const fn default_vibe_max_search_results() -> usize {
    5
}

/// Relative value inference ("by half", "double") is on by default.
#[inline]
const fn default_vibe_value_inference() -> bool {
    true
}
1042
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_editing_mode_config_default() {
        // Out of the box: interactive Edit mode, plans need confirmation,
        // autonomy is off.
        let config = AgentConfig::default();
        assert_eq!(config.default_editing_mode, EditingMode::Edit);
        assert!(config.require_plan_confirmation);
        assert!(!config.autonomous_mode);
    }

    #[test]
    fn test_structured_reasoning_defaults_follow_prompt_mode() {
        // Does a config with only the prompt mode overridden emit
        // structured reasoning tags?
        let includes_tags = |mode: SystemPromptMode| {
            AgentConfig {
                system_prompt_mode: mode,
                ..Default::default()
            }
            .should_include_structured_reasoning_tags()
        };

        // Verbose prompt modes include the tags...
        assert!(includes_tags(SystemPromptMode::Default));
        assert!(includes_tags(SystemPromptMode::Specialized));

        // ...while token-lean modes omit them.
        assert!(!includes_tags(SystemPromptMode::Minimal));
        assert!(!includes_tags(SystemPromptMode::Lightweight));
    }

    #[test]
    fn test_structured_reasoning_explicit_override() {
        // An explicit setting wins over whatever the prompt mode implies.
        let mut config = AgentConfig {
            system_prompt_mode: SystemPromptMode::Minimal,
            include_structured_reasoning_tags: Some(true),
            ..AgentConfig::default()
        };
        assert!(config.should_include_structured_reasoning_tags());

        config.include_structured_reasoning_tags = Some(false);
        assert!(!config.should_include_structured_reasoning_tags());
    }
}