
vtcode_config/core/agent.rs

use crate::constants::{defaults, instructions, llm_generation, project_doc, prompts};
use crate::types::{
    EditingMode, ReasoningEffortLevel, SystemPromptMode, ToolDocumentationMode,
    UiSurfacePreference, VerbosityLevel,
};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;

const DEFAULT_CHECKPOINTS_ENABLED: bool = true;
const DEFAULT_MAX_SNAPSHOTS: usize = 50;
const DEFAULT_MAX_AGE_DAYS: u64 = 30;

/// Agent-wide configuration
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentConfig {
    /// AI provider for single agent mode (gemini, openai, anthropic, openrouter, xai, zai)
    #[serde(default = "default_provider")]
    pub provider: String,

    /// Environment variable that stores the API key for the active provider
    #[serde(default = "default_api_key_env")]
    pub api_key_env: String,

    /// Default model to use
    #[serde(default = "default_model")]
    pub default_model: String,

    /// UI theme identifier controlling ANSI styling
    #[serde(default = "default_theme")]
    pub theme: String,

    /// System prompt mode controlling verbosity and token overhead
    /// Options: minimal (~500-800 tokens), lightweight (~1-2k), default (~6-7k), specialized (~7-8k)
    /// Inspired by pi-coding-agent: modern models often perform well with minimal prompts
    #[serde(default)]
    pub system_prompt_mode: SystemPromptMode,

    /// Tool documentation mode controlling token overhead for tool definitions
    /// Options: minimal (~800 tokens), progressive (~1.2k), full (~3k current)
    /// Progressive: signatures upfront, detailed docs on-demand (recommended)
    /// Minimal: signatures only, pi-coding-agent style (power users)
    /// Full: all documentation upfront (current behavior, default)
    #[serde(default)]
    pub tool_documentation_mode: ToolDocumentationMode,

    /// Enable split tool results for massive token savings (Phase 4)
    /// When enabled, tools return dual-channel output:
    /// - llm_content: Concise summary sent to LLM (token-optimized, 53-95% reduction)
    /// - ui_content: Rich output displayed to user (full details preserved)
    ///   Applies to: grep_file, list_files, read_file, run_pty_cmd, write_file, edit_file
    ///   Default: true (opt-out for compatibility), recommended for production use
    #[serde(default = "default_enable_split_tool_results")]
    pub enable_split_tool_results: bool,

    #[serde(default = "default_todo_planning_mode")]
    pub todo_planning_mode: bool,

    /// Preferred rendering surface for the interactive chat UI (auto, alternate, inline)
    #[serde(default)]
    pub ui_surface: UiSurfacePreference,

    /// Maximum number of conversation turns before auto-termination
    #[serde(default = "default_max_conversation_turns")]
    pub max_conversation_turns: usize,

    /// Reasoning effort level for models that support it (none, low, medium, high)
    /// Applies to: Claude, GPT-5, GPT-5.1, Gemini, Qwen3, DeepSeek with reasoning capability
    #[serde(default = "default_reasoning_effort")]
    pub reasoning_effort: ReasoningEffortLevel,

    /// Verbosity level for output text (low, medium, high)
    /// Applies to: GPT-5.1 and other models that support verbosity control
    #[serde(default = "default_verbosity")]
    pub verbosity: VerbosityLevel,

    /// Temperature for main LLM responses (0.0-1.0)
    /// Lower values = more deterministic, higher values = more creative
    /// Recommended: 0.7 for balanced creativity and consistency
    /// Range: 0.0 (deterministic) to 1.0 (maximum randomness)
    #[serde(default = "default_temperature")]
    pub temperature: f32,

    /// Temperature for prompt refinement (0.0-1.0, default: 0.3)
    /// Lower values ensure prompt refinement is more deterministic/consistent
    /// Keep lower than main temperature for stable prompt improvement
    #[serde(default = "default_refine_temperature")]
    pub refine_temperature: f32,

    /// Enable an extra self-review pass to refine final responses
    #[serde(default = "default_enable_self_review")]
    pub enable_self_review: bool,

    /// Maximum number of self-review passes
    #[serde(default = "default_max_review_passes")]
    pub max_review_passes: usize,

    /// Enable prompt refinement pass before sending to LLM
    #[serde(default = "default_refine_prompts_enabled")]
    pub refine_prompts_enabled: bool,

    /// Max refinement passes for prompt writing
    #[serde(default = "default_refine_max_passes")]
    pub refine_prompts_max_passes: usize,

    /// Optional model override for the refiner (empty = auto pick efficient sibling)
    #[serde(default)]
    pub refine_prompts_model: String,

    /// Small/lightweight model configuration for efficient operations
    /// Used for tasks like large file reads, parsing, git history, conversation summarization
    /// Typically 70-80% cheaper than main model; ~50% of VT Code's calls use this tier
    #[serde(default)]
    pub small_model: AgentSmallModelConfig,

    /// Session onboarding and welcome message configuration
    #[serde(default)]
    pub onboarding: AgentOnboardingConfig,

    /// Maximum bytes of AGENTS.md content to load from project hierarchy
    #[serde(default = "default_project_doc_max_bytes")]
    pub project_doc_max_bytes: usize,

    /// Maximum bytes of instruction content to load from AGENTS.md hierarchy
    #[serde(
        default = "default_instruction_max_bytes",
        alias = "rule_doc_max_bytes"
    )]
    pub instruction_max_bytes: usize,

    /// Additional instruction files or globs to merge into the hierarchy
    #[serde(default, alias = "instruction_paths", alias = "instructions")]
    pub instruction_files: Vec<String>,

    /// Custom prompt configuration for slash command shortcuts
    #[serde(default)]
    pub custom_prompts: AgentCustomPromptsConfig,

    /// Configuration for custom slash commands
    #[serde(default)]
    pub custom_slash_commands: AgentCustomSlashCommandsConfig,

    /// Provider-specific API keys captured from interactive configuration flows
    #[serde(default)]
    pub custom_api_keys: BTreeMap<String, String>,

    /// Checkpointing configuration for automatic turn snapshots
    #[serde(default)]
    pub checkpointing: AgentCheckpointingConfig,

    /// Vibe coding configuration for lazy/vague request support
    #[serde(default)]
    pub vibe_coding: AgentVibeCodingConfig,

    /// Maximum number of retries for agent task execution (default: 2)
    /// When an agent task fails due to retryable errors (timeout, network, 503, etc.),
    /// it will be retried up to this many times with exponential backoff
    #[serde(default = "default_max_task_retries")]
    pub max_task_retries: u32,

    /// Harness configuration for turn-level budgets and telemetry
    #[serde(default)]
    pub harness: AgentHarnessConfig,

    /// Include current date/time in system prompt for temporal awareness
    /// Helps LLM understand context for time-sensitive tasks (default: true)
    #[serde(default = "default_include_temporal_context")]
    pub include_temporal_context: bool,

    /// Use UTC instead of local time for temporal context (default: false)
    #[serde(default)]
    pub temporal_context_use_utc: bool,

    /// Include current working directory in system prompt (default: true)
    #[serde(default = "default_include_working_directory")]
    pub include_working_directory: bool,

    /// Custom instructions provided by the user via configuration
    #[serde(default)]
    pub user_instructions: Option<String>,

    /// Default editing mode: "edit" (default) or "plan"
    /// In "plan" mode, the agent is read-only and produces implementation plans.
    /// In "edit" mode, the agent can modify files and execute commands.
    /// Toggle with Shift+Tab or /plan command during a session.
    /// Codex-inspired: Encourages structured planning before execution.
    #[serde(default)]
    pub default_editing_mode: EditingMode,

    /// Require user confirmation before executing a plan (HITL pattern)
    /// When true, exiting plan mode shows the implementation blueprint and
    /// requires explicit user approval before enabling edit tools.
    /// Options in confirmation dialog: Execute, Edit Plan, Cancel
    #[serde(default = "default_require_plan_confirmation")]
    pub require_plan_confirmation: bool,

    /// Enable autonomous mode - auto-approve safe tools with reduced HITL prompts
    /// When true, the agent operates with fewer confirmation prompts for safe tools
    /// (read operations, grep_file, list_files, etc.) while still blocking dangerous operations.
    /// Toggle with /agent command during a session.
    #[serde(default = "default_autonomous_mode")]
    pub autonomous_mode: bool,

    /// Circuit breaker configuration for resilient tool execution
    /// Controls when the agent should pause and ask for user guidance due to repeated failures
    #[serde(default)]
    pub circuit_breaker: CircuitBreakerConfig,

    /// Open Responses specification compliance configuration
    /// Enables vendor-neutral LLM API format for interoperable workflows
    #[serde(default)]
    pub open_responses: OpenResponsesConfig,
}
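
// Illustrative sketch, not part of the upstream file: overriding a few fields
// programmatically while keeping every other value from the `Default` impl
// further below. The provider and temperature values are examples, not
// recommendations baked into VT Code.
#[allow(dead_code)]
fn example_agent_config() -> AgentConfig {
    AgentConfig {
        provider: "openai".into(),
        temperature: 0.5,
        autonomous_mode: true,
        ..AgentConfig::default()
    }
}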

#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentHarnessConfig {
    /// Maximum number of tool calls allowed per turn
    #[serde(default = "default_harness_max_tool_calls_per_turn")]
    pub max_tool_calls_per_turn: usize,
    /// Maximum wall clock time (seconds) for tool execution in a turn
    #[serde(default = "default_harness_max_tool_wall_clock_secs")]
    pub max_tool_wall_clock_secs: u64,
    /// Maximum retries for retryable tool errors
    #[serde(default = "default_harness_max_tool_retries")]
    pub max_tool_retries: u32,
    /// Optional JSONL event log path for harness events
    #[serde(default)]
    pub event_log_path: Option<String>,
}

impl Default for AgentHarnessConfig {
    fn default() -> Self {
        Self {
            max_tool_calls_per_turn: default_harness_max_tool_calls_per_turn(),
            max_tool_wall_clock_secs: default_harness_max_tool_wall_clock_secs(),
            max_tool_retries: default_harness_max_tool_retries(),
            event_log_path: None,
        }
    }
}

#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CircuitBreakerConfig {
    /// Enable circuit breaker functionality
    #[serde(default = "default_circuit_breaker_enabled")]
    pub enabled: bool,

    /// Number of consecutive failures before opening circuit
    #[serde(default = "default_failure_threshold")]
    pub failure_threshold: u32,

    /// Pause and ask user when circuit opens (vs auto-backoff)
    #[serde(default = "default_pause_on_open")]
    pub pause_on_open: bool,

    /// Number of open circuits before triggering pause
    #[serde(default = "default_max_open_circuits")]
    pub max_open_circuits: usize,

    /// Cooldown period between recovery prompts (seconds)
    #[serde(default = "default_recovery_cooldown")]
    pub recovery_cooldown: u64,
}

impl Default for CircuitBreakerConfig {
    fn default() -> Self {
        Self {
            enabled: default_circuit_breaker_enabled(),
            failure_threshold: default_failure_threshold(),
            pause_on_open: default_pause_on_open(),
            max_open_circuits: default_max_open_circuits(),
            recovery_cooldown: default_recovery_cooldown(),
        }
    }
}
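
// Illustrative sketch, not part of the upstream file: tightening the circuit
// breaker so it trips after fewer consecutive failures and backs off on its
// own instead of pausing for user guidance. The numbers are example values.
#[allow(dead_code)]
fn example_circuit_breaker_tuning() -> CircuitBreakerConfig {
    CircuitBreakerConfig {
        failure_threshold: 3,
        pause_on_open: false,
        ..CircuitBreakerConfig::default()
    }
}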

/// Open Responses specification compliance configuration
///
/// Enables vendor-neutral LLM API format per the Open Responses specification
/// (<https://www.openresponses.org/>). When enabled, VT Code emits semantic
/// streaming events and uses standardized response/item structures.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct OpenResponsesConfig {
    /// Enable Open Responses specification compliance layer
    /// When true, VT Code emits semantic streaming events alongside internal events
    /// Default: false (opt-in feature)
    #[serde(default)]
    pub enabled: bool,

    /// Emit Open Responses events to the event sink
    /// When true, streaming events follow Open Responses format
    /// (response.created, response.output_item.added, response.output_text.delta, etc.)
    #[serde(default = "default_open_responses_emit_events")]
    pub emit_events: bool,

    /// Include VT Code extension items (vtcode:file_change, vtcode:web_search, etc.)
    /// When false, extension items are omitted from the Open Responses output
    #[serde(default = "default_open_responses_include_extensions")]
    pub include_extensions: bool,

    /// Map internal tool calls to Open Responses function_call items
    /// When true, command executions and MCP tool calls are represented as function_call items
    #[serde(default = "default_open_responses_map_tool_calls")]
    pub map_tool_calls: bool,

    /// Include reasoning items in Open Responses output
    /// When true, model reasoning/thinking is exposed as reasoning items
    #[serde(default = "default_open_responses_include_reasoning")]
    pub include_reasoning: bool,
}

impl Default for OpenResponsesConfig {
    fn default() -> Self {
        Self {
            enabled: false, // Opt-in by default
            emit_events: default_open_responses_emit_events(),
            include_extensions: default_open_responses_include_extensions(),
            map_tool_calls: default_open_responses_map_tool_calls(),
            include_reasoning: default_open_responses_include_reasoning(),
        }
    }
}
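
// Illustrative sketch, not part of the upstream file: opting in to the Open
// Responses compliance layer (disabled by default above) while keeping the
// event, extension, and reasoning mapping defaults.
#[allow(dead_code)]
fn example_open_responses_opt_in() -> OpenResponsesConfig {
    OpenResponsesConfig {
        enabled: true,
        ..OpenResponsesConfig::default()
    }
}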

#[inline]
const fn default_open_responses_emit_events() -> bool {
    true // When enabled, emit events by default
}

#[inline]
const fn default_open_responses_include_extensions() -> bool {
    true // Include VT Code-specific extensions by default
}

#[inline]
const fn default_open_responses_map_tool_calls() -> bool {
    true // Map tool calls to function_call items by default
}

#[inline]
const fn default_open_responses_include_reasoning() -> bool {
    true // Include reasoning items by default
}

impl Default for AgentConfig {
    fn default() -> Self {
        Self {
            provider: default_provider(),
            api_key_env: default_api_key_env(),
            default_model: default_model(),
            theme: default_theme(),
            system_prompt_mode: SystemPromptMode::default(),
            tool_documentation_mode: ToolDocumentationMode::default(),
            enable_split_tool_results: default_enable_split_tool_results(),
            todo_planning_mode: default_todo_planning_mode(),
            ui_surface: UiSurfacePreference::default(),
            max_conversation_turns: default_max_conversation_turns(),
            reasoning_effort: default_reasoning_effort(),
            verbosity: default_verbosity(),
            temperature: default_temperature(),
            refine_temperature: default_refine_temperature(),
            enable_self_review: default_enable_self_review(),
            max_review_passes: default_max_review_passes(),
            refine_prompts_enabled: default_refine_prompts_enabled(),
            refine_prompts_max_passes: default_refine_max_passes(),
            refine_prompts_model: String::new(),
            small_model: AgentSmallModelConfig::default(),
            onboarding: AgentOnboardingConfig::default(),
            project_doc_max_bytes: default_project_doc_max_bytes(),
            instruction_max_bytes: default_instruction_max_bytes(),
            instruction_files: Vec::new(),
            custom_prompts: AgentCustomPromptsConfig::default(),
            custom_slash_commands: AgentCustomSlashCommandsConfig::default(),
            custom_api_keys: BTreeMap::new(),
            checkpointing: AgentCheckpointingConfig::default(),
            vibe_coding: AgentVibeCodingConfig::default(),
            max_task_retries: default_max_task_retries(),
            harness: AgentHarnessConfig::default(),
            include_temporal_context: default_include_temporal_context(),
            temporal_context_use_utc: false, // Default to local time
            include_working_directory: default_include_working_directory(),
            user_instructions: None,
            default_editing_mode: EditingMode::default(),
            require_plan_confirmation: default_require_plan_confirmation(),
            autonomous_mode: default_autonomous_mode(),
            circuit_breaker: CircuitBreakerConfig::default(),
            open_responses: OpenResponsesConfig::default(),
        }
    }
}

impl AgentConfig {
    /// Validate LLM generation parameters
    pub fn validate_llm_params(&self) -> Result<(), String> {
        // Validate temperature range
        if !(0.0..=1.0).contains(&self.temperature) {
            return Err(format!(
                "temperature must be between 0.0 and 1.0, got {}",
                self.temperature
            ));
        }

        if !(0.0..=1.0).contains(&self.refine_temperature) {
            return Err(format!(
                "refine_temperature must be between 0.0 and 1.0, got {}",
                self.refine_temperature
            ));
        }

        Ok(())
    }
}

// Optimized: Use inline defaults with constants to reduce function call overhead
#[inline]
fn default_provider() -> String {
    defaults::DEFAULT_PROVIDER.into()
}

#[inline]
fn default_api_key_env() -> String {
    defaults::DEFAULT_API_KEY_ENV.into()
}

#[inline]
fn default_model() -> String {
    defaults::DEFAULT_MODEL.into()
}

#[inline]
fn default_theme() -> String {
    defaults::DEFAULT_THEME.into()
}

#[inline]
const fn default_todo_planning_mode() -> bool {
    true
}

#[inline]
const fn default_enable_split_tool_results() -> bool {
    true // Default: enabled for production use (84% token savings)
}

#[inline]
const fn default_max_conversation_turns() -> usize {
    150
}

#[inline]
fn default_reasoning_effort() -> ReasoningEffortLevel {
    ReasoningEffortLevel::default()
}

#[inline]
fn default_verbosity() -> VerbosityLevel {
    VerbosityLevel::default()
}

#[inline]
const fn default_temperature() -> f32 {
    llm_generation::DEFAULT_TEMPERATURE
}

#[inline]
const fn default_refine_temperature() -> f32 {
    llm_generation::DEFAULT_REFINE_TEMPERATURE
}

#[inline]
const fn default_enable_self_review() -> bool {
    false
}

#[inline]
const fn default_max_review_passes() -> usize {
    1
}

#[inline]
const fn default_refine_prompts_enabled() -> bool {
    false
}

#[inline]
const fn default_refine_max_passes() -> usize {
    1
}

#[inline]
const fn default_project_doc_max_bytes() -> usize {
    project_doc::DEFAULT_MAX_BYTES
}

#[inline]
const fn default_instruction_max_bytes() -> usize {
    instructions::DEFAULT_MAX_BYTES
}

#[inline]
const fn default_max_task_retries() -> u32 {
    2 // Retry twice on transient failures
}

#[inline]
const fn default_harness_max_tool_calls_per_turn() -> usize {
    defaults::DEFAULT_MAX_TOOL_CALLS_PER_TURN
}

#[inline]
const fn default_harness_max_tool_wall_clock_secs() -> u64 {
    defaults::DEFAULT_MAX_TOOL_WALL_CLOCK_SECS
}

#[inline]
const fn default_harness_max_tool_retries() -> u32 {
    defaults::DEFAULT_MAX_TOOL_RETRIES
}

#[inline]
const fn default_include_temporal_context() -> bool {
    true // Enable by default - minimal overhead (~20 tokens)
}

#[inline]
const fn default_include_working_directory() -> bool {
    true // Enable by default - minimal overhead (~10 tokens)
}

#[inline]
const fn default_require_plan_confirmation() -> bool {
    true // Default: require confirmation (HITL pattern)
}

#[inline]
const fn default_autonomous_mode() -> bool {
    false // Default: interactive mode with full HITL
}

#[inline]
const fn default_circuit_breaker_enabled() -> bool {
    true // Default: enabled for resilient execution
}

#[inline]
const fn default_failure_threshold() -> u32 {
    5 // Open circuit after 5 consecutive failures
}

#[inline]
const fn default_pause_on_open() -> bool {
    true // Default: ask user for guidance on circuit breaker
}

#[inline]
const fn default_max_open_circuits() -> usize {
    3 // Pause when 3+ tools have open circuits
}

#[inline]
const fn default_recovery_cooldown() -> u64 {
    60 // Cooldown between recovery prompts (seconds)
}

#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentCustomPromptsConfig {
    /// Master switch for custom prompt support
    #[serde(default = "default_custom_prompts_enabled")]
    pub enabled: bool,

    /// Primary directory for prompt markdown files
    #[serde(default = "default_custom_prompts_directory")]
    pub directory: String,

    /// Additional directories to search for prompts
    #[serde(default)]
    pub extra_directories: Vec<String>,

    /// Maximum file size (KB) to load for a single prompt
    #[serde(default = "default_custom_prompts_max_file_size_kb")]
    pub max_file_size_kb: usize,
}

impl Default for AgentCustomPromptsConfig {
    fn default() -> Self {
        Self {
            enabled: default_custom_prompts_enabled(),
            directory: default_custom_prompts_directory(),
            extra_directories: Vec::new(),
            max_file_size_kb: default_custom_prompts_max_file_size_kb(),
        }
    }
}

/// Configuration for custom slash commands
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
pub struct AgentCustomSlashCommandsConfig {
    /// Master switch for custom slash command support
    #[serde(default = "default_custom_slash_commands_enabled")]
    pub enabled: bool,

    /// Primary directory for slash command markdown files
    #[serde(default = "default_custom_slash_commands_directory")]
    pub directory: String,

    /// Additional directories to search for slash commands
    #[serde(default)]
    pub extra_directories: Vec<String>,

    /// Maximum file size (KB) to load for a single slash command
    #[serde(default = "default_custom_slash_commands_max_file_size_kb")]
    pub max_file_size_kb: usize,
}

#[inline]
const fn default_custom_slash_commands_enabled() -> bool {
    true
}

fn default_custom_slash_commands_directory() -> String {
    crate::constants::prompts::DEFAULT_CUSTOM_SLASH_COMMANDS_DIR.into()
}

const fn default_custom_slash_commands_max_file_size_kb() -> usize {
    64 // 64KB default, same as prompts
}

#[inline]
const fn default_custom_prompts_enabled() -> bool {
    true
}

#[inline]
fn default_custom_prompts_directory() -> String {
    prompts::DEFAULT_CUSTOM_PROMPTS_DIR.into()
}

#[inline]
const fn default_custom_prompts_max_file_size_kb() -> usize {
    prompts::DEFAULT_CUSTOM_PROMPT_MAX_FILE_SIZE_KB
}

#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentCheckpointingConfig {
    /// Enable automatic checkpoints after each successful turn
    #[serde(default = "default_checkpointing_enabled")]
    pub enabled: bool,

    /// Optional custom directory for storing checkpoints (relative to workspace or absolute)
    #[serde(default)]
    pub storage_dir: Option<String>,

    /// Maximum number of checkpoints to retain on disk
    #[serde(default = "default_checkpointing_max_snapshots")]
    pub max_snapshots: usize,

    /// Maximum age in days before checkpoints are removed automatically (None disables)
    #[serde(default = "default_checkpointing_max_age_days")]
    pub max_age_days: Option<u64>,
}

impl Default for AgentCheckpointingConfig {
    fn default() -> Self {
        Self {
            enabled: default_checkpointing_enabled(),
            storage_dir: None,
            max_snapshots: default_checkpointing_max_snapshots(),
            max_age_days: default_checkpointing_max_age_days(),
        }
    }
}
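
// Illustrative sketch, not part of the upstream file: keeping checkpoints in a
// custom directory and disabling age-based pruning by setting `max_age_days`
// to `None` (the field doc notes that `None` disables it). The path shown is
// an example value, not a documented default.
#[allow(dead_code)]
fn example_checkpointing_retention() -> AgentCheckpointingConfig {
    AgentCheckpointingConfig {
        storage_dir: Some(".vtcode/checkpoints".into()),
        max_age_days: None,
        ..AgentCheckpointingConfig::default()
    }
}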

#[inline]
const fn default_checkpointing_enabled() -> bool {
    DEFAULT_CHECKPOINTS_ENABLED
}

#[inline]
const fn default_checkpointing_max_snapshots() -> usize {
    DEFAULT_MAX_SNAPSHOTS
}

#[inline]
const fn default_checkpointing_max_age_days() -> Option<u64> {
    Some(DEFAULT_MAX_AGE_DAYS)
}

#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentOnboardingConfig {
    /// Toggle onboarding message rendering
    #[serde(default = "default_onboarding_enabled")]
    pub enabled: bool,

    /// Introductory text shown at session start
    #[serde(default = "default_intro_text")]
    pub intro_text: String,

    /// Whether to include project overview in onboarding message
    #[serde(default = "default_show_project_overview")]
    pub include_project_overview: bool,

    /// Whether to include language summary in onboarding message
    #[serde(default = "default_show_language_summary")]
    pub include_language_summary: bool,

    /// Whether to include AGENTS.md highlights in onboarding message
    #[serde(default = "default_show_guideline_highlights")]
    pub include_guideline_highlights: bool,

    /// Whether to surface usage tips inside the welcome text banner
    #[serde(default = "default_show_usage_tips_in_welcome")]
    pub include_usage_tips_in_welcome: bool,

    /// Whether to surface suggested actions inside the welcome text banner
    #[serde(default = "default_show_recommended_actions_in_welcome")]
    pub include_recommended_actions_in_welcome: bool,

    /// Maximum number of guideline bullets to surface
    #[serde(default = "default_guideline_highlight_limit")]
    pub guideline_highlight_limit: usize,

    /// Tips for collaborating with the agent effectively
    #[serde(default = "default_usage_tips")]
    pub usage_tips: Vec<String>,

    /// Recommended follow-up actions to display
    #[serde(default = "default_recommended_actions")]
    pub recommended_actions: Vec<String>,

    /// Placeholder suggestion for the chat input bar
    #[serde(default)]
    pub chat_placeholder: Option<String>,
}

impl Default for AgentOnboardingConfig {
    fn default() -> Self {
        Self {
            enabled: default_onboarding_enabled(),
            intro_text: default_intro_text(),
            include_project_overview: default_show_project_overview(),
            include_language_summary: default_show_language_summary(),
            include_guideline_highlights: default_show_guideline_highlights(),
            include_usage_tips_in_welcome: default_show_usage_tips_in_welcome(),
            include_recommended_actions_in_welcome: default_show_recommended_actions_in_welcome(),
            guideline_highlight_limit: default_guideline_highlight_limit(),
            usage_tips: default_usage_tips(),
            recommended_actions: default_recommended_actions(),
            chat_placeholder: None,
        }
    }
}

#[inline]
const fn default_onboarding_enabled() -> bool {
    true
}

const DEFAULT_INTRO_TEXT: &str =
    "Let's get oriented. I preloaded workspace context so we can move fast.";

#[inline]
fn default_intro_text() -> String {
    DEFAULT_INTRO_TEXT.into()
}

#[inline]
const fn default_show_project_overview() -> bool {
    true
}

#[inline]
const fn default_show_language_summary() -> bool {
    false
}

#[inline]
const fn default_show_guideline_highlights() -> bool {
    true
}

#[inline]
const fn default_show_usage_tips_in_welcome() -> bool {
    false
}

#[inline]
const fn default_show_recommended_actions_in_welcome() -> bool {
    false
}

#[inline]
const fn default_guideline_highlight_limit() -> usize {
    3
}

const DEFAULT_USAGE_TIPS: &[&str] = &[
    "Describe your current coding goal or ask for a quick status overview.",
    "Reference AGENTS.md guidelines when proposing changes.",
    "Prefer asking for targeted file reads or diffs before editing.",
];

const DEFAULT_RECOMMENDED_ACTIONS: &[&str] = &[
    "Review the highlighted guidelines and share the task you want to tackle.",
    "Ask for a workspace tour if you need more context.",
];

fn default_usage_tips() -> Vec<String> {
    DEFAULT_USAGE_TIPS.iter().map(|s| (*s).into()).collect()
}

fn default_recommended_actions() -> Vec<String> {
    DEFAULT_RECOMMENDED_ACTIONS
        .iter()
        .map(|s| (*s).into())
        .collect()
}

/// Small/lightweight model configuration for efficient operations
///
/// Following VT Code's pattern, use a smaller model (e.g., Haiku, GPT-4 Mini) for 50%+ of calls:
/// - Large file reads and parsing (>50KB)
/// - Web page summarization and analysis
/// - Git history and commit message processing
/// - One-word processing labels and simple classifications
///
/// Typically 70-80% cheaper than the main model while maintaining quality for these tasks.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentSmallModelConfig {
    /// Enable small model tier for efficient operations
    #[serde(default = "default_small_model_enabled")]
    pub enabled: bool,

    /// Small model to use (e.g., "claude-3-5-haiku", "gpt-4o-mini", "gemini-2.0-flash")
    /// Leave empty to auto-select a lightweight sibling of the main model
    #[serde(default)]
    pub model: String,

    /// Temperature for small model responses
    #[serde(default = "default_small_model_temperature")]
    pub temperature: f32,

    /// Enable small model for large file reads (>50KB)
    #[serde(default = "default_small_model_for_large_reads")]
    pub use_for_large_reads: bool,

    /// Enable small model for web content summarization
    #[serde(default = "default_small_model_for_web_summary")]
    pub use_for_web_summary: bool,

    /// Enable small model for git history processing
    #[serde(default = "default_small_model_for_git_history")]
    pub use_for_git_history: bool,
}

impl Default for AgentSmallModelConfig {
    fn default() -> Self {
        Self {
            enabled: default_small_model_enabled(),
            model: String::new(),
            temperature: default_small_model_temperature(),
            use_for_large_reads: default_small_model_for_large_reads(),
            use_for_web_summary: default_small_model_for_web_summary(),
            use_for_git_history: default_small_model_for_git_history(),
        }
    }
}
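
// Illustrative sketch, not part of the upstream file: pinning the small-model
// tier to one of the model ids mentioned in the field docs above instead of
// relying on auto-selection of a lightweight sibling.
#[allow(dead_code)]
fn example_small_model_pin() -> AgentSmallModelConfig {
    AgentSmallModelConfig {
        model: "gpt-4o-mini".into(),
        ..AgentSmallModelConfig::default()
    }
}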

#[inline]
const fn default_small_model_enabled() -> bool {
    true // Enable by default following VT Code pattern
}

#[inline]
const fn default_small_model_temperature() -> f32 {
    0.3 // More deterministic for parsing/summarization
}

#[inline]
const fn default_small_model_for_large_reads() -> bool {
    true
}

#[inline]
const fn default_small_model_for_web_summary() -> bool {
    true
}

#[inline]
const fn default_small_model_for_git_history() -> bool {
    true
}

/// Vibe coding configuration for lazy/vague request support
///
/// Enables intelligent context gathering and entity resolution to support
/// casual, imprecise requests like "make it blue" or "decrease by half".
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentVibeCodingConfig {
    /// Enable vibe coding support
    #[serde(default = "default_vibe_coding_enabled")]
    pub enabled: bool,

    /// Minimum prompt length for refinement (default: 5 chars)
    #[serde(default = "default_vibe_min_prompt_length")]
    pub min_prompt_length: usize,

    /// Minimum prompt words for refinement (default: 2 words)
    #[serde(default = "default_vibe_min_prompt_words")]
    pub min_prompt_words: usize,

    /// Enable fuzzy entity resolution
    #[serde(default = "default_vibe_entity_resolution")]
    pub enable_entity_resolution: bool,

    /// Entity index cache file path (relative to workspace)
    #[serde(default = "default_vibe_entity_cache")]
    pub entity_index_cache: String,

    /// Maximum entity matches to return (default: 5)
    #[serde(default = "default_vibe_max_entity_matches")]
    pub max_entity_matches: usize,

    /// Track workspace state (file activity, value changes)
    #[serde(default = "default_vibe_track_workspace")]
    pub track_workspace_state: bool,

    /// Maximum recent files to track (default: 20)
    #[serde(default = "default_vibe_max_recent_files")]
    pub max_recent_files: usize,

    /// Track value history for inference
    #[serde(default = "default_vibe_track_values")]
    pub track_value_history: bool,

    /// Enable conversation memory for pronoun resolution
    #[serde(default = "default_vibe_conversation_memory")]
    pub enable_conversation_memory: bool,

    /// Maximum conversation turns to remember (default: 50)
    #[serde(default = "default_vibe_max_memory_turns")]
    pub max_memory_turns: usize,

    /// Enable pronoun resolution (it, that, this)
    #[serde(default = "default_vibe_pronoun_resolution")]
    pub enable_pronoun_resolution: bool,

    /// Enable proactive context gathering
    #[serde(default = "default_vibe_proactive_context")]
    pub enable_proactive_context: bool,

    /// Maximum files to gather for context (default: 3)
    #[serde(default = "default_vibe_max_context_files")]
    pub max_context_files: usize,

    /// Maximum code snippets per file (default: 20 lines)
    #[serde(default = "default_vibe_max_snippets_per_file")]
    pub max_context_snippets_per_file: usize,

    /// Maximum search results to include (default: 5)
    #[serde(default = "default_vibe_max_search_results")]
    pub max_search_results: usize,

    /// Enable relative value inference (by half, double, etc.)
    #[serde(default = "default_vibe_value_inference")]
    pub enable_relative_value_inference: bool,
}

impl Default for AgentVibeCodingConfig {
    fn default() -> Self {
        Self {
            enabled: default_vibe_coding_enabled(),
            min_prompt_length: default_vibe_min_prompt_length(),
            min_prompt_words: default_vibe_min_prompt_words(),
            enable_entity_resolution: default_vibe_entity_resolution(),
            entity_index_cache: default_vibe_entity_cache(),
            max_entity_matches: default_vibe_max_entity_matches(),
            track_workspace_state: default_vibe_track_workspace(),
            max_recent_files: default_vibe_max_recent_files(),
            track_value_history: default_vibe_track_values(),
            enable_conversation_memory: default_vibe_conversation_memory(),
            max_memory_turns: default_vibe_max_memory_turns(),
            enable_pronoun_resolution: default_vibe_pronoun_resolution(),
            enable_proactive_context: default_vibe_proactive_context(),
            max_context_files: default_vibe_max_context_files(),
            max_context_snippets_per_file: default_vibe_max_snippets_per_file(),
            max_search_results: default_vibe_max_search_results(),
            enable_relative_value_inference: default_vibe_value_inference(),
        }
    }
}
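
// Illustrative sketch, not part of the upstream file: opting in to vibe coding
// (disabled by default, see `default_vibe_coding_enabled` below) while keeping
// the entity-resolution, memory, and context-gathering defaults.
#[allow(dead_code)]
fn example_vibe_coding_opt_in() -> AgentVibeCodingConfig {
    AgentVibeCodingConfig {
        enabled: true,
        ..AgentVibeCodingConfig::default()
    }
}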

// Vibe coding default functions
#[inline]
const fn default_vibe_coding_enabled() -> bool {
    false // Conservative default, opt-in
}

#[inline]
const fn default_vibe_min_prompt_length() -> usize {
    5
}

#[inline]
const fn default_vibe_min_prompt_words() -> usize {
    2
}

#[inline]
const fn default_vibe_entity_resolution() -> bool {
    true
}

#[inline]
fn default_vibe_entity_cache() -> String {
    ".vtcode/entity_index.json".into()
}

#[inline]
const fn default_vibe_max_entity_matches() -> usize {
    5
}

#[inline]
const fn default_vibe_track_workspace() -> bool {
    true
}

#[inline]
const fn default_vibe_max_recent_files() -> usize {
    20
}

#[inline]
const fn default_vibe_track_values() -> bool {
    true
}

#[inline]
const fn default_vibe_conversation_memory() -> bool {
    true
}

#[inline]
const fn default_vibe_max_memory_turns() -> usize {
    50
}

#[inline]
const fn default_vibe_pronoun_resolution() -> bool {
    true
}

#[inline]
const fn default_vibe_proactive_context() -> bool {
    true
}

#[inline]
const fn default_vibe_max_context_files() -> usize {
    3
}

#[inline]
const fn default_vibe_max_snippets_per_file() -> usize {
    20
}

#[inline]
const fn default_vibe_max_search_results() -> usize {
    5
}

#[inline]
const fn default_vibe_value_inference() -> bool {
    true
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_editing_mode_config_default() {
        let config = AgentConfig::default();
        assert_eq!(config.default_editing_mode, EditingMode::Edit);
        assert!(config.require_plan_confirmation);
        assert!(!config.autonomous_mode);
    }
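
    // Additional sketch test, not in the upstream file: exercises
    // validate_llm_params() against the 0.0..=1.0 range checks defined above.
    #[test]
    fn test_validate_llm_params_range_checks() {
        let mut config = AgentConfig::default();
        assert!(config.validate_llm_params().is_ok());

        config.temperature = 1.5;
        assert!(config.validate_llm_params().is_err());

        config.temperature = 0.7;
        config.refine_temperature = -0.1;
        assert!(config.validate_llm_params().is_err());
    }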
}