vtcode_config/core/
agent.rs

use crate::constants::{defaults, instructions, llm_generation, project_doc, prompts};
use crate::types::{
    EditingMode, ReasoningEffortLevel, SystemPromptMode, ToolDocumentationMode,
    UiSurfacePreference, VerbosityLevel,
};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;

const DEFAULT_CHECKPOINTS_ENABLED: bool = true;
const DEFAULT_MAX_SNAPSHOTS: usize = 50;
const DEFAULT_MAX_AGE_DAYS: u64 = 30;

/// Agent-wide configuration
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentConfig {
    /// AI provider for single agent mode (gemini, openai, anthropic, openrouter, xai, zai)
    #[serde(default = "default_provider")]
    pub provider: String,

    /// Environment variable that stores the API key for the active provider
    #[serde(default = "default_api_key_env")]
    pub api_key_env: String,

    /// Default model to use
    #[serde(default = "default_model")]
    pub default_model: String,

    /// UI theme identifier controlling ANSI styling
    #[serde(default = "default_theme")]
    pub theme: String,

    /// System prompt mode controlling verbosity and token overhead
    /// Options: minimal (~500-800 tokens), lightweight (~1-2k), default (~6-7k), specialized (~7-8k)
    /// Inspired by pi-coding-agent: modern models often perform well with minimal prompts
    #[serde(default)]
    pub system_prompt_mode: SystemPromptMode,

    /// Tool documentation mode controlling token overhead for tool definitions
    /// Options: minimal (~800 tokens), progressive (~1.2k), full (~3k current)
    /// Progressive: signatures upfront, detailed docs on-demand (recommended)
    /// Minimal: signatures only, pi-coding-agent style (power users)
    /// Full: all documentation upfront (current behavior, default)
    #[serde(default)]
    pub tool_documentation_mode: ToolDocumentationMode,

    /// Enable split tool results for massive token savings (Phase 4)
    /// When enabled, tools return dual-channel output:
    /// - llm_content: Concise summary sent to LLM (token-optimized, 53-95% reduction)
    /// - ui_content: Rich output displayed to user (full details preserved)
    ///
    /// Applies to: grep_file, list_files, read_file, run_pty_cmd, write_file, edit_file
    /// Default: true (opt-out for compatibility), recommended for production use
    #[serde(default = "default_enable_split_tool_results")]
    pub enable_split_tool_results: bool,

    #[serde(default = "default_todo_planning_mode")]
    pub todo_planning_mode: bool,

    /// Preferred rendering surface for the interactive chat UI (auto, alternate, inline)
    #[serde(default)]
    pub ui_surface: UiSurfacePreference,

    /// Maximum number of conversation turns before auto-termination
    #[serde(default = "default_max_conversation_turns")]
    pub max_conversation_turns: usize,

    /// Reasoning effort level for models that support it (none, low, medium, high)
    /// Applies to: Claude, GPT-5, GPT-5.1, Gemini, Qwen3, DeepSeek with reasoning capability
    #[serde(default = "default_reasoning_effort")]
    pub reasoning_effort: ReasoningEffortLevel,

    /// Verbosity level for output text (low, medium, high)
    /// Applies to: GPT-5.1 and other models that support verbosity control
    #[serde(default = "default_verbosity")]
    pub verbosity: VerbosityLevel,

    /// Temperature for main LLM responses (0.0-1.0)
    /// Lower values = more deterministic, higher values = more creative
    /// Recommended: 0.7 for balanced creativity and consistency
    /// Range: 0.0 (deterministic) to 1.0 (maximum randomness)
    #[serde(default = "default_temperature")]
    pub temperature: f32,

    /// Temperature for prompt refinement (0.0-1.0, default: 0.3)
    /// Lower values ensure prompt refinement is more deterministic/consistent
    /// Keep lower than main temperature for stable prompt improvement
    #[serde(default = "default_refine_temperature")]
    pub refine_temperature: f32,

    /// Enable an extra self-review pass to refine final responses
    #[serde(default = "default_enable_self_review")]
    pub enable_self_review: bool,

    /// Maximum number of self-review passes
    #[serde(default = "default_max_review_passes")]
    pub max_review_passes: usize,

    /// Enable prompt refinement pass before sending to LLM
    #[serde(default = "default_refine_prompts_enabled")]
    pub refine_prompts_enabled: bool,

    /// Max refinement passes for prompt writing
    #[serde(default = "default_refine_max_passes")]
    pub refine_prompts_max_passes: usize,

    /// Optional model override for the refiner (empty = auto pick efficient sibling)
    #[serde(default)]
    pub refine_prompts_model: String,

    /// Small/lightweight model configuration for efficient operations
    /// Used for tasks like large file reads, parsing, git history, conversation summarization
    /// Typically 70-80% cheaper than main model; ~50% of VT Code's calls use this tier
    #[serde(default)]
    pub small_model: AgentSmallModelConfig,

    /// Session onboarding and welcome message configuration
    #[serde(default)]
    pub onboarding: AgentOnboardingConfig,

    /// Maximum bytes of AGENTS.md content to load from project hierarchy
    #[serde(default = "default_project_doc_max_bytes")]
    pub project_doc_max_bytes: usize,

    /// Maximum bytes of instruction content to load from AGENTS.md hierarchy
    #[serde(
        default = "default_instruction_max_bytes",
        alias = "rule_doc_max_bytes"
    )]
    pub instruction_max_bytes: usize,

    /// Additional instruction files or globs to merge into the hierarchy
    #[serde(default, alias = "instruction_paths", alias = "instructions")]
    pub instruction_files: Vec<String>,

    /// Custom prompt configuration for slash command shortcuts
    #[serde(default)]
    pub custom_prompts: AgentCustomPromptsConfig,

    /// Configuration for custom slash commands
    #[serde(default)]
    pub custom_slash_commands: AgentCustomSlashCommandsConfig,

    /// Provider-specific API keys captured from interactive configuration flows
    #[serde(default)]
    pub custom_api_keys: BTreeMap<String, String>,

    /// Checkpointing configuration for automatic turn snapshots
    #[serde(default)]
    pub checkpointing: AgentCheckpointingConfig,

    /// Vibe coding configuration for lazy/vague request support
    #[serde(default)]
    pub vibe_coding: AgentVibeCodingConfig,

    /// Maximum number of retries for agent task execution (default: 2)
    /// When an agent task fails due to retryable errors (timeout, network, 503, etc.),
    /// it will be retried up to this many times with exponential backoff
    #[serde(default = "default_max_task_retries")]
    pub max_task_retries: u32,

    /// Include current date/time in system prompt for temporal awareness
    /// Helps LLM understand context for time-sensitive tasks (default: true)
    #[serde(default = "default_include_temporal_context")]
    pub include_temporal_context: bool,

    /// Use UTC instead of local time for temporal context (default: false)
    #[serde(default)]
    pub temporal_context_use_utc: bool,

    /// Include current working directory in system prompt (default: true)
    #[serde(default = "default_include_working_directory")]
    pub include_working_directory: bool,

    /// Custom instructions provided by the user via configuration
    #[serde(default)]
    pub user_instructions: Option<String>,

    /// Default editing mode: "edit" (default) or "plan"
    /// In "plan" mode, the agent is read-only and produces implementation plans.
    /// In "edit" mode, the agent can modify files and execute commands.
    /// Toggle with Shift+Tab or /plan command during a session.
    /// Codex-inspired: Encourages structured planning before execution.
    #[serde(default)]
    pub default_editing_mode: EditingMode,

    /// Require user confirmation before executing a plan (HITL pattern)
    /// When true, exiting plan mode shows the implementation blueprint and
    /// requires explicit user approval before enabling edit tools.
    /// Options in confirmation dialog: Execute, Edit Plan, Cancel
    #[serde(default = "default_require_plan_confirmation")]
    pub require_plan_confirmation: bool,
}
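
// A minimal deserialization sketch: every field above carries a serde default,
// so a partial config section fills in the rest, and `rule_doc_max_bytes` is
// accepted as an alias for `instruction_max_bytes`. This assumes the `toml`
// crate is available to the test build; treat it as an illustration rather
// than part of the crate's API.
#[cfg(test)]
mod agent_config_deserialization_sketch {
    use super::*;

    #[test]
    fn partial_toml_falls_back_to_defaults_and_honors_alias() {
        let snippet = r#"
            provider = "openai"
            rule_doc_max_bytes = 4096
        "#;
        let config: AgentConfig = toml::from_str(snippet).expect("snippet should parse");
        assert_eq!(config.provider, "openai");
        // `rule_doc_max_bytes` is an alias for `instruction_max_bytes`.
        assert_eq!(config.instruction_max_bytes, 4096);
        // Everything omitted falls back to the serde defaults.
        assert_eq!(config.max_task_retries, default_max_task_retries());
        assert!(config.require_plan_confirmation);
    }
}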

impl Default for AgentConfig {
    fn default() -> Self {
        Self {
            provider: default_provider(),
            api_key_env: default_api_key_env(),
            default_model: default_model(),
            theme: default_theme(),
            system_prompt_mode: SystemPromptMode::default(),
            tool_documentation_mode: ToolDocumentationMode::default(),
            enable_split_tool_results: default_enable_split_tool_results(),
            todo_planning_mode: default_todo_planning_mode(),
            ui_surface: UiSurfacePreference::default(),
            max_conversation_turns: default_max_conversation_turns(),
            reasoning_effort: default_reasoning_effort(),
            verbosity: default_verbosity(),
            temperature: default_temperature(),
            refine_temperature: default_refine_temperature(),
            enable_self_review: default_enable_self_review(),
            max_review_passes: default_max_review_passes(),
            refine_prompts_enabled: default_refine_prompts_enabled(),
            refine_prompts_max_passes: default_refine_max_passes(),
            refine_prompts_model: String::new(),
            small_model: AgentSmallModelConfig::default(),
            onboarding: AgentOnboardingConfig::default(),
            project_doc_max_bytes: default_project_doc_max_bytes(),
            instruction_max_bytes: default_instruction_max_bytes(),
            instruction_files: Vec::new(),
            custom_prompts: AgentCustomPromptsConfig::default(),
            custom_slash_commands: AgentCustomSlashCommandsConfig::default(),
            custom_api_keys: BTreeMap::new(),
            checkpointing: AgentCheckpointingConfig::default(),
            vibe_coding: AgentVibeCodingConfig::default(),
            max_task_retries: default_max_task_retries(),
            include_temporal_context: default_include_temporal_context(),
            temporal_context_use_utc: false, // Default to local time
            include_working_directory: default_include_working_directory(),
            user_instructions: None,
            default_editing_mode: EditingMode::default(),
            require_plan_confirmation: default_require_plan_confirmation(),
        }
    }
}

impl AgentConfig {
    /// Validate LLM generation parameters
    pub fn validate_llm_params(&self) -> Result<(), String> {
        // Validate temperature range
        if !(0.0..=1.0).contains(&self.temperature) {
            return Err(format!(
                "temperature must be between 0.0 and 1.0, got {}",
                self.temperature
            ));
        }

        if !(0.0..=1.0).contains(&self.refine_temperature) {
            return Err(format!(
                "refine_temperature must be between 0.0 and 1.0, got {}",
                self.refine_temperature
            ));
        }

        Ok(())
    }
}
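
// Test-only sketch of the validation rules above: values outside 0.0..=1.0 are
// rejected for both `temperature` and `refine_temperature`, while the shipped
// defaults pass.
#[cfg(test)]
mod llm_param_validation_sketch {
    use super::*;

    #[test]
    fn defaults_pass_and_out_of_range_temperatures_fail() {
        let mut config = AgentConfig::default();
        assert!(config.validate_llm_params().is_ok());

        config.temperature = 1.5;
        assert!(config.validate_llm_params().is_err());

        config.temperature = default_temperature();
        config.refine_temperature = -0.1;
        assert!(config.validate_llm_params().is_err());
    }
}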

// Optimized: Use inline defaults with constants to reduce function call overhead
#[inline]
fn default_provider() -> String {
    defaults::DEFAULT_PROVIDER.into()
}

#[inline]
fn default_api_key_env() -> String {
    defaults::DEFAULT_API_KEY_ENV.into()
}

#[inline]
fn default_model() -> String {
    defaults::DEFAULT_MODEL.into()
}

#[inline]
fn default_theme() -> String {
    defaults::DEFAULT_THEME.into()
}

#[inline]
const fn default_todo_planning_mode() -> bool {
    true
}

#[inline]
const fn default_enable_split_tool_results() -> bool {
    true // Default: enabled for production use (84% token savings)
}

#[inline]
const fn default_max_conversation_turns() -> usize {
    150
}

#[inline]
fn default_reasoning_effort() -> ReasoningEffortLevel {
    ReasoningEffortLevel::default()
}

#[inline]
fn default_verbosity() -> VerbosityLevel {
    VerbosityLevel::default()
}

#[inline]
const fn default_temperature() -> f32 {
    llm_generation::DEFAULT_TEMPERATURE
}

#[inline]
const fn default_refine_temperature() -> f32 {
    llm_generation::DEFAULT_REFINE_TEMPERATURE
}

#[inline]
const fn default_enable_self_review() -> bool {
    false
}

#[inline]
const fn default_max_review_passes() -> usize {
    1
}

#[inline]
const fn default_refine_prompts_enabled() -> bool {
    false
}

#[inline]
const fn default_refine_max_passes() -> usize {
    1
}

#[inline]
const fn default_project_doc_max_bytes() -> usize {
    project_doc::DEFAULT_MAX_BYTES
}

#[inline]
const fn default_instruction_max_bytes() -> usize {
    instructions::DEFAULT_MAX_BYTES
}

#[inline]
const fn default_max_task_retries() -> u32 {
    2 // Retry twice on transient failures
}

#[inline]
const fn default_include_temporal_context() -> bool {
    true // Enable by default - minimal overhead (~20 tokens)
}

#[inline]
const fn default_include_working_directory() -> bool {
    true // Enable by default - minimal overhead (~10 tokens)
}

#[inline]
const fn default_require_plan_confirmation() -> bool {
    true // Default: require confirmation (HITL pattern)
}

#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentCustomPromptsConfig {
    /// Master switch for custom prompt support
    #[serde(default = "default_custom_prompts_enabled")]
    pub enabled: bool,

    /// Primary directory for prompt markdown files
    #[serde(default = "default_custom_prompts_directory")]
    pub directory: String,

    /// Additional directories to search for prompts
    #[serde(default)]
    pub extra_directories: Vec<String>,

    /// Maximum file size (KB) to load for a single prompt
    #[serde(default = "default_custom_prompts_max_file_size_kb")]
    pub max_file_size_kb: usize,
}

impl Default for AgentCustomPromptsConfig {
    fn default() -> Self {
        Self {
            enabled: default_custom_prompts_enabled(),
            directory: default_custom_prompts_directory(),
            extra_directories: Vec::new(),
            max_file_size_kb: default_custom_prompts_max_file_size_kb(),
        }
    }
}

/// Configuration for custom slash commands
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentCustomSlashCommandsConfig {
    /// Master switch for custom slash command support
    #[serde(default = "default_custom_slash_commands_enabled")]
    pub enabled: bool,

    /// Primary directory for slash command markdown files
    #[serde(default = "default_custom_slash_commands_directory")]
    pub directory: String,

    /// Additional directories to search for slash commands
    #[serde(default)]
    pub extra_directories: Vec<String>,

    /// Maximum file size (KB) to load for a single slash command
    #[serde(default = "default_custom_slash_commands_max_file_size_kb")]
    pub max_file_size_kb: usize,
}

impl Default for AgentCustomSlashCommandsConfig {
    fn default() -> Self {
        Self {
            enabled: default_custom_slash_commands_enabled(),
            directory: default_custom_slash_commands_directory(),
            extra_directories: Vec::new(),
            max_file_size_kb: default_custom_slash_commands_max_file_size_kb(),
        }
    }
}

#[inline]
const fn default_custom_slash_commands_enabled() -> bool {
    true
}

#[inline]
fn default_custom_slash_commands_directory() -> String {
    prompts::DEFAULT_CUSTOM_SLASH_COMMANDS_DIR.into()
}

#[inline]
const fn default_custom_slash_commands_max_file_size_kb() -> usize {
    64 // 64KB default, same as prompts
}

#[inline]
const fn default_custom_prompts_enabled() -> bool {
    true
}

#[inline]
fn default_custom_prompts_directory() -> String {
    prompts::DEFAULT_CUSTOM_PROMPTS_DIR.into()
}

#[inline]
const fn default_custom_prompts_max_file_size_kb() -> usize {
    prompts::DEFAULT_CUSTOM_PROMPT_MAX_FILE_SIZE_KB
}

#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentCheckpointingConfig {
    /// Enable automatic checkpoints after each successful turn
    #[serde(default = "default_checkpointing_enabled")]
    pub enabled: bool,

    /// Optional custom directory for storing checkpoints (relative to workspace or absolute)
    #[serde(default)]
    pub storage_dir: Option<String>,

    /// Maximum number of checkpoints to retain on disk
    #[serde(default = "default_checkpointing_max_snapshots")]
    pub max_snapshots: usize,

    /// Maximum age in days before checkpoints are removed automatically (None disables)
    #[serde(default = "default_checkpointing_max_age_days")]
    pub max_age_days: Option<u64>,
}

impl Default for AgentCheckpointingConfig {
    fn default() -> Self {
        Self {
            enabled: default_checkpointing_enabled(),
            storage_dir: None,
            max_snapshots: default_checkpointing_max_snapshots(),
            max_age_days: default_checkpointing_max_age_days(),
        }
    }
}

#[inline]
const fn default_checkpointing_enabled() -> bool {
    DEFAULT_CHECKPOINTS_ENABLED
}

#[inline]
const fn default_checkpointing_max_snapshots() -> usize {
    DEFAULT_MAX_SNAPSHOTS
}

#[inline]
const fn default_checkpointing_max_age_days() -> Option<u64> {
    Some(DEFAULT_MAX_AGE_DAYS)
}

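// Test-only sketch of the retention policy above: checkpoints are enabled by
// default, capped at 50 snapshots, and pruned after 30 days; setting
// `max_age_days` to `None` opts out of age-based pruning without touching the
// snapshot cap (per the field docs).
#[cfg(test)]
mod checkpointing_defaults_sketch {
    use super::*;

    #[test]
    fn default_retention_matches_module_constants() {
        let config = AgentCheckpointingConfig::default();
        assert!(config.enabled);
        assert_eq!(config.max_snapshots, DEFAULT_MAX_SNAPSHOTS);
        assert_eq!(config.max_age_days, Some(DEFAULT_MAX_AGE_DAYS));

        // Disable age-based cleanup while keeping the snapshot cap.
        let no_age_limit = AgentCheckpointingConfig {
            max_age_days: None,
            ..config
        };
        assert!(no_age_limit.max_age_days.is_none());
        assert_eq!(no_age_limit.max_snapshots, DEFAULT_MAX_SNAPSHOTS);
    }
}
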
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentOnboardingConfig {
    /// Toggle onboarding message rendering
    #[serde(default = "default_onboarding_enabled")]
    pub enabled: bool,

    /// Introductory text shown at session start
    #[serde(default = "default_intro_text")]
    pub intro_text: String,

    /// Whether to include project overview in onboarding message
    #[serde(default = "default_show_project_overview")]
    pub include_project_overview: bool,

    /// Whether to include language summary in onboarding message
    #[serde(default = "default_show_language_summary")]
    pub include_language_summary: bool,

    /// Whether to include AGENTS.md highlights in onboarding message
    #[serde(default = "default_show_guideline_highlights")]
    pub include_guideline_highlights: bool,

    /// Whether to surface usage tips inside the welcome text banner
    #[serde(default = "default_show_usage_tips_in_welcome")]
    pub include_usage_tips_in_welcome: bool,

    /// Whether to surface suggested actions inside the welcome text banner
    #[serde(default = "default_show_recommended_actions_in_welcome")]
    pub include_recommended_actions_in_welcome: bool,

    /// Maximum number of guideline bullets to surface
    #[serde(default = "default_guideline_highlight_limit")]
    pub guideline_highlight_limit: usize,

    /// Tips for collaborating with the agent effectively
    #[serde(default = "default_usage_tips")]
    pub usage_tips: Vec<String>,

    /// Recommended follow-up actions to display
    #[serde(default = "default_recommended_actions")]
    pub recommended_actions: Vec<String>,

    /// Placeholder suggestion for the chat input bar
    #[serde(default)]
    pub chat_placeholder: Option<String>,
}

impl Default for AgentOnboardingConfig {
    fn default() -> Self {
        Self {
            enabled: default_onboarding_enabled(),
            intro_text: default_intro_text(),
            include_project_overview: default_show_project_overview(),
            include_language_summary: default_show_language_summary(),
            include_guideline_highlights: default_show_guideline_highlights(),
            include_usage_tips_in_welcome: default_show_usage_tips_in_welcome(),
            include_recommended_actions_in_welcome: default_show_recommended_actions_in_welcome(),
            guideline_highlight_limit: default_guideline_highlight_limit(),
            usage_tips: default_usage_tips(),
            recommended_actions: default_recommended_actions(),
            chat_placeholder: None,
        }
    }
}

#[inline]
const fn default_onboarding_enabled() -> bool {
    true
}

const DEFAULT_INTRO_TEXT: &str =
    "Let's get oriented. I preloaded workspace context so we can move fast.";

#[inline]
fn default_intro_text() -> String {
    DEFAULT_INTRO_TEXT.into()
}

#[inline]
const fn default_show_project_overview() -> bool {
    true
}

#[inline]
const fn default_show_language_summary() -> bool {
    false
}

#[inline]
const fn default_show_guideline_highlights() -> bool {
    true
}

#[inline]
const fn default_show_usage_tips_in_welcome() -> bool {
    false
}

#[inline]
const fn default_show_recommended_actions_in_welcome() -> bool {
    false
}

#[inline]
const fn default_guideline_highlight_limit() -> usize {
    3
}

const DEFAULT_USAGE_TIPS: &[&str] = &[
    "Describe your current coding goal or ask for a quick status overview.",
    "Reference AGENTS.md guidelines when proposing changes.",
    "Prefer asking for targeted file reads or diffs before editing.",
];

const DEFAULT_RECOMMENDED_ACTIONS: &[&str] = &[
    "Review the highlighted guidelines and share the task you want to tackle.",
    "Ask for a workspace tour if you need more context.",
];

fn default_usage_tips() -> Vec<String> {
    DEFAULT_USAGE_TIPS.iter().map(|s| (*s).into()).collect()
}

fn default_recommended_actions() -> Vec<String> {
    DEFAULT_RECOMMENDED_ACTIONS
        .iter()
        .map(|s| (*s).into())
        .collect()
}
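
// Test-only sketch of the onboarding defaults: the usage tips and recommended
// actions are populated out of the box, but they stay out of the welcome
// banner unless the corresponding `include_*_in_welcome` toggles are enabled.
#[cfg(test)]
mod onboarding_defaults_sketch {
    use super::*;

    #[test]
    fn tips_exist_but_stay_out_of_welcome_banner_by_default() {
        let onboarding = AgentOnboardingConfig::default();
        assert!(onboarding.enabled);
        assert_eq!(onboarding.usage_tips.len(), DEFAULT_USAGE_TIPS.len());
        assert_eq!(
            onboarding.recommended_actions.len(),
            DEFAULT_RECOMMENDED_ACTIONS.len()
        );
        assert!(!onboarding.include_usage_tips_in_welcome);
        assert!(!onboarding.include_recommended_actions_in_welcome);
        assert_eq!(onboarding.guideline_highlight_limit, 3);
    }
}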

/// Small/lightweight model configuration for efficient operations
///
/// Following VT Code's pattern, use a smaller model (e.g., Haiku, GPT-4 Mini) for 50%+ of calls:
/// - Large file reads and parsing (>50KB)
/// - Web page summarization and analysis
/// - Git history and commit message processing
/// - One-word processing labels and simple classifications
///
/// Typically 70-80% cheaper than the main model while maintaining quality for these tasks.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentSmallModelConfig {
    /// Enable small model tier for efficient operations
    #[serde(default = "default_small_model_enabled")]
    pub enabled: bool,

    /// Small model to use (e.g., "claude-3-5-haiku", "gpt-4o-mini", "gemini-2.0-flash")
    /// Leave empty to auto-select a lightweight sibling of the main model
    #[serde(default)]
    pub model: String,

    /// Temperature for small model responses
    #[serde(default = "default_small_model_temperature")]
    pub temperature: f32,

    /// Enable small model for large file reads (>50KB)
    #[serde(default = "default_small_model_for_large_reads")]
    pub use_for_large_reads: bool,

    /// Enable small model for web content summarization
    #[serde(default = "default_small_model_for_web_summary")]
    pub use_for_web_summary: bool,

    /// Enable small model for git history processing
    #[serde(default = "default_small_model_for_git_history")]
    pub use_for_git_history: bool,
}

impl Default for AgentSmallModelConfig {
    fn default() -> Self {
        Self {
            enabled: default_small_model_enabled(),
            model: String::new(),
            temperature: default_small_model_temperature(),
            use_for_large_reads: default_small_model_for_large_reads(),
            use_for_web_summary: default_small_model_for_web_summary(),
            use_for_git_history: default_small_model_for_git_history(),
        }
    }
}

#[inline]
const fn default_small_model_enabled() -> bool {
    true // Enable by default following VT Code pattern
}

#[inline]
const fn default_small_model_temperature() -> f32 {
    0.3 // More deterministic for parsing/summarization
}

#[inline]
const fn default_small_model_for_large_reads() -> bool {
    true
}

#[inline]
const fn default_small_model_for_web_summary() -> bool {
    true
}

#[inline]
const fn default_small_model_for_git_history() -> bool {
    true
}

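// Illustrative routing sketch for the small-model tier. The helper and the
// 50KB threshold below are hypothetical (the threshold mirrors the ">50KB"
// guidance in the docs above); they only show how the flags are meant to
// compose, not an existing API.
#[cfg(test)]
mod small_model_routing_sketch {
    use super::*;

    // Hypothetical cutoff matching the ">50KB" guidance above.
    const LARGE_READ_THRESHOLD_BYTES: usize = 50 * 1024;

    fn routes_large_read_to_small_model(config: &AgentSmallModelConfig, file_size: usize) -> bool {
        config.enabled && config.use_for_large_reads && file_size > LARGE_READ_THRESHOLD_BYTES
    }

    #[test]
    fn large_reads_use_small_model_by_default() {
        let config = AgentSmallModelConfig::default();
        assert!(routes_large_read_to_small_model(&config, 200 * 1024));
        assert!(!routes_large_read_to_small_model(&config, 4 * 1024));

        let disabled = AgentSmallModelConfig {
            enabled: false,
            ..config
        };
        assert!(!routes_large_read_to_small_model(&disabled, 200 * 1024));
    }
}
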
/// Vibe coding configuration for lazy/vague request support
///
/// Enables intelligent context gathering and entity resolution to support
/// casual, imprecise requests like "make it blue" or "decrease by half".
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentVibeCodingConfig {
    /// Enable vibe coding support
    #[serde(default = "default_vibe_coding_enabled")]
    pub enabled: bool,

    /// Minimum prompt length for refinement (default: 5 chars)
    #[serde(default = "default_vibe_min_prompt_length")]
    pub min_prompt_length: usize,

    /// Minimum prompt words for refinement (default: 2 words)
    #[serde(default = "default_vibe_min_prompt_words")]
    pub min_prompt_words: usize,

    /// Enable fuzzy entity resolution
    #[serde(default = "default_vibe_entity_resolution")]
    pub enable_entity_resolution: bool,

    /// Entity index cache file path (relative to workspace)
    #[serde(default = "default_vibe_entity_cache")]
    pub entity_index_cache: String,

    /// Maximum entity matches to return (default: 5)
    #[serde(default = "default_vibe_max_entity_matches")]
    pub max_entity_matches: usize,

    /// Track workspace state (file activity, value changes)
    #[serde(default = "default_vibe_track_workspace")]
    pub track_workspace_state: bool,

    /// Maximum recent files to track (default: 20)
    #[serde(default = "default_vibe_max_recent_files")]
    pub max_recent_files: usize,

    /// Track value history for inference
    #[serde(default = "default_vibe_track_values")]
    pub track_value_history: bool,

    /// Enable conversation memory for pronoun resolution
    #[serde(default = "default_vibe_conversation_memory")]
    pub enable_conversation_memory: bool,

    /// Maximum conversation turns to remember (default: 50)
    #[serde(default = "default_vibe_max_memory_turns")]
    pub max_memory_turns: usize,

    /// Enable pronoun resolution (it, that, this)
    #[serde(default = "default_vibe_pronoun_resolution")]
    pub enable_pronoun_resolution: bool,

    /// Enable proactive context gathering
    #[serde(default = "default_vibe_proactive_context")]
    pub enable_proactive_context: bool,

    /// Maximum files to gather for context (default: 3)
    #[serde(default = "default_vibe_max_context_files")]
    pub max_context_files: usize,

    /// Maximum code snippets per file (default: 20 lines)
    #[serde(default = "default_vibe_max_snippets_per_file")]
    pub max_context_snippets_per_file: usize,

    /// Maximum search results to include (default: 5)
    #[serde(default = "default_vibe_max_search_results")]
    pub max_search_results: usize,

    /// Enable relative value inference (by half, double, etc.)
    #[serde(default = "default_vibe_value_inference")]
    pub enable_relative_value_inference: bool,
}

impl Default for AgentVibeCodingConfig {
    fn default() -> Self {
        Self {
            enabled: default_vibe_coding_enabled(),
            min_prompt_length: default_vibe_min_prompt_length(),
            min_prompt_words: default_vibe_min_prompt_words(),
            enable_entity_resolution: default_vibe_entity_resolution(),
            entity_index_cache: default_vibe_entity_cache(),
            max_entity_matches: default_vibe_max_entity_matches(),
            track_workspace_state: default_vibe_track_workspace(),
            max_recent_files: default_vibe_max_recent_files(),
            track_value_history: default_vibe_track_values(),
            enable_conversation_memory: default_vibe_conversation_memory(),
            max_memory_turns: default_vibe_max_memory_turns(),
            enable_pronoun_resolution: default_vibe_pronoun_resolution(),
            enable_proactive_context: default_vibe_proactive_context(),
            max_context_files: default_vibe_max_context_files(),
            max_context_snippets_per_file: default_vibe_max_snippets_per_file(),
            max_search_results: default_vibe_max_search_results(),
            enable_relative_value_inference: default_vibe_value_inference(),
        }
    }
}

// Vibe coding default functions
#[inline]
const fn default_vibe_coding_enabled() -> bool {
    false // Conservative default, opt-in
}

#[inline]
const fn default_vibe_min_prompt_length() -> usize {
    5
}

#[inline]
const fn default_vibe_min_prompt_words() -> usize {
    2
}

#[inline]
const fn default_vibe_entity_resolution() -> bool {
    true
}

#[inline]
fn default_vibe_entity_cache() -> String {
    ".vtcode/entity_index.json".into()
}

#[inline]
const fn default_vibe_max_entity_matches() -> usize {
    5
}

#[inline]
const fn default_vibe_track_workspace() -> bool {
    true
}

#[inline]
const fn default_vibe_max_recent_files() -> usize {
    20
}

#[inline]
const fn default_vibe_track_values() -> bool {
    true
}

#[inline]
const fn default_vibe_conversation_memory() -> bool {
    true
}

#[inline]
const fn default_vibe_max_memory_turns() -> usize {
    50
}

#[inline]
const fn default_vibe_pronoun_resolution() -> bool {
    true
}

#[inline]
const fn default_vibe_proactive_context() -> bool {
    true
}

#[inline]
const fn default_vibe_max_context_files() -> usize {
    3
}

#[inline]
const fn default_vibe_max_snippets_per_file() -> usize {
    20
}

#[inline]
const fn default_vibe_max_search_results() -> usize {
    5
}

#[inline]
const fn default_vibe_value_inference() -> bool {
    true
}

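// Hypothetical gating sketch: as the field names suggest, vibe-coding
// refinement is meant to apply only to prompts at or above the configured
// length/word thresholds, and only when the feature is enabled (it is opt-in
// by default). `should_refine` is not an existing API, just an illustration of
// how the thresholds compose.
#[cfg(test)]
mod vibe_coding_gating_sketch {
    use super::*;

    fn should_refine(config: &AgentVibeCodingConfig, prompt: &str) -> bool {
        config.enabled
            && prompt.len() >= config.min_prompt_length
            && prompt.split_whitespace().count() >= config.min_prompt_words
    }

    #[test]
    fn disabled_by_default_and_respects_thresholds() {
        let defaults = AgentVibeCodingConfig::default();
        assert!(!should_refine(&defaults, "make it blue"));

        let enabled = AgentVibeCodingConfig {
            enabled: true,
            ..defaults
        };
        assert!(should_refine(&enabled, "make it blue"));
        assert!(!should_refine(&enabled, "fix")); // below both thresholds
    }
}
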
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_editing_mode_config_default() {
        let config = AgentConfig::default();
        assert_eq!(config.default_editing_mode, EditingMode::Edit);
        assert!(config.require_plan_confirmation);
    }
}