// pawan/config/mod.rs
//! Configuration for Pawan
//!
//! Pawan can be configured via:
//! - `pawan.toml` in the current directory
//! - `[pawan]` section in `ares.toml`
//! - Environment variables
//! - Command line arguments

use std::collections::HashMap;
use std::path::PathBuf;

use serde::{Deserialize, Serialize};
use tracing;

/// LLM provider type.
///
/// Serialized/deserialized in lowercase (e.g. `provider = "openai"` in TOML)
/// via `rename_all = "lowercase"`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum LlmProvider {
    /// NVIDIA API (build.nvidia.com) - default
    #[default]
    Nvidia,
    /// Local Ollama instance
    Ollama,
    /// Any OpenAI-compatible API endpoint (serialized as `"openai"`)
    OpenAI,
}
26
/// Main configuration for Pawan.
///
/// Loaded from `pawan.toml` or the `[pawan]` section of `ares.toml`
/// (see [`PawanConfig::load`]); every field falls back to its
/// [`Default`] value via `#[serde(default)]`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct PawanConfig {
    /// LLM provider to use (default: [`LlmProvider::Nvidia`])
    pub provider: LlmProvider,

    /// LLM model to use for coding tasks (default: `crate::DEFAULT_MODEL`)
    pub model: String,

    /// Enable dry-run mode (show changes without applying)
    pub dry_run: bool,

    /// Create backups before editing files (default: true)
    pub auto_backup: bool,

    /// Require clean git working directory before making changes
    pub require_git_clean: bool,

    /// Timeout for bash commands, in seconds (default: `crate::DEFAULT_BASH_TIMEOUT`)
    pub bash_timeout_secs: u64,

    /// Maximum file size to read, in KB (default: 1024)
    pub max_file_size_kb: usize,

    /// Maximum tool iterations per request (default: `crate::MAX_TOOL_ITERATIONS`)
    pub max_tool_iterations: usize,
    /// Maximum context tokens before pruning (default: 100_000)
    pub max_context_tokens: usize,

    /// System prompt override; when `None`, [`DEFAULT_SYSTEM_PROMPT`] is used
    pub system_prompt: Option<String>,

    /// Temperature for LLM responses (default: 1.0)
    pub temperature: f32,

    /// Top-p sampling parameter (default: 0.95)
    pub top_p: f32,

    /// Maximum tokens in response (default: 8192)
    pub max_tokens: usize,

    /// Maximum retries for LLM API calls (429 or 5xx errors; default: 3)
    pub max_retries: usize,

    /// Fallback models to try when primary model fails
    pub fallback_models: Vec<String>,
    /// Maximum characters in a tool result before truncation (default: 8000)
    pub max_result_chars: usize,

    /// Enable reasoning/thinking mode (for DeepSeek/Nemotron models);
    /// see [`PawanConfig::use_thinking_mode`] for the effective check
    pub reasoning_mode: bool,

    /// Healing configuration
    pub healing: HealingConfig,

    /// Target projects, keyed by target name (e.g. "ares", "self")
    pub targets: HashMap<String, TargetConfig>,

    /// TUI configuration
    pub tui: TuiConfig,

    /// MCP server configurations, keyed by server name
    #[serde(default)]
    pub mcp: HashMap<String, McpServerEntry>,

    /// Tool permission overrides (tool_name -> permission)
    #[serde(default)]
    pub permissions: HashMap<String, ToolPermission>,

    /// Cloud fallback: when primary model fails, fall back to cloud provider.
    /// Enables hybrid local+cloud routing. `None` disables cloud fallback.
    pub cloud: Option<CloudConfig>,

    /// Eruka context engine integration (3-tier memory injection)
    #[serde(default)]
    pub eruka: crate::eruka_bridge::ErukaConfig,
}
105
/// Cloud fallback configuration for hybrid local+cloud model routing.
///
/// When the primary provider (typically a local model via OpenAI-compatible API)
/// fails or is unavailable, pawan automatically falls back to this cloud provider.
/// This enables zero-cost local inference with cloud reliability as a safety net.
///
/// Note: unlike [`PawanConfig`], this struct has no `#[serde(default)]`, so
/// `provider` and `model` are required whenever a `[cloud]` section is present.
///
/// # Example (pawan.toml)
/// ```toml
/// provider = "openai"
/// model = "Qwen3.5-9B-Q4_K_M"
///
/// [cloud]
/// provider = "nvidia"
/// model = "mistralai/devstral-2-123b-instruct-2512"
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CloudConfig {
    /// Cloud LLM provider to fall back to (nvidia or openai)
    pub provider: LlmProvider,
    /// Primary cloud model to try first on fallback
    pub model: String,
    /// Additional cloud models to try if the primary cloud model also fails
    #[serde(default)]
    pub fallback_models: Vec<String>,
}
131
/// Permission level for a tool.
///
/// Used as the value type of `PawanConfig::permissions`
/// (tool name -> permission); serialized in lowercase.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolPermission {
    /// Always allow (default for most tools)
    Allow,
    /// Deny — tool is disabled
    Deny,
}
141
142impl Default for PawanConfig {
143    fn default() -> Self {
144        let mut targets = HashMap::new();
145        targets.insert(
146            "ares".to_string(),
147            TargetConfig {
148                path: PathBuf::from("../.."),
149                description: "A.R.E.S server codebase".to_string(),
150            },
151        );
152        targets.insert(
153            "self".to_string(),
154            TargetConfig {
155                path: PathBuf::from("."),
156                description: "Pawan's own codebase".to_string(),
157            },
158        );
159
160        Self {
161            provider: LlmProvider::Nvidia,
162            model: crate::DEFAULT_MODEL.to_string(),
163            dry_run: false,
164            auto_backup: true,
165            require_git_clean: false,
166            bash_timeout_secs: crate::DEFAULT_BASH_TIMEOUT,
167            max_file_size_kb: 1024,
168            max_tool_iterations: crate::MAX_TOOL_ITERATIONS,
169            max_context_tokens: 100000,
170            system_prompt: None,
171            temperature: 1.0,
172            top_p: 0.95,
173            max_tokens: 8192,
174            reasoning_mode: true,
175            max_retries: 3,
176            fallback_models: Vec::new(),
177            max_result_chars: 8000,
178            healing: HealingConfig::default(),
179            targets,
180            tui: TuiConfig::default(),
181            mcp: HashMap::new(),
182            permissions: HashMap::new(),
183            cloud: None,
184            eruka: crate::eruka_bridge::ErukaConfig::default(),
185        }
186    }
187}
188
/// Configuration for self-healing behavior.
///
/// All fields are optional in TOML (`#[serde(default)]`); see the
/// `Default` impl for the effective defaults.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct HealingConfig {
    /// Automatically commit fixes (default: false — commits are opt-in)
    pub auto_commit: bool,

    /// Fix compilation errors (default: true)
    pub fix_errors: bool,

    /// Fix clippy warnings (default: true)
    pub fix_warnings: bool,

    /// Fix failing tests (default: true)
    pub fix_tests: bool,

    /// Generate missing documentation (default: false)
    pub generate_docs: bool,

    /// Maximum fix attempts per issue (default: 3)
    pub max_attempts: usize,
}
211
212impl Default for HealingConfig {
213    fn default() -> Self {
214        Self {
215            auto_commit: false,
216            fix_errors: true,
217            fix_warnings: true,
218            fix_tests: true,
219            generate_docs: false,
220            max_attempts: 3,
221        }
222    }
223}
224
/// Configuration for a target project.
///
/// Targets are stored in `PawanConfig::targets` keyed by a short name
/// (e.g. "ares", "self").
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TargetConfig {
    /// Path to the project root (may be relative to the working directory)
    pub path: PathBuf,

    /// Human-readable description of the project
    pub description: String,
}
234
/// Configuration for the TUI.
///
/// All fields are optional in TOML (`#[serde(default)]`); see the
/// `Default` impl for the effective defaults.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct TuiConfig {
    /// Enable syntax highlighting (default: true)
    pub syntax_highlighting: bool,

    /// Theme name for syntax highlighting (default: "base16-ocean.dark")
    pub theme: String,

    /// Show line numbers in code blocks (default: true)
    pub line_numbers: bool,

    /// Enable mouse support (default: true)
    pub mouse_support: bool,

    /// Scroll speed in lines per scroll event (default: 3)
    pub scroll_speed: usize,

    /// Maximum history entries to keep (default: 1000)
    pub max_history: usize,
}
257
258impl Default for TuiConfig {
259    fn default() -> Self {
260        Self {
261            syntax_highlighting: true,
262            theme: "base16-ocean.dark".to_string(),
263            line_numbers: true,
264            mouse_support: true,
265            scroll_speed: 3,
266            max_history: 1000,
267        }
268    }
269}
270
/// Configuration for an MCP server in pawan.toml.
///
/// Stored in `PawanConfig::mcp` keyed by server name. Only `command`
/// is required; `enabled` defaults to `true` via [`default_true`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct McpServerEntry {
    /// Command to run to start the server
    pub command: String,
    /// Command arguments
    #[serde(default)]
    pub args: Vec<String>,
    /// Environment variables passed to the server process
    #[serde(default)]
    pub env: HashMap<String, String>,
    /// Whether this server is enabled (default: true)
    #[serde(default = "default_true")]
    pub enabled: bool,
}
286
/// Serde default helper: servers are enabled unless explicitly disabled.
/// (Needed because `#[serde(default)]` on a `bool` would default to `false`.)
fn default_true() -> bool {
    true
}
290
291impl PawanConfig {
292    /// Load configuration from file
293    pub fn load(path: Option<&PathBuf>) -> crate::Result<Self> {
294        let config_path = path.cloned().or_else(|| {
295            // Try pawan.toml first
296            let pawan_toml = PathBuf::from("pawan.toml");
297            if pawan_toml.exists() {
298                return Some(pawan_toml);
299            }
300
301            // Try ares.toml
302            let ares_toml = PathBuf::from("ares.toml");
303            if ares_toml.exists() {
304                return Some(ares_toml);
305            }
306
307            None
308        });
309
310        match config_path {
311            Some(path) => {
312                let content = std::fs::read_to_string(&path).map_err(|e| {
313                    crate::PawanError::Config(format!("Failed to read {}: {}", path.display(), e))
314                })?;
315
316                // Check if this is ares.toml (look for [pawan] section)
317                if path.file_name().map(|n| n == "ares.toml").unwrap_or(false) {
318                    // Parse as TOML and extract [pawan] section
319                    let value: toml::Value = toml::from_str(&content).map_err(|e| {
320                        crate::PawanError::Config(format!(
321                            "Failed to parse {}: {}",
322                            path.display(),
323                            e
324                        ))
325                    })?;
326
327                    if let Some(pawan_section) = value.get("pawan") {
328                        let config: PawanConfig =
329                            pawan_section.clone().try_into().map_err(|e| {
330                                crate::PawanError::Config(format!(
331                                    "Failed to parse [pawan] section: {}",
332                                    e
333                                ))
334                            })?;
335                        return Ok(config);
336                    }
337
338                    // No [pawan] section, use defaults
339                    Ok(Self::default())
340                } else {
341                    // Parse as pawan.toml
342                    toml::from_str(&content).map_err(|e| {
343                        crate::PawanError::Config(format!(
344                            "Failed to parse {}: {}",
345                            path.display(),
346                            e
347                        ))
348                    })
349                }
350            }
351            None => Ok(Self::default()),
352        }
353    }
354
355    /// Apply environment variable overrides (PAWAN_MODEL, PAWAN_PROVIDER, etc.)
356    pub fn apply_env_overrides(&mut self) {
357        if let Ok(model) = std::env::var("PAWAN_MODEL") {
358            self.model = model;
359        }
360        if let Ok(provider) = std::env::var("PAWAN_PROVIDER") {
361            match provider.to_lowercase().as_str() {
362                "nvidia" | "nim" => self.provider = LlmProvider::Nvidia,
363                "ollama" => self.provider = LlmProvider::Ollama,
364                "openai" => self.provider = LlmProvider::OpenAI,
365                _ => tracing::warn!(provider = provider.as_str(), "Unknown PAWAN_PROVIDER, ignoring"),
366            }
367        }
368        if let Ok(temp) = std::env::var("PAWAN_TEMPERATURE") {
369            if let Ok(t) = temp.parse::<f32>() {
370                self.temperature = t;
371            }
372        }
373        if let Ok(tokens) = std::env::var("PAWAN_MAX_TOKENS") {
374            if let Ok(t) = tokens.parse::<usize>() {
375                self.max_tokens = t;
376            }
377        }
378        if let Ok(iters) = std::env::var("PAWAN_MAX_ITERATIONS") {
379            if let Ok(i) = iters.parse::<usize>() {
380                self.max_tool_iterations = i;
381            }
382        }
383        if let Ok(ctx) = std::env::var("PAWAN_MAX_CONTEXT_TOKENS") {
384            if let Ok(c) = ctx.parse::<usize>() {
385                self.max_context_tokens = c;
386            }
387        }
388        if let Ok(models) = std::env::var("PAWAN_FALLBACK_MODELS") {
389            self.fallback_models = models.split(',').map(|s| s.trim().to_string()).filter(|s| !s.is_empty()).collect();
390        }
391        if let Ok(chars) = std::env::var("PAWAN_MAX_RESULT_CHARS") {
392            if let Ok(c) = chars.parse::<usize>() {
393                self.max_result_chars = c;
394            }
395        }
396    }
397
398    /// Get target by name
399    pub fn get_target(&self, name: &str) -> Option<&TargetConfig> {
400        self.targets.get(name)
401    }
402
403    /// Get the system prompt, with optional PAWAN.md context injection
404    pub fn get_system_prompt(&self) -> String {
405        let base = self
406            .system_prompt
407            .clone()
408            .unwrap_or_else(|| DEFAULT_SYSTEM_PROMPT.to_string());
409
410        // Try to load PAWAN.md from current directory for project context
411        let context = Self::load_context_file();
412        if let Some(ctx) = context {
413            format!("{}\n\n## Project Context (from PAWAN.md)\n\n{}", base, ctx)
414        } else {
415            base
416        }
417    }
418
419    /// Load PAWAN.md context file from current directory (if it exists)
420    fn load_context_file() -> Option<String> {
421        // Check PAWAN.md first, then .pawan/context.md
422        for path in &["PAWAN.md", ".pawan/context.md"] {
423            let p = PathBuf::from(path);
424            if p.exists() {
425                if let Ok(content) = std::fs::read_to_string(&p) {
426                    if !content.trim().is_empty() {
427                        return Some(content);
428                    }
429                }
430            }
431        }
432        None
433    }
434
435    /// Check if thinking mode should be enabled.
436    /// Only applicable to DeepSeek models (other NIM models don't support <think> tokens).
437    pub fn use_thinking_mode(&self) -> bool {
438        self.reasoning_mode && self.model.contains("deepseek")
439    }
440}
441
/// Default system prompt for coding tasks.
///
/// Used by `PawanConfig::get_system_prompt` whenever `system_prompt` is not
/// overridden in the config. NOTE: this is a runtime string sent to the LLM —
/// edits here change agent behavior.
pub const DEFAULT_SYSTEM_PROMPT: &str = r#"You are Pawan, an expert coding assistant capable of working on any project (Rust, Python, JavaScript, and more). You have self-healing, code review, and testing capabilities.

Available tools:
- File: read_file, write_file, edit_file, list_directory
- Search: glob_search, grep_search
- Shell: bash
- Git: git_status, git_diff, git_add, git_commit, git_log, git_blame, git_branch, git_checkout, git_stash
- Agent: spawn_agent

When making changes:
1. Always read files before modifying them to understand context
2. Make minimal, focused changes
3. Explain your reasoning before making changes
4. Verify changes compile and tests pass when appropriate
5. Follow existing code style and patterns

When fixing issues:
1. Understand the root cause before attempting fixes
2. Make one fix at a time and verify it works
3. If a fix doesn't work, try a different approach
4. Document what you changed and why

Be concise in explanations but thorough in code changes.

Git commits: always use author `bkataru <baalateja.k@gmail.com>`. Pass -c user.name="bkataru" -c user.email="baalateja.k@gmail.com" on every git commit. Never use: kavesbteja@gmail.com, baalateja.kataru@gmail.com, or noreply emails."#;