1use serde::{Deserialize, Serialize};
10use std::collections::HashMap;
11use std::path::PathBuf;
12use tracing;
13
/// LLM backend selector. Serialized in lowercase, so TOML uses
/// e.g. `provider = "ollama"`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum LlmProvider {
    /// NVIDIA backend (the default; env alias "nim" — see `apply_env_overrides`).
    #[default]
    Nvidia,
    /// Ollama backend.
    Ollama,
    /// OpenAI backend.
    OpenAI,
    /// MLX backend (env alias "mlx-lm" — see `apply_env_overrides`).
    Mlx,
}
28
/// Top-level configuration for the Pawan agent.
///
/// Loaded from TOML by [`PawanConfig::load`] (search order: `./pawan.toml`,
/// the `[pawan]` table of `./ares.toml`, then `~/.config/pawan/pawan.toml`).
/// Every field falls back to its `Default` value via the struct-level
/// `#[serde(default)]`, so partial config files are valid.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct PawanConfig {
    /// LLM backend to route requests to (default: `Nvidia`;
    /// env: `PAWAN_PROVIDER`).
    pub provider: LlmProvider,

    /// Model identifier sent to the provider
    /// (default: `crate::DEFAULT_MODEL`; env: `PAWAN_MODEL`).
    pub model: String,

    /// Optional override of the provider API base URL; `None` uses the
    /// provider's built-in endpoint.
    pub base_url: Option<String>,

    /// Dry-run flag (default: `false`). NOTE(review): presumably suppresses
    /// destructive actions — confirm against the tool-execution code.
    pub dry_run: bool,

    /// Automatic backup flag (default: `true`).
    pub auto_backup: bool,

    /// Require a clean git working tree before acting (default: `false`).
    pub require_git_clean: bool,

    /// Timeout for bash tool invocations, in seconds
    /// (default: `crate::DEFAULT_BASH_TIMEOUT`).
    pub bash_timeout_secs: u64,

    /// Maximum file size handled, in KB (default: 1024).
    pub max_file_size_kb: usize,

    /// Maximum tool-call iterations per request
    /// (default: `crate::MAX_TOOL_ITERATIONS`; env: `PAWAN_MAX_ITERATIONS`).
    pub max_tool_iterations: usize,
    /// Context-window budget in tokens (default: 100000;
    /// env: `PAWAN_MAX_CONTEXT_TOKENS`).
    pub max_context_tokens: usize,

    /// Custom system prompt; `None` falls back to `DEFAULT_SYSTEM_PROMPT`
    /// (see `get_system_prompt`).
    pub system_prompt: Option<String>,

    /// Sampling temperature (default: 1.0; env: `PAWAN_TEMPERATURE`).
    pub temperature: f32,

    /// Nucleus-sampling cutoff (default: 0.95).
    pub top_p: f32,

    /// Maximum tokens to generate per response (default: 8192;
    /// env: `PAWAN_MAX_TOKENS`).
    pub max_tokens: usize,

    /// Thinking/reasoning token budget (default: 0).
    pub thinking_budget: usize,

    /// Retries per request (default: 3).
    pub max_retries: usize,

    /// Fallback models tried in order (env: `PAWAN_FALLBACK_MODELS`,
    /// comma-separated, empty entries dropped).
    pub fallback_models: Vec<String>,
    /// Truncation limit for tool results, in characters (default: 8000;
    /// env: `PAWAN_MAX_RESULT_CHARS`).
    pub max_result_chars: usize,

    /// Master switch for thinking mode; see
    /// [`PawanConfig::use_thinking_mode`].
    pub reasoning_mode: bool,

    /// Self-healing settings (the `[healing]` table).
    pub healing: HealingConfig,

    /// Named workspace targets; defaults contain a single "self" target
    /// pointing at the current directory.
    pub targets: HashMap<String, TargetConfig>,

    /// Terminal UI settings (the `[tui]` table).
    pub tui: TuiConfig,

    /// MCP server definitions, keyed by server name.
    #[serde(default)]
    pub mcp: HashMap<String, McpServerEntry>,

    /// Per-tool permission overrides, keyed by tool name
    /// (see `ToolPermission::resolve`).
    #[serde(default)]
    pub permissions: HashMap<String, ToolPermission>,

    /// Optional cloud fallback provider/model (the `[cloud]` table).
    pub cloud: Option<CloudConfig>,

    /// Per-task model routing table (see `ModelRouting::route`).
    #[serde(default)]
    pub models: ModelRouting,

    /// Eruka bridge configuration.
    #[serde(default)]
    pub eruka: crate::eruka_bridge::ErukaConfig,
}
121
/// Optional per-task model routing: each field names a model to use when
/// a query matches that category's keyword signals (see `route`).
/// Unset fields disable their category.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ModelRouting {
    /// Model for code-writing tasks (implement/refactor/fix/...).
    pub code: Option<String>,
    /// Model for analysis/search/review tasks.
    pub orchestrate: Option<String>,
    /// Model for command-execution tasks (run/build/deploy/...).
    pub execute: Option<String>,
}
140
141impl ModelRouting {
142 pub fn route(&self, query: &str) -> Option<&str> {
145 let q = query.to_lowercase();
146
147 if self.code.is_some() {
149 let code_signals = ["implement", "write", "create", "refactor", "fix", "add test",
150 "add function", "struct", "enum", "trait", "algorithm", "data structure"];
151 if code_signals.iter().any(|s| q.contains(s)) {
152 return self.code.as_deref();
153 }
154 }
155
156 if self.orchestrate.is_some() {
158 let orch_signals = ["search", "find", "analyze", "review", "explain", "compare",
159 "list", "check", "verify", "diagnose", "audit"];
160 if orch_signals.iter().any(|s| q.contains(s)) {
161 return self.orchestrate.as_deref();
162 }
163 }
164
165 if self.execute.is_some() {
167 let exec_signals = ["run", "execute", "bash", "cargo", "test", "build",
168 "deploy", "install", "commit"];
169 if exec_signals.iter().any(|s| q.contains(s)) {
170 return self.execute.as_deref();
171 }
172 }
173
174 None
175 }
176}
177
/// Cloud fallback configuration (the optional `[cloud]` table).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CloudConfig {
    /// Provider used for the cloud fallback.
    pub provider: LlmProvider,
    /// Model identifier used on the cloud provider.
    pub model: String,
    /// Additional fallback models, tried in order (defaults to empty).
    #[serde(default)]
    pub fallback_models: Vec<String>,
}
203
204#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
206#[serde(rename_all = "lowercase")]
207pub enum ToolPermission {
208 Allow,
210 Deny,
212 Prompt,
214}
215
216impl ToolPermission {
217 pub fn resolve(name: &str, permissions: &HashMap<String, ToolPermission>) -> Self {
222 if let Some(p) = permissions.get(name) {
223 return p.clone();
224 }
225 match name {
227 "bash" | "git_commit" | "write_file" | "edit_file_lines"
228 | "insert_after" | "append_file" => ToolPermission::Allow, _ => ToolPermission::Allow,
230 }
231 }
232}
233
/// Baseline configuration used when no config file is present and as the
/// fill-in for fields omitted from a partial TOML (via `#[serde(default)]`).
impl Default for PawanConfig {
    fn default() -> Self {
        // Ship with a single built-in target, "self", pointing at the
        // current working directory.
        let mut targets = HashMap::new();
        targets.insert(
            "self".to_string(),
            TargetConfig {
                path: PathBuf::from("."),
                description: "Current project codebase".to_string(),
            },
        );

        Self {
            provider: LlmProvider::Nvidia,
            model: crate::DEFAULT_MODEL.to_string(),
            base_url: None,
            dry_run: false,
            auto_backup: true,
            require_git_clean: false,
            bash_timeout_secs: crate::DEFAULT_BASH_TIMEOUT,
            max_file_size_kb: 1024,
            max_tool_iterations: crate::MAX_TOOL_ITERATIONS,
            max_context_tokens: 100000,
            system_prompt: None,
            temperature: 1.0,
            top_p: 0.95,
            max_tokens: 8192,
            thinking_budget: 0, reasoning_mode: true,
            max_retries: 3,
            fallback_models: Vec::new(),
            max_result_chars: 8000,
            healing: HealingConfig::default(),
            targets,
            tui: TuiConfig::default(),
            mcp: HashMap::new(),
            permissions: HashMap::new(),
            cloud: None,
            models: ModelRouting::default(),
            eruka: crate::eruka_bridge::ErukaConfig::default(),
        }
    }
}
276
/// Self-healing settings (the `[healing]` table). Fields omitted from the
/// TOML fall back to `HealingConfig::default()` via `#[serde(default)]`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct HealingConfig {
    /// Commit automatically after healing (default: false).
    pub auto_commit: bool,

    /// Attempt to fix errors (default: true).
    pub fix_errors: bool,

    /// Attempt to fix warnings (default: true).
    pub fix_warnings: bool,

    /// Attempt to fix failing tests (default: true).
    pub fix_tests: bool,

    /// Generate documentation (default: false).
    pub generate_docs: bool,

    /// Maximum healing attempts (default: 3).
    pub max_attempts: usize,
}
299
/// Conservative defaults: fix problems but do not commit or generate docs
/// without being asked.
impl Default for HealingConfig {
    fn default() -> Self {
        Self {
            auto_commit: false,
            fix_errors: true,
            fix_warnings: true,
            fix_tests: true,
            generate_docs: false,
            max_attempts: 3,
        }
    }
}
312
/// One named workspace target (an entry of the `[targets]` table).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TargetConfig {
    /// Filesystem path of the target.
    pub path: PathBuf,

    /// Human-readable description of the target.
    pub description: String,
}
326
/// Terminal UI settings (the `[tui]` table). Fields omitted from the TOML
/// fall back to `TuiConfig::default()` via `#[serde(default)]`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct TuiConfig {
    /// Enable syntax highlighting (default: true).
    pub syntax_highlighting: bool,

    /// Highlighting theme name (default: "base16-ocean.dark").
    pub theme: String,

    /// Show line numbers (default: true).
    pub line_numbers: bool,

    /// Enable mouse support (default: true).
    pub mouse_support: bool,

    /// Lines scrolled per wheel tick (default: 3).
    pub scroll_speed: usize,

    /// Maximum history entries retained (default: 1000).
    pub max_history: usize,
}
349
/// Full-featured defaults: highlighting, line numbers, and mouse support
/// all on.
impl Default for TuiConfig {
    fn default() -> Self {
        Self {
            syntax_highlighting: true,
            theme: "base16-ocean.dark".to_string(),
            line_numbers: true,
            mouse_support: true,
            scroll_speed: 3,
            max_history: 1000,
        }
    }
}
362
/// One MCP server definition (an entry of the `[mcp]` table).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct McpServerEntry {
    /// Executable that launches the server.
    pub command: String,
    /// Arguments passed to the command (defaults to none).
    #[serde(default)]
    pub args: Vec<String>,
    /// Extra environment variables for the server process (defaults to none).
    #[serde(default)]
    pub env: HashMap<String, String>,
    /// Whether the server is active (defaults to true when omitted).
    #[serde(default = "default_true")]
    pub enabled: bool,
}
383
/// Serde default helper: makes `McpServerEntry::enabled` default to `true`.
fn default_true() -> bool {
    true
}
387
impl PawanConfig {
    /// Load configuration from disk.
    ///
    /// When `path` is `None`, searches in order: `./pawan.toml`,
    /// `./ares.toml` (reading its `[pawan]` table), then
    /// `~/.config/pawan/pawan.toml`. Falls back to `Self::default()` when
    /// no file is found, or when an `ares.toml` exists but has no `[pawan]`
    /// section.
    ///
    /// # Errors
    /// Returns `PawanError::Config` if the chosen file cannot be read or
    /// parsed.
    pub fn load(path: Option<&PathBuf>) -> crate::Result<Self> {
        let config_path = path.cloned().or_else(|| {
            let pawan_toml = PathBuf::from("pawan.toml");
            if pawan_toml.exists() {
                return Some(pawan_toml);
            }

            let ares_toml = PathBuf::from("ares.toml");
            if ares_toml.exists() {
                return Some(ares_toml);
            }

            if let Some(home) = dirs::home_dir() {
                let global = home.join(".config/pawan/pawan.toml");
                if global.exists() {
                    return Some(global);
                }
            }

            None
        });

        match config_path {
            Some(path) => {
                let content = std::fs::read_to_string(&path).map_err(|e| {
                    crate::PawanError::Config(format!("Failed to read {}: {}", path.display(), e))
                })?;

                // ares.toml embeds our settings under a [pawan] table, so it
                // needs a two-step parse (generic Value, then try_into)
                // instead of a direct deserialize.
                if path.file_name().map(|n| n == "ares.toml").unwrap_or(false) {
                    let value: toml::Value = toml::from_str(&content).map_err(|e| {
                        crate::PawanError::Config(format!(
                            "Failed to parse {}: {}",
                            path.display(),
                            e
                        ))
                    })?;

                    if let Some(pawan_section) = value.get("pawan") {
                        let config: PawanConfig =
                            pawan_section.clone().try_into().map_err(|e| {
                                crate::PawanError::Config(format!(
                                    "Failed to parse [pawan] section: {}",
                                    e
                                ))
                            })?;
                        return Ok(config);
                    }

                    // ares.toml without a [pawan] table: treat as unconfigured.
                    Ok(Self::default())
                } else {
                    toml::from_str(&content).map_err(|e| {
                        crate::PawanError::Config(format!(
                            "Failed to parse {}: {}",
                            path.display(),
                            e
                        ))
                    })
                }
            }
            None => Ok(Self::default()),
        }
    }

    /// Apply `PAWAN_*` environment-variable overrides on top of the loaded
    /// config: `PAWAN_MODEL`, `PAWAN_PROVIDER`, `PAWAN_TEMPERATURE`,
    /// `PAWAN_MAX_TOKENS`, `PAWAN_MAX_ITERATIONS`,
    /// `PAWAN_MAX_CONTEXT_TOKENS`, `PAWAN_FALLBACK_MODELS`
    /// (comma-separated), and `PAWAN_MAX_RESULT_CHARS`.
    ///
    /// Unparseable numeric values are silently ignored; an unrecognized
    /// provider name logs a warning and leaves the provider unchanged.
    pub fn apply_env_overrides(&mut self) {
        if let Ok(model) = std::env::var("PAWAN_MODEL") {
            self.model = model;
        }
        if let Ok(provider) = std::env::var("PAWAN_PROVIDER") {
            match provider.to_lowercase().as_str() {
                "nvidia" | "nim" => self.provider = LlmProvider::Nvidia,
                "ollama" => self.provider = LlmProvider::Ollama,
                "openai" => self.provider = LlmProvider::OpenAI,
                "mlx" | "mlx-lm" => self.provider = LlmProvider::Mlx,
                _ => tracing::warn!(provider = provider.as_str(), "Unknown PAWAN_PROVIDER, ignoring"),
            }
        }
        if let Ok(temp) = std::env::var("PAWAN_TEMPERATURE") {
            if let Ok(t) = temp.parse::<f32>() {
                self.temperature = t;
            }
        }
        if let Ok(tokens) = std::env::var("PAWAN_MAX_TOKENS") {
            if let Ok(t) = tokens.parse::<usize>() {
                self.max_tokens = t;
            }
        }
        if let Ok(iters) = std::env::var("PAWAN_MAX_ITERATIONS") {
            if let Ok(i) = iters.parse::<usize>() {
                self.max_tool_iterations = i;
            }
        }
        if let Ok(ctx) = std::env::var("PAWAN_MAX_CONTEXT_TOKENS") {
            if let Ok(c) = ctx.parse::<usize>() {
                self.max_context_tokens = c;
            }
        }
        if let Ok(models) = std::env::var("PAWAN_FALLBACK_MODELS") {
            // Comma-separated list; entries are trimmed and empties dropped.
            self.fallback_models = models.split(',').map(|s| s.trim().to_string()).filter(|s| !s.is_empty()).collect();
        }
        if let Ok(chars) = std::env::var("PAWAN_MAX_RESULT_CHARS") {
            if let Ok(c) = chars.parse::<usize>() {
                self.max_result_chars = c;
            }
        }
    }

    /// Look up a named workspace target.
    pub fn get_target(&self, name: &str) -> Option<&TargetConfig> {
        self.targets.get(name)
    }

    /// Build the effective system prompt: the configured (or default) base
    /// prompt, plus project context from a context file and the active
    /// skill from `SKILL.md`, when those files exist.
    pub fn get_system_prompt(&self) -> String {
        let base = self
            .system_prompt
            .clone()
            .unwrap_or_else(|| DEFAULT_SYSTEM_PROMPT.to_string());

        let mut prompt = base;

        if let Some((filename, ctx)) = Self::load_context_file() {
            prompt = format!("{}\n\n## Project Context (from {})\n\n{}", prompt, filename, ctx);
        }

        if let Some(skill_ctx) = Self::load_skill_context() {
            prompt = format!("{}\n\n## Active Skill (from SKILL.md)\n\n{}", prompt, skill_ctx);
        }

        prompt
    }

    /// Find the first non-empty project context file, in priority order,
    /// returning the matched filename together with its contents.
    /// Unreadable or whitespace-only files are skipped.
    fn load_context_file() -> Option<(String, String)> {
        for path in &["PAWAN.md", "AGENTS.md", "CLAUDE.md", ".pawan/context.md"] {
            let p = PathBuf::from(path);
            if p.exists() {
                if let Ok(content) = std::fs::read_to_string(&p) {
                    if !content.trim().is_empty() {
                        return Some((path.to_string(), content));
                    }
                }
            }
        }
        None
    }

    /// Parse `./SKILL.md` (if present) into a prompt fragment listing the
    /// skill's name, description, allowed tools, and body.
    /// Parse failures are logged as warnings and yield `None`.
    pub fn load_skill_context() -> Option<String> {
        use thulp_skill_files::SkillFile;

        let skill_path = std::path::Path::new("SKILL.md");
        if !skill_path.exists() {
            return None;
        }

        match SkillFile::parse(skill_path) {
            Ok(skill) => {
                let name = skill.effective_name();
                let desc = skill.frontmatter.description.as_deref().unwrap_or("no description");
                // No allowed_tools list means the skill may use all tools.
                let tools_str = match &skill.frontmatter.allowed_tools {
                    Some(tools) => tools.join(", "),
                    None => "all".to_string(),
                };
                Some(format!(
                    "[Skill: {}] {}\nAllowed tools: {}\n---\n{}",
                    name, desc, tools_str, skill.content
                ))
            }
            Err(e) => {
                tracing::warn!("Failed to parse SKILL.md: {}", e);
                None
            }
        }
    }

    /// Whether to enable thinking/reasoning mode: requires `reasoning_mode`
    /// to be on AND a model id containing one of the known-supporting
    /// family substrings.
    pub fn use_thinking_mode(&self) -> bool {
        self.reasoning_mode
            && (self.model.contains("deepseek")
                || self.model.contains("gemma")
                || self.model.contains("glm")
                || self.model.contains("qwen")
                || self.model.contains("mistral-small-4"))
    }
}
588
/// Built-in system prompt, used when `PawanConfig::system_prompt` is unset
/// (see `get_system_prompt`).
pub const DEFAULT_SYSTEM_PROMPT: &str = r#"You are Pawan, an expert coding assistant.

# Efficiency
- Act immediately. Do NOT explore or plan before writing. Write code FIRST, then verify.
- write_file creates parents automatically. No mkdir needed.
- cargo check runs automatically after .rs writes — fix errors immediately.
- Use relative paths from workspace root.
- Missing tools are auto-installed via mise. Don't check dependencies.
- You have limited tool iterations. Be direct. No preamble.

# Tool Selection
Use the BEST tool for the job — do NOT use bash for things dedicated tools handle:
- File ops: read_file, write_file, edit_file, edit_file_lines, insert_after, append_file, list_directory
- Code intelligence: ast_grep (AST search + rewrite via tree-sitter — prefer for structural changes)
- Search: glob_search (files by pattern), grep_search (content by regex), ripgrep (native rg), fd (native find)
- Shell: bash (commands), sd (find-replace in files), mise (tool/task/env manager), zoxide (smart cd)
- Git: git_status, git_diff, git_add, git_commit, git_log, git_blame, git_branch, git_checkout, git_stash
- Agent: spawn_agent (delegate subtask), spawn_agents (parallel sub-agents)
- Web: mcp_daedra_web_search (ALWAYS use for web queries — never bash+curl)

Prefer ast_grep over edit_file for code refactors. Prefer grep_search over bash grep.
Prefer fd over bash find. Prefer sd over bash sed.

# Parallel Execution
Call multiple tools in a single response when they are independent.
If tool B depends on tool A's result, call them sequentially.
Never parallelize destructive operations (writes, deletes, commits).

# Read Before Modifying
Do NOT propose changes to code you haven't read. If asked to modify a file, read it first.
Understand existing code, patterns, and style before suggesting changes.

# Scope Discipline
Make minimal, focused changes. Follow existing code style.
- Don't add features, refactor, or "improve" code beyond what was asked.
- Don't add docstrings, comments, or type annotations to code you didn't change.
- A bug fix doesn't need surrounding code cleaned up.
- Don't add error handling for scenarios that can't happen.

# Executing Actions with Care
Consider reversibility and blast radius before acting:
- Freely take local, reversible actions (editing files, running tests).
- For hard-to-reverse actions (force-push, rm -rf, dropping tables), ask first.
- Match the scope of your actions to what was requested.
- Investigate before deleting — unfamiliar files may be the user's in-progress work.
- Don't use destructive shortcuts to bypass safety checks.

# Git Safety
- NEVER skip hooks (--no-verify) unless explicitly asked.
- ALWAYS create NEW commits rather than amending (amend after hook failure destroys work).
- NEVER force-push to main/master. Warn if requested.
- Prefer staging specific files over `git add -A` (avoids committing secrets).
- Only commit when explicitly asked. Don't be over-eager.
- Commit messages: focus on WHY, not WHAT. Use HEREDOC for multi-line messages.
- Use the git author from `git config user.name` / `git config user.email`.

# Output Style
Be concise. Lead with the answer, not the reasoning.
Focus text output on: decisions needing input, status updates, errors/blockers.
If you can say it in one sentence, don't use three.
After .rs writes, cargo check auto-runs — fix errors immediately if it fails.
Run tests when the task calls for it (cargo test -p <crate>).
One fix at a time. If it doesn't work, try a different approach."#;
653
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::{Mutex, MutexGuard};

    /// Serializes tests that mutate process-global environment variables.
    ///
    /// `cargo test` runs tests on multiple threads by default, and
    /// `std::env::set_var` / `remove_var` affect the whole process, so the
    /// env-override tests below would otherwise race: `apply_env_overrides`
    /// reads every `PAWAN_*` variable and could observe one set (or removed)
    /// by a concurrently running test.
    static ENV_LOCK: Mutex<()> = Mutex::new(());

    /// Acquire the env lock, recovering from poisoning so one failed env
    /// test does not cascade into spurious failures of the others.
    fn env_guard() -> MutexGuard<'static, ()> {
        ENV_LOCK.lock().unwrap_or_else(|e| e.into_inner())
    }

    #[test]
    fn test_provider_mlx_parsing() {
        let toml = r#"
provider = "mlx"
model = "mlx-community/Qwen3.5-9B-4bit"
"#;
        let config: PawanConfig = toml::from_str(toml).expect("should parse without error");
        assert_eq!(config.provider, LlmProvider::Mlx);
        assert_eq!(config.model, "mlx-community/Qwen3.5-9B-4bit");
    }

    #[test]
    fn test_provider_mlx_lm_alias() {
        let _env = env_guard();
        let mut config = PawanConfig::default();
        std::env::set_var("PAWAN_PROVIDER", "mlx-lm");
        config.apply_env_overrides();
        std::env::remove_var("PAWAN_PROVIDER");
        assert_eq!(config.provider, LlmProvider::Mlx);
    }

    #[test]
    fn test_mlx_base_url_override() {
        let toml = r#"
provider = "mlx"
model = "test-model"
base_url = "http://192.168.1.100:8080/v1"
"#;
        let config: PawanConfig = toml::from_str(toml).expect("should parse without error");
        assert_eq!(config.provider, LlmProvider::Mlx);
        assert_eq!(
            config.base_url.as_deref(),
            Some("http://192.168.1.100:8080/v1")
        );
    }

    #[test]
    fn test_route_code_signals() {
        let routing = ModelRouting {
            code: Some("code-model".into()),
            orchestrate: Some("orch-model".into()),
            execute: Some("exec-model".into()),
        };
        assert_eq!(routing.route("implement a linked list"), Some("code-model"));
        assert_eq!(routing.route("refactor the parser"), Some("code-model"));
        assert_eq!(routing.route("add test for config"), Some("code-model"));
        assert_eq!(routing.route("Write a new struct"), Some("code-model"));
    }

    #[test]
    fn test_route_orchestration_signals() {
        let routing = ModelRouting {
            code: Some("code-model".into()),
            orchestrate: Some("orch-model".into()),
            execute: Some("exec-model".into()),
        };
        assert_eq!(routing.route("analyze the error logs"), Some("orch-model"));
        assert_eq!(routing.route("review this PR"), Some("orch-model"));
        assert_eq!(routing.route("explain how the agent works"), Some("orch-model"));
        assert_eq!(routing.route("search for uses of foo"), Some("orch-model"));
    }

    #[test]
    fn test_route_execution_signals() {
        let routing = ModelRouting {
            code: Some("code-model".into()),
            orchestrate: Some("orch-model".into()),
            execute: Some("exec-model".into()),
        };
        assert_eq!(routing.route("run cargo test"), Some("exec-model"));
        assert_eq!(routing.route("execute the deploy script"), Some("exec-model"));
        assert_eq!(routing.route("build the project"), Some("exec-model"));
        assert_eq!(routing.route("commit these changes"), Some("exec-model"));
    }

    #[test]
    fn test_route_no_match_returns_none() {
        let routing = ModelRouting {
            code: Some("code-model".into()),
            orchestrate: Some("orch-model".into()),
            execute: Some("exec-model".into()),
        };
        assert_eq!(routing.route("hello world"), None);
    }

    #[test]
    fn test_route_empty_routing_returns_none() {
        let routing = ModelRouting::default();
        assert_eq!(routing.route("implement something"), None);
        assert_eq!(routing.route("search for bugs"), None);
    }

    #[test]
    fn test_route_case_insensitive() {
        let routing = ModelRouting {
            code: Some("code-model".into()),
            orchestrate: None,
            execute: None,
        };
        assert_eq!(routing.route("IMPLEMENT a FUNCTION"), Some("code-model"));
    }

    #[test]
    fn test_route_partial_routing() {
        let routing = ModelRouting {
            code: Some("code-model".into()),
            orchestrate: None,
            execute: None,
        };
        assert_eq!(routing.route("implement x"), Some("code-model"));
        assert_eq!(routing.route("search for y"), None);
        assert_eq!(routing.route("run tests"), None);
    }

    #[test]
    fn test_env_override_model() {
        let _env = env_guard();
        let mut config = PawanConfig::default();
        std::env::set_var("PAWAN_MODEL", "custom/model-123");
        config.apply_env_overrides();
        std::env::remove_var("PAWAN_MODEL");
        assert_eq!(config.model, "custom/model-123");
    }

    #[test]
    fn test_env_override_temperature() {
        let _env = env_guard();
        let mut config = PawanConfig::default();
        std::env::set_var("PAWAN_TEMPERATURE", "0.9");
        config.apply_env_overrides();
        std::env::remove_var("PAWAN_TEMPERATURE");
        assert!((config.temperature - 0.9).abs() < f32::EPSILON);
    }

    #[test]
    fn test_env_override_invalid_temperature_ignored() {
        let _env = env_guard();
        let mut config = PawanConfig::default();
        let original = config.temperature;
        std::env::set_var("PAWAN_TEMPERATURE", "not_a_number");
        config.apply_env_overrides();
        std::env::remove_var("PAWAN_TEMPERATURE");
        assert!((config.temperature - original).abs() < f32::EPSILON);
    }

    #[test]
    fn test_env_override_max_tokens() {
        let _env = env_guard();
        let mut config = PawanConfig::default();
        std::env::set_var("PAWAN_MAX_TOKENS", "16384");
        config.apply_env_overrides();
        std::env::remove_var("PAWAN_MAX_TOKENS");
        assert_eq!(config.max_tokens, 16384);
    }

    #[test]
    fn test_env_override_fallback_models() {
        let _env = env_guard();
        let mut config = PawanConfig::default();
        std::env::set_var("PAWAN_FALLBACK_MODELS", "model-a, model-b, model-c");
        config.apply_env_overrides();
        std::env::remove_var("PAWAN_FALLBACK_MODELS");
        assert_eq!(config.fallback_models, vec!["model-a", "model-b", "model-c"]);
    }

    #[test]
    fn test_env_override_fallback_models_filters_empty() {
        let _env = env_guard();
        let mut config = PawanConfig::default();
        std::env::set_var("PAWAN_FALLBACK_MODELS", "model-a,,, model-b,");
        config.apply_env_overrides();
        std::env::remove_var("PAWAN_FALLBACK_MODELS");
        assert_eq!(config.fallback_models, vec!["model-a", "model-b"]);
    }

    #[test]
    fn test_env_override_provider_variants() {
        let _env = env_guard();
        for (env_val, expected) in [
            ("nvidia", LlmProvider::Nvidia),
            ("nim", LlmProvider::Nvidia),
            ("ollama", LlmProvider::Ollama),
            ("openai", LlmProvider::OpenAI),
            ("mlx", LlmProvider::Mlx),
        ] {
            let mut config = PawanConfig::default();
            std::env::set_var("PAWAN_PROVIDER", env_val);
            config.apply_env_overrides();
            std::env::remove_var("PAWAN_PROVIDER");
            assert_eq!(config.provider, expected, "PAWAN_PROVIDER={} should map to {:?}", env_val, expected);
        }
    }

    #[test]
    fn test_thinking_mode_supported_models() {
        for model in ["deepseek-ai/deepseek-r1", "google/gemma-4-31b-it", "z-ai/glm5",
            "qwen/qwen3.5-122b", "mistralai/mistral-small-4-119b"] {
            let config = PawanConfig { model: model.into(), reasoning_mode: true, ..Default::default() };
            assert!(config.use_thinking_mode(), "thinking mode should be on for {}", model);
        }
    }

    #[test]
    fn test_thinking_mode_disabled_when_reasoning_off() {
        let config = PawanConfig { model: "deepseek-ai/deepseek-r1".into(), reasoning_mode: false, ..Default::default() };
        assert!(!config.use_thinking_mode());
    }

    #[test]
    fn test_thinking_mode_unsupported_models() {
        for model in ["meta/llama-3.1-70b", "minimaxai/minimax-m2.5", "stepfun-ai/step-3.5-flash"] {
            let config = PawanConfig { model: model.into(), reasoning_mode: true, ..Default::default() };
            assert!(!config.use_thinking_mode(), "thinking mode should be off for {}", model);
        }
    }

    #[test]
    fn test_system_prompt_default() {
        let config = PawanConfig::default();
        let prompt = config.get_system_prompt();
        assert!(prompt.contains("Pawan"), "default prompt should mention Pawan");
        assert!(prompt.contains("coding"), "default prompt should mention coding");
    }

    #[test]
    fn test_system_prompt_custom_override() {
        let config = PawanConfig { system_prompt: Some("Custom system prompt.".into()), ..Default::default() };
        let prompt = config.get_system_prompt();
        assert!(prompt.starts_with("Custom system prompt."));
    }

    #[test]
    fn test_config_with_cloud_fallback() {
        let toml = r#"
model = "qwen/qwen3.5-122b-a10b"
[cloud]
provider = "nvidia"
model = "minimaxai/minimax-m2.5"
"#;
        let config: PawanConfig = toml::from_str(toml).expect("should parse");
        assert_eq!(config.model, "qwen/qwen3.5-122b-a10b");
        let cloud = config.cloud.unwrap();
        assert_eq!(cloud.model, "minimaxai/minimax-m2.5");
    }

    #[test]
    fn test_config_with_healing() {
        let toml = r#"
model = "test"
[healing]
fix_errors = true
fix_warnings = false
fix_tests = true
"#;
        let config: PawanConfig = toml::from_str(toml).expect("should parse");
        assert!(config.healing.fix_errors);
        assert!(!config.healing.fix_warnings);
        assert!(config.healing.fix_tests);
    }

    #[test]
    fn test_config_defaults_sensible() {
        let config = PawanConfig::default();
        assert_eq!(config.provider, LlmProvider::Nvidia);
        assert!(config.temperature > 0.0 && config.temperature <= 1.0);
        assert!(config.max_tokens > 0);
        assert!(config.max_tool_iterations > 0);
    }

    #[test]
    fn test_context_file_search_order() {
        let config = PawanConfig::default();
        let prompt = config.get_system_prompt();
        // Only assert when the context file actually exists in the cwd the
        // tests run from; this test is environment-dependent by design.
        if std::path::Path::new("PAWAN.md").exists() {
            assert!(prompt.contains("Project Context"), "Should inject project context when PAWAN.md exists");
            assert!(prompt.contains("from PAWAN.md"), "Should identify source as PAWAN.md");
        }
    }

    #[test]
    fn test_system_prompt_injection_format() {
        let config = PawanConfig {
            system_prompt: Some("Base prompt.".into()),
            ..Default::default()
        };
        let prompt = config.get_system_prompt();
        if prompt.contains("Project Context") {
            assert!(prompt.contains("from "), "Injection should include source filename");
        }
    }
}