use crate::enhanced_ui::{
context::ProjectContext, smart_prompt::SmartPrompt, terminal::KandilTerminal,
};
use anyhow::{anyhow, Result};
use lazy_static::lazy_static;
use std::{
collections::VecDeque,
path::PathBuf,
sync::Arc,
cmp,
time::{Duration, Instant},
};
/// Shared mutable state threaded through every splash-command handler.
#[derive(Clone)]
pub struct CommandContext {
/// Handle to the terminal UI layer.
pub terminal: Arc<KandilTerminal>,
/// History of recently executed splash commands, newest at the back.
pub recent_commands: VecDeque<String>,
/// Most recently modified file in the working directory, if detected.
pub active_file: Option<PathBuf>,
/// Tracks background jobs spawned by commands (e.g. `/test --background`).
pub job_tracker: JobTracker,
/// Detected project metadata that drives contextual suggestions.
pub project_context: ProjectContext,
}
impl CommandContext {
    /// Maximum number of commands retained in `recent_commands`.
    const MAX_RECENT_COMMANDS: usize = 100;

    /// Build a fresh context around `terminal`, running project detection.
    pub fn new(terminal: Arc<KandilTerminal>) -> Self {
        Self {
            terminal,
            recent_commands: VecDeque::with_capacity(Self::MAX_RECENT_COMMANDS),
            active_file: None,
            job_tracker: JobTracker::default(),
            project_context: ProjectContext::detect(),
        }
    }

    /// Record `command` in the history, evicting the oldest entries once the
    /// cap is reached.
    ///
    /// Bug fix: the old code compared `len()` against `capacity()`, but
    /// `VecDeque::with_capacity` may allocate MORE than requested, so the
    /// history could silently grow past 100 entries. Compare against the
    /// intended bound instead.
    pub fn remember_command(&mut self, command: &str) {
        while self.recent_commands.len() >= Self::MAX_RECENT_COMMANDS {
            self.recent_commands.pop_front();
        }
        self.recent_commands.push_back(command.to_string());
    }

    /// Re-run full project detection (including analysis).
    pub fn refresh_project_context(&mut self) {
        self.project_context = ProjectContext::detect_with_analysis();
    }

    /// Point `active_file` at the most recently modified regular file in the
    /// current directory. I/O errors (unreadable dir, missing metadata) are
    /// ignored and leave `active_file` untouched.
    ///
    /// Bug fix: the original contained the mojibake `¤t_dir` (a mangled
    /// `&current_dir`), which does not compile. Also replaced the
    /// collect-then-sort with a single-pass `max_by_key`.
    pub async fn refresh_file_context(&mut self) {
        if let Ok(current_dir) = std::env::current_dir() {
            if let Ok(entries) = std::fs::read_dir(&current_dir) {
                let newest = entries
                    .flatten()
                    .filter(|entry| entry.path().is_file())
                    .filter_map(|entry| {
                        let modified = entry.metadata().ok()?.modified().ok()?;
                        Some((entry.path(), modified))
                    })
                    .max_by_key(|(_, modified)| *modified);
                if let Some((path, _)) = newest {
                    self.active_file = Some(path);
                }
            }
        }
    }

    /// Placeholder: git status refresh is not implemented yet.
    pub async fn refresh_git_status(&mut self) {}

    /// Commands the detected project type recommends (delegates to
    /// `ProjectContext::suggested_commands`).
    pub fn contextual_suggestions(&self) -> Vec<&'static str> {
        self.project_context.suggested_commands()
    }
}
/// Static description of one splash command, used for menus and completion.
#[derive(Clone)]
pub struct SplashCommand {
/// Slash-prefixed token that invokes the command, e.g. "/ask".
pub trigger: &'static str,
/// One-line human-readable summary shown in suggestion lists.
pub description: &'static str,
/// Whether the user must approve before the command executes.
pub requires_approval: bool,
/// Optional label for a preview step shown before execution.
pub preview_action: Option<&'static str>,
}
// Registry of splash commands surfaced by completion and suggestion UIs.
// NOTE(review): `/jobs` is dispatched by `execute_splash_command` but is not
// listed here, so it never appears in suggestions — confirm whether that is
// intentional.
lazy_static! {
/// All user-facing splash commands, in display order.
pub static ref SPLASH_COMMANDS: Vec<SplashCommand> = vec![
SplashCommand {
trigger: "/ask",
description: "Ask a question about your code or project",
requires_approval: false,
preview_action: None,
},
SplashCommand {
trigger: "/refactor",
description: "Run AI-assisted refactor suggestions",
requires_approval: true,
preview_action: Some("Preview code changes"),
},
SplashCommand {
trigger: "/test",
description: "Generate or run tests for the active file",
requires_approval: false,
preview_action: Some("Show affected tests"),
},
SplashCommand {
trigger: "/fix",
description: "Analyze and fix compilation/runtime errors",
requires_approval: true,
preview_action: Some("Show error summary"),
},
SplashCommand {
trigger: "/commit",
description: "Generate semantic commit message",
requires_approval: false,
preview_action: Some("Show diff summary"),
},
SplashCommand {
trigger: "/review",
description: "Request AI code review on staged changes",
requires_approval: false,
preview_action: None,
},
SplashCommand {
trigger: "/doc",
description: "Generate or update documentation",
requires_approval: true,
preview_action: Some("Show doc sections"),
},
SplashCommand {
trigger: "/deploy",
description: "Draft deployment plan with validation",
requires_approval: true,
preview_action: Some("Show deployment checklist"),
},
SplashCommand {
trigger: "/model",
description: "Switch the active AI model",
requires_approval: false,
preview_action: None,
},
SplashCommand {
trigger: "/history",
description: "Show recent splash commands",
requires_approval: false,
preview_action: None,
},
SplashCommand {
trigger: "/undo",
description: "Undo the last AI action",
requires_approval: false,
preview_action: Some("Show undo diff"),
}
];
}
/// Dispatch a splash command by trigger, forwarding `args` and `ctx` to the
/// matching handler. A missing leading slash is tolerated ("ask" == "/ask").
///
/// Unknown triggers produce an error; if any registered trigger contains the
/// input as a substring, a "did you mean" list is included.
pub async fn execute_splash_command(
    trigger: &str,
    args: &[String],
    ctx: &mut CommandContext,
) -> Result<SplashResult> {
    // Normalize to the canonical slash-prefixed form.
    let normalized = if trigger.starts_with('/') {
        trigger.to_string()
    } else {
        format!("/{}", trigger)
    };
    match normalized.as_str() {
        "/ask" => handle_ask(args).await,
        "/refactor" => handle_refactor(args).await,
        "/test" => handle_test(args, ctx).await,
        "/fix" => handle_fix().await,
        "/commit" => handle_commit().await,
        "/review" => handle_review().await,
        "/doc" => handle_doc(args).await,
        "/deploy" => handle_deploy(args).await,
        "/model" => handle_model_switch(args).await,
        "/history" => handle_history(ctx).await,
        "/undo" => handle_undo(ctx).await,
        "/jobs" => handle_jobs(ctx).await,
        other => {
            // Collect near-misses to offer as suggestions.
            let candidates: Vec<String> = SPLASH_COMMANDS
                .iter()
                .filter(|cmd| cmd.trigger.contains(other))
                .map(|cmd| cmd.trigger.to_string())
                .collect();
            if candidates.is_empty() {
                Err(anyhow!("Unknown splash command: {}", trigger))
            } else {
                Err(anyhow!(
                    "Unknown command '{}'. Did you mean one of: {}",
                    trigger,
                    candidates.join(", ")
                ))
            }
        }
    }
}
/// Parse a raw input line into trigger + arguments and dispatch it.
///
/// The first whitespace-separated token is the trigger; the rest become the
/// argument list. An empty/blank input is an error.
pub async fn execute_splash_command_enhanced(
    input: &str,
    ctx: &mut CommandContext,
) -> Result<SplashResult> {
    let mut tokens = input.split_whitespace();
    let trigger = match tokens.next() {
        Some(t) => t,
        None => return Err(anyhow!("Empty command")),
    };
    let args: Vec<String> = tokens.map(str::to_string).collect();
    execute_splash_command(trigger, &args, ctx).await
}
/// Return every registered splash command whose trigger starts with `prefix`.
/// An empty prefix matches all commands.
pub fn suggest_commands(prefix: &str) -> Vec<&'static SplashCommand> {
    let mut matches = Vec::new();
    for command in SPLASH_COMMANDS.iter() {
        if command.trigger.starts_with(prefix) {
            matches.push(command);
        }
    }
    matches
}
/// Build a ranked list of at most five command suggestions for `prefix`.
///
/// Base matches (trigger starts with `prefix`) score 1.0. When the prefix is
/// empty or just "/", commands recommended by the detected project type are
/// boosted to 2.0. Results are sorted by score descending (stable for ties).
///
/// Bug fix: previously a project-recommended command that also matched the
/// prefix was pushed a second time, producing duplicate entries in the list.
/// The existing entry's score is now boosted instead.
pub fn contextual_suggestions(ctx: &CommandContext, prefix: &str) -> Vec<SplashSuggestion> {
    let mut suggestions: Vec<SplashSuggestion> = Vec::new();
    // Base prefix matches.
    for cmd in SPLASH_COMMANDS.iter() {
        if cmd.trigger.starts_with(prefix) {
            suggestions.push(SplashSuggestion {
                command: cmd.trigger.to_string(),
                description: cmd.description.to_string(),
                score: 1.0,
            });
        }
    }
    // With no meaningful prefix, boost project-recommended commands.
    if prefix.is_empty() || prefix == "/" {
        for cmd_name in ctx.project_context.suggested_commands() {
            if let Some(cmd) = SPLASH_COMMANDS.iter().find(|c| c.trigger == cmd_name) {
                if let Some(existing) = suggestions.iter_mut().find(|s| s.command == cmd.trigger) {
                    // Already listed from the prefix pass — boost, don't duplicate.
                    existing.score = 2.0;
                } else {
                    suggestions.push(SplashSuggestion {
                        command: cmd.trigger.to_string(),
                        description: cmd.description.to_string(),
                        score: 2.0,
                    });
                }
            }
        }
    }
    // Highest score first; `sort_by` is stable, so ties keep insertion order.
    suggestions.sort_by(|a, b| b.score.partial_cmp(&a.score).unwrap_or(std::cmp::Ordering::Equal));
    suggestions.truncate(5);
    suggestions
}
/// A ranked suggestion produced by `contextual_suggestions`.
#[derive(Debug, Clone)]
pub struct SplashSuggestion {
/// Command trigger, e.g. "/test".
pub command: String,
/// Human-readable description of the command.
pub description: String,
/// Ranking weight; higher sorts first (2.0 project-boosted, 1.0 base match).
pub score: f64,
}
/// Outcome of a splash command execution.
#[derive(Default, Clone)]
pub struct SplashResult {
/// Optional user-facing status/result text to display.
pub message: Option<String>,
}
/// `/ask` — echo the question being answered; a default question is used
/// when no arguments are supplied.
async fn handle_ask(args: &[String]) -> Result<SplashResult> {
    let question = match args {
        [] => "What should I focus on next?".to_string(),
        parts => parts.join(" "),
    };
    let message = format!("🤖 Answering question: {}", question);
    Ok(SplashResult {
        message: Some(message),
    })
}
/// `/refactor` — render a refactoring preview for `target` (the joined
/// arguments, or "current_module" when none are given).
///
/// Cleanup: the original constructed a `RefactorEngine`, `RefactorParams`,
/// and the AI provider without ever using them. The dead locals are removed;
/// provider construction is kept (as `_ai`) solely so configuration errors
/// surface via `?` before any preview text is produced.
async fn handle_refactor(args: &[String]) -> Result<SplashResult> {
    use crate::core::adapters::ai::factory::AIProviderFactory;
    use crate::utils::config::Config;
    use crate::enhanced_ui::smart_prompt::SmartPrompt;
    use std::sync::Arc;
    let config = Config::load()?;
    let factory = AIProviderFactory::new(config.clone());
    // Validates provider/model configuration; the handle itself is unused.
    let _ai = Arc::new(factory.create_ai(&config.ai_provider, &config.ai_model)?);
    let target = if args.is_empty() {
        "current_module".to_string()
    } else {
        args.join(" ")
    };
    // Canned before/after pair used to demonstrate the diff rendering.
    let original_code = "fn calculate_sum(a: i32, b: i32) -> i32 { a + b }".to_string();
    let refactored_code = "/// Calculate the sum of two integers\nfn calculate_sum(a: i32, b: i32) -> i32 {\n a + b\n}".to_string();
    let diff = SmartPrompt::diff_preview(&original_code, &refactored_code);
    let preview = SmartPrompt::preview_actions("Refactoring", &["Analyze target file", "Generate suggestions", "Render diff"]);
    Ok(SplashResult {
        message: Some(format!(
            "{}\n🔧 Target: {}\n📋 Diff Preview:\n{}\n✅ Refactoring analysis completed",
            preview,
            target,
            diff
        )),
    })
}
/// `/test` — generate tests for a target file, or spawn a background
/// `cargo test` job when `--background` appears anywhere in the arguments.
///
/// Target priority: explicit args > the tracked active file > "current
/// project". Cleanup: the original built the target via a borrow of a
/// temporary `Option<String>` plus `unwrap_or(&"…".to_string()).clone()`;
/// replaced with a direct `map`/`unwrap_or_else` chain.
async fn handle_test(args: &[String], ctx: &mut CommandContext) -> Result<SplashResult> {
    use crate::utils::test_generation::TestGenerator;
    use crate::core::adapters::ai::factory::AIProviderFactory;
    use crate::utils::config::Config;
    use std::sync::Arc;
    // Background mode: register a tracked job and return immediately.
    if args.iter().any(|arg| arg == "--background") {
        ctx.job_tracker.spawn_job("cargo test");
        let msg = SmartPrompt::background_job_message("cargo test", Duration::from_secs(45));
        return Ok(SplashResult {
            message: Some(format!("🧪 {}", msg)),
        });
    }
    let config = Config::load()?;
    let factory = AIProviderFactory::new(config.clone());
    let ai = Arc::new(factory.create_ai(&config.ai_provider, &config.ai_model)?);
    let generator = TestGenerator::new(ai);
    let target = if args.is_empty() {
        ctx.active_file
            .as_ref()
            .map(|p| p.to_string_lossy().into_owned())
            .unwrap_or_else(|| "current project".to_string())
    } else {
        args.join(" ")
    };
    // NOTE(review): the language is hard-coded to "rust" — confirm whether
    // the detected project context should drive this instead.
    let tests = generator.generate_tests_for_file(&target, "rust").await?;
    Ok(SplashResult {
        message: Some(format!("🧪 Generated tests for: {}\nGenerated content:\n{}", target, tests)),
    })
}
/// `/fix` — render a canned error-fix preview (assignment vs. comparison).
///
/// Cleanup: the original crammed two statements onto one line and built a
/// `ReviewAgent` that was never used. The dead local is removed; provider
/// construction is kept (as `_ai`) so configuration errors still surface.
async fn handle_fix() -> Result<SplashResult> {
    use crate::core::adapters::ai::factory::AIProviderFactory;
    use crate::utils::config::Config;
    use crate::enhanced_ui::smart_prompt::SmartPrompt;
    use std::sync::Arc;
    let config = Config::load()?;
    let factory = AIProviderFactory::new(config.clone());
    // Validates provider/model configuration; the handle itself is unused.
    let _ai = Arc::new(factory.create_ai(&config.ai_provider, &config.ai_model)?);
    // Canned before/after pair demonstrating the fix preview.
    let original_code = "if x = 5 { println!(\"five\"); }".to_string();
    let fixed_code = "if x == 5 { println!(\"five\"); }".to_string();
    let diff = SmartPrompt::diff_preview(&original_code, &fixed_code);
    let preview = SmartPrompt::preview_actions("Fix", &["Detect errors", "Generate fixes", "Apply corrections"]);
    Ok(SplashResult {
        message: Some(format!(
            "{}\n🩺 Potential fix identified:\n📋 Code Diff:\n{}\n✅ Fix analysis completed",
            preview,
            diff
        )),
    })
}
/// `/commit` — stub: announces commit-message drafting; no git interaction yet.
async fn handle_commit() -> Result<SplashResult> {
    let message = "✍️ Drafting semantic commit message".to_string();
    Ok(SplashResult {
        message: Some(message),
    })
}
/// `/review` — render a canned code-review preview with a suggested diff.
///
/// Cleanup: the original built a `ReviewAgent` that was never used. The dead
/// local is removed; provider construction is kept (as `_ai`) solely so
/// configuration errors surface via `?` before the preview is produced.
async fn handle_review() -> Result<SplashResult> {
    use crate::core::adapters::ai::factory::AIProviderFactory;
    use crate::utils::config::Config;
    use crate::enhanced_ui::smart_prompt::SmartPrompt;
    use std::sync::Arc;
    let config = Config::load()?;
    let factory = AIProviderFactory::new(config.clone());
    // Validates provider/model configuration; the handle itself is unused.
    let _ai = Arc::new(factory.create_ai(&config.ai_provider, &config.ai_model)?);
    // Canned before/after pair used to demonstrate the review diff rendering.
    let original_code = "fn process_data(data: Vec<i32>) -> Vec<i32> { data.iter().map(|x| x * 2).collect() }".to_string();
    let improved_code = "// Process data by doubling each element\nfn process_data(data: Vec<i32>) -> Vec<i32> {\n data.iter()\n .map(|x| x * 2) // Double each element\n .collect()\n}".to_string();
    let diff = SmartPrompt::diff_preview(&original_code, &improved_code);
    let preview = SmartPrompt::preview_actions("Review", &["Analyze code", "Check for issues", "Suggest improvements"]);
    Ok(SplashResult {
        message: Some(format!(
            "{}\n🔍 Code Review completed:\n📋 Suggested Changes:\n{}\n✅ Review analysis done",
            preview,
            diff
        )),
    })
}
/// `/doc` — stub: shows the documentation-generation action preview.
/// Arguments are currently ignored.
async fn handle_doc(_args: &[String]) -> Result<SplashResult> {
    let preview = SmartPrompt::preview_actions("Docs", &["Scan codebase", "Generate markdown"]);
    let message = format!("📘 {}", preview);
    Ok(SplashResult {
        message: Some(message),
    })
}
/// `/deploy` — gated behind an explicit confirmation prompt; the first
/// argument (if any) names the target environment.
async fn handle_deploy(args: &[String]) -> Result<SplashResult> {
    let confirmed = SmartPrompt::confirm("Deploy may affect production. Continue?");
    let target: &str = match args.first() {
        Some(environment) => environment,
        None => "default environment",
    };
    Ok(SplashResult {
        message: Some(format!(
            "🚀 Deployment checklist for {} (approved: {})",
            target, confirmed
        )),
    })
}
/// `/model <provider> <model>` — announce a provider/model switch.
/// With fewer than two arguments, returns a usage message instead.
async fn handle_model_switch(args: &[String]) -> Result<SplashResult> {
    let (provider, model) = match args {
        [provider, model, ..] => (provider, model),
        _ => {
            return Ok(SplashResult {
                message: Some(
                    "Usage: /model <provider> <model>. Example: /model ollama qwen2.5-coder-3b".into(),
                ),
            })
        }
    };
    Ok(SplashResult {
        message: Some(format!(
            "Switching provider {} to model {}",
            provider, model
        )),
    })
}
/// `/history` — show up to five recent splash commands, most recent first.
async fn handle_history(ctx: &CommandContext) -> Result<SplashResult> {
    let entries: Vec<String> = ctx.recent_commands.iter().rev().take(5).cloned().collect();
    let message = if entries.is_empty() {
        "No recent splash commands.".to_string()
    } else {
        format!("Recent splash commands:\n{}", entries.join("\n"))
    };
    Ok(SplashResult {
        message: Some(message),
    })
}
/// `/undo` — stub: reports a simulated revert; no real undo is performed.
async fn handle_undo(_ctx: &mut CommandContext) -> Result<SplashResult> {
    let message = "↩️ Reverting last AI action (simulated)".to_string();
    Ok(SplashResult {
        message: Some(message),
    })
}
/// `/jobs` — render the background-job table from the tracker.
async fn handle_jobs(ctx: &mut CommandContext) -> Result<SplashResult> {
    let rendered = ctx.job_tracker.render_jobs();
    Ok(SplashResult {
        message: Some(rendered),
    })
}
/// Tracks background jobs spawned by splash commands.
#[derive(Default, Clone)]
pub struct JobTracker {
    jobs: Vec<JobStatus>,
}
impl JobTracker {
    /// Register a new running job described by `description`.
    pub fn spawn_job(&mut self, description: &str) {
        let status = JobStatus {
            description: description.to_string(),
            started_at: Instant::now(),
            completed: false,
        };
        self.jobs.push(status);
    }
    /// Mark every tracked job as completed.
    pub fn complete_all(&mut self) {
        self.jobs.iter_mut().for_each(|job| job.completed = true);
    }
    /// Mark any still-running job as completed once it has been running for
    /// at least `threshold`.
    pub fn auto_complete_elapsed(&mut self, threshold: Duration) {
        for job in self.jobs.iter_mut() {
            let overdue = !job.completed && job.started_at.elapsed() >= threshold;
            if overdue {
                job.completed = true;
            }
        }
    }
    /// Render one line per job: "<description> - <elapsed>s (done|running)".
    /// Note: elapsed time is measured from the start even for completed jobs.
    pub fn render_jobs(&self) -> String {
        if self.jobs.is_empty() {
            return "No active jobs.".to_string();
        }
        let lines: Vec<String> = self
            .jobs
            .iter()
            .map(|job| {
                let elapsed = Instant::now().duration_since(job.started_at);
                let state = if job.completed { "(done)" } else { "(running)" };
                format!("{} - {:.1}s {}", job.description, elapsed.as_secs_f32(), state)
            })
            .collect();
        lines.join("\n")
    }
    /// Take an owned, point-in-time copy of every job's state.
    pub fn snapshot(&self) -> Vec<JobSnapshot> {
        let mut snapshots = Vec::with_capacity(self.jobs.len());
        for job in &self.jobs {
            snapshots.push(JobSnapshot {
                description: job.description.clone(),
                completed: job.completed,
                duration_secs: job.started_at.elapsed().as_secs_f32(),
            });
        }
        snapshots
    }
}
/// Internal per-job record.
#[derive(Clone)]
struct JobStatus {
    description: String,
    started_at: Instant,
    completed: bool,
}
/// Owned, serializable view of one job's state at a point in time.
#[derive(Clone)]
pub struct JobSnapshot {
    pub description: String,
    pub completed: bool,
    pub duration_secs: f32,
}