use crate::ai::{analyze, generate};
use ggen_ai::{AiConfig, GenAiClient, LlmClient};
use ggen_utils::error::Result;
use serde_json::json;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::Path;
/// Outcome of a successful [`execute_generate`] call.
#[derive(Debug, Clone)]
pub struct ExecuteGenerateResult {
    /// Analysis / generated output text produced by the generator.
    pub analysis: String,
    /// Improvement suggestions; empty when none were requested or returned.
    pub suggestions: Vec<String>,
    /// Name of the model that actually produced the result.
    pub model_used: String,
    /// Wall-clock duration of the generation call, in milliseconds.
    pub processing_time_ms: u64,
}
/// Outcome of a successful [`execute_analyze`] call.
#[derive(Debug, Clone)]
pub struct ExecuteAnalyzeResult {
    /// Analysis text for the provided code snippet or file/project path.
    pub analysis: String,
    /// Display form of the analyzed path, or `None` when a raw code snippet
    /// was analyzed instead of a file.
    pub file_analyzed: Option<String>,
}
/// Outcome of a successful [`execute_chat`] call.
#[derive(Debug, Clone)]
pub struct ExecuteChatResult {
    /// The assistant's reply text.
    pub response: String,
    /// Name of the model reported by the LLM response.
    pub model_used: String,
}
/// Append one structured JSON entry to the debug log file.
///
/// Fix: the log destination was hard-coded to a developer-machine absolute
/// path (`/Users/sac/ggen/.cursor/debug.log`), so debug logging silently did
/// nothing on any other machine. The path can now be overridden via the
/// `GGEN_DEBUG_LOG` environment variable; the legacy path remains the default
/// so existing workflows are unchanged.
///
/// All I/O errors are deliberately swallowed: debug logging must never be able
/// to affect program behavior.
fn log_debug(
    session_id: &str, run_id: &str, hypothesis_id: &str, location: &str, message: &str,
    data: serde_json::Value,
) {
    // Env override first; fall back to the original hard-coded location.
    let log_path = std::env::var("GGEN_DEBUG_LOG")
        .unwrap_or_else(|_| "/Users/sac/ggen/.cursor/debug.log".to_string());
    if let Ok(mut file) = OpenOptions::new()
        .create(true)
        .append(true)
        .open(&log_path)
    {
        // Milliseconds since the Unix epoch; 0 if the system clock reads
        // before the epoch (avoids a panic instead of unwrapping).
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);
        let entry = json!({
            "sessionId": session_id,
            "runId": run_id,
            "hypothesisId": hypothesis_id,
            "location": location,
            "message": message,
            "data": data,
            "timestamp": timestamp,
        });
        // Best-effort write; failures are intentionally ignored.
        let _ = writeln!(file, "{}", entry);
    }
}
/// Run AI code generation for `prompt`.
///
/// Optionally seeds generation with existing `code`, selects a specific
/// `model`, and requests improvement `suggestions`. Returns the analysis
/// text, any suggestions, the model actually used, and the wall-clock
/// processing time in milliseconds.
///
/// # Errors
/// Fails when the prompt is empty after trimming, or when the underlying
/// generation call fails.
pub async fn execute_generate(
    prompt: &str, code: Option<&str>, model: Option<&str>, suggestions: bool,
) -> Result<ExecuteGenerateResult> {
    log_debug(
        "debug-session",
        "run1",
        "H1",
        "ai/execute.rs:execute_generate:entry",
        "entered execute_generate",
        json!({
            "prompt_len": prompt.len(),
            "prompt_trimmed_empty": prompt.trim().is_empty(),
            "has_code": code.is_some(),
            "model": model.map(|m| m.to_string()),
            "suggestions": suggestions,
        }),
    );
    // Reject whitespace-only prompts before doing any work.
    if prompt.trim().is_empty() {
        return Err(ggen_utils::error::Error::new("Prompt cannot be empty"));
    }
    // Assemble generation options via the builder API, layering in each
    // optional setting only when the caller supplied it.
    let mut opts =
        generate::GenerateOptions::new(prompt).with_format(generate::OutputFormat::Json);
    if suggestions {
        opts = opts.with_suggestions();
    }
    if let Some(snippet) = code {
        opts = opts.with_code(snippet.to_string());
    }
    if let Some(name) = model {
        opts = opts.with_model(name);
    }
    // Time only the generation call itself, not option assembly or logging.
    let started = std::time::Instant::now();
    let generated = generate::generate_code(&opts).await?;
    let processing_time_ms = started.elapsed().as_millis() as u64;
    log_debug(
        "debug-session",
        "run1",
        "H1",
        "ai/execute.rs:execute_generate:ok",
        "execute_generate succeeded",
        json!({
            "model_used": generated.metadata.model_used,
            "processing_time_ms": processing_time_ms,
            "suggestions_count": generated.suggestions.as_ref().map(|s| s.len()).unwrap_or(0),
        }),
    );
    Ok(ExecuteGenerateResult {
        analysis: generated.analysis,
        suggestions: generated.suggestions.unwrap_or_default(),
        model_used: generated.metadata.model_used,
        processing_time_ms,
    })
}
/// Analyze either an inline code snippet or a file/project path.
///
/// When both `code` and `file` are supplied, the code snippet takes
/// precedence. Returns the analysis text plus the display form of the
/// analyzed path (when a path was given).
///
/// # Errors
/// Fails when neither input is supplied, when the snippet is empty after
/// trimming, when the path does not exist, or when analysis itself fails.
pub async fn execute_analyze(
    code: Option<&str>, file: Option<&Path>,
) -> Result<ExecuteAnalyzeResult> {
    log_debug(
        "debug-session",
        "run1",
        "H3",
        "ai/execute.rs:execute_analyze:entry",
        "entered execute_analyze",
        json!({
            "has_code": code.is_some(),
            "code_len": code.map(|c| c.len()),
            "has_file": file.is_some(),
            "file": file.map(|p| p.display().to_string()),
        }),
    );
    if code.is_none() && file.is_none() {
        return Err(ggen_utils::error::Error::new(
            "Must provide either code or file to analyze",
        ));
    }
    // Dispatch on the input pair; the snippet branch wins when both are set.
    let analysis_text = match (code, file) {
        (Some(snippet), _) => {
            if snippet.trim().is_empty() {
                return Err(ggen_utils::error::Error::new("Code cannot be empty"));
            }
            log_debug(
                "debug-session",
                "run1",
                "H3",
                "ai/execute.rs:execute_analyze:branch",
                "analyzing code snippet",
                json!({
                    "trimmed_empty": snippet.trim().is_empty(),
                    "code_len": snippet.len(),
                }),
            );
            analyze::analyze_code(snippet).await?
        }
        (None, Some(path)) => {
            if !path.exists() {
                return Err(ggen_utils::error::Error::new(&format!(
                    "Path does not exist: {}",
                    path.display()
                )));
            }
            log_debug(
                "debug-session",
                "run1",
                "H3",
                "ai/execute.rs:execute_analyze:branch",
                "analyzing file path",
                json!({
                    "path": path.display().to_string(),
                    "exists": path.exists(),
                }),
            );
            analyze::analyze_project(path).await?
        }
        // Both None was rejected above, so this arm cannot be reached.
        (None, None) => unreachable!("Already validated input above"),
    };
    log_debug(
        "debug-session",
        "run1",
        "H3",
        "ai/execute.rs:execute_analyze:ok",
        "execute_analyze succeeded",
        json!({
            "analysis_len": analysis_text.len(),
            "file_analyzed": file.map(|p| p.display().to_string()),
        }),
    );
    Ok(ExecuteAnalyzeResult {
        analysis: analysis_text,
        file_analyzed: file.map(|p| p.display().to_string()),
    })
}
/// Send a single chat `message` to the configured LLM and return its reply.
///
/// `model` overrides the model name from the environment-derived config.
/// Defaults of 512 max tokens and temperature 0.7 are filled in only when the
/// config leaves them unset.
///
/// # Errors
/// Fails on an empty message, client construction failure, or a failed
/// completion call.
pub async fn execute_chat(message: &str, model: Option<&str>) -> Result<ExecuteChatResult> {
    log_debug(
        "debug-session",
        "run1",
        "H4",
        "ai/execute.rs:execute_chat:entry",
        "entered execute_chat",
        json!({
            "message_len": message.len(),
            "message_trimmed_empty": message.trim().is_empty(),
            "model": model.map(|m| m.to_string()),
        }),
    );
    // Whitespace-only messages are rejected before any client work happens.
    if message.trim().is_empty() {
        return Err(ggen_utils::error::Error::new("Message cannot be empty"));
    }
    // Start from the env-derived config (falling back to defaults), then
    // layer on caller overrides and fill in any missing tuning values.
    let mut llm_config = AiConfig::from_env().unwrap_or_else(|_| AiConfig::new()).llm;
    if let Some(chosen) = model {
        llm_config.model = chosen.to_string();
    }
    llm_config.max_tokens.get_or_insert(512);
    llm_config.temperature.get_or_insert(0.7);
    let client = GenAiClient::new(llm_config)
        .map_err(|e| ggen_utils::error::Error::new(&format!("Failed to create LLM client: {e}")))?;
    let prompt = format!(
        "You are a concise Rust assistant. Respond helpfully to the user.
User: {}",
        message
    );
    let resp = client
        .complete(&prompt)
        .await
        .map_err(|e| ggen_utils::error::Error::new(&format!("Chat failed: {e}")))?;
    log_debug(
        "debug-session",
        "run1",
        "H4",
        "ai/execute.rs:execute_chat:ok",
        "execute_chat response",
        json!({
            "model_used": resp.model,
            "response_preview": resp.content.chars().take(80).collect::<String>(),
        }),
    );
    Ok(ExecuteChatResult {
        response: resp.content,
        model_used: resp.model,
    })
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Empty and whitespace-only prompts must be rejected.
    #[tokio::test]
    async fn test_execute_generate_validation() {
        assert!(execute_generate("", None, None, false).await.is_err());
        assert!(execute_generate(" ", None, None, false).await.is_err());
    }

    /// Missing input, empty snippet, and nonexistent paths must all fail.
    #[tokio::test]
    async fn test_execute_analyze_validation() {
        assert!(execute_analyze(None, None).await.is_err());
        assert!(execute_analyze(Some(""), None).await.is_err());
        assert!(execute_analyze(None, Some(Path::new("/nonexistent/path")))
            .await
            .is_err());
    }

    /// Empty and whitespace-only chat messages must be rejected.
    #[tokio::test]
    async fn test_execute_chat_validation() {
        assert!(execute_chat("", None).await.is_err());
        assert!(execute_chat(" ", None).await.is_err());
    }

    /// Round-trip through a real LLM; only runs when explicitly requested.
    #[ignore = "Requires API key and network access - integration test"]
    #[tokio::test]
    async fn test_execute_chat_placeholder() {
        let outcome = execute_chat("hello", Some("gpt-4")).await;
        assert!(outcome.is_ok());
        let chat = outcome.unwrap();
        assert!(!chat.response.is_empty());
        assert_eq!(chat.model_used, "gpt-4");
    }
}