use clap_noun_verb::Result;
use clap_noun_verb_macros::verb;
use futures::StreamExt;
use serde::Serialize;
use std::collections::HashMap;
use std::path::PathBuf;

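/// JSON-serializable output of the `generate` verb.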
#[derive(Serialize)]
pub struct GenerateOutput {
    generated_code: String,
    language: Option<String>,
    tokens_used: Option<usize>,
    model: String,
    finish_reason: Option<String>,
}

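/// A single chat turn: the speaker role plus its content.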
#[derive(Serialize)]
pub struct ChatMessage {
    role: String,
    content: String,
}

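/// JSON-serializable transcript and session metadata for the `chat` verb.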
#[derive(Serialize)]
pub struct ChatOutput {
    messages: Vec<ChatMessage>,
    session_id: String,
    model: String,
    tokens_used: Option<usize>,
}

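/// JSON-serializable result of the `analyze` verb.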
#[derive(Serialize)]
pub struct AnalyzeOutput {
    file_path: Option<String>,
    insights: Vec<String>,
    suggestions: Vec<String>,
    complexity_score: Option<f64>,
    model: String,
    tokens_used: Option<usize>,
}

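/// Generate code from a natural-language prompt via the configured LLM,
/// optionally constrained to a target language and seeded with existing code.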
#[allow(clippy::too_many_arguments)]
#[verb]
fn generate(
    prompt: String,
    code: Option<String>,
    model: Option<String>,
    _api_key: Option<String>,
    suggestions: bool,
    language: Option<String>,
    max_tokens: u32,
    temperature: f32,
) -> Result<GenerateOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};

    crate::runtime::block_on(async move {
        let config = LlmConfig {
            model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
            max_tokens: Some(max_tokens),
            temperature: Some(temperature),
            top_p: Some(0.9),
            stop: None,
            extra: HashMap::new(),
        };

        config.validate().map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!("Invalid configuration: {}", e))
        })?;

        let client = GenAiClient::new(config).map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!(
                "Failed to create AI client: {}",
                e
            ))
        })?;

        // Assemble the final prompt from the user prompt plus optional context.
        // `prompt` is owned by this closure, so no clone is needed.
        let mut full_prompt = prompt;

        if let Some(lang) = &language {
            full_prompt.push_str(&format!("\nTarget language: {}", lang));
        }

        if let Some(code) = &code {
            full_prompt.push_str(&format!("\n\nExisting code:\n```\n{}\n```", code));
        }

        if suggestions {
            full_prompt.push_str("\n\nInclude suggestions for improvements and best practices.");
        }

        let response = client.complete(&full_prompt).await.map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!("AI generation failed: {}", e))
        })?;

        Ok(GenerateOutput {
            generated_code: response.content,
            language: language.clone(),
            tokens_used: response.usage.map(|u| u.total_tokens as usize),
            model: response.model,
            finish_reason: response.finish_reason,
        })
    })
}

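/// Chat with the configured LLM: one-shot with `message`, or a REPL-style
/// session with `interactive`; `stream` prints tokens as they arrive.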
#[verb]
fn chat(
    message: Option<String>,
    model: Option<String>,
    _api_key: Option<String>,
    interactive: bool,
    stream: bool,
    max_tokens: u32,
    temperature: f32,
) -> Result<ChatOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};
    use std::io::Write;

    crate::runtime::block_on(async move {
        let config = LlmConfig {
            model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
            max_tokens: Some(max_tokens),
            temperature: Some(temperature),
            top_p: Some(0.9),
            stop: None,
            extra: HashMap::new(),
        };

        let client = GenAiClient::new(config).map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!(
                "Failed to create AI client: {}",
                e
            ))
        })?;

        let session_id = uuid::Uuid::new_v4().to_string();
        let mut messages: Vec<ChatMessage> = Vec::new();
        let mut total_tokens: Option<usize> = None;
        let model_name = client.get_config().model.clone();

        if interactive {
            ggen_utils::alert_info!("🤖 AI Chat - Interactive Mode");
            ggen_utils::alert_info!("Model: {}", model_name);
            ggen_utils::alert_info!("Type 'exit' or 'quit' to end session\n");

            loop {
                eprint!("> ");
                std::io::stderr().flush().map_err(|e| {
                    clap_noun_verb::NounVerbError::execution_error(format!(
                        "Failed to flush stderr: {}",
                        e
                    ))
                })?;

                let mut input = String::new();
                std::io::stdin().read_line(&mut input).map_err(|e| {
                    clap_noun_verb::NounVerbError::execution_error(format!(
                        "Failed to read input: {}",
                        e
                    ))
                })?;

                let input = input.trim();
                if input.is_empty() {
                    continue;
                }

                if input == "exit" || input == "quit" {
                    break;
                }

                messages.push(ChatMessage {
                    role: "user".to_string(),
                    content: input.to_string(),
                });

                if stream {
                    let mut stream = client.complete_stream(input).await.map_err(|e| {
                        clap_noun_verb::NounVerbError::execution_error(format!(
                            "Streaming failed: {}",
                            e
                        ))
                    })?;

                    let mut full_response = String::new();
                    eprint!("🤖: ");
                    while let Some(chunk) = stream.next().await {
                        eprint!("{}", chunk.content);
                        std::io::stderr().flush().map_err(|e| {
                            clap_noun_verb::NounVerbError::execution_error(format!(
                                "Failed to flush stderr: {}",
                                e
                            ))
                        })?;
                        full_response.push_str(&chunk.content);

                        if let Some(usage) = chunk.usage {
                            total_tokens = Some(usage.total_tokens as usize);
                        }
                    }
                    ggen_utils::alert_info!("\n");

                    messages.push(ChatMessage {
                        role: "assistant".to_string(),
                        content: full_response,
                    });
                } else {
                    let response = client.complete(input).await.map_err(|e| {
                        clap_noun_verb::NounVerbError::execution_error(format!(
                            "Chat failed: {}",
                            e
                        ))
                    })?;

                    ggen_utils::alert_info!("🤖: {}\n", response.content);

                    if let Some(usage) = response.usage {
                        total_tokens = Some(usage.total_tokens as usize);
                    }

                    messages.push(ChatMessage {
                        role: "assistant".to_string(),
                        content: response.content,
                    });
                }
            }
        } else if let Some(msg) = message {
            messages.push(ChatMessage {
                role: "user".to_string(),
                content: msg.clone(),
            });

            if stream {
                let mut stream = client.complete_stream(&msg).await.map_err(|e| {
                    clap_noun_verb::NounVerbError::execution_error(format!(
                        "Streaming failed: {}",
                        e
                    ))
                })?;

                let mut full_response = String::new();
                while let Some(chunk) = stream.next().await {
                    eprint!("{}", chunk.content);
                    std::io::stderr().flush().map_err(|e| {
                        clap_noun_verb::NounVerbError::execution_error(format!(
                            "Failed to flush stderr: {}",
                            e
                        ))
                    })?;
                    full_response.push_str(&chunk.content);

                    if let Some(usage) = chunk.usage {
                        total_tokens = Some(usage.total_tokens as usize);
                    }
                }

                messages.push(ChatMessage {
                    role: "assistant".to_string(),
                    content: full_response,
                });
            } else {
                let response = client.complete(&msg).await.map_err(|e| {
                    clap_noun_verb::NounVerbError::execution_error(format!("Chat failed: {}", e))
                })?;

                if let Some(usage) = response.usage {
                    total_tokens = Some(usage.total_tokens as usize);
                }

                messages.push(ChatMessage {
                    role: "assistant".to_string(),
                    content: response.content,
                });
            }
        } else {
            return Err(clap_noun_verb::NounVerbError::execution_error(
                "Provide a message or use --interactive for chat session",
            ));
        }

        Ok(ChatOutput {
            messages,
            session_id,
            model: model_name,
            tokens_used: total_tokens,
        })
    })
}

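/// Ask the LLM to analyze inline `code`, a single `file`, or a whole
/// `project` directory, with optional complexity/security/performance focus.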
#[allow(clippy::too_many_arguments)]
#[verb]
fn analyze(
    code: Option<String>,
    file: Option<PathBuf>,
    project: Option<PathBuf>,
    model: Option<String>,
    api_key: Option<String>,
    complexity: bool,
    security: bool,
    performance: bool,
    max_tokens: u32,
) -> Result<AnalyzeOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};

    crate::runtime::block_on(async move {
        let (code_content, file_path) = if let Some(code_str) = code {
            (code_str, None)
        } else if let Some(file_path) = &file {
            let content = std::fs::read_to_string(file_path).map_err(|e| {
                clap_noun_verb::NounVerbError::execution_error(format!(
                    "Failed to read file: {}",
                    e
                ))
            })?;
            (content, Some(file_path.display().to_string()))
        } else if let Some(project_path) = &project {
            return analyze_project(project_path, model, api_key, max_tokens).await;
        } else {
            return Err(clap_noun_verb::NounVerbError::execution_error(
                "Provide code, --file, or --project to analyze",
            ));
        };

        let config = LlmConfig {
            model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
            max_tokens: Some(max_tokens),
            temperature: Some(0.3),
            top_p: Some(0.9),
            stop: None,
            extra: HashMap::new(),
        };

        let client = GenAiClient::new(config).map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!(
                "Failed to create AI client: {}",
                e
            ))
        })?;

        let mut prompt = format!(
            "Analyze the following code and provide insights:\n\n```\n{}\n```\n\n",
            code_content
        );

        prompt.push_str("Provide:\n");
        prompt.push_str("1. Key insights about the code structure and design\n");
        prompt.push_str("2. Suggestions for improvements\n");

        if complexity {
            prompt.push_str("3. Complexity analysis (cyclomatic, cognitive)\n");
        }
        if security {
            prompt.push_str("4. Security considerations and potential vulnerabilities\n");
        }
        if performance {
            prompt.push_str("5. Performance optimization opportunities\n");
        }

        prompt.push_str("\nFormat your response with clear sections.");

        let response = client.complete(&prompt).await.map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!("Analysis failed: {}", e))
        })?;

        let (insights, suggestions) = parse_analysis_response(&response.content);

        let complexity_score = if complexity {
            Some(estimate_complexity(&code_content))
        } else {
            None
        };

        Ok(AnalyzeOutput {
            file_path,
            insights,
            suggestions,
            complexity_score,
            model: response.model,
            tokens_used: response.usage.map(|u| u.total_tokens as usize),
        })
    })
}

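/// Project-level analysis: walk the tree for source files and ask the LLM
/// for architecture-level insights based on the file listing.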
async fn analyze_project(
    project_path: &PathBuf,
    model: Option<String>,
    _api_key: Option<String>,
    max_tokens: u32,
) -> Result<AnalyzeOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};
    use walkdir::WalkDir;

    // Collect Rust, TOML, and Markdown files up to five directories deep.
    let mut source_files = Vec::new();
    for entry in WalkDir::new(project_path)
        .max_depth(5)
        .into_iter()
        .filter_map(|e| e.ok())
    {
        let path = entry.path();
        if path.is_file() {
            if let Some(ext) = path.extension() {
                if matches!(ext.to_str(), Some("rs") | Some("toml") | Some("md")) {
                    source_files.push(path.to_path_buf());
                }
            }
        }
    }

    let config = LlmConfig {
        model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
        max_tokens: Some(max_tokens),
        temperature: Some(0.3),
        top_p: Some(0.9),
        stop: None,
        extra: HashMap::new(),
    };

    let client = GenAiClient::new(config).map_err(|e| {
        clap_noun_verb::NounVerbError::execution_error(format!(
            "Failed to create AI client: {}",
            e
        ))
    })?;

    let file_list: Vec<String> = source_files
        .iter()
        .map(|p| p.display().to_string())
        .collect();

    let prompt = format!(
        "Analyze this project structure:\n\nProject: {}\n\nFiles:\n{}\n\n\
         Provide insights about:\n\
         1. Project architecture and organization\n\
         2. Code quality and design patterns\n\
         3. Suggested improvements\n\
         4. Potential issues or technical debt",
        project_path.display(),
        file_list.join("\n")
    );

    let response = client.complete(&prompt).await.map_err(|e| {
        clap_noun_verb::NounVerbError::execution_error(format!("Project analysis failed: {}", e))
    })?;

    let (insights, suggestions) = parse_analysis_response(&response.content);

    Ok(AnalyzeOutput {
        file_path: Some(project_path.display().to_string()),
        insights,
        suggestions,
        complexity_score: None,
        model: response.model,
        tokens_used: response.usage.map(|u| u.total_tokens as usize),
    })
}

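/// Split an LLM response into insight and suggestion bullets by tracking
/// which section header ("insight" vs. "suggestion"/"improvement") was seen last.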
fn parse_analysis_response(response: &str) -> (Vec<String>, Vec<String>) {
    let mut insights = Vec::new();
    let mut suggestions = Vec::new();

    let mut current_section = "";
    for line in response.lines() {
        let line = line.trim();
        let lower = line.to_lowercase();

        // A section header switches the bucket that subsequent bullets land in.
        if lower.contains("insight") {
            current_section = "insights";
            continue;
        } else if lower.contains("suggestion") || lower.contains("improvement") {
            current_section = "suggestions";
            continue;
        }

        // Keep only numbered or bulleted lines, stripping the list marker.
        if !line.is_empty() && line.starts_with(|c: char| c.is_numeric() || c == '-' || c == '*') {
            let cleaned = line
                .trim_start_matches(|c: char| c.is_numeric() || c == '.' || c == '-' || c == '*')
                .trim()
                .to_string();

            match current_section {
                "insights" => insights.push(cleaned),
                "suggestions" => suggestions.push(cleaned),
                _ => insights.push(cleaned),
            }
        }
    }

    // If no structured sections were found, return the raw response as a single insight.
    if insights.is_empty() && suggestions.is_empty() {
        insights.push(response.to_string());
    }

    (insights, suggestions)
}

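/// Very rough complexity estimate based on substring counts of control-flow
/// keywords, braces, and `fn ` occurrences; a heuristic, not a parser.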
fn estimate_complexity(code: &str) -> f64 {
    let mut complexity = 1.0;

    // Rough cyclomatic proxy: count control-flow keyword occurrences. Note that
    // `matches` counts substrings, so identifiers containing these keywords
    // inflate the score; this is a heuristic, not a parser.
    let control_flow = ["if", "else", "match", "for", "while", "loop"];
    for keyword in &control_flow {
        complexity += code.matches(keyword).count() as f64;
    }

    // Use the total brace count as a crude stand-in for nesting depth.
    let nesting_level = code.matches('{').count().max(1) as f64;
    complexity *= nesting_level.log10().max(1.0);

    // Each function adds a small constant amount.
    complexity += code.matches("fn ").count() as f64 * 0.5;

    // Clamp to 100 and round to one decimal place.
    (complexity.min(100.0) * 10.0).round() / 10.0
}
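
// Illustrative sanity checks for the two pure helpers above. This is a
// minimal sketch added for documentation purposes, not part of the original
// file; the expected values follow directly from the heuristics as written.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_splits_sections_into_buckets() {
        let response = "Insights:\n1. Uses a layered design\nSuggestions:\n- Add error handling";
        let (insights, suggestions) = parse_analysis_response(response);
        assert_eq!(insights, vec!["Uses a layered design".to_string()]);
        assert_eq!(suggestions, vec!["Add error handling".to_string()]);
    }

    #[test]
    fn parse_falls_back_to_raw_response() {
        // Without section headers or bullets, the whole response becomes one insight.
        let response = "No bullet points here.";
        let (insights, suggestions) = parse_analysis_response(response);
        assert_eq!(insights, vec![response.to_string()]);
        assert!(suggestions.is_empty());
    }

    #[test]
    fn complexity_baseline_for_straight_line_code() {
        // Base 1.0, no control-flow keywords, one brace (log10(1) clamps to 1.0),
        // and one `fn ` adding 0.5 => 1.5.
        let simple = "fn main() { println!(\"hi\"); }";
        assert_eq!(estimate_complexity(simple), 1.5);
    }
}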