use clap_noun_verb::Result;
use clap_noun_verb_macros::verb;
use futures::StreamExt;
use serde::Serialize;
use std::collections::HashMap;
use std::path::PathBuf;

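/// Serialized result of the `generate` verb.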
#[derive(Serialize)]
pub struct GenerateOutput {
    generated_code: String,
    language: Option<String>,
    tokens_used: Option<usize>,
    model: String,
    finish_reason: Option<String>,
}

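/// A single message exchanged during a chat session.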
#[derive(Serialize)]
pub struct ChatMessage {
    role: String,
    content: String,
}

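/// Serialized result of the `chat` verb.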
#[derive(Serialize)]
pub struct ChatOutput {
    messages: Vec<ChatMessage>,
    session_id: String,
    model: String,
    tokens_used: Option<usize>,
}

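/// Serialized result of the `analyze` verb.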
#[derive(Serialize)]
pub struct AnalyzeOutput {
    file_path: Option<String>,
    insights: Vec<String>,
    suggestions: Vec<String>,
    complexity_score: Option<f64>,
    model: String,
    tokens_used: Option<usize>,
}

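/// Generate code from a natural-language prompt, optionally seeded with
/// existing code, a target language, and improvement suggestions.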
#[verb]
fn generate(
    prompt: String,
    code: Option<String>,
    model: Option<String>,
    api_key: Option<String>,
    suggestions: bool,
    language: Option<String>,
    max_tokens: u32,
    temperature: f32,
) -> Result<GenerateOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};

    crate::runtime::block_on(async move {
        let mut config = LlmConfig {
            model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
            max_tokens: Some(max_tokens),
            temperature: Some(temperature),
            top_p: Some(0.9),
            stop: None,
            extra: HashMap::new(),
        };

        config.validate().map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Invalid configuration: {}", e)))?;

        let client = GenAiClient::new(config)
            .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Failed to create AI client: {}", e)))?;

        let mut full_prompt = prompt.clone();

        if let Some(lang) = &language {
            full_prompt.push_str(&format!("\nTarget language: {}", lang));
        }

        if let Some(code) = &code {
            full_prompt.push_str(&format!("\n\nExisting code:\n```\n{}\n```", code));
        }

        if suggestions {
            full_prompt.push_str("\n\nInclude suggestions for improvements and best practices.");
        }

        let response = client
            .complete(&full_prompt)
            .await
            .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("AI generation failed: {}", e)))?;

        Ok(GenerateOutput {
            generated_code: response.content,
            language: language.clone(),
            tokens_used: response.usage.map(|u| u.total_tokens as usize),
            model: response.model,
            finish_reason: response.finish_reason,
        })
    })
}

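/// Chat with the configured model, either as a single message or an
/// interactive session, with optional streamed responses.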
#[verb]
fn chat(
    message: Option<String>,
    model: Option<String>,
    api_key: Option<String>,
    interactive: bool,
    stream: bool,
    max_tokens: u32,
    temperature: f32,
) -> Result<ChatOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};
    use std::io::Write;

    crate::runtime::block_on(async move {
        let config = LlmConfig {
            model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
            max_tokens: Some(max_tokens),
            temperature: Some(temperature),
            top_p: Some(0.9),
            stop: None,
            extra: HashMap::new(),
        };

        let client = GenAiClient::new(config)
            .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Failed to create AI client: {}", e)))?;

        let session_id = uuid::Uuid::new_v4().to_string();
        let mut messages: Vec<ChatMessage> = Vec::new();
        let mut total_tokens: Option<usize> = None;
        let model_name = client.get_config().model.clone();

        if interactive {
            // Interactive mode: read user input from stdin and write the
            // conversation UI to stderr.
            eprintln!("🤖 AI Chat - Interactive Mode");
            eprintln!("Model: {}", model_name);
            eprintln!("Type 'exit' or 'quit' to end session\n");

            loop {
                eprint!("> ");
                std::io::stderr().flush().unwrap();

                let mut input = String::new();
                std::io::stdin()
                    .read_line(&mut input)
                    .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Failed to read input: {}", e)))?;

                let input = input.trim();
                if input.is_empty() {
                    continue;
                }

                if input == "exit" || input == "quit" {
                    break;
                }

                messages.push(ChatMessage {
                    role: "user".to_string(),
                    content: input.to_string(),
                });

                if stream {
                    let mut stream = client
                        .complete_stream(input)
                        .await
                        .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Streaming failed: {}", e)))?;

                    let mut full_response = String::new();
                    eprint!("🤖: ");
                    while let Some(chunk) = stream.next().await {
                        eprint!("{}", chunk.content);
                        std::io::stderr().flush().unwrap();
                        full_response.push_str(&chunk.content);

                        if let Some(usage) = chunk.usage {
                            total_tokens = Some(usage.total_tokens as usize);
                        }
                    }
                    eprintln!("\n");

                    messages.push(ChatMessage {
                        role: "assistant".to_string(),
                        content: full_response,
                    });
                } else {
                    let response = client
                        .complete(input)
                        .await
                        .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Chat failed: {}", e)))?;

                    eprintln!("🤖: {}\n", response.content);

                    if let Some(usage) = response.usage {
                        total_tokens = Some(usage.total_tokens as usize);
                    }

                    messages.push(ChatMessage {
                        role: "assistant".to_string(),
                        content: response.content,
                    });
                }
            }
        } else if let Some(msg) = message {
            messages.push(ChatMessage {
                role: "user".to_string(),
                content: msg.clone(),
            });

            if stream {
                let mut stream = client
                    .complete_stream(&msg)
                    .await
                    .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Streaming failed: {}", e)))?;

                let mut full_response = String::new();
                while let Some(chunk) = stream.next().await {
                    eprint!("{}", chunk.content);
                    std::io::stderr().flush().unwrap();
                    full_response.push_str(&chunk.content);

                    if let Some(usage) = chunk.usage {
                        total_tokens = Some(usage.total_tokens as usize);
                    }
                }
                eprintln!();

                messages.push(ChatMessage {
                    role: "assistant".to_string(),
                    content: full_response,
                });
            } else {
                let response = client
                    .complete(&msg)
                    .await
                    .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Chat failed: {}", e)))?;

                if let Some(usage) = response.usage {
                    total_tokens = Some(usage.total_tokens as usize);
                }

                messages.push(ChatMessage {
                    role: "assistant".to_string(),
                    content: response.content,
                });
            }
        } else {
            return Err(clap_noun_verb::NounVerbError::execution_error(
                "Provide a message or use --interactive to start a chat session",
            ));
        }

        Ok(ChatOutput {
            messages,
            session_id,
            model: model_name,
            tokens_used: total_tokens,
        })
    })
}

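/// Analyze inline code, a file, or a whole project and report insights,
/// suggestions, and an optional complexity estimate.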
#[verb]
fn analyze(
    code: Option<String>,
    file: Option<PathBuf>,
    project: Option<PathBuf>,
    model: Option<String>,
    api_key: Option<String>,
    complexity: bool,
    security: bool,
    performance: bool,
    max_tokens: u32,
) -> Result<AnalyzeOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};

    crate::runtime::block_on(async move {
        // Resolve the analysis target: inline code, a single file, or a whole project.
        let (code_content, file_path) = if let Some(code_str) = code {
            (code_str, None)
        } else if let Some(file_path) = &file {
            let content = std::fs::read_to_string(file_path)
                .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Failed to read file: {}", e)))?;
            (content, Some(file_path.display().to_string()))
        } else if let Some(project_path) = &project {
            return analyze_project(project_path, model, api_key, max_tokens).await;
        } else {
            return Err(clap_noun_verb::NounVerbError::execution_error("Provide code, --file, or --project to analyze"));
        };

        let config = LlmConfig {
            model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
            max_tokens: Some(max_tokens),
            temperature: Some(0.3),
            top_p: Some(0.9),
            stop: None,
            extra: HashMap::new(),
        };

        let client = GenAiClient::new(config)
            .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Failed to create AI client: {}", e)))?;

        let mut prompt = format!(
            "Analyze the following code and provide insights:\n\n```\n{}\n```\n\n",
            code_content
        );

        prompt.push_str("Provide:\n");
        prompt.push_str("1. Key insights about the code structure and design\n");
        prompt.push_str("2. Suggestions for improvements\n");

        if complexity {
            prompt.push_str("3. Complexity analysis (cyclomatic, cognitive)\n");
        }
        if security {
            prompt.push_str("4. Security considerations and potential vulnerabilities\n");
        }
        if performance {
            prompt.push_str("5. Performance optimization opportunities\n");
        }

        prompt.push_str("\nFormat your response with clear sections.");

        let response = client
            .complete(&prompt)
            .await
            .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Analysis failed: {}", e)))?;

        let (insights, suggestions) = parse_analysis_response(&response.content);

        let complexity_score = if complexity {
            Some(estimate_complexity(&code_content))
        } else {
            None
        };

        Ok(AnalyzeOutput {
            file_path,
            insights,
            suggestions,
            complexity_score,
            model: response.model,
            tokens_used: response.usage.map(|u| u.total_tokens as usize),
        })
    })
}

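/// Walk a project directory, list its source files, and ask the model for
/// architecture-level insights.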
async fn analyze_project(
    project_path: &PathBuf,
    model: Option<String>,
    api_key: Option<String>,
    max_tokens: u32,
) -> Result<AnalyzeOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};
    use walkdir::WalkDir;

    // Collect Rust, TOML, and Markdown files up to five directory levels deep.
    let mut source_files = Vec::new();
    for entry in WalkDir::new(project_path)
        .max_depth(5)
        .into_iter()
        .filter_map(|e| e.ok())
    {
        let path = entry.path();
        if path.is_file() {
            if let Some(ext) = path.extension() {
                if matches!(ext.to_str(), Some("rs") | Some("toml") | Some("md")) {
                    source_files.push(path.to_path_buf());
                }
            }
        }
    }

    let config = LlmConfig {
        model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
        max_tokens: Some(max_tokens),
        temperature: Some(0.3),
        top_p: Some(0.9),
        stop: None,
        extra: HashMap::new(),
    };

    let client = GenAiClient::new(config)
        .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Failed to create AI client: {}", e)))?;

    let file_list: Vec<String> = source_files
        .iter()
        .map(|p| p.display().to_string())
        .collect();

    let prompt = format!(
        "Analyze this project structure:\n\nProject: {}\n\nFiles:\n{}\n\n\
         Provide insights about:\n\
         1. Project architecture and organization\n\
         2. Code quality and design patterns\n\
         3. Suggested improvements\n\
         4. Potential issues or technical debt",
        project_path.display(),
        file_list.join("\n")
    );

    let response = client
        .complete(&prompt)
        .await
        .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Project analysis failed: {}", e)))?;

    let (insights, suggestions) = parse_analysis_response(&response.content);

    Ok(AnalyzeOutput {
        file_path: Some(project_path.display().to_string()),
        insights,
        suggestions,
        complexity_score: None,
        model: response.model,
        tokens_used: response.usage.map(|u| u.total_tokens as usize),
    })
}

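/// Split a free-form model response into insight and suggestion lists by
/// scanning for section headings and numbered or bulleted items.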
fn parse_analysis_response(response: &str) -> (Vec<String>, Vec<String>) {
    let mut insights = Vec::new();
    let mut suggestions = Vec::new();

    // Track which section a bullet belongs to based on the most recent heading.
    let mut current_section = "";
    for line in response.lines() {
        let line = line.trim();

        if line.to_lowercase().contains("insight") {
            current_section = "insights";
            continue;
        } else if line.to_lowercase().contains("suggestion")
            || line.to_lowercase().contains("improvement")
        {
            current_section = "suggestions";
            continue;
        }

        // Collect numbered or bulleted items, stripping the list marker.
        if !line.is_empty() && line.starts_with(|c: char| c.is_numeric() || c == '-' || c == '*') {
            let cleaned = line
                .trim_start_matches(|c: char| c.is_numeric() || c == '.' || c == '-' || c == '*')
                .trim()
                .to_string();

            match current_section {
                "insights" => insights.push(cleaned),
                "suggestions" => suggestions.push(cleaned),
                _ => {
                    insights.push(cleaned);
                }
            }
        }
    }

    // Fall back to returning the whole response if no structured items were found.
    if insights.is_empty() && suggestions.is_empty() {
        insights.push(response.to_string());
    }

    (insights, suggestions)
}

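/// Rough complexity heuristic based on control-flow keyword counts, brace
/// nesting, and function count, capped at 100.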
fn estimate_complexity(code: &str) -> f64 {
    let mut complexity = 1.0;

    // Count occurrences of control-flow keywords (simple substring matching).
    let control_flow = ["if", "else", "match", "for", "while", "loop"];
    for keyword in &control_flow {
        complexity += code.matches(keyword).count() as f64;
    }

    // Use the brace count as a rough proxy for nesting depth.
    let nesting_level = code.matches('{').count().max(1) as f64;
    complexity *= nesting_level.log10().max(1.0);

    // Each function adds a small amount of complexity.
    complexity += code.matches("fn ").count() as f64 * 0.5;

    // Cap at 100 and round to one decimal place.
    (complexity.min(100.0) * 10.0).round() / 10.0
}