ggen_cli_lib/cmds/ai.rs

//! AI commands - clap-noun-verb v3.4.0 migration
//!
//! This module implements AI operations using the #[verb] macro pattern for
//! automatic discovery and JSON output support.

use clap_noun_verb::Result;
use clap_noun_verb_macros::verb;
use futures::StreamExt;
use serde::Serialize;
use std::collections::HashMap;
use std::path::{Path, PathBuf};

// ============================================================================
// Output Types (all must derive Serialize for JSON output)
// ============================================================================

#[derive(Serialize)]
pub struct GenerateOutput {
    generated_code: String,
    language: Option<String>,
    tokens_used: Option<usize>,
    model: String,
    finish_reason: Option<String>,
}
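
// For orientation, a successful `generate` call serializes to JSON roughly as
// follows (values are illustrative; `Option` fields serialize as `null` when
// absent):
//
// {
//   "generated_code": "fn fib(n: u64) -> u64 { ... }",
//   "language": "rust",
//   "tokens_used": 142,
//   "model": "gpt-3.5-turbo",
//   "finish_reason": "stop"
// }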

#[derive(Serialize)]
pub struct ChatMessage {
    role: String,
    content: String,
}

#[derive(Serialize)]
pub struct ChatOutput {
    messages: Vec<ChatMessage>,
    session_id: String,
    model: String,
    tokens_used: Option<usize>,
}

#[derive(Serialize)]
pub struct AnalyzeOutput {
    file_path: Option<String>,
    insights: Vec<String>,
    suggestions: Vec<String>,
    complexity_score: Option<f64>,
    model: String,
    tokens_used: Option<usize>,
}

// ============================================================================
// Verb Functions (the actual CLI commands)
// ============================================================================

/// Generate code with AI assistance
///
/// # Examples
///
/// Basic generation:
/// ```bash
/// ggen ai generate "Create a Rust function that calculates fibonacci numbers"
/// ```
///
/// With existing code context:
/// ```bash
/// ggen ai generate "Add error handling" --code "fn main() { println!(\"hello\"); }"
/// ```
///
/// With specific model:
/// ```bash
/// ggen ai generate "Generate REST API" --model gpt-4 --api-key $OPENAI_API_KEY
/// ```
#[allow(clippy::too_many_arguments)] // CLI command with many options
#[verb]
fn generate(
    prompt: String, code: Option<String>, model: Option<String>, _api_key: Option<String>,
    suggestions: bool, language: Option<String>, max_tokens: u32, temperature: f32,
) -> Result<GenerateOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};

    crate::runtime::block_on(async move {
        // Build configuration
        let config = LlmConfig {
            model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
            max_tokens: Some(max_tokens),
            temperature: Some(temperature),
            top_p: Some(0.9),
            stop: None,
            extra: HashMap::new(),
        };

        // Validate configuration
        config.validate().map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!("Invalid configuration: {}", e))
        })?;

        // Create client
        let client = GenAiClient::new(config).map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!(
                "Failed to create AI client: {}",
                e
            ))
        })?;

        // Build prompt
        let mut full_prompt = prompt;

        if let Some(lang) = &language {
            full_prompt.push_str(&format!("\nTarget language: {}", lang));
        }

        if let Some(code) = &code {
            full_prompt.push_str(&format!("\n\nExisting code:\n```\n{}\n```", code));
        }

        if suggestions {
            full_prompt.push_str("\n\nInclude suggestions for improvements and best practices.");
        }

        // Generate response
        let response = client.complete(&full_prompt).await.map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!("AI generation failed: {}", e))
        })?;

        Ok(GenerateOutput {
            generated_code: response.content,
            language,
            tokens_used: response.usage.map(|u| u.total_tokens as usize),
            model: response.model,
            finish_reason: response.finish_reason,
        })
    })
}

/// Interactive AI chat session
///
/// # Examples
///
/// Single message:
/// ```bash
/// ggen ai chat "Explain Rust ownership"
/// ```
///
/// Interactive mode:
/// ```bash
/// ggen ai chat --interactive --model claude-3-sonnet-20240229
/// ```
///
/// Stream response:
/// ```bash
/// ggen ai chat "Write a web server" --stream --api-key $OPENAI_API_KEY
/// ```
#[verb]
fn chat(
    message: Option<String>, model: Option<String>, _api_key: Option<String>, interactive: bool,
    stream: bool, max_tokens: u32, temperature: f32,
) -> Result<ChatOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};
    use std::io::Write;

    crate::runtime::block_on(async move {
        // Build configuration
        let config = LlmConfig {
            model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
            max_tokens: Some(max_tokens),
            temperature: Some(temperature),
            top_p: Some(0.9),
            stop: None,
            extra: HashMap::new(),
        };

        // Create client
        let client = GenAiClient::new(config).map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!(
                "Failed to create AI client: {}",
                e
            ))
        })?;

        let session_id = uuid::Uuid::new_v4().to_string();
        let mut messages: Vec<ChatMessage> = Vec::new();
        let mut total_tokens: Option<usize> = None;
        let model_name = client.get_config().model.clone();

        if interactive {
            // Interactive mode: loops over user turns. Note that each turn sends
            // only the latest input to `complete`, so the model receives no
            // conversation history; the transcript is kept locally in `messages`.
            ggen_utils::alert_info!("🤖 AI Chat - Interactive Mode");
            ggen_utils::alert_info!("Model: {}", model_name);
            ggen_utils::alert_info!("Type 'exit' or 'quit' to end session\n");

            loop {
                eprint!("> ");
                std::io::stderr().flush().map_err(|e| {
                    clap_noun_verb::NounVerbError::execution_error(format!(
                        "Failed to flush stderr: {}",
                        e
                    ))
                })?;

                let mut input = String::new();
                std::io::stdin().read_line(&mut input).map_err(|e| {
                    clap_noun_verb::NounVerbError::execution_error(format!(
                        "Failed to read input: {}",
                        e
                    ))
                })?;

                let input = input.trim();
                if input.is_empty() {
                    continue;
                }

                if input == "exit" || input == "quit" {
                    break;
                }

                messages.push(ChatMessage {
                    role: "user".to_string(),
                    content: input.to_string(),
                });

                if stream {
                    // Stream the response chunk by chunk to stderr
                    let mut stream = client.complete_stream(input).await.map_err(|e| {
                        clap_noun_verb::NounVerbError::execution_error(format!(
                            "Streaming failed: {}",
                            e
                        ))
                    })?;

                    let mut full_response = String::new();
                    eprint!("🤖: ");
                    while let Some(chunk) = stream.next().await {
                        eprint!("{}", chunk.content);
                        std::io::stderr().flush().map_err(|e| {
                            clap_noun_verb::NounVerbError::execution_error(format!(
                                "Failed to flush stderr: {}",
                                e
                            ))
                        })?;
                        full_response.push_str(&chunk.content);

                        if let Some(usage) = chunk.usage {
                            total_tokens = Some(usage.total_tokens as usize);
                        }
                    }
                    ggen_utils::alert_info!("\n");

                    messages.push(ChatMessage {
                        role: "assistant".to_string(),
                        content: full_response,
                    });
                } else {
                    // Non-streaming response
                    let response = client.complete(input).await.map_err(|e| {
                        clap_noun_verb::NounVerbError::execution_error(format!(
                            "Chat failed: {}",
                            e
                        ))
                    })?;

                    ggen_utils::alert_info!("🤖: {}\n", response.content);

                    if let Some(usage) = response.usage {
                        total_tokens = Some(usage.total_tokens as usize);
                    }

                    messages.push(ChatMessage {
                        role: "assistant".to_string(),
                        content: response.content,
                    });
                }
            }
        } else if let Some(msg) = message {
            // Single message mode
            messages.push(ChatMessage {
                role: "user".to_string(),
                content: msg.clone(),
            });

            if stream {
                // Stream the response chunk by chunk to stderr
                let mut stream = client.complete_stream(&msg).await.map_err(|e| {
                    clap_noun_verb::NounVerbError::execution_error(format!(
                        "Streaming failed: {}",
                        e
                    ))
                })?;

                let mut full_response = String::new();
                while let Some(chunk) = stream.next().await {
                    eprint!("{}", chunk.content);
                    std::io::stderr().flush().map_err(|e| {
                        clap_noun_verb::NounVerbError::execution_error(format!(
                            "Failed to flush stderr: {}",
                            e
                        ))
                    })?;
                    full_response.push_str(&chunk.content);

                    if let Some(usage) = chunk.usage {
                        total_tokens = Some(usage.total_tokens as usize);
                    }
                }

                messages.push(ChatMessage {
                    role: "assistant".to_string(),
                    content: full_response,
                });
            } else {
                // Non-streaming response
                let response = client.complete(&msg).await.map_err(|e| {
                    clap_noun_verb::NounVerbError::execution_error(format!("Chat failed: {}", e))
                })?;

                if let Some(usage) = response.usage {
                    total_tokens = Some(usage.total_tokens as usize);
                }

                messages.push(ChatMessage {
                    role: "assistant".to_string(),
                    content: response.content,
                });
            }
        } else {
            return Err(clap_noun_verb::NounVerbError::execution_error(
                "Provide a message or use --interactive for a chat session",
            ));
        }

        Ok(ChatOutput {
            messages,
            session_id,
            model: model_name,
            tokens_used: total_tokens,
        })
    })
}

/// Analyze code with AI insights
///
/// # Examples
///
/// Analyze a code string:
/// ```bash
/// ggen ai analyze "fn main() { println!(\"hello\"); }"
/// ```
///
/// Analyze a file:
/// ```bash
/// ggen ai analyze --file src/main.rs --api-key $OPENAI_API_KEY
/// ```
///
/// Analyze a project directory:
/// ```bash
/// ggen ai analyze --project ./my-crate --model gpt-4
/// ```
#[allow(clippy::too_many_arguments)] // CLI command with many options
#[verb]
fn analyze(
    code: Option<String>, file: Option<PathBuf>, project: Option<PathBuf>, model: Option<String>,
    api_key: Option<String>, complexity: bool, security: bool, performance: bool, max_tokens: u32,
) -> Result<AnalyzeOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};

    crate::runtime::block_on(async move {
        // Determine what to analyze
        let (code_content, file_path) = if let Some(code_str) = code {
            (code_str, None)
        } else if let Some(file_path) = &file {
            let content = std::fs::read_to_string(file_path).map_err(|e| {
                clap_noun_verb::NounVerbError::execution_error(format!(
                    "Failed to read file: {}",
                    e
                ))
            })?;
            (content, Some(file_path.display().to_string()))
        } else if let Some(project_path) = &project {
            // Project analysis is delegated to a helper that summarizes the file tree
            return analyze_project(project_path, model, api_key, max_tokens).await;
        } else {
            return Err(clap_noun_verb::NounVerbError::execution_error(
                "Provide code, --file, or --project to analyze",
            ));
        };

        // Build configuration
        let config = LlmConfig {
            model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
            max_tokens: Some(max_tokens),
            temperature: Some(0.3), // Lower temperature for more deterministic analysis
            top_p: Some(0.9),
            stop: None,
            extra: HashMap::new(),
        };

        // Create client
        let client = GenAiClient::new(config).map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!(
                "Failed to create AI client: {}",
                e
            ))
        })?;

        // Build analysis prompt
        let mut prompt = format!(
            "Analyze the following code and provide insights:\n\n```\n{}\n```\n\n",
            code_content
        );

        prompt.push_str("Provide:\n");
        prompt.push_str("1. Key insights about the code structure and design\n");
        prompt.push_str("2. Suggestions for improvements\n");

        if complexity {
            prompt.push_str("3. Complexity analysis (cyclomatic, cognitive)\n");
        }
        if security {
            prompt.push_str("4. Security considerations and potential vulnerabilities\n");
        }
        if performance {
            prompt.push_str("5. Performance optimization opportunities\n");
        }

        prompt.push_str("\nFormat your response with clear sections.");

        // Generate analysis
        let response = client.complete(&prompt).await.map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!("Analysis failed: {}", e))
        })?;

        // Parse response into structured output
        let (insights, suggestions) = parse_analysis_response(&response.content);

        // The complexity score is a local heuristic over the source text,
        // not something returned by the model (see estimate_complexity below)
        let complexity_score = if complexity {
            Some(estimate_complexity(&code_content))
        } else {
            None
        };

        Ok(AnalyzeOutput {
            file_path,
            insights,
            suggestions,
            complexity_score,
            model: response.model,
            tokens_used: response.usage.map(|u| u.total_tokens as usize),
        })
    })
}

// ============================================================================
// Helper Functions
// ============================================================================

/// Analyze a project directory
async fn analyze_project(
    project_path: &Path, model: Option<String>, _api_key: Option<String>, max_tokens: u32,
) -> Result<AnalyzeOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};
    use walkdir::WalkDir;

    // Collect source files (Rust sources, manifests, and docs) up to five levels deep
    let mut source_files = Vec::new();
    for entry in WalkDir::new(project_path)
        .max_depth(5)
        .into_iter()
        .filter_map(|e| e.ok())
    {
        let path = entry.path();
        if path.is_file() {
            if let Some(ext) = path.extension() {
                if matches!(ext.to_str(), Some("rs") | Some("toml") | Some("md")) {
                    source_files.push(path.to_path_buf());
                }
            }
        }
    }

    // Build configuration
    let config = LlmConfig {
        model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
        max_tokens: Some(max_tokens),
        temperature: Some(0.3),
        top_p: Some(0.9),
        stop: None,
        extra: HashMap::new(),
    };

    // Create client
    let client = GenAiClient::new(config).map_err(|e| {
        clap_noun_verb::NounVerbError::execution_error(format!("Failed to create AI client: {}", e))
    })?;

    // Build project summary (the prompt contains only the file list; file contents are not sent)
    let file_list: Vec<String> = source_files
        .iter()
        .map(|p| p.display().to_string())
        .collect();

    let prompt = format!(
        "Analyze this project structure:\n\nProject: {}\n\nFiles:\n{}\n\n\
        Provide insights about:\n\
        1. Project architecture and organization\n\
        2. Code quality and design patterns\n\
        3. Suggested improvements\n\
        4. Potential issues or technical debt",
        project_path.display(),
        file_list.join("\n")
    );

    // Generate analysis
    let response = client.complete(&prompt).await.map_err(|e| {
        clap_noun_verb::NounVerbError::execution_error(format!("Project analysis failed: {}", e))
    })?;

    // Parse response
    let (insights, suggestions) = parse_analysis_response(&response.content);

    Ok(AnalyzeOutput {
        file_path: Some(project_path.display().to_string()),
        insights,
        suggestions,
        complexity_score: None,
        model: response.model,
        tokens_used: response.usage.map(|u| u.total_tokens as usize),
    })
}

/// Parse analysis response into structured insights and suggestions
fn parse_analysis_response(response: &str) -> (Vec<String>, Vec<String>) {
    let mut insights = Vec::new();
    let mut suggestions = Vec::new();

    let mut current_section = "";
    for line in response.lines() {
        let line = line.trim();
        let lowered = line.to_lowercase();

        // Detect section headers
        if lowered.contains("insight") {
            current_section = "insights";
            continue;
        } else if lowered.contains("suggestion") || lowered.contains("improvement") {
            current_section = "suggestions";
            continue;
        }

        // Collect list items (numbered or bulleted) into the current section
        if !line.is_empty() && line.starts_with(|c: char| c.is_numeric() || c == '-' || c == '*') {
            let cleaned = line
                .trim_start_matches(|c: char| c.is_numeric() || c == '.' || c == '-' || c == '*')
                .trim()
                .to_string();

            match current_section {
                "insights" => insights.push(cleaned),
                "suggestions" => suggestions.push(cleaned),
                // Default to insights if no section header has been seen yet
                _ => insights.push(cleaned),
            }
        }
    }

    // If no list items were recognized, fall back to the raw response as a single insight
    if insights.is_empty() && suggestions.is_empty() {
        insights.push(response.to_string());
    }

    (insights, suggestions)
}

/// Estimate code complexity (simplified heuristic, computed locally)
fn estimate_complexity(code: &str) -> f64 {
    let mut complexity = 1.0;

    // Count control-flow keywords (naive substring match, so identifiers
    // containing a keyword inflate the count slightly)
    let control_flow = ["if", "else", "match", "for", "while", "loop"];
    for keyword in &control_flow {
        complexity += code.matches(keyword).count() as f64;
    }

    // Weight by nesting depth, approximated from the brace count
    let nesting_level = code.matches('{').count().max(1) as f64;
    complexity *= nesting_level.log10().max(1.0);

    // Count function definitions at half weight
    complexity += code.matches("fn ").count() as f64 * 0.5;

    // Cap at 100 and round to one decimal place
    (complexity.min(100.0) * 10.0).round() / 10.0
}
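
// A minimal sanity check for the two pure helpers above; it assumes nothing
// beyond std and the functions in this file, and the expected values follow
// directly from the heuristics as written.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_splits_sections() {
        let response = "Insights:\n1. Uses println\nSuggestions:\n- Add error handling";
        let (insights, suggestions) = parse_analysis_response(response);
        assert_eq!(insights, vec!["Uses println".to_string()]);
        assert_eq!(suggestions, vec!["Add error handling".to_string()]);
    }

    #[test]
    fn parse_falls_back_to_raw_response() {
        // No section headers and no list items: the whole response becomes one insight
        let (insights, suggestions) = parse_analysis_response("free-form text");
        assert_eq!(insights, vec!["free-form text".to_string()]);
        assert!(suggestions.is_empty());
    }

    #[test]
    fn complexity_grows_with_branching() {
        let simple = estimate_complexity("fn main() {}");
        let branchy = estimate_complexity("fn main() { if x { for i in 0..3 { if y {} } } }");
        assert!(branchy > simple);
    }
}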

// ============================================================================
// Usage Notes
// ============================================================================

// To use this in your CLI:
// 1. Replace the existing ai.rs with this file (or integrate gradually)
// 2. Update main.rs to use: clap_noun_verb::run()
// 3. Ensure ggen-ai is properly configured with API keys
// 4. Test with: cargo run -- ai --help
// 5. JSON output: cargo run -- ai generate "hello world" --format json
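//
// A minimal main.rs sketch for step 2 above, assuming `clap_noun_verb::run()`
// performs verb discovery and returns this crate's `Result` (check the
// clap-noun-verb v3.4.0 docs for the exact entry-point signature):
//
//     fn main() -> clap_noun_verb::Result<()> {
//         clap_noun_verb::run()
//     }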