// ggen_cli_lib/cmds/ai.rs

//! AI commands - clap-noun-verb v3.4.0 migration
//!
//! This module implements AI operations using the #[verb] macro pattern for
//! automatic discovery and JSON output support.

use clap_noun_verb::Result;
use clap_noun_verb_macros::verb;
use futures::StreamExt;
use serde::Serialize;
use std::collections::HashMap;
use std::path::{Path, PathBuf};

// ============================================================================
// Output Types (all must derive Serialize for JSON output)
// ============================================================================

#[derive(Serialize)]
pub struct GenerateOutput {
    generated_code: String,
    language: Option<String>,
    tokens_used: Option<usize>,
    model: String,
    finish_reason: Option<String>,
}
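
// Illustrative JSON shape for GenerateOutput under `--format json` (the field
// names follow the struct; the exact envelope depends on clap-noun-verb's
// output layer, and the values below are made up for illustration):
//
// {
//   "generated_code": "fn fib(n: u64) -> u64 { /* ... */ }",
//   "language": "rust",
//   "tokens_used": 256,
//   "model": "gpt-3.5-turbo",
//   "finish_reason": "stop"
// }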

#[derive(Serialize)]
pub struct ChatMessage {
    role: String,
    content: String,
}

#[derive(Serialize)]
pub struct ChatOutput {
    messages: Vec<ChatMessage>,
    session_id: String,
    model: String,
    tokens_used: Option<usize>,
}

#[derive(Serialize)]
pub struct AnalyzeOutput {
    file_path: Option<String>,
    insights: Vec<String>,
    suggestions: Vec<String>,
    complexity_score: Option<f64>,
    model: String,
    tokens_used: Option<usize>,
}

// ============================================================================
// Verb Functions (the actual CLI commands)
// ============================================================================

/// Generate code with AI assistance
///
/// # Examples
///
/// Basic generation:
/// ```bash
/// ggen ai generate "Create a Rust function that calculates fibonacci numbers"
/// ```
///
/// With existing code context:
/// ```bash
/// ggen ai generate "Add error handling" --code "fn main() { println!(\"hello\"); }"
/// ```
///
/// With a specific model:
/// ```bash
/// ggen ai generate "Generate REST API" --model gpt-4 --api-key $OPENAI_API_KEY
/// ```
#[verb]
fn generate(
    prompt: String,
    code: Option<String>,
    model: Option<String>,
    api_key: Option<String>,
    suggestions: bool,
    language: Option<String>,
    max_tokens: u32,
    temperature: f32,
) -> Result<GenerateOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};

    crate::runtime::block_on(async move {
        // Build configuration
        // (api_key is currently unused here; the client is assumed to pick up
        // credentials from the environment)
        let config = LlmConfig {
            model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
            max_tokens: Some(max_tokens),
            temperature: Some(temperature),
            top_p: Some(0.9),
            stop: None,
            extra: HashMap::new(),
        };

        // Validate configuration
        config.validate().map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Invalid configuration: {}", e)))?;

        // Create client
        let client = GenAiClient::new(config)
            .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Failed to create AI client: {}", e)))?;

        // Build prompt from the request plus optional context
        let mut full_prompt = prompt;

        if let Some(lang) = &language {
            full_prompt.push_str(&format!("\nTarget language: {}", lang));
        }

        if let Some(code) = &code {
            full_prompt.push_str(&format!("\n\nExisting code:\n```\n{}\n```", code));
        }

        if suggestions {
            full_prompt.push_str("\n\nInclude suggestions for improvements and best practices.");
        }

        // Generate response
        let response = client
            .complete(&full_prompt)
            .await
            .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("AI generation failed: {}", e)))?;

        Ok(GenerateOutput {
            generated_code: response.content,
            language,
            tokens_used: response.usage.map(|u| u.total_tokens as usize),
            model: response.model,
            finish_reason: response.finish_reason,
        })
    })
}
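
// The `crate::runtime::block_on` helper used above is assumed to live
// elsewhere in this crate. A minimal sketch (assuming tokio is available as a
// dependency) could look like:
//
//     // runtime.rs
//     pub fn block_on<F: std::future::Future>(fut: F) -> F::Output {
//         tokio::runtime::Builder::new_current_thread()
//             .enable_all()
//             .build()
//             .expect("failed to build tokio runtime")
//             .block_on(fut)
//     }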

/// Interactive AI chat session
///
/// # Examples
///
/// Single message:
/// ```bash
/// ggen ai chat "Explain Rust ownership"
/// ```
///
/// Interactive mode:
/// ```bash
/// ggen ai chat --interactive --model claude-3-sonnet-20240229
/// ```
///
/// Stream response:
/// ```bash
/// ggen ai chat "Write a web server" --stream --api-key $OPENAI_API_KEY
/// ```
#[verb]
fn chat(
    message: Option<String>,
    model: Option<String>,
    api_key: Option<String>,
    interactive: bool,
    stream: bool,
    max_tokens: u32,
    temperature: f32,
) -> Result<ChatOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};
    use std::io::Write;

    crate::runtime::block_on(async move {
        // Build configuration
        let config = LlmConfig {
            model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
            max_tokens: Some(max_tokens),
            temperature: Some(temperature),
            top_p: Some(0.9),
            stop: None,
            extra: HashMap::new(),
        };

        // Create client
        let client = GenAiClient::new(config)
            .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Failed to create AI client: {}", e)))?;

        let session_id = uuid::Uuid::new_v4().to_string();
        let mut messages: Vec<ChatMessage> = Vec::new();
        let mut total_tokens: Option<usize> = None;
        let model_name = client.get_config().model.clone();

        if interactive {
            // Interactive mode with multiple turns
            eprintln!("🤖 AI Chat - Interactive Mode");
            eprintln!("Model: {}", model_name);
            eprintln!("Type 'exit' or 'quit' to end session\n");

            loop {
                eprint!("> ");
                let _ = std::io::stderr().flush();

                let mut input = String::new();
                std::io::stdin()
                    .read_line(&mut input)
                    .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Failed to read input: {}", e)))?;

                let input = input.trim();
                if input.is_empty() {
                    continue;
                }

                if input == "exit" || input == "quit" {
                    break;
                }

                messages.push(ChatMessage {
                    role: "user".to_string(),
                    content: input.to_string(),
                });

                if stream {
                    // Stream the response chunk by chunk
                    let mut stream = client
                        .complete_stream(input)
                        .await
                        .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Streaming failed: {}", e)))?;

                    let mut full_response = String::new();
                    eprint!("🤖: ");
                    while let Some(chunk) = stream.next().await {
                        eprint!("{}", chunk.content);
                        let _ = std::io::stderr().flush();
                        full_response.push_str(&chunk.content);

                        if let Some(usage) = chunk.usage {
                            total_tokens = Some(usage.total_tokens as usize);
                        }
                    }
                    eprintln!("\n");

                    messages.push(ChatMessage {
                        role: "assistant".to_string(),
                        content: full_response,
                    });
                } else {
                    // Non-streaming response
                    let response = client
                        .complete(input)
                        .await
                        .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Chat failed: {}", e)))?;

                    eprintln!("🤖: {}\n", response.content);

                    if let Some(usage) = response.usage {
                        total_tokens = Some(usage.total_tokens as usize);
                    }

                    messages.push(ChatMessage {
                        role: "assistant".to_string(),
                        content: response.content,
                    });
                }
            }
        } else if let Some(msg) = message {
            // Single message mode
            messages.push(ChatMessage {
                role: "user".to_string(),
                content: msg.clone(),
            });

            if stream {
                // Stream the response chunk by chunk
                let mut stream = client
                    .complete_stream(&msg)
                    .await
                    .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Streaming failed: {}", e)))?;

                let mut full_response = String::new();
                while let Some(chunk) = stream.next().await {
                    eprint!("{}", chunk.content);
                    let _ = std::io::stderr().flush();
                    full_response.push_str(&chunk.content);

                    if let Some(usage) = chunk.usage {
                        total_tokens = Some(usage.total_tokens as usize);
                    }
                }
                eprintln!();

                messages.push(ChatMessage {
                    role: "assistant".to_string(),
                    content: full_response,
                });
            } else {
                // Non-streaming response
                let response = client
                    .complete(&msg)
                    .await
                    .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Chat failed: {}", e)))?;

                if let Some(usage) = response.usage {
                    total_tokens = Some(usage.total_tokens as usize);
                }

                messages.push(ChatMessage {
                    role: "assistant".to_string(),
                    content: response.content,
                });
            }
        } else {
            return Err(clap_noun_verb::NounVerbError::execution_error(
                "Provide a message or use --interactive for a chat session",
            ));
        }

        Ok(ChatOutput {
            messages,
            session_id,
            model: model_name,
            tokens_used: total_tokens,
        })
    })
}

/// Analyze code with AI insights
///
/// # Examples
///
/// Analyze a code string:
/// ```bash
/// ggen ai analyze "fn main() { println!(\"hello\"); }"
/// ```
///
/// Analyze from a file:
/// ```bash
/// ggen ai analyze --file src/main.rs --api-key $OPENAI_API_KEY
/// ```
///
/// Analyze a project directory:
/// ```bash
/// ggen ai analyze --project ./my-crate --model gpt-4
/// ```
#[verb]
fn analyze(
    code: Option<String>,
    file: Option<PathBuf>,
    project: Option<PathBuf>,
    model: Option<String>,
    api_key: Option<String>,
    complexity: bool,
    security: bool,
    performance: bool,
    max_tokens: u32,
) -> Result<AnalyzeOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};

    crate::runtime::block_on(async move {
        // Determine what to analyze
        let (code_content, file_path) = if let Some(code_str) = code {
            (code_str, None)
        } else if let Some(file_path) = &file {
            let content = std::fs::read_to_string(file_path)
                .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Failed to read file: {}", e)))?;
            (content, Some(file_path.display().to_string()))
        } else if let Some(project_path) = &project {
            // Project analysis takes a separate, summary-based path
            return analyze_project(project_path, model, api_key, max_tokens).await;
        } else {
            return Err(clap_noun_verb::NounVerbError::execution_error(
                "Provide code, --file, or --project to analyze",
            ));
        };

        // Build configuration
        let config = LlmConfig {
            model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
            max_tokens: Some(max_tokens),
            temperature: Some(0.3), // Lower temperature for more deterministic analysis
            top_p: Some(0.9),
            stop: None,
            extra: HashMap::new(),
        };

        // Create client
        let client = GenAiClient::new(config)
            .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Failed to create AI client: {}", e)))?;

        // Build analysis prompt
        let mut prompt = format!(
            "Analyze the following code and provide insights:\n\n```\n{}\n```\n\n",
            code_content
        );

        prompt.push_str("Provide:\n");
        prompt.push_str("1. Key insights about the code structure and design\n");
        prompt.push_str("2. Suggestions for improvements\n");

        if complexity {
            prompt.push_str("3. Complexity analysis (cyclomatic, cognitive)\n");
        }
        if security {
            prompt.push_str("4. Security considerations and potential vulnerabilities\n");
        }
        if performance {
            prompt.push_str("5. Performance optimization opportunities\n");
        }

        prompt.push_str("\nFormat your response with clear sections.");

        // Generate analysis
        let response = client
            .complete(&prompt)
            .await
            .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Analysis failed: {}", e)))?;

        // Parse response into structured output
        let (insights, suggestions) = parse_analysis_response(&response.content);

        // Compute a local complexity score if requested (simplified heuristic;
        // see estimate_complexity below)
        let complexity_score = if complexity {
            Some(estimate_complexity(&code_content))
        } else {
            None
        };

        Ok(AnalyzeOutput {
            file_path,
            insights,
            suggestions,
            complexity_score,
            model: response.model,
            tokens_used: response.usage.map(|u| u.total_tokens as usize),
        })
    })
}

// ============================================================================
// Helper Functions
// ============================================================================

/// Analyze a project directory
async fn analyze_project(
    project_path: &Path,
    model: Option<String>,
    api_key: Option<String>,
    max_tokens: u32,
) -> Result<AnalyzeOutput> {
    use ggen_ai::{GenAiClient, LlmClient, LlmConfig};
    use walkdir::WalkDir;

    // Collect source files (depth-limited walk; unreadable entries are skipped)
    let mut source_files = Vec::new();
    for entry in WalkDir::new(project_path)
        .max_depth(5)
        .into_iter()
        .filter_map(|e| e.ok())
    {
        let path = entry.path();
        if path.is_file() {
            if let Some(ext) = path.extension() {
                if matches!(ext.to_str(), Some("rs") | Some("toml") | Some("md")) {
                    source_files.push(path.to_path_buf());
                }
            }
        }
    }

    // Build configuration
    let config = LlmConfig {
        model: model.unwrap_or_else(|| "gpt-3.5-turbo".to_string()),
        max_tokens: Some(max_tokens),
        temperature: Some(0.3),
        top_p: Some(0.9),
        stop: None,
        extra: HashMap::new(),
    };

    // Create client
    let client = GenAiClient::new(config)
        .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Failed to create AI client: {}", e)))?;

    // Build a project summary from the file listing
    let file_list: Vec<String> = source_files
        .iter()
        .map(|p| p.display().to_string())
        .collect();

    let prompt = format!(
        "Analyze this project structure:\n\nProject: {}\n\nFiles:\n{}\n\n\
        Provide insights about:\n\
        1. Project architecture and organization\n\
        2. Code quality and design patterns\n\
        3. Suggested improvements\n\
        4. Potential issues or technical debt",
        project_path.display(),
        file_list.join("\n")
    );

    // Generate analysis
    let response = client
        .complete(&prompt)
        .await
        .map_err(|e| clap_noun_verb::NounVerbError::execution_error(format!("Project analysis failed: {}", e)))?;

    // Parse response
    let (insights, suggestions) = parse_analysis_response(&response.content);

    Ok(AnalyzeOutput {
        file_path: Some(project_path.display().to_string()),
        insights,
        suggestions,
        complexity_score: None,
        model: response.model,
        tokens_used: response.usage.map(|u| u.total_tokens as usize),
    })
}

/// Parse an analysis response into structured insights and suggestions
fn parse_analysis_response(response: &str) -> (Vec<String>, Vec<String>) {
    let mut insights = Vec::new();
    let mut suggestions = Vec::new();

    let mut current_section = "";
    for line in response.lines() {
        let line = line.trim();
        let lower = line.to_lowercase();

        // Detect section headers
        if lower.contains("insight") {
            current_section = "insights";
            continue;
        } else if lower.contains("suggestion") || lower.contains("improvement") {
            current_section = "suggestions";
            continue;
        }

        // Add numbered or bulleted list items to the current section
        if !line.is_empty() && line.starts_with(|c: char| c.is_numeric() || c == '-' || c == '*') {
            let cleaned = line
                .trim_start_matches(|c: char| c.is_numeric() || c == '.' || c == '-' || c == '*')
                .trim()
                .to_string();

            match current_section {
                "insights" => insights.push(cleaned),
                "suggestions" => suggestions.push(cleaned),
                // Default to insights before any section header is seen
                _ => insights.push(cleaned),
            }
        }
    }

    // If no list items were parsed, fall back to the whole response as one insight
    if insights.is_empty() && suggestions.is_empty() {
        insights.push(response.to_string());
    }

    (insights, suggestions)
}

/// Estimate code complexity (simplified heuristic)
fn estimate_complexity(code: &str) -> f64 {
    let mut complexity = 1.0;

    // Count control-flow keywords (plain substring matching, so identifiers that
    // merely contain a keyword are over-counted; acceptable for a rough score)
    let control_flow = ["if", "else", "match", "for", "while", "loop"];
    for keyword in &control_flow {
        complexity += code.matches(keyword).count() as f64;
    }

    // Approximate nesting by brace count (simplified)
    let nesting_level = code.matches('{').count().max(1) as f64;
    complexity *= nesting_level.log10().max(1.0);

    // Count function definitions
    complexity += code.matches("fn ").count() as f64 * 0.5;

    // Clamp to at most 100 and round to one decimal place
    (complexity.min(100.0) * 10.0).round() / 10.0
}
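
// Minimal sanity tests for the pure helpers above. These are illustrative:
// the parsed strings model a hypothetical LLM response, not a fixed contract.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_splits_sections() {
        let response = "Insights:\n1. Uses recursion\nSuggestions:\n- Add memoization";
        let (insights, suggestions) = parse_analysis_response(response);
        assert_eq!(insights, vec!["Uses recursion".to_string()]);
        assert_eq!(suggestions, vec!["Add memoization".to_string()]);
    }

    #[test]
    fn complexity_score_stays_in_range() {
        let score = estimate_complexity("fn main() { if true { println!(\"hi\"); } }");
        assert!(score > 0.0 && score <= 100.0);
    }
}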

// ============================================================================
// Usage Notes
// ============================================================================

// To use this in your CLI:
// 1. Replace the existing ai.rs with this file (or integrate gradually)
// 2. Update main.rs to use clap_noun_verb::run() (see the sketch below)
// 3. Ensure ggen-ai is properly configured with API keys
// 4. Test with: cargo run -- ai --help
// 5. JSON output: cargo run -- ai generate "hello world" --format json
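//
// A minimal sketch of step 2, assuming clap_noun_verb::run() returns a Result
// (a hypothetical signature; adjust to the actual clap-noun-verb v3.4.0 API):
//
//     // main.rs
//     fn main() {
//         if let Err(e) = clap_noun_verb::run() {
//             eprintln!("error: {}", e);
//             std::process::exit(1);
//         }
//     }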