ggen_cli_lib/cmds/ai.rs

//! AI commands - clap-noun-verb v3.4.0 migration
//!
//! This module implements AI operations using the #[verb] macro pattern for
//! automatic discovery and JSON output support.

use clap_noun_verb::Result;
use clap_noun_verb_macros::verb;
use futures::StreamExt;
use ggen_ai::config::get_global_config;
use serde::Serialize;
use std::path::PathBuf;

// ============================================================================
// Output Types (all must derive Serialize for JSON output)
// ============================================================================

#[derive(Serialize)]
pub struct GenerateOutput {
    generated_code: String,
    language: Option<String>,
    tokens_used: Option<usize>,
    model: String,
    finish_reason: Option<String>,
}
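
// With `--format json` (see the usage notes at the bottom of this file), this
// struct serializes to roughly the following shape; the values shown here are
// illustrative only, not real output:
//
// {
//   "generated_code": "fn fib(n: u64) -> u64 { ... }",
//   "language": "rust",
//   "tokens_used": 512,
//   "model": "gpt-4",
//   "finish_reason": "stop"
// }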

#[derive(Serialize)]
pub struct ChatMessage {
    role: String,
    content: String,
}

#[derive(Serialize)]
pub struct ChatOutput {
    messages: Vec<ChatMessage>,
    session_id: String,
    model: String,
    tokens_used: Option<usize>,
}

#[derive(Serialize)]
pub struct AnalyzeOutput {
    file_path: Option<String>,
    insights: Vec<String>,
    suggestions: Vec<String>,
    complexity_score: Option<f64>,
    model: String,
    tokens_used: Option<usize>,
}

// ============================================================================
// Verb Functions (the actual CLI commands)
// ============================================================================
/// Generate code with AI assistance
///
/// # Examples
///
/// Basic generation:
/// ```bash
/// ggen ai generate "Create a Rust function that calculates fibonacci numbers"
/// ```
///
/// With existing code context:
/// ```bash
/// ggen ai generate "Add error handling" --code "fn main() { println!(\"hello\"); }"
/// ```
///
/// With specific model:
/// ```bash
/// ggen ai generate "Generate REST API" --model gpt-4 --api-key $OPENAI_API_KEY
/// ```
#[allow(clippy::too_many_arguments)] // CLI command with many options
#[verb]
fn generate(
    prompt: String, code: Option<String>, model: Option<String>, _api_key: Option<String>,
    suggestions: bool, language: Option<String>, max_tokens: i64, temperature: f64,
) -> Result<GenerateOutput> {
    crate::runtime::block_on(async move {
        let mut global_config = get_global_config().clone();

        // Override model if provided
        if let Some(model_name) = &model {
            global_config.settings.default_model = Some(model_name.clone());
            if let Some(provider_config) = global_config.providers.get_mut(&global_config.provider)
            {
                provider_config.model = model_name.clone();
            }
        }

        // Override max_tokens and temperature
        global_config.settings.default_max_tokens = Some(max_tokens as u32);
        global_config.settings.default_temperature = Some(temperature as f32);
        if let Some(provider_config) = global_config.providers.get_mut(&global_config.provider) {
            provider_config.max_tokens = Some(max_tokens as u32);
            provider_config.temperature = Some(temperature as f32);
        }

        // Create client using global config
        let client = global_config.create_contextual_client().map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!(
                "Failed to create AI client: {}",
                e
            ))
        })?;

        // Build prompt
        let mut full_prompt = prompt.clone();

        if let Some(lang) = &language {
            full_prompt.push_str(&format!("\nTarget language: {}", lang));
        }

        if let Some(code) = &code {
            full_prompt.push_str(&format!("\n\nExisting code:\n```\n{}\n```", code));
        }

        if suggestions {
            full_prompt.push_str("\n\nInclude suggestions for improvements and best practices.");
        }

        // Generate response
        let response = client.complete(&full_prompt).await.map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!("AI generation failed: {}", e))
        })?;

        Ok(GenerateOutput {
            generated_code: response.content,
            language: language.clone(),
            tokens_used: response.usage.map(|u| u.total_tokens as usize),
            model: client.get_config().model.clone(),
            finish_reason: response.finish_reason,
        })
    })
}

/// Interactive AI chat session
///
/// # Examples
///
/// Single message:
/// ```bash
/// ggen ai chat "Explain Rust ownership"
/// ```
///
/// Interactive mode:
/// ```bash
/// ggen ai chat --interactive --model claude-3-sonnet-20240229
/// ```
///
/// Stream response:
/// ```bash
/// ggen ai chat "Write a web server" --stream --api-key $OPENAI_API_KEY
/// ```
#[verb]
fn chat(
    message: Option<String>, model: Option<String>, _api_key: Option<String>, interactive: bool,
    stream: bool, max_tokens: i64, temperature: f64,
) -> Result<ChatOutput> {
    use std::io::Write;

    crate::runtime::block_on(async move {
        let mut global_config = get_global_config().clone();

        // Override model if provided
        if let Some(model_name) = &model {
            global_config.settings.default_model = Some(model_name.clone());
            if let Some(provider_config) = global_config.providers.get_mut(&global_config.provider)
            {
                provider_config.model = model_name.clone();
            }
        }

        // Override max_tokens and temperature
        global_config.settings.default_max_tokens = Some(max_tokens as u32);
        global_config.settings.default_temperature = Some(temperature as f32);
        if let Some(provider_config) = global_config.providers.get_mut(&global_config.provider) {
            provider_config.max_tokens = Some(max_tokens as u32);
            provider_config.temperature = Some(temperature as f32);
        }

        // Create client using global config
        let client = global_config.create_contextual_client().map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!(
                "Failed to create AI client: {}",
                e
            ))
        })?;

        let session_id = uuid::Uuid::new_v4().to_string();
        let mut messages: Vec<ChatMessage> = Vec::new();
        let mut total_tokens: Option<usize> = None;
        let model_name = client.get_config().model.clone();

        if interactive {
            // Interactive mode with multiple turns
            ggen_utils::alert_info!("🤖 AI Chat - Interactive Mode");
            ggen_utils::alert_info!("Model: {}", model_name);
            ggen_utils::alert_info!("Type 'exit' or 'quit' to end session\n");

            loop {
                eprint!("> ");
                std::io::stderr().flush().map_err(|e| {
                    clap_noun_verb::NounVerbError::execution_error(format!(
                        "Failed to flush stderr: {}",
                        e
                    ))
                })?;

                let mut input = String::new();
                std::io::stdin().read_line(&mut input).map_err(|e| {
                    clap_noun_verb::NounVerbError::execution_error(format!(
                        "Failed to read input: {}",
                        e
                    ))
                })?;

                let input = input.trim();
                if input.is_empty() {
                    continue;
                }

                if input == "exit" || input == "quit" {
                    break;
                }

                messages.push(ChatMessage {
                    role: "user".to_string(),
                    content: input.to_string(),
                });

                if stream {
                    // Stream response
                    let mut stream = client.complete_stream(input).await.map_err(|e| {
                        clap_noun_verb::NounVerbError::execution_error(format!(
                            "Streaming failed: {}",
                            e
                        ))
                    })?;

                    let mut full_response = String::new();
                    eprint!("🤖: ");
                    while let Some(chunk) = stream.next().await {
                        eprint!("{}", chunk.content);
                        std::io::stderr().flush().map_err(|e| {
                            clap_noun_verb::NounVerbError::execution_error(format!(
                                "Failed to flush stderr: {}",
                                e
                            ))
                        })?;
                        full_response.push_str(&chunk.content);

                        if let Some(usage) = chunk.usage {
                            total_tokens = Some(usage.total_tokens as usize);
                        }
                    }
                    ggen_utils::alert_info!("\n");

                    messages.push(ChatMessage {
                        role: "assistant".to_string(),
                        content: full_response,
                    });
                } else {
                    // Non-streaming response
                    let response = client.complete(input).await.map_err(|e| {
                        clap_noun_verb::NounVerbError::execution_error(format!(
                            "Chat failed: {}",
                            e
                        ))
                    })?;

                    ggen_utils::alert_info!("🤖: {}\n", response.content);

                    if let Some(usage) = response.usage {
                        total_tokens = Some(usage.total_tokens as usize);
                    }

                    messages.push(ChatMessage {
                        role: "assistant".to_string(),
                        content: response.content,
                    });
                }
            }
        } else if let Some(msg) = message {
            // Single message mode
            messages.push(ChatMessage {
                role: "user".to_string(),
                content: msg.clone(),
            });

            if stream {
                // Stream response
                let mut stream = client.complete_stream(&msg).await.map_err(|e| {
                    clap_noun_verb::NounVerbError::execution_error(format!(
                        "Streaming failed: {}",
                        e
                    ))
                })?;

                let mut full_response = String::new();
                while let Some(chunk) = stream.next().await {
                    eprint!("{}", chunk.content);
                    std::io::stderr().flush().map_err(|e| {
                        clap_noun_verb::NounVerbError::execution_error(format!(
                            "Failed to flush stderr: {}",
                            e
                        ))
                    })?;
                    full_response.push_str(&chunk.content);

                    if let Some(usage) = chunk.usage {
                        total_tokens = Some(usage.total_tokens as usize);
                    }
                }

                messages.push(ChatMessage {
                    role: "assistant".to_string(),
                    content: full_response,
                });
            } else {
                // Non-streaming response
                let response = client.complete(&msg).await.map_err(|e| {
                    clap_noun_verb::NounVerbError::execution_error(format!("Chat failed: {}", e))
                })?;

                if let Some(usage) = response.usage {
                    total_tokens = Some(usage.total_tokens as usize);
                }

                messages.push(ChatMessage {
                    role: "assistant".to_string(),
                    content: response.content,
                });
            }
        } else {
            return Err(clap_noun_verb::NounVerbError::execution_error(
                "Provide a message or use --interactive for chat session",
            ));
        }

        Ok(ChatOutput {
            messages,
            session_id,
            model: model_name,
            tokens_used: total_tokens,
        })
    })
}

/// Analyze code with AI insights
///
/// # Examples
///
/// Analyze code string:
/// ```bash
/// ggen ai analyze "fn main() { println!(\"hello\"); }"
/// ```
///
/// Analyze from file:
/// ```bash
/// ggen ai analyze --file src/main.rs --api-key $OPENAI_API_KEY
/// ```
///
/// Analyze project directory:
/// ```bash
/// ggen ai analyze --project ./my-crate --model gpt-4
/// ```
#[allow(clippy::too_many_arguments)] // CLI command with many options
#[verb]
fn analyze(
    code: Option<String>, file: Option<PathBuf>, project: Option<PathBuf>, model: Option<String>,
    api_key: Option<String>, complexity: bool, security: bool, performance: bool, max_tokens: i64,
) -> Result<AnalyzeOutput> {
    crate::runtime::block_on(async move {
        // Determine what to analyze
        let (code_content, file_path) = if let Some(code_str) = code {
            (code_str, None)
        } else if let Some(file_path) = &file {
            let content = std::fs::read_to_string(file_path).map_err(|e| {
                clap_noun_verb::NounVerbError::execution_error(format!(
                    "Failed to read file: {}",
                    e
                ))
            })?;
            (content, Some(file_path.display().to_string()))
        } else if let Some(project_path) = &project {
            // For project analysis, we'll provide a summary prompt
            return analyze_project(project_path, model, api_key, max_tokens as u32).await;
        } else {
            return Err(clap_noun_verb::NounVerbError::execution_error(
                "Provide code, --file, or --project to analyze",
            ));
        };

        let mut global_config = get_global_config().clone();

        // Override model if provided
        if let Some(model_name) = &model {
            global_config.settings.default_model = Some(model_name.clone());
            if let Some(provider_config) = global_config.providers.get_mut(&global_config.provider)
            {
                provider_config.model = model_name.clone();
            }
        }

        // Override max_tokens and temperature (lower for analysis)
        global_config.settings.default_max_tokens = Some(max_tokens as u32);
        global_config.settings.default_temperature = Some(0.3);
        if let Some(provider_config) = global_config.providers.get_mut(&global_config.provider) {
            provider_config.max_tokens = Some(max_tokens as u32);
            provider_config.temperature = Some(0.3);
        }

        // Create client using global config
        let client = global_config.create_contextual_client().map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!(
                "Failed to create AI client: {}",
                e
            ))
        })?;

        // Build analysis prompt
        let mut prompt = format!(
            "Analyze the following code and provide insights:\n\n```\n{}\n```\n\n",
            code_content
        );

        prompt.push_str("Provide:\n");
        prompt.push_str("1. Key insights about the code structure and design\n");
        prompt.push_str("2. Suggestions for improvements\n");

        if complexity {
            prompt.push_str("3. Complexity analysis (cyclomatic, cognitive)\n");
        }
        if security {
            prompt.push_str("4. Security considerations and potential vulnerabilities\n");
        }
        if performance {
            prompt.push_str("5. Performance optimization opportunities\n");
        }

        prompt.push_str("\nFormat your response with clear sections.");

        // Generate analysis
        let response = client.complete(&prompt).await.map_err(|e| {
            clap_noun_verb::NounVerbError::execution_error(format!("Analysis failed: {}", e))
        })?;

        // Parse response into structured output
        let (insights, suggestions) = parse_analysis_response(&response.content);

        // Calculate complexity score if requested (simplified placeholder)
        let complexity_score = if complexity {
            Some(estimate_complexity(&code_content))
        } else {
            None
        };

        Ok(AnalyzeOutput {
            file_path,
            insights,
            suggestions,
            complexity_score,
            model: client.get_config().model.clone(),
            tokens_used: response.usage.map(|u| u.total_tokens as usize),
        })
    })
}

// ============================================================================
// Helper Functions
// ============================================================================

/// Analyze a project directory
async fn analyze_project(
    project_path: &PathBuf, model: Option<String>, _api_key: Option<String>, max_tokens: u32,
) -> Result<AnalyzeOutput> {
    use walkdir::WalkDir;

    // Collect source files
    let mut source_files = Vec::new();
    for entry in WalkDir::new(project_path)
        .max_depth(5)
        .into_iter()
        .filter_map(|e| e.ok())
    {
        let path = entry.path();
        if path.is_file() {
            if let Some(ext) = path.extension() {
                if matches!(ext.to_str(), Some("rs") | Some("toml") | Some("md")) {
                    source_files.push(path.to_path_buf());
                }
            }
        }
    }

    // Use global config system
    let mut global_config = get_global_config().clone();

    // Override model if provided
    if let Some(model_name) = &model {
        global_config.settings.default_model = Some(model_name.clone());
        if let Some(provider_config) = global_config.providers.get_mut(&global_config.provider) {
            provider_config.model = model_name.clone();
        }
    }

    // Override max_tokens and temperature
    global_config.settings.default_max_tokens = Some(max_tokens);
    global_config.settings.default_temperature = Some(0.3);
    if let Some(provider_config) = global_config.providers.get_mut(&global_config.provider) {
        provider_config.max_tokens = Some(max_tokens);
        provider_config.temperature = Some(0.3);
    }

    // Create client using global config
    let client = global_config.create_contextual_client().map_err(|e| {
        clap_noun_verb::NounVerbError::execution_error(format!("Failed to create AI client: {}", e))
    })?;

    // Build project summary
    let file_list: Vec<String> = source_files
        .iter()
        .map(|p| p.display().to_string())
        .collect();

    let prompt = format!(
        "Analyze this project structure:\n\nProject: {}\n\nFiles:\n{}\n\n\
        Provide insights about:\n\
        1. Project architecture and organization\n\
        2. Code quality and design patterns\n\
        3. Suggested improvements\n\
        4. Potential issues or technical debt",
        project_path.display(),
        file_list.join("\n")
    );

    // Generate analysis
    let response = client.complete(&prompt).await.map_err(|e| {
        clap_noun_verb::NounVerbError::execution_error(format!("Project analysis failed: {}", e))
    })?;

    // Parse response
    let (insights, suggestions) = parse_analysis_response(&response.content);

    Ok(AnalyzeOutput {
        file_path: Some(project_path.display().to_string()),
        insights,
        suggestions,
        complexity_score: None,
        model: client.get_config().model.clone(),
        tokens_used: response.usage.map(|u| u.total_tokens as usize),
    })
}

/// Parse analysis response into structured insights and suggestions
fn parse_analysis_response(response: &str) -> (Vec<String>, Vec<String>) {
    let mut insights = Vec::new();
    let mut suggestions = Vec::new();

    let mut current_section = "";
    for line in response.lines() {
        let line = line.trim();

        // Detect section headers
        if line.to_lowercase().contains("insight") {
            current_section = "insights";
            continue;
        } else if line.to_lowercase().contains("suggestion")
            || line.to_lowercase().contains("improvement")
        {
            current_section = "suggestions";
            continue;
        }

        // Add to appropriate section
        if !line.is_empty() && line.starts_with(|c: char| c.is_numeric() || c == '-' || c == '*') {
            let cleaned = line
                .trim_start_matches(|c: char| c.is_numeric() || c == '.' || c == '-' || c == '*')
                .trim()
                .to_string();

            match current_section {
                "insights" => insights.push(cleaned),
                "suggestions" => suggestions.push(cleaned),
                _ => {
                    // Default to insights if unclear
                    insights.push(cleaned);
                }
            }
        }
    }

    // If parsing failed, put entire response in insights
    if insights.is_empty() && suggestions.is_empty() {
        insights.push(response.to_string());
    }

    (insights, suggestions)
}

/// Estimate code complexity (simplified metric)
fn estimate_complexity(code: &str) -> f64 {
    let mut complexity = 1.0;

    // Count control flow keywords (plain substring matches, so this is a rough heuristic)
    let control_flow = ["if", "else", "match", "for", "while", "loop"];
    for keyword in &control_flow {
        complexity += code.matches(keyword).count() as f64;
    }

    // Count nested blocks (simplified)
    let nesting_level = code.matches('{').count().max(1) as f64;
    complexity *= nesting_level.log10().max(1.0);

    // Count function definitions
    complexity += code.matches("fn ").count() as f64 * 0.5;

    // Cap at 100 and round to one decimal place
    (complexity.min(100.0) * 10.0).round() / 10.0
}
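
// A minimal test sketch for the pure helpers above. The expected values follow
// directly from the heuristics as written and are illustrative of their behavior.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parses_insights_and_suggestions_sections() {
        let response = "Insights:\n1. Uses recursion\nSuggestions:\n- Add memoization";
        let (insights, suggestions) = parse_analysis_response(response);
        assert_eq!(insights, vec!["Uses recursion".to_string()]);
        assert_eq!(suggestions, vec!["Add memoization".to_string()]);
    }

    #[test]
    fn falls_back_to_whole_response_when_unstructured() {
        let response = "No bullet points here.";
        let (insights, suggestions) = parse_analysis_response(response);
        assert_eq!(insights, vec![response.to_string()]);
        assert!(suggestions.is_empty());
    }

    #[test]
    fn complexity_score_stays_in_range() {
        let score = estimate_complexity("fn main() { if true { println!(\"hi\"); } }");
        assert!(score > 0.0 && score <= 100.0);
    }
}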

// ============================================================================
// Usage Notes
// ============================================================================

// To use this in your CLI:
// 1. Replace the existing ai.rs with this file (or integrate gradually)
// 2. Update main.rs to use: clap_noun_verb::run()
// 3. Ensure ggen-ai is properly configured with API keys
// 4. Test with: cargo run -- ai --help
// 5. JSON output: cargo run -- ai generate "hello world" --format json
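//
// A rough main.rs sketch for step 2, assuming clap_noun_verb::run() (named in
// the note above) discovers the #[verb] functions in this module on its own.
// This is illustrative only, not the verified clap-noun-verb v3.4.0 API:
//
// fn main() -> clap_noun_verb::Result<()> {
//     // Dispatch the noun/verb CLI; #[verb] functions register themselves.
//     clap_noun_verb::run()
// }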