// kardo_core/pro/recommendations.rs
//! AI Recommendations Engine — generates recommendations locally or via cloud.
//!
//! Strategy:
//! 1. If cloud client available and connected → use Claude (better quality)
//! 2. If cloud unavailable → use local Ollama (basic recommendations)
//! 3. If Ollama unavailable → return rule-based fallback recommendations

use crate::anonymize::Anonymizer;
use crate::llm::ollama::OllamaClient;
use crate::llm::GenerateRequest;
use crate::scoring::{ProjectScore, QualityIssue};

use super::cloud::{AnalyzeRequest, CloudClient, FixSuggestion, Recommendation};

17/// Safely truncate a string to at most `max_bytes` bytes without splitting a
18/// multi-byte UTF-8 character.
19fn safe_truncate(s: &str, max_bytes: usize) -> &str {
20    if s.len() <= max_bytes {
21        return s;
22    }
23    let mut end = max_bytes;
24    while end > 0 && !s.is_char_boundary(end) {
25        end -= 1;
26    }
27    &s[..end]
28}
29
30/// Generates AI-powered recommendations for project quality improvement.
31pub struct RecommendationEngine {
32    cloud_client: Option<CloudClient>,
33    anonymizer: Anonymizer,
34}
35
36impl RecommendationEngine {
37    /// Create a new engine, optionally with a cloud client for Claude-based analysis.
38    pub fn new(cloud_client: Option<CloudClient>) -> Self {
39        Self {
40            cloud_client,
41            anonymizer: Anonymizer::new(),
42        }
43    }
44
45    /// Generate recommendations for quality issues.
46    ///
47    /// Tries cloud first, falls back to local Ollama, then rule-based.
48    pub async fn generate_recommendations(
49        &self,
50        score: &ProjectScore,
51        issues: &[QualityIssue],
52    ) -> Vec<Recommendation> {
53        // Try cloud first
54        if let Some(client) = &self.cloud_client {
55            let prompt = Self::build_recommendation_prompt(score, issues);
56            let anonymized = self.anonymizer.anonymize(&prompt);
57
58            let request = AnalyzeRequest {
59                content: anonymized.text,
60                document_type: "project_analysis".to_string(),
61                context: Some(format!(
62                    "Score: {:.0}/100, Traffic: {:?}",
63                    score.total * 100.0,
64                    score.traffic_light
65                )),
66                task: "recommend".to_string(),
67            };
68
69            if let Ok(response) = client.analyze(request).await {
70                return response.recommendations;
71            }
72        }
73
74        // Try local Ollama
75        if let Some(recommendations) = self.try_ollama_recommendations(score, issues).await {
76            return recommendations;
77        }
78
79        // Fallback: rule-based recommendations
80        Self::rule_based_recommendations(score, issues)
81    }
82
83    /// Generate a fix suggestion for a specific issue.
84    ///
85    /// Tries cloud first, falls back to local Ollama.
86    pub async fn suggest_fix(
87        &self,
88        issue: &QualityIssue,
89        file_content: &str,
90    ) -> Option<FixSuggestion> {
91        // Anonymize the file content
92        let anonymized = self.anonymizer.anonymize(file_content);
93
94        // Try cloud
95        if let Some(client) = &self.cloud_client {
96            if let Ok(suggestions) = client
97                .get_fix_suggestions(vec![issue.clone()], &anonymized.text)
98                .await
99            {
100                if let Some(mut suggestion) = suggestions.into_iter().next() {
101                    // De-anonymize the suggestion
102                    suggestion.suggestion = Anonymizer::deanonymize(
103                        &suggestion.suggestion,
104                        &anonymized.replacements,
105                    );
106                    if let Some(snippet) = &suggestion.code_snippet {
107                        suggestion.code_snippet = Some(Anonymizer::deanonymize(
108                            snippet,
109                            &anonymized.replacements,
110                        ));
111                    }
112                    return Some(suggestion);
113                }
114            }
115        }
116
117        // Try local Ollama
118        self.try_ollama_fix(issue, &anonymized.text, &anonymized.replacements)
119            .await
120    }
121
122    /// Build prompt for generating recommendations.
123    fn build_recommendation_prompt(score: &ProjectScore, issues: &[QualityIssue]) -> String {
124        let mut prompt = String::new();
125        prompt.push_str(
126            "You are a code documentation quality expert. \
127             Analyze the following project health data and provide 3-5 actionable recommendations \
128             to improve the project's documentation and AI-readiness.\n\n",
129        );
130
131        prompt.push_str(&format!(
132            "## Project Score: {:.0}/100 (Traffic Light: {:?})\n\n",
133            score.total * 100.0,
134            score.traffic_light
135        ));
136
137        prompt.push_str("### Component Scores:\n");
138        prompt.push_str(&format!(
139            "- Freshness: {:.0}/100\n",
140            score.components.freshness
141        ));
142        prompt.push_str(&format!(
143            "- Configuration: {:.0}/100\n",
144            score.components.configuration
145        ));
146        prompt.push_str(&format!(
147            "- Integrity: {:.0}/100\n",
148            score.components.integrity
149        ));
150        prompt.push_str(&format!(
151            "- Agent Setup: {:.0}/100\n",
152            score.components.agent_setup
153        ));
154        prompt.push_str(&format!(
155            "- Structure: {:.0}/100\n\n",
156            score.components.structure
157        ));
158
159        if !issues.is_empty() {
160            prompt.push_str("### Quality Issues Found:\n");
161            for (i, issue) in issues.iter().enumerate().take(10) {
162                prompt.push_str(&format!(
163                    "{}. [{:?}] {:?}: {} — {}\n",
164                    i + 1,
165                    issue.severity,
166                    issue.category,
167                    issue.title,
168                    issue.attribution
169                ));
170                if let Some(suggestion) = &issue.suggestion {
171                    prompt.push_str(&format!("   Suggestion: {}\n", suggestion));
172                }
173            }
174            prompt.push('\n');
175        }
176
177        prompt.push_str(
178            "Respond in JSON format with an array of recommendations:\n\
179             ```json\n\
180             [{\"title\": \"...\", \"description\": \"...\", \"priority\": \"high|medium|low\", \"category\": \"...\"}]\n\
181             ```\n\
182             Focus on the most impactful changes first. Be specific and actionable.",
183        );
184
185        prompt
186    }
187
188    /// Build prompt for generating a fix suggestion.
189    fn build_fix_prompt(issue: &QualityIssue, content: &str) -> String {
190        let mut prompt = String::new();
191        prompt.push_str(
192            "You are a documentation quality expert. \
193             Given this quality issue in a documentation file, suggest a specific fix.\n\n",
194        );
195
196        prompt.push_str("## Issue\n");
197        prompt.push_str(&format!("- Title: {}\n", issue.title));
198        prompt.push_str(&format!("- Category: {:?}\n", issue.category));
199        prompt.push_str(&format!("- Severity: {:?}\n", issue.severity));
200        prompt.push_str(&format!("- Details: {}\n", issue.attribution));
201        if let Some(suggestion) = &issue.suggestion {
202            prompt.push_str(&format!("- Existing suggestion: {}\n", suggestion));
203        }
204        prompt.push('\n');
205
206        // Include a truncated version of the file content
207        let truncated = safe_truncate(content, 2000);
208        prompt.push_str(&format!(
209            "## File Content (may be truncated)\n```\n{}\n```\n\n",
210            truncated
211        ));
212
213        prompt.push_str(
214            "Respond in JSON format:\n\
215             ```json\n\
216             {\"suggestion\": \"...\", \"confidence\": 0.0-1.0, \"code_snippet\": \"...or null\"}\n\
217             ```\n\
218             Provide a clear, actionable suggestion. Include a code_snippet if applicable.",
219        );
220
221        prompt
222    }
223
224    /// Try to generate recommendations using local Ollama.
225    async fn try_ollama_recommendations(
226        &self,
227        score: &ProjectScore,
228        issues: &[QualityIssue],
229    ) -> Option<Vec<Recommendation>> {
230        let client = OllamaClient::new();
231        let status = client.check_status().await;
232        if !status.available {
233            return None;
234        }
235
236        let prompt = Self::build_recommendation_prompt(score, issues);
237        let anonymized = self.anonymizer.anonymize(&prompt);
238
239        let request = GenerateRequest {
240            prompt: anonymized.text,
241            max_tokens: 1024,
242            temperature: 0.3,
243        };
244
245        match client.generate(&request).await {
246            Ok(response) => parse_recommendations_json(&response.text),
247            Err(_) => None,
248        }
249    }
250
251    /// Try to generate a fix suggestion using local Ollama.
252    async fn try_ollama_fix(
253        &self,
254        issue: &QualityIssue,
255        anonymized_content: &str,
256        replacements: &[crate::anonymize::Replacement],
257    ) -> Option<FixSuggestion> {
258        let client = OllamaClient::new();
259        let status = client.check_status().await;
260        if !status.available {
261            return None;
262        }
263
264        let prompt = Self::build_fix_prompt(issue, anonymized_content);
265
266        let request = GenerateRequest {
267            prompt,
268            max_tokens: 512,
269            temperature: 0.3,
270        };
271
272        match client.generate(&request).await {
273            Ok(response) => {
274                if let Some(mut fix) = parse_fix_json(&response.text, &issue.id) {
275                    // De-anonymize
276                    fix.suggestion =
277                        Anonymizer::deanonymize(&fix.suggestion, replacements);
278                    if let Some(snippet) = &fix.code_snippet {
279                        fix.code_snippet =
280                            Some(Anonymizer::deanonymize(snippet, replacements));
281                    }
282                    Some(fix)
283                } else {
284                    None
285                }
286            }
287            Err(_) => None,
288        }
289    }
290
291    /// Rule-based fallback recommendations when no AI is available.
292    fn rule_based_recommendations(
293        score: &ProjectScore,
294        issues: &[QualityIssue],
295    ) -> Vec<Recommendation> {
296        let mut recommendations = Vec::new();
297
298        // Check freshness
299        if score.components.freshness < 60.0 {
300            recommendations.push(Recommendation {
301                title: "Update stale documentation".to_string(),
302                description: "Several documentation files haven't been updated recently. \
303                    Review and update files that are out of sync with your codebase."
304                    .to_string(),
305                priority: "high".to_string(),
306                category: "freshness".to_string(),
307            });
308        }
309
310        // Check configuration
311        if score.components.configuration < 60.0 {
312            recommendations.push(Recommendation {
313                title: "Improve project configuration files".to_string(),
314                description: "Your README or CLAUDE.md may be missing important sections. \
315                    Consider adding installation instructions, usage examples, and contribution guidelines."
316                    .to_string(),
317                priority: "high".to_string(),
318                category: "configuration".to_string(),
319            });
320        }
321
322        // Check integrity
323        if score.components.integrity < 80.0 {
324            recommendations.push(Recommendation {
325                title: "Fix broken internal links".to_string(),
326                description: "Some documentation files contain broken links to other files. \
327                    Verify all internal references point to existing files."
328                    .to_string(),
329                priority: "medium".to_string(),
330                category: "integrity".to_string(),
331            });
332        }
333
334        // Check agent setup
335        if score.components.agent_setup < 50.0 {
336            recommendations.push(Recommendation {
337                title: "Set up AI agent configuration".to_string(),
338                description: "Add a CLAUDE.md or .claude/instructions file to guide AI coding assistants. \
339                    This improves AI-readiness and helps agents understand your project."
340                    .to_string(),
341                priority: "medium".to_string(),
342                category: "agent_setup".to_string(),
343            });
344        }
345
346        // Check structure
347        if score.components.structure < 60.0 {
348            recommendations.push(Recommendation {
349                title: "Organize documentation structure".to_string(),
350                description: "Consider creating a docs/ directory and organizing documentation \
351                    by topic (guides, API reference, architecture)."
352                    .to_string(),
353                priority: "low".to_string(),
354                category: "structure".to_string(),
355            });
356        }
357
358        // Add issue-specific recommendations for high severity
359        let high_issues: Vec<_> = issues
360            .iter()
361            .filter(|i| matches!(i.severity, crate::scoring::IssueSeverity::Blocking | crate::scoring::IssueSeverity::High))
362            .take(2)
363            .collect();
364
365        for issue in high_issues {
366            if let Some(suggestion) = &issue.suggestion {
367                recommendations.push(Recommendation {
368                    title: issue.title.clone(),
369                    description: suggestion.clone(),
370                    priority: "high".to_string(),
371                    category: format!("{:?}", issue.category).to_lowercase(),
372                });
373            }
374        }
375
376        // Limit to 5
377        recommendations.truncate(5);
378        recommendations
379    }
380}
381
382/// Try to parse recommendation JSON from an LLM response.
383fn parse_recommendations_json(text: &str) -> Option<Vec<Recommendation>> {
384    // Try to find JSON array in the response
385    let trimmed = text.trim();
386
387    // Try direct parse
388    if let Ok(recs) = serde_json::from_str::<Vec<Recommendation>>(trimmed) {
389        return Some(recs);
390    }
391
392    // Try to extract from markdown code block
393    if let Some(start) = trimmed.find('[') {
394        if let Some(end) = trimmed.rfind(']') {
395            let json_str = &trimmed[start..=end];
396            if let Ok(recs) = serde_json::from_str::<Vec<Recommendation>>(json_str) {
397                return Some(recs);
398            }
399        }
400    }
401
402    None
403}
404
405/// Try to parse fix suggestion JSON from an LLM response.
406fn parse_fix_json(text: &str, issue_id: &str) -> Option<FixSuggestion> {
407    let trimmed = text.trim();
408
409    #[derive(serde::Deserialize)]
410    struct PartialFix {
411        suggestion: String,
412        confidence: f64,
413        code_snippet: Option<String>,
414    }
415
416    // Try direct parse
417    if let Ok(fix) = serde_json::from_str::<PartialFix>(trimmed) {
418        return Some(FixSuggestion {
419            issue_id: issue_id.to_string(),
420            suggestion: fix.suggestion,
421            confidence: fix.confidence,
422            code_snippet: fix.code_snippet,
423        });
424    }
425
426    // Try to extract from markdown code block
427    if let Some(start) = trimmed.find('{') {
428        if let Some(end) = trimmed.rfind('}') {
429            let json_str = &trimmed[start..=end];
430            if let Ok(fix) = serde_json::from_str::<PartialFix>(json_str) {
431                return Some(FixSuggestion {
432                    issue_id: issue_id.to_string(),
433                    suggestion: fix.suggestion,
434                    confidence: fix.confidence,
435                    code_snippet: fix.code_snippet,
436                });
437            }
438        }
439    }
440
441    None
442}
443
444#[cfg(test)]
445mod tests {
446    use super::*;
447    use crate::scoring::{
448        IssueCategory, IssueSeverity, ProjectScore, QualityIssue, TrafficLight,
449    };
450    use crate::scoring::types::ComponentScores;
451
452    fn make_test_score(total: f64) -> ProjectScore {
453        ProjectScore {
454            total,
455            components: ComponentScores {
456                freshness: total * 100.0,
457                configuration: total * 100.0,
458                integrity: total * 100.0,
459                agent_setup: total * 100.0,
460                structure: total * 100.0,
461            },
462            traffic_light: TrafficLight::from_score(total),
463            issues: vec![],
464        }
465    }
466
467    fn make_test_issues() -> Vec<QualityIssue> {
468        vec![
469            QualityIssue::new(
470                "stale-001".to_string(),
471                Some("README.md".to_string()),
472                IssueCategory::Freshness,
473                IssueSeverity::High,
474                "README.md is stale".to_string(),
475                "Not updated in 90 days".to_string(),
476                Some("Update README.md with current project status".to_string()),
477            ),
478            QualityIssue::new(
479                "integrity-001".to_string(),
480                Some("docs/guide.md".to_string()),
481                IssueCategory::Integrity,
482                IssueSeverity::Medium,
483                "Broken link in guide.md".to_string(),
484                "Link to setup.md is broken".to_string(),
485                Some("Fix or remove the broken link".to_string()),
486            ),
487        ]
488    }
489
490    #[test]
491    fn test_build_recommendation_prompt() {
492        let score = make_test_score(0.65);
493        let issues = make_test_issues();
494        let prompt = RecommendationEngine::build_recommendation_prompt(&score, &issues);
495
496        assert!(prompt.contains("documentation quality expert"));
497        assert!(prompt.contains("65/100"));
498        assert!(prompt.contains("README.md is stale"));
499        assert!(prompt.contains("JSON format"));
500    }
501
502    #[test]
503    fn test_build_fix_prompt() {
504        let issue = make_test_issues().remove(0);
505        let content = "# README\n\nOld content here.";
506        let prompt = RecommendationEngine::build_fix_prompt(&issue, content);
507
508        assert!(prompt.contains("README.md is stale"));
509        assert!(prompt.contains("Old content here"));
510        assert!(prompt.contains("JSON format"));
511    }
512
513    #[test]
514    fn test_rule_based_recommendations_low_score() {
515        let mut score = make_test_score(0.4);
516        score.components.freshness = 40.0;
517        score.components.configuration = 30.0;
518        score.components.agent_setup = 20.0;
519        let issues = make_test_issues();
520
521        let recs = RecommendationEngine::rule_based_recommendations(&score, &issues);
522        assert!(!recs.is_empty());
523        assert!(recs.len() <= 5);
524        // Should have freshness and configuration recommendations
525        assert!(recs.iter().any(|r| r.category == "freshness"));
526        assert!(recs.iter().any(|r| r.category == "configuration"));
527    }
528
529    #[test]
530    fn test_rule_based_recommendations_high_score() {
531        let score = make_test_score(0.95);
532        let recs = RecommendationEngine::rule_based_recommendations(&score, &[]);
533        // High score = few or no recommendations
534        assert!(recs.is_empty());
535    }
536
537    #[test]
538    fn test_parse_recommendations_json_direct() {
539        let json = r#"[{"title":"Test","description":"Desc","priority":"high","category":"structure"}]"#;
540        let recs = parse_recommendations_json(json);
541        assert!(recs.is_some());
542        assert_eq!(recs.unwrap().len(), 1);
543    }
544
545    #[test]
546    fn test_parse_recommendations_json_in_code_block() {
547        let text = "Here are my recommendations:\n```json\n[{\"title\":\"Test\",\"description\":\"Desc\",\"priority\":\"low\",\"category\":\"docs\"}]\n```";
548        let recs = parse_recommendations_json(text);
549        assert!(recs.is_some());
550    }
551
552    #[test]
553    fn test_parse_fix_json() {
554        let json = r##"{"suggestion":"Add a header","confidence":0.88,"code_snippet":"# Header"}"##;
555        let fix = parse_fix_json(json, "test-001");
556        assert!(fix.is_some());
557        let fix = fix.unwrap();
558        assert_eq!(fix.issue_id, "test-001");
559        assert!((fix.confidence - 0.88).abs() < f64::EPSILON);
560    }
561
562    #[test]
563    fn test_parse_fix_json_invalid() {
564        let fix = parse_fix_json("not json at all", "test-001");
565        assert!(fix.is_none());
566    }
567
568    #[tokio::test]
569    async fn test_generate_recommendations_no_cloud_no_ollama() {
570        // No cloud, no Ollama running → should return rule-based
571        let engine = RecommendationEngine::new(None);
572        let mut score = make_test_score(0.4);
573        score.components.freshness = 30.0;
574        let issues = make_test_issues();
575
576        let recs = engine.generate_recommendations(&score, &issues).await;
577        // Should get fallback rule-based recommendations
578        assert!(!recs.is_empty());
579    }
580}