// do_memory_mcp/patterns/compatibility.rs
//! # Tool Compatibility Assessment Module
//!
//! This module assesses the risk of pattern recommendations and validates tool compatibility scoring.

use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};

mod analysis_helpers;

/// Tool compatibility assessment result.
///
/// Produced by [`CompatibilityAssessor::assess_compatibility`]; bundles the
/// numeric scoring, the identified risks, and the derived recommendations
/// for one (pattern, tool) pair.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CompatibilityAssessment {
    /// Assessment ID, formatted as `"{pattern_id}_{tool_name}"`
    pub id: String,
    /// Pattern being assessed
    pub pattern_id: String,
    /// Tool being assessed
    pub tool_name: String,
    /// Compatibility score (0-1)
    pub compatibility_score: f64,
    /// Confidence in assessment (0-1)
    pub confidence: f64,
    /// Risk factors identified
    pub risk_factors: Vec<RiskFactor>,
    /// Recommendations generated from the risk factors
    pub recommendations: Vec<String>,
    /// Overall risk level
    pub risk_level: RiskLevel,
    /// Confidence interval (lower, upper)
    pub confidence_interval: (f64, f64),
}

/// A single risk factor identified during a compatibility assessment.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RiskFactor {
    /// Category of the risk (see [`RiskFactorType`])
    pub factor_type: RiskFactorType,
    /// Severity score in the range 0-1
    pub severity: f64,
    /// Human-readable description of the risk
    pub description: String,
    /// Mitigation suggestion, when one is known
    pub mitigation: Option<String>,
}

47/// Risk factor types
48#[derive(Debug, Clone, Serialize, Deserialize)]
49pub enum RiskFactorType {
50    /// Data quality risk (missing, noisy, inconsistent)
51    DataQuality,
52    /// Model performance risk (accuracy, precision)
53    ModelPerformance,
54    /// Domain mismatch risk
55    DomainMismatch,
56    /// Temporal drift risk (pattern changes over time)
57    TemporalDrift,
58    /// Resource constraint risk (computation, memory)
59    ResourceConstraint,
60    /// Compatibility risk (tool version, dependencies)
61    Compatibility,
62}
63
64/// Risk level classification
65#[derive(Debug, Clone, Serialize, Deserialize)]
66pub enum RiskLevel {
67    /// Low risk: safe to proceed
68    Low,
69    /// Medium risk: proceed with caution
70    Medium,
71    /// High risk: requires review
72    High,
73    /// Critical risk: do not proceed
74    Critical,
75}
76
/// Compatibility assessment configuration.
///
/// Thresholds partition compatibility scores into risk bands; see
/// [`AssessmentConfig::default`] for the stock values.
#[derive(Debug, Clone)]
pub struct AssessmentConfig {
    /// Threshold for low risk (>= this score is low risk)
    pub low_risk_threshold: f64,
    /// Threshold for medium risk (>= this score is medium risk)
    pub medium_risk_threshold: f64,
    /// Confidence level for intervals (default: 0.95)
    pub confidence_level: f64,
    /// Minimum pattern occurrences for a pattern to count as fully reliable
    pub min_occurrences: usize,
}

90impl Default for AssessmentConfig {
91    fn default() -> Self {
92        Self {
93            low_risk_threshold: 0.8,
94            medium_risk_threshold: 0.6,
95            confidence_level: 0.95,
96            min_occurrences: 3,
97        }
98    }
99}
100
/// Tool compatibility assessor.
///
/// Holds the scoring configuration and a registry of known tools and their
/// capabilities, populated at construction time.
pub struct CompatibilityAssessor {
    /// Scoring thresholds and reliability settings
    config: AssessmentConfig,
    /// Tool capabilities registry, keyed by tool name
    tool_capabilities: HashMap<String, ToolCapabilities>,
}

/// Capabilities of a single registered tool.
///
/// Fields prefixed with `_` are populated by the registry but not read by
/// the scoring code in this module; presumably kept for future use or for
/// the `analysis_helpers` submodule — verify before removing.
#[derive(Debug, Clone)]
struct ToolCapabilities {
    /// Supported data types (currently unread here)
    _supported_types: HashSet<String>,
    /// Minimum data quality (0-1) the tool requires for full credit
    min_data_quality: f64,
    /// Maximum resource usage (MB) the tool may need
    max_memory_mb: usize,
    /// Domains in which the tool is fully supported
    supported_domains: HashSet<String>,
    /// Average latency in milliseconds (currently unread here)
    _avg_latency_ms: f64,
    /// Historical success rate (0-1); feeds into assessment confidence
    success_rate: f64,
}

124impl CompatibilityAssessor {
125    /// Create a new compatibility assessor
126    pub fn new(config: AssessmentConfig) -> Self {
127        let mut assessor = Self {
128            config,
129            tool_capabilities: HashMap::new(),
130        };
131
132        // Initialize with known tools
133        assessor.initialize_tool_registry();
134        assessor
135    }
136
137    /// Create with default configuration
138    pub fn default_config() -> Self {
139        Self::new(AssessmentConfig::default())
140    }
141
142    /// Initialize tool registry with known capabilities
143    fn initialize_tool_registry(&mut self) {
144        // query_memory tool
145        self.tool_capabilities.insert(
146            "query_memory".to_string(),
147            ToolCapabilities {
148                _supported_types: vec!["episodic", "semantic", "temporal"]
149                    .into_iter()
150                    .map(String::from)
151                    .collect(),
152                min_data_quality: 0.5,
153                max_memory_mb: 100,
154                supported_domains: vec!["web-api", "cli", "data-processing"]
155                    .into_iter()
156                    .map(String::from)
157                    .collect(),
158                _avg_latency_ms: 10.0,
159                success_rate: 0.98,
160            },
161        );
162
163        // analyze_patterns tool
164        self.tool_capabilities.insert(
165            "analyze_patterns".to_string(),
166            ToolCapabilities {
167                _supported_types: vec!["statistical", "predictive", "causal"]
168                    .into_iter()
169                    .map(String::from)
170                    .collect(),
171                min_data_quality: 0.7,
172                max_memory_mb: 200,
173                supported_domains: vec!["data-processing", "analytics"]
174                    .into_iter()
175                    .map(String::from)
176                    .collect(),
177                _avg_latency_ms: 50.0,
178                success_rate: 0.92,
179            },
180        );
181
182        // advanced_pattern_analysis tool
183        self.tool_capabilities.insert(
184            "advanced_pattern_analysis".to_string(),
185            ToolCapabilities {
186                _supported_types: vec!["time_series", "multivariate", "temporal"]
187                    .into_iter()
188                    .map(String::from)
189                    .collect(),
190                min_data_quality: 0.8,
191                max_memory_mb: 500,
192                supported_domains: vec!["analytics", "forecasting", "anomaly_detection"]
193                    .into_iter()
194                    .map(String::from)
195                    .collect(),
196                _avg_latency_ms: 100.0,
197                success_rate: 0.88,
198            },
199        );
200    }
201
202    /// Assess tool compatibility for a pattern
203    pub fn assess_compatibility(
204        &self,
205        pattern_id: &str,
206        tool_name: &str,
207        pattern_context: &PatternContext,
208    ) -> Result<CompatibilityAssessment> {
209        // Get tool capabilities
210        let tool_caps = self
211            .tool_capabilities
212            .get(tool_name)
213            .ok_or_else(|| anyhow::anyhow!("Unknown tool: {}", tool_name))?;
214
215        // Compute compatibility score
216        let compatibility_score = self.compute_compatibility_score(tool_caps, pattern_context);
217
218        // Compute confidence
219        let confidence = self.compute_confidence(tool_caps, pattern_context);
220
221        // Identify risk factors
222        let risk_factors = self.identify_risk_factors(tool_caps, pattern_context);
223
224        // Determine risk level
225        let risk_level = self.determine_risk_level(compatibility_score, &risk_factors);
226
227        // Generate recommendations
228        let recommendations = self.generate_recommendations(&risk_factors, tool_name);
229
230        // Compute confidence interval
231        let confidence_interval = self.compute_confidence_interval(
232            compatibility_score,
233            confidence,
234            pattern_context.occurrences,
235        );
236
237        Ok(CompatibilityAssessment {
238            id: format!("{}_{}", pattern_id, tool_name),
239            pattern_id: pattern_id.to_string(),
240            tool_name: tool_name.to_string(),
241            compatibility_score,
242            confidence,
243            risk_factors,
244            recommendations,
245            risk_level,
246            confidence_interval,
247        })
248    }
249
250    /// Compute compatibility score
251    fn compute_compatibility_score(
252        &self,
253        tool_caps: &ToolCapabilities,
254        context: &PatternContext,
255    ) -> f64 {
256        let mut score = 0.0;
257        let mut total_weight = 0.0;
258
259        // Data quality compatibility (weight: 0.3)
260        let quality_score = if context.data_quality >= tool_caps.min_data_quality {
261            1.0
262        } else {
263            context.data_quality / tool_caps.min_data_quality
264        };
265        score += 0.3 * quality_score;
266        total_weight += 0.3;
267
268        // Domain compatibility (weight: 0.25)
269        let domain_score = if tool_caps.supported_domains.contains(&context.domain) {
270            1.0
271        } else {
272            0.5 // Partial credit if domain not directly supported
273        };
274        score += 0.25 * domain_score;
275        total_weight += 0.25;
276
277        // Occurrence reliability (weight: 0.2)
278        let occurrence_score = if context.occurrences >= self.config.min_occurrences {
279            1.0
280        } else {
281            context.occurrences as f64 / self.config.min_occurrences as f64
282        };
283        score += 0.2 * occurrence_score;
284        total_weight += 0.2;
285
286        // Temporal stability (weight: 0.15)
287        let stability_score = context.temporal_stability;
288        score += 0.15 * stability_score;
289        total_weight += 0.15;
290
291        // Resource availability (weight: 0.1)
292        let resource_score = if context.available_memory_mb >= tool_caps.max_memory_mb {
293            1.0
294        } else {
295            context.available_memory_mb as f64 / tool_caps.max_memory_mb as f64
296        };
297        score += 0.1 * resource_score;
298        total_weight += 0.1;
299
300        // Normalize score
301        if total_weight > 0.0 {
302            score / total_weight
303        } else {
304            0.5 // Default middle score
305        }
306    }
307
308    /// Compute confidence in assessment
309    fn compute_confidence(&self, tool_caps: &ToolCapabilities, context: &PatternContext) -> f64 {
310        let mut confidence = 0.5; // Base confidence
311
312        // Increase confidence based on tool success rate
313        confidence += 0.2 * tool_caps.success_rate;
314
315        // Increase confidence based on pattern occurrences
316        let occurrence_confidence = if context.occurrences >= 10 {
317            1.0
318        } else {
319            context.occurrences as f64 / 10.0
320        };
321        confidence += 0.2 * occurrence_confidence;
322
323        // Increase confidence based on data quality
324        confidence += 0.1 * context.data_quality;
325
326        confidence.clamp(0.0, 1.0)
327    }
328
329    /// Batch assess multiple tools
330    pub fn batch_assess(
331        &self,
332        pattern_id: &str,
333        tool_names: &[String],
334        context: &PatternContext,
335    ) -> Result<Vec<CompatibilityAssessment>> {
336        let mut assessments = Vec::new();
337
338        for tool_name in tool_names {
339            let assessment = self.assess_compatibility(pattern_id, tool_name, context)?;
340            assessments.push(assessment);
341        }
342
343        Ok(assessments)
344    }
345
346    /// Get best tool for a pattern
347    pub fn get_best_tool(
348        &self,
349        pattern_id: &str,
350        tool_names: &[String],
351        context: &PatternContext,
352    ) -> Result<Option<(String, CompatibilityAssessment)>> {
353        let assessments = self.batch_assess(pattern_id, tool_names, context)?;
354
355        let best = assessments
356            .into_iter()
357            .filter(|a| matches!(a.risk_level, RiskLevel::Low | RiskLevel::Medium))
358            .max_by(|a, b| {
359                a.compatibility_score
360                    .partial_cmp(&b.compatibility_score)
361                    .unwrap_or(std::cmp::Ordering::Equal)
362            });
363
364        Ok(best.map(|assessment| (assessment.tool_name.clone(), assessment)))
365    }
366}
367
/// Pattern context for compatibility assessment.
///
/// Supplied by the caller; describes the pattern and the runtime
/// environment it would be evaluated in.
#[derive(Debug, Clone)]
pub struct PatternContext {
    /// Domain of the pattern (matched against a tool's supported domains)
    pub domain: String,
    /// Data quality score (0-1)
    pub data_quality: f64,
    /// Number of times the pattern has occurred
    pub occurrences: usize,
    /// Temporal stability (0-1, higher = more stable)
    pub temporal_stability: f64,
    /// Available memory in MB
    pub available_memory_mb: usize,
    /// Pattern complexity (0-1); not read by the scoring code in this
    /// module — presumably consumed by `analysis_helpers`; verify there
    pub complexity: f64,
}

#[cfg(test)]
mod tests;