1use anyhow::Result;
6use serde::{Deserialize, Serialize};
7use std::collections::{HashMap, HashSet};
8
9mod analysis_helpers;
10
/// Result of assessing how well a tool fits a given pattern.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CompatibilityAssessment {
    /// Unique identifier, formatted as "<pattern_id>_<tool_name>".
    pub id: String,
    /// Pattern that was assessed.
    pub pattern_id: String,
    /// Tool that was assessed against the pattern.
    pub tool_name: String,
    /// Weighted fit score in [0, 1]; see `compute_compatibility_score`.
    pub compatibility_score: f64,
    /// Confidence in the score, clamped to [0, 1].
    pub confidence: f64,
    /// Individual risks identified for this pairing.
    pub risk_factors: Vec<RiskFactor>,
    /// Human-readable suggestions derived from the risk factors.
    pub recommendations: Vec<String>,
    /// Overall risk classification.
    pub risk_level: RiskLevel,
    /// Interval around `compatibility_score` — presumably (lower, upper)
    /// bounds; produced by `compute_confidence_interval` (defined in the
    /// analysis helpers) — confirm there.
    pub confidence_interval: (f64, f64),
}
33
/// A single identified risk for a pattern/tool pairing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RiskFactor {
    /// Category of the risk.
    pub factor_type: RiskFactorType,
    /// Severity of the risk — scale not shown in this file; presumably
    /// 0.0–1.0, confirm against `identify_risk_factors`.
    pub severity: f64,
    /// Human-readable explanation of the risk.
    pub description: String,
    /// Optional suggested mitigation for this risk.
    pub mitigation: Option<String>,
}
46
/// Categories of risk emitted by risk analysis (see the
/// `identify_risk_factors` call in `assess_compatibility`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RiskFactorType {
    /// Input data does not meet quality expectations.
    DataQuality,
    /// Concern about the tool/model's own performance.
    ModelPerformance,
    /// Pattern domain is outside the tool's supported domains.
    DomainMismatch,
    /// Pattern behavior appears unstable over time.
    TemporalDrift,
    /// Insufficient resources (e.g. memory) to run the tool.
    ResourceConstraint,
    /// General compatibility concern not covered by the variants above.
    Compatibility,
}
63
/// Overall risk classification, ordered from least to most severe.
///
/// `get_best_tool` only considers tools rated `Low` or `Medium`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RiskLevel {
    Low,
    Medium,
    High,
    Critical,
}
76
/// Tunable thresholds used by `CompatibilityAssessor`.
#[derive(Debug, Clone)]
pub struct AssessmentConfig {
    /// Score cutoff for low risk (default 0.8); consumed by the
    /// risk-level classification — confirm in `determine_risk_level`.
    pub low_risk_threshold: f64,
    /// Score cutoff for medium risk (default 0.6).
    pub medium_risk_threshold: f64,
    /// Statistical confidence level (default 0.95); presumably used when
    /// computing `confidence_interval` — confirm in the analysis helpers.
    pub confidence_level: f64,
    /// Occurrences required for full occurrence credit in scoring
    /// (default 3); fewer yields proportional partial credit.
    pub min_occurrences: usize,
}
89
90impl Default for AssessmentConfig {
91 fn default() -> Self {
92 Self {
93 low_risk_threshold: 0.8,
94 medium_risk_threshold: 0.6,
95 confidence_level: 0.95,
96 min_occurrences: 3,
97 }
98 }
99}
100
/// Assesses pattern/tool compatibility against a built-in registry of
/// tool capability profiles.
pub struct CompatibilityAssessor {
    /// Thresholds and minimums governing scoring and classification.
    config: AssessmentConfig,
    /// Known tools keyed by name; populated by `initialize_tool_registry`.
    tool_capabilities: HashMap<String, ToolCapabilities>,
}
107
/// Capability profile for a registered tool.
///
/// Underscore-prefixed fields are populated by the registry but not read
/// by the scoring code visible in this file.
#[derive(Debug, Clone)]
struct ToolCapabilities {
    // Data types the tool handles; currently unused by scoring.
    _supported_types: HashSet<String>,
    // Minimum acceptable data quality; lower quality yields partial credit.
    min_data_quality: f64,
    // Memory the tool needs; less available memory yields partial credit.
    max_memory_mb: usize,
    // Domains where the tool applies; mismatches score a flat 0.5.
    supported_domains: HashSet<String>,
    // Average latency in milliseconds; currently unused by scoring.
    _avg_latency_ms: f64,
    // Historical success rate in [0, 1]; boosts assessment confidence.
    success_rate: f64,
}
123
124impl CompatibilityAssessor {
125 pub fn new(config: AssessmentConfig) -> Self {
127 let mut assessor = Self {
128 config,
129 tool_capabilities: HashMap::new(),
130 };
131
132 assessor.initialize_tool_registry();
134 assessor
135 }
136
137 pub fn default_config() -> Self {
139 Self::new(AssessmentConfig::default())
140 }
141
142 fn initialize_tool_registry(&mut self) {
144 self.tool_capabilities.insert(
146 "query_memory".to_string(),
147 ToolCapabilities {
148 _supported_types: vec!["episodic", "semantic", "temporal"]
149 .into_iter()
150 .map(String::from)
151 .collect(),
152 min_data_quality: 0.5,
153 max_memory_mb: 100,
154 supported_domains: vec!["web-api", "cli", "data-processing"]
155 .into_iter()
156 .map(String::from)
157 .collect(),
158 _avg_latency_ms: 10.0,
159 success_rate: 0.98,
160 },
161 );
162
163 self.tool_capabilities.insert(
165 "analyze_patterns".to_string(),
166 ToolCapabilities {
167 _supported_types: vec!["statistical", "predictive", "causal"]
168 .into_iter()
169 .map(String::from)
170 .collect(),
171 min_data_quality: 0.7,
172 max_memory_mb: 200,
173 supported_domains: vec!["data-processing", "analytics"]
174 .into_iter()
175 .map(String::from)
176 .collect(),
177 _avg_latency_ms: 50.0,
178 success_rate: 0.92,
179 },
180 );
181
182 self.tool_capabilities.insert(
184 "advanced_pattern_analysis".to_string(),
185 ToolCapabilities {
186 _supported_types: vec!["time_series", "multivariate", "temporal"]
187 .into_iter()
188 .map(String::from)
189 .collect(),
190 min_data_quality: 0.8,
191 max_memory_mb: 500,
192 supported_domains: vec!["analytics", "forecasting", "anomaly_detection"]
193 .into_iter()
194 .map(String::from)
195 .collect(),
196 _avg_latency_ms: 100.0,
197 success_rate: 0.88,
198 },
199 );
200 }
201
202 pub fn assess_compatibility(
204 &self,
205 pattern_id: &str,
206 tool_name: &str,
207 pattern_context: &PatternContext,
208 ) -> Result<CompatibilityAssessment> {
209 let tool_caps = self
211 .tool_capabilities
212 .get(tool_name)
213 .ok_or_else(|| anyhow::anyhow!("Unknown tool: {}", tool_name))?;
214
215 let compatibility_score = self.compute_compatibility_score(tool_caps, pattern_context);
217
218 let confidence = self.compute_confidence(tool_caps, pattern_context);
220
221 let risk_factors = self.identify_risk_factors(tool_caps, pattern_context);
223
224 let risk_level = self.determine_risk_level(compatibility_score, &risk_factors);
226
227 let recommendations = self.generate_recommendations(&risk_factors, tool_name);
229
230 let confidence_interval = self.compute_confidence_interval(
232 compatibility_score,
233 confidence,
234 pattern_context.occurrences,
235 );
236
237 Ok(CompatibilityAssessment {
238 id: format!("{}_{}", pattern_id, tool_name),
239 pattern_id: pattern_id.to_string(),
240 tool_name: tool_name.to_string(),
241 compatibility_score,
242 confidence,
243 risk_factors,
244 recommendations,
245 risk_level,
246 confidence_interval,
247 })
248 }
249
250 fn compute_compatibility_score(
252 &self,
253 tool_caps: &ToolCapabilities,
254 context: &PatternContext,
255 ) -> f64 {
256 let mut score = 0.0;
257 let mut total_weight = 0.0;
258
259 let quality_score = if context.data_quality >= tool_caps.min_data_quality {
261 1.0
262 } else {
263 context.data_quality / tool_caps.min_data_quality
264 };
265 score += 0.3 * quality_score;
266 total_weight += 0.3;
267
268 let domain_score = if tool_caps.supported_domains.contains(&context.domain) {
270 1.0
271 } else {
272 0.5 };
274 score += 0.25 * domain_score;
275 total_weight += 0.25;
276
277 let occurrence_score = if context.occurrences >= self.config.min_occurrences {
279 1.0
280 } else {
281 context.occurrences as f64 / self.config.min_occurrences as f64
282 };
283 score += 0.2 * occurrence_score;
284 total_weight += 0.2;
285
286 let stability_score = context.temporal_stability;
288 score += 0.15 * stability_score;
289 total_weight += 0.15;
290
291 let resource_score = if context.available_memory_mb >= tool_caps.max_memory_mb {
293 1.0
294 } else {
295 context.available_memory_mb as f64 / tool_caps.max_memory_mb as f64
296 };
297 score += 0.1 * resource_score;
298 total_weight += 0.1;
299
300 if total_weight > 0.0 {
302 score / total_weight
303 } else {
304 0.5 }
306 }
307
308 fn compute_confidence(&self, tool_caps: &ToolCapabilities, context: &PatternContext) -> f64 {
310 let mut confidence = 0.5; confidence += 0.2 * tool_caps.success_rate;
314
315 let occurrence_confidence = if context.occurrences >= 10 {
317 1.0
318 } else {
319 context.occurrences as f64 / 10.0
320 };
321 confidence += 0.2 * occurrence_confidence;
322
323 confidence += 0.1 * context.data_quality;
325
326 confidence.clamp(0.0, 1.0)
327 }
328
329 pub fn batch_assess(
331 &self,
332 pattern_id: &str,
333 tool_names: &[String],
334 context: &PatternContext,
335 ) -> Result<Vec<CompatibilityAssessment>> {
336 let mut assessments = Vec::new();
337
338 for tool_name in tool_names {
339 let assessment = self.assess_compatibility(pattern_id, tool_name, context)?;
340 assessments.push(assessment);
341 }
342
343 Ok(assessments)
344 }
345
346 pub fn get_best_tool(
348 &self,
349 pattern_id: &str,
350 tool_names: &[String],
351 context: &PatternContext,
352 ) -> Result<Option<(String, CompatibilityAssessment)>> {
353 let assessments = self.batch_assess(pattern_id, tool_names, context)?;
354
355 let best = assessments
356 .into_iter()
357 .filter(|a| matches!(a.risk_level, RiskLevel::Low | RiskLevel::Medium))
358 .max_by(|a, b| {
359 a.compatibility_score
360 .partial_cmp(&b.compatibility_score)
361 .unwrap_or(std::cmp::Ordering::Equal)
362 });
363
364 Ok(best.map(|assessment| (assessment.tool_name.clone(), assessment)))
365 }
366}
367
/// Contextual information about a pattern, supplied by callers of the
/// assessment APIs.
#[derive(Debug, Clone)]
pub struct PatternContext {
    /// Domain the pattern belongs to (e.g. "web-api", "analytics");
    /// matched exactly against a tool's supported domains.
    pub domain: String,
    /// Data quality score — presumably in [0, 1]; compared against each
    /// tool's `min_data_quality`.
    pub data_quality: f64,
    /// Number of times the pattern has been observed.
    pub occurrences: usize,
    /// Stability of the pattern over time; used directly as a score
    /// component, so presumably in [0, 1] — confirm with callers.
    pub temporal_stability: f64,
    /// Memory available for running a tool, in megabytes.
    pub available_memory_mb: usize,
    /// Pattern complexity — not read by the scoring code in this file;
    /// possibly consumed by the analysis helpers.
    pub complexity: f64,
}
384
385#[cfg(test)]
386mod tests;