1use super::result::*;
2use crate::error::Result;
3use crate::laws::benford::analyze_benford_law;
4use crate::laws::normal::analyze_normal_distribution;
5use crate::laws::pareto::analyze_pareto_distribution;
6use crate::laws::poisson::analyze_poisson_distribution;
7use crate::laws::zipf::analyze_numeric_zipf;
8use diffx_core::{diff, DiffResult};
9use rayon::prelude::*;
10use std::collections::HashSet;
11
12pub fn analyze_all_laws(numbers: &[f64], dataset_name: &str) -> Result<IntegrationResult> {
14 let mut result = IntegrationResult::new(dataset_name.to_string(), numbers);
15
16 let law_results = execute_laws_parallel(numbers, dataset_name);
18
19 for (law_name, law_result) in law_results {
21 if let Ok(res) = law_result {
22 result.add_law_result(&law_name, res);
23 }
24 }
25
26 result.finalize_analysis();
28
29 Ok(result)
30}
31
32pub fn analyze_selected_laws(
34 numbers: &[f64],
35 dataset_name: &str,
36 selected_laws: &[String],
37) -> Result<IntegrationResult> {
38 let mut result = IntegrationResult::new(dataset_name.to_string(), numbers);
39
40 let law_results = execute_selected_laws_parallel(numbers, dataset_name, selected_laws);
42
43 for (law_name, law_result) in law_results {
45 if let Ok(res) = law_result {
46 result.add_law_result(&law_name, res);
47 }
48 }
49
50 result.finalize_analysis();
52
53 Ok(result)
54}
55
56pub fn compare_laws(
58 numbers: &[f64],
59 dataset_name: &str,
60 focus: Option<&str>,
61) -> Result<IntegrationResult> {
62 let mut result = analyze_all_laws(numbers, dataset_name)?;
63
64 if let Some(focus_area) = focus {
66 apply_focus_analysis(&mut result, focus_area);
67 }
68
69 Ok(result)
70}
71
72pub fn cross_validate_laws(
74 numbers: &[f64],
75 dataset_name: &str,
76 confidence_level: f64,
77) -> Result<CrossValidationResult> {
78 let chunk_size = numbers.len() / 5; let mut validation_results = Vec::new();
81
82 for i in 0..5 {
83 let start = i * chunk_size;
84 let end = if i == 4 {
85 numbers.len()
86 } else {
87 (i + 1) * chunk_size
88 };
89
90 let test_data = &numbers[start..end];
91 let train_data: Vec<f64> = numbers[..start]
92 .iter()
93 .chain(numbers[end..].iter())
94 .cloned()
95 .collect();
96
97 if !train_data.is_empty() && !test_data.is_empty() {
98 let train_result = analyze_all_laws(&train_data, &format!("{dataset_name}_train_{i}"))?;
99 let test_result = analyze_all_laws(test_data, &format!("{dataset_name}_test_{i}"))?;
100
101 validation_results.push(ValidationFold {
102 fold_number: i + 1,
103 train_result: train_result.clone(),
104 test_result: test_result.clone(),
105 consistency_score: calculate_fold_consistency(&train_result, &test_result),
106 });
107 }
108 }
109
110 Ok(CrossValidationResult {
111 dataset_name: dataset_name.to_string(),
112 confidence_level,
113 validation_folds: validation_results.clone(),
114 overall_stability: calculate_overall_stability(&validation_results),
115 stability_assessment: assess_stability(&validation_results),
116 })
117}
118
119pub fn detect_conflicts_detailed(
121 numbers: &[f64],
122 dataset_name: &str,
123 threshold: f64,
124) -> Result<ConflictAnalysisResult> {
125 let integration_result = analyze_all_laws(numbers, dataset_name)?;
126
127 let detailed_conflicts = analyze_conflicts_in_depth(&integration_result, threshold);
128 let conflict_patterns = identify_conflict_patterns(&detailed_conflicts);
129 let resolution_strategies = generate_resolution_strategies(&detailed_conflicts);
130
131 Ok(ConflictAnalysisResult {
132 dataset_name: dataset_name.to_string(),
133 threshold,
134 integration_result,
135 detailed_conflicts: detailed_conflicts.clone(),
136 conflict_patterns,
137 resolution_strategies,
138 conflict_severity: assess_conflict_severity(&detailed_conflicts),
139 })
140}
141
142pub fn generate_detailed_recommendations(
144 numbers: &[f64],
145 dataset_name: &str,
146 analysis_purpose: AnalysisPurpose,
147) -> Result<DetailedRecommendationResult> {
148 let mut integration_result = analyze_all_laws(numbers, dataset_name)?;
149
150 integration_result.data_characteristics.analysis_purpose = analysis_purpose.clone();
152 integration_result.finalize_analysis();
153
154 let purpose_specific_recommendations = generate_purpose_recommendations(&integration_result);
155 let combination_analysis = analyze_law_combinations(&integration_result);
156 let effectiveness_scores = calculate_effectiveness_scores(&integration_result);
157
158 Ok(DetailedRecommendationResult {
159 dataset_name: dataset_name.to_string(),
160 analysis_purpose,
161 integration_result: integration_result.clone(),
162 purpose_specific_recommendations,
163 combination_analysis,
164 effectiveness_scores,
165 implementation_guidance: generate_implementation_guidance(&integration_result),
166 })
167}
168
169fn execute_laws_parallel(numbers: &[f64], dataset_name: &str) -> Vec<(String, Result<LawResult>)> {
172 let laws = vec!["benf", "pareto", "zipf", "normal", "poisson"];
173
174 laws.par_iter()
175 .map(|&law| {
176 let result = match law {
177 "benf" => analyze_benford_law(numbers, dataset_name).map(LawResult::Benford),
178 "pareto" => {
179 analyze_pareto_distribution(numbers, dataset_name).map(LawResult::Pareto)
180 }
181 "zipf" => analyze_numeric_zipf(numbers, dataset_name).map(LawResult::Zipf),
182 "normal" => {
183 analyze_normal_distribution(numbers, dataset_name).map(LawResult::Normal)
184 }
185 "poisson" => {
186 analyze_poisson_distribution(numbers, dataset_name).map(LawResult::Poisson)
187 }
188 _ => Err(crate::error::BenfError::InvalidInput(format!(
189 "Unknown law: {law}"
190 ))),
191 };
192 (law.to_string(), result)
193 })
194 .collect()
195}
196
197fn execute_selected_laws_parallel(
198 numbers: &[f64],
199 dataset_name: &str,
200 selected_laws: &[String],
201) -> Vec<(String, Result<LawResult>)> {
202 let available_laws: HashSet<&str> = ["benf", "pareto", "zipf", "normal", "poisson"]
203 .iter()
204 .cloned()
205 .collect();
206
207 selected_laws
208 .par_iter()
209 .filter(|law| available_laws.contains(law.as_str()))
210 .map(|law| {
211 let result = match law.as_str() {
212 "benf" => analyze_benford_law(numbers, dataset_name).map(LawResult::Benford),
213 "pareto" => {
214 analyze_pareto_distribution(numbers, dataset_name).map(LawResult::Pareto)
215 }
216 "zipf" => analyze_numeric_zipf(numbers, dataset_name).map(LawResult::Zipf),
217 "normal" => {
218 analyze_normal_distribution(numbers, dataset_name).map(LawResult::Normal)
219 }
220 "poisson" => {
221 analyze_poisson_distribution(numbers, dataset_name).map(LawResult::Poisson)
222 }
223 _ => Err(crate::error::BenfError::InvalidInput(format!(
224 "Unknown law: {law}"
225 ))),
226 };
227 (law.clone(), result)
228 })
229 .collect()
230}
231
232pub fn apply_focus_analysis(result: &mut IntegrationResult, focus: &str) {
233 result.focus = Some(focus.to_string());
235
236 match focus {
237 "quality" => {
238 if let Some(benf_score) = result.law_scores.get("benf") {
240 result
241 .law_scores
242 .insert("benf".to_string(), benf_score * 1.5);
243 }
244 if let Some(normal_score) = result.law_scores.get("normal") {
245 result
246 .law_scores
247 .insert("normal".to_string(), normal_score * 1.3);
248 }
249 }
250 "concentration" => {
251 if let Some(pareto_score) = result.law_scores.get("pareto") {
253 result
254 .law_scores
255 .insert("pareto".to_string(), pareto_score * 1.5);
256 }
257 if let Some(zipf_score) = result.law_scores.get("zipf") {
258 result
259 .law_scores
260 .insert("zipf".to_string(), zipf_score * 1.3);
261 }
262 }
263 "distribution" => {
264 if let Some(normal_score) = result.law_scores.get("normal") {
266 result
267 .law_scores
268 .insert("normal".to_string(), normal_score * 1.4);
269 }
270 if let Some(poisson_score) = result.law_scores.get("poisson") {
271 result
272 .law_scores
273 .insert("poisson".to_string(), poisson_score * 1.4);
274 }
275 }
276 "anomaly" => {
277 if let Some(normal_score) = result.law_scores.get("normal") {
279 result
280 .law_scores
281 .insert("normal".to_string(), normal_score * 1.6);
282 }
283 if let Some(poisson_score) = result.law_scores.get("poisson") {
284 result
285 .law_scores
286 .insert("poisson".to_string(), poisson_score * 1.4);
287 }
288 }
289 _ => {}
290 }
291
292 result.finalize_analysis();
294}
295
/// Consistency score between one fold's train and test analyses.
///
/// Currently delegates to the diffx-based comparison of the two per-law
/// score maps; see `calculate_enhanced_consistency_with_diffx`.
fn calculate_fold_consistency(
    train_result: &IntegrationResult,
    test_result: &IntegrationResult,
) -> f64 {
    calculate_enhanced_consistency_with_diffx(train_result, test_result)
}
303
/// Scores how consistent two integration results are by structurally diffing
/// their per-law score maps with `diffx`.
///
/// Returns a value in [0.0, 1.0]: 1.0 when no differences are detected,
/// decreasing as more/larger differences appear.
fn calculate_enhanced_consistency_with_diffx(
    train_result: &IntegrationResult,
    test_result: &IntegrationResult,
) -> f64 {
    // Serialize both score maps to JSON so diffx can compare them
    // structurally. A serialization failure degrades to an empty JSON value
    // instead of propagating an error.
    let train_json = serde_json::to_value(&train_result.law_scores).unwrap_or_default();
    let test_json = serde_json::to_value(&test_result.law_scores).unwrap_or_default();

    // NOTE(review): Some(0.01) is presumably diffx's numeric epsilon
    // (differences below it ignored) — confirm against the diffx_core::diff
    // API documentation.
    let diff_results = diff(&train_json, &test_json, None, Some(0.01), None);

    if diff_results.is_empty() {
        // No differences within tolerance: perfectly consistent.
        return 1.0;
    }

    // NOTE(review): `total_laws` is only used for this empty-map guard; the
    // average below divides by the number of diffs, not the number of laws.
    let total_laws = train_result
        .law_scores
        .len()
        .max(test_result.law_scores.len()) as f64;
    if total_laws == 0.0 {
        return 0.0;
    }

    // Accumulate a per-difference "impact" in [0.0, 1.0].
    let mut total_diff_impact = 0.0;

    for diff_result in &diff_results {
        let impact = match diff_result {
            // A law present on only one side counts as a half-impact change.
            DiffResult::Added(_, _) => 0.5,
            DiffResult::Removed(_, _) => 0.5,
            DiffResult::Modified(_, old_val, new_val) => {
                if let (Some(old_num), Some(new_num)) = (old_val.as_f64(), new_val.as_f64()) {
                    // Relative change, with a 0.01 floor on the denominator
                    // to avoid division by ~zero; capped at full impact.
                    let diff_ratio = (old_num - new_num).abs() / old_num.max(new_num).max(0.01);
                    diff_ratio.min(1.0)
                } else {
                    // Non-numeric modification: treat as maximal impact.
                    1.0
                }
            }
            // A type change is always maximal impact.
            DiffResult::TypeChanged(_, _, _) => 1.0,
        };
        total_diff_impact += impact;
    }

    // Consistency is the complement of the mean impact, clamped at 0.
    let average_impact = total_diff_impact / diff_results.len() as f64;
    (1.0 - average_impact).max(0.0)
}
353
354fn calculate_overall_stability(validation_results: &[ValidationFold]) -> f64 {
355 if validation_results.is_empty() {
356 return 0.0;
357 }
358
359 let total_consistency: f64 = validation_results
360 .iter()
361 .map(|fold| fold.consistency_score)
362 .sum();
363
364 total_consistency / validation_results.len() as f64
365}
366
367fn assess_stability(validation_results: &[ValidationFold]) -> StabilityAssessment {
368 let overall_stability = calculate_overall_stability(validation_results);
369
370 match overall_stability {
371 s if s > 0.9 => StabilityAssessment::VeryStable,
372 s if s > 0.8 => StabilityAssessment::Stable,
373 s if s > 0.7 => StabilityAssessment::ModeratelyStable,
374 s if s > 0.6 => StabilityAssessment::Unstable,
375 _ => StabilityAssessment::VeryUnstable,
376 }
377}
378
379fn analyze_conflicts_in_depth(result: &IntegrationResult, threshold: f64) -> Vec<DetailedConflict> {
380 let mut detailed_conflicts = Vec::new();
381
382 for conflict in &result.conflicts {
383 if conflict.conflict_score >= threshold {
384 let statistical_significance = calculate_statistical_significance(conflict, result);
385 let impact_assessment = assess_conflict_impact(conflict, result);
386 let root_cause_analysis = perform_root_cause_analysis(conflict, result);
387
388 detailed_conflicts.push(DetailedConflict {
389 base_conflict: conflict.clone(),
390 statistical_significance,
391 impact_assessment,
392 root_cause_analysis,
393 confidence_interval: calculate_conflict_confidence_interval(conflict, result),
394 });
395 }
396 }
397
398 detailed_conflicts
399}
400
401fn identify_conflict_patterns(detailed_conflicts: &[DetailedConflict]) -> Vec<ConflictPattern> {
402 let mut patterns = Vec::new();
403
404 let mut type_counts = std::collections::HashMap::new();
406 for conflict in detailed_conflicts {
407 *type_counts
408 .entry(conflict.base_conflict.conflict_type.clone())
409 .or_insert(0) += 1;
410 }
411
412 for (conflict_type, count) in type_counts {
413 if count > 1 {
414 patterns.push(ConflictPattern {
415 pattern_type: conflict_type.clone(),
416 frequency: count,
417 severity: calculate_pattern_severity(detailed_conflicts, &conflict_type),
418 description: describe_conflict_pattern(&conflict_type),
419 });
420 }
421 }
422
423 patterns
424}
425
426fn generate_resolution_strategies(
427 detailed_conflicts: &[DetailedConflict],
428) -> Vec<ResolutionStrategy> {
429 let mut strategies = Vec::new();
430
431 for conflict in detailed_conflicts {
432 let strategy = match conflict.base_conflict.conflict_type {
433 ConflictType::DistributionMismatch => ResolutionStrategy {
434 strategy_name: "Distribution Type Optimization".to_string(),
435 priority: Priority::High,
436 steps: vec![
437 "Check data type (continuous/discrete)".to_string(),
438 "Select optimal distribution law".to_string(),
439 "Exclude inappropriate law results".to_string(),
440 ],
441 expected_outcome: "Improved distribution compatibility".to_string(),
442 confidence: 0.85,
443 },
444 ConflictType::QualityDisagreement => ResolutionStrategy {
445 strategy_name: "Quality Assessment Integration".to_string(),
446 priority: Priority::Medium,
447 steps: vec![
448 "Use Benford's Law as quality assessment baseline".to_string(),
449 "Utilize other laws as supplementary evaluation".to_string(),
450 "Make final decision with comprehensive quality score".to_string(),
451 ],
452 expected_outcome: "Consistent quality assessment".to_string(),
453 confidence: 0.75,
454 },
455 _ => ResolutionStrategy {
456 strategy_name: "Comprehensive Evaluation Focus".to_string(),
457 priority: Priority::Low,
458 steps: vec![
459 "Judge results from multiple laws comprehensively".to_string(),
460 "Utilize contradictory points as complementary information".to_string(),
461 ],
462 expected_outcome: "Comprehensive analysis results".to_string(),
463 confidence: 0.6,
464 },
465 };
466
467 strategies.push(strategy);
468 }
469
470 strategies
471}
472
473fn assess_conflict_severity(detailed_conflicts: &[DetailedConflict]) -> ConflictSeverity {
474 if detailed_conflicts.is_empty() {
475 return ConflictSeverity::None;
476 }
477
478 let max_score = detailed_conflicts
479 .iter()
480 .map(|c| c.base_conflict.conflict_score)
481 .fold(0.0, f64::max);
482
483 let high_severity_count = detailed_conflicts
484 .iter()
485 .filter(|c| c.base_conflict.conflict_score > 0.8)
486 .count();
487
488 match (max_score, high_severity_count) {
489 (s, _) if s > 0.9 => ConflictSeverity::Critical,
490 (s, c) if s > 0.7 && c > 2 => ConflictSeverity::High,
491 (s, c) if s > 0.5 && c > 0 => ConflictSeverity::Medium,
492 (s, _) if s > 0.3 => ConflictSeverity::Low,
493 _ => ConflictSeverity::None,
494 }
495}
496
497fn generate_purpose_recommendations(result: &IntegrationResult) -> Vec<PurposeRecommendation> {
498 let mut recommendations = Vec::new();
499
500 match result.data_characteristics.analysis_purpose {
501 AnalysisPurpose::QualityAudit => {
502 recommendations.push(PurposeRecommendation {
503 purpose: AnalysisPurpose::QualityAudit,
504 recommended_laws: vec!["benf".to_string(), "normal".to_string()],
505 rationale: "品質監査にはベンフォード法則での自然性チェックと正規分布での統計的品質評価が最適".to_string(),
506 effectiveness: 0.9,
507 implementation_priority: Priority::High,
508 });
509 }
510 AnalysisPurpose::ConcentrationAnalysis => {
511 recommendations.push(PurposeRecommendation {
512 purpose: AnalysisPurpose::ConcentrationAnalysis,
513 recommended_laws: vec!["pareto".to_string(), "zipf".to_string()],
514 rationale: "集中度分析にはパレート法則での80/20分析とZipf法則での順位分布が有効"
515 .to_string(),
516 effectiveness: 0.85,
517 implementation_priority: Priority::High,
518 });
519 }
520 AnalysisPurpose::AnomalyDetection => {
521 recommendations.push(PurposeRecommendation {
522 purpose: AnalysisPurpose::AnomalyDetection,
523 recommended_laws: vec!["normal".to_string(), "poisson".to_string()],
524 rationale:
525 "異常検知には正規分布での外れ値検出とポアソン分布での稀少事象検出が適用可能"
526 .to_string(),
527 effectiveness: 0.8,
528 implementation_priority: Priority::Medium,
529 });
530 }
531 _ => {
532 recommendations.push(PurposeRecommendation {
533 purpose: AnalysisPurpose::GeneralAnalysis,
534 recommended_laws: result.laws_executed.clone(),
535 rationale: "総合分析では全法則を活用して多角的な評価を実施".to_string(),
536 effectiveness: 0.7,
537 implementation_priority: Priority::Medium,
538 });
539 }
540 }
541
542 recommendations
543}
544
545fn analyze_law_combinations(result: &IntegrationResult) -> Vec<CombinationAnalysis> {
546 let mut combinations = Vec::new();
547
548 let laws: Vec<String> = result.law_scores.keys().cloned().collect();
549
550 for i in 0..laws.len() {
552 for j in i + 1..laws.len() {
553 let law_a = &laws[i];
554 let law_b = &laws[j];
555
556 let synergy_score = calculate_synergy_score(law_a, law_b, result);
557 let complementarity = assess_complementarity(law_a, law_b);
558
559 combinations.push(CombinationAnalysis {
560 laws: vec![law_a.clone(), law_b.clone()],
561 synergy_score,
562 complementarity,
563 use_cases: generate_combination_use_cases(law_a, law_b),
564 effectiveness_rating: rate_combination_effectiveness(
565 synergy_score,
566 complementarity,
567 ),
568 });
569 }
570 }
571
572 combinations.sort_by(|a, b| b.synergy_score.partial_cmp(&a.synergy_score).unwrap());
573 combinations
574}
575
576fn calculate_effectiveness_scores(
577 result: &IntegrationResult,
578) -> HashMap<String, EffectivenessScore> {
579 let mut scores = HashMap::new();
580
581 for (law, &base_score) in &result.law_scores {
582 let data_compatibility =
583 calculate_data_compatibility_score(law, &result.data_characteristics);
584 let purpose_alignment =
585 calculate_purpose_alignment_score(law, &result.data_characteristics.analysis_purpose);
586 let reliability = calculate_reliability_score(law, result);
587
588 let overall_effectiveness =
589 (base_score + data_compatibility + purpose_alignment + reliability) / 4.0;
590
591 scores.insert(
592 law.clone(),
593 EffectivenessScore {
594 base_score,
595 data_compatibility,
596 purpose_alignment,
597 reliability,
598 overall_effectiveness,
599 },
600 );
601 }
602
603 scores
604}
605
606fn generate_implementation_guidance(result: &IntegrationResult) -> ImplementationGuidance {
607 let primary_law = &result.recommendations.primary_law;
608 let setup_steps = generate_setup_steps(primary_law);
609 let validation_criteria = generate_validation_criteria(primary_law);
610 let monitoring_recommendations = generate_monitoring_recommendations(result);
611
612 ImplementationGuidance {
613 primary_law: primary_law.clone(),
614 setup_steps,
615 validation_criteria,
616 monitoring_recommendations,
617 estimated_effort: estimate_implementation_effort(result),
618 success_indicators: generate_success_indicators(result),
619 }
620}
621
// ---------------------------------------------------------------------------
// Placeholder heuristics: every function below currently returns a fixed stub
// value (0.5 scores, "in progress" strings). Parameters are underscore-
// prefixed because they are not yet consulted.
// ---------------------------------------------------------------------------

/// Stub: statistical significance of a conflict. Always 0.5 for now.
fn calculate_statistical_significance(_conflict: &Conflict, _result: &IntegrationResult) -> f64 {
    0.5
}
/// Stub: impact classification of a conflict. Always `Medium` for now.
fn assess_conflict_impact(_conflict: &Conflict, _result: &IntegrationResult) -> ImpactLevel {
    ImpactLevel::Medium
}
/// Stub: root-cause narrative for a conflict.
fn perform_root_cause_analysis(_conflict: &Conflict, _result: &IntegrationResult) -> String {
    "Under analysis".to_string()
}
/// Stub: confidence interval for a conflict; currently the full [0, 1] range.
fn calculate_conflict_confidence_interval(
    _conflict: &Conflict,
    _result: &IntegrationResult,
) -> (f64, f64) {
    (0.0, 1.0)
}
/// Stub: severity of a recurring conflict pattern. Always 0.5 for now.
fn calculate_pattern_severity(
    _conflicts: &[DetailedConflict],
    _conflict_type: &ConflictType,
) -> f64 {
    0.5
}
/// Stub: human-readable description of a conflict pattern.
fn describe_conflict_pattern(_conflict_type: &ConflictType) -> String {
    "Pattern analysis in progress".to_string()
}
/// Stub: synergy between two laws. Always 0.5 for now.
fn calculate_synergy_score(_law_a: &str, _law_b: &str, _result: &IntegrationResult) -> f64 {
    0.5
}
/// Stub: complementarity between two laws. Always 0.5 for now.
fn assess_complementarity(_law_a: &str, _law_b: &str) -> f64 {
    0.5
}
/// Stub: use cases for a law combination (Japanese: "general analysis").
fn generate_combination_use_cases(_law_a: &str, _law_b: &str) -> Vec<String> {
    vec!["一般分析".to_string()]
}
/// Stub: combined effectiveness rating. Always 0.5 for now.
fn rate_combination_effectiveness(_synergy: f64, _complementarity: f64) -> f64 {
    0.5
}
/// Stub: compatibility of a law with the dataset's characteristics.
fn calculate_data_compatibility_score(_law: &str, _characteristics: &DataCharacteristics) -> f64 {
    0.5
}
/// Stub: alignment of a law with the analysis purpose.
fn calculate_purpose_alignment_score(_law: &str, _purpose: &AnalysisPurpose) -> f64 {
    0.5
}
/// Stub: reliability of a law's result. Always 0.5 for now.
fn calculate_reliability_score(_law: &str, _result: &IntegrationResult) -> f64 {
    0.5
}
/// Stub: setup steps for a law (Japanese: "setting up").
fn generate_setup_steps(_law: &str) -> Vec<String> {
    vec!["セットアップ中".to_string()]
}
/// Stub: validation criteria for a law (Japanese: "criteria being defined").
fn generate_validation_criteria(_law: &str) -> Vec<String> {
    vec!["検証基準設定中".to_string()]
}
/// Stub: monitoring advice (Japanese: "monitoring being configured").
fn generate_monitoring_recommendations(_result: &IntegrationResult) -> Vec<String> {
    vec!["監視設定中".to_string()]
}
/// Stub: implementation effort estimate (Japanese: "moderate").
fn estimate_implementation_effort(_result: &IntegrationResult) -> String {
    "中程度".to_string()
}
/// Stub: success indicators (Japanese: "indicators being defined").
fn generate_success_indicators(_result: &IntegrationResult) -> Vec<String> {
    vec!["成功指標設定中".to_string()]
}
686
/// Aggregated outcome of 5-fold cross-validation (see `cross_validate_laws`).
#[derive(Debug, Clone)]
pub struct CrossValidationResult {
    pub dataset_name: String,
    /// Confidence level supplied by the caller; stored verbatim.
    pub confidence_level: f64,
    /// One entry per fold that had non-empty train and test splits.
    pub validation_folds: Vec<ValidationFold>,
    /// Mean consistency score across the folds (0.0 when there are none).
    pub overall_stability: f64,
    pub stability_assessment: StabilityAssessment,
}

/// Train/test analysis pair for a single cross-validation fold.
#[derive(Debug, Clone)]
pub struct ValidationFold {
    /// 1-based fold index.
    pub fold_number: usize,
    pub train_result: IntegrationResult,
    pub test_result: IntegrationResult,
    /// Similarity of train vs. test law scores, in [0.0, 1.0].
    pub consistency_score: f64,
}

/// Qualitative bucket for overall cross-validation stability
/// (thresholds in `assess_stability`).
#[derive(Debug, Clone, PartialEq)]
pub enum StabilityAssessment {
    VeryStable,
    Stable,
    ModeratelyStable,
    Unstable,
    VeryUnstable,
}
713
/// Full conflict analysis for a dataset: the base integration result plus
/// per-conflict detail, recurring patterns, and suggested resolutions.
#[derive(Debug, Clone)]
pub struct ConflictAnalysisResult {
    pub dataset_name: String,
    /// Minimum conflict score required for in-depth analysis.
    pub threshold: f64,
    pub integration_result: IntegrationResult,
    pub detailed_conflicts: Vec<DetailedConflict>,
    pub conflict_patterns: Vec<ConflictPattern>,
    pub resolution_strategies: Vec<ResolutionStrategy>,
    pub conflict_severity: ConflictSeverity,
}

/// A conflict enriched with statistical context.
#[derive(Debug, Clone)]
pub struct DetailedConflict {
    pub base_conflict: Conflict,
    pub statistical_significance: f64,
    pub impact_assessment: ImpactLevel,
    pub root_cause_analysis: String,
    /// (lower, upper) bounds for the conflict estimate.
    pub confidence_interval: (f64, f64),
}

/// A conflict type that occurs more than once across the detailed conflicts.
#[derive(Debug, Clone)]
pub struct ConflictPattern {
    pub pattern_type: ConflictType,
    /// Number of conflicts sharing this type.
    pub frequency: usize,
    pub severity: f64,
    pub description: String,
}

/// A recommended course of action for resolving a conflict.
#[derive(Debug, Clone)]
pub struct ResolutionStrategy {
    pub strategy_name: String,
    pub priority: Priority,
    /// Ordered steps to carry out the strategy.
    pub steps: Vec<String>,
    pub expected_outcome: String,
    /// Confidence in the strategy, in [0.0, 1.0].
    pub confidence: f64,
}

/// Relative priority of a recommendation or strategy.
#[derive(Debug, Clone, PartialEq)]
pub enum Priority {
    High,
    Medium,
    Low,
}

/// Qualitative impact of a conflict on the overall analysis.
#[derive(Debug, Clone, PartialEq)]
pub enum ImpactLevel {
    High,
    Medium,
    Low,
}

/// Overall severity bucket derived from conflict scores
/// (see `assess_conflict_severity`).
#[derive(Debug, Clone, PartialEq)]
pub enum ConflictSeverity {
    Critical,
    High,
    Medium,
    Low,
    None,
}
773
/// Purpose-aware recommendation output: which laws to use, how they combine,
/// and how to put the primary recommendation into practice.
#[derive(Debug, Clone)]
pub struct DetailedRecommendationResult {
    pub dataset_name: String,
    pub analysis_purpose: AnalysisPurpose,
    pub integration_result: IntegrationResult,
    pub purpose_specific_recommendations: Vec<PurposeRecommendation>,
    pub combination_analysis: Vec<CombinationAnalysis>,
    /// Effectiveness breakdown keyed by law name.
    pub effectiveness_scores: HashMap<String, EffectivenessScore>,
    pub implementation_guidance: ImplementationGuidance,
}

/// Laws recommended for a specific analysis purpose.
#[derive(Debug, Clone)]
pub struct PurposeRecommendation {
    pub purpose: AnalysisPurpose,
    pub recommended_laws: Vec<String>,
    pub rationale: String,
    /// Expected effectiveness, in [0.0, 1.0].
    pub effectiveness: f64,
    pub implementation_priority: Priority,
}

/// How well a pair of laws works together.
#[derive(Debug, Clone)]
pub struct CombinationAnalysis {
    /// The two laws being combined.
    pub laws: Vec<String>,
    pub synergy_score: f64,
    pub complementarity: f64,
    pub use_cases: Vec<String>,
    pub effectiveness_rating: f64,
}

/// Component scores contributing to a law's overall effectiveness
/// (equal-weight average; see `calculate_effectiveness_scores`).
#[derive(Debug, Clone)]
pub struct EffectivenessScore {
    pub base_score: f64,
    pub data_compatibility: f64,
    pub purpose_alignment: f64,
    pub reliability: f64,
    pub overall_effectiveness: f64,
}

/// Practical guidance for rolling out the primary recommended law.
#[derive(Debug, Clone)]
pub struct ImplementationGuidance {
    pub primary_law: String,
    pub setup_steps: Vec<String>,
    pub validation_criteria: Vec<String>,
    pub monitoring_recommendations: Vec<String>,
    pub estimated_effort: String,
    pub success_indicators: Vec<String>,
}
821
822use std::collections::HashMap;