//! lawkit_core/laws/integration/analysis.rs
//!
//! Integrated multi-law analysis: runs the Benford, Pareto, Zipf, normal and
//! Poisson analyses over one dataset and combines their results.
1use super::result::*;
2use crate::diffx_core_mock::{diff, DiffResult};
3use crate::error::Result;
4use crate::laws::benford::analyze_benford_law;
5use crate::laws::normal::analyze_normal_distribution;
6use crate::laws::pareto::analyze_pareto_distribution;
7use crate::laws::poisson::analyze_poisson_distribution;
8use crate::laws::zipf::analyze_numeric_zipf;
9use rayon::prelude::*;
10use std::collections::HashSet;
11
12/// 統合分析実行
13pub fn analyze_all_laws(numbers: &[f64], dataset_name: &str) -> Result<IntegrationResult> {
14    let mut result = IntegrationResult::new(dataset_name.to_string(), numbers);
15
16    // 全法則を並列実行
17    let law_results = execute_laws_parallel(numbers, dataset_name);
18
19    // 結果を統合
20    for (law_name, law_result) in law_results {
21        if let Ok(res) = law_result {
22            result.add_law_result(&law_name, res);
23        }
24    }
25
26    // 統合分析実行
27    result.finalize_analysis();
28
29    Ok(result)
30}
31
32/// 指定した法則のみで統合分析
33pub fn analyze_selected_laws(
34    numbers: &[f64],
35    dataset_name: &str,
36    selected_laws: &[String],
37) -> Result<IntegrationResult> {
38    let mut result = IntegrationResult::new(dataset_name.to_string(), numbers);
39
40    // 指定法則のみ実行
41    let law_results = execute_selected_laws_parallel(numbers, dataset_name, selected_laws);
42
43    // 結果を統合
44    for (law_name, law_result) in law_results {
45        if let Ok(res) = law_result {
46            result.add_law_result(&law_name, res);
47        }
48    }
49
50    // 統合分析実行
51    result.finalize_analysis();
52
53    Ok(result)
54}
55
56/// 法則間比較分析
57pub fn compare_laws(
58    numbers: &[f64],
59    dataset_name: &str,
60    focus: Option<&str>,
61) -> Result<IntegrationResult> {
62    let mut result = analyze_all_laws(numbers, dataset_name)?;
63
64    // フォーカス分析
65    if let Some(focus_area) = focus {
66        apply_focus_analysis(&mut result, focus_area);
67    }
68
69    Ok(result)
70}
71
72/// クロスバリデーション分析
73pub fn cross_validate_laws(
74    numbers: &[f64],
75    dataset_name: &str,
76    confidence_level: f64,
77) -> Result<CrossValidationResult> {
78    // データを分割してクロスバリデーション実行
79    let chunk_size = numbers.len() / 5; // 5-fold クロスバリデーション
80    let mut validation_results = Vec::new();
81
82    for i in 0..5 {
83        let start = i * chunk_size;
84        let end = if i == 4 {
85            numbers.len()
86        } else {
87            (i + 1) * chunk_size
88        };
89
90        let test_data = &numbers[start..end];
91        let train_data: Vec<f64> = numbers[..start]
92            .iter()
93            .chain(numbers[end..].iter())
94            .cloned()
95            .collect();
96
97        if !train_data.is_empty() && !test_data.is_empty() {
98            let train_result = analyze_all_laws(&train_data, &format!("{dataset_name}_train_{i}"))?;
99            let test_result = analyze_all_laws(test_data, &format!("{dataset_name}_test_{i}"))?;
100
101            validation_results.push(ValidationFold {
102                fold_number: i + 1,
103                train_result: train_result.clone(),
104                test_result: test_result.clone(),
105                consistency_score: calculate_fold_consistency(&train_result, &test_result),
106            });
107        }
108    }
109
110    Ok(CrossValidationResult {
111        dataset_name: dataset_name.to_string(),
112        confidence_level,
113        validation_folds: validation_results.clone(),
114        overall_stability: calculate_overall_stability(&validation_results),
115        stability_assessment: assess_stability(&validation_results),
116    })
117}
118
119/// 矛盾検出に特化した分析
120pub fn detect_conflicts_detailed(
121    numbers: &[f64],
122    dataset_name: &str,
123    threshold: f64,
124) -> Result<ConflictAnalysisResult> {
125    let integration_result = analyze_all_laws(numbers, dataset_name)?;
126
127    let detailed_conflicts = analyze_conflicts_in_depth(&integration_result, threshold);
128    let conflict_patterns = identify_conflict_patterns(&detailed_conflicts);
129    let resolution_strategies = generate_resolution_strategies(&detailed_conflicts);
130
131    Ok(ConflictAnalysisResult {
132        dataset_name: dataset_name.to_string(),
133        threshold,
134        integration_result,
135        detailed_conflicts: detailed_conflicts.clone(),
136        conflict_patterns,
137        resolution_strategies,
138        conflict_severity: assess_conflict_severity(&detailed_conflicts),
139    })
140}
141
142/// 推奨システム詳細分析
143pub fn generate_detailed_recommendations(
144    numbers: &[f64],
145    dataset_name: &str,
146    analysis_purpose: AnalysisPurpose,
147) -> Result<DetailedRecommendationResult> {
148    let mut integration_result = analyze_all_laws(numbers, dataset_name)?;
149
150    // 分析目的を設定
151    integration_result.data_characteristics.analysis_purpose = analysis_purpose.clone();
152    integration_result.finalize_analysis();
153
154    let purpose_specific_recommendations = generate_purpose_recommendations(&integration_result);
155    let combination_analysis = analyze_law_combinations(&integration_result);
156    let effectiveness_scores = calculate_effectiveness_scores(&integration_result);
157
158    Ok(DetailedRecommendationResult {
159        dataset_name: dataset_name.to_string(),
160        analysis_purpose,
161        integration_result: integration_result.clone(),
162        purpose_specific_recommendations,
163        combination_analysis,
164        effectiveness_scores,
165        implementation_guidance: generate_implementation_guidance(&integration_result),
166    })
167}
168
// Helper functions
170
171fn execute_laws_parallel(numbers: &[f64], dataset_name: &str) -> Vec<(String, Result<LawResult>)> {
172    let laws = vec!["benf", "pareto", "zipf", "normal", "poisson"];
173
174    laws.par_iter()
175        .map(|&law| {
176            let result = match law {
177                "benf" => analyze_benford_law(numbers, dataset_name).map(LawResult::Benford),
178                "pareto" => {
179                    analyze_pareto_distribution(numbers, dataset_name).map(LawResult::Pareto)
180                }
181                "zipf" => analyze_numeric_zipf(numbers, dataset_name).map(LawResult::Zipf),
182                "normal" => {
183                    analyze_normal_distribution(numbers, dataset_name).map(LawResult::Normal)
184                }
185                "poisson" => {
186                    analyze_poisson_distribution(numbers, dataset_name).map(LawResult::Poisson)
187                }
188                _ => Err(crate::error::BenfError::InvalidInput(format!(
189                    "Unknown law: {law}"
190                ))),
191            };
192            (law.to_string(), result)
193        })
194        .collect()
195}
196
197fn execute_selected_laws_parallel(
198    numbers: &[f64],
199    dataset_name: &str,
200    selected_laws: &[String],
201) -> Vec<(String, Result<LawResult>)> {
202    let available_laws: HashSet<&str> = ["benf", "pareto", "zipf", "normal", "poisson"]
203        .iter()
204        .cloned()
205        .collect();
206
207    selected_laws
208        .par_iter()
209        .filter(|law| available_laws.contains(law.as_str()))
210        .map(|law| {
211            let result = match law.as_str() {
212                "benf" => analyze_benford_law(numbers, dataset_name).map(LawResult::Benford),
213                "pareto" => {
214                    analyze_pareto_distribution(numbers, dataset_name).map(LawResult::Pareto)
215                }
216                "zipf" => analyze_numeric_zipf(numbers, dataset_name).map(LawResult::Zipf),
217                "normal" => {
218                    analyze_normal_distribution(numbers, dataset_name).map(LawResult::Normal)
219                }
220                "poisson" => {
221                    analyze_poisson_distribution(numbers, dataset_name).map(LawResult::Poisson)
222                }
223                _ => Err(crate::error::BenfError::InvalidInput(format!(
224                    "Unknown law: {law}"
225                ))),
226            };
227            (law.clone(), result)
228        })
229        .collect()
230}
231
232pub fn apply_focus_analysis(result: &mut IntegrationResult, focus: &str) {
233    // Set the focus field
234    result.focus = Some(focus.to_string());
235
236    match focus {
237        "quality" => {
238            // 品質重視の重み調整
239            if let Some(benf_score) = result.law_scores.get("benf") {
240                result
241                    .law_scores
242                    .insert("benf".to_string(), benf_score * 1.5);
243            }
244            if let Some(normal_score) = result.law_scores.get("normal") {
245                result
246                    .law_scores
247                    .insert("normal".to_string(), normal_score * 1.3);
248            }
249        }
250        "concentration" => {
251            // 集中度重視の重み調整
252            if let Some(pareto_score) = result.law_scores.get("pareto") {
253                result
254                    .law_scores
255                    .insert("pareto".to_string(), pareto_score * 1.5);
256            }
257            if let Some(zipf_score) = result.law_scores.get("zipf") {
258                result
259                    .law_scores
260                    .insert("zipf".to_string(), zipf_score * 1.3);
261            }
262        }
263        "distribution" => {
264            // 分布適合重視
265            if let Some(normal_score) = result.law_scores.get("normal") {
266                result
267                    .law_scores
268                    .insert("normal".to_string(), normal_score * 1.4);
269            }
270            if let Some(poisson_score) = result.law_scores.get("poisson") {
271                result
272                    .law_scores
273                    .insert("poisson".to_string(), poisson_score * 1.4);
274            }
275        }
276        "anomaly" => {
277            // 異常検知重視
278            if let Some(normal_score) = result.law_scores.get("normal") {
279                result
280                    .law_scores
281                    .insert("normal".to_string(), normal_score * 1.6);
282            }
283            if let Some(poisson_score) = result.law_scores.get("poisson") {
284                result
285                    .law_scores
286                    .insert("poisson".to_string(), poisson_score * 1.4);
287            }
288        }
289        _ => {}
290    }
291
292    // 重み調整後に再計算
293    result.finalize_analysis();
294}
295
/// Consistency between one fold's train-side and test-side analyses.
///
/// Thin wrapper that delegates to the diffx-core based structural comparison
/// of the two results' per-law score maps. Returns a score where 1.0 means
/// the score maps are identical and 0.0 means maximally inconsistent.
fn calculate_fold_consistency(
    train_result: &IntegrationResult,
    test_result: &IntegrationResult,
) -> f64 {
    // Consistency analysis backed by diffx-core structural diffing.
    calculate_enhanced_consistency_with_diffx(train_result, test_result)
}
303
304/// diffx-coreを使用した一貫性計算
305fn calculate_enhanced_consistency_with_diffx(
306    train_result: &IntegrationResult,
307    test_result: &IntegrationResult,
308) -> f64 {
309    // HashMap<String, f64>をJSONに変換してdiffx-coreで比較
310    let train_json = serde_json::to_value(&train_result.law_scores).unwrap_or_default();
311    let test_json = serde_json::to_value(&test_result.law_scores).unwrap_or_default();
312
313    // diffx-coreで構造的差分を検出
314    let diff_results = diff(&train_json, &test_json, None);
315
316    let results = match diff_results {
317        Ok(results) => results,
318        Err(_) => return 0.0, // エラーの場合は一致度0
319    };
320
321    if results.is_empty() {
322        return 1.0; // 完全一致
323    }
324
325    // 差分の種類と重要度に基づいて一貫性スコアを計算
326    let total_laws = train_result
327        .law_scores
328        .len()
329        .max(test_result.law_scores.len()) as f64;
330    if total_laws == 0.0 {
331        return 0.0;
332    }
333
334    let mut total_diff_impact = 0.0;
335
336    for diff_result in &results {
337        let impact = match diff_result {
338            DiffResult::Added(_, _) => 0.5,   // 追加は中程度の影響
339            DiffResult::Removed(_, _) => 0.5, // 削除は中程度の影響
340            DiffResult::Modified(_, old_val, new_val) => {
341                // 数値の変更は差分の大きさに応じて影響度を計算
342                if let (Some(old_num), Some(new_num)) = (old_val.as_f64(), new_val.as_f64()) {
343                    let diff_ratio = (old_num - new_num).abs() / old_num.max(new_num).max(0.01);
344                    diff_ratio.min(1.0)
345                } else {
346                    1.0 // 非数値の変更は最大影響
347                }
348            }
349            DiffResult::TypeChanged(_, _, _) => 1.0, // 型変更は最大影響
350            DiffResult::Unchanged => 0.0,            // 変更なし
351        };
352        total_diff_impact += impact;
353    }
354
355    // 一貫性スコア = 1 - (平均影響度)
356    let average_impact = total_diff_impact / results.len() as f64;
357    (1.0 - average_impact).max(0.0)
358}
359
360fn calculate_overall_stability(validation_results: &[ValidationFold]) -> f64 {
361    if validation_results.is_empty() {
362        return 0.0;
363    }
364
365    let total_consistency: f64 = validation_results
366        .iter()
367        .map(|fold| fold.consistency_score)
368        .sum();
369
370    total_consistency / validation_results.len() as f64
371}
372
373fn assess_stability(validation_results: &[ValidationFold]) -> StabilityAssessment {
374    let overall_stability = calculate_overall_stability(validation_results);
375
376    match overall_stability {
377        s if s > 0.9 => StabilityAssessment::VeryStable,
378        s if s > 0.8 => StabilityAssessment::Stable,
379        s if s > 0.7 => StabilityAssessment::ModeratelyStable,
380        s if s > 0.6 => StabilityAssessment::Unstable,
381        _ => StabilityAssessment::VeryUnstable,
382    }
383}
384
385fn analyze_conflicts_in_depth(result: &IntegrationResult, threshold: f64) -> Vec<DetailedConflict> {
386    let mut detailed_conflicts = Vec::new();
387
388    for conflict in &result.conflicts {
389        if conflict.conflict_score >= threshold {
390            let statistical_significance = calculate_statistical_significance(conflict, result);
391            let impact_assessment = assess_conflict_impact(conflict, result);
392            let root_cause_analysis = perform_root_cause_analysis(conflict, result);
393
394            detailed_conflicts.push(DetailedConflict {
395                base_conflict: conflict.clone(),
396                statistical_significance,
397                impact_assessment,
398                root_cause_analysis,
399                confidence_interval: calculate_conflict_confidence_interval(conflict, result),
400            });
401        }
402    }
403
404    detailed_conflicts
405}
406
407fn identify_conflict_patterns(detailed_conflicts: &[DetailedConflict]) -> Vec<ConflictPattern> {
408    let mut patterns = Vec::new();
409
410    // 頻出する矛盾タイプを特定
411    let mut type_counts = std::collections::HashMap::new();
412    for conflict in detailed_conflicts {
413        *type_counts
414            .entry(conflict.base_conflict.conflict_type.clone())
415            .or_insert(0) += 1;
416    }
417
418    for (conflict_type, count) in type_counts {
419        if count > 1 {
420            patterns.push(ConflictPattern {
421                pattern_type: conflict_type.clone(),
422                frequency: count,
423                severity: calculate_pattern_severity(detailed_conflicts, &conflict_type),
424                description: describe_conflict_pattern(&conflict_type),
425            });
426        }
427    }
428
429    patterns
430}
431
432fn generate_resolution_strategies(
433    detailed_conflicts: &[DetailedConflict],
434) -> Vec<ResolutionStrategy> {
435    let mut strategies = Vec::new();
436
437    for conflict in detailed_conflicts {
438        let strategy = match conflict.base_conflict.conflict_type {
439            ConflictType::DistributionMismatch => ResolutionStrategy {
440                strategy_name: "Distribution Type Optimization".to_string(),
441                priority: Priority::High,
442                steps: vec![
443                    "Check data type (continuous/discrete)".to_string(),
444                    "Select optimal distribution law".to_string(),
445                    "Exclude inappropriate law results".to_string(),
446                ],
447                expected_outcome: "Improved distribution compatibility".to_string(),
448                confidence: 0.85,
449            },
450            ConflictType::QualityDisagreement => ResolutionStrategy {
451                strategy_name: "Quality Assessment Integration".to_string(),
452                priority: Priority::Medium,
453                steps: vec![
454                    "Use Benford's Law as quality assessment baseline".to_string(),
455                    "Utilize other laws as supplementary evaluation".to_string(),
456                    "Make final decision with comprehensive quality score".to_string(),
457                ],
458                expected_outcome: "Consistent quality assessment".to_string(),
459                confidence: 0.75,
460            },
461            _ => ResolutionStrategy {
462                strategy_name: "Comprehensive Evaluation Focus".to_string(),
463                priority: Priority::Low,
464                steps: vec![
465                    "Judge results from multiple laws comprehensively".to_string(),
466                    "Utilize contradictory points as complementary information".to_string(),
467                ],
468                expected_outcome: "Comprehensive analysis results".to_string(),
469                confidence: 0.6,
470            },
471        };
472
473        strategies.push(strategy);
474    }
475
476    strategies
477}
478
479fn assess_conflict_severity(detailed_conflicts: &[DetailedConflict]) -> ConflictSeverity {
480    if detailed_conflicts.is_empty() {
481        return ConflictSeverity::None;
482    }
483
484    let max_score = detailed_conflicts
485        .iter()
486        .map(|c| c.base_conflict.conflict_score)
487        .fold(0.0, f64::max);
488
489    let high_severity_count = detailed_conflicts
490        .iter()
491        .filter(|c| c.base_conflict.conflict_score > 0.8)
492        .count();
493
494    match (max_score, high_severity_count) {
495        (s, _) if s > 0.9 => ConflictSeverity::Critical,
496        (s, c) if s > 0.7 && c > 2 => ConflictSeverity::High,
497        (s, c) if s > 0.5 && c > 0 => ConflictSeverity::Medium,
498        (s, _) if s > 0.3 => ConflictSeverity::Low,
499        _ => ConflictSeverity::None,
500    }
501}
502
503fn generate_purpose_recommendations(result: &IntegrationResult) -> Vec<PurposeRecommendation> {
504    let mut recommendations = Vec::new();
505
506    match result.data_characteristics.analysis_purpose {
507        AnalysisPurpose::QualityAudit => {
508            recommendations.push(PurposeRecommendation {
509                purpose: AnalysisPurpose::QualityAudit,
510                recommended_laws: vec!["benf".to_string(), "normal".to_string()],
511                rationale: "品質監査にはベンフォード法則での自然性チェックと正規分布での統計的品質評価が最適".to_string(),
512                effectiveness: 0.9,
513                implementation_priority: Priority::High,
514            });
515        }
516        AnalysisPurpose::ConcentrationAnalysis => {
517            recommendations.push(PurposeRecommendation {
518                purpose: AnalysisPurpose::ConcentrationAnalysis,
519                recommended_laws: vec!["pareto".to_string(), "zipf".to_string()],
520                rationale: "集中度分析にはパレート法則での80/20分析とZipf法則での順位分布が有効"
521                    .to_string(),
522                effectiveness: 0.85,
523                implementation_priority: Priority::High,
524            });
525        }
526        AnalysisPurpose::AnomalyDetection => {
527            recommendations.push(PurposeRecommendation {
528                purpose: AnalysisPurpose::AnomalyDetection,
529                recommended_laws: vec!["normal".to_string(), "poisson".to_string()],
530                rationale:
531                    "異常検知には正規分布での外れ値検出とポアソン分布での稀少事象検出が適用可能"
532                        .to_string(),
533                effectiveness: 0.8,
534                implementation_priority: Priority::Medium,
535            });
536        }
537        _ => {
538            recommendations.push(PurposeRecommendation {
539                purpose: AnalysisPurpose::GeneralAnalysis,
540                recommended_laws: result.laws_executed.clone(),
541                rationale: "総合分析では全法則を活用して多角的な評価を実施".to_string(),
542                effectiveness: 0.7,
543                implementation_priority: Priority::Medium,
544            });
545        }
546    }
547
548    recommendations
549}
550
551fn analyze_law_combinations(result: &IntegrationResult) -> Vec<CombinationAnalysis> {
552    let mut combinations = Vec::new();
553
554    let laws: Vec<String> = result.law_scores.keys().cloned().collect();
555
556    // 2法則組み合わせ分析
557    for i in 0..laws.len() {
558        for j in i + 1..laws.len() {
559            let law_a = &laws[i];
560            let law_b = &laws[j];
561
562            let synergy_score = calculate_synergy_score(law_a, law_b, result);
563            let complementarity = assess_complementarity(law_a, law_b);
564
565            combinations.push(CombinationAnalysis {
566                laws: vec![law_a.clone(), law_b.clone()],
567                synergy_score,
568                complementarity,
569                use_cases: generate_combination_use_cases(law_a, law_b),
570                effectiveness_rating: rate_combination_effectiveness(
571                    synergy_score,
572                    complementarity,
573                ),
574            });
575        }
576    }
577
578    combinations.sort_by(|a, b| b.synergy_score.partial_cmp(&a.synergy_score).unwrap());
579    combinations
580}
581
582fn calculate_effectiveness_scores(
583    result: &IntegrationResult,
584) -> HashMap<String, EffectivenessScore> {
585    let mut scores = HashMap::new();
586
587    for (law, &base_score) in &result.law_scores {
588        let data_compatibility =
589            calculate_data_compatibility_score(law, &result.data_characteristics);
590        let purpose_alignment =
591            calculate_purpose_alignment_score(law, &result.data_characteristics.analysis_purpose);
592        let reliability = calculate_reliability_score(law, result);
593
594        let overall_effectiveness =
595            (base_score + data_compatibility + purpose_alignment + reliability) / 4.0;
596
597        scores.insert(
598            law.clone(),
599            EffectivenessScore {
600                base_score,
601                data_compatibility,
602                purpose_alignment,
603                reliability,
604                overall_effectiveness,
605            },
606        );
607    }
608
609    scores
610}
611
612fn generate_implementation_guidance(result: &IntegrationResult) -> ImplementationGuidance {
613    let primary_law = &result.recommendations.primary_law;
614    let setup_steps = generate_setup_steps(primary_law);
615    let validation_criteria = generate_validation_criteria(primary_law);
616    let monitoring_recommendations = generate_monitoring_recommendations(result);
617
618    ImplementationGuidance {
619        primary_law: primary_law.clone(),
620        setup_steps,
621        validation_criteria,
622        monitoring_recommendations,
623        estimated_effort: estimate_implementation_effort(result),
624        success_indicators: generate_success_indicators(result),
625    }
626}
627
// Detailed implementations of the helper functions below are omitted...
// (a full implementation would flesh out each of these analyses)

// Placeholder implementations.
// NOTE(review): every function below returns a fixed constant or stub string;
// callers currently receive neutral/midpoint defaults until the real
// analyses are implemented.

// Placeholder: statistical significance of a conflict (fixed midpoint 0.5).
fn calculate_statistical_significance(_conflict: &Conflict, _result: &IntegrationResult) -> f64 {
    0.5
}
// Placeholder: impact level of a conflict (always Medium).
fn assess_conflict_impact(_conflict: &Conflict, _result: &IntegrationResult) -> ImpactLevel {
    ImpactLevel::Medium
}
// Placeholder: root-cause narrative for a conflict.
fn perform_root_cause_analysis(_conflict: &Conflict, _result: &IntegrationResult) -> String {
    "Under analysis".to_string()
}
// Placeholder: confidence interval for a conflict (widest possible [0, 1]).
fn calculate_conflict_confidence_interval(
    _conflict: &Conflict,
    _result: &IntegrationResult,
) -> (f64, f64) {
    (0.0, 1.0)
}
// Placeholder: severity of a recurring conflict pattern (fixed midpoint).
fn calculate_pattern_severity(
    _conflicts: &[DetailedConflict],
    _conflict_type: &ConflictType,
) -> f64 {
    0.5
}
// Placeholder: human-readable description of a conflict pattern.
fn describe_conflict_pattern(_conflict_type: &ConflictType) -> String {
    "Pattern analysis in progress".to_string()
}
// Placeholder: synergy between two laws (fixed midpoint).
fn calculate_synergy_score(_law_a: &str, _law_b: &str, _result: &IntegrationResult) -> f64 {
    0.5
}
// Placeholder: how well two laws complement each other (fixed midpoint).
fn assess_complementarity(_law_a: &str, _law_b: &str) -> f64 {
    0.5
}
// Placeholder: suggested use cases for a law pairing.
fn generate_combination_use_cases(_law_a: &str, _law_b: &str) -> Vec<String> {
    vec!["一般分析".to_string()]
}
// Placeholder: effectiveness rating of a law pairing (fixed midpoint).
fn rate_combination_effectiveness(_synergy: f64, _complementarity: f64) -> f64 {
    0.5
}
// Placeholder: how well a law fits the dataset characteristics.
fn calculate_data_compatibility_score(_law: &str, _characteristics: &DataCharacteristics) -> f64 {
    0.5
}
// Placeholder: how well a law matches the analysis purpose.
fn calculate_purpose_alignment_score(_law: &str, _purpose: &AnalysisPurpose) -> f64 {
    0.5
}
// Placeholder: reliability of a law's result on this dataset.
fn calculate_reliability_score(_law: &str, _result: &IntegrationResult) -> f64 {
    0.5
}
// Placeholder: setup steps for deploying a law-based check.
fn generate_setup_steps(_law: &str) -> Vec<String> {
    vec!["セットアップ中".to_string()]
}
// Placeholder: validation criteria for a law-based check.
fn generate_validation_criteria(_law: &str) -> Vec<String> {
    vec!["検証基準設定中".to_string()]
}
// Placeholder: ongoing monitoring recommendations.
fn generate_monitoring_recommendations(_result: &IntegrationResult) -> Vec<String> {
    vec!["監視設定中".to_string()]
}
// Placeholder: estimated implementation effort.
fn estimate_implementation_effort(_result: &IntegrationResult) -> String {
    "中程度".to_string()
}
// Placeholder: indicators that the implementation succeeded.
fn generate_success_indicators(_result: &IntegrationResult) -> Vec<String> {
    vec!["成功指標設定中".to_string()]
}
692
// Additional data structures used by the analyses in this module.

/// Outcome of a 5-fold cross-validation run (`cross_validate_laws`).
#[derive(Debug, Clone)]
pub struct CrossValidationResult {
    pub dataset_name: String,
    // Confidence level the caller requested; recorded as-is.
    pub confidence_level: f64,
    // One entry per non-degenerate fold (may be fewer than 5).
    pub validation_folds: Vec<ValidationFold>,
    // Mean fold-consistency score across the folds.
    pub overall_stability: f64,
    pub stability_assessment: StabilityAssessment,
}

/// Train/test results and their consistency score for a single fold.
#[derive(Debug, Clone)]
pub struct ValidationFold {
    // 1-based fold index.
    pub fold_number: usize,
    pub train_result: IntegrationResult,
    pub test_result: IntegrationResult,
    // 1.0 = train/test score maps identical, 0.0 = maximally inconsistent.
    pub consistency_score: f64,
}

/// Qualitative grade of cross-validation stability (see `assess_stability`
/// for the thresholds).
#[derive(Debug, Clone, PartialEq)]
pub enum StabilityAssessment {
    VeryStable,
    Stable,
    ModeratelyStable,
    Unstable,
    VeryUnstable,
}

/// Outcome of the conflict-focused analysis (`detect_conflicts_detailed`).
#[derive(Debug, Clone)]
pub struct ConflictAnalysisResult {
    pub dataset_name: String,
    // Minimum conflict score that was considered.
    pub threshold: f64,
    pub integration_result: IntegrationResult,
    pub detailed_conflicts: Vec<DetailedConflict>,
    pub conflict_patterns: Vec<ConflictPattern>,
    pub resolution_strategies: Vec<ResolutionStrategy>,
    pub conflict_severity: ConflictSeverity,
}

/// A base conflict enriched with significance, impact, root-cause and
/// confidence-interval annotations.
#[derive(Debug, Clone)]
pub struct DetailedConflict {
    pub base_conflict: Conflict,
    pub statistical_significance: f64,
    pub impact_assessment: ImpactLevel,
    pub root_cause_analysis: String,
    // (lower, upper) bounds.
    pub confidence_interval: (f64, f64),
}

/// A conflict type that recurred across the detected conflicts.
#[derive(Debug, Clone)]
pub struct ConflictPattern {
    pub pattern_type: ConflictType,
    // How many conflicts shared this type (always > 1).
    pub frequency: usize,
    pub severity: f64,
    pub description: String,
}

/// A suggested course of action for resolving a conflict.
#[derive(Debug, Clone)]
pub struct ResolutionStrategy {
    pub strategy_name: String,
    pub priority: Priority,
    // Ordered, human-readable action steps.
    pub steps: Vec<String>,
    pub expected_outcome: String,
    // Confidence in the strategy, in [0, 1].
    pub confidence: f64,
}

/// Implementation priority of a recommendation or strategy.
#[derive(Debug, Clone, PartialEq)]
pub enum Priority {
    High,
    Medium,
    Low,
}

/// Assessed impact of a single conflict.
#[derive(Debug, Clone, PartialEq)]
pub enum ImpactLevel {
    High,
    Medium,
    Low,
}

/// Overall severity grade across all detected conflicts.
#[derive(Debug, Clone, PartialEq)]
pub enum ConflictSeverity {
    Critical,
    High,
    Medium,
    Low,
    None,
}

/// Outcome of the purpose-driven recommendation analysis
/// (`generate_detailed_recommendations`).
#[derive(Debug, Clone)]
pub struct DetailedRecommendationResult {
    pub dataset_name: String,
    pub analysis_purpose: AnalysisPurpose,
    pub integration_result: IntegrationResult,
    pub purpose_specific_recommendations: Vec<PurposeRecommendation>,
    pub combination_analysis: Vec<CombinationAnalysis>,
    // Keyed by law name (e.g. "benf", "pareto").
    pub effectiveness_scores: HashMap<String, EffectivenessScore>,
    pub implementation_guidance: ImplementationGuidance,
}

/// Law-selection recommendation tailored to one analysis purpose.
#[derive(Debug, Clone)]
pub struct PurposeRecommendation {
    pub purpose: AnalysisPurpose,
    // Law names recommended for this purpose.
    pub recommended_laws: Vec<String>,
    pub rationale: String,
    // Expected effectiveness, in [0, 1].
    pub effectiveness: f64,
    pub implementation_priority: Priority,
}

/// Scores for a pairing of two laws used together.
#[derive(Debug, Clone)]
pub struct CombinationAnalysis {
    // The pair of law names being combined.
    pub laws: Vec<String>,
    pub synergy_score: f64,
    pub complementarity: f64,
    pub use_cases: Vec<String>,
    pub effectiveness_rating: f64,
}

/// Per-law effectiveness breakdown; `overall_effectiveness` is the mean of
/// the other four fields.
#[derive(Debug, Clone)]
pub struct EffectivenessScore {
    pub base_score: f64,
    pub data_compatibility: f64,
    pub purpose_alignment: f64,
    pub reliability: f64,
    pub overall_effectiveness: f64,
}

/// Practical guidance for deploying the recommended primary law.
#[derive(Debug, Clone)]
pub struct ImplementationGuidance {
    pub primary_law: String,
    pub setup_steps: Vec<String>,
    pub validation_criteria: Vec<String>,
    pub monitoring_recommendations: Vec<String>,
    pub estimated_effort: String,
    pub success_indicators: Vec<String>,
}
827
828use std::collections::HashMap;