scirs2_metrics/lib.rs
#![allow(deprecated)]
#![allow(clippy::uninlined_format_args)]
#![allow(clippy::new_without_default)]
#![allow(clippy::useless_vec)]
#![allow(clippy::too_many_arguments)]
#![allow(dead_code)]
#![allow(clippy::multiple_bound_locations)]
#![allow(clippy::type_complexity)]
#![allow(clippy::manual_clamp)]
#![allow(clippy::should_implement_trait)]
#![allow(clippy::field_reassign_with_default)]
#![allow(clippy::unnecessary_unwrap)]
#![allow(clippy::unnecessary_sort_by)]
#![allow(clippy::collapsible_match)]
//! # SciRS2 Metrics - Machine Learning Evaluation Metrics
//!
//! **scirs2-metrics** provides comprehensive evaluation metrics for machine learning models:
//! classification, regression, clustering, and ranking metrics compatible with scikit-learn,
//! with parallel processing and optimized implementations.
//!
//! ## 🎯 Key Features
//!
//! - **Classification Metrics**: Accuracy, precision, recall, F1, ROC-AUC, confusion matrix
//! - **Regression Metrics**: MSE, RMSE, MAE, R², MAPE, explained variance
//! - **Clustering Metrics**: Silhouette score, Davies-Bouldin, Calinski-Harabasz, adjusted Rand
//! - **Ranking Metrics**: NDCG, MAP, MRR, precision@k, recall@k
//! - **Cross-Validation**: K-fold, stratified, time series CV utilities
//! - **Multi-label Support**: Hamming loss, Jaccard score, label ranking
//!
//! ## 📦 Module Overview
//!
//! | SciRS2 Module | scikit-learn Equivalent | Description |
//! |---------------|-------------------------|-------------|
//! | `classification` | `sklearn.metrics.accuracy_score` | Classification evaluation metrics |
//! | `regression` | `sklearn.metrics.mean_squared_error` | Regression evaluation metrics |
//! | `clustering` | `sklearn.metrics.silhouette_score` | Clustering evaluation metrics |
//! | `ranking` | - | Ranking and recommendation metrics |
//!
//! ## 🚀 Quick Start
//!
//! ```toml
//! [dependencies]
//! scirs2-metrics = "0.1.0-rc.2"
//! ```
//!
//! ```rust,no_run
//! use scirs2_core::ndarray::array;
//! use scirs2_metrics::classification::accuracy_score;
//! use scirs2_metrics::regression::mean_squared_error;
//!
//! // Classification accuracy
//! let y_true = array![0, 1, 2, 0, 1, 2];
//! let y_pred = array![0, 2, 1, 0, 0, 2];
//! let acc = accuracy_score(&y_true, &y_pred).unwrap();
//!
//! // Regression MSE
//! let y_true_reg = array![3.0, -0.5, 2.0, 7.0];
//! let y_pred_reg = array![2.5, 0.0, 2.0, 8.0];
//! let mse = mean_squared_error(&y_true_reg, &y_pred_reg).unwrap();
//! ```
//!
//! ## 🔒 Version: 0.1.0-rc.2 (October 03, 2025)
//!
//! # Classification Metrics
//!
//! Classification metrics evaluate the performance of classification models:
//!
//! ```ignore
//! use scirs2_core::ndarray::array;
//! use scirs2_metrics::classification::{accuracy_score, precision_score, f1_score};
//!
//! let y_true = array![0, 1, 2, 0, 1, 2];
//! let y_pred = array![0, 2, 1, 0, 0, 2];
//!
//! let accuracy = accuracy_score(&y_true, &y_pred).unwrap();
//! let precision = precision_score(&y_true, &y_pred, 1).unwrap();
//! let f1 = f1_score(&y_true, &y_pred, 1).unwrap();
//! ```
//!
//! ## One-vs-One Classification Metrics
//!
//! One-vs-One metrics are useful for evaluating multi-class classification problems by
//! considering each pair of classes separately.
//!
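//! A minimal sketch of the one-vs-one idea (the pair-filtering below is illustrative
//! plain Rust, not a specific scirs2-metrics API): restrict the data to each pair of
//! classes and score each pair independently.
//!
//! ```ignore
//! use scirs2_core::ndarray::{array, Array1};
//! use scirs2_metrics::classification::accuracy_score;
//!
//! let y_true = array![0, 1, 2, 0, 1, 2];
//! let y_pred = array![0, 2, 1, 0, 0, 2];
//!
//! // For each unordered pair of classes, keep only the samples whose true
//! // label belongs to the pair, then score that subset in isolation.
//! for (a, b) in [(0, 1), (0, 2), (1, 2)] {
//!     let idx: Vec<usize> = y_true.iter().enumerate()
//!         .filter(|(_, &c)| c == a || c == b)
//!         .map(|(i, _)| i)
//!         .collect();
//!     let t = Array1::from_iter(idx.iter().map(|&i| y_true[i]));
//!     let p = Array1::from_iter(idx.iter().map(|&i| y_pred[i]));
//!     let acc = accuracy_score(&t, &p).unwrap();
//!     println!("accuracy for classes {a} vs {b}: {acc}");
//! }
//! ```
//!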
//! # Regression Metrics
//!
//! Regression metrics evaluate the performance of regression models:
//!
//! ```ignore
//! use scirs2_core::ndarray::array;
//! use scirs2_metrics::regression::{mean_squared_error, r2_score};
//!
//! let y_true = array![3.0, -0.5, 2.0, 7.0];
//! let y_pred = array![2.5, 0.0, 2.0, 8.0];
//!
//! let mse: f64 = mean_squared_error(&y_true, &y_pred).unwrap();
//! let r2: f64 = r2_score(&y_true, &y_pred).unwrap();
//! ```
//!
//! # Clustering Metrics
//!
//! Clustering metrics evaluate the performance of clustering algorithms:
//!
//! ```ignore
//! use scirs2_core::ndarray::{array, Array2};
//! use scirs2_metrics::clustering::silhouette_score;
//!
//! // Create a small dataset with 2 clusters
//! let X = Array2::from_shape_vec((6, 2), vec![
//!     1.0, 2.0,
//!     1.5, 1.8,
//!     1.2, 2.2,
//!     5.0, 6.0,
//!     5.2, 5.8,
//!     5.5, 6.2,
//! ]).unwrap();
//!
//! let labels = array![0, 0, 0, 1, 1, 1];
//!
//! let score = silhouette_score(&X, &labels, "euclidean").unwrap();
//! ```
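//!
//! The silhouette score ranges from -1.0 to 1.0: values near 1.0 indicate compact,
//! well-separated clusters, while values near or below 0.0 suggest overlapping clusters.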
//!
//! # Ranking Metrics
//!
//! Ranking metrics evaluate the performance of ranking and recommendation models:
//!
//! ```ignore
//! use scirs2_core::ndarray::array;
//! use scirs2_metrics::ranking::{
//!     mean_reciprocal_rank, ndcg_score, mean_average_precision,
//!     precision_at_k, recall_at_k, map_at_k, click_through_rate
//! };
//!
//! // Example: search engine results where each array is a different query
//! // Values indicate whether a result is relevant (1.0) or not (0.0)
//! let y_true = vec![
//!     array![0.0, 1.0, 0.0, 0.0, 0.0], // First query: second result is relevant
//!     array![0.0, 0.0, 0.0, 1.0, 0.0], // Second query: fourth result is relevant
//! ];
//! let y_score = vec![
//!     array![0.1, 0.9, 0.2, 0.3, 0.4], // Scores for first query
//!     array![0.5, 0.6, 0.7, 0.9, 0.8], // Scores for second query
//! ];
//!
//! // Basic ranking metrics
//! let mrr = mean_reciprocal_rank(&y_true, &y_score).unwrap();
//! let ndcg = ndcg_score(&y_true, &y_score, Some(5)).unwrap();
//! let map = mean_average_precision(&y_true, &y_score, None).unwrap();
//! let precision = precision_at_k(&y_true, &y_score, 3).unwrap();
//! let recall = recall_at_k(&y_true, &y_score, 3).unwrap();
//!
//! // Advanced metrics
//! let map_k = map_at_k(&y_true, &y_score, 3).unwrap();
//! let ctr = click_through_rate(&y_true, &y_score, 3).unwrap();
//! ```
//!
//! ## Rank Correlation Metrics
//!
//! For evaluating correlation between rankings:
//!
//! ```ignore
//! use scirs2_core::ndarray::array;
//! use scirs2_metrics::ranking::{kendalls_tau, spearmans_rho};
//!
//! // Compare two different ranking methods
//! let ranking_a = array![1.0, 2.0, 3.0, 4.0, 5.0];
//! let ranking_b = array![1.5, 2.5, 3.0, 3.5, 5.0];
//!
//! // Measure rank correlation
//! let tau = kendalls_tau(&ranking_a, &ranking_b).unwrap();
//! let rho = spearmans_rho(&ranking_a, &ranking_b).unwrap();
//! ```
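//!
//! Kendall's tau compares concordant and discordant pairs, tau = (C - D) / (n(n - 1) / 2),
//! while Spearman's rho is the Pearson correlation computed on the ranks; both lie in [-1, 1].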
//!
//! ## Label Ranking Metrics
//!
//! For multi-label ranking problems:
//!
//! ```ignore
//! use scirs2_core::ndarray::Array2;
//! use scirs2_metrics::ranking::label::{
//!     coverage_error_multiple, label_ranking_loss, label_ranking_average_precision_score
//! };
//!
//! // Multi-label data: 3 samples, 5 labels
//! let y_true = Array2::from_shape_vec((3, 5), vec![
//!     1.0, 0.0, 1.0, 0.0, 0.0, // Sample 1: labels 0 and 2 are relevant
//!     0.0, 0.0, 1.0, 1.0, 0.0, // Sample 2: labels 2 and 3 are relevant
//!     0.0, 1.0, 1.0, 0.0, 1.0, // Sample 3: labels 1, 2, and 4 are relevant
//! ]).unwrap();
//!
//! // Predicted scores for each label
//! let y_score = Array2::from_shape_vec((3, 5), vec![
//!     0.9, 0.2, 0.8, 0.3, 0.1, // Scores for sample 1
//!     0.2, 0.3, 0.9, 0.7, 0.1, // Scores for sample 2
//!     0.1, 0.9, 0.8, 0.2, 0.7, // Scores for sample 3
//! ]).unwrap();
//!
//! // Coverage error measures how far we need to go down the list to cover all true labels
//! let coverage = coverage_error_multiple(&y_true, &y_score).unwrap();
//!
//! // Label ranking loss counts incorrectly ordered label pairs
//! let loss = label_ranking_loss(&y_true, &y_score).unwrap();
//!
//! // Label ranking average precision measures precision at each relevant position
//! let precision = label_ranking_average_precision_score(&y_true, &y_score).unwrap();
//! ```
//!
//! # Anomaly Detection Metrics
//!
//! Metrics for evaluating anomaly detection systems:
//!
//! ```ignore
//! use scirs2_core::ndarray::array;
//! use scirs2_metrics::anomaly::{
//!     detection_accuracy, false_alarm_rate, miss_detection_rate,
//!     anomaly_auc_score, anomaly_average_precision_score
//! };
//!
//! // Ground truth (1 for anomalies, 0 for normal points)
//! let y_true = array![0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0];
//!
//! // Predicted labels
//! let y_pred = array![0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0];
//!
//! // Anomaly scores (higher means more anomalous)
//! let y_score = array![0.1, 0.2, 0.9, 0.7, 0.8, 0.3, 0.6, 0.95, 0.2, 0.1];
//!
//! // Detection accuracy
//! let accuracy = detection_accuracy(&y_true, &y_pred).unwrap();
//!
//! // False alarm rate (Type I error)
//! let far = false_alarm_rate(&y_true, &y_pred).unwrap();
//!
//! // Miss detection rate (Type II error)
//! let mdr = miss_detection_rate(&y_true, &y_pred).unwrap();
//!
//! // AUC for anomaly detection
//! let auc = anomaly_auc_score(&y_true, &y_score).unwrap();
//!
//! // Average precision score
//! let ap = anomaly_average_precision_score(&y_true, &y_score).unwrap();
//! ```
//!
//! ## Distribution Metrics
//!
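//! These metrics quantify how far apart two probability distributions are. For example,
//! the KL divergence of P from Q is D_KL(P || Q) = sum_i p_i * ln(p_i / q_i); it is
//! non-negative and zero exactly when the two distributions coincide.
//!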
//! ```ignore
//! use scirs2_core::ndarray::array;
//! use scirs2_metrics::anomaly::{
//!     kl_divergence, js_divergence, wasserstein_distance, maximum_mean_discrepancy
//! };
//!
//! // Two probability distributions
//! let p = array![0.2, 0.5, 0.3];
//! let q = array![0.3, 0.4, 0.3];
//!
//! // Compute KL divergence
//! let kl = kl_divergence(&p, &q).unwrap();
//!
//! // Jensen-Shannon divergence
//! let js = js_divergence(&p, &q).unwrap();
//!
//! // Wasserstein distance (1D)
//! let samples_p = array![1.0, 2.0, 3.0, 4.0, 5.0];
//! let samples_q = array![1.5, 2.5, 3.5, 4.5, 5.5];
//! let w_dist = wasserstein_distance(&samples_p, &samples_q).unwrap();
//!
//! // Maximum Mean Discrepancy (MMD)
//! let x = array![1.0, 2.0, 3.0, 4.0, 5.0];
//! let y = array![1.2, 2.1, 3.0, 4.1, 5.2];
//! let mmd = maximum_mean_discrepancy(&x, &y, None).unwrap();
//! ```
//!
//! # Fairness and Bias Metrics
//!
//! Metrics for evaluating fairness and bias in machine learning models:
//!
//! ```ignore
//! use scirs2_core::ndarray::{array, Array2};
//! use scirs2_metrics::fairness::{
//!     demographic_parity_difference, equalized_odds_difference, equal_opportunity_difference,
//!     disparate_impact, consistency_score
//! };
//! use scirs2_metrics::fairness::bias_detection::{
//!     slice_analysis, subgroup_performance, intersectional_fairness
//! };
//! use scirs2_metrics::classification::accuracy_score;
//!
//! // Example: binary predictions for two protected groups
//! // y_true: ground truth labels (0 or 1)
//! let y_true = array![0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0];
//! // y_pred: predicted labels (0 or 1)
//! let y_pred = array![0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0];
//! // protected_group: binary array indicating protected group membership (1 for protected group, 0 otherwise)
//! let protected_group = array![1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0];
//!
//! // Compute demographic parity difference
//! // A value of 0 indicates perfect demographic parity
//! let dp_diff = demographic_parity_difference(&y_pred, &protected_group).unwrap();
//!
//! // Compute equalized odds difference
//! // A value of 0 indicates that the false positive and true positive rates are
//! // the same for both groups
//! let eod_diff = equalized_odds_difference(&y_true, &y_pred, &protected_group).unwrap();
//!
//! // Compute equal opportunity difference
//! // A value of 0 indicates equal true positive rates across groups
//! let eo_diff = equal_opportunity_difference(&y_true, &y_pred, &protected_group).unwrap();
//!
//! // Calculate disparate impact
//! // A value of 1.0 indicates perfect fairness; less than 0.8 or greater than 1.25
//! // is often considered problematic
//! let di = disparate_impact(&y_pred, &protected_group).unwrap();
//!
//! // Comprehensive bias detection
//! // Create a dataset with multiple demographic attributes
//! let features = Array2::from_shape_vec((8, 3), vec![
//!     // Feature columns: age, gender (0=male, 1=female), income_level (0=low, 1=medium, 2=high)
//!     30.0, 0.0, 1.0,
//!     25.0, 0.0, 0.0,
//!     35.0, 1.0, 2.0,
//!     28.0, 1.0, 1.0,
//!     45.0, 0.0, 2.0,
//!     42.0, 0.0, 1.0,
//!     33.0, 1.0, 0.0,
//!     50.0, 1.0, 2.0,
//! ]).unwrap();
//!
//! let ground_truth = array![0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0];
//! let predictions = array![0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0];
//!
//! // Analyze model performance across different data slices
//! let slice_results = slice_analysis(
//!     &features,
//!     &[1, 2], // Use gender and income level columns for slicing
//!     &ground_truth,
//!     &predictions,
//!     |y_t, y_p| {
//!         // Convert Vec<f64> to Array1<f64> for accuracy_score
//!         let y_t_array = scirs2_core::ndarray::Array::from_vec(y_t.to_vec());
//!         let y_p_array = scirs2_core::ndarray::Array::from_vec(y_p.to_vec());
//!         accuracy_score(&y_t_array, &y_p_array).unwrap_or(0.0)
//!     }
//! ).unwrap();
//!
//! // Analyze performance for intersectional groups
//! let protected_attrs = Array2::from_shape_vec((8, 2), vec![
//!     // gender, income_level (simplified to binary: 0=low, 1=high)
//!     0.0, 1.0,
//!     0.0, 0.0,
//!     1.0, 1.0,
//!     1.0, 1.0,
//!     0.0, 1.0,
//!     0.0, 1.0,
//!     1.0, 0.0,
//!     1.0, 1.0,
//! ]).unwrap();
//!
//! let attr_names = vec!["gender".to_string(), "income".to_string()];
//!
//! // Analyze fairness metrics across intersectional groups
//! let fairness_metrics = intersectional_fairness(
//!     &ground_truth,
//!     &predictions,
//!     &protected_attrs,
//!     &attr_names
//! ).unwrap();
//!
//! // Evaluate model performance across different demographic subgroups
//! let performance_metrics = subgroup_performance(
//!     &ground_truth,
//!     &predictions,
//!     &protected_attrs,
//!     &attr_names,
//!     |y_t, y_p| {
//!         // Convert Vec<f64> to Array1<f64> for accuracy_score
//!         let y_t_array = scirs2_core::ndarray::Array::from_vec(y_t.to_vec());
//!         let y_p_array = scirs2_core::ndarray::Array::from_vec(y_p.to_vec());
//!         accuracy_score(&y_t_array, &y_p_array).unwrap_or(0.0)
//!     }
//! ).unwrap();
//! ```
//!
//! # Model Evaluation Utilities
//!
//! Utilities for model evaluation like cross-validation:
//!
//! ```ignore
//! use scirs2_core::ndarray::{Array, Ix1};
//! use scirs2_metrics::evaluation::train_test_split;
//!
//! // linspace already yields a 1-D array, so no reshape is needed
//! let x = Array::<f64, Ix1>::linspace(0., 9., 10);
//! let y = &x * 2.;
//!
//! let (train_arrays, test_arrays) = train_test_split(&[&x, &y], 0.3, Some(42)).unwrap();
//! ```
//!
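//! The feature list also mentions K-fold cross-validation utilities. As a minimal sketch
//! of the splitting logic itself (plain Rust with a hypothetical helper, not the crate's
//! actual API; see the `evaluation` module for that), contiguous folds can be generated
//! like this:
//!
//! ```ignore
//! // Split `n` sample indices into `k` contiguous folds and return
//! // (train_indices, test_indices) pairs, one per fold.
//! fn kfold_indices(n: usize, k: usize) -> Vec<(Vec<usize>, Vec<usize>)> {
//!     let fold_size = n / k;
//!     (0..k).map(|f| {
//!         let start = f * fold_size;
//!         // The last fold absorbs the remainder when n is not divisible by k.
//!         let end = if f == k - 1 { n } else { start + fold_size };
//!         let test: Vec<usize> = (start..end).collect();
//!         let train: Vec<usize> = (0..n).filter(|&i| i < start || i >= end).collect();
//!         (train, test)
//!     }).collect()
//! }
//!
//! // 10 samples, 5 folds: each fold holds out 2 samples for testing.
//! for (train, test) in kfold_indices(10, 5) {
//!     assert_eq!(test.len(), 2);
//!     assert_eq!(train.len(), 8);
//! }
//! ```
//!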
//! # Optimization and Performance
//!
//! Optimized metrics computation for better performance and memory efficiency:
//!
//! ```ignore
//! use scirs2_core::ndarray::{array, Array1};
//! use scirs2_metrics::optimization::parallel::ParallelConfig;
//! use scirs2_metrics::optimization::memory::{ChunkedMetrics, StreamingMetric};
//! use scirs2_metrics::optimization::numeric::StableMetrics;
//! use scirs2_metrics::error::{Result, MetricsError};
//! use scirs2_metrics::classification::{accuracy_score, precision_score};
//!
//! // Example data
//! let y_true = array![0, 1, 2, 0, 1, 2];
//! let y_pred = array![0, 2, 1, 0, 0, 2];
//!
//! // Compute metrics with parallel configuration
//! let config = ParallelConfig {
//!     parallel_enabled: true,
//!     min_chunk_size: 1000,
//!     num_threads: None,
//! };
//!
//! // Define metric functions to compute
//! // Note: the boxed closures need a concrete array type (Array1<i32>)
//! let metrics: Vec<Box<dyn Fn(&Array1<i32>, &Array1<i32>) -> Result<f64> + Send + Sync>> = vec![
//!     Box::new(|y_t, y_p| accuracy_score(y_t, y_p)),
//!     Box::new(|y_t, y_p| precision_score(y_t, y_p, 1)),
//! ];
//!
//! // Use chunked metrics for memory efficiency
//! let chunked = ChunkedMetrics::new()
//!     .with_chunk_size(1000)
//!     .with_parallel_config(config.clone());
//!
//! // Example of a streaming metric for incremental computation
//! struct StreamingMeanAbsoluteError;
//!
//! impl StreamingMetric<f64> for StreamingMeanAbsoluteError {
//!     type State = (f64, usize); // Running sum and count
//!
//!     fn init_state(&self) -> Self::State {
//!         (0.0, 0)
//!     }
//!
//!     fn update_state(&self, state: &mut Self::State, batch_true: &[f64], batch_pred: &[f64]) -> Result<()> {
//!         for (y_t, y_p) in batch_true.iter().zip(batch_pred.iter()) {
//!             state.0 += (y_t - y_p).abs();
//!             state.1 += 1;
//!         }
//!         Ok(())
//!     }
//!
//!     fn finalize(&self, state: &Self::State) -> Result<f64> {
//!         if state.1 == 0 {
//!             return Err(MetricsError::DivisionByZero);
//!         }
//!         Ok(state.0 / state.1 as f64)
//!     }
//! }
//!
//! // Numerically stable computations
//! let stable = StableMetrics::<f64>::default();
//! let p = vec![0.5, 0.5, 0.0];
//! let q = vec![0.25, 0.25, 0.5];
//! let kl = stable.kl_divergence(&p, &q).unwrap();
//! let js = stable.js_divergence(&p, &q).unwrap();
//!
//! // Compute additional stable metrics
//! let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
//! let mean = stable.stable_mean(&data).unwrap();
//! let variance = stable.stable_variance(&data, 1).unwrap(); // Sample variance
//! let std_dev = stable.stable_std(&data, 1).unwrap(); // Sample standard deviation
//! ```
//!
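//! Given the `StreamingMetric` implementation above, batches can be folded into the
//! running state one at a time; a usage sketch:
//!
//! ```ignore
//! // Drive the StreamingMeanAbsoluteError defined above over two batches.
//! let metric = StreamingMeanAbsoluteError;
//! let mut state = metric.init_state();
//!
//! metric.update_state(&mut state, &[3.0, -0.5], &[2.5, 0.0]).unwrap();
//! metric.update_state(&mut state, &[2.0, 7.0], &[2.0, 8.0]).unwrap();
//!
//! // MAE over all four samples: (0.5 + 0.5 + 0.0 + 1.0) / 4 = 0.5
//! let mae = metric.finalize(&state).unwrap();
//! assert!((mae - 0.5).abs() < 1e-12);
//! ```
//!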
//! # Visualization
//!
//! Visualization utilities for metrics results:
//!
//! ```ignore
//! use scirs2_core::ndarray::{array, Array2};
//! use scirs2_metrics::classification::confusion_matrix;
//! use scirs2_metrics::classification::curves::{roc_curve, precision_recall_curve, calibration_curve};
//! use scirs2_metrics::visualization::{
//!     MetricVisualizer, VisualizationData, VisualizationMetadata, PlotType,
//!     confusion_matrix::confusion_matrix_visualization,
//!     roc_curve::roc_curve_visualization,
//!     precision_recall::precision_recall_visualization,
//!     calibration::calibration_visualization,
//!     learning_curve::learning_curve_visualization,
//!     interactive::interactive_roc_curve_visualization
//! };
//!
//! // Example: Confusion matrix visualization
//! let y_true = array![0, 1, 2, 0, 1, 2];
//! let y_pred = array![0, 2, 1, 0, 0, 2];
//!
//! let (cm, classes) = confusion_matrix(&y_true, &y_pred, None).unwrap();
//! let labels = vec!["Class 0".to_string(), "Class 1".to_string(), "Class 2".to_string()];
//!
//! // Convert to f64 for visualization
//! let cm_f64 = cm.mapv(|x| x as f64);
//! let cm_viz = confusion_matrix_visualization(cm_f64, Some(labels), false);
//!
//! // Get data and metadata for visualization
//! let viz_data = cm_viz.prepare_data().unwrap();
//! let viz_metadata = cm_viz.get_metadata();
//!
//! // Example: ROC curve visualization
//! let y_true_binary = array![0, 1, 1, 0, 1, 0];
//! let y_score = array![0.1, 0.8, 0.7, 0.2, 0.9, 0.3];
//!
//! let (fpr, tpr, thresholds) = roc_curve(&y_true_binary, &y_score).unwrap();
//! let auc = 0.83; // Example AUC value
//!
//! let roc_viz = roc_curve_visualization(fpr.to_vec(), tpr.to_vec(), Some(thresholds.to_vec()), Some(auc));
//!
//! // Example: Interactive ROC curve visualization with threshold adjustment
//! let interactive_roc_viz = interactive_roc_curve_visualization(
//!     fpr.to_vec(), tpr.to_vec(), Some(thresholds.to_vec()), Some(auc));
//!
//! // Example: Precision-Recall curve visualization
//! let (precision, recall, pr_thresholds) = precision_recall_curve(&y_true_binary, &y_score).unwrap();
//! let ap = 0.75; // Example average precision
//!
//! let pr_viz = precision_recall_visualization(precision.to_vec(), recall.to_vec(), Some(pr_thresholds.to_vec()), Some(ap));
//!
//! // Example: Calibration curve visualization
//! let (prob_true, prob_pred, counts) = calibration_curve(&y_true_binary, &y_score, Some(5)).unwrap();
//!
//! let cal_viz = calibration_visualization(prob_true.to_vec(), prob_pred.to_vec(), 5, "uniform".to_string());
//!
//! // Example: Learning curve visualization
//! let train_sizes = vec![10, 30, 50, 100, 200];
//! let train_scores = vec![
//!     vec![0.6, 0.62, 0.64],  // 10 samples
//!     vec![0.7, 0.72, 0.74],  // 30 samples
//!     vec![0.75, 0.77, 0.79], // 50 samples
//!     vec![0.8, 0.82, 0.84],  // 100 samples
//!     vec![0.85, 0.87, 0.89], // 200 samples
//! ];
//! let val_scores = vec![
//!     vec![0.5, 0.52, 0.54],  // 10 samples
//!     vec![0.6, 0.62, 0.64],  // 30 samples
//!     vec![0.65, 0.67, 0.69], // 50 samples
//!     vec![0.7, 0.72, 0.74],  // 100 samples
//!     vec![0.75, 0.77, 0.79], // 200 samples
//! ];
//!
//! let lc_viz = learning_curve_visualization(train_sizes, train_scores, val_scores, "Accuracy").unwrap();
//! ```
//!
//! ## Interactive Visualizations
//!
//! The library also provides interactive visualizations that allow for dynamic exploration
//! of metrics via web interfaces:
//!
//! ```ignore
//! use scirs2_core::ndarray::array;
//! use scirs2_metrics::classification::curves::roc_curve;
//! use scirs2_metrics::visualization::{
//!     helpers, InteractiveOptions,
//!     backends::{default_interactive_backend, PlotlyInteractiveBackendInterface},
//! };
//!
//! // Create binary classification data
//! let y_true = array![0, 0, 0, 0, 1, 1, 1, 1];
//! let y_score = array![0.1, 0.2, 0.4, 0.6, 0.5, 0.7, 0.8, 0.9];
//!
//! // Compute ROC curve
//! let (fpr, tpr, thresholds) = roc_curve(&y_true, &y_score).unwrap();
//!
//! // Interactive ROC curve with threshold adjustment
//! let interactive_options = InteractiveOptions {
//!     width: 900,
//!     height: 600,
//!     show_threshold_slider: true,
//!     show_metric_values: true,
//!     show_confusion_matrix: true,
//!     custom_layout: std::collections::HashMap::new(),
//! };
//!
//! // Create interactive ROC curve visualization
//! let viz = helpers::visualize_interactive_roc_curve(
//!     fpr.view(),
//!     tpr.view(),
//!     Some(thresholds.view()),
//!     Some(0.94), // AUC value
//!     Some(interactive_options),
//! );
//!
//! // Note: In a real application, you would save this to an HTML file with:
//! // let viz_data = viz.prepare_data().unwrap();
//! // let viz_metadata = viz.get_metadata();
//! // let backend = default_interactive_backend();
//! // backend.save_interactive_roc(&viz_data, &viz_metadata, &Default::default(), "interactive_roc.html");
//! ```
//!
//! # Metric Serialization
//!
//! Utilities for saving, loading, and comparing metric results:
//!
//! ```ignore
//! use std::collections::HashMap;
//! use scirs2_metrics::serialization::{
//!     MetricResult, MetricMetadata, MetricCollection, SerializationFormat,
//!     create_metric_result,
//!     comparison::compare_collections
//! };
//!
//! // Create metric results
//! let accuracy_metadata = MetricMetadata {
//!     dataset_id: Some("test_dataset".to_string()),
//!     model_id: Some("model_v1".to_string()),
//!     parameters: Some({
//!         let mut params = HashMap::new();
//!         params.insert("normalize".to_string(), "true".to_string());
//!         params
//!     }),
//!     additional_metadata: None,
//! };
//!
//! let accuracy = create_metric_result(
//!     "accuracy",
//!     0.85,
//!     None,
//!     Some(accuracy_metadata),
//! );
//!
//! let f1_score = create_metric_result(
//!     "f1_score",
//!     0.82,
//!     Some({
//!         let mut values = HashMap::new();
//!         values.insert("precision".to_string(), 0.80);
//!         values.insert("recall".to_string(), 0.84);
//!         values
//!     }),
//!     None,
//! );
//!
//! // Create a metric collection
//! let mut collection1 = MetricCollection::new(
//!     "Model Evaluation - v1",
//!     Some("Evaluation results for model version 1"),
//! );
//!
//! collection1.add_metric(accuracy);
//! collection1.add_metric(f1_score);
//!
//! // Create another collection for comparison
//! let mut collection2 = MetricCollection::new(
//!     "Model Evaluation - v2",
//!     Some("Evaluation results for model version 2"),
//! );
//!
//! let accuracy_v2 = create_metric_result("accuracy", 0.87, None, None);
//! let f1_score_v2 = create_metric_result("f1_score", 0.84, None, None);
//!
//! collection2.add_metric(accuracy_v2);
//! collection2.add_metric(f1_score_v2);
//!
//! // Compare collections
//! let comparison = compare_collections(&collection1, &collection2, Some(0.01));
//!
//! // Save collections to files (commented out to keep this example free of filesystem side effects)
//! // collection1.save("metrics_v1.json", SerializationFormat::Json).unwrap();
//! // collection2.save("metrics_v2.json", SerializationFormat::Json).unwrap();
//!
//! // Load a collection from a file
//! // let loaded = MetricCollection::load("metrics_v1.json", SerializationFormat::Json).unwrap();
//! ```

#![allow(
    unused_imports,
    unexpected_cfgs,
    clippy::clone_on_copy,
    clippy::needless_range_loop,
    clippy::map_entry,
    clippy::len_zero
)]
//#![warn(missing_docs)]

pub mod anomaly;
pub mod bayesian;
pub mod classification;
pub mod clustering;
pub mod custom;
pub mod dashboard;
pub mod domains;
pub mod error;
pub mod evaluation;
pub mod explainability;
pub mod fairness;

// Integration modules with conditional compilation
#[cfg(any(feature = "neural_common", feature = "optim_integration"))]
pub mod integration;

pub mod optimization;
pub mod ranking;
pub mod regression;
pub mod selection;
pub mod serialization;
pub mod sklearn_compat;
pub mod streaming;
pub mod visualization;