use crate::error::{EvalError, EvalResult};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// One observation in a time series submitted for drift analysis.
///
/// The optional fields carry ground-truth labelling metadata used when
/// evaluating detector quality; they are `None` for unlabelled data.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DriftDetectionEntry {
    /// Time-step (period) index of this observation.
    pub period: u32,
    /// Observed metric value at this period.
    pub value: f64,
    /// Ground-truth label: `Some(true)` when drift is present at this period.
    pub ground_truth_drift: Option<bool>,
    /// Free-form label describing the kind of drift (e.g. "MeanShift").
    pub drift_type: Option<String>,
    /// Labelled drift magnitude, when known.
    pub drift_magnitude: Option<f64>,
    /// Labelled detection-difficulty score in [0, 1], when known
    /// (see `DetectionDifficulty::to_score`).
    pub detection_difficulty: Option<f64>,
}
impl DriftDetectionEntry {
    /// Builds an entry carrying only the raw observation and an optional
    /// ground-truth drift flag; all labelling metadata is left unset.
    pub fn new(period: u32, value: f64, ground_truth_drift: Option<bool>) -> Self {
        Self {
            period,
            value,
            ground_truth_drift,
            drift_type: None,
            drift_magnitude: None,
            detection_difficulty: None,
        }
    }

    /// Builds a fully labelled entry with all drift metadata attached.
    pub fn with_metadata(
        period: u32,
        value: f64,
        ground_truth_drift: bool,
        drift_type: impl Into<String>,
        drift_magnitude: f64,
        detection_difficulty: f64,
    ) -> Self {
        // Delegate to `new` for the common fields, then fill in the metadata.
        let mut entry = Self::new(period, value, Some(ground_truth_drift));
        entry.drift_type = Some(drift_type.into());
        entry.drift_magnitude = Some(drift_magnitude);
        entry.detection_difficulty = Some(detection_difficulty);
        entry
    }
}
/// A ground-truth drift event with labelling metadata, used when analyzing
/// curated event sets rather than raw time series.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LabeledDriftEvent {
    /// Unique identifier for this event.
    pub event_id: String,
    /// Category of drift this event represents.
    pub event_type: DriftEventCategory,
    /// Period at which the event begins.
    pub start_period: u32,
    /// Period at which the event ends, if bounded; `None` for open-ended events.
    pub end_period: Option<u32>,
    /// Names of the data fields affected by this event.
    pub affected_fields: Vec<String>,
    /// Magnitude of the drift introduced by this event.
    pub magnitude: f64,
    /// How hard this event is expected to be to detect.
    pub detection_difficulty: DetectionDifficulty,
}
/// Kinds of drift events, spanning purely statistical changes and
/// business/domain-driven changes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum DriftEventCategory {
    // Statistical categories (see `is_statistical`).
    MeanShift,
    VarianceChange,
    TrendChange,
    SeasonalityChange,
    // Distribution/category structure changes (neither statistical nor
    // business per the predicates below).
    ProportionShift,
    NewCategory,
    // Business-event categories (see `is_business_event`).
    OrganizationalEvent,
    RegulatoryChange,
    TechnologyTransition,
    EconomicCycle,
    ProcessEvolution,
}
impl DriftEventCategory {
    /// Human-readable display name for this category.
    pub fn name(&self) -> &'static str {
        match self {
            Self::MeanShift => "Mean Shift",
            Self::VarianceChange => "Variance Change",
            Self::TrendChange => "Trend Change",
            Self::SeasonalityChange => "Seasonality Change",
            Self::ProportionShift => "Proportion Shift",
            Self::NewCategory => "New Category",
            Self::OrganizationalEvent => "Organizational Event",
            Self::RegulatoryChange => "Regulatory Change",
            Self::TechnologyTransition => "Technology Transition",
            Self::EconomicCycle => "Economic Cycle",
            Self::ProcessEvolution => "Process Evolution",
        }
    }

    /// True for categories describing a purely statistical change in the
    /// series (mean, variance, trend, or seasonality).
    pub fn is_statistical(&self) -> bool {
        matches!(
            self,
            Self::SeasonalityChange | Self::TrendChange | Self::VarianceChange | Self::MeanShift
        )
    }

    /// True for categories driven by a business or organizational cause.
    /// `EconomicCycle`, `ProportionShift`, and `NewCategory` are deliberately
    /// in neither group.
    pub fn is_business_event(&self) -> bool {
        matches!(
            self,
            Self::ProcessEvolution
                | Self::TechnologyTransition
                | Self::RegulatoryChange
                | Self::OrganizationalEvent
        )
    }
}
/// Coarse difficulty rating for detecting a drift event, convertible to and
/// from a numeric score in [0, 1].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum DetectionDifficulty {
    /// Score 0.0; scores below 0.33 map here.
    Easy,
    /// Score 0.5; scores in [0.33, 0.67) map here.
    Medium,
    /// Score 1.0; scores >= 0.67 map here.
    Hard,
}
impl DetectionDifficulty {
    /// Maps the difficulty level onto a numeric score in [0, 1].
    pub fn to_score(&self) -> f64 {
        match self {
            Self::Hard => 1.0,
            Self::Medium => 0.5,
            Self::Easy => 0.0,
        }
    }

    /// Buckets a numeric score back into a difficulty level.
    /// Non-finite scores (NaN) fall through to `Hard`, matching the
    /// comparison semantics of the thresholds.
    pub fn from_score(score: f64) -> Self {
        match score {
            s if s < 0.33 => Self::Easy,
            s if s < 0.67 => Self::Medium,
            _ => Self::Hard,
        }
    }
}
/// Configurable analyzer that detects distribution drift in a numeric time
/// series and scores detector quality against optional ground-truth labels.
#[derive(Debug, Clone)]
pub struct DriftDetectionAnalyzer {
    /// Statistical significance level (alpha) used when thresholding z-scores.
    significance_level: f64,
    /// Number of samples per rolling window for means/standard deviations.
    window_size: usize,
    /// Minimum relative magnitude for drift to be considered significant.
    min_magnitude_threshold: f64,
    /// Whether to compute the Hellinger distance between series halves.
    use_hellinger: bool,
    /// Whether to compute the Population Stability Index between series halves.
    use_psi: bool,
}
impl DriftDetectionAnalyzer {
pub fn new(significance_level: f64) -> Self {
Self {
significance_level,
window_size: 10,
min_magnitude_threshold: 0.05,
use_hellinger: true,
use_psi: true,
}
}
pub fn with_window_size(mut self, size: usize) -> Self {
self.window_size = size;
self
}
pub fn with_min_magnitude(mut self, threshold: f64) -> Self {
self.min_magnitude_threshold = threshold;
self
}
pub fn with_hellinger(mut self, enabled: bool) -> Self {
self.use_hellinger = enabled;
self
}
pub fn with_psi(mut self, enabled: bool) -> Self {
self.use_psi = enabled;
self
}
pub fn analyze(&self, entries: &[DriftDetectionEntry]) -> EvalResult<DriftDetectionAnalysis> {
if entries.len() < self.window_size * 2 {
return Err(EvalError::InsufficientData {
required: self.window_size * 2,
actual: entries.len(),
});
}
let values: Vec<f64> = entries.iter().map(|e| e.value).collect();
let ground_truth: Vec<Option<bool>> =
entries.iter().map(|e| e.ground_truth_drift).collect();
let rolling_means = self.calculate_rolling_means(&values);
let rolling_stds = self.calculate_rolling_stds(&values);
let detected_drift = self.detect_drift_points(&rolling_means, &rolling_stds);
let metrics = self.calculate_detection_metrics(&detected_drift, &ground_truth);
let hellinger_distance = if self.use_hellinger {
Some(self.calculate_hellinger_distance(&values))
} else {
None
};
let psi = if self.use_psi {
Some(self.calculate_psi(&values))
} else {
None
};
let drift_detected = detected_drift.iter().any(|&d| d);
let drift_count = detected_drift.iter().filter(|&&d| d).count();
let drift_magnitude = self.calculate_drift_magnitude(&rolling_means);
let passes = self.evaluate_pass_status(&metrics, drift_magnitude);
let issues = self.collect_issues(&metrics, drift_magnitude, drift_count);
Ok(DriftDetectionAnalysis {
sample_size: entries.len(),
drift_detected,
drift_count,
drift_magnitude,
detection_metrics: metrics,
hellinger_distance,
psi,
rolling_mean_change: self.calculate_mean_change(&rolling_means),
rolling_std_change: self.calculate_std_change(&rolling_stds),
passes,
issues,
})
}
pub fn analyze_labeled_events(
&self,
events: &[LabeledDriftEvent],
) -> EvalResult<LabeledEventAnalysis> {
if events.is_empty() {
return Ok(LabeledEventAnalysis::empty());
}
let mut category_counts: HashMap<DriftEventCategory, usize> = HashMap::new();
for event in events {
*category_counts.entry(event.event_type).or_insert(0) += 1;
}
let mut difficulty_counts: HashMap<DetectionDifficulty, usize> = HashMap::new();
for event in events {
*difficulty_counts
.entry(event.detection_difficulty)
.or_insert(0) += 1;
}
let total_events = events.len();
let statistical_events = events
.iter()
.filter(|e| e.event_type.is_statistical())
.count();
let business_events = events
.iter()
.filter(|e| e.event_type.is_business_event())
.count();
let avg_magnitude = events.iter().map(|e| e.magnitude).sum::<f64>() / total_events as f64;
let avg_difficulty = events
.iter()
.map(|e| e.detection_difficulty.to_score())
.sum::<f64>()
/ total_events as f64;
let min_period = events.iter().map(|e| e.start_period).min().unwrap_or(0);
let max_period = events
.iter()
.filter_map(|e| e.end_period)
.max()
.unwrap_or(min_period);
let passes = total_events > 0 && avg_magnitude >= self.min_magnitude_threshold;
let issues = if !passes {
vec!["Insufficient drift events or magnitude too low".to_string()]
} else {
Vec::new()
};
Ok(LabeledEventAnalysis {
total_events,
statistical_events,
business_events,
category_distribution: category_counts,
difficulty_distribution: difficulty_counts,
avg_magnitude,
avg_difficulty,
period_coverage: (min_period, max_period),
passes,
issues,
})
}
fn calculate_rolling_means(&self, values: &[f64]) -> Vec<f64> {
if values.len() < self.window_size {
tracing::debug!(
"Drift detection: not enough values ({}) for window size ({}), returning empty",
values.len(),
self.window_size
);
return Vec::new();
}
let mut means = Vec::with_capacity(values.len() - self.window_size + 1);
for i in 0..=(values.len() - self.window_size) {
let window = &values[i..i + self.window_size];
let mean = window.iter().sum::<f64>() / self.window_size as f64;
means.push(mean);
}
means
}
fn calculate_rolling_stds(&self, values: &[f64]) -> Vec<f64> {
if values.len() < self.window_size {
tracing::debug!(
"Drift detection: not enough values ({}) for window size ({}), returning empty",
values.len(),
self.window_size
);
return Vec::new();
}
let mut stds = Vec::with_capacity(values.len() - self.window_size + 1);
for i in 0..=(values.len() - self.window_size) {
let window = &values[i..i + self.window_size];
let mean = window.iter().sum::<f64>() / self.window_size as f64;
let variance =
window.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / self.window_size as f64;
stds.push(variance.sqrt());
}
stds
}
fn detect_drift_points(&self, means: &[f64], stds: &[f64]) -> Vec<bool> {
if means.len() < 2 {
return vec![false; means.len()];
}
let mut detected = vec![false; means.len()];
let baseline_end = means.len() / 2;
let baseline_mean = means[..baseline_end].iter().sum::<f64>() / baseline_end as f64;
let baseline_std = if baseline_end > 1 {
let variance = means[..baseline_end]
.iter()
.map(|x| (x - baseline_mean).powi(2))
.sum::<f64>()
/ baseline_end as f64;
variance.sqrt().max(0.001) } else {
0.001
};
for i in baseline_end..means.len() {
let z_score = (means[i] - baseline_mean).abs() / baseline_std;
let threshold = 1.96 / self.significance_level.sqrt();
if z_score > threshold {
detected[i] = true;
}
if i < stds.len() && baseline_end > 0 {
let baseline_var_mean =
stds[..baseline_end].iter().sum::<f64>() / baseline_end as f64;
if baseline_var_mean > 0.001 {
let var_ratio = stds[i] / baseline_var_mean;
if !(0.5..=2.0).contains(&var_ratio) {
detected[i] = true;
}
}
}
}
detected
}
fn calculate_detection_metrics(
&self,
detected: &[bool],
ground_truth: &[Option<bool>],
) -> DriftDetectionMetrics {
let mut true_positives = 0;
let mut false_positives = 0;
let mut true_negatives = 0;
let mut false_negatives = 0;
let mut detection_delays = Vec::new();
let offset = detected.len().saturating_sub(ground_truth.len());
for (i, >) in ground_truth.iter().enumerate() {
let detected_idx = i + offset;
if detected_idx >= detected.len() {
break;
}
let pred = detected[detected_idx];
match gt {
Some(true) => {
if pred {
true_positives += 1;
} else {
false_negatives += 1;
}
}
Some(false) => {
if pred {
false_positives += 1;
} else {
true_negatives += 1;
}
}
None => {}
}
}
let mut last_drift_start: Option<usize> = None;
for (i, >) in ground_truth.iter().enumerate() {
if gt == Some(true) && last_drift_start.is_none() {
last_drift_start = Some(i);
} else if gt == Some(false) {
last_drift_start = None;
}
let detected_idx = i + offset;
if detected_idx < detected.len() && detected[detected_idx] {
if let Some(start) = last_drift_start {
detection_delays.push((i - start) as f64);
last_drift_start = None;
}
}
}
let precision = if true_positives + false_positives > 0 {
true_positives as f64 / (true_positives + false_positives) as f64
} else {
0.0
};
let recall = if true_positives + false_negatives > 0 {
true_positives as f64 / (true_positives + false_negatives) as f64
} else {
0.0
};
let f1_score = if precision + recall > 0.0 {
2.0 * precision * recall / (precision + recall)
} else {
0.0
};
let mean_detection_delay = if detection_delays.is_empty() {
None
} else {
Some(detection_delays.iter().sum::<f64>() / detection_delays.len() as f64)
};
DriftDetectionMetrics {
true_positives,
false_positives,
true_negatives,
false_negatives,
precision,
recall,
f1_score,
mean_detection_delay,
}
}
fn calculate_hellinger_distance(&self, values: &[f64]) -> f64 {
if values.len() < 20 {
return 0.0;
}
let mid = values.len() / 2;
let first_half = &values[..mid];
let second_half = &values[mid..];
let (min_val, max_val) = values.iter().fold((f64::MAX, f64::MIN), |(min, max), &v| {
(min.min(v), max.max(v))
});
if (max_val - min_val).abs() < f64::EPSILON {
return 0.0;
}
let num_bins = 10;
let bin_width = (max_val - min_val) / num_bins as f64;
let mut hist1 = vec![0.0; num_bins];
let mut hist2 = vec![0.0; num_bins];
for &v in first_half {
let bin = ((v - min_val) / bin_width).floor() as usize;
let bin = bin.min(num_bins - 1);
hist1[bin] += 1.0;
}
for &v in second_half {
let bin = ((v - min_val) / bin_width).floor() as usize;
let bin = bin.min(num_bins - 1);
hist2[bin] += 1.0;
}
let sum1: f64 = hist1.iter().sum();
let sum2: f64 = hist2.iter().sum();
if sum1 == 0.0 || sum2 == 0.0 {
return 0.0;
}
for h in &mut hist1 {
*h /= sum1;
}
for h in &mut hist2 {
*h /= sum2;
}
let mut sum_sq_diff = 0.0;
for i in 0..num_bins {
let diff = hist1[i].sqrt() - hist2[i].sqrt();
sum_sq_diff += diff * diff;
}
(sum_sq_diff / 2.0).sqrt()
}
fn calculate_psi(&self, values: &[f64]) -> f64 {
if values.len() < 20 {
return 0.0;
}
let mid = values.len() / 2;
let baseline = &values[..mid];
let current = &values[mid..];
let (min_val, max_val) = values.iter().fold((f64::MAX, f64::MIN), |(min, max), &v| {
(min.min(v), max.max(v))
});
if (max_val - min_val).abs() < f64::EPSILON {
return 0.0;
}
let num_bins = 10;
let bin_width = (max_val - min_val) / num_bins as f64;
let mut hist_baseline = vec![0.0; num_bins];
let mut hist_current = vec![0.0; num_bins];
for &v in baseline {
let bin = ((v - min_val) / bin_width).floor() as usize;
let bin = bin.min(num_bins - 1);
hist_baseline[bin] += 1.0;
}
for &v in current {
let bin = ((v - min_val) / bin_width).floor() as usize;
let bin = bin.min(num_bins - 1);
hist_current[bin] += 1.0;
}
let epsilon = 0.0001;
let sum_baseline: f64 = hist_baseline.iter().sum();
let sum_current: f64 = hist_current.iter().sum();
if sum_baseline == 0.0 || sum_current == 0.0 {
return 0.0;
}
for h in &mut hist_baseline {
*h = (*h / sum_baseline).max(epsilon);
}
for h in &mut hist_current {
*h = (*h / sum_current).max(epsilon);
}
let mut psi = 0.0;
for i in 0..num_bins {
let diff = hist_current[i] - hist_baseline[i];
let ratio = hist_current[i] / hist_baseline[i];
psi += diff * ratio.ln();
}
psi
}
fn calculate_drift_magnitude(&self, means: &[f64]) -> f64 {
if means.len() < 2 {
return 0.0;
}
let mid = means.len() / 2;
let first_mean = means[..mid].iter().sum::<f64>() / mid as f64;
let second_mean = means[mid..].iter().sum::<f64>() / (means.len() - mid) as f64;
if first_mean.abs() < f64::EPSILON {
return (second_mean - first_mean).abs();
}
((second_mean - first_mean) / first_mean).abs()
}
fn calculate_mean_change(&self, means: &[f64]) -> f64 {
if means.len() < 2 {
return 0.0;
}
let first = means.first().unwrap_or(&0.0);
let last = means.last().unwrap_or(&0.0);
if first.abs() < f64::EPSILON {
return 0.0;
}
(last - first) / first
}
fn calculate_std_change(&self, stds: &[f64]) -> f64 {
if stds.len() < 2 {
return 0.0;
}
let first = stds.first().unwrap_or(&0.0);
let last = stds.last().unwrap_or(&0.0);
if first.abs() < f64::EPSILON {
return 0.0;
}
(last - first) / first
}
fn evaluate_pass_status(&self, metrics: &DriftDetectionMetrics, drift_magnitude: f64) -> bool {
if drift_magnitude < self.min_magnitude_threshold {
return true; }
metrics.f1_score >= 0.5 || metrics.precision >= 0.6 || metrics.recall >= 0.6
}
fn collect_issues(
&self,
metrics: &DriftDetectionMetrics,
drift_magnitude: f64,
drift_count: usize,
) -> Vec<String> {
let mut issues = Vec::new();
if drift_magnitude >= self.min_magnitude_threshold {
if metrics.precision < 0.5 {
issues.push(format!(
"Low precision ({:.2}): many false positives",
metrics.precision
));
}
if metrics.recall < 0.5 {
issues.push(format!(
"Low recall ({:.2}): many drift events missed",
metrics.recall
));
}
if let Some(delay) = metrics.mean_detection_delay {
if delay > 3.0 {
issues.push(format!("High detection delay ({delay:.1} periods)"));
}
}
}
if drift_count == 0 && drift_magnitude >= self.min_magnitude_threshold {
issues.push("No drift detected despite significant magnitude change".to_string());
}
issues
}
}
impl Default for DriftDetectionAnalyzer {
fn default() -> Self {
Self::new(0.05)
}
}
/// Full result of `DriftDetectionAnalyzer::analyze`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DriftDetectionAnalysis {
    /// Number of entries analyzed.
    pub sample_size: usize,
    /// Whether any drift point was flagged.
    pub drift_detected: bool,
    /// Number of rolling-window positions flagged as drift.
    pub drift_count: usize,
    /// Relative change between first- and second-half rolling means.
    pub drift_magnitude: f64,
    /// Detector quality metrics scored against ground-truth labels.
    pub detection_metrics: DriftDetectionMetrics,
    /// Hellinger distance between series halves; `None` when disabled.
    pub hellinger_distance: Option<f64>,
    /// Population Stability Index between series halves; `None` when disabled.
    pub psi: Option<f64>,
    /// Signed relative change from the first to the last rolling mean.
    pub rolling_mean_change: f64,
    /// Signed relative change from the first to the last rolling std.
    pub rolling_std_change: f64,
    /// Overall pass/fail verdict for this run.
    pub passes: bool,
    /// Human-readable descriptions of detected problems.
    pub issues: Vec<String>,
}
/// Confusion-matrix based quality metrics for a drift detector, computed
/// against per-period ground-truth labels.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DriftDetectionMetrics {
    /// Labelled-drift periods correctly flagged.
    pub true_positives: usize,
    /// Non-drift periods incorrectly flagged.
    pub false_positives: usize,
    /// Non-drift periods correctly left unflagged.
    pub true_negatives: usize,
    /// Labelled-drift periods missed.
    pub false_negatives: usize,
    /// TP / (TP + FP); 0.0 when no positives were predicted.
    pub precision: f64,
    /// TP / (TP + FN); 0.0 when there were no labelled drift periods.
    pub recall: f64,
    /// Harmonic mean of precision and recall; 0.0 when both are 0.
    pub f1_score: f64,
    /// Mean periods from drift onset to first detection; `None` when no
    /// detection followed a labelled onset.
    pub mean_detection_delay: Option<f64>,
}
/// Summary produced by `DriftDetectionAnalyzer::analyze_labeled_events`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LabeledEventAnalysis {
    /// Total number of labelled events.
    pub total_events: usize,
    /// Events whose category is statistical (mean/variance/trend/seasonality).
    pub statistical_events: usize,
    /// Events whose category is a business event.
    pub business_events: usize,
    /// Event count per category.
    pub category_distribution: HashMap<DriftEventCategory, usize>,
    /// Event count per detection difficulty.
    pub difficulty_distribution: HashMap<DetectionDifficulty, usize>,
    /// Mean event magnitude.
    pub avg_magnitude: f64,
    /// Mean difficulty score (see `DetectionDifficulty::to_score`).
    pub avg_difficulty: f64,
    /// (earliest start period, latest recorded end period).
    pub period_coverage: (u32, u32),
    /// Whether the event set is non-empty with sufficient average magnitude.
    pub passes: bool,
    /// Human-readable descriptions of detected problems.
    pub issues: Vec<String>,
}
impl LabeledEventAnalysis {
    /// The analysis for "no events observed": all counters and averages are
    /// zero, the distributions are empty, and the status is passing.
    pub fn empty() -> Self {
        Self {
            passes: true,
            total_events: 0,
            statistical_events: 0,
            business_events: 0,
            avg_magnitude: 0.0,
            avg_difficulty: 0.0,
            period_coverage: (0, 0),
            category_distribution: HashMap::default(),
            difficulty_distribution: HashMap::default(),
            issues: Vec::default(),
        }
    }
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    // `new` stores the basic fields and leaves metadata unset.
    #[test]
    fn test_drift_detection_entry_creation() {
        let entry = DriftDetectionEntry::new(1, 100.0, Some(true));
        assert_eq!(entry.period, 1);
        assert_eq!(entry.value, 100.0);
        assert_eq!(entry.ground_truth_drift, Some(true));
    }

    // `with_metadata` populates all optional labelling fields.
    #[test]
    fn test_drift_detection_entry_with_metadata() {
        let entry = DriftDetectionEntry::with_metadata(5, 150.0, true, "MeanShift", 0.15, 0.3);
        assert_eq!(entry.period, 5);
        assert_eq!(entry.drift_type, Some("MeanShift".to_string()));
        assert_eq!(entry.drift_magnitude, Some(0.15));
        assert_eq!(entry.detection_difficulty, Some(0.3));
    }

    // Spot-check display names for a statistical and a business category.
    #[test]
    fn test_drift_event_category_names() {
        assert_eq!(DriftEventCategory::MeanShift.name(), "Mean Shift");
        assert_eq!(
            DriftEventCategory::OrganizationalEvent.name(),
            "Organizational Event"
        );
    }

    // Statistical and business classifications are mutually exclusive.
    #[test]
    fn test_drift_event_category_classification() {
        assert!(DriftEventCategory::MeanShift.is_statistical());
        assert!(!DriftEventCategory::MeanShift.is_business_event());
        assert!(DriftEventCategory::OrganizationalEvent.is_business_event());
        assert!(!DriftEventCategory::OrganizationalEvent.is_statistical());
    }

    // Round-trip between difficulty levels and numeric scores.
    #[test]
    fn test_detection_difficulty_conversion() {
        assert_eq!(DetectionDifficulty::Easy.to_score(), 0.0);
        assert_eq!(DetectionDifficulty::Medium.to_score(), 0.5);
        assert_eq!(DetectionDifficulty::Hard.to_score(), 1.0);
        assert_eq!(
            DetectionDifficulty::from_score(0.1),
            DetectionDifficulty::Easy
        );
        assert_eq!(
            DetectionDifficulty::from_score(0.5),
            DetectionDifficulty::Medium
        );
        assert_eq!(
            DetectionDifficulty::from_score(0.8),
            DetectionDifficulty::Hard
        );
    }

    // Builder methods propagate configuration onto the analyzer.
    #[test]
    fn test_analyzer_creation() {
        let analyzer = DriftDetectionAnalyzer::new(0.05)
            .with_window_size(15)
            .with_min_magnitude(0.1)
            .with_hellinger(true)
            .with_psi(true);
        assert_eq!(analyzer.significance_level, 0.05);
        assert_eq!(analyzer.window_size, 15);
        assert_eq!(analyzer.min_magnitude_threshold, 0.1);
    }

    // A near-constant series (tiny linear ramp) should not report much drift.
    #[test]
    fn test_analyze_no_drift() {
        let analyzer = DriftDetectionAnalyzer::new(0.05).with_window_size(5);
        let entries: Vec<DriftDetectionEntry> = (0..30)
            .map(|i| DriftDetectionEntry::new(i, 100.0 + (i as f64 * 0.01), Some(false)))
            .collect();
        let result = analyzer.analyze(&entries).unwrap();
        assert!(!result.drift_detected || result.drift_count < 5);
        assert!(result.drift_magnitude < 0.1);
    }

    // A step change from 100 to 150 at the midpoint must be detected with a
    // large relative magnitude (~0.5).
    #[test]
    fn test_analyze_with_drift() {
        let analyzer = DriftDetectionAnalyzer::new(0.05).with_window_size(5);
        let mut entries: Vec<DriftDetectionEntry> = (0..15)
            .map(|i| DriftDetectionEntry::new(i, 100.0, Some(false)))
            .collect();
        for i in 15..30 {
            entries.push(DriftDetectionEntry::new(i, 150.0, Some(true)));
        }
        let result = analyzer.analyze(&entries).unwrap();
        assert!(result.drift_detected);
        assert!(result.drift_magnitude > 0.3);
    }

    // Fewer than 2 * window_size entries yields InsufficientData.
    #[test]
    fn test_analyze_insufficient_data() {
        let analyzer = DriftDetectionAnalyzer::new(0.05).with_window_size(10);
        let entries: Vec<DriftDetectionEntry> = (0..5)
            .map(|i| DriftDetectionEntry::new(i, 100.0, None))
            .collect();
        let result = analyzer.analyze(&entries);
        assert!(result.is_err());
    }

    // One statistical plus one business event: counts, averages and the
    // pass status (avg magnitude 0.225 >= 0.05) are all checked.
    #[test]
    fn test_analyze_labeled_events() {
        let analyzer = DriftDetectionAnalyzer::new(0.05);
        let events = vec![
            LabeledDriftEvent {
                event_id: "E1".to_string(),
                event_type: DriftEventCategory::MeanShift,
                start_period: 10,
                end_period: Some(15),
                affected_fields: vec!["amount".to_string()],
                magnitude: 0.15,
                detection_difficulty: DetectionDifficulty::Easy,
            },
            LabeledDriftEvent {
                event_id: "E2".to_string(),
                event_type: DriftEventCategory::OrganizationalEvent,
                start_period: 20,
                end_period: Some(25),
                affected_fields: vec!["volume".to_string()],
                magnitude: 0.30,
                detection_difficulty: DetectionDifficulty::Medium,
            },
        ];
        let result = analyzer.analyze_labeled_events(&events).unwrap();
        assert_eq!(result.total_events, 2);
        assert_eq!(result.statistical_events, 1);
        assert_eq!(result.business_events, 1);
        assert!(result.avg_magnitude > 0.2);
        assert!(result.passes);
    }

    // An empty event slice is a trivially passing analysis.
    #[test]
    fn test_empty_labeled_events() {
        let analyzer = DriftDetectionAnalyzer::new(0.05);
        let result = analyzer.analyze_labeled_events(&[]).unwrap();
        assert_eq!(result.total_events, 0);
        assert!(result.passes);
    }

    // A repeating sawtooth has near-identical half distributions, so the
    // Hellinger distance stays small.
    #[test]
    fn test_hellinger_distance_no_drift() {
        let analyzer = DriftDetectionAnalyzer::new(0.05);
        let entries: Vec<DriftDetectionEntry> = (0..40)
            .map(|i| DriftDetectionEntry::new(i, 100.0 + (i as f64 % 5.0), None))
            .collect();
        let result = analyzer.analyze(&entries).unwrap();
        assert!(result.hellinger_distance.unwrap() < 0.3);
    }

    // A step change between halves produces a strictly positive PSI.
    #[test]
    fn test_psi_calculation() {
        let analyzer = DriftDetectionAnalyzer::new(0.05);
        let mut entries: Vec<DriftDetectionEntry> = (0..20)
            .map(|i| DriftDetectionEntry::new(i, 100.0, None))
            .collect();
        for i in 20..40 {
            entries.push(DriftDetectionEntry::new(i, 200.0, None));
        }
        let result = analyzer.analyze(&entries).unwrap();
        assert!(result.psi.is_some());
        assert!(result.psi.unwrap() > 0.0);
    }

    // Precision/recall are well-defined (in [0, 1]) on a labelled step change.
    #[test]
    fn test_detection_metrics_calculation() {
        let analyzer = DriftDetectionAnalyzer::new(0.05).with_window_size(3);
        let mut entries = Vec::new();
        for i in 0..10 {
            entries.push(DriftDetectionEntry::new(i, 100.0, Some(false)));
        }
        for i in 10..20 {
            entries.push(DriftDetectionEntry::new(i, 200.0, Some(true)));
        }
        let result = analyzer.analyze(&entries).unwrap();
        assert!(result.detection_metrics.precision >= 0.0);
        assert!(result.detection_metrics.recall >= 0.0);
    }
}