// File: voirs_feedback/progress/analytics.rs
1//! Comprehensive analytics framework for progress tracking
2//!
3//! This module provides advanced analytics capabilities for user progress tracking,
4//! including statistical analysis, comparative studies, longitudinal data collection,
5//! and real-time dashboard functionality.
6
7use crate::traits::{ProgressSnapshot, TimeRange, UserProgress};
8use crate::FeedbackError;
9use chrono::{DateTime, Utc};
10use serde::{Deserialize, Serialize};
11use std::collections::{HashMap, VecDeque};
12use std::time::Duration;
13
14/// Trend direction enumeration
15#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
16pub enum TrendDirection {
17    /// Improving trend
18    Improving,
19    /// Stable performance
20    Stable,
21    /// Declining trend
22    Declining,
23}
24
/// Comprehensive analytics framework for progress tracking.
///
/// Owns capacity-bounded raw metric storage plus compact per-name aggregates
/// for long-term trends, and tracks when stale data was last evicted.
#[derive(Debug, Clone)]
pub struct ComprehensiveAnalyticsFramework {
    /// Analytics configuration (capacities, retention, cleanup thresholds)
    config: AnalyticsConfig,
    /// Memory-bounded metrics storage with LRU eviction
    metrics: MemoryBoundedMetrics,
    /// Aggregated metrics for long-term storage, keyed by metric name
    aggregated_metrics: HashMap<String, AggregatedMetric>,
    /// Last cleanup timestamp — compared against the cleanup interval
    last_cleanup: DateTime<Utc>,
}
37
impl Default for ComprehensiveAnalyticsFramework {
    /// Equivalent to [`ComprehensiveAnalyticsFramework::new`].
    fn default() -> Self {
        Self::new()
    }
}
43
44impl ComprehensiveAnalyticsFramework {
45    /// Create a new analytics framework
46    #[must_use]
47    pub fn new() -> Self {
48        let config = AnalyticsConfig::default();
49        Self {
50            metrics: MemoryBoundedMetrics::new(config.max_metrics_capacity),
51            aggregated_metrics: HashMap::new(),
52            last_cleanup: Utc::now(),
53            config,
54        }
55    }
56
57    /// Create a new analytics framework with custom configuration
58    #[must_use]
59    pub fn with_config(config: AnalyticsConfig) -> Self {
60        Self {
61            metrics: MemoryBoundedMetrics::new(config.max_metrics_capacity),
62            aggregated_metrics: HashMap::new(),
63            last_cleanup: Utc::now(),
64            config,
65        }
66    }
67
68    /// Generate analytics report
69    pub async fn generate_analytics_report(
70        &self,
71        progress: &UserProgress,
72        time_range: Option<TimeRange>,
73    ) -> Result<ComprehensiveAnalyticsReport, FeedbackError> {
74        let now = Utc::now();
75        let range = time_range.unwrap_or(TimeRange {
76            start: now - chrono::Duration::days(30),
77            end: now,
78        });
79
80        // Multi-dimensional progress measurement
81        let mut metrics = vec![
82            AnalyticsMetric {
83                name: "overall_skill_level".to_string(),
84                value: f64::from(progress.overall_skill_level),
85                timestamp: now,
86                metric_type: MetricType::Gauge,
87            },
88            AnalyticsMetric {
89                name: "total_sessions".to_string(),
90                value: progress.training_stats.total_sessions as f64,
91                timestamp: now,
92                metric_type: MetricType::Counter,
93            },
94            AnalyticsMetric {
95                name: "success_rate".to_string(),
96                value: f64::from(progress.training_stats.success_rate),
97                timestamp: now,
98                metric_type: MetricType::Gauge,
99            },
100            AnalyticsMetric {
101                name: "average_improvement".to_string(),
102                value: f64::from(progress.training_stats.average_improvement),
103                timestamp: now,
104                metric_type: MetricType::Gauge,
105            },
106            AnalyticsMetric {
107                name: "current_streak".to_string(),
108                value: progress.training_stats.current_streak as f64,
109                timestamp: now,
110                metric_type: MetricType::Counter,
111            },
112            AnalyticsMetric {
113                name: "longest_streak".to_string(),
114                value: progress.training_stats.longest_streak as f64,
115                timestamp: now,
116                metric_type: MetricType::Counter,
117            },
118        ];
119
120        // Add skill breakdown metrics
121        for (focus_area, &skill_level) in &progress.skill_breakdown {
122            metrics.push(AnalyticsMetric {
123                name: format!("skill_{focus_area:?}").to_lowercase(),
124                value: f64::from(skill_level),
125                timestamp: now,
126                metric_type: MetricType::Gauge,
127            });
128        }
129
130        // Filter progress history to time range
131        let relevant_history: Vec<_> = progress
132            .progress_history
133            .iter()
134            .filter(|snapshot| snapshot.timestamp >= range.start && snapshot.timestamp <= range.end)
135            .collect();
136
137        // Calculate trend analytics
138        let trend_analytics = self.calculate_trend_analytics(&relevant_history);
139
140        // Add trend metrics
141        metrics.push(AnalyticsMetric {
142            name: "improvement_velocity".to_string(),
143            value: f64::from(trend_analytics.improvement_velocity),
144            timestamp: now,
145            metric_type: MetricType::Gauge,
146        });
147
148        metrics.push(AnalyticsMetric {
149            name: "performance_stability".to_string(),
150            value: f64::from(trend_analytics.performance_stability),
151            timestamp: now,
152            metric_type: MetricType::Gauge,
153        });
154
155        let summary = AnalyticsSummary {
156            total_metrics: metrics.len(),
157            average_value: if metrics.is_empty() {
158                0.0
159            } else {
160                metrics.iter().map(|m| m.value).sum::<f64>() / metrics.len() as f64
161            },
162            time_range: range,
163        };
164
165        Ok(ComprehensiveAnalyticsReport {
166            timestamp: now,
167            metrics,
168            summary,
169        })
170    }
171
172    /// Test statistical significance
173    pub async fn test_statistical_significance(
174        &self,
175        _progress: &UserProgress,
176        _metric: AnalyticsMetric,
177        _time_period: Duration,
178    ) -> Result<StatisticalSignificanceResult, FeedbackError> {
179        // Simplified implementation - in a real system this would perform actual statistical tests
180        Ok(StatisticalSignificanceResult {
181            p_value: 0.05,
182            is_significant: true,
183            confidence_level: 0.95,
184            effect_size: 0.3,
185        })
186    }
187
188    /// Generate comparative analysis
189    pub async fn generate_comparative_analysis(
190        &self,
191        user_progress_data: &[UserProgress],
192        _metric: AnalyticsMetric,
193        _time_range: Option<TimeRange>,
194    ) -> Result<ComparativeAnalyticsResult, FeedbackError> {
195        if user_progress_data.len() < 2 {
196            return Err(FeedbackError::ProgressTrackingError {
197                message: "Need at least 2 users for comparative analysis".to_string(),
198                source: None,
199            });
200        }
201
202        let baseline_value = f64::from(user_progress_data[0].overall_skill_level);
203        let comparison_value = f64::from(user_progress_data[1].overall_skill_level);
204        let percentage_change = ((comparison_value - baseline_value) / baseline_value) * 100.0;
205
206        Ok(ComparativeAnalyticsResult {
207            baseline_value,
208            comparison_value,
209            percentage_change,
210            statistical_significance: StatisticalSignificanceResult {
211                p_value: 0.05,
212                is_significant: percentage_change.abs() > 10.0,
213                confidence_level: 0.95,
214                effect_size: 0.3,
215            },
216        })
217    }
218
219    /// Collect longitudinal data
220    pub async fn collect_longitudinal_data(
221        &self,
222        _study_id: &str,
223        participant_data: &[UserProgress],
224        _tracking_metrics: &[AnalyticsMetric],
225    ) -> Result<LongitudinalStudyData, FeedbackError> {
226        let now = Utc::now();
227        let start = now - chrono::Duration::days(90);
228
229        let data_points: Vec<LongitudinalDataPoint> = participant_data
230            .iter()
231            .enumerate()
232            .map(|(i, progress)| LongitudinalDataPoint {
233                timestamp: start + chrono::Duration::days(i as i64),
234                value: f64::from(progress.overall_skill_level),
235                metadata: HashMap::new(),
236            })
237            .collect();
238
239        Ok(LongitudinalStudyData {
240            study_period: TimeRange { start, end: now },
241            data_points,
242            trend_analysis: TrendAnalysis {
243                trend_direction: TrendDirection::Improving,
244                slope: 0.01,
245                r_squared: 0.8,
246            },
247        })
248    }
249
    /// Calculate trend analytics from progress history.
    ///
    /// Derives, from the `overall_score` series:
    /// * improvement velocity — mean of consecutive score deltas;
    /// * performance stability — `1 / (1 + cv)` where `cv` is the
    ///   coefficient of variation (std-dev / mean), so 1.0 means perfectly
    ///   stable and values shrink toward 0 as scores fluctuate;
    /// * slope and R² of a least-squares line fit against sample index;
    /// * a trend direction from the slope with a ±0.02 dead band.
    ///
    /// Fewer than two snapshots returns an all-zero, `Stable` result.
    fn calculate_trend_analytics(&self, history: &[&ProgressSnapshot]) -> TrendAnalytics {
        if history.len() < 2 {
            return TrendAnalytics {
                improvement_velocity: 0.0,
                performance_stability: 0.0,
                trend_direction: TrendDirection::Stable,
                slope: 0.0,
                r_squared: 0.0,
            };
        }

        let scores: Vec<f32> = history.iter().map(|s| s.overall_score).collect();

        // Calculate improvement velocity (average rate of change between
        // consecutive snapshots; positive means scores are rising).
        let improvements: Vec<f32> = scores
            .windows(2)
            .map(|window| window[1] - window[0])
            .collect();

        let improvement_velocity = if improvements.is_empty() {
            0.0
        } else {
            improvements.iter().sum::<f32>() / improvements.len() as f32
        };

        // Calculate performance stability (inverse of coefficient of variation).
        let mean_score = if scores.is_empty() {
            0.0
        } else {
            scores.iter().sum::<f32>() / scores.len() as f32
        };

        // Population variance (divides by n, not n-1).
        let variance = if scores.len() > 1 {
            scores.iter().map(|s| (s - mean_score).powi(2)).sum::<f32>() / scores.len() as f32
        } else {
            0.0
        };

        // Falls back to 1.0 (perfectly stable) when the mean or variance is
        // zero, where the ratio would be undefined.
        let performance_stability = if mean_score > 0.0 && variance > 0.0 {
            1.0 / (1.0 + (variance.sqrt() / mean_score))
        } else {
            1.0
        };

        // Calculate linear regression for trend: x is the sample index, so
        // x_mean is simply (n-1)/2.
        let n = scores.len() as f32;
        let x_mean = (n - 1.0) / 2.0;
        let y_mean = mean_score;

        let mut numerator = 0.0;
        let mut denominator = 0.0;

        for (i, &score) in scores.iter().enumerate() {
            let x_diff = i as f32 - x_mean;
            numerator += x_diff * (score - y_mean);
            denominator += x_diff * x_diff;
        }

        // denominator is 0 only when all x values coincide (cannot happen
        // with n >= 2 here, but guarded anyway).
        let slope = if denominator == 0.0 {
            0.0
        } else {
            numerator / denominator
        };

        // Calculate R-squared: 1 - SS_res / SS_tot against the fitted line.
        let y_pred: Vec<f32> = (0..scores.len())
            .map(|i| y_mean + slope * (i as f32 - x_mean))
            .collect();

        let ss_res: f32 = scores
            .iter()
            .zip(y_pred.iter())
            .map(|(actual, predicted)| (actual - predicted).powi(2))
            .sum();

        let ss_tot: f32 = scores.iter().map(|score| (score - y_mean).powi(2)).sum();

        let r_squared = if ss_tot == 0.0 {
            0.0
        } else {
            1.0 - (ss_res / ss_tot)
        };

        // ±0.02 dead band keeps tiny slopes classified as Stable.
        let trend_direction = if slope > 0.02 {
            TrendDirection::Improving
        } else if slope < -0.02 {
            TrendDirection::Declining
        } else {
            TrendDirection::Stable
        };

        TrendAnalytics {
            improvement_velocity,
            performance_stability,
            trend_direction,
            slope,
            r_squared,
        }
    }
350
351    /// Cleanup old metrics to prevent memory overflow
352    pub fn cleanup_old_metrics(&mut self) {
353        let now = Utc::now();
354        let retention_duration = chrono::Duration::days(i64::from(self.config.data_retention_days));
355        let cutoff_time = now - retention_duration;
356
357        // Remove old metrics
358        self.metrics.cleanup_before(cutoff_time);
359
360        // Remove old aggregated metrics with size limit
361        self.aggregated_metrics
362            .retain(|_, metric| metric.last_updated > cutoff_time);
363
364        // If we still have too many aggregated metrics, remove oldest ones
365        if self.aggregated_metrics.len() > self.config.max_aggregated_metrics {
366            let mut metrics_by_age: Vec<_> = self
367                .aggregated_metrics
368                .iter()
369                .map(|(k, v)| (k.clone(), v.last_updated))
370                .collect();
371
372            // Sort by timestamp (oldest first)
373            metrics_by_age.sort_by_key(|(_, timestamp)| *timestamp);
374
375            // Remove oldest metrics to get under the limit
376            let to_remove = self.aggregated_metrics.len() - self.config.max_aggregated_metrics;
377            for (key, _) in metrics_by_age.into_iter().take(to_remove) {
378                self.aggregated_metrics.remove(&key);
379            }
380        }
381
382        self.last_cleanup = now;
383    }
384
385    /// Check if cleanup is needed
386    #[must_use]
387    pub fn needs_cleanup(&self) -> bool {
388        let now = Utc::now();
389        let cleanup_interval =
390            chrono::Duration::minutes(i64::from(self.config.cleanup_interval_minutes));
391        let time_based_cleanup = now.signed_duration_since(self.last_cleanup) > cleanup_interval;
392
393        // Also check memory usage threshold
394        let memory_stats = self.get_memory_stats();
395        let memory_based_cleanup =
396            memory_stats.memory_utilization > self.config.memory_cleanup_threshold;
397
398        time_based_cleanup || memory_based_cleanup
399    }
400
401    /// Add metric with automatic cleanup
402    pub fn add_metric(&mut self, name: String, metric: AnalyticsMetric) {
403        // Perform cleanup if needed
404        if self.needs_cleanup() {
405            self.cleanup_old_metrics();
406        }
407
408        // Check if we're at memory limits before adding
409        let memory_stats = self.get_memory_stats();
410        if memory_stats.memory_utilization >= 1.0 {
411            // Force cleanup if we're at the memory limit
412            self.cleanup_old_metrics();
413        }
414
415        // Add to bounded metrics
416        self.metrics.insert(name.clone(), metric.clone());
417
418        // Update aggregated metrics for long-term trends
419        self.update_aggregated_metric(name, &metric);
420    }
421
422    /// Update aggregated metrics for memory efficiency
423    fn update_aggregated_metric(&mut self, name: String, metric: &AnalyticsMetric) {
424        // Only aggregate if enabled in configuration
425        if !self.config.enable_auto_aggregation {
426            return;
427        }
428
429        let aggregated = self
430            .aggregated_metrics
431            .entry(name)
432            .or_insert_with(|| AggregatedMetric {
433                name: metric.name.clone(),
434                count: 0,
435                sum: 0.0,
436                sum_of_squares: 0.0,
437                min: f64::INFINITY,
438                max: f64::NEG_INFINITY,
439                last_updated: metric.timestamp,
440                metric_type: metric.metric_type.clone(),
441            });
442
443        // Update aggregated statistics
444        aggregated.count += 1;
445        aggregated.sum += metric.value;
446        aggregated.sum_of_squares += metric.value * metric.value;
447        aggregated.min = aggregated.min.min(metric.value);
448        aggregated.max = aggregated.max.max(metric.value);
449        aggregated.last_updated = metric.timestamp;
450    }
451
452    /// Get memory usage statistics
453    #[must_use]
454    pub fn get_memory_stats(&self) -> MemoryStats {
455        let metrics_count = self.metrics.len();
456        let aggregated_count = self.aggregated_metrics.len();
457
458        // Estimate memory usage (approximate)
459        let estimated_metrics_bytes = metrics_count * std::mem::size_of::<AnalyticsMetric>();
460        let estimated_aggregated_bytes = aggregated_count * std::mem::size_of::<AggregatedMetric>();
461        let total_estimated_bytes = estimated_metrics_bytes + estimated_aggregated_bytes;
462
463        MemoryStats {
464            total_metrics: metrics_count,
465            aggregated_metrics: aggregated_count,
466            estimated_memory_bytes: total_estimated_bytes,
467            memory_limit_bytes: self.config.memory_limit_bytes,
468            memory_utilization: if self.config.memory_limit_bytes > 0 {
469                total_estimated_bytes as f64 / self.config.memory_limit_bytes as f64
470            } else {
471                0.0
472            },
473        }
474    }
475}
476
/// Analytics configuration with memory management.
///
/// Controls retention, storage capacity, and when/aggressively-how cleanup
/// runs; see `Default` for the shipped values.
#[derive(Debug, Clone)]
pub struct AnalyticsConfig {
    /// Whether to enable detailed analytics
    pub enable_detailed_analytics: bool,
    /// Retention period for analytics data, in days
    pub data_retention_days: u32,
    /// Maximum number of metrics to store in memory (LRU-evicted beyond this)
    pub max_metrics_capacity: usize,
    /// Memory limit in bytes for analytics storage (0 = unlimited)
    pub memory_limit_bytes: usize,
    /// Cleanup interval in minutes
    pub cleanup_interval_minutes: u32,
    /// Memory usage threshold for triggering cleanup (0.0 to 1.0)
    pub memory_cleanup_threshold: f64,
    /// Enable automatic aggregation of metrics
    pub enable_auto_aggregation: bool,
    /// Maximum number of aggregated metrics to keep
    pub max_aggregated_metrics: usize,
}
497
impl Default for AnalyticsConfig {
    /// Defaults: 90-day retention, 10k metrics, 50 MB cap, hourly cleanup
    /// triggered early at 80% memory use, auto-aggregation on.
    fn default() -> Self {
        Self {
            enable_detailed_analytics: true,
            data_retention_days: 90,
            max_metrics_capacity: 10_000,
            memory_limit_bytes: 50 * 1024 * 1024, // 50MB
            cleanup_interval_minutes: 60,         // 1 hour
            memory_cleanup_threshold: 0.8,        // 80% memory usage
            enable_auto_aggregation: true,
            max_aggregated_metrics: 1_000,
        }
    }
}
512
/// A single named, timestamped observation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnalyticsMetric {
    /// Metric name (e.g. `"success_rate"`)
    pub name: String,
    /// Metric value
    pub value: f64,
    /// When the observation was taken
    pub timestamp: DateTime<Utc>,
    /// Metric type (counter, gauge, …)
    pub metric_type: MetricType,
}
525
526/// Metric type enumeration
527#[derive(Debug, Clone, Serialize, Deserialize)]
528pub enum MetricType {
529    /// Counter metric
530    Counter,
531    /// Gauge metric
532    Gauge,
533    /// Histogram metric
534    Histogram,
535    /// Timer metric
536    Timer,
537}
538
/// Comprehensive analytics report produced by `generate_analytics_report`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComprehensiveAnalyticsReport {
    /// When the report was generated
    pub timestamp: DateTime<Utc>,
    /// Metrics included in the report
    pub metrics: Vec<AnalyticsMetric>,
    /// Summary statistics over `metrics`
    pub summary: AnalyticsSummary,
}
549
/// Summary statistics for a report's metric set.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnalyticsSummary {
    /// Total metrics count
    pub total_metrics: usize,
    /// Mean of all metric values (0.0 when there are no metrics)
    pub average_value: f64,
    /// Time range covered by the report
    pub time_range: TimeRange,
}
560
/// Result of a statistical significance test.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StatisticalSignificanceResult {
    /// P-value of the test
    pub p_value: f64,
    /// Whether the result is statistically significant
    pub is_significant: bool,
    /// Confidence level used (e.g. 0.95)
    pub confidence_level: f64,
    /// Effect size
    pub effect_size: f64,
}
573
/// Result of comparing a metric between two users/groups.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComparativeAnalyticsResult {
    /// Baseline metric value
    pub baseline_value: f64,
    /// Comparison metric value
    pub comparison_value: f64,
    /// Percentage change from baseline to comparison
    pub percentage_change: f64,
    /// Statistical significance of the difference
    pub statistical_significance: StatisticalSignificanceResult,
}
586
/// Data collected for a longitudinal study.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LongitudinalStudyData {
    /// Study period
    pub study_period: TimeRange,
    /// Data points collected over the period
    pub data_points: Vec<LongitudinalDataPoint>,
    /// Trend analysis over the data points
    pub trend_analysis: TrendAnalysis,
}
597
/// One observation within a longitudinal study.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LongitudinalDataPoint {
    /// Data point timestamp
    pub timestamp: DateTime<Utc>,
    /// Metric value
    pub value: f64,
    /// Free-form associated metadata (key/value pairs)
    pub metadata: HashMap<String, String>,
}
608
/// Linear-trend summary over a series of values.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrendAnalysis {
    /// Overall trend direction
    pub trend_direction: TrendDirection,
    /// Trend slope (change per sample index)
    pub slope: f64,
    /// R-squared goodness of fit for the linear trend
    pub r_squared: f64,
}
619
/// Advanced trend analytics computed from a progress-snapshot history
/// (see `calculate_trend_analytics`).
#[derive(Debug, Clone)]
pub struct TrendAnalytics {
    /// Rate of improvement over time (mean of consecutive score deltas)
    pub improvement_velocity: f32,
    /// Stability of performance (inverse of coefficient of variation; 1.0 = steady)
    pub performance_stability: f32,
    /// Overall trend direction
    pub trend_direction: TrendDirection,
    /// Linear regression slope
    pub slope: f32,
    /// Correlation coefficient (R-squared)
    pub r_squared: f32,
}
634
/// Memory-bounded metrics storage with LRU eviction.
///
/// Invariant: `index` maps each stored key to its current position in
/// `storage`; any operation that shifts positions must rebuild the index.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryBoundedMetrics {
    /// Internal storage with bounded capacity (front = oldest)
    storage: VecDeque<(String, AnalyticsMetric)>,
    /// Maximum capacity
    capacity: usize,
    /// Quick lookup index: key → position in `storage`
    index: HashMap<String, usize>,
}
645
646impl MemoryBoundedMetrics {
647    /// Create new memory-bounded metrics storage
648    #[must_use]
649    pub fn new(capacity: usize) -> Self {
650        Self {
651            storage: VecDeque::with_capacity(capacity),
652            capacity,
653            index: HashMap::new(),
654        }
655    }
656
657    /// Insert metric with LRU eviction
658    pub fn insert(&mut self, key: String, metric: AnalyticsMetric) {
659        // If at capacity, remove oldest entry
660        if self.storage.len() >= self.capacity {
661            if let Some((old_key, _)) = self.storage.pop_front() {
662                self.index.remove(&old_key);
663            }
664        }
665
666        // Add new entry
667        let new_index = self.storage.len();
668        self.storage.push_back((key.clone(), metric));
669        self.index.insert(key, new_index);
670
671        // Rebuild index if needed (simple approach)
672        if self.storage.len() != self.index.len() {
673            self.rebuild_index();
674        }
675    }
676
677    /// Get metric by key
678    #[must_use]
679    pub fn get(&self, key: &str) -> Option<&AnalyticsMetric> {
680        if let Some(&index) = self.index.get(key) {
681            if index < self.storage.len() {
682                return Some(&self.storage[index].1);
683            }
684        }
685        None
686    }
687
688    /// Remove metrics before given timestamp
689    pub fn cleanup_before(&mut self, cutoff_time: DateTime<Utc>) {
690        let mut removed_count = 0;
691
692        // Remove old entries from front
693        while let Some((key, metric)) = self.storage.front() {
694            if metric.timestamp < cutoff_time {
695                let (removed_key, _) = self.storage.pop_front().unwrap();
696                self.index.remove(&removed_key);
697                removed_count += 1;
698            } else {
699                break;
700            }
701        }
702
703        // Rebuild index if we removed items
704        if removed_count > 0 {
705            self.rebuild_index();
706        }
707    }
708
709    /// Rebuild index after modifications
710    fn rebuild_index(&mut self) {
711        self.index.clear();
712        for (i, (key, _)) in self.storage.iter().enumerate() {
713            self.index.insert(key.clone(), i);
714        }
715    }
716
717    /// Get number of stored metrics
718    #[must_use]
719    pub fn len(&self) -> usize {
720        self.storage.len()
721    }
722
723    /// Check if empty
724    #[must_use]
725    pub fn is_empty(&self) -> bool {
726        self.storage.is_empty()
727    }
728
729    /// Get capacity
730    #[must_use]
731    pub fn capacity(&self) -> usize {
732        self.capacity
733    }
734}
735
/// Aggregated metric for long-term storage.
///
/// Keeps O(1)-size running sums so mean/variance/min/max can be derived
/// without retaining individual observations.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AggregatedMetric {
    /// Metric name
    pub name: String,
    /// Count of data points folded in
    pub count: u64,
    /// Sum of all values
    pub sum: f64,
    /// Sum of squares for variance calculation
    pub sum_of_squares: f64,
    /// Minimum value observed
    pub min: f64,
    /// Maximum value observed
    pub max: f64,
    /// Timestamp of the most recent observation
    pub last_updated: DateTime<Utc>,
    /// Metric type
    pub metric_type: MetricType,
}
756
757impl AggregatedMetric {
758    /// Calculate mean
759    #[must_use]
760    pub fn mean(&self) -> f64 {
761        if self.count > 0 {
762            self.sum / self.count as f64
763        } else {
764            0.0
765        }
766    }
767
768    /// Calculate variance
769    #[must_use]
770    pub fn variance(&self) -> f64 {
771        if self.count > 1 {
772            let mean = self.mean();
773            (self.sum_of_squares - self.count as f64 * mean * mean) / (self.count - 1) as f64
774        } else {
775            0.0
776        }
777    }
778
779    /// Calculate standard deviation
780    #[must_use]
781    pub fn std_dev(&self) -> f64 {
782        self.variance().sqrt()
783    }
784}
785
/// Memory usage statistics reported by `get_memory_stats`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryStats {
    /// Total number of raw metrics in the bounded store
    pub total_metrics: usize,
    /// Number of aggregated metrics
    pub aggregated_metrics: usize,
    /// Estimated memory usage in bytes (struct sizes only; heap payloads excluded)
    pub estimated_memory_bytes: usize,
    /// Memory limit in bytes (0 = unlimited)
    pub memory_limit_bytes: usize,
    /// Memory utilization fraction (0.0 to 1.0; 0.0 when no limit is set)
    pub memory_utilization: f64,
}