scirs2_integrate/visualization/
error_viz.rs

1//! Error analysis and convergence visualization tools
2//!
3//! This module provides tools for visualizing numerical errors, convergence behavior,
4//! and algorithm performance analysis for iterative methods and numerical solvers.
5
6use super::types::*;
7use crate::error::{IntegrateError, IntegrateResult};
8use scirs2_core::ndarray::{s, Array1, Array2, Axis};
9use std::collections::HashMap;
10
/// Error visualization engine for numerical analysis.
///
/// Holds rendering options and a per-error-type color scheme mapping used
/// when building error distribution plots.
#[derive(Debug, Clone)]
pub struct ErrorVisualizationEngine {
    /// Error analysis / rendering options
    pub options: ErrorVisualizationOptions,
    /// Color scheme to use for each error type (populated by `new`)
    pub error_color_schemes: HashMap<ErrorType, ColorScheme>,
}
19
20impl ErrorVisualizationEngine {
21    /// Create new error visualization engine
22    pub fn new() -> Self {
23        let mut error_color_schemes = HashMap::new();
24        error_color_schemes.insert(ErrorType::Absolute, ColorScheme::Viridis);
25        error_color_schemes.insert(ErrorType::Relative, ColorScheme::Plasma);
26        error_color_schemes.insert(ErrorType::Truncation, ColorScheme::Inferno);
27        error_color_schemes.insert(ErrorType::Roundoff, ColorScheme::Grayscale);
28        error_color_schemes.insert(ErrorType::Discretization, ColorScheme::Viridis);
29
30        Self {
31            options: ErrorVisualizationOptions::default(),
32            error_color_schemes,
33        }
34    }
35
36    /// Visualize error distribution
37    pub fn visualize_error_distribution(
38        &self,
39        errors: &Array1<f64>,
40        error_type: ErrorType,
41    ) -> IntegrateResult<ErrorDistributionPlot> {
42        let n_bins = 50;
43        let min_error = errors.iter().copied().fold(f64::INFINITY, f64::min);
44        let max_error = errors.iter().copied().fold(f64::NEG_INFINITY, f64::max);
45
46        if min_error >= max_error {
47            return Err(IntegrateError::ValueError(
48                "Invalid error range for distribution".to_string(),
49            ));
50        }
51
52        let bin_width = (max_error - min_error) / n_bins as f64;
53        let mut histogram = Array1::zeros(n_bins);
54        let mut bin_centers = Array1::zeros(n_bins);
55
56        for i in 0..n_bins {
57            bin_centers[i] = min_error + (i as f64 + 0.5) * bin_width;
58        }
59
60        for &error in errors {
61            let bin_idx = ((error - min_error) / bin_width).floor() as usize;
62            let bin_idx = bin_idx.min(n_bins - 1);
63            histogram[bin_idx] += 1.0;
64        }
65
66        // Normalize histogram
67        let total_count = histogram.sum();
68        if total_count > 0.0 {
69            histogram /= total_count;
70        }
71
72        let statistics = ErrorStatistics {
73            mean: errors.iter().sum::<f64>() / errors.len() as f64,
74            std_dev: errors.std(0.0),
75            min: min_error,
76            max: max_error,
77            median: self.compute_median(errors),
78            percentile_95: self.compute_percentile(errors, 0.95),
79        };
80
81        Ok(ErrorDistributionPlot {
82            bin_centers,
83            histogram,
84            error_type,
85            statistics,
86            color_scheme: self.error_color_schemes[&error_type],
87        })
88    }
89
90    /// Compute median of array
91    fn compute_median(&self, values: &Array1<f64>) -> f64 {
92        let mut sorted_values: Vec<f64> = values.iter().copied().collect();
93        sorted_values.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
94
95        let n = sorted_values.len();
96        if n == 0 {
97            return 0.0;
98        }
99
100        if n.is_multiple_of(2) {
101            (sorted_values[n / 2 - 1] + sorted_values[n / 2]) / 2.0
102        } else {
103            sorted_values[n / 2]
104        }
105    }
106
107    /// Compute percentile of array
108    fn compute_percentile(&self, values: &Array1<f64>, percentile: f64) -> f64 {
109        let mut sorted_values: Vec<f64> = values.iter().copied().collect();
110        sorted_values.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
111
112        let n = sorted_values.len();
113        if n == 0 {
114            return 0.0;
115        }
116
117        let index = (percentile * (n - 1) as f64).round() as usize;
118        let index = index.min(n - 1);
119        sorted_values[index]
120    }
121
122    /// Create error evolution plot over time/iterations
123    pub fn visualize_error_evolution(
124        &self,
125        time_points: &Array1<f64>,
126        errors: &Array1<f64>,
127        error_type: ErrorType,
128    ) -> IntegrateResult<PhaseSpacePlot> {
129        let mut metadata = PlotMetadata::default();
130        metadata.title = format!("{:?} Error Evolution", error_type);
131        metadata.xlabel = "Time/Iteration".to_string();
132        metadata.ylabel = "Error Magnitude".to_string();
133
134        Ok(PhaseSpacePlot {
135            x: time_points.to_vec(),
136            y: errors.to_vec(),
137            colors: Some(errors.iter().map(|&e| e.log10()).collect()),
138            metadata,
139        })
140    }
141
142    /// Compare multiple error types
143    pub fn compare_error_types(
144        &self,
145        time_points: &Array1<f64>,
146        error_data: &[(ErrorType, Array1<f64>)],
147    ) -> IntegrateResult<Vec<PhaseSpacePlot>> {
148        let mut plots = Vec::new();
149
150        for (error_type, errors) in error_data {
151            let plot = self.visualize_error_evolution(time_points, errors, *error_type)?;
152            plots.push(plot);
153        }
154
155        Ok(plots)
156    }
157}
158
159impl Default for ErrorVisualizationEngine {
160    fn default() -> Self {
161        Self::new()
162    }
163}
164
/// Advanced convergence visualization for iterative algorithms.
///
/// Produces residual, multi-metric, step-size, and phase-space-density plots
/// for analyzing how an iterative solver approaches its solution.
#[derive(Debug, Clone)]
pub struct ConvergenceVisualizer {
    /// Maximum number of iterations to include in any plot
    pub max_iterations: usize,
    /// Convergence tolerance, drawn as a reference line (in log10 space)
    pub tolerance: f64,
    /// Whether to track multiple convergence metrics simultaneously
    pub track_multiple_metrics: bool,
}
175
176impl ConvergenceVisualizer {
177    /// Create new convergence visualizer
178    pub fn new(maxiterations: usize, tolerance: f64) -> Self {
179        Self {
180            max_iterations: maxiterations,
181            tolerance,
182            track_multiple_metrics: true,
183        }
184    }
185
186    /// Create convergence plot for residuals
187    pub fn plot_residual_convergence(
188        &self,
189        residuals: &Array1<f64>,
190        algorithm_name: &str,
191    ) -> IntegrateResult<ConvergencePlot> {
192        let n_iter = residuals.len().min(self.max_iterations);
193        let iterations: Array1<f64> = Array1::range(1.0, n_iter as f64 + 1.0, 1.0);
194
195        // Apply log scale for better visualization
196        let log_residuals = residuals.slice(s![..n_iter]).mapv(|r| r.abs().log10());
197
198        // Detect convergence point
199        let convergence_iteration = self.detect_convergence_point(residuals);
200
201        // Compute convergence rate
202        let convergence_rate = self.estimate_convergence_rate(residuals);
203
204        // Create theoretical convergence line for comparison
205        let theoretical_line = if convergence_rate > 0.0 {
206            Some(self.create_theoretical_convergence(&iterations, convergence_rate))
207        } else {
208            None
209        };
210
211        Ok(ConvergencePlot {
212            iterations: iterations.slice(s![..n_iter]).to_owned(),
213            residuals: log_residuals,
214            convergence_iteration,
215            convergence_rate,
216            theoretical_line,
217            algorithm_name: algorithm_name.to_string(),
218            tolerance_line: self.tolerance.log10(),
219        })
220    }
221
222    /// Create multi-metric convergence plot
223    pub fn plot_multi_metric_convergence(
224        &self,
225        metrics: &[(String, Array1<f64>)],
226    ) -> IntegrateResult<MultiMetricConvergencePlot> {
227        let mut convergence_curves = Vec::new();
228        let mut convergence_rates = Vec::new();
229
230        let max_len = metrics
231            .iter()
232            .map(|(_, data)| data.len())
233            .max()
234            .unwrap_or(0);
235        let iterations: Array1<f64> = Array1::range(1.0, max_len as f64 + 1.0, 1.0);
236
237        for (name, data) in metrics {
238            let n_points = data.len().min(self.max_iterations);
239            let log_data = data.slice(s![..n_points]).mapv(|r| r.abs().log10());
240            let rate = self.estimate_convergence_rate(data);
241
242            convergence_curves.push(ConvergenceCurve {
243                name: name.clone(),
244                data: log_data,
245                convergence_rate: rate,
246                color: self.assign_curve_color(&convergence_curves),
247            });
248
249            convergence_rates.push((name.clone(), rate));
250        }
251
252        Ok(MultiMetricConvergencePlot {
253            iterations: iterations
254                .slice(s![..max_len.min(self.max_iterations)])
255                .to_owned(),
256            curves: convergence_curves,
257            convergence_rates,
258            tolerance_line: self.tolerance.log10(),
259        })
260    }
261
262    /// Visualize error vs. step size for method comparison
263    pub fn plot_step_size_analysis(
264        &self,
265        step_sizes: &Array1<f64>,
266        errors: &Array1<f64>,
267        method_name: &str,
268    ) -> IntegrateResult<StepSizeAnalysisPlot> {
269        let log_step_sizes = step_sizes.mapv(|h| h.log10());
270        let log_errors = errors.mapv(|e| e.abs().log10());
271
272        // Estimate order of accuracy via linear regression
273        let order = self.estimate_order_of_accuracy(&log_step_sizes, &log_errors);
274
275        // Create theoretical line showing expected convergence order
276        let theoretical_errors = if order > 0.0 {
277            Some(self.create_theoretical_error_line(&log_step_sizes, order, &log_errors))
278        } else {
279            None
280        };
281
282        Ok(StepSizeAnalysisPlot {
283            log_step_sizes,
284            log_errors,
285            theoretical_errors,
286            order_of_accuracy: order,
287            method_name: method_name.to_string(),
288        })
289    }
290
291    /// Create phase space density plot for attractor visualization
292    pub fn plot_phase_space_density(
293        &self,
294        x_data: &Array1<f64>,
295        y_data: &Array1<f64>,
296        grid_size: usize,
297    ) -> IntegrateResult<PhaseDensityPlot> {
298        // Find data bounds with padding
299        let x_min = x_data.iter().fold(f64::INFINITY, |a, &b| a.min(b));
300        let x_max = x_data.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b));
301        let y_min = y_data.iter().fold(f64::INFINITY, |a, &b| a.min(b));
302        let y_max = y_data.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b));
303
304        let x_range = x_max - x_min;
305        let y_range = y_max - y_min;
306        let padding = 0.1;
307
308        let x_bounds = (x_min - padding * x_range, x_max + padding * x_range);
309        let y_bounds = (y_min - padding * y_range, y_max + padding * y_range);
310
311        // Create density grid
312        let mut density_grid = Array2::zeros((grid_size, grid_size));
313        let dx = (x_bounds.1 - x_bounds.0) / grid_size as f64;
314        let dy = (y_bounds.1 - y_bounds.0) / grid_size as f64;
315
316        // Fill density grid
317        for (&x, &y) in x_data.iter().zip(y_data.iter()) {
318            let i = ((x - x_bounds.0) / dx).floor() as usize;
319            let j = ((y - y_bounds.0) / dy).floor() as usize;
320
321            let i = i.min(grid_size - 1);
322            let j = j.min(grid_size - 1);
323
324            density_grid[[i, j]] += 1.0;
325        }
326
327        // Normalize density
328        let max_density = density_grid.iter().fold(0.0_f64, |a, &b| a.max(b));
329        if max_density > 0.0 {
330            density_grid /= max_density;
331        }
332
333        // Create coordinate grids
334        let x_grid = Array1::range(x_bounds.0, x_bounds.1, dx);
335        let y_grid = Array1::range(y_bounds.0, y_bounds.1, dy);
336
337        Ok(PhaseDensityPlot {
338            x_grid,
339            y_grid,
340            density_grid,
341            x_bounds,
342            y_bounds,
343            n_points: x_data.len(),
344        })
345    }
346
347    /// Detect convergence point based on tolerance
348    fn detect_convergence_point(&self, residuals: &Array1<f64>) -> Option<usize> {
349        for (i, &residual) in residuals.iter().enumerate() {
350            if residual.abs() < self.tolerance {
351                return Some(i + 1);
352            }
353        }
354        None
355    }
356
357    /// Estimate convergence rate using linear regression on log data
358    fn estimate_convergence_rate(&self, residuals: &Array1<f64>) -> f64 {
359        if residuals.len() < 10 {
360            return 0.0;
361        }
362
363        // Use last portion of data for rate estimation
364        let start_idx = residuals.len() / 2;
365        let end_idx = residuals.len();
366
367        // Linear regression to estimate convergence rate
368        let x: Array1<f64> = Array1::range(start_idx as f64, end_idx as f64, 1.0);
369        let y: Array1<f64> = residuals
370            .slice(s![start_idx..end_idx])
371            .mapv(|r| r.abs().log10());
372
373        // Calculate slope using least squares
374        let x_mean = x.iter().sum::<f64>() / x.len() as f64;
375        let y_mean = y.iter().sum::<f64>() / y.len() as f64;
376
377        let numerator: f64 = x
378            .iter()
379            .zip(y.iter())
380            .map(|(&xi, &yi)| (xi - x_mean) * (yi - y_mean))
381            .sum();
382        let denominator: f64 = x.iter().map(|&xi| (xi - x_mean).powi(2)).sum();
383
384        if denominator.abs() > 1e-12 {
385            -numerator / denominator // Negative because residuals should decrease
386        } else {
387            0.0
388        }
389    }
390
391    /// Create theoretical convergence line
392    fn create_theoretical_convergence(&self, iterations: &Array1<f64>, rate: f64) -> Array1<f64> {
393        let initial_residual = self.tolerance * 10.0; // Start above tolerance
394        iterations.mapv(|iter| (initial_residual * (-rate * iter).exp()).log10())
395    }
396
397    /// Assign color to convergence curve
398    fn assign_curve_color(&self, existingcurves: &[ConvergenceCurve]) -> [f64; 3] {
399        let colors = [
400            [0.0, 0.4470, 0.7410],    // Blue
401            [0.8500, 0.3250, 0.0980], // Orange
402            [0.9290, 0.6940, 0.1250], // Yellow
403            [0.4940, 0.1840, 0.5560], // Purple
404            [0.4660, 0.6740, 0.1880], // Green
405            [0.3011, 0.7450, 0.9330], // Cyan
406            [0.6350, 0.0780, 0.1840], // Red
407        ];
408
409        let index = existingcurves.len() % colors.len();
410        colors[index]
411    }
412
413    /// Estimate order of accuracy via linear regression
414    fn estimate_order_of_accuracy(
415        &self,
416        log_step_sizes: &Array1<f64>,
417        log_errors: &Array1<f64>,
418    ) -> f64 {
419        if log_step_sizes.len() < 3 {
420            return 0.0;
421        }
422
423        let x_mean = log_step_sizes.iter().sum::<f64>() / log_step_sizes.len() as f64;
424        let y_mean = log_errors.iter().sum::<f64>() / log_errors.len() as f64;
425
426        let numerator: f64 = log_step_sizes
427            .iter()
428            .zip(log_errors.iter())
429            .map(|(&xi, &yi)| (xi - x_mean) * (yi - y_mean))
430            .sum();
431        let denominator: f64 = log_step_sizes.iter().map(|&xi| (xi - x_mean).powi(2)).sum();
432
433        if denominator.abs() > 1e-12 {
434            numerator / denominator
435        } else {
436            0.0
437        }
438    }
439
440    /// Create theoretical error line for step size analysis
441    fn create_theoretical_error_line(
442        &self,
443        log_step_sizes: &Array1<f64>,
444        order: f64,
445        log_errors: &Array1<f64>,
446    ) -> Array1<f64> {
447        if log_errors.is_empty() {
448            return Array1::zeros(log_step_sizes.len());
449        }
450
451        // Use first point as reference
452        let ref_log_h = log_step_sizes[0];
453        let ref_log_e = log_errors[0];
454
455        log_step_sizes.mapv(|log_h| ref_log_e + order * (log_h - ref_log_h))
456    }
457}
458
459impl Default for ConvergenceVisualizer {
460    fn default() -> Self {
461        Self::new(1000, 1e-6)
462    }
463}
464
/// Visualization engine that combines error and convergence analysis.
///
/// Facade bundling a [`ConvergenceVisualizer`], an
/// [`ErrorVisualizationEngine`], and a [`PerformanceTracker`] behind one API.
#[derive(Debug, Clone)]
pub struct ConvergenceVisualizationEngine {
    /// Convergence visualizer (residual / multi-metric / step-size plots)
    pub convergence_visualizer: ConvergenceVisualizer,
    /// Error visualization engine (distributions, evolution plots)
    pub error_engine: ErrorVisualizationEngine,
    /// Performance metric tracking over time
    pub performance_tracker: PerformanceTracker,
}
475
476impl ConvergenceVisualizationEngine {
477    /// Create new convergence visualization engine
478    pub fn new() -> Self {
479        Self {
480            convergence_visualizer: ConvergenceVisualizer::default(),
481            error_engine: ErrorVisualizationEngine::new(),
482            performance_tracker: PerformanceTracker::default(),
483        }
484    }
485
486    /// Track metric over time
487    pub fn track_metric(&mut self, metricname: &str, value: f64, time: f64) {
488        self.performance_tracker
489            .add_metric_value(metricname, value, time);
490    }
491
492    /// Create comprehensive convergence analysis
493    pub fn create_convergence_plot(
494        &self,
495        residuals: &Array1<f64>,
496        algorithm_name: &str,
497    ) -> IntegrateResult<ConvergencePlot> {
498        self.convergence_visualizer
499            .plot_residual_convergence(residuals, algorithm_name)
500    }
501
502    /// Create multi-metric analysis
503    pub fn create_multi_metric_plot(
504        &self,
505        metrics: &[(String, Array1<f64>)],
506    ) -> IntegrateResult<MultiMetricConvergencePlot> {
507        self.convergence_visualizer
508            .plot_multi_metric_convergence(metrics)
509    }
510
511    /// Analyze step size effects
512    pub fn create_step_size_analysis(
513        &self,
514        step_sizes: &Array1<f64>,
515        errors: &Array1<f64>,
516        method_name: &str,
517    ) -> IntegrateResult<StepSizeAnalysisPlot> {
518        self.convergence_visualizer
519            .plot_step_size_analysis(step_sizes, errors, method_name)
520    }
521}
522
523impl Default for ConvergenceVisualizationEngine {
524    fn default() -> Self {
525        Self::new()
526    }
527}
528
/// Performance tracking for algorithm metrics.
///
/// Stores raw time series per metric name alongside cached summary
/// statistics.
#[derive(Debug, Clone)]
pub struct PerformanceTracker {
    /// Metric data storage: metric name -> chronological (time, value) pairs
    pub metrics: HashMap<String, Vec<(f64, f64)>>, // (time, value) pairs
    /// Summary statistics per metric name
    pub statistics: HashMap<String, MetricStatistics>,
}
537
538impl PerformanceTracker {
539    /// Create new performance tracker
540    pub fn new() -> Self {
541        Self {
542            metrics: HashMap::new(),
543            statistics: HashMap::new(),
544        }
545    }
546
547    /// Add metric value at specific time
548    pub fn add_metric_value(&mut self, metricname: &str, value: f64, time: f64) {
549        // This would be implemented to track metrics over time
550        // Simplified for now
551    }
552
553    /// Get metric statistics
554    pub fn get_statistics(metricname: &str) -> Option<&MetricStatistics> {
555        // This would return computed statistics for the metric
556        None
557    }
558
559    /// Get all tracked metric names
560    pub fn get_metricnames(&self) -> Vec<String> {
561        self.metrics.keys().cloned().collect()
562    }
563}
564
565impl Default for PerformanceTracker {
566    fn default() -> Self {
567        Self::new()
568    }
569}
570
/// Summary statistics for one tracked performance metric.
#[derive(Debug, Clone)]
pub struct MetricStatistics {
    /// Mean value over all recorded samples
    pub mean: f64,
    /// Standard deviation over all recorded samples
    pub std_dev: f64,
    /// Minimum recorded value
    pub min: f64,
    /// Maximum recorded value
    pub max: f64,
    /// Total number of samples
    pub count: usize,
}
585
/// Information about the convergence behavior of an iterative run.
#[derive(Debug, Clone)]
pub struct ConvergenceInfo {
    /// Whether convergence was achieved within the iteration budget
    pub converged: bool,
    /// Number of iterations taken to reach convergence
    pub iterations: usize,
    /// Final residual value at termination
    pub final_residual: f64,
    /// Estimated convergence rate (per-iteration decay)
    pub convergence_rate: f64,
}