apex_solver/optimizer/visualization.rs

//! Real-time optimization visualization using Rerun.
//!
//! This module provides comprehensive visualization of the optimization process,
//! including time series plots, matrix visualizations, and manifold state tracking.
//!
//! # Features
//!
//! - **Time series plots**: Cost, gradient norm, damping parameter, step quality
//! - **Sparse Hessian visualization**: Heat map showing matrix structure and values
//! - **Gradient visualization**: Vector representation with magnitude encoding
//! - **Manifold state**: Real-time pose updates for SE2/SE3 problems
//!
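//! # Example
//!
//! A minimal usage sketch (the import path below assumes this module is
//! reachable as `apex_solver::optimizer::visualization`; adjust to your
//! crate layout):
//!
//! ```no_run
//! use apex_solver::optimizer::visualization::OptimizationVisualizer;
//!
//! fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let vis = OptimizationVisualizer::new(true)?;
//!     // Illustrative values; a solver would log these once per iteration.
//!     vis.log_scalars(0, 42.0, 1e-3, 1e-4, 0.5, Some(0.9))?;
//!     vis.log_convergence("Converged: gradient norm below tolerance")?;
//!     Ok(())
//! }
//! ```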

use crate::{core::problem, io};
use faer::sparse;
use std::collections;

/// Optimization visualizer for real-time debugging with Rerun.
///
/// This struct manages all visualization state and provides methods for logging
/// various aspects of the optimization process.
#[derive(Debug)]
pub struct OptimizationVisualizer {
    rec: Option<rerun::RecordingStream>,
    enabled: bool,
}

impl OptimizationVisualizer {
    /// Create a new optimization visualizer.
    ///
    /// # Arguments
    ///
    /// * `enabled` - Whether to enable visualization
    ///
    /// # Returns
    ///
    /// A new visualizer instance.
    pub fn new(enabled: bool) -> Result<Self, Box<dyn std::error::Error>> {
        Self::new_with_options(enabled, None)
    }

    /// Create a new optimization visualizer with file save option.
    ///
    /// # Arguments
    ///
    /// * `enabled` - Whether to enable visualization
    /// * `save_path` - Optional path to save recording to file instead of spawning viewer
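    ///
    /// # Example
    ///
    /// A minimal sketch that saves the recording to a file instead of
    /// spawning the viewer (the `.rrd` filename is illustrative; the import
    /// path assumes the module layout noted in the crate docs):
    ///
    /// ```no_run
    /// # use apex_solver::optimizer::visualization::OptimizationVisualizer;
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let vis = OptimizationVisualizer::new_with_options(true, Some("run.rrd"))?;
    /// # Ok(())
    /// # }
    /// ```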
    pub fn new_with_options(
        enabled: bool,
        save_path: Option<&str>,
    ) -> Result<Self, Box<dyn std::error::Error>> {
        let rec = if enabled {
            let rec = if let Some(path) = save_path {
                // Save to file
                println!("✓ Saving visualization to: {}", path);
                rerun::RecordingStreamBuilder::new("apex-solver-optimization").save(path)?
            } else {
                // Try to spawn Rerun viewer
                match rerun::RecordingStreamBuilder::new("apex-solver-optimization").spawn() {
                    Ok(rec) => {
                        println!("✓ Rerun viewer launched successfully");
                        rec
                    }
                    Err(e) => {
                        eprintln!("⚠ Could not launch Rerun viewer: {}", e);
                        eprintln!("  Saving to file 'optimization.rrd' instead");
                        eprintln!("  View it later with: rerun optimization.rrd\n");

                        // Fall back to saving to file
                        rerun::RecordingStreamBuilder::new("apex-solver-optimization")
                            .save("optimization.rrd")?
                    }
                }
            };

            Some(rec)
        } else {
            None
        };

        Ok(Self { rec, enabled })
    }

    /// Check if visualization is enabled and active.
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        self.enabled && self.rec.is_some()
    }

    /// Log scalar values for time series plots.
    ///
    /// Each metric is logged to a separate entity path for independent scaling.
    ///
    /// # Arguments
    ///
    /// * `iteration` - Current iteration number
    /// * `cost` - Current cost value
    /// * `gradient_norm` - L2 norm of the gradient
    /// * `damping` - Current damping parameter (λ)
    /// * `step_norm` - L2 norm of the parameter update
    /// * `step_quality` - Step quality metric (ρ)
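    ///
    /// # Example
    ///
    /// A sketch of the per-iteration call from a solver loop (all values are
    /// illustrative):
    ///
    /// ```no_run
    /// # use apex_solver::optimizer::visualization::OptimizationVisualizer;
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let vis = OptimizationVisualizer::new(true)?;
    /// for iter in 0..10 {
    ///     let cost = 100.0 / (iter as f64 + 1.0);
    ///     vis.log_scalars(iter, cost, 1e-3, 1e-2, 0.1, Some(0.75))?;
    /// }
    /// # Ok(())
    /// # }
    /// ```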
    pub fn log_scalars(
        &self,
        iteration: usize,
        cost: f64,
        gradient_norm: f64,
        damping: f64,
        step_norm: f64,
        step_quality: Option<f64>,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let rec = self.rec.as_ref().unwrap();

        // Set timeline
        rec.set_time_sequence("iteration", iteration as i64);
        // Log each metric to a separate top-level entity path. Rerun creates
        // a separate TimeSeriesView for each top-level space, so the distinct
        // prefixes (cost_plot, gradient_plot, etc.) produce separate panels.
        rec.log("cost_plot/value", &rerun::archetypes::Scalars::new([cost]))?;

        rec.log(
            "gradient_plot/norm",
            &rerun::archetypes::Scalars::new([gradient_norm]),
        )?;

        rec.log(
            "damping_plot/lambda",
            &rerun::archetypes::Scalars::new([damping]),
        )?;

        rec.log(
            "step_plot/norm",
            &rerun::archetypes::Scalars::new([step_norm]),
        )?;

        if let Some(rho) = step_quality {
            rec.log("quality_plot/rho", &rerun::archetypes::Scalars::new([rho]))?;
        }

        Ok(())
    }

    /// Log iteration timing information.
    ///
    /// # Arguments
    ///
    /// * `iteration` - Current iteration number
    /// * `duration_ms` - Time taken for this iteration in milliseconds
    pub fn log_timing(
        &self,
        iteration: usize,
        duration_ms: f64,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let rec = self.rec.as_ref().unwrap();
        rec.set_time_sequence("iteration", iteration as i64);

        rec.log(
            "timing_plot/iteration_ms",
            &rerun::archetypes::Scalars::new([duration_ms]),
        )?;

        Ok(())
    }

    /// Log sparse Hessian matrix as a heat map image.
    ///
    /// The Hessian is downsampled to a fixed 100×100 image for visualization.
    /// Uses a white-to-blue color scheme based on magnitude:
    /// - Near-zero values: white
    /// - Large-magnitude values (positive or negative): blue
    ///
    /// # Arguments
    ///
    /// * `hessian` - The sparse Hessian matrix
    /// * `iteration` - Current iteration number
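    ///
    /// # Example
    ///
    /// A sketch of the call site, assuming `hessian` is the solver's current
    /// `faer` sparse Hessian (construction omitted):
    ///
    /// ```ignore
    /// vis.log_hessian(Some(&hessian), iteration)?;
    /// ```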
    pub fn log_hessian(
        &self,
        hessian: Option<&sparse::SparseColMat<usize, f64>>,
        iteration: usize,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let Some(hessian) = hessian else {
            return Ok(());
        };

        let rec = self.rec.as_ref().unwrap();
        rec.set_time_sequence("iteration", iteration as i64);

        // Convert sparse Hessian to fixed 100×100 image
        let image_data = Self::sparse_hessian_to_image(hessian)?;

        // Log as tensor (Rerun will display it as an image)
        rec.log(
            "optimization/matrices/hessian",
            &rerun::archetypes::Tensor::new(image_data),
        )?;

        Ok(())
    }

    /// Log gradient vector as a tensor visualization.
    ///
    /// The gradient is downsampled to a fixed 100-element vector and displayed
    /// as a horizontal bar image.
    ///
    /// # Arguments
    ///
    /// * `gradient` - The gradient vector
    /// * `iteration` - Current iteration number
    pub fn log_gradient(
        &self,
        gradient: Option<&faer::Mat<f64>>,
        iteration: usize,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let Some(grad) = gradient else {
            return Ok(());
        };

        let rec = self.rec.as_ref().unwrap();
        rec.set_time_sequence("iteration", iteration as i64);

        // Convert gradient to 1D heat map
        let grad_vec: Vec<f64> = (0..grad.nrows()).map(|i| grad[(i, 0)]).collect();

        // Create a horizontal bar visualization as an image (fixed 100 width)
        let image_data = Self::gradient_to_image(&grad_vec)?;

        rec.log(
            "optimization/matrices/gradient",
            &rerun::archetypes::Tensor::new(image_data),
        )?;

        Ok(())
    }

    /// Log current manifold states for SE2/SE3 problems.
    ///
    /// This visualizes the current pose estimates during optimization.
    /// Only the latest poses are shown (previous iterations are cleared).
    ///
    /// # Arguments
    ///
    /// * `variables` - Current variable values
    /// * `iteration` - Current iteration number
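    ///
    /// # Example
    ///
    /// A sketch of the call site, assuming `variables` is the optimizer's
    /// current variable map (construction of the map is omitted):
    ///
    /// ```ignore
    /// // `variables: HashMap<String, VariableEnum>` comes from the problem state.
    /// vis.log_manifolds(&variables, iteration)?;
    /// ```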
    pub fn log_manifolds(
        &self,
        variables: &collections::HashMap<String, problem::VariableEnum>,
        iteration: usize,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let rec = self.rec.as_ref().unwrap();
        rec.set_time_sequence("iteration", iteration as i64);

        // Visualize SE3 poses (only poses, no edges)
        for (var_name, var) in variables {
            match var {
                problem::VariableEnum::SE3(v) => {
                    let trans = v.value.translation();
                    let rot = v.value.rotation_quaternion();

                    // Convert to glam types for Rerun
                    let position = rerun::external::glam::Vec3::new(
                        trans.x as f32,
                        trans.y as f32,
                        trans.z as f32,
                    );

                    let nq = rot.as_ref();
                    let rotation = rerun::external::glam::Quat::from_xyzw(
                        nq.i as f32,
                        nq.j as f32,
                        nq.k as f32,
                        nq.w as f32,
                    );

                    let transform =
                        rerun::Transform3D::from_translation_rotation(position, rotation);

                    rec.log(
                        format!("optimized_graph/se3_poses/{}", var_name),
                        &transform,
                    )?;

                    // Also log a small pinhole camera for better visualization
                    rec.log(
                        format!("optimized_graph/se3_poses/{}", var_name),
                        &rerun::archetypes::Pinhole::from_fov_and_aspect_ratio(0.5, 1.0),
                    )?;
                }
                problem::VariableEnum::SE2(v) => {
                    // Visualize SE2 as a 3D pose at z = 0
                    let x = v.value.x();
                    let y = v.value.y();

                    let position = rerun::external::glam::Vec3::new(x as f32, y as f32, 0.0);
                    let rotation = rerun::external::glam::Quat::IDENTITY;

                    let transform =
                        rerun::Transform3D::from_translation_rotation(position, rotation);

                    rec.log(
                        format!("optimized_graph/se2_poses/{}", var_name),
                        &transform,
                    )?;
                }
                _ => {
                    // Skip other manifold types (SO2, SO3, Rn)
                }
            }
        }

        Ok(())
    }

    /// Log convergence status and final summary.
    ///
    /// # Arguments
    ///
    /// * `status` - Convergence status message
    pub fn log_convergence(&self, status: &str) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let rec = self.rec.as_ref().unwrap();

        // Log as a text annotation
        rec.log(
            "optimization/status",
            &rerun::archetypes::TextDocument::new(status),
        )?;

        Ok(())
    }

    /// Log the initial graph structure before optimization.
    ///
    /// This visualizes only the initial poses (vertices), not edges.
    ///
    /// # Arguments
    ///
    /// * `graph` - The graph structure loaded from G2O file
    /// * `scale` - Scale factor for visualization
    pub fn log_initial_graph(
        &self,
        graph: &io::Graph,
        scale: f32,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let rec = self.rec.as_ref().unwrap();

        // Visualize SE3 vertices only (no edges)
        for (id, vertex) in &graph.vertices_se3 {
            let (position, rotation) = vertex.to_rerun_transform(scale);
            let transform = rerun::Transform3D::from_translation_rotation(position, rotation);

            let entity_path = format!("initial_graph/se3_poses/{}", id);
            rec.log(entity_path.as_str(), &transform)?;

            // Add a small pinhole camera for better visualization
            rec.log(
                entity_path.as_str(),
                &rerun::archetypes::Pinhole::from_fov_and_aspect_ratio(0.5, 1.0),
            )?;
        }

        // Visualize SE2 vertices only (no edges)
        if !graph.vertices_se2.is_empty() {
            let positions: Vec<[f32; 2]> = graph
                .vertices_se2
                .values()
                .map(|vertex| vertex.to_rerun_position_2d(scale))
                .collect();

            let colors = vec![rerun::components::Color::from_rgb(100, 150, 255); positions.len()];

            rec.log(
                "initial_graph/se2_poses",
                &rerun::archetypes::Points2D::new(positions)
                    .with_colors(colors)
                    .with_radii([0.5 * scale]),
            )?;
        }

        Ok(())
    }

    // ========================================================================
    // Private helper methods for image conversions
    // ========================================================================

    /// Convert sparse Hessian matrix to fixed 100×100 RGB image with heat map coloring.
    ///
    /// Uses block averaging to downsample large matrices while preserving structure.
    fn sparse_hessian_to_image(
        hessian: &sparse::SparseColMat<usize, f64>,
    ) -> Result<rerun::datatypes::TensorData, Box<dyn std::error::Error>> {
        // Fixed target size for visualization
        let target_size = 100;
        let target_rows = target_size;
        let target_cols = target_size;

        // Downsample to fixed size using block averaging
        let dense_matrix = Self::downsample_sparse_matrix(hessian, target_rows, target_cols);

        // Find min/max for normalization
        let mut min_val = f64::INFINITY;
        let mut max_val = f64::NEG_INFINITY;

        for &val in &dense_matrix {
            if val.is_finite() {
                min_val = min_val.min(val);
                max_val = max_val.max(val);
            }
        }

        let max_abs = max_val.abs().max(min_val.abs());

        // Convert to RGB bytes
        let mut rgb_data = Vec::with_capacity(target_rows * target_cols * 3);

        for &val in &dense_matrix {
            let rgb = Self::value_to_rgb_heatmap(val, max_abs);
            rgb_data.extend_from_slice(&rgb);
        }

        // Create tensor data
        let tensor = rerun::datatypes::TensorData::new(
            vec![target_rows as u64, target_cols as u64, 3],
            rerun::datatypes::TensorBuffer::U8(rgb_data.into()),
        );

        Ok(tensor)
    }

    /// Convert gradient vector to a fixed 100-width horizontal bar image.
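    ///
    /// Each of the 100 output columns averages a contiguous block of the
    /// input: column `i` covers indices `(i * n) / 100..((i + 1) * n) / 100`.
    /// For example, with `n = 1000`, column 0 averages elements `0..10`.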
    fn gradient_to_image(
        gradient: &[f64],
    ) -> Result<rerun::datatypes::TensorData, Box<dyn std::error::Error>> {
        let n = gradient.len();

        // Fixed size for visualization
        let bar_height = 50;
        let target_width = 100;

        // Find max absolute value for normalization
        let max_abs = gradient
            .iter()
            .map(|&x| x.abs())
            .fold(0.0f64, |a, b| a.max(b));

        let mut rgb_data = Vec::with_capacity(bar_height * target_width * 3);

        for _ in 0..bar_height {
            for i in 0..target_width {
                // Average over a block of the gradient
                let start = (i * n) / target_width;
                let end = ((i + 1) * n) / target_width;
                let sum: f64 = gradient[start..end].iter().sum();
                let val = sum / (end - start).max(1) as f64;

                let rgb = Self::value_to_rgb_heatmap(val, max_abs);
                rgb_data.extend_from_slice(&rgb);
            }
        }

        let tensor = rerun::datatypes::TensorData::new(
            vec![bar_height as u64, target_width as u64, 3],
            rerun::datatypes::TensorBuffer::U8(rgb_data.into()),
        );

        Ok(tensor)
    }

    /// Downsample a sparse matrix to target size using block averaging.
    ///
    /// This function efficiently iterates only over non-zero elements using
    /// the sparse CSC (Compressed Sparse Column) format, avoiding O(m*n) complexity.
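    ///
    /// Each non-zero at `(row, col)` is accumulated into the bin at
    /// `((row * target_rows) / m, (col * target_cols) / n)`, and bins are
    /// averaged at the end. For example, with `m = n = 1000` and a 100×100
    /// target, source rows `0..10` all land in target row 0.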
    fn downsample_sparse_matrix(
        sparse: &sparse::SparseColMat<usize, f64>,
        target_rows: usize,
        target_cols: usize,
    ) -> Vec<f64> {
        let m = sparse.nrows();
        let n = sparse.ncols();

        let mut downsampled = vec![0.0; target_rows * target_cols];
        let mut counts = vec![0usize; target_rows * target_cols];

        // Efficiently iterate only over non-zero entries using CSC format
        // This is O(nnz) instead of O(m*n), critical for large matrices!
        let symbolic = sparse.symbolic();

        for col in 0..n {
            let row_indices = symbolic.row_idx_of_col_raw(col);
            let col_values = sparse.val_of_col(col);

            for (idx_in_col, &row) in row_indices.iter().enumerate() {
                let value = col_values[idx_in_col];

                if value.abs() > 1e-12 {
                    let target_row = (row * target_rows) / m;
                    let target_col = (col * target_cols) / n;
                    let idx = target_row * target_cols + target_col;

                    downsampled[idx] += value;
                    counts[idx] += 1;
                }
            }
        }

        // Average bins
        for i in 0..downsampled.len() {
            if counts[i] > 0 {
                downsampled[i] /= counts[i] as f64;
            }
        }

        downsampled
    }

    /// Map a scalar value to an RGB color using a white-to-blue gradient.
    ///
    /// Uses a simple white→blue gradient based on magnitude:
    /// - Low values: white (255, 255, 255)
    /// - High values: blue (0, 0, 255)
    ///
    /// # Arguments
    ///
    /// * `value` - The value to map (absolute value is used)
    /// * `max_abs` - Maximum absolute value for normalization
    ///
    /// # Returns
    ///
    /// RGB color as a `[r, g, b]` byte array
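    ///
    /// # Example
    ///
    /// With `max_abs = 1.0`: a value of `0.0` maps to white `[255, 255, 255]`,
    /// `±1.0` maps to blue `[0, 0, 255]`, and `0.5` maps to the midpoint
    /// `[128, 128, 255]` (see the unit tests below).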
    fn value_to_rgb_heatmap(value: f64, max_abs: f64) -> [u8; 3] {
        if !value.is_finite() || max_abs == 0.0 {
            return [255, 255, 255]; // White for invalid/zero
        }

        // Normalize to [0, 1] using absolute value
        let normalized = (value.abs() / max_abs).clamp(0.0, 1.0);

        if normalized < 1e-10 {
            // Near-zero: white
            [255, 255, 255]
        } else {
            // White (255,255,255) → Blue (0,0,255)
            // Keep blue channel at 255, reduce red and green
            let intensity = (normalized * 255.0) as u8;
            let remaining = 255 - intensity;
            [remaining, remaining, 255]
        }
    }
}

impl Default for OptimizationVisualizer {
    fn default() -> Self {
        Self::new(false).unwrap_or_else(|_| Self {
            rec: None,
            enabled: false,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_visualizer_creation() {
        let vis = OptimizationVisualizer::new(false);
        assert!(vis.is_ok());

        let vis = vis.unwrap();
        assert!(!vis.is_enabled());
    }

    #[test]
    fn test_rgb_heatmap_conversion() {
        // Test zero - should be white
        let rgb = OptimizationVisualizer::value_to_rgb_heatmap(0.0, 1.0);
        assert_eq!(rgb, [255, 255, 255]);

        // Test max positive - should be blue
        let rgb = OptimizationVisualizer::value_to_rgb_heatmap(1.0, 1.0);
        assert_eq!(rgb, [0, 0, 255]);

        // Test max negative (abs value) - should also be blue
        let rgb = OptimizationVisualizer::value_to_rgb_heatmap(-1.0, 1.0);
        assert_eq!(rgb, [0, 0, 255]);

        // Test mid-range value - should be light blue
        let rgb = OptimizationVisualizer::value_to_rgb_heatmap(0.5, 1.0);
        assert_eq!(rgb, [128, 128, 255]);
    }
}