use crate::{core::problem, io};
use faer::sparse;
use std::collections;

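/// Streams optimization state (scalar metrics, Hessian and gradient heatmaps,
/// and pose graphs) to a Rerun recording for live inspection.
///
/// Every logging method is a cheap no-op when visualization is disabled.
///
/// A minimal usage sketch; the loop variables (`max_iters`, `cost`,
/// `grad_norm`, `lambda`, `step_norm`, `rho`, `elapsed_ms`) are hypothetical
/// stand-ins for values produced by a solver iteration:
///
/// ```ignore
/// let vis = OptimizationVisualizer::new(true)?;
/// for iter in 0..max_iters {
///     // ... one solver step ...
///     vis.log_scalars(iter, cost, grad_norm, lambda, step_norm, Some(rho))?;
///     vis.log_timing(iter, elapsed_ms)?;
/// }
/// vis.log_convergence("converged: gradient norm below tolerance")?;
/// ```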
#[derive(Debug)]
pub struct OptimizationVisualizer {
    rec: Option<rerun::RecordingStream>,
    enabled: bool,
}

impl OptimizationVisualizer {
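    /// Creates a visualizer. When `enabled`, a Rerun viewer is spawned (with a
    /// file fallback); see [`Self::new_with_options`].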
    pub fn new(enabled: bool) -> Result<Self, Box<dyn std::error::Error>> {
        Self::new_with_options(enabled, None)
    }

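    /// Creates a visualizer. With `save_path` set, the recording is written to
    /// that file; otherwise a Rerun viewer is spawned, falling back to
    /// `optimization.rrd` if the viewer cannot be launched.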
    pub fn new_with_options(
        enabled: bool,
        save_path: Option<&str>,
    ) -> Result<Self, Box<dyn std::error::Error>> {
        let rec = if enabled {
            let rec = if let Some(path) = save_path {
                println!("✓ Saving visualization to: {}", path);
                rerun::RecordingStreamBuilder::new("apex-solver-optimization").save(path)?
            } else {
                match rerun::RecordingStreamBuilder::new("apex-solver-optimization").spawn() {
                    Ok(rec) => {
                        println!("✓ Rerun viewer launched successfully");
                        rec
                    }
                    Err(e) => {
                        eprintln!("⚠ Could not launch Rerun viewer: {}", e);
                        eprintln!("  Saving to file 'optimization.rrd' instead");
                        eprintln!("  View it later with: rerun optimization.rrd\n");

                        rerun::RecordingStreamBuilder::new("apex-solver-optimization")
                            .save("optimization.rrd")?
                    }
                }
            };

            Some(rec)
        } else {
            None
        };

        Ok(Self { rec, enabled })
    }

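    /// Returns `true` only when visualization is enabled and a recording
    /// stream was successfully created.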
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        self.enabled && self.rec.is_some()
    }

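    /// Logs per-iteration scalars (cost, gradient norm, damping, step norm,
    /// and optionally the step-quality ratio rho) as time-series plots.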
    pub fn log_scalars(
        &self,
        iteration: usize,
        cost: f64,
        gradient_norm: f64,
        damping: f64,
        step_norm: f64,
        step_quality: Option<f64>,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let rec = self.rec.as_ref().unwrap();

        rec.set_time_sequence("iteration", iteration as i64);

        rec.log("cost_plot/value", &rerun::archetypes::Scalars::new([cost]))?;

        rec.log(
            "gradient_plot/norm",
            &rerun::archetypes::Scalars::new([gradient_norm]),
        )?;

        rec.log(
            "damping_plot/lambda",
            &rerun::archetypes::Scalars::new([damping]),
        )?;

        rec.log(
            "step_plot/norm",
            &rerun::archetypes::Scalars::new([step_norm]),
        )?;

        if let Some(rho) = step_quality {
            rec.log("quality_plot/rho", &rerun::archetypes::Scalars::new([rho]))?;
        }

        Ok(())
    }

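    /// Logs the wall-clock duration of one iteration, in milliseconds.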
    pub fn log_timing(
        &self,
        iteration: usize,
        duration_ms: f64,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let rec = self.rec.as_ref().unwrap();
        rec.set_time_sequence("iteration", iteration as i64);

        rec.log(
            "timing_plot/iteration_ms",
            &rerun::archetypes::Scalars::new([duration_ms]),
        )?;

        Ok(())
    }

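    /// Logs the sparse Hessian as a downsampled RGB heatmap image.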
    pub fn log_hessian(
        &self,
        hessian: Option<&sparse::SparseColMat<usize, f64>>,
        iteration: usize,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let Some(hessian) = hessian else {
            return Ok(());
        };

        let rec = self.rec.as_ref().unwrap();
        rec.set_time_sequence("iteration", iteration as i64);

        let image_data = Self::sparse_hessian_to_image(hessian)?;

        rec.log(
            "optimization/matrices/hessian",
            &rerun::archetypes::Tensor::new(image_data),
        )?;

        Ok(())
    }

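    /// Logs the gradient (a column vector) as a horizontal heatmap bar.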
    pub fn log_gradient(
        &self,
        gradient: Option<&faer::Mat<f64>>,
        iteration: usize,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let Some(grad) = gradient else {
            return Ok(());
        };

        let rec = self.rec.as_ref().unwrap();
        rec.set_time_sequence("iteration", iteration as i64);

        let grad_vec: Vec<f64> = (0..grad.nrows()).map(|i| grad[(i, 0)]).collect();

        let image_data = Self::gradient_to_image(&grad_vec)?;

        rec.log(
            "optimization/matrices/gradient",
            &rerun::archetypes::Tensor::new(image_data),
        )?;

        Ok(())
    }

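    /// Logs the current SE(3) and SE(2) variable estimates as 3D transforms
    /// under `optimized_graph/`.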
    pub fn log_manifolds(
        &self,
        variables: &collections::HashMap<String, problem::VariableEnum>,
        iteration: usize,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let rec = self.rec.as_ref().unwrap();
        rec.set_time_sequence("iteration", iteration as i64);

        for (var_name, var) in variables {
            match var {
                problem::VariableEnum::SE3(v) => {
                    let trans = v.value.translation();
                    let rot = v.value.rotation_quaternion();

                    let position = rerun::external::glam::Vec3::new(
                        trans.x as f32,
                        trans.y as f32,
                        trans.z as f32,
                    );

                    let nq = rot.as_ref();
                    let rotation = rerun::external::glam::Quat::from_xyzw(
                        nq.i as f32,
                        nq.j as f32,
                        nq.k as f32,
                        nq.w as f32,
                    );

                    let transform =
                        rerun::Transform3D::from_translation_rotation(position, rotation);

                    rec.log(
                        format!("optimized_graph/se3_poses/{}", var_name),
                        &transform,
                    )?;

                    rec.log(
                        format!("optimized_graph/se3_poses/{}", var_name),
                        &rerun::archetypes::Pinhole::from_fov_and_aspect_ratio(0.5, 1.0),
                    )?;
                }
                problem::VariableEnum::SE2(v) => {
                    let x = v.value.x();
                    let y = v.value.y();

                    // The SE(2) heading is not visualized; poses are shown as
                    // positions in the z = 0 plane with identity rotation.
                    let position = rerun::external::glam::Vec3::new(x as f32, y as f32, 0.0);
                    let rotation = rerun::external::glam::Quat::IDENTITY;

                    let transform =
                        rerun::Transform3D::from_translation_rotation(position, rotation);

                    rec.log(
                        format!("optimized_graph/se2_poses/{}", var_name),
                        &transform,
                    )?;
                }
                // Other variable types have no geometric visualization.
                _ => {}
            }
        }

        Ok(())
    }

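    /// Logs the final convergence status as a text document.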
    pub fn log_convergence(&self, status: &str) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let rec = self.rec.as_ref().unwrap();

        rec.log(
            "optimization/status",
            &rerun::archetypes::TextDocument::new(status),
        )?;

        Ok(())
    }

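    /// Logs the initial (pre-optimization) pose graph: SE(3) vertices as
    /// transforms with pinhole frustums, SE(2) vertices as 2D points.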
    pub fn log_initial_graph(
        &self,
        graph: &io::Graph,
        scale: f32,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if !self.is_enabled() {
            return Ok(());
        }

        let rec = self.rec.as_ref().unwrap();

        for (id, vertex) in &graph.vertices_se3 {
            let (position, rotation) = vertex.to_rerun_transform(scale);
            let transform = rerun::Transform3D::from_translation_rotation(position, rotation);

            let entity_path = format!("initial_graph/se3_poses/{}", id);
            rec.log(entity_path.as_str(), &transform)?;

            rec.log(
                entity_path.as_str(),
                &rerun::archetypes::Pinhole::from_fov_and_aspect_ratio(0.5, 1.0),
            )?;
        }

        if !graph.vertices_se2.is_empty() {
            let positions: Vec<[f32; 2]> = graph
                .vertices_se2
                .values()
                .map(|vertex| vertex.to_rerun_position_2d(scale))
                .collect();

            let colors = vec![rerun::components::Color::from_rgb(100, 150, 255); positions.len()];

            rec.log(
                "initial_graph/se2_poses",
                &rerun::archetypes::Points2D::new(positions)
                    .with_colors(colors)
                    .with_radii([0.5 * scale]),
            )?;
        }

        Ok(())
    }

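    /// Downsamples the sparse Hessian to a fixed 100x100 grid and renders it
    /// as an RGB heatmap tensor, normalized by the largest absolute value.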
    fn sparse_hessian_to_image(
        hessian: &sparse::SparseColMat<usize, f64>,
    ) -> Result<rerun::datatypes::TensorData, Box<dyn std::error::Error>> {
        let target_size = 100;
        let target_rows = target_size;
        let target_cols = target_size;

        let dense_matrix = Self::downsample_sparse_matrix(hessian, target_rows, target_cols);

        let mut min_val = f64::INFINITY;
        let mut max_val = f64::NEG_INFINITY;

        for &val in &dense_matrix {
            if val.is_finite() {
                min_val = min_val.min(val);
                max_val = max_val.max(val);
            }
        }

        let max_abs = max_val.abs().max(min_val.abs());

        let mut rgb_data = Vec::with_capacity(target_rows * target_cols * 3);

        for &val in &dense_matrix {
            let rgb = Self::value_to_rgb_heatmap(val, max_abs);
            rgb_data.extend_from_slice(&rgb);
        }

        let tensor = rerun::datatypes::TensorData::new(
            vec![target_rows as u64, target_cols as u64, 3],
            rerun::datatypes::TensorBuffer::U8(rgb_data.into()),
        );

        Ok(tensor)
    }

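    /// Renders the gradient as a `bar_height` x `target_width` RGB heatmap,
    /// averaging entries into horizontal buckets.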
    fn gradient_to_image(
        gradient: &[f64],
    ) -> Result<rerun::datatypes::TensorData, Box<dyn std::error::Error>> {
        let n = gradient.len();

        let bar_height = 50;
        let target_width = 100;

        let max_abs = gradient
            .iter()
            .map(|&x| x.abs())
            .fold(0.0f64, |a, b| a.max(b));

        let mut rgb_data = Vec::with_capacity(bar_height * target_width * 3);

        for _ in 0..bar_height {
            for i in 0..target_width {
                let start = (i * n) / target_width;
                let end = ((i + 1) * n) / target_width;
                let sum: f64 = gradient[start..end].iter().sum();
                let val = sum / (end - start).max(1) as f64;

                let rgb = Self::value_to_rgb_heatmap(val, max_abs);
                rgb_data.extend_from_slice(&rgb);
            }
        }

        let tensor = rerun::datatypes::TensorData::new(
            vec![bar_height as u64, target_width as u64, 3],
            rerun::datatypes::TensorBuffer::U8(rgb_data.into()),
        );

        Ok(tensor)
    }

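    /// Bins the nonzeros of an `m` x `n` sparse matrix into a
    /// `target_rows` x `target_cols` dense grid, averaging the values that
    /// land in each bin.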
    fn downsample_sparse_matrix(
        sparse: &sparse::SparseColMat<usize, f64>,
        target_rows: usize,
        target_cols: usize,
    ) -> Vec<f64> {
        let m = sparse.nrows();
        let n = sparse.ncols();

        let mut downsampled = vec![0.0; target_rows * target_cols];
        let mut counts = vec![0usize; target_rows * target_cols];

        let symbolic = sparse.symbolic();

        for col in 0..n {
            let row_indices = symbolic.row_idx_of_col_raw(col);
            let col_values = sparse.val_of_col(col);

            for (idx_in_col, &row) in row_indices.iter().enumerate() {
                let value = col_values[idx_in_col];

                if value.abs() > 1e-12 {
                    let target_row = (row * target_rows) / m;
                    let target_col = (col * target_cols) / n;
                    let idx = target_row * target_cols + target_col;

                    downsampled[idx] += value;
                    counts[idx] += 1;
                }
            }
        }

        for (val, &count) in downsampled.iter_mut().zip(counts.iter()) {
            if count > 0 {
                *val /= count as f64;
            }
        }

        downsampled
    }

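    /// Maps a value to a white-to-blue heatmap color scaled by `max_abs`;
    /// non-finite values and zero magnitudes map to white.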
    fn value_to_rgb_heatmap(value: f64, max_abs: f64) -> [u8; 3] {
        if !value.is_finite() || max_abs == 0.0 {
            return [255, 255, 255];
        }

        let normalized = (value.abs() / max_abs).clamp(0.0, 1.0);

        if normalized < 1e-10 {
            [255, 255, 255]
        } else {
            let intensity = (normalized * 255.0) as u8;
            let remaining = 255 - intensity;
            [remaining, remaining, 255]
        }
    }
}

impl Default for OptimizationVisualizer {
    fn default() -> Self {
        Self::new(false).unwrap_or_else(|_| Self {
            rec: None,
            enabled: false,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_visualizer_creation() {
        let vis = OptimizationVisualizer::new(false);
        assert!(vis.is_ok());

        let vis = vis.unwrap();
        assert!(!vis.is_enabled());
    }

    #[test]
    fn test_rgb_heatmap_conversion() {
        let rgb = OptimizationVisualizer::value_to_rgb_heatmap(0.0, 1.0);
        assert_eq!(rgb, [255, 255, 255]);

        let rgb = OptimizationVisualizer::value_to_rgb_heatmap(1.0, 1.0);
        assert_eq!(rgb, [0, 0, 255]);

        let rgb = OptimizationVisualizer::value_to_rgb_heatmap(-1.0, 1.0);
        assert_eq!(rgb, [0, 0, 255]);

        let rgb = OptimizationVisualizer::value_to_rgb_heatmap(0.5, 1.0);
        assert_eq!(rgb, [128, 128, 255]);
    }
}