// apex_solver/observers/mod.rs
1//! Observer pattern for optimization monitoring.
2//!
3//! This module provides a clean observer pattern for monitoring optimization progress.
4//! Observers can be registered with any optimizer and will be notified at each iteration,
5//! enabling real-time visualization, logging, metrics collection, and custom analysis.
6//!
7//! # Design Philosophy
8//!
9//! The observer pattern provides complete separation between optimization algorithms
10//! and monitoring/visualization logic:
11//!
12//! - **Decoupling**: Optimization logic is independent of how progress is monitored
13//! - **Extensibility**: Easy to add new observers (Rerun, CSV, metrics, dashboards)
14//! - **Composability**: Multiple observers can run simultaneously
15//! - **Zero overhead**: When no observers are registered, notification is a no-op
16//!
17//! # Architecture
18//!
//! ```text
//! ┌─────────────────┐
//! │    Optimizer    │
//! │ (LM/GN/DogLeg)  │
//! └────────┬────────┘
//!          │ observers.notify(values, iteration)
//!          ├──────────────┬──────────────┬──────────────┐
//!          ▼              ▼              ▼              ▼
//!     ┌──────────┐   ┌──────────┐   ┌──────────┐   ┌──────────┐
//!     │  Rerun   │   │   CSV    │   │ Metrics  │   │  Custom  │
//!     │ Observer │   │ Observer │   │ Observer │   │ Observer │
//!     └──────────┘   └──────────┘   └──────────┘   └──────────┘
//! ```
32//!
33//! # Examples
34//!
35//! ## Single Observer
36//!
37//! ```no_run
38//! use apex_solver::{LevenbergMarquardt, LevenbergMarquardtConfig};
39//! use apex_solver::observers::OptObserver;
40//! # use apex_solver::core::problem::Problem;
41//! # use std::collections::HashMap;
42//!
43//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
44//! # let problem = Problem::new();
45//! # let initial_values = HashMap::new();
46//!
47//! let config = LevenbergMarquardtConfig::new().with_max_iterations(100);
48//! let mut solver = LevenbergMarquardt::with_config(config);
49//!
50//! #[cfg(feature = "visualization")]
51//! {
52//! use apex_solver::observers::RerunObserver;
53//! let rerun_observer = RerunObserver::new(true)?;
54//! solver.add_observer(rerun_observer);
55//! }
56//!
57//! let result = solver.optimize(&problem, &initial_values)?;
58//! # Ok(())
59//! # }
60//! ```
61//!
62//! ## Multiple Observers
63//!
64//! ```no_run
65//! # use apex_solver::{LevenbergMarquardt, LevenbergMarquardtConfig};
66//! # use apex_solver::core::problem::{Problem, VariableEnum};
67//! # use apex_solver::observers::OptObserver;
68//! # use std::collections::HashMap;
69//!
70//! // Custom observer that logs to CSV
71//! struct CsvObserver {
72//! file: std::fs::File,
73//! }
74//!
75//! impl OptObserver for CsvObserver {
76//! fn on_step(&self, _values: &HashMap<String, VariableEnum>, iteration: usize) {
77//! // Write iteration data to CSV
78//! // ... implementation ...
79//! }
80//! }
81//!
82//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
83//! # let problem = Problem::new();
84//! # let initial_values = HashMap::new();
85//! let mut solver = LevenbergMarquardt::new();
86//!
87//! // Add Rerun visualization
88//! #[cfg(feature = "visualization")]
89//! {
90//! use apex_solver::observers::RerunObserver;
91//! solver.add_observer(RerunObserver::new(true)?);
92//! }
93//!
94//! // Add CSV logging
95//! // solver.add_observer(CsvObserver { file: ... });
96//!
97//! let result = solver.optimize(&problem, &initial_values)?;
98//! # Ok(())
99//! # }
100//! ```
101//!
102//! ## Custom Observer
103//!
104//! ```no_run
105//! use apex_solver::observers::OptObserver;
106//! use apex_solver::core::problem::VariableEnum;
107//! use std::collections::HashMap;
108//!
109//! struct MetricsObserver {
110//! max_variables_seen: std::cell::RefCell<usize>,
111//! }
112//!
113//! impl OptObserver for MetricsObserver {
114//! fn on_step(&self, values: &HashMap<String, VariableEnum>, iteration: usize) {
115//! let count = values.len();
116//! let mut max = self.max_variables_seen.borrow_mut();
117//! *max = (*max).max(count);
118//! }
119//! }
120//! ```
121
122// Visualization-specific submodules (feature-gated)
123#[cfg(feature = "visualization")]
124pub mod conversions;
125#[cfg(feature = "visualization")]
126pub mod visualization;
127
128// Re-export RerunObserver when visualization is enabled
129#[cfg(feature = "visualization")]
130pub use visualization::{RerunObserver, VisualizationConfig, VisualizationMode};
131
132// Re-export conversion traits for ergonomic use
133#[cfg(feature = "visualization")]
134pub use conversions::{
135 CollectRerunArrows2D, CollectRerunArrows3D, CollectRerunPoints2D, CollectRerunPoints3D,
136 ToRerunArrows2D, ToRerunArrows3D, ToRerunPoints2D, ToRerunPoints3D, ToRerunTransform3D,
137 ToRerunTransform3DFrom2D, ToRerunVec2D, ToRerunVec3D,
138};
139
140use crate::core::problem::VariableEnum;
141use faer::Mat;
142use faer::sparse;
143use std::collections::HashMap;
144use thiserror::Error;
145use tracing::error;
146
/// Observer-specific error types for apex-solver.
///
/// Every variant carries human-readable context and renders through the
/// `thiserror` `#[error]` attribute, so values can be logged directly with
/// `Display` formatting (see `ObserverError::log`). Most variants describe
/// failures in the Rerun visualization backend; `MutexPoisoned` covers
/// lock poisoning in observers that use interior mutability.
#[derive(Debug, Clone, Error)]
pub enum ObserverError {
    /// Failed to initialize Rerun recording stream
    #[error("Failed to initialize Rerun recording stream: {0}")]
    RerunInitialization(String),

    /// Failed to spawn Rerun viewer process
    #[error("Failed to spawn Rerun viewer: {0}")]
    ViewerSpawnFailed(String),

    /// Failed to save recording to file
    #[error("Failed to save recording to file '{path}': {reason}")]
    RecordingSaveFailed { path: String, reason: String },

    /// Failed to log data to Rerun
    #[error("Failed to log data to Rerun at '{entity_path}': {reason}")]
    LoggingFailed { entity_path: String, reason: String },

    /// Failed to convert matrix to visualization format
    #[error("Failed to convert matrix to image: {0}")]
    MatrixVisualizationFailed(String),

    /// Failed to convert tensor data
    #[error("Failed to create tensor data: {0}")]
    TensorConversionFailed(String),

    /// Recording stream is in invalid state
    #[error("Recording stream is in invalid state: {0}")]
    InvalidState(String),

    /// Mutex was poisoned (thread panicked while holding lock)
    #[error("Mutex poisoned in {context}: {reason}")]
    MutexPoisoned { context: String, reason: String },
}
182
183impl ObserverError {
184 /// Log the error with tracing::error and return self for chaining
185 ///
186 /// This method allows for a consistent error logging pattern throughout
187 /// the observers module, ensuring all errors are properly recorded.
188 ///
189 /// # Example
190 /// ```
191 /// # use apex_solver::observers::ObserverError;
192 /// # fn operation() -> Result<(), ObserverError> { Ok(()) }
193 /// # fn example() -> Result<(), ObserverError> {
194 /// operation()
195 /// .map_err(|e| e.log())?;
196 /// # Ok(())
197 /// # }
198 /// ```
199 #[must_use]
200 pub fn log(self) -> Self {
201 error!("{}", self);
202 self
203 }
204
205 /// Log the error with the original source error from a third-party library
206 ///
207 /// This method logs both the ObserverError and the underlying error
208 /// from external libraries (e.g., Rerun's errors). This provides full
209 /// debugging context when errors occur in third-party code.
210 ///
211 /// # Arguments
212 /// * `source_error` - The original error from the third-party library (must implement Debug)
213 ///
214 /// # Example
215 /// ```no_run
216 /// # use apex_solver::observers::ObserverError;
217 /// # fn rec_log() -> Result<(), std::io::Error> { Ok(()) }
218 /// # fn example() -> Result<(), ObserverError> {
219 /// rec_log()
220 /// .map_err(|e| {
221 /// ObserverError::LoggingFailed {
222 /// entity_path: "world/points".to_string(),
223 /// reason: format!("{}", e)
224 /// }
225 /// .log_with_source(e)
226 /// })?;
227 /// # Ok(())
228 /// # }
229 /// ```
230 #[must_use]
231 pub fn log_with_source<E: std::fmt::Debug>(self, source_error: E) -> Self {
232 error!("{} | Source: {:?}", self, source_error);
233 self
234 }
235}
236
/// Result type for observer operations.
///
/// Shorthand for `Result<T, ObserverError>` used throughout the observers
/// module.
pub type ObserverResult<T> = Result<T, ObserverError>;
239
/// Observer trait for monitoring optimization progress.
///
/// Implement this trait to create custom observers that are notified at each
/// optimization iteration. Observers receive the current variable values and
/// iteration number, enabling real-time monitoring, visualization, logging,
/// or custom analysis. Only [`OptObserver::on_step`] is required; the
/// remaining hooks have no-op defaults.
///
/// # Design Notes
///
/// - Observers should be lightweight and non-blocking
/// - Errors in observers should not crash optimization (handle internally)
/// - For expensive operations (file I/O, network), consider buffering
/// - Observers receive immutable references (cannot modify optimization state)
///
/// # Thread Safety
///
/// Observers must be `Send` to support parallel optimization in the future.
/// Use interior mutability (`RefCell`, `Mutex`) if you need to mutate state.
pub trait OptObserver: Send {
    /// Called after each optimization iteration.
    ///
    /// When metrics or matrix data are available, `set_iteration_metrics` /
    /// `set_matrix_data` are invoked before this method, so implementations
    /// may cache that data and consume it here.
    ///
    /// # Arguments
    ///
    /// * `values` - Current variable values (manifold states)
    /// * `iteration` - Current iteration number (0 = initial values, 1+ = after steps)
    ///
    /// # Implementation Guidelines
    ///
    /// - Keep this method fast to avoid slowing optimization
    /// - Handle errors internally (log warnings, don't panic)
    /// - Don't mutate `values` (you receive `&HashMap`)
    /// - Consider buffering expensive operations
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserver;
    /// use apex_solver::core::problem::VariableEnum;
    /// use std::collections::HashMap;
    ///
    /// struct SimpleLogger;
    ///
    /// impl OptObserver for SimpleLogger {
    ///     fn on_step(&self, values: &HashMap<String, VariableEnum>, iteration: usize) {
    ///         // Track optimization progress
    ///     }
    /// }
    /// ```
    fn on_step(&self, values: &HashMap<String, VariableEnum>, iteration: usize);

    /// Set iteration metrics for visualization and monitoring.
    ///
    /// This method is called before `on_step` to provide optimization metrics
    /// such as cost, gradient norm, damping parameter, etc. Observers can use
    /// this data for visualization, logging, or analysis.
    ///
    /// # Arguments
    ///
    /// * `cost` - Current cost function value
    /// * `gradient_norm` - L2 norm of the gradient vector
    /// * `damping` - Damping parameter (for Levenberg-Marquardt, may be None for other solvers)
    /// * `step_norm` - L2 norm of the parameter update step
    /// * `step_quality` - Step quality metric (e.g., rho for trust region methods; may be None)
    ///
    /// # Default Implementation
    ///
    /// The default implementation does nothing, allowing simple observers to ignore metrics.
    fn set_iteration_metrics(
        &self,
        _cost: f64,
        _gradient_norm: f64,
        _damping: Option<f64>,
        _step_norm: f64,
        _step_quality: Option<f64>,
    ) {
        // Default implementation does nothing
    }

    /// Set matrix data for advanced visualization.
    ///
    /// This method provides access to the Hessian matrix and gradient vector
    /// for observers that want to visualize matrix structure or perform
    /// advanced analysis. Observers receive ownership of the (optional)
    /// matrices, so they may store them without cloning.
    ///
    /// # Arguments
    ///
    /// * `hessian` - Sparse Hessian matrix (J^T * J)
    /// * `gradient` - Gradient vector (J^T * r)
    ///
    /// # Default Implementation
    ///
    /// The default implementation does nothing, allowing simple observers to ignore matrices.
    fn set_matrix_data(
        &self,
        _hessian: Option<sparse::SparseColMat<usize, f64>>,
        _gradient: Option<Mat<f64>>,
    ) {
        // Default implementation does nothing
    }

    /// Called when optimization completes.
    ///
    /// This method is called once at the end of optimization, after all iterations
    /// are complete. Use this for final visualization, cleanup, or summary logging.
    ///
    /// # Arguments
    ///
    /// * `values` - Final optimized variable values
    /// * `iterations` - Total number of iterations performed
    ///
    /// # Default Implementation
    ///
    /// The default implementation does nothing, allowing simple observers to ignore completion.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserver;
    /// use apex_solver::core::problem::VariableEnum;
    /// use std::collections::HashMap;
    ///
    /// struct FinalStateLogger;
    ///
    /// impl OptObserver for FinalStateLogger {
    ///     fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {}
    ///
    ///     fn on_optimization_complete(&self, values: &HashMap<String, VariableEnum>, iterations: usize) {
    ///         println!("Optimization completed after {} iterations with {} variables",
    ///                  iterations, values.len());
    ///     }
    /// }
    /// ```
    fn on_optimization_complete(
        &self,
        _values: &HashMap<String, VariableEnum>,
        _iterations: usize,
    ) {
        // Default implementation does nothing
    }
}
380
/// Collection of observers for optimization monitoring.
///
/// This struct manages a vector of observers and provides a convenient
/// `notify()` method to call all observers at once. Optimizers use this
/// internally to manage their observers.
///
/// # Usage
///
/// Typically you don't create this directly - use the `add_observer()` method
/// on optimizers. However, you can use it for custom optimization algorithms:
///
/// ```no_run
/// use apex_solver::observers::{OptObserver, OptObserverVec};
/// use apex_solver::core::problem::VariableEnum;
/// use std::collections::HashMap;
///
/// struct MyOptimizer {
///     observers: OptObserverVec,
///     // ... other fields ...
/// }
///
/// impl MyOptimizer {
///     fn step(&mut self, values: &HashMap<String, VariableEnum>, iteration: usize) {
///         // ... optimization logic ...
///
///         // Notify all observers
///         self.observers.notify(values, iteration);
///     }
/// }
/// ```
#[derive(Default)]
pub struct OptObserverVec {
    // Boxed trait objects, invoked in the order they were added.
    observers: Vec<Box<dyn OptObserver>>,
}
415
416impl OptObserverVec {
417 /// Create a new empty observer collection.
418 pub fn new() -> Self {
419 Self {
420 observers: Vec::new(),
421 }
422 }
423
424 /// Add an observer to the collection.
425 ///
426 /// The observer will be called at each optimization iteration in the order
427 /// it was added.
428 ///
429 /// # Arguments
430 ///
431 /// * `observer` - Any type implementing `OptObserver`
432 ///
433 /// # Examples
434 ///
435 /// ```no_run
436 /// use apex_solver::observers::{OptObserver, OptObserverVec};
437 /// use apex_solver::core::problem::VariableEnum;
438 /// use std::collections::HashMap;
439 ///
440 /// struct MyObserver;
441 /// impl OptObserver for MyObserver {
442 /// fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {
443 /// // Handle optimization step
444 /// }
445 /// }
446 ///
447 /// let mut observers = OptObserverVec::new();
448 /// observers.add(MyObserver);
449 /// ```
450 pub fn add(&mut self, observer: impl OptObserver + 'static) {
451 self.observers.push(Box::new(observer));
452 }
453
454 /// Set iteration metrics for all observers.
455 ///
456 /// Calls `set_iteration_metrics()` on each registered observer. This should
457 /// be called before `notify()` to provide optimization metrics.
458 ///
459 /// # Arguments
460 ///
461 /// * `cost` - Current cost function value
462 /// * `gradient_norm` - L2 norm of the gradient vector
463 /// * `damping` - Damping parameter (may be None)
464 /// * `step_norm` - L2 norm of the parameter update step
465 /// * `step_quality` - Step quality metric (may be None)
466 #[inline]
467 pub fn set_iteration_metrics(
468 &self,
469 cost: f64,
470 gradient_norm: f64,
471 damping: Option<f64>,
472 step_norm: f64,
473 step_quality: Option<f64>,
474 ) {
475 for observer in &self.observers {
476 observer.set_iteration_metrics(cost, gradient_norm, damping, step_norm, step_quality);
477 }
478 }
479
480 /// Set matrix data for all observers.
481 ///
482 /// Calls `set_matrix_data()` on each registered observer. This should
483 /// be called before `notify()` to provide matrix data for visualization.
484 ///
485 /// # Arguments
486 ///
487 /// * `hessian` - Sparse Hessian matrix
488 /// * `gradient` - Gradient vector
489 #[inline]
490 pub fn set_matrix_data(
491 &self,
492 hessian: Option<sparse::SparseColMat<usize, f64>>,
493 gradient: Option<Mat<f64>>,
494 ) {
495 for observer in &self.observers {
496 observer.set_matrix_data(hessian.clone(), gradient.clone());
497 }
498 }
499
500 /// Notify all observers with current optimization state.
501 ///
502 /// Calls `on_step()` on each registered observer in order. If no observers
503 /// are registered, this is a no-op with zero overhead.
504 ///
505 /// # Arguments
506 ///
507 /// * `values` - Current variable values
508 /// * `iteration` - Current iteration number
509 ///
510 /// # Examples
511 ///
512 /// ```no_run
513 /// use apex_solver::observers::OptObserverVec;
514 /// use std::collections::HashMap;
515 ///
516 /// let observers = OptObserverVec::new();
517 /// let values = HashMap::new();
518 ///
519 /// // Notify all observers (safe even if empty)
520 /// observers.notify(&values, 0);
521 /// ```
522 #[inline]
523 pub fn notify(&self, values: &HashMap<String, VariableEnum>, iteration: usize) {
524 for observer in &self.observers {
525 observer.on_step(values, iteration);
526 }
527 }
528
529 /// Notify all observers that optimization is complete.
530 ///
531 /// Calls `on_optimization_complete()` on each registered observer. This should
532 /// be called once at the end of optimization, after all iterations are done.
533 ///
534 /// # Arguments
535 ///
536 /// * `values` - Final optimized variable values
537 /// * `iterations` - Total number of iterations performed
538 ///
539 /// # Examples
540 ///
541 /// ```no_run
542 /// use apex_solver::observers::OptObserverVec;
543 /// use std::collections::HashMap;
544 ///
545 /// let observers = OptObserverVec::new();
546 /// let values = HashMap::new();
547 ///
548 /// // Notify all observers that optimization is complete
549 /// observers.notify_complete(&values, 50);
550 /// ```
551 #[inline]
552 pub fn notify_complete(&self, values: &HashMap<String, VariableEnum>, iterations: usize) {
553 for observer in &self.observers {
554 observer.on_optimization_complete(values, iterations);
555 }
556 }
557
558 /// Check if any observers are registered.
559 ///
560 /// Useful for conditional logic or debugging.
561 #[inline]
562 pub fn is_empty(&self) -> bool {
563 self.observers.is_empty()
564 }
565
566 /// Get the number of registered observers.
567 #[inline]
568 pub fn len(&self) -> usize {
569 self.observers.len()
570 }
571}
572
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::{Arc, Mutex, MutexGuard};

    /// Lock `calls`, converting a poisoned mutex into a logged
    /// `ObserverError::MutexPoisoned` carrying `context`. Poisoning here
    /// would indicate a bug in the test itself.
    fn lock_calls<'a>(
        calls: &'a Mutex<Vec<usize>>,
        context: &str,
    ) -> Result<MutexGuard<'a, Vec<usize>>, ObserverError> {
        calls.lock().map_err(|e| {
            ObserverError::MutexPoisoned {
                context: context.to_string(),
                reason: e.to_string(),
            }
            .log()
        })
    }

    /// Observer that records every iteration number it is notified with.
    #[derive(Clone)]
    struct TestObserver {
        calls: Arc<Mutex<Vec<usize>>>,
    }

    impl OptObserver for TestObserver {
        fn on_step(&self, _values: &HashMap<String, VariableEnum>, iteration: usize) {
            // Log and ignore mutex poisoning rather than panicking.
            if let Ok(mut guard) = lock_calls(&self.calls, "TestObserver::on_step") {
                guard.push(iteration);
            }
        }
    }

    #[test]
    fn test_empty_observers() {
        let observers = OptObserverVec::new();
        assert!(observers.is_empty());
        assert_eq!(observers.len(), 0);

        // notify() on an empty collection must be a harmless no-op.
        observers.notify(&HashMap::new(), 0);
    }

    #[test]
    fn test_single_observer() -> Result<(), ObserverError> {
        let calls = Arc::new(Mutex::new(Vec::new()));

        let mut observers = OptObserverVec::new();
        observers.add(TestObserver {
            calls: Arc::clone(&calls),
        });
        assert_eq!(observers.len(), 1);

        for iteration in 0..3 {
            observers.notify(&HashMap::new(), iteration);
        }

        let guard = lock_calls(&calls, "test_single_observer")?;
        assert_eq!(*guard, vec![0, 1, 2]);
        Ok(())
    }

    #[test]
    fn test_multiple_observers() -> Result<(), ObserverError> {
        let calls1 = Arc::new(Mutex::new(Vec::new()));
        let calls2 = Arc::new(Mutex::new(Vec::new()));

        let mut observers = OptObserverVec::new();
        observers.add(TestObserver {
            calls: Arc::clone(&calls1),
        });
        observers.add(TestObserver {
            calls: Arc::clone(&calls2),
        });
        assert_eq!(observers.len(), 2);

        // A single notification must reach every registered observer.
        observers.notify(&HashMap::new(), 5);

        let guard1 = lock_calls(&calls1, "test_multiple_observers (calls1)")?;
        assert_eq!(*guard1, vec![5]);

        let guard2 = lock_calls(&calls2, "test_multiple_observers (calls2)")?;
        assert_eq!(*guard2, vec![5]);
        Ok(())
    }
}