apex_solver/observers/
mod.rs

//! Observer pattern for optimization monitoring.
//!
//! This module provides a clean observer pattern for monitoring optimization progress.
//! Observers can be registered with any optimizer and will be notified at each iteration,
//! enabling real-time visualization, logging, metrics collection, and custom analysis.
//!
//! # Design Philosophy
//!
//! The observer pattern provides complete separation between optimization algorithms
//! and monitoring/visualization logic:
//!
//! - **Decoupling**: Optimization logic is independent of how progress is monitored
//! - **Extensibility**: Easy to add new observers (Rerun, CSV, metrics, dashboards)
//! - **Composability**: Multiple observers can run simultaneously
//! - **Zero overhead**: When no observers are registered, notification is a no-op
//!
//! # Architecture
//!
//! ```text
//! ┌─────────────────┐
//! │   Optimizer     │
//! │  (LM/GN/DogLeg) │
//! └────────┬────────┘
//!          │ observers.notify(values, iteration)
//!          ├──────────────┬──────────────┬──────────────┐
//!          ▼              ▼              ▼              ▼
//!    ┌──────────┐  ┌──────────┐  ┌──────────┐  ┌──────────┐
//!    │  Rerun   │  │   CSV    │  │ Metrics  │  │  Custom  │
//!    │ Observer │  │ Observer │  │ Observer │  │ Observer │
//!    └──────────┘  └──────────┘  └──────────┘  └──────────┘
//! ```
//!
//! # Examples
//!
//! ## Single Observer
//!
//! ```no_run
//! use apex_solver::{LevenbergMarquardt, LevenbergMarquardtConfig};
//! use apex_solver::observers::OptObserver;
//! # use apex_solver::core::problem::Problem;
//! # use std::collections::HashMap;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let problem = Problem::new();
//! # let initial_values = HashMap::new();
//!
//! let config = LevenbergMarquardtConfig::new().with_max_iterations(100);
//! let mut solver = LevenbergMarquardt::with_config(config);
//!
//! #[cfg(feature = "visualization")]
//! {
//!     use apex_solver::observers::RerunObserver;
//!     let rerun_observer = RerunObserver::new(true)?;
//!     solver.add_observer(rerun_observer);
//! }
//!
//! let result = solver.optimize(&problem, &initial_values)?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Multiple Observers
//!
//! ```no_run
//! # use apex_solver::{LevenbergMarquardt, LevenbergMarquardtConfig};
//! # use apex_solver::core::problem::{Problem, VariableEnum};
//! # use apex_solver::observers::OptObserver;
//! # use std::collections::HashMap;
//!
//! // Custom observer that logs to CSV
//! struct CsvObserver {
//!     file: std::fs::File,
//! }
//!
//! impl OptObserver for CsvObserver {
//!     fn on_step(&self, _values: &HashMap<String, VariableEnum>, iteration: usize) {
//!         // Write one line of CSV per iteration; errors are ignored so the
//!         // observer never interrupts optimization.
//!         use std::io::Write;
//!         let _ = writeln!(&self.file, "{}", iteration);
//!     }
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let problem = Problem::new();
//! # let initial_values = HashMap::new();
//! let mut solver = LevenbergMarquardt::new();
//!
//! // Add Rerun visualization
//! #[cfg(feature = "visualization")]
//! {
//!     use apex_solver::observers::RerunObserver;
//!     solver.add_observer(RerunObserver::new(true)?);
//! }
//!
//! // Add CSV logging
//! // solver.add_observer(CsvObserver { file: ... });
//!
//! let result = solver.optimize(&problem, &initial_values)?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Custom Observer
//!
//! ```no_run
//! use apex_solver::observers::OptObserver;
//! use apex_solver::core::problem::VariableEnum;
//! use std::collections::HashMap;
//!
//! struct MetricsObserver {
//!     max_variables_seen: std::cell::RefCell<usize>,
//! }
//!
//! impl OptObserver for MetricsObserver {
//!     fn on_step(&self, values: &HashMap<String, VariableEnum>, _iteration: usize) {
//!         let count = values.len();
//!         let mut max = self.max_variables_seen.borrow_mut();
//!         *max = (*max).max(count);
//!     }
//! }
//! ```

// Visualization-specific submodules (feature-gated)
#[cfg(feature = "visualization")]
pub mod conversions;
#[cfg(feature = "visualization")]
pub mod visualization;

// Re-export RerunObserver when visualization is enabled
#[cfg(feature = "visualization")]
pub use visualization::{RerunObserver, VisualizationConfig, VisualizationMode};

use crate::core::problem::VariableEnum;
use faer::Mat;
use faer::sparse;
use std::collections::HashMap;
use thiserror::Error;
use tracing::error;

/// Observer-specific error types for apex-solver
#[derive(Debug, Clone, Error)]
pub enum ObserverError {
    /// Failed to initialize Rerun recording stream
    #[error("Failed to initialize Rerun recording stream: {0}")]
    RerunInitialization(String),

    /// Failed to spawn Rerun viewer process
    #[error("Failed to spawn Rerun viewer: {0}")]
    ViewerSpawnFailed(String),

    /// Failed to save recording to file
    #[error("Failed to save recording to file '{path}': {reason}")]
    RecordingSaveFailed { path: String, reason: String },

    /// Failed to log data to Rerun
    #[error("Failed to log data to Rerun at '{entity_path}': {reason}")]
    LoggingFailed { entity_path: String, reason: String },

    /// Failed to convert matrix to visualization format
    #[error("Failed to convert matrix to image: {0}")]
    MatrixVisualizationFailed(String),

    /// Failed to convert tensor data
    #[error("Failed to create tensor data: {0}")]
    TensorConversionFailed(String),

    /// Recording stream is in invalid state
    #[error("Recording stream is in invalid state: {0}")]
    InvalidState(String),

    /// Mutex was poisoned (thread panicked while holding lock)
    #[error("Mutex poisoned in {context}: {reason}")]
    MutexPoisoned { context: String, reason: String },
}

impl ObserverError {
    /// Log the error with tracing::error and return self for chaining
    ///
    /// This method allows for a consistent error logging pattern throughout
    /// the observers module, ensuring all errors are properly recorded.
    ///
    /// # Example
    /// ```
    /// # use apex_solver::observers::ObserverError;
    /// # fn operation() -> Result<(), ObserverError> { Ok(()) }
    /// # fn example() -> Result<(), ObserverError> {
    /// operation()
    ///     .map_err(|e| e.log())?;
    /// # Ok(())
    /// # }
    /// ```
    #[must_use]
    pub fn log(self) -> Self {
        error!("{}", self);
        self
    }

    /// Log the error with the original source error from a third-party library
    ///
    /// This method logs both the ObserverError and the underlying error
    /// from external libraries (e.g., Rerun's errors). This provides full
    /// debugging context when errors occur in third-party code.
    ///
    /// # Arguments
    /// * `source_error` - The original error from the third-party library (must implement Debug)
    ///
    /// # Example
    /// ```no_run
    /// # use apex_solver::observers::ObserverError;
    /// # fn rec_log() -> Result<(), std::io::Error> { Ok(()) }
    /// # fn example() -> Result<(), ObserverError> {
    /// rec_log()
    ///     .map_err(|e| {
    ///         ObserverError::LoggingFailed {
    ///             entity_path: "world/points".to_string(),
    ///             reason: format!("{}", e)
    ///         }
    ///         .log_with_source(e)
    ///     })?;
    /// # Ok(())
    /// # }
    /// ```
    #[must_use]
    pub fn log_with_source<E: std::fmt::Debug>(self, source_error: E) -> Self {
        error!("{} | Source: {:?}", self, source_error);
        self
    }
}

/// Result type for observer operations
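///
/// # Examples
///
/// A minimal sketch of a fallible helper returning this alias; the
/// `init_stream` name is illustrative, not part of this crate:
///
/// ```
/// use apex_solver::observers::{ObserverError, ObserverResult};
///
/// fn init_stream(ready: bool) -> ObserverResult<()> {
///     if ready {
///         Ok(())
///     } else {
///         Err(ObserverError::InvalidState("stream not started".to_string()))
///     }
/// }
///
/// assert!(init_stream(true).is_ok());
/// ```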
pub type ObserverResult<T> = Result<T, ObserverError>;

/// Observer trait for monitoring optimization progress.
///
/// Implement this trait to create custom observers that are notified at each
/// optimization iteration. Observers receive the current variable values and
/// iteration number, enabling real-time monitoring, visualization, logging,
/// or custom analysis.
///
/// # Design Notes
///
/// - Observers should be lightweight and non-blocking
/// - Errors in observers should not crash optimization (handle internally)
/// - For expensive operations (file I/O, network), consider buffering
/// - Observers receive immutable references (cannot modify optimization state)
///
/// # Thread Safety
///
/// Observers must be `Send` to support parallel optimization in the future.
/// Use interior mutability (`RefCell`, `Mutex`) if you need to mutate state.
pub trait OptObserver: Send {
    /// Called after each optimization iteration.
    ///
    /// # Arguments
    ///
    /// * `values` - Current variable values (manifold states)
    /// * `iteration` - Current iteration number (0 = initial values, 1+ = after steps)
    ///
    /// # Implementation Guidelines
    ///
    /// - Keep this method fast to avoid slowing optimization
    /// - Handle errors internally (log warnings, don't panic)
    /// - Don't mutate `values` (you receive `&HashMap`)
    /// - Consider buffering expensive operations
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserver;
    /// use apex_solver::core::problem::VariableEnum;
    /// use std::collections::HashMap;
    ///
    /// struct SimpleLogger;
    ///
    /// impl OptObserver for SimpleLogger {
    ///     fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {
    ///         // Track optimization progress
    ///     }
    /// }
    /// ```
    fn on_step(&self, values: &HashMap<String, VariableEnum>, iteration: usize);

    /// Set iteration metrics for visualization and monitoring.
    ///
    /// This method is called before `on_step` to provide optimization metrics
    /// such as cost, gradient norm, damping parameter, etc. Observers can use
    /// this data for visualization, logging, or analysis.
    ///
    /// # Arguments
    ///
    /// * `cost` - Current cost function value
    /// * `gradient_norm` - L2 norm of the gradient vector
    /// * `damping` - Damping parameter (for Levenberg-Marquardt, may be None for other solvers)
    /// * `step_norm` - L2 norm of the parameter update step
    /// * `step_quality` - Step quality metric (e.g., rho for trust region methods)
    ///
    /// # Default Implementation
    ///
    /// The default implementation does nothing, allowing simple observers to ignore metrics.
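    ///
    /// # Examples
    ///
    /// A minimal sketch of an observer that records the cost history; the
    /// `CostTracker` name is illustrative, not part of this crate:
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserver;
    /// use apex_solver::core::problem::VariableEnum;
    /// use std::collections::HashMap;
    /// use std::sync::Mutex;
    ///
    /// struct CostTracker {
    ///     costs: Mutex<Vec<f64>>,
    /// }
    ///
    /// impl OptObserver for CostTracker {
    ///     fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {}
    ///
    ///     fn set_iteration_metrics(
    ///         &self,
    ///         cost: f64,
    ///         _gradient_norm: f64,
    ///         _damping: Option<f64>,
    ///         _step_norm: f64,
    ///         _step_quality: Option<f64>,
    ///     ) {
    ///         // Record the cost; skip silently on a poisoned lock rather than panic.
    ///         if let Ok(mut costs) = self.costs.lock() {
    ///             costs.push(cost);
    ///         }
    ///     }
    /// }
    /// ```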
    fn set_iteration_metrics(
        &self,
        _cost: f64,
        _gradient_norm: f64,
        _damping: Option<f64>,
        _step_norm: f64,
        _step_quality: Option<f64>,
    ) {
        // Default implementation does nothing
    }

    /// Set matrix data for advanced visualization.
    ///
    /// This method provides access to the Hessian matrix and gradient vector
    /// for observers that want to visualize matrix structure or perform
    /// advanced analysis.
    ///
    /// # Arguments
    ///
    /// * `hessian` - Sparse Hessian matrix (J^T * J)
    /// * `gradient` - Gradient vector (J^T * r)
    ///
    /// # Default Implementation
    ///
    /// The default implementation does nothing, allowing simple observers to ignore matrices.
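    ///
    /// # Examples
    ///
    /// A sketch of an observer that reports the Hessian's dimensions; the
    /// `SparsityProbe` name is illustrative, not part of this crate:
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserver;
    /// use apex_solver::core::problem::VariableEnum;
    /// use faer::{Mat, sparse};
    /// use std::collections::HashMap;
    ///
    /// struct SparsityProbe;
    ///
    /// impl OptObserver for SparsityProbe {
    ///     fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {}
    ///
    ///     fn set_matrix_data(
    ///         &self,
    ///         hessian: Option<sparse::SparseColMat<usize, f64>>,
    ///         _gradient: Option<Mat<f64>>,
    ///     ) {
    ///         // Report the system size whenever a Hessian is provided.
    ///         if let Some(h) = hessian {
    ///             println!("Hessian is {} x {}", h.nrows(), h.ncols());
    ///         }
    ///     }
    /// }
    /// ```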
    fn set_matrix_data(
        &self,
        _hessian: Option<sparse::SparseColMat<usize, f64>>,
        _gradient: Option<Mat<f64>>,
    ) {
        // Default implementation does nothing
    }

    /// Called when optimization completes.
    ///
    /// This method is called once at the end of optimization, after all iterations
    /// are complete. Use this for final visualization, cleanup, or summary logging.
    ///
    /// # Arguments
    ///
    /// * `values` - Final optimized variable values
    /// * `iterations` - Total number of iterations performed
    ///
    /// # Default Implementation
    ///
    /// The default implementation does nothing, allowing simple observers to ignore completion.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserver;
    /// use apex_solver::core::problem::VariableEnum;
    /// use std::collections::HashMap;
    ///
    /// struct FinalStateLogger;
    ///
    /// impl OptObserver for FinalStateLogger {
    ///     fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {}
    ///
    ///     fn on_optimization_complete(&self, values: &HashMap<String, VariableEnum>, iterations: usize) {
    ///         println!("Optimization completed after {} iterations with {} variables",
    ///                  iterations, values.len());
    ///     }
    /// }
    /// ```
    fn on_optimization_complete(
        &self,
        _values: &HashMap<String, VariableEnum>,
        _iterations: usize,
    ) {
        // Default implementation does nothing
    }
}

/// Collection of observers for optimization monitoring.
///
/// This struct manages a vector of observers and provides a convenient
/// `notify()` method to call all observers at once. Optimizers use this
/// internally to manage their observers.
///
/// # Usage
///
/// Typically you don't create this directly - use the `add_observer()` method
/// on optimizers. However, you can use it for custom optimization algorithms:
///
/// ```no_run
/// use apex_solver::observers::{OptObserver, OptObserverVec};
/// use apex_solver::core::problem::VariableEnum;
/// use std::collections::HashMap;
///
/// struct MyOptimizer {
///     observers: OptObserverVec,
///     // ... other fields ...
/// }
///
/// impl MyOptimizer {
///     fn step(&mut self, values: &HashMap<String, VariableEnum>, iteration: usize) {
///         // ... optimization logic ...
///
///         // Notify all observers
///         self.observers.notify(values, iteration);
///     }
/// }
/// ```
#[derive(Default)]
pub struct OptObserverVec {
    observers: Vec<Box<dyn OptObserver>>,
}

impl OptObserverVec {
    /// Create a new empty observer collection.
    pub fn new() -> Self {
        Self {
            observers: Vec::new(),
        }
    }

    /// Add an observer to the collection.
    ///
    /// The observer will be called at each optimization iteration in the order
    /// it was added.
    ///
    /// # Arguments
    ///
    /// * `observer` - Any type implementing `OptObserver`
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use apex_solver::observers::{OptObserver, OptObserverVec};
    /// use apex_solver::core::problem::VariableEnum;
    /// use std::collections::HashMap;
    ///
    /// struct MyObserver;
    /// impl OptObserver for MyObserver {
    ///     fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {
    ///         // Handle optimization step
    ///     }
    /// }
    ///
    /// let mut observers = OptObserverVec::new();
    /// observers.add(MyObserver);
    /// ```
    pub fn add(&mut self, observer: impl OptObserver + 'static) {
        self.observers.push(Box::new(observer));
    }

    /// Set iteration metrics for all observers.
    ///
    /// Calls `set_iteration_metrics()` on each registered observer. This should
    /// be called before `notify()` to provide optimization metrics.
    ///
    /// # Arguments
    ///
    /// * `cost` - Current cost function value
    /// * `gradient_norm` - L2 norm of the gradient vector
    /// * `damping` - Damping parameter (may be None)
    /// * `step_norm` - L2 norm of the parameter update step
    /// * `step_quality` - Step quality metric (may be None)
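    ///
    /// # Examples
    ///
    /// A sketch of the expected call order inside a custom solver loop
    /// (the metric values here are placeholders):
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserverVec;
    /// use std::collections::HashMap;
    ///
    /// let observers = OptObserverVec::new();
    /// let values = HashMap::new();
    ///
    /// for iteration in 0..10 {
    ///     // ... compute the step, cost, and norms ...
    ///     let (cost, gradient_norm, step_norm) = (1.0, 0.1, 0.01);
    ///
    ///     // Metrics first, then the per-step notification.
    ///     observers.set_iteration_metrics(cost, gradient_norm, Some(1e-3), step_norm, None);
    ///     observers.notify(&values, iteration);
    /// }
    /// ```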
    #[inline]
    pub fn set_iteration_metrics(
        &self,
        cost: f64,
        gradient_norm: f64,
        damping: Option<f64>,
        step_norm: f64,
        step_quality: Option<f64>,
    ) {
        for observer in &self.observers {
            observer.set_iteration_metrics(cost, gradient_norm, damping, step_norm, step_quality);
        }
    }

    /// Set matrix data for all observers.
    ///
    /// Calls `set_matrix_data()` on each registered observer. This should
    /// be called before `notify()` to provide matrix data for visualization.
    ///
    /// # Arguments
    ///
    /// * `hessian` - Sparse Hessian matrix
    /// * `gradient` - Gradient vector
    #[inline]
    pub fn set_matrix_data(
        &self,
        hessian: Option<sparse::SparseColMat<usize, f64>>,
        gradient: Option<Mat<f64>>,
    ) {
        for observer in &self.observers {
            observer.set_matrix_data(hessian.clone(), gradient.clone());
        }
    }

    /// Notify all observers with current optimization state.
    ///
    /// Calls `on_step()` on each registered observer in order. If no observers
    /// are registered, this is a no-op with zero overhead.
    ///
    /// # Arguments
    ///
    /// * `values` - Current variable values
    /// * `iteration` - Current iteration number
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserverVec;
    /// use std::collections::HashMap;
    ///
    /// let observers = OptObserverVec::new();
    /// let values = HashMap::new();
    ///
    /// // Notify all observers (safe even if empty)
    /// observers.notify(&values, 0);
    /// ```
    #[inline]
    pub fn notify(&self, values: &HashMap<String, VariableEnum>, iteration: usize) {
        for observer in &self.observers {
            observer.on_step(values, iteration);
        }
    }

    /// Notify all observers that optimization is complete.
    ///
    /// Calls `on_optimization_complete()` on each registered observer. This should
    /// be called once at the end of optimization, after all iterations are done.
    ///
    /// # Arguments
    ///
    /// * `values` - Final optimized variable values
    /// * `iterations` - Total number of iterations performed
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserverVec;
    /// use std::collections::HashMap;
    ///
    /// let observers = OptObserverVec::new();
    /// let values = HashMap::new();
    ///
    /// // Notify all observers that optimization is complete
    /// observers.notify_complete(&values, 50);
    /// ```
    #[inline]
    pub fn notify_complete(&self, values: &HashMap<String, VariableEnum>, iterations: usize) {
        for observer in &self.observers {
            observer.on_optimization_complete(values, iterations);
        }
    }

    /// Check if any observers are registered.
    ///
    /// Useful for conditional logic or debugging.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.observers.is_empty()
    }

    /// Get the number of registered observers.
    #[inline]
    pub fn len(&self) -> usize {
        self.observers.len()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::{Arc, Mutex};

    #[derive(Clone)]
    struct TestObserver {
        calls: Arc<Mutex<Vec<usize>>>,
    }

    impl OptObserver for TestObserver {
        fn on_step(&self, _values: &HashMap<String, VariableEnum>, iteration: usize) {
            // In test code, we log and ignore mutex poisoning errors since they indicate test bugs
            if let Ok(mut guard) = self.calls.lock().map_err(|e| {
                ObserverError::MutexPoisoned {
                    context: "TestObserver::on_step".to_string(),
                    reason: e.to_string(),
                }
                .log()
            }) {
                guard.push(iteration);
            }
        }
    }

    #[test]
    fn test_empty_observers() {
        let observers = OptObserverVec::new();
        assert!(observers.is_empty());
        assert_eq!(observers.len(), 0);

        // Should not panic with no observers
        observers.notify(&HashMap::new(), 0);
    }

    #[test]
    fn test_single_observer() -> Result<(), ObserverError> {
        let calls = Arc::new(Mutex::new(Vec::new()));
        let observer = TestObserver {
            calls: calls.clone(),
        };

        let mut observers = OptObserverVec::new();
        observers.add(observer);

        assert_eq!(observers.len(), 1);

        observers.notify(&HashMap::new(), 0);
        observers.notify(&HashMap::new(), 1);
        observers.notify(&HashMap::new(), 2);

        let guard = calls.lock().map_err(|e| {
            ObserverError::MutexPoisoned {
                context: "test_single_observer".to_string(),
                reason: e.to_string(),
            }
            .log()
        })?;
        assert_eq!(*guard, vec![0, 1, 2]);
        Ok(())
    }

    #[test]
    fn test_multiple_observers() -> Result<(), ObserverError> {
        let calls1 = Arc::new(Mutex::new(Vec::new()));
        let calls2 = Arc::new(Mutex::new(Vec::new()));

        let observer1 = TestObserver {
            calls: calls1.clone(),
        };
        let observer2 = TestObserver {
            calls: calls2.clone(),
        };

        let mut observers = OptObserverVec::new();
        observers.add(observer1);
        observers.add(observer2);

        assert_eq!(observers.len(), 2);

        observers.notify(&HashMap::new(), 5);

        let guard1 = calls1.lock().map_err(|e| {
            ObserverError::MutexPoisoned {
                context: "test_multiple_observers (calls1)".to_string(),
                reason: e.to_string(),
            }
            .log()
        })?;
        assert_eq!(*guard1, vec![5]);

        let guard2 = calls2.lock().map_err(|e| {
            ObserverError::MutexPoisoned {
                context: "test_multiple_observers (calls2)".to_string(),
                reason: e.to_string(),
            }
            .log()
        })?;
        assert_eq!(*guard2, vec![5]);
        Ok(())
    }
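
    // A small sketch verifying that the default (no-op) trait methods are safe
    // to call on an observer that only implements `on_step`.
    #[test]
    fn test_default_methods_are_noops() {
        let observer = TestObserver {
            calls: Arc::new(Mutex::new(Vec::new())),
        };

        observer.set_iteration_metrics(1.0, 0.5, Some(1e-3), 0.1, None);
        observer.set_matrix_data(None, None);
        observer.on_optimization_complete(&HashMap::new(), 10);
    }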
}