apex_solver/observers/mod.rs
//! Observer pattern for optimization monitoring.
//!
//! This module provides a clean observer pattern for monitoring optimization progress.
//! Observers can be registered with any optimizer and will be notified at each iteration,
//! enabling real-time visualization, logging, metrics collection, and custom analysis.
//!
//! # Design Philosophy
//!
//! The observer pattern provides complete separation between optimization algorithms
//! and monitoring/visualization logic:
//!
//! - **Decoupling**: Optimization logic is independent of how progress is monitored
//! - **Extensibility**: Easy to add new observers (Rerun, CSV, metrics, dashboards)
//! - **Composability**: Multiple observers can run simultaneously
//! - **Zero overhead**: When no observers are registered, notification is a no-op
//!
//! # Architecture
//!
//! ```text
//!     ┌─────────────────┐
//!     │    Optimizer    │
//!     │ (LM/GN/DogLeg)  │
//!     └────────┬────────┘
//!              │ observers.notify(values, iteration)
//!              ├──────────────┬──────────────┬──────────────┐
//!              ▼              ▼              ▼              ▼
//!        ┌──────────┐   ┌──────────┐   ┌──────────┐   ┌──────────┐
//!        │  Rerun   │   │   CSV    │   │ Metrics  │   │  Custom  │
//!        │ Observer │   │ Observer │   │ Observer │   │ Observer │
//!        └──────────┘   └──────────┘   └──────────┘   └──────────┘
//! ```
//!
//! # Examples
//!
//! ## Single Observer
//!
//! ```no_run
//! use apex_solver::{LevenbergMarquardt, LevenbergMarquardtConfig};
//! # use apex_solver::core::problem::Problem;
//! # use std::collections::HashMap;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let problem = Problem::new();
//! # let initial_values = HashMap::new();
//!
//! let config = LevenbergMarquardtConfig::new().with_max_iterations(100);
//! let mut solver = LevenbergMarquardt::with_config(config);
//!
//! #[cfg(feature = "visualization")]
//! {
//!     use apex_solver::observers::RerunObserver;
//!     let rerun_observer = RerunObserver::new(true)?;
//!     solver.add_observer(rerun_observer);
//! }
//!
//! let result = solver.optimize(&problem, &initial_values)?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Multiple Observers
//!
//! ```no_run
//! # use apex_solver::LevenbergMarquardt;
//! # use apex_solver::core::problem::{Problem, VariableEnum};
//! # use apex_solver::observers::OptObserver;
//! # use std::collections::HashMap;
//!
//! // Custom observer that logs to CSV
//! struct CsvObserver {
//!     file: std::fs::File,
//! }
//!
//! impl OptObserver for CsvObserver {
//!     fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {
//!         // Write iteration data to CSV
//!         // ... implementation ...
//!     }
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let problem = Problem::new();
//! # let initial_values = HashMap::new();
//! let mut solver = LevenbergMarquardt::new();
//!
//! // Add Rerun visualization
//! #[cfg(feature = "visualization")]
//! {
//!     use apex_solver::observers::RerunObserver;
//!     solver.add_observer(RerunObserver::new(true)?);
//! }
//!
//! // Add CSV logging
//! // solver.add_observer(CsvObserver { file: ... });
//!
//! let result = solver.optimize(&problem, &initial_values)?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Custom Observer
//!
//! ```no_run
//! use apex_solver::observers::OptObserver;
//! use apex_solver::core::problem::VariableEnum;
//! use std::collections::HashMap;
//!
//! struct MetricsObserver {
//!     max_variables_seen: std::cell::RefCell<usize>,
//! }
//!
//! impl OptObserver for MetricsObserver {
//!     fn on_step(&self, values: &HashMap<String, VariableEnum>, _iteration: usize) {
//!         let count = values.len();
//!         let mut max = self.max_variables_seen.borrow_mut();
//!         *max = (*max).max(count);
//!     }
//! }
//! ```

// Visualization-specific submodules (feature-gated)
#[cfg(feature = "visualization")]
pub mod conversions;
#[cfg(feature = "visualization")]
pub mod visualization;

// Re-export RerunObserver when visualization is enabled
#[cfg(feature = "visualization")]
pub use visualization::RerunObserver;

use crate::core::problem::VariableEnum;
use faer::Mat;
use faer::sparse;
use std::collections::HashMap;
use thiserror::Error;
use tracing::error;

/// Observer-specific error types for apex-solver
#[derive(Debug, Clone, Error)]
pub enum ObserverError {
    /// Failed to initialize Rerun recording stream
    #[error("Failed to initialize Rerun recording stream: {0}")]
    RerunInitialization(String),

    /// Failed to spawn Rerun viewer process
    #[error("Failed to spawn Rerun viewer: {0}")]
    ViewerSpawnFailed(String),

    /// Failed to save recording to file
    #[error("Failed to save recording to file '{path}': {reason}")]
    RecordingSaveFailed { path: String, reason: String },

    /// Failed to log data to Rerun
    #[error("Failed to log data to Rerun at '{entity_path}': {reason}")]
    LoggingFailed { entity_path: String, reason: String },

    /// Failed to convert matrix to visualization format
    #[error("Failed to convert matrix to image: {0}")]
    MatrixVisualizationFailed(String),

    /// Failed to convert tensor data
    #[error("Failed to create tensor data: {0}")]
    TensorConversionFailed(String),

    /// Recording stream is in invalid state
    #[error("Recording stream is in invalid state: {0}")]
    InvalidState(String),

    /// Mutex was poisoned (thread panicked while holding lock)
    #[error("Mutex poisoned in {context}: {reason}")]
    MutexPoisoned { context: String, reason: String },
}

impl ObserverError {
    /// Log the error with `tracing::error` and return self for chaining.
    ///
    /// This method allows for a consistent error logging pattern throughout
    /// the observers module, ensuring all errors are properly recorded.
    ///
    /// # Example
    /// ```ignore
    /// operation()
    ///     .map_err(|e| ObserverError::from(e).log())?;
    /// ```
    #[must_use]
    pub fn log(self) -> Self {
        error!("{}", self);
        self
    }

    /// Log the error with the original source error from a third-party library.
    ///
    /// This method logs both the `ObserverError` and the underlying error
    /// from external libraries (e.g., Rerun's errors). This provides full
    /// debugging context when errors occur in third-party code.
    ///
    /// # Arguments
    /// * `source_error` - The original error from the third-party library (must implement `Debug`)
    ///
    /// # Example
    /// ```ignore
    /// rec.log(entity_path, &data)
    ///     .map_err(|e| {
    ///         ObserverError::LoggingFailed {
    ///             entity_path: "world/points".to_string(),
    ///             reason: format!("{}", e),
    ///         }
    ///         .log_with_source(e)
    ///     })?;
    /// ```
    #[must_use]
    pub fn log_with_source<E: std::fmt::Debug>(self, source_error: E) -> Self {
        error!("{} | Source: {:?}", self, source_error);
        self
    }
}

/// Result type for observer operations
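///
/// # Example
///
/// A minimal sketch of a fallible observer helper returning `ObserverResult`
/// (the function and its logic are illustrative, not part of the API):
///
/// ```no_run
/// use apex_solver::observers::{ObserverError, ObserverResult};
///
/// fn spawn_viewer(viewer_available: bool) -> ObserverResult<()> {
///     if viewer_available {
///         Ok(())
///     } else {
///         // Any of the ObserverError variants can be returned here.
///         Err(ObserverError::ViewerSpawnFailed("viewer binary not found".to_string()))
///     }
/// }
/// ```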
pub type ObserverResult<T> = Result<T, ObserverError>;

/// Observer trait for monitoring optimization progress.
///
/// Implement this trait to create custom observers that are notified at each
/// optimization iteration. Observers receive the current variable values and
/// iteration number, enabling real-time monitoring, visualization, logging,
/// or custom analysis.
///
/// # Design Notes
///
/// - Observers should be lightweight and non-blocking
/// - Errors in observers should not crash optimization (handle internally)
/// - For expensive operations (file I/O, network), consider buffering
/// - Observers receive immutable references (cannot modify optimization state)
///
/// # Thread Safety
///
/// Observers must be `Send` to support parallel optimization in the future.
/// Use interior mutability (`RefCell`, `Mutex`) if you need to mutate state,
/// as in the sketch below.
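///
/// # Example
///
/// A minimal sketch of a thread-safe observer that counts iterations behind a
/// `Mutex` (the type and its field are illustrative):
///
/// ```no_run
/// use apex_solver::observers::OptObserver;
/// use apex_solver::core::problem::VariableEnum;
/// use std::collections::HashMap;
/// use std::sync::Mutex;
///
/// struct IterationCounter {
///     count: Mutex<usize>,
/// }
///
/// impl OptObserver for IterationCounter {
///     fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {
///         // Mutex-based interior mutability keeps the observer `Send`.
///         if let Ok(mut count) = self.count.lock() {
///             *count += 1;
///         }
///     }
/// }
/// ```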
pub trait OptObserver: Send {
    /// Called after each optimization iteration.
    ///
    /// # Arguments
    ///
    /// * `values` - Current variable values (manifold states)
    /// * `iteration` - Current iteration number (0 = initial values, 1+ = after steps)
    ///
    /// # Implementation Guidelines
    ///
    /// - Keep this method fast to avoid slowing optimization
    /// - Handle errors internally (log warnings, don't panic)
    /// - Don't mutate `values` (you receive `&HashMap`)
    /// - Consider buffering expensive operations
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserver;
    /// use apex_solver::core::problem::VariableEnum;
    /// use std::collections::HashMap;
    ///
    /// struct SimpleLogger;
    ///
    /// impl OptObserver for SimpleLogger {
    ///     fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {
    ///         // Track optimization progress
    ///     }
    /// }
    /// ```
    fn on_step(&self, values: &HashMap<String, VariableEnum>, iteration: usize);

    /// Set iteration metrics for visualization and monitoring.
    ///
    /// This method is called before `on_step` to provide optimization metrics
    /// such as cost, gradient norm, and damping parameter. Observers can use
    /// this data for visualization, logging, or analysis.
    ///
    /// # Arguments
    ///
    /// * `cost` - Current cost function value
    /// * `gradient_norm` - L2 norm of the gradient vector
    /// * `damping` - Damping parameter (used by Levenberg-Marquardt; may be `None` for other solvers)
    /// * `step_norm` - L2 norm of the parameter update step
    /// * `step_quality` - Step quality metric (e.g., rho for trust region methods)
    ///
    /// # Default Implementation
    ///
    /// The default implementation does nothing, allowing simple observers to
    /// ignore metrics; an override is sketched below.
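    ///
    /// # Example
    ///
    /// A minimal sketch of an observer that overrides this hook to track the
    /// lowest cost seen so far (the type and its field are illustrative):
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserver;
    /// use apex_solver::core::problem::VariableEnum;
    /// use std::collections::HashMap;
    /// use std::sync::Mutex;
    ///
    /// struct BestCostTracker {
    ///     best_cost: Mutex<f64>,
    /// }
    ///
    /// impl OptObserver for BestCostTracker {
    ///     fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {}
    ///
    ///     fn set_iteration_metrics(
    ///         &self,
    ///         cost: f64,
    ///         _gradient_norm: f64,
    ///         _damping: Option<f64>,
    ///         _step_norm: f64,
    ///         _step_quality: Option<f64>,
    ///     ) {
    ///         // Keep the smallest cost observed across iterations.
    ///         if let Ok(mut best) = self.best_cost.lock() {
    ///             if cost < *best {
    ///                 *best = cost;
    ///             }
    ///         }
    ///     }
    /// }
    /// ```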
    fn set_iteration_metrics(
        &self,
        _cost: f64,
        _gradient_norm: f64,
        _damping: Option<f64>,
        _step_norm: f64,
        _step_quality: Option<f64>,
    ) {
        // Default implementation does nothing
    }

    /// Set matrix data for advanced visualization.
    ///
    /// This method provides access to the Hessian matrix and gradient vector
    /// for observers that want to visualize matrix structure or perform
    /// advanced analysis.
    ///
    /// # Arguments
    ///
    /// * `hessian` - Sparse Hessian matrix (J^T * J)
    /// * `gradient` - Gradient vector (J^T * r)
    ///
    /// # Default Implementation
    ///
    /// The default implementation does nothing, allowing simple observers to
    /// ignore matrices; an override is sketched below.
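    ///
    /// # Example
    ///
    /// A minimal sketch of an observer that overrides this hook to report the
    /// Hessian's dimensions (the type is illustrative; `nrows`/`ncols` are the
    /// standard `faer` shape accessors):
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserver;
    /// use apex_solver::core::problem::VariableEnum;
    /// use faer::{Mat, sparse};
    /// use std::collections::HashMap;
    ///
    /// struct HessianShapeLogger;
    ///
    /// impl OptObserver for HessianShapeLogger {
    ///     fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {}
    ///
    ///     fn set_matrix_data(
    ///         &self,
    ///         hessian: Option<sparse::SparseColMat<usize, f64>>,
    ///         _gradient: Option<Mat<f64>>,
    ///     ) {
    ///         // Report the system size whenever matrix data is provided.
    ///         if let Some(h) = hessian {
    ///             println!("Hessian: {} x {}", h.nrows(), h.ncols());
    ///         }
    ///     }
    /// }
    /// ```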
    fn set_matrix_data(
        &self,
        _hessian: Option<sparse::SparseColMat<usize, f64>>,
        _gradient: Option<Mat<f64>>,
    ) {
        // Default implementation does nothing
    }
}

/// Collection of observers for optimization monitoring.
///
/// This struct manages a vector of observers and provides a convenient
/// `notify()` method to call all observers at once. Optimizers use this
/// internally to manage their observers.
///
/// # Usage
///
/// Typically you don't create this directly - use the `add_observer()` method
/// on optimizers. However, you can use it for custom optimization algorithms:
///
/// ```no_run
/// use apex_solver::observers::OptObserverVec;
/// use apex_solver::core::problem::VariableEnum;
/// use std::collections::HashMap;
///
/// struct MyOptimizer {
///     observers: OptObserverVec,
///     // ... other fields ...
/// }
///
/// impl MyOptimizer {
///     fn step(&mut self, values: &HashMap<String, VariableEnum>, iteration: usize) {
///         // ... optimization logic ...
///
///         // Notify all observers
///         self.observers.notify(values, iteration);
///     }
/// }
/// ```
#[derive(Default)]
pub struct OptObserverVec {
    observers: Vec<Box<dyn OptObserver>>,
}

impl OptObserverVec {
    /// Create a new empty observer collection.
    pub fn new() -> Self {
        Self {
            observers: Vec::new(),
        }
    }

    /// Add an observer to the collection.
    ///
    /// The observer will be called at each optimization iteration in the order
    /// it was added.
    ///
    /// # Arguments
    ///
    /// * `observer` - Any type implementing `OptObserver`
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use apex_solver::observers::{OptObserver, OptObserverVec};
    /// use apex_solver::core::problem::VariableEnum;
    /// use std::collections::HashMap;
    ///
    /// struct MyObserver;
    /// impl OptObserver for MyObserver {
    ///     fn on_step(&self, _values: &HashMap<String, VariableEnum>, _iteration: usize) {
    ///         // Handle optimization step
    ///     }
    /// }
    ///
    /// let mut observers = OptObserverVec::new();
    /// observers.add(MyObserver);
    /// ```
    pub fn add(&mut self, observer: impl OptObserver + 'static) {
        self.observers.push(Box::new(observer));
    }

    /// Set iteration metrics for all observers.
    ///
    /// Calls `set_iteration_metrics()` on each registered observer. This should
    /// be called before `notify()` to provide optimization metrics, as the
    /// sketch below shows.
    ///
    /// # Arguments
    ///
    /// * `cost` - Current cost function value
    /// * `gradient_norm` - L2 norm of the gradient vector
    /// * `damping` - Damping parameter (may be `None`)
    /// * `step_norm` - L2 norm of the parameter update step
    /// * `step_quality` - Step quality metric (may be `None`)
    #[inline]
    pub fn set_iteration_metrics(
        &self,
        cost: f64,
        gradient_norm: f64,
        damping: Option<f64>,
        step_norm: f64,
        step_quality: Option<f64>,
    ) {
        for observer in &self.observers {
            observer.set_iteration_metrics(cost, gradient_norm, damping, step_norm, step_quality);
        }
    }

    /// Set matrix data for all observers.
    ///
    /// Calls `set_matrix_data()` on each registered observer. This should
    /// be called before `notify()` to provide matrix data for visualization.
    ///
    /// # Arguments
    ///
    /// * `hessian` - Sparse Hessian matrix
    /// * `gradient` - Gradient vector
    #[inline]
    pub fn set_matrix_data(
        &self,
        hessian: Option<sparse::SparseColMat<usize, f64>>,
        gradient: Option<Mat<f64>>,
    ) {
        for observer in &self.observers {
            observer.set_matrix_data(hessian.clone(), gradient.clone());
        }
    }

    /// Notify all observers with current optimization state.
    ///
    /// Calls `on_step()` on each registered observer in order. If no observers
    /// are registered, this is a no-op with zero overhead.
    ///
    /// # Arguments
    ///
    /// * `values` - Current variable values
    /// * `iteration` - Current iteration number
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use apex_solver::observers::OptObserverVec;
    /// use std::collections::HashMap;
    ///
    /// let observers = OptObserverVec::new();
    /// let values = HashMap::new();
    ///
    /// // Notify all observers (safe even if empty)
    /// observers.notify(&values, 0);
    /// ```
    #[inline]
    pub fn notify(&self, values: &HashMap<String, VariableEnum>, iteration: usize) {
        for observer in &self.observers {
            observer.on_step(values, iteration);
        }
    }

    /// Check if any observers are registered.
    ///
    /// Useful for conditional logic or debugging.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.observers.is_empty()
    }

    /// Get the number of registered observers.
    #[inline]
    pub fn len(&self) -> usize {
        self.observers.len()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::{Arc, Mutex};

    #[derive(Clone)]
    struct TestObserver {
        calls: Arc<Mutex<Vec<usize>>>,
    }

    impl OptObserver for TestObserver {
        fn on_step(&self, _values: &HashMap<String, VariableEnum>, iteration: usize) {
            // In test code, we log and ignore mutex poisoning errors since they indicate test bugs
            if let Ok(mut guard) = self.calls.lock().map_err(|e| {
                ObserverError::MutexPoisoned {
                    context: "TestObserver::on_step".to_string(),
                    reason: e.to_string(),
                }
                .log()
            }) {
                guard.push(iteration);
            }
        }
    }

    #[test]
    fn test_empty_observers() {
        let observers = OptObserverVec::new();
        assert!(observers.is_empty());
        assert_eq!(observers.len(), 0);

        // Should not panic with no observers
        observers.notify(&HashMap::new(), 0);
    }

    #[test]
    fn test_single_observer() -> Result<(), ObserverError> {
        let calls = Arc::new(Mutex::new(Vec::new()));
        let observer = TestObserver {
            calls: calls.clone(),
        };

        let mut observers = OptObserverVec::new();
        observers.add(observer);

        assert_eq!(observers.len(), 1);

        observers.notify(&HashMap::new(), 0);
        observers.notify(&HashMap::new(), 1);
        observers.notify(&HashMap::new(), 2);

        let guard = calls.lock().map_err(|e| {
            ObserverError::MutexPoisoned {
                context: "test_single_observer".to_string(),
                reason: e.to_string(),
            }
            .log()
        })?;
        assert_eq!(*guard, vec![0, 1, 2]);
        Ok(())
    }

    #[test]
    fn test_multiple_observers() -> Result<(), ObserverError> {
        let calls1 = Arc::new(Mutex::new(Vec::new()));
        let calls2 = Arc::new(Mutex::new(Vec::new()));

        let observer1 = TestObserver {
            calls: calls1.clone(),
        };
        let observer2 = TestObserver {
            calls: calls2.clone(),
        };

        let mut observers = OptObserverVec::new();
        observers.add(observer1);
        observers.add(observer2);

        assert_eq!(observers.len(), 2);

        observers.notify(&HashMap::new(), 5);

        let guard1 = calls1.lock().map_err(|e| {
            ObserverError::MutexPoisoned {
                context: "test_multiple_observers (calls1)".to_string(),
                reason: e.to_string(),
            }
            .log()
        })?;
        assert_eq!(*guard1, vec![5]);

        let guard2 = calls2.lock().map_err(|e| {
            ObserverError::MutexPoisoned {
                context: "test_multiple_observers (calls2)".to_string(),
                reason: e.to_string(),
            }
            .log()
        })?;
        assert_eq!(*guard2, vec![5]);
        Ok(())
    }
}