apex_solver/core/problem.rs
//! Optimization problem definition and sparse Jacobian computation.
//!
//! The `Problem` struct is the central component that defines a factor graph optimization problem.
//! It manages residual blocks (constraints), variables, and the construction of sparse Jacobian
//! matrices for efficient nonlinear least squares optimization.
//!
//! # Factor Graph Representation
//!
//! The optimization problem is represented as a bipartite factor graph:
//!
//! ```text
//! Variables:  x0 --- x1 --- x2 --- x3
//!             |      |      |      |
//! Factors:    f0     f1     f2     f3   (constraints/measurements)
//! ```
//!
//! Each factor connects one or more variables and contributes a residual (error) term to the
//! overall cost function:
//!
//! ```text
//! minimize Σ_i ρ(||r_i(x)||²)
//! ```
//!
//! where `r_i(x)` is the residual for factor i, and ρ is an optional robust loss function.
//!
//! # Key Responsibilities
//!
//! 1. **Residual Block Management**: Add/remove factors and track their structure
//! 2. **Variable Management**: Initialize variables with manifold types and constraints
//! 3. **Sparsity Pattern**: Build the symbolic structure for efficient sparse linear algebra
//! 4. **Linearization**: Compute residuals and Jacobians in parallel
//! 5. **Covariance**: Extract per-variable uncertainty estimates after optimization
//!
//! # Sparse Jacobian Structure
//!
//! The Jacobian matrix `J = ∂r/∂x` is sparse because each factor depends on only a small
//! subset of variables. For example, a between factor connecting x0 and x1 contributes
//! a 3×6 block (SE2) or 6×12 block (SE3) to the Jacobian, leaving the rest as zeros.
//!
//! The `Problem` pre-computes the sparsity pattern once, then efficiently fills in the
//! numerical values during each iteration.
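//!
//! As an illustrative sketch (using the SE2 between-factor example above), two chained
//! between factors over three poses produce this block layout:
//!
//! ```text
//!             x0       x1       x2      (3 columns each)
//! f(x0,x1)  [ J_00     J_01     0    ]  (3 rows)
//! f(x1,x2)  [ 0        J_11     J_12 ]  (3 rows)
//! ```
//!
//! Only the `J_ij` blocks are stored; the zero blocks never enter the sparse pattern.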
//!
//! # Mixed Manifold Support
//!
//! The `Problem` supports mixed manifold types in a single optimization problem via
//! [`VariableEnum`]. This allows:
//! - SE2 and SE3 poses in the same graph
//! - SO3 rotations with R³ landmarks
//! - Any combination of supported manifolds
//!
//! # Example: Building a Problem
//!
//! ```
//! use apex_solver::core::problem::Problem;
//! use apex_solver::factors::{BetweenFactor, PriorFactor};
//! use apex_solver::core::loss_functions::HuberLoss;
//! use apex_solver::manifold::ManifoldType;
//! use nalgebra::{DVector, dvector};
//! use std::collections::HashMap;
//! use apex_solver::manifold::se2::SE2;
//! # use apex_solver::error::ApexSolverResult;
//! # fn example() -> ApexSolverResult<()> {
//!
//! let mut problem = Problem::new();
//!
//! // Add a prior factor to anchor the first pose
//! let prior = Box::new(PriorFactor {
//!     data: dvector![0.0, 0.0, 0.0],
//! });
//! problem.add_residual_block(&["x0"], prior, None);
//!
//! // Add a between factor with a robust loss
//! let between = Box::new(BetweenFactor::new(SE2::from_xy_angle(1.0, 0.0, 0.1)));
//! let loss: Option<Box<dyn apex_solver::core::loss_functions::LossFunction + Send>> =
//!     Some(Box::new(HuberLoss::new(1.0)?));
//! problem.add_residual_block(&["x0", "x1"], between, loss);
//!
//! // Initialize variables
//! let mut initial_values = HashMap::new();
//! initial_values.insert("x0".to_string(), (ManifoldType::SE2, dvector![0.0, 0.0, 0.0]));
//! initial_values.insert("x1".to_string(), (ManifoldType::SE2, dvector![0.9, 0.1, 0.12]));
//!
//! let variables = problem.initialize_variables(&initial_values);
//! # Ok(())
//! # }
//! # example().unwrap();
//! ```

use std::{
    collections::{HashMap, HashSet},
    fs::File,
    io::{Error, Write},
    sync::{Arc, Mutex},
};

use faer::{
    Col, Mat, MatRef,
    sparse::{Argsort, Pair, SparseColMat, SymbolicSparseColMat},
};
use nalgebra::DVector;
use rayon::prelude::*;
use tracing::warn;

use crate::{
    core::{
        CoreError, corrector::Corrector, loss_functions::LossFunction,
        residual_block::ResidualBlock, variable::Variable,
    },
    error::{ApexSolverError, ApexSolverResult},
    factors::Factor,
    linalg::{SparseLinearSolver, extract_variable_covariances},
    manifold::{ManifoldType, rn, se2, se3, so2, so3},
};

/// Symbolic structure for sparse matrix operations.
///
/// Contains the sparsity pattern (which entries are non-zero) and an ordering
/// for efficient numerical computation. This is computed once at the beginning
/// and reused throughout optimization.
///
/// # Fields
///
/// - `pattern`: The symbolic sparse column matrix structure (row/col indices of non-zeros)
/// - `order`: A fill-reducing ordering/permutation for numerical stability
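///
/// # Example
///
/// A minimal sketch of the intended flow, assuming the variables and the
/// variable-to-column map have already been built:
///
/// ```no_run
/// # use apex_solver::core::problem::Problem;
/// # use std::collections::HashMap;
/// # fn example() -> apex_solver::error::ApexSolverResult<()> {
/// # let problem = Problem::new();
/// # let variables = HashMap::new();
/// # let variable_index_map = HashMap::new();
/// # let total_dof = 0;
/// let structure = problem.build_symbolic_structure(&variables, &variable_index_map, total_dof)?;
/// // `structure.pattern` holds the non-zero layout; `structure.order` is the
/// // argsort used to fill in numerical values each iteration.
/// # Ok(())
/// # }
/// ```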
pub struct SymbolicStructure {
    pub pattern: SymbolicSparseColMat<usize>,
    pub order: Argsort<usize>,
}

/// Enum to handle mixed manifold variable types in a single problem.
#[derive(Clone)]
pub enum VariableEnum {
    Rn(Variable<rn::Rn>),
    SE2(Variable<se2::SE2>),
    SE3(Variable<se3::SE3>),
    SO2(Variable<so2::SO2>),
    SO3(Variable<so3::SO3>),
}

impl VariableEnum {
    /// Get the manifold type for this variable.
    pub fn manifold_type(&self) -> ManifoldType {
        match self {
            VariableEnum::Rn(_) => ManifoldType::RN,
            VariableEnum::SE2(_) => ManifoldType::SE2,
            VariableEnum::SE3(_) => ManifoldType::SE3,
            VariableEnum::SO2(_) => ManifoldType::SO2,
            VariableEnum::SO3(_) => ManifoldType::SO3,
        }
    }

    /// Get the tangent space size (degrees of freedom) for this variable.
    pub fn get_size(&self) -> usize {
        match self {
            VariableEnum::Rn(var) => var.get_size(),
            VariableEnum::SE2(var) => var.get_size(),
            VariableEnum::SE3(var) => var.get_size(),
            VariableEnum::SO2(var) => var.get_size(),
            VariableEnum::SO3(var) => var.get_size(),
        }
    }

    /// Convert to a `DVector` for use with the `Factor` trait.
    pub fn to_vector(&self) -> DVector<f64> {
        match self {
            VariableEnum::Rn(var) => var.value.clone().into(),
            VariableEnum::SE2(var) => var.value.clone().into(),
            VariableEnum::SE3(var) => var.value.clone().into(),
            VariableEnum::SO2(var) => var.value.clone().into(),
            VariableEnum::SO3(var) => var.value.clone().into(),
        }
    }

    /// Apply a tangent space step to update this variable.
    ///
    /// This method applies a manifold plus operation: `x_new = x ⊞ δx`,
    /// where `δx` is a tangent vector. It supports all manifold types.
    ///
    /// # Arguments
    /// * `step_slice` - View into the full step vector for this variable's DOF
    ///
    /// # Implementation Notes
    /// Uses explicit clone instead of unsafe memory copy (`IntoNalgebra`) for small vectors.
    /// This is safe and performant for typical manifold dimensions (1-6 DOF).
    ///
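    /// # Example
    ///
    /// A minimal sketch (a zero step leaves the pose unchanged); the `Variable`
    /// and `SE2` paths are assumed from this module's imports:
    ///
    /// ```no_run
    /// # use apex_solver::core::problem::VariableEnum;
    /// # use apex_solver::core::variable::Variable;
    /// # use apex_solver::manifold::se2::SE2;
    /// # use nalgebra::dvector;
    /// let mut var = VariableEnum::SE2(Variable::new(SE2::from(dvector![0.0, 0.0, 0.0])));
    /// let step = faer::Mat::<f64>::zeros(3, 1); // δx = 0
    /// var.apply_tangent_step(step.as_ref());
    /// ```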
    pub fn apply_tangent_step(&mut self, step_slice: MatRef<f64>) {
        match self {
            VariableEnum::SE3(var) => {
                // SE3 has 6 DOF in tangent space
                let mut step_data: Vec<f64> = (0..6).map(|i| step_slice[(i, 0)]).collect();

                // Enforce fixed indices: zero out step components for fixed DOF
                for &fixed_idx in &var.fixed_indices {
                    if fixed_idx < 6 {
                        step_data[fixed_idx] = 0.0;
                    }
                }

                let step_dvector = DVector::from_vec(step_data);
                let tangent = se3::SE3Tangent::from(step_dvector);
                let new_value = var.plus(&tangent);
                var.set_value(new_value);
            }
            VariableEnum::SE2(var) => {
                // SE2 has 3 DOF in tangent space
                let mut step_data: Vec<f64> = (0..3).map(|i| step_slice[(i, 0)]).collect();

                // Enforce fixed indices: zero out step components for fixed DOF
                for &fixed_idx in &var.fixed_indices {
                    if fixed_idx < 3 {
                        step_data[fixed_idx] = 0.0;
                    }
                }

                let step_dvector = DVector::from_vec(step_data);
                let tangent = se2::SE2Tangent::from(step_dvector);
                let new_value = var.plus(&tangent);
                var.set_value(new_value);
            }
            VariableEnum::SO3(var) => {
                // SO3 has 3 DOF in tangent space
                let mut step_data: Vec<f64> = (0..3).map(|i| step_slice[(i, 0)]).collect();

                // Enforce fixed indices: zero out step components for fixed DOF
                for &fixed_idx in &var.fixed_indices {
                    if fixed_idx < 3 {
                        step_data[fixed_idx] = 0.0;
                    }
                }

                let step_dvector = DVector::from_vec(step_data);
                let tangent = so3::SO3Tangent::from(step_dvector);
                let new_value = var.plus(&tangent);
                var.set_value(new_value);
            }
            VariableEnum::SO2(var) => {
                // SO2 has 1 DOF in tangent space
                let mut step_data = step_slice[(0, 0)];

                // Enforce fixed indices: zero out the step if index 0 is fixed
                if var.fixed_indices.contains(&0) {
                    step_data = 0.0;
                }

                let step_dvector = DVector::from_vec(vec![step_data]);
                let tangent = so2::SO2Tangent::from(step_dvector);
                let new_value = var.plus(&tangent);
                var.set_value(new_value);
            }
            VariableEnum::Rn(var) => {
                // Rn has dynamic size
                let size = var.get_size();
                let mut step_data: Vec<f64> = (0..size).map(|i| step_slice[(i, 0)]).collect();

                // Enforce fixed indices: zero out step components for fixed DOF
                for &fixed_idx in &var.fixed_indices {
                    if fixed_idx < size {
                        step_data[fixed_idx] = 0.0;
                    }
                }

                let step_dvector = DVector::from_vec(step_data);
                let tangent = rn::RnTangent::new(step_dvector);
                let new_value = var.plus(&tangent);
                var.set_value(new_value);
            }
        }
    }

    /// Get the covariance matrix for this variable (if computed).
    ///
    /// Returns `None` if the covariance has not been computed.
    ///
    /// # Returns
    /// Reference to the covariance matrix in tangent space
    pub fn get_covariance(&self) -> Option<&Mat<f64>> {
        match self {
            VariableEnum::Rn(var) => var.get_covariance(),
            VariableEnum::SE2(var) => var.get_covariance(),
            VariableEnum::SE3(var) => var.get_covariance(),
            VariableEnum::SO2(var) => var.get_covariance(),
            VariableEnum::SO3(var) => var.get_covariance(),
        }
    }

    /// Set the covariance matrix for this variable.
    ///
    /// The covariance matrix should be square with dimension equal to
    /// the tangent space dimension of this variable.
    ///
    /// # Arguments
    /// * `cov` - Covariance matrix in tangent space
    pub fn set_covariance(&mut self, cov: Mat<f64>) {
        match self {
            VariableEnum::Rn(var) => var.set_covariance(cov),
            VariableEnum::SE2(var) => var.set_covariance(cov),
            VariableEnum::SE3(var) => var.set_covariance(cov),
            VariableEnum::SO2(var) => var.set_covariance(cov),
            VariableEnum::SO3(var) => var.set_covariance(cov),
        }
    }

    /// Clear the covariance matrix for this variable.
    pub fn clear_covariance(&mut self) {
        match self {
            VariableEnum::Rn(var) => var.clear_covariance(),
            VariableEnum::SE2(var) => var.clear_covariance(),
            VariableEnum::SE3(var) => var.clear_covariance(),
            VariableEnum::SO2(var) => var.clear_covariance(),
            VariableEnum::SO3(var) => var.clear_covariance(),
        }
    }

    /// Get the bounds for this variable.
    ///
    /// Returns a reference to the bounds map where keys are indices and values are (lower, upper) pairs.
    pub fn get_bounds(&self) -> &HashMap<usize, (f64, f64)> {
        match self {
            VariableEnum::Rn(var) => &var.bounds,
            VariableEnum::SE2(var) => &var.bounds,
            VariableEnum::SE3(var) => &var.bounds,
            VariableEnum::SO2(var) => &var.bounds,
            VariableEnum::SO3(var) => &var.bounds,
        }
    }

    /// Get the fixed indices for this variable.
    ///
    /// Returns a reference to the set of indices that should remain fixed during optimization.
    pub fn get_fixed_indices(&self) -> &HashSet<usize> {
        match self {
            VariableEnum::Rn(var) => &var.fixed_indices,
            VariableEnum::SE2(var) => &var.fixed_indices,
            VariableEnum::SE3(var) => &var.fixed_indices,
            VariableEnum::SO2(var) => &var.fixed_indices,
            VariableEnum::SO3(var) => &var.fixed_indices,
        }
    }

    /// Set the value of this variable from a vector representation.
    ///
    /// This is used to update the variable after applying constraints (bounds and fixed indices).
    pub fn set_from_vector(&mut self, vec: &DVector<f64>) {
        match self {
            VariableEnum::Rn(var) => {
                var.set_value(rn::Rn::new(vec.clone()));
            }
            VariableEnum::SE2(var) => {
                let new_se2: se2::SE2 = vec.clone().into();
                var.set_value(new_se2);
            }
            VariableEnum::SE3(var) => {
                let new_se3: se3::SE3 = vec.clone().into();
                var.set_value(new_se3);
            }
            VariableEnum::SO2(var) => {
                let new_so2: so2::SO2 = vec.clone().into();
                var.set_value(new_so2);
            }
            VariableEnum::SO3(var) => {
                let new_so3: so3::SO3 = vec.clone().into();
                var.set_value(new_so3);
            }
        }
    }
}

/// The optimization problem definition for factor graph optimization.
///
/// Manages residual blocks (factors/constraints), variables, and the sparse Jacobian structure.
/// Supports mixed manifold types (SE2, SE3, SO2, SO3, Rn) in a single problem and provides
/// efficient parallel residual/Jacobian computation.
///
/// # Architecture
///
/// The `Problem` acts as a container and coordinator:
/// - Stores all residual blocks (factors with optional loss functions)
/// - Tracks the global structure (which variables connect to which factors)
/// - Builds and maintains the sparse Jacobian pattern
/// - Provides parallel residual/Jacobian evaluation using rayon
/// - Manages variable constraints (fixed indices, bounds)
///
/// # Workflow
///
/// 1. **Construction**: Create a new `Problem` with `Problem::new()`
/// 2. **Add Factors**: Use `add_residual_block()` to add constraints
/// 3. **Initialize Variables**: Use `initialize_variables()` with initial values
/// 4. **Build Sparsity**: Use `build_symbolic_structure()` once before optimization
/// 5. **Linearize**: Call `compute_residual_and_jacobian_sparse()` each iteration
/// 6. **Extract Covariance**: Use `compute_and_set_covariances()` after convergence
///
/// # Example
///
/// ```
/// use apex_solver::core::problem::Problem;
/// use apex_solver::factors::BetweenFactor;
/// use apex_solver::manifold::ManifoldType;
/// use apex_solver::manifold::se2::SE2;
/// use nalgebra::dvector;
/// use std::collections::HashMap;
///
/// let mut problem = Problem::new();
///
/// // Add a between factor
/// let factor = Box::new(BetweenFactor::new(SE2::from_xy_angle(1.0, 0.0, 0.1)));
/// problem.add_residual_block(&["x0", "x1"], factor, None);
///
/// // Initialize variables
/// let mut initial = HashMap::new();
/// initial.insert("x0".to_string(), (ManifoldType::SE2, dvector![0.0, 0.0, 0.0]));
/// initial.insert("x1".to_string(), (ManifoldType::SE2, dvector![1.0, 0.0, 0.1]));
///
/// let variables = problem.initialize_variables(&initial);
/// assert_eq!(variables.len(), 2);
/// ```
pub struct Problem {
    /// Total dimension of the stacked residual vector (sum of all residual block dimensions)
    pub total_residual_dimension: usize,

    /// Counter for assigning unique IDs to residual blocks
    residual_id_count: usize,

    /// Map from residual block ID to ResidualBlock instance
    residual_blocks: HashMap<usize, ResidualBlock>,

    /// Variables with fixed indices (e.g., fix the first pose's x,y coordinates).
    /// Maps variable name -> set of indices to fix
    pub fixed_variable_indexes: HashMap<String, HashSet<usize>>,

    /// Variable bounds (box constraints on individual DOF).
    /// Maps variable name -> (index -> (lower_bound, upper_bound))
    pub variable_bounds: HashMap<String, HashMap<usize, (f64, f64)>>,
}

impl Default for Problem {
    fn default() -> Self {
        Self::new()
    }
}

impl Problem {
    /// Create a new empty optimization problem.
    ///
    /// # Returns
    ///
    /// A new `Problem` with no residual blocks or variables
    ///
    /// # Example
    ///
    /// ```
    /// use apex_solver::core::problem::Problem;
    ///
    /// let problem = Problem::new();
    /// assert_eq!(problem.num_residual_blocks(), 0);
    /// assert_eq!(problem.total_residual_dimension, 0);
    /// ```
    pub fn new() -> Self {
        Self {
            total_residual_dimension: 0,
            residual_id_count: 0,
            residual_blocks: HashMap::new(),
            fixed_variable_indexes: HashMap::new(),
            variable_bounds: HashMap::new(),
        }
    }

    /// Add a residual block (factor with optional loss function) to the problem.
    ///
    /// This is the primary method for building the factor graph. Each call adds one constraint
    /// connecting one or more variables.
    ///
    /// # Arguments
    ///
    /// * `variable_key_size_list` - Names of the variables this factor connects (order matters)
    /// * `factor` - The factor implementation that computes residuals and Jacobians
    /// * `loss_func` - Optional robust loss function for outlier rejection
    ///
    /// # Returns
    ///
    /// The unique ID assigned to this residual block
    ///
    /// # Example
    ///
    /// ```
    /// use apex_solver::core::problem::Problem;
    /// use apex_solver::factors::{BetweenFactor, PriorFactor};
    /// use apex_solver::core::loss_functions::HuberLoss;
    /// use nalgebra::dvector;
    /// use apex_solver::manifold::se2::SE2;
    /// # use apex_solver::error::ApexSolverResult;
    /// # fn example() -> ApexSolverResult<()> {
    ///
    /// let mut problem = Problem::new();
    ///
    /// // Add a prior factor (unary constraint)
    /// let prior = Box::new(PriorFactor { data: dvector![0.0, 0.0, 0.0] });
    /// let id1 = problem.add_residual_block(&["x0"], prior, None);
    ///
    /// // Add a between factor with robust loss (binary constraint)
    /// let between = Box::new(BetweenFactor::new(SE2::from_xy_angle(1.0, 0.0, 0.1)));
    /// let loss: Option<Box<dyn apex_solver::core::loss_functions::LossFunction + Send>> =
    ///     Some(Box::new(HuberLoss::new(1.0)?));
    /// let id2 = problem.add_residual_block(&["x0", "x1"], between, loss);
    ///
    /// assert_eq!(id1, 0);
    /// assert_eq!(id2, 1);
    /// assert_eq!(problem.num_residual_blocks(), 2);
    /// # Ok(())
    /// # }
    /// # example().unwrap();
    /// ```
    pub fn add_residual_block(
        &mut self,
        variable_key_size_list: &[&str],
        factor: Box<dyn Factor + Send>,
        loss_func: Option<Box<dyn LossFunction + Send>>,
    ) -> usize {
        let new_residual_dimension = factor.get_dimension();
        self.residual_blocks.insert(
            self.residual_id_count,
            ResidualBlock::new(
                self.residual_id_count,
                self.total_residual_dimension,
                variable_key_size_list,
                factor,
                loss_func,
            ),
        );
        let block_id = self.residual_id_count;
        self.residual_id_count += 1;

        self.total_residual_dimension += new_residual_dimension;

        block_id
    }

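    /// Remove a residual block by ID, updating the total residual dimension.
    ///
    /// Returns the removed [`ResidualBlock`], or `None` if no block with that ID exists.
    ///
    /// A minimal sketch, reusing the factor types from the examples above:
    ///
    /// ```
    /// # use apex_solver::core::problem::Problem;
    /// # use apex_solver::factors::PriorFactor;
    /// # use nalgebra::dvector;
    /// let mut problem = Problem::new();
    /// let id = problem.add_residual_block(
    ///     &["x0"],
    ///     Box::new(PriorFactor { data: dvector![0.0, 0.0, 0.0] }),
    ///     None,
    /// );
    /// assert!(problem.remove_residual_block(id).is_some());
    /// assert_eq!(problem.num_residual_blocks(), 0);
    /// ```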
    pub fn remove_residual_block(&mut self, block_id: usize) -> Option<ResidualBlock> {
        if let Some(residual_block) = self.residual_blocks.remove(&block_id) {
            self.total_residual_dimension -= residual_block.factor.get_dimension();
            Some(residual_block)
        } else {
            None
        }
    }

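    /// Fix a single tangent-space index of a variable so it is held constant
    /// during optimization (e.g., fix the first pose's x and y coordinates).
    ///
    /// Takes effect when [`Problem::initialize_variables`] is called.
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// # use apex_solver::core::problem::Problem;
    /// let mut problem = Problem::new();
    /// problem.fix_variable("x0", 0); // fix x
    /// problem.fix_variable("x0", 1); // fix y
    /// assert_eq!(problem.fixed_variable_indexes["x0"].len(), 2);
    /// ```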
    pub fn fix_variable(&mut self, var_to_fix: &str, idx: usize) {
        self.fixed_variable_indexes
            .entry(var_to_fix.to_owned())
            .or_default()
            .insert(idx);
    }

    /// Remove all fixed indices for a variable, making it fully free again.
    pub fn unfix_variable(&mut self, var_to_unfix: &str) {
        self.fixed_variable_indexes.remove(var_to_unfix);
    }

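    /// Set box bounds `[lower_bound, upper_bound]` on a single tangent-space
    /// index of a variable. Logs a warning and leaves the bounds unchanged if
    /// `lower_bound > upper_bound`.
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// # use apex_solver::core::problem::Problem;
    /// let mut problem = Problem::new();
    /// problem.set_variable_bounds("x2", 0, -1.0, 1.0);
    /// assert_eq!(problem.variable_bounds["x2"][&0], (-1.0, 1.0));
    /// ```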
    pub fn set_variable_bounds(
        &mut self,
        var_to_bound: &str,
        idx: usize,
        lower_bound: f64,
        upper_bound: f64,
    ) {
        if lower_bound > upper_bound {
            warn!(
                "lower bound {} is larger than upper bound {} for {}[{}]; bounds not set",
                lower_bound, upper_bound, var_to_bound, idx
            );
        } else {
            self.variable_bounds
                .entry(var_to_bound.to_owned())
                .or_default()
                .insert(idx, (lower_bound, upper_bound));
        }
    }

    /// Remove all bounds for a variable.
    pub fn remove_variable_bounds(&mut self, var_to_unbound: &str) {
        self.variable_bounds.remove(var_to_unbound);
    }

    /// Initialize variables from initial values with manifold types.
    ///
    /// Converts raw initial values into typed `Variable<M>` instances wrapped in `VariableEnum`.
    /// This method also applies any fixed indices or bounds that were set via `fix_variable()`
    /// or `set_variable_bounds()`.
    ///
    /// # Arguments
    ///
    /// * `initial_values` - Map from variable name to (manifold type, initial value vector)
    ///
    /// # Returns
    ///
    /// Map from variable name to `VariableEnum` (typed variables ready for optimization)
    ///
    /// # Manifold Formats
    ///
    /// - **SE2**: `[x, y, theta]` (3 elements)
    /// - **SE3**: `[tx, ty, tz, qw, qx, qy, qz]` (7 elements)
    /// - **SO2**: `[theta]` (1 element)
    /// - **SO3**: `[qw, qx, qy, qz]` (4 elements)
    /// - **Rn**: `[x1, x2, ..., xn]` (n elements)
    ///
    /// # Example
    ///
    /// ```
    /// use apex_solver::core::problem::Problem;
    /// use apex_solver::manifold::ManifoldType;
    /// use nalgebra::dvector;
    /// use std::collections::HashMap;
    ///
    /// let problem = Problem::new();
    ///
    /// let mut initial = HashMap::new();
    /// initial.insert("pose0".to_string(), (ManifoldType::SE2, dvector![0.0, 0.0, 0.0]));
    /// initial.insert("pose1".to_string(), (ManifoldType::SE2, dvector![1.0, 0.0, 0.1]));
    /// initial.insert("landmark".to_string(), (ManifoldType::RN, dvector![5.0, 3.0]));
    ///
    /// let variables = problem.initialize_variables(&initial);
    /// assert_eq!(variables.len(), 3);
    /// ```
    pub fn initialize_variables(
        &self,
        initial_values: &HashMap<String, (ManifoldType, DVector<f64>)>,
    ) -> HashMap<String, VariableEnum> {
        let variables: HashMap<String, VariableEnum> = initial_values
            .iter()
            .map(|(k, v)| {
                let variable_enum = match v.0 {
                    ManifoldType::SO2 => {
                        let mut var = Variable::new(so2::SO2::from(v.1.clone()));
                        if let Some(indexes) = self.fixed_variable_indexes.get(k) {
                            var.fixed_indices = indexes.clone();
                        }
                        if let Some(bounds) = self.variable_bounds.get(k) {
                            var.bounds = bounds.clone();
                        }
                        VariableEnum::SO2(var)
                    }
                    ManifoldType::SO3 => {
                        let mut var = Variable::new(so3::SO3::from(v.1.clone()));
                        if let Some(indexes) = self.fixed_variable_indexes.get(k) {
                            var.fixed_indices = indexes.clone();
                        }
                        if let Some(bounds) = self.variable_bounds.get(k) {
                            var.bounds = bounds.clone();
                        }
                        VariableEnum::SO3(var)
                    }
                    ManifoldType::SE2 => {
                        let mut var = Variable::new(se2::SE2::from(v.1.clone()));
                        if let Some(indexes) = self.fixed_variable_indexes.get(k) {
                            var.fixed_indices = indexes.clone();
                        }
                        if let Some(bounds) = self.variable_bounds.get(k) {
                            var.bounds = bounds.clone();
                        }
                        VariableEnum::SE2(var)
                    }
                    ManifoldType::SE3 => {
                        let mut var = Variable::new(se3::SE3::from(v.1.clone()));
                        if let Some(indexes) = self.fixed_variable_indexes.get(k) {
                            var.fixed_indices = indexes.clone();
                        }
                        if let Some(bounds) = self.variable_bounds.get(k) {
                            var.bounds = bounds.clone();
                        }
                        VariableEnum::SE3(var)
                    }
                    ManifoldType::RN => {
                        let mut var = Variable::new(rn::Rn::from(v.1.clone()));
                        if let Some(indexes) = self.fixed_variable_indexes.get(k) {
                            var.fixed_indices = indexes.clone();
                        }
                        if let Some(bounds) = self.variable_bounds.get(k) {
                            var.bounds = bounds.clone();
                        }
                        VariableEnum::Rn(var)
                    }
                };

                (k.to_owned(), variable_enum)
            })
            .collect();
        variables
    }

    /// Get the number of residual blocks.
    pub fn num_residual_blocks(&self) -> usize {
        self.residual_blocks.len()
    }

    /// Build the symbolic structure for sparse Jacobian computation.
    ///
    /// This method constructs the sparsity pattern of the Jacobian matrix before numerical
    /// computation. It determines which entries in the Jacobian will be non-zero based on
    /// the structure of the optimization problem (which residual blocks connect which variables).
    ///
    /// # Purpose
    /// - Pre-allocates memory for sparse matrix operations
    /// - Enables efficient sparse linear algebra (avoiding dense operations)
    /// - Computed once at the beginning, used throughout optimization
    ///
    /// # Arguments
    /// * `variables` - Map of variable names to their values and properties (SE2, SE3, etc.)
    /// * `variable_index_sparse_matrix` - Map from variable name to starting column index in the Jacobian
    /// * `total_dof` - Total degrees of freedom (number of columns in the Jacobian)
    ///
    /// # Returns
    /// A `SymbolicStructure` containing:
    /// - `pattern`: The symbolic sparse column matrix structure (row/col indices of non-zeros)
    /// - `order`: An ordering/permutation for efficient numerical computation
    ///
    /// # Algorithm
    /// For each residual block:
    /// 1. Identify which variables it depends on
    /// 2. For each (residual_dimension × variable_dof) block, mark entries as non-zero
    /// 3. Convert to an optimized sparse matrix representation
    ///
    /// # Example Structure
    /// For a simple problem with 3 SE2 poses (9 DOF total):
    /// - Between(x0, x1): Creates a 3×6 block at rows 0-2, cols 0-5
    /// - Between(x1, x2): Creates a 3×6 block at rows 3-5, cols 3-8
    /// - Prior(x0): Creates a 3×3 block at rows 6-8, cols 0-2
    ///
    /// Result: 9×9 sparse Jacobian with 45 non-zero entries
    pub fn build_symbolic_structure(
        &self,
        variables: &HashMap<String, VariableEnum>,
        variable_index_sparse_matrix: &HashMap<String, usize>,
        total_dof: usize,
    ) -> ApexSolverResult<SymbolicStructure> {
        // Vector to accumulate all (row, col) pairs that will be non-zero in the Jacobian.
        // Each Pair represents one entry in the sparse matrix.
        let mut indices = Vec::<Pair<usize, usize>>::new();

        // Iterate through all residual blocks (factors/constraints) in the problem.
        // Each residual block contributes a block of entries to the Jacobian.
        self.residual_blocks.iter().for_each(|(_, residual_block)| {
            // Create local indexing for this residual block's variables.
            // Maps each variable to its local starting index and size within this factor.
            // Example: For Between(x0, x1) with SE2: [(0, 3), (3, 3)]
            //   - x0 starts at local index 0, has 3 DOF
            //   - x1 starts at local index 3, has 3 DOF
            let mut variable_local_idx_size_list = Vec::<(usize, usize)>::new();
            let mut count_variable_local_idx: usize = 0;

            // Build the local index mapping for this residual block
            for var_key in &residual_block.variable_key_list {
                if let Some(variable) = variables.get(var_key) {
                    // Store (local_start_index, dof_size) for this variable
                    variable_local_idx_size_list
                        .push((count_variable_local_idx, variable.get_size()));
                    count_variable_local_idx += variable.get_size();
                }
            }

            // For each variable in this residual block, generate Jacobian entries
            for (i, var_key) in residual_block.variable_key_list.iter().enumerate() {
                if let Some(variable_global_idx) = variable_index_sparse_matrix.get(var_key) {
                    // Get the DOF size for this variable
                    let (_, var_size) = variable_local_idx_size_list[i];

                    // Generate all (row, col) pairs for the Jacobian block
                    //   ∂(residual) / ∂(variable).
                    //
                    // For a residual block with dimension R and a variable with DOF V,
                    // this creates R × V entries in the Jacobian.

                    // Iterate over each residual dimension (rows)
                    for row_idx in 0..residual_block.factor.get_dimension() {
                        // Iterate over each variable DOF (columns)
                        for col_idx in 0..var_size {
                            // Global row index: this residual block's first row plus the offset
                            let global_row_idx = residual_block.residual_row_start_idx + row_idx;

                            // Global column index: this variable's first column plus the offset
                            let global_col_idx = variable_global_idx + col_idx;

                            // Record this (row, col) pair as a non-zero entry
                            indices.push(Pair::new(global_row_idx, global_col_idx));
                        }
                    }
                }
            }
        });

        // Convert the list of (row, col) pairs into an optimized symbolic sparse matrix.
        // This performs:
        // 1. Duplicate elimination (the same entry might be referenced multiple times)
        // 2. Sorting for column-wise storage format
        // 3. Computing a fill-reducing ordering for numerical stability
        // 4. Allocating the symbolic structure (no values yet, just the pattern)
        let (pattern, order) = SymbolicSparseColMat::try_new_from_indices(
            self.total_residual_dimension, // Number of rows (total residual dimension)
            total_dof,                     // Number of columns (total DOF)
            &indices,                      // List of non-zero entry locations
        )
        .map_err(|e| {
            CoreError::SymbolicStructure(
                "Failed to build symbolic sparse matrix structure".to_string(),
            )
            .log_with_source(e)
        })?;

        // Return the symbolic structure that will be filled with numerical values later
        Ok(SymbolicStructure { pattern, order })
    }

    /// Compute only the residual vector for the current variable values.
    ///
    /// This is an optimized version that skips Jacobian computation when only the cost
    /// function value is needed (e.g., during initialization or step evaluation).
    ///
    /// # Arguments
    ///
    /// * `variables` - Current variable values (from `initialize_variables()` or updated)
    ///
    /// # Returns
    ///
    /// Residual vector as an N×1 column matrix (N = total residual dimension)
    ///
    /// # Performance
    ///
    /// Approximately **2x faster** than `compute_residual_and_jacobian_sparse()` since it:
    /// - Skips Jacobian computation for each residual block
    /// - Avoids Jacobian matrix assembly and storage
    /// - Only parallelizes residual evaluation
    ///
    /// # When to Use
    ///
    /// - **Initial cost computation**: When setting up optimization state
    /// - **Step evaluation**: When computing the new cost after applying parameter updates
    /// - **Cost-only queries**: When you don't need gradients
    ///
    /// Use `compute_residual_and_jacobian_sparse()` when you need both residual and Jacobian
    /// (e.g., in the main optimization iteration loop for linearization).
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use apex_solver::core::problem::Problem;
    /// # use std::collections::HashMap;
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// # let problem = Problem::new();
    /// # let variables = HashMap::new();
    /// // Initial cost evaluation (no Jacobian needed)
    /// let residual = problem.compute_residual_sparse(&variables)?;
    /// let initial_cost = residual.norm_l2() * residual.norm_l2();
    /// # Ok(())
    /// # }
    /// ```
    pub fn compute_residual_sparse(
        &self,
        variables: &HashMap<String, VariableEnum>,
    ) -> ApexSolverResult<Mat<f64>> {
        let total_residual =
            Arc::new(Mutex::new(Col::<f64>::zeros(self.total_residual_dimension)));

        // Compute residuals in parallel (no Jacobian needed)
        let result: Result<Vec<()>, ApexSolverError> = self
            .residual_blocks
            .par_iter()
            .map(|(_, residual_block)| {
                self.compute_residual_block(residual_block, variables, &total_residual)
            })
            .collect();

        result?;

        let total_residual = Arc::try_unwrap(total_residual)
            .map_err(|_| {
                CoreError::ParallelComputation(
                    "Failed to unwrap Arc for total residual".to_string(),
                )
                .log()
            })?
            .into_inner()
            .map_err(|e| {
                CoreError::ParallelComputation(
                    "Failed to extract mutex inner value for total residual".to_string(),
                )
                .log_with_source(e)
            })?;

        // Convert faer Col to Mat (column vector as an n×1 matrix)
        let residual_faer = total_residual.as_ref().as_mat().to_owned();
        Ok(residual_faer)
    }

    /// Compute the residual vector and sparse Jacobian matrix for the current variable values.
    ///
    /// This is the core linearization method called during each optimization iteration. It:
    /// 1. Evaluates all residual blocks in parallel using rayon
    /// 2. Assembles the full residual vector
    /// 3. Constructs the sparse Jacobian matrix using the precomputed symbolic structure
    ///
    /// # Arguments
    ///
    /// * `variables` - Current variable values (from `initialize_variables()` or updated)
    /// * `variable_index_sparse_matrix` - Map from variable name to starting column in the Jacobian
    /// * `symbolic_structure` - Precomputed sparsity pattern (from `build_symbolic_structure()`)
    ///
    /// # Returns
    ///
    /// Tuple `(residual, jacobian)` where:
    /// - `residual`: N×1 column matrix (total residual dimension)
    /// - `jacobian`: N×M sparse matrix (N = residual dim, M = total DOF)
    ///
    /// # Performance
    ///
    /// This method is highly optimized:
    /// - **Parallel evaluation**: Each residual block is evaluated independently using rayon
    /// - **Sparse storage**: Only non-zero Jacobian entries are stored and computed
    /// - **Memory efficient**: The preallocated sparse structure avoids dynamic allocations
    ///
    /// Typically accounts for 40-60% of total optimization time (including sparse matrix ops).
    ///
    /// # When to Use
    ///
    /// Use this method in the main optimization loop when you need both residual and Jacobian
    /// for linearization. For cost-only evaluation, use `compute_residual_sparse()` instead.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use apex_solver::core::problem::Problem;
    /// # use std::collections::HashMap;
    /// # fn example() -> apex_solver::error::ApexSolverResult<()> {
    /// # let problem = Problem::new();
    /// # let variables = HashMap::new();
    /// # let variable_index_map = HashMap::new();
    /// # let symbolic_structure =
    /// #     problem.build_symbolic_structure(&variables, &variable_index_map, 0)?;
    /// // Inside the optimizer loop: linearize around the current values
    /// let (residual, jacobian) = problem.compute_residual_and_jacobian_sparse(
    ///     &variables,
    ///     &variable_index_map,
    ///     &symbolic_structure,
    /// )?;
    /// // Use for the normal equations: J^T J dx = -J^T r
    /// # Ok(())
    /// # }
    /// ```
    pub fn compute_residual_and_jacobian_sparse(
        &self,
        variables: &HashMap<String, VariableEnum>,
        variable_index_sparse_matrix: &HashMap<String, usize>,
        symbolic_structure: &SymbolicStructure,
    ) -> ApexSolverResult<(Mat<f64>, SparseColMat<usize, f64>)> {
        let total_residual =
            Arc::new(Mutex::new(Col::<f64>::zeros(self.total_residual_dimension)));

        // Pre-compute the exact number of non-zeros so the flattened value buffer
        // can be allocated once, up front
        let total_nnz = symbolic_structure.pattern.compute_nnz();

        // Collect each block's Jacobian values in parallel
        let jacobian_blocks: Result<Vec<Vec<f64>>, ApexSolverError> = self
            .residual_blocks
            .par_iter()
            .map(|(_, residual_block)| {
                self.compute_residual_and_jacobian_block(
                    residual_block,
                    variables,
                    variable_index_sparse_matrix,
                    &total_residual,
                )
            })
            .collect();

        let jacobian_blocks = jacobian_blocks?;

        // Append each block's values into the pre-allocated buffer (no reallocation)
        let mut jacobian_values = Vec::with_capacity(total_nnz);
        for mut block_values in jacobian_blocks {
            jacobian_values.append(&mut block_values);
        }

        let total_residual = Arc::try_unwrap(total_residual)
            .map_err(|_| {
                CoreError::ParallelComputation(
                    "Failed to unwrap Arc for total residual".to_string(),
                )
                .log()
            })?
            .into_inner()
            .map_err(|e| {
                CoreError::ParallelComputation(
                    "Failed to extract mutex inner value for total residual".to_string(),
                )
                .log_with_source(e)
            })?;

        // Convert faer Col to Mat (column vector as an n×1 matrix)
        let residual_faer = total_residual.as_ref().as_mat().to_owned();
        let jacobian_sparse = SparseColMat::new_from_argsort(
            symbolic_structure.pattern.clone(),
            &symbolic_structure.order,
            jacobian_values.as_slice(),
        )
        .map_err(|e| {
            CoreError::SymbolicStructure(
                "Failed to create sparse Jacobian from argsort".to_string(),
            )
            .log_with_source(e)
        })?;

        Ok((residual_faer, jacobian_sparse))
    }

    /// Compute only the residual for a single residual block (no Jacobian).
    ///
    /// Helper method for `compute_residual_sparse()`.
    fn compute_residual_block(
        &self,
        residual_block: &ResidualBlock,
        variables: &HashMap<String, VariableEnum>,
        total_residual: &Arc<Mutex<Col<f64>>>,
    ) -> ApexSolverResult<()> {
        let mut param_vectors: Vec<DVector<f64>> = Vec::new();

        for var_key in &residual_block.variable_key_list {
            if let Some(variable) = variables.get(var_key) {
                param_vectors.push(variable.to_vector());
            }
        }

        // Compute only the residual (compute_jacobian = false); the Jacobian slot is ignored
        let (mut res, _) = residual_block.factor.linearize(&param_vectors, false);

        // Apply the loss function if present (critical for robust optimization)
        if let Some(loss_func) = &residual_block.loss_func {
            let squared_norm = res.dot(&res);
            let corrector = Corrector::new(loss_func.as_ref(), squared_norm);
            corrector.correct_residuals(&mut res);
        }

        let mut total_residual = total_residual.lock().map_err(|e| {
            CoreError::ParallelComputation("Failed to acquire lock on total residual".to_string())
                .log_with_source(e)
        })?;

        // Copy residual values from the nalgebra DVector to the faer Col
        let start_idx = residual_block.residual_row_start_idx;
        let dim = residual_block.factor.get_dimension();
        let mut total_residual_mut = total_residual.as_mut();
        for i in 0..dim {
            total_residual_mut[start_idx + i] = res[i];
        }

        Ok(())
    }

    /// Compute residual and Jacobian for a single residual block.
    ///
    /// Helper method for `compute_residual_and_jacobian_sparse()`.
    fn compute_residual_and_jacobian_block(
        &self,
        residual_block: &ResidualBlock,
        variables: &HashMap<String, VariableEnum>,
        variable_index_sparse_matrix: &HashMap<String, usize>,
        total_residual: &Arc<Mutex<Col<f64>>>,
    ) -> ApexSolverResult<Vec<f64>> {
        let mut param_vectors: Vec<DVector<f64>> = Vec::new();
        let mut var_sizes: Vec<usize> = Vec::new();
        let mut variable_local_idx_size_list = Vec::<(usize, usize)>::new();
        let mut count_variable_local_idx: usize = 0;

        for var_key in &residual_block.variable_key_list {
            if let Some(variable) = variables.get(var_key) {
                param_vectors.push(variable.to_vector());
                let var_size = variable.get_size();
                var_sizes.push(var_size);
                variable_local_idx_size_list.push((count_variable_local_idx, var_size));
                count_variable_local_idx += var_size;
            }
        }

        let (mut res, jac_opt) = residual_block.factor.linearize(&param_vectors, true);
        let mut jac = jac_opt.ok_or_else(|| {
            CoreError::FactorLinearization(
                "Factor returned None for Jacobian when compute_jacobian=true".to_string(),
            )
            .log()
        })?;

        // Apply the loss function if present (critical for robust optimization)
        if let Some(loss_func) = &residual_block.loss_func {
            let squared_norm = res.dot(&res);
            let corrector = Corrector::new(loss_func.as_ref(), squared_norm);
            corrector.correct_jacobian(&res, &mut jac);
            corrector.correct_residuals(&mut res);
        }

        // Update the total residual
        {
            let mut total_residual = total_residual.lock().map_err(|e| {
                CoreError::ParallelComputation(
                    "Failed to acquire lock on total residual".to_string(),
                )
                .log_with_source(e)
            })?;

            // Copy residual values from the nalgebra DVector to the faer Col
            let start_idx = residual_block.residual_row_start_idx;
            let dim = residual_block.factor.get_dimension();
            let mut total_residual_mut = total_residual.as_mut();
            for i in 0..dim {
                total_residual_mut[start_idx + i] = res[i];
            }
        }

        // Extract Jacobian values in the correct order
        let mut local_jacobian_values = Vec::new();
        for (i, var_key) in residual_block.variable_key_list.iter().enumerate() {
            if variable_index_sparse_matrix.contains_key(var_key) {
                let (variable_local_idx, var_size) = variable_local_idx_size_list[i];
                let variable_jac = jac.view((0, variable_local_idx), (jac.shape().0, var_size));

                for row_idx in 0..jac.shape().0 {
                    for col_idx in 0..var_size {
                        local_jacobian_values.push(variable_jac[(row_idx, col_idx)]);
                    }
                }
            } else {
                return Err(CoreError::Variable(format!(
                    "Missing key {} in variable-to-column-index mapping",
                    var_key
                ))
                .log()
                .into());
            }
        }

        Ok(local_jacobian_values)
    }

    /// Log the residual vector to a text file.
    pub fn log_residual_to_file(
        &self,
        residual: &nalgebra::DVector<f64>,
        filename: &str,
    ) -> Result<(), Error> {
        let mut file = File::create(filename)?;
        writeln!(file, "# Residual vector - {} elements", residual.len())?;
        for (i, &value) in residual.iter().enumerate() {
            writeln!(file, "{}: {:.12}", i, value)?;
        }
        Ok(())
    }

    /// Log the sparse Jacobian matrix to a text file.
    pub fn log_sparse_jacobian_to_file(
        &self,
        jacobian: &SparseColMat<usize, f64>,
        filename: &str,
    ) -> Result<(), Error> {
        let mut file = File::create(filename)?;
        writeln!(
            file,
            "# Sparse Jacobian matrix - {} x {} ({} non-zeros)",
            jacobian.nrows(),
            jacobian.ncols(),
            jacobian.compute_nnz()
        )?;
        writeln!(file, "# Matrix saved as dimensions and non-zero count only")?;
        writeln!(file, "# For detailed access, convert to a dense matrix first")?;
        Ok(())
    }

    /// Log variables to a text file.
    pub fn log_variables_to_file(
        &self,
        variables: &HashMap<String, VariableEnum>,
        filename: &str,
    ) -> Result<(), Error> {
        let mut file = File::create(filename)?;
        writeln!(file, "# Variables - {} total", variables.len())?;
        writeln!(file, "# Format: variable_name: [values...]")?;

        let mut sorted_vars: Vec<_> = variables.keys().collect();
        sorted_vars.sort();

        for var_name in sorted_vars {
            let var_vector = variables[var_name].to_vector();
            write!(file, "{}: [", var_name)?;
            for (i, &value) in var_vector.iter().enumerate() {
                write!(file, "{:.12}", value)?;
                if i < var_vector.len() - 1 {
                    write!(file, ", ")?;
                }
            }
            writeln!(file, "]")?;
        }
        Ok(())
    }

    /// Compute per-variable covariances and set them in `Variable` objects.
    ///
    /// This method computes the full covariance matrix by inverting the Hessian
    /// from the linear solver, then extracts per-variable covariance blocks and
    /// stores them in the corresponding `Variable` objects.
    ///
    /// # Arguments
    /// * `linear_solver` - Mutable reference to the linear solver containing the cached Hessian
    /// * `variables` - Mutable map of variables where covariances will be stored
    /// * `variable_index_map` - Map from variable names to their starting column indices
    ///
    /// # Returns
    /// `Some(HashMap)` containing per-variable covariance matrices if successful, `None` otherwise
    ///
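    /// # Example
    ///
    /// A sketch of the intended call site; constructing a concrete
    /// [`SparseLinearSolver`] implementation is solver-specific and elided here:
    ///
    /// ```
    /// // After the optimizer has converged:
    /// // if let Some(covariances) = problem.compute_and_set_covariances(
    /// //     &mut linear_solver,
    /// //     &mut variables,
    /// //     &variable_index_map,
    /// // ) {
    /// //     let x0_cov = &covariances["x0"]; // tangent-space covariance of x0
    /// // }
    /// ```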
    pub fn compute_and_set_covariances(
        &self,
        linear_solver: &mut Box<dyn SparseLinearSolver>,
        variables: &mut HashMap<String, VariableEnum>,
        variable_index_map: &HashMap<String, usize>,
    ) -> Option<HashMap<String, Mat<f64>>> {
        // Compute the full covariance matrix (H^{-1}) using the linear solver
        linear_solver.compute_covariance_matrix()?;
        let full_cov = linear_solver.get_covariance_matrix()?.clone();

        // Extract per-variable covariance blocks from the full matrix
        let per_var_covariances =
            extract_variable_covariances(&full_cov, variables, variable_index_map);

        // Set covariances in the Variable objects for easy access
        for (var_name, cov) in &per_var_covariances {
            if let Some(var) = variables.get_mut(var_name) {
                var.set_covariance(cov.clone());
            }
        }

        Some(per_var_covariances)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::core::loss_functions::HuberLoss;
    use crate::factors::{BetweenFactor, PriorFactor};
    use crate::manifold::{ManifoldType, se2::SE2, se3::SE3};
    use nalgebra::{Quaternion, Vector3, dvector};
    use std::collections::HashMap;

    type TestResult = Result<(), Box<dyn std::error::Error>>;
    type TestProblemResult = Result<
        (
            Problem,
            HashMap<String, (ManifoldType, nalgebra::DVector<f64>)>,
        ),
        Box<dyn std::error::Error>,
    >;

    /// Create a test SE2 dataset with 10 vertices in a loop
    fn create_se2_test_problem() -> TestProblemResult {
        let mut problem = Problem::new();
        let mut initial_values = HashMap::new();

        // Create 10 SE2 poses in a rough circle pattern
        let poses = vec![
            (0.0, 0.0, 0.0),    // x0: origin
            (1.0, 0.0, 0.1),    // x1: move right
            (1.5, 1.0, 0.5),    // x2: move up-right
            (1.0, 2.0, 1.0),    // x3: move up
            (0.0, 2.5, 1.5),    // x4: move up-left
            (-1.0, 2.0, 2.0),   // x5: move left
            (-1.5, 1.0, 2.5),   // x6: move down-left
            (-1.0, 0.0, 3.0),   // x7: move down
            (-0.5, -0.5, -2.8), // x8: move down-right
            (0.5, -0.5, -2.3),  // x9: back towards origin
        ];

        // Add vertices using [x, y, theta] ordering
        for (i, (x, y, theta)) in poses.iter().enumerate() {
            let var_name = format!("x{}", i);
            let se2_data = dvector![*x, *y, *theta];
            initial_values.insert(var_name, (ManifoldType::SE2, se2_data));
        }

        // Add a chain of between factors
        for i in 0..9 {
            let from_pose = poses[i];
            let to_pose = poses[i + 1];

            // Compute the relative transformation
            let dx = to_pose.0 - from_pose.0;
            let dy = to_pose.1 - from_pose.1;
            let dtheta = to_pose.2 - from_pose.2;

            let between_factor = BetweenFactor::new(SE2::from_xy_angle(dx, dy, dtheta));
            problem.add_residual_block(
                &[&format!("x{}", i), &format!("x{}", i + 1)],
                Box::new(between_factor),
                Some(Box::new(HuberLoss::new(1.0)?)),
            );
        }

        // Add a loop closure from x9 back to x0
        let dx = poses[0].0 - poses[9].0;
        let dy = poses[0].1 - poses[9].1;
        let dtheta = poses[0].2 - poses[9].2;

        let loop_closure = BetweenFactor::new(SE2::from_xy_angle(dx, dy, dtheta));
        problem.add_residual_block(
            &["x9", "x0"],
            Box::new(loop_closure),
            Some(Box::new(HuberLoss::new(1.0)?)),
        );

        // Add a prior factor for x0
        let prior_factor = PriorFactor {
            data: dvector![0.0, 0.0, 0.0],
        };
        problem.add_residual_block(&["x0"], Box::new(prior_factor), None);

        Ok((problem, initial_values))
    }

    /// Create a test SE3 dataset with 8 vertices in a 3D pattern
    fn create_se3_test_problem() -> TestProblemResult {
        let mut problem = Problem::new();
        let mut initial_values = HashMap::new();

        // Create 8 SE3 poses in a rough 3D cube pattern
        let poses = [
            // Bottom face of cube
            (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0),   // x0: origin
            (1.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.995), // x1: +X
            (1.0, 1.0, 0.0, 0.0, 0.0, 0.2, 0.98),  // x2: +X+Y
            (0.0, 1.0, 0.0, 0.0, 0.0, 0.3, 0.955), // x3: +Y
            // Top face of cube
            (0.0, 0.0, 1.0, 0.1, 0.0, 0.0, 0.995), // x4: +Z
            (1.0, 0.0, 1.0, 0.1, 0.0, 0.1, 0.99),  // x5: +X+Z
            (1.0, 1.0, 1.0, 0.1, 0.0, 0.2, 0.975), // x6: +X+Y+Z
            (0.0, 1.0, 1.0, 0.1, 0.0, 0.3, 0.95),  // x7: +Y+Z
        ];

        // Add vertices using [tx, ty, tz, qw, qx, qy, qz] ordering
        for (i, (tx, ty, tz, qx, qy, qz, qw)) in poses.iter().enumerate() {
            let var_name = format!("x{}", i);
            let se3_data = dvector![*tx, *ty, *tz, *qw, *qx, *qy, *qz];
            initial_values.insert(var_name, (ManifoldType::SE3, se3_data));
        }

        // Add between factors connecting the cube edges
        let edges = vec![
            (0, 1),
            (1, 2),
            (2, 3),
            (3, 0), // Bottom face
            (4, 5),
            (5, 6),
            (6, 7),
            (7, 4), // Top face
            (0, 4),
            (1, 5),
            (2, 6),
            (3, 7), // Vertical edges
        ];

        for (from_idx, to_idx) in edges {
            let from_pose = poses[from_idx];
            let to_pose = poses[to_idx];

            // Create a simple relative transformation (simplified for testing)
            let relative_se3 = SE3::from_translation_quaternion(
                Vector3::new(
                    to_pose.0 - from_pose.0, // dx
                    to_pose.1 - from_pose.1, // dy
                    to_pose.2 - from_pose.2, // dz
                ),
                Quaternion::new(1.0, 0.0, 0.0, 0.0), // identity quaternion
            );

            let between_factor = BetweenFactor::new(relative_se3);
            problem.add_residual_block(
                &[&format!("x{}", from_idx), &format!("x{}", to_idx)],
                Box::new(between_factor),
                Some(Box::new(HuberLoss::new(1.0)?)),
            );
        }

        // Add a prior factor for x0
        let prior_factor = PriorFactor {
            data: dvector![0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
        };
        problem.add_residual_block(&["x0"], Box::new(prior_factor), None);

        Ok((problem, initial_values))
    }

    #[test]
    fn test_problem_construction_se2() -> TestResult {
        let (problem, initial_values) = create_se2_test_problem()?;

        // Test basic problem properties
        assert_eq!(problem.num_residual_blocks(), 11); // 9 between + 1 loop closure + 1 prior
        assert_eq!(problem.total_residual_dimension, 33); // 11 * 3
        assert_eq!(initial_values.len(), 10);

        Ok(())
    }

    #[test]
    fn test_problem_construction_se3() -> TestResult {
        let (problem, initial_values) = create_se3_test_problem()?;

        // Test basic problem properties
        assert_eq!(problem.num_residual_blocks(), 13); // 12 between + 1 prior
        assert_eq!(problem.total_residual_dimension, 79); // 12 * 6 + 1 * 7 (SE3 between factors are 6-dim, the prior factor is 7-dim)
        assert_eq!(initial_values.len(), 8);

        Ok(())
    }

    #[test]
    fn test_variable_initialization_se2() -> TestResult {
        let (problem, initial_values) = create_se2_test_problem()?;

        // Test variable initialization
        let variables = problem.initialize_variables(&initial_values);
        assert_eq!(variables.len(), 10);

        // Test variable sizes
        for (name, var) in &variables {
            assert_eq!(
                var.get_size(),
                3,
                "SE2 variable {} should have size 3",
                name
            );
        }

        // Test conversion to DVector
        for (name, var) in &variables {
            let vec = var.to_vector();
            assert_eq!(
                vec.len(),
                3,
                "SE2 variable {} vector should have length 3",
                name
            );
        }

        Ok(())
    }

    #[test]
    fn test_variable_initialization_se3() -> TestResult {
        let (problem, initial_values) = create_se3_test_problem()?;

        // Test variable initialization
        let variables = problem.initialize_variables(&initial_values);
        assert_eq!(variables.len(), 8);

        // Test variable sizes
        for (name, var) in &variables {
            assert_eq!(
                var.get_size(),
                6,
                "SE3 variable {} should have size 6 (DOF)",
                name
            );
        }

        // Test conversion to DVector
        for (name, var) in &variables {
            let vec = var.to_vector();
            assert_eq!(
                vec.len(),
                7,
                "SE3 variable {} vector should have length 7",
                name
            );
        }

        Ok(())
    }

    #[test]
    fn test_column_mapping_se2() -> TestResult {
        let (problem, initial_values) = create_se2_test_problem()?;
        let variables = problem.initialize_variables(&initial_values);

        // Create a column mapping for the variables
        let mut variable_index_sparse_matrix = HashMap::new();
        let mut col_offset = 0;
        let mut sorted_vars: Vec<_> = variables.keys().collect();
        sorted_vars.sort(); // Ensure consistent ordering

        for var_name in sorted_vars {
            variable_index_sparse_matrix.insert(var_name.clone(), col_offset);
            col_offset += variables[var_name].get_size();
        }

        // Test total degrees of freedom
        let total_dof: usize = variables.values().map(|v| v.get_size()).sum();
        assert_eq!(total_dof, 30); // 10 variables * 3 DOF each
        assert_eq!(col_offset, 30);

        // Test each variable has a correct column mapping
        for (var_name, &col_idx) in &variable_index_sparse_matrix {
            assert!(
                col_idx < total_dof,
                "Column index {} for {} should be < {}",
                col_idx,
                var_name,
                total_dof
            );
        }

        Ok(())
    }

    #[test]
    fn test_symbolic_structure_se2() -> TestResult {
        let (problem, initial_values) = create_se2_test_problem()?;
        let variables = problem.initialize_variables(&initial_values);

        // Create the column mapping
        let mut variable_index_sparse_matrix = HashMap::new();
        let mut col_offset = 0;
        let mut sorted_vars: Vec<_> = variables.keys().collect();
        sorted_vars.sort();

        for var_name in sorted_vars {
            variable_index_sparse_matrix.insert(var_name.clone(), col_offset);
            col_offset += variables[var_name].get_size();
        }

        // Build the symbolic structure
        let symbolic_structure = problem.build_symbolic_structure(
            &variables,
            &variable_index_sparse_matrix,
            col_offset,
        )?;

        // Test symbolic structure dimensions
        assert_eq!(
            symbolic_structure.pattern.nrows(),
            problem.total_residual_dimension
        );
        assert_eq!(symbolic_structure.pattern.ncols(), 30); // total DOF

        Ok(())
    }

    #[test]
    fn test_residual_jacobian_computation_se2() -> TestResult {
        let (problem, initial_values) = create_se2_test_problem()?;
        let variables = problem.initialize_variables(&initial_values);

        // Create the column mapping
        let mut variable_index_sparse_matrix = HashMap::new();
        let mut col_offset = 0;
        let mut sorted_vars: Vec<_> = variables.keys().collect();
        sorted_vars.sort();

        for var_name in sorted_vars {
            variable_index_sparse_matrix.insert(var_name.clone(), col_offset);
            col_offset += variables[var_name].get_size();
        }

        // Test sparse computation
        let symbolic_structure = problem.build_symbolic_structure(
            &variables,
            &variable_index_sparse_matrix,
            col_offset,
        )?;
        let (residual_sparse, jacobian_sparse) = problem.compute_residual_and_jacobian_sparse(
            &variables,
            &variable_index_sparse_matrix,
            &symbolic_structure,
        )?;

        // Test sparse dimensions
        assert_eq!(residual_sparse.nrows(), problem.total_residual_dimension);
        assert_eq!(jacobian_sparse.nrows(), problem.total_residual_dimension);
        assert_eq!(jacobian_sparse.ncols(), 30);

        Ok(())
    }

    #[test]
    fn test_residual_jacobian_computation_se3() -> TestResult {
        let (problem, initial_values) = create_se3_test_problem()?;
        let variables = problem.initialize_variables(&initial_values);

        // Create the column mapping
        let mut variable_index_sparse_matrix = HashMap::new();
        let mut col_offset = 0;
        let mut sorted_vars: Vec<_> = variables.keys().collect();
        sorted_vars.sort();

        for var_name in sorted_vars {
            variable_index_sparse_matrix.insert(var_name.clone(), col_offset);
            col_offset += variables[var_name].get_size();
        }

        // Test sparse computation
        let symbolic_structure = problem.build_symbolic_structure(
            &variables,
            &variable_index_sparse_matrix,
            col_offset,
        )?;
        let (residual_sparse, jacobian_sparse) = problem.compute_residual_and_jacobian_sparse(
            &variables,
            &variable_index_sparse_matrix,
            &symbolic_structure,
        )?;

        // Test sparse dimensions match
        assert_eq!(residual_sparse.nrows(), problem.total_residual_dimension);
        assert_eq!(jacobian_sparse.nrows(), problem.total_residual_dimension);
        assert_eq!(jacobian_sparse.ncols(), 48); // 8 variables * 6 DOF each

        Ok(())
    }

    #[test]
    fn test_residual_block_operations() -> TestResult {
        let mut problem = Problem::new();

        // Test adding residual blocks
        let block_id1 = problem.add_residual_block(
            &["x0", "x1"],
            Box::new(BetweenFactor::new(SE2::from_xy_angle(1.0, 0.0, 0.1))),
            Some(Box::new(HuberLoss::new(1.0)?)),
        );

        let block_id2 = problem.add_residual_block(
            &["x0"],
            Box::new(PriorFactor {
                data: dvector![0.0, 0.0, 0.0],
            }),
            None,
        );

        assert_eq!(problem.num_residual_blocks(), 2);
        assert_eq!(problem.total_residual_dimension, 6); // 3 + 3
        assert_eq!(block_id1, 0);
        assert_eq!(block_id2, 1);

        // Test removing residual blocks
        let removed_block = problem.remove_residual_block(block_id1);
        assert!(removed_block.is_some());
        assert_eq!(problem.num_residual_blocks(), 1);
        assert_eq!(problem.total_residual_dimension, 3); // Only the prior factor remains

        // Test removing a non-existent block
        let non_existent = problem.remove_residual_block(999);
        assert!(non_existent.is_none());

        Ok(())
    }

    #[test]
    fn test_variable_constraints() -> TestResult {
        let mut problem = Problem::new();

        // Test fixing variables
        problem.fix_variable("x0", 0);
        problem.fix_variable("x0", 1);
        problem.fix_variable("x1", 2);

        assert!(problem.fixed_variable_indexes.contains_key("x0"));
        assert!(problem.fixed_variable_indexes.contains_key("x1"));
        assert_eq!(problem.fixed_variable_indexes["x0"].len(), 2);
        assert_eq!(problem.fixed_variable_indexes["x1"].len(), 1);

        // Test unfixing variables
        problem.unfix_variable("x0");
        assert!(!problem.fixed_variable_indexes.contains_key("x0"));
        assert!(problem.fixed_variable_indexes.contains_key("x1"));

        // Test variable bounds
        problem.set_variable_bounds("x2", 0, -1.0, 1.0);
        problem.set_variable_bounds("x2", 1, -2.0, 2.0);
        problem.set_variable_bounds("x3", 0, 0.0, 5.0);

        assert!(problem.variable_bounds.contains_key("x2"));
        assert!(problem.variable_bounds.contains_key("x3"));
        assert_eq!(problem.variable_bounds["x2"].len(), 2);
        assert_eq!(problem.variable_bounds["x3"].len(), 1);

        // Test removing bounds
        problem.remove_variable_bounds("x2");
        assert!(!problem.variable_bounds.contains_key("x2"));
        assert!(problem.variable_bounds.contains_key("x3"));

        Ok(())
    }
}