// apex_solver/core/problem.rs
1//! Optimization problem definition and sparse Jacobian computation.
2//!
3//! The `Problem` struct is the central component that defines a factor graph optimization problem.
4//! It manages residual blocks (constraints), variables, and the construction of sparse Jacobian
5//! matrices for efficient nonlinear least squares optimization.
6//!
7//! # Factor Graph Representation
8//!
9//! The optimization problem is represented as a bipartite factor graph:
10//!
11//! ```text
12//! Variables: x0 --- x1 --- x2 --- x3
13//! | | | |
14//! Factors: f0 f1 f2 f3 (constraints/measurements)
15//! ```
16//!
17//! Each factor connects one or more variables and contributes a residual (error) term to the
18//! overall cost function:
19//!
20//! ```text
21//! minimize Σ_i ρ(||r_i(x)||²)
22//! ```
23//!
24//! where `r_i(x)` is the residual for factor i, and ρ is an optional robust loss function.
25//!
26//! # Key Responsibilities
27//!
28//! 1. **Residual Block Management**: Add/remove factors and track their structure
29//! 2. **Variable Management**: Initialize variables with manifold types and constraints
30//! 3. **Sparsity Pattern**: Build symbolic structure for efficient sparse linear algebra
31//! 4. **Linearization**: Compute residuals and Jacobians in parallel
32//! 5. **Covariance**: Extract per-variable uncertainty estimates after optimization
33//!
34//! # Sparse Jacobian Structure
35//!
36//! The Jacobian matrix `J = ∂r/∂x` is sparse because each factor only depends on a small
37//! subset of variables. For example, a between factor connecting x0 and x1 contributes
38//! a 3×6 block (SE2) or 6×12 block (SE3) to the Jacobian, leaving the rest as zeros.
39//!
40//! The Problem pre-computes the sparsity pattern once, then efficiently fills in the
41//! numerical values during each iteration.
42//!
43//! # Mixed Manifold Support
44//!
45//! The Problem supports mixed manifold types in a single optimization problem via
46//! [`VariableEnum`]. This allows:
47//! - SE2 and SE3 poses in the same graph
48//! - SO3 rotations with R³ landmarks
49//! - Any combination of supported manifolds
50//!
51//! # Example: Building a Problem
52//!
53//! ```
54//! use apex_solver::core::problem::Problem;
55//! use apex_solver::factors::{BetweenFactorSE2, PriorFactor};
56//! use apex_solver::core::loss_functions::HuberLoss;
57//! use apex_solver::manifold::ManifoldType;
58//! use nalgebra::{DVector, dvector};
59//! use std::collections::HashMap;
60//!
61//! let mut problem = Problem::new();
62//!
63//! // Add prior factor to anchor the first pose
64//! let prior = Box::new(PriorFactor {
65//! data: dvector![0.0, 0.0, 0.0],
66//! });
67//! problem.add_residual_block(&["x0"], prior, None);
68//!
69//! // Add between factor with robust loss
70//! let between = Box::new(BetweenFactorSE2::new(1.0, 0.0, 0.1));
71//! let loss: Option<Box<dyn apex_solver::core::loss_functions::LossFunction + Send>> =
72//! Some(Box::new(HuberLoss::new(1.0).unwrap()));
73//! problem.add_residual_block(&["x0", "x1"], between, loss);
74//!
75//! // Initialize variables
76//! let mut initial_values = HashMap::new();
77//! initial_values.insert("x0".to_string(), (ManifoldType::SE2, dvector![0.0, 0.0, 0.0]));
78//! initial_values.insert("x1".to_string(), (ManifoldType::SE2, dvector![0.9, 0.1, 0.12]));
79//!
80//! let variables = problem.initialize_variables(&initial_values);
81//!
82//! println!("Problem has {} residual blocks", problem.num_residual_blocks());
83//! println!("Total residual dimension: {}", problem.total_residual_dimension);
84//! ```
85
86use std::{
87 collections::{HashMap, HashSet},
88 fs::File,
89 io::{Error, Write},
90 sync::{Arc, Mutex},
91};
92
93use faer::{
94 Col, Mat, MatRef,
95 sparse::{Argsort, Pair, SparseColMat, SymbolicSparseColMat},
96};
97use nalgebra::DVector;
98use rayon::prelude::*;
99
100use crate::{
101 core::{
102 corrector::Corrector, loss_functions::LossFunction, residual_block::ResidualBlock,
103 variable::Variable,
104 },
105 error::{ApexError, ApexResult},
106 factors::Factor,
107 linalg::{SparseLinearSolver, extract_variable_covariances},
108 manifold::{ManifoldType, rn, se2, se3, so2, so3},
109};
110
/// Symbolic structure for sparse matrix operations.
///
/// Contains the sparsity pattern (which entries are non-zero) and an ordering
/// for efficient numerical computation. This is computed once at the beginning
/// (see `Problem::build_symbolic_structure`) and reused throughout optimization,
/// so only the numeric values need to be refilled on each iteration.
///
/// # Fields
///
/// - `pattern`: The symbolic sparse column matrix structure (row/col indices of non-zeros)
/// - `order`: A fill-reducing ordering/permutation for numerical stability
pub struct SymbolicStructure {
    pub pattern: SymbolicSparseColMat<usize>,
    pub order: Argsort<usize>,
}
125
/// Enum to handle mixed manifold variable types.
///
/// Wraps a typed [`Variable`] for each supported manifold so that variables of
/// different manifolds (e.g. SE2 poses and R^n landmarks) can coexist in one
/// optimization problem.
#[derive(Clone)]
pub enum VariableEnum {
    /// Euclidean vector variable R^n (dynamic dimension)
    Rn(Variable<rn::Rn>),
    /// 2D rigid transform `[x, y, theta]` (3 DOF)
    SE2(Variable<se2::SE2>),
    /// 3D rigid transform `[tx, ty, tz, qw, qx, qy, qz]` (6 DOF in tangent space)
    SE3(Variable<se3::SE3>),
    /// 2D rotation `[theta]` (1 DOF)
    SO2(Variable<so2::SO2>),
    /// 3D rotation `[qw, qx, qy, qz]` (3 DOF in tangent space)
    SO3(Variable<so3::SO3>),
}
135
136impl VariableEnum {
137 /// Get the tangent space size for this variable
138 pub fn get_size(&self) -> usize {
139 match self {
140 VariableEnum::Rn(var) => var.get_size(),
141 VariableEnum::SE2(var) => var.get_size(),
142 VariableEnum::SE3(var) => var.get_size(),
143 VariableEnum::SO2(var) => var.get_size(),
144 VariableEnum::SO3(var) => var.get_size(),
145 }
146 }
147
148 /// Convert to DVector for use with Factor trait
149 pub fn to_vector(&self) -> DVector<f64> {
150 match self {
151 VariableEnum::Rn(var) => var.value.clone().into(),
152 VariableEnum::SE2(var) => var.value.clone().into(),
153 VariableEnum::SE3(var) => var.value.clone().into(),
154 VariableEnum::SO2(var) => var.value.clone().into(),
155 VariableEnum::SO3(var) => var.value.clone().into(),
156 }
157 }
158
159 /// Apply a tangent space step to update this variable.
160 ///
161 /// This method applies a manifold plus operation: x_new = x ⊞ δx
162 /// where δx is a tangent vector. It supports all manifold types.
163 ///
164 /// # Arguments
165 /// * `step_slice` - View into the full step vector for this variable's DOF
166 ///
167 /// # Implementation Notes
168 /// Uses explicit clone instead of unsafe memory copy (`IntoNalgebra`) for small vectors.
169 /// This is safe and performant for typical manifold dimensions (1-6 DOF).
170 ///
171 pub fn apply_tangent_step(&mut self, step_slice: MatRef<f64>) {
172 match self {
173 VariableEnum::SE3(var) => {
174 // SE3 has 6 DOF in tangent space
175 let mut step_data: Vec<f64> = (0..6).map(|i| step_slice[(i, 0)]).collect();
176
177 // Enforce fixed indices: zero out step components for fixed DOF
178 for &fixed_idx in &var.fixed_indices {
179 if fixed_idx < 6 {
180 step_data[fixed_idx] = 0.0;
181 }
182 }
183
184 let step_dvector = DVector::from_vec(step_data);
185 let tangent = se3::SE3Tangent::from(step_dvector);
186 let new_value = var.plus(&tangent);
187 var.set_value(new_value);
188 }
189 VariableEnum::SE2(var) => {
190 // SE2 has 3 DOF in tangent space
191 let mut step_data: Vec<f64> = (0..3).map(|i| step_slice[(i, 0)]).collect();
192
193 // Enforce fixed indices: zero out step components for fixed DOF
194 for &fixed_idx in &var.fixed_indices {
195 if fixed_idx < 3 {
196 step_data[fixed_idx] = 0.0;
197 }
198 }
199
200 let step_dvector = DVector::from_vec(step_data);
201 let tangent = se2::SE2Tangent::from(step_dvector);
202 let new_value = var.plus(&tangent);
203 var.set_value(new_value);
204 }
205 VariableEnum::SO3(var) => {
206 // SO3 has 3 DOF in tangent space
207 let mut step_data: Vec<f64> = (0..3).map(|i| step_slice[(i, 0)]).collect();
208
209 // Enforce fixed indices: zero out step components for fixed DOF
210 for &fixed_idx in &var.fixed_indices {
211 if fixed_idx < 3 {
212 step_data[fixed_idx] = 0.0;
213 }
214 }
215
216 let step_dvector = DVector::from_vec(step_data);
217 let tangent = so3::SO3Tangent::from(step_dvector);
218 let new_value = var.plus(&tangent);
219 var.set_value(new_value);
220 }
221 VariableEnum::SO2(var) => {
222 // SO2 has 1 DOF in tangent space
223 let mut step_data = step_slice[(0, 0)];
224
225 // Enforce fixed indices: zero out step if index 0 is fixed
226 if var.fixed_indices.contains(&0) {
227 step_data = 0.0;
228 }
229
230 let step_dvector = DVector::from_vec(vec![step_data]);
231 let tangent = so2::SO2Tangent::from(step_dvector);
232 let new_value = var.plus(&tangent);
233 var.set_value(new_value);
234 }
235 VariableEnum::Rn(var) => {
236 // Rn has dynamic size
237 let size = var.get_size();
238 let mut step_data: Vec<f64> = (0..size).map(|i| step_slice[(i, 0)]).collect();
239
240 // Enforce fixed indices: zero out step components for fixed DOF
241 for &fixed_idx in &var.fixed_indices {
242 if fixed_idx < size {
243 step_data[fixed_idx] = 0.0;
244 }
245 }
246
247 let step_dvector = DVector::from_vec(step_data);
248 let tangent = rn::RnTangent::new(step_dvector);
249 let new_value = var.plus(&tangent);
250 var.set_value(new_value);
251 }
252 }
253 }
254
255 /// Get the covariance matrix for this variable (if computed).
256 ///
257 /// Returns `None` if covariance has not been computed.
258 ///
259 /// # Returns
260 /// Reference to the covariance matrix in tangent space
261 pub fn get_covariance(&self) -> Option<&Mat<f64>> {
262 match self {
263 VariableEnum::Rn(var) => var.get_covariance(),
264 VariableEnum::SE2(var) => var.get_covariance(),
265 VariableEnum::SE3(var) => var.get_covariance(),
266 VariableEnum::SO2(var) => var.get_covariance(),
267 VariableEnum::SO3(var) => var.get_covariance(),
268 }
269 }
270
271 /// Set the covariance matrix for this variable.
272 ///
273 /// The covariance matrix should be square with dimension equal to
274 /// the tangent space dimension of this variable.
275 ///
276 /// # Arguments
277 /// * `cov` - Covariance matrix in tangent space
278 pub fn set_covariance(&mut self, cov: Mat<f64>) {
279 match self {
280 VariableEnum::Rn(var) => var.set_covariance(cov),
281 VariableEnum::SE2(var) => var.set_covariance(cov),
282 VariableEnum::SE3(var) => var.set_covariance(cov),
283 VariableEnum::SO2(var) => var.set_covariance(cov),
284 VariableEnum::SO3(var) => var.set_covariance(cov),
285 }
286 }
287
288 /// Clear the covariance matrix for this variable.
289 pub fn clear_covariance(&mut self) {
290 match self {
291 VariableEnum::Rn(var) => var.clear_covariance(),
292 VariableEnum::SE2(var) => var.clear_covariance(),
293 VariableEnum::SE3(var) => var.clear_covariance(),
294 VariableEnum::SO2(var) => var.clear_covariance(),
295 VariableEnum::SO3(var) => var.clear_covariance(),
296 }
297 }
298
299 /// Get the bounds for this variable.
300 ///
301 /// Returns a reference to the bounds map where keys are indices and values are (lower, upper) pairs.
302 pub fn get_bounds(&self) -> &HashMap<usize, (f64, f64)> {
303 match self {
304 VariableEnum::Rn(var) => &var.bounds,
305 VariableEnum::SE2(var) => &var.bounds,
306 VariableEnum::SE3(var) => &var.bounds,
307 VariableEnum::SO2(var) => &var.bounds,
308 VariableEnum::SO3(var) => &var.bounds,
309 }
310 }
311
312 /// Get the fixed indices for this variable.
313 ///
314 /// Returns a reference to the set of indices that should remain fixed during optimization.
315 pub fn get_fixed_indices(&self) -> &HashSet<usize> {
316 match self {
317 VariableEnum::Rn(var) => &var.fixed_indices,
318 VariableEnum::SE2(var) => &var.fixed_indices,
319 VariableEnum::SE3(var) => &var.fixed_indices,
320 VariableEnum::SO2(var) => &var.fixed_indices,
321 VariableEnum::SO3(var) => &var.fixed_indices,
322 }
323 }
324
325 /// Set the value of this variable from a vector representation.
326 ///
327 /// This is used to update the variable after applying constraints (bounds and fixed indices).
328 pub fn set_from_vector(&mut self, vec: &DVector<f64>) {
329 match self {
330 VariableEnum::Rn(var) => {
331 var.set_value(rn::Rn::new(vec.clone()));
332 }
333 VariableEnum::SE2(var) => {
334 let new_se2: se2::SE2 = vec.clone().into();
335 var.set_value(new_se2);
336 }
337 VariableEnum::SE3(var) => {
338 let new_se3: se3::SE3 = vec.clone().into();
339 var.set_value(new_se3);
340 }
341 VariableEnum::SO2(var) => {
342 let new_so2: so2::SO2 = vec.clone().into();
343 var.set_value(new_so2);
344 }
345 VariableEnum::SO3(var) => {
346 let new_so3: so3::SO3 = vec.clone().into();
347 var.set_value(new_so3);
348 }
349 }
350 }
351}
352
/// The optimization problem definition for factor graph optimization.
///
/// Manages residual blocks (factors/constraints), variables, and the sparse Jacobian structure.
/// Supports mixed manifold types (SE2, SE3, SO2, SO3, Rn) in a single problem and provides
/// efficient parallel residual/Jacobian computation.
///
/// # Architecture
///
/// The Problem acts as a container and coordinator:
/// - Stores all residual blocks (factors with optional loss functions)
/// - Tracks the global structure (which variables connect to which factors)
/// - Builds and maintains the sparse Jacobian pattern
/// - Provides parallel residual/Jacobian evaluation using rayon
/// - Manages variable constraints (fixed indices, bounds)
///
/// # Workflow
///
/// 1. **Construction**: Create a new Problem with `Problem::new()`
/// 2. **Add Factors**: Use `add_residual_block()` to add constraints
/// 3. **Initialize Variables**: Use `initialize_variables()` with initial values
/// 4. **Build Sparsity**: Use `build_symbolic_structure()` once before optimization
/// 5. **Linearize**: Call `compute_residual_and_jacobian_sparse()` each iteration
/// 6. **Extract Covariance**: Use `compute_and_set_covariances()` after convergence
///
/// # Example
///
/// ```
/// use apex_solver::core::problem::Problem;
/// use apex_solver::factors::BetweenFactorSE2;
/// use apex_solver::manifold::ManifoldType;
/// use nalgebra::dvector;
/// use std::collections::HashMap;
///
/// let mut problem = Problem::new();
///
/// // Add a between factor
/// let factor = Box::new(BetweenFactorSE2::new(1.0, 0.0, 0.1));
/// problem.add_residual_block(&["x0", "x1"], factor, None);
///
/// // Initialize variables
/// let mut initial = HashMap::new();
/// initial.insert("x0".to_string(), (ManifoldType::SE2, dvector![0.0, 0.0, 0.0]));
/// initial.insert("x1".to_string(), (ManifoldType::SE2, dvector![1.0, 0.0, 0.1]));
///
/// let variables = problem.initialize_variables(&initial);
/// assert_eq!(variables.len(), 2);
/// ```
pub struct Problem {
    /// Total dimension of the stacked residual vector (sum of all residual block dimensions)
    pub total_residual_dimension: usize,

    /// Counter for assigning unique IDs to residual blocks (monotonically increasing,
    /// so IDs are never reused even after removals)
    residual_id_count: usize,

    /// Map from residual block ID to ResidualBlock instance
    residual_blocks: HashMap<usize, ResidualBlock>,

    /// Variables with fixed indices (e.g., fix first pose's x,y coordinates)
    /// Maps variable name -> set of indices to fix
    pub fixed_variable_indexes: HashMap<String, HashSet<usize>>,

    /// Variable bounds (box constraints on individual DOF)
    /// Maps variable name -> (index -> (lower_bound, upper_bound))
    pub variable_bounds: HashMap<String, HashMap<usize, (f64, f64)>>,
}
418impl Default for Problem {
419 fn default() -> Self {
420 Self::new()
421 }
422}
423
424impl Problem {
425 /// Create a new empty optimization problem.
426 ///
427 /// # Returns
428 ///
429 /// A new `Problem` with no residual blocks or variables
430 ///
431 /// # Example
432 ///
433 /// ```
434 /// use apex_solver::core::problem::Problem;
435 ///
436 /// let problem = Problem::new();
437 /// assert_eq!(problem.num_residual_blocks(), 0);
438 /// assert_eq!(problem.total_residual_dimension, 0);
439 /// ```
440 pub fn new() -> Self {
441 Self {
442 total_residual_dimension: 0,
443 residual_id_count: 0,
444 residual_blocks: HashMap::new(),
445 fixed_variable_indexes: HashMap::new(),
446 variable_bounds: HashMap::new(),
447 }
448 }
449
450 /// Add a residual block (factor with optional loss function) to the problem.
451 ///
452 /// This is the primary method for building the factor graph. Each call adds one constraint
453 /// connecting one or more variables.
454 ///
455 /// # Arguments
456 ///
457 /// * `variable_key_size_list` - Names of the variables this factor connects (order matters)
458 /// * `factor` - The factor implementation that computes residuals and Jacobians
459 /// * `loss_func` - Optional robust loss function for outlier rejection
460 ///
461 /// # Returns
462 ///
463 /// The unique ID assigned to this residual block
464 ///
465 /// # Example
466 ///
467 /// ```
468 /// use apex_solver::core::problem::Problem;
469 /// use apex_solver::factors::{BetweenFactorSE2, PriorFactor};
470 /// use apex_solver::core::loss_functions::HuberLoss;
471 /// use nalgebra::dvector;
472 ///
473 /// let mut problem = Problem::new();
474 ///
475 /// // Add prior factor (unary constraint)
476 /// let prior = Box::new(PriorFactor { data: dvector![0.0, 0.0, 0.0] });
477 /// let id1 = problem.add_residual_block(&["x0"], prior, None);
478 ///
479 /// // Add between factor with robust loss (binary constraint)
480 /// let between = Box::new(BetweenFactorSE2::new(1.0, 0.0, 0.1));
481 /// let loss: Option<Box<dyn apex_solver::core::loss_functions::LossFunction + Send>> =
482 /// Some(Box::new(HuberLoss::new(1.0).unwrap()));
483 /// let id2 = problem.add_residual_block(&["x0", "x1"], between, loss);
484 ///
485 /// assert_eq!(id1, 0);
486 /// assert_eq!(id2, 1);
487 /// assert_eq!(problem.num_residual_blocks(), 2);
488 /// ```
489 pub fn add_residual_block(
490 &mut self,
491 variable_key_size_list: &[&str],
492 factor: Box<dyn Factor + Send>,
493 loss_func: Option<Box<dyn LossFunction + Send>>,
494 ) -> usize {
495 let new_residual_dimension = factor.get_dimension();
496 self.residual_blocks.insert(
497 self.residual_id_count,
498 ResidualBlock::new(
499 self.residual_id_count,
500 self.total_residual_dimension,
501 variable_key_size_list,
502 factor,
503 loss_func,
504 ),
505 );
506 let block_id = self.residual_id_count;
507 self.residual_id_count += 1;
508
509 self.total_residual_dimension += new_residual_dimension;
510
511 block_id
512 }
513
514 pub fn remove_residual_block(&mut self, block_id: usize) -> Option<ResidualBlock> {
515 if let Some(residual_block) = self.residual_blocks.remove(&block_id) {
516 self.total_residual_dimension -= residual_block.factor.get_dimension();
517 Some(residual_block)
518 } else {
519 None
520 }
521 }
522
523 pub fn fix_variable(&mut self, var_to_fix: &str, idx: usize) {
524 if let Some(var_mut) = self.fixed_variable_indexes.get_mut(var_to_fix) {
525 var_mut.insert(idx);
526 } else {
527 self.fixed_variable_indexes
528 .insert(var_to_fix.to_owned(), HashSet::from([idx]));
529 }
530 }
531
    /// Remove all fixed-index constraints for a variable.
    ///
    /// After this call every DOF of `var_to_unfix` is free to move again.
    /// No-op if the variable had no fixed indices.
    pub fn unfix_variable(&mut self, var_to_unfix: &str) {
        self.fixed_variable_indexes.remove(var_to_unfix);
    }
535
536 pub fn set_variable_bounds(
537 &mut self,
538 var_to_bound: &str,
539 idx: usize,
540 lower_bound: f64,
541 upper_bound: f64,
542 ) {
543 if lower_bound > upper_bound {
544 println!("lower bound is larger than upper bound");
545 } else if let Some(var_mut) = self.variable_bounds.get_mut(var_to_bound) {
546 var_mut.insert(idx, (lower_bound, upper_bound));
547 } else {
548 self.variable_bounds.insert(
549 var_to_bound.to_owned(),
550 HashMap::from([(idx, (lower_bound, upper_bound))]),
551 );
552 }
553 }
554
    /// Remove all box-bound constraints for a variable.
    ///
    /// No-op if the variable had no bounds set.
    pub fn remove_variable_bounds(&mut self, var_to_unbound: &str) {
        self.variable_bounds.remove(var_to_unbound);
    }
558
559 /// Initialize variables from initial values with manifold types.
560 ///
561 /// Converts raw initial values into typed `Variable<M>` instances wrapped in `VariableEnum`.
562 /// This method also applies any fixed indices or bounds that were set via `fix_variable()`
563 /// or `set_variable_bounds()`.
564 ///
565 /// # Arguments
566 ///
567 /// * `initial_values` - Map from variable name to (manifold type, initial value vector)
568 ///
569 /// # Returns
570 ///
571 /// Map from variable name to `VariableEnum` (typed variables ready for optimization)
572 ///
573 /// # Manifold Formats
574 ///
575 /// - **SE2**: `[x, y, theta]` (3 elements)
576 /// - **SE3**: `[tx, ty, tz, qw, qx, qy, qz]` (7 elements)
577 /// - **SO2**: `[theta]` (1 element)
578 /// - **SO3**: `[qw, qx, qy, qz]` (4 elements)
579 /// - **Rn**: `[x1, x2, ..., xn]` (n elements)
580 ///
581 /// # Example
582 ///
583 /// ```
584 /// use apex_solver::core::problem::Problem;
585 /// use apex_solver::manifold::ManifoldType;
586 /// use nalgebra::dvector;
587 /// use std::collections::HashMap;
588 ///
589 /// let problem = Problem::new();
590 ///
591 /// let mut initial = HashMap::new();
592 /// initial.insert("pose0".to_string(), (ManifoldType::SE2, dvector![0.0, 0.0, 0.0]));
593 /// initial.insert("pose1".to_string(), (ManifoldType::SE2, dvector![1.0, 0.0, 0.1]));
594 /// initial.insert("landmark".to_string(), (ManifoldType::RN, dvector![5.0, 3.0]));
595 ///
596 /// let variables = problem.initialize_variables(&initial);
597 /// assert_eq!(variables.len(), 3);
598 /// ```
599 pub fn initialize_variables(
600 &self,
601 initial_values: &HashMap<String, (ManifoldType, DVector<f64>)>,
602 ) -> HashMap<String, VariableEnum> {
603 let variables: HashMap<String, VariableEnum> = initial_values
604 .iter()
605 .map(|(k, v)| {
606 let variable_enum = match v.0 {
607 ManifoldType::SO2 => {
608 let mut var = Variable::new(so2::SO2::from(v.1.clone()));
609 if let Some(indexes) = self.fixed_variable_indexes.get(k) {
610 var.fixed_indices = indexes.clone();
611 }
612 if let Some(bounds) = self.variable_bounds.get(k) {
613 var.bounds = bounds.clone();
614 }
615 VariableEnum::SO2(var)
616 }
617 ManifoldType::SO3 => {
618 let mut var = Variable::new(so3::SO3::from(v.1.clone()));
619 if let Some(indexes) = self.fixed_variable_indexes.get(k) {
620 var.fixed_indices = indexes.clone();
621 }
622 if let Some(bounds) = self.variable_bounds.get(k) {
623 var.bounds = bounds.clone();
624 }
625 VariableEnum::SO3(var)
626 }
627 ManifoldType::SE2 => {
628 let mut var = Variable::new(se2::SE2::from(v.1.clone()));
629 if let Some(indexes) = self.fixed_variable_indexes.get(k) {
630 var.fixed_indices = indexes.clone();
631 }
632 if let Some(bounds) = self.variable_bounds.get(k) {
633 var.bounds = bounds.clone();
634 }
635 VariableEnum::SE2(var)
636 }
637 ManifoldType::SE3 => {
638 let mut var = Variable::new(se3::SE3::from(v.1.clone()));
639 if let Some(indexes) = self.fixed_variable_indexes.get(k) {
640 var.fixed_indices = indexes.clone();
641 }
642 if let Some(bounds) = self.variable_bounds.get(k) {
643 var.bounds = bounds.clone();
644 }
645 VariableEnum::SE3(var)
646 }
647 ManifoldType::RN => {
648 let mut var = Variable::new(rn::Rn::from(v.1.clone()));
649 if let Some(indexes) = self.fixed_variable_indexes.get(k) {
650 var.fixed_indices = indexes.clone();
651 }
652 if let Some(bounds) = self.variable_bounds.get(k) {
653 var.bounds = bounds.clone();
654 }
655 VariableEnum::Rn(var)
656 }
657 };
658
659 (k.to_owned(), variable_enum)
660 })
661 .collect();
662 variables
663 }
664
    /// Get the number of residual blocks currently in the problem.
    pub fn num_residual_blocks(&self) -> usize {
        self.residual_blocks.len()
    }
669
670 /// Build symbolic structure for sparse Jacobian computation
671 ///
672 /// This method constructs the sparsity pattern of the Jacobian matrix before numerical
673 /// computation. It determines which entries in the Jacobian will be non-zero based on
674 /// the structure of the optimization problem (which residual blocks connect which variables).
675 ///
676 /// # Purpose
677 /// - Pre-allocates memory for sparse matrix operations
678 /// - Enables efficient sparse linear algebra (avoiding dense operations)
679 /// - Computed once at the beginning, used throughout optimization
680 ///
681 /// # Arguments
682 /// * `variables` - Map of variable names to their values and properties (SE2, SE3, etc.)
683 /// * `variable_index_sparce_matrix` - Map from variable name to starting column index in Jacobian
684 /// * `total_dof` - Total degrees of freedom (number of columns in Jacobian)
685 ///
686 /// # Returns
687 /// A `SymbolicStructure` containing:
688 /// - `pattern`: The symbolic sparse column matrix structure (row/col indices of non-zeros)
689 /// - `order`: An ordering/permutation for efficient numerical computation
690 ///
691 /// # Algorithm
692 /// For each residual block:
693 /// 1. Identify which variables it depends on
694 /// 2. For each (residual_dimension × variable_dof) block, mark entries as non-zero
695 /// 3. Convert to optimized sparse matrix representation
696 ///
697 /// # Example Structure
698 /// For a simple problem with 3 SE2 poses (9 DOF total):
699 /// - Between(x0, x1): Creates 3×6 block at rows 0-2, cols 0-5
700 /// - Between(x1, x2): Creates 3×6 block at rows 3-5, cols 3-8
701 /// - Prior(x0): Creates 3×3 block at rows 6-8, cols 0-2
702 ///
703 /// Result: 9×9 sparse Jacobian with 45 non-zero entries
704 pub fn build_symbolic_structure(
705 &self,
706 variables: &HashMap<String, VariableEnum>,
707 variable_index_sparce_matrix: &HashMap<String, usize>,
708 total_dof: usize,
709 ) -> ApexResult<SymbolicStructure> {
710 // Vector to accumulate all (row, col) pairs that will be non-zero in the Jacobian
711 // Each Pair represents one entry in the sparse matrix
712 let mut indices = Vec::<Pair<usize, usize>>::new();
713
714 // Iterate through all residual blocks (factors/constraints) in the problem
715 // Each residual block contributes a block of entries to the Jacobian
716 self.residual_blocks.iter().for_each(|(_, residual_block)| {
717 // Create local indexing for this residual block's variables
718 // Maps each variable to its local starting index and size within this factor
719 // Example: For Between(x0, x1) with SE2: [(0, 3), (3, 3)]
720 // - x0 starts at local index 0, has 3 DOF
721 // - x1 starts at local index 3, has 3 DOF
722 let mut variable_local_idx_size_list = Vec::<(usize, usize)>::new();
723 let mut count_variable_local_idx: usize = 0;
724
725 // Build the local index mapping for this residual block
726 for var_key in &residual_block.variable_key_list {
727 if let Some(variable) = variables.get(var_key) {
728 // Store (local_start_index, dof_size) for this variable
729 variable_local_idx_size_list
730 .push((count_variable_local_idx, variable.get_size()));
731 count_variable_local_idx += variable.get_size();
732 }
733 }
734
735 // For each variable in this residual block, generate Jacobian entries
736 for (i, var_key) in residual_block.variable_key_list.iter().enumerate() {
737 if let Some(variable_global_idx) = variable_index_sparce_matrix.get(var_key) {
738 // Get the DOF size for this variable
739 let (_, var_size) = variable_local_idx_size_list[i];
740
741 // Generate all (row, col) pairs for the Jacobian block:
742 // ∂(residual) / ∂(variable)
743 //
744 // For a residual block with dimension R and variable with DOF V:
745 // Creates R × V entries in the Jacobian
746
747 // Iterate over each residual dimension (rows)
748 for row_idx in 0..residual_block.factor.get_dimension() {
749 // Iterate over each variable DOF (columns)
750 for col_idx in 0..var_size {
751 // Compute global row index:
752 // Start from this residual block's first row, add offset
753 let global_row_idx = residual_block.residual_row_start_idx + row_idx;
754
755 // Compute global column index:
756 // Start from this variable's first column, add offset
757 let global_col_idx = variable_global_idx + col_idx;
758
759 // Record this (row, col) pair as a non-zero entry
760 indices.push(Pair::new(global_row_idx, global_col_idx));
761 }
762 }
763 }
764 }
765 });
766
767 // Convert the list of (row, col) pairs into an optimized symbolic sparse matrix
768 // This performs:
769 // 1. Duplicate elimination (same entry might be referenced multiple times)
770 // 2. Sorting for column-wise storage format
771 // 3. Computing a fill-reducing ordering for numerical stability
772 // 4. Allocating the symbolic structure (no values yet, just pattern)
773 let (pattern, order) = SymbolicSparseColMat::try_new_from_indices(
774 self.total_residual_dimension, // Number of rows (total residual dimension)
775 total_dof, // Number of columns (total DOF)
776 &indices, // List of non-zero entry locations
777 )
778 .map_err(|_| {
779 ApexError::MatrixOperation(
780 "Failed to build symbolic sparse matrix structure".to_string(),
781 )
782 })?;
783
784 // Return the symbolic structure that will be filled with numerical values later
785 Ok(SymbolicStructure { pattern, order })
786 }
787
788 /// Compute only the residual vector for the current variable values.
789 ///
790 /// This is an optimized version that skips Jacobian computation when only the cost
791 /// function value is needed (e.g., during initialization or step evaluation).
792 ///
793 /// # Arguments
794 ///
795 /// * `variables` - Current variable values (from `initialize_variables()` or updated)
796 ///
797 /// # Returns
798 ///
799 /// Residual vector as N×1 column matrix (N = total residual dimension)
800 ///
801 /// # Performance
802 ///
803 /// Approximately **2x faster** than `compute_residual_and_jacobian_sparse()` since it:
804 /// - Skips Jacobian computation for each residual block
805 /// - Avoids Jacobian matrix assembly and storage
806 /// - Only parallelizes residual evaluation
807 ///
808 /// # When to Use
809 ///
810 /// - **Initial cost computation**: When setting up optimization state
811 /// - **Step evaluation**: When computing new cost after applying parameter updates
812 /// - **Cost-only queries**: When you don't need gradients
813 ///
814 /// Use `compute_residual_and_jacobian_sparse()` when you need both residual and Jacobian
815 /// (e.g., in the main optimization iteration loop for linearization).
816 ///
817 /// # Example
818 ///
819 /// ```rust,ignore
820 /// // Initial cost evaluation (no Jacobian needed)
821 /// let residual = problem.compute_residual_sparse(&variables)?;
822 /// let initial_cost = residual.norm_l2() * residual.norm_l2();
823 /// ```
824 pub fn compute_residual_sparse(
825 &self,
826 variables: &HashMap<String, VariableEnum>,
827 ) -> ApexResult<Mat<f64>> {
828 let total_residual = Arc::new(Mutex::new(Col::<f64>::zeros(self.total_residual_dimension)));
829
830 // Compute residuals in parallel (no Jacobian needed)
831 let result: Result<Vec<()>, ApexError> = self
832 .residual_blocks
833 .par_iter()
834 .map(|(_, residual_block)| {
835 self.compute_residual_block(residual_block, variables, &total_residual)
836 })
837 .collect();
838
839 result?;
840
841 let total_residual = Arc::try_unwrap(total_residual)
842 .map_err(|_| {
843 ApexError::ThreadError("Failed to unwrap Arc for total residual".to_string())
844 })?
845 .into_inner()
846 .map_err(|_| {
847 ApexError::ThreadError(
848 "Failed to extract mutex inner value for total residual".to_string(),
849 )
850 })?;
851
852 // Convert faer Col to Mat (column vector as n×1 matrix)
853 let residual_faer = total_residual.as_ref().as_mat().to_owned();
854 Ok(residual_faer)
855 }
856
857 /// Compute residual vector and sparse Jacobian matrix for the current variable values.
858 ///
859 /// This is the core linearization method called during each optimization iteration. It:
860 /// 1. Evaluates all residual blocks in parallel using rayon
861 /// 2. Assembles the full residual vector
862 /// 3. Constructs the sparse Jacobian matrix using the precomputed symbolic structure
863 ///
864 /// # Arguments
865 ///
866 /// * `variables` - Current variable values (from `initialize_variables()` or updated)
867 /// * `variable_index_sparce_matrix` - Map from variable name to starting column in Jacobian
868 /// * `symbolic_structure` - Precomputed sparsity pattern (from `build_symbolic_structure()`)
869 ///
870 /// # Returns
871 ///
872 /// Tuple `(residual, jacobian)` where:
873 /// - `residual`: N×1 column matrix (total residual dimension)
874 /// - `jacobian`: N×M sparse matrix (N = residual dim, M = total DOF)
875 ///
876 /// # Performance
877 ///
878 /// This method is highly optimized:
879 /// - **Parallel evaluation**: Each residual block is evaluated independently using rayon
880 /// - **Sparse storage**: Only non-zero Jacobian entries are stored and computed
881 /// - **Memory efficient**: Preallocated sparse structure avoids dynamic allocations
882 ///
883 /// Typically accounts for 40-60% of total optimization time (including sparse matrix ops).
884 ///
885 /// # When to Use
886 ///
887 /// Use this method in the main optimization loop when you need both residual and Jacobian
888 /// for linearization. For cost-only evaluation, use `compute_residual_sparse()` instead.
889 ///
890 /// # Example
891 ///
892 /// ```rust,ignore
893 /// // Inside optimizer loop:
894 /// let (residual, jacobian) = problem.compute_residual_and_jacobian_sparse(
895 /// &variables,
896 /// &variable_index_map,
897 /// &symbolic_structure,
898 /// )?;
899 ///
900 /// // Use for linear system: J^T J dx = -J^T r
901 /// ```
902 pub fn compute_residual_and_jacobian_sparse(
903 &self,
904 variables: &HashMap<String, VariableEnum>,
905 variable_index_sparce_matrix: &HashMap<String, usize>,
906 symbolic_structure: &SymbolicStructure,
907 ) -> ApexResult<(Mat<f64>, SparseColMat<usize, f64>)> {
908 let total_residual = Arc::new(Mutex::new(Col::<f64>::zeros(self.total_residual_dimension)));
909
910 let jacobian_values: Result<Vec<Vec<f64>>, ApexError> = self
911 .residual_blocks
912 .par_iter()
913 .map(|(_, residual_block)| {
914 self.compute_residual_and_jacobian_block(
915 residual_block,
916 variables,
917 variable_index_sparce_matrix,
918 &total_residual,
919 )
920 })
921 .collect();
922
923 let jacobian_values: Vec<f64> = jacobian_values?.into_iter().flatten().collect();
924
925 let total_residual = Arc::try_unwrap(total_residual)
926 .map_err(|_| {
927 ApexError::ThreadError("Failed to unwrap Arc for total residual".to_string())
928 })?
929 .into_inner()
930 .map_err(|_| {
931 ApexError::ThreadError(
932 "Failed to extract mutex inner value for total residual".to_string(),
933 )
934 })?;
935
936 // Convert faer Col to Mat (column vector as n×1 matrix)
937 let residual_faer = total_residual.as_ref().as_mat().to_owned();
938 let jacobian_sparse = SparseColMat::new_from_argsort(
939 symbolic_structure.pattern.clone(),
940 &symbolic_structure.order,
941 jacobian_values.as_slice(),
942 )
943 .map_err(|_| {
944 ApexError::MatrixOperation("Failed to create sparse Jacobian from argsort".to_string())
945 })?;
946
947 Ok((residual_faer, jacobian_sparse))
948 }
949
950 /// Compute only the residual for a single residual block (no Jacobian).
951 ///
952 /// Helper method for `compute_residual_sparse()`.
953 fn compute_residual_block(
954 &self,
955 residual_block: &ResidualBlock,
956 variables: &HashMap<String, VariableEnum>,
957 total_residual: &Arc<Mutex<Col<f64>>>,
958 ) -> ApexResult<()> {
959 let mut param_vectors: Vec<DVector<f64>> = Vec::new();
960
961 for var_key in &residual_block.variable_key_list {
962 if let Some(variable) = variables.get(var_key) {
963 param_vectors.push(variable.to_vector());
964 }
965 }
966
967 // Compute only residual (linearize still computes Jacobian internally,
968 // but we don't extract/store it)
969 let (mut res, _) = residual_block.factor.linearize(¶m_vectors, false);
970
971 // Apply loss function if present (critical for robust optimization)
972 if let Some(loss_func) = &residual_block.loss_func {
973 let squared_norm = res.dot(&res);
974 let corrector = Corrector::new(loss_func.as_ref(), squared_norm);
975 corrector.correct_residuals(&mut res);
976 }
977
978 let mut total_residual = total_residual.lock().map_err(|_| {
979 ApexError::ThreadError("Failed to acquire lock on total residual".to_string())
980 })?;
981
982 // Copy residual values from nalgebra DVector to faer Col
983 let start_idx = residual_block.residual_row_start_idx;
984 let dim = residual_block.factor.get_dimension();
985 let mut total_residual_mut = total_residual.as_mut();
986 for i in 0..dim {
987 total_residual_mut[start_idx + i] = res[i];
988 }
989
990 Ok(())
991 }
992
993 /// Compute residual and Jacobian for a single residual block.
994 ///
995 /// Helper method for `compute_residual_and_jacobian_sparse()`.
996 fn compute_residual_and_jacobian_block(
997 &self,
998 residual_block: &ResidualBlock,
999 variables: &HashMap<String, VariableEnum>,
1000 variable_index_sparce_matrix: &HashMap<String, usize>,
1001 total_residual: &Arc<Mutex<Col<f64>>>,
1002 ) -> ApexResult<Vec<f64>> {
1003 let mut param_vectors: Vec<DVector<f64>> = Vec::new();
1004 let mut var_sizes: Vec<usize> = Vec::new();
1005 let mut variable_local_idx_size_list = Vec::<(usize, usize)>::new();
1006 let mut count_variable_local_idx: usize = 0;
1007
1008 for var_key in &residual_block.variable_key_list {
1009 if let Some(variable) = variables.get(var_key) {
1010 param_vectors.push(variable.to_vector());
1011 let var_size = variable.get_size();
1012 var_sizes.push(var_size);
1013 variable_local_idx_size_list.push((count_variable_local_idx, var_size));
1014 count_variable_local_idx += var_size;
1015 }
1016 }
1017
1018 let (mut res, jac_opt) = residual_block.factor.linearize(¶m_vectors, true);
1019 let mut jac = jac_opt.expect("Jacobian should be computed when compute_jacobian=true");
1020
1021 // Apply loss function if present (critical for robust optimization)
1022 if let Some(loss_func) = &residual_block.loss_func {
1023 let squared_norm = res.dot(&res);
1024 let corrector = Corrector::new(loss_func.as_ref(), squared_norm);
1025 corrector.correct_jacobian(&res, &mut jac);
1026 corrector.correct_residuals(&mut res);
1027 }
1028
1029 // Update total residual
1030 {
1031 let mut total_residual = total_residual.lock().map_err(|_| {
1032 ApexError::ThreadError("Failed to acquire lock on total residual".to_string())
1033 })?;
1034
1035 // Copy residual values from nalgebra DVector to faer Col
1036 let start_idx = residual_block.residual_row_start_idx;
1037 let dim = residual_block.factor.get_dimension();
1038 let mut total_residual_mut = total_residual.as_mut();
1039 for i in 0..dim {
1040 total_residual_mut[start_idx + i] = res[i];
1041 }
1042 }
1043
1044 // Extract Jacobian values in the correct order
1045 let mut local_jacobian_values = Vec::new();
1046 for (i, var_key) in residual_block.variable_key_list.iter().enumerate() {
1047 if variable_index_sparce_matrix.contains_key(var_key) {
1048 let (variable_local_idx, var_size) = variable_local_idx_size_list[i];
1049 let variable_jac = jac.view((0, variable_local_idx), (jac.shape().0, var_size));
1050
1051 for row_idx in 0..jac.shape().0 {
1052 for col_idx in 0..var_size {
1053 local_jacobian_values.push(variable_jac[(row_idx, col_idx)]);
1054 }
1055 }
1056 } else {
1057 return Err(ApexError::InvalidInput(format!(
1058 "Missing key {} in variable-to-column-index mapping",
1059 var_key
1060 )));
1061 }
1062 }
1063
1064 Ok(local_jacobian_values)
1065 }
1066
1067 /// Log residual vector to a text file
1068 pub fn log_residual_to_file(
1069 &self,
1070 residual: &nalgebra::DVector<f64>,
1071 filename: &str,
1072 ) -> Result<(), Error> {
1073 let mut file = File::create(filename)?;
1074 writeln!(file, "# Residual vector - {} elements", residual.len())?;
1075 for (i, &value) in residual.iter().enumerate() {
1076 writeln!(file, "{}: {:.12}", i, value)?;
1077 }
1078 Ok(())
1079 }
1080
1081 /// Log sparse Jacobian matrix to a text file
1082 pub fn log_sparse_jacobian_to_file(
1083 &self,
1084 jacobian: &SparseColMat<usize, f64>,
1085 filename: &str,
1086 ) -> Result<(), Error> {
1087 let mut file = File::create(filename)?;
1088 writeln!(
1089 file,
1090 "# Sparse Jacobian matrix - {} x {} ({} non-zeros)",
1091 jacobian.nrows(),
1092 jacobian.ncols(),
1093 jacobian.compute_nnz()
1094 )?;
1095 writeln!(file, "# Matrix saved as dimensions and non-zero count only")?;
1096 writeln!(file, "# For detailed access, convert to dense matrix first")?;
1097 Ok(())
1098 }
1099
1100 /// Log variables to a text file
1101 pub fn log_variables_to_file(
1102 &self,
1103 variables: &HashMap<String, VariableEnum>,
1104 filename: &str,
1105 ) -> Result<(), Error> {
1106 let mut file = File::create(filename)?;
1107 writeln!(file, "# Variables - {} total", variables.len())?;
1108 writeln!(file, "# Format: variable_name: [values...]")?;
1109
1110 let mut sorted_vars: Vec<_> = variables.keys().collect();
1111 sorted_vars.sort();
1112
1113 for var_name in sorted_vars {
1114 let var_vector = variables[var_name].to_vector();
1115 write!(file, "{}: [", var_name)?;
1116 for (i, &value) in var_vector.iter().enumerate() {
1117 write!(file, "{:.12}", value)?;
1118 if i < var_vector.len() - 1 {
1119 write!(file, ", ")?;
1120 }
1121 }
1122 writeln!(file, "]")?;
1123 }
1124 Ok(())
1125 }
1126
1127 /// Compute per-variable covariances and set them in Variable objects
1128 ///
1129 /// This method computes the full covariance matrix by inverting the Hessian
1130 /// from the linear solver, then extracts per-variable covariance blocks and
1131 /// stores them in the corresponding Variable objects.
1132 ///
1133 /// # Arguments
1134 /// * `linear_solver` - Mutable reference to the linear solver containing the cached Hessian
1135 /// * `variables` - Mutable map of variables where covariances will be stored
1136 /// * `variable_index_map` - Map from variable names to their starting column indices
1137 ///
1138 /// # Returns
1139 /// `Some(HashMap)` containing per-variable covariance matrices if successful, `None` otherwise
1140 ///
1141 pub fn compute_and_set_covariances(
1142 &self,
1143 linear_solver: &mut Box<dyn SparseLinearSolver>,
1144 variables: &mut HashMap<String, VariableEnum>,
1145 variable_index_map: &HashMap<String, usize>,
1146 ) -> Option<HashMap<String, Mat<f64>>> {
1147 // Compute the full covariance matrix (H^{-1}) using the linear solver
1148 linear_solver.compute_covariance_matrix()?;
1149 let full_cov = linear_solver.get_covariance_matrix()?.clone();
1150
1151 // Extract per-variable covariance blocks from the full matrix
1152 let per_var_covariances =
1153 extract_variable_covariances(&full_cov, variables, variable_index_map);
1154
1155 // Set covariances in Variable objects for easy access
1156 for (var_name, cov) in &per_var_covariances {
1157 if let Some(var) = variables.get_mut(var_name) {
1158 var.set_covariance(cov.clone());
1159 }
1160 }
1161
1162 Some(per_var_covariances)
1163 }
1164}
1165
#[cfg(test)]
mod tests {
    use super::*;
    use crate::core::loss_functions::HuberLoss;
    use crate::factors::{BetweenFactorSE2, BetweenFactorSE3, PriorFactor};
    use crate::manifold::{ManifoldType, se3::SE3};
    use nalgebra::{Quaternion, Vector3, dvector};
    use std::collections::HashMap;

    /// Create a test SE2 dataset with 10 vertices in a loop
    ///
    /// Builds 10 SE2 poses arranged in a rough circle, connected by a chain of
    /// between factors, closed with a loop-closure factor (x9 -> x0), and
    /// anchored by a prior on x0. Returns the problem together with the
    /// initial values keyed by variable name.
    fn create_se2_test_problem() -> (
        Problem,
        HashMap<String, (ManifoldType, nalgebra::DVector<f64>)>,
    ) {
        let mut problem = Problem::new();
        let mut initial_values = HashMap::new();

        // Create 10 SE2 poses in a rough circle pattern
        let poses = vec![
            (0.0, 0.0, 0.0),    // x0: origin
            (1.0, 0.0, 0.1),    // x1: move right
            (1.5, 1.0, 0.5),    // x2: move up-right
            (1.0, 2.0, 1.0),    // x3: move up
            (0.0, 2.5, 1.5),    // x4: move up-left
            (-1.0, 2.0, 2.0),   // x5: move left
            (-1.5, 1.0, 2.5),   // x6: move down-left
            (-1.0, 0.0, 3.0),   // x7: move down
            (-0.5, -0.5, -2.8), // x8: move down-right
            (0.5, -0.5, -2.3),  // x9: back towards origin
        ];

        // Add vertices using [x, y, theta] ordering
        for (i, (x, y, theta)) in poses.iter().enumerate() {
            let var_name = format!("x{}", i);
            let se2_data = dvector![*x, *y, *theta];
            initial_values.insert(var_name, (ManifoldType::SE2, se2_data));
        }

        // Add chain of between factors
        for i in 0..9 {
            let from_pose = poses[i];
            let to_pose = poses[i + 1];

            // Compute relative transformation
            // NOTE(review): dtheta is a raw difference, not wrapped to [-pi, pi];
            // fine for this synthetic data, but confirm the factor normalizes
            // angles before copying this pattern for real datasets.
            let dx = to_pose.0 - from_pose.0;
            let dy = to_pose.1 - from_pose.1;
            let dtheta = to_pose.2 - from_pose.2;

            let between_factor = BetweenFactorSE2::new(dx, dy, dtheta);
            problem.add_residual_block(
                &[&format!("x{}", i), &format!("x{}", i + 1)],
                Box::new(between_factor),
                Some(Box::new(HuberLoss::new(1.0).unwrap())),
            );
        }

        // Add loop closure from x9 back to x0
        let dx = poses[0].0 - poses[9].0;
        let dy = poses[0].1 - poses[9].1;
        let dtheta = poses[0].2 - poses[9].2;

        let loop_closure = BetweenFactorSE2::new(dx, dy, dtheta);
        problem.add_residual_block(
            &["x9", "x0"],
            Box::new(loop_closure),
            Some(Box::new(HuberLoss::new(1.0).unwrap())),
        );

        // Add prior factor for x0 (anchors the graph)
        let prior_factor = PriorFactor {
            data: dvector![0.0, 0.0, 0.0],
        };
        problem.add_residual_block(&["x0"], Box::new(prior_factor), None);

        (problem, initial_values)
    }

    /// Create a test SE3 dataset with 8 vertices in a 3D pattern
    ///
    /// Builds 8 SE3 poses on the corners of a unit cube, connects the 12 cube
    /// edges with between factors, and anchors x0 with a prior.
    fn create_se3_test_problem() -> (Problem, HashMap<String, (ManifoldType, DVector<f64>)>) {
        let mut problem = Problem::new();
        let mut initial_values = HashMap::new();

        // Create 8 SE3 poses in a rough 3D cube pattern
        // Tuple layout: (tx, ty, tz, qx, qy, qz, qw); the quaternions are
        // approximately unit length.
        let poses = [
            // Bottom face of cube
            (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0),   // x0: origin
            (1.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.995), // x1: +X
            (1.0, 1.0, 0.0, 0.0, 0.0, 0.2, 0.98),  // x2: +X+Y
            (0.0, 1.0, 0.0, 0.0, 0.0, 0.3, 0.955), // x3: +Y
            // Top face of cube
            (0.0, 0.0, 1.0, 0.1, 0.0, 0.0, 0.995), // x4: +Z
            (1.0, 0.0, 1.0, 0.1, 0.0, 0.1, 0.99),  // x5: +X+Z
            (1.0, 1.0, 1.0, 0.1, 0.0, 0.2, 0.975), // x6: +X+Y+Z
            (0.0, 1.0, 1.0, 0.1, 0.0, 0.3, 0.95),  // x7: +Y+Z
        ];

        // Add vertices using [tx, ty, tz, qw, qx, qy, qz] ordering
        // (storage puts qw first, while the tuples above carry qw last)
        for (i, (tx, ty, tz, qx, qy, qz, qw)) in poses.iter().enumerate() {
            let var_name = format!("x{}", i);
            let se3_data = dvector![*tx, *ty, *tz, *qw, *qx, *qy, *qz];
            initial_values.insert(var_name, (ManifoldType::SE3, se3_data));
        }

        // Add between factors connecting the cube edges
        let edges = vec![
            (0, 1),
            (1, 2),
            (2, 3),
            (3, 0), // Bottom face
            (4, 5),
            (5, 6),
            (6, 7),
            (7, 4), // Top face
            (0, 4),
            (1, 5),
            (2, 6),
            (3, 7), // Vertical edges
        ];

        for (from_idx, to_idx) in edges {
            let from_pose = poses[from_idx];
            let to_pose = poses[to_idx];

            // Create a simple relative transformation (simplified for testing)
            let relative_se3 = SE3::from_translation_quaternion(
                Vector3::new(
                    to_pose.0 - from_pose.0, // dx
                    to_pose.1 - from_pose.1, // dy
                    to_pose.2 - from_pose.2, // dz
                ),
                Quaternion::new(1.0, 0.0, 0.0, 0.0), // identity quaternion
            );

            let between_factor = BetweenFactorSE3::new(relative_se3);
            problem.add_residual_block(
                &[&format!("x{}", from_idx), &format!("x{}", to_idx)],
                Box::new(between_factor),
                Some(Box::new(HuberLoss::new(1.0).unwrap())),
            );
        }

        // Add prior factor for x0
        let prior_factor = PriorFactor {
            data: dvector![0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
        };
        problem.add_residual_block(&["x0"], Box::new(prior_factor), None);

        (problem, initial_values)
    }

    /// Residual-block count and total residual dimension for the SE2 fixture.
    #[test]
    fn test_problem_construction_se2() {
        let (problem, initial_values) = create_se2_test_problem();

        // Test basic problem properties
        assert_eq!(problem.num_residual_blocks(), 11); // 9 between + 1 loop closure + 1 prior
        assert_eq!(problem.total_residual_dimension, 33); // 11 * 3
        assert_eq!(initial_values.len(), 10);

        println!("SE2 Problem construction test passed");
        println!("Residual blocks: {}", problem.num_residual_blocks());
        println!("Total residual dim: {}", problem.total_residual_dimension);
        println!("Variables: {}", initial_values.len());
    }

    /// Residual-block count and total residual dimension for the SE3 fixture.
    #[test]
    fn test_problem_construction_se3() {
        let (problem, initial_values) = create_se3_test_problem();

        // Test basic problem properties
        assert_eq!(problem.num_residual_blocks(), 13); // 12 between + 1 prior
        assert_eq!(problem.total_residual_dimension, 79); // 12 * 6 + 1 * 7 (SE3 between factors are 6-dim, prior factor is 7-dim)
        assert_eq!(initial_values.len(), 8);

        println!("SE3 Problem construction test passed");
        println!("Residual blocks: {}", problem.num_residual_blocks());
        println!("Total residual dim: {}", problem.total_residual_dimension);
        println!("Variables: {}", initial_values.len());
    }

    /// SE2 variables: tangent size and ambient vector length are both 3.
    #[test]
    fn test_variable_initialization_se2() {
        let (problem, initial_values) = create_se2_test_problem();

        // Test variable initialization
        let variables = problem.initialize_variables(&initial_values);
        assert_eq!(variables.len(), 10);

        // Test variable sizes
        for (name, var) in &variables {
            assert_eq!(
                var.get_size(),
                3,
                "SE2 variable {} should have size 3",
                name
            );
        }

        // Test conversion to DVector
        for (name, var) in &variables {
            let vec = var.to_vector();
            assert_eq!(
                vec.len(),
                3,
                "SE2 variable {} vector should have length 3",
                name
            );
        }

        println!("SE2 Variable initialization test passed");
        println!("Variables created: {}", variables.len());
    }

    /// SE3 variables: 6 tangent DOF but a 7-element ambient vector
    /// (translation + quaternion).
    #[test]
    fn test_variable_initialization_se3() {
        let (problem, initial_values) = create_se3_test_problem();

        // Test variable initialization
        let variables = problem.initialize_variables(&initial_values);
        assert_eq!(variables.len(), 8);

        // Test variable sizes
        for (name, var) in &variables {
            assert_eq!(
                var.get_size(),
                6,
                "SE3 variable {} should have size 6 (DOF)",
                name
            );
        }

        // Test conversion to DVector
        for (name, var) in &variables {
            let vec = var.to_vector();
            assert_eq!(
                vec.len(),
                7,
                "SE3 variable {} vector should have length 7",
                name
            );
        }

        println!("SE3 Variable initialization test passed");
        println!("Variables created: {}", variables.len());
    }

    /// Name-sorted column mapping assigns each SE2 variable a valid starting
    /// column and the offsets sum to the total DOF.
    #[test]
    fn test_column_mapping_se2() {
        let (problem, initial_values) = create_se2_test_problem();
        let variables = problem.initialize_variables(&initial_values);

        // Create column mapping for variables
        let mut variable_index_sparce_matrix = HashMap::new();
        let mut col_offset = 0;
        let mut sorted_vars: Vec<_> = variables.keys().collect();
        sorted_vars.sort(); // Ensure consistent ordering

        for var_name in sorted_vars {
            variable_index_sparce_matrix.insert(var_name.clone(), col_offset);
            col_offset += variables[var_name].get_size();
        }

        // Test total degrees of freedom
        let total_dof: usize = variables.values().map(|v| v.get_size()).sum();
        assert_eq!(total_dof, 30); // 10 variables * 3 DOF each
        assert_eq!(col_offset, 30);

        // Test each variable has correct column mapping
        for (var_name, &col_idx) in &variable_index_sparce_matrix {
            assert!(
                col_idx < total_dof,
                "Column index {} for {} should be < {}",
                col_idx,
                var_name,
                total_dof
            );
        }

        println!("SE2 Column mapping test passed");
        println!("Total DOF: {}", total_dof);
        println!("Variable mappings: {}", variable_index_sparce_matrix.len());
    }

    /// Symbolic sparsity pattern has residual-dimension rows and
    /// total-DOF columns for the SE2 fixture.
    #[test]
    fn test_symbolic_structure_se2() {
        let (problem, initial_values) = create_se2_test_problem();
        let variables = problem.initialize_variables(&initial_values);

        // Create column mapping
        let mut variable_index_sparce_matrix = HashMap::new();
        let mut col_offset = 0;
        let mut sorted_vars: Vec<_> = variables.keys().collect();
        sorted_vars.sort();

        for var_name in sorted_vars {
            variable_index_sparce_matrix.insert(var_name.clone(), col_offset);
            col_offset += variables[var_name].get_size();
        }

        // Build symbolic structure
        let symbolic_structure = problem
            .build_symbolic_structure(&variables, &variable_index_sparce_matrix, col_offset)
            .unwrap();

        // Test symbolic structure dimensions
        assert_eq!(
            symbolic_structure.pattern.nrows(),
            problem.total_residual_dimension
        );
        assert_eq!(symbolic_structure.pattern.ncols(), 30); // total DOF

        println!("SE2 Symbolic structure test passed");
        println!(
            "Symbolic matrix: {} x {}",
            symbolic_structure.pattern.nrows(),
            symbolic_structure.pattern.ncols()
        );
    }

    /// End-to-end SE2 linearization: residual and sparse Jacobian have the
    /// expected dimensions.
    #[test]
    fn test_residual_jacobian_computation_se2() {
        let (problem, initial_values) = create_se2_test_problem();
        let variables = problem.initialize_variables(&initial_values);

        // Create column mapping
        let mut variable_index_sparce_matrix = HashMap::new();
        let mut col_offset = 0;
        let mut sorted_vars: Vec<_> = variables.keys().collect();
        sorted_vars.sort();

        for var_name in sorted_vars {
            variable_index_sparce_matrix.insert(var_name.clone(), col_offset);
            col_offset += variables[var_name].get_size();
        }

        // Test sparse computation
        let symbolic_structure = problem
            .build_symbolic_structure(&variables, &variable_index_sparce_matrix, col_offset)
            .unwrap();
        let (residual_sparse, jacobian_sparse) = problem
            .compute_residual_and_jacobian_sparse(
                &variables,
                &variable_index_sparce_matrix,
                &symbolic_structure,
            )
            .unwrap();

        // Test sparse dimensions
        assert_eq!(residual_sparse.nrows(), problem.total_residual_dimension);
        assert_eq!(jacobian_sparse.nrows(), problem.total_residual_dimension);
        assert_eq!(jacobian_sparse.ncols(), 30);

        println!("SE2 Residual/Jacobian computation test passed");
        println!("Residual dimensions: {}", residual_sparse.nrows());
        println!(
            "Jacobian dimensions: {} x {}",
            jacobian_sparse.nrows(),
            jacobian_sparse.ncols()
        );
    }

    /// End-to-end SE3 linearization: residual and sparse Jacobian have the
    /// expected dimensions.
    #[test]
    fn test_residual_jacobian_computation_se3() {
        let (problem, initial_values) = create_se3_test_problem();
        let variables = problem.initialize_variables(&initial_values);

        // Create column mapping
        let mut variable_index_sparce_matrix = HashMap::new();
        let mut col_offset = 0;
        let mut sorted_vars: Vec<_> = variables.keys().collect();
        sorted_vars.sort();

        for var_name in sorted_vars {
            variable_index_sparce_matrix.insert(var_name.clone(), col_offset);
            col_offset += variables[var_name].get_size();
        }

        // Test sparse computation
        let symbolic_structure = problem
            .build_symbolic_structure(&variables, &variable_index_sparce_matrix, col_offset)
            .unwrap();
        let (residual_sparse, jacobian_sparse) = problem
            .compute_residual_and_jacobian_sparse(
                &variables,
                &variable_index_sparce_matrix,
                &symbolic_structure,
            )
            .unwrap();

        // Test sparse dimensions match
        assert_eq!(residual_sparse.nrows(), problem.total_residual_dimension);
        assert_eq!(jacobian_sparse.nrows(), problem.total_residual_dimension);
        assert_eq!(jacobian_sparse.ncols(), 48); // 8 variables * 6 DOF each

        println!("SE3 Residual/Jacobian computation test passed");
        println!("Residual dimensions: {}", residual_sparse.nrows());
        println!(
            "Jacobian dimensions: {} x {}",
            jacobian_sparse.nrows(),
            jacobian_sparse.ncols()
        );
    }

    /// Add/remove of residual blocks updates block count, total residual
    /// dimension, and returned block ids; removing an unknown id yields None.
    #[test]
    fn test_residual_block_operations() {
        let mut problem = Problem::new();

        // Test adding residual blocks
        let block_id1 = problem.add_residual_block(
            &["x0", "x1"],
            Box::new(BetweenFactorSE2::new(1.0, 0.0, 0.1)),
            Some(Box::new(HuberLoss::new(1.0).unwrap())),
        );

        let block_id2 = problem.add_residual_block(
            &["x0"],
            Box::new(PriorFactor {
                data: dvector![0.0, 0.0, 0.0],
            }),
            None,
        );

        assert_eq!(problem.num_residual_blocks(), 2);
        assert_eq!(problem.total_residual_dimension, 6); // 3 + 3
        assert_eq!(block_id1, 0);
        assert_eq!(block_id2, 1);

        // Test removing residual blocks
        let removed_block = problem.remove_residual_block(block_id1);
        assert!(removed_block.is_some());
        assert_eq!(problem.num_residual_blocks(), 1);
        assert_eq!(problem.total_residual_dimension, 3); // Only prior factor remains

        // Test removing non-existent block
        let non_existent = problem.remove_residual_block(999);
        assert!(non_existent.is_none());

        println!("Residual block operations test passed");
        println!("Block operations working correctly");
    }

    /// Fix/unfix bookkeeping and per-index variable bounds are tracked and
    /// removed independently per variable.
    #[test]
    fn test_variable_constraints() {
        let mut problem = Problem::new();

        // Test fixing variables
        problem.fix_variable("x0", 0);
        problem.fix_variable("x0", 1);
        problem.fix_variable("x1", 2);

        assert!(problem.fixed_variable_indexes.contains_key("x0"));
        assert!(problem.fixed_variable_indexes.contains_key("x1"));
        assert_eq!(problem.fixed_variable_indexes["x0"].len(), 2);
        assert_eq!(problem.fixed_variable_indexes["x1"].len(), 1);

        // Test unfixing variables
        problem.unfix_variable("x0");
        assert!(!problem.fixed_variable_indexes.contains_key("x0"));
        assert!(problem.fixed_variable_indexes.contains_key("x1"));

        // Test variable bounds
        problem.set_variable_bounds("x2", 0, -1.0, 1.0);
        problem.set_variable_bounds("x2", 1, -2.0, 2.0);
        problem.set_variable_bounds("x3", 0, 0.0, 5.0);

        assert!(problem.variable_bounds.contains_key("x2"));
        assert!(problem.variable_bounds.contains_key("x3"));
        assert_eq!(problem.variable_bounds["x2"].len(), 2);
        assert_eq!(problem.variable_bounds["x3"].len(), 1);

        // Test removing bounds
        problem.remove_variable_bounds("x2");
        assert!(!problem.variable_bounds.contains_key("x2"));
        assert!(problem.variable_bounds.contains_key("x3"));

        println!("Variable constraints test passed");
        println!("Fix/unfix and bounds operations working correctly");
    }
}