//! Linear algebra backends for apex-solver (`apex_solver/linalg/mod.rs`):
//! solver selection, error types, and the sparse-solver traits.

1pub mod cholesky;
2pub mod explicit_schur;
3pub mod implicit_schur;
4pub mod qr;
5
6use crate::core::problem::VariableEnum;
7use faer::{Mat, sparse::SparseColMat};
8use std::{
9    collections::HashMap,
10    fmt,
11    fmt::{Display, Formatter},
12};
13use thiserror::Error;
14use tracing::error;
15
/// Selects which sparse linear solver backend solves the normal/augmented
/// equations during optimization.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum LinearSolverType {
    /// Sparse Cholesky factorization (the default). Requires the normal
    /// matrix to be symmetric positive definite.
    #[default]
    SparseCholesky,
    /// Sparse QR factorization; avoids forming J^T·J explicitly.
    SparseQR,
    /// Schur-complement solver that exploits block structure
    /// (e.g. camera/landmark partitioning in bundle adjustment).
    SparseSchurComplement,
}
23
24impl Display for LinearSolverType {
25    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
26        match self {
27            LinearSolverType::SparseCholesky => write!(f, "Sparse Cholesky"),
28            LinearSolverType::SparseQR => write!(f, "Sparse QR"),
29            LinearSolverType::SparseSchurComplement => write!(f, "Sparse Schur Complement"),
30        }
31    }
32}
33
/// Linear algebra specific error types for apex-solver.
///
/// Each variant carries a human-readable context string; the display text is
/// produced by the `thiserror` derive. Use `log()` / `log_with_source()` to
/// record an error via `tracing` before propagating it.
#[derive(Debug, Clone, Error)]
pub enum LinAlgError {
    /// Matrix factorization failed (Cholesky, QR, etc.)
    #[error("Matrix factorization failed: {0}")]
    FactorizationFailed(String),

    /// Singular or near-singular matrix detected
    #[error("Singular matrix detected: {0}")]
    SingularMatrix(String),

    /// Failed to create sparse matrix from triplets
    #[error("Failed to create sparse matrix: {0}")]
    SparseMatrixCreation(String),

    /// Matrix format conversion failed
    #[error("Matrix conversion failed: {0}")]
    MatrixConversion(String),

    /// Invalid input provided to linear solver
    #[error("Invalid input: {0}")]
    InvalidInput(String),

    /// Solver in invalid state (e.g., initialized incorrectly)
    #[error("Invalid solver state: {0}")]
    InvalidState(String),
}
61
62impl LinAlgError {
63    /// Log the error with tracing::error and return self for chaining
64    ///
65    /// This method allows for a consistent error logging pattern throughout
66    /// the linalg module, ensuring all errors are properly recorded.
67    ///
68    /// # Example
69    /// ```
70    /// # use apex_solver::linalg::LinAlgError;
71    /// # fn operation() -> Result<(), LinAlgError> { Ok(()) }
72    /// # fn example() -> Result<(), LinAlgError> {
73    /// operation()
74    ///     .map_err(|e| e.log())?;
75    /// # Ok(())
76    /// # }
77    /// ```
78    #[must_use]
79    pub fn log(self) -> Self {
80        error!("{}", self);
81        self
82    }
83
84    /// Log the error with the original source error from a third-party library
85    ///
86    /// This method logs both the LinAlgError and the underlying error
87    /// from external libraries (e.g., faer's FaerError, LltError, CreationError).
88    /// This provides full debugging context when errors occur in third-party code.
89    ///
90    /// # Arguments
91    /// * `source_error` - The original error from the third-party library (must implement Debug)
92    ///
93    /// # Example
94    /// ```
95    /// # use apex_solver::linalg::LinAlgError;
96    /// # fn symbolic_llt_op() -> Result<(), std::io::Error> { Ok(()) }
97    /// # fn example() -> Result<(), LinAlgError> {
98    /// symbolic_llt_op()
99    ///     .map_err(|e| {
100    ///         LinAlgError::FactorizationFailed(
101    ///             "Symbolic Cholesky decomposition failed".to_string()
102    ///         )
103    ///         .log_with_source(e)
104    ///     })?;
105    /// # Ok(())
106    /// # }
107    /// ```
108    #[must_use]
109    pub fn log_with_source<E: std::fmt::Debug>(self, source_error: E) -> Self {
110        error!("{} | Source: {:?}", self, source_error);
111        self
112    }
113}
114
/// Result type for linear algebra operations.
///
/// Shorthand for `Result<T, LinAlgError>` used throughout the linalg module.
pub type LinAlgResult<T> = Result<T, LinAlgError>;
117
118// /// Contains statistical information about the quality of the optimization solution.
119// #[derive(Debug, Clone)]
120// pub struct SolverElement {
121//     /// The Hessian matrix, computed as `(J^T * W * J)`.
122//     ///
123//     /// This is `None` if the Hessian could not be computed.
124//     pub hessian: Option<sparse::SparseColMat<usize, f64>>,
125
126//     /// The gradient vector, computed as `J^T * W * r`.
127//     ///
128//     /// This is `None` if the gradient could not be computed.
129//     pub gradient: Option<Mat<f64>>,
130
131//     /// The parameter covariance matrix, computed as `(J^T * W * J)^-1`.
132//     ///
133//     /// This is `None` if the Hessian is singular or ill-conditioned.
134//     pub covariance_matrix: Option<Mat<f64>>,
135//     /// Asymptotic standard errors of the parameters.
136//     ///
137//     /// This is `None` if the covariance matrix could not be computed.
138//     /// Each error is the square root of the corresponding diagonal element
139//     /// of the covariance matrix.
140//     pub standard_errors: Option<Mat<f64>>,
141// }
142
/// Trait for structured sparse linear solvers that require variable information.
///
/// This trait extends the basic sparse solver interface to support solvers that
/// exploit problem structure (e.g., Schur complement for bundle adjustment).
/// These solvers need access to variable information to partition the problem.
pub trait StructuredSparseLinearSolver {
    /// Initialize the solver's block structure from problem variables.
    ///
    /// This must be called before solving to set up the internal structure.
    ///
    /// # Arguments
    /// * `variables` - Map of variable names to their typed instances
    /// * `variable_index_map` - Map from variable names to starting column indices
    ///
    /// # Errors
    /// Returns `LinAlgError` if the block structure cannot be built from the
    /// given variables (implementation-specific).
    fn initialize_structure(
        &mut self,
        variables: &HashMap<String, VariableEnum>,
        variable_index_map: &HashMap<String, usize>,
    ) -> LinAlgResult<()>;

    /// Solve the normal equation: (J^T * J) * dx = -J^T * r
    ///
    /// # Errors
    /// Returns `LinAlgError` if factorization fails or the system is
    /// singular/ill-conditioned.
    fn solve_normal_equation(
        &mut self,
        residuals: &Mat<f64>,
        jacobians: &SparseColMat<usize, f64>,
    ) -> LinAlgResult<Mat<f64>>;

    /// Solve the augmented (damped) equation: (J^T * J + λI) * dx = -J^T * r
    ///
    /// `lambda` is the damping factor added to the diagonal (e.g. as used by
    /// Levenberg-Marquardt-style algorithms).
    ///
    /// # Errors
    /// Returns `LinAlgError` if factorization fails or the system is
    /// singular/ill-conditioned.
    fn solve_augmented_equation(
        &mut self,
        residuals: &Mat<f64>,
        jacobians: &SparseColMat<usize, f64>,
        lambda: f64,
    ) -> LinAlgResult<Mat<f64>>;

    /// Get the cached Hessian matrix (J^T * J); `None` if not yet computed.
    fn get_hessian(&self) -> Option<&SparseColMat<usize, f64>>;

    /// Get the cached gradient vector (J^T * r); `None` if not yet computed.
    fn get_gradient(&self) -> Option<&Mat<f64>>;
}
183
/// Trait for sparse linear solvers that can solve both normal and augmented equations
pub trait SparseLinearSolver {
    /// Solve the normal equation: (J^T * J) * dx = -J^T * r
    ///
    /// # Errors
    /// Returns `LinAlgError` if:
    /// - Matrix factorization fails
    /// - Matrix is singular or ill-conditioned
    /// - Numerical instability is detected
    fn solve_normal_equation(
        &mut self,
        residuals: &Mat<f64>,
        jacobians: &SparseColMat<usize, f64>,
    ) -> LinAlgResult<Mat<f64>>;

    /// Solve the augmented (damped) equation: (J^T * J + λI) * dx = -J^T * r
    ///
    /// `lambda` is the damping factor added to the diagonal.
    ///
    /// # Errors
    /// Returns `LinAlgError` if:
    /// - Matrix factorization fails
    /// - Matrix is singular or ill-conditioned
    /// - Numerical instability is detected
    fn solve_augmented_equation(
        &mut self,
        residuals: &Mat<f64>,
        jacobians: &SparseColMat<usize, f64>,
        lambda: f64,
    ) -> LinAlgResult<Mat<f64>>;

    /// Get the cached Hessian matrix (J^T * J) from the last solve;
    /// `None` if no solve has been performed yet.
    fn get_hessian(&self) -> Option<&SparseColMat<usize, f64>>;

    /// Get the cached gradient vector (J^T * r) from the last solve;
    /// `None` if no solve has been performed yet.
    fn get_gradient(&self) -> Option<&Mat<f64>>;

    /// Compute the covariance matrix (H^{-1}) by inverting the cached Hessian.
    ///
    /// Returns a reference to the covariance matrix if successful, `None`
    /// otherwise (e.g. no cached Hessian, or the Hessian is singular).
    /// The covariance matrix represents parameter uncertainty in the tangent space.
    fn compute_covariance_matrix(&mut self) -> Option<&Mat<f64>>;

    /// Get the cached covariance matrix (H^{-1}) computed from the Hessian.
    ///
    /// Returns `None` if `compute_covariance_matrix` has not succeeded yet.
    fn get_covariance_matrix(&self) -> Option<&Mat<f64>>;
}
230
231pub use cholesky::SparseCholeskySolver;
232pub use explicit_schur::{
233    SchurBlockStructure, SchurOrdering, SchurPreconditioner, SchurSolverAdapter, SchurVariant,
234    SparseSchurComplementSolver,
235};
236pub use implicit_schur::IterativeSchurSolver;
237pub use qr::SparseQRSolver;
238
239/// Extract per-variable covariance blocks from the full covariance matrix.
240///
241/// Given the full covariance matrix H^{-1} (inverse of information matrix),
242/// this function extracts the diagonal blocks corresponding to each individual variable.
243///
244/// # Arguments
245/// * `full_covariance` - Full covariance matrix of size n×n (from H^{-1})
246/// * `variables` - Map of variable names to their Variable objects
247/// * `variable_index_map` - Map from variable names to their starting column index in the full matrix
248///
249/// # Returns
250/// HashMap mapping variable names to their covariance matrices in tangent space.
251/// For SE3 variables, this would be 6×6 matrices; for SE2, 3×3; etc.
252///
253pub fn extract_variable_covariances(
254    full_covariance: &Mat<f64>,
255    variables: &HashMap<String, VariableEnum>,
256    variable_index_map: &HashMap<String, usize>,
257) -> HashMap<String, Mat<f64>> {
258    let mut result = HashMap::new();
259
260    for (var_name, var) in variables {
261        // Get the starting column/row index for this variable
262        if let Some(&start_idx) = variable_index_map.get(var_name) {
263            // Get the tangent space dimension for this variable
264            let dim = var.get_size();
265
266            // Extract the block diagonal covariance for this variable
267            // This is the block [start_idx:start_idx+dim, start_idx:start_idx+dim]
268            let mut var_cov = Mat::zeros(dim, dim);
269
270            for i in 0..dim {
271                for j in 0..dim {
272                    var_cov[(i, j)] = full_covariance[(start_idx + i, start_idx + j)];
273                }
274            }
275
276            result.insert(var_name.clone(), var_cov);
277        }
278    }
279
280    result
281}