lax/lib.rs
//! Safe Rust wrapper for LAPACK without external dependencies.
//!
//! [Lapack] trait
//! ----------------
//!
//! This crate provides LAPACK wrappers as traits.
//! For example, LU decomposition of a general matrix is provided like:
//!
//! ```ignore
//! pub trait Lapack {
//!     fn lu(l: MatrixLayout, a: &mut [Self]) -> Result<Pivot>;
//! }
//! ```
//!
//! See [Lapack] for details.
//! This trait is implemented for [f32], [f64], [c32] (an alias for `num::Complex<f32>`),
//! and [c64] (an alias for `num::Complex<f64>`).
//! You can use it like `f64::lu`:
//!
//! ```
//! use lax::{Lapack, layout::MatrixLayout, Transpose};
//!
//! let mut a = vec![
//!     1.0, 2.0,
//!     3.0, 4.0
//! ];
//! let mut b = vec![1.0, 2.0];
//! let layout = MatrixLayout::C { row: 2, lda: 2 };
//! let pivot = f64::lu(layout, &mut a).unwrap();
//! f64::solve(layout, Transpose::No, &a, &pivot, &mut b).unwrap();
//! ```
//!
//! When you want to write a generic algorithm for real and complex matrices,
//! this trait can be used as a trait bound:
//!
//! ```
//! use lax::{Lapack, layout::MatrixLayout, Transpose};
//!
//! fn solve_at_once<T: Lapack>(layout: MatrixLayout, a: &mut [T], b: &mut [T]) -> Result<(), lax::error::Error> {
//!     let pivot = T::lu(layout, a)?;
//!     T::solve(layout, Transpose::No, a, &pivot, b)?;
//!     Ok(())
//! }
//! ```
//!
//! There are several similar traits, described below, to keep development easy;
//! they are merged into a single trait, [Lapack].
//!
//! Linear equation, Inverse matrix, Condition number
//! --------------------------------------------------
//!
//! Depending on the properties of the input matrix, several types of triangular
//! decomposition are used (a short sketch follows the list):
//!
//! - [solve] module provides methods for LU decomposition of general matrices.
//! - [solveh] module provides methods for the Bunch-Kaufman diagonal pivoting method for symmetric/Hermitian indefinite matrices.
//! - [cholesky] module provides methods for Cholesky decomposition of symmetric/Hermitian positive definite matrices.
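//!
//! For example, a symmetric indefinite system can be solved through the
//! Bunch-Kaufman factorization. A minimal sketch, assuming a 2x2 symmetric
//! matrix in C (row-major) layout and the `UPLO::Upper` flag from [flags]:
//!
//! ```ignore
//! use lax::{Lapack, layout::MatrixLayout, UPLO};
//!
//! let mut a = vec![
//!     4.0, 2.0,
//!     2.0, -3.0,
//! ];
//! let mut b = vec![1.0, 2.0];
//! let layout = MatrixLayout::C { row: 2, lda: 2 };
//! // Step 1: Bunch-Kaufman factorization (the factors overwrite `a`)
//! let ipiv = f64::bk(layout, UPLO::Upper, &mut a).unwrap();
//! // Step 2: solve A x = b using the factorization (the solution overwrites `b`)
//! f64::solveh(layout, UPLO::Upper, &a, &ipiv, &mut b).unwrap();
//! ```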
//!
//! Eigenvalue Problem
//! -------------------
//!
//! Depending on the properties of the input matrix,
//! there are several types of eigenvalue problem APIs (a short sketch follows the list):
//!
//! - [eig] module for the eigenvalue problem of a general matrix.
//! - [eig_generalized] module for the generalized eigenvalue problem of general matrices.
//! - [eigh] module for the eigenvalue problem of a symmetric/Hermitian matrix.
//! - [eigh_generalized] module for the generalized eigenvalue problem of symmetric/Hermitian matrices.
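//!
//! For example, eigenvalues and eigenvectors of a symmetric matrix are computed
//! with [Lapack::eigh]. A minimal sketch for a 2x2 matrix in C (row-major) layout:
//!
//! ```ignore
//! use lax::{Lapack, layout::MatrixLayout, UPLO};
//!
//! let mut a = vec![
//!     2.0, 1.0,
//!     1.0, 2.0,
//! ];
//! let layout = MatrixLayout::C { row: 2, lda: 2 };
//! // `true` requests eigenvectors, which overwrite `a` on success
//! let eigs = f64::eigh(true, layout, UPLO::Upper, &mut a).unwrap();
//! assert_eq!(eigs.len(), 2); // eigenvalues of [[2, 1], [1, 2]] are 1 and 3
//! ```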
//!
//! Singular Value Decomposition
//! -----------------------------
//!
//! - [svd] module for singular value decomposition (SVD) of a general matrix (see the sketch after this list)
//! - [svddc] module for singular value decomposition (SVD) with the divide-and-conquer algorithm for a general matrix
//! - [least_squares] module for solving least squares problems using SVD
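//!
//! For example, a full SVD is computed with [Lapack::svd]. A minimal sketch,
//! assuming the returned [SvdOwned] exposes the singular values as `s`:
//!
//! ```ignore
//! use lax::{Lapack, layout::MatrixLayout};
//!
//! let mut a = vec![
//!     3.0, 0.0,
//!     0.0, 4.0,
//! ];
//! let layout = MatrixLayout::C { row: 2, lda: 2 };
//! let out = f64::svd(layout, true, true, &mut a).unwrap();
//! // singular values are returned in descending order (here 4 and 3)
//! assert_eq!(out.s.len(), 2);
//! ```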
//!

#![deny(rustdoc::broken_intra_doc_links, rustdoc::private_intra_doc_links)]

#[cfg(any(
    feature = "intel-mkl-static",  // deprecated
    feature = "intel-mkl-dynamic", // deprecated
    feature = "intel-mkl-dynamic-lp64-iomp",
    feature = "intel-mkl-dynamic-lp64-seq",
    feature = "intel-mkl-static-ilp64-iomp",
    feature = "intel-mkl-static-lp64-iomp",
    feature = "intel-mkl-dynamic-ilp64-iomp",
    feature = "intel-mkl-static-ilp64-seq",
    feature = "intel-mkl-static-lp64-seq"
))]
extern crate intel_mkl_src as _src;

#[cfg(any(feature = "openblas-system", feature = "openblas-static"))]
extern crate openblas_src as _src;

#[cfg(any(feature = "netlib-system", feature = "netlib-static"))]
extern crate netlib_src as _src;

pub mod alloc;
pub mod cholesky;
pub mod eig;
pub mod eig_generalized;
pub mod eigh;
pub mod eigh_generalized;
pub mod error;
pub mod flags;
pub mod layout;
pub mod least_squares;
pub mod opnorm;
pub mod qr;
pub mod rcond;
pub mod solve;
pub mod solveh;
pub mod svd;
pub mod svddc;
pub mod triangular;
pub mod tridiagonal;

pub use crate::eig_generalized::GeneralizedEigenvalue;

pub use self::flags::*;
pub use self::least_squares::LeastSquaresOwned;
pub use self::svd::{SvdOwned, SvdRef};
pub use self::tridiagonal::{LUFactorizedTridiagonal, Tridiagonal};

use self::{alloc::*, error::*, layout::*};
use cauchy::*;
use std::mem::MaybeUninit;

pub type Pivot = Vec<i32>;

#[cfg_attr(doc, katexit::katexit)]
/// Trait for primitive types which implement LAPACK subroutines
pub trait Lapack: Scalar {
    /// Compute eigenvalues and right eigenvectors of a general matrix
    fn eig(
        calc_v: bool,
        l: MatrixLayout,
        a: &mut [Self],
    ) -> Result<(Vec<Self::Complex>, Vec<Self::Complex>)>;

    /// Compute eigenvalues and right eigenvectors for the generalized eigenvalue problem
    /// $Ax = \lambda Bx$ of a pair of general matrices
    fn eig_generalized(
        calc_v: bool,
        l: MatrixLayout,
        a: &mut [Self],
        b: &mut [Self],
        thresh_opt: Option<Self::Real>,
    ) -> Result<(
        Vec<GeneralizedEigenvalue<Self::Complex>>,
        Vec<Self::Complex>,
    )>;

    /// Compute eigenvalues and eigenvectors of a symmetric or Hermitian matrix
    fn eigh(
        calc_eigenvec: bool,
        layout: MatrixLayout,
        uplo: UPLO,
        a: &mut [Self],
    ) -> Result<Vec<Self::Real>>;

    /// Compute eigenvalues and eigenvectors for the generalized eigenvalue problem
    /// $Ax = \lambda Bx$ of symmetric or Hermitian matrices
    fn eigh_generalized(
        calc_eigenvec: bool,
        layout: MatrixLayout,
        uplo: UPLO,
        a: &mut [Self],
        b: &mut [Self],
    ) -> Result<Vec<Self::Real>>;

    /// Execute Householder reflection as the first step of QR-decomposition
    ///
    /// For a C-contiguous array,
    /// this will call LQ-decomposition of the transposed matrix $ A^T = LQ^T $
    fn householder(l: MatrixLayout, a: &mut [Self]) -> Result<Vec<Self>>;

    /// Reconstruct Q-matrix from Householder-reflectors
    fn q(l: MatrixLayout, a: &mut [Self], tau: &[Self]) -> Result<()>;

    /// Execute QR-decomposition at once
    fn qr(l: MatrixLayout, a: &mut [Self]) -> Result<Vec<Self>>;

    /// Compute singular-value decomposition (SVD)
    fn svd(l: MatrixLayout, calc_u: bool, calc_vt: bool, a: &mut [Self]) -> Result<SvdOwned<Self>>;

    /// Compute singular-value decomposition (SVD) with the divide-and-conquer algorithm
    fn svddc(layout: MatrixLayout, jobz: JobSvd, a: &mut [Self]) -> Result<SvdOwned<Self>>;

    /// Compute a vector $x$ which minimizes the Euclidean norm $\| Ax - b\|$
    /// for a given matrix $A$ and a vector $b$.
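    ///
    /// A minimal sketch, assuming the solution overwrites the leading entries of `b`
    /// and the returned [LeastSquaresOwned] reports the rank in a `rank` field:
    ///
    /// ```ignore
    /// use lax::{Lapack, layout::MatrixLayout};
    ///
    /// // 3x2 over-determined system in column-major (F) layout
    /// let mut a = vec![
    ///     1.0, 1.0, 1.0, // first column
    ///     0.0, 1.0, 2.0, // second column
    /// ];
    /// let mut b = vec![1.0, 2.0, 3.0];
    /// let layout = MatrixLayout::F { col: 2, lda: 3 };
    /// let out = f64::least_squares(layout, &mut a, &mut b).unwrap();
    /// assert_eq!(out.rank, 2);
    /// // the least-squares solution is stored in b[0..2]
    /// ```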
    fn least_squares(
        a_layout: MatrixLayout,
        a: &mut [Self],
        b: &mut [Self],
    ) -> Result<LeastSquaresOwned<Self>>;

    /// Solve the least squares problem $\argmin_X \| AX - B\|$ for multiple right-hand sides
    fn least_squares_nrhs(
        a_layout: MatrixLayout,
        a: &mut [Self],
        b_layout: MatrixLayout,
        b: &mut [Self],
    ) -> Result<LeastSquaresOwned<Self>>;

    /// Computes the LU decomposition of a general $m \times n$ matrix
    /// with partial pivoting with row interchanges.
    ///
    /// For a given matrix $A$, the LU decomposition is described as $A = PLU$ where:
    ///
    /// - $L$ is a lower triangular matrix
    /// - $U$ is an upper triangular matrix
    /// - $P$ is a permutation matrix represented by [Pivot]
    ///
    /// This is designed as a two-step computation according to the LAPACK API:
    ///
    /// 1. Factorize the input matrix $A$ into $L$, $U$, and $P$.
    /// 2. Solve the linear equation $Ax = b$ by [Lapack::solve]
    ///    or compute the inverse matrix $A^{-1}$ by [Lapack::inv] using the output of the LU decomposition.
    ///
    /// Output
    /// -------
    /// - $U$ and $L$ are stored in `a` after the LU decomposition has succeeded.
    /// - $P$ is returned as [Pivot]
    ///
    /// Error
    /// ------
    /// - if the matrix is singular
    ///   - In this case, `return_code` in [Error::LapackComputationalFailure] means that the
    ///     `return_code`-th diagonal element of $U$ is zero.
    ///
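    /// Example
    /// --------
    /// A minimal sketch of the two-step computation of an inverse matrix
    /// (see the crate-level docs for the corresponding [Lapack::solve] example):
    ///
    /// ```ignore
    /// use lax::{Lapack, layout::MatrixLayout};
    ///
    /// let mut a = vec![
    ///     1.0, 2.0,
    ///     3.0, 4.0,
    /// ];
    /// let layout = MatrixLayout::C { row: 2, lda: 2 };
    /// // Step 1: LU factorization (the factors overwrite `a`)
    /// let pivot = f64::lu(layout, &mut a).unwrap();
    /// // Step 2: invert in place using the factors and the pivot
    /// f64::inv(layout, &mut a, &pivot).unwrap();
    /// // `a` now stores A^{-1} = [[-2.0, 1.0], [1.5, -0.5]] (row-major)
    /// ```
    ///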
    fn lu(l: MatrixLayout, a: &mut [Self]) -> Result<Pivot>;

    /// Compute the inverse matrix $A^{-1}$ from the output of the LU-decomposition
    fn inv(l: MatrixLayout, a: &mut [Self], p: &Pivot) -> Result<()>;

    /// Solve the linear equation $Ax = b$ using the output of the LU-decomposition
    fn solve(l: MatrixLayout, t: Transpose, a: &[Self], p: &Pivot, b: &mut [Self]) -> Result<()>;

    /// Factorize a symmetric/Hermitian matrix using the Bunch-Kaufman diagonal pivoting method
    ///
    /// For a given symmetric matrix $A$,
    /// this method factorizes $A = U^T D U$ or $A = L D L^T$ where
    ///
    /// - $U$ (or $L$) is a product of permutation and unit upper (lower) triangular matrices
    /// - $D$ is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks.
    ///
    /// This takes a two-step approach based on the LAPACK API:
    ///
    /// 1. Factorize the given matrix $A$ into upper ($U$) or lower ($L$) form with a block-diagonal matrix $D$
    /// 2. Then solve the linear equation $Ax = b$, and/or calculate the inverse matrix $A^{-1}$
    ///
    fn bk(l: MatrixLayout, uplo: UPLO, a: &mut [Self]) -> Result<Pivot>;

    /// Compute the inverse matrix $A^{-1}$ using the result of [Lapack::bk]
    fn invh(l: MatrixLayout, uplo: UPLO, a: &mut [Self], ipiv: &Pivot) -> Result<()>;

    /// Solve the symmetric/Hermitian linear equation $Ax = b$ using the result of [Lapack::bk]
    fn solveh(l: MatrixLayout, uplo: UPLO, a: &[Self], ipiv: &Pivot, b: &mut [Self]) -> Result<()>;

    /// Solve symmetric/Hermitian positive-definite linear equations using Cholesky decomposition
    ///
    /// For a given positive definite matrix $A$,
    /// the Cholesky decomposition is described as $A = U^T U$ or $A = LL^T$ where
    ///
    /// - $L$ is a lower triangular matrix
    /// - $U$ is an upper triangular matrix
    ///
    /// This is designed as a two-step computation according to the LAPACK API:
    ///
    /// 1. Factorize the input matrix $A$ into $L$ or $U$
    /// 2. Solve the linear equation $Ax = b$ by [Lapack::solve_cholesky]
    ///    or compute the inverse matrix $A^{-1}$ by [Lapack::inv_cholesky]
    ///
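    /// Example
    /// --------
    /// A minimal sketch for a 2x2 positive-definite matrix in C (row-major) layout,
    /// assuming the `UPLO::Upper` flag from [flags]:
    ///
    /// ```ignore
    /// use lax::{Lapack, layout::MatrixLayout, UPLO};
    ///
    /// let mut a = vec![
    ///     4.0, 2.0,
    ///     2.0, 3.0,
    /// ];
    /// let mut b = vec![1.0, 2.0];
    /// let layout = MatrixLayout::C { row: 2, lda: 2 };
    /// // Step 1: Cholesky factorization (the factor overwrites `a`)
    /// f64::cholesky(layout, UPLO::Upper, &mut a).unwrap();
    /// // Step 2: solve A x = b using the factor (the solution overwrites `b`)
    /// f64::solve_cholesky(layout, UPLO::Upper, &a, &mut b).unwrap();
    /// ```
    ///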
    fn cholesky(l: MatrixLayout, uplo: UPLO, a: &mut [Self]) -> Result<()>;

    /// Compute the inverse matrix $A^{-1}$ using $U$ or $L$ calculated by [Lapack::cholesky]
    fn inv_cholesky(l: MatrixLayout, uplo: UPLO, a: &mut [Self]) -> Result<()>;

    /// Solve the linear equation $Ax = b$ using $U$ or $L$ calculated by [Lapack::cholesky]
    fn solve_cholesky(l: MatrixLayout, uplo: UPLO, a: &[Self], b: &mut [Self]) -> Result<()>;

    /// Estimates the reciprocal of the condition number of the matrix in the 1-norm.
    ///
    /// `anorm` should be the 1-norm of the matrix `a`.
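    ///
    /// The required norm can be obtained from [Lapack::opnorm]. A minimal sketch,
    /// assuming `a` is expected to hold the LU factors computed by [Lapack::lu]
    /// (as for LAPACK's `?gecon`):
    ///
    /// ```ignore
    /// use lax::{Lapack, layout::MatrixLayout, NormType};
    ///
    /// let mut a = vec![
    ///     1.0, 2.0,
    ///     3.0, 4.0,
    /// ];
    /// let layout = MatrixLayout::C { row: 2, lda: 2 };
    /// // 1-norm of the original matrix, before factorization
    /// let anorm = f64::opnorm(NormType::One, layout, &a);
    /// let _pivot = f64::lu(layout, &mut a).unwrap();
    /// let rcond = f64::rcond(layout, &a, anorm).unwrap();
    /// ```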
    fn rcond(l: MatrixLayout, a: &[Self], anorm: Self::Real) -> Result<Self::Real>;

    /// Compute a norm of a matrix
    ///
    /// For an $n \times m$ matrix
    /// $$
    /// A = \begin{pmatrix}
    ///   a_{11} & \cdots & a_{1m} \\\\
    ///   \vdots & \ddots & \vdots \\\\
    ///   a_{n1} & \cdots & a_{nm}
    /// \end{pmatrix}
    /// $$
    /// LAPACK can compute three types of norms:
    ///
    /// - Operator norm based on the 1-norm of its domain linear space:
    ///   $$
    ///   \Vert A \Vert_1 = \sup_{\Vert x \Vert_1 = 1} \Vert Ax \Vert_1
    ///   = \max_{1 \le j \le m } \sum_{i=1}^n |a_{ij}|
    ///   $$
    ///   where
    ///   $\Vert x\Vert_1 = \sum_{j=1}^m |x_j|$
    ///   is the 1-norm of a vector $x$.
    ///
    /// - Operator norm based on the $\infty$-norm of its domain linear space:
    ///   $$
    ///   \Vert A \Vert_\infty = \sup_{\Vert x \Vert_\infty = 1} \Vert Ax \Vert_\infty
    ///   = \max_{1 \le i \le n } \sum_{j=1}^m |a_{ij}|
    ///   $$
    ///   where
    ///   $\Vert x\Vert_\infty = \max_{j=1}^m |x_j|$
    ///   is the $\infty$-norm of a vector $x$.
    ///
    /// - Frobenius norm
    ///   $$
    ///   \Vert A \Vert_F = \sqrt{\mathrm{Tr} \left(AA^\dagger\right)} = \sqrt{\sum_{i=1}^n \sum_{j=1}^m |a_{ij}|^2}
    ///   $$
    ///
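    /// For example, a minimal sketch assuming the `NormType` variants `One`,
    /// `Infinity`, and `Frobenius` from [flags]:
    ///
    /// ```ignore
    /// use lax::{Lapack, layout::MatrixLayout, NormType};
    ///
    /// let a = vec![
    ///     1.0, 2.0,
    ///     3.0, 4.0,
    /// ];
    /// let layout = MatrixLayout::C { row: 2, lda: 2 };
    /// let one = f64::opnorm(NormType::One, layout, &a);       // max column sum = 6
    /// let inf = f64::opnorm(NormType::Infinity, layout, &a);  // max row sum = 7
    /// let fro = f64::opnorm(NormType::Frobenius, layout, &a); // sqrt(30)
    /// ```
    ///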
    fn opnorm(t: NormType, l: MatrixLayout, a: &[Self]) -> Self::Real;

    /// Solve linear systems $AX = B$ where the coefficient matrix $A$ is triangular
    fn solve_triangular(
        al: MatrixLayout,
        bl: MatrixLayout,
        uplo: UPLO,
        d: Diag,
        a: &[Self],
        b: &mut [Self],
    ) -> Result<()>;

    /// Computes the LU factorization of a tridiagonal `m x n` matrix `a` using
    /// partial pivoting with row interchanges.
    fn lu_tridiagonal(a: Tridiagonal<Self>) -> Result<LUFactorizedTridiagonal<Self>>;

    /// Estimate the reciprocal condition number of a tridiagonal matrix
    /// using the output of [Lapack::lu_tridiagonal]
    fn rcond_tridiagonal(lu: &LUFactorizedTridiagonal<Self>) -> Result<Self::Real>;

    /// Solve the tridiagonal linear system $Ax = b$ using the output of [Lapack::lu_tridiagonal]
    fn solve_tridiagonal(
        lu: &LUFactorizedTridiagonal<Self>,
        bl: MatrixLayout,
        t: Transpose,
        b: &mut [Self],
    ) -> Result<()>;
}

macro_rules! impl_lapack {
    ($s:ty) => {
        impl Lapack for $s {
            fn eig(
                calc_v: bool,
                l: MatrixLayout,
                a: &mut [Self],
            ) -> Result<(Vec<Self::Complex>, Vec<Self::Complex>)> {
                use eig::*;
                let work = EigWork::<$s>::new(calc_v, l)?;
                let EigOwned { eigs, vr, vl } = work.eval(a)?;
                Ok((eigs, vr.or(vl).unwrap_or_default()))
            }

            fn eig_generalized(
                calc_v: bool,
                l: MatrixLayout,
                a: &mut [Self],
                b: &mut [Self],
                thresh_opt: Option<Self::Real>,
            ) -> Result<(
                Vec<GeneralizedEigenvalue<Self::Complex>>,
                Vec<Self::Complex>,
            )> {
                use eig_generalized::*;
                let work = EigGeneralizedWork::<$s>::new(calc_v, l)?;
                let eig_generalized_owned = work.eval(a, b)?;
                let eigs = eig_generalized_owned.calc_eigs(thresh_opt);
                let vr = eig_generalized_owned.vr;
                let vl = eig_generalized_owned.vl;
                Ok((eigs, vr.or(vl).unwrap_or_default()))
            }

            fn eigh(
                calc_eigenvec: bool,
                layout: MatrixLayout,
                uplo: UPLO,
                a: &mut [Self],
            ) -> Result<Vec<Self::Real>> {
                use eigh::*;
                let work = EighWork::<$s>::new(calc_eigenvec, layout)?;
                work.eval(uplo, a)
            }

            fn eigh_generalized(
                calc_eigenvec: bool,
                layout: MatrixLayout,
                uplo: UPLO,
                a: &mut [Self],
                b: &mut [Self],
            ) -> Result<Vec<Self::Real>> {
                use eigh_generalized::*;
                let work = EighGeneralizedWork::<$s>::new(calc_eigenvec, layout)?;
                work.eval(uplo, a, b)
            }

            fn householder(l: MatrixLayout, a: &mut [Self]) -> Result<Vec<Self>> {
                use qr::*;
                let work = HouseholderWork::<$s>::new(l)?;
                work.eval(a)
            }

            fn q(l: MatrixLayout, a: &mut [Self], tau: &[Self]) -> Result<()> {
                use qr::*;
                let mut work = QWork::<$s>::new(l)?;
                work.calc(a, tau)?;
                Ok(())
            }

            fn qr(l: MatrixLayout, a: &mut [Self]) -> Result<Vec<Self>> {
                let tau = Self::householder(l, a)?;
                let r = Vec::from(&*a);
                Self::q(l, a, &tau)?;
                Ok(r)
            }

            fn svd(
                l: MatrixLayout,
                calc_u: bool,
                calc_vt: bool,
                a: &mut [Self],
            ) -> Result<SvdOwned<Self>> {
                use svd::*;
                let work = SvdWork::<$s>::new(l, calc_u, calc_vt)?;
                work.eval(a)
            }

            fn svddc(layout: MatrixLayout, jobz: JobSvd, a: &mut [Self]) -> Result<SvdOwned<Self>> {
                use svddc::*;
                let work = SvdDcWork::<$s>::new(layout, jobz)?;
                work.eval(a)
            }

            fn least_squares(
                l: MatrixLayout,
                a: &mut [Self],
                b: &mut [Self],
            ) -> Result<LeastSquaresOwned<Self>> {
                let b_layout = l.resized(b.len() as i32, 1);
                Self::least_squares_nrhs(l, a, b_layout, b)
            }

            fn least_squares_nrhs(
                a_layout: MatrixLayout,
                a: &mut [Self],
                b_layout: MatrixLayout,
                b: &mut [Self],
            ) -> Result<LeastSquaresOwned<Self>> {
                use least_squares::*;
                let work = LeastSquaresWork::<$s>::new(a_layout, b_layout)?;
                work.eval(a, b)
            }

            fn lu(l: MatrixLayout, a: &mut [Self]) -> Result<Pivot> {
                use solve::*;
                LuImpl::lu(l, a)
            }

            fn inv(l: MatrixLayout, a: &mut [Self], p: &Pivot) -> Result<()> {
                use solve::*;
                let mut work = InvWork::<$s>::new(l)?;
                work.calc(a, p)?;
                Ok(())
            }

            fn solve(
                l: MatrixLayout,
                t: Transpose,
                a: &[Self],
                p: &Pivot,
                b: &mut [Self],
            ) -> Result<()> {
                use solve::*;
                SolveImpl::solve(l, t, a, p, b)
            }

            fn bk(l: MatrixLayout, uplo: UPLO, a: &mut [Self]) -> Result<Pivot> {
                use solveh::*;
                let work = BkWork::<$s>::new(l)?;
                work.eval(uplo, a)
            }

            fn invh(l: MatrixLayout, uplo: UPLO, a: &mut [Self], ipiv: &Pivot) -> Result<()> {
                use solveh::*;
                let mut work = InvhWork::<$s>::new(l)?;
                work.calc(uplo, a, ipiv)
            }

            fn solveh(
                l: MatrixLayout,
                uplo: UPLO,
                a: &[Self],
                ipiv: &Pivot,
                b: &mut [Self],
            ) -> Result<()> {
                use solveh::*;
                SolvehImpl::solveh(l, uplo, a, ipiv, b)
            }

            fn cholesky(l: MatrixLayout, uplo: UPLO, a: &mut [Self]) -> Result<()> {
                use cholesky::*;
                CholeskyImpl::cholesky(l, uplo, a)
            }

            fn inv_cholesky(l: MatrixLayout, uplo: UPLO, a: &mut [Self]) -> Result<()> {
                use cholesky::*;
                InvCholeskyImpl::inv_cholesky(l, uplo, a)
            }

            fn solve_cholesky(
                l: MatrixLayout,
                uplo: UPLO,
                a: &[Self],
                b: &mut [Self],
            ) -> Result<()> {
                use cholesky::*;
                SolveCholeskyImpl::solve_cholesky(l, uplo, a, b)
            }

            fn rcond(l: MatrixLayout, a: &[Self], anorm: Self::Real) -> Result<Self::Real> {
                use rcond::*;
                let mut work = RcondWork::<$s>::new(l);
                work.calc(a, anorm)
            }

            fn opnorm(t: NormType, l: MatrixLayout, a: &[Self]) -> Self::Real {
                use opnorm::*;
                let mut work = OperatorNormWork::<$s>::new(t, l);
                work.calc(a)
            }

            fn solve_triangular(
                al: MatrixLayout,
                bl: MatrixLayout,
                uplo: UPLO,
                d: Diag,
                a: &[Self],
                b: &mut [Self],
            ) -> Result<()> {
                use triangular::*;
                SolveTriangularImpl::solve_triangular(al, bl, uplo, d, a, b)
            }

            fn lu_tridiagonal(a: Tridiagonal<Self>) -> Result<LUFactorizedTridiagonal<Self>> {
                use tridiagonal::*;
                let work = LuTridiagonalWork::<$s>::new(a.l);
                work.eval(a)
            }

            fn rcond_tridiagonal(lu: &LUFactorizedTridiagonal<Self>) -> Result<Self::Real> {
                use tridiagonal::*;
                let mut work = RcondTridiagonalWork::<$s>::new(lu.a.l);
                work.calc(lu)
            }

            fn solve_tridiagonal(
                lu: &LUFactorizedTridiagonal<Self>,
                bl: MatrixLayout,
                t: Transpose,
                b: &mut [Self],
            ) -> Result<()> {
                use tridiagonal::*;
                SolveTridiagonalImpl::solve_tridiagonal(lu, bl, t, b)
            }
        }
    };
}
impl_lapack!(c64);
impl_lapack!(c32);
impl_lapack!(f64);
impl_lapack!(f32);