faer/
lib.rs

1//! `faer` is a general-purpose linear algebra library for rust, with a focus on high performance
2//! for algebraic operations on medium/large matrices, as well as matrix decompositions
3//!
4//! most of the high-level functionality in this library is provided through associated functions in
5//! its vocabulary types: [`Mat`]/[`MatRef`]/[`MatMut`]
6//!
7//! `faer` is recommended for applications that handle medium to large dense matrices, and its
8//! design is not well suited for applications that operate mostly on low dimensional vectors and
9//! matrices such as computer graphics or game development. for such applications, `nalgebra` and
10//! `cgmath` may be better suited
11//!
12//! # basic usage
13//!
14//! [`Mat`] is a resizable matrix type with dynamic capacity, which can be created using
15//! [`Mat::new`] to produce an empty $0\times 0$ matrix, [`Mat::zeros`] to create a rectangular
16//! matrix filled with zeros, [`Mat::identity`] to create an identity matrix, or [`Mat::from_fn`]
17//! for the more general case
18//!
19//! Given a `&Mat<T>` (resp. `&mut Mat<T>`), a [`MatRef<'_, T>`](MatRef) (resp. [`MatMut<'_,
20//! T>`](MatMut)) can be created by calling [`Mat::as_ref`] (resp. [`Mat::as_mut`]), which allow
21//! for more flexibility than `Mat` in that they allow slicing ([`MatRef::get`]) and splitting
22//! ([`MatRef::split_at`])
23//!
24//! `MatRef` and `MatMut` are lightweight view objects. the former can be copied freely while the
25//! latter has move and reborrow semantics, as described in its documentation
26//!
27//! most of the matrix operations can be used through the corresponding math operators: `+` for
28//! matrix addition, `-` for subtraction, `*` for either scalar or matrix multiplication depending
29//! on the types of the operands.
30//!
31//! ## example
32//! ```
33//! use faer::{Mat, Scale, mat};
34//!
35//! let a = mat![
36//! 	[1.0, 5.0, 9.0], //
37//! 	[2.0, 6.0, 10.0],
38//! 	[3.0, 7.0, 11.0],
39//! 	[4.0, 8.0, 12.0f64],
40//! ];
41//!
42//! let b = Mat::from_fn(4, 3, |i, j| (i + j) as f64);
43//!
44//! let add = &a + &b;
45//! let sub = &a - &b;
46//! let scale = Scale(3.0) * &a;
47//! let mul = &a * b.transpose();
48//!
49//! let a00 = a[(0, 0)];
50//! ```
51//!
52//! # matrix decompositions
53//! `faer` provides a variety of matrix factorizations, each with its own advantages and drawbacks:
54//!
55//! ## $LL^\top$ decomposition
56//! [`Mat::llt`] decomposes a self-adjoint positive definite matrix $A$ such that
57//! $$A = LL^H,$$
58//! where $L$ is a lower triangular matrix. this decomposition is highly efficient and has good
59//! stability properties
60//!
61//! [an implementation for sparse matrices is also available](sparse::linalg::solvers::Llt)
62//!
63//! ## $LBL^\top$ decomposition
64//! [`Mat::lblt`] decomposes a self-adjoint (possibly indefinite) matrix $A$ such that
65//! $$P A P^\top = LBL^H,$$
66//! where $P$ is a permutation matrix, $L$ is a lower triangular matrix, and $B$ is a block
67//! diagonal matrix, with $1 \times 1$ or $2 \times 2$ diagonal blocks.
68//! this decomposition is efficient and has good stability properties
69//!
70//! ## $LU$ decomposition with partial pivoting
71//! [`Mat::partial_piv_lu`] decomposes a square invertible matrix $A$ into a lower triangular
72//! matrix $L$, a unit upper triangular matrix $U$, and a permutation matrix $P$, such that
73//! $$PA = LU$$
74//! it is used by default for computing the determinant, and is generally the recommended method
75//! for solving a square linear system or computing the inverse of a matrix (although we generally
76//! recommend using a [`faer::linalg::solvers::Solve`](crate::linalg::solvers::Solve) instead of
77//! computing the inverse explicitly)
78//!
79//! [an implementation for sparse matrices is also available](sparse::linalg::solvers::Lu)
80//!
81//! ## $LU$ decomposition with full pivoting
82//! [`Mat::full_piv_lu`] decomposes a generic rectangular matrix $A$ into a lower triangular
83//! matrix $L$, a unit upper triangular matrix $U$, and permutation matrices $P$ and $Q$, such that
84//! $$PAQ^\top = LU$$
85//! it can be more stable than the LU decomposition with partial pivoting, in exchange for being
86//! more computationally expensive
87//!
88//! ## $QR$ decomposition
89//! [`Mat::qr`] decomposes a matrix $A$ into the product $$A = QR,$$
90//! where $Q$ is a unitary matrix, and $R$ is an upper trapezoidal matrix. it is often used for
91//! solving least squares problems
92//!
93//! [an implementation for sparse matrices is also available](sparse::linalg::solvers::Qr)
94//!
95//! ## $QR$ decomposition with column pivoting
96//! ([`Mat::col_piv_qr`]) decomposes a matrix $A$ into the product $$AP^\top = QR,$$
97//! where $P$ is a permutation matrix, $Q$ is a unitary matrix, and $R$ is an upper trapezoidal
98//! matrix
99//!
100//! it is slower than the version with no pivoting, in exchange for being more numerically stable
101//! for rank-deficient matrices
102//!
103//! ## singular value decomposition
104//! the SVD of a matrix $A$ of shape $(m, n)$ is a decomposition into three components $U$, $S$,
105//! and $V$, such that:
106//!
107//! - $U$ has shape $(m, m)$ and is a unitary matrix,
108//! - $V$ has shape $(n, n)$ and is a unitary matrix,
109//! - $S$ has shape $(m, n)$ and is zero everywhere except the main diagonal, with nonnegative
110//! diagonal values in nonincreasing order,
111//! - and finally:
112//!
113//! $$A = U S V^H$$
114//!
115//! the SVD is provided in two forms: either the full matrices $U$ and $V$ are computed, using
116//! [`Mat::svd`], or only their first $\min(m, n)$ columns are computed, using
117//! [`Mat::thin_svd`]
118//!
119//! if only the singular values (elements of $S$) are desired, they can be obtained in
120//! nonincreasing order using [`Mat::singular_values`]
121//!
122//! ## eigendecomposition
123//! **note**: the order of the eigenvalues is currently unspecified and may be changed in a future
124//! release
125//!
126//! the eigenvalue decomposition of a square matrix $A$ of shape $(n, n)$ is a decomposition into
127//! two components $U$, $S$:
128//!
129//! - $U$ has shape $(n, n)$ and is invertible,
130//! - $S$ has shape $(n, n)$ and is a diagonal matrix,
131//! - and finally:
132//!
133//! $$A = U S U^{-1}$$
134//!
135//! if $A$ is self-adjoint, then $U$ can be made unitary ($U^{-1} = U^H$), and $S$ is real valued.
136//! additionally, the eigenvalues are sorted in nondecreasing order
137//!
138//! Depending on the domain of the input matrix and whether it is self-adjoint, multiple methods
139//! are provided to compute the eigendecomposition:
140//! * [`Mat::self_adjoint_eigen`] can be used with either real or complex matrices,
141//! producing an eigendecomposition of the same type,
142//! * [`Mat::eigen`] can be used with real or complex matrices, but always produces complex values.
143//!
144//! if only the eigenvalues (elements of $S$) are desired, they can be obtained using
145//! [`Mat::self_adjoint_eigenvalues`] (nondecreasing order), [`Mat::eigenvalues`]
146//! with the same conditions described above.
147//!
148//! # crate features
149//!
150//! - `std`: enabled by default. links with the standard library to enable additional features such
151//!   as cpu feature detection at runtime
152//! - `rayon`: enabled by default. enables the `rayon` parallel backend and enables global
153//!   parallelism by default
154//! - `serde`: Enables serialization and deserialization of [`Mat`]
155//! - `npy`: enables conversions to/from numpy's matrix file format
156//! - `perf-warn`: produces performance warnings when matrix operations are called with suboptimal
157//! data layout
158//! - `nightly`: requires the nightly compiler. enables experimental simd features such as avx512
159
160#![cfg_attr(not(feature = "std"), no_std)]
161#![allow(non_snake_case)]
162#![warn(missing_docs)]
163#![warn(rustdoc::broken_intra_doc_links)]
164
165extern crate alloc;
166#[cfg(feature = "std")]
167extern crate std;
168
/// creates one or more invariant-lifetime guards, each carrying a unique
/// compile-time identity; used by [`with_dim!`] to brand dimensions/indices
///
/// see: [`generativity::make_guard`]
#[macro_export]
macro_rules! make_guard {
    ($($name:ident),* $(,)?) => {$(
        // the `Id` / `LifetimeBrand` / `Guard` trio below follows the
        // construction protocol of the `generativity` crate; `lifetime_brand`
        // is macro-hygienic, so repeated invocations do not collide
        #[allow(unused_unsafe)]
        let $name = unsafe { extern crate generativity; ::generativity::Id::new() };
        #[allow(unused, unused_unsafe)]
        let lifetime_brand = unsafe { extern crate generativity; ::generativity::LifetimeBrand::new(&$name) };
        #[allow(unused_unsafe)]
        let $name = unsafe { extern crate generativity; ::generativity::Guard::new($name) };
    )*};
}
181
// crate-internal shorthand for `iter::repeat_n` (expects `iter` to be in
// scope at the call site)
macro_rules! repeat_n {
	($e: expr, $n: expr) => {
		iter::repeat_n($e, $n)
	};
}

// crate-internal forwarder to `pulp::try_const!`, which evaluates the
// expression at compile time when possible
macro_rules! try_const {
	($e: expr) => {
		::pulp::try_const! { $e }
	};
}
193
194use core::num::NonZeroUsize;
195use core::sync::atomic::AtomicUsize;
196use equator::{assert, debug_assert};
197use faer_traits::*;
198
/// shorthand for `<_ as Auto::<T>>::auto()`
///
/// expands to the automatically-derived default value of the `Auto`
/// implementation for the given type
#[macro_export]
macro_rules! auto {
	($ty: ty $(,)?) => {
		$crate::Auto::<$ty>::auto()
	};
}
206
// dispatches the simd kernel `$imp` (of generic type `$ty<T>`) through the
// arch-specific runner for scalar type `$T`
//
// native `c32`/`c64` are transmuted to their `ComplexImpl` representation so
// that both share the optimized complex codepaths; other types dispatch
// through `<$T>::Arch` directly
// NOTE(review): soundness of both `transmute`s relies on `c32`/`c64` being
// layout-compatible with `ComplexImpl<f32>`/`ComplexImpl<f64>` in faer-traits
macro_rules! dispatch {
	($imp: expr, $ty: ident, $T: ty $(,)?) => {
		if try_const! { <$T>::IS_NATIVE_C32 } {
			unsafe { transmute(<ComplexImpl<f32> as ComplexField>::Arch::default().dispatch(transmute::<_, $ty<ComplexImpl<f32>>>($imp))) }
		} else if try_const! { <$T>::IS_NATIVE_C64 } {
			unsafe { transmute(<ComplexImpl<f64> as ComplexField>::Arch::default().dispatch(transmute::<_, $ty<ComplexImpl<f64>>>($imp))) }
		} else {
			<$T>::Arch::default().dispatch($imp)
		}
	};
}
218
// declares a zeroed matrix view `$name` of runtime shape `($m, $n)` backed by
// stack storage with compile-time capacity `($A, $N)` columns of `$A` elements
macro_rules! stack_mat {
	($name: ident, $m: expr, $n: expr, $A: expr, $N: expr, $T: ty $(,)?) => {
		let mut __tmp = {
			// 64-byte alignment for the backing buffer — presumably so that
			// columns can be accessed with aligned simd loads
			#[repr(align(64))]
			struct __Col<T, const A: usize>([T; A]);
			struct __Mat<T, const A: usize, const N: usize>([__Col<T, A>; N]);

			core::mem::MaybeUninit::<__Mat<$T, $A, $N>>::uninit()
		};
		// wrap the uninitialized local in a `MemStack`, then carve a zeroed
		// `($m, $n)` matrix out of it
		let __stack = MemStack::new_any(core::slice::from_mut(&mut __tmp));
		let mut $name = $crate::linalg::temp_mat_zeroed::<$T, _, _>($m, $n, __stack).0;
		let mut $name = $name.as_mat_mut();
	};

	// short form: compile-time capacity defaults to the runtime shape
	($name: ident, $m: expr, $n: expr,  $T: ty $(,)?) => {
		stack_mat!($name, $m, $n, $m, $n, $T)
	};
}
237
// `std::dbg!` clone that formats values with fixed `{:16.12?}` width and
// precision, which makes floating point diagnostics easier to compare
#[macro_export]
#[doc(hidden)]
macro_rules! __dbg {
    () => {
        std::eprintln!("[{}:{}:{}]", std::file!(), std::line!(), std::column!())
    };
    // single value: print `[file:line:col] expr = value`, then pass the value through
    ($val:expr $(,)?) => {
        match $val {
            tmp => {
                std::eprintln!("[{}:{}:{}] {} = {:16.12?}",
                    std::file!(), std::line!(), std::column!(), std::stringify!($val), &tmp);
                tmp
            }
        }
    };
    // multiple values: debug-print each in turn and return them as a tuple
    ($($val:expr),+ $(,)?) => {
        ($($crate::__dbg!($val)),+,)
    };
}
257
// returns `true` exactly once per call site: a per-site static `AtomicBool`
// starts out `false`, and only the first successful `compare_exchange` flips
// it — so each performance warning is logged a single time per process
#[cfg(feature = "perf-warn")]
#[macro_export]
#[doc(hidden)]
macro_rules! __perf_warn {
	($name: ident) => {{
		#[inline(always)]
		#[allow(non_snake_case)]
		fn $name() -> &'static ::core::sync::atomic::AtomicBool {
			static $name: ::core::sync::atomic::AtomicBool = ::core::sync::atomic::AtomicBool::new(false);
			&$name
		}
		// `Ok(_)` only for the thread that wins the false -> true transition
		::core::matches!(
			$name().compare_exchange(
				false,
				true,
				::core::sync::atomic::Ordering::Relaxed,
				::core::sync::atomic::Ordering::Relaxed,
			),
			Ok(_)
		)
	}};
}
280
// binds `$value` to a freshly-branded dimension `$name`: `make_guard!`
// creates a unique invariant lifetime, and `Dim::new` ties the runtime value
// to that brand so indices checked against it can skip re-validation
#[doc(hidden)]
#[macro_export]
macro_rules! with_dim {
	($name: ident, $value: expr $(,)?) => {
		let __val__ = $value;
		$crate::make_guard!($name);
		let $name = $crate::utils::bound::Dim::new(__val__, $name);
	};

	// block form: bind several dimensions in a single invocation
	({$(let $name: ident = $value: expr;)*}) => {$(
		let __val__ = $value;
		$crate::make_guard!($name);
		let $name = $crate::utils::bound::Dim::new(__val__, $name);
	)*};
}
296
297/// zips together matrix of the same size, so that coefficient-wise operations can be performed on
298/// their elements.
299///
300/// # note
301/// the order in which the matrix elements are traversed is unspecified.
302///
303/// # example
304/// ```
305/// use faer::{Mat, mat, unzip, zip};
306///
307/// let nrows = 2;
308/// let ncols = 3;
309///
310/// let a = mat![[1.0, 3.0, 5.0], [2.0, 4.0, 6.0]];
311/// let b = mat![[7.0, 9.0, 11.0], [8.0, 10.0, 12.0]];
312/// let mut sum = Mat::<f64>::zeros(nrows, ncols);
313///
314/// zip!(&mut sum, &a, &b).for_each(|unzip!(sum, a, b)| {
315/// 	*sum = a + b;
316/// });
317///
318/// for i in 0..nrows {
319/// 	for j in 0..ncols {
320/// 		assert_eq!(sum[(i, j)], a[(i, j)] + b[(i, j)]);
321/// 	}
322/// }
323/// ```
#[macro_export]
macro_rules! zip {
    // single operand: wrap in the terminal `LastEq` node
    ($head: expr $(,)?) => {
        $crate::linalg::zip::LastEq($crate::linalg::zip::IntoView::into_view($head), ::core::marker::PhantomData)
    };

    // two or more operands: recursively build a right-nested chain of
    // `ZipEq` nodes (`ZipEq::new` checks that the shapes match)
    ($head: expr, $($tail: expr),* $(,)?) => {
        $crate::linalg::zip::ZipEq::new($crate::linalg::zip::IntoView::into_view($head), $crate::zip!($($tail,)*))
    };
}
334
335/// expands to the type of zipped items.
336///
337/// # example
338/// ```
339/// use faer::{Mat, Zip, mat, unzip, zip};
340///
341/// let nrows = 2;
342/// let ncols = 3;
343///
344/// let a = mat![[1.0, 3.0, 5.0], [2.0, 4.0, 6.0]];
345/// let b = mat![[7.0, 9.0, 11.0], [8.0, 10.0, 12.0]];
346/// let mut sum = Mat::<f64>::zeros(nrows, ncols);
347///
348/// zip!(&mut sum, &a, &b).for_each(|unzip!(sum, a, b): Zip!(&mut f64, &f64, &f64)| {
349/// 	*sum = a + b;
350/// });
351///
352/// for i in 0..nrows {
353/// 	for j in 0..ncols {
354/// 		assert_eq!(sum[(i, j)], a[(i, j)] + b[(i, j)]);
355/// 	}
356/// }
357/// ```
#[macro_export]
macro_rules! Zip {
    // single item type: the terminal `Last` node
    ($head: ty $(,)?) => {
        $crate::linalg::zip::Last::<$head>
    };

    // two or more: right-nested `Zip` type mirroring the structure built by `zip!`
    ($head: ty, $($tail: ty),* $(,)?) => {
        $crate::linalg::zip::Zip::<$head, $crate::Zip!($($tail,)*)>
    };
}
368
369/// used to undo the zipping by the [`zip!`] macro.
370///
371/// # example
372/// ```
373/// use faer::{Mat, mat, unzip, zip};
374///
375/// let nrows = 2;
376/// let ncols = 3;
377///
378/// let a = mat![[1.0, 3.0, 5.0], [2.0, 4.0, 6.0]];
379/// let b = mat![[7.0, 9.0, 11.0], [8.0, 10.0, 12.0]];
380/// let mut sum = Mat::<f64>::zeros(nrows, ncols);
381///
382/// zip!(&mut sum, &a, &b).for_each(|unzip!(sum, a, b)| {
383/// 	*sum = a + b;
384/// });
385///
386/// for i in 0..nrows {
387/// 	for j in 0..ncols {
388/// 		assert_eq!(sum[(i, j)], a[(i, j)] + b[(i, j)]);
389/// 	}
390/// }
391/// ```
#[macro_export]
macro_rules! unzip {
    // single pattern: match the terminal `Last` node
    ($head: pat $(,)?) => {
        $crate::linalg::zip::Last($head)
    };

    // two or more: destructure the right-nested `Zip` chain built by `zip!`
    ($head: pat, $($tail: pat),* $(,)?) => {
        $crate::linalg::zip::Zip($head, $crate::unzip!($($tail,)*))
    };
}
402
// transposes the row-major rows written in `mat!` into the column-major
// nested array the constructor needs; the first bracket group is an
// accumulator of already-completed columns
#[macro_export]
#[doc(hidden)]
macro_rules! __transpose_impl {
    // base case: each row has exactly one element left — emit it as the last column
    ([$([$($col:expr),*])*] $($v:expr;)* ) => {
        [$([$($col,)*],)* [$($v,)*]]
    };
    // recursive case: peel the leading element off every row to form one new
    // column, then recurse on the shortened rows
    ([$([$($col:expr),*])*] $($v0:expr, $($v:expr),* ;)*) => {
        $crate::__transpose_impl!([$([$($col),*])* [$($v0),*]] $($($v),* ;)*)
    };
}
413
414/// creates a [`Mat`] containing the arguments.
415///
416/// ```
417/// use faer::mat;
418///
419/// let matrix = mat![
420/// 	[1.0, 5.0, 9.0], //
421/// 	[2.0, 6.0, 10.0],
422/// 	[3.0, 7.0, 11.0],
423/// 	[4.0, 8.0, 12.0f64],
424/// ];
425///
426/// assert_eq!(matrix[(0, 0)], 1.0);
427/// assert_eq!(matrix[(1, 0)], 2.0);
428/// assert_eq!(matrix[(2, 0)], 3.0);
429/// assert_eq!(matrix[(3, 0)], 4.0);
430///
431/// assert_eq!(matrix[(0, 1)], 5.0);
432/// assert_eq!(matrix[(1, 1)], 6.0);
433/// assert_eq!(matrix[(2, 1)], 7.0);
434/// assert_eq!(matrix[(3, 1)], 8.0);
435///
436/// assert_eq!(matrix[(0, 2)], 9.0);
437/// assert_eq!(matrix[(1, 2)], 10.0);
438/// assert_eq!(matrix[(2, 2)], 11.0);
439/// assert_eq!(matrix[(3, 2)], 12.0);
440/// ```
#[macro_export]
macro_rules! mat {
    () => {
        {
            compile_error!("number of columns in the matrix is ambiguous");
        }
    };

    ($([$($v:expr),* $(,)?] ),* $(,)?) => {
        {
            // transpose the row-major literal into column-major storage; the
            // result is wrapped in `ManuallyDrop` because every element is
            // moved out via `ptr::read` below and must not also be dropped here
            let __data = ::core::mem::ManuallyDrop::new($crate::__transpose_impl!([] $($($v),* ;)*));
            let __data = &*__data;
            let __ncols = __data.len();
            let __nrows = (*__data.get(0).unwrap()).len();

            // SAFETY: `from_fn` visits each `(i, j)` index, so each element is
            // read once; `ManuallyDrop` prevents a second drop of the source
            #[allow(unused_unsafe)]
            unsafe {
                $crate::mat::Mat::from_fn(__nrows, __ncols, |i, j| ::core::ptr::from_ref(&__data[j][i]).read())
            }
        }
    };
}
463
464/// creates a [`col::Col`] containing the arguments
465///
466/// ```
467/// use faer::col;
468///
469/// let col_vec = col![3.0, 5.0, 7.0, 9.0];
470///
471/// assert_eq!(col_vec[0], 3.0);
472/// assert_eq!(col_vec[1], 5.0);
473/// assert_eq!(col_vec[2], 7.0);
474/// assert_eq!(col_vec[3], 9.0);
475/// ```
#[macro_export]
macro_rules! col {
    ($($v: expr),* $(,)?) => {
        {
            // `ManuallyDrop` because each element is moved out via `ptr::read`
            // below and must not also be dropped with the array
            let __data = ::core::mem::ManuallyDrop::new([$($v,)*]);
            let __data = &*__data;
            let __len = __data.len();

            // SAFETY: `from_fn` reads each element exactly once; the
            // `ManuallyDrop` wrapper prevents a double drop
            #[allow(unused_unsafe)]
            unsafe {
                $crate::col::Col::from_fn(__len, |i| ::core::ptr::from_ref(&__data[i]).read())
            }
        }
    };
}
491
492/// creates a [`row::Row`] containing the arguments
493///
494/// ```
495/// use faer::row;
496///
497/// let row_vec = row![3.0, 5.0, 7.0, 9.0];
498///
499/// assert_eq!(row_vec[0], 3.0);
500/// assert_eq!(row_vec[1], 5.0);
501/// assert_eq!(row_vec[2], 7.0);
502/// assert_eq!(row_vec[3], 9.0);
503/// ```
#[macro_export]
macro_rules! row {
    ($($v: expr),* $(,)?) => {
        {
            // `ManuallyDrop` because each element is moved out via `ptr::read`
            // below and must not also be dropped with the array
            let __data = ::core::mem::ManuallyDrop::new([$($v,)*]);
            let __data = &*__data;
            let __len = __data.len();

            // SAFETY: `from_fn` reads each element exactly once; the
            // `ManuallyDrop` wrapper prevents a double drop
            #[allow(unused_unsafe)]
            unsafe {
                $crate::row::Row::from_fn(__len, |i| ::core::ptr::from_ref(&__data[i]).read())
            }
        }
    };
}
519
520/// convenience function to concatenate a nested list of matrices into a single
521/// big ['Mat']. concatonation pattern follows the numpy.block convention that
522/// each sub-list must have an equal number of columns (net) but the boundaries
523/// do not need to align. in other words, this sort of thing:
524/// ```notcode
525///   AAAbb
526///   AAAbb
527///   cDDDD
528/// ```
529/// is perfectly acceptable
530#[doc(hidden)]
531#[track_caller]
532pub fn concat_impl<T: ComplexField>(blocks: &[&[(mat::MatRef<'_, T>, Conj)]]) -> mat::Mat<T> {
533	#[inline(always)]
534	fn count_total_columns<T: ComplexField>(block_row: &[(mat::MatRef<'_, T>, Conj)]) -> usize {
535		let mut out: usize = 0;
536		for (elem, _) in block_row.iter() {
537			out += elem.ncols();
538		}
539		out
540	}
541
542	#[inline(always)]
543	#[track_caller]
544	fn count_rows<T: ComplexField>(block_row: &[(mat::MatRef<'_, T>, Conj)]) -> usize {
545		let mut out: usize = 0;
546		for (i, (e, _)) in block_row.iter().enumerate() {
547			if i == 0 {
548				out = e.nrows();
549			} else {
550				assert!(e.nrows() == out);
551			}
552		}
553		out
554	}
555
556	// get size of result while doing checks
557	let mut n: usize = 0;
558	let mut m: usize = 0;
559	for row in blocks.iter() {
560		n += count_rows(row);
561	}
562	for (i, row) in blocks.iter().enumerate() {
563		let cols = count_total_columns(row);
564		if i == 0 {
565			m = cols;
566		} else {
567			assert!(cols == m);
568		}
569	}
570
571	let mut mat = mat::Mat::<T>::zeros(n, m);
572	let mut ni: usize = 0;
573	let mut mj: usize;
574	for row in blocks.iter() {
575		mj = 0;
576
577		for (elem, conj) in row.iter() {
578			let mut dst = mat.as_mut().submatrix_mut(ni, mj, elem.nrows(), elem.ncols());
579			if *conj == Conj::No {
580				dst.copy_from(elem);
581			} else {
582				dst.copy_from(elem.conjugate());
583			}
584			mj += elem.ncols();
585		}
586		ni += row[0].0.nrows();
587	}
588
589	mat
590}
591
/// concatenates the matrices in each row horizontally,
/// then concatenates the results vertically
///
/// `concat![[a0, a1, a2], [b0, b1]]` results in the matrix
/// ```notcode
/// [a0 | a1 | a2]
/// [b0 | b1]
/// ```
#[macro_export]
macro_rules! concat {
    () => {
        {
            compile_error!("number of columns in the matrix is ambiguous");
        }
    };

    ($([$($v:expr),* $(,)?] ),* $(,)?) => {
        {
            // `__canonicalize` turns each operand view into the
            // `(MatRef, Conj)` pair expected by `concat_impl`
            $crate::concat_impl(&[$(&[$(($v).as_ref().__canonicalize(),)*],)*])
        }
    };
}
613
614/// helper utilities
615pub mod utils;
616
617/// diagonal matrix
618pub mod diag;
619/// rectangular matrix
620pub mod mat;
621/// permutation matrix
622pub mod perm;
623
624/// column vector
625pub mod col;
626/// row vector
627pub mod row;
628
629pub mod linalg;
630#[path = "./operator/mod.rs"]
631pub mod matrix_free;
632pub mod sparse;
633
634/// de-serialization from common matrix file formats
635#[cfg(feature = "std")]
636pub mod io;
637
638#[cfg(feature = "serde")]
639mod serde;
640
/// native unsigned integer type usable as a matrix index
pub trait Index: traits::IndexCore + traits::Index + seal::Seal {}
// blanket impl: any sealed `faer_traits::Index` whose signed counterpart is
// also sealed qualifies
impl<T: faer_traits::Index<Signed: seal::Seal> + seal::Seal> Index for T {}

mod seal {
	/// private marker trait: listing every implementor here prevents
	/// downstream crates from implementing this crate's sealed traits
	pub trait Seal {}
	impl<T: faer_traits::Seal> Seal for T {}
	impl Seal for crate::utils::bound::Dim<'_> {}
	impl<I: crate::Index> Seal for crate::utils::bound::Idx<'_, I> {}
	impl<I: crate::Index> Seal for crate::utils::bound::IdxInc<'_, I> {}
	impl<I: crate::Index> Seal for crate::utils::bound::MaybeIdx<'_, I> {}
	impl<I: crate::Index> Seal for crate::utils::bound::IdxIncOne<I> {}
	impl<I: crate::Index> Seal for crate::utils::bound::MaybeIdxOne<I> {}
	impl Seal for crate::utils::bound::One {}
	impl Seal for crate::utils::bound::Zero {}
	impl Seal for crate::ContiguousFwd {}
	impl Seal for crate::ContiguousBwd {}
}
659
/// sealed trait for types that can be created from "unbound" values, as long as their
/// struct preconditions are upheld
pub trait Unbind<I = usize>: Send + Sync + Copy + core::fmt::Debug + seal::Seal {
	/// creates new value
	/// # safety
	/// safety invariants must be upheld
	unsafe fn new_unbound(idx: I) -> Self;

	/// returns the unbound value, unconstrained by safety invariants
	fn unbound(self) -> I;
}

/// type that can be used to index into a range (see [`ShapeIdx`])
pub type Idx<Dim, I = usize> = <Dim as ShapeIdx>::Idx<I>;
/// type that can be used to partition a range (inclusive of the endpoint)
pub type IdxInc<Dim, I = usize> = <Dim as ShapeIdx>::IdxInc<I>;
/// either an index or a negative value
pub type MaybeIdx<Dim, I = usize> = <Dim as ShapeIdx>::MaybeIdx<I>;

/// base trait for [`Shape`]: associates a dimension with its index types
pub trait ShapeIdx {
	/// type that can be used to index into a range
	type Idx<I: Index>: Unbind<I> + Ord + Eq;
	/// type that can be used to partition a range
	type IdxInc<I: Index>: Unbind<I> + Ord + Eq + From<Idx<Self, I>>;
	/// either an index or a negative value
	type MaybeIdx<I: Index>: Unbind<I::Signed> + Ord + Eq;
}
688
/// matrix dimension
pub trait Shape: Unbind + Ord + ShapeIdx<Idx<usize>: Ord + Eq + PartialOrd<Self>, IdxInc<usize>: Ord + Eq + PartialOrd<Self>> {
	/// whether the types involved have any safety invariants
	const IS_BOUND: bool = true;

	/// bind the current value using a invariant lifetime guard
	#[inline]
	fn bind<'n>(self, guard: generativity::Guard<'n>) -> utils::bound::Dim<'n> {
		utils::bound::Dim::new(self.unbound(), guard)
	}

	/// cast a slice of bound values to unbound values
	#[inline]
	fn cast_idx_slice<I: Index>(slice: &[Idx<Self, I>]) -> &[I] {
		// SAFETY: `Idx<Self, I>` wraps an `I` (see `Unbind<I>`); the cast
		// assumes the two share the same layout — NOTE(review): upheld by the
		// bound index types in `utils::bound`, confirm when changing them
		unsafe { core::slice::from_raw_parts(slice.as_ptr() as _, slice.len()) }
	}

	/// cast a slice of bound values to unbound values
	#[inline]
	fn cast_idx_inc_slice<I: Index>(slice: &[IdxInc<Self, I>]) -> &[I] {
		// SAFETY: same layout argument as `cast_idx_slice`
		unsafe { core::slice::from_raw_parts(slice.as_ptr() as _, slice.len()) }
	}

	/// returns the index `0`, which is always valid
	#[inline(always)]
	fn start() -> IdxInc<Self> {
		// SAFETY: `0 <= dim` holds for any dimension
		unsafe { IdxInc::<Self>::new_unbound(0) }
	}

	/// returns the incremented value, as an inclusive index
	#[inline(always)]
	fn next(idx: Idx<Self>) -> IdxInc<Self> {
		// SAFETY: `idx < dim` implies `idx + 1 <= dim`
		unsafe { IdxInc::<Self>::new_unbound(idx.unbound() + 1) }
	}

	/// returns the last value, equal to the dimension
	#[inline(always)]
	fn end(self) -> IdxInc<Self> {
		// SAFETY: the dimension itself is always a valid inclusive index
		unsafe { IdxInc::<Self>::new_unbound(self.unbound()) }
	}

	/// checks if the index is valid, returning `Some(_)` in that case
	#[inline(always)]
	fn idx(self, idx: usize) -> Option<Idx<Self>> {
		if idx < self.unbound() {
			// SAFETY: bounds were just checked
			Some(unsafe { Idx::<Self>::new_unbound(idx) })
		} else {
			None
		}
	}

	/// checks if the index is valid, returning `Some(_)` in that case
	#[inline(always)]
	fn idx_inc(self, idx: usize) -> Option<IdxInc<Self>> {
		if idx <= self.unbound() {
			// SAFETY: inclusive bounds were just checked
			Some(unsafe { IdxInc::<Self>::new_unbound(idx) })
		} else {
			None
		}
	}

	/// checks if the index is valid, and panics otherwise
	#[inline(always)]
	fn checked_idx(self, idx: usize) -> Idx<Self> {
		equator::assert!(idx < self.unbound());
		// SAFETY: the assertion above guarantees validity
		unsafe { Idx::<Self>::new_unbound(idx) }
	}

	/// checks if the index is valid, and panics otherwise
	#[inline(always)]
	fn checked_idx_inc(self, idx: usize) -> IdxInc<Self> {
		equator::assert!(idx <= self.unbound());
		// SAFETY: the assertion above guarantees validity
		unsafe { IdxInc::<Self>::new_unbound(idx) }
	}

	/// assumes the index is valid
	/// # safety
	/// the index must be valid
	#[inline(always)]
	unsafe fn unchecked_idx(self, idx: usize) -> Idx<Self> {
		equator::debug_assert!(idx < self.unbound());
		// SAFETY: upheld by the caller (checked in debug builds only)
		unsafe { Idx::<Self>::new_unbound(idx) }
	}

	/// assumes the index is valid
	/// # safety
	/// the index must be valid
	#[inline(always)]
	unsafe fn unchecked_idx_inc(self, idx: usize) -> IdxInc<Self> {
		equator::debug_assert!(idx <= self.unbound());
		// SAFETY: upheld by the caller (checked in debug builds only)
		unsafe { IdxInc::<Self>::new_unbound(idx) }
	}

	/// returns an iterator over the indices between `from` and `to`
	#[inline(always)]
	fn indices(from: IdxInc<Self>, to: IdxInc<Self>) -> impl Clone + ExactSizeIterator + DoubleEndedIterator<Item = Idx<Self>> {
		(from.unbound()..to.unbound()).map(
			#[inline(always)]
			// SAFETY: `from <= i < to <= dim`, so `i` is a valid index
			|i| unsafe { Idx::<Self>::new_unbound(i) },
		)
	}
}
791
// any plain sealed value is trivially "unbound": construction and extraction
// are both the identity, with no invariants to uphold
impl<T: Send + Sync + Copy + core::fmt::Debug + faer_traits::Seal> Unbind<T> for T {
	#[inline(always)]
	unsafe fn new_unbound(idx: T) -> Self {
		idx
	}

	#[inline(always)]
	fn unbound(self) -> T {
		self
	}
}

// unconstrained `usize` dimensions index with plain integers
impl ShapeIdx for usize {
	type Idx<I: Index> = I;
	type IdxInc<I: Index> = I;
	type MaybeIdx<I: Index> = I::Signed;
}
// a `usize` dimension carries no lifetime brand, hence no safety invariants
impl Shape for usize {
	const IS_BOUND: bool = false;
}
812
/// stride distance between two consecutive elements along a given dimension
pub trait Stride: seal::Seal + core::fmt::Debug + Copy + Send + Sync + 'static {
	/// the reversed stride type
	type Rev: Stride<Rev = Self>;
	/// returns the reversed stride
	fn rev(self) -> Self::Rev;

	/// returns the stride in elements
	fn element_stride(self) -> isize;
}

// generic runtime stride: reversing the iteration direction negates the value
impl Stride for isize {
	type Rev = Self;

	#[inline(always)]
	fn rev(self) -> Self::Rev {
		-self
	}

	#[inline(always)]
	fn element_stride(self) -> isize {
		self
	}
}
837
/// contiguous stride equal to `+1`, known at compile time
#[derive(Copy, Clone, Debug)]
pub struct ContiguousFwd;
/// contiguous stride equal to `-1`, known at compile time
#[derive(Copy, Clone, Debug)]
pub struct ContiguousBwd;

// the two contiguous strides are each other's reversal
impl Stride for ContiguousFwd {
	type Rev = ContiguousBwd;

	#[inline(always)]
	fn rev(self) -> Self::Rev {
		ContiguousBwd
	}

	#[inline(always)]
	fn element_stride(self) -> isize {
		1
	}
}

impl Stride for ContiguousBwd {
	type Rev = ContiguousFwd;

	#[inline(always)]
	fn rev(self) -> Self::Rev {
		ContiguousFwd
	}

	#[inline(always)]
	fn element_stride(self) -> isize {
		-1
	}
}
872
/// memory allocation error
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum TryReserveError {
	/// required allocation does not fit within `isize::MAX` bytes
	CapacityOverflow,
	/// allocator could not provide an allocation with the requested layout
	AllocError {
		/// requested layout
		layout: core::alloc::Layout,
	},
}

/// determines whether the input should be implicitly conjugated or not
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Conj {
	/// no implicit conjugation
	No,
	/// implicit conjugation
	Yes,
}

// whether a diagonal is implicitly unit or stored explicitly
// NOTE(review): presumably describes the diagonal of a triangular factor
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) enum DiagStatus {
	Unit,
	Generic,
}

/// determines whether to replace or add to the result of a matmul operation
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Accum {
	/// overwrites the output buffer
	Replace,
	/// adds the result to the output buffer
	Add,
}

/// determines which side of a self-adjoint matrix should be accessed
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Side {
	/// lower triangular half
	Lower,
	/// upper triangular half
	Upper,
}
917
impl Conj {
	/// returns `self == Conj::Yes`
	#[inline]
	pub const fn is_conj(self) -> bool {
		matches!(self, Conj::Yes)
	}

	/// returns the composition of `self` and `other`
	/// (conjugation is an involution, so composition behaves like xor)
	#[inline]
	pub const fn compose(self, other: Self) -> Self {
		match (self, other) {
			(Conj::No, Conj::No) => Conj::No,
			(Conj::Yes, Conj::Yes) => Conj::No,
			(Conj::No, Conj::Yes) => Conj::Yes,
			(Conj::Yes, Conj::No) => Conj::Yes,
		}
	}

	/// returns `Conj::No` if `T` is the canonical representation, otherwise `Conj::Yes`
	#[inline]
	pub const fn get<T: Conjugate>() -> Self {
		if T::IS_CANONICAL { Self::No } else { Self::Yes }
	}

	// reads `value` as its canonical type, conjugating it if `T` is a
	// conjugated wrapper type
	#[inline]
	pub(crate) fn apply<T: Conjugate>(value: &T) -> T::Canonical {
		// SAFETY: reinterprets `&T` as `&T::Canonical` — NOTE(review): relies
		// on the layout-compatibility contract of `Conjugate` in `faer-traits`
		let value = unsafe { &*(value as *const T as *const T::Canonical) };

		if try_const! { matches!(Self::get::<T>(), Conj::Yes) } {
			T::Canonical::conj_impl(value)
		} else {
			T::Canonical::copy_impl(value)
		}
	}

	// runtime counterpart of `apply`: conjugates iff `self` is `Conj::Yes`
	#[inline]
	pub(crate) fn apply_rt<T: ComplexField>(self, value: &T) -> T {
		if self.is_conj() { T::conj_impl(value) } else { T::copy_impl(value) }
	}
}
958
/// determines the parallelization configuration
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Par {
	/// sequential, non portable across different platforms
	Seq,
	/// parallelized using the global rayon threadpool, non portable across different platforms
	#[cfg(feature = "rayon")]
	Rayon(NonZeroUsize),
}

impl Par {
	/// returns `Par::Rayon(nthreads)` if `nthreads` is non-zero, or
	/// `Par::Rayon(rayon::current_num_threads())` otherwise
	#[inline]
	#[cfg(feature = "rayon")]
	pub fn rayon(nthreads: usize) -> Self {
		if nthreads == 0 {
			// 0 means "use rayon's current pool size"; `current_num_threads`
			// returns at least 1, so the unwrap cannot fail
			Self::Rayon(NonZeroUsize::new(rayon::current_num_threads()).unwrap())
		} else {
			Self::Rayon(NonZeroUsize::new(nthreads).unwrap())
		}
	}

	/// the number of threads that should ideally execute an operation with the given parallelism
	#[inline]
	pub fn degree(&self) -> usize {
		utils::thread::parallelism_degree(*self)
	}
}
988
#[allow(non_camel_case_types)]
/// `Complex<f32>`
pub type c32 = traits::c32;
#[allow(non_camel_case_types)]
/// `Complex<f64>`
pub type c64 = traits::c64;
#[allow(non_camel_case_types)]
/// `Complex<fx128>` — complex number with 128-bit real and imaginary parts
pub type cx128 = traits::cx128;
#[allow(non_camel_case_types)]
/// 128-bit extended-precision real floating point type
pub type fx128 = traits::fx128;
1001
1002pub use col::{Col, ColMut, ColRef};
1003pub use mat::{Mat, MatMut, MatRef};
1004pub use row::{Row, RowMut, RowRef};
1005
1006#[allow(unused_imports, dead_code)]
1007mod internal_prelude {
1008	pub trait DivCeil: Sized {
1009		fn msrv_div_ceil(self, rhs: Self) -> Self;
1010		fn msrv_next_multiple_of(self, rhs: Self) -> Self;
1011		fn msrv_checked_next_multiple_of(self, rhs: Self) -> Option<Self>;
1012	}
1013
1014	impl DivCeil for usize {
1015		#[inline]
1016		fn msrv_div_ceil(self, rhs: Self) -> Self {
1017			let d = self / rhs;
1018			let r = self % rhs;
1019			if r > 0 { d + 1 } else { d }
1020		}
1021
1022		#[inline]
1023		fn msrv_next_multiple_of(self, rhs: Self) -> Self {
1024			match self % rhs {
1025				0 => self,
1026				r => self + (rhs - r),
1027			}
1028		}
1029
1030		#[inline]
1031		fn msrv_checked_next_multiple_of(self, rhs: Self) -> Option<Self> {
1032			{
1033				match self.checked_rem(rhs)? {
1034					0 => Some(self),
1035					r => self.checked_add(rhs - r),
1036				}
1037			}
1038		}
1039	}
1040
1041	#[cfg(test)]
1042	pub(crate) use std::dbg;
1043	#[cfg(test)]
1044	pub(crate) use {alloc::boxed::Box, alloc::vec, alloc::vec::Vec};
1045
1046	pub use faer_traits::{ComplexImpl, ComplexImplConj, Symbolic};
1047
1048	pub(crate) use crate::col::{Col, ColMut, ColRef};
1049	pub(crate) use crate::diag::{Diag, DiagMut, DiagRef};
1050	pub(crate) use crate::hacks::transmute;
1051	pub(crate) use crate::linalg::{self, temp_mat_scratch, temp_mat_uninit, temp_mat_zeroed};
1052	pub(crate) use crate::mat::{AsMat, AsMatMut, AsMatRef, Mat, MatMut, MatRef};
1053	pub(crate) use crate::perm::{Perm, PermRef};
1054	pub(crate) use crate::prelude::*;
1055	pub(crate) use crate::row::{AsRowMut, AsRowRef, Row, RowMut, RowRef};
1056	pub(crate) use crate::utils::bound::{Array, Dim, Idx, IdxInc, MaybeIdx};
1057	pub(crate) use crate::utils::simd::SimdCtx;
1058	pub(crate) use crate::{Auto, NonExhaustive, Side, Spec};
1059
1060	pub use num_complex::Complex;
1061
1062	pub use faer_macros::math;
1063	pub use faer_traits::math_utils::*;
1064	pub use faer_traits::{ComplexField, Conjugate, Index, IndexCore, Real, RealField, SignedIndex, SimdArch};
1065
1066	#[inline]
1067	pub fn simd_align(i: usize) -> usize {
1068		i.wrapping_neg()
1069	}
1070
1071	pub(crate) use crate::{Accum, Conj, ContiguousBwd, ContiguousFwd, DiagStatus, Par, Shape, Stride, Unbind, unzip, zip};
1072
1073	pub use {unzip as uz, zip as z};
1074
1075	pub use crate::make_guard;
1076	pub use dyn_stack::{MemStack, StackReq};
1077	pub use equator::{assert, assert as Assert, debug_assert, debug_assert as DebugAssert};
1078	pub use reborrow::*;
1079}
1080
/// internal prelude for the sparse linear algebra modules: everything from
/// `internal_prelude`, plus the sparse vocabulary types and helpers
#[allow(unused_imports)]
pub(crate) mod internal_prelude_sp {
	pub(crate) use crate::internal_prelude::*;
	pub(crate) use crate::sparse::{
		FaerError, NONE, Pair, SparseColMat, SparseColMatMut, SparseColMatRef, SparseRowMat, SparseRowMatMut, SparseRowMatRef, SymbolicSparseColMat,
		SymbolicSparseColMatRef, SymbolicSparseRowMat, SymbolicSparseRowMatRef, Triplet, csc_numeric, csc_symbolic, csr_numeric, csr_symbolic,
		linalg as linalg_sp, try_collect, try_zeroed, windows2,
	};
	pub(crate) use core::cell::Cell;
	pub(crate) use core::iter;
	pub(crate) use dyn_stack::MemBuffer;
}
1093
1094/// useful imports for general usage of the library
1095pub mod prelude {
1096	pub use reborrow::{IntoConst, Reborrow, ReborrowMut};
1097
1098	pub use super::{Par, Scale, c32, c64, col, mat, row, unzip, zip};
1099	pub use col::{Col, ColMut, ColRef};
1100	pub use mat::{Mat, MatMut, MatRef};
1101	pub use row::{Row, RowMut, RowRef};
1102
1103	#[cfg(feature = "linalg")]
1104	pub use super::linalg::solvers::{DenseSolve, Solve, SolveLstsq};
1105
1106	#[cfg(feature = "sparse")]
1107	pub use super::prelude_sp::*;
1108
1109	/// see [`Default`]
1110	#[inline]
1111	pub fn default<T: Default>() -> T {
1112		Default::default()
1113	}
1114}
1115
/// sparse-specific additions pulled into the public prelude when the
/// `sparse` feature is enabled
#[cfg(feature = "sparse")]
mod prelude_sp {
	use crate::sparse;

	pub use sparse::{SparseColMat, SparseColMatMut, SparseColMatRef, SparseRowMat, SparseRowMatMut, SparseRowMatRef};
}
1122
/// scaling factor for multiplying matrices.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct Scale<T>(pub T);

impl<T> Scale<T> {
	/// create a reference to a scaling factor from a reference to a value.
	#[inline(always)]
	pub fn from_ref(value: &T) -> &Self {
		// SAFETY: `Scale<T>` is `#[repr(transparent)]` over `T`, so the two
		// references have identical layout and validity requirements
		unsafe { &*(value as *const T).cast::<Self>() }
	}

	/// create a mutable reference to a scaling factor from a mutable reference to a value.
	#[inline(always)]
	pub fn from_mut(value: &mut T) -> &mut Self {
		// SAFETY: `Scale<T>` is `#[repr(transparent)]` over `T`, so the two
		// references have identical layout and validity requirements
		unsafe { &mut *(value as *mut T).cast::<Self>() }
	}
}
1140
/// global parallelism setting, encoded as:
/// 0: disabled
/// 1: `Seq`
/// n >= 2: `Rayon(n - 2)`
///
/// default: `Rayon(0)`
static GLOBAL_PARALLELISM: AtomicUsize = {
	// default to the rayon threadpool when available (and not running under
	// miri), otherwise fall back to sequential execution
	let init = if cfg!(all(not(miri), feature = "rayon")) { 2 } else { 1 };
	AtomicUsize::new(init)
};
1156
1157/// causes functions that access global parallelism settings to panic.
1158pub fn disable_global_parallelism() {
1159	GLOBAL_PARALLELISM.store(0, core::sync::atomic::Ordering::Relaxed);
1160}
1161
1162/// sets the global parallelism settings.
1163pub fn set_global_parallelism(par: Par) {
1164	let value = match par {
1165		Par::Seq => 1,
1166		#[cfg(feature = "rayon")]
1167		Par::Rayon(n) => n.get().saturating_add(2),
1168	};
1169	GLOBAL_PARALLELISM.store(value, core::sync::atomic::Ordering::Relaxed);
1170}
1171
1172/// gets the global parallelism settings.
1173///
1174/// # panics
1175/// panics if global parallelism is disabled.
1176#[track_caller]
1177pub fn get_global_parallelism() -> Par {
1178	let value = GLOBAL_PARALLELISM.load(core::sync::atomic::Ordering::Relaxed);
1179	match value {
1180		0 => panic!("Global parallelism is disabled."),
1181		1 => Par::Seq,
1182		#[cfg(feature = "rayon")]
1183		n => Par::rayon(n - 2),
1184		#[cfg(not(feature = "rayon"))]
1185		_ => unreachable!(),
1186	}
1187}
1188
1189#[doc(hidden)]
1190pub mod hacks;
1191
1192/// statistics and randomness functionality
1193pub mod stats;
1194
mod non_exhaustive {
	// marker type whose only field is `pub(crate)`: embedding it in a public
	// struct/enum prevents construction and exhaustive matching outside this
	// crate, similarly to `#[non_exhaustive]`
	#[doc(hidden)]
	#[derive(Debug, Copy, Clone, PartialEq, Eq)]
	#[repr(transparent)]
	pub struct NonExhaustive(pub(crate) ());
}
1201pub(crate) use non_exhaustive::NonExhaustive;
1202
/// like `Default`, but with an extra type parameter so that algorithm hyperparameters can be tuned
/// per scalar type.
pub trait Auto<T> {
	/// returns the default value of `Self`, tuned for the scalar type `T`
	fn auto() -> Self;
}
1209
/// implements [`Default`] based on `Config`'s [`Auto`] implementation for the type `T`.
pub struct Spec<Config, T> {
	/// wrapped config value
	pub config: Config,
	// ties the `T` parameter to the struct without storing a `T`; the
	// `fn() -> T` form imposes no `Send`/`Sync` requirements on `T`
	__marker: core::marker::PhantomData<fn() -> T>,
}
1216
1217impl<Config, T> core::ops::Deref for Spec<Config, T> {
1218	type Target = Config;
1219
1220	#[inline]
1221	fn deref(&self) -> &Self::Target {
1222		&self.config
1223	}
1224}
1225
1226impl<Config, T> core::ops::DerefMut for Spec<Config, T> {
1227	#[inline]
1228	fn deref_mut(&mut self) -> &mut Self::Target {
1229		&mut self.config
1230	}
1231}
1232
1233impl<Config: Copy, T> Copy for Spec<Config, T> {}
1234impl<Config: Clone, T> Clone for Spec<Config, T> {
1235	#[inline]
1236	fn clone(&self) -> Self {
1237		Self::new(self.config.clone())
1238	}
1239}
1240impl<Config: core::fmt::Debug, T> core::fmt::Debug for Spec<Config, T> {
1241	#[inline]
1242	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
1243		self.config.fmt(f)
1244	}
1245}
1246
1247impl<Config, T> Spec<Config, T> {
1248	/// wraps the given config value
1249	#[inline]
1250	pub fn new(config: Config) -> Self {
1251		Spec {
1252			config,
1253			__marker: core::marker::PhantomData,
1254		}
1255	}
1256}
1257
1258impl<T, Config> From<Config> for Spec<Config, T> {
1259	#[inline]
1260	fn from(config: Config) -> Self {
1261		Spec {
1262			config,
1263			__marker: core::marker::PhantomData,
1264		}
1265	}
1266}
1267
1268impl<T, Config: Auto<T>> Default for Spec<Config, T> {
1269	#[inline]
1270	fn default() -> Self {
1271		Spec {
1272			config: Auto::<T>::auto(),
1273			__marker: core::marker::PhantomData,
1274		}
1275	}
1276}
1277
1278mod into_range {
1279	use super::*;
1280
1281	pub trait IntoRange<I> {
1282		type Len<N: Shape>: Shape;
1283
1284		fn into_range(self, min: I, max: I) -> core::ops::Range<I>;
1285	}
1286
1287	impl<I> IntoRange<I> for core::ops::Range<I> {
1288		type Len<N: Shape> = usize;
1289
1290		#[inline]
1291		fn into_range(self, _: I, _: I) -> core::ops::Range<I> {
1292			self
1293		}
1294	}
1295	impl<I> IntoRange<I> for core::ops::RangeFrom<I> {
1296		type Len<N: Shape> = usize;
1297
1298		#[inline]
1299		fn into_range(self, _: I, max: I) -> core::ops::Range<I> {
1300			self.start..max
1301		}
1302	}
1303	impl<I> IntoRange<I> for core::ops::RangeTo<I> {
1304		type Len<N: Shape> = usize;
1305
1306		#[inline]
1307		fn into_range(self, min: I, _: I) -> core::ops::Range<I> {
1308			min..self.end
1309		}
1310	}
1311	impl<I> IntoRange<I> for core::ops::RangeFull {
1312		type Len<N: Shape> = N;
1313
1314		#[inline]
1315		fn into_range(self, min: I, max: I) -> core::ops::Range<I> {
1316			min..max
1317		}
1318	}
1319}
1320
1321mod sort;
1322
1323pub extern crate dyn_stack;
1324pub extern crate faer_traits as traits;
1325pub extern crate num_complex as complex;
1326pub extern crate reborrow;
1327
1328#[cfg(feature = "rand")]
1329#[cfg_attr(docs_rs, doc(cfg(feature = "rand")))]
1330pub extern crate rand;