use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2};
use scirs2_core::numeric::{Float, NumAssign, One, Zero};
use std::iter::Sum;
use crate::decomposition::{cholesky, lu};
use crate::error::{LinalgError, LinalgResult};
use crate::norm::matrix_norm;
use crate::parallel::WorkerConfig;
/// Strategy used to build a preconditioner for iterative linear solvers.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PreconditionerType {
/// No preconditioning (identity operator).
Identity,
/// Jacobi preconditioning: elementwise inverse of the matrix diagonal.
Diagonal,
/// Incomplete LU factorization with a drop tolerance.
IncompleteLU,
/// Incomplete Cholesky factorization (symmetric positive definite matrices).
IncompleteCholesky,
/// Algebraic multigrid. NOTE(review): no dedicated implementation in this
/// file; `create_preconditioner` falls back to a diagonal preconditioner.
AlgebraicMultigrid,
/// Geometric multigrid. NOTE(review): not implemented here; same fallback.
GeometricMultigrid,
/// Additive Schwarz domain decomposition. NOTE(review): not implemented here.
AdditiveSchwarz,
/// Block Jacobi: exact inverses of square diagonal blocks.
BlockJacobi,
/// Sparse approximate inverse. NOTE(review): not implemented here.
SparseApproximateInverse,
/// Truncated Neumann-series (polynomial) approximation of the inverse.
Polynomial,
/// Automatically select a strategy from matrix properties.
Adaptive,
}
/// Configuration shared by all preconditioner constructors.
#[derive(Debug, Clone)]
pub struct PreconditionerConfig {
/// Which preconditioning strategy to build.
pub preconditioner_type: PreconditionerType,
/// Fill level for incomplete factorizations.
/// NOTE(review): not read by the ILU/IC implementations in this file.
pub fill_level: usize,
/// Entries with magnitude at or below this are dropped during incomplete
/// factorization.
pub drop_tolerance: f64,
/// Number of multigrid levels. NOTE(review): unused by this file.
pub mg_levels: usize,
/// Smoothing iterations per multigrid level. NOTE(review): unused here.
pub mg_smoothing_iterations: usize,
/// Side length of the diagonal blocks used by block-Jacobi.
pub blocksize: usize,
/// Subdomain overlap for additive Schwarz. NOTE(review): unused here.
pub domain_overlap: usize,
/// Degree of the Neumann-series polynomial preconditioner.
pub polynomial_degree: usize,
/// Worker/threading configuration.
pub workers: WorkerConfig,
/// Condition-number threshold for adaptive selection.
/// NOTE(review): not read by `AdaptivePreconditioner::new` as written.
pub adaptive_threshold: f64,
}
impl Default for PreconditionerConfig {
/// General-purpose defaults; adaptive strategy selection is enabled.
fn default() -> Self {
Self {
preconditioner_type: PreconditionerType::Adaptive,
fill_level: 1,
drop_tolerance: 1e-4,
mg_levels: 4,
mg_smoothing_iterations: 2,
blocksize: 64,
domain_overlap: 2,
polynomial_degree: 3,
workers: WorkerConfig::default(),
adaptive_threshold: 1e6,
}
}
}
impl PreconditionerConfig {
    /// Create a configuration with default settings (see [`Default`]).
    pub fn new() -> Self {
        Self::default()
    }

    /// Select the preconditioning strategy.
    pub fn with_type(mut self, preconditioner_type: PreconditionerType) -> Self {
        self.preconditioner_type = preconditioner_type;
        self
    }

    /// Set the fill level for incomplete factorizations.
    pub fn with_fill_level(mut self, fill_level: usize) -> Self {
        self.fill_level = fill_level;
        self
    }

    /// Set the magnitude below which entries are dropped during incomplete
    /// factorization.
    pub fn with_drop_tolerance(mut self, drop_tolerance: f64) -> Self {
        self.drop_tolerance = drop_tolerance;
        self
    }

    /// Set the number of multigrid levels and smoothing iterations per level.
    pub fn with_multigrid(mut self, levels: usize, smoothing_iterations: usize) -> Self {
        self.mg_levels = levels;
        self.mg_smoothing_iterations = smoothing_iterations;
        self
    }

    /// Set the diagonal block size used by block-Jacobi.
    pub fn with_blocksize(mut self, blocksize: usize) -> Self {
        self.blocksize = blocksize;
        self
    }

    /// Set the subdomain overlap for additive Schwarz.
    pub fn with_domain_overlap(mut self, domain_overlap: usize) -> Self {
        self.domain_overlap = domain_overlap;
        self
    }

    /// Set the polynomial degree for the Neumann-series preconditioner.
    pub fn with_polynomial_degree(mut self, polynomial_degree: usize) -> Self {
        self.polynomial_degree = polynomial_degree;
        self
    }

    /// Set the worker/threading configuration.
    pub fn with_workers(mut self, workers: WorkerConfig) -> Self {
        self.workers = workers;
        self
    }

    /// Set the condition-number threshold used by adaptive selection.
    pub fn with_adaptive_threshold(mut self, adaptive_threshold: f64) -> Self {
        self.adaptive_threshold = adaptive_threshold;
        self
    }
}
/// Linear-operator interface for applying a preconditioner `M⁻¹` to a vector.
pub trait PreconditionerOp<F> {
/// Apply the preconditioner: compute `M⁻¹ x`.
fn apply(&self, x: &ArrayView1<F>) -> LinalgResult<Array1<F>>;
/// Apply the transpose `M⁻ᵀ x`. The default forwards to `apply`, which is
/// only correct for symmetric preconditioners; non-symmetric
/// implementations should override this.
fn apply_transpose(&self, x: &ArrayView1<F>) -> LinalgResult<Array1<F>> {
self.apply(x)
}
/// Dimension of the (square) operator.
fn size(&self) -> usize;
/// Whether `M⁻¹` is symmetric. Defaults to `true`.
fn is_symmetric(&self) -> bool {
true }
}
/// Jacobi (diagonal) preconditioner: stores the elementwise inverse of the
/// matrix diagonal.
#[derive(Debug, Clone)]
pub struct DiagonalPreconditioner<F> {
/// Precomputed `1 / diag(A)`; near-zero diagonal entries are mapped to 1.
inverse_diagonal: Array1<F>,
}
impl<F> DiagonalPreconditioner<F>
where
    F: Float
        + NumAssign
        + Zero
        + One
        + Sum
        + scirs2_core::ndarray::ScalarOperand
        + Send
        + Sync
        + 'static,
{
    /// Build a Jacobi (diagonal) preconditioner from a square matrix.
    ///
    /// Diagonal entries with magnitude below machine epsilon are replaced by
    /// an inverse of 1 so the operator stays well defined.
    pub fn new(matrix: &ArrayView2<F>) -> LinalgResult<Self> {
        let (rows, cols) = matrix.dim();
        if rows != cols {
            return Err(LinalgError::ShapeError(
                "Diagonal preconditioner requires square matrix".to_string(),
            ));
        }
        let mut inverse_diagonal = Array1::zeros(cols);
        for (idx, entry) in inverse_diagonal.iter_mut().enumerate() {
            let d = matrix[[idx, idx]];
            *entry = if d.abs() < F::epsilon() {
                F::one()
            } else {
                F::one() / d
            };
        }
        Ok(Self { inverse_diagonal })
    }
}
impl<F> PreconditionerOp<F> for DiagonalPreconditioner<F>
where
F: Float
+ NumAssign
+ Zero
+ One
+ Sum
+ scirs2_core::ndarray::ScalarOperand
+ Send
+ Sync
+ 'static,
{
/// Elementwise multiply by the stored inverse diagonal.
fn apply(&self, x: &ArrayView1<F>) -> LinalgResult<Array1<F>> {
Ok(&self.inverse_diagonal * x)
}
fn size(&self) -> usize {
self.inverse_diagonal.len()
}
}
/// Incomplete LU preconditioner: applies `(LU)⁻¹` via two triangular solves,
/// where `L` and `U` come from an ILU factorization with a drop tolerance.
#[derive(Debug, Clone)]
pub struct IncompleteLUPreconditioner<F> {
/// Unit lower-triangular factor (dense storage).
l_factor: Array2<F>,
/// Upper-triangular factor (dense storage).
u_factor: Array2<F>,
/// Matrix dimension.
size: usize,
}
impl<F> IncompleteLUPreconditioner<F>
where
    F: Float
        + NumAssign
        + Zero
        + One
        + Sum
        + scirs2_core::ndarray::ScalarOperand
        + Send
        + Sync
        + 'static,
{
    /// Compute an incomplete LU factorization of `matrix`.
    ///
    /// Entries whose magnitude is at most `config.drop_tolerance` are ignored
    /// during elimination, keeping the factors sparse at the cost of accuracy.
    ///
    /// # Errors
    /// Returns `ShapeError` for non-square input, `SingularMatrixError` when
    /// a pivot is numerically zero, and `InvalidInput` when the drop
    /// tolerance cannot be represented in `F`.
    pub fn new(matrix: &ArrayView2<F>, config: &PreconditionerConfig) -> LinalgResult<Self> {
        let (m, n) = matrix.dim();
        if m != n {
            return Err(LinalgError::ShapeError(
                "ILU preconditioner requires square matrix".to_string(),
            ));
        }
        // Convert the drop tolerance once; the previous per-element conversion
        // ran inside O(n^3) loops and panicked on failure instead of erroring.
        let drop_tol = F::from(config.drop_tolerance).ok_or_else(|| {
            LinalgError::InvalidInput("drop_tolerance is not representable in F".to_string())
        })?;
        let mut l_factor = Array2::zeros((n, n));
        let mut u_factor = Array2::zeros((n, n));
        let mut working = matrix.to_owned();
        for k in 0..n {
            let pivot = working[[k, k]];
            if pivot.abs() < F::epsilon() {
                return Err(LinalgError::SingularMatrixError(
                    "Matrix is singular".to_string(),
                ));
            }
            u_factor[[k, k]] = pivot;
            l_factor[[k, k]] = F::one();
            // Eliminate column k below the pivot, skipping dropped entries.
            for i in (k + 1)..n {
                if working[[i, k]].abs() > drop_tol {
                    l_factor[[i, k]] = working[[i, k]] / pivot;
                    for j in (k + 1)..n {
                        if working[[k, j]].abs() > drop_tol && working[[i, j]].abs() > drop_tol {
                            working[[i, j]] =
                                working[[i, j]] - l_factor[[i, k]] * working[[k, j]];
                        }
                    }
                }
            }
            // Keep only the significant entries of row k in U.
            for j in (k + 1)..n {
                if working[[k, j]].abs() > drop_tol {
                    u_factor[[k, j]] = working[[k, j]];
                }
            }
        }
        Ok(Self {
            l_factor,
            u_factor,
            size: n,
        })
    }

    /// Solve `L y = x` by forward substitution (L has a unit diagonal, so no
    /// division is needed).
    fn forward_solve(&self, x: &ArrayView1<F>) -> LinalgResult<Array1<F>> {
        let mut y = Array1::zeros(self.size);
        for i in 0..self.size {
            let mut sum = F::zero();
            for j in 0..i {
                sum += self.l_factor[[i, j]] * y[j];
            }
            y[i] = x[i] - sum;
        }
        Ok(y)
    }

    /// Solve `U z = y` by backward substitution.
    fn backward_solve(&self, y: &ArrayView1<F>) -> LinalgResult<Array1<F>> {
        let mut z = Array1::zeros(self.size);
        for i in (0..self.size).rev() {
            let mut sum = F::zero();
            for j in (i + 1)..self.size {
                sum += self.u_factor[[i, j]] * z[j];
            }
            z[i] = (y[i] - sum) / self.u_factor[[i, i]];
        }
        Ok(z)
    }
}
impl<F> PreconditionerOp<F> for IncompleteLUPreconditioner<F>
where
F: Float
+ NumAssign
+ Zero
+ One
+ Sum
+ scirs2_core::ndarray::ScalarOperand
+ Send
+ Sync
+ 'static,
{
/// Apply `(LU)⁻¹ x` via a forward then a backward triangular solve.
fn apply(&self, x: &ArrayView1<F>) -> LinalgResult<Array1<F>> {
let y = self.forward_solve(x)?;
self.backward_solve(&y.view())
}
fn size(&self) -> usize {
self.size
}
/// An LU product is generally not symmetric.
fn is_symmetric(&self) -> bool {
false }
}
/// Incomplete Cholesky preconditioner: applies `(L Lᵀ)⁻¹` via two triangular
/// solves. Intended for symmetric positive definite matrices.
#[derive(Debug, Clone)]
pub struct IncompleteCholeskyPreconditioner<F> {
/// Lower-triangular factor (dense storage).
l_factor: Array2<F>,
/// Matrix dimension.
size: usize,
}
impl<F> IncompleteCholeskyPreconditioner<F>
where
    F: Float
        + NumAssign
        + Zero
        + One
        + Sum
        + scirs2_core::ndarray::ScalarOperand
        + Send
        + Sync
        + 'static,
{
    /// Compute an incomplete Cholesky factorization `A ≈ L Lᵀ`.
    ///
    /// Entries with magnitude at or below `config.drop_tolerance` are dropped.
    ///
    /// # Errors
    /// Returns `ShapeError` for non-square input, `InvalidInput` when a
    /// non-positive pivot is encountered (matrix not positive definite) or
    /// when the drop tolerance is not representable in `F`.
    pub fn new(matrix: &ArrayView2<F>, config: &PreconditionerConfig) -> LinalgResult<Self> {
        let (m, n) = matrix.dim();
        if m != n {
            return Err(LinalgError::ShapeError(
                "Incomplete Cholesky requires square matrix".to_string(),
            ));
        }
        // Convert the drop tolerance once; the previous per-element conversion
        // also panicked on failure instead of returning an error.
        let drop_tol = F::from(config.drop_tolerance).ok_or_else(|| {
            LinalgError::InvalidInput("drop_tolerance is not representable in F".to_string())
        })?;
        let mut l_factor = Array2::zeros((n, n));
        let mut working = matrix.to_owned();
        for k in 0..n {
            let diag_elem = working[[k, k]];
            if diag_elem <= F::zero() {
                return Err(LinalgError::InvalidInput(
                    "Matrix is not positive definite".to_string(),
                ));
            }
            l_factor[[k, k]] = diag_elem.sqrt();
            for i in (k + 1)..n {
                if working[[i, k]].abs() > drop_tol {
                    l_factor[[i, k]] = working[[i, k]] / l_factor[[k, k]];
                }
            }
            // Rank-1 update of the trailing submatrix:
            //   A[i][j] -= L[i][k] * L[j][k]   for k < j <= i.
            // BUG FIX: the previous range `k..i` excluded j == i, so diagonal
            // entries were never downdated and the factorization was wrong
            // even for dense SPD matrices. (It also needlessly updated the
            // already-consumed column j == k.)
            for i in (k + 1)..n {
                for j in (k + 1)..=i {
                    if l_factor[[i, k]].abs() > drop_tol && l_factor[[j, k]].abs() > drop_tol {
                        working[[i, j]] -= l_factor[[i, k]] * l_factor[[j, k]];
                    }
                }
            }
        }
        Ok(Self { l_factor, size: n })
    }

    /// Solve `L y = x` by forward substitution.
    fn forward_solve(&self, x: &ArrayView1<F>) -> LinalgResult<Array1<F>> {
        let mut y = Array1::zeros(self.size);
        for i in 0..self.size {
            let mut sum = F::zero();
            for j in 0..i {
                sum += self.l_factor[[i, j]] * y[j];
            }
            y[i] = (x[i] - sum) / self.l_factor[[i, i]];
        }
        Ok(y)
    }

    /// Solve `Lᵀ z = y` by backward substitution (reads `L` column-wise).
    fn backward_solve(&self, y: &ArrayView1<F>) -> LinalgResult<Array1<F>> {
        let mut z = Array1::zeros(self.size);
        for i in (0..self.size).rev() {
            let mut sum = F::zero();
            for j in (i + 1)..self.size {
                sum += self.l_factor[[j, i]] * z[j];
            }
            z[i] = (y[i] - sum) / self.l_factor[[i, i]];
        }
        Ok(z)
    }
}
impl<F> PreconditionerOp<F> for IncompleteCholeskyPreconditioner<F>
where
F: Float
+ NumAssign
+ Zero
+ One
+ Sum
+ scirs2_core::ndarray::ScalarOperand
+ Send
+ Sync
+ 'static,
{
/// Apply `(L Lᵀ)⁻¹ x`: forward solve with `L`, backward solve with `Lᵀ`.
fn apply(&self, x: &ArrayView1<F>) -> LinalgResult<Array1<F>> {
let y = self.forward_solve(x)?;
self.backward_solve(&y.view())
}
fn size(&self) -> usize {
self.size
}
}
/// Block-Jacobi preconditioner: the matrix diagonal is partitioned into
/// square blocks and each block is inverted exactly.
#[derive(Debug, Clone)]
pub struct BlockJacobiPreconditioner<F> {
/// Dense inverse of each diagonal block, in order.
inverse_blocks: Vec<Array2<F>>,
/// Side length of each block (the last block may be smaller).
blocksizes: Vec<usize>,
/// Total matrix dimension.
size: usize,
}
impl<F> BlockJacobiPreconditioner<F>
where
F: Float
+ NumAssign
+ Zero
+ One
+ Sum
+ scirs2_core::ndarray::ScalarOperand
+ Send
+ Sync
+ 'static,
{
/// Build a block-Jacobi preconditioner by exactly inverting each diagonal
/// block of `matrix` (block size from `config.blocksize`, clamped to n).
///
/// Each block is LU-factorized and its inverse assembled column by column
/// via triangular solves against the identity.
pub fn new(matrix: &ArrayView2<F>, config: &PreconditionerConfig) -> LinalgResult<Self> {
let (m, n) = matrix.dim();
if m != n {
return Err(LinalgError::ShapeError(
"Block Jacobi requires square matrix".to_string(),
));
}
let blocksize = config.blocksize.min(n);
// The final block may be smaller when blocksize does not divide n.
let num_blocks = n.div_ceil(blocksize);
let mut inverse_blocks = Vec::with_capacity(num_blocks);
let mut blocksizes = Vec::with_capacity(num_blocks);
for block_idx in 0..num_blocks {
let start_idx = block_idx * blocksize;
let end_idx = (start_idx + blocksize).min(n);
let current_blocksize = end_idx - start_idx;
let block = matrix.slice(scirs2_core::ndarray::s![
start_idx..end_idx,
start_idx..end_idx
]);
// PA = LU factorization of this diagonal block.
let (p, l, u) = lu(&block, None)?;
let identity = Array2::eye(current_blocksize);
let mut inverse_block = Array2::zeros((current_blocksize, current_blocksize));
// Solve LU x = P e_j for each unit vector to build the block inverse.
for j in 0..current_blocksize {
let rhs = identity.column(j);
let permuted_rhs = p.dot(&rhs);
// Forward substitution: L y = P e_j (L has unit diagonal).
let mut y = Array1::zeros(current_blocksize);
for i in 0..current_blocksize {
let mut sum = F::zero();
for k in 0..i {
sum += l[[i, k]] * y[k];
}
y[i] = permuted_rhs[i] - sum;
}
// Backward substitution: U x = y.
let mut x = Array1::zeros(current_blocksize);
for i in (0..current_blocksize).rev() {
let mut sum = F::zero();
for k in (i + 1)..current_blocksize {
sum += u[[i, k]] * x[k];
}
x[i] = (y[i] - sum) / u[[i, i]];
}
// x is column j of the block inverse.
for i in 0..current_blocksize {
inverse_block[[i, j]] = x[i];
}
}
inverse_blocks.push(inverse_block);
blocksizes.push(current_blocksize);
}
Ok(Self {
inverse_blocks,
blocksizes,
size: n,
})
}
}
impl<F> PreconditionerOp<F> for BlockJacobiPreconditioner<F>
where
    F: Float
        + NumAssign
        + Zero
        + One
        + Sum
        + scirs2_core::ndarray::ScalarOperand
        + Send
        + Sync
        + 'static,
{
    /// Apply the block-diagonal inverse: each contiguous segment of `x` is
    /// multiplied by the precomputed inverse of its diagonal block.
    fn apply(&self, x: &ArrayView1<F>) -> LinalgResult<Array1<F>> {
        let mut out = Array1::zeros(self.size);
        let mut offset = 0;
        for (inverse, &width) in self.inverse_blocks.iter().zip(self.blocksizes.iter()) {
            let stop = offset + width;
            let segment = x.slice(scirs2_core::ndarray::s![offset..stop]);
            let mapped = inverse.dot(&segment);
            out.slice_mut(scirs2_core::ndarray::s![offset..stop])
                .assign(&mapped);
            offset = stop;
        }
        Ok(out)
    }

    fn size(&self) -> usize {
        self.size
    }
}
/// Neumann-series (polynomial) preconditioner: approximates `A⁻¹` by a
/// truncated power series in `I - sA`, where `s` scales the matrix norm to 1.
#[derive(Debug, Clone)]
pub struct PolynomialPreconditioner<F> {
/// Owned copy of the system matrix.
matrix: Array2<F>,
/// Number of series terms beyond the zeroth.
degree: usize,
/// Scaling factor `s` (reciprocal of the Frobenius norm).
scaling: F,
/// Matrix dimension.
size: usize,
}
impl<F> PolynomialPreconditioner<F>
where
    F: Float
        + NumAssign
        + Zero
        + One
        + Sum
        + scirs2_core::ndarray::ScalarOperand
        + Send
        + Sync
        + 'static,
{
    /// Build a Neumann-series (polynomial) preconditioner.
    ///
    /// The operator approximates `A⁻¹` by `s · Σ_{k=0..=degree} (I − sA)^k x`
    /// with `s = 1 / ‖A‖_F`.
    ///
    /// # Errors
    /// Returns `ShapeError` for non-square input and propagates failures from
    /// the norm computation.
    pub fn new(matrix: &ArrayView2<F>, config: &PreconditionerConfig) -> LinalgResult<Self> {
        let (m, n) = matrix.dim();
        if m != n {
            return Err(LinalgError::ShapeError(
                "Polynomial preconditioner requires square matrix".to_string(),
            ));
        }
        let norm = matrix_norm(matrix, "fro", None)?;
        // Guard against a zero norm (e.g. the zero matrix): dividing by it
        // would make the scaling infinite and every application NaN.
        let scaling = F::one() / norm.max(F::epsilon());
        Ok(Self {
            matrix: matrix.to_owned(),
            degree: config.polynomial_degree,
            scaling,
            size: n,
        })
    }
}
impl<F> PreconditionerOp<F> for PolynomialPreconditioner<F>
where
F: Float
+ NumAssign
+ Zero
+ One
+ Sum
+ scirs2_core::ndarray::ScalarOperand
+ Send
+ Sync
+ 'static,
{
/// Evaluate the truncated Neumann series
/// `M⁻¹ x = s·x + s·R x + s·R² x + … + s·R^degree x` with `R = I − sA`.
/// NOTE(review): builds the scaled matrix and an identity on every call,
/// costing O(n²) allocations per application — consider caching.
fn apply(&self, x: &ArrayView1<F>) -> LinalgResult<Array1<F>> {
let scaledmatrix = &self.matrix * self.scaling;
let identity = Array2::eye(self.size);
let residualmatrix = &identity - &scaledmatrix;
let mut result = x.to_owned() * self.scaling;
// `power` accumulates R^k x term by term.
let mut power = x.to_owned();
for _k in 1..=self.degree {
power = residualmatrix.dot(&power);
result = &result + &power * self.scaling;
}
Ok(result)
}
fn size(&self) -> usize {
self.size
}
/// NOTE(review): a polynomial in `A` is symmetric only when `A` itself is
/// symmetric; this returns `true` unconditionally — confirm callers only
/// use this preconditioner with symmetric matrices.
fn is_symmetric(&self) -> bool {
true
}
}
/// A preconditioner chosen automatically from matrix properties
/// (conditioning, symmetry, definiteness, sparsity and size).
#[derive(Debug)]
pub enum AdaptivePreconditioner<F> {
Diagonal(DiagonalPreconditioner<F>),
IncompleteLU(IncompleteLUPreconditioner<F>),
IncompleteCholesky(IncompleteCholeskyPreconditioner<F>),
BlockJacobi(BlockJacobiPreconditioner<F>),
Polynomial(PolynomialPreconditioner<F>),
}
impl<F> AdaptivePreconditioner<F>
where
F: Float
+ NumAssign
+ Zero
+ One
+ Sum
+ scirs2_core::ndarray::ScalarOperand
+ Send
+ Sync
+ 'static,
{
/// Inspect `matrix` and build the preconditioner the heuristics select:
/// well-conditioned → diagonal; sparse SPD → incomplete Cholesky; sparse →
/// incomplete LU; small dense → block-Jacobi; otherwise → polynomial.
pub fn new(matrix: &ArrayView2<F>, config: &PreconditionerConfig) -> LinalgResult<Self> {
let (m, n) = matrix.dim();
if m != n {
return Err(LinalgError::ShapeError(
"Preconditioner requires square matrix".to_string(),
));
}
let condition_estimate = Self::estimate_condition_number(matrix)?;
let is_symmetric = Self::check_symmetry(matrix);
// Positive definiteness is only probed for symmetric matrices.
let is_positive_definite = is_symmetric && Self::check_positive_definite(matrix);
let sparsity = Self::estimate_sparsity(matrix);
if condition_estimate < F::from(10.0).expect("Operation failed") {
Ok(Self::Diagonal(DiagonalPreconditioner::new(matrix)?))
} else if is_positive_definite && sparsity > F::from(0.8).expect("Operation failed") {
Ok(Self::IncompleteCholesky(
IncompleteCholeskyPreconditioner::new(matrix, config)?,
))
} else if sparsity > F::from(0.7).expect("Operation failed") {
Ok(Self::IncompleteLU(IncompleteLUPreconditioner::new(
matrix, config,
)?))
} else if n <= 1000 {
Ok(Self::BlockJacobi(BlockJacobiPreconditioner::new(
matrix, config,
)?))
} else {
Ok(Self::Polynomial(PolynomialPreconditioner::new(
matrix, config,
)?))
}
}
/// Crude conditioning heuristic based on a few power-iteration steps.
/// NOTE(review): the returned expression `λ_max · n / max(λ_max, ε)`
/// algebraically reduces to ≈ n whenever λ_max > ε, so this effectively
/// returns the matrix dimension rather than a condition number; the
/// power iteration can also divide by a zero norm for a zero matrix.
/// Flagged for a proper fix — changing it alters adaptive selection.
fn estimate_condition_number(matrix: &ArrayView2<F>) -> LinalgResult<F> {
let n = matrix.nrows();
let mut x = Array1::from_vec(vec![F::one(); n]);
let norm_x = (x.iter().map(|&val| val * val).sum::<F>()).sqrt();
x /= norm_x;
for _ in 0..10 {
let y = matrix.dot(&x);
let norm_y = (y.iter().map(|&val| val * val).sum::<F>()).sqrt();
x = y / norm_y;
}
// Rayleigh quotient approximating the dominant eigenvalue.
let lambda_max = x.dot(&matrix.dot(&x));
Ok(
lambda_max * F::from(n as f64).expect("Operation failed")
/ lambda_max.max(F::epsilon()),
)
}
/// Exact (tolerance 1e-12) elementwise symmetry check.
fn check_symmetry(matrix: &ArrayView2<F>) -> bool {
let (m, n) = matrix.dim();
if m != n {
return false;
}
let tolerance = F::from(1e-12).expect("Operation failed");
for i in 0..n {
for j in (i + 1)..n {
if (matrix[[i, j]] - matrix[[j, i]]).abs() > tolerance {
return false;
}
}
}
true
}
/// Quick positive-diagonal screen followed by an actual Cholesky attempt.
fn check_positive_definite(matrix: &ArrayView2<F>) -> bool {
let n = matrix.nrows();
for i in 0..n {
if matrix[[i, i]] <= F::zero() {
return false;
}
}
cholesky(matrix, None).is_ok()
}
/// Fraction of entries with magnitude at most 1e-14 (1.0 = all zero).
fn estimate_sparsity(matrix: &ArrayView2<F>) -> F {
let (m, n) = matrix.dim();
let total_elements = m * n;
let tolerance = F::from(1e-14).expect("Operation failed");
let zero_elements = matrix.iter().filter(|&&val| val.abs() <= tolerance).count();
F::from(zero_elements).expect("Operation failed")
/ F::from(total_elements).expect("Operation failed")
}
}
impl<F> PreconditionerOp<F> for AdaptivePreconditioner<F>
where
F: Float
+ NumAssign
+ Zero
+ One
+ Sum
+ scirs2_core::ndarray::ScalarOperand
+ Send
+ Sync
+ 'static,
{
// All methods simply delegate to the selected variant.
fn apply(&self, x: &ArrayView1<F>) -> LinalgResult<Array1<F>> {
match self {
Self::Diagonal(p) => p.apply(x),
Self::IncompleteLU(p) => p.apply(x),
Self::IncompleteCholesky(p) => p.apply(x),
Self::BlockJacobi(p) => p.apply(x),
Self::Polynomial(p) => p.apply(x),
}
}
fn apply_transpose(&self, x: &ArrayView1<F>) -> LinalgResult<Array1<F>> {
match self {
Self::Diagonal(p) => p.apply_transpose(x),
Self::IncompleteLU(p) => p.apply_transpose(x),
Self::IncompleteCholesky(p) => p.apply_transpose(x),
Self::BlockJacobi(p) => p.apply_transpose(x),
Self::Polynomial(p) => p.apply_transpose(x),
}
}
fn size(&self) -> usize {
match self {
Self::Diagonal(p) => p.size(),
Self::IncompleteLU(p) => p.size(),
Self::IncompleteCholesky(p) => p.size(),
Self::BlockJacobi(p) => p.size(),
Self::Polynomial(p) => p.size(),
}
}
fn is_symmetric(&self) -> bool {
match self {
Self::Diagonal(p) => p.is_symmetric(),
Self::IncompleteLU(p) => p.is_symmetric(),
Self::IncompleteCholesky(p) => p.is_symmetric(),
Self::BlockJacobi(p) => p.is_symmetric(),
Self::Polynomial(p) => p.is_symmetric(),
}
}
}
/// Build a boxed preconditioner of the type requested in `config`.
///
/// Strategies without a dedicated implementation in this module (multigrid,
/// additive Schwarz, sparse approximate inverse) fall back to a diagonal
/// preconditioner. `Identity` is implemented as a diagonal of ones.
///
/// # Errors
/// Propagates any constructor failure (non-square input, singular or
/// non-positive-definite matrices, etc.).
#[allow(dead_code)]
pub fn create_preconditioner<F>(
    matrix: &ArrayView2<F>,
    config: &PreconditionerConfig,
) -> LinalgResult<Box<dyn PreconditionerOp<F>>>
where
    F: Float
        + NumAssign
        + Zero
        + One
        + Sum
        + scirs2_core::ndarray::ScalarOperand
        + Send
        + Sync
        + 'static,
{
    match config.preconditioner_type {
        // The identity operator is a diagonal preconditioner of all ones.
        PreconditionerType::Identity => Ok(Box::new(DiagonalPreconditioner {
            inverse_diagonal: Array1::ones(matrix.nrows()),
        })),
        PreconditionerType::Diagonal => Ok(Box::new(DiagonalPreconditioner::new(matrix)?)),
        PreconditionerType::IncompleteLU => {
            Ok(Box::new(IncompleteLUPreconditioner::new(matrix, config)?))
        }
        PreconditionerType::IncompleteCholesky => Ok(Box::new(
            IncompleteCholeskyPreconditioner::new(matrix, config)?,
        )),
        PreconditionerType::BlockJacobi => {
            Ok(Box::new(BlockJacobiPreconditioner::new(matrix, config)?))
        }
        PreconditionerType::Polynomial => {
            Ok(Box::new(PolynomialPreconditioner::new(matrix, config)?))
        }
        PreconditionerType::Adaptive => {
            // Unwrap the selected variant so the concrete type is boxed.
            let adaptive = AdaptivePreconditioner::new(matrix, config)?;
            match adaptive {
                AdaptivePreconditioner::Diagonal(p) => Ok(Box::new(p)),
                AdaptivePreconditioner::IncompleteLU(p) => Ok(Box::new(p)),
                AdaptivePreconditioner::IncompleteCholesky(p) => Ok(Box::new(p)),
                AdaptivePreconditioner::BlockJacobi(p) => Ok(Box::new(p)),
                AdaptivePreconditioner::Polynomial(p) => Ok(Box::new(p)),
            }
        }
        // Not yet implemented — listed explicitly (instead of a `_` catch-all)
        // so adding a new enum variant forces a decision here.
        PreconditionerType::AlgebraicMultigrid
        | PreconditionerType::GeometricMultigrid
        | PreconditionerType::AdditiveSchwarz
        | PreconditionerType::SparseApproximateInverse => {
            Ok(Box::new(DiagonalPreconditioner::new(matrix)?))
        }
    }
}
/// Solve the SPD system `A x = b` with the preconditioned conjugate gradient
/// method.
///
/// # Arguments
/// * `matrix` - symmetric positive definite system matrix
/// * `rhs` - right-hand side vector
/// * `preconditioner` - operator applying `M⁻¹` (should be symmetric)
/// * `max_iterations` - iteration cap
/// * `tolerance` - convergence threshold on the residual 2-norm
/// * `initial_guess` - optional starting iterate (defaults to zero)
///
/// # Errors
/// Returns `ShapeError` for inconsistent dimensions and propagates
/// preconditioner failures.
#[allow(dead_code)]
pub fn preconditioned_conjugate_gradient<F>(
    matrix: &ArrayView2<F>,
    rhs: &ArrayView1<F>,
    preconditioner: &dyn PreconditionerOp<F>,
    max_iterations: usize,
    tolerance: F,
    initial_guess: Option<&ArrayView1<F>>,
) -> LinalgResult<Array1<F>>
where
    F: Float
        + NumAssign
        + Zero
        + One
        + Sum
        + scirs2_core::ndarray::ScalarOperand
        + Send
        + Sync
        + 'static,
{
    let n = matrix.nrows();
    if matrix.ncols() != n || rhs.len() != n {
        return Err(LinalgError::ShapeError(
            "Matrix and RHS dimensions must be compatible".to_string(),
        ));
    }
    let mut x = match initial_guess {
        Some(guess) => guess.to_owned(),
        None => Array1::zeros(n),
    };
    let mut r = rhs - &matrix.dot(&x);
    // If the starting iterate already satisfies the tolerance, skip the loop
    // (this also avoids a division by zero in the first alpha computation).
    let initial_norm = (r.iter().map(|&val| val * val).sum::<F>()).sqrt();
    if initial_norm < tolerance {
        return Ok(x);
    }
    let mut z = preconditioner.apply(&r.view())?;
    let mut p = z.clone();
    let mut rsold = r.dot(&z);
    for iteration in 0..max_iterations {
        let ap = matrix.dot(&p);
        let curvature = p.dot(&ap);
        // Vanishing curvature means CG cannot make further progress (and
        // would otherwise divide by zero); return the current iterate.
        if curvature.abs() < F::epsilon() {
            break;
        }
        let alpha = rsold / curvature;
        x.scaled_add(alpha, &p);
        r.scaled_add(-alpha, &ap);
        let residual_norm = (r.iter().map(|&val| val * val).sum::<F>()).sqrt();
        if residual_norm < tolerance {
            println!("PCG converged in {} iterations", iteration + 1);
            return Ok(x);
        }
        z = preconditioner.apply(&r.view())?;
        let rsnew = r.dot(&z);
        let beta = rsnew / rsold;
        p = &z + &p * beta;
        rsold = rsnew;
    }
    Ok(x)
}
/// Solve `A x = b` with restarted, left-preconditioned GMRES(restart).
///
/// Each outer cycle runs an Arnoldi process on `M⁻¹ A` with modified
/// Gram-Schmidt orthogonalization, triangularizes the Hessenberg matrix with
/// Givens rotations, solves the small least-squares problem and applies the
/// Krylov correction `x += V y`.
///
/// BUG FIX: the previous implementation built the Arnoldi basis but never
/// solved the least-squares problem — it nudged `x` by `0.1·tol·v[0]`, so the
/// solver could not converge from a nonzero residual. A `restart` of 0 also
/// caused a division by zero; it is now treated as 1.
///
/// # Arguments
/// * `matrix` - square system matrix
/// * `rhs` - right-hand side vector
/// * `preconditioner` - operator applying `M⁻¹`
/// * `max_iterations` - total inner-iteration budget across restarts
/// * `tolerance` - convergence threshold on the true residual 2-norm
/// * `restart` - Krylov subspace dimension per cycle
/// * `initial_guess` - optional starting iterate (defaults to zero)
///
/// # Errors
/// Returns `ShapeError` for inconsistent dimensions and propagates
/// preconditioner failures.
#[allow(dead_code)]
pub fn preconditioned_gmres<F>(
    matrix: &ArrayView2<F>,
    rhs: &ArrayView1<F>,
    preconditioner: &dyn PreconditionerOp<F>,
    max_iterations: usize,
    tolerance: F,
    restart: usize,
    initial_guess: Option<&ArrayView1<F>>,
) -> LinalgResult<Array1<F>>
where
    F: Float
        + NumAssign
        + Zero
        + One
        + Sum
        + scirs2_core::ndarray::ScalarOperand
        + Send
        + Sync
        + 'static,
{
    let n = matrix.nrows();
    if matrix.ncols() != n || rhs.len() != n {
        return Err(LinalgError::ShapeError(
            "Matrix and RHS dimensions must be compatible".to_string(),
        ));
    }
    // A restart length of 0 would divide by zero below; treat it as 1.
    let restart = restart.max(1);
    let mut x = match initial_guess {
        Some(guess) => guess.to_owned(),
        None => Array1::zeros(n),
    };
    for outer_iter in 0..(max_iterations / restart + 1) {
        let r = rhs - &matrix.dot(&x);
        let r_norm = (r.iter().map(|&val| val * val).sum::<F>()).sqrt();
        if r_norm < tolerance {
            println!("Preconditioned GMRES converged in {outer_iter} outer iterations");
            return Ok(x);
        }
        // Left preconditioning: iterate on M⁻¹A with residual z = M⁻¹r.
        let z = preconditioner.apply(&r.view())?;
        let beta = (z.iter().map(|&val| val * val).sum::<F>()).sqrt();
        if beta <= F::epsilon() {
            // Preconditioned residual vanished; no direction to expand.
            return Ok(x);
        }
        let mut v = vec![Array1::zeros(n); restart + 1];
        v[0] = z / beta;
        let mut h = Array2::<F>::zeros((restart + 1, restart));
        // `g` carries the rotated residual of the least-squares problem.
        let mut g = Array1::<F>::zeros(restart + 1);
        g[0] = beta;
        let mut cos = vec![F::one(); restart];
        let mut sin = vec![F::zero(); restart];
        let mut basis_len = 0;
        for j in 0..restart {
            // Arnoldi step with modified Gram-Schmidt orthogonalization.
            let aw = matrix.dot(&v[j]);
            let mut w = preconditioner.apply(&aw.view())?;
            for i in 0..=j {
                let h_ij = v[i].dot(&w);
                h[[i, j]] = h_ij;
                w = &w - &v[i] * h_ij;
            }
            let w_norm = (w.iter().map(|&val| val * val).sum::<F>()).sqrt();
            h[[j + 1, j]] = w_norm;
            // "Lucky breakdown": the Krylov space is invariant; stop after
            // absorbing this column into the least-squares system.
            let breakdown = w_norm <= F::epsilon();
            if !breakdown {
                v[j + 1] = &w / w_norm;
            }
            // Apply the accumulated Givens rotations to the new column of H.
            for i in 0..j {
                let t = cos[i] * h[[i, j]] + sin[i] * h[[i + 1, j]];
                h[[i + 1, j]] = -sin[i] * h[[i, j]] + cos[i] * h[[i + 1, j]];
                h[[i, j]] = t;
            }
            // Form a new rotation annihilating the subdiagonal entry.
            let denom = (h[[j, j]] * h[[j, j]] + h[[j + 1, j]] * h[[j + 1, j]]).sqrt();
            if denom > F::epsilon() {
                cos[j] = h[[j, j]] / denom;
                sin[j] = h[[j + 1, j]] / denom;
            } else {
                cos[j] = F::one();
                sin[j] = F::zero();
            }
            h[[j, j]] = cos[j] * h[[j, j]] + sin[j] * h[[j + 1, j]];
            h[[j + 1, j]] = F::zero();
            g[j + 1] = -sin[j] * g[j];
            g[j] = cos[j] * g[j];
            basis_len = j + 1;
            // |g[j+1]| is the residual norm of the projected problem.
            if g[j + 1].abs() < tolerance || breakdown {
                break;
            }
        }
        // Back-substitute the triangularized system H y = g.
        let mut y = vec![F::zero(); basis_len];
        for i in (0..basis_len).rev() {
            let mut acc = g[i];
            for l in (i + 1)..basis_len {
                acc -= h[[i, l]] * y[l];
            }
            y[i] = if h[[i, i]].abs() > F::epsilon() {
                acc / h[[i, i]]
            } else {
                F::zero()
            };
        }
        // Apply the Krylov correction: x += V y.
        for i in 0..basis_len {
            x.scaled_add(y[i], &v[i]);
        }
    }
    Ok(x)
}
/// Summary metrics describing the quality and cost of a preconditioner.
#[derive(Debug, Clone)]
pub struct PreconditionerAnalysis {
/// Estimated ratio of condition numbers before/after preconditioning.
pub condition_improvement: f64,
/// Time spent constructing the preconditioner, in milliseconds.
pub setup_time_ms: f64,
/// Time per application, in microseconds.
pub apply_time_us: f64,
/// Approximate memory footprint, in bytes.
pub memory_usage_bytes: usize,
/// Fraction of sparsity preserved by the factors (1.0 = fully preserved).
pub sparsity_preservation: f64,
/// Heuristic numerical-stability score in [0, 1].
pub stability_estimate: f64,
}
impl Default for PreconditionerAnalysis {
/// Neutral metrics: no improvement measured, zero cost.
fn default() -> Self {
Self {
condition_improvement: 1.0,
setup_time_ms: 0.0,
apply_time_us: 0.0,
memory_usage_bytes: 0,
sparsity_preservation: 1.0,
stability_estimate: 1.0,
}
}
}
/// Analyze a preconditioner's expected quality and cost for `matrix`.
///
/// NOTE(review): apart from the memory estimate, every metric returned here
/// is a hard-coded placeholder — no timing or conditioning measurement is
/// actually performed. Treat the results as stubs until implemented.
#[allow(dead_code)]
pub fn analyze_preconditioner<F>(
matrix: &ArrayView2<F>,
_preconditioner: &dyn PreconditionerOp<F>,
) -> LinalgResult<PreconditionerAnalysis>
where
F: Float
+ NumAssign
+ Zero
+ One
+ Sum
+ scirs2_core::ndarray::ScalarOperand
+ Send
+ Sync
+ 'static,
{
let n = matrix.nrows();
// Placeholder constants; only the memory estimate derives from the input
// (dense n x n storage of F).
let condition_improvement = 10.0; let setup_time_ms = 1.0; let apply_time_us = 10.0; let memory_usage_bytes = n * n * std::mem::size_of::<F>();
let sparsity_preservation = 0.8; let stability_estimate = 0.9;
Ok(PreconditionerAnalysis {
condition_improvement,
setup_time_ms,
apply_time_us,
memory_usage_bytes,
sparsity_preservation,
stability_estimate,
})
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_relative_eq;
use scirs2_core::ndarray::array;
// Jacobi preconditioner multiplies elementwise by 1/diag(A).
#[test]
fn test_diagonal_preconditioner() {
let matrix = array![[4.0, 1.0], [1.0, 3.0]];
let preconditioner = DiagonalPreconditioner::new(&matrix.view()).expect("Operation failed");
let x = array![1.0, 2.0];
let result = preconditioner.apply(&x.view()).expect("Operation failed");
assert_relative_eq!(result[0], 0.25, epsilon = 1e-10);
assert_relative_eq!(result[1], 2.0 / 3.0, epsilon = 1e-10);
}
// ILU apply should produce a finite vector of the right length.
#[test]
fn test_incomplete_lu_preconditioner() {
let matrix = array![[4.0, 1.0, 0.0], [1.0, 4.0, 1.0], [0.0, 1.0, 4.0]];
let config = PreconditionerConfig::default().with_drop_tolerance(1e-8);
let preconditioner =
IncompleteLUPreconditioner::new(&matrix.view(), &config).expect("Operation failed");
let x = array![1.0, 2.0, 3.0];
let result = preconditioner.apply(&x.view()).expect("Operation failed");
assert!(result.len() == 3);
assert!(result.iter().all(|&val| val.is_finite()));
}
// Incomplete Cholesky on a tridiagonal SPD matrix.
#[test]
fn test_incomplete_cholesky_preconditioner() {
let matrix = array![[4.0, 1.0, 0.0], [1.0, 4.0, 1.0], [0.0, 1.0, 4.0]];
let config = PreconditionerConfig::default().with_drop_tolerance(1e-8);
let preconditioner = IncompleteCholeskyPreconditioner::new(&matrix.view(), &config)
.expect("Operation failed");
let x = array![1.0, 2.0, 3.0];
let result = preconditioner.apply(&x.view()).expect("Operation failed");
assert!(result.len() == 3);
assert!(result.iter().all(|&val| val.is_finite()));
}
// Block-diagonal matrix with 2x2 blocks matching the configured blocksize.
#[test]
fn test_block_jacobi_preconditioner() {
let matrix = array![
[4.0, 1.0, 0.0, 0.0],
[1.0, 4.0, 0.0, 0.0],
[0.0, 0.0, 3.0, 1.0],
[0.0, 0.0, 1.0, 3.0]
];
let config = PreconditionerConfig::default().with_blocksize(2);
let preconditioner =
BlockJacobiPreconditioner::new(&matrix.view(), &config).expect("Operation failed");
let x = array![1.0, 2.0, 3.0, 4.0];
let result = preconditioner.apply(&x.view()).expect("Operation failed");
assert!(result.len() == 4);
assert!(result.iter().all(|&val| val.is_finite()));
}
// Diagonally dominant matrix keeps the Neumann series well behaved.
#[test]
fn test_polynomial_preconditioner() {
let matrix = array![[2.0, 0.1, 0.0], [0.1, 2.0, 0.1], [0.0, 0.1, 2.0]];
let config = PreconditionerConfig::default().with_polynomial_degree(2);
let preconditioner =
PolynomialPreconditioner::new(&matrix.view(), &config).expect("Operation failed");
let x = array![1.0, 2.0, 3.0];
let result = preconditioner.apply(&x.view()).expect("Operation failed");
assert!(result.len() == 3);
assert!(result.iter().all(|&val| val.is_finite()));
}
// Adaptive selection should yield a usable preconditioner for SPD input.
#[test]
fn test_adaptive_preconditioner() {
let matrix = array![[4.0, 1.0, 0.0], [1.0, 4.0, 1.0], [0.0, 1.0, 4.0]];
let config = PreconditionerConfig::default();
let preconditioner =
AdaptivePreconditioner::new(&matrix.view(), &config).expect("Operation failed");
let x = array![1.0, 2.0, 3.0];
let result = preconditioner.apply(&x.view()).expect("Operation failed");
assert!(result.len() == 3);
assert!(result.iter().all(|&val| val.is_finite()));
}
// End-to-end PCG solve: the residual of the returned solution must be small.
#[test]
fn test_preconditioned_conjugate_gradient() {
let matrix = array![[4.0, 1.0, 0.0], [1.0, 4.0, 1.0], [0.0, 1.0, 4.0]];
let rhs = array![1.0, 2.0, 3.0];
let config = PreconditionerConfig::default();
let preconditioner =
create_preconditioner(&matrix.view(), &config).expect("Operation failed");
let solution = preconditioned_conjugate_gradient(
&matrix.view(),
&rhs.view(),
preconditioner.as_ref(),
100,
1e-8,
None,
)
.expect("Operation failed");
let residual = &rhs - &matrix.dot(&solution);
let residual_norm = (residual.iter().map(|&val| val * val).sum::<f64>()).sqrt();
assert!(residual_norm < 1e-6);
}
// Sanity-check the (placeholder) analysis metric ranges.
#[test]
fn test_preconditioner_analysis() {
let matrix = array![[4.0, 1.0, 0.0], [1.0, 4.0, 1.0], [0.0, 1.0, 4.0]];
let config = PreconditionerConfig::default();
let preconditioner =
create_preconditioner(&matrix.view(), &config).expect("Operation failed");
let analysis = analyze_preconditioner(&matrix.view(), preconditioner.as_ref())
.expect("Operation failed");
assert!(analysis.condition_improvement > 0.0);
assert!(analysis.setup_time_ms >= 0.0);
assert!(analysis.apply_time_us >= 0.0);
assert!(analysis.memory_usage_bytes > 0);
assert!(analysis.sparsity_preservation >= 0.0 && analysis.sparsity_preservation <= 1.0);
assert!(analysis.stability_estimate >= 0.0 && analysis.stability_estimate <= 1.0);
}
}