//! Randomized preconditioning for overdetermined least-squares problems:
//! sparse sign and subsampled randomized Hadamard (SRHT) sketches, plus
//! Blendenpik- and LSRN-style preconditioned CG solvers with optional
//! iterative refinement.

use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2};
use scirs2_core::random::prelude::*;
use scirs2_core::random::{Distribution, Normal, Uniform};

use crate::decomposition::qr;
use crate::error::{LinalgError, LinalgResult};
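/// Sparse sign sketching operator `S ∈ ℝ^{rows×cols}`.
///
/// Each column holds exactly `sparsity` nonzero entries of value
/// `±1/√sparsity` at distinct random rows, so `S x` costs
/// O(cols · sparsity) rather than the O(rows · cols) of a dense sketch.
/// With `rows ≪ cols` this acts as a sparse oblivious subspace embedding
/// in the style of CountSketch/OSNAP.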
#[derive(Debug, Clone)]
pub struct SparseSign {
pub rows: usize,
pub cols: usize,
pub sparsity: usize,
row_indices: Vec<usize>,
signs: Vec<f64>,
scale: f64,
}
impl SparseSign {
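    /// Builds a `rows × cols` sparse sign operator.
    ///
    /// `sparsity` defaults to `⌈log₂(rows)⌉`, clamped to `[1, rows]`; an
    /// explicit value larger than `rows` is rejected. For each column,
    /// `sparsity` distinct row indices are drawn via a partial
    /// Fisher–Yates shuffle and given independent random signs. Passing a
    /// `seed` makes the construction reproducible.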
pub fn new(
rows: usize,
cols: usize,
sparsity: Option<usize>,
seed: Option<u64>,
) -> LinalgResult<Self> {
if rows == 0 || cols == 0 {
return Err(LinalgError::ShapeError(
"SparseSign: rows and cols must be positive".to_string(),
));
}
let s = sparsity.unwrap_or_else(|| {
let log2r = (rows as f64).log2().ceil() as usize;
log2r.max(1).min(rows)
});
if s > rows {
return Err(LinalgError::ShapeError(format!(
"SparseSign: sparsity {s} cannot exceed rows {rows}"
)));
}
        let mut rng = {
            // Use `seed_val` rather than reusing `s`, which already names
            // the sparsity above.
            let seed_val = seed.unwrap_or_else(|| {
                let mut tr = scirs2_core::random::rng();
                scirs2_core::random::RngExt::random::<u64>(&mut tr)
            });
            scirs2_core::random::seeded_rng(seed_val)
        };
let mut row_indices = vec![0usize; cols * s];
let mut signs = vec![0.0f64; cols * s];
let uniform_sign = Uniform::new(0u8, 2).map_err(|e| {
LinalgError::ComputationError(format!("Failed to create uniform distribution: {e}"))
})?;
for col in 0..cols {
let mut pool: Vec<usize> = (0..rows).collect();
for k in 0..s {
                let j = Uniform::new(k, rows)
                    .map_err(|e| {
                        LinalgError::ComputationError(format!("Failed to sample index: {e}"))
                    })?
                    .sample(&mut rng);
pool.swap(k, j);
let row = pool[k];
row_indices[col * s + k] = row;
let sign_bit = uniform_sign.sample(&mut rng);
signs[col * s + k] = if sign_bit == 0 { 1.0 } else { -1.0 };
}
}
let scale = 1.0 / (s as f64).sqrt();
Ok(Self {
rows,
cols,
sparsity: s,
row_indices,
signs,
scale,
})
}
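    /// Applies the sketch to a vector: computes `y = S x`, where `x` must
    /// have length `cols`.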
pub fn apply(&self, x: &ArrayView1<f64>) -> LinalgResult<Array1<f64>> {
if x.len() != self.cols {
return Err(LinalgError::DimensionError(format!(
"SparseSign::apply: x has length {} but cols={}",
x.len(),
self.cols
)));
}
let mut y = Array1::zeros(self.rows);
let s = self.sparsity;
for col in 0..self.cols {
let xval = x[col];
for k in 0..s {
let row = self.row_indices[col * s + k];
let sign = self.signs[col * s + k];
y[row] += self.scale * sign * xval;
}
}
Ok(y)
}
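    /// Applies the sketch to a matrix: computes `Y = S A`, where `A` must
    /// have `cols` rows; the result is `rows × ncols(A)`.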
pub fn apply_matrix(&self, a: &ArrayView2<f64>) -> LinalgResult<Array2<f64>> {
let (nrows_a, ncols_a) = a.dim();
if nrows_a != self.cols {
return Err(LinalgError::DimensionError(format!(
"SparseSign::apply_matrix: A has {} rows but cols={}",
nrows_a, self.cols
)));
}
let mut y = Array2::zeros((self.rows, ncols_a));
let s = self.sparsity;
for col in 0..self.cols {
for k in 0..s {
let row = self.row_indices[col * s + k];
let sign = self.signs[col * s + k];
let coeff = self.scale * sign;
for j in 0..ncols_a {
y[[row, j]] += coeff * a[[col, j]];
}
}
}
Ok(y)
}
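    /// Applies the transposed sketch: computes `y = Sᵀ x`, where `x` must
    /// have length `rows`.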
pub fn apply_transpose(&self, x: &ArrayView1<f64>) -> LinalgResult<Array1<f64>> {
if x.len() != self.rows {
return Err(LinalgError::DimensionError(format!(
"SparseSign::apply_transpose: x has length {} but rows={}",
x.len(),
self.rows
)));
}
let mut y = Array1::zeros(self.cols);
let s = self.sparsity;
for col in 0..self.cols {
let mut val = 0.0f64;
for k in 0..s {
let row = self.row_indices[col * s + k];
let sign = self.signs[col * s + k];
val += sign * x[row];
}
y[col] = self.scale * val;
}
Ok(y)
}
}
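/// Subsampled randomized Hadamard transform (SRHT).
///
/// Represents the sketch `S = √(n/k) · P · H · D`, where `D` is a random
/// ±1 diagonal, `H` is the normalized Walsh–Hadamard transform on the
/// input length padded to the next power of two, and `P` samples `k` rows
/// uniformly without replacement.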
#[derive(Debug, Clone)]
pub struct SubsampledRandomizedHadamard {
pub n: usize,
pub k: usize,
diag_signs: Vec<f64>,
sampled_rows: Vec<usize>,
scale: f64,
}
impl SubsampledRandomizedHadamard {
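    /// Creates an SRHT that maps inputs of length up to `n` down to `k`
    /// coordinates. The input length is padded internally to the next
    /// power of two, and the stored `n` field holds the padded length.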
pub fn new(n: usize, k: usize, seed: Option<u64>) -> LinalgResult<Self> {
if n == 0 {
return Err(LinalgError::ShapeError(
"SRHT: n must be positive".to_string(),
));
}
if k == 0 || k > n {
return Err(LinalgError::ShapeError(format!(
"SRHT: k must satisfy 1 ≤ k ≤ n={n}, got k={k}"
)));
}
let n_padded = n.next_power_of_two();
let mut rng = {
let s = seed.unwrap_or_else(|| {
let mut tr = scirs2_core::random::rng();
scirs2_core::random::RngExt::random::<u64>(&mut tr)
});
scirs2_core::random::seeded_rng(s)
};
let uniform_bit = Uniform::new(0u8, 2).map_err(|e| {
LinalgError::ComputationError(format!("Failed to create uniform distribution: {e}"))
})?;
let diag_signs: Vec<f64> = (0..n_padded)
.map(|_| {
if uniform_bit.sample(&mut rng) == 0 {
1.0
} else {
-1.0
}
})
.collect();
let mut pool: Vec<usize> = (0..n_padded).collect();
let k_actual = k.min(n_padded);
for i in 0..k_actual {
let j_range = Uniform::new(i, n_padded).map_err(|e| {
LinalgError::ComputationError(format!("Failed to sample row index: {e}"))
})?;
let j = j_range.sample(&mut rng);
pool.swap(i, j);
}
let sampled_rows: Vec<usize> = pool[..k_actual].to_vec();
let scale = (n_padded as f64 / k_actual as f64).sqrt();
Ok(Self {
n: n_padded,
k: k_actual,
diag_signs,
sampled_rows,
scale,
})
}
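    /// Applies the SRHT to a vector, implicitly zero-padding it to the
    /// internal power-of-two length.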
pub fn apply(&self, x: &ArrayView1<f64>) -> LinalgResult<Array1<f64>> {
if x.len() > self.n {
return Err(LinalgError::DimensionError(format!(
"SRHT::apply: x has length {} but n={}",
x.len(),
self.n
)));
}
let mut buf = vec![0.0f64; self.n];
for i in 0..x.len() {
buf[i] = self.diag_signs[i] * x[i];
}
Self::fwht(&mut buf);
let scale = self.scale / (self.n as f64).sqrt();
let y: Array1<f64> = self.sampled_rows.iter().map(|&r| scale * buf[r]).collect();
Ok(y)
}
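    /// Applies the SRHT to each column of `a`, producing a `k × ncols(A)`
    /// sketch.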
pub fn apply_matrix(&self, a: &ArrayView2<f64>) -> LinalgResult<Array2<f64>> {
let (nrows, ncols) = a.dim();
if nrows > self.n {
return Err(LinalgError::DimensionError(format!(
"SRHT::apply_matrix: A has {} rows but n={}",
nrows, self.n
)));
}
let scale = self.scale / (self.n as f64).sqrt();
let mut result = Array2::zeros((self.k, ncols));
for j in 0..ncols {
let mut buf = vec![0.0f64; self.n];
for i in 0..nrows {
buf[i] = self.diag_signs[i] * a[[i, j]];
}
Self::fwht(&mut buf);
for (ki, &row) in self.sampled_rows.iter().enumerate() {
result[[ki, j]] = scale * buf[row];
}
}
Ok(result)
}
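    /// In-place unnormalized fast Walsh–Hadamard transform;
    /// `data.len()` must be a power of two.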
fn fwht(data: &mut [f64]) {
let n = data.len();
let mut h = 1;
while h < n {
let mut i = 0;
while i < n {
for j in i..(i + h) {
let u = data[j];
let v = data[j + h];
data[j] = u + v;
data[j + h] = u - v;
}
i += 2 * h;
}
h *= 2;
}
}
}
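/// Outcome of a randomized-preconditioned least-squares solve: the
/// solution, the final relative residual `‖Ax − b‖₂ / ‖b‖₂`, the CG
/// iteration count, a convergence flag, and (when available) a crude
/// condition-number estimate of the preconditioned system.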
#[derive(Debug, Clone)]
pub struct RandomizedPrecondResult {
pub solution: Array1<f64>,
pub relative_residual: f64,
pub iterations: usize,
pub converged: bool,
pub preconditioned_cond_estimate: Option<f64>,
}
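/// Blendenpik-style right preconditioner for overdetermined least squares
/// (Avron, Maymounkov & Toledo, 2010).
///
/// Sketches `A` down to roughly `α·n` rows, computes a QR factorization
/// of the sketch, and keeps the triangular factor `R`, chosen so that
/// `A R⁻¹` is well conditioned.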
#[derive(Debug, Clone)]
pub struct BlendenpikPreconditioner {
pub r_factor: Array2<f64>,
pub n: usize,
pub sketch_dim: usize,
pub cond_estimate: f64,
}
impl BlendenpikPreconditioner {
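    /// Builds the preconditioner from `a` (`m × n` with `m ≥ n`).
    ///
    /// `oversampling` sets the sketch size `⌊α·n⌋` (default `α = 2`,
    /// clamped to `[n + 1, m]`); `use_srht` selects the SRHT sketch
    /// instead of the sparse sign sketch. A condition-number estimate is
    /// formed from the ratio of extreme diagonal entries of `R`.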
pub fn new(
a: &ArrayView2<f64>,
oversampling: Option<f64>,
use_srht: bool,
seed: Option<u64>,
) -> LinalgResult<Self> {
let (m, n) = a.dim();
if m < n {
return Err(LinalgError::ShapeError(format!(
"Blendenpik requires m ≥ n, got {}×{}",
m, n
)));
}
let alpha = oversampling.unwrap_or(2.0).max(1.0);
let sketch_dim = ((alpha * n as f64) as usize).max(n + 1).min(m);
let sketch_a = if use_srht {
let srht = SubsampledRandomizedHadamard::new(m, sketch_dim, seed)?;
srht.apply_matrix(a)?
} else {
let sparse = SparseSign::new(sketch_dim, m, None, seed)?;
sparse.apply_matrix(a)?
};
let (_, r_factor) = qr(&sketch_a.view(), None)?;
let r_n = r_factor.nrows().min(n);
let r_square = r_factor
.slice(scirs2_core::ndarray::s![..r_n, ..n])
.to_owned();
let diag_abs: Vec<f64> = (0..r_n).map(|i| r_square[[i, i]].abs()).collect();
let r_max = diag_abs.iter().cloned().fold(0.0_f64, f64::max);
let r_min = diag_abs
.iter()
.cloned()
.fold(f64::INFINITY, f64::min)
.max(1e-300);
let cond_estimate = if r_max > 0.0 { r_max / r_min } else { 1e16 };
Ok(Self {
r_factor: r_square,
n: r_n,
sketch_dim,
cond_estimate,
})
}
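    /// Applies `R⁻¹` by back substitution, i.e. solves `R x = v`.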
pub fn apply_r_inverse(&self, v: &ArrayView1<f64>) -> LinalgResult<Array1<f64>> {
let n = self.n;
if v.len() != n {
return Err(LinalgError::DimensionError(format!(
"BlendenpikPreconditioner::apply_r_inverse: v has {} elements but n={}",
v.len(),
n
)));
}
let mut x = Array1::zeros(n);
for i in (0..n).rev() {
let mut sum = v[i];
for j in (i + 1)..n {
sum -= self.r_factor[[i, j]] * x[j];
}
let diag = self.r_factor[[i, i]];
if diag.abs() < 1e-300 {
return Err(LinalgError::SingularMatrixError(format!(
"Blendenpik R is singular at diagonal index {i}"
)));
}
x[i] = sum / diag;
}
Ok(x)
}
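    /// Computes `y = R x` with the stored upper-triangular factor.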
pub fn apply_r(&self, x: &ArrayView1<f64>) -> LinalgResult<Array1<f64>> {
let n = self.n;
if x.len() != n {
return Err(LinalgError::DimensionError(format!(
"BlendenpikPreconditioner::apply_r: x has {} elements but n={}",
x.len(),
n
)));
}
let mut y = Array1::zeros(n);
for i in 0..n {
let mut val = 0.0f64;
for j in i..n {
val += self.r_factor[[i, j]] * x[j];
}
y[i] = val;
}
Ok(y)
}
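    /// Solves `min‖A x − b‖₂` by running conjugate gradient on the
    /// right-preconditioned normal equations
    /// `R⁻ᵀ Aᵀ A R⁻¹ y = R⁻ᵀ Aᵀ b`, then recovering `x = R⁻¹ y`.
    /// Defaults: 200 iterations and tolerance `1e-10` on the
    /// preconditioned gradient norm.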
pub fn solve(
&self,
a: &ArrayView2<f64>,
b: &ArrayView1<f64>,
max_iter: Option<usize>,
tol: Option<f64>,
) -> LinalgResult<RandomizedPrecondResult> {
let (m, n_a) = a.dim();
if m != b.len() {
return Err(LinalgError::DimensionError(format!(
"BlendenpikPreconditioner::solve: A has {} rows but b has {} elements",
m,
b.len()
)));
}
let max_it = max_iter.unwrap_or(200);
let tolerance = tol.unwrap_or(1e-10);
let n = self.n.min(n_a);
let mut atb = Array1::zeros(n);
for i in 0..n {
let mut val = 0.0f64;
for k in 0..m {
val += a[[k, i]] * b[k];
}
atb[i] = val;
}
let mut y = Array1::zeros(n);
let mut g = self.apply_r_transpose_inverse(&atb.view())?;
let mut p = g.clone();
let mut rr = dot_product(&g.view(), &g.view());
let mut converged = false;
let mut iterations = 0;
for iter in 0..max_it {
iterations = iter + 1;
let rp = self.apply_r_inverse(&p.view())?;
let mut arp = Array1::zeros(m);
for i in 0..m {
let mut val = 0.0f64;
                for j in 0..n {
val += a[[i, j]] * rp[j];
}
arp[i] = val;
}
let mut aarp = Array1::zeros(n);
for i in 0..n {
let mut val = 0.0f64;
for k in 0..m {
val += a[[k, i]] * arp[k];
}
aarp[i] = val;
}
let q = self.apply_r_transpose_inverse(&aarp.view())?;
let pq = dot_product(&p.view(), &q.view());
if pq.abs() < 1e-300 {
break;
}
let alpha = rr / pq;
y = y + alpha * &p;
g = g - alpha * &q;
let rr_new = dot_product(&g.view(), &g.view());
let beta = rr_new / rr.max(1e-300);
rr = rr_new;
if rr.sqrt() < tolerance {
converged = true;
break;
}
p = &g + beta * &p;
}
let solution = self.apply_r_inverse(&y.view())?;
let mut ax = Array1::zeros(m);
for i in 0..m {
let mut val = 0.0f64;
            for j in 0..n {
val += a[[i, j]] * solution[j];
}
ax[i] = val;
}
let residual = (&ax - b).mapv(|v: f64| v * v).sum().sqrt();
let b_norm = b.mapv(|v: f64| v * v).sum().sqrt().max(1e-300);
let relative_residual = residual / b_norm;
Ok(RandomizedPrecondResult {
solution,
relative_residual,
iterations,
converged,
preconditioned_cond_estimate: Some(self.cond_estimate),
})
}
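    /// Applies `R⁻ᵀ` by forward substitution, i.e. solves `Rᵀ x = v`.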
fn apply_r_transpose_inverse(&self, v: &ArrayView1<f64>) -> LinalgResult<Array1<f64>> {
let n = self.n;
if v.len() != n {
return Err(LinalgError::DimensionError(format!(
"apply_r_transpose_inverse: v has {} elements but n={}",
v.len(),
n
)));
}
let mut x = Array1::zeros(n);
for i in 0..n {
let mut sum = v[i];
for j in 0..i {
sum -= self.r_factor[[j, i]] * x[j];
}
let diag = self.r_factor[[i, i]];
if diag.abs() < 1e-300 {
return Err(LinalgError::SingularMatrixError(format!(
"Blendenpik R^T is singular at index {i}"
)));
}
x[i] = sum / diag;
}
Ok(x)
}
}
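/// LSRN right preconditioner for overdetermined least squares
/// (Meng, Saunders & Mahoney, 2014).
///
/// Sketches `A` with a scaled Gaussian matrix, computes the SVD
/// `(1/√k) S A = U Σ Vᵀ`, and keeps `N = V Σ⁻¹` as the preconditioner;
/// singular values below `rcond · σ_max` are truncated, making the
/// preconditioner rank-revealing.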
#[derive(Debug, Clone)]
pub struct LSRNPreconditioner {
pub v_factor: Array2<f64>,
pub sigma_inv: Array1<f64>,
pub n: usize,
pub rank: usize,
pub sketch_dim: usize,
}
impl LSRNPreconditioner {
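    /// Builds the preconditioner from `a` (`m × n` with `m ≥ n`).
    ///
    /// `oversampling` sets the Gaussian sketch size `⌊α·n⌋` (default
    /// `α = 2`, clamped to `[n + 1, m]`); `rcond` (default `1e-12`) sets
    /// the relative singular-value cutoff.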
pub fn new(
a: &ArrayView2<f64>,
oversampling: Option<f64>,
rcond: Option<f64>,
seed: Option<u64>,
) -> LinalgResult<Self> {
let (m, n) = a.dim();
if m < n {
return Err(LinalgError::ShapeError(format!(
"LSRN requires m ≥ n, got {}×{}",
m, n
)));
}
let alpha = oversampling.unwrap_or(2.0).max(1.0);
let k = ((alpha * n as f64) as usize).max(n + 1).min(m);
let threshold = rcond.unwrap_or(1e-12);
let s_matrix = gaussian_random_matrix(k, m, seed)?;
let mut b_sketch = Array2::zeros((k, n));
let scale = 1.0 / (k as f64).sqrt();
for i in 0..k {
for j in 0..n {
let mut val = 0.0f64;
for l in 0..m {
val += s_matrix[[i, l]] * a[[l, j]];
}
b_sketch[[i, j]] = scale * val;
}
}
let (_, sigma, vt) = crate::decomposition::svd(&b_sketch.view(), true, None)?;
let sigma_max = sigma[0].max(1e-300);
let tol = threshold * sigma_max;
        let rank = sigma.iter().filter(|&&s| s > tol).count().max(1).min(n);
let v_factor = vt
.slice(scirs2_core::ndarray::s![..rank, ..])
.t()
.to_owned();
let sigma_inv: Array1<f64> =
sigma
.slice(scirs2_core::ndarray::s![..rank])
.mapv(|s| if s > tol { 1.0 / s } else { 0.0 });
Ok(Self {
v_factor,
sigma_inv,
n,
rank,
sketch_dim: k,
})
}
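    /// Computes `N y = V Σ⁻¹ y`, mapping preconditioned coordinates
    /// (length `rank`) to solution space (length `n`).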
pub fn apply_n(&self, x: &ArrayView1<f64>) -> LinalgResult<Array1<f64>> {
if x.len() != self.rank {
return Err(LinalgError::DimensionError(format!(
"LSRN::apply_n: x has {} elements but rank={}",
x.len(),
self.rank
)));
}
let mut scaled = Array1::zeros(self.rank);
for i in 0..self.rank {
scaled[i] = self.sigma_inv[i] * x[i];
}
let mut result = Array1::zeros(self.n);
for i in 0..self.n {
let mut val = 0.0f64;
for j in 0..self.rank {
val += self.v_factor[[i, j]] * scaled[j];
}
result[i] = val;
}
Ok(result)
}
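    /// Computes `Nᵀ x = Σ⁻¹ Vᵀ x`, mapping solution space (length `n`)
    /// to preconditioned coordinates (length `rank`).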
pub fn apply_n_transpose(&self, x: &ArrayView1<f64>) -> LinalgResult<Array1<f64>> {
if x.len() != self.n {
return Err(LinalgError::DimensionError(format!(
"LSRN::apply_n_transpose: x has {} elements but n={}",
x.len(),
self.n
)));
}
let mut vt_x = Array1::zeros(self.rank);
for i in 0..self.rank {
let mut val = 0.0f64;
for j in 0..self.n {
val += self.v_factor[[j, i]] * x[j];
}
vt_x[i] = self.sigma_inv[i] * val;
}
Ok(vt_x)
}
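    /// Solves `min‖A x − b‖₂` by running conjugate gradient on the
    /// preconditioned normal equations `Nᵀ Aᵀ A N y = Nᵀ Aᵀ b`, then
    /// recovering `x = N y`. Defaults: 300 iterations and tolerance
    /// `1e-10`.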
pub fn solve(
&self,
a: &ArrayView2<f64>,
b: &ArrayView1<f64>,
max_iter: Option<usize>,
tol: Option<f64>,
) -> LinalgResult<RandomizedPrecondResult> {
let (m, n) = a.dim();
if b.len() != m {
return Err(LinalgError::DimensionError(format!(
"LSRN::solve: A has {} rows but b has {} elements",
m,
b.len()
)));
}
let max_it = max_iter.unwrap_or(300);
let tolerance = tol.unwrap_or(1e-10);
let rank = self.rank;
let mut atb = Array1::zeros(n);
for i in 0..n {
let mut val = 0.0f64;
for k in 0..m {
val += a[[k, i]] * b[k];
}
atb[i] = val;
}
let nt_atb = self.apply_n_transpose(&atb.view())?;
let mut y = Array1::zeros(rank);
let mut g = nt_atb.clone();
let mut p = g.clone();
let mut rr = dot_product(&g.view(), &g.view());
let mut converged = false;
let mut iterations = 0;
for iter in 0..max_it {
iterations = iter + 1;
let np = self.apply_n(&p.view())?;
let mut anp = Array1::zeros(m);
for i in 0..m {
let mut val = 0.0f64;
for j in 0..n.min(self.n) {
val += a[[i, j]] * np[j];
}
anp[i] = val;
}
let mut at_anp = Array1::zeros(n);
for i in 0..n {
let mut val = 0.0f64;
for k in 0..m {
val += a[[k, i]] * anp[k];
}
at_anp[i] = val;
}
let q = self.apply_n_transpose(&at_anp.view())?;
let pq = dot_product(&p.view(), &q.view());
if pq.abs() < 1e-300 {
break;
}
let alpha = rr / pq;
y = y + alpha * &p;
g = g - alpha * &q;
let rr_new = dot_product(&g.view(), &g.view());
let beta = rr_new / rr.max(1e-300);
rr = rr_new;
if rr.sqrt() < tolerance {
converged = true;
break;
}
p = &g + beta * &p;
}
let solution = self.apply_n(&y.view())?;
let mut ax = Array1::zeros(m);
for i in 0..m {
let mut val = 0.0f64;
for j in 0..n.min(self.n) {
val += a[[i, j]] * solution[j];
}
ax[i] = val;
}
let residual = (&ax - b).mapv(|v: f64| v * v).sum().sqrt();
let b_norm = b.mapv(|v: f64| v * v).sum().sqrt().max(1e-300);
let relative_residual = residual / b_norm;
Ok(RandomizedPrecondResult {
solution,
relative_residual,
iterations,
converged,
preconditioned_cond_estimate: None,
})
}
}
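/// Sketch-and-precondition least-squares solver with iterative refinement.
///
/// Runs a Blendenpik (default) or LSRN solve, then repeatedly re-solves
/// against the residual `r = b − A x` with a freshly seeded
/// preconditioner and adds the correction, stopping once the relative
/// residual falls below `outer_tol` or `max_refinement_steps` is
/// exhausted.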
#[derive(Debug, Clone)]
pub struct IterativeRefinementLS {
pub max_refinement_steps: usize,
pub inner_tol: f64,
pub outer_tol: f64,
pub use_blendenpik: bool,
pub oversampling: f64,
pub seed: Option<u64>,
}
impl IterativeRefinementLS {
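    /// Creates a solver with the defaults: 3 refinement steps, inner
    /// tolerance `1e-8`, outer tolerance `1e-12`, Blendenpik
    /// preconditioning, and oversampling factor `2.0`.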
pub fn new() -> Self {
Self {
max_refinement_steps: 3,
inner_tol: 1e-8,
outer_tol: 1e-12,
use_blendenpik: true,
oversampling: 2.0,
seed: None,
}
}
pub fn with_max_steps(mut self, steps: usize) -> Self {
self.max_refinement_steps = steps;
self
}
pub fn with_inner_tol(mut self, tol: f64) -> Self {
self.inner_tol = tol;
self
}
pub fn with_outer_tol(mut self, tol: f64) -> Self {
self.outer_tol = tol;
self
}
pub fn with_lsrn(mut self) -> Self {
self.use_blendenpik = false;
self
}
pub fn with_oversampling(mut self, alpha: f64) -> Self {
self.oversampling = alpha;
self
}
pub fn with_seed(mut self, seed: u64) -> Self {
self.seed = Some(seed);
self
}
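    /// Solves `min‖A x − b‖₂`, refining the initial preconditioned solve
    /// against the residual after each step; `iterations` in the result
    /// accumulates the inner CG iterations across all solves.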
pub fn solve(
&self,
a: &ArrayView2<f64>,
b: &ArrayView1<f64>,
) -> LinalgResult<RandomizedPrecondResult> {
let (m, n) = a.dim();
if b.len() != m {
return Err(LinalgError::DimensionError(format!(
"IterativeRefinementLS: A has {} rows but b has {} elements",
m,
b.len()
)));
}
let mut result = if self.use_blendenpik {
let precond =
BlendenpikPreconditioner::new(a, Some(self.oversampling), true, self.seed)?;
precond.solve(a, b, Some(200), Some(self.inner_tol))?
} else {
let precond = LSRNPreconditioner::new(a, Some(self.oversampling), None, self.seed)?;
precond.solve(a, b, Some(300), Some(self.inner_tol))?
};
let mut x = result.solution.clone();
let mut total_iters = result.iterations;
for step in 0..self.max_refinement_steps {
let rel_res = result.relative_residual;
if rel_res < self.outer_tol {
result.converged = true;
break;
}
let mut r = Array1::zeros(m);
for i in 0..m {
let mut ax_i = 0.0f64;
for j in 0..n {
ax_i += a[[i, j]] * x[j];
}
r[i] = b[i] - ax_i;
}
let correction_result = if self.use_blendenpik {
let new_seed = self.seed.map(|s| s + step as u64 + 1);
let precond =
BlendenpikPreconditioner::new(a, Some(self.oversampling), true, new_seed)?;
precond.solve(a, &r.view(), Some(100), Some(self.inner_tol * 0.1))?
} else {
let new_seed = self.seed.map(|s| s + step as u64 + 1);
let precond = LSRNPreconditioner::new(a, Some(self.oversampling), None, new_seed)?;
precond.solve(a, &r.view(), Some(150), Some(self.inner_tol * 0.1))?
};
x += &correction_result.solution;
total_iters += correction_result.iterations;
let mut ax = Array1::zeros(m);
for i in 0..m {
let mut val = 0.0f64;
for j in 0..n {
val += a[[i, j]] * x[j];
}
ax[i] = val;
}
let res_norm = (&ax - b).mapv(|v: f64| v * v).sum().sqrt();
let b_norm = b.mapv(|v: f64| v * v).sum().sqrt().max(1e-300);
result.relative_residual = res_norm / b_norm;
}
        result.solution = x;
        result.iterations = total_iters;
        // A refinement step may have pushed the residual below the outer
        // tolerance after the loop's convergence check; reflect that here.
        result.converged = result.converged || result.relative_residual < self.outer_tol;
        Ok(result)
}
}
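impl Default for IterativeRefinementLS {
    /// Delegates to [`IterativeRefinementLS::new`].
    fn default() -> Self {
        Self::new()
    }
}

/// Euclidean inner product of two vectors.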
fn dot_product(a: &ArrayView1<f64>, b: &ArrayView1<f64>) -> f64 {
a.iter().zip(b.iter()).map(|(ai, bi)| ai * bi).sum()
}
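/// Samples a `rows × cols` matrix with i.i.d. standard normal entries,
/// optionally seeded for reproducibility.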
fn gaussian_random_matrix(
rows: usize,
cols: usize,
seed: Option<u64>,
) -> LinalgResult<Array2<f64>> {
let mut rng = {
let seed_val = seed.unwrap_or_else(|| {
let mut tr = scirs2_core::random::rng();
scirs2_core::random::RngExt::random::<u64>(&mut tr)
});
scirs2_core::random::seeded_rng(seed_val)
};
let normal = Normal::new(0.0, 1.0).map_err(|e| {
LinalgError::ComputationError(format!("Failed to create Normal distribution: {e}"))
})?;
let mut matrix = Array2::zeros((rows, cols));
for i in 0..rows {
for j in 0..cols {
matrix[[i, j]] = normal.sample(&mut rng);
}
}
Ok(matrix)
}
#[cfg(test)]
mod tests {
use super::*;
use scirs2_core::ndarray::array;
#[test]
fn test_sparse_sign_dimensions() {
let ss = SparseSign::new(20, 10, Some(3), Some(42)).expect("SparseSign::new failed");
assert_eq!(ss.rows, 20);
assert_eq!(ss.cols, 10);
assert_eq!(ss.sparsity, 3);
let x = Array1::ones(10);
let y = ss.apply(&x.view()).expect("apply failed");
assert_eq!(y.len(), 20);
}
#[test]
fn test_sparse_sign_linearity() {
let ss = SparseSign::new(30, 15, Some(4), Some(7)).expect("SparseSign::new failed");
let x1 = Array1::from_vec((0..15).map(|i| i as f64).collect());
let x2 = Array1::from_vec((0..15).map(|i| (i + 1) as f64).collect());
let y1 = ss.apply(&x1.view()).expect("apply x1 failed");
let y2 = ss.apply(&x2.view()).expect("apply x2 failed");
let y12 = ss.apply(&(&x1 + &x2).view()).expect("apply x1+x2 failed");
for i in 0..30 {
assert!((y12[i] - y1[i] - y2[i]).abs() < 1e-12);
}
}
#[test]
fn test_srht_dimensions() {
let srht = SubsampledRandomizedHadamard::new(64, 16, Some(99)).expect("SRHT::new failed");
let x = Array1::ones(64);
let y = srht.apply(&x.view()).expect("SRHT apply failed");
assert_eq!(y.len(), 16);
}
#[test]
fn test_blendenpik_overdetermined() {
let m = 20;
let n = 5;
let mut a = Array2::zeros((m, n));
for i in 0..m {
for j in 0..n {
a[[i, j]] = ((i + j + 1) as f64).cos() + if i == j { 2.0 } else { 0.0 };
}
}
let x_true = array![1.0, -1.0, 2.0, 0.5, -0.5];
let mut b = Array1::zeros(m);
for i in 0..m {
for j in 0..n {
b[i] += a[[i, j]] * x_true[j];
}
}
let precond = BlendenpikPreconditioner::new(&a.view(), Some(2.0), false, Some(1))
.expect("Blendenpik::new failed");
let result = precond
.solve(&a.view(), &b.view(), Some(100), Some(1e-8))
.expect("Blendenpik::solve failed");
assert!(
result.relative_residual < 1e-4,
"Blendenpik residual too large: {}",
result.relative_residual
);
}
#[test]
fn test_lsrn_overdetermined() {
let m = 30;
let n = 6;
let mut a = Array2::zeros((m, n));
for i in 0..m {
for j in 0..n {
a[[i, j]] = 0.5 * ((i * n + j) as f64 / (m * n) as f64);
if i == j {
a[[i, j]] += 3.0;
}
}
}
let x_true = array![1.0, 2.0, -1.0, 0.5, -2.0, 0.0];
let mut b = Array1::zeros(m);
for i in 0..m {
for j in 0..n {
b[i] += a[[i, j]] * x_true[j];
}
}
let precond = LSRNPreconditioner::new(&a.view(), Some(2.0), Some(1e-12), Some(42))
.expect("LSRN::new failed");
let result = precond
.solve(&a.view(), &b.view(), Some(200), Some(1e-8))
.expect("LSRN::solve failed");
assert!(
result.relative_residual < 1e-3,
"LSRN residual too large: {}",
result.relative_residual
);
}
#[test]
fn test_iterative_refinement_ls() {
let m = 25;
let n = 5;
let mut a = Array2::zeros((m, n));
for i in 0..m {
for j in 0..n {
a[[i, j]] = (i as f64 * 0.1 + j as f64 * 0.3 + 1.0).sin();
                if i == j {
a[[i, j]] += 2.0;
}
}
}
let x_true = Array1::from_vec(vec![1.0, -1.0, 0.5, -0.5, 2.0]);
let mut b = Array1::zeros(m);
for i in 0..m {
for j in 0..n {
b[i] += a[[i, j]] * x_true[j];
}
}
let solver = IterativeRefinementLS::new()
.with_max_steps(2)
.with_outer_tol(1e-6)
.with_seed(123);
let result = solver
.solve(&a.view(), &b.view())
.expect("IterativeRefinementLS::solve failed");
assert!(
result.relative_residual < 1e-3,
"IterativeRefinementLS residual too large: {}",
result.relative_residual
);
}
}