use crate::csr::CsrMatrix;
use crate::error::{SparseError, SparseResult};
use crate::iterative_solvers::{IterativeSolverConfig, Preconditioner, SolverResult};
use scirs2_core::ndarray::Array1;
#[derive(Debug)]
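/// One level of a smoothed-aggregation AMG hierarchy: the operator `a` on this
/// level, the prolongation `p` and restriction `r` to/from the next coarser level
/// (`None` on the coarsest level), the inverted diagonal `diag_inv` used by the
/// Jacobi smoother, and the number of unknowns `size`.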
pub struct AmgLevel {
pub a: CsrMatrix<f64>,
pub p: Option<CsrMatrix<f64>>,
pub r: Option<CsrMatrix<f64>>,
pub diag_inv: Array1<f64>,
pub size: usize,
}
#[derive(Debug)]
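/// A complete SA-AMG hierarchy, ordered from the finest level (`levels[0]`) to the coarsest.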
pub struct AmgHierarchy {
pub levels: Vec<AmgLevel>,
}
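/// Symmetric strength-of-connection test: an off-diagonal entry is strong when
/// |a_ij| >= epsilon_strong * sqrt(|a_ii| * |a_jj|). Returns, for each row `i`,
/// the column indices `j != i` of its strong connections.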
fn strength_of_connection(a: &CsrMatrix<f64>, epsilon_strong: f64) -> Vec<Vec<usize>> {
let n = a.rows();
let mut strong = vec![Vec::new(); n];
let diag: Vec<f64> = (0..n).map(|i| a.get(i, i).abs()).collect();
for i in 0..n {
let range = a.row_range(i);
let d_ii = diag[i];
for pos in range {
let j = a.indices[pos];
if j == i {
continue;
}
let a_ij = a.data[pos].abs();
let threshold = epsilon_strong * (d_ii * diag[j]).sqrt();
if a_ij >= threshold {
strong[i].push(j);
}
}
}
strong
}
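/// Greedy aggregation over the strength graph. Pass 1 seeds a new aggregate around
/// each unassigned node that is isolated or still has an unassigned strong neighbour;
/// pass 2 attaches the remaining nodes to a neighbouring aggregate, or makes them
/// singletons. Returns the aggregate id of every node.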
fn greedy_aggregate(n: usize, strong: &[Vec<usize>]) -> Vec<usize> {
let mut agg_id = vec![usize::MAX; n];
let mut next_agg = 0usize;
for i in 0..n {
if agg_id[i] != usize::MAX {
continue;
}
let has_free_neighbour = strong[i].iter().any(|&j| agg_id[j] == usize::MAX);
if !has_free_neighbour && !strong[i].is_empty() {
continue;
}
agg_id[i] = next_agg;
for &j in &strong[i] {
if agg_id[j] == usize::MAX {
agg_id[j] = next_agg;
}
}
next_agg += 1;
}
for i in 0..n {
if agg_id[i] != usize::MAX {
continue;
}
let mut found = false;
for &j in &strong[i] {
if agg_id[j] != usize::MAX {
agg_id[i] = agg_id[j];
found = true;
break;
}
}
if !found {
agg_id[i] = next_agg;
next_agg += 1;
}
}
agg_id
}
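/// Inverts the node -> aggregate-id map into explicit per-aggregate node lists.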
fn build_aggregate_sets(n: usize, agg_id: &[usize]) -> Vec<Vec<usize>> {
let num_agg = agg_id.iter().copied().max().map(|v| v + 1).unwrap_or(0);
let mut aggregates = vec![Vec::new(); num_agg];
for i in 0..n {
if agg_id[i] < num_agg {
aggregates[agg_id[i]].push(i);
}
}
aggregates
}
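/// Partition the nodes of a square sparse matrix into aggregates using the strength
/// threshold `epsilon_strong` (0.25 is the value used elsewhere in this module).
/// Returns one list of node indices per aggregate; errors if the matrix is not square.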
pub fn aggregate(a: &CsrMatrix<f64>, epsilon_strong: f64) -> SparseResult<Vec<Vec<usize>>> {
if a.rows() != a.cols() {
return Err(SparseError::ValueError(
"Matrix must be square for aggregation".to_string(),
));
}
let n = a.rows();
let strong = strength_of_connection(a, epsilon_strong);
let agg_id = greedy_aggregate(n, &strong);
Ok(build_aggregate_sets(n, &agg_id))
}
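/// Build the tentative prolongation: one column per aggregate whose nonzeros are the
/// near-nullspace entries of that aggregate's nodes, scaled so each column has unit
/// 2-norm (columns with near-zero norm are left unscaled). Errors if the aggregates
/// do not form a partition of the nodes.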
pub fn tentative_prolongation(
aggregates: &[Vec<usize>],
near_nullspace: &Array1<f64>,
) -> SparseResult<CsrMatrix<f64>> {
let n = near_nullspace.len();
let num_agg = aggregates.len();
let mut covered = vec![false; n];
for (k, agg) in aggregates.iter().enumerate() {
for &i in agg {
if i >= n {
return Err(SparseError::ValueError(format!(
"Aggregate {k} contains node {i} which is out of range (n={n})"
)));
}
if covered[i] {
return Err(SparseError::ValueError(format!(
"Node {i} appears in more than one aggregate"
)));
}
covered[i] = true;
}
}
if covered.iter().any(|&c| !c) {
return Err(SparseError::ValueError(
"Some nodes are not covered by any aggregate".to_string(),
));
}
let mut agg_norms = vec![0.0_f64; num_agg];
for (k, agg) in aggregates.iter().enumerate() {
let sq: f64 = agg.iter().map(|&i| near_nullspace[i] * near_nullspace[i]).sum();
agg_norms[k] = sq.sqrt();
}
let nnz: usize = aggregates.iter().map(|agg| agg.len()).sum();
let mut rows = Vec::with_capacity(nnz);
let mut cols = Vec::with_capacity(nnz);
let mut vals = Vec::with_capacity(nnz);
for (k, agg) in aggregates.iter().enumerate() {
let norm = agg_norms[k];
let scale = if norm > 1e-14 { 1.0 / norm } else { 1.0 };
for &i in agg {
rows.push(i);
cols.push(k);
vals.push(near_nullspace[i] * scale);
}
}
CsrMatrix::from_triplets(n, num_agg, rows, cols, vals)
}
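/// Damped-Jacobi smoothing of the tentative prolongation, P = (I - omega * D^{-1} A) * P0,
/// with entries below a small drop tolerance discarded. Uses a dense n x num_agg
/// intermediate, so it is intended for moderate problem sizes.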
pub fn smooth_prolongation(
p_tent: &CsrMatrix<f64>,
a: &CsrMatrix<f64>,
omega: f64,
) -> SparseResult<CsrMatrix<f64>> {
let n = a.rows();
if p_tent.rows() != n {
return Err(SparseError::DimensionMismatch {
expected: n,
found: p_tent.rows(),
});
}
let num_agg = p_tent.cols();
let mut d_inv = vec![1.0_f64; n];
for i in 0..n {
let d = a.get(i, i);
if d.abs() > 1e-14 {
d_inv[i] = 1.0 / d;
}
}
let p0_dense = csr_to_dense(p_tent)?;
let mut ap0 = vec![vec![0.0_f64; num_agg]; n];
for i in 0..n {
let range = a.row_range(i);
for pos in range {
let k = a.indices[pos];
let a_ik = a.data[pos];
for j in 0..num_agg {
ap0[i][j] += a_ik * p0_dense[k][j];
}
}
}
let mut p_rows = Vec::new();
let mut p_cols = Vec::new();
let mut p_vals = Vec::new();
const DROP_TOL: f64 = 1e-14;
for i in 0..n {
for j in 0..num_agg {
let val = p0_dense[i][j] - omega * d_inv[i] * ap0[i][j];
if val.abs() > DROP_TOL {
p_rows.push(i);
p_cols.push(j);
p_vals.push(val);
}
}
}
CsrMatrix::from_triplets(n, num_agg, p_rows, p_cols, p_vals)
}
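/// Galerkin coarse operator A_c = P^T A P, computed through dense intermediates
/// with near-zero entries dropped from the result.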
fn galerkin_coarse(a: &CsrMatrix<f64>, p: &CsrMatrix<f64>) -> SparseResult<CsrMatrix<f64>> {
let n = a.rows();
let nc = p.cols();
if p.rows() != n {
return Err(SparseError::DimensionMismatch {
expected: n,
found: p.rows(),
});
}
let mut ap = vec![vec![0.0_f64; nc]; n];
let p_dense = csr_to_dense(p)?;
for i in 0..n {
let range = a.row_range(i);
for pos in range {
let k = a.indices[pos];
let a_ik = a.data[pos];
for j in 0..nc {
ap[i][j] += a_ik * p_dense[k][j];
}
}
}
let mut ac = vec![vec![0.0_f64; nc]; nc];
for i in 0..n {
for j in 0..nc {
let p_ij = p_dense[i][j];
if p_ij.abs() < 1e-14 {
continue;
}
for k in 0..nc {
ac[j][k] += p_ij * ap[i][k];
}
}
}
let mut rows = Vec::new();
let mut cols = Vec::new();
let mut vals = Vec::new();
const DROP_TOL: f64 = 1e-14;
for i in 0..nc {
for j in 0..nc {
if ac[i][j].abs() > DROP_TOL {
rows.push(i);
cols.push(j);
vals.push(ac[i][j]);
}
}
}
CsrMatrix::from_triplets(nc, nc, rows, cols, vals)
}
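/// Inverted diagonal of `a`; entries whose magnitude is below 1e-14 fall back to 1.0.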
fn compute_diag_inv(a: &CsrMatrix<f64>) -> Array1<f64> {
let n = a.rows();
let mut d = Array1::ones(n);
for i in 0..n {
let v = a.get(i, i);
if v.abs() > 1e-14 {
d[i] = 1.0 / v;
}
}
d
}
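/// Set up a smoothed-aggregation AMG hierarchy for the square matrix `a`.
///
/// * `near_nullspace` - near-nullspace vector on the finest level (length `a.rows()`),
///   e.g. the constant vector for a Laplacian.
/// * `n_levels` - maximum number of levels (must be at least 2).
/// * `epsilon_strong` - strength-of-connection threshold (e.g. 0.25).
/// * `omega` - prolongation smoothing weight (e.g. 4/3).
///
/// Coarsening stops early once a level has at most 4 unknowns or aggregation no
/// longer reduces the problem size.
///
/// A minimal usage sketch (not compiled as a doctest; it mirrors the tests below and
/// assumes a matrix built with `CsrMatrix::from_triplets`):
///
/// ```ignore
/// let ns = Array1::ones(a.rows());
/// let hierarchy = sa_setup(&a, &ns, 5, 0.25, 4.0 / 3.0)?;
/// assert!(hierarchy.levels.len() >= 2);
/// ```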
pub fn sa_setup(
a: &CsrMatrix<f64>,
near_nullspace: &Array1<f64>,
n_levels: usize,
epsilon_strong: f64,
omega: f64,
) -> SparseResult<AmgHierarchy> {
if a.rows() != a.cols() {
return Err(SparseError::ValueError(
"Matrix must be square for SA-AMG setup".to_string(),
));
}
if n_levels < 2 {
return Err(SparseError::ValueError(
"n_levels must be at least 2".to_string(),
));
}
if near_nullspace.len() != a.rows() {
return Err(SparseError::DimensionMismatch {
expected: a.rows(),
found: near_nullspace.len(),
});
}
let coarsest_size = 4;
let mut levels: Vec<AmgLevel> = Vec::with_capacity(n_levels);
let mut a_current = a.clone();
let mut ns_current = near_nullspace.clone();
for _level in 0..n_levels - 1 {
let n_current = a_current.rows();
if n_current <= coarsest_size {
break;
}
let aggregates = aggregate(&a_current, epsilon_strong)?;
let num_agg = aggregates.len();
if num_agg >= n_current || num_agg == 0 {
break;
}
let p_tent = tentative_prolongation(&aggregates, &ns_current)?;
let p = smooth_prolongation(&p_tent, &a_current, omega)?;
let r = p.transpose();
let nc = num_agg;
let mut ns_coarse = Array1::zeros(nc);
let r_dense = csr_to_dense(&r)?;
for j in 0..nc {
let mut acc = 0.0_f64;
for i in 0..n_current {
acc += r_dense[j][i] * ns_current[i];
}
ns_coarse[j] = acc;
}
let norm: f64 = ns_coarse.iter().map(|v| v * v).sum::<f64>().sqrt();
if norm > 1e-14 {
ns_coarse.mapv_inplace(|v| v / norm);
}
let a_coarse = galerkin_coarse(&a_current, &p)?;
let diag_inv = compute_diag_inv(&a_current);
let size = n_current;
levels.push(AmgLevel {
a: a_current.clone(),
p: Some(p),
r: Some(r),
diag_inv,
size,
});
a_current = a_coarse;
ns_current = ns_coarse;
if a_current.rows() <= coarsest_size {
break;
}
}
let diag_inv = compute_diag_inv(&a_current);
let size = a_current.rows();
levels.push(AmgLevel {
a: a_current,
p: None,
r: None,
diag_inv,
size,
});
if levels.is_empty() {
return Err(SparseError::ComputationError(
"SA-AMG setup produced no levels".to_string(),
));
}
Ok(AmgHierarchy { levels })
}
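/// `n_smooth` sweeps of weighted Jacobi: x <- x + omega * D^{-1} (b - A x).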
fn jacobi_smooth(
a: &CsrMatrix<f64>,
diag_inv: &Array1<f64>,
b: &Array1<f64>,
x: &mut Array1<f64>,
n_smooth: usize,
omega: f64,
) -> SparseResult<()> {
let n = a.rows();
let mut ax = Array1::zeros(n);
for _ in 0..n_smooth {
for i in 0..n {
let range = a.row_range(i);
let mut acc = 0.0_f64;
for pos in range {
acc += a.data[pos] * x[a.indices[pos]];
}
ax[i] = acc;
}
for i in 0..n {
x[i] += omega * diag_inv[i] * (b[i] - ax[i]);
}
}
Ok(())
}
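/// Residual r = b - A x.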
fn residual(a: &CsrMatrix<f64>, b: &Array1<f64>, x: &Array1<f64>) -> SparseResult<Array1<f64>> {
let n = a.rows();
let mut r = b.clone();
for i in 0..n {
let range = a.row_range(i);
let mut acc = 0.0_f64;
for pos in range {
acc += a.data[pos] * x[a.indices[pos]];
}
r[i] -= acc;
}
Ok(r)
}
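/// Restrict a fine-level vector to the coarse level: returns `r_mat * v`.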
fn restrict(r_mat: &CsrMatrix<f64>, v: &Array1<f64>) -> SparseResult<Array1<f64>> {
let (m, n) = r_mat.shape();
if v.len() != n {
return Err(SparseError::DimensionMismatch {
expected: n,
found: v.len(),
});
}
let mut result = Array1::zeros(m);
for i in 0..m {
let range = r_mat.row_range(i);
let mut acc = 0.0_f64;
for pos in range {
acc += r_mat.data[pos] * v[r_mat.indices[pos]];
}
result[i] = acc;
}
Ok(result)
}
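/// Prolongate a coarse-level correction to the fine level: returns `p_mat * v`.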
fn prolongate(p_mat: &CsrMatrix<f64>, v: &Array1<f64>) -> SparseResult<Array1<f64>> {
let (n, nc) = p_mat.shape();
if v.len() != nc {
return Err(SparseError::DimensionMismatch {
expected: nc,
found: v.len(),
});
}
let mut result = Array1::zeros(n);
for i in 0..n {
let range = p_mat.row_range(i);
let mut acc = 0.0_f64;
for pos in range {
acc += p_mat.data[pos] * v[p_mat.indices[pos]];
}
result[i] = acc;
}
Ok(result)
}
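/// Direct solve on the coarsest level by dense Gaussian elimination with partial
/// pivoting. If a pivot is (near) zero, the routine falls back to a plain diagonal
/// solve of the original system rather than failing.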
fn coarsest_solve(a: &CsrMatrix<f64>, b: &Array1<f64>) -> SparseResult<Array1<f64>> {
let n = a.rows();
if n == 0 {
return Ok(Array1::zeros(0));
}
let mut mat = vec![vec![0.0_f64; n]; n];
let mut rhs = b.to_vec();
for i in 0..n {
let range = a.row_range(i);
for pos in range {
mat[i][a.indices[pos]] = a.data[pos];
}
}
for col in 0..n {
let mut pivot_row = col;
let mut pivot_val = mat[col][col].abs();
for row in (col + 1)..n {
if mat[row][col].abs() > pivot_val {
pivot_val = mat[row][col].abs();
pivot_row = row;
}
}
if pivot_val < 1e-14 {
let mut x = Array1::zeros(n);
for i in 0..n {
let d = a.get(i, i);
if d.abs() > 1e-14 {
x[i] = b[i] / d;
}
}
return Ok(x);
}
if pivot_row != col {
mat.swap(col, pivot_row);
rhs.swap(col, pivot_row);
}
let pivot = mat[col][col];
for row in (col + 1)..n {
let factor = mat[row][col] / pivot;
for k in col..n {
let v = mat[col][k];
mat[row][k] -= factor * v;
}
rhs[row] -= factor * rhs[col];
}
}
let mut x = vec![0.0_f64; n];
for i in (0..n).rev() {
let mut sum = rhs[i];
for j in (i + 1)..n {
sum -= mat[i][j] * x[j];
}
if mat[i][i].abs() < 1e-14 {
x[i] = 0.0;
} else {
x[i] = sum / mat[i][i];
}
}
Ok(Array1::from_vec(x))
}
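/// One recursive V-cycle on `level`: pre-smooth, restrict the residual, recurse on
/// the coarser level, prolongate and add the correction, post-smooth. The coarsest
/// level is solved directly. Smoothing uses 2/3-damped Jacobi.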
fn vcycle_recursive(
hierarchy: &AmgHierarchy,
b: &Array1<f64>,
x: &mut Array1<f64>,
n_pre: usize,
n_post: usize,
level: usize,
) -> SparseResult<()> {
let lv = &hierarchy.levels[level];
let is_coarsest = level == hierarchy.levels.len() - 1 || lv.p.is_none();
if is_coarsest {
let sol = coarsest_solve(&lv.a, b)?;
for i in 0..sol.len() {
x[i] = sol[i];
}
return Ok(());
}
let smooth_omega = 2.0 / 3.0;
jacobi_smooth(&lv.a, &lv.diag_inv, b, x, n_pre, smooth_omega)?;
let r = residual(&lv.a, b, x)?;
let r_mat = lv.r.as_ref().ok_or_else(|| {
SparseError::ComputationError("No restriction operator at non-coarsest level".to_string())
})?;
let r_coarse = restrict(r_mat, &r)?;
let nc = r_coarse.len();
let mut e_coarse = Array1::zeros(nc);
vcycle_recursive(hierarchy, &r_coarse, &mut e_coarse, n_pre, n_post, level + 1)?;
let p_mat = lv.p.as_ref().ok_or_else(|| {
SparseError::ComputationError("No prolongation operator at non-coarsest level".to_string())
})?;
let e_fine = prolongate(p_mat, &e_coarse)?;
for i in 0..x.len() {
x[i] += e_fine[i];
}
jacobi_smooth(&lv.a, &lv.diag_inv, b, x, n_post, smooth_omega)?;
Ok(())
}
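/// Apply a single SA-AMG V-cycle to `A x = b`, starting from the initial guess `x`,
/// with `n_pre` pre-smoothing and `n_post` post-smoothing sweeps. Returns the
/// updated approximation.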
pub fn sa_vcycle(
hierarchy: &AmgHierarchy,
b: &Array1<f64>,
x: &Array1<f64>,
n_pre: usize,
n_post: usize,
) -> SparseResult<Array1<f64>> {
if hierarchy.levels.is_empty() {
return Err(SparseError::ComputationError(
"Empty AMG hierarchy".to_string(),
));
}
let n = hierarchy.levels[0].size;
if b.len() != n || x.len() != n {
return Err(SparseError::DimensionMismatch {
expected: n,
found: if b.len() != n { b.len() } else { x.len() },
});
}
let mut x_out = x.clone();
vcycle_recursive(hierarchy, b, &mut x_out, n_pre, n_post, 0)?;
Ok(x_out)
}
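/// Preconditioner adapter that applies one SA-AMG V-cycle with a zero initial guess
/// per call, for use with the iterative solvers in this crate.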
pub struct SaAmgPreconditioner<'a> {
hierarchy: &'a AmgHierarchy,
n_pre: usize,
n_post: usize,
}
impl<'a> SaAmgPreconditioner<'a> {
pub fn new(hierarchy: &'a AmgHierarchy, n_pre: usize, n_post: usize) -> Self {
Self {
hierarchy,
n_pre,
n_post,
}
}
}
impl<'a> Preconditioner<f64> for SaAmgPreconditioner<'a> {
fn apply(&self, r: &Array1<f64>) -> crate::error::SparseResult<Array1<f64>> {
let x0 = Array1::zeros(r.len());
sa_vcycle(self.hierarchy, r, &x0, self.n_pre, self.n_post)
}
}
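/// Solve `A x = b` for a symmetric positive definite matrix with AMG-preconditioned
/// conjugate gradients. The hierarchy is built internally from a constant
/// near-nullspace vector with up to 10 levels, epsilon_strong = 0.25 and
/// omega = 4/3; each preconditioner application is one V-cycle with 2 pre- and
/// 2 post-smoothing sweeps.
///
/// A minimal usage sketch (not compiled as a doctest; it mirrors the convergence
/// test below):
///
/// ```ignore
/// let b = Array1::ones(a.rows());
/// let result = sa_solve(&a, &b, 1e-8, 200)?;
/// assert!(result.converged);
/// ```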
pub fn sa_solve(
a: &CsrMatrix<f64>,
b: &Array1<f64>,
tol: f64,
max_iter: usize,
) -> SparseResult<SolverResult<f64>> {
let n = a.rows();
if a.cols() != n {
return Err(SparseError::ValueError(
"Matrix must be square".to_string(),
));
}
if b.len() != n {
return Err(SparseError::DimensionMismatch {
expected: n,
found: b.len(),
});
}
let scale = 1.0 / (n as f64).sqrt();
let near_nullspace = Array1::from_elem(n, scale);
let hierarchy = sa_setup(a, &near_nullspace, 10, 0.25, 4.0 / 3.0)?;
let amg_pc = SaAmgPreconditioner::new(&hierarchy, 2, 2);
let config = IterativeSolverConfig {
max_iter,
tol,
verbose: false,
};
pcg(a, b, &config, Some(&amg_pc as &dyn Preconditioner<f64>))
}
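/// Preconditioned conjugate gradients with a relative stopping criterion
/// (||r|| <= tol * ||b||). The iteration terminates early if the p^T A p inner
/// product (nearly) vanishes.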
fn pcg(
a: &CsrMatrix<f64>,
b: &Array1<f64>,
config: &IterativeSolverConfig,
precond: Option<&dyn Preconditioner<f64>>,
) -> SparseResult<SolverResult<f64>> {
let n = a.rows();
let mut x = Array1::zeros(n);
let mut r = b.clone();
let bnorm = norm2_arr(&r);
if bnorm < 1e-30 {
return Ok(SolverResult {
solution: x,
n_iter: 0,
residual_norm: 0.0,
converged: true,
});
}
let tol_abs = config.tol * bnorm;
let mut z = match precond {
Some(pc) => pc.apply(&r)?,
None => r.clone(),
};
let mut p = z.clone();
let mut rz = dot_arr(&r, &z);
let mut n_iter = config.max_iter;
for k in 0..config.max_iter {
let ap = csr_matvec_arr(a, &p)?;
let pap = dot_arr(&p, &ap);
if pap.abs() < 1e-30 {
// Breakdown of the p^T A p inner product: record how far we got and stop.
n_iter = k;
break;
}
let alpha = rz / pap;
axpy_mut_arr(&mut x, alpha, &p);
axpy_mut_arr(&mut r, -alpha, &ap);
let rnorm = norm2_arr(&r);
if rnorm <= tol_abs {
return Ok(SolverResult {
solution: x,
n_iter: k + 1,
residual_norm: rnorm,
converged: true,
});
}
z = match precond {
Some(pc) => pc.apply(&r)?,
None => r.clone(),
};
let rz_new = dot_arr(&r, &z);
let beta = rz_new / rz;
for (pi, &zi) in p.iter_mut().zip(z.iter()) {
*pi = zi + beta * *pi;
}
rz = rz_new;
}
let rnorm = norm2_arr(&r);
Ok(SolverResult {
solution: x,
n_iter,
residual_norm: rnorm,
converged: rnorm <= tol_abs,
})
}
fn csr_matvec_arr(a: &CsrMatrix<f64>, x: &Array1<f64>) -> SparseResult<Array1<f64>> {
let (m, n) = a.shape();
if x.len() != n {
return Err(SparseError::DimensionMismatch {
expected: n,
found: x.len(),
});
}
let mut y = Array1::zeros(m);
for i in 0..m {
let range = a.row_range(i);
let mut acc = 0.0_f64;
for pos in range {
acc += a.data[pos] * x[a.indices[pos]];
}
y[i] = acc;
}
Ok(y)
}
#[inline]
fn dot_arr(a: &Array1<f64>, b: &Array1<f64>) -> f64 {
a.iter().zip(b.iter()).map(|(ai, bi)| ai * bi).sum()
}
#[inline]
fn norm2_arr(v: &Array1<f64>) -> f64 {
dot_arr(v, v).sqrt()
}
#[inline]
fn axpy_mut_arr(y: &mut Array1<f64>, alpha: f64, x: &Array1<f64>) {
for (yi, &xi) in y.iter_mut().zip(x.iter()) {
*yi += alpha * xi;
}
}
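/// Expand a CSR matrix into a dense row-major `Vec<Vec<f64>>`.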
fn csr_to_dense(a: &CsrMatrix<f64>) -> SparseResult<Vec<Vec<f64>>> {
let (m, n) = a.shape();
let mut dense = vec![vec![0.0_f64; n]; m];
for i in 0..m {
let range = a.row_range(i);
for pos in range {
dense[i][a.indices[pos]] = a.data[pos];
}
}
Ok(dense)
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_relative_eq;
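/// Tridiagonal 1-D Laplacian test matrix with stencil [-1, 2, -1].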
fn laplacian_1d(n: usize) -> CsrMatrix<f64> {
let mut rows = Vec::new();
let mut cols = Vec::new();
let mut vals = Vec::new();
for i in 0..n {
rows.push(i);
cols.push(i);
vals.push(2.0_f64);
if i > 0 {
rows.push(i);
cols.push(i - 1);
vals.push(-1.0_f64);
rows.push(i - 1);
cols.push(i);
vals.push(-1.0_f64);
}
}
CsrMatrix::from_triplets(n, n, rows, cols, vals).expect("valid test setup")
}
#[test]
fn test_aggregate_small() {
let a = laplacian_1d(8);
let aggs = aggregate(&a, 0.25).expect("valid test setup");
let mut covered = vec![false; 8];
for agg in &aggs {
for &i in agg {
assert!(!covered[i], "Node {i} covered twice");
covered[i] = true;
}
}
assert!(covered.iter().all(|&c| c));
}
#[test]
fn test_tentative_prolongation_partition_of_unity() {
let a = laplacian_1d(8);
let aggs = aggregate(&a, 0.25).expect("valid test setup");
let ns = Array1::ones(8);
let p = tentative_prolongation(&aggs, &ns).expect("valid test setup");
assert_eq!(p.rows(), 8);
assert_eq!(p.cols(), aggs.len());
for k in 0..p.cols() {
let mut sq_sum = 0.0_f64;
for i in 0..8 {
let v = p.get(i, k);
sq_sum += v * v;
}
assert_relative_eq!(sq_sum, 1.0, epsilon = 1e-12);
}
}
#[test]
fn test_smooth_prolongation_shape() {
let a = laplacian_1d(8);
let aggs = aggregate(&a, 0.25).expect("valid test setup");
let ns = Array1::ones(8);
let p_tent = tentative_prolongation(&aggs, &ns).expect("valid test setup");
let p = smooth_prolongation(&p_tent, &a, 4.0 / 3.0).expect("valid test setup");
assert_eq!(p.rows(), 8);
assert_eq!(p.cols(), aggs.len());
}
#[test]
fn test_sa_setup_levels() {
let n = 32;
let a = laplacian_1d(n);
let ns = Array1::ones(n);
let hierarchy = sa_setup(&a, &ns, 5, 0.25, 4.0 / 3.0).expect("valid test setup");
assert!(hierarchy.levels.len() >= 2);
let coarsest = hierarchy.levels.last().expect("hierarchy has at least one level");
assert!(coarsest.size < n);
}
#[test]
fn test_sa_vcycle_reduces_residual() {
let n = 16;
let a = laplacian_1d(n);
let ns = Array1::ones(n);
let b = Array1::ones(n);
let hierarchy = sa_setup(&a, &ns, 5, 0.25, 4.0 / 3.0).expect("valid test setup");
let x0 = Array1::zeros(n);
let x1 = sa_vcycle(&hierarchy, &b, &x0, 2, 2).expect("valid test setup");
let r0: f64 = b.iter().map(|v| v * v).sum::<f64>().sqrt();
let r1 = residual(&a, &b, &x1).expect("valid test setup");
let r1_norm: f64 = r1.iter().map(|v| v * v).sum::<f64>().sqrt();
assert!(
r1_norm < r0,
"V-cycle should reduce residual: {r1_norm} >= {r0}"
);
}
#[test]
fn test_sa_solve_convergence() {
let n = 16;
let a = laplacian_1d(n);
let b = Array1::ones(n);
let result = sa_solve(&a, &b, 1e-8, 200).expect("valid test setup");
assert!(
result.converged,
"sa_solve did not converge: residual={}",
result.residual_norm
);
assert_eq!(result.solution.len(), n);
}
}