use crate::error::{LinalgError, LinalgResult};
use scirs2_core::ndarray::{Array2, ScalarOperand};
use scirs2_core::numeric::{Float, NumAssign};
use std::fmt::Debug;
use std::iter::Sum;
/// A node in a quadtree partition of a (row, column) index block, used to
/// drive hierarchical (H-matrix) compression.
#[derive(Debug, Clone)]
pub struct BlockClusterTree {
    /// Global row indices covered by this block.
    pub rows: Vec<usize>,
    /// Global column indices covered by this block.
    pub cols: Vec<usize>,
    /// The four sub-blocks (row-lo/col-lo, row-lo/col-hi, row-hi/col-lo,
    /// row-hi/col-hi), or `None` for a leaf.
    pub children: Option<Box<[BlockClusterTree; 4]>>,
    /// True when this node is not subdivided (small or admissible block).
    pub is_leaf: bool,
}

impl BlockClusterTree {
    /// Recursively partitions the `rows` x `cols` index block.
    ///
    /// A node becomes a leaf when either side has at most `leaf_size`
    /// entries, or when the block is admissible: the two index intervals are
    /// disjoint and `min(diam_rows, diam_cols) * eta <= dist`. Otherwise both
    /// index sets are halved and four children are built.
    ///
    /// `leaf_size` is clamped to at least 1: with `leaf_size == 0`, a side
    /// with a single index would split into an empty half and itself and
    /// recurse forever.
    pub fn build(rows: Vec<usize>, cols: Vec<usize>, leaf_size: usize, eta: f64) -> Self {
        // Guard against infinite recursion for leaf_size == 0 (see doc above).
        let min_leaf = leaf_size.max(1);
        let n_rows = rows.len();
        let n_cols = cols.len();
        if n_rows <= min_leaf || n_cols <= min_leaf {
            return BlockClusterTree {
                rows,
                cols,
                children: None,
                is_leaf: true,
            };
        }
        // 1-D "geometry" of the index sets: diameters of the two intervals
        // and the gap between them (0 when they overlap).
        let row_min = rows.iter().copied().min().unwrap_or(0);
        let row_max = rows.iter().copied().max().unwrap_or(0);
        let col_min = cols.iter().copied().min().unwrap_or(0);
        let col_max = cols.iter().copied().max().unwrap_or(0);
        let diam_rows = (row_max - row_min) as f64;
        let diam_cols = (col_max - col_min) as f64;
        let diam = diam_rows.min(diam_cols);
        let dist = if row_max < col_min {
            (col_min - row_max) as f64
        } else if col_max < row_min {
            (row_min - col_max) as f64
        } else {
            0.0_f64
        };
        // Admissible (well-separated) blocks are kept as leaves so that they
        // can later be approximated by a low-rank factor.
        if dist > 0.0 && diam * eta <= dist {
            return BlockClusterTree {
                rows,
                cols,
                children: None,
                is_leaf: true,
            };
        }
        // Split both index sets at their midpoint and recurse into the four
        // resulting sub-blocks.
        let row_mid = n_rows / 2;
        let col_mid = n_cols / 2;
        let rows_lo = rows[..row_mid].to_vec();
        let rows_hi = rows[row_mid..].to_vec();
        let cols_lo = cols[..col_mid].to_vec();
        let cols_hi = cols[col_mid..].to_vec();
        let c00 = BlockClusterTree::build(rows_lo.clone(), cols_lo.clone(), leaf_size, eta);
        let c01 = BlockClusterTree::build(rows_lo, cols_hi.clone(), leaf_size, eta);
        let c10 = BlockClusterTree::build(rows_hi.clone(), cols_lo, leaf_size, eta);
        let c11 = BlockClusterTree::build(rows_hi, cols_hi, leaf_size, eta);
        BlockClusterTree {
            rows,
            cols,
            children: Some(Box::new([c00, c01, c10, c11])),
            is_leaf: false,
        }
    }
    /// Number of leaves in this subtree (a leaf counts itself as 1).
    pub fn count_leaves(&self) -> usize {
        if self.is_leaf {
            return 1;
        }
        match &self.children {
            None => 1,
            Some(children) => children.iter().map(|c| c.count_leaves()).sum(),
        }
    }
}
/// A rank-`rank` factorization `A ≈ U · V^T` of an `nrows` x `ncols` block.
///
/// Both factors are stored row-major: `u[i * rank + k]` is `U[i][k]` and
/// `v[j * rank + k]` is `V[j][k]`.
#[derive(Debug, Clone)]
pub struct LowRankBlock<F: Float> {
    pub u: Vec<F>,
    pub v: Vec<F>,
    pub rank: usize,
    pub nrows: usize,
    pub ncols: usize,
}

impl<F: Float + Debug + Clone + Sum> LowRankBlock<F> {
    /// Computes `y = U (V^T x)` without densifying the block.
    ///
    /// # Errors
    /// Returns `DimensionError` when `x.len() != ncols`.
    pub fn matvec(&self, x: &[F]) -> LinalgResult<Vec<F>> {
        if x.len() != self.ncols {
            return Err(LinalgError::DimensionError(format!(
                "LowRankBlock matvec: x has length {} but ncols={}",
                x.len(),
                self.ncols
            )));
        }
        let r = self.rank;
        // First stage: t = V^T x (length `rank`).
        let tmp: Vec<F> = (0..r)
            .map(|k| {
                x.iter()
                    .enumerate()
                    .fold(F::zero(), |acc, (j, &xj)| acc + self.v[j * r + k] * xj)
            })
            .collect();
        // Second stage: y = U t (length `nrows`).
        let y: Vec<F> = (0..self.nrows)
            .map(|i| {
                tmp.iter()
                    .enumerate()
                    .fold(F::zero(), |acc, (k, &tk)| acc + self.u[i * r + k] * tk)
            })
            .collect();
        Ok(y)
    }
    /// Squared Frobenius norm of `U V^T` computed in factored form:
    /// `||U V^T||_F^2 = trace((U^T U) (V^T V))`, costing O(r^2 (m + n))
    /// instead of O(m n r).
    pub fn frobenius_norm_sq(&self) -> F {
        let r = self.rank;
        // Gram matrix (r x r) of the columns of a row-major (len x r) factor.
        let gram = |data: &[F], len: usize| -> Vec<F> {
            let mut g = vec![F::zero(); r * r];
            for k1 in 0..r {
                for k2 in 0..r {
                    g[k1 * r + k2] = (0..len)
                        .fold(F::zero(), |acc, i| acc + data[i * r + k1] * data[i * r + k2]);
                }
            }
            g
        };
        let gu = gram(&self.u, self.nrows);
        let gv = gram(&self.v, self.ncols);
        // trace(Gu * Gv).
        (0..r)
            .flat_map(|k1| (0..r).map(move |k2| (k1, k2)))
            .fold(F::zero(), |acc, (k1, k2)| {
                acc + gu[k1 * r + k2] * gv[k2 * r + k1]
            })
    }
}
/// A densely stored `nrows` x `ncols` block, row-major in `data`.
#[derive(Debug, Clone)]
pub struct DenseBlock<F: Float> {
    pub data: Vec<F>,
    pub nrows: usize,
    pub ncols: usize,
}

impl<F: Float + Debug + Clone> DenseBlock<F> {
    /// Dense matrix-vector product `y = A x`.
    ///
    /// # Errors
    /// Returns `DimensionError` when `x.len() != ncols`.
    pub fn matvec(&self, x: &[F]) -> LinalgResult<Vec<F>> {
        if x.len() != self.ncols {
            return Err(LinalgError::DimensionError(format!(
                "DenseBlock matvec: x has length {} but ncols={}",
                x.len(),
                self.ncols
            )));
        }
        let mut y = Vec::with_capacity(self.nrows);
        for i in 0..self.nrows {
            // Dot product of row i (a contiguous slice) with x.
            let row = &self.data[i * self.ncols..(i + 1) * self.ncols];
            let dot = row
                .iter()
                .zip(x.iter())
                .fold(F::zero(), |acc, (&aij, &xj)| acc + aij * xj);
            y.push(dot);
        }
        Ok(y)
    }
    /// Squared Frobenius norm: sum of squares of all stored entries.
    pub fn frobenius_norm_sq(&self) -> F {
        let mut total = F::zero();
        for &entry in &self.data {
            total = total + entry * entry;
        }
        total
    }
}
#[derive(Debug, Clone)]
pub enum HBlock<F: Float> {
LowRank(LowRankBlock<F>),
Dense(DenseBlock<F>),
}
impl<F: Float + Debug + Clone + Sum> HBlock<F> {
pub fn nrows(&self) -> usize {
match self {
HBlock::LowRank(b) => b.nrows,
HBlock::Dense(b) => b.nrows,
}
}
pub fn ncols(&self) -> usize {
match self {
HBlock::LowRank(b) => b.ncols,
HBlock::Dense(b) => b.ncols,
}
}
pub fn matvec_add(&self, x: &[F], y: &mut [F]) -> LinalgResult<()> {
let result = match self {
HBlock::LowRank(b) => b.matvec(x)?,
HBlock::Dense(b) => b.matvec(x)?,
};
for (yi, ri) in y.iter_mut().zip(result.iter()) {
*yi = *yi + *ri;
}
Ok(())
}
pub fn frobenius_norm_sq(&self) -> F {
match self {
HBlock::LowRank(b) => b.frobenius_norm_sq(),
HBlock::Dense(b) => b.frobenius_norm_sq(),
}
}
}
/// A hierarchical matrix: a flat list of positioned blocks, each dense or
/// low-rank, that together tile the full `nrows` x `ncols` matrix.
#[derive(Debug)]
pub struct HMatrix<F: Float> {
    pub(crate) blocks: Vec<(usize, usize, HBlock<F>)>,
    pub nrows: usize,
    pub ncols: usize,
    pub leaf_size: usize,
}

impl<F: Float + Debug + Clone + Sum + 'static> HMatrix<F> {
    /// Builds an `n` x `n` H-matrix from an entry-wise kernel.
    ///
    /// A block cluster tree partitions the index set; admissible leaves are
    /// approximated by ACA with tolerance `tol`, all others stored densely.
    ///
    /// # Errors
    /// Returns `ValueError` for `n == 0`; propagates ACA failures.
    pub fn from_kernel<K>(
        n: usize,
        kernel: K,
        leaf_size: usize,
        eta: f64,
        tol: F,
    ) -> LinalgResult<Self>
    where
        K: Fn(usize, usize) -> F,
    {
        if n == 0 {
            return Err(LinalgError::ValueError(
                "HMatrix::from_kernel: n must be > 0".into(),
            ));
        }
        let tree = BlockClusterTree::build((0..n).collect(), (0..n).collect(), leaf_size, eta);
        let mut blocks = Vec::new();
        Self::collect_blocks(&tree, &kernel, leaf_size, eta, tol, &mut blocks)?;
        Ok(HMatrix {
            blocks,
            nrows: n,
            ncols: n,
            leaf_size,
        })
    }
    /// Walks the cluster tree and materializes one block per leaf: low-rank
    /// via ACA for admissible leaves, dense kernel evaluation otherwise.
    fn collect_blocks<K>(
        node: &BlockClusterTree,
        kernel: &K,
        leaf_size: usize,
        eta: f64,
        tol: F,
        out: &mut Vec<(usize, usize, HBlock<F>)>,
    ) -> LinalgResult<()>
    where
        K: Fn(usize, usize) -> F,
    {
        // Interior node: recurse into all four children, nothing to emit.
        if !node.is_leaf {
            if let Some(children) = &node.children {
                for child in children.iter() {
                    Self::collect_blocks(child, kernel, leaf_size, eta, tol, out)?;
                }
            }
            return Ok(());
        }
        let n_rows = node.rows.len();
        let n_cols = node.cols.len();
        // Index sets are contiguous ranges produced by midpoint splitting, so
        // their minimum is the block's global offset.
        let row_offset = node.rows.iter().copied().min().unwrap_or(0);
        let col_offset = node.cols.iter().copied().min().unwrap_or(0);
        let admissible = Self::is_admissible_block(&node.rows, &node.cols, leaf_size, eta);
        let block = if admissible && n_rows > 1 && n_cols > 1 {
            HBlock::LowRank(aca_approximate(kernel, &node.rows, &node.cols, tol)?)
        } else {
            // Evaluate the kernel entries in row-major order.
            let data: Vec<F> = node
                .rows
                .iter()
                .flat_map(|&gi| node.cols.iter().map(move |&gj| kernel(gi, gj)))
                .collect();
            HBlock::Dense(DenseBlock {
                data,
                nrows: n_rows,
                ncols: n_cols,
            })
        };
        out.push((row_offset, col_offset, block));
        Ok(())
    }
    /// Admissibility test on 1-D index intervals: both sides must exceed
    /// `leaf_size`, the intervals must be disjoint, and
    /// `min(diam_rows, diam_cols) * eta <= dist`.
    fn is_admissible_block(rows: &[usize], cols: &[usize], leaf_size: usize, eta: f64) -> bool {
        if rows.len() <= leaf_size || cols.len() <= leaf_size {
            return false;
        }
        let bounds = |idx: &[usize]| {
            let lo = idx.iter().copied().min().unwrap_or(0);
            let hi = idx.iter().copied().max().unwrap_or(0);
            (lo, hi)
        };
        let (row_min, row_max) = bounds(rows);
        let (col_min, col_max) = bounds(cols);
        // Gap between the two intervals; overlapping blocks never qualify.
        let dist = if row_max < col_min {
            (col_min - row_max) as f64
        } else if col_max < row_min {
            (row_min - col_max) as f64
        } else {
            return false;
        };
        let diam = ((row_max - row_min) as f64).min((col_max - col_min) as f64);
        dist > 0.0 && diam * eta <= dist
    }
    /// Total number of stored blocks.
    pub fn num_blocks(&self) -> usize {
        self.blocks.len()
    }
    /// Number of low-rank (ACA-compressed) blocks.
    pub fn num_lowrank_blocks(&self) -> usize {
        let mut count = 0;
        for (_, _, block) in &self.blocks {
            if let HBlock::LowRank(_) = block {
                count += 1;
            }
        }
        count
    }
    /// Number of dense (uncompressed) blocks.
    pub fn num_dense_blocks(&self) -> usize {
        let mut count = 0;
        for (_, _, block) in &self.blocks {
            if let HBlock::Dense(_) = block {
                count += 1;
            }
        }
        count
    }
}
/// Adaptive Cross Approximation (ACA) of the block
/// `A[i][j] = kernel(rows[i], cols[j])`, returning `A ≈ U · V^T` with `U`
/// (m x rank) and `V` (n x rank), both stored row-major.
///
/// Uses partial pivoting on residual rows/columns. The iteration stops when
/// the pivot becomes numerically negligible, when the newest rank-1 term's
/// relative Frobenius contribution drops below `tol`, or when the rank
/// reaches `min(m, n)`.
pub fn aca_approximate<F, K>(
    kernel: &K,
    rows: &[usize],
    cols: &[usize],
    tol: F,
) -> LinalgResult<LowRankBlock<F>>
where
    F: Float + Debug + Clone + Sum,
    K: Fn(usize, usize) -> F,
{
    let m = rows.len();
    let n = cols.len();
    let max_rank = m.min(n);
    // Degenerate (empty) block: exact rank-0 factorization.
    if m == 0 || n == 0 {
        return Ok(LowRankBlock {
            u: Vec::new(),
            v: Vec::new(),
            rank: 0,
            nrows: m,
            ncols: n,
        });
    }
    // Accumulated rank-1 factors: u_cols[k] / v_cols[k] are the k-th columns
    // of U and V, kept column-wise during the iteration.
    let mut u_cols: Vec<Vec<F>> = Vec::new();
    let mut v_cols: Vec<Vec<F>> = Vec::new();
    let mut used_row_pivots = vec![false; m];
    // Running estimate of ||U V^T||_F^2, updated incrementally per term.
    let mut approx_norm_sq = F::zero();
    let mut row_pivot = 0usize;
    for _iter in 0..max_rank {
        // Residual of the current pivot row: kernel row minus the part
        // already captured by the accumulated rank-1 terms.
        let mut res_row = vec![F::zero(); n];
        for (lj, &gj) in cols.iter().enumerate() {
            let gi = rows[row_pivot];
            let mut v = kernel(gi, gj);
            for k in 0..u_cols.len() {
                v = v - u_cols[k][row_pivot] * v_cols[k][lj];
            }
            res_row[lj] = v;
        }
        // Column pivot: entry of largest magnitude in the residual row.
        let col_pivot = res_row
            .iter()
            .enumerate()
            .max_by(|(_, a), (_, b)| {
                a.abs()
                    .partial_cmp(&b.abs())
                    .unwrap_or(std::cmp::Ordering::Equal)
            })
            .map(|(i, _)| i)
            .unwrap_or(0);
        let pivot_val = res_row[col_pivot];
        // A near-zero pivot means the residual row is numerically zero; no
        // further progress is possible from this pivot row.
        if pivot_val.abs() <= F::epsilon() * F::from(1000.0).unwrap_or(F::one()) {
            break;
        }
        let inv_pivot = F::one() / pivot_val;
        // New V column: the residual row scaled by 1/pivot.
        let new_v: Vec<F> = res_row.iter().map(|&x| x * inv_pivot).collect();
        // New U column: the (unscaled) residual of the pivot column.
        let mut new_u = vec![F::zero(); m];
        for (li, &gi) in rows.iter().enumerate() {
            let gj = cols[col_pivot];
            let mut val = kernel(gi, gj);
            for k in 0..u_cols.len() {
                val = val - u_cols[k][li] * v_cols[k][col_pivot];
            }
            new_u[li] = val;
        }
        // Incremental norm update:
        // ||A_k||^2 = ||A_{k-1}||^2 + 2 * sum_j (u_j·u_k)(v_j·v_k)
        //           + ||u_k||^2 ||v_k||^2.
        let u_norm_sq: F = new_u.iter().map(|&x| x * x).sum();
        let v_norm_sq: F = new_v.iter().map(|&x| x * x).sum();
        let rank_contrib_sq = u_norm_sq * v_norm_sq;
        let cross_term: F = u_cols
            .iter()
            .zip(v_cols.iter())
            .fold(F::zero(), |acc, (uk, vk)| {
                let dot_u: F = uk.iter().zip(new_u.iter()).map(|(&a, &b)| a * b).sum();
                let dot_v: F = vk.iter().zip(new_v.iter()).map(|(&a, &b)| a * b).sum();
                acc + dot_u * dot_v
            });
        approx_norm_sq = approx_norm_sq + rank_contrib_sq + (F::one() + F::one()) * cross_term;
        u_cols.push(new_u);
        v_cols.push(new_v);
        // Converged once the newest rank-1 term is relatively below tol
        // (comparison is done on squared norms).
        if approx_norm_sq > F::zero() {
            let rel_contrib = rank_contrib_sq / approx_norm_sq;
            if rel_contrib < tol * tol {
                break;
            }
        }
        used_row_pivots[row_pivot] = true;
        // Next row pivot: the unused row with the largest entry in the newest
        // U column (largest remaining residual contribution).
        let next_row = (0..m).filter(|&i| !used_row_pivots[i]).max_by(|&a, &b| {
            let au = u_cols.last().map(|uk| uk[a].abs()).unwrap_or(F::zero());
            let bu = u_cols.last().map(|uk| uk[b].abs()).unwrap_or(F::zero());
            au.partial_cmp(&bu).unwrap_or(std::cmp::Ordering::Equal)
        });
        match next_row {
            Some(r) => row_pivot = r,
            None => break,
        }
    }
    // Flatten the column lists into the row-major layout expected by
    // LowRankBlock: u[i * rank + k], v[j * rank + k].
    let rank = u_cols.len();
    let mut u_flat = vec![F::zero(); m * rank];
    let mut v_flat = vec![F::zero(); n * rank];
    for k in 0..rank {
        for i in 0..m {
            u_flat[i * rank + k] = u_cols[k][i];
        }
        for j in 0..n {
            v_flat[j * rank + k] = v_cols[k][j];
        }
    }
    Ok(LowRankBlock {
        u: u_flat,
        v: v_flat,
        rank,
        nrows: m,
        ncols: n,
    })
}
/// Computes `y = H x` by accumulating every block's contribution into the
/// output vector at the block's (row, column) offset.
///
/// # Errors
/// Returns `DimensionError` when `x.len() != h.ncols`.
pub fn hmatrix_matvec<F>(h: &HMatrix<F>, x: &[F]) -> LinalgResult<Vec<F>>
where
    F: Float + Debug + Clone + Sum + 'static,
{
    if x.len() != h.ncols {
        return Err(LinalgError::DimensionError(format!(
            "hmatrix_matvec: x length {} != h.ncols {}",
            x.len(),
            h.ncols
        )));
    }
    let mut y = vec![F::zero(); h.nrows];
    for &(row_off, col_off, ref block) in h.blocks.iter() {
        let nr = block.nrows();
        let nc = block.ncols();
        // Each block only sees its slice of the input and output vectors.
        block.matvec_add(&x[col_off..col_off + nc], &mut y[row_off..row_off + nr])?;
    }
    Ok(y)
}
/// Recompresses every low-rank block of `h` in place via a truncated SVD.
///
/// Each `U · V^T` factor is densified, decomposed by SVD, and truncated to
/// the smallest rank whose singular values stay at or above
/// `tol * sigma_max` (clamped to at least 1). Blocks whose rank would not
/// shrink, and all dense blocks, are left untouched.
///
/// # Errors
/// Returns `ComputationError` if densification or the SVD fails.
pub fn hmatrix_compress<F>(h: &mut HMatrix<F>, tol: F) -> LinalgResult<()>
where
    F: Float + Debug + Clone + Sum + NumAssign + ScalarOperand + Send + Sync + 'static,
{
    for (_, _, block) in h.blocks.iter_mut() {
        if let HBlock::LowRank(lr) = block {
            if lr.rank == 0 {
                continue;
            }
            let m = lr.nrows;
            let n = lr.ncols;
            let r = lr.rank;
            // Densify the block: A = U * V^T, row-major m x n.
            let mut a_data = vec![F::zero(); m * n];
            for i in 0..m {
                for j in 0..n {
                    let mut s = F::zero();
                    for k in 0..r {
                        s += lr.u[i * r + k] * lr.v[j * r + k];
                    }
                    a_data[i * n + j] = s;
                }
            }
            let a_arr = Array2::from_shape_vec((m, n), a_data).map_err(|e| {
                LinalgError::ComputationError(format!("hmatrix_compress: shape error: {e}"))
            })?;
            let (u_arr, s_arr, vt_arr) = crate::decomposition::svd(&a_arr.view(), true, None)
                .map_err(|e| LinalgError::ComputationError(format!("hmatrix_compress svd: {e}")))?;
            // Relative truncation threshold from the largest singular value
            // (assumes `svd` returns singular values in descending order —
            // TODO confirm against crate::decomposition::svd docs).
            let sigma_max = if s_arr.is_empty() {
                F::zero()
            } else {
                s_arr[0]
            };
            let threshold = tol * sigma_max;
            // Keep the leading singular values >= threshold; clamp to [1, r].
            let new_rank = s_arr
                .iter()
                .take_while(|&&sv| sv >= threshold)
                .count()
                .max(1)
                .min(r);
            // No reduction possible: keep the existing factors.
            if new_rank >= r {
                continue;
            }
            // Rebuild the factors, splitting each singular value as sqrt(s)
            // into both sides so U and V stay balanced in magnitude.
            let mut new_u = vec![F::zero(); m * new_rank];
            let mut new_v = vec![F::zero(); n * new_rank];
            for k in 0..new_rank {
                let sv = s_arr[k];
                let sv_sqrt = sv.sqrt();
                for i in 0..m {
                    new_u[i * new_rank + k] = u_arr[[i, k]] * sv_sqrt;
                }
                for j in 0..n {
                    new_v[j * new_rank + k] = vt_arr[[k, j]] * sv_sqrt;
                }
            }
            lr.u = new_u;
            lr.v = new_v;
            lr.rank = new_rank;
        }
    }
    Ok(())
}
/// Frobenius norm of the H-matrix.
///
/// The blocks tile the matrix without overlap, so the squared block norms
/// simply add up; the result is the square root of that sum.
pub fn hmatrix_frobenius_norm<F>(h: &HMatrix<F>) -> F
where
    F: Float + Debug + Clone + Sum + 'static,
{
    let mut total = F::zero();
    for (_, _, block) in h.blocks.iter() {
        total = total + block.frobenius_norm_sq();
    }
    total.sqrt()
}
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;

    /// Smooth, symmetric test kernel 1 / (|i - j| + 1); its off-diagonal
    /// blocks are numerically low-rank.
    fn cauchy_kernel(i: usize, j: usize) -> f64 {
        let diff = (i as f64 - j as f64).abs();
        1.0 / (diff + 1.0)
    }

    /// Dense row-major n x n reference matrix of the Cauchy kernel.
    fn dense_cauchy(n: usize) -> Vec<f64> {
        let mut a = vec![0.0f64; n * n];
        for i in 0..n {
            for j in 0..n {
                a[i * n + j] = cauchy_kernel(i, j);
            }
        }
        a
    }

    /// Reference dense matrix-vector product.
    fn dense_matvec(a: &[f64], n: usize, x: &[f64]) -> Vec<f64> {
        let mut y = vec![0.0f64; n];
        for i in 0..n {
            for j in 0..n {
                y[i] += a[i * n + j] * x[j];
            }
        }
        y
    }

    #[test]
    fn test_hmatrix_build_and_matvec_n64() {
        let n = 64usize;
        let h: HMatrix<f64> =
            HMatrix::from_kernel(n, cauchy_kernel, 8, 2.0, 1e-6).expect("build failed");
        let x: Vec<f64> = (0..n).map(|i| (i as f64 * 1.3 + 0.7).sin()).collect();
        let y_h = hmatrix_matvec(&h, &x).expect("matvec failed");
        let a_dense = dense_cauchy(n);
        let y_dense = dense_matvec(&a_dense, n, &x);
        assert_eq!(y_h.len(), n);
        // Compare against the dense reference with a relative tolerance.
        let max_err = y_h
            .iter()
            .zip(y_dense.iter())
            .map(|(a, b)| (a - b).abs())
            .fold(0.0f64, f64::max);
        let max_y = y_dense.iter().map(|v| v.abs()).fold(0.0f64, f64::max);
        assert!(
            max_err < 1e-3 * max_y + 1e-10,
            "matvec error too large: {max_err:.2e} (max_y={max_y:.2e})"
        );
    }

    #[test]
    fn test_aca_low_rank_smooth_kernel() {
        // Well-separated 16x16 block of an exponentially decaying kernel:
        // ACA should find a small numerical rank.
        let rows: Vec<usize> = (0..16).collect();
        let cols: Vec<usize> = (32..48).collect();
        let kernel = |i: usize, j: usize| -> f64 {
            let d = (i as f64 - j as f64).abs();
            (-d / 20.0).exp()
        };
        let lr = aca_approximate(&kernel, &rows, &cols, 1e-8).expect("ACA failed");
        assert!(
            lr.rank <= 6,
            "expected rank ≤ 6 for smooth kernel, got {}",
            lr.rank
        );
        assert_eq!(lr.nrows, 16);
        assert_eq!(lr.ncols, 16);
    }

    #[test]
    fn test_hmatrix_compress_reduces_rank() {
        let n = 32usize;
        let mut h: HMatrix<f64> =
            HMatrix::from_kernel(n, cauchy_kernel, 4, 1.5, 1e-4).expect("build failed");
        let x: Vec<f64> = (0..n).map(|i| (i as f64 * 0.1).sin()).collect();
        let y_before = hmatrix_matvec(&h, &x).expect("matvec before compress failed");
        let rank_before: usize = h
            .blocks
            .iter()
            .filter_map(|(_, _, b)| {
                if let HBlock::LowRank(lr) = b {
                    Some(lr.rank)
                } else {
                    None
                }
            })
            .sum();
        hmatrix_compress(&mut h, 1e-6).expect("compress failed");
        let y_after = hmatrix_matvec(&h, &x).expect("matvec after compress failed");
        let rank_after: usize = h
            .blocks
            .iter()
            .filter_map(|(_, _, b)| {
                if let HBlock::LowRank(lr) = b {
                    Some(lr.rank)
                } else {
                    None
                }
            })
            .sum();
        // Truncated-SVD recompression either keeps or shrinks each block's
        // rank (blocks whose rank would not decrease are skipped), so the
        // total can never grow.
        assert!(
            rank_after <= rank_before,
            "rank increased after compression: {} → {}",
            rank_before,
            rank_after
        );
        let max_err = y_before
            .iter()
            .zip(y_after.iter())
            .map(|(a, b)| (a - b).abs())
            .fold(0.0f64, f64::max);
        assert!(
            max_err < 1e-3,
            "accuracy lost after compression: max diff = {max_err:.2e}"
        );
    }

    #[test]
    fn test_lowrank_block_shape() {
        // Hand-built rank-2 factorization of a 3x4 block (row-major factors).
        let lr = LowRankBlock::<f64> {
            u: vec![1.0, 0.0, 0.0, 1.0, 0.5, 0.5],
            v: vec![1.0, 0.0, 0.0, 1.0, 0.5, 0.5, 1.0, 0.5],
            rank: 2,
            nrows: 3,
            ncols: 4,
        };
        assert_eq!(lr.nrows, 3);
        assert_eq!(lr.ncols, 4);
        assert_eq!(lr.rank, 2);
        let x = vec![1.0, 0.0, 0.0, 0.0];
        let y = lr.matvec(&x).expect("matvec failed");
        assert_eq!(y.len(), 3);
    }

    #[test]
    fn test_dense_block_fallback() {
        // leaf_size == n keeps the whole matrix as a single dense leaf.
        let n = 8usize;
        let h: HMatrix<f64> =
            HMatrix::from_kernel(n, cauchy_kernel, n, 2.0, 1e-6).expect("build failed");
        let num_lr = h.num_lowrank_blocks();
        assert_eq!(h.num_dense_blocks(), 1, "expected a single dense block");
        assert_eq!(
            num_lr, 0,
            "expected no low-rank blocks with large leaf_size"
        );
    }

    #[test]
    fn test_frobenius_norm_matches_dense() {
        let n = 32usize;
        let h: HMatrix<f64> =
            HMatrix::from_kernel(n, cauchy_kernel, 4, 2.0, 1e-8).expect("build failed");
        let h_norm = hmatrix_frobenius_norm(&h);
        let a_dense = dense_cauchy(n);
        let dense_norm: f64 = a_dense.iter().map(|&v| v * v).sum::<f64>().sqrt();
        let rel_err = (h_norm - dense_norm).abs() / (dense_norm + 1e-15);
        assert!(
            rel_err < 0.05,
            "Frobenius norm mismatch: H={h_norm:.6}, dense={dense_norm:.6}, rel_err={rel_err:.4}"
        );
    }

    #[test]
    fn test_matvec_matches_dense_n32() {
        let n = 32usize;
        let h: HMatrix<f64> =
            HMatrix::from_kernel(n, cauchy_kernel, 4, 2.0, 1e-8).expect("build failed");
        let x: Vec<f64> = (0..n)
            .map(|i| if i % 3 == 0 { 1.0 } else { -0.5 })
            .collect();
        let y_h = hmatrix_matvec(&h, &x).expect("H-matvec failed");
        let a_dense = dense_cauchy(n);
        let y_dense = dense_matvec(&a_dense, n, &x);
        let max_y = y_dense.iter().map(|v| v.abs()).fold(0.0f64, f64::max);
        let max_err = y_h
            .iter()
            .zip(y_dense.iter())
            .map(|(a, b)| (a - b).abs())
            .fold(0.0f64, f64::max);
        assert!(
            max_err < 1e-4 * max_y + 1e-10,
            "matvec error: {max_err:.2e}, max_y={max_y:.2e}"
        );
    }

    #[test]
    fn test_round_trip_build_compress_matvec() {
        // Build, compress, then matvec: accuracy must survive the round trip.
        let n = 48usize;
        let mut h: HMatrix<f64> =
            HMatrix::from_kernel(n, cauchy_kernel, 6, 2.0, 1e-6).expect("build failed");
        let x: Vec<f64> = (0..n).map(|i| (i as f64 * 0.2).cos()).collect();
        hmatrix_compress(&mut h, 1e-5).expect("compress failed");
        let y_h = hmatrix_matvec(&h, &x).expect("post-compress matvec failed");
        let a_dense = dense_cauchy(n);
        let y_dense = dense_matvec(&a_dense, n, &x);
        let max_y = y_dense.iter().map(|v| v.abs()).fold(0.0f64, f64::max);
        let max_err = y_h
            .iter()
            .zip(y_dense.iter())
            .map(|(a, b)| (a - b).abs())
            .fold(0.0f64, f64::max);
        assert!(
            max_err < 1e-2 * max_y + 1e-8,
            "round-trip error too large: {max_err:.2e}, max_y={max_y:.2e}"
        );
    }

    #[test]
    fn test_block_cluster_tree_structure() {
        let rows: Vec<usize> = (0..16).collect();
        let cols: Vec<usize> = (0..16).collect();
        let tree = BlockClusterTree::build(rows, cols, 4, 2.0);
        let num_leaves = tree.count_leaves();
        assert!(num_leaves >= 1, "tree should have at least one leaf");
    }

    #[test]
    fn test_dense_block_matvec() {
        // Diagonal 3x3 block: y[i] = diag[i] * x[i].
        let block = DenseBlock::<f64> {
            data: vec![1.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 3.0],
            nrows: 3,
            ncols: 3,
        };
        let x = vec![1.0, 2.0, 3.0];
        let y = block.matvec(&x).expect("dense matvec failed");
        assert_abs_diff_eq!(y[0], 1.0, epsilon = 1e-14);
        assert_abs_diff_eq!(y[1], 4.0, epsilon = 1e-14);
        assert_abs_diff_eq!(y[2], 9.0, epsilon = 1e-14);
    }
}
}