use crate::error::{IntegrateError, IntegrateResult};
use scirs2_core::ndarray::Array1;
use std::collections::HashMap;
use std::f64::consts::PI;
/// Nodes and weights of a one-dimensional quadrature rule.
///
/// For the Clenshaw-Curtis and Gauss-Legendre families the reference domain
/// is [-1, 1]; for Gauss-Hermite the nodes lie on the whole real line.
#[derive(Debug, Clone)]
pub struct OneDRule {
    /// Quadrature abscissas.
    pub nodes: Vec<f64>,
    /// Quadrature weights, one per node.
    pub weights: Vec<f64>,
}
/// A leveled family of one-dimensional quadrature rules used as the building
/// block of the Smolyak construction. `Send + Sync` so a boxed family can be
/// shared across threads.
pub trait OneDRuleFamily: Send + Sync {
    /// Return the rule for `level` (1-based). Implementations in this file
    /// reject `level == 0` with a `ValueError`.
    fn rule(&self, level: usize) -> IntegrateResult<OneDRule>;
}
/// Nested Clenshaw-Curtis rule family: level 1 is the single midpoint rule,
/// level l >= 2 uses n = 2^(l-1), i.e. n + 1 Chebyshev-extrema points.
#[derive(Debug, Clone, Copy)]
pub struct ClenshawCurtisFamily;
impl OneDRuleFamily for ClenshawCurtisFamily {
    /// Map a 1-based level onto the doubling sequence n = 0, 2, 4, 8, ...
    /// so that successive levels produce nested node sets.
    fn rule(&self, level: usize) -> IntegrateResult<OneDRule> {
        match level {
            0 => Err(IntegrateError::ValueError("Level must be >= 1".into())),
            1 => cc_rule(0),
            l => cc_rule(1_usize << (l - 1)),
        }
    }
}
/// Clenshaw-Curtis rule of "order" `n` on [-1, 1], returning `n + 1` nodes
/// for `n >= 2`. `n == 0` degenerates to the midpoint rule and `n == 1` is
/// special-cased as the three-point Simpson rule (weights 1/3, 4/3, 1/3).
fn cc_rule(n: usize) -> IntegrateResult<OneDRule> {
    if n == 0 {
        // One node at the origin carrying the full length of [-1, 1].
        return Ok(OneDRule {
            nodes: vec![0.0],
            weights: vec![2.0],
        });
    }
    if n == 1 {
        return Ok(OneDRule {
            nodes: vec![-1.0, 0.0, 1.0],
            weights: vec![1.0 / 3.0, 4.0 / 3.0, 1.0 / 3.0],
        });
    }
    let nf = n as f64;
    // Chebyshev-extrema nodes cos(j*pi/n), j = 0..n (descending from 1 to -1).
    let nodes: Vec<f64> = (0..=n).map(|j| (j as f64 * PI / nf).cos()).collect();
    let half_n = n / 2;
    let weights: Vec<f64> = (0..=n)
        .map(|j| {
            // Endpoint weights carry half the interior multiplier.
            let c_j: f64 = if j == 0 || j == n { 1.0 } else { 2.0 };
            let theta_j = j as f64 * PI / nf;
            let mut s = 0.0_f64;
            for k in 1..=half_n {
                // The final cosine term is halved only when n is even.
                let b_k: f64 = if k == half_n && n % 2 == 0 { 2.0 - 1.0 } else { 2.0 };
                s += b_k / (4.0 * (k as f64) * (k as f64) - 1.0) * (2.0 * k as f64 * theta_j).cos();
            }
            c_j / nf * (1.0 - s)
        })
        .collect();
    Ok(OneDRule { nodes, weights })
}
/// Gauss-Legendre rule family: the level is used directly as the point count
/// (level l yields an l-point rule on [-1, 1]).
#[derive(Debug, Clone, Copy)]
pub struct GaussLegendreFamily;
impl OneDRuleFamily for GaussLegendreFamily {
    /// The level is interpreted directly as the Gauss-Legendre point count.
    fn rule(&self, level: usize) -> IntegrateResult<OneDRule> {
        match level {
            0 => Err(IntegrateError::ValueError("Level must be >= 1".into())),
            n => gl_rule(n),
        }
    }
}
/// Gauss-Legendre quadrature rule with `n` points on [-1, 1].
///
/// Nodes are the roots of the Legendre polynomial P_n, located by Newton's
/// method from the standard Chebyshev-based initial guess; weights are
/// 2 / ((1 - x^2) * P_n'(x)^2).
///
/// # Errors
/// Returns `ValueError` if `n == 0`.
fn gl_rule(n: usize) -> IntegrateResult<OneDRule> {
    if n == 0 {
        return Err(IntegrateError::ValueError(
            "Number of GL points must be >= 1".into(),
        ));
    }
    if n == 1 {
        // Single node at the origin carrying the full length of [-1, 1].
        return Ok(OneDRule {
            nodes: vec![0.0],
            weights: vec![2.0],
        });
    }
    // Evaluate P_n and P_n' at z via the three-term recurrence. Shared by the
    // Newton iteration and the weight formula (previously duplicated inline).
    // Note: the derivative formula divides by z^2 - 1, so callers must not
    // pass z = +/-1; the Newton iterates here never reach the endpoints.
    fn legendre_p_dp(n: usize, z: f64) -> (f64, f64) {
        let mut p0 = 1.0_f64;
        let mut p1 = z;
        for k in 2..=n {
            let kf = k as f64;
            let p2 = ((2.0 * kf - 1.0) * z * p1 - (kf - 1.0) * p0) / kf;
            p0 = p1;
            p1 = p2;
        }
        let nf = n as f64;
        let dp = nf * (z * p1 - p0) / (z * z - 1.0);
        (p1, dp)
    }
    let mut nodes = vec![0.0_f64; n];
    let mut weights = vec![0.0_f64; n];
    // Roots are symmetric about 0: compute the first half (plus the middle
    // root when n is odd) and mirror node/weight into the opposite slot.
    let m = n.div_ceil(2);
    for i in 0..m {
        // Chebyshev-like initial guess for the i-th root.
        let mut z = (PI * (i as f64 + 0.75) / (n as f64 + 0.5)).cos();
        for _ in 0..100 {
            let (p, dp) = legendre_p_dp(n, z);
            let delta = p / dp;
            z -= delta;
            if delta.abs() < 1e-15 {
                break;
            }
        }
        nodes[i] = -z;
        nodes[n - 1 - i] = z;
        // Re-evaluate the derivative at the converged root for the weight.
        let (_, dp) = legendre_p_dp(n, z);
        let w = 2.0 / ((1.0 - z * z) * dp * dp);
        weights[i] = w;
        weights[n - 1 - i] = w;
    }
    Ok(OneDRule { nodes, weights })
}
/// Gauss-Hermite rule family for the weight function exp(-x^2) on the real
/// line; level l uses 2*l - 1 points.
#[derive(Debug, Clone, Copy)]
pub struct GaussHermiteFamily;
impl OneDRuleFamily for GaussHermiteFamily {
    /// Level l maps to an odd point count n = 2l - 1.
    fn rule(&self, level: usize) -> IntegrateResult<OneDRule> {
        match level {
            0 => Err(IntegrateError::ValueError("Level must be >= 1".into())),
            l => gauss_hermite_rule(2 * l - 1),
        }
    }
}
/// Gauss-Hermite quadrature rule with `n` points for the weight exp(-x^2),
/// computed via the Golub-Welsch algorithm: the eigenvalues of the Jacobi
/// (symmetric tridiagonal) matrix are the nodes, and the squared first
/// eigenvector components scaled by mu0 = sqrt(pi) are the weights.
///
/// # Errors
/// Returns `ValueError` for `n == 0` and propagates eigensolver failures.
fn gauss_hermite_rule(n: usize) -> IntegrateResult<OneDRule> {
    if n == 0 {
        return Err(IntegrateError::ValueError(
            "Number of GH points must be >= 1".into(),
        ));
    }
    if n == 1 {
        // Single node at the origin carrying the full mass sqrt(pi).
        return Ok(OneDRule {
            nodes: vec![0.0],
            weights: vec![PI.sqrt()],
        });
    }
    // Jacobi matrix for Hermite polynomials: zero diagonal, off-diagonal
    // entries sqrt(k / 2) for k = 1..n-1. (`diag` was previously declared
    // `mut` without ever being mutated.)
    let diag = vec![0.0_f64; n];
    let mut offdiag: Vec<f64> = (1..n).map(|k| (k as f64 / 2.0).sqrt()).collect();
    let (eigenvalues, first_components) = symtrid_eig(&diag, &mut offdiag)?;
    // Zeroth moment of exp(-x^2) over the real line.
    let mu0 = PI.sqrt();
    // Pair each node with its weight and sort ascending by node; a stable
    // pair sort replaces the former index-permutation sort.
    let mut pairs: Vec<(f64, f64)> = eigenvalues
        .into_iter()
        .zip(first_components.iter().map(|v| mu0 * v * v))
        .collect();
    pairs.sort_by(|a, b| {
        a.0.partial_cmp(&b.0).unwrap_or(std::cmp::Ordering::Equal)
    });
    let (nodes, weights): (Vec<f64>, Vec<f64>) = pairs.into_iter().unzip();
    Ok(OneDRule { nodes, weights })
}
/// Eigen-decomposition of a symmetric tridiagonal matrix using implicitly
/// shifted Givens (QL/QR-style) sweeps.
///
/// Returns `(eigenvalues, first_components)` where `first_components[i]` is
/// the first entry of the eigenvector belonging to `eigenvalues[i]` — exactly
/// what the Golub-Welsch weight formula needs. Eigenvalues are NOT sorted.
///
/// `offdiag` is used as scratch input only; its final contents are
/// unspecified.
///
/// # Errors
/// - `DimensionMismatch` if `offdiag.len() != diag.len() - 1`.
/// - `ConvergenceError` if the sweep fails to converge within `300 * n`
///   iterations.
fn symtrid_eig(diag: &[f64], offdiag: &mut [f64]) -> IntegrateResult<(Vec<f64>, Vec<f64>)> {
    let n = diag.len();
    if n == 0 {
        return Ok((vec![], vec![]));
    }
    if offdiag.len() != n.saturating_sub(1) {
        return Err(IntegrateError::DimensionMismatch(
            "offdiag length must be n-1".into(),
        ));
    }
    if n == 1 {
        return Ok((vec![diag[0]], vec![1.0]));
    }
    // Working copies: d holds the (mutating) diagonal, e[1..] the
    // off-diagonal (e[0] is unused padding so e[i] couples rows i-1 and i).
    let mut d = diag.to_vec();
    let mut e = vec![0.0_f64; n];
    e[1..n].copy_from_slice(offdiag);
    // z accumulates the rotations starting from the identity; z[i][0] ends up
    // holding the first component of the eigenvector for d[i].
    let mut z = vec![vec![0.0_f64; n]; n];
    for (i, row) in z.iter_mut().enumerate() {
        row[i] = 1.0;
    }
    let max_iter = 300 * n;
    let eps = f64::EPSILON;
    for iter in 0..max_iter {
        // Locate the bottom-most off-diagonal entry that is not yet
        // negligible relative to its neighboring diagonal entries.
        let mut converged = true;
        let mut active_end = 0_usize;
        for i in (1..n).rev() {
            if e[i].abs() > eps * (d[i - 1].abs() + d[i].abs()) {
                active_end = i;
                converged = false;
                break;
            }
        }
        if converged {
            break;
        }
        // Extend the active block upward until a negligible entry splits it.
        let mut active_start = active_end;
        while active_start > 0 {
            if e[active_start].abs() <= eps * (d[active_start - 1].abs() + d[active_start].abs()) {
                break;
            }
            active_start -= 1;
        }
        // Wilkinson-style shift from the trailing 2x2 of the active block.
        let p_idx = active_end;
        let sd = (d[p_idx - 1] - d[p_idx]) / 2.0;
        let ee = e[p_idx];
        let shift = if sd.abs() < 1e-300 {
            d[p_idx] - ee.abs()
        } else {
            d[p_idx] - ee * ee / (sd + sd.signum() * (sd * sd + ee * ee).sqrt())
        };
        // One implicit-shift sweep of Givens rotations across the active
        // block. (A block of dead locals that precomputed and discarded the
        // first rotation has been removed; it had no effect.)
        {
            let mut x = d[active_start] - shift;
            let mut z_val = e[active_start + 1];
            for i in active_start..active_end {
                let r = (x * x + z_val * z_val).sqrt();
                let c = x / r;
                let s = z_val / r;
                if i > active_start {
                    e[i] = r;
                }
                let d_i = d[i];
                let d_ip1 = d[i + 1];
                let e_ip1 = e[i + 1];
                // Apply the rotation to the 2x2 diagonal block and its
                // coupling entry.
                d[i] = c * c * d_i + 2.0 * c * s * e_ip1 + s * s * d_ip1;
                d[i + 1] = s * s * d_i - 2.0 * c * s * e_ip1 + c * c * d_ip1;
                e[i + 1] = c * s * (d_ip1 - d_i) + (c * c - s * s) * e_ip1;
                if i + 2 <= active_end {
                    // Chase the bulge into the next position.
                    x = e[i + 1];
                    z_val = s * e[i + 2];
                    e[i + 2] *= c;
                }
                // Accumulate the same rotation into the eigenvector rows.
                for k in 0..n {
                    let tmp = z[i][k];
                    z[i][k] = c * tmp + s * z[i + 1][k];
                    z[i + 1][k] = -s * tmp + c * z[i + 1][k];
                }
            }
        }
        if iter == max_iter - 1 {
            return Err(IntegrateError::ConvergenceError(
                "QL eigensolver did not converge".into(),
            ));
        }
    }
    let first_components: Vec<f64> = (0..n).map(|i| z[i][0]).collect();
    Ok((d, first_components))
}
/// Selects which 1-D rule family the sparse grid is built from.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SparseGridRule {
    /// Nested Chebyshev-extrema rules on finite intervals.
    ClenshawCurtis,
    /// Gauss-Legendre rules on finite intervals.
    GaussLegendre,
    /// Gauss-Hermite rules for exp(-x^2)-weighted integrals on the real line.
    GaussHermite,
}
/// Options controlling sparse-grid construction.
#[derive(Debug, Clone)]
pub struct SparseGridOptions {
    /// Smolyak level (must be >= 1). Levels below the dimension yield an
    /// empty grid — see `build_sparse_grid`.
    pub level: usize,
    /// The underlying 1-D rule family.
    pub rule: SparseGridRule,
}
impl Default for SparseGridOptions {
fn default() -> Self {
Self {
level: 4,
rule: SparseGridRule::ClenshawCurtis,
}
}
}
/// Result of a sparse-grid quadrature evaluation.
#[derive(Debug, Clone)]
pub struct SparseGridResult {
    /// The quadrature estimate of the integral.
    pub value: f64,
    /// Number of grid points the integrand was evaluated at.
    pub n_points: usize,
    /// Smolyak level the estimate was computed with.
    pub level: usize,
    /// Dimension of the integration domain.
    pub dim: usize,
}
/// A materialized sparse grid: points with their combined Smolyak weights.
#[derive(Debug, Clone)]
pub struct SparseGrid {
    /// Grid points, each of length `dim`.
    pub points: Vec<Vec<f64>>,
    /// Combined (signed) weight per point, aligned with `points`.
    pub weights: Vec<f64>,
    /// Dimension of the grid.
    pub dim: usize,
    /// Smolyak level the grid was built for.
    pub level: usize,
}
/// Build a Smolyak sparse grid of the given `level` in `dim` dimensions.
///
/// When `ranges` is `Some`, each axis is affinely mapped from the reference
/// domain onto its `(a, b)` interval; `None` keeps the reference domain
/// (used for Gauss-Hermite grids on the real line).
///
/// NOTE(review): for `level < dim` no valid multi-index exists (every
/// component is >= 1), so the returned grid is empty — confirm callers
/// always pass `level >= dim`.
///
/// # Errors
/// Returns `ValueError` for `dim == 0` or `level == 0`, and propagates 1-D
/// rule construction failures.
pub fn build_sparse_grid(
    dim: usize,
    options: &SparseGridOptions,
    ranges: Option<&[(f64, f64)]>,
) -> IntegrateResult<SparseGrid> {
    if dim == 0 {
        return Err(IntegrateError::ValueError(
            "Dimension must be at least 1".into(),
        ));
    }
    if options.level == 0 {
        return Err(IntegrateError::ValueError("Level must be >= 1".into()));
    }
    let level = options.level;
    let family: Box<dyn OneDRuleFamily> = match options.rule {
        SparseGridRule::ClenshawCurtis => Box::new(ClenshawCurtisFamily),
        SparseGridRule::GaussLegendre => Box::new(GaussLegendreFamily),
        SparseGridRule::GaussHermite => Box::new(GaussHermiteFamily),
    };
    // rules_1d[lv] holds the 1-D rule for level lv; index 0 is an unused
    // placeholder so levels index directly.
    let mut rules_1d = Vec::with_capacity(level + 1);
    rules_1d.push(OneDRule {
        nodes: vec![],
        weights: vec![],
    });
    for lv in 1..=level {
        rules_1d.push(family.rule(lv)?);
    }
    // Smolyak combination formula: sum signed tensor products over all
    // multi-indices with q_min <= |alpha| <= level, merging coincident points
    // (keyed on rounded coordinates) so their weights accumulate.
    let mut point_weight_map: HashMap<Vec<i64>, (Vec<f64>, f64)> = HashMap::new();
    let q_min = (level + 1).saturating_sub(dim).max(1);
    for q in q_min..=level {
        let sign: f64 = if (level - q) % 2 == 0 { 1.0 } else { -1.0 };
        let coeff = sign * binomial_coeff(dim - 1, level - q) as f64;
        for alpha in &enumerate_multi_indices(dim, q) {
            add_tensor_product_to_map(&rules_1d, alpha, ranges, dim, coeff, &mut point_weight_map)?;
        }
    }
    // Keep only points whose combined weight did not cancel to (numerically)
    // zero across combination terms.
    let mut points = Vec::with_capacity(point_weight_map.len());
    let mut weights = Vec::with_capacity(point_weight_map.len());
    for (pt, w) in point_weight_map.values() {
        if w.abs() > 1e-30 {
            points.push(pt.clone());
            weights.push(*w);
        }
    }
    Ok(SparseGrid {
        points,
        weights,
        dim,
        level,
    })
}
/// Integrate `f` over the box given by `ranges` using a sparse grid.
///
/// `options` defaults to `SparseGridOptions::default()` (level 4,
/// Clenshaw-Curtis) when `None`.
///
/// # Errors
/// Propagates grid-construction failures from `build_sparse_grid`.
pub fn sparse_grid_quad<F>(
    f: F,
    ranges: &[(f64, f64)],
    options: Option<SparseGridOptions>,
) -> IntegrateResult<SparseGridResult>
where
    F: Fn(&[f64]) -> f64,
{
    let opts = options.unwrap_or_default();
    let dim = ranges.len();
    let grid = build_sparse_grid(dim, &opts, Some(ranges))?;
    // Quadrature sum: evaluate f at every grid point, scaled by its weight.
    let value: f64 = grid
        .points
        .iter()
        .zip(grid.weights.iter())
        .map(|(pt, &w)| w * f(pt))
        .sum();
    Ok(SparseGridResult {
        value,
        n_points: grid.points.len(),
        level: grid.level,
        dim,
    })
}
/// Refine the sparse-grid level until two consecutive estimates agree to
/// `abs_tol` (absolute) or `rel_tol` (relative), starting from level `dim`
/// and never exceeding `max_level`.
///
/// Returns the last computed estimate if the tolerances are never met.
///
/// # Errors
/// Returns `ValueError` for empty `ranges`; propagates quadrature failures.
pub fn sparse_grid_quad_adaptive<F>(
    f: F,
    ranges: &[(f64, f64)],
    rule: SparseGridRule,
    abs_tol: f64,
    rel_tol: f64,
    max_level: usize,
) -> IntegrateResult<SparseGridResult>
where
    F: Fn(&[f64]) -> f64,
{
    let dim = ranges.len();
    if dim == 0 {
        return Err(IntegrateError::ValueError(
            "At least one dimension required".into(),
        ));
    }
    // Start at level = dim, the smallest level with a non-empty grid.
    let start_level = dim.max(1);
    let quad_at =
        |level: usize| sparse_grid_quad(&f, ranges, Some(SparseGridOptions { level, rule }));
    let mut previous = quad_at(start_level)?;
    for level in (start_level + 1)..=max_level {
        let current = quad_at(level)?;
        let change = (current.value - previous.value).abs();
        let magnitude = current.value.abs().max(1.0);
        // Accept as soon as either tolerance is satisfied.
        if change < abs_tol || change < rel_tol * magnitude {
            return Ok(current);
        }
        previous = current;
    }
    Ok(previous)
}
/// All multi-indices alpha in {1, 2, ...}^d with |alpha|_1 == q, in
/// lexicographic order of the leading component. Empty when q < d.
fn enumerate_multi_indices(d: usize, q: usize) -> Vec<Vec<usize>> {
    match d {
        // Zero dimensions: exactly one empty index when q is also zero.
        0 => {
            if q == 0 {
                vec![vec![]]
            } else {
                Vec::new()
            }
        }
        // One dimension: the single index [q], provided q >= 1.
        1 => {
            if q >= 1 {
                vec![vec![q]]
            } else {
                Vec::new()
            }
        }
        _ => {
            if q < d {
                // Each of the d components must be >= 1, so q >= d is required.
                return Vec::new();
            }
            let mut out = Vec::new();
            for first in 1..=(q - d + 1) {
                for mut rest in enumerate_multi_indices(d - 1, q - first) {
                    let mut idx = Vec::with_capacity(d);
                    idx.push(first);
                    idx.append(&mut rest);
                    out.push(idx);
                }
            }
            out
        }
    }
}
/// Binomial coefficient C(n, k) via the multiplicative formula; each
/// intermediate product is exactly divisible, so integer division is exact.
/// Returns 0 when k > n. May overflow for large arguments.
fn binomial_coeff(n: usize, k: usize) -> usize {
    if k > n {
        return 0;
    }
    // Use the smaller of k and n - k to shorten the product.
    let k = k.min(n - k);
    (0..k).fold(1_usize, |acc, i| acc * (n - i) / (i + 1))
}
/// Accumulate one tensor-product rule (for multi-index `alpha`, scaled by the
/// Smolyak combination coefficient `coeff`) into `map`.
///
/// The map is keyed on coordinates rounded to ~1e-14 resolution so that
/// coincident points from different combination terms share one entry and
/// their signed weights sum.
fn add_tensor_product_to_map(
    rules: &[OneDRule],
    alpha: &[usize],
    ranges: Option<&[(f64, f64)]>,
    dim: usize,
    coeff: f64,
    map: &mut HashMap<Vec<i64>, (Vec<f64>, f64)>,
) -> IntegrateResult<()> {
    // Per-axis 1-D nodes/weights, affinely mapped onto the target interval
    // when ranges are given (nodes: x -> mid + half_len * x, weights scaled
    // by the Jacobian half_len).
    let mut axis_nodes: Vec<Vec<f64>> = Vec::with_capacity(dim);
    let mut axis_weights: Vec<Vec<f64>> = Vec::with_capacity(dim);
    for (i, &lv) in alpha.iter().enumerate() {
        let rule = &rules[lv];
        match ranges {
            Some(r) => {
                let (a, b) = r[i];
                let mid = (a + b) / 2.0;
                let half_len = (b - a) / 2.0;
                axis_nodes.push(rule.nodes.iter().map(|&x| mid + half_len * x).collect());
                axis_weights.push(rule.weights.iter().map(|&w| w * half_len).collect());
            }
            None => {
                axis_nodes.push(rule.nodes.clone());
                axis_weights.push(rule.weights.clone());
            }
        }
    }
    let sizes: Vec<usize> = axis_nodes.iter().map(|v| v.len()).collect();
    let total: usize = sizes.iter().product();
    // Odometer iteration over the full Cartesian product of axis indices.
    let mut idx = vec![0_usize; dim];
    for _ in 0..total {
        let point: Vec<f64> = (0..dim).map(|k| axis_nodes[k][idx[k]]).collect();
        let w_prod: f64 = (0..dim).map(|k| axis_weights[k][idx[k]]).product();
        // Rounded-coordinate key merges numerically coincident points.
        let key: Vec<i64> = point.iter().map(|&x| (x * 1e14).round() as i64).collect();
        let entry = map.entry(key).or_insert_with(|| (point.clone(), 0.0));
        entry.1 += coeff * w_prod;
        // Advance the odometer: increment from the last axis, carrying left.
        for k in (0..dim).rev() {
            idx[k] += 1;
            if idx[k] < sizes[k] {
                break;
            }
            idx[k] = 0;
        }
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    // A smooth 2-D Gaussian over [-3,3]^2; the truncated tails keep the
    // result within 0.02 of pi.
    #[test]
    fn test_2d_gaussian_approx_pi() {
        let result = sparse_grid_quad(
            |x: &[f64]| (-x[0] * x[0] - x[1] * x[1]).exp(),
            &[(-3.0, 3.0), (-3.0, 3.0)],
            Some(SparseGridOptions {
                level: 8,
                rule: SparseGridRule::ClenshawCurtis,
            }),
        )
        .expect("sparse_grid_quad");
        assert!(
            (result.value - PI).abs() < 0.02,
            "2D Gaussian: got {}, expected pi={}",
            result.value,
            PI
        );
    }
    // Low-degree polynomials should be integrated (essentially) exactly.
    #[test]
    fn test_polynomial_exactness() {
        let exact = 1.0 / 9.0;
        let result = sparse_grid_quad(
            |x: &[f64]| x[0] * x[0] * x[1] * x[1],
            &[(0.0, 1.0), (0.0, 1.0)],
            Some(SparseGridOptions {
                level: 4,
                rule: SparseGridRule::ClenshawCurtis,
            }),
        )
        .expect("sparse_grid_quad");
        assert!(
            (result.value - exact).abs() < 1e-10,
            "Polynomial exactness: got {}, expected {}",
            result.value,
            exact
        );
    }
    // Constants must integrate to the box volume, and point counts should
    // not shrink when the dimension grows (with level >= dim).
    #[test]
    fn test_dimension_scaling() {
        let r2 = sparse_grid_quad(
            |_x: &[f64]| 1.0,
            &[(0.0, 2.0), (0.0, 2.0)],
            Some(SparseGridOptions {
                level: 2,
                rule: SparseGridRule::ClenshawCurtis,
            }),
        )
        .expect("2D");
        assert!(
            (r2.value - 4.0).abs() < 1e-12,
            "2D constant: got {}",
            r2.value
        );
        let r3 = sparse_grid_quad(
            |_x: &[f64]| 1.0,
            &[(0.0, 2.0), (0.0, 2.0), (0.0, 2.0)],
            Some(SparseGridOptions {
                level: 3,
                rule: SparseGridRule::ClenshawCurtis,
            }),
        )
        .expect("3D");
        assert!(
            (r3.value - 8.0).abs() < 1e-12,
            "3D constant: got {}",
            r3.value
        );
        assert!(
            r3.n_points >= r2.n_points,
            "3D should use >= 2D points: {} vs {}",
            r3.n_points,
            r2.n_points
        );
    }
    // Gauss-Legendre sparse grid on a bilinear integrand: exact value 1/4.
    #[test]
    fn test_gauss_legendre_rule() {
        let result = sparse_grid_quad(
            |x: &[f64]| x[0] * x[1],
            &[(0.0, 1.0), (0.0, 1.0)],
            Some(SparseGridOptions {
                level: 3,
                rule: SparseGridRule::GaussLegendre,
            }),
        )
        .expect("GL sparse grid");
        assert!(
            (result.value - 0.25).abs() < 1e-10,
            "GL 2D x*y: got {}",
            result.value
        );
    }
    // Gauss-Hermite weights in 2-D must sum to the zeroth moment
    // (sqrt(pi))^2 = pi; the integrand is the constant 1.
    #[test]
    fn test_gauss_hermite_2d() {
        let grid = build_sparse_grid(
            2,
            &SparseGridOptions {
                level: 3,
                rule: SparseGridRule::GaussHermite,
            },
            None,
        )
        .expect("build GH grid");
        let mut value = 0.0_f64;
        for (pt, &w) in grid.points.iter().zip(grid.weights.iter()) {
            // pt is unused here; only the weight sum matters.
            let _ = pt; value += w * 1.0;
        }
        assert!(
            (value - PI).abs() < 1e-8,
            "GH 2D constant: got {}, expected pi={}",
            value,
            PI
        );
    }
    // Structural sanity of a built grid: non-empty, aligned vectors,
    // weights summing to the unit-box volume.
    #[test]
    fn test_build_sparse_grid() {
        let grid = build_sparse_grid(
            2,
            &SparseGridOptions {
                level: 3,
                rule: SparseGridRule::ClenshawCurtis,
            },
            Some(&[(0.0, 1.0), (0.0, 1.0)]),
        )
        .expect("build grid");
        assert!(!grid.points.is_empty(), "Grid should have points");
        assert_eq!(
            grid.points.len(),
            grid.weights.len(),
            "Points/weights mismatch"
        );
        assert_eq!(grid.dim, 2);
        assert_eq!(grid.level, 3);
        let weight_sum: f64 = grid.weights.iter().sum();
        assert!(
            (weight_sum - 1.0).abs() < 1e-10,
            "Weight sum should be 1, got {}",
            weight_sum
        );
    }
    // The adaptive driver should land near the analytic value of
    // int_0^1 int_0^1 sin(x*y) dx dy (~0.2398).
    #[test]
    fn test_adaptive_sparse_grid() {
        let result = sparse_grid_quad_adaptive(
            |x: &[f64]| (x[0] * x[1]).sin(),
            &[(0.0, 1.0), (0.0, 1.0)],
            SparseGridRule::ClenshawCurtis,
            1e-8,
            1e-8,
            10,
        )
        .expect("adaptive sparse grid");
        assert!(
            result.value > 0.23 && result.value < 0.25,
            "Adaptive: got {}",
            result.value
        );
    }
    // A constant in 5 dimensions over the unit box integrates to 1.
    #[test]
    fn test_5d_constant() {
        let ranges: Vec<(f64, f64)> = (0..5).map(|_| (0.0, 1.0)).collect();
        let result = sparse_grid_quad(
            |_x: &[f64]| 1.0,
            &ranges,
            Some(SparseGridOptions {
                level: 6,
                rule: SparseGridRule::ClenshawCurtis,
            }),
        )
        .expect("5D constant");
        assert!(
            (result.value - 1.0).abs() < 1e-10,
            "5D constant: got {}",
            result.value
        );
    }
    // Invalid inputs (no ranges, level 0) must be rejected with errors.
    #[test]
    fn test_errors() {
        let r = sparse_grid_quad(|_: &[f64]| 1.0, &[], None);
        assert!(r.is_err(), "Empty ranges should error");
        let r = sparse_grid_quad(
            |_: &[f64]| 1.0,
            &[(0.0, 1.0)],
            Some(SparseGridOptions {
                level: 0,
                rule: SparseGridRule::ClenshawCurtis,
            }),
        );
        assert!(r.is_err(), "Level 0 should error");
    }
}
/// Univariate rule selector for the `SmolyakGrid` API. The `usize` payloads
/// are not consulted by `to_sparse_grid_rule` in this file.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UnivariateRule {
    /// Clenshaw-Curtis family.
    ClenCurt(usize),
    /// Gauss-Legendre family.
    GaussLegendre(usize),
    // NOTE(review): likely a misspelling of "Gauss-Patterson"; it currently
    // falls back to Clenshaw-Curtis (see `to_sparse_grid_rule`). Renaming the
    // variant would break callers, so it is documented rather than changed.
    GaussPatternak(usize),
}
impl UnivariateRule {
    /// Map the public rule selector onto the internal family enum.
    /// `GaussPatternak` is implemented via a Clenshaw-Curtis fallback.
    fn to_sparse_grid_rule(self) -> SparseGridRule {
        match self {
            UnivariateRule::GaussLegendre(_) => SparseGridRule::GaussLegendre,
            UnivariateRule::ClenCurt(_) | UnivariateRule::GaussPatternak(_) => {
                SparseGridRule::ClenshawCurtis
            }
        }
    }
}
/// Configuration for `SmolyakGrid::new`.
#[derive(Debug, Clone)]
pub struct SmolyakConfig {
    /// Number of dimensions (must be >= 1).
    pub dim: usize,
    /// Sparse-grid level (must be >= 1).
    pub level: usize,
    /// Per-dimension integration bounds `[a, b]`; length must equal `dim`.
    pub domain: Vec<[f64; 2]>,
    /// Univariate rule family used along every axis.
    pub rule: UnivariateRule,
}
/// A constructed Smolyak sparse grid plus the configuration it was built from.
#[derive(Debug, Clone)]
pub struct SmolyakGrid {
    /// Grid points mapped into the configured domain.
    pub points: Vec<Vec<f64>>,
    /// Combined (signed) quadrature weight per point.
    pub weights: Vec<f64>,
    /// Cached point count (equals `points.len()`).
    pub n_points: usize,
    // Configuration retained for reference; not read elsewhere in this file.
    config: SmolyakConfig,
}
impl SmolyakGrid {
    /// Construct a Smolyak sparse grid for `config`, validating the
    /// dimension, level, and domain length before delegating to
    /// `build_sparse_grid`.
    ///
    /// # Errors
    /// `ValueError` for `dim == 0` or `level == 0`; `DimensionMismatch` when
    /// `domain.len() != dim`; any error from grid construction.
    pub fn new(config: SmolyakConfig) -> IntegrateResult<Self> {
        let dim = config.dim;
        let level = config.level;
        if dim == 0 {
            return Err(IntegrateError::ValueError(
                "SmolyakGrid: dim must be >= 1".into(),
            ));
        }
        if level == 0 {
            return Err(IntegrateError::ValueError(
                "SmolyakGrid: level must be >= 1".into(),
            ));
        }
        if config.domain.len() != dim {
            return Err(IntegrateError::DimensionMismatch(format!(
                "SmolyakGrid: domain.len()={} != dim={}",
                config.domain.len(),
                dim
            )));
        }
        // Convert the [a, b] arrays into the tuple form the builder expects.
        let ranges: Vec<(f64, f64)> = config.domain.iter().map(|&[a, b]| (a, b)).collect();
        let options = SparseGridOptions {
            level,
            rule: config.rule.to_sparse_grid_rule(),
        };
        let grid = build_sparse_grid(dim, &options, Some(&ranges))?;
        Ok(Self {
            n_points: grid.points.len(),
            points: grid.points,
            weights: grid.weights,
            config,
        })
    }
    /// Approximate the integral of `f` as the weighted sum over grid points.
    pub fn integrate<F: Fn(&[f64]) -> f64>(&self, f: F) -> f64 {
        let mut acc = 0.0_f64;
        for (pt, &w) in self.points.iter().zip(self.weights.iter()) {
            acc += w * f(pt.as_slice());
        }
        acc
    }
    /// Number of points in the grid.
    pub fn n_points(&self) -> usize {
        self.points.len()
    }
}
/// Affinely map `x` from the reference interval [-1, 1] onto [a, b].
#[inline]
pub fn rescale(x: f64, a: f64, b: f64) -> f64 {
    let midpoint = (a + b) / 2.0;
    let half_width = (b - a) / 2.0;
    midpoint + half_width * x
}
/// Scale a reference-interval weight by the Jacobian (b - a) / 2 of the
/// affine map from [-1, 1] onto [a, b].
#[inline]
pub fn rescale_weight(w: f64, a: f64, b: f64) -> f64 {
    w * (b - a) * 0.5
}
/// Clenshaw-Curtis nodes and weights as plain vectors, falling back to the
/// single-point midpoint rule if construction fails.
pub fn cc_nodes_weights(n: usize) -> (Vec<f64>, Vec<f64>) {
    cc_rule(n)
        .map(|r| (r.nodes, r.weights))
        .unwrap_or_else(|_| (vec![0.0], vec![2.0]))
}
/// Gauss-Legendre nodes and weights as plain vectors, falling back to the
/// single-point midpoint rule if construction fails (e.g. `n == 0`).
pub fn gl_nodes_weights(n: usize) -> (Vec<f64>, Vec<f64>) {
    gl_rule(n)
        .map(|r| (r.nodes, r.weights))
        .unwrap_or_else(|_| (vec![0.0], vec![2.0]))
}
/// All Smolyak multi-indices for the given dimension and level, i.e. every
/// alpha in {1, 2, ...}^dim with |alpha|_1 between max(level + 1, dim) and
/// level + dim inclusive.
///
/// The previous lower bound used `if level + 1 > 0 { level + 1 } else { dim }`,
/// which is tautologically true for `usize` (the `dim` branch was dead).
/// Starting at `max(level + 1, dim)` is equivalent in output — sums below
/// `dim` admit no valid multi-index — and makes the intent explicit.
pub fn smolyak_indices(dim: usize, level: usize) -> Vec<Vec<usize>> {
    let q_min = (level + 1).max(dim);
    let q_max = level + dim;
    let mut result = Vec::new();
    for q in q_min..=q_max {
        result.extend(enumerate_multi_indices(dim, q));
    }
    result
}
#[cfg(test)]
mod smolyak_api_tests {
    use super::*;
    // A 1-D constant over [0, 1] integrates to the interval length.
    #[test]
    fn test_integrate_constant_1d() {
        let config = SmolyakConfig {
            dim: 1,
            level: 2,
            domain: vec![[0.0, 1.0]],
            rule: UnivariateRule::ClenCurt(0),
        };
        let grid = SmolyakGrid::new(config).expect("build 1d grid");
        let val = grid.integrate(|_x| 1.0);
        assert!(
            (val - 1.0).abs() < 1e-12,
            "∫1 over [0,1]^1 = {val}, expected 1.0"
        );
    }
    // A 3-D constant over the unit cube integrates to 1.
    #[test]
    fn test_integrate_constant_3d() {
        let config = SmolyakConfig {
            dim: 3,
            level: 3,
            domain: vec![[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
            rule: UnivariateRule::ClenCurt(0),
        };
        let grid = SmolyakGrid::new(config).expect("build 3d grid");
        let val = grid.integrate(|_x| 1.0);
        assert!(
            (val - 1.0).abs() < 1e-12,
            "∫1 over [0,1]^3 = {val}, expected 1.0"
        );
    }
    // A linear integrand is reproduced exactly: ∫ x1 over the unit square.
    #[test]
    fn test_integrate_linear() {
        let config = SmolyakConfig {
            dim: 2,
            level: 3,
            domain: vec![[0.0, 1.0], [0.0, 1.0]],
            rule: UnivariateRule::ClenCurt(0),
        };
        let grid = SmolyakGrid::new(config).expect("build 2d grid");
        let val = grid.integrate(|x| x[0]);
        assert!(
            (val - 0.5).abs() < 1e-12,
            "∫x1 over [0,1]^2 = {val}, expected 0.5"
        );
    }
    // Quadratic integrand at level 4: ∫ (x1^2 + x2^2) = 2/3 exactly.
    #[test]
    fn test_integrate_quadratic_2d() {
        let config = SmolyakConfig {
            dim: 2,
            level: 4,
            domain: vec![[0.0, 1.0], [0.0, 1.0]],
            rule: UnivariateRule::ClenCurt(0),
        };
        let grid = SmolyakGrid::new(config).expect("build 2d grid level 4");
        let val = grid.integrate(|x| x[0] * x[0] + x[1] * x[1]);
        let exact = 2.0 / 3.0;
        assert!(
            (val - exact).abs() < 1e-10,
            "∫(x1²+x2²) over [0,1]² = {val}, expected {exact}"
        );
    }
    // Point counts must be non-decreasing in the level.
    #[test]
    fn test_n_points_increases_with_level() {
        let mk_grid = |level: usize| {
            SmolyakGrid::new(SmolyakConfig {
                dim: 3,
                level,
                domain: vec![[0.0, 1.0]; 3],
                rule: UnivariateRule::ClenCurt(0),
            })
            .expect("grid")
            .n_points()
        };
        let n2 = mk_grid(2);
        let n3 = mk_grid(3);
        let n4 = mk_grid(4);
        assert!(
            n3 >= n2,
            "level 3 ({n3}) should have >= points as level 2 ({n2})"
        );
        assert!(
            n4 >= n3,
            "level 4 ({n4}) should have >= points as level 3 ({n3})"
        );
    }
    // Mapped grid points must lie inside the configured (asymmetric) domain.
    #[test]
    fn test_points_within_domain() {
        let domain = vec![[0.5, 2.5], [-1.0, 1.0], [0.0, 3.0]];
        let config = SmolyakConfig {
            dim: 3,
            level: 3,
            domain: domain.clone(),
            rule: UnivariateRule::GaussLegendre(0),
        };
        let grid = SmolyakGrid::new(config).expect("build grid");
        for (i, pt) in grid.points.iter().enumerate() {
            for (d, &x) in pt.iter().enumerate() {
                let [a, b] = domain[d];
                assert!(
                    x >= a - 1e-12 && x <= b + 1e-12,
                    "point {i} dim {d}: x={x} out of [{a}, {b}]"
                );
            }
        }
    }
    // rescale maps the reference endpoints and midpoint onto [0, 4].
    #[test]
    fn test_rescale() {
        assert!((rescale(-1.0, 0.0, 4.0) - 0.0).abs() < 1e-14);
        assert!((rescale(1.0, 0.0, 4.0) - 4.0).abs() < 1e-14);
        assert!((rescale(0.0, 0.0, 4.0) - 2.0).abs() < 1e-14);
    }
    // rescale_weight scales by the Jacobian (b - a) / 2 = 2 here.
    #[test]
    fn test_rescale_weight() {
        let w = rescale_weight(2.0, 0.0, 4.0);
        assert!((w - 4.0).abs() < 1e-14, "rescale_weight: got {w}");
    }
    // GaussPatternak currently falls back to Clenshaw-Curtis; the constant
    // integral must still be exact.
    #[test]
    fn test_gausspatternak_rule() {
        let config = SmolyakConfig {
            dim: 2,
            level: 3,
            domain: vec![[0.0, 1.0], [0.0, 1.0]],
            rule: UnivariateRule::GaussPatternak(0),
        };
        let grid = SmolyakGrid::new(config).expect("GP grid");
        let val = grid.integrate(|_x| 1.0);
        assert!(
            (val - 1.0).abs() < 1e-12,
            "GaussPatternak ∫1 = {val}, expected 1.0"
        );
    }
    // dim == 0 must be rejected.
    #[test]
    fn test_error_dim_zero() {
        let config = SmolyakConfig {
            dim: 0,
            level: 2,
            domain: vec![],
            rule: UnivariateRule::ClenCurt(0),
        };
        assert!(
            SmolyakGrid::new(config).is_err(),
            "dim=0 should return an error"
        );
    }
    // level == 0 must be rejected.
    #[test]
    fn test_error_level_zero() {
        let config = SmolyakConfig {
            dim: 2,
            level: 0,
            domain: vec![[0.0, 1.0], [0.0, 1.0]],
            rule: UnivariateRule::ClenCurt(0),
        };
        assert!(
            SmolyakGrid::new(config).is_err(),
            "level=0 should return an error"
        );
    }
}