#[cfg(not(feature = "std"))]
use alloc::{vec, vec::Vec};
#[cfg(not(feature = "std"))]
use num_traits::Float as _;
#[cfg(not(feature = "std"))]
use alloc::collections::BTreeMap;
#[cfg(feature = "std")]
use std::collections::BTreeMap;
use crate::cubature::CubatureRule;
use crate::error::QuadratureError;
/// One-dimensional quadrature family used to build the sparse grid.
///
/// Currently only nested Clenshaw-Curtis rules are supported; the variant
/// is accepted by [`SparseGrid::new`] for forward compatibility.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SparseGridBasis {
    /// Nested Clenshaw-Curtis nodes (Chebyshev extrema) on `[-1, 1]`.
    ClenshawCurtis,
}
/// A Smolyak sparse-grid cubature rule over the cube `[-1, 1]^dim`.
#[derive(Debug, Clone)]
pub struct SparseGrid {
    // Flattened point/weight rule assembled by `build_smolyak`.
    rule: CubatureRule,
    // Smolyak refinement level the rule was built with.
    level: usize,
}
impl SparseGrid {
    /// Constructs a sparse grid of dimension `dim` at refinement `level`.
    ///
    /// The `_basis` argument selects the 1-D rule family; only
    /// Clenshaw-Curtis exists today, so it is currently unused.
    ///
    /// # Errors
    ///
    /// Returns `QuadratureError::InvalidInput` when `dim == 0`.
    pub fn new(dim: usize, level: usize, _basis: SparseGridBasis) -> Result<Self, QuadratureError> {
        if dim == 0 {
            return Err(QuadratureError::InvalidInput("dimension must be >= 1"));
        }
        Ok(Self {
            rule: build_smolyak(dim, level),
            level,
        })
    }

    /// Convenience constructor for the Clenshaw-Curtis basis.
    ///
    /// # Errors
    ///
    /// Returns `QuadratureError::InvalidInput` when `dim == 0`.
    pub fn clenshaw_curtis(dim: usize, level: usize) -> Result<Self, QuadratureError> {
        Self::new(dim, level, SparseGridBasis::ClenshawCurtis)
    }

    /// Borrows the assembled cubature rule.
    #[inline]
    #[must_use]
    pub fn rule(&self) -> &CubatureRule {
        &self.rule
    }

    /// Number of distinct grid points in the rule.
    #[inline]
    #[must_use]
    pub fn num_points(&self) -> usize {
        self.rule.num_points()
    }

    /// Spatial dimension of the grid.
    #[inline]
    #[must_use]
    pub fn dim(&self) -> usize {
        self.rule.dim()
    }

    /// Smolyak refinement level the grid was built with.
    #[inline]
    #[must_use]
    pub fn level(&self) -> usize {
        self.level
    }
}
/// Number of nodes in the nested Clenshaw-Curtis rule at `level`:
/// a single midpoint at level 0, then `2^level + 1` nodes so that each
/// level's node set contains the previous one.
fn cc_order(level: usize) -> usize {
    match level {
        0 => 1,
        l => (1 << l) + 1,
    }
}
/// Returns the 1-D Clenshaw-Curtis `(nodes, weights)` pair of order `n`;
/// thin wrapper over the crate's shared implementation.
fn cc_rule(n: usize) -> (Vec<f64>, Vec<f64>) {
    crate::clenshaw_curtis::compute_clenshaw_curtis(n)
}
/// Quantises a coordinate to a fixed-point integer lattice (48 fractional
/// bits) so that floating-point node values arising from different tensor
/// products can be used as exact deduplication keys.
#[allow(clippy::cast_precision_loss, clippy::cast_possible_truncation)]
fn quantise(x: f64) -> i64 {
    // 2^48 sub-unit resolution; exact for f64 values of this magnitude.
    const SCALE: f64 = (1i64 << 48) as f64;
    (x * SCALE).round() as i64
}
/// Builds the Smolyak sparse-grid cubature rule of the given `level` over
/// `[-1, 1]^dim` from nested 1-D Clenshaw-Curtis rules.
///
/// Uses the combination-technique form of the Smolyak formula: tensor
/// products whose per-axis level sum `s` satisfies
/// `level - dim + 1 <= s <= level`, each scaled by the coefficient
/// `(-1)^(level - s) * C(dim - 1, level - s)`. Points shared between
/// tensor products (the rules are nested) are merged by summing weights.
fn build_smolyak(dim: usize, level: usize) -> CubatureRule {
    let max_level = level;
    // Precompute one 1-D (nodes, weights) pair per level: orders 1, 3, 5, 9, ...
    let cc_rules: Vec<(Vec<f64>, Vec<f64>)> =
        (0..=max_level).map(|l| cc_rule(cc_order(l))).collect();
    // Deduplication map: key is the coordinate vector quantised to a
    // fixed-point lattice, value is (point, accumulated weight).
    // BTreeMap keeps iteration order deterministic across runs.
    let mut point_map: BTreeMap<Vec<i64>, (Vec<f64>, f64)> = BTreeMap::new();
    let q = level;
    // Only level sums in [q - dim + 1, q] have a nonzero combination
    // coefficient; saturating_sub clamps the lower bound at 0 when q < dim.
    let sum_min = (q + 1).saturating_sub(dim);
    let sum_max = q;
    for s in sum_min..=sum_max {
        let diff = q - s;
        #[allow(clippy::cast_precision_loss)]
        let coeff =
            if diff.is_multiple_of(2) { 1.0 } else { -1.0 } * binomial(dim - 1, diff) as f64;
        // Defensive: within the s-range above, diff <= dim - 1, so the
        // binomial factor is always >= 1 and this branch never triggers.
        if coeff.abs() < 1e-300 {
            continue;
        }
        // Enumerate every composition (l_1, ..., l_dim) of s into dim
        // non-negative parts, starting from (s, 0, ..., 0).
        let mut multi_idx = vec![0usize; dim];
        multi_idx[0] = s;
        loop {
            // Per-axis node counts for this composition, and a
            // mixed-radix odometer `indices` over the full tensor grid.
            let orders: Vec<usize> = multi_idx.iter().map(|&l| cc_order(l)).collect();
            let total: usize = orders.iter().product();
            let mut indices = vec![0usize; dim];
            for _ in 0..total {
                let mut w = coeff;
                let mut key = Vec::with_capacity(dim);
                let mut point = Vec::with_capacity(dim);
                for j in 0..dim {
                    let (ref nodes, ref weights) = cc_rules[multi_idx[j]];
                    point.push(nodes[indices[j]]);
                    key.push(quantise(nodes[indices[j]]));
                    // Tensor-product weight: product of 1-D weights times
                    // the combination coefficient already folded into `w`.
                    w *= weights[indices[j]];
                }
                // Merge coinciding points by summing their weights.
                point_map
                    .entry(key)
                    .and_modify(|(_, existing_w)| *existing_w += w)
                    .or_insert((point, w));
                // Advance the odometer (axis 0 varies fastest).
                for j in 0..dim {
                    indices[j] += 1;
                    if indices[j] < orders[j] {
                        break;
                    }
                    indices[j] = 0;
                }
            }
            if !next_composition(&mut multi_idx, s) {
                break;
            }
        }
    }
    // Drop points whose combined weight cancelled to (numerically) zero.
    let mut pairs: Vec<(Vec<f64>, f64)> = point_map
        .into_values()
        .filter(|(_, w)| w.abs() > 1e-15)
        .collect();
    // Sort lexicographically by coordinates for a stable output ordering
    // (Clenshaw-Curtis nodes are never NaN, so partial_cmp is total here).
    pairs.sort_by(|a, b| {
        a.0.iter()
            .zip(b.0.iter())
            .find_map(|(x, y)| {
                x.partial_cmp(y)
                    .filter(|o| *o != core::cmp::Ordering::Equal)
            })
            .unwrap_or(core::cmp::Ordering::Equal)
    });
    // Flatten into the row-major node buffer CubatureRule expects.
    let n = pairs.len();
    let mut nodes_flat = Vec::with_capacity(n * dim);
    let mut weights = Vec::with_capacity(n);
    for (pt, w) in pairs {
        nodes_flat.extend_from_slice(&pt);
        weights.push(w);
    }
    CubatureRule::new(nodes_flat, weights, dim)
}
/// Advances `c` in place to the next composition of `s` into `c.len()`
/// non-negative parts, returning `false` once the sequence (which starts
/// from `(s, 0, ..., 0)` and ends at `(0, ..., 0, s)`) is exhausted.
fn next_composition(c: &mut [usize], s: usize) -> bool {
    let d = c.len();
    if d <= 1 {
        // A single part admits exactly one composition: no successor.
        return false;
    }
    // Rightmost position before the last that still holds a positive part;
    // if none exists, everything has flowed into c[d - 1] and we are done.
    let Some(j) = (0..d - 1).rev().find(|&i| c[i] > 0) else {
        return false;
    };
    // Move one unit from position j into position j + 1, collecting the
    // entire remaining budget there and clearing everything further right.
    c[j] -= 1;
    let used: usize = c[..=j].iter().sum();
    c[j + 1] = s - used;
    c[j + 2..].fill(0);
    true
}
/// Binomial coefficient `C(n, k)`, returning 0 when `k > n`.
///
/// Computed multiplicatively; each intermediate product is divisible by
/// `i + 1`, so the division stays exact. Intended for the small arguments
/// arising from grid dimensions, where overflow is not a concern.
fn binomial(n: usize, k: usize) -> usize {
    if k > n {
        return 0;
    }
    // Use symmetry C(n, k) = C(n, n - k) to minimise the iteration count;
    // k == 0 (including the original k == n case) yields 1 immediately.
    let k = k.min(n - k);
    (0..k).fold(1usize, |acc, i| acc * (n - i) / (i + 1))
}
#[cfg(test)]
mod tests {
    use super::*;

    // A level-0 grid in any dimension collapses to the single midpoint,
    // and its weight equals the box volume 2^d.
    #[test]
    fn level_0_single_point() {
        let sg = SparseGrid::clenshaw_curtis(3, 0).unwrap();
        assert_eq!(sg.num_points(), 1);
        let sum: f64 = sg.rule().weights().iter().sum();
        assert!((sum - 8.0).abs() < 1e-14);
    }

    // Dimension zero is rejected with an error.
    #[test]
    fn invalid_dim() {
        assert!(SparseGrid::clenshaw_curtis(0, 1).is_err());
    }

    // Integrating the constant 1 over [-1, 1]^d means the weights of any
    // rule must sum to the box volume 2^d.
    #[test]
    fn weight_sum() {
        for d in 1..=4 {
            for q in 0..=3 {
                let sg = SparseGrid::clenshaw_curtis(d, q).unwrap();
                let sum: f64 = sg.rule().weights().iter().sum();
                let expected = 2.0_f64.powi(d as i32);
                assert!(
                    (sum - expected).abs() < 1e-10,
                    "d={d}, q={q}: sum={sum}, expected={expected}, n={}",
                    sg.num_points()
                );
            }
        }
    }

    // In one dimension the sparse grid degenerates to the plain
    // Clenshaw-Curtis rule with 2^level + 1 points.
    #[test]
    fn one_d_matches_cc() {
        let sg = SparseGrid::clenshaw_curtis(1, 3).unwrap();
        assert_eq!(sg.num_points(), 9);
    }

    // Known point counts for small Clenshaw-Curtis sparse grids.
    #[test]
    fn point_counts() {
        let sg = SparseGrid::clenshaw_curtis(2, 1).unwrap();
        assert_eq!(sg.num_points(), 5);
        let sg = SparseGrid::clenshaw_curtis(2, 2).unwrap();
        assert_eq!(sg.num_points(), 13);
        let sg = SparseGrid::clenshaw_curtis(3, 1).unwrap();
        assert_eq!(sg.num_points(), 7);
    }

    // Integral of x^2 y^2 over [-1, 1]^2 is (2/3)^2 = 4/9.
    #[test]
    fn polynomial_exactness_2d() {
        let sg = SparseGrid::clenshaw_curtis(2, 3).unwrap();
        let result = sg.rule().integrate(|x| x[0] * x[0] * x[1] * x[1]);
        assert!((result - 4.0 / 9.0).abs() < 1e-12, "result={result}");
    }

    // The sparse grid must use far fewer points than the full tensor
    // product of the level-3 1-D rule (9^5 points in 5-D).
    #[test]
    fn sparsity_advantage() {
        let sg = SparseGrid::clenshaw_curtis(5, 3).unwrap();
        let tp_points = 9usize.pow(5);
        assert!(
            sg.num_points() < tp_points / 10,
            "sparse={} should be much less than tensor={}",
            sg.num_points(),
            tp_points
        );
    }

    // Integral of exp(x + y + z) over the unit cube is (e - 1)^3.
    #[test]
    fn smooth_3d_integral() {
        let sg = SparseGrid::clenshaw_curtis(3, 4).unwrap();
        let result = sg
            .rule()
            .integrate_box(&[0.0, 0.0, 0.0], &[1.0, 1.0, 1.0], |x| {
                (x[0] + x[1] + x[2]).exp()
            });
        let e_minus_1 = core::f64::consts::E - 1.0;
        let expected = e_minus_1 * e_minus_1 * e_minus_1;
        assert!(
            (result - expected).abs() < 1e-6,
            "result={result}, expected={expected}"
        );
    }
}