#![allow(clippy::type_complexity)]
#![allow(dead_code)]
use rand::Rng; // `Rng` trait provides `random_range` (rand 0.9); `rand::RngExt` does not exist
use std::f64::consts::TAU;
/// An axis-aligned box `[lower[i], upper[i]]` in R^n, the basic measurable
/// set used by every measure in this module.
#[derive(Debug, Clone)]
pub struct MeasurableSet {
    /// Human-readable label, e.g. "[0,1]".
    pub label: String,
    /// Per-dimension lower bounds.
    pub lower: Vec<f64>,
    /// Per-dimension upper bounds.
    pub upper: Vec<f64>,
}

impl MeasurableSet {
    /// Builds a box from per-dimension bounds.
    ///
    /// # Panics
    /// Panics if `lower` and `upper` have different lengths.
    pub fn new(label: impl Into<String>, lower: Vec<f64>, upper: Vec<f64>) -> Self {
        assert_eq!(
            lower.len(),
            upper.len(),
            "MeasurableSet: dimension mismatch"
        );
        MeasurableSet {
            label: label.into(),
            lower,
            upper,
        }
    }

    /// Convenience constructor for the 1-D closed interval `[a, b]`.
    pub fn interval(a: f64, b: f64) -> Self {
        Self::new(format!("[{a},{b}]"), vec![a], vec![b])
    }

    /// Dimension of the ambient space.
    pub fn dim(&self) -> usize {
        self.lower.len()
    }

    /// Whether `point` lies in the closed box (inclusive on both ends).
    ///
    /// # Panics
    /// Panics if `point.len() != self.dim()`.
    pub fn contains(&self, point: &[f64]) -> bool {
        assert_eq!(
            point.len(),
            self.dim(),
            "MeasurableSet::contains: dim mismatch"
        );
        self.lower
            .iter()
            .zip(self.upper.iter())
            .zip(point.iter())
            .all(|((lo, hi), x)| x >= lo && x <= hi)
    }

    /// Whether the box is a Lebesgue-null set, i.e. some side has
    /// (numerically) zero or negative width.
    ///
    /// Fix: the previous check used `(hi - lo).abs() < 1e-15`, which missed
    /// inverted (empty) boxes such as `[2, 1]`; the empty set is null, so a
    /// signed comparison is used instead.
    pub fn is_null_set(&self) -> bool {
        self.lower
            .iter()
            .zip(self.upper.iter())
            .any(|(lo, hi)| hi - lo < 1e-15)
    }
}
/// Minimal measure interface over [`MeasurableSet`] boxes.
pub trait Measure {
/// The measure of `set`.
fn measure(&self, set: &MeasurableSet) -> f64;
/// A set is "null" when its measure is numerically zero.
fn is_null(&self, set: &MeasurableSet) -> bool {
self.measure(set).abs() < 1e-15
}
/// μ(∅) = 0 for every measure.
fn empty_set_measure(&self) -> f64 {
0.0
}
/// Finite additivity μ(A ∪ B) = μ(A) + μ(B).
/// NOTE(review): disjointness of `a` and `b` is assumed, not checked.
fn union_measure_disjoint(&self, a: &MeasurableSet, b: &MeasurableSet) -> f64 {
self.measure(a) + self.measure(b)
}
}
/// A finite σ-algebra over the ground set {0, …, n-1}, with each member set
/// encoded as a bitmask in a `u64`.
#[derive(Debug, Clone)]
pub struct FiniteSigmaAlgebra {
    /// Number of points in the ground set (at most 63 so masks fit a u64).
    pub n: usize,
    // Member sets, as bitmasks over the ground set.
    sets: Vec<u64>,
}

impl FiniteSigmaAlgebra {
    // Bitmask of the full ground set {0, …, n-1}; shared by the
    // constructors and the complement check.
    fn full_mask(n: usize) -> u64 {
        if n == 0 { 0 } else { (1u64 << n) - 1 }
    }

    /// The power set: all 2^n subsets.
    ///
    /// # Panics
    /// Panics if `n > 63` (the mask would overflow a `u64`). Note that even
    /// moderate `n` allocates 2^n entries, so keep `n` small in practice.
    pub fn power_set(n: usize) -> Self {
        assert!(n <= 63, "FiniteSigmaAlgebra: n too large (max 63)");
        let sets = (0u64..(1u64 << n)).collect();
        Self { n, sets }
    }

    /// The trivial σ-algebra {∅, Ω}.
    ///
    /// # Panics
    /// Panics if `n > 63`. (Fix: previously `trivial(64)` overflowed the
    /// shift instead of failing with a clear message, inconsistent with
    /// `power_set`.)
    pub fn trivial(n: usize) -> Self {
        assert!(n <= 63, "FiniteSigmaAlgebra: n too large (max 63)");
        Self {
            n,
            sets: vec![0, Self::full_mask(n)],
        }
    }

    /// Membership test for a set given as a bitmask (O(|sets|) scan).
    pub fn contains(&self, bitmask: u64) -> bool {
        self.sets.contains(&bitmask)
    }

    /// Number of member sets.
    pub fn cardinality(&self) -> usize {
        self.sets.len()
    }

    /// Checks closure under complementation relative to the ground set.
    pub fn is_closed_under_complement(&self) -> bool {
        let full = Self::full_mask(self.n);
        self.sets.iter().all(|&s| self.contains(full & !s))
    }

    /// Checks closure under pairwise union (O(k²) pairs for k member sets).
    pub fn is_closed_under_union(&self) -> bool {
        self.sets
            .iter()
            .all(|&a| self.sets.iter().all(|&b| self.contains(a | b)))
    }
}
/// Lebesgue measure on boxes: the product of the side lengths.
#[derive(Debug, Clone, Copy, Default)]
pub struct LebesgueMeasure;

impl Measure for LebesgueMeasure {
    /// Volume of the box; each side contributes `max(hi - lo, 0)`, so a
    /// degenerate or inverted box has measure zero.
    fn measure(&self, set: &MeasurableSet) -> f64 {
        set.lower
            .iter()
            .zip(set.upper.iter())
            .fold(1.0, |vol, (lo, hi)| vol * (hi - lo).max(0.0))
    }
}
impl LebesgueMeasure {
    /// Length of the 1-D interval `[a, b]`; zero when the interval is empty.
    pub fn interval_measure(a: f64, b: f64) -> f64 {
        (b - a).max(0.0)
    }

    /// Volume of the box with the given per-dimension bounds.
    ///
    /// # Panics
    /// Panics if `lower` and `upper` have different lengths.
    pub fn box_measure(lower: &[f64], upper: &[f64]) -> f64 {
        assert_eq!(lower.len(), upper.len(), "box_measure: dim mismatch");
        lower
            .iter()
            .zip(upper.iter())
            .fold(1.0, |vol, (lo, hi)| vol * (hi - lo).max(0.0))
    }

    /// Length of the overlap of `[a1, a2]` and `[b1, b2]` (zero if disjoint).
    pub fn interval_intersection(a1: f64, a2: f64, b1: f64, b2: f64) -> f64 {
        Self::interval_measure(a1.max(b1), a2.min(b2))
    }

    /// Checks that shifting `[a, b]` by `t` preserves its length
    /// (translation invariance of Lebesgue measure).
    pub fn translation_invariant(a: f64, b: f64, t: f64) -> bool {
        let before = Self::interval_measure(a, b);
        let after = Self::interval_measure(a + t, b + t);
        (before - after).abs() < 1e-12
    }
}
/// A (possibly unnormalized) probability measure given by a density on a
/// box-shaped domain; all integrals are estimated by uniform Monte Carlo
/// sampling with `n_samples` draws.
pub struct ProbabilityMeasure {
    pub density: Box<dyn Fn(&[f64]) -> f64>,
    pub domain: MeasurableSet,
    pub n_samples: usize,
}

impl ProbabilityMeasure {
    /// Wraps a density function with its domain and sample budget.
    pub fn new(
        density: impl Fn(&[f64]) -> f64 + 'static,
        domain: MeasurableSet,
        n_samples: usize,
    ) -> Self {
        Self {
            density: Box::new(density),
            domain,
            n_samples,
        }
    }

    // Draws one uniform point from the domain box. Callers guard against a
    // zero-volume domain first, so every side has positive width here.
    fn draw(&self, rng: &mut impl rand::Rng) -> Vec<f64> {
        (0..self.domain.dim())
            .map(|d| rng.random_range(self.domain.lower[d]..self.domain.upper[d]))
            .collect()
    }

    /// Monte Carlo estimate of ∫_set f dλ (restricted to the domain).
    /// Returns 0 when the domain has zero volume.
    pub fn probability(&self, set: &MeasurableSet) -> f64 {
        let domain_vol = LebesgueMeasure.measure(&self.domain);
        if domain_vol == 0.0 {
            return 0.0;
        }
        let mut rng = rand::rng();
        let total: f64 = (0..self.n_samples)
            .map(|_| {
                let p = self.draw(&mut rng);
                if set.contains(&p) {
                    (self.density)(&p)
                } else {
                    0.0
                }
            })
            .sum();
        domain_vol * total / self.n_samples as f64
    }

    /// Monte Carlo estimate of ∫_domain f dλ; ≈ 1 for a normalized density.
    pub fn total_mass(&self) -> f64 {
        let domain_vol = LebesgueMeasure.measure(&self.domain);
        if domain_vol == 0.0 {
            return 0.0;
        }
        let mut rng = rand::rng();
        let total: f64 = (0..self.n_samples)
            .map(|_| {
                let p = self.draw(&mut rng);
                (self.density)(&p)
            })
            .sum();
        domain_vol * total / self.n_samples as f64
    }

    /// Self-normalizing Monte Carlo estimate of E[g] = ∫ g f dλ / ∫ f dλ.
    /// Returns 0 when the domain volume or the sampled density mass is zero.
    pub fn expectation(&self, g: &dyn Fn(&[f64]) -> f64) -> f64 {
        let domain_vol = LebesgueMeasure.measure(&self.domain);
        if domain_vol == 0.0 {
            return 0.0;
        }
        let mut rng = rand::rng();
        let mut weighted = 0.0;
        let mut mass = 0.0;
        for _ in 0..self.n_samples {
            let p = self.draw(&mut rng);
            let w = (self.density)(&p);
            weighted += w * g(&p);
            mass += w;
        }
        if mass.abs() < 1e-15 {
            return 0.0;
        }
        weighted / mass
    }

    /// Var[g] = E[g²] − E[g]², clamped at zero against Monte Carlo noise.
    /// The two expectations are estimated from independent sample runs.
    pub fn variance(&self, g: &dyn Fn(&[f64]) -> f64) -> f64 {
        let mean = self.expectation(g);
        let second_moment = self.expectation(&|x: &[f64]| g(x).powi(2));
        (second_moment - mean * mean).max(0.0)
    }

    /// Differential entropy estimate −∫ f ln f dλ. Densities below 1e-15 are
    /// skipped: f ln f → 0 there, while ln itself would blow up numerically.
    pub fn entropy(&self) -> f64 {
        let domain_vol = LebesgueMeasure.measure(&self.domain);
        if domain_vol == 0.0 {
            return 0.0;
        }
        let mut rng = rand::rng();
        let mut acc = 0.0;
        for _ in 0..self.n_samples {
            let p = self.draw(&mut rng);
            let f_val = (self.density)(&p);
            if f_val > 1e-15 {
                acc += f_val * f_val.ln();
            }
        }
        -domain_vol * acc / self.n_samples as f64
    }
}
/// A signed measure ν = ν⁺ − ν⁻ represented by a pair of non-negative
/// densities on a common box domain; integrals use uniform Monte Carlo
/// sampling with `n_samples` draws.
pub struct SignedMeasure {
    pub positive_density: Box<dyn Fn(&[f64]) -> f64>,
    pub negative_density: Box<dyn Fn(&[f64]) -> f64>,
    pub domain: MeasurableSet,
    pub n_samples: usize,
}

impl SignedMeasure {
    /// Builds a signed measure from its positive and negative parts.
    pub fn new(
        positive_density: impl Fn(&[f64]) -> f64 + 'static,
        negative_density: impl Fn(&[f64]) -> f64 + 'static,
        domain: MeasurableSet,
        n_samples: usize,
    ) -> Self {
        Self {
            positive_density: Box::new(positive_density),
            negative_density: Box::new(negative_density),
            domain,
            n_samples,
        }
    }

    /// Splits a single signed density f into (f⁺, f⁻) = (max(f,0), max(−f,0)),
    /// the pointwise Jordan decomposition of the induced measure.
    pub fn from_density(
        density: impl Fn(&[f64]) -> f64 + 'static,
        domain: MeasurableSet,
        n_samples: usize,
    ) -> Self {
        // Share one closure between both parts without requiring `Clone`.
        let shared = std::rc::Rc::new(density);
        let pos = std::rc::Rc::clone(&shared);
        Self::new(
            move |x| pos(x).max(0.0),
            move |x| (-shared(x)).max(0.0),
            domain,
            n_samples,
        )
    }

    // Draws one uniform point from the domain box.
    fn draw(&self, rng: &mut impl rand::Rng) -> Vec<f64> {
        (0..self.domain.dim())
            .map(|d| rng.random_range(self.domain.lower[d]..self.domain.upper[d]))
            .collect()
    }

    // Shared Monte Carlo kernel: integrates `combine(f⁺(x), f⁻(x))` over the
    // points of the sample that land in `set`.
    fn integrate_over(&self, set: &MeasurableSet, combine: impl Fn(f64, f64) -> f64) -> f64 {
        let vol = LebesgueMeasure.measure(&self.domain);
        if vol == 0.0 {
            return 0.0;
        }
        let mut rng = rand::rng();
        let mut acc = 0.0;
        for _ in 0..self.n_samples {
            let point = self.draw(&mut rng);
            if set.contains(&point) {
                acc += combine(
                    (self.positive_density)(&point),
                    (self.negative_density)(&point),
                );
            }
        }
        vol * acc / self.n_samples as f64
    }

    /// Monte Carlo estimate of ν(set) = ∫_set (f⁺ − f⁻) dλ.
    pub fn evaluate(&self, set: &MeasurableSet) -> f64 {
        self.integrate_over(set, |pos, neg| pos - neg)
    }

    /// Monte Carlo estimate of the total variation |ν|(set) = ∫_set (f⁺ + f⁻) dλ.
    pub fn total_variation(&self, set: &MeasurableSet) -> f64 {
        self.integrate_over(set, |pos, neg| pos + neg)
    }

    /// Samples `n` uniform points and keeps those where the positive part
    /// dominates — a sampled approximation of the Hahn positive set.
    pub fn hahn_positive_set_samples(&self, n: usize) -> Vec<Vec<f64>> {
        let mut rng = rand::rng();
        let mut kept = Vec::new();
        for _ in 0..n {
            let point = self.draw(&mut rng);
            if (self.positive_density)(&point) > (self.negative_density)(&point) {
                kept.push(point);
            }
        }
        kept
    }

    /// Monte Carlo estimate of the Jordan decomposition (ν⁺(Ω), ν⁻(Ω)).
    pub fn jordan_decomposition(&self) -> (f64, f64) {
        let vol = LebesgueMeasure.measure(&self.domain);
        if vol == 0.0 {
            return (0.0, 0.0);
        }
        let mut rng = rand::rng();
        let mut pos_sum = 0.0;
        let mut neg_sum = 0.0;
        for _ in 0..self.n_samples {
            let point = self.draw(&mut rng);
            pos_sum += (self.positive_density)(&point);
            neg_sum += (self.negative_density)(&point);
        }
        let scale = vol / self.n_samples as f64;
        (scale * pos_sum, scale * neg_sum)
    }
}
/// Product measure λ×λ on a pair of box domains, with iterated (Fubini)
/// integration and a Monte Carlo cross-check.
#[derive(Debug, Clone)]
pub struct ProductMeasure {
    pub domain1: MeasurableSet,
    pub domain2: MeasurableSet,
}

impl ProductMeasure {
    /// Pairs two factor domains.
    pub fn new(domain1: MeasurableSet, domain2: MeasurableSet) -> Self {
        Self { domain1, domain2 }
    }

    /// (λ×λ)(A×B) = λ(A)·λ(B) for a measurable rectangle A×B.
    pub fn measure_product_box(&self, a: &MeasurableSet, b: &MeasurableSet) -> f64 {
        LebesgueMeasure.measure(a) * LebesgueMeasure.measure(b)
    }

    /// The concatenated box domain1 × domain2 in R^{d1+d2}.
    pub fn product_domain(&self) -> MeasurableSet {
        let lower: Vec<f64> = self
            .domain1
            .lower
            .iter()
            .chain(self.domain2.lower.iter())
            .copied()
            .collect();
        let upper: Vec<f64> = self
            .domain1
            .upper
            .iter()
            .chain(self.domain2.upper.iter())
            .copied()
            .collect();
        MeasurableSet::new("product_domain", lower, upper)
    }

    /// Iterated midpoint-rule integral ∫∫ f(x, y) dy dx over the product of
    /// two 1-D domains, on an n_x × n_y grid.
    ///
    /// # Panics
    /// Panics unless both factor domains are 1-D.
    pub fn fubini_integrate(&self, f: &dyn Fn(f64, f64) -> f64, n_x: usize, n_y: usize) -> f64 {
        assert_eq!(
            self.domain1.dim(),
            1,
            "fubini_integrate: domain1 must be 1-D"
        );
        assert_eq!(
            self.domain2.dim(),
            1,
            "fubini_integrate: domain2 must be 1-D"
        );
        let (x0, x1) = (self.domain1.lower[0], self.domain1.upper[0]);
        let (y0, y1) = (self.domain2.lower[0], self.domain2.upper[0]);
        let dx = (x1 - x0) / n_x as f64;
        let dy = (y1 - y0) / n_y as f64;
        let mut outer = 0.0;
        for i in 0..n_x {
            let x = x0 + (i as f64 + 0.5) * dx;
            let inner: f64 = (0..n_y)
                .map(|j| {
                    let y = y0 + (j as f64 + 0.5) * dy;
                    f(x, y)
                })
                .sum();
            outer += inner * dy;
        }
        outer * dx
    }

    /// Returns (iterated integral, Monte Carlo estimate with n² samples) so a
    /// caller can check the two agree, as Fubini's theorem predicts.
    pub fn verify_fubini(&self, f: &dyn Fn(f64, f64) -> f64, n: usize) -> (f64, f64) {
        // `fubini_integrate` also enforces that both domains are 1-D.
        let iterated = self.fubini_integrate(f, n, n);
        let (x0, x1) = (self.domain1.lower[0], self.domain1.upper[0]);
        let (y0, y1) = (self.domain2.lower[0], self.domain2.upper[0]);
        let vol = (x1 - x0) * (y1 - y0);
        let mut rng = rand::rng();
        let mut mc_sum = 0.0;
        for _ in 0..(n * n) {
            let x = rng.random_range(x0..x1);
            let y = rng.random_range(y0..y1);
            mc_sum += f(x, y);
        }
        (iterated, vol * mc_sum / (n * n) as f64)
    }
}
/// Numerical integration helpers: midpoint rules, Monte Carlo, importance
/// sampling, and a Riemann–Stieltjes sum.
pub struct MeasureIntegral;

impl MeasureIntegral {
    /// Midpoint rule for ∫_a^b f(x) dx with n subintervals.
    pub fn integrate_1d(f: &dyn Fn(f64) -> f64, a: f64, b: f64, n: usize) -> f64 {
        let h = (b - a) / n as f64;
        (0..n)
            .map(|i| {
                let x = a + (i as f64 + 0.5) * h;
                f(x) * h
            })
            .sum()
    }

    /// Midpoint rule for ∫∫ f over [a,b]×[c,d] on an n×n grid.
    pub fn integrate_2d(
        f: &dyn Fn(f64, f64) -> f64,
        a: f64,
        b: f64,
        c: f64,
        d: f64,
        n: usize,
    ) -> f64 {
        let hx = (b - a) / n as f64;
        let hy = (d - c) / n as f64;
        let mut sum = 0.0;
        for i in 0..n {
            let x = a + (i as f64 + 0.5) * hx;
            for j in 0..n {
                let y = c + (j as f64 + 0.5) * hy;
                sum += f(x, y);
            }
        }
        sum * hx * hy
    }

    /// Midpoint rule over an axis-aligned box in R^d with `n_pts` cells per
    /// axis (n_pts^d cells total — cost is exponential in the dimension).
    ///
    /// # Panics
    /// Panics if `lower` and `upper` have different lengths.
    pub fn integrate_nd(
        f: &dyn Fn(&[f64]) -> f64,
        lower: &[f64],
        upper: &[f64],
        n_pts: usize,
    ) -> f64 {
        let dim = lower.len();
        assert_eq!(dim, upper.len(), "integrate_nd: dim mismatch");
        let steps: Vec<f64> = lower
            .iter()
            .zip(upper.iter())
            .map(|(lo, hi)| (hi - lo) / n_pts as f64)
            .collect();
        let vol_cell: f64 = steps.iter().product();
        let total_cells = n_pts.pow(dim as u32);
        (0..total_cells)
            .map(|idx| {
                // Decode the flat cell index into one grid coordinate per
                // axis (mixed-radix digits, base n_pts).
                let mut point = vec![0.0f64; dim];
                let mut rem = idx;
                for d in 0..dim {
                    let coord = rem % n_pts;
                    rem /= n_pts;
                    point[d] = lower[d] + (coord as f64 + 0.5) * steps[d];
                }
                f(&point) * vol_cell
            })
            .sum()
    }

    /// Uniform Monte Carlo estimate of ∫ f over the box.
    ///
    /// # Panics
    /// Panics if `lower` and `upper` have different lengths.
    pub fn monte_carlo(
        f: &dyn Fn(&[f64]) -> f64,
        lower: &[f64],
        upper: &[f64],
        n_samples: usize,
    ) -> f64 {
        let dim = lower.len();
        assert_eq!(dim, upper.len(), "monte_carlo: dim mismatch");
        let vol: f64 = lower
            .iter()
            .zip(upper.iter())
            .map(|(lo, hi)| hi - lo)
            .product();
        let mut rng = rand::rng();
        let mut sum = 0.0;
        for _ in 0..n_samples {
            let point: Vec<f64> = (0..dim)
                .map(|d| rng.random_range(lower[d]..upper[d]))
                .collect();
            sum += f(&point);
        }
        vol * sum / n_samples as f64
    }

    /// Importance sampling: ∫ f dx ≈ (1/N) Σ f(xᵢ)/g(xᵢ) with xᵢ ~ g.
    /// Samples where the proposal density is ≈ 0 contribute nothing rather
    /// than producing an unbounded weight.
    pub fn importance_sampling(
        f: &dyn Fn(f64) -> f64,
        proposal_sample: &dyn Fn() -> f64,
        proposal_density: &dyn Fn(f64) -> f64,
        n_samples: usize,
    ) -> f64 {
        let mut sum = 0.0;
        for _ in 0..n_samples {
            let x = proposal_sample();
            let g = proposal_density(x);
            if g.abs() > 1e-15 {
                sum += f(x) / g;
            }
        }
        sum / n_samples as f64
    }

    /// Riemann–Stieltjes sum ∫ f dg using left endpoints on n subintervals.
    pub fn riemann_stieltjes(
        f: &dyn Fn(f64) -> f64,
        g: &dyn Fn(f64) -> f64,
        a: f64,
        b: f64,
        n: usize,
    ) -> f64 {
        let h = (b - a) / n as f64;
        (0..n)
            .map(|i| {
                let x = a + i as f64 * h;
                let xp = x + h;
                f(x) * (g(xp) - g(x))
            })
            .sum()
    }

    /// Monte Carlo estimate of ∫ f plus its standard error (from the sample
    /// variance).
    ///
    /// Fixes over the previous version: the bounds' lengths are now checked
    /// (consistent with `monte_carlo`), and `n_samples < 2` no longer
    /// evaluates `n_samples - 1` (a usize underflow for 0) — the standard
    /// error is reported as NaN instead.
    ///
    /// # Panics
    /// Panics if `lower` and `upper` have different lengths.
    pub fn monte_carlo_std_error(
        f: &dyn Fn(&[f64]) -> f64,
        lower: &[f64],
        upper: &[f64],
        n_samples: usize,
    ) -> (f64, f64) {
        let dim = lower.len();
        assert_eq!(dim, upper.len(), "monte_carlo_std_error: dim mismatch");
        let vol: f64 = lower
            .iter()
            .zip(upper.iter())
            .map(|(lo, hi)| hi - lo)
            .product();
        let mut rng = rand::rng();
        let mut vals = Vec::with_capacity(n_samples);
        for _ in 0..n_samples {
            let point: Vec<f64> = (0..dim)
                .map(|d| rng.random_range(lower[d]..upper[d]))
                .collect();
            vals.push(f(&point));
        }
        let mean: f64 = vals.iter().sum::<f64>() / n_samples as f64;
        if n_samples < 2 {
            // Sample variance is undefined for fewer than two samples.
            return (vol * mean, f64::NAN);
        }
        let var: f64 =
            vals.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / (n_samples - 1) as f64;
        let std_err = (var / n_samples as f64).sqrt() * vol;
        (vol * mean, std_err)
    }
}
/// Empirical Radon–Nikodym derivative dν/dμ estimated from samples.
pub struct RadonNikodym;

impl RadonNikodym {
    /// Estimates dν/dμ at `x` by comparing the fraction of `nu_samples` with
    /// the fraction of `mu_samples` falling in the window [x−eps, x+eps].
    ///
    /// Returns 0 when either sample set is empty or no μ-samples land in the
    /// window. (Fix: previously an empty `nu_samples` produced 0/0 = NaN.)
    pub fn estimate_at(x: f64, mu_samples: &[f64], nu_samples: &[f64], eps: f64) -> f64 {
        if mu_samples.is_empty() || nu_samples.is_empty() {
            return 0.0;
        }
        let count =
            |samples: &[f64]| samples.iter().filter(|&&s| (s - x).abs() <= eps).count() as f64;
        let mu_count = count(mu_samples);
        let nu_count = count(nu_samples);
        if mu_count < 1e-15 {
            return 0.0;
        }
        (nu_count / nu_samples.len() as f64) / (mu_count / mu_samples.len() as f64)
    }
}
/// Modes of convergence for sequences of measurable functions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ConvergenceType {
/// Pointwise convergence off a null set.
AlmostEverywhere,
/// μ{|f_n − f| > ε} → 0 for every ε > 0.
InMeasure,
/// ‖f_n − f‖_p → 0.
InLp,
/// sup|f_n − f| → 0.
Uniform,
}
/// Midpoint-rule check that the LAST element of `fn_seq` is within
/// `tolerance` of `limit` in L^p([a, b]) norm. An empty sequence counts as
/// convergent. (Only the final element is inspected, as a cheap proxy for
/// convergence of the whole sequence.)
pub fn check_lp_convergence(
    fn_seq: &[Box<dyn Fn(f64) -> f64>],
    limit: &dyn Fn(f64) -> f64,
    a: f64,
    b: f64,
    p: f64,
    n_pts: usize,
    tolerance: f64,
) -> bool {
    let last = match fn_seq.last() {
        Some(f) => f,
        None => return true,
    };
    let h = (b - a) / n_pts as f64;
    let total: f64 = (0..n_pts)
        .map(|i| {
            let x = a + (i as f64 + 0.5) * h;
            (last(x) - limit(x)).abs().powf(p) * h
        })
        .sum();
    total.powf(1.0 / p) < tolerance
}
/// Outer-measure demonstrations built on top of Lebesgue measure.
pub struct OuterMeasure {
    pub base: LebesgueMeasure,
}

impl OuterMeasure {
    /// Creates the outer-measure wrapper around Lebesgue measure.
    pub fn new() -> Self {
        Self { base: LebesgueMeasure }
    }

    /// Outer measure of [a, b]. For intervals the infimum over covers equals
    /// the plain interval length, so the cover count is ignored.
    pub fn estimate(&self, a: f64, b: f64, n_covers: usize) -> f64 {
        let _ = n_covers;
        (b - a).max(0.0)
    }

    /// Carathéodory criterion for the fixed test set E = [0, 1]:
    /// λ*(E) = λ*(E ∩ A) + λ*(E \ A), where A = [a_lo, a_hi].
    pub fn verify_caratheodory(&self, a_lo: f64, a_hi: f64) -> bool {
        let (e_lo, e_hi) = (0.0, 1.0);
        let full = e_hi - e_lo;
        let intersection = (a_hi.min(e_hi) - a_lo.max(e_lo)).max(0.0);
        let complement = ((a_lo - e_lo).max(0.0) + (e_hi - a_hi).max(0.0)).min(full);
        (full - (intersection + complement)).abs() < 1e-12
    }
}

impl Default for OuterMeasure {
    fn default() -> Self {
        Self::new()
    }
}
/// Counting measure: μ(A) = |A|.
#[derive(Debug, Clone, Copy, Default)]
pub struct CountingMeasure;

impl CountingMeasure {
    /// μ(A) for a set of the given cardinality.
    pub fn measure_set(set_size: usize) -> f64 {
        set_size as f64
    }

    /// Finite additivity |A| + |B| = |A ⊔ B| for disjoint sets; the `==` is
    /// exact because small integers are represented exactly in f64.
    pub fn additivity(a_size: usize, b_size: usize) -> bool {
        let joint = Self::measure_set(a_size + b_size);
        joint == Self::measure_set(a_size) + Self::measure_set(b_size)
    }
}
/// Birkhoff time averages for a 1-D dynamical system.
pub struct ErgodicMeasure;

impl ErgodicMeasure {
    /// Time average (1/n) Σ_{k<n} f(T^k x0) along the orbit of `map`.
    ///
    /// Returns 0 for an empty orbit (n = 0), where the previous version
    /// produced 0/0 = NaN.
    pub fn time_average(
        f: &dyn Fn(f64) -> f64,
        map: &dyn Fn(f64) -> f64,
        x0: f64,
        n: usize,
    ) -> f64 {
        if n == 0 {
            return 0.0;
        }
        let mut x = x0;
        let mut sum = 0.0;
        for _ in 0..n {
            sum += f(x);
            x = map(x);
        }
        sum / n as f64
    }

    /// Birkhoff ergodic theorem check: for an ergodic map, the time average
    /// along one orbit should approach the given space average.
    pub fn birkhoff_check(
        f: &dyn Fn(f64) -> f64,
        map: &dyn Fn(f64) -> f64,
        x0: f64,
        space_average: f64,
        n: usize,
        tol: f64,
    ) -> bool {
        let ta = Self::time_average(f, map, x0, n);
        (ta - space_average).abs() < tol
    }
}
/// Normalized Haar measure on the circle group R/2πZ.
pub struct HaarMeasure;

impl HaarMeasure {
    /// Left-endpoint Riemann sum of (1/2π) ∫_0^{2π} f(θ) dθ over n nodes.
    pub fn integrate(f: &dyn Fn(f64) -> f64, n: usize) -> f64 {
        let h = TAU / n as f64;
        let mut acc = 0.0;
        for i in 0..n {
            acc += f(i as f64 * h) * h / TAU;
        }
        acc
    }

    /// Haar measure is translation (rotation) invariant:
    /// ∫ f(θ + t) dθ = ∫ f(θ) dθ, with the shift reduced mod 2π.
    pub fn translation_invariant(f: &dyn Fn(f64) -> f64, t: f64, n: usize) -> bool {
        let base = Self::integrate(f, n);
        let rotated = Self::integrate(&|theta| f((theta + t) % TAU), n);
        (base - rotated).abs() < 1e-6
    }
}
#[cfg(test)]
mod tests {
// Deterministic numeric tests below use tight tolerances; Monte Carlo /
// sampling-based tests necessarily use loose ones (and can flake very
// rarely under an unlucky seed).
use super::*;
use std::f64::consts::PI;
#[test]
fn test_measurable_set_contains() {
let s = MeasurableSet::interval(0.0, 1.0);
assert!(s.contains(&[0.5]));
assert!(!s.contains(&[1.5]));
}
#[test]
fn test_measurable_set_null() {
let s = MeasurableSet::interval(1.0, 1.0);
assert!(s.is_null_set());
}
#[test]
fn test_measurable_set_dim() {
let s = MeasurableSet::new("box", vec![0.0, 0.0], vec![1.0, 1.0]);
assert_eq!(s.dim(), 2);
}
#[test]
fn test_sigma_algebra_power_set_cardinality() {
let sa = FiniteSigmaAlgebra::power_set(3);
assert_eq!(sa.cardinality(), 8);
}
#[test]
fn test_sigma_algebra_trivial_cardinality() {
let sa = FiniteSigmaAlgebra::trivial(4);
assert_eq!(sa.cardinality(), 2);
}
#[test]
fn test_sigma_algebra_complement_closure() {
let sa = FiniteSigmaAlgebra::power_set(3);
assert!(sa.is_closed_under_complement());
}
#[test]
fn test_sigma_algebra_union_closure() {
let sa = FiniteSigmaAlgebra::power_set(3);
assert!(sa.is_closed_under_union());
}
#[test]
fn test_sigma_algebra_contains_empty() {
let sa = FiniteSigmaAlgebra::power_set(4);
assert!(sa.contains(0)); }
#[test]
fn test_lebesgue_interval() {
let m = LebesgueMeasure;
let s = MeasurableSet::interval(0.0, 3.0);
assert!((m.measure(&s) - 3.0).abs() < 1e-12);
}
#[test]
fn test_lebesgue_box() {
let m = LebesgueMeasure;
let s = MeasurableSet::new("box", vec![0.0, 0.0], vec![2.0, 3.0]);
assert!((m.measure(&s) - 6.0).abs() < 1e-12);
}
#[test]
fn test_lebesgue_null_set() {
let m = LebesgueMeasure;
let s = MeasurableSet::interval(1.0, 1.0);
assert!(m.is_null(&s));
}
#[test]
fn test_lebesgue_empty_set() {
let m = LebesgueMeasure;
assert!((m.empty_set_measure()).abs() < 1e-12);
}
#[test]
fn test_lebesgue_disjoint_union() {
let m = LebesgueMeasure;
let a = MeasurableSet::interval(0.0, 1.0);
let b = MeasurableSet::interval(2.0, 4.0);
let u = m.union_measure_disjoint(&a, &b);
assert!((u - 3.0).abs() < 1e-12);
}
#[test]
fn test_lebesgue_translation_invariant() {
assert!(LebesgueMeasure::translation_invariant(0.0, 1.0, 5.0));
}
#[test]
fn test_lebesgue_intersection() {
let i = LebesgueMeasure::interval_intersection(0.0, 2.0, 1.0, 3.0);
assert!((i - 1.0).abs() < 1e-12);
}
#[test]
fn test_lebesgue_no_intersection() {
let i = LebesgueMeasure::interval_intersection(0.0, 1.0, 2.0, 3.0);
assert!(i.abs() < 1e-12);
}
// Monte Carlo based assertions from here on use wide tolerances.
#[test]
fn test_probability_total_mass_uniform() {
let pm = ProbabilityMeasure::new(|_x| 1.0, MeasurableSet::interval(0.0, 1.0), 10_000);
let mass = pm.total_mass();
assert!((mass - 1.0).abs() < 0.05, "total mass = {mass}");
}
#[test]
fn test_probability_expectation_linear() {
let pm = ProbabilityMeasure::new(|_x| 1.0, MeasurableSet::interval(0.0, 1.0), 20_000);
let ex = pm.expectation(&|x| x[0]);
assert!((ex - 0.5).abs() < 0.05, "E[X] = {ex}");
}
#[test]
fn test_probability_variance_uniform() {
let pm = ProbabilityMeasure::new(|_x| 1.0, MeasurableSet::interval(0.0, 1.0), 30_000);
let vx = pm.variance(&|x| x[0]);
assert!((vx - 1.0 / 12.0).abs() < 0.02, "Var[X] = {vx}");
}
#[test]
fn test_probability_entropy_positive() {
// Uniform density on [0,1] has differential entropy 0.
let pm = ProbabilityMeasure::new(|_x| 1.0, MeasurableSet::interval(0.0, 1.0), 5_000);
let h = pm.entropy();
assert!(h.abs() < 0.05, "H = {h}");
}
#[test]
fn test_signed_measure_total_variation_positive() {
let sm = SignedMeasure::new(|_| 2.0, |_| 1.0, MeasurableSet::interval(0.0, 1.0), 20_000);
let tv = sm.total_variation(&MeasurableSet::interval(0.0, 1.0));
assert!((tv - 3.0).abs() < 0.15, "TV = {tv}");
}
#[test]
fn test_signed_measure_evaluate() {
let sm = SignedMeasure::new(|_| 1.0, |_| 0.0, MeasurableSet::interval(0.0, 1.0), 20_000);
let v = sm.evaluate(&MeasurableSet::interval(0.0, 1.0));
assert!((v - 1.0).abs() < 0.1, "ν = {v}");
}
#[test]
fn test_signed_measure_jordan_decomposition() {
let sm = SignedMeasure::new(|_| 2.0, |_| 1.0, MeasurableSet::interval(0.0, 1.0), 20_000);
let (pos, neg) = sm.jordan_decomposition();
assert!((pos - 2.0).abs() < 0.15, "ν⁺ = {pos}");
assert!((neg - 1.0).abs() < 0.15, "ν⁻ = {neg}");
}
#[test]
fn test_signed_measure_from_density() {
let sm =
SignedMeasure::from_density(|x| x[0] - 0.5, MeasurableSet::interval(0.0, 1.0), 20_000);
let (pos, neg) = sm.jordan_decomposition();
assert!(pos >= 0.0);
assert!(neg >= 0.0);
}
#[test]
fn test_product_measure_box() {
let pm = ProductMeasure::new(
MeasurableSet::interval(0.0, 2.0),
MeasurableSet::interval(0.0, 3.0),
);
let a = MeasurableSet::interval(0.0, 2.0);
let b = MeasurableSet::interval(0.0, 3.0);
let m = pm.measure_product_box(&a, &b);
assert!((m - 6.0).abs() < 1e-12);
}
#[test]
fn test_fubini_constant_function() {
let pm = ProductMeasure::new(
MeasurableSet::interval(0.0, 1.0),
MeasurableSet::interval(0.0, 1.0),
);
let result = pm.fubini_integrate(&|_x, _y| 1.0, 100, 100);
assert!((result - 1.0).abs() < 0.01, "result = {result}");
}
#[test]
fn test_fubini_separable_function() {
let pm = ProductMeasure::new(
MeasurableSet::interval(0.0, 1.0),
MeasurableSet::interval(0.0, 1.0),
);
let result = pm.fubini_integrate(&|x, y| x * y, 200, 200);
assert!((result - 0.25).abs() < 0.01, "result = {result}");
}
#[test]
fn test_fubini_vs_monte_carlo() {
let pm = ProductMeasure::new(
MeasurableSet::interval(0.0, 1.0),
MeasurableSet::interval(0.0, 1.0),
);
let (iterated, mc) = pm.verify_fubini(&|x, y| x * x + y * y, 50);
assert!((iterated - mc).abs() < 0.1, "iterated={iterated} mc={mc}");
}
#[test]
fn test_integrate_1d_constant() {
let result = MeasureIntegral::integrate_1d(&|_| 2.0, 0.0, 3.0, 1000);
assert!((result - 6.0).abs() < 0.01, "result = {result}");
}
#[test]
fn test_integrate_1d_linear() {
let result = MeasureIntegral::integrate_1d(&|x| x, 0.0, 1.0, 10_000);
assert!((result - 0.5).abs() < 0.001, "result = {result}");
}
#[test]
fn test_integrate_1d_trig() {
let result = MeasureIntegral::integrate_1d(&|x| x.sin(), 0.0, PI, 10_000);
assert!((result - 2.0).abs() < 0.01, "result = {result}");
}
#[test]
fn test_integrate_2d_constant() {
let result = MeasureIntegral::integrate_2d(&|_x, _y| 1.0, 0.0, 1.0, 0.0, 1.0, 100);
assert!((result - 1.0).abs() < 0.01, "result = {result}");
}
#[test]
fn test_integrate_2d_separable() {
let result = MeasureIntegral::integrate_2d(&|x, y| x * y, 0.0, 1.0, 0.0, 1.0, 200);
assert!((result - 0.25).abs() < 0.005, "result = {result}");
}
#[test]
fn test_integrate_nd_volume() {
let result =
MeasureIntegral::integrate_nd(&|_| 1.0, &[0.0, 0.0, 0.0], &[1.0, 1.0, 1.0], 10);
assert!((result - 1.0).abs() < 0.01, "result = {result}");
}
#[test]
fn test_monte_carlo_1d() {
let result = MeasureIntegral::monte_carlo(&|x| x[0].powi(2), &[0.0], &[1.0], 50_000);
assert!((result - 1.0 / 3.0).abs() < 0.02, "result = {result}");
}
#[test]
fn test_monte_carlo_2d() {
let result =
MeasureIntegral::monte_carlo(&|p| p[0] + p[1], &[0.0, 0.0], &[1.0, 1.0], 50_000);
assert!((result - 1.0).abs() < 0.05, "result = {result}");
}
#[test]
fn test_riemann_stieltjes() {
// With g(x) = x this reduces to an ordinary Riemann integral of f.
let result = MeasureIntegral::riemann_stieltjes(&|_| 1.0, &|x| x, 0.0, 1.0, 10_000);
assert!((result - 1.0).abs() < 0.001, "result = {result}");
}
#[test]
fn test_monte_carlo_std_error() {
let (est, err) = MeasureIntegral::monte_carlo_std_error(&|x| x[0], &[0.0], &[1.0], 10_000);
assert!((est - 0.5).abs() < 0.05, "estimate = {est}");
assert!(err > 0.0, "std error should be positive");
}
#[test]
fn test_radon_nikodym_estimate() {
// ν = μ, so the derivative should be ≈ 1 everywhere.
let samples: Vec<f64> = (0..1000).map(|i| i as f64 / 1000.0).collect();
let rn = RadonNikodym::estimate_at(0.5, &samples, &samples, 0.05);
assert!((rn - 1.0).abs() < 0.2, "RN derivative = {rn}");
}
#[test]
fn test_outer_measure_estimate() {
let om = OuterMeasure::new();
let m = om.estimate(0.0, 3.0, 10);
assert!((m - 3.0).abs() < 1e-12);
}
#[test]
fn test_caratheodory_condition() {
let om = OuterMeasure::new();
assert!(om.verify_caratheodory(0.2, 0.7));
}
#[test]
fn test_counting_measure() {
assert!((CountingMeasure::measure_set(5) - 5.0).abs() < 1e-12);
assert!((CountingMeasure::measure_set(0)).abs() < 1e-12);
}
// NOTE(review): despite the name, the map below is an irrational rotation
// x ↦ x + α mod 1 (α = golden ratio conjugate), not the doubling map.
#[test]
fn test_ergodic_time_average_doubling_map() {
let alpha = (5.0_f64.sqrt() - 1.0) / 2.0; let ta = ErgodicMeasure::time_average(
&|x| x,
&move |x| (x + alpha) % 1.0,
0.123_456_789,
200_000,
);
assert!((ta - 0.5).abs() < 0.05, "time average = {ta}");
}
#[test]
fn test_ergodic_birkhoff_check() {
let alpha = (5.0_f64.sqrt() - 1.0) / 2.0;
let ok = ErgodicMeasure::birkhoff_check(
&|x| x,
&move |x| (x + alpha) % 1.0,
0.1234,
0.5,
200_000,
0.05,
);
assert!(ok, "Birkhoff check failed");
}
#[test]
fn test_haar_measure_constant() {
let result = HaarMeasure::integrate(&|_| 1.0, 1000);
assert!((result - 1.0).abs() < 0.01, "result = {result}");
}
#[test]
fn test_haar_measure_cosine() {
let result = HaarMeasure::integrate(&|theta: f64| theta.cos(), 10_000);
assert!(result.abs() < 0.01, "result = {result}");
}
#[test]
fn test_haar_translation_invariant() {
let ok = HaarMeasure::translation_invariant(&|theta: f64| theta.sin().powi(2), 1.0, 10_000);
assert!(ok);
}
#[test]
fn test_lp_convergence() {
// x^(1/n) → 1 on (0, 1], so the last element should be L²-close to 1.
let fns: Vec<Box<dyn Fn(f64) -> f64>> = (1..=10)
.map(|n| {
let b: Box<dyn Fn(f64) -> f64> = Box::new(move |x: f64| x.powf(1.0 / n as f64));
b
})
.collect();
let limit = |_x: f64| 1.0;
let converges = check_lp_convergence(&fns, &limit, 0.0, 1.0, 2.0, 1000, 0.15);
assert!(converges);
}
}