/// Running floating-point sum using Kahan compensated summation.
///
/// Each `add` captures the low-order bits lost to rounding and feeds
/// them back into the next addition, keeping the accumulated error
/// bounded regardless of how many values are summed.
#[derive(Debug, Clone)]
pub struct KahanAccumulator {
    /// Running (compensated) total.
    sum: f64,
    /// Rounding error captured on the most recent `add`; subtracted
    /// from the next incoming value before it is added to `sum`.
    comp: f64,
    /// Number of values folded in so far.
    count: usize,
}
impl KahanAccumulator {
    /// Creates an empty accumulator with a zeroed running sum.
    #[inline]
    pub fn new() -> Self {
        Self {
            sum: 0.0,
            comp: 0.0,
            count: 0,
        }
    }

    /// Folds one value into the running sum using Kahan's algorithm,
    /// carrying the rounding error forward in the compensation term.
    #[inline]
    pub fn add(&mut self, value: f64) {
        self.count += 1;
        // Re-apply the error captured on the previous step.
        let corrected = value - self.comp;
        let updated = self.sum + corrected;
        // `(updated - sum)` recovers what was actually added; subtracting
        // `corrected` isolates the low-order bits lost to rounding.
        self.comp = (updated - self.sum) - corrected;
        self.sum = updated;
    }

    /// Feeds every element of `values` through [`Self::add`], in order.
    pub fn add_slice(&mut self, values: &[f64]) {
        values.iter().for_each(|&v| self.add(v));
    }

    /// Returns the total, applying a final correction from the
    /// compensation term.
    #[inline]
    pub fn finalize(&self) -> f64 {
        self.sum - self.comp
    }

    /// Number of values added so far.
    #[inline]
    pub fn count(&self) -> usize {
        self.count
    }
}
impl Default for KahanAccumulator {
    /// Identical to [`KahanAccumulator::new`]: all-zero starting state.
    fn default() -> Self {
        Self {
            sum: 0.0,
            comp: 0.0,
            count: 0,
        }
    }
}
/// Convenience wrapper: Kahan-sums an entire slice in one call.
pub fn kahan_sum(values: &[f64]) -> f64 {
    let mut total = KahanAccumulator::default();
    for &v in values {
        total.add(v);
    }
    total.finalize()
}
#[cfg(test)]
mod tests {
    use super::*;

    /// 0.1 has no exact binary representation; summing it 10M times
    /// exposes accumulated rounding drift. The compensated result must
    /// land within 1e-6 of the true value and never be worse than a
    /// naive left-to-right sum.
    #[test]
    fn test_kahan_compensates() {
        let mut acc = KahanAccumulator::new();
        let n = 10_000_000;
        for _ in 0..n {
            acc.add(0.1);
        }
        let result = acc.finalize();
        let expected = 1_000_000.0;
        assert!(
            (result - expected).abs() < 1e-6,
            "Kahan result {} should be close to {}",
            result,
            expected
        );
        // `.sum()` replaces the hand-rolled `fold(0.0, |a, b| a + b)`;
        // `Sum for f64` performs the same left-to-right addition, so
        // this is the same naive baseline, idiomatically.
        let naive: f64 = (0..n).map(|_| 0.1_f64).sum();
        assert!(
            (result - expected).abs() <= (naive - expected).abs(),
            "Kahan ({}) should be at least as accurate as naive ({})",
            result,
            naive
        );
    }

    /// The same input must produce bit-identical output across runs.
    #[test]
    fn test_kahan_determinism() {
        let values: Vec<f64> = (0..1000).map(|i| (i as f64) * 0.001).collect();
        let r1 = kahan_sum(&values);
        let r2 = kahan_sum(&values);
        assert_eq!(r1.to_bits(), r2.to_bits());
    }

    /// `count` tracks additions; small integer-valued sums are exact
    /// in f64, so the equality check is safe.
    #[test]
    fn test_kahan_count() {
        let mut acc = KahanAccumulator::new();
        acc.add(1.0);
        acc.add(2.0);
        acc.add(3.0);
        assert_eq!(acc.count(), 3);
        assert_eq!(acc.finalize(), 6.0);
    }
}