// virtual_frame/kahan.rs
//! Kahan compensated summation — bit-identical results regardless of platform.
//!
//! Every floating-point reduction in this library uses Kahan summation to
//! guarantee deterministic results. The compensation term captures rounding
//! error that naive left-to-right summation would lose.

/// Kahan compensated accumulator for f64 values.
///
/// Maintains a running sum and a compensation term on the stack.
/// The result is bit-identical for identical input sequences.
///
/// Invariant: the best estimate of the true total is `sum - comp`, where
/// `comp` holds rounding error not yet reflected in `sum` (see `finalize`).
#[derive(Debug, Clone)]
pub struct KahanAccumulator {
    // High-order running total, rounded at each addition.
    sum: f64,
    // Low-order rounding error carried between additions; `add` subtracts
    // it from the next incoming value so the error cancels over time.
    comp: f64,
    // Number of values added via `add` / `add_slice`.
    count: usize,
}
17
18impl KahanAccumulator {
19    /// Create a zero-initialized accumulator.
20    #[inline]
21    pub fn new() -> Self {
22        Self {
23            sum: 0.0,
24            comp: 0.0,
25            count: 0,
26        }
27    }
28
29    /// Add a single value with Kahan compensation.
30    #[inline]
31    pub fn add(&mut self, value: f64) {
32        self.count += 1;
33        let y = value - self.comp;
34        let t = self.sum + y;
35        self.comp = (t - self.sum) - y;
36        self.sum = t;
37    }
38
39    /// Add a slice of values.
40    pub fn add_slice(&mut self, values: &[f64]) {
41        for &v in values {
42            self.add(v);
43        }
44    }
45
46    /// Return the accumulated sum, including the final compensation residual.
47    #[inline]
48    pub fn finalize(&self) -> f64 {
49        self.sum - self.comp
50    }
51
52    /// Return the number of values added.
53    #[inline]
54    pub fn count(&self) -> usize {
55        self.count
56    }
57}
58
59impl Default for KahanAccumulator {
60    fn default() -> Self {
61        Self::new()
62    }
63}
64
65/// One-shot Kahan summation for a slice of f64 values.
66pub fn kahan_sum(values: &[f64]) -> f64 {
67    let mut acc = KahanAccumulator::new();
68    acc.add_slice(values);
69    acc.finalize()
70}
71
#[cfg(test)]
mod tests {
    use super::*;

    /// Summing many copies of 0.1 makes naive summation drift; the
    /// compensated result must stay near the true total and be at least
    /// as accurate as a plain left-to-right fold.
    #[test]
    fn test_kahan_compensates() {
        const N: usize = 10_000_000;
        let mut acc = KahanAccumulator::new();
        for _ in 0..N {
            acc.add(0.1);
        }
        let kahan = acc.finalize();
        let expected = 1_000_000.0;
        // 0.1 is not exactly representable, so even the ideal sum of the
        // rounded inputs differs from 1e6 by roughly 1e-10; 1e-6 is a
        // comfortable bound that naive summation misses by a wide margin.
        assert!(
            (kahan - expected).abs() < 1e-6,
            "Kahan result {} should be close to {}",
            kahan,
            expected
        );
        // Verify it's better than naive
        let naive: f64 = (0..N).map(|_| 0.1_f64).fold(0.0, |a, b| a + b);
        assert!(
            (kahan - expected).abs() <= (naive - expected).abs(),
            "Kahan ({}) should be at least as accurate as naive ({})",
            kahan,
            naive
        );
    }

    /// Identical input sequences must produce bit-identical sums.
    #[test]
    fn test_kahan_determinism() {
        let values: Vec<f64> = (0..1000).map(|i| (i as f64) * 0.001).collect();
        assert_eq!(kahan_sum(&values).to_bits(), kahan_sum(&values).to_bits());
    }

    /// `count` tracks `add` calls; exactly-representable doubles sum exactly.
    #[test]
    fn test_kahan_count() {
        let mut acc = KahanAccumulator::new();
        for &v in &[1.0, 2.0, 3.0] {
            acc.add(v);
        }
        assert_eq!(acc.count(), 3);
        assert_eq!(acc.finalize(), 6.0);
    }
}