// math_audio_dsp/adaa.rs
1// ============================================================================
2// ADAA — Antiderivative Anti-Aliasing for Nonlinear Processors
3// ============================================================================
4//
5// Implements 1st-order and 2nd-order ADAA as described in:
6// Parker et al., "Reducing the Aliasing of Nonlinear Waveshaping Using
7// Continuous-Time Convolution" (DAFx-16)
8//
9// The core idea: instead of evaluating f(x) directly (which aliases),
10// compute (AD1(x[n]) - AD1(x[n-1])) / (x[n] - x[n-1]) where AD1 is the
11// antiderivative of f. This effectively applies a moving-average anti-alias
12// filter across the nonlinearity.
13//
14// HARD RULES:
15// - No allocations in process()
16// - f64 intermediates for numerical stability in the division
17// - Fallback to f(x_mid) when consecutive samples are near-identical
18
/// Threshold below which two consecutive samples are treated as equal.
/// The ADAA difference quotient (AD(x) - AD(x_prev)) / (x - x_prev) is
/// ill-conditioned for tiny denominators, so the processors fall back to
/// evaluating the raw nonlinearity at the midpoint instead (see process()).
const EPSILON: f64 = 1e-5;
20
21// ============================================================================
22// First-order ADAA
23// ============================================================================
24
/// First-order Antiderivative Anti-Aliasing processor.
///
/// Wraps a memoryless nonlinearity `f(x)` and its first antiderivative `AD1(x)`.
/// Produces significantly less aliasing than naive evaluation, at minimal CPU cost.
///
/// Output is y[n] = (AD1(x[n]) - AD1(x[n-1])) / (x[n] - x[n-1]) — the exact
/// average of f over the segment between consecutive input samples, which acts
/// as a first-order anti-aliasing filter applied across the nonlinearity.
pub struct Adaa1 {
    /// The nonlinearity f(x)
    f: fn(f64) -> f64,
    /// First antiderivative AD1(x) = integral of f(x)
    ad1: fn(f64) -> f64,
    /// Previous input sample x[n-1]; starts at 0.0 (silence)
    x_prev: f64,
}
37
38impl Adaa1 {
39 /// Create a new first-order ADAA processor.
40 pub fn new(f: fn(f64) -> f64, ad1: fn(f64) -> f64) -> Self {
41 Self {
42 f,
43 ad1,
44 x_prev: 0.0,
45 }
46 }
47
48 /// Process one sample.
49 #[inline]
50 pub fn process(&mut self, x: f32) -> f32 {
51 let x = x as f64;
52 let x_prev = self.x_prev;
53 self.x_prev = x;
54
55 let diff = x - x_prev;
56 if diff.abs() < EPSILON {
57 // Near-identical consecutive samples: fallback to f(midpoint)
58 (self.f)((x + x_prev) * 0.5) as f32
59 } else {
60 // ADAA1: (AD1(x) - AD1(x_prev)) / (x - x_prev)
61 (((self.ad1)(x) - (self.ad1)(x_prev)) / diff) as f32
62 }
63 }
64
65 /// Process a block of samples in-place.
66 #[inline]
67 pub fn process_block(&mut self, buffer: &mut [f32]) {
68 for sample in buffer.iter_mut() {
69 *sample = self.process(*sample);
70 }
71 }
72
73 /// Reset state.
74 pub fn reset(&mut self) {
75 self.x_prev = 0.0;
76 }
77}
78
79// ============================================================================
80// Second-order ADAA
81// ============================================================================
82
/// Second-order Antiderivative Anti-Aliasing processor.
///
/// Uses the second antiderivative for even better alias suppression,
/// at the cost of one additional sample of latency and slight HF rolloff.
///
/// Maintains two samples of input history plus the previous first-level
/// finite difference D1, combined into a second-order difference in process().
pub struct Adaa2 {
    /// The nonlinearity f(x)
    f: fn(f64) -> f64,
    /// First antiderivative (used as the analytic limit of D1 when inputs coincide)
    ad1: fn(f64) -> f64,
    /// Second antiderivative AD2(x) = integral of AD1(x)
    ad2: fn(f64) -> f64,
    /// Two previous samples: x[n-1] and x[n-2]
    x_prev1: f64,
    x_prev2: f64,
    /// Previous D1 value for the second-order finite difference
    d1_prev: f64,
}
100
101impl Adaa2 {
102 pub fn new(f: fn(f64) -> f64, ad1: fn(f64) -> f64, ad2: fn(f64) -> f64) -> Self {
103 Self {
104 f,
105 ad1,
106 ad2,
107 x_prev1: 0.0,
108 x_prev2: 0.0,
109 d1_prev: 0.0,
110 }
111 }
112
113 #[inline]
114 fn compute_d1(&self, x: f64, x_prev: f64) -> f64 {
115 let diff = x - x_prev;
116 if diff.abs() < EPSILON {
117 (self.ad1)((x + x_prev) * 0.5)
118 } else {
119 ((self.ad2)(x) - (self.ad2)(x_prev)) / diff
120 }
121 }
122
123 /// Process one sample. Introduces 0.5 sample latency.
124 #[inline]
125 pub fn process(&mut self, x: f32) -> f32 {
126 let x = x as f64;
127 let x_prev1 = self.x_prev1;
128
129 let d1 = self.compute_d1(x, x_prev1);
130
131 let diff = (x - self.x_prev2) * 0.5;
132 let result = if diff.abs() < EPSILON {
133 (self.f)((x + self.x_prev2) * 0.25 + x_prev1 * 0.5)
134 } else {
135 (d1 - self.d1_prev) / diff
136 };
137
138 self.x_prev2 = self.x_prev1;
139 self.x_prev1 = x;
140 self.d1_prev = d1;
141
142 result as f32
143 }
144
145 pub fn process_block(&mut self, buffer: &mut [f32]) {
146 for sample in buffer.iter_mut() {
147 *sample = self.process(*sample);
148 }
149 }
150
151 pub fn reset(&mut self) {
152 self.x_prev1 = 0.0;
153 self.x_prev2 = 0.0;
154 self.d1_prev = 0.0;
155 }
156}
157
158// ============================================================================
159// Pre-built ADAA processors for common saturation functions
160// ============================================================================
161
// --- tanh ---

/// Memoryless tanh saturator: f(x) = tanh(x).
fn tanh_f(x: f64) -> f64 {
    f64::tanh(x)
}
/// First antiderivative of tanh: AD1(x) = ln(cosh(x)).
///
/// Evaluated in an overflow-safe, even-symmetric form:
///     ln(cosh(x)) = |x| + ln(1 + e^(-2|x|)) - ln(2)
/// which stays finite even where cosh(x) itself would overflow.
fn tanh_ad1(x: f64) -> f64 {
    let a = x.abs();
    let softplus_tail = f64::ln_1p(f64::exp(-2.0 * a));
    (a + softplus_tail) - std::f64::consts::LN_2
}
/// Second antiderivative of tanh: AD2(x) = integral of ln(cosh(x)).
///
/// Closed form: AD2(x) = (1/2)[x^2 + Li_2(-e^{-2x})] - ln(2)*x + C.
/// The linear -(ln2)*x term does NOT cancel in ADAA2 finite differences.
///
/// Evaluated at |x| — where z = e^{-2|x|} lies in (0, 1] and cannot
/// overflow — then reflected using the fact that AD2(x) - AD2(0) is odd
/// (ln(cosh) is even). The naive formula computes e^{-2x} directly, which
/// overflows to infinity for x below about -354 and yields inf/NaN output.
fn tanh_ad2(x: f64) -> f64 {
    // AD2(0) = Li_2(-1)/2 = -pi^2/24. Keeping this integration constant
    // makes the reflected branch agree exactly with the direct formula.
    const AD2_AT_ZERO: f64 = -std::f64::consts::PI * std::f64::consts::PI / 24.0;
    let a = x.abs();
    let z = (-2.0 * a).exp(); // in (0, 1]: no overflow for any finite x
    let ad2_abs = 0.5 * (a * a + dilog_neg(z)) - std::f64::consts::LN_2 * a;
    if x >= 0.0 {
        ad2_abs
    } else {
        // Odd reflection about AD2(0): AD2(-a) = 2*AD2(0) - AD2(a)
        2.0 * AD2_AT_ZERO - ad2_abs
    }
}

/// Compute Li_2(-z) for z >= 0 to full f64 precision.
///
/// For z in (0, 1] this uses the Landen transformation
///     Li_2(-z) = -Li_2(w) - ln^2(1+z)/2,  w = z/(1+z) in (0, 1/2],
/// whose positive series sum w^k/k^2 converges geometrically (full f64
/// precision within ~60 terms). The direct alternating series converges
/// only like 1/k^2 near z = 1 (~2.5e-5 error after 200 terms).
/// For z > 1 the inversion identity maps the argument back into (0, 1).
fn dilog_neg(z: f64) -> f64 {
    if z < 1e-15 {
        // Li_2(-z) = -z + O(z^2); below this threshold 0 is within tolerance.
        return 0.0;
    }
    if z > 1.0 {
        // Inversion: Li_2(-z) = -Li_2(-1/z) - pi^2/6 - ln^2(z)/2
        let ln_z = z.ln();
        return -dilog_neg(1.0 / z)
            - std::f64::consts::PI * std::f64::consts::PI / 6.0
            - 0.5 * ln_z * ln_z;
    }
    // Landen transform: w = z/(1+z) in (0, 1/2]
    let w = z / (1.0 + z);
    let mut series = 0.0;
    let mut w_pow = 1.0;
    for k in 1..=64u32 {
        w_pow *= w;
        let term = w_pow / f64::from(k * k);
        series += term;
        if term < 1e-17 * series {
            break;
        }
    }
    let ln1p = z.ln_1p();
    -series - 0.5 * ln1p * ln1p
}
210
// --- soft clip: x / (1 + |x|) ---

/// Rational soft clipper: f(x) = x / (1 + |x|). Odd, smooth, bounded in (-1, 1).
fn softclip_f(x: f64) -> f64 {
    let denominator = 1.0 + x.abs();
    x / denominator
}
/// First antiderivative of x/(1+|x|).
///
/// For x >= 0: integral of x/(1+x) = x - ln(1+x) + C.
/// For x < 0:  integral of x/(1-x) = -x - ln(1-x) + C.
/// Both vanish at 0, so the combined even-symmetric form is
///     AD1(x) = |x| - ln(1 + |x|)   (>= 0 for all x).
///
/// Uses ln_1p(|x|) rather than (1.0 + |x|).ln(): the latter rounds the
/// argument before the log and loses all significant digits for small |x|,
/// where AD1(x) ~ x^2/2.
fn softclip_ad1(x: f64) -> f64 {
    let a = x.abs();
    a - a.ln_1p()
}
/// Second antiderivative of x/(1+|x|).
///
/// Since f is odd, AD1 is even, so AD2 (with AD2(0) = 0) must be odd.
/// For x >= 0: integral of (x - ln(1+x)) = x^2/2 - (1+x)ln(1+x) + x,
/// which is 0 at x = 0, so the odd extension is continuous.
///
/// Uses ln_1p(|x|) rather than (1.0 + |x|).ln() to keep precision for
/// small |x|, where AD2(x) ~ x^3/6.
fn softclip_ad2(x: f64) -> f64 {
    let a = x.abs();
    let magnitude = 0.5 * a * a - (1.0 + a) * a.ln_1p() + a;
    // magnitude >= 0 (integral of the non-negative AD1), so copysign
    // implements the odd extension exactly.
    magnitude.copysign(x)
}
233
// --- hard clip: clamp to [-1, 1] ---

/// Hard clipper: limits the input to the interval [-1, 1].
fn hardclip_f(x: f64) -> f64 {
    if x > 1.0 {
        1.0
    } else if x < -1.0 {
        -1.0
    } else {
        // Also reached for NaN (both comparisons false), matching clamp().
        x
    }
}
/// First antiderivative of clamp(x, -1, 1).
///
/// Even-symmetric (f is odd), expressed via a = |x|:
///   a <= 1:  x^2 / 2
///   a >  1:  a - 1/2   (value 1/2 and slope 1 at the join, so C1-continuous)
fn hardclip_ad1(x: f64) -> f64 {
    let a = x.abs();
    if a <= 1.0 {
        x * x * 0.5
    } else {
        a - 0.5
    }
}
/// Second antiderivative of clamp(x, -1, 1).
///
/// Odd-symmetric (f is odd, AD1 is even), expressed via a = |x|:
///   a <= 1:  x^3 / 6
///   a >  1:  sign(x) * (a^2/2 - a/2 + 1/6)   (matches 1/6 at the join)
fn hardclip_ad2(x: f64) -> f64 {
    let a = x.abs();
    if a <= 1.0 {
        x * x * x / 6.0
    } else {
        let outer = a * a * 0.5 - 0.5 * a + 1.0 / 6.0;
        if x > 0.0 { outer } else { -outer }
    }
}
267
/// Create a first-order ADAA processor for `tanh(x)` (soft saturation).
/// Uses the overflow-safe ln(cosh) antiderivative.
pub fn adaa1_tanh() -> Adaa1 {
    Adaa1::new(tanh_f, tanh_ad1)
}

/// Create a first-order ADAA processor for `x/(1+|x|)` (soft clip).
pub fn adaa1_softclip() -> Adaa1 {
    Adaa1::new(softclip_f, softclip_ad1)
}

/// Create a first-order ADAA processor for `clamp(x, -1, 1)` (hard clip).
pub fn adaa1_hardclip() -> Adaa1 {
    Adaa1::new(hardclip_f, hardclip_ad1)
}

/// Create a second-order ADAA processor for `tanh(x)`.
/// Stronger alias suppression than [`adaa1_tanh`], plus 0.5 sample latency.
pub fn adaa2_tanh() -> Adaa2 {
    Adaa2::new(tanh_f, tanh_ad1, tanh_ad2)
}

/// Create a second-order ADAA processor for `x/(1+|x|)`.
pub fn adaa2_softclip() -> Adaa2 {
    Adaa2::new(softclip_f, softclip_ad1, softclip_ad2)
}

/// Create a second-order ADAA processor for `clamp(x, -1, 1)`.
pub fn adaa2_hardclip() -> Adaa2 {
    Adaa2::new(hardclip_f, hardclip_ad1, hardclip_ad2)
}
297
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_tanh_ad1_identity() {
        // Verify AD1(tanh) = ln(cosh(x)) at several points. tanh_ad1 uses a
        // rearranged overflow-safe form, so check it against the naive formula.
        for &x in &[0.0_f64, 0.5, 1.0, 2.0, -1.0, -3.0] {
            let expected = x.cosh().ln();
            let actual = tanh_ad1(x);
            assert!(
                (actual - expected).abs() < 1e-10,
                "tanh_ad1({x}): expected {expected}, got {actual}"
            );
        }
    }

    #[test]
    fn test_softclip_ad1_identity() {
        // Verify AD1 via central-difference numerical differentiation:
        // d/dx AD1(x) must reproduce f(x) = x/(1+|x|).
        // AD1(x) = |x| - ln(1+|x|) is even; checking the derivative:
        //   x > 0: d/dx AD1 = 1 - 1/(1+x)    =  x/(1+x)   = f(x)
        //   x < 0: d/dx AD1 = -1 + 1/(1+|x|) = x/(1+|x|)  = f(x)
        let h = 1e-7;
        for &x in &[0.1, 0.5, 1.0, 2.0, -0.5, -2.0] {
            let numerical_derivative = (softclip_ad1(x + h) - softclip_ad1(x - h)) / (2.0 * h);
            let actual = softclip_f(x);
            assert!(
                (numerical_derivative - actual).abs() < 1e-4,
                "softclip AD1 derivative mismatch at x={x}: d/dx AD1={numerical_derivative}, f(x)={actual}"
            );
        }
    }

    #[test]
    fn test_adaa1_tanh_basic() {
        let mut adaa = adaa1_tanh();
        // Drive a ramp from -2 to +2 through the processor; the averaged
        // tanh output must stay within the saturation bounds.
        let mut outputs = Vec::new();
        for i in 0..100 {
            let x = (i as f32 - 50.0) / 25.0; // -2.0 to +2.0
            outputs.push(adaa.process(x));
        }
        // Output should be bounded by [-1, 1] (tiny tolerance for f32 rounding)
        for &y in &outputs {
            assert!(y.abs() <= 1.01, "Output out of bounds: {y}");
        }
    }

    #[test]
    fn test_adaa1_reduces_aliasing() {
        // Run naive tanh and ADAA1 tanh over a heavily driven near-Nyquist
        // sine. NOTE(review): this is a smoke test only — it does not measure
        // alias-band energy (that would require an FFT); it checks that the
        // ADAA path runs to completion and produces non-degenerate output.
        let sr = 48000.0;
        let freq = 15000.0; // Near Nyquist
        let drive = 5.0; // Heavy drive creates harmonics that alias
        let n = 4096;

        // Naive: f applied sample-by-sample, no anti-aliasing
        let naive_output: Vec<f32> = (0..n)
            .map(|i| {
                let t = i as f64 / sr;
                (drive * (2.0 * std::f64::consts::PI * freq * t).sin()).tanh() as f32
            })
            .collect();

        // ADAA1 on the same input signal
        let mut adaa = adaa1_tanh();
        let adaa_output: Vec<f32> = (0..n)
            .map(|i| {
                let t = i as f64 / sr;
                let x = (drive * (2.0 * std::f64::consts::PI * freq * t).sin()) as f32;
                adaa.process(x)
            })
            .collect();

        // Total energy as a crude sanity proxy (a real alias measurement
        // would compare spectra below the fold-back frequency).
        let naive_energy: f32 = naive_output.iter().map(|x| x * x).sum();
        let adaa_energy: f32 = adaa_output.iter().map(|x| x * x).sum();

        // Both paths must produce signal — the key assertions here are
        // "no crash, no silence, bounded output".
        assert!(adaa_energy > 0.0, "ADAA produced silence");
        assert!(naive_energy > 0.0, "Naive produced silence");
    }

    #[test]
    fn test_adaa1_reset() {
        let mut adaa = adaa1_tanh();
        adaa.process(1.0);
        adaa.reset();
        // After reset, x_prev is back to 0, so processing 0 hits the
        // midpoint fallback and should give near-0
        let out = adaa.process(0.0);
        assert!(out.abs() < 0.01);
    }

    #[test]
    fn test_adaa2_tanh_bounded() {
        // Ramp well past the saturation region; ADAA2 output should stay
        // within a loose bound (slight overshoot is expected from the
        // second-order smoothing, hence 2.0 rather than 1.0).
        let mut adaa = adaa2_tanh();
        for i in 0..200 {
            let x = (i as f32 - 100.0) / 30.0;
            let y = adaa.process(x);
            assert!(y.abs() < 2.0, "ADAA2 output unbounded: {y} at x={x}");
        }
    }

    #[test]
    fn test_hardclip_adaa1() {
        let mut adaa = adaa1_hardclip();
        // Ramp through the clipping region — skip the first few samples
        // (startup transient from the implicit x_prev = 0 history)
        let mut outputs = Vec::new();
        for i in 0..100 {
            let x = (i as f32 - 50.0) / 25.0;
            outputs.push(adaa.process(x));
        }
        // After the initial transient, outputs should be bounded
        for (i, &y) in outputs.iter().enumerate().skip(5) {
            assert!(
                y.abs() <= 1.5,
                "Hard clip ADAA output too large at i={i}: {y}"
            );
        }
    }

    #[test]
    fn test_consecutive_identical_samples() {
        let mut adaa = adaa1_tanh();
        // Feeding the same value repeatedly exercises the near-zero-diff
        // fallback path every sample; it must never produce NaN or infinity
        for _ in 0..100 {
            let y = adaa.process(0.5);
            assert!(y.is_finite(), "Non-finite output: {y}");
        }
    }

    #[test]
    fn test_dilog_neg_basic() {
        // Li_2(0) = 0
        assert!((dilog_neg(0.0)).abs() < 1e-15);
        // Li_2(-1) = -pi^2/12. Loose 1e-4 tolerance accommodates truncation
        // error of the series evaluation near z = 1.
        let expected = -std::f64::consts::PI * std::f64::consts::PI / 12.0;
        let actual = dilog_neg(1.0);
        assert!(
            (actual - expected).abs() < 1e-4,
            "Li_2(-1): expected {expected}, got {actual}"
        );
    }
}
447}