dsfb_rf/attractor.rs
1//! Delay-coordinate attractor embedding and Koopman operator proxy.
2//!
3//! ## Theoretical Basis
4//!
5//! **Takens' Embedding Theorem (1981):** A scalar time-series {x(t)} generated by
6//! a d-dimensional dynamical system can be reconstructed (diffeomorphically) in
7//! ℝ^m using delay coordinates [x(t), x(t−τ), x(t−2τ), …, x(t−(m−1)τ)] when
//! m ≥ 2d + 1 (the Takens/Whitney bound). For RF residual norms the effective
//! dynamics are low-dimensional; this module uses the pragmatic choice m = 2,
//! τ = 2 — below the worst-case bound for d = 2, but sufficient to separate
//! space-filling noise from low-dimensional structure.
10//!
11//! **Correlation Dimension D₂ (Grassberger & Procaccia 1983):**
12//! ```text
13//! C(r) = lim_{N→∞} (2 / N(N-1)) · #{pairs (i,j) : ||xᵢ − xⱼ|| < r}
14//! D₂ = lim_{r→0} log C(r) / log r
15//! ```
16//! Pure noise produces D₂ → m (fills the embedding space).
17//! A low-dimensional attractor produces D₂ ≪ m.
18//!
19//! **Koopman Operator Proxy (Mezić 2005):** The spectral analysis of the linear
20//! Koopman operator K acting on observables f(x) → f(F(x)) is approximated here
21//! by the variance-to-mean ratio of the embedding trajectory — a computationally
22//! tractable proxy that distinguishes stochastic diffusion (high VM) from
23//! structured orbits (low VM, persistent modes).
24//!
25//! ## Design
26//!
27//! - `no_std`, `no_alloc`, zero `unsafe`
28//! - Fixed-capacity circular buffer W for delay-coordinate history
29//! - O(W²) correlation dimension per query — calibration-path only (W ≤ 64)
30//!
31//! ## References
32//!
33//! Takens, F. (1981) "Detecting strange attractors in turbulence," *Lecture Notes
34//! in Mathematics* 898:366–381. Springer. doi:10.1007/BFb0091924.
35//!
36//! Grassberger, P. and Procaccia, I. (1983) "Measuring the strangeness of strange
37//! attractors," *Physica D* 9:189–208. doi:10.1016/0167-2789(83)90298-1.
38//!
39//! Mezić, I. (2005) "Spectral properties of dynamical systems, model reduction and
40//! decompositions," *Nonlinear Dynamics* 41:309–325. doi:10.1007/s11071-005-2824-x.
41
42use crate::math::{mean_f32, sqrt_f32};
43
44// ── Attractor State ────────────────────────────────────────────────────────
45
/// Classification of the reconstructed residual attractor.
///
/// Produced by [`DelayEmbedding::analyse`] from the correlation-dimension
/// estimate D₂ and the Koopman variance-to-mean proxy.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NoiseAttractorState {
    /// Residuals fill the embedding space: purely stochastic, no hidden structure.
    /// D₂ ≈ embedding dimension. Koopman modes absent.
    StochasticBall,
    /// Residuals form a bounded, recurring orbit: hidden determinism present.
    /// D₂ < embedding dimension. Persistent Koopman modes detected.
    StructuredOrbit,
    /// Residuals have collapsed to a lower-dimensional manifold: systematic drift
    /// or near-periodic interference pattern dominating the residual process.
    CollapsedAttractor,
}
59
/// Output of an attractor analysis pass.
#[derive(Debug, Clone, Copy)]
pub struct AttractorResult {
    /// Grassberger-Procaccia correlation dimension estimate D₂.
    /// Ideally in [0, 2] for the 2-dimensional delay embedding; the
    /// two-radius estimator clamps its raw slope to [0, 4] and saturates
    /// degenerate pair counts at 2.0 (see `correlation_dimension_gp`).
    pub correlation_dimension: f32,
    /// Koopman proxy: variance-to-mean ratio of the windowed residual norms.
    /// High (> 1.0) → stochastic; Low (< 0.3) → structured modes.
    pub koopman_proxy: f32,
    /// Inferred attractor state classification.
    pub state: NoiseAttractorState,
    /// Number of delay-coordinate *points* used for D₂ estimation;
    /// the pair comparison covers n_pairs·(n_pairs − 1)/2 distances.
    pub n_pairs: usize,
}
74
75// ── Delay Embedding ────────────────────────────────────────────────────────
76
/// Delay-coordinate embedding estimator.
///
/// Maintains a circular buffer of W residual norms and computes the
/// 2D delay embedding [(x(t), x(t−τ))] across the window.
///
/// ## Type Parameters
/// - `W`: Embedding window size (≥ 4, ≤ 64 recommended). `W = 0` would
///   panic in `push` (modulo by zero) — do not instantiate with 0.
pub struct DelayEmbedding<const W: usize> {
    /// Circular buffer of residual norms; slots not yet written hold 0.0.
    buf: [f32; W],
    /// Write head: index of the NEXT slot to be overwritten.
    head: usize,
    /// Number of samples absorbed (saturates at W).
    count: usize,
    /// Delay lag τ, in samples, between the two embedding coordinates.
    pub tau: usize,
}
94
95impl<const W: usize> DelayEmbedding<W> {
96 /// Create a new estimator with delay lag τ.
97 pub const fn new(tau: usize) -> Self {
98 Self { buf: [0.0; W], head: 0, count: 0, tau }
99 }
100
101 /// Absorb one residual norm sample.
102 pub fn push(&mut self, norm: f32) {
103 self.buf[self.head] = norm;
104 self.head = (self.head + 1) % W;
105 if self.count < W { self.count += 1; }
106 }
107
108 /// Number of valid samples in the buffer.
109 pub fn len(&self) -> usize { self.count }
110
111 /// Whether the buffer has enough samples for analysis.
112 pub fn is_ready(&self) -> bool { self.count >= W }
113
114 /// Reset the buffer.
115 pub fn reset(&mut self) {
116 self.buf = [0.0; W];
117 self.head = 0;
118 self.count = 0;
119 }
120
121 /// Access sample at position `i` relative to the most-recent sample
122 /// (0 = latest, 1 = one step back, etc.).
123 fn sample(&self, i: usize) -> f32 {
124 let idx = (self.head + W - 1 - i) % W;
125 self.buf[idx]
126 }
127
128 /// Compute attractor analysis over the current window.
129 ///
130 /// Returns `None` if the buffer contains fewer than `tau + 4` valid samples.
131 pub fn analyse(&self, r1: f32, r2: f32) -> Option<AttractorResult> {
132 if self.count < self.tau + 4 { return None; }
133 let n = self.count.min(W);
134 let usable = if n > self.tau { n - self.tau } else { return None; };
135 if usable < 3 { return None; }
136 let n_pairs = usable;
137
138 let (c_r1, c_r2, total_pairs) = self.count_pairs_in_radii(n_pairs, r1, r2);
139 let d2 = correlation_dimension_gp(c_r1, c_r2, total_pairs, r1, r2);
140 let koopman_proxy = self.koopman_variance_to_mean(n);
141 let state = classify_attractor_state(d2, koopman_proxy);
142
143 Some(AttractorResult {
144 correlation_dimension: d2,
145 koopman_proxy,
146 state,
147 n_pairs,
148 })
149 }
150
151 fn count_pairs_in_radii(&self, n_pairs: usize, r1: f32, r2: f32) -> (u32, u32, u32) {
152 let mut c_r1: u32 = 0;
153 let mut c_r2: u32 = 0;
154 let mut total_pairs: u32 = 0;
155 for i in 0..n_pairs {
156 let xi = self.sample(i);
157 let xi_d = self.sample(i + self.tau);
158 for j in (i + 1)..n_pairs {
159 let xj = self.sample(j);
160 let xj_d = self.sample(j + self.tau);
161 let dx = xi - xj;
162 let dy = xi_d - xj_d;
163 let dist = sqrt_f32(dx * dx + dy * dy);
164 if dist < r1 { c_r1 += 1; c_r2 += 1; }
165 else if dist < r2 { c_r2 += 1; }
166 total_pairs += 1;
167 }
168 }
169 (c_r1, c_r2, total_pairs)
170 }
171
172 fn koopman_variance_to_mean(&self, n: usize) -> f32 {
173 let mut norms_buf = [0.0_f32; W];
174 for i in 0..n {
175 norms_buf[i] = self.sample(i);
176 }
177 let slice = &norms_buf[..n];
178 let m = mean_f32(slice);
179 let var = slice.iter().map(|&x| (x - m) * (x - m)).sum::<f32>() / n as f32;
180 if m > 1e-6 { var / m } else { 0.0 }
181 }
182}
183
184fn correlation_dimension_gp(c_r1: u32, c_r2: u32, total_pairs: u32, r1: f32, r2: f32) -> f32 {
185 if total_pairs == 0 { return 2.0; }
186 if c_r1 == total_pairs { return 0.0; }
187 if c_r1 < 2 || c_r2 < 2 || r1 <= 0.0 || r2 <= r1 { return 2.0; }
188 let fr1 = c_r1 as f32 / total_pairs as f32;
189 let fr2 = c_r2 as f32 / total_pairs as f32;
190 if fr1 <= 0.0 || fr2 <= fr1 { return 2.0; }
191 let log_ratio_c = log2_approx(fr2 / fr1);
192 let log_ratio_r = log2_approx(r2 / r1);
193 (log_ratio_c / log_ratio_r).max(0.0).min(4.0)
194}
195
196fn classify_attractor_state(d2: f32, koopman_proxy: f32) -> NoiseAttractorState {
197 if d2 < 0.6 {
198 NoiseAttractorState::CollapsedAttractor
199 } else if d2 < 1.4 && koopman_proxy < 0.5 {
200 NoiseAttractorState::StructuredOrbit
201 } else {
202 NoiseAttractorState::StochasticBall
203 }
204}
205
impl<const W: usize> Default for DelayEmbedding<W> {
    /// Default estimator uses τ = 2, matching the module-level
    /// recommendation (m = 2, τ = 2) for RF residual norms.
    fn default() -> Self { Self::new(2) }
}
209
210// ── Private helpers ────────────────────────────────────────────────────────
211
/// log₂(x) for finite x > 0, without `std` float intrinsics.
///
/// Range-reduces x to y ∈ [1, 2) by powers of two (collecting the integer
/// exponent), then evaluates the first three terms of the atanh series
/// ln(y) = 2·atanh(u) = 2·(u + u³/3 + u⁵/5 + …) with u = (y − 1)/(y + 1),
/// and converts via log₂(y) = ln(y)/ln 2. With |u| ≤ 1/3 on [1, 2) the
/// omitted u⁷ term is below ~2e-4; accurate to < 0.5% for x ∈ [0.01, 100].
///
/// Out-of-domain inputs saturate: x ≤ 0 → −100.0 (large-negative
/// sentinel so downstream ratios stay finite); non-finite x → NaN.
fn log2_approx(x: f32) -> f32 {
    if x <= 0.0 {
        return -100.0;
    }
    if !x.is_finite() {
        // NaN / +∞ have no meaningful logarithm. The previous version's
        // arithmetic also yielded NaN here, but only because a 300-iteration
        // loop cap prevented the range reduction from spinning forever on +∞;
        // an explicit guard makes the `while` loops below provably terminate.
        return f32::NAN;
    }
    // Reduce to y ∈ [1, 2); `exp` accumulates ⌊log₂(x)⌋. Terminates for all
    // finite positive x (subnormals need at most 149 doublings).
    let mut y = x;
    let mut exp = 0i32;
    while y >= 2.0 {
        y *= 0.5;
        exp += 1;
    }
    while y < 1.0 {
        y *= 2.0;
        exp -= 1;
    }
    // Truncated atanh series for ln(y) on [1, 2), then convert to base 2.
    let u = (y - 1.0) / (y + 1.0);
    let u2 = u * u;
    let ln_y = 2.0 * u * (1.0 + u2 / 3.0 + u2 * u2 / 5.0);
    let ln2 = 0.693_147_f32;
    exp as f32 + ln_y / ln2
}
238
239// ── Tests ──────────────────────────────────────────────────────────────────
240
#[cfg(test)]
mod tests {
    use super::*;

    // Buffer lifecycle: `count` saturates at W and gates `is_ready`.
    #[test]
    fn push_fills_buffer() {
        let mut emb = DelayEmbedding::<8>::new(2);
        assert!(!emb.is_ready());
        for i in 0..8 { emb.push(i as f32 * 0.01); }
        assert!(emb.is_ready());
    }

    // `reset` must return the estimator to the freshly-constructed state.
    #[test]
    fn reset_clears_buffer() {
        let mut emb = DelayEmbedding::<8>::new(2);
        for _ in 0..8 { emb.push(1.0); }
        emb.reset();
        assert_eq!(emb.len(), 0);
        assert!(!emb.is_ready());
    }

    #[test]
    fn constant_signal_collapsed_attractor() {
        let mut emb = DelayEmbedding::<32>::new(2);
        for _ in 0..32 { emb.push(0.1_f32); }
        // Constant → all pairs at distance 0 → very high C(r1) → D₂ → 0 → Collapsed
        let res = emb.analyse(0.05, 0.2).unwrap();
        assert_eq!(res.state, NoiseAttractorState::CollapsedAttractor,
            "constant signal: D₂={:.3}", res.correlation_dimension);
    }

    #[test]
    fn stochastic_signal_fills_space() {
        // Widely varying norms span the embedding space: D₂ should be higher
        let mut emb = DelayEmbedding::<32>::new(2);
        // LCG pseudo-random (deterministic seed keeps the test reproducible)
        let mut s: u32 = 12345;
        for _ in 0..32 {
            s = s.wrapping_mul(1664525).wrapping_add(1013904223);
            let v = (s >> 16) as f32 / 65535.0;
            emb.push(v);
        }
        let res = emb.analyse(0.05, 0.3).unwrap();
        // Stochastic → D₂ tends toward 2.0; at minimum, state should not be Collapsed
        assert_ne!(res.state, NoiseAttractorState::CollapsedAttractor,
            "random norms: D₂={:.3}", res.correlation_dimension);
    }

    // Below the `tau + 4` minimum, `analyse` must refuse to estimate.
    #[test]
    fn not_enough_samples_returns_none() {
        let emb = DelayEmbedding::<32>::new(2);
        assert!(emb.analyse(0.05, 0.2).is_none());
    }

    // Exact powers of two exercise the range-reduction path directly.
    #[test]
    fn log2_approx_accuracy() {
        assert!((log2_approx(1.0) - 0.0).abs() < 0.01, "log₂(1)=0");
        assert!((log2_approx(2.0) - 1.0).abs() < 0.02, "log₂(2)=1");
        assert!((log2_approx(4.0) - 2.0).abs() < 0.02, "log₂(4)=2");
        assert!((log2_approx(0.5) + 1.0).abs() < 0.02, "log₂(0.5)=-1");
    }
}
302}