// topological_coherence — src/lib.rs
1//! # Topological Coherence
2//!
3//! Rust implementation of topological constraints for coherent inference.
4//! Based on: "Topological Constraints for Coherent Language Models" (Cormier, 2026)
5//!
6//! ## Theory
7//!
8//! Hallucination is a geometry problem: unconstrained latent dynamics permit
9//! arbitrary drift through latent space. Topological constraints (specifically
10//! toroidal manifolds with constant spectral gap) bound this drift.
11//!
12//! ## Hierarchy
13//!
14//! ```text
15//! mHC (Birkhoff) ⊂ ERLHS (Hamiltonian) ⊂ Karmonic (Toroidal + Spectral)
16//! ```
17//!
18//! ## Key Results (from validation experiment)
19//!
20//! | Condition | Drift Rate | Interpretation |
21//! |-----------|------------|----------------|
22//! | Toroidal  | 0.006      | 40% lower than baseline |
23//! | Random    | 0.167      | 28x worse (proves topology matters) |
24//!
25//! ## Usage
26//!
27//! ```rust,ignore
28//! use topological_coherence::{Tonnetz, ToroidalMask};
29//!
30//! // Create 12x12 Tonnetz (standard musical topology)
31//! let tonnetz = Tonnetz::<12>::new();
32//!
33//! // Distance on torus
34//! let d = tonnetz.distance((0, 0), (5, 7));
35//!
36//! // Attention mask with locality radius 2
37//! let mask = ToroidalMask::new(64, 2.0, 1.0);
38//! ```
39
40#![cfg_attr(not(feature = "std"), no_std)]
41
42use libm::{expf, cosf, fabsf};
43
44// Substrate SCALE codec support
45#[cfg(feature = "substrate")]
46use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
47#[cfg(feature = "substrate")]
48use scale_info::TypeInfo;
49
50// =============================================================================
51// Implementation Status
52// =============================================================================
53//
54// Phase 1: Core Topology — COMPLETE
55//   Tonnetz struct, toroidal distance, attention masks, spectral gap, property tests
56//
57// Phase 2: Mask Variants — COMPLETE
58//   Hard cutoff, soft exponential, hybrid, Sinkhorn-Knopp projection
59//
60// Phase 3: Integration Points — COMPLETE
61//   Substrate SCALE codec types, CoherenceConfig, CoherenceResult, ToroidalPosition
62//
63// Phase 4: Advanced Features — COMPLETE
64//   Learned toroidal projection, adjacency loss, multi-scale Tonnetz, 3D torus (T^3),
65//   sparse CSR mask format
66//
67// Phase 5: Grounding Projector — COMPLETE
68//   Orthogonal evidence projection G = A(AᵀA)⁻¹Aᵀ, hallucination scoring,
69//   vector decomposition into grounded and hallucinated components
70//
71// References:
72//   Paper: https://github.com/Paraxiom/topological-coherence
73//   ERLHS: DOI 10.5281/zenodo.17928909
74//   Karmonic: DOI 10.5281/zenodo.17928991
75//
76// =============================================================================
77
/// Mask type variants for different use cases.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "substrate", derive(Encode, Decode, MaxEncodedLen, TypeInfo))]
pub enum MaskType {
    /// Hard cutoff: M(i,j) = 1 if d <= r, else 0.
    /// Binary locality — attention is either fully allowed or fully blocked.
    HardCutoff,
    /// Soft exponential: M(i,j) = exp(-α * d).
    /// Smooth decay with distance; no hard locality boundary.
    SoftExponential,
    /// Hybrid: 1 if d <= r, else exp(-α * (d - r)).
    /// Full attention inside radius r, exponential falloff outside.
    Hybrid,
}
89
/// Tonnetz topology on a 2D torus of size N x N.
///
/// The Tonnetz is a toroidal lattice where:
/// - Horizontal edges connect by perfect fifths
/// - Vertical edges connect by major thirds
/// - Diagonal edges connect by minor thirds
///
/// We use it as a constructive existence proof of a low-genus manifold
/// with constant spectral gap λ₁ = Θ(1) for fixed N.
///
/// Zero-sized marker type: the grid size is carried entirely in the const
/// generic `N`, so instances cost nothing at runtime.
#[derive(Debug, Clone, Copy)]
pub struct Tonnetz<const N: usize>;
101
102impl<const N: usize> Tonnetz<N> {
103    /// Create a new Tonnetz topology.
104    pub const fn new() -> Self {
105        Self
106    }
107
108    /// Grid size (N x N torus).
109    pub const fn size(&self) -> usize {
110        N
111    }
112
113    /// Total number of positions on the torus.
114    pub const fn total_positions(&self) -> usize {
115        N * N
116    }
117
118    /// Convert linear index to 2D torus coordinates.
119    #[inline]
120    pub const fn to_coords(index: usize) -> (usize, usize) {
121        (index / N, index % N)
122    }
123
124    /// Convert 2D torus coordinates to linear index.
125    #[inline]
126    pub const fn to_index(row: usize, col: usize) -> usize {
127        (row % N) * N + (col % N)
128    }
129
130    /// Toroidal distance between two positions.
131    ///
132    /// Uses L1 (Manhattan) distance with wraparound:
133    /// d_T(a, b) = min(|a.0 - b.0|, N - |a.0 - b.0|) + min(|a.1 - b.1|, N - |a.1 - b.1|)
134    #[inline]
135    pub fn distance(a: (usize, usize), b: (usize, usize)) -> usize {
136        let dx = a.0.abs_diff(b.0);
137        let dy = a.1.abs_diff(b.1);
138
139        let dx_wrap = if dx > N / 2 { N - dx } else { dx };
140        let dy_wrap = if dy > N / 2 { N - dy } else { dy };
141
142        dx_wrap + dy_wrap
143    }
144
145    /// Distance between two linear indices.
146    #[inline]
147    pub fn distance_linear(i: usize, j: usize) -> usize {
148        Self::distance(Self::to_coords(i), Self::to_coords(j))
149    }
150
151    /// First non-trivial eigenvalue of the torus Laplacian (spectral gap).
152    ///
153    /// For a d-dimensional torus T^d_N:
154    /// λ₁ = 2 - 2cos(2π/N) = Θ(1) for fixed N
155    ///
156    /// This is the key property: spectral gap remains constant regardless
157    /// of how we scale the embedding dimension.
158    pub fn spectral_gap() -> f32 {
159        let pi = core::f32::consts::PI;
160        2.0 - 2.0 * cosf(2.0 * pi / N as f32)
161    }
162
163    /// Spectral gap decay rate for non-resonant modes.
164    ///
165    /// Non-resonant modes decay as e^(-λ₁ * t).
166    pub fn decay_rate(t: f32) -> f32 {
167        expf(-Self::spectral_gap() * t)
168    }
169}
170
171impl<const N: usize> Default for Tonnetz<N> {
172    fn default() -> Self {
173        Self::new()
174    }
175}
176
/// Toroidal attention mask generator.
///
/// Creates masks according to Eq. 17 from the paper:
/// ```text
/// M_Tonnetz(i, j) = 1                        if d_Tonnetz(i, j) ≤ r
///                   exp(-α · d_Tonnetz(i,j)) otherwise
/// ```
#[derive(Debug, Clone)]
pub struct ToroidalMask {
    /// Sequence length (mask is seq_len × seq_len)
    pub seq_len: usize,
    /// Locality radius (hard cutoff); unused by `MaskType::SoftExponential`
    pub radius: f32,
    /// Decay rate for soft falloff; unused by `MaskType::HardCutoff`
    pub alpha: f32,
    /// Grid size for Tonnetz mapping (sequence positions wrap onto this grid)
    pub grid_size: usize,
    /// Mask type variant
    pub mask_type: MaskType,
}
197
198impl ToroidalMask {
199    /// Create a new toroidal mask configuration (hybrid by default).
200    pub fn new(seq_len: usize, radius: f32, alpha: f32) -> Self {
201        Self::with_grid(seq_len, radius, alpha, 12)
202    }
203
204    /// Create with custom grid size.
205    pub fn with_grid(seq_len: usize, radius: f32, alpha: f32, grid_size: usize) -> Self {
206        Self {
207            seq_len,
208            radius,
209            alpha,
210            grid_size,
211            mask_type: MaskType::Hybrid,
212        }
213    }
214
215    /// Create hard cutoff mask: M(i,j) = 1 if d <= r, else 0
216    pub fn hard_cutoff(seq_len: usize, radius: f32, grid_size: usize) -> Self {
217        Self {
218            seq_len,
219            radius,
220            alpha: 0.0,
221            grid_size,
222            mask_type: MaskType::HardCutoff,
223        }
224    }
225
226    /// Create soft exponential mask: M(i,j) = exp(-α * d)
227    pub fn soft_exponential(seq_len: usize, alpha: f32, grid_size: usize) -> Self {
228        Self {
229            seq_len,
230            radius: 0.0,
231            alpha,
232            grid_size,
233            mask_type: MaskType::SoftExponential,
234        }
235    }
236
237    /// Compute toroidal distance between two sequence positions.
238    fn toroidal_distance(&self, i: usize, j: usize) -> f32 {
239        let n = self.grid_size;
240        let pos_i = (i % n, (i / n) % n);
241        let pos_j = (j % n, (j / n) % n);
242
243        let dx = pos_i.0.abs_diff(pos_j.0);
244        let dy = pos_i.1.abs_diff(pos_j.1);
245
246        let dx_wrap = if dx > n / 2 { n - dx } else { dx };
247        let dy_wrap = if dy > n / 2 { n - dy } else { dy };
248        (dx_wrap + dy_wrap) as f32
249    }
250
251    /// Compute mask value for position pair (i, j).
252    pub fn value(&self, i: usize, j: usize) -> f32 {
253        let dist = self.toroidal_distance(i, j);
254
255        match self.mask_type {
256            MaskType::HardCutoff => {
257                if dist <= self.radius { 1.0 } else { 0.0 }
258            }
259            MaskType::SoftExponential => {
260                expf(-self.alpha * dist)
261            }
262            MaskType::Hybrid => {
263                if dist <= self.radius {
264                    1.0
265                } else {
266                    expf(-self.alpha * (dist - self.radius))
267                }
268            }
269        }
270    }
271
272    /// Generate full mask matrix.
273    #[cfg(feature = "std")]
274    pub fn generate(&self) -> Vec<Vec<f32>> {
275        (0..self.seq_len)
276            .map(|i| (0..self.seq_len).map(|j| self.value(i, j)).collect())
277            .collect()
278    }
279
280    /// Generate and apply Sinkhorn-Knopp to make doubly-stochastic.
281    #[cfg(feature = "std")]
282    pub fn generate_doubly_stochastic(&self, iterations: usize) -> Vec<Vec<f32>> {
283        let mask = self.generate();
284        sinkhorn_knopp(mask, iterations)
285    }
286}
287
/// Sinkhorn-Knopp algorithm: project matrix to doubly-stochastic.
///
/// Alternately normalizes rows and columns until convergence.
/// A doubly-stochastic matrix has all rows and columns sum to 1.
///
/// This is used for mHC-style constraints (Birkhoff polytope projection).
/// Assumes a square matrix; rows/columns summing below the epsilon are
/// left untouched to avoid division blow-up.
#[cfg(feature = "std")]
pub fn sinkhorn_knopp(mut matrix: Vec<Vec<f32>>, iterations: usize) -> Vec<Vec<f32>> {
    let n = matrix.len();
    if n == 0 {
        return matrix;
    }

    // Sums at or below this are treated as zero and skipped.
    const EPS: f32 = 1e-10;

    for _ in 0..iterations {
        // Row normalization pass.
        for row in matrix.iter_mut() {
            let total: f32 = row.iter().copied().sum();
            if total > EPS {
                row.iter_mut().for_each(|v| *v /= total);
            }
        }

        // Column normalization pass.
        for col in 0..n {
            let total: f32 = matrix.iter().map(|r| r[col]).sum();
            if total > EPS {
                for r in matrix.iter_mut() {
                    r[col] /= total;
                }
            }
        }
    }

    matrix
}
325
/// Check if matrix is approximately doubly-stochastic.
///
/// Returns `true` when every row sum and every column sum is within
/// `tolerance` of 1.0. An empty matrix is trivially doubly-stochastic.
#[cfg(feature = "std")]
pub fn is_doubly_stochastic(matrix: &[Vec<f32>], tolerance: f32) -> bool {
    let n = matrix.len();
    if n == 0 {
        return true;
    }

    // Every row must sum to 1 within tolerance.
    let rows_ok = matrix
        .iter()
        .all(|row| fabsf(row.iter().sum::<f32>() - 1.0) <= tolerance);

    // Every column must sum to 1 within tolerance (square matrix assumed).
    let cols_ok = (0..n).all(|j| {
        let sum: f32 = matrix.iter().map(|row| row[j]).sum();
        fabsf(sum - 1.0) <= tolerance
    });

    rows_ok && cols_ok
}
352
/// Drift measurement utilities.
///
/// Drift rate = fraction of transitions where d_Tonnetz(pred, target) > threshold.
pub struct DriftMeter {
    /// Distance threshold for "drift" classification (strictly greater counts as drift)
    pub threshold: usize,
    /// Number of transitions measured
    pub count: usize,
    /// Number of drifts detected (always <= count)
    pub drifts: usize,
}
364
365impl DriftMeter {
366    /// Create a new drift meter with given threshold.
367    pub fn new(threshold: usize) -> Self {
368        Self {
369            threshold,
370            count: 0,
371            drifts: 0,
372        }
373    }
374
375    /// Record a transition from predicted to target position.
376    pub fn record<const N: usize>(&mut self, pred: usize, target: usize) {
377        let dist = Tonnetz::<N>::distance_linear(pred, target);
378        self.count += 1;
379        if dist > self.threshold {
380            self.drifts += 1;
381        }
382    }
383
384    /// Current drift rate (0.0 to 1.0).
385    pub fn rate(&self) -> f32 {
386        if self.count == 0 {
387            0.0
388        } else {
389            self.drifts as f32 / self.count as f32
390        }
391    }
392
393    /// Reset measurements.
394    pub fn reset(&mut self) {
395        self.count = 0;
396        self.drifts = 0;
397    }
398}
399
400// =============================================================================
401// Substrate Integration Types
402// =============================================================================
403
/// Position on the Tonnetz torus (for on-chain storage).
///
/// Stores coordinates as u8 to minimize storage cost.
/// Supports grids up to 255x255.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(feature = "substrate", derive(Encode, Decode, MaxEncodedLen, TypeInfo))]
pub struct ToroidalPosition {
    /// Row coordinate (expected in 0..N; caller-enforced)
    pub row: u8,
    /// Column coordinate (expected in 0..N; caller-enforced)
    pub col: u8,
}
416
417impl ToroidalPosition {
418    /// Create a new position.
419    pub const fn new(row: u8, col: u8) -> Self {
420        Self { row, col }
421    }
422
423    /// Convert to tuple.
424    pub const fn as_tuple(&self) -> (usize, usize) {
425        (self.row as usize, self.col as usize)
426    }
427
428    /// Distance to another position on an NxN torus.
429    pub fn distance_to<const N: usize>(&self, other: &Self) -> usize {
430        Tonnetz::<N>::distance(self.as_tuple(), other.as_tuple())
431    }
432}
433
/// Configuration for coherence validation (for pallet storage).
///
/// Defines the parameters for topological coherence checking.
/// Float parameters are stored as x100 fixed-point integers so the struct
/// contains no `f32` fields (which would prevent the `Eq` derive).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "substrate", derive(Encode, Decode, MaxEncodedLen, TypeInfo))]
pub struct CoherenceConfig {
    /// Grid size for Tonnetz topology
    pub grid_size: u8,
    /// Locality radius (scaled by 100 for fixed-point; 200 = 2.0)
    pub radius_scaled: u16,
    /// Decay rate alpha (scaled by 100 for fixed-point; 100 = 1.0)
    pub alpha_scaled: u16,
    /// Drift threshold for coherence violation
    pub drift_threshold: u8,
    /// Mask type to use
    pub mask_type: MaskType,
}
451
452impl Default for CoherenceConfig {
453    fn default() -> Self {
454        Self {
455            grid_size: 12,
456            radius_scaled: 200,  // 2.0
457            alpha_scaled: 100,   // 1.0
458            drift_threshold: 2,
459            mask_type: MaskType::Hybrid,
460        }
461    }
462}
463
464impl CoherenceConfig {
465    /// Create a new configuration.
466    pub const fn new(
467        grid_size: u8,
468        radius: f32,
469        alpha: f32,
470        drift_threshold: u8,
471        mask_type: MaskType,
472    ) -> Self {
473        Self {
474            grid_size,
475            radius_scaled: (radius * 100.0) as u16,
476            alpha_scaled: (alpha * 100.0) as u16,
477            drift_threshold,
478            mask_type,
479        }
480    }
481
482    /// Get radius as f32.
483    pub fn radius(&self) -> f32 {
484        self.radius_scaled as f32 / 100.0
485    }
486
487    /// Get alpha as f32.
488    pub fn alpha(&self) -> f32 {
489        self.alpha_scaled as f32 / 100.0
490    }
491
492    /// Create a ToroidalMask from this config.
493    pub fn to_mask(&self, seq_len: usize) -> ToroidalMask {
494        ToroidalMask {
495            seq_len,
496            radius: self.radius(),
497            alpha: self.alpha(),
498            grid_size: self.grid_size as usize,
499            mask_type: self.mask_type,
500        }
501    }
502}
503
/// Result of coherence validation (for on-chain reporting).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(feature = "substrate", derive(Encode, Decode, MaxEncodedLen, TypeInfo))]
pub struct CoherenceResult {
    /// Number of transitions checked
    pub transitions: u32,
    /// Number of drift violations (transitions exceeding the drift threshold)
    pub violations: u32,
    /// Whether coherence is within bounds (drift rate <= allowed maximum)
    pub is_coherent: bool,
}
515
516impl CoherenceResult {
517    /// Create from drift meter measurements.
518    pub fn from_meter(meter: &DriftMeter, max_rate: f32) -> Self {
519        let rate = meter.rate();
520        Self {
521            transitions: meter.count as u32,
522            violations: meter.drifts as u32,
523            is_coherent: rate <= max_rate,
524        }
525    }
526
527    /// Drift rate (0.0 to 1.0).
528    pub fn drift_rate(&self) -> f32 {
529        if self.transitions == 0 {
530            0.0
531        } else {
532            self.violations as f32 / self.transitions as f32
533        }
534    }
535}
536
537// =============================================================================
538// Phase 4: Advanced Features
539// =============================================================================
540
/// 3D Torus topology (T^3).
///
/// Higher-dimensional torus for richer semantic spaces.
/// Distance is L1 with wraparound in all three dimensions.
/// Zero-sized marker type; the side length is the const generic `N`.
#[derive(Debug, Clone, Copy)]
pub struct Torus3D<const N: usize>;
547
548impl<const N: usize> Torus3D<N> {
549    /// Create a new 3D torus topology.
550    pub const fn new() -> Self {
551        Self
552    }
553
554    /// Total number of positions.
555    pub const fn total_positions() -> usize {
556        N * N * N
557    }
558
559    /// Convert linear index to 3D coordinates.
560    #[inline]
561    pub const fn to_coords(index: usize) -> (usize, usize, usize) {
562        let z = index / (N * N);
563        let rem = index % (N * N);
564        let y = rem / N;
565        let x = rem % N;
566        (x, y, z)
567    }
568
569    /// Convert 3D coordinates to linear index.
570    #[inline]
571    pub const fn to_index(x: usize, y: usize, z: usize) -> usize {
572        (z % N) * N * N + (y % N) * N + (x % N)
573    }
574
575    /// 3D toroidal distance (L1 with wraparound).
576    #[inline]
577    pub fn distance(a: (usize, usize, usize), b: (usize, usize, usize)) -> usize {
578        let dx = a.0.abs_diff(b.0);
579        let dy = a.1.abs_diff(b.1);
580        let dz = a.2.abs_diff(b.2);
581
582        let dx_wrap = if dx > N / 2 { N - dx } else { dx };
583        let dy_wrap = if dy > N / 2 { N - dy } else { dy };
584        let dz_wrap = if dz > N / 2 { N - dz } else { dz };
585
586        dx_wrap + dy_wrap + dz_wrap
587    }
588
589    /// Spectral gap for 3D torus.
590    pub fn spectral_gap() -> f32 {
591        // For T^3, gap is same as T^2 for fixed N
592        let pi = core::f32::consts::PI;
593        2.0 - 2.0 * cosf(2.0 * pi / N as f32)
594    }
595}
596
597impl<const N: usize> Default for Torus3D<N> {
598    fn default() -> Self {
599        Self::new()
600    }
601}
602
/// Multi-scale Tonnetz configuration.
///
/// Combines multiple grid sizes for hierarchical coherence: each position
/// pair is scored on every grid and the distances are blended by weight.
#[derive(Debug, Clone)]
pub struct MultiScaleTonnetz {
    /// Grid sizes (e.g., [6, 12, 24])
    pub scales: [usize; 3],
    /// Weights for each scale
    pub weights: [f32; 3],
}

impl Default for MultiScaleTonnetz {
    /// Default scales 6/12/24 weighted 0.5/0.3/0.2 (coarse-to-fine).
    fn default() -> Self {
        Self {
            scales: [6, 12, 24],
            weights: [0.5, 0.3, 0.2],
        }
    }
}

impl MultiScaleTonnetz {
    /// Create with custom scales and weights.
    pub fn new(scales: [usize; 3], weights: [f32; 3]) -> Self {
        Self { scales, weights }
    }

    /// Weighted multi-scale distance: Σ_k w_k · d_{T,n_k}(a, b).
    pub fn distance(&self, a: (usize, usize), b: (usize, usize)) -> f32 {
        self.scales
            .iter()
            .zip(self.weights.iter())
            .map(|(&scale, &w)| w * Self::distance_at_scale(a, b, scale) as f32)
            .sum()
    }

    /// L1 toroidal distance after reducing both positions onto an n x n grid.
    fn distance_at_scale(a: (usize, usize), b: (usize, usize), n: usize) -> usize {
        let (ax, ay) = (a.0 % n, a.1 % n);
        let (bx, by) = (b.0 % n, b.1 % n);

        let dx = ax.abs_diff(bx);
        let dy = ay.abs_diff(by);

        // Deltas are < n after the mod, so `n - d` cannot underflow.
        dx.min(n - dx) + dy.min(n - dy)
    }
}
653
/// Learned toroidal projection parameters.
///
/// Implements φ_θ(e) = (σ(W₁e) mod 1, σ(W₂e) mod 1)
/// where σ is sigmoid and e is an embedding vector.
///
/// Note: Only available with `std` feature due to Vec usage.
#[cfg(feature = "std")]
#[derive(Debug, Clone)]
pub struct LearnedProjection {
    /// Input dimension (length of embedding vectors)
    pub input_dim: usize,
    /// Grid size for output coordinates
    pub grid_size: usize,
    /// Weight vector W1 (input_dim elements), dotted with the embedding
    /// to produce the row coordinate
    pub w1: Vec<f32>,
    /// Weight vector W2 (input_dim elements), dotted with the embedding
    /// to produce the column coordinate
    pub w2: Vec<f32>,
}
672
#[cfg(feature = "std")]
impl LearnedProjection {
    /// Create a new projection with deterministic pseudo-random initialization.
    ///
    /// Placeholder init (stride-hash pattern); swap in a proper RNG for
    /// real training.
    pub fn new(input_dim: usize, grid_size: usize) -> Self {
        let scale = 1.0 / (input_dim as f32).sqrt();
        // Same expression for both weight vectors, differing only in stride.
        let init = |stride: usize| -> Vec<f32> {
            (0..input_dim)
                .map(|i| ((i * stride) % 100) as f32 * scale / 100.0 - scale / 2.0)
                .collect()
        };
        Self {
            input_dim,
            grid_size,
            w1: init(7),
            w2: init(13),
        }
    }

    /// Logistic sigmoid σ(x) = 1 / (1 + e^(-x)).
    fn sigmoid(x: f32) -> f32 {
        1.0 / (1.0 + expf(-x))
    }

    /// Project embedding to torus position.
    ///
    /// φ_θ(e) = (σ(W₁·e), σ(W₂·e)) scaled to the grid.
    ///
    /// # Panics
    /// Panics if `embedding.len() != self.input_dim`.
    pub fn project(&self, embedding: &[f32]) -> (usize, usize) {
        assert_eq!(embedding.len(), self.input_dim);

        let dot = |w: &[f32]| -> f32 {
            w.iter().zip(embedding.iter()).map(|(wi, ei)| wi * ei).sum()
        };

        let x = Self::sigmoid(dot(&self.w1));
        let y = Self::sigmoid(dot(&self.w2));

        // Sigmoid output lies in (0, 1); the final mod guards the rounding
        // edge where x * grid_size lands exactly on grid_size.
        let row = ((x * self.grid_size as f32) as usize) % self.grid_size;
        let col = ((y * self.grid_size as f32) as usize) % self.grid_size;

        (row, col)
    }
}
710
/// Adjacency loss computation.
///
/// L_topo = E[(a,b)~co-occur][d_T(φ(a), φ(b))] - λ · E[(a,c)~random][d_T(φ(a), φ(c))]
///
/// Co-occurring pairs should be close; random pairs should be far.
/// Accumulates distances incrementally; call `loss()` for the current value.
#[derive(Debug, Clone)]
pub struct AdjacencyLoss<const N: usize> {
    /// Regularization weight for negative samples
    pub lambda: f32,
    /// Accumulated positive pair distances
    positive_sum: f32,
    /// Number of positive pairs recorded
    positive_count: usize,
    /// Accumulated negative pair distances
    negative_sum: f32,
    /// Number of negative pairs recorded
    negative_count: usize,
}
727
728impl<const N: usize> AdjacencyLoss<N> {
729    /// Create a new adjacency loss tracker.
730    pub fn new(lambda: f32) -> Self {
731        Self {
732            lambda,
733            positive_sum: 0.0,
734            positive_count: 0,
735            negative_sum: 0.0,
736            negative_count: 0,
737        }
738    }
739
740    /// Record a positive (co-occurring) pair.
741    pub fn record_positive(&mut self, a: (usize, usize), b: (usize, usize)) {
742        let d = Tonnetz::<N>::distance(a, b) as f32;
743        self.positive_sum += d;
744        self.positive_count += 1;
745    }
746
747    /// Record a negative (random) pair.
748    pub fn record_negative(&mut self, a: (usize, usize), c: (usize, usize)) {
749        let d = Tonnetz::<N>::distance(a, c) as f32;
750        self.negative_sum += d;
751        self.negative_count += 1;
752    }
753
754    /// Compute the loss.
755    ///
756    /// Lower is better: positive pairs close, negative pairs far.
757    pub fn loss(&self) -> f32 {
758        let pos_mean = if self.positive_count > 0 {
759            self.positive_sum / self.positive_count as f32
760        } else {
761            0.0
762        };
763
764        let neg_mean = if self.negative_count > 0 {
765            self.negative_sum / self.negative_count as f32
766        } else {
767            0.0
768        };
769
770        pos_mean - self.lambda * neg_mean
771    }
772
773    /// Reset accumulators.
774    pub fn reset(&mut self) {
775        self.positive_sum = 0.0;
776        self.positive_count = 0;
777        self.negative_sum = 0.0;
778        self.negative_count = 0;
779    }
780}
781
/// Sparse mask in CSR (Compressed Sparse Row) format.
///
/// Efficient storage for sparse attention masks.
///
/// CSR invariants (as produced by `from_dense`):
/// - `row_ptr.len() == size + 1`
/// - row i's entries live at indices `row_ptr[i]..row_ptr[i+1]` of
///   `col_idx` / `values`
/// - `col_idx.len() == values.len()` (= number of non-zeros)
#[cfg(feature = "std")]
#[derive(Debug, Clone)]
pub struct SparseMask {
    /// Number of rows/columns
    pub size: usize,
    /// Row pointers (size + 1 elements)
    pub row_ptr: Vec<usize>,
    /// Column indices
    pub col_idx: Vec<usize>,
    /// Non-zero values
    pub values: Vec<f32>,
}
797
#[cfg(feature = "std")]
impl SparseMask {
    /// Create from dense mask, keeping values strictly above threshold.
    pub fn from_dense(dense: &[Vec<f32>], threshold: f32) -> Self {
        let size = dense.len();
        let mut row_ptr = vec![0];
        let mut col_idx = Vec::new();
        let mut values = Vec::new();

        for row in dense {
            for (j, &val) in row.iter().enumerate() {
                if val > threshold {
                    col_idx.push(j);
                    values.push(val);
                }
            }
            // CSR invariant: row_ptr[i + 1] marks the end of row i's entries.
            row_ptr.push(col_idx.len());
        }

        Self { size, row_ptr, col_idx, values }
    }

    /// Create from ToroidalMask with threshold.
    pub fn from_toroidal(mask: &ToroidalMask, threshold: f32) -> Self {
        let dense = mask.generate();
        Self::from_dense(&dense, threshold)
    }

    /// Number of non-zero elements.
    pub fn nnz(&self) -> usize {
        self.values.len()
    }

    /// Sparsity ratio (1.0 = fully sparse, 0.0 = fully dense).
    /// Returns 0.0 for an empty (size 0) mask.
    pub fn sparsity(&self) -> f32 {
        let total = self.size * self.size;
        if total == 0 {
            0.0
        } else {
            1.0 - (self.nnz() as f32 / total as f32)
        }
    }

    /// Get value at (i, j), or 0 if not stored (or i out of range).
    pub fn get(&self, i: usize, j: usize) -> f32 {
        if i >= self.size {
            return 0.0;
        }

        let start = self.row_ptr[i];
        let end = self.row_ptr[i + 1];

        // Linear scan — rows produced by local masks are short, so this is
        // cheap in practice.
        for k in start..end {
            if self.col_idx[k] == j {
                return self.values[k];
            }
        }

        0.0
    }

    /// Memory usage of the three backing buffers, in bytes.
    ///
    /// Uses `size_of::<usize>()` instead of a hard-coded 8, so the figure is
    /// also correct on 32-bit targets.
    pub fn memory_bytes(&self) -> usize {
        let usize_bytes = core::mem::size_of::<usize>();
        (self.row_ptr.len() + self.col_idx.len()) * usize_bytes
            + self.values.len() * core::mem::size_of::<f32>()
    }
}
864
865// =============================================================================
866// Phase 5: Grounding Projector — Orthogonal Evidence Projection
867// =============================================================================
868//
869// Implements G = A(AᵀA)⁻¹Aᵀ where:
870//   A = evidence embedding matrix (m evidence vectors of dim n)
871//   G = orthogonal projector onto col(A)
872//
873// For any LLM output vector x:
874//   x_grounded    = G·x   (component supported by evidence)
875//   x_hallucinated = (I - G)·x  (component orthogonal to evidence)
876//
877// Hallucination magnitude = ‖(I - G)·x‖ / ‖x‖
878//
879// Patent pending — Paraxiom Technologies Inc.
880
/// Orthogonal grounding projector G = A(AᵀA)⁻¹Aᵀ.
///
/// Projects LLM output vectors onto the subspace spanned by evidence embeddings.
/// The residual (I - G)x is the hallucination component.
#[cfg(feature = "std")]
#[derive(Debug, Clone)]
pub struct GroundingProjector {
    /// Dimension of embedding space
    dim: usize,
    /// Number of evidence vectors used to build G (see `from_evidence`)
    rank: usize,
    /// Precomputed projection matrix G (dim × dim, row-major)
    matrix: Vec<f32>,
}
895
896#[cfg(feature = "std")]
897impl GroundingProjector {
    /// Build a grounding projector from evidence embeddings.
    ///
    /// `evidence` is a slice of embedding vectors, each of length `dim`.
    /// Returns `None` if evidence is empty or AᵀA is singular.
    ///
    /// Cost: O(m²·dim) for AᵀA, the m×m inverse, plus O(m·dim²) for G —
    /// dominated by the dim² term for typical m ≪ dim.
    pub fn from_evidence(evidence: &[&[f32]], dim: usize) -> Option<Self> {
        let m = evidence.len();
        if m == 0 || dim == 0 {
            return None;
        }

        // Validate all evidence vectors have correct dimension
        for v in evidence {
            if v.len() != dim {
                return None;
            }
        }

        // Compute AᵀA (m × m).
        // AᵀA is symmetric, so only the upper triangle is computed and then
        // mirrored into the lower triangle.
        let mut ata = vec![0.0f32; m * m];
        for i in 0..m {
            for j in i..m {
                let dot: f32 = evidence[i]
                    .iter()
                    .zip(evidence[j].iter())
                    .map(|(a, b)| a * b)
                    .sum();
                ata[i * m + j] = dot;
                ata[j * m + i] = dot;
            }
        }

        // Invert AᵀA via Gauss-Jordan elimination.
        // NOTE(review): invert_matrix is defined elsewhere in this file;
        // presumably returns None for singular input — confirm its pivoting
        // tolerance, since near-dependent evidence makes AᵀA ill-conditioned.
        let ata_inv = invert_matrix(&ata, m)?;

        // Compute G = A(AᵀA)⁻¹Aᵀ  (dim × dim)
        //
        // First: B = (AᵀA)⁻¹Aᵀ  (m × dim)
        // Then:  G = A · B       (dim × dim)
        //
        // A is dim × m (columns are evidence vectors)
        // Aᵀ is m × dim

        // B = (AᵀA)⁻¹ · Aᵀ  →  B[i][k] = Σ_j ata_inv[i][j] * A[k][j]
        //                                 = Σ_j ata_inv[i][j] * evidence[j][k]
        let mut b = vec![0.0f32; m * dim];
        for i in 0..m {
            for k in 0..dim {
                let mut sum = 0.0f32;
                for j in 0..m {
                    sum += ata_inv[i * m + j] * evidence[j][k];
                }
                b[i * dim + k] = sum;
            }
        }

        // G = A · B  →  G[p][q] = Σ_i A[p][i] * B[i][q]
        //                        = Σ_i evidence[i][p] * B[i][q]
        let mut g = vec![0.0f32; dim * dim];
        for p in 0..dim {
            for q in 0..dim {
                let mut sum = 0.0f32;
                for i in 0..m {
                    sum += evidence[i][p] * b[i * dim + q];
                }
                g[p * dim + q] = sum;
            }
        }

        Some(Self {
            dim,
            rank: m,
            matrix: g,
        })
    }
972
973    /// Project vector onto evidence subspace (grounded component).
974    ///
975    /// Returns G·x — the part of x supported by evidence.
976    pub fn project_grounded(&self, x: &[f32]) -> Vec<f32> {
977        assert_eq!(x.len(), self.dim);
978        let mut result = vec![0.0f32; self.dim];
979        for i in 0..self.dim {
980            let mut sum = 0.0f32;
981            for j in 0..self.dim {
982                sum += self.matrix[i * self.dim + j] * x[j];
983            }
984            result[i] = sum;
985        }
986        result
987    }
988
989    /// Project vector onto hallucination subspace.
990    ///
991    /// Returns (I - G)·x — the part of x not supported by evidence.
992    pub fn project_hallucinated(&self, x: &[f32]) -> Vec<f32> {
993        let grounded = self.project_grounded(x);
994        x.iter()
995            .zip(grounded.iter())
996            .map(|(xi, gi)| xi - gi)
997            .collect()
998    }
999
1000    /// Compute hallucination score for a vector.
1001    ///
1002    /// Returns ‖(I - G)·x‖ / ‖x‖ ∈ [0, 1].
1003    /// 0 = fully grounded, 1 = fully hallucinated.
1004    pub fn hallucination_score(&self, x: &[f32]) -> f32 {
1005        let x_norm_sq: f32 = x.iter().map(|v| v * v).sum();
1006        if x_norm_sq < 1e-12 {
1007            return 0.0;
1008        }
1009
1010        let hallucinated = self.project_hallucinated(x);
1011        let h_norm_sq: f32 = hallucinated.iter().map(|v| v * v).sum();
1012
1013        libm::sqrtf(h_norm_sq / x_norm_sq)
1014    }
1015
1016    /// Decompose a vector into grounded and hallucinated components with scores.
1017    pub fn decompose(&self, x: &[f32]) -> GroundingDecomposition {
1018        let grounded = self.project_grounded(x);
1019        let hallucinated: Vec<f32> = x
1020            .iter()
1021            .zip(grounded.iter())
1022            .map(|(xi, gi)| xi - gi)
1023            .collect();
1024
1025        let x_norm = libm::sqrtf(x.iter().map(|v| v * v).sum());
1026        let g_norm = libm::sqrtf(grounded.iter().map(|v| v * v).sum());
1027        let h_norm = libm::sqrtf(hallucinated.iter().map(|v| v * v).sum());
1028
1029        GroundingDecomposition {
1030            grounded,
1031            hallucinated,
1032            grounding_ratio: if x_norm > 1e-12 { g_norm / x_norm } else { 0.0 },
1033            hallucination_ratio: if x_norm > 1e-12 { h_norm / x_norm } else { 0.0 },
1034        }
1035    }
1036
    /// Dimension of the embedding space.
    ///
    /// Equals the side length of the (dim × dim) projection matrix G.
    pub fn dim(&self) -> usize {
        self.dim
    }
1041
    /// Rank of the evidence subspace.
    ///
    /// Equals the number of evidence vectors the projector was built from.
    pub fn rank(&self) -> usize {
        self.rank
    }
1046
1047    /// Verify G² ≈ G (idempotent — defining property of projection).
1048    pub fn verify_idempotent(&self, tolerance: f32) -> bool {
1049        // Compute G² and check ‖G² - G‖_max < tolerance
1050        for i in 0..self.dim {
1051            for j in 0..self.dim {
1052                let mut g2_ij = 0.0f32;
1053                for k in 0..self.dim {
1054                    g2_ij += self.matrix[i * self.dim + k] * self.matrix[k * self.dim + j];
1055                }
1056                if fabsf(g2_ij - self.matrix[i * self.dim + j]) > tolerance {
1057                    return false;
1058                }
1059            }
1060        }
1061        true
1062    }
1063
1064    /// Verify Gᵀ = G (symmetric — orthogonal projection).
1065    pub fn verify_symmetric(&self, tolerance: f32) -> bool {
1066        for i in 0..self.dim {
1067            for j in (i + 1)..self.dim {
1068                if fabsf(
1069                    self.matrix[i * self.dim + j] - self.matrix[j * self.dim + i],
1070                ) > tolerance
1071                {
1072                    return false;
1073                }
1074            }
1075        }
1076        true
1077    }
1078}
1079
/// Result of decomposing a vector into grounded and hallucinated components.
///
/// Produced by `decompose`; by construction `grounded + hallucinated`
/// reconstructs the input vector (hallucinated = x - G·x).
#[cfg(feature = "std")]
#[derive(Debug, Clone)]
pub struct GroundingDecomposition {
    /// Component supported by evidence (G·x)
    pub grounded: Vec<f32>,
    /// Component orthogonal to evidence ((I-G)·x)
    pub hallucinated: Vec<f32>,
    /// ‖G·x‖ / ‖x‖  — fraction of signal grounded in evidence (0.0 when ‖x‖ ≈ 0)
    pub grounding_ratio: f32,
    /// ‖(I-G)·x‖ / ‖x‖  — fraction of signal that is hallucination (0.0 when ‖x‖ ≈ 0)
    pub hallucination_ratio: f32,
}
1093
/// Invert a square `n × n` matrix (row-major) using Gauss-Jordan elimination
/// with partial pivoting.
///
/// Returns `None` if the matrix is numerically singular (best available pivot
/// magnitude below `1e-10`).
#[cfg(feature = "std")]
fn invert_matrix(mat: &[f32], n: usize) -> Option<Vec<f32>> {
    let w = 2 * n; // row width of the augmented matrix [A | I]

    // Build the augmented matrix [A | I].
    let mut aug = vec![0.0f32; n * w];
    for i in 0..n {
        for j in 0..n {
            aug[i * w + j] = mat[i * n + j];
        }
        aug[i * w + n + i] = 1.0;
    }

    for col in 0..n {
        // Partial pivoting: pick the row with the largest magnitude in `col`
        // to keep the elimination numerically stable.
        let mut max_val = fabsf(aug[col * w + col]);
        let mut max_row = col;
        for row in (col + 1)..n {
            let val = fabsf(aug[row * w + col]);
            if val > max_val {
                max_val = val;
                max_row = row;
            }
        }

        if max_val < 1e-10 {
            return None; // Singular: no usable pivot in this column
        }

        // Swap the pivot row into place (element-wise; rows share one Vec).
        if max_row != col {
            for k in 0..w {
                aug.swap(col * w + k, max_row * w + k);
            }
        }

        // Normalize the pivot row so the pivot entry becomes 1.
        let pivot = aug[col * w + col];
        for k in 0..w {
            aug[col * w + k] /= pivot;
        }

        // Eliminate `col` from every other row.
        for row in 0..n {
            if row == col {
                continue;
            }
            let factor = aug[row * w + col];
            for k in 0..w {
                aug[row * w + k] -= factor * aug[col * w + k];
            }
        }
    }

    // The right half of the augmented matrix now holds A⁻¹.
    let mut inv = vec![0.0f32; n * n];
    for i in 0..n {
        for j in 0..n {
            inv[i * n + j] = aug[i * w + n + j];
        }
    }

    Some(inv)
}
1160
1161// =============================================================================
1162// Phase 6: Karmonic Spectral Filter — Training-Time Regularization
1163// =============================================================================
1164//
1165// Implements the Karmonic spectral filter from Theorem 12.1:
1166//   Low-frequency toroidal modes (global coherence) are PRESERVED;
1167//   high-frequency modes (noise) are ATTENUATED toward uniformity.
1168//
1169// The filter weight for Fourier mode n on an N-cycle graph:
1170//   λ_n = 2 - 2·cos(2πn/N)     (nth eigenvalue of cycle Laplacian)
1171//   w(n) = (λ_n - λ_1) / (λ_max - λ_1)
1172//
1173// Mode 1: w ≈ 0.000 → preserve (class discrimination)
1174// Mode 2: w ≈ 0.196 → mild regularization
1175// Mode 3: w ≈ 0.464 → moderate
1176// Mode 4+: w > 0.73 → strong (enforce torus coverage)
1177//
1178// This is the TRAINING-TIME primitive that works (+1.3pp MC1, +4.4pp MC2),
1179// unlike inference-time logit bias which was shown null (March 2026).
1180//
1181// Reference: Karmonic LLM (DOI: 10.5281/zenodo.18746144)
1182
/// Karmonic spectral filter for training-time regularization.
///
/// Computes mode-dependent weights from the cycle graph Laplacian eigenvalues.
/// Low-frequency modes (semantically meaningful) get low weight (preserved);
/// high-frequency modes (noise) get high weight (regularized toward uniformity).
///
/// Requested mode counts beyond the Nyquist limit `grid_size / 2` are clamped
/// in `new` (cycle-graph eigenvalues mirror around N/2).
///
/// # Example
///
/// ```rust
/// use topological_coherence::KarmonicFilter;
///
/// let filter = KarmonicFilter::new(12, 6);
/// assert_eq!(filter.n_modes(), 6);
///
/// // Mode 1 has zero weight (preserved)
/// assert!(filter.weight(0) < 0.001);
///
/// // Higher modes have increasing weight
/// assert!(filter.weight(1) < filter.weight(2));
/// assert!(filter.weight(2) < filter.weight(3));
///
/// // Compute regularization loss for a batch of embeddings
/// // Expected dim = 2 * torus_dim * n_modes = 2 * 1 * 6 = 12
/// let embeddings = vec![
///     vec![1.0, 0.0, 0.5, 0.5, -0.3, 0.2, 0.1, -0.1, 0.4, 0.3, -0.2, 0.1],
///     vec![0.0, 1.0, 0.4, 0.6, -0.1, 0.3, 0.2, -0.2, 0.3, 0.4, -0.1, 0.2],
///     vec![0.5, 0.5, 0.3, 0.7, -0.2, 0.1, 0.3, -0.3, 0.2, 0.5, -0.3, 0.3],
/// ];
/// let loss = filter.uniformity_loss(&embeddings, 1, 2.0);
/// // Loss is finite for well-formed input (each mode term is a log-mean-exp
/// // of non-positive values, so the total is typically <= 0)
/// ```
#[derive(Debug, Clone)]
pub struct KarmonicFilter {
    /// Grid size N for the cycle graph
    grid_size: usize,
    /// Number of Fourier modes (after Nyquist clamping; length of the two vectors below)
    n_modes: usize,
    /// Precomputed eigenvalues λ_n = 2 - 2·cos(2πn/N)
    eigenvalues: Vec<f32>,
    /// Normalized weights w(n) ∈ [0, 1]
    weights: Vec<f32>,
}
1225
1226impl KarmonicFilter {
1227    /// Create a new Karmonic filter.
1228    ///
1229    /// # Arguments
1230    /// * `grid_size` - Torus grid size N (typically 12)
1231    /// * `n_modes` - Number of Fourier modes (typically 6)
1232    pub fn new(grid_size: usize, n_modes: usize) -> Self {
1233        assert!(grid_size >= 2, "grid_size must be >= 2");
1234        assert!(n_modes >= 1, "n_modes must be >= 1");
1235
1236        // Clamp to Nyquist limit: eigenvalues of cycle graph C_N are symmetric
1237        // around N/2, so modes beyond N/2 are redundant (same eigenvalues mirrored).
1238        let n_modes = core::cmp::min(n_modes, grid_size / 2);
1239        let n_modes = core::cmp::max(n_modes, 1); // ensure at least 1 mode
1240
1241        let pi = core::f32::consts::PI;
1242
1243        let eigenvalues: Vec<f32> = (1..=n_modes)
1244            .map(|n| 2.0 - 2.0 * cosf(2.0 * pi * n as f32 / grid_size as f32))
1245            .collect();
1246
1247        let lam_1 = eigenvalues[0];
1248        let lam_max = eigenvalues.iter().copied().fold(f32::NEG_INFINITY, f32::max);
1249
1250        let weights = if (lam_max - lam_1).abs() < 1e-10 {
1251            vec![0.0; n_modes]
1252        } else {
1253            eigenvalues
1254                .iter()
1255                .map(|&lam| (lam - lam_1) / (lam_max - lam_1))
1256                .collect()
1257        };
1258
1259        Self {
1260            grid_size,
1261            n_modes,
1262            eigenvalues,
1263            weights,
1264        }
1265    }
1266
1267    /// Number of Fourier modes.
1268    pub fn n_modes(&self) -> usize {
1269        self.n_modes
1270    }
1271
1272    /// Grid size.
1273    pub fn grid_size(&self) -> usize {
1274        self.grid_size
1275    }
1276
1277    /// Get the eigenvalue for mode `idx` (0-indexed).
1278    pub fn eigenvalue(&self, idx: usize) -> f32 {
1279        self.eigenvalues[idx]
1280    }
1281
1282    /// Get the normalized weight for mode `idx` (0-indexed).
1283    ///
1284    /// Returns 0.0 for mode 0 (preserve), approaching 1.0 for high modes (regularize).
1285    pub fn weight(&self, idx: usize) -> f32 {
1286        self.weights[idx]
1287    }
1288
1289    /// Get all weights as a slice.
1290    pub fn weights(&self) -> &[f32] {
1291        &self.weights
1292    }
1293
1294    /// Get all eigenvalues as a slice.
1295    pub fn eigenvalues(&self) -> &[f32] {
1296        &self.eigenvalues
1297    }
1298
1299    /// Compute mode-weighted uniformity loss for a batch of Fourier embeddings.
1300    ///
1301    /// This is the core Karmonic regularization term to add to a training loss.
1302    ///
1303    /// # Arguments
1304    /// * `fourier_embeddings` - Batch of Fourier coordinates, each of length `2 * torus_dim * n_modes`
1305    ///   grouped by mode: `[mode_1_all_circles | mode_2_all_circles | ...]`
1306    /// * `torus_dim` - Number of torus circles (k), typically 2
1307    /// * `temperature` - Wang-Isola uniformity temperature (typically 2.0)
1308    ///
1309    /// # Returns
1310    /// Weighted uniformity loss (scalar). Lower = more uniform on high-freq modes.
1311    #[cfg(feature = "std")]
1312    pub fn uniformity_loss(
1313        &self,
1314        fourier_embeddings: &[Vec<f32>],
1315        torus_dim: usize,
1316        temperature: f32,
1317    ) -> f32 {
1318        let batch_size = fourier_embeddings.len();
1319        if batch_size < 2 {
1320            return 0.0;
1321        }
1322
1323        let expected_dim = 2 * torus_dim * self.n_modes;
1324        let slice_size = 2 * torus_dim; // 2k dims per mode
1325
1326        let mut total = 0.0f32;
1327
1328        for n in 0..self.n_modes {
1329            let start = slice_size * n;
1330            let end = start + slice_size;
1331
1332            // Compute pairwise squared distances for this mode
1333            let mut log_sum_total = 0.0f32;
1334
1335            for i in 0..batch_size {
1336                assert!(
1337                    fourier_embeddings[i].len() >= expected_dim,
1338                    "embedding dim {} < expected {}", fourier_embeddings[i].len(), expected_dim
1339                );
1340
1341                let mut neg_dists: Vec<f32> = Vec::with_capacity(batch_size - 1);
1342
1343                for j in 0..batch_size {
1344                    if i == j {
1345                        continue;
1346                    }
1347
1348                    // Squared L2 distance for this mode slice
1349                    let sq_dist: f32 = (start..end)
1350                        .map(|d| {
1351                            let diff = fourier_embeddings[i][d] - fourier_embeddings[j][d];
1352                            diff * diff
1353                        })
1354                        .sum();
1355
1356                    neg_dists.push(-temperature * sq_dist);
1357                }
1358
1359                // log-sum-exp for numerical stability
1360                let max_val = neg_dists.iter().copied().fold(f32::NEG_INFINITY, f32::max);
1361                let lse: f32 = max_val
1362                    + neg_dists
1363                        .iter()
1364                        .map(|&x| expf(x - max_val))
1365                        .sum::<f32>()
1366                        .ln();
1367
1368                let log_b_minus_1 = ((batch_size - 1) as f32).ln();
1369                log_sum_total += lse - log_b_minus_1;
1370            }
1371
1372            let unif_n = log_sum_total / batch_size as f32;
1373            total += self.weights[n] * unif_n;
1374        }
1375
1376        total
1377    }
1378
1379    /// Compute circular spread loss (decorrelation across torus dimensions).
1380    ///
1381    /// Penalizes correlation between angles on different circles,
1382    /// encouraging independent coverage of each torus dimension.
1383    ///
1384    /// # Arguments
1385    /// * `angles` - Batch of raw angles in `[0, 2π)`, each of length `torus_dim`
1386    ///
1387    /// # Returns
1388    /// Mean squared circular correlation across all circle pairs.
1389    #[cfg(feature = "std")]
1390    pub fn spread_loss(&self, angles: &[Vec<f32>]) -> f32 {
1391        if angles.is_empty() || angles[0].len() < 2 {
1392            return 0.0;
1393        }
1394
1395        let batch_size = angles.len();
1396        let k = angles[0].len(); // torus_dim
1397
1398        let mut total_corr = 0.0f32;
1399        let mut n_pairs = 0;
1400
1401        for i in 0..k {
1402            for j in (i + 1)..k {
1403                // Mean angle (circular mean)
1404                let sin_mean_i: f32 = angles.iter().map(|a| libm::sinf(a[i])).sum::<f32>() / batch_size as f32;
1405                let cos_mean_i: f32 = angles.iter().map(|a| cosf(a[i])).sum::<f32>() / batch_size as f32;
1406                let mu_i = libm::atan2f(sin_mean_i, cos_mean_i);
1407
1408                let sin_mean_j: f32 = angles.iter().map(|a| libm::sinf(a[j])).sum::<f32>() / batch_size as f32;
1409                let cos_mean_j: f32 = angles.iter().map(|a| cosf(a[j])).sum::<f32>() / batch_size as f32;
1410                let mu_j = libm::atan2f(sin_mean_j, cos_mean_j);
1411
1412                // Circular correlation
1413                let mut num = 0.0f32;
1414                let mut den_i = 0.0f32;
1415                let mut den_j = 0.0f32;
1416
1417                for a in angles.iter() {
1418                    let s_i = libm::sinf(a[i] - mu_i);
1419                    let s_j = libm::sinf(a[j] - mu_j);
1420                    num += s_i * s_j;
1421                    den_i += s_i * s_i;
1422                    den_j += s_j * s_j;
1423                }
1424
1425                num /= batch_size as f32;
1426                den_i /= batch_size as f32;
1427                den_j /= batch_size as f32;
1428
1429                let den = libm::sqrtf(den_i * den_j + 1e-8);
1430                let corr = num / den;
1431                total_corr += corr * corr;
1432                n_pairs += 1;
1433            }
1434        }
1435
1436        if n_pairs > 0 {
1437            total_corr / n_pairs as f32
1438        } else {
1439            0.0
1440        }
1441    }
1442}
1443
1444/// Fourier torus projection: map angles to Fourier coordinates.
1445///
1446/// For each angle θ and mode n, computes (cos(nθ), sin(nθ)).
1447/// Output grouped by mode: `[mode_1 | mode_2 | ... | mode_m]`.
1448///
1449/// # Arguments
1450/// * `angles` - Raw angles in `[0, 2π)` of length `torus_dim`
1451/// * `n_modes` - Number of Fourier modes
1452///
1453/// # Returns
1454/// Fourier embedding of length `2 * torus_dim * n_modes`
1455pub fn fourier_expand(angles: &[f32], n_modes: usize) -> Vec<f32> {
1456    let k = angles.len();
1457    let mut result = Vec::with_capacity(2 * k * n_modes);
1458
1459    for n in 1..=n_modes {
1460        for &theta in angles {
1461            let n_theta = n as f32 * theta;
1462            result.push(cosf(n_theta));
1463            result.push(libm::sinf(n_theta));
1464        }
1465    }
1466
1467    result
1468}
1469
/// Angles to torus position: quantize continuous angles to grid coordinates.
///
/// Each angle maps to `floor(θ / 2π · N) mod N`; the final modulo guards
/// against θ == 2π landing exactly on cell `N`.
///
/// # Arguments
/// * `angles` - Raw angles in `[0, 2π)` of length `torus_dim`
/// * `grid_size` - Grid size N
///
/// # Returns
/// Grid coordinates (one per angle)
pub fn angles_to_grid(angles: &[f32], grid_size: usize) -> Vec<usize> {
    let two_pi = 2.0 * core::f32::consts::PI;
    angles
        .iter()
        .map(|&theta| {
            let cell = (theta / two_pi * grid_size as f32) as usize;
            cell % grid_size
        })
        .collect()
}
1489
1490// =============================================================================
1491// Tests
1492// =============================================================================
1493
1494#[cfg(test)]
1495mod tests {
1496    use super::*;
1497
1498    // -------------------------------------------------------------------------
1499    // Basic Distance Tests
1500    // -------------------------------------------------------------------------
1501
1502    #[test]
1503    fn test_tonnetz_distance_self() {
1504        let d = Tonnetz::<12>::distance((5, 5), (5, 5));
1505        assert_eq!(d, 0);
1506    }
1507
1508    #[test]
1509    fn test_tonnetz_distance_adjacent() {
1510        let d = Tonnetz::<12>::distance((0, 0), (0, 1));
1511        assert_eq!(d, 1);
1512    }
1513
1514    #[test]
1515    fn test_tonnetz_distance_wraparound() {
1516        // On a 12x12 torus, (0,0) to (0,11) should wrap to distance 1
1517        let d = Tonnetz::<12>::distance((0, 0), (0, 11));
1518        assert_eq!(d, 1);
1519    }
1520
1521    #[test]
1522    fn test_tonnetz_distance_diagonal_wrap() {
1523        // (0,0) to (11,11) wraps both dimensions
1524        let d = Tonnetz::<12>::distance((0, 0), (11, 11));
1525        assert_eq!(d, 2); // 1 + 1 after wrapping
1526    }
1527
1528    // -------------------------------------------------------------------------
1529    // Property Tests: Metric Space Axioms
1530    // -------------------------------------------------------------------------
1531
1532    #[test]
1533    fn test_distance_symmetry() {
1534        // d(a, b) = d(b, a) for all points
1535        for i in 0..12 {
1536            for j in 0..12 {
1537                for k in 0..12 {
1538                    for l in 0..12 {
1539                        let d1 = Tonnetz::<12>::distance((i, j), (k, l));
1540                        let d2 = Tonnetz::<12>::distance((k, l), (i, j));
1541                        assert_eq!(d1, d2, "Symmetry violated at ({},{}) <-> ({},{})", i, j, k, l);
1542                    }
1543                }
1544            }
1545        }
1546    }
1547
1548    #[test]
1549    fn test_distance_identity() {
1550        // d(a, a) = 0 for all points
1551        for i in 0..12 {
1552            for j in 0..12 {
1553                let d = Tonnetz::<12>::distance((i, j), (i, j));
1554                assert_eq!(d, 0, "Identity violated at ({},{})", i, j);
1555            }
1556        }
1557    }
1558
1559    #[test]
1560    fn test_triangle_inequality() {
1561        // d(a, c) <= d(a, b) + d(b, c) for all points
1562        // Test a sample of points (full test is O(n^6))
1563        let points = [(0, 0), (3, 5), (7, 2), (11, 11), (6, 6), (1, 10)];
1564        for &a in &points {
1565            for &b in &points {
1566                for &c in &points {
1567                    let d_ac = Tonnetz::<12>::distance(a, c);
1568                    let d_ab = Tonnetz::<12>::distance(a, b);
1569                    let d_bc = Tonnetz::<12>::distance(b, c);
1570                    assert!(
1571                        d_ac <= d_ab + d_bc,
1572                        "Triangle inequality violated: d({:?},{:?})={} > d({:?},{:?})={} + d({:?},{:?})={}",
1573                        a, c, d_ac, a, b, d_ab, b, c, d_bc
1574                    );
1575                }
1576            }
1577        }
1578    }
1579
    #[test]
    fn test_distance_non_negative() {
        // d(a, b) >= 0 for all points.
        // The return type is usize, so negativity is impossible by construction;
        // the meaningful check is the upper bound, which would catch wraparound
        // from an underflowing subtraction inside `distance`.
        for i in 0..12 {
            for j in 0..12 {
                for k in 0..12 {
                    for l in 0..12 {
                        let d = Tonnetz::<12>::distance((i, j), (k, l));
                        // usize is always >= 0, but verify the computation doesn't overflow
                        assert!(d <= 12, "Distance too large at ({},{}) <-> ({},{}): {}", i, j, k, l, d);
                    }
                }
            }
        }
    }
1595
1596    #[test]
1597    fn test_max_distance_bounded() {
1598        // On a 12x12 torus, max L1 distance is 6+6=12 (half grid in each dim)
1599        let mut max_dist = 0;
1600        for i in 0..12 {
1601            for j in 0..12 {
1602                let d = Tonnetz::<12>::distance((0, 0), (i, j));
1603                if d > max_dist {
1604                    max_dist = d;
1605                }
1606            }
1607        }
1608        assert_eq!(max_dist, 12, "Max distance should be 12 (6+6)");
1609    }
1610
1611    // -------------------------------------------------------------------------
1612    // Spectral Gap Tests
1613    // -------------------------------------------------------------------------
1614
1615    #[test]
1616    fn test_spectral_gap_positive() {
1617        let gap = Tonnetz::<12>::spectral_gap();
1618        assert!(gap > 0.0);
1619        assert!(gap < 1.0); // For N=12, gap ≈ 0.268
1620    }
1621
1622    #[test]
1623    fn test_spectral_gap_scales_with_n() {
1624        // Smaller N -> larger gap (more connected)
1625        let gap_6 = Tonnetz::<6>::spectral_gap();
1626        let gap_12 = Tonnetz::<12>::spectral_gap();
1627        let gap_24 = Tonnetz::<24>::spectral_gap();
1628        assert!(gap_6 > gap_12, "Gap should decrease with N");
1629        assert!(gap_12 > gap_24, "Gap should decrease with N");
1630    }
1631
1632    // -------------------------------------------------------------------------
1633    // Mask Type Tests
1634    // -------------------------------------------------------------------------
1635
1636    #[test]
1637    fn test_toroidal_mask_self() {
1638        let mask = ToroidalMask::new(64, 2.0, 1.0);
1639        assert_eq!(mask.value(0, 0), 1.0);
1640    }
1641
1642    #[test]
1643    fn test_toroidal_mask_decay() {
1644        let mask = ToroidalMask::new(64, 1.0, 1.0);
1645        let v_near = mask.value(0, 1);
1646        let v_far = mask.value(0, 5);
1647        assert!(v_near >= v_far);
1648    }
1649
    #[test]
    fn test_hard_cutoff_mask() {
        // Hard cutoff is binary: 1.0 inside the radius, 0.0 outside.
        let mask = ToroidalMask::hard_cutoff(64, 2.0, 12);
        // Within radius -> 1.0
        assert_eq!(mask.value(0, 0), 1.0);
        assert_eq!(mask.value(0, 1), 1.0);
        // Outside radius -> 0.0
        assert_eq!(mask.value(0, 36), 0.0); // distance 6
    }
1659
    #[test]
    fn test_soft_exponential_mask() {
        // Soft mask decays as exp(-alpha * distance); alpha = 1.0 here.
        let mask = ToroidalMask::soft_exponential(64, 1.0, 12);
        // Self -> exp(0) = 1.0
        assert!((mask.value(0, 0) - 1.0).abs() < 1e-6);
        // Distance 1 -> exp(-1) ≈ 0.368
        let v1 = mask.value(0, 1);
        assert!((v1 - 0.368).abs() < 0.01);
    }
1669
    #[test]
    fn test_hybrid_mask() {
        // Hybrid mask: fully open within the radius, exponential decay beyond.
        let mask = ToroidalMask::new(64, 2.0, 1.0);
        // Within radius -> 1.0
        assert_eq!(mask.value(0, 0), 1.0);
        assert_eq!(mask.value(0, 1), 1.0);
        // Just outside radius -> exp(-α*(d-r)) = exp(-1*(3-2)) = exp(-1)
        // Need to find a point at distance 3
        // NOTE(review): the outside-radius decay described above is never
        // asserted — this test currently only covers the flat interior.
    }
1679
1680    // -------------------------------------------------------------------------
1681    // Sinkhorn-Knopp Tests
1682    // -------------------------------------------------------------------------
1683
1684    #[test]
1685    fn test_sinkhorn_knopp_doubly_stochastic() {
1686        let mask = ToroidalMask::new(16, 2.0, 0.5);
1687        let ds = mask.generate_doubly_stochastic(50);
1688        assert!(
1689            is_doubly_stochastic(&ds, 0.01),
1690            "Sinkhorn-Knopp should produce doubly-stochastic matrix"
1691        );
1692    }
1693
1694    #[test]
1695    fn test_sinkhorn_preserves_structure() {
1696        // After Sinkhorn-Knopp, nearby positions should still have higher values
1697        let mask = ToroidalMask::new(16, 2.0, 0.5);
1698        let ds = mask.generate_doubly_stochastic(50);
1699        // Diagonal (self-attention) should be relatively high
1700        let diag_avg: f32 = (0..16).map(|i| ds[i][i]).sum::<f32>() / 16.0;
1701        let total_avg: f32 = ds.iter().flat_map(|r| r.iter()).sum::<f32>() / 256.0;
1702        assert!(
1703            diag_avg > total_avg,
1704            "Diagonal should be above average after Sinkhorn"
1705        );
1706    }
1707
1708    // -------------------------------------------------------------------------
1709    // Drift Meter Tests
1710    // -------------------------------------------------------------------------
1711
    #[test]
    fn test_drift_meter() {
        // Threshold 2: transitions farther than 2 on the torus count as drift.
        let mut meter = DriftMeter::new(2);
        meter.record::<12>(0, 1);  // distance 1, not drift
        meter.record::<12>(0, 6);  // distance 6, drift
        meter.record::<12>(0, 0);  // distance 0, not drift

        // One drift out of three transitions -> rate ≈ 1/3.
        assert_eq!(meter.count, 3);
        assert_eq!(meter.drifts, 1);
        assert!((meter.rate() - 0.333).abs() < 0.01);
    }
1723
1724    #[test]
1725    fn test_drift_meter_reset() {
1726        let mut meter = DriftMeter::new(2);
1727        meter.record::<12>(0, 6);
1728        meter.reset();
1729        assert_eq!(meter.count, 0);
1730        assert_eq!(meter.drifts, 0);
1731        assert_eq!(meter.rate(), 0.0);
1732    }
1733
1734    // -------------------------------------------------------------------------
1735    // Coordinate Conversion Tests
1736    // -------------------------------------------------------------------------
1737
1738    #[test]
1739    fn test_coord_conversion_roundtrip() {
1740        for idx in 0..144 {
1741            let coords = Tonnetz::<12>::to_coords(idx);
1742            let back = Tonnetz::<12>::to_index(coords.0, coords.1);
1743            assert_eq!(idx, back, "Roundtrip failed for index {}", idx);
1744        }
1745    }
1746
1747    // -------------------------------------------------------------------------
1748    // Substrate Integration Type Tests
1749    // -------------------------------------------------------------------------
1750
1751    #[test]
1752    fn test_toroidal_position() {
1753        let pos = ToroidalPosition::new(5, 7);
1754        assert_eq!(pos.as_tuple(), (5, 7));
1755    }
1756
1757    #[test]
1758    fn test_toroidal_position_distance() {
1759        let a = ToroidalPosition::new(0, 0);
1760        let b = ToroidalPosition::new(5, 7);
1761        let dist = a.distance_to::<12>(&b);
1762        assert_eq!(dist, Tonnetz::<12>::distance((0, 0), (5, 7)));
1763    }
1764
    #[test]
    fn test_coherence_config_default() {
        // Defaults must match the documented 12x12 hybrid configuration.
        let config = CoherenceConfig::default();
        assert_eq!(config.grid_size, 12);
        // radius()/alpha() are checked with a tolerance — presumably the config
        // does not store raw f32 (accessors suggest a conversion); TODO confirm.
        assert!((config.radius() - 2.0).abs() < 0.01);
        assert!((config.alpha() - 1.0).abs() < 0.01);
        assert_eq!(config.drift_threshold, 2);
        assert_eq!(config.mask_type, MaskType::Hybrid);
    }
1774
    #[test]
    fn test_coherence_config_to_mask() {
        // to_mask must carry the config's geometry over to the generated mask.
        let config = CoherenceConfig::default();
        let mask = config.to_mask(64);
        assert_eq!(mask.seq_len, 64);
        assert_eq!(mask.grid_size, 12);
        assert_eq!(mask.mask_type, MaskType::Hybrid);
    }
1783
    #[test]
    fn test_coherence_result_from_meter() {
        // from_meter copies the counters and applies the coherence threshold
        // to the observed drift rate (1/3 here).
        let mut meter = DriftMeter::new(2);
        meter.record::<12>(0, 1);
        meter.record::<12>(0, 6);
        meter.record::<12>(0, 0);

        let result = CoherenceResult::from_meter(&meter, 0.5);
        assert_eq!(result.transitions, 3);
        assert_eq!(result.violations, 1);
        assert!(result.is_coherent); // 0.333 < 0.5

        // A stricter threshold flips the verdict on the same meter.
        let strict_result = CoherenceResult::from_meter(&meter, 0.1);
        assert!(!strict_result.is_coherent); // 0.333 > 0.1
    }
1799
    #[test]
    fn test_coherence_result_drift_rate() {
        // drift_rate = violations / transitions: 25 / 100 = 0.25.
        let result = CoherenceResult {
            transitions: 100,
            violations: 25,
            is_coherent: true,
        };
        assert!((result.drift_rate() - 0.25).abs() < 0.001);
    }
1809
1810    // -------------------------------------------------------------------------
1811    // Phase 4: Advanced Feature Tests
1812    // -------------------------------------------------------------------------
1813
1814    #[test]
1815    fn test_torus3d_distance_self() {
1816        let d = Torus3D::<8>::distance((0, 0, 0), (0, 0, 0));
1817        assert_eq!(d, 0);
1818    }
1819
1820    #[test]
1821    fn test_torus3d_distance_adjacent() {
1822        let d = Torus3D::<8>::distance((0, 0, 0), (1, 0, 0));
1823        assert_eq!(d, 1);
1824    }
1825
1826    #[test]
1827    fn test_torus3d_distance_wraparound() {
1828        // On 8x8x8 torus, (0,0,0) to (7,0,0) should wrap to distance 1
1829        let d = Torus3D::<8>::distance((0, 0, 0), (7, 0, 0));
1830        assert_eq!(d, 1);
1831    }
1832
1833    #[test]
1834    fn test_torus3d_max_distance() {
1835        // Max distance on 8x8x8 torus is 4+4+4=12
1836        let d = Torus3D::<8>::distance((0, 0, 0), (4, 4, 4));
1837        assert_eq!(d, 12);
1838    }
1839
1840    #[test]
1841    fn test_torus3d_coord_roundtrip() {
1842        for idx in 0..512 {
1843            let (x, y, z) = Torus3D::<8>::to_coords(idx);
1844            let back = Torus3D::<8>::to_index(x, y, z);
1845            assert_eq!(idx, back, "Roundtrip failed for index {}", idx);
1846        }
1847    }
1848
1849    #[test]
1850    fn test_multi_scale_tonnetz_default() {
1851        let ms = MultiScaleTonnetz::default();
1852        assert_eq!(ms.scales, [6, 12, 24]);
1853    }
1854
1855    #[test]
1856    fn test_multi_scale_distance_same_point() {
1857        let ms = MultiScaleTonnetz::default();
1858        let d = ms.distance((0, 0), (0, 0));
1859        assert_eq!(d, 0.0);
1860    }
1861
1862    #[test]
1863    fn test_multi_scale_distance_weighted() {
1864        let ms = MultiScaleTonnetz::new([6, 12, 24], [1.0, 0.0, 0.0]);
1865        // Only use 6x6 scale
1866        let d = ms.distance((0, 0), (3, 3));
1867        // On 6x6 torus, (0,0) to (3,3) = 3+3 = 6
1868        assert_eq!(d, 6.0);
1869    }
1870
1871    #[test]
1872    fn test_learned_projection() {
1873        let proj = LearnedProjection::new(4, 12);
1874        let embedding = vec![1.0, 0.5, -0.5, 0.2];
1875        let (row, col) = proj.project(&embedding);
1876        assert!(row < 12);
1877        assert!(col < 12);
1878    }
1879
1880    #[test]
1881    fn test_adjacency_loss_positive_pairs() {
1882        let mut loss = AdjacencyLoss::<12>::new(0.5);
1883        loss.record_positive((0, 0), (1, 1)); // distance 2
1884        loss.record_positive((0, 0), (0, 1)); // distance 1
1885        // Mean positive distance = 1.5
1886        let l = loss.loss();
1887        assert!((l - 1.5).abs() < 0.001);
1888    }
1889
1890    #[test]
1891    fn test_adjacency_loss_with_negatives() {
1892        let mut loss = AdjacencyLoss::<12>::new(0.5);
1893        loss.record_positive((0, 0), (1, 0)); // distance 1
1894        loss.record_negative((0, 0), (6, 6)); // distance 12
1895        // Loss = 1 - 0.5 * 12 = -5
1896        let l = loss.loss();
1897        assert!((l - (-5.0)).abs() < 0.001);
1898    }
1899
1900    #[test]
1901    fn test_sparse_mask_from_toroidal() {
1902        let mask = ToroidalMask::hard_cutoff(16, 1.0, 4);
1903        let sparse = SparseMask::from_toroidal(&mask, 0.5);
1904        // Hard cutoff with radius 1 on 4x4 grid should have limited non-zeros
1905        assert!(sparse.nnz() < 16 * 16);
1906        assert!(sparse.sparsity() > 0.0);
1907    }
1908
1909    #[test]
1910    fn test_sparse_mask_get() {
1911        let mask = ToroidalMask::hard_cutoff(16, 1.0, 4);
1912        let dense = mask.generate();
1913        let sparse = SparseMask::from_toroidal(&mask, 0.5);
1914
1915        // Spot check some values
1916        for i in 0..16 {
1917            for j in 0..16 {
1918                let dense_val = dense[i][j];
1919                let sparse_val = sparse.get(i, j);
1920                if dense_val > 0.5 {
1921                    assert!((dense_val - sparse_val).abs() < 0.001);
1922                } else {
1923                    assert_eq!(sparse_val, 0.0);
1924                }
1925            }
1926        }
1927    }
1928
1929    #[test]
1930    fn test_sparse_mask_memory() {
1931        let mask = ToroidalMask::soft_exponential(64, 2.0, 12);
1932        let sparse = SparseMask::from_toroidal(&mask, 0.1);
1933        let dense_bytes = 64 * 64 * 4; // f32 = 4 bytes
1934        let sparse_bytes = sparse.memory_bytes();
1935        // Sparse should use less memory if sufficiently sparse
1936        if sparse.sparsity() > 0.5 {
1937            assert!(sparse_bytes < dense_bytes);
1938        }
1939    }
1940
1941    // -------------------------------------------------------------------------
1942    // Grounding Projector Tests
1943    // -------------------------------------------------------------------------
1944
1945    #[test]
1946    fn test_grounding_projector_single_vector() {
1947        // Single evidence vector [1, 0, 0] — projects onto x-axis
1948        let e1: Vec<f32> = vec![1.0, 0.0, 0.0];
1949        let evidence: Vec<&[f32]> = vec![e1.as_slice()];
1950        let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
1951
1952        // x = [3, 4, 5] → grounded = [3, 0, 0], hallucinated = [0, 4, 5]
1953        let x = vec![3.0, 4.0, 5.0];
1954        let g = proj.project_grounded(&x);
1955        assert!((g[0] - 3.0).abs() < 1e-5);
1956        assert!((g[1]).abs() < 1e-5);
1957        assert!((g[2]).abs() < 1e-5);
1958    }
1959
1960    #[test]
1961    fn test_grounding_projector_two_vectors() {
1962        // Evidence spans xy-plane
1963        let e1: Vec<f32> = vec![1.0, 0.0, 0.0];
1964        let e2: Vec<f32> = vec![0.0, 1.0, 0.0];
1965        let evidence: Vec<&[f32]> = vec![e1.as_slice(), e2.as_slice()];
1966        let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
1967
1968        // x = [3, 4, 5] → grounded = [3, 4, 0], hallucinated = [0, 0, 5]
1969        let x = vec![3.0, 4.0, 5.0];
1970        let g = proj.project_grounded(&x);
1971        let h = proj.project_hallucinated(&x);
1972        assert!((g[0] - 3.0).abs() < 1e-5);
1973        assert!((g[1] - 4.0).abs() < 1e-5);
1974        assert!((g[2]).abs() < 1e-5);
1975        assert!((h[0]).abs() < 1e-5);
1976        assert!((h[1]).abs() < 1e-5);
1977        assert!((h[2] - 5.0).abs() < 1e-5);
1978    }
1979
1980    #[test]
1981    fn test_grounding_projector_idempotent() {
1982        // G² = G (projection property)
1983        let e1: Vec<f32> = vec![1.0, 1.0, 0.0, 0.0];
1984        let e2: Vec<f32> = vec![0.0, 0.0, 1.0, 1.0];
1985        let evidence: Vec<&[f32]> = vec![e1.as_slice(), e2.as_slice()];
1986        let proj = GroundingProjector::from_evidence(&evidence, 4).unwrap();
1987
1988        assert!(proj.verify_idempotent(1e-5));
1989    }
1990
1991    #[test]
1992    fn test_grounding_projector_symmetric() {
1993        // Gᵀ = G (orthogonal projection)
1994        let e1: Vec<f32> = vec![1.0, 2.0, 3.0];
1995        let e2: Vec<f32> = vec![4.0, 5.0, 6.0];
1996        let evidence: Vec<&[f32]> = vec![e1.as_slice(), e2.as_slice()];
1997        let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
1998
1999        assert!(proj.verify_symmetric(1e-5));
2000    }
2001
2002    #[test]
2003    fn test_grounding_fully_grounded() {
2004        // Vector in evidence subspace → score = 0
2005        let e1: Vec<f32> = vec![1.0, 0.0, 0.0];
2006        let e2: Vec<f32> = vec![0.0, 1.0, 0.0];
2007        let evidence: Vec<&[f32]> = vec![e1.as_slice(), e2.as_slice()];
2008        let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
2009
2010        let x = vec![3.0, 4.0, 0.0]; // lies in xy-plane
2011        let score = proj.hallucination_score(&x);
2012        assert!(score < 1e-5, "Expected ~0, got {}", score);
2013    }
2014
2015    #[test]
2016    fn test_grounding_fully_hallucinated() {
2017        // Vector orthogonal to evidence → score = 1
2018        let e1: Vec<f32> = vec![1.0, 0.0, 0.0];
2019        let e2: Vec<f32> = vec![0.0, 1.0, 0.0];
2020        let evidence: Vec<&[f32]> = vec![e1.as_slice(), e2.as_slice()];
2021        let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
2022
2023        let x = vec![0.0, 0.0, 5.0]; // along z-axis
2024        let score = proj.hallucination_score(&x);
2025        assert!((score - 1.0).abs() < 1e-5, "Expected ~1, got {}", score);
2026    }
2027
2028    #[test]
2029    fn test_grounding_partial() {
2030        // Vector partially in evidence subspace
2031        let e1: Vec<f32> = vec![1.0, 0.0, 0.0];
2032        let evidence: Vec<&[f32]> = vec![e1.as_slice()];
2033        let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
2034
2035        let x = vec![1.0, 1.0, 0.0]; // half grounded
2036        let score = proj.hallucination_score(&x);
2037        // ‖hallucinated‖ = ‖[0,1,0]‖ = 1, ‖x‖ = √2 → score = 1/√2 ≈ 0.707
2038        assert!((score - 0.7071).abs() < 0.01, "Expected ~0.707, got {}", score);
2039    }
2040
2041    #[test]
2042    fn test_grounding_decompose() {
2043        let e1: Vec<f32> = vec![1.0, 0.0, 0.0];
2044        let evidence: Vec<&[f32]> = vec![e1.as_slice()];
2045        let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
2046
2047        let x = vec![3.0, 4.0, 0.0];
2048        let decomp = proj.decompose(&x);
2049
2050        assert!((decomp.grounded[0] - 3.0).abs() < 1e-5);
2051        assert!((decomp.hallucinated[1] - 4.0).abs() < 1e-5);
2052        // Pythagorean: grounding² + hallucination² ≈ 1
2053        let sum_sq = decomp.grounding_ratio.powi(2) + decomp.hallucination_ratio.powi(2);
2054        assert!((sum_sq - 1.0).abs() < 1e-4, "Pythagorean check failed: {}", sum_sq);
2055    }
2056
2057    #[test]
2058    fn test_grounding_empty_evidence() {
2059        let evidence: Vec<&[f32]> = vec![];
2060        let result = GroundingProjector::from_evidence(&evidence, 3);
2061        assert!(result.is_none());
2062    }
2063
2064    #[test]
2065    fn test_grounding_non_orthogonal_evidence() {
2066        // Non-orthogonal evidence — (AᵀA)⁻¹ handles this
2067        let e1: Vec<f32> = vec![1.0, 1.0, 0.0];
2068        let e2: Vec<f32> = vec![1.0, 0.0, 0.0];
2069        let evidence: Vec<&[f32]> = vec![e1.as_slice(), e2.as_slice()];
2070        let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
2071
2072        // Still idempotent and symmetric
2073        assert!(proj.verify_idempotent(1e-4));
2074        assert!(proj.verify_symmetric(1e-4));
2075
2076        // Rank should be 2 (spans xy-plane)
2077        assert_eq!(proj.rank(), 2);
2078
2079        // z-axis should be fully hallucinated
2080        let z = vec![0.0, 0.0, 1.0];
2081        let score = proj.hallucination_score(&z);
2082        assert!((score - 1.0).abs() < 1e-4);
2083    }
2084
2085    #[test]
2086    fn test_invert_matrix_2x2() {
2087        // [2, 1; 1, 1]⁻¹ = [1, -1; -1, 2]
2088        let m = vec![2.0, 1.0, 1.0, 1.0];
2089        let inv = invert_matrix(&m, 2).unwrap();
2090        assert!((inv[0] - 1.0).abs() < 1e-5);
2091        assert!((inv[1] - (-1.0)).abs() < 1e-5);
2092        assert!((inv[2] - (-1.0)).abs() < 1e-5);
2093        assert!((inv[3] - 2.0).abs() < 1e-5);
2094    }
2095
2096    #[test]
2097    fn test_invert_singular_matrix() {
2098        // Singular matrix → None
2099        let m = vec![1.0, 2.0, 2.0, 4.0];
2100        assert!(invert_matrix(&m, 2).is_none());
2101    }
2102
2103    // -------------------------------------------------------------------------
2104    // Karmonic Filter Tests
2105    // -------------------------------------------------------------------------
2106
2107    #[test]
2108    fn test_karmonic_filter_creation() {
2109        let filter = KarmonicFilter::new(12, 6);
2110        assert_eq!(filter.n_modes(), 6);
2111        assert_eq!(filter.grid_size(), 12);
2112    }
2113
2114    #[test]
2115    fn test_karmonic_mode1_preserved() {
2116        // Mode 1 should have weight ≈ 0 (preserved, not regularized)
2117        let filter = KarmonicFilter::new(12, 6);
2118        assert!(filter.weight(0) < 0.001, "Mode 1 weight should be ~0, got {}", filter.weight(0));
2119    }
2120
2121    #[test]
2122    fn test_karmonic_weights_monotonic() {
2123        // Weights should increase with mode index
2124        let filter = KarmonicFilter::new(12, 6);
2125        for i in 1..filter.n_modes() {
2126            assert!(
2127                filter.weight(i) >= filter.weight(i - 1),
2128                "Weights not monotonic: w[{}]={} < w[{}]={}",
2129                i, filter.weight(i), i - 1, filter.weight(i - 1)
2130            );
2131        }
2132    }
2133
2134    #[test]
2135    fn test_karmonic_highest_mode_is_one() {
2136        // Highest mode should have weight = 1.0 (maximum regularization)
2137        let filter = KarmonicFilter::new(12, 6);
2138        let last = filter.n_modes() - 1;
2139        assert!(
2140            (filter.weight(last) - 1.0).abs() < 0.001,
2141            "Last mode weight should be ~1.0, got {}",
2142            filter.weight(last)
2143        );
2144    }
2145
2146    #[test]
2147    fn test_karmonic_eigenvalues_positive() {
2148        let filter = KarmonicFilter::new(12, 6);
2149        for (i, &lam) in filter.eigenvalues().iter().enumerate() {
2150            assert!(lam > 0.0, "Eigenvalue {} should be positive, got {}", i, lam);
2151        }
2152    }
2153
2154    #[test]
2155    fn test_karmonic_eigenvalue_formula() {
2156        // λ_1 for N=12: 2 - 2·cos(2π/12) = 2 - 2·cos(π/6) = 2 - √3 ≈ 0.268
2157        let filter = KarmonicFilter::new(12, 6);
2158        let expected = 2.0 - 3.0_f32.sqrt();
2159        assert!(
2160            (filter.eigenvalue(0) - expected).abs() < 0.01,
2161            "λ₁ should be ~{}, got {}", expected, filter.eigenvalue(0)
2162        );
2163    }
2164
2165    #[test]
2166    fn test_karmonic_spectral_gap_matches_tonnetz() {
2167        // Karmonic eigenvalue(0) should match Tonnetz spectral gap
2168        let filter = KarmonicFilter::new(12, 6);
2169        let tonnetz_gap = Tonnetz::<12>::spectral_gap();
2170        assert!(
2171            (filter.eigenvalue(0) - tonnetz_gap).abs() < 0.01,
2172            "Karmonic λ₁={} should match Tonnetz gap={}",
2173            filter.eigenvalue(0), tonnetz_gap
2174        );
2175    }
2176
2177    #[test]
2178    fn test_karmonic_uniformity_loss_batch_too_small() {
2179        let filter = KarmonicFilter::new(12, 6);
2180        // Batch of 1 should return 0
2181        let embeddings = vec![vec![0.0; 24]]; // 2 * 2 * 6 = 24
2182        let loss = filter.uniformity_loss(&embeddings, 2, 2.0);
2183        assert_eq!(loss, 0.0);
2184    }
2185
2186    #[test]
2187    fn test_karmonic_uniformity_loss_finite() {
2188        let filter = KarmonicFilter::new(12, 6);
2189        let embeddings = vec![
2190            vec![1.0, 0.0, 0.5, 0.5, -0.3, 0.2, 0.1, -0.1, 0.4, 0.3, -0.2, 0.1,
2191                 0.3, 0.1, 0.2, -0.4, 0.1, 0.5, -0.1, 0.2, 0.3, -0.3, 0.4, 0.1],
2192            vec![0.0, 1.0, 0.4, 0.6, -0.1, 0.3, 0.2, -0.2, 0.3, 0.4, -0.1, 0.2,
2193                 0.2, 0.3, 0.1, -0.3, 0.2, 0.4, -0.2, 0.3, 0.2, -0.2, 0.3, 0.2],
2194            vec![0.5, 0.5, 0.3, 0.7, -0.2, 0.1, 0.3, -0.3, 0.2, 0.5, -0.3, 0.3,
2195                 0.1, 0.2, 0.3, -0.2, 0.3, 0.3, -0.3, 0.1, 0.1, -0.1, 0.5, 0.3],
2196        ];
2197        let loss = filter.uniformity_loss(&embeddings, 2, 2.0);
2198        assert!(loss.is_finite(), "Loss should be finite, got {}", loss);
2199    }
2200
2201    #[test]
2202    fn test_karmonic_identical_embeddings_low_loss() {
2203        // Identical embeddings → distances are 0 → uniformity is high (low loss)
2204        let filter = KarmonicFilter::new(12, 3);
2205        let emb = vec![0.5; 12]; // 2 * 2 * 3 = 12
2206        let embeddings = vec![emb.clone(), emb.clone(), emb.clone()];
2207        let loss = filter.uniformity_loss(&embeddings, 2, 2.0);
2208        assert!(loss.is_finite());
2209    }
2210
2211    #[test]
2212    fn test_fourier_expand_dims() {
2213        let angles = vec![1.0, 2.0]; // 2 circles
2214        let embed = fourier_expand(&angles, 6);
2215        assert_eq!(embed.len(), 2 * 2 * 6); // 2k*m = 24
2216    }
2217
2218    #[test]
2219    fn test_fourier_expand_mode1() {
2220        let angles = vec![0.0]; // single circle, angle=0
2221        let embed = fourier_expand(&angles, 3);
2222        // Mode 1: cos(0)=1, sin(0)=0
2223        assert!((embed[0] - 1.0).abs() < 1e-6);
2224        assert!(embed[1].abs() < 1e-6);
2225    }
2226
2227    #[test]
2228    fn test_angles_to_grid() {
2229        let pi = core::f32::consts::PI;
2230        // angle = 0 → grid 0
2231        // angle = π → grid 6 (halfway on N=12)
2232        let coords = angles_to_grid(&[0.0, pi], 12);
2233        assert_eq!(coords[0], 0);
2234        assert_eq!(coords[1], 6);
2235    }
2236
2237    #[test]
2238    fn test_spread_loss_uncorrelated() {
2239        let filter = KarmonicFilter::new(12, 6);
2240        // Generate angles that are roughly uncorrelated
2241        let angles = vec![
2242            vec![0.0, 3.14],
2243            vec![1.57, 0.5],
2244            vec![3.14, 1.57],
2245            vec![4.71, 4.0],
2246        ];
2247        let spread = filter.spread_loss(&angles);
2248        assert!(spread.is_finite());
2249        assert!(spread >= 0.0);
2250    }
2251
2252    #[test]
2253    fn test_spread_loss_single_dim() {
2254        let filter = KarmonicFilter::new(12, 6);
2255        // Single dimension → no pairs → 0
2256        let angles = vec![vec![1.0], vec![2.0]];
2257        assert_eq!(filter.spread_loss(&angles), 0.0);
2258    }
2259
2260    #[test]
2261    fn test_karmonic_different_grid_sizes() {
2262        // Filter should work with different grid sizes.
2263        // n_modes is clamped to grid_size/2 (Nyquist limit).
2264        for &n in &[4, 6, 8, 12, 24] {
2265            let filter = KarmonicFilter::new(n, 4);
2266            let actual_modes = filter.n_modes();
2267            assert!(actual_modes >= 1);
2268            assert!(actual_modes <= n / 2);
2269            // Lowest mode always has weight 0
2270            assert!(filter.weight(0) < 0.001);
2271            // Highest available mode always has weight 1
2272            assert!(
2273                (filter.weight(actual_modes - 1) - 1.0).abs() < 0.001,
2274                "grid_size={}, n_modes={}, highest weight={}",
2275                n, actual_modes, filter.weight(actual_modes - 1)
2276            );
2277        }
2278    }
2279}