1#![cfg_attr(not(feature = "std"), no_std)]
41
42use libm::{expf, cosf, fabsf};
43
44#[cfg(feature = "substrate")]
46use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
47#[cfg(feature = "substrate")]
48use scale_info::TypeInfo;
49
/// Shape of the attention mask on the torus; selects how
/// `ToroidalMask::value` converts a toroidal distance into a mask weight.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "substrate", derive(Encode, Decode, MaxEncodedLen, TypeInfo))]
pub enum MaskType {
    /// 1.0 within `radius`, 0.0 beyond it.
    HardCutoff,
    /// exp(-alpha * distance) everywhere.
    SoftExponential,
    /// 1.0 within `radius`, exponential decay past it.
    Hybrid,
}
89
/// Zero-sized marker for an N x N toroidal grid (Tonnetz). The impl provides
/// wrap-around L1 distances, index/coordinate conversion, and the cycle
/// spectral-gap quantity 2 - 2 cos(2 pi / N).
#[derive(Debug, Clone, Copy)]
pub struct Tonnetz<const N: usize>;
101
102impl<const N: usize> Tonnetz<N> {
103 pub const fn new() -> Self {
105 Self
106 }
107
108 pub const fn size(&self) -> usize {
110 N
111 }
112
113 pub const fn total_positions(&self) -> usize {
115 N * N
116 }
117
118 #[inline]
120 pub const fn to_coords(index: usize) -> (usize, usize) {
121 (index / N, index % N)
122 }
123
124 #[inline]
126 pub const fn to_index(row: usize, col: usize) -> usize {
127 (row % N) * N + (col % N)
128 }
129
130 #[inline]
135 pub fn distance(a: (usize, usize), b: (usize, usize)) -> usize {
136 let dx = a.0.abs_diff(b.0);
137 let dy = a.1.abs_diff(b.1);
138
139 let dx_wrap = if dx > N / 2 { N - dx } else { dx };
140 let dy_wrap = if dy > N / 2 { N - dy } else { dy };
141
142 dx_wrap + dy_wrap
143 }
144
145 #[inline]
147 pub fn distance_linear(i: usize, j: usize) -> usize {
148 Self::distance(Self::to_coords(i), Self::to_coords(j))
149 }
150
151 pub fn spectral_gap() -> f32 {
159 let pi = core::f32::consts::PI;
160 2.0 - 2.0 * cosf(2.0 * pi / N as f32)
161 }
162
163 pub fn decay_rate(t: f32) -> f32 {
167 expf(-Self::spectral_gap() * t)
168 }
169}
170
171impl<const N: usize> Default for Tonnetz<N> {
172 fn default() -> Self {
173 Self::new()
174 }
175}
176
/// Configuration for a distance-based attention mask over `seq_len` tokens
/// laid out on a `grid_size` x `grid_size` torus.
#[derive(Debug, Clone)]
pub struct ToroidalMask {
    /// Number of tokens; `generate` emits a seq_len x seq_len matrix.
    pub seq_len: usize,
    /// Full-strength cutoff distance (used by HardCutoff and Hybrid).
    pub radius: f32,
    /// Exponential decay coefficient (used by SoftExponential and Hybrid).
    pub alpha: f32,
    /// Side length of the torus the token positions are folded onto.
    pub grid_size: usize,
    /// Which of the three mask shapes `value` computes.
    pub mask_type: MaskType,
}
197
198impl ToroidalMask {
199 pub fn new(seq_len: usize, radius: f32, alpha: f32) -> Self {
201 Self::with_grid(seq_len, radius, alpha, 12)
202 }
203
204 pub fn with_grid(seq_len: usize, radius: f32, alpha: f32, grid_size: usize) -> Self {
206 Self {
207 seq_len,
208 radius,
209 alpha,
210 grid_size,
211 mask_type: MaskType::Hybrid,
212 }
213 }
214
215 pub fn hard_cutoff(seq_len: usize, radius: f32, grid_size: usize) -> Self {
217 Self {
218 seq_len,
219 radius,
220 alpha: 0.0,
221 grid_size,
222 mask_type: MaskType::HardCutoff,
223 }
224 }
225
226 pub fn soft_exponential(seq_len: usize, alpha: f32, grid_size: usize) -> Self {
228 Self {
229 seq_len,
230 radius: 0.0,
231 alpha,
232 grid_size,
233 mask_type: MaskType::SoftExponential,
234 }
235 }
236
237 fn toroidal_distance(&self, i: usize, j: usize) -> f32 {
239 let n = self.grid_size;
240 let pos_i = (i % n, (i / n) % n);
241 let pos_j = (j % n, (j / n) % n);
242
243 let dx = pos_i.0.abs_diff(pos_j.0);
244 let dy = pos_i.1.abs_diff(pos_j.1);
245
246 let dx_wrap = if dx > n / 2 { n - dx } else { dx };
247 let dy_wrap = if dy > n / 2 { n - dy } else { dy };
248 (dx_wrap + dy_wrap) as f32
249 }
250
251 pub fn value(&self, i: usize, j: usize) -> f32 {
253 let dist = self.toroidal_distance(i, j);
254
255 match self.mask_type {
256 MaskType::HardCutoff => {
257 if dist <= self.radius { 1.0 } else { 0.0 }
258 }
259 MaskType::SoftExponential => {
260 expf(-self.alpha * dist)
261 }
262 MaskType::Hybrid => {
263 if dist <= self.radius {
264 1.0
265 } else {
266 expf(-self.alpha * (dist - self.radius))
267 }
268 }
269 }
270 }
271
272 #[cfg(feature = "std")]
274 pub fn generate(&self) -> Vec<Vec<f32>> {
275 (0..self.seq_len)
276 .map(|i| (0..self.seq_len).map(|j| self.value(i, j)).collect())
277 .collect()
278 }
279
280 #[cfg(feature = "std")]
282 pub fn generate_doubly_stochastic(&self, iterations: usize) -> Vec<Vec<f32>> {
283 let mask = self.generate();
284 sinkhorn_knopp(mask, iterations)
285 }
286}
287
/// Alternately normalize rows then columns so the matrix approaches a
/// doubly-stochastic one (all row and column sums equal to 1). Sums at or
/// below 1e-10 are left untouched to avoid dividing by (near) zero.
#[cfg(feature = "std")]
pub fn sinkhorn_knopp(mut matrix: Vec<Vec<f32>>, iterations: usize) -> Vec<Vec<f32>> {
    let n = matrix.len();
    if n == 0 {
        return matrix;
    }

    for _ in 0..iterations {
        // Row pass.
        for row in matrix.iter_mut() {
            let total: f32 = row.iter().sum();
            if total > 1e-10 {
                row.iter_mut().for_each(|v| *v /= total);
            }
        }

        // Column pass.
        for col in 0..n {
            let total: f32 = matrix.iter().map(|row| row[col]).sum();
            if total > 1e-10 {
                matrix.iter_mut().for_each(|row| row[col] /= total);
            }
        }
    }

    matrix
}
325
/// Check that every row sum and every column sum of `matrix` is within
/// `tolerance` of 1.0. The empty matrix is trivially doubly stochastic.
/// Assumes a square matrix: column sums scan the first `matrix.len()`
/// entries of each row.
#[cfg(feature = "std")]
pub fn is_doubly_stochastic(matrix: &[Vec<f32>], tolerance: f32) -> bool {
    let n = matrix.len();
    if n == 0 {
        return true;
    }

    // Row sums. This is std-gated code, so the idiomatic `f32::abs` is
    // available; no need to route through the no_std `libm` shim.
    for row in matrix {
        let sum: f32 = row.iter().sum();
        if (sum - 1.0).abs() > tolerance {
            return false;
        }
    }

    // Column sums.
    for j in 0..n {
        let sum: f32 = (0..n).map(|i| matrix[i][j]).sum();
        if (sum - 1.0).abs() > tolerance {
            return false;
        }
    }

    true
}
352
/// Running drift statistics: counts transitions whose toroidal distance
/// exceeds `threshold` (fed by `DriftMeter::record`).
///
/// Derives added for consistency: every other public type in this file
/// derives at least `Debug` and `Clone`.
#[derive(Debug, Clone)]
pub struct DriftMeter {
    /// Max distance a transition may move without counting as drift.
    pub threshold: usize,
    /// Total transitions recorded.
    pub count: usize,
    /// Transitions that exceeded the threshold.
    pub drifts: usize,
}
364
365impl DriftMeter {
366 pub fn new(threshold: usize) -> Self {
368 Self {
369 threshold,
370 count: 0,
371 drifts: 0,
372 }
373 }
374
375 pub fn record<const N: usize>(&mut self, pred: usize, target: usize) {
377 let dist = Tonnetz::<N>::distance_linear(pred, target);
378 self.count += 1;
379 if dist > self.threshold {
380 self.drifts += 1;
381 }
382 }
383
384 pub fn rate(&self) -> f32 {
386 if self.count == 0 {
387 0.0
388 } else {
389 self.drifts as f32 / self.count as f32
390 }
391 }
392
393 pub fn reset(&mut self) {
395 self.count = 0;
396 self.drifts = 0;
397 }
398}
399
/// A (row, col) cell on a torus, stored compactly as two bytes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(feature = "substrate", derive(Encode, Decode, MaxEncodedLen, TypeInfo))]
pub struct ToroidalPosition {
    /// Row coordinate.
    pub row: u8,
    /// Column coordinate.
    pub col: u8,
}
416
417impl ToroidalPosition {
418 pub const fn new(row: u8, col: u8) -> Self {
420 Self { row, col }
421 }
422
423 pub const fn as_tuple(&self) -> (usize, usize) {
425 (self.row as usize, self.col as usize)
426 }
427
428 pub fn distance_to<const N: usize>(&self, other: &Self) -> usize {
430 Tonnetz::<N>::distance(self.as_tuple(), other.as_tuple())
431 }
432}
433
/// Coherence parameters with floats stored as fixed-point hundredths, so the
/// type stays `Eq` and (under the `substrate` feature) SCALE-encodable.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "substrate", derive(Encode, Decode, MaxEncodedLen, TypeInfo))]
pub struct CoherenceConfig {
    /// Torus side length.
    pub grid_size: u8,
    /// Mask radius times 100 (decoded by `radius()`).
    pub radius_scaled: u16,
    /// Mask alpha times 100 (decoded by `alpha()`).
    pub alpha_scaled: u16,
    /// Max toroidal distance a transition may move without counting as drift.
    pub drift_threshold: u8,
    /// Mask shape used by `to_mask`.
    pub mask_type: MaskType,
}
451
452impl Default for CoherenceConfig {
453 fn default() -> Self {
454 Self {
455 grid_size: 12,
456 radius_scaled: 200, alpha_scaled: 100, drift_threshold: 2,
459 mask_type: MaskType::Hybrid,
460 }
461 }
462}
463
impl CoherenceConfig {
    /// Build a config from float radius/alpha.
    ///
    /// The floats are stored as fixed-point hundredths (`x * 100`, truncated
    /// toward zero by the `as u16` cast), so representable values are
    /// limited to [0.00, 655.35] in 0.01 steps.
    pub const fn new(
        grid_size: u8,
        radius: f32,
        alpha: f32,
        drift_threshold: u8,
        mask_type: MaskType,
    ) -> Self {
        Self {
            grid_size,
            radius_scaled: (radius * 100.0) as u16,
            alpha_scaled: (alpha * 100.0) as u16,
            drift_threshold,
            mask_type,
        }
    }

    /// Cutoff radius as a float (decoded from hundredths).
    pub fn radius(&self) -> f32 {
        self.radius_scaled as f32 / 100.0
    }

    /// Decay coefficient as a float (decoded from hundredths).
    pub fn alpha(&self) -> f32 {
        self.alpha_scaled as f32 / 100.0
    }

    /// Materialize a `ToroidalMask` for a sequence of `seq_len` tokens.
    pub fn to_mask(&self, seq_len: usize) -> ToroidalMask {
        ToroidalMask {
            seq_len,
            radius: self.radius(),
            alpha: self.alpha(),
            grid_size: self.grid_size as usize,
            mask_type: self.mask_type,
        }
    }
}
503
/// Summary of a drift-metering run (built by `CoherenceResult::from_meter`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(feature = "substrate", derive(Encode, Decode, MaxEncodedLen, TypeInfo))]
pub struct CoherenceResult {
    /// Total transitions recorded.
    pub transitions: u32,
    /// Transitions that exceeded the drift threshold.
    pub violations: u32,
    /// Whether the drift rate stayed within the allowed maximum.
    pub is_coherent: bool,
}
515
516impl CoherenceResult {
517 pub fn from_meter(meter: &DriftMeter, max_rate: f32) -> Self {
519 let rate = meter.rate();
520 Self {
521 transitions: meter.count as u32,
522 violations: meter.drifts as u32,
523 is_coherent: rate <= max_rate,
524 }
525 }
526
527 pub fn drift_rate(&self) -> f32 {
529 if self.transitions == 0 {
530 0.0
531 } else {
532 self.violations as f32 / self.transitions as f32
533 }
534 }
535}
536
/// Zero-sized marker for an N x N x N (3-D) torus; 3-D companion to `Tonnetz`.
#[derive(Debug, Clone, Copy)]
pub struct Torus3D<const N: usize>;
547
548impl<const N: usize> Torus3D<N> {
549 pub const fn new() -> Self {
551 Self
552 }
553
554 pub const fn total_positions() -> usize {
556 N * N * N
557 }
558
559 #[inline]
561 pub const fn to_coords(index: usize) -> (usize, usize, usize) {
562 let z = index / (N * N);
563 let rem = index % (N * N);
564 let y = rem / N;
565 let x = rem % N;
566 (x, y, z)
567 }
568
569 #[inline]
571 pub const fn to_index(x: usize, y: usize, z: usize) -> usize {
572 (z % N) * N * N + (y % N) * N + (x % N)
573 }
574
575 #[inline]
577 pub fn distance(a: (usize, usize, usize), b: (usize, usize, usize)) -> usize {
578 let dx = a.0.abs_diff(b.0);
579 let dy = a.1.abs_diff(b.1);
580 let dz = a.2.abs_diff(b.2);
581
582 let dx_wrap = if dx > N / 2 { N - dx } else { dx };
583 let dy_wrap = if dy > N / 2 { N - dy } else { dy };
584 let dz_wrap = if dz > N / 2 { N - dz } else { dz };
585
586 dx_wrap + dy_wrap + dz_wrap
587 }
588
589 pub fn spectral_gap() -> f32 {
591 let pi = core::f32::consts::PI;
593 2.0 - 2.0 * cosf(2.0 * pi / N as f32)
594 }
595}
596
597impl<const N: usize> Default for Torus3D<N> {
598 fn default() -> Self {
599 Self::new()
600 }
601}
602
/// Weighted combination of toroidal distances measured at three grid scales.
#[derive(Debug, Clone)]
pub struct MultiScaleTonnetz {
    /// Grid side length at each scale.
    pub scales: [usize; 3],
    /// Per-scale weight applied to that scale's distance.
    pub weights: [f32; 3],
}
613
614impl Default for MultiScaleTonnetz {
615 fn default() -> Self {
616 Self {
617 scales: [6, 12, 24],
618 weights: [0.5, 0.3, 0.2],
619 }
620 }
621}
622
623impl MultiScaleTonnetz {
624 pub fn new(scales: [usize; 3], weights: [f32; 3]) -> Self {
626 Self { scales, weights }
627 }
628
629 pub fn distance(&self, a: (usize, usize), b: (usize, usize)) -> f32 {
631 let mut total = 0.0;
632 for (i, &scale) in self.scales.iter().enumerate() {
633 let d = Self::distance_at_scale(a, b, scale) as f32;
634 total += self.weights[i] * d;
635 }
636 total
637 }
638
639 fn distance_at_scale(a: (usize, usize), b: (usize, usize), n: usize) -> usize {
640 let a_scaled = (a.0 % n, a.1 % n);
642 let b_scaled = (b.0 % n, b.1 % n);
643
644 let dx = a_scaled.0.abs_diff(b_scaled.0);
645 let dy = a_scaled.1.abs_diff(b_scaled.1);
646
647 let dx_wrap = if dx > n / 2 { n - dx } else { dx };
648 let dy_wrap = if dy > n / 2 { n - dy } else { dy };
649
650 dx_wrap + dy_wrap
651 }
652}
653
/// Fixed pseudo-random linear projection from an embedding vector to a 2-D
/// grid cell: two dot products squashed through a sigmoid (see `project`).
#[cfg(feature = "std")]
#[derive(Debug, Clone)]
pub struct LearnedProjection {
    /// Expected embedding length.
    pub input_dim: usize,
    /// Side length of the target grid.
    pub grid_size: usize,
    /// Weight vector producing the row coordinate.
    pub w1: Vec<f32>,
    /// Weight vector producing the column coordinate.
    pub w2: Vec<f32>,
}
672
#[cfg(feature = "std")]
impl LearnedProjection {
    /// Deterministic pseudo-random weights: `w1` uses stride 7, `w2` stride
    /// 13, both scaled by 1/sqrt(input_dim) and centered on zero.
    pub fn new(input_dim: usize, grid_size: usize) -> Self {
        let scale = 1.0 / (input_dim as f32).sqrt();
        let weights = |stride: usize| -> Vec<f32> {
            (0..input_dim)
                .map(|i| ((i * stride) % 100) as f32 * scale / 100.0 - scale / 2.0)
                .collect()
        };
        Self {
            input_dim,
            grid_size,
            w1: weights(7),
            w2: weights(13),
        }
    }

    /// Logistic function 1 / (1 + e^-x).
    fn sigmoid(x: f32) -> f32 {
        1.0 / (1.0 + expf(-x))
    }

    /// Project an embedding to a (row, col) grid cell via two
    /// sigmoid-squashed dot products.
    ///
    /// # Panics
    /// If `embedding.len() != input_dim`.
    pub fn project(&self, embedding: &[f32]) -> (usize, usize) {
        assert_eq!(embedding.len(), self.input_dim);

        let squash = |w: &[f32]| {
            let dot: f32 = w.iter().zip(embedding).map(|(wi, ei)| wi * ei).sum();
            Self::sigmoid(dot)
        };
        let cell = |a: f32| ((a * self.grid_size as f32) as usize) % self.grid_size;

        let row = cell(squash(&self.w1));
        let col = cell(squash(&self.w2));
        (row, col)
    }
}
710
/// Running contrastive statistic over toroidal distances on an N x N grid:
/// positive pairs should be close, negative pairs far (see `loss`).
#[derive(Debug, Clone)]
pub struct AdjacencyLoss<const N: usize> {
    /// Weight of the negative-pair term in the loss.
    pub lambda: f32,
    /// Sum of distances over recorded positive pairs.
    positive_sum: f32,
    /// Number of positive pairs recorded.
    positive_count: usize,
    /// Sum of distances over recorded negative pairs.
    negative_sum: f32,
    /// Number of negative pairs recorded.
    negative_count: usize,
}
727
728impl<const N: usize> AdjacencyLoss<N> {
729 pub fn new(lambda: f32) -> Self {
731 Self {
732 lambda,
733 positive_sum: 0.0,
734 positive_count: 0,
735 negative_sum: 0.0,
736 negative_count: 0,
737 }
738 }
739
740 pub fn record_positive(&mut self, a: (usize, usize), b: (usize, usize)) {
742 let d = Tonnetz::<N>::distance(a, b) as f32;
743 self.positive_sum += d;
744 self.positive_count += 1;
745 }
746
747 pub fn record_negative(&mut self, a: (usize, usize), c: (usize, usize)) {
749 let d = Tonnetz::<N>::distance(a, c) as f32;
750 self.negative_sum += d;
751 self.negative_count += 1;
752 }
753
754 pub fn loss(&self) -> f32 {
758 let pos_mean = if self.positive_count > 0 {
759 self.positive_sum / self.positive_count as f32
760 } else {
761 0.0
762 };
763
764 let neg_mean = if self.negative_count > 0 {
765 self.negative_sum / self.negative_count as f32
766 } else {
767 0.0
768 };
769
770 pos_mean - self.lambda * neg_mean
771 }
772
773 pub fn reset(&mut self) {
775 self.positive_sum = 0.0;
776 self.positive_count = 0;
777 self.negative_sum = 0.0;
778 self.negative_count = 0;
779 }
780}
781
/// Compressed sparse row (CSR) storage of a thresholded mask matrix.
#[cfg(feature = "std")]
#[derive(Debug, Clone)]
pub struct SparseMask {
    /// The logical matrix is size x size.
    pub size: usize,
    /// CSR row pointers: row i's entries live at row_ptr[i]..row_ptr[i+1].
    pub row_ptr: Vec<usize>,
    /// Column index of each stored entry.
    pub col_idx: Vec<usize>,
    /// Value of each stored entry.
    pub values: Vec<f32>,
}
797
#[cfg(feature = "std")]
impl SparseMask {
    /// Build CSR storage from a dense matrix, keeping entries strictly
    /// greater than `threshold`.
    pub fn from_dense(dense: &[Vec<f32>], threshold: f32) -> Self {
        let size = dense.len();
        // row_ptr always ends up with size + 1 entries; reserve up front.
        let mut row_ptr = Vec::with_capacity(size + 1);
        row_ptr.push(0);
        let mut col_idx = Vec::new();
        let mut values = Vec::new();

        for row in dense {
            for (j, &val) in row.iter().enumerate() {
                if val > threshold {
                    col_idx.push(j);
                    values.push(val);
                }
            }
            // End-of-row marker: cumulative count of stored entries.
            row_ptr.push(col_idx.len());
        }

        Self { size, row_ptr, col_idx, values }
    }

    /// Generate the dense toroidal mask, then sparsify it.
    pub fn from_toroidal(mask: &ToroidalMask, threshold: f32) -> Self {
        let dense = mask.generate();
        Self::from_dense(&dense, threshold)
    }

    /// Number of stored (non-zero) entries.
    pub fn nnz(&self) -> usize {
        self.values.len()
    }

    /// Fraction of the full size x size matrix that is NOT stored
    /// (0.0 for an empty matrix).
    pub fn sparsity(&self) -> f32 {
        let total = self.size * self.size;
        if total == 0 {
            0.0
        } else {
            1.0 - (self.nnz() as f32 / total as f32)
        }
    }

    /// Value at (i, j); 0.0 for out-of-range rows or unstored entries.
    pub fn get(&self, i: usize, j: usize) -> f32 {
        if i >= self.size {
            return 0.0;
        }

        let start = self.row_ptr[i];
        let end = self.row_ptr[i + 1];
        self.col_idx[start..end]
            .iter()
            .position(|&c| c == j)
            .map_or(0.0, |k| self.values[start + k])
    }

    /// Approximate heap footprint of the three CSR arrays in bytes.
    ///
    /// Uses `size_of::<usize>()` rather than a hard-coded 8, so the figure
    /// is also correct on 32-bit targets.
    pub fn memory_bytes(&self) -> usize {
        let idx = core::mem::size_of::<usize>();
        self.row_ptr.len() * idx
            + self.col_idx.len() * idx
            + self.values.len() * core::mem::size_of::<f32>()
    }
}
864
/// Orthogonal projector onto the span of a set of evidence vectors, stored
/// as a dense dim x dim matrix G (built by `from_evidence`).
#[cfg(feature = "std")]
#[derive(Debug, Clone)]
pub struct GroundingProjector {
    /// Ambient vector dimension.
    dim: usize,
    /// Number of evidence vectors (assumed linearly independent).
    rank: usize,
    /// Row-major dim x dim projection matrix G.
    matrix: Vec<f32>,
}
895
#[cfg(feature = "std")]
impl GroundingProjector {
    /// Build the orthogonal projector G = A^T (A A^T)^{-1} A, where the rows
    /// of A are the `evidence` vectors. G maps any vector onto the span of
    /// the evidence (the "grounded" subspace).
    ///
    /// Returns `None` when there is no evidence, `dim` is zero, any vector
    /// has the wrong length, or A A^T is numerically singular (i.e. the
    /// evidence vectors are linearly dependent).
    pub fn from_evidence(evidence: &[&[f32]], dim: usize) -> Option<Self> {
        let m = evidence.len();
        if m == 0 || dim == 0 {
            return None;
        }

        // Every evidence vector must live in R^dim.
        for v in evidence {
            if v.len() != dim {
                return None;
            }
        }

        // Gram matrix A A^T (m x m, symmetric): pairwise dot products.
        let mut ata = vec![0.0f32; m * m];
        for i in 0..m {
            for j in i..m {
                let dot: f32 = evidence[i]
                    .iter()
                    .zip(evidence[j].iter())
                    .map(|(a, b)| a * b)
                    .sum();
                ata[i * m + j] = dot;
                ata[j * m + i] = dot;
            }
        }

        // `?` propagates None when the Gram matrix is singular.
        let ata_inv = invert_matrix(&ata, m)?;

        // b = (A A^T)^{-1} A  (m x dim).
        let mut b = vec![0.0f32; m * dim];
        for i in 0..m {
            for k in 0..dim {
                let mut sum = 0.0f32;
                for j in 0..m {
                    sum += ata_inv[i * m + j] * evidence[j][k];
                }
                b[i * dim + k] = sum;
            }
        }

        // G = A^T b  (dim x dim).
        let mut g = vec![0.0f32; dim * dim];
        for p in 0..dim {
            for q in 0..dim {
                let mut sum = 0.0f32;
                for i in 0..m {
                    sum += evidence[i][p] * b[i * dim + q];
                }
                g[p * dim + q] = sum;
            }
        }

        Some(Self {
            dim,
            rank: m,
            matrix: g,
        })
    }

    /// G x: the component of `x` inside the evidence span.
    ///
    /// # Panics
    /// If `x.len() != dim`.
    pub fn project_grounded(&self, x: &[f32]) -> Vec<f32> {
        assert_eq!(x.len(), self.dim);
        let mut result = vec![0.0f32; self.dim];
        for i in 0..self.dim {
            let mut sum = 0.0f32;
            for j in 0..self.dim {
                sum += self.matrix[i * self.dim + j] * x[j];
            }
            result[i] = sum;
        }
        result
    }

    /// (I - G) x: the residual of `x` orthogonal to the evidence span.
    pub fn project_hallucinated(&self, x: &[f32]) -> Vec<f32> {
        let grounded = self.project_grounded(x);
        x.iter()
            .zip(grounded.iter())
            .map(|(xi, gi)| xi - gi)
            .collect()
    }

    /// ||(I - G) x|| / ||x||: the fraction of `x`'s norm that lies outside
    /// the evidence span. Returns 0.0 for (near-)zero `x`.
    pub fn hallucination_score(&self, x: &[f32]) -> f32 {
        let x_norm_sq: f32 = x.iter().map(|v| v * v).sum();
        if x_norm_sq < 1e-12 {
            return 0.0;
        }

        let hallucinated = self.project_hallucinated(x);
        let h_norm_sq: f32 = hallucinated.iter().map(|v| v * v).sum();

        libm::sqrtf(h_norm_sq / x_norm_sq)
    }

    /// Split `x` into grounded + hallucinated parts plus their norm ratios.
    pub fn decompose(&self, x: &[f32]) -> GroundingDecomposition {
        let grounded = self.project_grounded(x);
        let hallucinated: Vec<f32> = x
            .iter()
            .zip(grounded.iter())
            .map(|(xi, gi)| xi - gi)
            .collect();

        let x_norm = libm::sqrtf(x.iter().map(|v| v * v).sum());
        let g_norm = libm::sqrtf(grounded.iter().map(|v| v * v).sum());
        let h_norm = libm::sqrtf(hallucinated.iter().map(|v| v * v).sum());

        GroundingDecomposition {
            grounded,
            hallucinated,
            grounding_ratio: if x_norm > 1e-12 { g_norm / x_norm } else { 0.0 },
            hallucination_ratio: if x_norm > 1e-12 { h_norm / x_norm } else { 0.0 },
        }
    }

    /// Ambient dimension of the projector.
    pub fn dim(&self) -> usize {
        self.dim
    }

    /// Number of evidence vectors used (the projector's rank when the
    /// evidence is independent).
    pub fn rank(&self) -> usize {
        self.rank
    }

    /// Check G * G == G entrywise within `tolerance` (projector property).
    /// O(dim^3).
    pub fn verify_idempotent(&self, tolerance: f32) -> bool {
        for i in 0..self.dim {
            for j in 0..self.dim {
                let mut g2_ij = 0.0f32;
                for k in 0..self.dim {
                    g2_ij += self.matrix[i * self.dim + k] * self.matrix[k * self.dim + j];
                }
                if fabsf(g2_ij - self.matrix[i * self.dim + j]) > tolerance {
                    return false;
                }
            }
        }
        true
    }

    /// Check G == G^T within `tolerance` (orthogonal-projector property).
    pub fn verify_symmetric(&self, tolerance: f32) -> bool {
        for i in 0..self.dim {
            for j in (i + 1)..self.dim {
                if fabsf(
                    self.matrix[i * self.dim + j] - self.matrix[j * self.dim + i],
                ) > tolerance
                {
                    return false;
                }
            }
        }
        true
    }
}
1079
/// Result of splitting a vector into evidence-supported and residual parts.
#[cfg(feature = "std")]
#[derive(Debug, Clone)]
pub struct GroundingDecomposition {
    /// Component inside the evidence span (G x).
    pub grounded: Vec<f32>,
    /// Residual component ((I - G) x).
    pub hallucinated: Vec<f32>,
    /// ||grounded|| / ||x|| (0.0 for near-zero x).
    pub grounding_ratio: f32,
    /// ||hallucinated|| / ||x|| (0.0 for near-zero x).
    pub hallucination_ratio: f32,
}
1093
/// Invert an n x n matrix (row-major, flat) by Gauss-Jordan elimination with
/// partial pivoting. Returns `None` when a pivot column's best absolute
/// value falls below 1e-10 (the matrix is treated as singular).
#[cfg(feature = "std")]
fn invert_matrix(mat: &[f32], n: usize) -> Option<Vec<f32>> {
    // Augmented [M | I] with 2n columns per row.
    let mut aug = vec![0.0f32; n * 2 * n];
    for i in 0..n {
        for j in 0..n {
            aug[i * 2 * n + j] = mat[i * n + j];
        }
        aug[i * 2 * n + n + i] = 1.0;
    }

    for col in 0..n {
        // Partial pivoting: pick the row with the largest |entry| in this
        // column. std-gated code, so `f32::abs` replaces the libm shim.
        let mut max_val = aug[col * 2 * n + col].abs();
        let mut max_row = col;
        for row in (col + 1)..n {
            let val = aug[row * 2 * n + col].abs();
            if val > max_val {
                max_val = val;
                max_row = row;
            }
        }

        // Numerically singular: give up.
        if max_val < 1e-10 {
            return None;
        }

        // Swap the pivot row into place (slice::swap over the flat buffer).
        if max_row != col {
            for k in 0..(2 * n) {
                aug.swap(col * 2 * n + k, max_row * 2 * n + k);
            }
        }

        // Scale the pivot row so the pivot becomes 1.
        let pivot = aug[col * 2 * n + col];
        for k in 0..(2 * n) {
            aug[col * 2 * n + k] /= pivot;
        }

        // Eliminate this column from every other row.
        for row in 0..n {
            if row == col {
                continue;
            }
            let factor = aug[row * 2 * n + col];
            for k in 0..(2 * n) {
                aug[row * 2 * n + k] -= factor * aug[col * 2 * n + k];
            }
        }
    }

    // The right half of the augmented matrix now holds M^{-1}.
    let mut inv = vec![0.0f32; n * n];
    for i in 0..n {
        for j in 0..n {
            inv[i * n + j] = aug[i * 2 * n + n + j];
        }
    }

    Some(inv)
}
1160
/// Per-mode eigenvalues and normalized weights of the cycle-graph Laplacian,
/// used to weight the Fourier-mode regularization losses below.
#[derive(Debug, Clone)]
pub struct KarmonicFilter {
    /// Torus side length N.
    grid_size: usize,
    /// Number of Fourier modes kept (clamped in `new`).
    n_modes: usize,
    /// lambda_n = 2 - 2 cos(2 pi n / N) for n = 1..=n_modes.
    eigenvalues: Vec<f32>,
    /// Eigenvalues min-max normalized into [0, 1].
    weights: Vec<f32>,
}
1225
impl KarmonicFilter {
    /// Precompute cycle-Laplacian eigenvalues for modes 1..=n_modes on a
    /// grid of side `grid_size`, plus per-mode weights min-max normalized
    /// to [0, 1]. `n_modes` is clamped to [1, grid_size / 2].
    ///
    /// # Panics
    /// If `grid_size < 2` or `n_modes < 1`.
    pub fn new(grid_size: usize, n_modes: usize) -> Self {
        assert!(grid_size >= 2, "grid_size must be >= 2");
        assert!(n_modes >= 1, "n_modes must be >= 1");

        // Clamp to grid_size / 2 modes, but keep at least one.
        let n_modes = core::cmp::min(n_modes, grid_size / 2);
        let n_modes = core::cmp::max(n_modes, 1);
        let pi = core::f32::consts::PI;

        // lambda_n = 2 - 2 cos(2 pi n / N): eigenvalues of the N-cycle
        // Laplacian for modes n = 1..=n_modes.
        let eigenvalues: Vec<f32> = (1..=n_modes)
            .map(|n| 2.0 - 2.0 * cosf(2.0 * pi * n as f32 / grid_size as f32))
            .collect();

        let lam_1 = eigenvalues[0];
        let lam_max = eigenvalues.iter().copied().fold(f32::NEG_INFINITY, f32::max);

        // Min-max normalize the eigenvalues into weights; a degenerate
        // spread (single mode) collapses to all-zero weights.
        let weights = if (lam_max - lam_1).abs() < 1e-10 {
            vec![0.0; n_modes]
        } else {
            eigenvalues
                .iter()
                .map(|&lam| (lam - lam_1) / (lam_max - lam_1))
                .collect()
        };

        Self {
            grid_size,
            n_modes,
            eigenvalues,
            weights,
        }
    }

    /// Number of modes actually kept after clamping.
    pub fn n_modes(&self) -> usize {
        self.n_modes
    }

    /// Configured grid side length.
    pub fn grid_size(&self) -> usize {
        self.grid_size
    }

    /// Eigenvalue of mode `idx` (0-based). Panics if `idx >= n_modes`.
    pub fn eigenvalue(&self, idx: usize) -> f32 {
        self.eigenvalues[idx]
    }

    /// Normalized weight of mode `idx` (0-based). Panics if `idx >= n_modes`.
    pub fn weight(&self, idx: usize) -> f32 {
        self.weights[idx]
    }

    /// All per-mode weights.
    pub fn weights(&self) -> &[f32] {
        &self.weights
    }

    /// All per-mode eigenvalues.
    pub fn eigenvalues(&self) -> &[f32] {
        &self.eigenvalues
    }

    /// Weighted uniformity loss over a batch of Fourier embeddings (layout
    /// as produced by `fourier_expand`: `2 * torus_dim` features per mode,
    /// modes concatenated).
    ///
    /// Per mode: for each sample, the log-mean-exp of
    /// `-temperature * squared-distance` to every other sample, averaged
    /// over the batch; modes are summed scaled by their weight. Returns 0.0
    /// for batches smaller than 2.
    ///
    /// # Panics
    /// If any embedding is shorter than `2 * torus_dim * n_modes`.
    #[cfg(feature = "std")]
    pub fn uniformity_loss(
        &self,
        fourier_embeddings: &[Vec<f32>],
        torus_dim: usize,
        temperature: f32,
    ) -> f32 {
        let batch_size = fourier_embeddings.len();
        if batch_size < 2 {
            return 0.0;
        }

        let expected_dim = 2 * torus_dim * self.n_modes;
        // Each mode occupies a contiguous slice of 2 * torus_dim features.
        let slice_size = 2 * torus_dim;
        let mut total = 0.0f32;

        for n in 0..self.n_modes {
            let start = slice_size * n;
            let end = start + slice_size;

            let mut log_sum_total = 0.0f32;

            for i in 0..batch_size {
                assert!(
                    fourier_embeddings[i].len() >= expected_dim,
                    "embedding dim {} < expected {}", fourier_embeddings[i].len(), expected_dim
                );

                let mut neg_dists: Vec<f32> = Vec::with_capacity(batch_size - 1);

                for j in 0..batch_size {
                    if i == j {
                        continue;
                    }

                    // Squared distance between samples i and j restricted to
                    // this mode's feature slice.
                    let sq_dist: f32 = (start..end)
                        .map(|d| {
                            let diff = fourier_embeddings[i][d] - fourier_embeddings[j][d];
                            diff * diff
                        })
                        .sum();

                    neg_dists.push(-temperature * sq_dist);
                }

                // Numerically stable log-sum-exp over the pairwise terms.
                let max_val = neg_dists.iter().copied().fold(f32::NEG_INFINITY, f32::max);
                let lse: f32 = max_val
                    + neg_dists
                        .iter()
                        .map(|&x| expf(x - max_val))
                        .sum::<f32>()
                        .ln();

                // Subtracting log(B - 1) turns the sum into a mean.
                let log_b_minus_1 = ((batch_size - 1) as f32).ln();
                log_sum_total += lse - log_b_minus_1;
            }

            let unif_n = log_sum_total / batch_size as f32;
            total += self.weights[n] * unif_n;
        }

        total
    }

    /// Mean squared correlation between every pair of angle coordinates
    /// across the batch (`angles[sample][coordinate]`).
    ///
    /// For each coordinate pair (i, j): take circular means (atan2 of the
    /// averaged sin/cos), then correlate the sine residuals about those
    /// means. Lower values mean the coordinates vary more independently.
    /// Returns 0.0 for an empty batch or fewer than 2 coordinates.
    ///
    /// NOTE(review): `k` comes from the first sample; a later, shorter row
    /// would panic on indexing. Callers appear to pass equal-length rows —
    /// confirm.
    #[cfg(feature = "std")]
    pub fn spread_loss(&self, angles: &[Vec<f32>]) -> f32 {
        if angles.is_empty() || angles[0].len() < 2 {
            return 0.0;
        }

        let batch_size = angles.len();
        let k = angles[0].len();
        let mut total_corr = 0.0f32;
        let mut n_pairs = 0;

        for i in 0..k {
            for j in (i + 1)..k {
                // Circular mean of coordinate i.
                let sin_mean_i: f32 = angles.iter().map(|a| libm::sinf(a[i])).sum::<f32>() / batch_size as f32;
                let cos_mean_i: f32 = angles.iter().map(|a| cosf(a[i])).sum::<f32>() / batch_size as f32;
                let mu_i = libm::atan2f(sin_mean_i, cos_mean_i);

                // Circular mean of coordinate j.
                let sin_mean_j: f32 = angles.iter().map(|a| libm::sinf(a[j])).sum::<f32>() / batch_size as f32;
                let cos_mean_j: f32 = angles.iter().map(|a| cosf(a[j])).sum::<f32>() / batch_size as f32;
                let mu_j = libm::atan2f(sin_mean_j, cos_mean_j);

                // Correlation of sine residuals about the circular means.
                let mut num = 0.0f32;
                let mut den_i = 0.0f32;
                let mut den_j = 0.0f32;

                for a in angles.iter() {
                    let s_i = libm::sinf(a[i] - mu_i);
                    let s_j = libm::sinf(a[j] - mu_j);
                    num += s_i * s_j;
                    den_i += s_i * s_i;
                    den_j += s_j * s_j;
                }

                num /= batch_size as f32;
                den_i /= batch_size as f32;
                den_j /= batch_size as f32;

                // 1e-8 keeps the denominator nonzero for degenerate data.
                let den = libm::sqrtf(den_i * den_j + 1e-8);
                let corr = num / den;
                total_corr += corr * corr;
                n_pairs += 1;
            }
        }

        if n_pairs > 0 {
            total_corr / n_pairs as f32
        } else {
            0.0
        }
    }
}
1443
1444pub fn fourier_expand(angles: &[f32], n_modes: usize) -> Vec<f32> {
1456 let k = angles.len();
1457 let mut result = Vec::with_capacity(2 * k * n_modes);
1458
1459 for n in 1..=n_modes {
1460 for &theta in angles {
1461 let n_theta = n as f32 * theta;
1462 result.push(cosf(n_theta));
1463 result.push(libm::sinf(n_theta));
1464 }
1465 }
1466
1467 result
1468}
1469
/// Map each angle (radians) to a cell index on a ring of `grid_size` cells.
///
/// The angle is first wrapped onto [0, 1) fractional turns, so negative
/// angles (e.g. `atan2` output in (-pi, pi]) and angles beyond one full
/// turn land on the correct cell. Previously the truncating `as usize`
/// cast saturated every negative angle to cell 0.
pub fn angles_to_grid(angles: &[f32], grid_size: usize) -> Vec<usize> {
    let two_pi = 2.0 * core::f32::consts::PI;
    angles
        .iter()
        .map(|&theta| {
            // Fractional number of turns, wrapped into [0, 1).
            let mut turns = (theta / two_pi) % 1.0;
            if turns < 0.0 {
                turns += 1.0;
            }
            // Scale to a cell; the final `%` guards the grid boundary in
            // case rounding pushes the product up to grid_size.
            ((turns * grid_size as f32) as usize) % grid_size
        })
        .collect()
}
1489
1490#[cfg(test)]
1495mod tests {
1496 use super::*;
1497
1498 #[test]
1503 fn test_tonnetz_distance_self() {
1504 let d = Tonnetz::<12>::distance((5, 5), (5, 5));
1505 assert_eq!(d, 0);
1506 }
1507
1508 #[test]
1509 fn test_tonnetz_distance_adjacent() {
1510 let d = Tonnetz::<12>::distance((0, 0), (0, 1));
1511 assert_eq!(d, 1);
1512 }
1513
1514 #[test]
1515 fn test_tonnetz_distance_wraparound() {
1516 let d = Tonnetz::<12>::distance((0, 0), (0, 11));
1518 assert_eq!(d, 1);
1519 }
1520
1521 #[test]
1522 fn test_tonnetz_distance_diagonal_wrap() {
1523 let d = Tonnetz::<12>::distance((0, 0), (11, 11));
1525 assert_eq!(d, 2); }
1527
1528 #[test]
1533 fn test_distance_symmetry() {
1534 for i in 0..12 {
1536 for j in 0..12 {
1537 for k in 0..12 {
1538 for l in 0..12 {
1539 let d1 = Tonnetz::<12>::distance((i, j), (k, l));
1540 let d2 = Tonnetz::<12>::distance((k, l), (i, j));
1541 assert_eq!(d1, d2, "Symmetry violated at ({},{}) <-> ({},{})", i, j, k, l);
1542 }
1543 }
1544 }
1545 }
1546 }
1547
1548 #[test]
1549 fn test_distance_identity() {
1550 for i in 0..12 {
1552 for j in 0..12 {
1553 let d = Tonnetz::<12>::distance((i, j), (i, j));
1554 assert_eq!(d, 0, "Identity violated at ({},{})", i, j);
1555 }
1556 }
1557 }
1558
1559 #[test]
1560 fn test_triangle_inequality() {
1561 let points = [(0, 0), (3, 5), (7, 2), (11, 11), (6, 6), (1, 10)];
1564 for &a in &points {
1565 for &b in &points {
1566 for &c in &points {
1567 let d_ac = Tonnetz::<12>::distance(a, c);
1568 let d_ab = Tonnetz::<12>::distance(a, b);
1569 let d_bc = Tonnetz::<12>::distance(b, c);
1570 assert!(
1571 d_ac <= d_ab + d_bc,
1572 "Triangle inequality violated: d({:?},{:?})={} > d({:?},{:?})={} + d({:?},{:?})={}",
1573 a, c, d_ac, a, b, d_ab, b, c, d_bc
1574 );
1575 }
1576 }
1577 }
1578 }
1579
1580 #[test]
1581 fn test_distance_non_negative() {
1582 for i in 0..12 {
1584 for j in 0..12 {
1585 for k in 0..12 {
1586 for l in 0..12 {
1587 let d = Tonnetz::<12>::distance((i, j), (k, l));
1588 assert!(d <= 12, "Distance too large at ({},{}) <-> ({},{}): {}", i, j, k, l, d);
1590 }
1591 }
1592 }
1593 }
1594 }
1595
1596 #[test]
1597 fn test_max_distance_bounded() {
1598 let mut max_dist = 0;
1600 for i in 0..12 {
1601 for j in 0..12 {
1602 let d = Tonnetz::<12>::distance((0, 0), (i, j));
1603 if d > max_dist {
1604 max_dist = d;
1605 }
1606 }
1607 }
1608 assert_eq!(max_dist, 12, "Max distance should be 12 (6+6)");
1609 }
1610
1611 #[test]
1616 fn test_spectral_gap_positive() {
1617 let gap = Tonnetz::<12>::spectral_gap();
1618 assert!(gap > 0.0);
1619 assert!(gap < 1.0); }
1621
1622 #[test]
1623 fn test_spectral_gap_scales_with_n() {
1624 let gap_6 = Tonnetz::<6>::spectral_gap();
1626 let gap_12 = Tonnetz::<12>::spectral_gap();
1627 let gap_24 = Tonnetz::<24>::spectral_gap();
1628 assert!(gap_6 > gap_12, "Gap should decrease with N");
1629 assert!(gap_12 > gap_24, "Gap should decrease with N");
1630 }
1631
1632 #[test]
1637 fn test_toroidal_mask_self() {
1638 let mask = ToroidalMask::new(64, 2.0, 1.0);
1639 assert_eq!(mask.value(0, 0), 1.0);
1640 }
1641
1642 #[test]
1643 fn test_toroidal_mask_decay() {
1644 let mask = ToroidalMask::new(64, 1.0, 1.0);
1645 let v_near = mask.value(0, 1);
1646 let v_far = mask.value(0, 5);
1647 assert!(v_near >= v_far);
1648 }
1649
1650 #[test]
1651 fn test_hard_cutoff_mask() {
1652 let mask = ToroidalMask::hard_cutoff(64, 2.0, 12);
1653 assert_eq!(mask.value(0, 0), 1.0);
1655 assert_eq!(mask.value(0, 1), 1.0);
1656 assert_eq!(mask.value(0, 36), 0.0); }
1659
1660 #[test]
1661 fn test_soft_exponential_mask() {
1662 let mask = ToroidalMask::soft_exponential(64, 1.0, 12);
1663 assert!((mask.value(0, 0) - 1.0).abs() < 1e-6);
1665 let v1 = mask.value(0, 1);
1667 assert!((v1 - 0.368).abs() < 0.01);
1668 }
1669
1670 #[test]
1671 fn test_hybrid_mask() {
1672 let mask = ToroidalMask::new(64, 2.0, 1.0);
1673 assert_eq!(mask.value(0, 0), 1.0);
1675 assert_eq!(mask.value(0, 1), 1.0);
1676 }
1679
1680 #[test]
1685 fn test_sinkhorn_knopp_doubly_stochastic() {
1686 let mask = ToroidalMask::new(16, 2.0, 0.5);
1687 let ds = mask.generate_doubly_stochastic(50);
1688 assert!(
1689 is_doubly_stochastic(&ds, 0.01),
1690 "Sinkhorn-Knopp should produce doubly-stochastic matrix"
1691 );
1692 }
1693
1694 #[test]
1695 fn test_sinkhorn_preserves_structure() {
1696 let mask = ToroidalMask::new(16, 2.0, 0.5);
1698 let ds = mask.generate_doubly_stochastic(50);
1699 let diag_avg: f32 = (0..16).map(|i| ds[i][i]).sum::<f32>() / 16.0;
1701 let total_avg: f32 = ds.iter().flat_map(|r| r.iter()).sum::<f32>() / 256.0;
1702 assert!(
1703 diag_avg > total_avg,
1704 "Diagonal should be above average after Sinkhorn"
1705 );
1706 }
1707
1708 #[test]
1713 fn test_drift_meter() {
1714 let mut meter = DriftMeter::new(2);
1715 meter.record::<12>(0, 1); meter.record::<12>(0, 6); meter.record::<12>(0, 0); assert_eq!(meter.count, 3);
1720 assert_eq!(meter.drifts, 1);
1721 assert!((meter.rate() - 0.333).abs() < 0.01);
1722 }
1723
1724 #[test]
1725 fn test_drift_meter_reset() {
1726 let mut meter = DriftMeter::new(2);
1727 meter.record::<12>(0, 6);
1728 meter.reset();
1729 assert_eq!(meter.count, 0);
1730 assert_eq!(meter.drifts, 0);
1731 assert_eq!(meter.rate(), 0.0);
1732 }
1733
1734 #[test]
1739 fn test_coord_conversion_roundtrip() {
1740 for idx in 0..144 {
1741 let coords = Tonnetz::<12>::to_coords(idx);
1742 let back = Tonnetz::<12>::to_index(coords.0, coords.1);
1743 assert_eq!(idx, back, "Roundtrip failed for index {}", idx);
1744 }
1745 }
1746
1747 #[test]
1752 fn test_toroidal_position() {
1753 let pos = ToroidalPosition::new(5, 7);
1754 assert_eq!(pos.as_tuple(), (5, 7));
1755 }
1756
1757 #[test]
1758 fn test_toroidal_position_distance() {
1759 let a = ToroidalPosition::new(0, 0);
1760 let b = ToroidalPosition::new(5, 7);
1761 let dist = a.distance_to::<12>(&b);
1762 assert_eq!(dist, Tonnetz::<12>::distance((0, 0), (5, 7)));
1763 }
1764
1765 #[test]
1766 fn test_coherence_config_default() {
1767 let config = CoherenceConfig::default();
1768 assert_eq!(config.grid_size, 12);
1769 assert!((config.radius() - 2.0).abs() < 0.01);
1770 assert!((config.alpha() - 1.0).abs() < 0.01);
1771 assert_eq!(config.drift_threshold, 2);
1772 assert_eq!(config.mask_type, MaskType::Hybrid);
1773 }
1774
1775 #[test]
1776 fn test_coherence_config_to_mask() {
1777 let config = CoherenceConfig::default();
1778 let mask = config.to_mask(64);
1779 assert_eq!(mask.seq_len, 64);
1780 assert_eq!(mask.grid_size, 12);
1781 assert_eq!(mask.mask_type, MaskType::Hybrid);
1782 }
1783
1784 #[test]
1785 fn test_coherence_result_from_meter() {
1786 let mut meter = DriftMeter::new(2);
1787 meter.record::<12>(0, 1);
1788 meter.record::<12>(0, 6);
1789 meter.record::<12>(0, 0);
1790
1791 let result = CoherenceResult::from_meter(&meter, 0.5);
1792 assert_eq!(result.transitions, 3);
1793 assert_eq!(result.violations, 1);
1794 assert!(result.is_coherent); let strict_result = CoherenceResult::from_meter(&meter, 0.1);
1797 assert!(!strict_result.is_coherent); }
1799
1800 #[test]
1801 fn test_coherence_result_drift_rate() {
1802 let result = CoherenceResult {
1803 transitions: 100,
1804 violations: 25,
1805 is_coherent: true,
1806 };
1807 assert!((result.drift_rate() - 0.25).abs() < 0.001);
1808 }
1809
1810 #[test]
1815 fn test_torus3d_distance_self() {
1816 let d = Torus3D::<8>::distance((0, 0, 0), (0, 0, 0));
1817 assert_eq!(d, 0);
1818 }
1819
1820 #[test]
1821 fn test_torus3d_distance_adjacent() {
1822 let d = Torus3D::<8>::distance((0, 0, 0), (1, 0, 0));
1823 assert_eq!(d, 1);
1824 }
1825
1826 #[test]
1827 fn test_torus3d_distance_wraparound() {
1828 let d = Torus3D::<8>::distance((0, 0, 0), (7, 0, 0));
1830 assert_eq!(d, 1);
1831 }
1832
1833 #[test]
1834 fn test_torus3d_max_distance() {
1835 let d = Torus3D::<8>::distance((0, 0, 0), (4, 4, 4));
1837 assert_eq!(d, 12);
1838 }
1839
1840 #[test]
1841 fn test_torus3d_coord_roundtrip() {
1842 for idx in 0..512 {
1843 let (x, y, z) = Torus3D::<8>::to_coords(idx);
1844 let back = Torus3D::<8>::to_index(x, y, z);
1845 assert_eq!(idx, back, "Roundtrip failed for index {}", idx);
1846 }
1847 }
1848
1849 #[test]
1850 fn test_multi_scale_tonnetz_default() {
1851 let ms = MultiScaleTonnetz::default();
1852 assert_eq!(ms.scales, [6, 12, 24]);
1853 }
1854
1855 #[test]
1856 fn test_multi_scale_distance_same_point() {
1857 let ms = MultiScaleTonnetz::default();
1858 let d = ms.distance((0, 0), (0, 0));
1859 assert_eq!(d, 0.0);
1860 }
1861
1862 #[test]
1863 fn test_multi_scale_distance_weighted() {
1864 let ms = MultiScaleTonnetz::new([6, 12, 24], [1.0, 0.0, 0.0]);
1865 let d = ms.distance((0, 0), (3, 3));
1867 assert_eq!(d, 6.0);
1869 }
1870
1871 #[test]
1872 fn test_learned_projection() {
1873 let proj = LearnedProjection::new(4, 12);
1874 let embedding = vec![1.0, 0.5, -0.5, 0.2];
1875 let (row, col) = proj.project(&embedding);
1876 assert!(row < 12);
1877 assert!(col < 12);
1878 }
1879
1880 #[test]
1881 fn test_adjacency_loss_positive_pairs() {
1882 let mut loss = AdjacencyLoss::<12>::new(0.5);
1883 loss.record_positive((0, 0), (1, 1)); loss.record_positive((0, 0), (0, 1)); let l = loss.loss();
1887 assert!((l - 1.5).abs() < 0.001);
1888 }
1889
1890 #[test]
1891 fn test_adjacency_loss_with_negatives() {
1892 let mut loss = AdjacencyLoss::<12>::new(0.5);
1893 loss.record_positive((0, 0), (1, 0)); loss.record_negative((0, 0), (6, 6)); let l = loss.loss();
1897 assert!((l - (-5.0)).abs() < 0.001);
1898 }
1899
1900 #[test]
1901 fn test_sparse_mask_from_toroidal() {
1902 let mask = ToroidalMask::hard_cutoff(16, 1.0, 4);
1903 let sparse = SparseMask::from_toroidal(&mask, 0.5);
1904 assert!(sparse.nnz() < 16 * 16);
1906 assert!(sparse.sparsity() > 0.0);
1907 }
1908
1909 #[test]
1910 fn test_sparse_mask_get() {
1911 let mask = ToroidalMask::hard_cutoff(16, 1.0, 4);
1912 let dense = mask.generate();
1913 let sparse = SparseMask::from_toroidal(&mask, 0.5);
1914
1915 for i in 0..16 {
1917 for j in 0..16 {
1918 let dense_val = dense[i][j];
1919 let sparse_val = sparse.get(i, j);
1920 if dense_val > 0.5 {
1921 assert!((dense_val - sparse_val).abs() < 0.001);
1922 } else {
1923 assert_eq!(sparse_val, 0.0);
1924 }
1925 }
1926 }
1927 }
1928
1929 #[test]
1930 fn test_sparse_mask_memory() {
1931 let mask = ToroidalMask::soft_exponential(64, 2.0, 12);
1932 let sparse = SparseMask::from_toroidal(&mask, 0.1);
1933 let dense_bytes = 64 * 64 * 4; let sparse_bytes = sparse.memory_bytes();
1935 if sparse.sparsity() > 0.5 {
1937 assert!(sparse_bytes < dense_bytes);
1938 }
1939 }
1940
1941 #[test]
1946 fn test_grounding_projector_single_vector() {
1947 let e1: Vec<f32> = vec![1.0, 0.0, 0.0];
1949 let evidence: Vec<&[f32]> = vec![e1.as_slice()];
1950 let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
1951
1952 let x = vec![3.0, 4.0, 5.0];
1954 let g = proj.project_grounded(&x);
1955 assert!((g[0] - 3.0).abs() < 1e-5);
1956 assert!((g[1]).abs() < 1e-5);
1957 assert!((g[2]).abs() < 1e-5);
1958 }
1959
1960 #[test]
1961 fn test_grounding_projector_two_vectors() {
1962 let e1: Vec<f32> = vec![1.0, 0.0, 0.0];
1964 let e2: Vec<f32> = vec![0.0, 1.0, 0.0];
1965 let evidence: Vec<&[f32]> = vec![e1.as_slice(), e2.as_slice()];
1966 let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
1967
1968 let x = vec![3.0, 4.0, 5.0];
1970 let g = proj.project_grounded(&x);
1971 let h = proj.project_hallucinated(&x);
1972 assert!((g[0] - 3.0).abs() < 1e-5);
1973 assert!((g[1] - 4.0).abs() < 1e-5);
1974 assert!((g[2]).abs() < 1e-5);
1975 assert!((h[0]).abs() < 1e-5);
1976 assert!((h[1]).abs() < 1e-5);
1977 assert!((h[2] - 5.0).abs() < 1e-5);
1978 }
1979
1980 #[test]
1981 fn test_grounding_projector_idempotent() {
1982 let e1: Vec<f32> = vec![1.0, 1.0, 0.0, 0.0];
1984 let e2: Vec<f32> = vec![0.0, 0.0, 1.0, 1.0];
1985 let evidence: Vec<&[f32]> = vec![e1.as_slice(), e2.as_slice()];
1986 let proj = GroundingProjector::from_evidence(&evidence, 4).unwrap();
1987
1988 assert!(proj.verify_idempotent(1e-5));
1989 }
1990
1991 #[test]
1992 fn test_grounding_projector_symmetric() {
1993 let e1: Vec<f32> = vec![1.0, 2.0, 3.0];
1995 let e2: Vec<f32> = vec![4.0, 5.0, 6.0];
1996 let evidence: Vec<&[f32]> = vec![e1.as_slice(), e2.as_slice()];
1997 let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
1998
1999 assert!(proj.verify_symmetric(1e-5));
2000 }
2001
2002 #[test]
2003 fn test_grounding_fully_grounded() {
2004 let e1: Vec<f32> = vec![1.0, 0.0, 0.0];
2006 let e2: Vec<f32> = vec![0.0, 1.0, 0.0];
2007 let evidence: Vec<&[f32]> = vec![e1.as_slice(), e2.as_slice()];
2008 let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
2009
2010 let x = vec![3.0, 4.0, 0.0]; let score = proj.hallucination_score(&x);
2012 assert!(score < 1e-5, "Expected ~0, got {}", score);
2013 }
2014
2015 #[test]
2016 fn test_grounding_fully_hallucinated() {
2017 let e1: Vec<f32> = vec![1.0, 0.0, 0.0];
2019 let e2: Vec<f32> = vec![0.0, 1.0, 0.0];
2020 let evidence: Vec<&[f32]> = vec![e1.as_slice(), e2.as_slice()];
2021 let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
2022
2023 let x = vec![0.0, 0.0, 5.0]; let score = proj.hallucination_score(&x);
2025 assert!((score - 1.0).abs() < 1e-5, "Expected ~1, got {}", score);
2026 }
2027
2028 #[test]
2029 fn test_grounding_partial() {
2030 let e1: Vec<f32> = vec![1.0, 0.0, 0.0];
2032 let evidence: Vec<&[f32]> = vec![e1.as_slice()];
2033 let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
2034
2035 let x = vec![1.0, 1.0, 0.0]; let score = proj.hallucination_score(&x);
2037 assert!((score - 0.7071).abs() < 0.01, "Expected ~0.707, got {}", score);
2039 }
2040
2041 #[test]
2042 fn test_grounding_decompose() {
2043 let e1: Vec<f32> = vec![1.0, 0.0, 0.0];
2044 let evidence: Vec<&[f32]> = vec![e1.as_slice()];
2045 let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
2046
2047 let x = vec![3.0, 4.0, 0.0];
2048 let decomp = proj.decompose(&x);
2049
2050 assert!((decomp.grounded[0] - 3.0).abs() < 1e-5);
2051 assert!((decomp.hallucinated[1] - 4.0).abs() < 1e-5);
2052 let sum_sq = decomp.grounding_ratio.powi(2) + decomp.hallucination_ratio.powi(2);
2054 assert!((sum_sq - 1.0).abs() < 1e-4, "Pythagorean check failed: {}", sum_sq);
2055 }
2056
2057 #[test]
2058 fn test_grounding_empty_evidence() {
2059 let evidence: Vec<&[f32]> = vec![];
2060 let result = GroundingProjector::from_evidence(&evidence, 3);
2061 assert!(result.is_none());
2062 }
2063
2064 #[test]
2065 fn test_grounding_non_orthogonal_evidence() {
2066 let e1: Vec<f32> = vec![1.0, 1.0, 0.0];
2068 let e2: Vec<f32> = vec![1.0, 0.0, 0.0];
2069 let evidence: Vec<&[f32]> = vec![e1.as_slice(), e2.as_slice()];
2070 let proj = GroundingProjector::from_evidence(&evidence, 3).unwrap();
2071
2072 assert!(proj.verify_idempotent(1e-4));
2074 assert!(proj.verify_symmetric(1e-4));
2075
2076 assert_eq!(proj.rank(), 2);
2078
2079 let z = vec![0.0, 0.0, 1.0];
2081 let score = proj.hallucination_score(&z);
2082 assert!((score - 1.0).abs() < 1e-4);
2083 }
2084
2085 #[test]
2086 fn test_invert_matrix_2x2() {
2087 let m = vec![2.0, 1.0, 1.0, 1.0];
2089 let inv = invert_matrix(&m, 2).unwrap();
2090 assert!((inv[0] - 1.0).abs() < 1e-5);
2091 assert!((inv[1] - (-1.0)).abs() < 1e-5);
2092 assert!((inv[2] - (-1.0)).abs() < 1e-5);
2093 assert!((inv[3] - 2.0).abs() < 1e-5);
2094 }
2095
2096 #[test]
2097 fn test_invert_singular_matrix() {
2098 let m = vec![1.0, 2.0, 2.0, 4.0];
2100 assert!(invert_matrix(&m, 2).is_none());
2101 }
2102
2103 #[test]
2108 fn test_karmonic_filter_creation() {
2109 let filter = KarmonicFilter::new(12, 6);
2110 assert_eq!(filter.n_modes(), 6);
2111 assert_eq!(filter.grid_size(), 12);
2112 }
2113
2114 #[test]
2115 fn test_karmonic_mode1_preserved() {
2116 let filter = KarmonicFilter::new(12, 6);
2118 assert!(filter.weight(0) < 0.001, "Mode 1 weight should be ~0, got {}", filter.weight(0));
2119 }
2120
2121 #[test]
2122 fn test_karmonic_weights_monotonic() {
2123 let filter = KarmonicFilter::new(12, 6);
2125 for i in 1..filter.n_modes() {
2126 assert!(
2127 filter.weight(i) >= filter.weight(i - 1),
2128 "Weights not monotonic: w[{}]={} < w[{}]={}",
2129 i, filter.weight(i), i - 1, filter.weight(i - 1)
2130 );
2131 }
2132 }
2133
2134 #[test]
2135 fn test_karmonic_highest_mode_is_one() {
2136 let filter = KarmonicFilter::new(12, 6);
2138 let last = filter.n_modes() - 1;
2139 assert!(
2140 (filter.weight(last) - 1.0).abs() < 0.001,
2141 "Last mode weight should be ~1.0, got {}",
2142 filter.weight(last)
2143 );
2144 }
2145
2146 #[test]
2147 fn test_karmonic_eigenvalues_positive() {
2148 let filter = KarmonicFilter::new(12, 6);
2149 for (i, &lam) in filter.eigenvalues().iter().enumerate() {
2150 assert!(lam > 0.0, "Eigenvalue {} should be positive, got {}", i, lam);
2151 }
2152 }
2153
2154 #[test]
2155 fn test_karmonic_eigenvalue_formula() {
2156 let filter = KarmonicFilter::new(12, 6);
2158 let expected = 2.0 - 3.0_f32.sqrt();
2159 assert!(
2160 (filter.eigenvalue(0) - expected).abs() < 0.01,
2161 "λ₁ should be ~{}, got {}", expected, filter.eigenvalue(0)
2162 );
2163 }
2164
2165 #[test]
2166 fn test_karmonic_spectral_gap_matches_tonnetz() {
2167 let filter = KarmonicFilter::new(12, 6);
2169 let tonnetz_gap = Tonnetz::<12>::spectral_gap();
2170 assert!(
2171 (filter.eigenvalue(0) - tonnetz_gap).abs() < 0.01,
2172 "Karmonic λ₁={} should match Tonnetz gap={}",
2173 filter.eigenvalue(0), tonnetz_gap
2174 );
2175 }
2176
2177 #[test]
2178 fn test_karmonic_uniformity_loss_batch_too_small() {
2179 let filter = KarmonicFilter::new(12, 6);
2180 let embeddings = vec![vec![0.0; 24]]; let loss = filter.uniformity_loss(&embeddings, 2, 2.0);
2183 assert_eq!(loss, 0.0);
2184 }
2185
2186 #[test]
2187 fn test_karmonic_uniformity_loss_finite() {
2188 let filter = KarmonicFilter::new(12, 6);
2189 let embeddings = vec![
2190 vec![1.0, 0.0, 0.5, 0.5, -0.3, 0.2, 0.1, -0.1, 0.4, 0.3, -0.2, 0.1,
2191 0.3, 0.1, 0.2, -0.4, 0.1, 0.5, -0.1, 0.2, 0.3, -0.3, 0.4, 0.1],
2192 vec![0.0, 1.0, 0.4, 0.6, -0.1, 0.3, 0.2, -0.2, 0.3, 0.4, -0.1, 0.2,
2193 0.2, 0.3, 0.1, -0.3, 0.2, 0.4, -0.2, 0.3, 0.2, -0.2, 0.3, 0.2],
2194 vec![0.5, 0.5, 0.3, 0.7, -0.2, 0.1, 0.3, -0.3, 0.2, 0.5, -0.3, 0.3,
2195 0.1, 0.2, 0.3, -0.2, 0.3, 0.3, -0.3, 0.1, 0.1, -0.1, 0.5, 0.3],
2196 ];
2197 let loss = filter.uniformity_loss(&embeddings, 2, 2.0);
2198 assert!(loss.is_finite(), "Loss should be finite, got {}", loss);
2199 }
2200
2201 #[test]
2202 fn test_karmonic_identical_embeddings_low_loss() {
2203 let filter = KarmonicFilter::new(12, 3);
2205 let emb = vec![0.5; 12]; let embeddings = vec![emb.clone(), emb.clone(), emb.clone()];
2207 let loss = filter.uniformity_loss(&embeddings, 2, 2.0);
2208 assert!(loss.is_finite());
2209 }
2210
2211 #[test]
2212 fn test_fourier_expand_dims() {
2213 let angles = vec![1.0, 2.0]; let embed = fourier_expand(&angles, 6);
2215 assert_eq!(embed.len(), 2 * 2 * 6); }
2217
2218 #[test]
2219 fn test_fourier_expand_mode1() {
2220 let angles = vec![0.0]; let embed = fourier_expand(&angles, 3);
2222 assert!((embed[0] - 1.0).abs() < 1e-6);
2224 assert!(embed[1].abs() < 1e-6);
2225 }
2226
2227 #[test]
2228 fn test_angles_to_grid() {
2229 let pi = core::f32::consts::PI;
2230 let coords = angles_to_grid(&[0.0, pi], 12);
2233 assert_eq!(coords[0], 0);
2234 assert_eq!(coords[1], 6);
2235 }
2236
2237 #[test]
2238 fn test_spread_loss_uncorrelated() {
2239 let filter = KarmonicFilter::new(12, 6);
2240 let angles = vec![
2242 vec![0.0, 3.14],
2243 vec![1.57, 0.5],
2244 vec![3.14, 1.57],
2245 vec![4.71, 4.0],
2246 ];
2247 let spread = filter.spread_loss(&angles);
2248 assert!(spread.is_finite());
2249 assert!(spread >= 0.0);
2250 }
2251
2252 #[test]
2253 fn test_spread_loss_single_dim() {
2254 let filter = KarmonicFilter::new(12, 6);
2255 let angles = vec![vec![1.0], vec![2.0]];
2257 assert_eq!(filter.spread_loss(&angles), 0.0);
2258 }
2259
2260 #[test]
2261 fn test_karmonic_different_grid_sizes() {
2262 for &n in &[4, 6, 8, 12, 24] {
2265 let filter = KarmonicFilter::new(n, 4);
2266 let actual_modes = filter.n_modes();
2267 assert!(actual_modes >= 1);
2268 assert!(actual_modes <= n / 2);
2269 assert!(filter.weight(0) < 0.001);
2271 assert!(
2273 (filter.weight(actual_modes - 1) - 1.0).abs() < 0.001,
2274 "grid_size={}, n_modes={}, highest weight={}",
2275 n, actual_modes, filter.weight(actual_modes - 1)
2276 );
2277 }
2278 }
2279}