// pot_o_core/math/mod.rs
//! Multi-tier arithmetic for tensor network calculations
//!
//! Supports three implementation tiers:
//! 1. Research: Arbitrary precision (optional, requires features)
//! 2. Portable: f64-based (default, widely supported)
//! 3. Hardware: Fixed-point u32/u64 (optimal for embedded/blockchain)

/// Research-precision arithmetic (optional feature)
///
/// For theoretical validation and manuscript proofs.
/// Requires `num-bigint`, `num-rational` (and transitively `num-traits`) crates.
#[cfg(feature = "research-precision")]
pub mod field {
    use num_bigint::BigInt;
    use num_rational::Ratio;
    // FIX: `to_f64()` on `Ratio<BigInt>` is provided by the `ToPrimitive`
    // trait from `num-traits`; without this import the method call below
    // does not resolve and the module fails to compile.
    use num_traits::ToPrimitive;

    /// Arbitrary-precision rational number
    /// Used for exact symbolic calculations
    pub type ResearchScalar = Ratio<BigInt>;

    /// Natural logarithm for research-tier scalars.
    ///
    /// NOTE(review): despite the module name, this is NOT arbitrary
    /// precision — the value round-trips through `f64` and `f64::ln`, so
    /// accuracy is limited to ~15 decimal digits. In production, use a
    /// decimal crate or similar.
    ///
    /// Inputs too large/small for `f64` fall back to 1.0 before the log;
    /// a non-finite log result (x <= 0) falls back to the rational 1/1.
    pub fn ln_arbitrary(x: &ResearchScalar) -> ResearchScalar {
        let f64_val = x.to_f64().unwrap_or(1.0);
        let result = f64_val.ln();
        // `from_float` returns None for NaN/infinity (e.g. ln of a
        // non-positive input); keep the original best-effort default of 1.
        Ratio::from_float(result).unwrap_or_else(|| Ratio::new(BigInt::from(1), BigInt::from(1)))
    }
}
30
/// Portable f64-based arithmetic (default)
///
/// Works on all platforms. Precision limited to ~15 decimal digits.
/// Suitable for on-chain calculations where f64 is available.
pub mod portable {
    /// Standard f64 floating-point
    pub type PortableScalar = f64;

    /// Natural logarithm (standard libm)
    #[inline]
    pub fn ln(x: f64) -> f64 {
        f64::ln(x)
    }

    /// Tanh for coherence/probability
    #[inline]
    pub fn tanh(x: f64) -> f64 {
        f64::tanh(x)
    }
}
51
/// Fixed-point arithmetic for blockchain
///
/// Optimal for Solana/EVM with limited floating-point support.
/// Uses u64 with configurable scale (typically 1e6).
pub mod fixed_point {
    /// Fixed-point u64 with configurable scale
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub struct FixedPoint64 {
        /// Raw value (fixed-point)
        pub value: u64,
        /// Decimal places (typically 6 for 1e6 scale)
        pub scale: u32,
    }

    impl FixedPoint64 {
        /// Create from raw value and scale
        pub const fn new(value: u64, scale: u32) -> Self {
            FixedPoint64 { value, scale }
        }

        /// Create from f64, rounding to the nearest representable value.
        ///
        /// Negative inputs clamp to 0 — the representation is unsigned.
        /// FIX: rounds instead of truncating, so binary-float artifacts
        /// such as 0.29 * 100 == 28.999...996 encode as 29, not 28.
        pub fn from_f64(f: f64, scale: u32) -> Self {
            let scale_f = 10u64.pow(scale) as f64;
            FixedPoint64 {
                value: (f.max(0.0) * scale_f).round() as u64,
                scale,
            }
        }

        /// Convert to f64
        pub fn to_f64(&self) -> f64 {
            let scale_f = 10u64.pow(self.scale) as f64;
            self.value as f64 / scale_f
        }

        /// Multiply two fixed-point numbers.
        ///
        /// Both operands must share the same scale (debug-asserted).
        /// The intermediate product is computed in u128 to avoid overflow,
        /// then rescaled back down (truncating toward zero).
        pub fn multiply(&self, other: &FixedPoint64) -> FixedPoint64 {
            debug_assert_eq!(self.scale, other.scale);

            let scale_factor = 10u64.pow(self.scale);
            let result = ((self.value as u128 * other.value as u128) / scale_factor as u128) as u64;

            FixedPoint64 {
                value: result,
                scale: self.scale,
            }
        }

        /// Natural logarithm (computed via f64 — NOT a pure fixed-point
        /// polynomial routine, despite the module's blockchain focus).
        ///
        /// NOTE: because the representation is unsigned, negative results
        /// (inputs < 1.0) clamp to 0 in `from_f64`. Callers needing signed
        /// logs must track the sign separately.
        pub fn ln(&self) -> FixedPoint64 {
            let f = self.to_f64();
            let ln_f = f.ln();
            FixedPoint64::from_f64(ln_f, self.scale)
        }

        /// Tanh approximation (computed via f64).
        ///
        /// NOTE: negative results clamp to 0 for the same unsigned-storage
        /// reason as `ln`.
        pub fn tanh(&self) -> FixedPoint64 {
            let f = self.to_f64();
            let tanh_f = f.tanh();
            FixedPoint64::from_f64(tanh_f, self.scale)
        }
    }
}
116
/// Hardware-optimized arithmetic for embedded systems (ESP32, etc.)
///
/// Uses u32 with precision bits for extreme resource-constrained environments.
pub mod hardware {
    /// Minimal fixed-point u32 for embedded
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub struct HardwareFixed {
        /// Raw value
        pub value: u32,
        /// Bits after binary point (typically 8-16; must be < 32)
        pub precision_bits: u8,
    }

    impl HardwareFixed {
        /// Create from value and precision
        pub const fn new(value: u32, precision_bits: u8) -> Self {
            HardwareFixed { value, precision_bits }
        }

        /// Multiply with round-half-up (the doc previously claimed rounding
        /// but the body truncated; this now matches the contract).
        ///
        /// Both operands must share `precision_bits` (debug-asserted).
        /// The result is truncated to u32 after rescaling.
        pub fn multiply(&self, other: &HardwareFixed) -> HardwareFixed {
            debug_assert_eq!(self.precision_bits, other.precision_bits);
            // Shifts by >= 64 would panic in debug builds; sane formats
            // keep the binary point well inside a u32.
            debug_assert!(self.precision_bits < 32);

            let product = self.value as u64 * other.value as u64;
            // Half the divisor, expressed as `(1 << bits) >> 1` so that
            // bits == 0 yields 0 instead of a shift underflow.
            let half = (1u64 << self.precision_bits) >> 1;
            let result = ((product + half) >> self.precision_bits) as u32;

            HardwareFixed {
                value: result,
                precision_bits: self.precision_bits,
            }
        }

        /// Natural logarithm via `f32::ln` (NOT a Taylor series, despite the
        /// original comment — kept as f32 for simplicity).
        ///
        /// NOTE: negative results (inputs below 1.0 in fixed-point terms,
        /// including value == 0, which gives -inf) saturate to 0 because the
        /// float-to-u32 `as` cast saturates at the type's bounds.
        pub fn ln_approx(&self) -> HardwareFixed {
            let scale = 1u32 << self.precision_bits;
            let norm_val = (self.value as f32) / (scale as f32);
            let ln_result = norm_val.ln();
            let scaled = (ln_result * (scale as f32)) as u32;

            HardwareFixed {
                value: scaled,
                precision_bits: self.precision_bits,
            }
        }
    }
}
165
#[cfg(test)]
mod tests {
    use super::*;

    /// 1.5 at scale 6 must encode as 1_500_000 raw units.
    #[test]
    fn test_fixed_point_64_from_f64() {
        assert_eq!(fixed_point::FixedPoint64::from_f64(1.5, 6).value, 1_500_000);
    }

    /// Raw 1_500_000 at scale 6 decodes back to ~1.5.
    #[test]
    fn test_fixed_point_64_to_f64() {
        let decoded = fixed_point::FixedPoint64::new(1_500_000, 6).to_f64();
        assert!((decoded - 1.5).abs() < 0.000001);
    }

    /// 2.0 * 3.0 survives the fixed-point round trip as ~6.0.
    #[test]
    fn test_fixed_point_64_multiply() {
        let lhs = fixed_point::FixedPoint64::from_f64(2.0, 6);
        let rhs = fixed_point::FixedPoint64::from_f64(3.0, 6);
        assert!((lhs.multiply(&rhs).to_f64() - 6.0).abs() < 0.001);
    }

    /// 1.0 * 2.0 in Q8 format lands near raw 512.
    #[test]
    fn test_hardware_fixed_multiply() {
        let lhs = hardware::HardwareFixed::new(256, 8); // 1.0 in Q8
        let rhs = hardware::HardwareFixed::new(512, 8); // 2.0 in Q8
        let product = lhs.multiply(&rhs);
        assert!(product.value > 400 && product.value < 600);
    }
}