quantrs2_sim/mixed_precision_impl/
config.rs

1//! Configuration structures for mixed-precision quantum simulation.
2//!
3//! This module provides configuration types for precision levels,
4//! adaptive strategies, and performance optimization settings.
5
6use serde::{Deserialize, Serialize};
7
8use crate::error::{Result, SimulatorError};
9
10// Note: scirs2_linalg mixed_precision module temporarily unavailable
11// #[cfg(feature = "advanced_math")]
12// use scirs2_linalg::mixed_precision::{AdaptiveStrategy, MixedPrecisionContext, PrecisionLevel};
13
14// Placeholder types when the feature is not available
/// Placeholder mixed-precision context used while the
/// `scirs2_linalg::mixed_precision` backend is unavailable; constructing it
/// via [`MixedPrecisionContext::new`] always fails.
#[derive(Debug)]
pub struct MixedPrecisionContext;
17
/// Placeholder for the SciRS2 `PrecisionLevel` type (backend unavailable).
#[derive(Debug)]
pub enum PrecisionLevel {
    /// 16-bit floating point.
    F16,
    /// 32-bit floating point.
    F32,
    /// 64-bit floating point.
    F64,
    /// Precision selected automatically at runtime.
    Adaptive,
}
25
/// Placeholder for the SciRS2 `AdaptiveStrategy` type (backend unavailable).
#[derive(Debug)]
pub enum AdaptiveStrategy {
    /// Select precision based on an error threshold.
    ErrorBased(f64),
    /// Always use the given fixed precision level.
    Fixed(PrecisionLevel),
}
31
32impl MixedPrecisionContext {
33    pub fn new(_strategy: AdaptiveStrategy) -> Result<Self> {
34        Err(SimulatorError::UnsupportedOperation(
35            "Mixed precision context not available without advanced_math feature".to_string(),
36        ))
37    }
38}
39
/// Precision levels for quantum computations
///
/// NOTE(review): the declaration order of these variants is significant —
/// `MixedPrecisionConfig::validate` compares `min_precision` and
/// `max_precision` via their discriminants (`as u8`), so variants must stay
/// ordered from lowest to highest precision, with `Adaptive` last.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum QuantumPrecision {
    /// Half precision (16-bit floats, FP16)
    Half,
    /// BFloat16 precision (16-bit with larger range)
    BFloat16,
    /// TensorFloat-32 (NVIDIA TF32 for Tensor Cores)
    TF32,
    /// Single precision (32-bit floats)
    Single,
    /// Double precision (64-bit floats)
    Double,
    /// Adaptive precision (automatically selected)
    Adaptive,
}
56
57impl QuantumPrecision {
58    /// Get the corresponding SciRS2 precision level
59    #[cfg(feature = "advanced_math")]
60    pub const fn to_scirs2_precision(&self) -> PrecisionLevel {
61        match self {
62            Self::Half | Self::BFloat16 => PrecisionLevel::F16,
63            Self::TF32 | Self::Single => PrecisionLevel::F32,
64            Self::Double => PrecisionLevel::F64,
65            Self::Adaptive => PrecisionLevel::Adaptive,
66        }
67    }
68
69    /// Get memory usage factor relative to double precision
70    #[must_use]
71    pub const fn memory_factor(&self) -> f64 {
72        match self {
73            Self::Half => 0.25,
74            Self::BFloat16 => 0.25,
75            Self::TF32 => 0.5, // Same storage as FP32, but faster compute
76            Self::Single => 0.5,
77            Self::Double => 1.0,
78            Self::Adaptive => 0.75, // Average case
79        }
80    }
81
82    /// Get computational cost factor relative to double precision
83    /// Lower is better - represents performance relative to FP64
84    #[must_use]
85    pub const fn computation_factor(&self) -> f64 {
86        match self {
87            Self::Half => 0.25,     // ~4x faster on Tensor Cores
88            Self::BFloat16 => 0.25, // ~4x faster on Tensor Cores
89            Self::TF32 => 0.35,     // ~2.8x faster on Tensor Cores
90            Self::Single => 0.7,
91            Self::Double => 1.0,
92            Self::Adaptive => 0.6, // Average case
93        }
94    }
95
96    /// Get typical numerical error for this precision
97    #[must_use]
98    pub const fn typical_error(&self) -> f64 {
99        match self {
100            Self::Half => 1e-3,     // 10-bit mantissa
101            Self::BFloat16 => 1e-2, // 7-bit mantissa, but same range as FP32
102            Self::TF32 => 1e-4,     // 10-bit mantissa with FP32 range
103            Self::Single => 1e-6,   // 23-bit mantissa
104            Self::Double => 1e-15,  // 52-bit mantissa
105            Self::Adaptive => 1e-6, // Conservative estimate
106        }
107    }
108
109    /// Check if this precision requires Tensor Cores
110    #[must_use]
111    pub const fn requires_tensor_cores(&self) -> bool {
112        matches!(self, Self::TF32 | Self::BFloat16)
113    }
114
115    /// Check if this precision is a reduced-precision format
116    #[must_use]
117    pub const fn is_reduced_precision(&self) -> bool {
118        matches!(self, Self::Half | Self::BFloat16 | Self::TF32)
119    }
120
121    /// Get the bit width of this precision format
122    #[must_use]
123    pub const fn bit_width(&self) -> usize {
124        match self {
125            Self::Half => 16,
126            Self::BFloat16 => 16,
127            Self::TF32 => 19, // Stored as 32-bit, but 19 effective bits
128            Self::Single => 32,
129            Self::Double => 64,
130            Self::Adaptive => 32, // Default to single
131        }
132    }
133
134    /// Get mantissa bits for this precision
135    #[must_use]
136    pub const fn mantissa_bits(&self) -> usize {
137        match self {
138            Self::Half => 10,
139            Self::BFloat16 => 7,
140            Self::TF32 => 10,
141            Self::Single => 23,
142            Self::Double => 52,
143            Self::Adaptive => 23,
144        }
145    }
146
147    /// Get exponent bits for this precision
148    #[must_use]
149    pub const fn exponent_bits(&self) -> usize {
150        match self {
151            Self::Half => 5,
152            Self::BFloat16 => 8,
153            Self::TF32 => 8,
154            Self::Single => 8,
155            Self::Double => 11,
156            Self::Adaptive => 8,
157        }
158    }
159
160    /// Check if this precision is sufficient for the given error tolerance
161    #[must_use]
162    pub fn is_sufficient_for_tolerance(&self, tolerance: f64) -> bool {
163        self.typical_error() <= tolerance * 10.0 // Safety factor of 10
164    }
165
166    /// Get the next higher precision level
167    #[must_use]
168    pub const fn higher_precision(&self) -> Option<Self> {
169        match self {
170            Self::Half => Some(Self::BFloat16),
171            Self::BFloat16 => Some(Self::TF32),
172            Self::TF32 => Some(Self::Single),
173            Self::Single => Some(Self::Double),
174            Self::Double => None,
175            Self::Adaptive => Some(Self::Double),
176        }
177    }
178
179    /// Get the next lower precision level
180    #[must_use]
181    pub const fn lower_precision(&self) -> Option<Self> {
182        match self {
183            Self::Half => None,
184            Self::BFloat16 => Some(Self::Half),
185            Self::TF32 => Some(Self::BFloat16),
186            Self::Single => Some(Self::TF32),
187            Self::Double => Some(Self::Single),
188            Self::Adaptive => Some(Self::Single),
189        }
190    }
191
192    /// Select best precision for given accuracy and Tensor Core availability
193    #[must_use]
194    pub fn select_for_accuracy_and_tensor_cores(tolerance: f64, has_tensor_cores: bool) -> Self {
195        if tolerance >= 1e-2 {
196            if has_tensor_cores {
197                Self::BFloat16
198            } else {
199                Self::Half
200            }
201        } else if tolerance >= 1e-4 {
202            if has_tensor_cores {
203                Self::TF32
204            } else {
205                Self::Single
206            }
207        } else if tolerance >= 1e-6 {
208            Self::Single
209        } else {
210            Self::Double
211        }
212    }
213}
214
/// Mixed precision configuration
///
/// Invariants enforced by `validate()`: `error_tolerance > 0`,
/// `large_system_threshold > 0`, and `min_precision <= max_precision`
/// (compared by enum declaration order).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MixedPrecisionConfig {
    /// Default precision for state vectors
    pub state_vector_precision: QuantumPrecision,
    /// Default precision for gate operations
    pub gate_precision: QuantumPrecision,
    /// Default precision for measurements
    pub measurement_precision: QuantumPrecision,
    /// Error tolerance for precision selection (must be positive)
    pub error_tolerance: f64,
    /// Enable automatic precision adaptation
    pub adaptive_precision: bool,
    /// Minimum precision level (never go below this)
    pub min_precision: QuantumPrecision,
    /// Maximum precision level (never go above this)
    pub max_precision: QuantumPrecision,
    /// Number of qubits threshold for precision reduction (must be positive)
    pub large_system_threshold: usize,
    /// Enable precision analysis and reporting
    pub enable_analysis: bool,
}
237
238impl Default for MixedPrecisionConfig {
239    fn default() -> Self {
240        Self {
241            state_vector_precision: QuantumPrecision::Single,
242            gate_precision: QuantumPrecision::Single,
243            measurement_precision: QuantumPrecision::Double,
244            error_tolerance: 1e-6,
245            adaptive_precision: true,
246            min_precision: QuantumPrecision::Half,
247            max_precision: QuantumPrecision::Double,
248            large_system_threshold: 20,
249            enable_analysis: true,
250        }
251    }
252}
253
254impl MixedPrecisionConfig {
255    /// Create configuration optimized for accuracy
256    #[must_use]
257    pub const fn for_accuracy() -> Self {
258        Self {
259            state_vector_precision: QuantumPrecision::Double,
260            gate_precision: QuantumPrecision::Double,
261            measurement_precision: QuantumPrecision::Double,
262            error_tolerance: 1e-12,
263            adaptive_precision: false,
264            min_precision: QuantumPrecision::Double,
265            max_precision: QuantumPrecision::Double,
266            large_system_threshold: 50,
267            enable_analysis: true,
268        }
269    }
270
271    /// Create configuration optimized for performance
272    #[must_use]
273    pub const fn for_performance() -> Self {
274        Self {
275            state_vector_precision: QuantumPrecision::Half,
276            gate_precision: QuantumPrecision::Single,
277            measurement_precision: QuantumPrecision::Single,
278            error_tolerance: 1e-3,
279            adaptive_precision: true,
280            min_precision: QuantumPrecision::Half,
281            max_precision: QuantumPrecision::Single,
282            large_system_threshold: 10,
283            enable_analysis: false,
284        }
285    }
286
287    /// Create configuration balanced between accuracy and performance
288    #[must_use]
289    pub fn balanced() -> Self {
290        Self::default()
291    }
292
293    /// Validate the configuration
294    pub fn validate(&self) -> Result<()> {
295        if self.error_tolerance <= 0.0 {
296            return Err(SimulatorError::InvalidInput(
297                "Error tolerance must be positive".to_string(),
298            ));
299        }
300
301        if self.large_system_threshold == 0 {
302            return Err(SimulatorError::InvalidInput(
303                "Large system threshold must be positive".to_string(),
304            ));
305        }
306
307        // Check precision consistency
308        if self.min_precision as u8 > self.max_precision as u8 {
309            return Err(SimulatorError::InvalidInput(
310                "Minimum precision cannot be higher than maximum precision".to_string(),
311            ));
312        }
313
314        Ok(())
315    }
316
317    /// Adjust configuration for a specific number of qubits
318    pub const fn adjust_for_qubits(&mut self, num_qubits: usize) {
319        if num_qubits >= self.large_system_threshold {
320            // For large systems, reduce precision to save memory
321            if self.adaptive_precision {
322                match self.state_vector_precision {
323                    QuantumPrecision::Double => {
324                        self.state_vector_precision = QuantumPrecision::Single;
325                    }
326                    QuantumPrecision::Single => {
327                        self.state_vector_precision = QuantumPrecision::Half;
328                    }
329                    _ => {}
330                }
331            }
332        }
333    }
334
335    /// Estimate memory usage for a given number of qubits
336    #[must_use]
337    pub fn estimate_memory_usage(&self, num_qubits: usize) -> usize {
338        let state_vector_size = 1 << num_qubits;
339        let base_memory = state_vector_size * 16; // Complex64 size
340
341        let factor = self.state_vector_precision.memory_factor();
342        (f64::from(base_memory) * factor) as usize
343    }
344
345    /// Check if the configuration is suitable for the available memory
346    #[must_use]
347    pub fn fits_in_memory(&self, num_qubits: usize, available_memory: usize) -> bool {
348        self.estimate_memory_usage(num_qubits) <= available_memory
349    }
350}