quantrs2_sim/quantum_reservoir_computing_enhanced/
state.rs

1//! State types for Quantum Reservoir Computing
2//!
3//! This module provides state structs for the QRC framework.
4
5use scirs2_core::ndarray::{Array1, Array2};
6use scirs2_core::Complex64;
7use serde::{Deserialize, Serialize};
8use std::collections::{HashMap, VecDeque};
9
/// Memory analysis metrics
///
/// Summary scalars characterizing how much of its input history the
/// reservoir can store and process. The exact normalization of each value is
/// defined by the analysis code that populates this struct (not visible in
/// this module).
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct MemoryMetrics {
    /// Linear memory capacity
    pub linear_capacity: f64,
    /// Nonlinear memory capacity
    pub nonlinear_capacity: f64,
    /// Total memory capacity
    pub total_capacity: f64,
    /// Information processing capacity
    pub processing_capacity: f64,
    /// Temporal correlation length
    pub correlation_length: f64,
    /// Memory decay rate
    pub decay_rate: f64,
    /// Memory efficiency
    pub efficiency: f64,
}
28
/// Enhanced quantum reservoir state
///
/// Holds the current state vector of the reservoir together with a history
/// of past states and cached analysis results (observables, correlations,
/// entanglement measures, memory metrics).
#[derive(Debug, Clone)]
pub struct QuantumReservoirState {
    /// Current quantum state vector (2^n amplitudes for an n-qubit reservoir)
    pub state_vector: Array1<Complex64>,
    /// Evolution history buffer of past state vectors
    pub state_history: VecDeque<Array1<Complex64>>,
    /// Observable measurements cache, keyed by observable name
    pub observables: HashMap<String, f64>,
    /// Two-qubit correlation matrix (num_qubits x num_qubits)
    pub correlations: Array2<f64>,
    /// Higher-order correlations, keyed by correlation identifier
    pub higher_order_correlations: HashMap<String, f64>,
    /// Entanglement measures, keyed by measure name
    pub entanglement_measures: HashMap<String, f64>,
    /// Memory capacity metrics
    pub memory_metrics: MemoryMetrics,
    /// Time index counter, incremented on each state update
    pub time_index: usize,
    /// Last update timestamp (units defined by the caller)
    pub last_update: f64,
    /// Reservoir activity level (EMA of mean squared amplitude)
    pub activity_level: f64,
    /// Performance tracking history
    pub performance_history: VecDeque<f64>,
}
55
56impl QuantumReservoirState {
57    /// Create new enhanced reservoir state
58    #[must_use]
59    pub fn new(num_qubits: usize, memory_capacity: usize) -> Self {
60        let state_size = 1 << num_qubits;
61        let mut state_vector = Array1::zeros(state_size);
62        state_vector[0] = Complex64::new(1.0, 0.0); // Start in |0...0⟩
63
64        Self {
65            state_vector,
66            state_history: VecDeque::with_capacity(memory_capacity),
67            observables: HashMap::new(),
68            correlations: Array2::zeros((num_qubits, num_qubits)),
69            higher_order_correlations: HashMap::new(),
70            entanglement_measures: HashMap::new(),
71            memory_metrics: MemoryMetrics::default(),
72            time_index: 0,
73            last_update: 0.0,
74            activity_level: 0.0,
75            performance_history: VecDeque::with_capacity(1000),
76        }
77    }
78
79    /// Update state and maintain comprehensive history
80    pub fn update_state(&mut self, new_state: Array1<Complex64>, timestamp: f64) {
81        // Store previous state
82        self.state_history.push_back(self.state_vector.clone());
83        if self.state_history.len() > self.state_history.capacity() {
84            self.state_history.pop_front();
85        }
86
87        // Update current state
88        self.state_vector = new_state;
89        self.time_index += 1;
90        self.last_update = timestamp;
91
92        // Update activity level
93        self.update_activity_level();
94    }
95
96    /// Update reservoir activity level
97    fn update_activity_level(&mut self) {
98        let activity = self
99            .state_vector
100            .iter()
101            .map(scirs2_core::Complex::norm_sqr)
102            .sum::<f64>()
103            / self.state_vector.len() as f64;
104
105        // Exponential moving average
106        let alpha = 0.1;
107        self.activity_level = alpha * activity + (1.0 - alpha) * self.activity_level;
108    }
109
110    /// Calculate memory decay
111    #[must_use]
112    pub fn calculate_memory_decay(&self) -> f64 {
113        if self.state_history.len() < 2 {
114            return 0.0;
115        }
116
117        let mut total_decay = 0.0;
118        let current_state = &self.state_vector;
119
120        for (i, past_state) in self.state_history.iter().enumerate() {
121            let fidelity = self.calculate_fidelity(current_state, past_state);
122            let time_diff = (self.state_history.len() - i) as f64;
123            total_decay += fidelity * (-time_diff * 0.1).exp();
124        }
125
126        total_decay / self.state_history.len() as f64
127    }
128
129    /// Calculate fidelity between two states
130    fn calculate_fidelity(&self, state1: &Array1<Complex64>, state2: &Array1<Complex64>) -> f64 {
131        let overlap = state1
132            .iter()
133            .zip(state2.iter())
134            .map(|(a, b)| a.conj() * b)
135            .sum::<Complex64>();
136        overlap.norm_sqr()
137    }
138}
139
/// Enhanced training data for reservoir computing
///
/// A time series of input/target pairs plus optional per-sample channels
/// (features, labels, masks, weights, metadata). The mandatory vectors are
/// expected to share a common length; optional channels, when present,
/// presumably match that length — confirm against the producing code.
#[derive(Debug, Clone)]
pub struct ReservoirTrainingData {
    /// Input time series
    pub inputs: Vec<Array1<f64>>,
    /// Target outputs
    pub targets: Vec<Array1<f64>>,
    /// Time stamps
    pub timestamps: Vec<f64>,
    /// Additional features (optional, one entry per sample)
    pub features: Option<Vec<Array1<f64>>>,
    /// Data labels for classification (optional)
    pub labels: Option<Vec<usize>>,
    /// Sequence lengths for variable-length sequences (optional)
    pub sequence_lengths: Option<Vec<usize>>,
    /// Missing data indicators (optional)
    pub missing_mask: Option<Vec<Array1<bool>>>,
    /// Data weights for importance sampling (optional)
    pub sample_weights: Option<Vec<f64>>,
    /// Metadata for each sample (optional)
    pub metadata: Option<Vec<HashMap<String, String>>>,
}
162
163impl ReservoirTrainingData {
164    /// Create new training data
165    #[must_use]
166    pub const fn new(
167        inputs: Vec<Array1<f64>>,
168        targets: Vec<Array1<f64>>,
169        timestamps: Vec<f64>,
170    ) -> Self {
171        Self {
172            inputs,
173            targets,
174            timestamps,
175            features: None,
176            labels: None,
177            sequence_lengths: None,
178            missing_mask: None,
179            sample_weights: None,
180            metadata: None,
181        }
182    }
183
184    /// Add features to training data
185    #[must_use]
186    pub fn with_features(mut self, features: Vec<Array1<f64>>) -> Self {
187        self.features = Some(features);
188        self
189    }
190
191    /// Add labels for classification
192    #[must_use]
193    pub fn with_labels(mut self, labels: Vec<usize>) -> Self {
194        self.labels = Some(labels);
195        self
196    }
197
198    /// Add sample weights
199    #[must_use]
200    pub fn with_weights(mut self, weights: Vec<f64>) -> Self {
201        self.sample_weights = Some(weights);
202        self
203    }
204
205    /// Get data length
206    #[must_use]
207    pub fn len(&self) -> usize {
208        self.inputs.len()
209    }
210
211    /// Check if data is empty
212    #[must_use]
213    pub fn is_empty(&self) -> bool {
214        self.inputs.is_empty()
215    }
216
217    /// Split data into train/test sets
218    #[must_use]
219    pub fn train_test_split(&self, test_ratio: f64) -> (Self, Self) {
220        let test_size = (self.len() as f64 * test_ratio) as usize;
221        let train_size = self.len() - test_size;
222
223        let train_data = Self {
224            inputs: self.inputs[..train_size].to_vec(),
225            targets: self.targets[..train_size].to_vec(),
226            timestamps: self.timestamps[..train_size].to_vec(),
227            features: self.features.as_ref().map(|f| f[..train_size].to_vec()),
228            labels: self.labels.as_ref().map(|l| l[..train_size].to_vec()),
229            sequence_lengths: self
230                .sequence_lengths
231                .as_ref()
232                .map(|s| s[..train_size].to_vec()),
233            missing_mask: self.missing_mask.as_ref().map(|m| m[..train_size].to_vec()),
234            sample_weights: self
235                .sample_weights
236                .as_ref()
237                .map(|w| w[..train_size].to_vec()),
238            metadata: self.metadata.as_ref().map(|m| m[..train_size].to_vec()),
239        };
240
241        let test_data = Self {
242            inputs: self.inputs[train_size..].to_vec(),
243            targets: self.targets[train_size..].to_vec(),
244            timestamps: self.timestamps[train_size..].to_vec(),
245            features: self.features.as_ref().map(|f| f[train_size..].to_vec()),
246            labels: self.labels.as_ref().map(|l| l[train_size..].to_vec()),
247            sequence_lengths: self
248                .sequence_lengths
249                .as_ref()
250                .map(|s| s[train_size..].to_vec()),
251            missing_mask: self.missing_mask.as_ref().map(|m| m[train_size..].to_vec()),
252            sample_weights: self
253                .sample_weights
254                .as_ref()
255                .map(|w| w[train_size..].to_vec()),
256            metadata: self.metadata.as_ref().map(|m| m[train_size..].to_vec()),
257        };
258
259        (train_data, test_data)
260    }
261}
262
/// Enhanced training example for reservoir learning
///
/// A single processed sample: the raw input, the reservoir's response, the
/// features extracted from it, and the resulting prediction with its error
/// and confidence.
#[derive(Debug, Clone)]
pub struct TrainingExample {
    /// Input data
    pub input: Array1<f64>,
    /// Reservoir state after processing
    pub reservoir_state: Array1<f64>,
    /// Extracted features
    pub features: Array1<f64>,
    /// Target output
    pub target: Array1<f64>,
    /// Predicted output
    pub prediction: Array1<f64>,
    /// Prediction error
    pub error: f64,
    /// Confidence score
    pub confidence: f64,
    /// Processing timestamp (units defined by the caller)
    pub timestamp: f64,
    /// Additional metadata, keyed by metric name
    pub metadata: HashMap<String, f64>,
}
285
/// Enhanced performance metrics for reservoir computing
///
/// Aggregate counters and scores tracked over the lifetime of a reservoir.
/// All values are summary scalars filled in by the training/analysis code
/// (not visible in this module).
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ReservoirMetrics {
    /// Total training examples processed
    pub training_examples: usize,
    /// Current prediction accuracy
    pub prediction_accuracy: f64,
    /// Memory capacity estimate
    pub memory_capacity: f64,
    /// Nonlinear memory capacity
    pub nonlinear_memory_capacity: f64,
    /// Information processing capacity
    pub processing_capacity: f64,
    /// Generalization error
    pub generalization_error: f64,
    /// Echo state property indicator
    pub echo_state_property: f64,
    /// Average processing time per input
    pub avg_processing_time_ms: f64,
    /// Quantum resource utilization
    pub quantum_resource_usage: f64,
    /// Temporal correlation length
    pub temporal_correlation_length: f64,
    /// Reservoir efficiency
    pub reservoir_efficiency: f64,
    /// Adaptation rate
    pub adaptation_rate: f64,
    /// Plasticity level
    pub plasticity_level: f64,
    /// Hardware utilization
    pub hardware_utilization: f64,
    /// Error mitigation overhead
    pub error_mitigation_overhead: f64,
    /// Quantum advantage metric
    pub quantum_advantage: f64,
    /// Computational complexity
    pub computational_complexity: f64,
}
324
325/// Enhanced training result
326#[derive(Debug, Clone, Serialize, Deserialize)]
327pub struct TrainingResult {
328    /// Training error (RMSE)
329    pub training_error: f64,
330    /// Test error (RMSE)
331    pub test_error: f64,
332    /// Training time in milliseconds
333    pub training_time_ms: f64,
334    /// Number of training examples
335    pub num_examples: usize,
336    /// Echo state property measure
337    pub echo_state_property: f64,
338    /// Memory capacity estimate
339    pub memory_capacity: f64,
340    /// Nonlinear memory capacity
341    pub nonlinear_capacity: f64,
342    /// Information processing capacity
343    pub processing_capacity: f64,
344}