use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::Complex64;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};
/// Memory-capacity statistics of a quantum reservoir.
///
/// All values are populated elsewhere; the fields here only store results
/// (semantics follow reservoir-computing memory-capacity measures —
/// NOTE(review): confirm against the code that fills them in).
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct MemoryMetrics {
/// Linear memory capacity estimate.
pub linear_capacity: f64,
/// Nonlinear memory capacity estimate.
pub nonlinear_capacity: f64,
/// Combined (total) memory capacity.
pub total_capacity: f64,
/// Information-processing capacity.
pub processing_capacity: f64,
/// Temporal correlation length of the reservoir memory.
pub correlation_length: f64,
/// Rate at which stored information decays.
pub decay_rate: f64,
/// Overall memory efficiency score.
pub efficiency: f64,
}
/// Dynamical state of a quantum reservoir.
///
/// Holds the current state vector over the 2^n computational basis states,
/// a bounded history of past state vectors, derived statistics, and
/// bookkeeping for the time evolution (see the `impl` for update rules).
#[derive(Debug, Clone)]
pub struct QuantumReservoirState {
/// Current quantum state amplitudes; length 2^num_qubits, initialised to |0...0>.
pub state_vector: Array1<Complex64>,
/// Previous state vectors, bounded by the `memory_capacity` given to `new`.
pub state_history: VecDeque<Array1<Complex64>>,
/// Named observable expectation values (populated elsewhere).
pub observables: HashMap<String, f64>,
/// Pairwise qubit correlations; shape (num_qubits, num_qubits).
pub correlations: Array2<f64>,
/// Correlations beyond pairwise, keyed by name (populated elsewhere).
pub higher_order_correlations: HashMap<String, f64>,
/// Named entanglement measures (populated elsewhere).
pub entanglement_measures: HashMap<String, f64>,
/// Memory-capacity statistics for this reservoir.
pub memory_metrics: MemoryMetrics,
/// Number of state updates applied so far.
pub time_index: usize,
/// Timestamp passed to the most recent `update_state` call.
pub last_update: f64,
/// Exponential moving average (alpha = 0.1) of the mean squared amplitude.
pub activity_level: f64,
/// Rolling performance scores (capacity 1000; filled elsewhere).
pub performance_history: VecDeque<f64>,
}
impl QuantumReservoirState {
    /// Creates a reservoir state for `num_qubits` qubits initialised to the
    /// computational basis state |0...0> (amplitude 1.0 at index 0).
    ///
    /// `memory_capacity` bounds how many past state vectors are retained in
    /// `state_history`.
    ///
    /// Note: `state_size = 1 << num_qubits` overflows for
    /// `num_qubits >= usize::BITS`; callers are expected to pass small qubit
    /// counts.
    #[must_use]
    pub fn new(num_qubits: usize, memory_capacity: usize) -> Self {
        let state_size = 1 << num_qubits;
        let mut state_vector = Array1::zeros(state_size);
        state_vector[0] = Complex64::new(1.0, 0.0);
        Self {
            state_vector,
            state_history: VecDeque::with_capacity(memory_capacity),
            observables: HashMap::new(),
            correlations: Array2::zeros((num_qubits, num_qubits)),
            higher_order_correlations: HashMap::new(),
            entanglement_measures: HashMap::new(),
            memory_metrics: MemoryMetrics::default(),
            time_index: 0,
            last_update: 0.0,
            activity_level: 0.0,
            performance_history: VecDeque::with_capacity(1000),
        }
    }

    /// Replaces the current state vector with `new_state`, archiving the old
    /// vector in `state_history`, advancing `time_index`, recording
    /// `timestamp`, and refreshing `activity_level`.
    pub fn update_state(&mut self, new_state: Array1<Complex64>, timestamp: f64) {
        // Bug fix: evict the oldest snapshot *before* pushing. `VecDeque`
        // grows its capacity automatically when full, so the original
        // post-push check `len() > capacity()` was never true and the
        // history grew without bound. `capacity()` may exceed the requested
        // `memory_capacity` (allocations are rounded up), so the bound is
        // approximate; storing the requested capacity in a field would be
        // exact, but that would change the struct layout.
        if self.state_history.capacity() > 0
            && self.state_history.len() >= self.state_history.capacity()
        {
            self.state_history.pop_front();
        }
        self.state_history.push_back(self.state_vector.clone());
        self.state_vector = new_state;
        self.time_index += 1;
        self.last_update = timestamp;
        self.update_activity_level();
    }

    /// Updates `activity_level` as an exponential moving average
    /// (alpha = 0.1) of the mean squared amplitude of the current state.
    fn update_activity_level(&mut self) {
        let activity = self
            .state_vector
            .iter()
            .map(scirs2_core::Complex::norm_sqr)
            .sum::<f64>()
            / self.state_vector.len() as f64;
        let alpha = 0.1;
        self.activity_level = alpha * activity + (1.0 - alpha) * self.activity_level;
    }

    /// Average exponentially damped fidelity between the current state and
    /// each archived state (older snapshots damped by exp(-0.1 * age)).
    /// Returns 0.0 when fewer than two snapshots exist.
    #[must_use]
    pub fn calculate_memory_decay(&self) -> f64 {
        if self.state_history.len() < 2 {
            return 0.0;
        }
        let current_state = &self.state_vector;
        let total_decay: f64 = self
            .state_history
            .iter()
            .enumerate()
            .map(|(i, past_state)| {
                let fidelity = self.calculate_fidelity(current_state, past_state);
                // Oldest entries (small i) get the largest age and the
                // strongest damping.
                let time_diff = (self.state_history.len() - i) as f64;
                fidelity * (-time_diff * 0.1).exp()
            })
            .sum();
        total_decay / self.state_history.len() as f64
    }

    /// Quantum state fidelity |<state1|state2>|^2 between two pure states.
    fn calculate_fidelity(&self, state1: &Array1<Complex64>, state2: &Array1<Complex64>) -> f64 {
        let overlap = state1
            .iter()
            .zip(state2.iter())
            .map(|(a, b)| a.conj() * b)
            .sum::<Complex64>();
        overlap.norm_sqr()
    }
}
/// A supervised time-series dataset for reservoir training: per-sample
/// parallel vectors plus optional annotation columns.
///
/// All `Vec`s are indexed by sample; the optional columns, when present,
/// are expected to have the same length as `inputs`
/// (NOTE(review): length agreement is not enforced here — confirm callers).
#[derive(Debug, Clone)]
pub struct ReservoirTrainingData {
/// Input vector for each sample.
pub inputs: Vec<Array1<f64>>,
/// Target output vector for each sample.
pub targets: Vec<Array1<f64>>,
/// Timestamp for each sample.
pub timestamps: Vec<f64>,
/// Optional precomputed feature vectors.
pub features: Option<Vec<Array1<f64>>>,
/// Optional classification labels.
pub labels: Option<Vec<usize>>,
/// Optional per-sample sequence lengths.
pub sequence_lengths: Option<Vec<usize>>,
/// Optional missing-value masks, one boolean array per sample.
pub missing_mask: Option<Vec<Array1<bool>>>,
/// Optional per-sample weights.
pub sample_weights: Option<Vec<f64>>,
/// Optional free-form key/value metadata per sample.
pub metadata: Option<Vec<HashMap<String, String>>>,
}
impl ReservoirTrainingData {
    /// Creates a dataset from aligned inputs, targets and timestamps; all
    /// optional annotation columns start as `None`.
    #[must_use]
    pub const fn new(
        inputs: Vec<Array1<f64>>,
        targets: Vec<Array1<f64>>,
        timestamps: Vec<f64>,
    ) -> Self {
        Self {
            inputs,
            targets,
            timestamps,
            features: None,
            labels: None,
            sequence_lengths: None,
            missing_mask: None,
            sample_weights: None,
            metadata: None,
        }
    }

    /// Builder-style setter for precomputed feature vectors.
    #[must_use]
    pub fn with_features(mut self, features: Vec<Array1<f64>>) -> Self {
        self.features = Some(features);
        self
    }

    /// Builder-style setter for classification labels.
    #[must_use]
    pub fn with_labels(mut self, labels: Vec<usize>) -> Self {
        self.labels = Some(labels);
        self
    }

    /// Builder-style setter for per-sample weights.
    #[must_use]
    pub fn with_weights(mut self, weights: Vec<f64>) -> Self {
        self.sample_weights = Some(weights);
        self
    }

    /// Number of samples (length of `inputs`).
    #[must_use]
    pub fn len(&self) -> usize {
        self.inputs.len()
    }

    /// True when the dataset holds no samples.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.inputs.is_empty()
    }

    /// Copies the sample range `start..end` out of every column, including
    /// the optional ones that are present.
    fn slice(&self, start: usize, end: usize) -> Self {
        Self {
            inputs: self.inputs[start..end].to_vec(),
            targets: self.targets[start..end].to_vec(),
            timestamps: self.timestamps[start..end].to_vec(),
            features: self.features.as_ref().map(|f| f[start..end].to_vec()),
            labels: self.labels.as_ref().map(|l| l[start..end].to_vec()),
            sequence_lengths: self
                .sequence_lengths
                .as_ref()
                .map(|s| s[start..end].to_vec()),
            missing_mask: self.missing_mask.as_ref().map(|m| m[start..end].to_vec()),
            sample_weights: self
                .sample_weights
                .as_ref()
                .map(|w| w[start..end].to_vec()),
            metadata: self.metadata.as_ref().map(|m| m[start..end].to_vec()),
        }
    }

    /// Splits the dataset into `(train, test)` in sample order: the last
    /// `floor(len * test_ratio)` samples form the test set.
    ///
    /// Robustness fix: `test_size` is clamped to `len()`, so a `test_ratio`
    /// above 1.0 no longer underflows `train_size` (a panic in debug
    /// builds). Negative or NaN ratios already produced an empty test set
    /// because float-to-int `as` casts saturate.
    #[must_use]
    pub fn train_test_split(&self, test_ratio: f64) -> (Self, Self) {
        let test_size = ((self.len() as f64 * test_ratio) as usize).min(self.len());
        let train_size = self.len() - test_size;
        (self.slice(0, train_size), self.slice(train_size, self.len()))
    }
}
/// One processed training example: the raw input, the reservoir response,
/// the derived features, and the supervised outcome for that sample.
#[derive(Debug, Clone)]
pub struct TrainingExample {
/// Raw input vector fed to the reservoir.
pub input: Array1<f64>,
/// Reservoir state (readout features' source) produced for this input.
pub reservoir_state: Array1<f64>,
/// Feature vector extracted from the reservoir state.
pub features: Array1<f64>,
/// Ground-truth target vector.
pub target: Array1<f64>,
/// Model prediction for this example.
pub prediction: Array1<f64>,
/// Prediction error (metric computed elsewhere — NOTE(review): confirm definition).
pub error: f64,
/// Confidence score associated with the prediction.
pub confidence: f64,
/// Timestamp of the example.
pub timestamp: f64,
/// Free-form numeric metadata keyed by name.
pub metadata: HashMap<String, f64>,
}
/// Aggregate performance and resource metrics for a quantum reservoir.
///
/// Values are computed and filled in elsewhere; this struct only stores
/// them (NOTE(review): exact definitions live with the code that writes
/// these fields — confirm there).
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ReservoirMetrics {
/// Number of training examples processed.
pub training_examples: usize,
/// Prediction accuracy score.
pub prediction_accuracy: f64,
/// Linear memory capacity.
pub memory_capacity: f64,
/// Nonlinear memory capacity.
pub nonlinear_memory_capacity: f64,
/// Information-processing capacity.
pub processing_capacity: f64,
/// Generalization (held-out) error.
pub generalization_error: f64,
/// Echo-state-property measure.
pub echo_state_property: f64,
/// Average processing time per step, in milliseconds.
pub avg_processing_time_ms: f64,
/// Quantum resource usage score.
pub quantum_resource_usage: f64,
/// Temporal correlation length.
pub temporal_correlation_length: f64,
/// Overall reservoir efficiency.
pub reservoir_efficiency: f64,
/// Rate of adaptation during training.
pub adaptation_rate: f64,
/// Plasticity level of the reservoir.
pub plasticity_level: f64,
/// Hardware utilization fraction.
pub hardware_utilization: f64,
/// Overhead attributable to error mitigation.
pub error_mitigation_overhead: f64,
/// Estimated quantum advantage over a classical baseline.
pub quantum_advantage: f64,
/// Computational complexity estimate.
pub computational_complexity: f64,
}
/// Summary of one reservoir training run.
///
/// Produced by the training routine (defined elsewhere); serializable for
/// logging and persistence.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrainingResult {
/// Error on the training set.
pub training_error: f64,
/// Error on the held-out test set.
pub test_error: f64,
/// Wall-clock training time, in milliseconds.
pub training_time_ms: f64,
/// Number of examples used for training.
pub num_examples: usize,
/// Echo-state-property measure after training.
pub echo_state_property: f64,
/// Linear memory capacity after training.
pub memory_capacity: f64,
/// Nonlinear memory capacity after training.
pub nonlinear_capacity: f64,
/// Information-processing capacity after training.
pub processing_capacity: f64,
}