quantrs2_device/process_tomography/validation.rs

//! Validation methods for process tomography

use scirs2_core::ndarray::Array4;
use scirs2_core::random::prelude::*;
use scirs2_core::Complex64;
use std::collections::HashMap;

use super::core::SciRS2ProcessTomographer;
use super::results::*;
use crate::DeviceResult;

impl SciRS2ProcessTomographer {
    /// Perform comprehensive validation: cross-validation, bootstrap resampling,
    /// and benchmarking (each gated by the validation configuration), followed by
    /// model selection over the available reconstruction methods.
    pub fn perform_validation(
        &self,
        experimental_data: &ExperimentalData,
    ) -> DeviceResult<ProcessValidationResults> {
        let cross_validation = if self.config.validation_config.enable_cross_validation {
            Some(self.perform_cross_validation(experimental_data)?)
        } else {
            None
        };

        let bootstrap_results = if self.config.validation_config.enable_bootstrap {
            Some(self.perform_bootstrap_validation(experimental_data)?)
        } else {
            None
        };

        let benchmark_results = if self.config.validation_config.enable_benchmarking {
            Some(self.perform_benchmark_validation(experimental_data)?)
        } else {
            None
        };

        let model_selection = self.perform_model_selection(experimental_data)?;

        Ok(ProcessValidationResults {
            cross_validation,
            bootstrap_results,
            benchmark_results,
            model_selection,
        })
    }

    /// Perform k-fold cross-validation
    fn perform_cross_validation(
        &self,
        experimental_data: &ExperimentalData,
    ) -> DeviceResult<CrossValidationResults> {
        let num_folds = self.config.validation_config.cv_folds;
        let data_size = experimental_data.measurement_results.len();
        let fold_size = data_size / num_folds;

        let mut fold_scores = Vec::new();

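        // Contiguous (unshuffled) k-fold split: fold i uses indices
        // [i * fold_size, (i + 1) * fold_size) as the validation set (the final fold
        // absorbs any remainder); all remaining samples form the training set.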
        for fold_idx in 0..num_folds {
            // Create training and validation sets
            let val_start = fold_idx * fold_size;
            let val_end = if fold_idx == num_folds - 1 {
                data_size
            } else {
                (fold_idx + 1) * fold_size
            };

            let training_data = self.create_training_fold(experimental_data, val_start, val_end)?;
            let validation_data =
                self.create_validation_fold(experimental_data, val_start, val_end)?;

            // Train on training set
            let (process_matrix, _) = self.linear_inversion_reconstruction(&training_data)?;

            // Evaluate on validation set
            let score = self.evaluate_process_quality(&process_matrix, &validation_data)?;
            fold_scores.push(score);
        }

        let mean_score = fold_scores.iter().sum::<f64>() / fold_scores.len() as f64;
        let variance = fold_scores
            .iter()
            .map(|&x| (x - mean_score).powi(2))
            .sum::<f64>()
            / fold_scores.len() as f64;
        let std_score = variance.sqrt();

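        // The interval below is the normal-approximation 95% CI for the mean fold score:
        // mean ± 1.96 · s / sqrt(k), with s the standard deviation across the k folds.
        // With very few folds, a Student-t quantile would give a slightly wider interval.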
        // 95% confidence interval
        let margin = 1.96 * std_score / (fold_scores.len() as f64).sqrt();
        let confidence_interval = (mean_score - margin, mean_score + margin);

        Ok(CrossValidationResults {
            fold_scores,
            mean_score,
            std_score,
            confidence_interval,
        })
    }

    /// Perform bootstrap validation
    fn perform_bootstrap_validation(
        &self,
        experimental_data: &ExperimentalData,
    ) -> DeviceResult<BootstrapResults> {
        let num_bootstrap = self.config.validation_config.bootstrap_samples;
        let data_size = experimental_data.measurement_results.len();

        let mut bootstrap_samples = Vec::new();
        let mut bootstrap_metrics = HashMap::new();

        for _ in 0..num_bootstrap {
            // Create bootstrap sample
            let bootstrap_data = self.create_bootstrap_sample(experimental_data)?;

            // Reconstruct process
            let (process_matrix, _) = self.linear_inversion_reconstruction(&bootstrap_data)?;

            // Calculate metrics
            let metrics = self.calculate_process_metrics(&process_matrix)?;
            bootstrap_samples.push(metrics.clone());

            // Collect metrics for confidence intervals
            self.collect_bootstrap_metrics(&metrics, &mut bootstrap_metrics);
        }

        // Calculate confidence intervals
        let confidence_intervals =
            self.calculate_bootstrap_confidence_intervals(&bootstrap_metrics)?;

        // Calculate bias estimates
        let bias_estimates = self.calculate_bootstrap_bias(&bootstrap_samples)?;

        Ok(BootstrapResults {
            bootstrap_samples,
            confidence_intervals,
            bias_estimates,
        })
    }

    /// Perform benchmark validation
    fn perform_benchmark_validation(
        &self,
        experimental_data: &ExperimentalData,
    ) -> DeviceResult<BenchmarkResults> {
        let benchmarks = &self.config.validation_config.benchmark_processes;
        let mut benchmark_scores = HashMap::new();
        let mut rankings = HashMap::new();

        // Reconstruct experimental process
        let (experimental_process, _) = self.linear_inversion_reconstruction(experimental_data)?;

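        // Each named benchmark is mapped to a reference process tensor and compared
        // against the experimental reconstruction via the normalized overlap computed
        // in `calculate_process_fidelity_between`; ranks are then assigned by fidelity.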
        for benchmark_name in benchmarks {
            let benchmark_process = self.create_benchmark_process(benchmark_name)?;
            let fidelity =
                self.calculate_process_fidelity_between(&experimental_process, &benchmark_process)?;

            benchmark_scores.insert(benchmark_name.clone(), fidelity);
        }

        // Rank benchmarks by fidelity (1 = closest match to the experimental process)
        let mut ranked: Vec<(String, f64)> = benchmark_scores
            .iter()
            .map(|(name, &score)| (name.clone(), score))
            .collect();
        ranked.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
        for (rank, (name, _)) in ranked.into_iter().enumerate() {
            rankings.insert(name, rank + 1);
        }

        Ok(BenchmarkResults {
            benchmark_scores,
            rankings,
        })
    }

    /// Perform model selection across reconstruction methods using AIC, BIC,
    /// and cross-validation scores.
    fn perform_model_selection(
        &self,
        experimental_data: &ExperimentalData,
    ) -> DeviceResult<ModelSelectionResults> {
        let reconstruction_methods = vec![
            "linear_inversion",
            "maximum_likelihood",
            "compressed_sensing",
            "bayesian",
        ];

        let mut aic_scores = HashMap::new();
        let mut bic_scores = HashMap::new();
        let mut cv_scores = HashMap::new();
        let mut model_weights = HashMap::new();

        let mut best_score = f64::NEG_INFINITY;
        let mut best_model = "linear_inversion".to_string();

        for method in &reconstruction_methods {
            // Reconstruct using method
            let (process_matrix, quality) = match *method {
                "linear_inversion" => self.linear_inversion_reconstruction(experimental_data)?,
                "maximum_likelihood" => {
                    self.maximum_likelihood_reconstruction(experimental_data)?
                }
                "compressed_sensing" => {
                    self.compressed_sensing_reconstruction(experimental_data)?
                }
                "bayesian" => self.bayesian_reconstruction(experimental_data)?,
                _ => self.linear_inversion_reconstruction(experimental_data)?,
            };

            // Calculate model selection criteria
            let num_params = self.estimate_num_parameters(&process_matrix);
            let log_likelihood = quality.log_likelihood;
            let n_data = experimental_data.measurement_results.len() as f64;

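            // Information criteria for log-likelihood ln L, k parameters, n data points:
            //   AIC = 2k - 2 ln L,   BIC = k ln n - 2 ln L
            // (lower is better for both).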
            let aic = (-2.0f64).mul_add(log_likelihood, 2.0 * num_params as f64);
            let bic = (-2.0f64).mul_add(log_likelihood, num_params as f64 * n_data.ln());

            // Cross-validation score
            let cv_score = self.calculate_cv_score_for_method(method, experimental_data)?;

            aic_scores.insert(method.to_string(), aic);
            bic_scores.insert(method.to_string(), bic);
            cv_scores.insert(method.to_string(), cv_score);

            // Track best model (lowest AIC)
            if -aic > best_score {
                best_score = -aic;
                best_model = method.to_string();
            }
        }

        // Calculate model weights (Akaike weights)
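        // Akaike weights: w_i = exp(-Δ_i / 2) / Σ_j exp(-Δ_j / 2), with Δ_i = AIC_i - AIC_min.
        // Worked example: ΔAIC = (0, 2, 10) gives raw weights (1.0, ~0.37, ~0.007),
        // which normalize to roughly (0.73, 0.27, 0.005).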
        let min_aic = aic_scores.values().copied().fold(f64::INFINITY, f64::min);
        let mut weight_sum = 0.0;

        for (model, &aic) in &aic_scores {
            let delta_aic = aic - min_aic;
            let weight = (-0.5 * delta_aic).exp();
            model_weights.insert(model.clone(), weight);
            weight_sum += weight;
        }

        // Normalize weights
        for weight in model_weights.values_mut() {
            *weight /= weight_sum;
        }

        Ok(ModelSelectionResults {
            aic_scores,
            bic_scores,
            cross_validation_scores: cv_scores,
            best_model,
            model_weights,
        })
    }

    /// Create training fold for cross-validation
    fn create_training_fold(
        &self,
        data: &ExperimentalData,
        val_start: usize,
        val_end: usize,
    ) -> DeviceResult<ExperimentalData> {
        let mut training_results = Vec::new();
        let mut training_uncertainties = Vec::new();

        for (i, (&result, &uncertainty)) in data
            .measurement_results
            .iter()
            .zip(data.measurement_uncertainties.iter())
            .enumerate()
        {
            if i < val_start || i >= val_end {
                training_results.push(result);
                training_uncertainties.push(uncertainty);
            }
        }

        Ok(ExperimentalData {
            input_states: data.input_states.clone(),
            measurement_operators: data.measurement_operators.clone(),
            measurement_results: training_results,
            measurement_uncertainties: training_uncertainties,
        })
    }

    /// Create validation fold for cross-validation
    fn create_validation_fold(
        &self,
        data: &ExperimentalData,
        val_start: usize,
        val_end: usize,
    ) -> DeviceResult<ExperimentalData> {
        let validation_results = data.measurement_results[val_start..val_end].to_vec();
        let validation_uncertainties = data.measurement_uncertainties[val_start..val_end].to_vec();

        Ok(ExperimentalData {
            input_states: data.input_states.clone(),
            measurement_operators: data.measurement_operators.clone(),
            measurement_results: validation_results,
            measurement_uncertainties: validation_uncertainties,
        })
    }

    /// Create a bootstrap resample of the experimental data (sampling with replacement)
    fn create_bootstrap_sample(&self, data: &ExperimentalData) -> DeviceResult<ExperimentalData> {
        let mut rng = thread_rng();

        let data_size = data.measurement_results.len();
        let mut bootstrap_results = Vec::new();
        let mut bootstrap_uncertainties = Vec::new();

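        // Resample indices uniformly with replacement; each replicate has the same size
        // as the original data set, so on average about 63% of the original points
        // appear in any given replicate.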
        for _ in 0..data_size {
            let idx = rng.gen_range(0..data_size);
            bootstrap_results.push(data.measurement_results[idx]);
            bootstrap_uncertainties.push(data.measurement_uncertainties[idx]);
        }

        Ok(ExperimentalData {
            input_states: data.input_states.clone(),
            measurement_operators: data.measurement_operators.clone(),
            measurement_results: bootstrap_results,
            measurement_uncertainties: bootstrap_uncertainties,
        })
    }

    /// Evaluate process quality
    fn evaluate_process_quality(
        &self,
        process_matrix: &Array4<Complex64>,
        validation_data: &ExperimentalData,
    ) -> DeviceResult<f64> {
        // Calculate log-likelihood on validation data
        let mut log_likelihood = 0.0;

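        // Gaussian log-likelihood up to additive constants:
        //   ln L = -1/2 Σ_i (y_i - ŷ_i)^2 / σ_i^2
        // (the -1/2 ln(2π σ_i^2) terms are omitted since they do not depend on the
        // reconstructed process). The prediction below is still a placeholder.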
        for (observed, &uncertainty) in validation_data
            .measurement_results
            .iter()
            .zip(validation_data.measurement_uncertainties.iter())
        {
            let predicted = 0.5; // Placeholder prediction
            let diff = observed - predicted;
            let variance = uncertainty * uncertainty;
            log_likelihood -= 0.5 * (diff * diff / variance);
        }

        Ok(log_likelihood)
    }

    /// Collect bootstrap metrics
    fn collect_bootstrap_metrics(
        &self,
        metrics: &ProcessMetrics,
        bootstrap_metrics: &mut HashMap<String, Vec<f64>>,
    ) {
        bootstrap_metrics
            .entry("process_fidelity".to_string())
            .or_default()
            .push(metrics.process_fidelity);

        bootstrap_metrics
            .entry("average_gate_fidelity".to_string())
            .or_default()
            .push(metrics.average_gate_fidelity);

        bootstrap_metrics
            .entry("unitarity".to_string())
            .or_default()
            .push(metrics.unitarity);

        bootstrap_metrics
            .entry("entangling_power".to_string())
            .or_default()
            .push(metrics.entangling_power);
    }

    /// Calculate bootstrap confidence intervals
    fn calculate_bootstrap_confidence_intervals(
        &self,
        bootstrap_metrics: &HashMap<String, Vec<f64>>,
    ) -> DeviceResult<HashMap<String, (f64, f64)>> {
        let mut confidence_intervals = HashMap::new();

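        // Percentile bootstrap: for each metric, the 95% interval is read off as the
        // 2.5th and 97.5th percentiles of its bootstrap distribution.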
        for (metric_name, values) in bootstrap_metrics {
            let mut sorted_values = values.clone();
            sorted_values.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));

            let n = sorted_values.len();
            let lower_idx = (0.025 * n as f64) as usize;
            let upper_idx = (0.975 * n as f64) as usize;

            let lower_bound = sorted_values[lower_idx.min(n - 1)];
            let upper_bound = sorted_values[upper_idx.min(n - 1)];

            confidence_intervals.insert(metric_name.clone(), (lower_bound, upper_bound));
        }

        Ok(confidence_intervals)
    }

    /// Calculate bootstrap bias estimates
    fn calculate_bootstrap_bias(
        &self,
        bootstrap_samples: &[ProcessMetrics],
    ) -> DeviceResult<HashMap<String, f64>> {
        let mut bias_estimates = HashMap::new();

        if bootstrap_samples.is_empty() {
            return Ok(bias_estimates);
        }

        // Calculate means
        let mean_fidelity = bootstrap_samples
            .iter()
            .map(|m| m.process_fidelity)
            .sum::<f64>()
            / bootstrap_samples.len() as f64;

        let mean_unitarity = bootstrap_samples.iter().map(|m| m.unitarity).sum::<f64>()
            / bootstrap_samples.len() as f64;

        // Bias is difference from theoretical expectation (assuming ideal = 1.0)
        bias_estimates.insert("process_fidelity".to_string(), mean_fidelity - 1.0);
        bias_estimates.insert("unitarity".to_string(), mean_unitarity - 1.0);

        Ok(bias_estimates)
    }

    /// Create benchmark process
    fn create_benchmark_process(&self, benchmark_name: &str) -> DeviceResult<Array4<Complex64>> {
        let dim = 2; // Assume single qubit for simplicity
        let mut process = Array4::zeros((dim, dim, dim, dim));

        match benchmark_name {
            "identity" => {
                // Identity process
                for i in 0..dim {
                    process[[i, i, i, i]] = Complex64::new(1.0, 0.0);
                }
            }
            "pauli_x" => {
                // Pauli-X process
                process[[0, 0, 1, 1]] = Complex64::new(1.0, 0.0);
                process[[1, 1, 0, 0]] = Complex64::new(1.0, 0.0);
                process[[0, 1, 1, 0]] = Complex64::new(1.0, 0.0);
                process[[1, 0, 0, 1]] = Complex64::new(1.0, 0.0);
            }
            "pauli_y" => {
                // Pauli-Y process
                process[[0, 0, 1, 1]] = Complex64::new(1.0, 0.0);
                process[[1, 1, 0, 0]] = Complex64::new(1.0, 0.0);
                process[[0, 1, 1, 0]] = Complex64::new(0.0, -1.0);
                process[[1, 0, 0, 1]] = Complex64::new(0.0, 1.0);
            }
            "pauli_z" => {
                // Pauli-Z process
                process[[0, 0, 0, 0]] = Complex64::new(1.0, 0.0);
                process[[1, 1, 1, 1]] = Complex64::new(-1.0, 0.0);
            }
            "hadamard" => {
                // Hadamard process
                let factor = 1.0 / (2.0_f64).sqrt();
                process[[0, 0, 0, 0]] = Complex64::new(factor, 0.0);
                process[[0, 0, 1, 1]] = Complex64::new(factor, 0.0);
                process[[1, 1, 0, 0]] = Complex64::new(factor, 0.0);
                process[[1, 1, 1, 1]] = Complex64::new(-factor, 0.0);
                process[[0, 1, 0, 1]] = Complex64::new(factor, 0.0);
                process[[0, 1, 1, 0]] = Complex64::new(factor, 0.0);
                process[[1, 0, 0, 1]] = Complex64::new(factor, 0.0);
                process[[1, 0, 1, 0]] = Complex64::new(-factor, 0.0);
            }
            _ => {
                // Default to identity
                for i in 0..dim {
                    process[[i, i, i, i]] = Complex64::new(1.0, 0.0);
                }
            }
        }

        Ok(process)
    }

    /// Calculate process fidelity between two processes
    fn calculate_process_fidelity_between(
        &self,
        process1: &Array4<Complex64>,
        process2: &Array4<Complex64>,
    ) -> DeviceResult<f64> {
        let dim = process1.dim().0;
        let mut fidelity = 0.0;
        let mut norm1 = 0.0;
        let mut norm2 = 0.0;

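        // Normalized Hilbert-Schmidt-style overlap between the two process tensors,
        // Re<P1, P2> / (||P1||_F * ||P2||_F); a cosine-similarity proxy for process
        // fidelity rather than an exact fidelity measure.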
        for i in 0..dim {
            for j in 0..dim {
                for k in 0..dim {
                    for l in 0..dim {
                        let element1 = process1[[i, j, k, l]];
                        let element2 = process2[[i, j, k, l]];

                        fidelity += (element1.conj() * element2).re;
                        norm1 += element1.norm_sqr();
                        norm2 += element2.norm_sqr();
                    }
                }
            }
        }

        if norm1 > 1e-12 && norm2 > 1e-12 {
            Ok(fidelity / (norm1 * norm2).sqrt())
        } else {
            Ok(0.0)
        }
    }

    /// Estimate number of parameters in process matrix
    fn estimate_num_parameters(&self, process_matrix: &Array4<Complex64>) -> usize {
        let dim = process_matrix.dim().0;
        // Complex process matrix has 2 * dim^4 real parameters
        // But with constraints (trace preservation, etc.), effective parameters are fewer
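        // e.g. for a single qubit (dim = 2): 2 * 2^4 - 2^2 = 32 - 4 = 28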
        2 * dim * dim * dim * dim - dim * dim // Subtract constraints
    }

    /// Calculate cross-validation score for a specific reconstruction method
    fn calculate_cv_score_for_method(
        &self,
        method: &str,
        experimental_data: &ExperimentalData,
    ) -> DeviceResult<f64> {
        // Simplified CV score: use the reconstruction's own log-likelihood as the score
        let (_, quality) = match method {
            "maximum_likelihood" => self.maximum_likelihood_reconstruction(experimental_data)?,
            "compressed_sensing" => self.compressed_sensing_reconstruction(experimental_data)?,
            "bayesian" => self.bayesian_reconstruction(experimental_data)?,
            // "linear_inversion" and any unrecognized method fall back to linear inversion
            _ => self.linear_inversion_reconstruction(experimental_data)?,
        };

        Ok(quality.log_likelihood)
    }
}