#![allow(dead_code)]
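
//! Quantum advantage analysis for QUBO problems.
//!
//! This module characterizes a problem instance, estimates classical and
//! quantum resource requirements, compares expected performance, and produces
//! algorithm recommendations and supremacy-benchmark results.
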
use scirs2_core::ndarray::Array2;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::{Duration, Instant};

pub struct QuantumAdvantageAnalyzer {
    /// Analysis configuration.
    config: AnalysisConfig,
    /// Estimator for classical algorithm complexity.
    classical_estimator: ClassicalComplexityEstimator,
    /// Estimator for quantum resource requirements.
    quantum_estimator: QuantumResourceEstimator,
    /// Benchmarker for quantum supremacy experiments.
    benchmarker: QuantumSupremacyBenchmarker,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnalysisConfig {
    /// Range of problem sizes to analyze (min, max).
    pub problem_size_range: (usize, usize),
    /// Number of sample problems per size.
    pub num_samples: usize,
    /// Statistical confidence level for the analysis.
    pub confidence_level: f64,
    /// Classical baseline algorithms to compare against.
    pub classical_baselines: Vec<ClassicalAlgorithm>,
    /// Quantum algorithms to evaluate.
    pub quantum_algorithms: Vec<QuantumAlgorithm>,
    /// Hardware models to consider.
    pub hardware_models: Vec<HardwareModel>,
    /// Noise models to consider.
    pub noise_models: Vec<NoiseModel>,
}

impl Default for AnalysisConfig {
    fn default() -> Self {
        Self {
            problem_size_range: (10, 1000),
            num_samples: 100,
            confidence_level: 0.95,
            classical_baselines: vec![
                ClassicalAlgorithm::SimulatedAnnealing,
                ClassicalAlgorithm::TabuSearch,
                ClassicalAlgorithm::GeneticAlgorithm,
                ClassicalAlgorithm::BranchAndBound,
            ],
            quantum_algorithms: vec![
                QuantumAlgorithm::QuantumAnnealing,
                QuantumAlgorithm::QAOA,
                QuantumAlgorithm::VQE,
                QuantumAlgorithm::QFAST,
            ],
            hardware_models: vec![
                HardwareModel::IdealQuantum,
                HardwareModel::NoisyNISQ { error_rate: 0.001 },
                HardwareModel::DigitalAnnealer,
                HardwareModel::PhotonicIsing,
            ],
            noise_models: vec![
                NoiseModel::None,
                NoiseModel::Depolarizing { rate: 0.001 },
                NoiseModel::AmplitudeDamping { rate: 0.01 },
                NoiseModel::Realistic {
                    decoherence_time: Duration::from_micros(100),
                },
            ],
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum ClassicalAlgorithm {
    SimulatedAnnealing,
    TabuSearch,
    GeneticAlgorithm,
    BranchAndBound,
    ConstraintProgramming,
    LinearProgramming,
    SDP,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum QuantumAlgorithm {
    QuantumAnnealing,
    QAOA,
    VQE,
    QFAST,
    QuantumWalk,
    AdiabaticQuantumComputing,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum HardwareModel {
    IdealQuantum,
    NoisyNISQ { error_rate: f64 },
    DigitalAnnealer,
    PhotonicIsing,
    CoherentIsingMachine,
    SuperconductingAnnealer,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum NoiseModel {
    None,
    Depolarizing { rate: f64 },
    AmplitudeDamping { rate: f64 },
    PhaseDamping { rate: f64 },
    Realistic { decoherence_time: Duration },
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AdvantageAnalysisResult {
    pub problem_info: ProblemCharacteristics,
    pub classical_performance: HashMap<ClassicalAlgorithm, PerformanceMetrics>,
    pub quantum_performance: HashMap<QuantumAlgorithm, QuantumPerformanceMetrics>,
    pub advantage_analysis: AdvantageAnalysis,
    pub threshold_analysis: ThresholdAnalysis,
    pub recommendations: Vec<AlgorithmRecommendation>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProblemCharacteristics {
    pub problem_type: String,
    pub num_variables: usize,
    pub density: f64,
    pub connectivity: ConnectivityStructure,
    pub hardness_indicators: HardnessIndicators,
    pub symmetries: Vec<SymmetryType>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ConnectivityStructure {
    FullyConnected,
    Sparse { avg_degree: f64 },
    Grid { dimensions: Vec<usize> },
    Tree,
    Planar,
    SmallWorld { clustering_coefficient: f64 },
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HardnessIndicators {
    pub complexity_class: ComplexityClass,
    pub difficulty_metrics: HashMap<String, f64>,
    pub approximation_bounds: Option<(f64, f64)>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ComplexityClass {
    P,
    NP,
    NPComplete,
    NPHard,
    PSpace,
    ExpTime,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SymmetryType {
    Permutation,
    Reflection,
    Rotation,
    Translation,
    Scale,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    pub time_complexity: TimeComplexity,
    pub space_complexity: SpaceComplexity,
    pub solution_quality: QualityMetrics,
    pub success_probability: f64,
    pub scaling_behavior: ScalingBehavior,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QuantumPerformanceMetrics {
    pub base_metrics: PerformanceMetrics,
    pub quantum_metrics: QuantumSpecificMetrics,
    pub hardware_requirements: HardwareRequirements,
    pub noise_sensitivity: NoiseSensitivity,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QuantumSpecificMetrics {
    pub qubits_required: usize,
    pub circuit_depth: usize,
    pub quantum_operations: usize,
    pub entanglement_measures: EntanglementMeasures,
    pub coherence_time_required: Duration,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HardwareRequirements {
    pub min_qubits: usize,
    pub connectivity_requirements: ConnectivityRequirement,
    pub gate_fidelity_threshold: f64,
    pub measurement_fidelity_threshold: f64,
    pub temperature_requirements: Option<f64>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ConnectivityRequirement {
    AllToAll,
    NearestNeighbor,
    Specific { required_edges: Vec<(usize, usize)> },
    MinDegree { min_degree: usize },
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NoiseSensitivity {
    pub error_thresholds: HashMap<String, f64>,
    pub degradation_rates: HashMap<String, f64>,
    pub mitigation_effectiveness: HashMap<String, f64>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntanglementMeasures {
    pub avg_entanglement_entropy: f64,
    pub max_entanglement_entropy: f64,
    pub entanglement_depth: usize,
    pub multipartite_measures: HashMap<String, f64>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimeComplexity {
    pub best_case: ComplexityFunction,
    pub average_case: ComplexityFunction,
    pub worst_case: ComplexityFunction,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SpaceComplexity {
    pub memory_complexity: ComplexityFunction,
    pub storage_complexity: ComplexityFunction,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ComplexityFunction {
    Constant,
    Logarithmic,
    Linear,
    Linearithmic,
    Quadratic,
    Cubic,
    Polynomial { degree: f64 },
    Exponential { base: f64 },
    Factorial,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityMetrics {
    pub approximation_ratio: f64,
    pub solution_variance: f64,
    pub optimal_probability: f64,
    pub energy_gap: f64,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScalingBehavior {
    pub scaling_exponent: f64,
    pub scaling_constant: f64,
    pub confidence_interval: (f64, f64),
    pub fit_quality: f64,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AdvantageAnalysis {
    pub has_quantum_advantage: bool,
    pub confidence: f64,
    pub advantage_factors: HashMap<String, f64>,
    pub conditional_advantages: Vec<ConditionalAdvantage>,
    pub break_even_points: HashMap<String, f64>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConditionalAdvantage {
    pub conditions: Vec<String>,
    pub advantage_magnitude: f64,
    pub condition_probability: f64,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ThresholdAnalysis {
    pub size_thresholds: HashMap<String, usize>,
    pub noise_thresholds: HashMap<String, f64>,
    pub hardware_thresholds: HashMap<String, f64>,
    pub time_to_advantage: HashMap<String, Duration>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlgorithmRecommendation {
    pub algorithm: String,
    pub algorithm_type: AlgorithmType,
    pub confidence: f64,
    pub expected_performance: PerformanceMetrics,
    pub justification: String,
    pub alternatives: Vec<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AlgorithmType {
    Classical,
    Quantum,
    Hybrid,
}

pub struct ClassicalComplexityEstimator {
    complexity_database: HashMap<String, ComplexityInfo>,
    heuristic_estimators: Vec<Box<dyn ComplexityHeuristic>>,
}

pub trait ComplexityHeuristic {
    fn estimate_complexity(&self, problem: &ProblemCharacteristics) -> PerformanceMetrics;
    fn algorithm_name(&self) -> &str;
}

#[derive(Debug, Clone)]
pub struct ComplexityInfo {
    pub time_complexity: TimeComplexity,
    pub space_complexity: SpaceComplexity,
    pub approximation_bounds: Option<(f64, f64)>,
    pub practical_scaling: ScalingBehavior,
}

pub struct QuantumResourceEstimator {
    algorithm_database: HashMap<String, QuantumAlgorithmInfo>,
    hardware_database: HashMap<String, HardwareCharacteristics>,
}

#[derive(Debug, Clone)]
pub struct QuantumAlgorithmInfo {
    pub resource_requirements: HardwareRequirements,
    pub performance_model: QuantumPerformanceModel,
    pub noise_sensitivity: NoiseSensitivity,
    pub theoretical_advantage: Option<AdvantageEstimate>,
}

pub struct QuantumPerformanceModel {
    pub time_to_solution: Box<dyn Fn(usize) -> Duration>,
    pub success_probability: Box<dyn Fn(usize, f64) -> f64>,
    pub resource_scaling: ScalingBehavior,
}

impl std::fmt::Debug for QuantumPerformanceModel {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("QuantumPerformanceModel")
            .field("time_to_solution", &"<function>")
            .field("success_probability", &"<function>")
            .field("resource_scaling", &self.resource_scaling)
            .finish()
    }
}

impl Clone for QuantumPerformanceModel {
    fn clone(&self) -> Self {
        // Boxed closures cannot be cloned, so the clone substitutes simple
        // placeholder models and only preserves the scaling behavior.
        Self {
            time_to_solution: Box::new(|_| Duration::from_secs(1)),
            success_probability: Box::new(|_, _| 0.5),
            resource_scaling: self.resource_scaling.clone(),
        }
    }
}

#[derive(Debug, Clone)]
pub struct AdvantageEstimate {
    pub speedup_factor: f64,
    pub advantage_type: AdvantageType,
    pub confidence_level: f64,
    pub conditions: Vec<String>,
}

#[derive(Debug, Clone)]
pub enum AdvantageType {
    Polynomial { degree_reduction: f64 },
    Exponential { base_improvement: f64 },
    Constant { factor: f64 },
    Conditional { conditions: Vec<String> },
}

#[derive(Debug, Clone)]
pub struct HardwareCharacteristics {
    pub qubit_count: usize,
    pub connectivity: ConnectivityStructure,
    pub gate_fidelities: HashMap<String, f64>,
    pub measurement_fidelity: f64,
    pub coherence_times: HashMap<String, Duration>,
    pub operating_parameters: HashMap<String, f64>,
}

pub struct QuantumSupremacyBenchmarker {
    problem_generators: Vec<Box<dyn BenchmarkProblemGenerator>>,
    performance_trackers: HashMap<String, PerformanceTracker>,
}

pub trait BenchmarkProblemGenerator {
    fn generate_problem(&self, size: usize, seed: Option<u64>) -> BenchmarkProblem;
    fn problem_type(&self) -> &str;
    fn difficulty_level(&self) -> DifficultyLevel;
}

#[derive(Debug, Clone)]
pub struct BenchmarkProblem {
    pub qubo: Array2<f64>,
    pub metadata: ProblemMetadata,
    pub known_optimal: Option<f64>,
    pub theoretical_properties: TheoreticalProperties,
}

#[derive(Debug, Clone)]
pub struct ProblemMetadata {
    pub problem_type: String,
    pub size: usize,
    pub density: f64,
    pub generation_seed: Option<u64>,
    pub generation_time: Instant,
}

#[derive(Debug, Clone)]
pub struct TheoreticalProperties {
    pub complexity_class: ComplexityClass,
    pub approximation_hardness: Option<f64>,
    pub spectral_gap: Option<f64>,
    pub landscape_features: LandscapeFeatures,
}

#[derive(Debug, Clone)]
pub struct LandscapeFeatures {
    pub num_local_minima: Option<usize>,
    pub barrier_heights: Vec<f64>,
    pub correlation_length: f64,
    pub ruggedness_measure: f64,
}

#[derive(Debug, Clone)]
pub enum DifficultyLevel {
    Easy,
    Medium,
    Hard,
    ExtremelyHard,
}

#[derive(Debug, Clone)]
pub struct PerformanceTracker {
    pub algorithm_name: String,
    pub performance_history: Vec<PerformanceDataPoint>,
    pub scaling_model: Option<ScalingBehavior>,
}

#[derive(Debug, Clone)]
pub struct PerformanceDataPoint {
    pub problem_size: usize,
    pub execution_time: Duration,
    pub solution_quality: f64,
    pub success_rate: f64,
    pub timestamp: Instant,
}

impl QuantumAdvantageAnalyzer {
    pub fn new(config: AnalysisConfig) -> Self {
        Self {
            config,
            classical_estimator: ClassicalComplexityEstimator::new(),
            quantum_estimator: QuantumResourceEstimator::new(),
            benchmarker: QuantumSupremacyBenchmarker::new(),
        }
    }

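    /// Analyze potential quantum advantage for the given QUBO matrix,
    /// characterizing the problem and comparing the configured classical
    /// baselines against the configured quantum algorithms.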
    pub fn analyze_advantage(
        &self,
        qubo: &Array2<f64>,
        problem_metadata: Option<ProblemMetadata>,
    ) -> Result<AdvantageAnalysisResult, String> {
        let problem_chars = self.characterize_problem(qubo, problem_metadata)?;

        let classical_performance = self
            .classical_estimator
            .estimate_classical_performance(&problem_chars, &self.config.classical_baselines)?;

        let quantum_performance = self
            .quantum_estimator
            .estimate_quantum_performance(&problem_chars, &self.config.quantum_algorithms)?;

        let advantage_analysis = self.analyze_quantum_advantage(
            &classical_performance,
            &quantum_performance,
            &problem_chars,
        )?;

        let threshold_analysis =
            self.analyze_thresholds(&classical_performance, &quantum_performance, &problem_chars)?;

        let recommendations = self.generate_recommendations(
            &classical_performance,
            &quantum_performance,
            &advantage_analysis,
            &problem_chars,
        )?;

        Ok(AdvantageAnalysisResult {
            problem_info: problem_chars,
            classical_performance,
            quantum_performance,
            advantage_analysis,
            threshold_analysis,
            recommendations,
        })
    }

    fn characterize_problem(
        &self,
        qubo: &Array2<f64>,
        metadata: Option<ProblemMetadata>,
    ) -> Result<ProblemCharacteristics, String> {
        let num_variables = qubo.shape()[0];

        let non_zero_count = qubo.iter().filter(|&&x| x.abs() > 1e-10).count();
        let density = non_zero_count as f64 / (num_variables * num_variables) as f64;

        let connectivity = self.analyze_connectivity(qubo)?;

        let hardness_indicators = self.compute_hardness_indicators(qubo)?;

        let symmetries = self.detect_symmetries(qubo)?;

        Ok(ProblemCharacteristics {
            problem_type: metadata
                .as_ref()
                .map_or_else(|| "Unknown".to_string(), |m| m.problem_type.clone()),
            num_variables,
            density,
            connectivity,
            hardness_indicators,
            symmetries,
        })
    }

    fn analyze_connectivity(&self, qubo: &Array2<f64>) -> Result<ConnectivityStructure, String> {
        let n = qubo.shape()[0];
        let mut edge_count = 0;
        let mut degree_sum = 0;

        for i in 0..n {
            let mut degree = 0;
            for j in 0..n {
                if i != j && qubo[[i, j]].abs() > 1e-10 {
                    if i < j {
                        edge_count += 1;
                    }
                    degree += 1;
                }
            }
            degree_sum += degree;
        }

        let avg_degree = degree_sum as f64 / n as f64;

        if edge_count == n * (n - 1) / 2 {
            Ok(ConnectivityStructure::FullyConnected)
        } else if avg_degree < n as f64 / 4.0 {
            Ok(ConnectivityStructure::Sparse { avg_degree })
        } else {
            // Heuristic: an average degree near 4 suggests a 2D grid layout.
            let expected_grid_degree = 4.0;
            if (avg_degree - expected_grid_degree).abs() < 1.0 {
                Ok(ConnectivityStructure::Grid {
                    dimensions: vec![(n as f64).sqrt() as usize, (n as f64).sqrt() as usize],
                })
            } else {
                Ok(ConnectivityStructure::Sparse { avg_degree })
            }
        }
    }

    fn compute_hardness_indicators(
        &self,
        qubo: &Array2<f64>,
    ) -> Result<HardnessIndicators, String> {
        let n = qubo.shape()[0];

        // Size-based heuristic: small instances are treated as tractable,
        // larger ones as NP-complete.
        let complexity_class = if n < 100 {
            ComplexityClass::P
        } else {
            ComplexityClass::NPComplete
        };

        let mut difficulty_metrics = HashMap::new();

        let coeffs: Vec<f64> = qubo.iter().copied().collect();
        let mean = coeffs.iter().sum::<f64>() / coeffs.len() as f64;
        let variance = coeffs.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / coeffs.len() as f64;
        difficulty_metrics.insert("coefficient_variance".to_string(), variance);

        let max_coeff = coeffs.iter().map(|x| x.abs()).fold(0.0, f64::max);
        let min_coeff = coeffs
            .iter()
            .map(|x| x.abs())
            .filter(|&x| x > 1e-10)
            .fold(f64::INFINITY, f64::min);
        if min_coeff.is_finite() {
            difficulty_metrics.insert("dynamic_range".to_string(), max_coeff / min_coeff);
        }

        // Sum of positive couplings as a simple frustration proxy.
        let mut frustration = 0.0;
        for i in 0..n {
            for j in i + 1..n {
                if qubo[[i, j]] > 0.0 {
                    frustration += qubo[[i, j]];
                }
            }
        }
        difficulty_metrics.insert("frustration_measure".to_string(), frustration);

        Ok(HardnessIndicators {
            complexity_class,
            difficulty_metrics,
            approximation_bounds: None,
        })
    }

    fn detect_symmetries(&self, qubo: &Array2<f64>) -> Result<Vec<SymmetryType>, String> {
        let mut symmetries = Vec::new();
        let n = qubo.shape()[0];

        // Check whether the QUBO matrix is symmetric; matrix symmetry is used
        // here as a simple proxy for permutation symmetry of the problem.
        let mut is_symmetric = true;
        for i in 0..n {
            for j in 0..n {
                if (qubo[[i, j]] - qubo[[j, i]]).abs() > 1e-10 {
                    is_symmetric = false;
                    break;
                }
            }
            if !is_symmetric {
                break;
            }
        }

        if is_symmetric {
            symmetries.push(SymmetryType::Permutation);
        }

        Ok(symmetries)
    }

    fn analyze_quantum_advantage(
        &self,
        classical_perf: &HashMap<ClassicalAlgorithm, PerformanceMetrics>,
        quantum_perf: &HashMap<QuantumAlgorithm, QuantumPerformanceMetrics>,
        _problem_chars: &ProblemCharacteristics,
    ) -> Result<AdvantageAnalysis, String> {
        let mut advantage_factors = HashMap::new();
        let mut conditional_advantages = Vec::new();
        let break_even_points = HashMap::new();

        let best_classical = classical_perf.values().min_by(|a, b| {
            a.time_complexity
                .average_case
                .partial_cmp(&b.time_complexity.average_case)
                .unwrap_or(std::cmp::Ordering::Equal)
        });

        let best_quantum = quantum_perf.values().min_by(|a, b| {
            a.base_metrics
                .time_complexity
                .average_case
                .partial_cmp(&b.base_metrics.time_complexity.average_case)
                .unwrap_or(std::cmp::Ordering::Equal)
        });

        let has_quantum_advantage = if let (Some(classical), Some(quantum)) =
            (best_classical, best_quantum)
        {
            let classical_quality = classical.solution_quality.approximation_ratio;
            let quantum_quality = quantum.base_metrics.solution_quality.approximation_ratio;

            advantage_factors.insert("quality".to_string(), quantum_quality / classical_quality);

            let classical_success = classical.success_probability;
            let quantum_success = quantum.base_metrics.success_probability;

            advantage_factors.insert(
                "success_rate".to_string(),
                quantum_success / classical_success,
            );

            if quantum
                .noise_sensitivity
                .error_thresholds
                .values()
                .any(|&x| x < 0.01)
            {
                conditional_advantages.push(ConditionalAdvantage {
                    conditions: vec!["Low noise environment".to_string()],
                    advantage_magnitude: 2.0,
                    condition_probability: 0.3,
                });
            }

            quantum_quality > classical_quality || quantum_success > classical_success
        } else {
            false
        };

        // Fixed heuristic confidence levels for the advantage verdict.
        let confidence = if has_quantum_advantage { 0.7 } else { 0.3 };

        Ok(AdvantageAnalysis {
            has_quantum_advantage,
            confidence,
            advantage_factors,
            conditional_advantages,
            break_even_points,
        })
    }

    fn analyze_thresholds(
        &self,
        _classical_perf: &HashMap<ClassicalAlgorithm, PerformanceMetrics>,
        quantum_perf: &HashMap<QuantumAlgorithm, QuantumPerformanceMetrics>,
        problem_chars: &ProblemCharacteristics,
    ) -> Result<ThresholdAnalysis, String> {
        let mut size_thresholds = HashMap::new();
        let mut noise_thresholds = HashMap::new();
        let mut hardware_thresholds = HashMap::new();
        let mut time_to_advantage = HashMap::new();

        // Rough size threshold at which advantage is expected to appear.
        let estimated_threshold = if problem_chars.num_variables < 100 {
            500
        } else {
            100
        };

        size_thresholds.insert("general".to_string(), estimated_threshold);

        for (alg, perf) in quantum_perf {
            if let Some(min_threshold) = perf
                .noise_sensitivity
                .error_thresholds
                .values()
                .min_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
            {
                noise_thresholds.insert(format!("{alg:?}"), *min_threshold);
            }
        }

        for (alg, perf) in quantum_perf {
            hardware_thresholds.insert(
                format!("{alg:?}_qubits"),
                perf.hardware_requirements.min_qubits as f64,
            );
            hardware_thresholds.insert(
                format!("{alg:?}_fidelity"),
                perf.hardware_requirements.gate_fidelity_threshold,
            );
        }

        time_to_advantage.insert(
            "conservative".to_string(),
            // Roughly five years.
            Duration::from_secs(365 * 24 * 3600 * 5),
        );
        time_to_advantage.insert(
            "optimistic".to_string(),
            // Roughly two years.
            Duration::from_secs(365 * 24 * 3600 * 2),
        );

        Ok(ThresholdAnalysis {
            size_thresholds,
            noise_thresholds,
            hardware_thresholds,
            time_to_advantage,
        })
    }

    fn generate_recommendations(
        &self,
        classical_perf: &HashMap<ClassicalAlgorithm, PerformanceMetrics>,
        quantum_perf: &HashMap<QuantumAlgorithm, QuantumPerformanceMetrics>,
        advantage_analysis: &AdvantageAnalysis,
        problem_chars: &ProblemCharacteristics,
    ) -> Result<Vec<AlgorithmRecommendation>, String> {
        let mut recommendations = Vec::new();

        if let Some((best_classical_alg, best_classical_perf)) =
            classical_perf.iter().max_by(|a, b| {
                a.1.solution_quality
                    .approximation_ratio
                    .partial_cmp(&b.1.solution_quality.approximation_ratio)
                    .unwrap_or(std::cmp::Ordering::Equal)
            })
        {
            recommendations.push(AlgorithmRecommendation {
                algorithm: format!("{best_classical_alg:?}"),
                algorithm_type: AlgorithmType::Classical,
                confidence: 0.8,
                expected_performance: best_classical_perf.clone(),
                justification: "Best performing classical algorithm for this problem type"
                    .to_string(),
                alternatives: classical_perf.keys().map(|k| format!("{k:?}")).collect(),
            });
        }

        if advantage_analysis.has_quantum_advantage {
            if let Some((best_quantum_alg, best_quantum_perf)) =
                quantum_perf.iter().max_by(|a, b| {
                    a.1.base_metrics
                        .solution_quality
                        .approximation_ratio
                        .partial_cmp(&b.1.base_metrics.solution_quality.approximation_ratio)
                        .unwrap_or(std::cmp::Ordering::Equal)
                })
            {
                recommendations.push(AlgorithmRecommendation {
                    algorithm: format!("{best_quantum_alg:?}"),
                    algorithm_type: AlgorithmType::Quantum,
                    confidence: advantage_analysis.confidence,
                    expected_performance: best_quantum_perf.base_metrics.clone(),
                    justification: "Quantum advantage detected for this problem".to_string(),
                    alternatives: quantum_perf.keys().map(|k| format!("{k:?}")).collect(),
                });
            }
        }

        if problem_chars.num_variables > 1000 {
            recommendations.push(AlgorithmRecommendation {
                algorithm: "Hybrid Quantum-Classical".to_string(),
                algorithm_type: AlgorithmType::Hybrid,
                confidence: 0.6,
                expected_performance: PerformanceMetrics {
                    time_complexity: TimeComplexity {
                        best_case: ComplexityFunction::Linear,
                        average_case: ComplexityFunction::Quadratic,
                        worst_case: ComplexityFunction::Cubic,
                    },
                    space_complexity: SpaceComplexity {
                        memory_complexity: ComplexityFunction::Linear,
                        storage_complexity: ComplexityFunction::Linear,
                    },
                    solution_quality: QualityMetrics {
                        approximation_ratio: 0.9,
                        solution_variance: 0.1,
                        optimal_probability: 0.1,
                        energy_gap: 0.1,
                    },
                    success_probability: 0.8,
                    scaling_behavior: ScalingBehavior {
                        scaling_exponent: 1.5,
                        scaling_constant: 1.0,
                        confidence_interval: (1.2, 1.8),
                        fit_quality: 0.85,
                    },
                },
                justification: "Hybrid approaches often perform well for large-scale problems"
                    .to_string(),
                alternatives: vec!["Pure classical".to_string(), "Pure quantum".to_string()],
            });
        }

        Ok(recommendations)
    }

    pub fn run_supremacy_benchmark(&mut self) -> Result<SupremacyBenchmarkResult, String> {
        self.benchmarker.run_comprehensive_benchmark(&self.config)
    }

    pub fn export_results(
        &self,
        results: &AdvantageAnalysisResult,
        format: ExportFormat,
    ) -> Result<String, String> {
        match format {
            ExportFormat::JSON => serde_json::to_string_pretty(results)
                .map_err(|e| format!("JSON export failed: {e}")),
            ExportFormat::Python => Ok(self.generate_python_analysis_code(results)),
            ExportFormat::Rust => Ok(self.generate_rust_analysis_code(results)),
            ExportFormat::QUBO => {
                Err("QUBO export not applicable for analysis results".to_string())
            }
        }
    }

    fn generate_python_analysis_code(&self, results: &AdvantageAnalysisResult) -> String {
        format!(
            r#"
# Quantum Advantage Analysis Results
# Generated by QuantRS2-Tytan

import numpy as np
import matplotlib.pyplot as plt

# Problem characteristics
problem_size = {}
problem_density = {:.3}
has_quantum_advantage = {}
confidence = {:.3}

# Visualization code
plt.figure(figsize=(12, 8))
plt.subplot(2, 2, 1)
plt.title('Quantum vs Classical Performance')
# Add your performance comparison plots here

plt.subplot(2, 2, 2)
plt.title('Advantage Factors')
# Add advantage factor visualization here

plt.tight_layout()
plt.show()

print(f"Problem size: {{problem_size}}")
print(f"Quantum advantage detected: {{has_quantum_advantage}}")
print(f"Confidence level: {{confidence:.1%}}")
"#,
            results.problem_info.num_variables,
            results.problem_info.density,
            // Render the boolean as a Python literal so the script runs as-is.
            if results.advantage_analysis.has_quantum_advantage {
                "True"
            } else {
                "False"
            },
            results.advantage_analysis.confidence,
        )
    }

    fn generate_rust_analysis_code(&self, results: &AdvantageAnalysisResult) -> String {
        format!(
            r#"
// Quantum Advantage Analysis Results
// Generated by QuantRS2-Tytan

use quantrs2_tytan::quantum_advantage_analysis::*;

fn reproduce_analysis() {{
    let problem_size = {};
    let has_advantage = {};
    let confidence = {:.3};

    println!("Problem size: {{}}", problem_size);
    println!("Quantum advantage: {{}}", has_advantage);
    println!("Confidence: {{:.3}}", confidence);

    // Add your analysis reproduction code here
}}
"#,
            results.problem_info.num_variables,
            results.advantage_analysis.has_quantum_advantage,
            results.advantage_analysis.confidence,
        )
    }
}

impl Default for ClassicalComplexityEstimator {
    fn default() -> Self {
        Self::new()
    }
}

impl ClassicalComplexityEstimator {
    pub fn new() -> Self {
        Self {
            complexity_database: Self::build_complexity_database(),
            heuristic_estimators: vec![],
        }
    }

    fn build_complexity_database() -> HashMap<String, ComplexityInfo> {
        let mut db = HashMap::new();

        db.insert(
            "max_cut".to_string(),
            ComplexityInfo {
                time_complexity: TimeComplexity {
                    best_case: ComplexityFunction::Exponential { base: 2.0 },
                    average_case: ComplexityFunction::Exponential { base: 2.0 },
                    worst_case: ComplexityFunction::Exponential { base: 2.0 },
                },
                space_complexity: SpaceComplexity {
                    memory_complexity: ComplexityFunction::Linear,
                    storage_complexity: ComplexityFunction::Linear,
                },
                approximation_bounds: Some((0.878, 1.0)),
                practical_scaling: ScalingBehavior {
                    scaling_exponent: 1.8,
                    scaling_constant: 10.0,
                    confidence_interval: (1.6, 2.0),
                    fit_quality: 0.9,
                },
            },
        );

        db
    }

    pub fn estimate_classical_performance(
        &self,
        problem: &ProblemCharacteristics,
        algorithms: &[ClassicalAlgorithm],
    ) -> Result<HashMap<ClassicalAlgorithm, PerformanceMetrics>, String> {
        let mut results = HashMap::new();

        for algorithm in algorithms {
            let performance = self.estimate_algorithm_performance(algorithm, problem)?;
            results.insert(algorithm.clone(), performance);
        }

        Ok(results)
    }

    fn estimate_algorithm_performance(
        &self,
        algorithm: &ClassicalAlgorithm,
        problem: &ProblemCharacteristics,
    ) -> Result<PerformanceMetrics, String> {
        let base_complexity = match algorithm {
            ClassicalAlgorithm::SimulatedAnnealing => {
                ComplexityFunction::Polynomial { degree: 2.0 }
            }
            ClassicalAlgorithm::TabuSearch => ComplexityFunction::Polynomial { degree: 2.5 },
            ClassicalAlgorithm::GeneticAlgorithm => ComplexityFunction::Polynomial { degree: 3.0 },
            ClassicalAlgorithm::BranchAndBound => ComplexityFunction::Exponential { base: 2.0 },
            ClassicalAlgorithm::ConstraintProgramming => {
                ComplexityFunction::Exponential { base: 1.5 }
            }
            ClassicalAlgorithm::LinearProgramming => ComplexityFunction::Polynomial { degree: 3.5 },
            ClassicalAlgorithm::SDP => ComplexityFunction::Polynomial { degree: 4.0 },
        };

        let scaling_factor = match problem.connectivity {
            ConnectivityStructure::FullyConnected => 1.5,
            ConnectivityStructure::Sparse { .. } => 0.8,
            ConnectivityStructure::Grid { .. } => 0.9,
            ConnectivityStructure::Tree => 0.6,
            ConnectivityStructure::Planar => 0.7,
            ConnectivityStructure::SmallWorld { .. } => 1.1,
        };

        let approximation_ratio = match algorithm {
            ClassicalAlgorithm::SimulatedAnnealing => 0.85,
            ClassicalAlgorithm::TabuSearch => 0.9,
            ClassicalAlgorithm::GeneticAlgorithm => 0.8,
            // Exact solver.
            ClassicalAlgorithm::BranchAndBound => 1.0,
            ClassicalAlgorithm::ConstraintProgramming => 0.95,
            ClassicalAlgorithm::LinearProgramming => 0.99,
            // Goemans-Williamson-style SDP rounding bound.
            ClassicalAlgorithm::SDP => 0.878,
        };

        Ok(PerformanceMetrics {
            time_complexity: TimeComplexity {
                best_case: base_complexity.clone(),
                average_case: base_complexity.clone(),
                worst_case: base_complexity,
            },
            space_complexity: SpaceComplexity {
                memory_complexity: ComplexityFunction::Linear,
                storage_complexity: ComplexityFunction::Linear,
            },
            solution_quality: QualityMetrics {
                approximation_ratio,
                solution_variance: 0.1,
                optimal_probability: if matches!(algorithm, ClassicalAlgorithm::BranchAndBound) {
                    1.0
                } else {
                    0.1
                },
                energy_gap: 0.05,
            },
            success_probability: 0.9,
            scaling_behavior: ScalingBehavior {
                scaling_exponent: scaling_factor,
                scaling_constant: 1.0,
                confidence_interval: (scaling_factor * 0.8, scaling_factor * 1.2),
                fit_quality: 0.8,
            },
        })
    }
}

impl Default for QuantumResourceEstimator {
    fn default() -> Self {
        Self::new()
    }
}

impl QuantumResourceEstimator {
    pub fn new() -> Self {
        Self {
            algorithm_database: Self::build_algorithm_database(),
            hardware_database: Self::build_hardware_database(),
        }
    }

    fn build_algorithm_database() -> HashMap<String, QuantumAlgorithmInfo> {
        let mut db = HashMap::new();

        db.insert(
            "QAOA".to_string(),
            QuantumAlgorithmInfo {
                resource_requirements: HardwareRequirements {
                    min_qubits: 10,
                    connectivity_requirements: ConnectivityRequirement::AllToAll,
                    gate_fidelity_threshold: 0.99,
                    measurement_fidelity_threshold: 0.95,
                    temperature_requirements: Some(0.01),
                },
                performance_model: QuantumPerformanceModel {
                    time_to_solution: Box::new(|n| {
                        Duration::from_millis((n as f64 * n as f64 * 0.1) as u64)
                    }),
                    success_probability: Box::new(|n, error_rate| {
                        (1.0 - error_rate).powf(n as f64)
                    }),
                    resource_scaling: ScalingBehavior {
                        scaling_exponent: 1.0,
                        scaling_constant: 1.0,
                        confidence_interval: (0.8, 1.2),
                        fit_quality: 0.85,
                    },
                },
                noise_sensitivity: NoiseSensitivity {
                    error_thresholds: {
                        let mut map = HashMap::new();
                        map.insert("gate_error".to_string(), 0.001);
                        map.insert("measurement_error".to_string(), 0.01);
                        map
                    },
                    degradation_rates: HashMap::new(),
                    mitigation_effectiveness: HashMap::new(),
                },
                theoretical_advantage: Some(AdvantageEstimate {
                    speedup_factor: 1.5,
                    advantage_type: AdvantageType::Polynomial {
                        degree_reduction: 0.5,
                    },
                    confidence_level: 0.7,
                    conditions: vec![
                        "Low noise".to_string(),
                        "Sufficient circuit depth".to_string(),
                    ],
                }),
            },
        );

        db
    }

    fn build_hardware_database() -> HashMap<String, HardwareCharacteristics> {
        let mut db = HashMap::new();

        db.insert(
            "ideal_quantum".to_string(),
            HardwareCharacteristics {
                qubit_count: 10000,
                connectivity: ConnectivityStructure::FullyConnected,
                gate_fidelities: {
                    let mut map = HashMap::new();
                    map.insert("single_qubit".to_string(), 1.0);
                    map.insert("two_qubit".to_string(), 1.0);
                    map
                },
                measurement_fidelity: 1.0,
                coherence_times: {
                    let mut map = HashMap::new();
                    map.insert("T1".to_string(), Duration::from_secs(1000));
                    map.insert("T2".to_string(), Duration::from_secs(1000));
                    map
                },
                operating_parameters: HashMap::new(),
            },
        );

        db
    }

    pub fn estimate_quantum_performance(
        &self,
        problem: &ProblemCharacteristics,
        algorithms: &[QuantumAlgorithm],
    ) -> Result<HashMap<QuantumAlgorithm, QuantumPerformanceMetrics>, String> {
        let mut results = HashMap::new();

        for algorithm in algorithms {
            let performance = self.estimate_quantum_algorithm_performance(algorithm, problem)?;
            results.insert(algorithm.clone(), performance);
        }

        Ok(results)
    }

    fn estimate_quantum_algorithm_performance(
        &self,
        algorithm: &QuantumAlgorithm,
        problem: &ProblemCharacteristics,
    ) -> Result<QuantumPerformanceMetrics, String> {
        let qubits_required = match algorithm {
            QuantumAlgorithm::QuantumAnnealing => problem.num_variables,
            QuantumAlgorithm::QAOA => problem.num_variables,
            QuantumAlgorithm::VQE => problem.num_variables,
            QuantumAlgorithm::QFAST => problem.num_variables + 10,
            QuantumAlgorithm::QuantumWalk => (problem.num_variables as f64).log2().ceil() as usize,
            QuantumAlgorithm::AdiabaticQuantumComputing => problem.num_variables,
        };

        let circuit_depth = match algorithm {
            QuantumAlgorithm::QuantumAnnealing => 1,
            QuantumAlgorithm::QAOA => 10,
            QuantumAlgorithm::VQE => 50,
            QuantumAlgorithm::QFAST => 100,
            QuantumAlgorithm::QuantumWalk => 20,
            QuantumAlgorithm::AdiabaticQuantumComputing => 1,
        };

        let base_metrics = PerformanceMetrics {
            time_complexity: TimeComplexity {
                best_case: ComplexityFunction::Linear,
                average_case: ComplexityFunction::Linearithmic,
                worst_case: ComplexityFunction::Quadratic,
            },
            space_complexity: SpaceComplexity {
                memory_complexity: ComplexityFunction::Linear,
                storage_complexity: ComplexityFunction::Linear,
            },
            solution_quality: QualityMetrics {
                approximation_ratio: 0.95,
                solution_variance: 0.05,
                optimal_probability: 0.3,
                energy_gap: 0.1,
            },
            success_probability: 0.8,
            scaling_behavior: ScalingBehavior {
                scaling_exponent: 1.2,
                scaling_constant: 0.5,
                confidence_interval: (1.0, 1.4),
                fit_quality: 0.75,
            },
        };

        let quantum_metrics = QuantumSpecificMetrics {
            qubits_required,
            circuit_depth,
            quantum_operations: circuit_depth * qubits_required,
            entanglement_measures: EntanglementMeasures {
                avg_entanglement_entropy: 2.0,
                max_entanglement_entropy: (qubits_required as f64).log2(),
                entanglement_depth: circuit_depth / 5,
                multipartite_measures: HashMap::new(),
            },
            coherence_time_required: Duration::from_micros(circuit_depth as u64 * 10),
        };

        let hardware_requirements = HardwareRequirements {
            min_qubits: qubits_required,
            connectivity_requirements: ConnectivityRequirement::AllToAll,
            gate_fidelity_threshold: 0.99,
            measurement_fidelity_threshold: 0.95,
            temperature_requirements: Some(0.01),
        };

        let noise_sensitivity = NoiseSensitivity {
            error_thresholds: {
                let mut map = HashMap::new();
                map.insert("gate_error".to_string(), 1.0 / circuit_depth as f64);
                map.insert("measurement_error".to_string(), 0.01);
                map
            },
            degradation_rates: HashMap::new(),
            mitigation_effectiveness: HashMap::new(),
        };

        Ok(QuantumPerformanceMetrics {
            base_metrics,
            quantum_metrics,
            hardware_requirements,
            noise_sensitivity,
        })
    }
}

impl Default for QuantumSupremacyBenchmarker {
    fn default() -> Self {
        Self::new()
    }
}

impl QuantumSupremacyBenchmarker {
    pub fn new() -> Self {
        Self {
            problem_generators: vec![],
            performance_trackers: HashMap::new(),
        }
    }

    pub fn run_comprehensive_benchmark(
        &mut self,
        config: &AnalysisConfig,
    ) -> Result<SupremacyBenchmarkResult, String> {
        let mut results = Vec::new();

        for size in (config.problem_size_range.0..=config.problem_size_range.1).step_by(50) {
            let benchmark_result = self.run_size_benchmark(size, config)?;
            results.push(benchmark_result);
        }

        Ok(SupremacyBenchmarkResult {
            benchmark_results: results.clone(),
            summary: self.generate_summary(&results)?,
        })
    }

    fn run_size_benchmark(
        &self,
        size: usize,
        _config: &AnalysisConfig,
    ) -> Result<SizeBenchmarkResult, String> {
        // Placeholder benchmark: timing and quality maps are left empty and
        // advantage is assumed above a fixed size threshold.
        Ok(SizeBenchmarkResult {
            problem_size: size,
            classical_times: HashMap::new(),
            quantum_times: HashMap::new(),
            solution_qualities: HashMap::new(),
            advantage_detected: size > 100,
        })
    }

    fn generate_summary(
        &self,
        results: &[SizeBenchmarkResult],
    ) -> Result<BenchmarkSummary, String> {
        let advantage_threshold = results
            .iter()
            .find(|r| r.advantage_detected)
            .map_or(usize::MAX, |r| r.problem_size);

        Ok(BenchmarkSummary {
            total_benchmarks: results.len(),
            advantage_threshold,
            confidence_level: 0.8,
            recommendations: vec![
                "Quantum advantage likely for problems > 100 variables".to_string()
            ],
        })
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SupremacyBenchmarkResult {
    pub benchmark_results: Vec<SizeBenchmarkResult>,
    pub summary: BenchmarkSummary,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SizeBenchmarkResult {
    pub problem_size: usize,
    pub classical_times: HashMap<String, Duration>,
    pub quantum_times: HashMap<String, Duration>,
    pub solution_qualities: HashMap<String, f64>,
    pub advantage_detected: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkSummary {
    pub total_benchmarks: usize,
    pub advantage_threshold: usize,
    pub confidence_level: f64,
    pub recommendations: Vec<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ExportFormat {
    JSON,
    Python,
    Rust,
    QUBO,
}

impl ComplexityFunction {
    /// Map each variant to a rough growth rank so that asymptotic classes can
    /// be compared numerically: polynomial-like variants map to their degree,
    /// exponential variants map above any realistic polynomial degree, and
    /// factorial maps above everything else.
    fn growth_rank(&self) -> f64 {
        match self {
            ComplexityFunction::Constant => 0.0,
            ComplexityFunction::Logarithmic => 0.5,
            ComplexityFunction::Linear => 1.0,
            ComplexityFunction::Linearithmic => 1.5,
            ComplexityFunction::Quadratic => 2.0,
            ComplexityFunction::Cubic => 3.0,
            ComplexityFunction::Polynomial { degree } => *degree,
            ComplexityFunction::Exponential { base } => 1_000.0 + base,
            ComplexityFunction::Factorial => f64::INFINITY,
        }
    }
}

impl PartialOrd for ComplexityFunction {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        // Comparing by growth rank keeps Polynomial { degree } consistent with
        // the fixed-degree variants instead of always ranking above them.
        self.growth_rank().partial_cmp(&other.growth_rank())
    }
}

impl PartialEq for ComplexityFunction {
    fn eq(&self, other: &Self) -> bool {
        matches!(self.partial_cmp(other), Some(std::cmp::Ordering::Equal))
    }
}