use crate::{
automatic_parallelization::{AutoParallelConfig, AutoParallelEngine},
circuit_optimization::{CircuitOptimizer, OptimizationConfig},
distributed_simulator::{DistributedQuantumSimulator, DistributedSimulatorConfig},
error::{Result, SimulatorError},
large_scale_simulator::{LargeScaleQuantumSimulator, LargeScaleSimulatorConfig},
simulator::SimulatorResult,
statevector::StateVectorSimulator,
};
use quantrs2_circuit::builder::{Circuit, Simulator};
use quantrs2_core::{
error::{QuantRS2Error, QuantRS2Result},
gate::GateOp,
qubit::QubitId,
register::Register,
};
use std::fmt::Write;
#[cfg(all(feature = "gpu", not(target_os = "macos")))]
use crate::gpu::SciRS2GpuStateVectorSimulator;
use scirs2_core::parallel_ops::current_num_threads; use scirs2_core::Complex64;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
/// Tunable settings for [`AutoOptimizer`]: resource budgets, analysis knobs,
/// and the preference order used when recommending a simulator backend.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AutoOptimizerConfig {
    /// Print timing and backend-selection diagnostics to stdout.
    pub enable_profiling: bool,
    /// Memory budget in bytes; used when scoring memory-hungry backends.
    pub memory_budget: usize,
    /// CPU utilization threshold (0.0..=1.0).
    /// NOTE(review): not read by the code visible in this file — presumably
    /// consumed elsewhere; confirm before removing.
    pub cpu_utilization_threshold: f64,
    /// Timeout for GPU availability probing.
    /// NOTE(review): not read in this file — confirm it is used elsewhere.
    pub gpu_check_timeout: Duration,
    /// Whether distributed execution may be considered.
    /// NOTE(review): availability of the distributed backend is currently
    /// hard-coded to `false` in `update_backend_availability`.
    pub enable_distributed: bool,
    /// Requested SciRS2 optimization aggressiveness (not read in this file).
    pub scirs2_optimization_level: OptimizationLevel,
    /// What to do when the preferred backend fails (not read in this file).
    pub fallback_strategy: FallbackStrategy,
    /// How thorough circuit analysis should be (not read in this file).
    pub analysis_depth: AnalysisDepth,
    /// Maximum number of entries kept in the performance history cache.
    pub performance_cache_size: usize,
    /// Backends considered for selection, in preference order.
    pub backend_preferences: Vec<BackendType>,
}
impl Default for AutoOptimizerConfig {
    /// Defaults: profiling on, 8 GiB memory budget, aggressive SciRS2
    /// optimization, deep analysis, and a preference order of
    /// GPU → large-scale → distributed → plain state vector.
    fn default() -> Self {
        // 8 GiB expressed in bytes.
        let eight_gib: usize = 8 * 1024 * 1024 * 1024;
        Self {
            backend_preferences: vec![
                BackendType::SciRS2Gpu,
                BackendType::LargeScale,
                BackendType::Distributed,
                BackendType::StateVector,
            ],
            enable_profiling: true,
            memory_budget: eight_gib,
            cpu_utilization_threshold: 0.8,
            gpu_check_timeout: Duration::from_millis(1000),
            enable_distributed: true,
            scirs2_optimization_level: OptimizationLevel::Aggressive,
            fallback_strategy: FallbackStrategy::Conservative,
            analysis_depth: AnalysisDepth::Deep,
            performance_cache_size: 1000,
        }
    }
}
/// Identifies a concrete simulator backend the optimizer can dispatch to.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum BackendType {
    /// CPU state-vector simulation.
    StateVector,
    /// SciRS2 GPU-accelerated state-vector simulation.
    SciRS2Gpu,
    /// Memory-optimized large-scale simulator.
    LargeScale,
    /// Distributed multi-node simulation.
    Distributed,
    /// Let the optimizer decide (scored very low when chosen explicitly).
    Auto,
}
/// How aggressively SciRS2-side circuit optimization should be applied.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OptimizationLevel {
    /// No optimization.
    None,
    /// Cheap, safe passes only.
    Basic,
    /// More thorough optimization.
    Advanced,
    /// Maximum-effort optimization.
    Aggressive,
}
/// Policy for what to do when the preferred backend is unavailable or fails.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum FallbackStrategy {
    /// Fall back to a safe, always-available backend.
    Conservative,
    /// Try the next-best candidate aggressively.
    Aggressive,
    /// Report an error instead of falling back.
    Fail,
}
/// Requested thoroughness of circuit analysis.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum AnalysisDepth {
    /// Fast, shallow inspection.
    Quick,
    /// Default level of analysis.
    Standard,
    /// Full structural analysis.
    Deep,
}
/// Static properties of a circuit gathered by [`AutoOptimizer::analyze_circuit`]
/// and consumed by the backend-scoring heuristics.
#[derive(Debug, Clone)]
pub struct CircuitCharacteristics {
    /// Number of qubits in the circuit.
    pub num_qubits: usize,
    /// Total gate count.
    pub num_gates: usize,
    /// Critical-path depth (longest chain of dependent gates).
    pub circuit_depth: usize,
    /// Gate name → occurrence count.
    pub gate_distribution: HashMap<String, usize>,
    /// Parallelization efficiency estimate from the auto-parallel engine.
    pub parallelism_potential: f64,
    /// Estimated peak memory in bytes (state vector + per-gate overhead).
    pub memory_requirement: usize,
    /// Heuristic complexity score combining gate, depth, and entanglement terms.
    pub complexity_score: f64,
    /// Fraction of gates acting on two or more qubits.
    pub two_qubit_density: f64,
    /// Qubit-interaction graph properties.
    pub connectivity_properties: ConnectivityProperties,
    /// Rough estimate of entangling depth (capped at the qubit count).
    pub entanglement_depth: usize,
    /// Heuristic noise susceptibility in 0.0..=1.0.
    pub noise_susceptibility: f64,
}
/// Properties of the qubit-interaction graph induced by multi-qubit gates.
///
/// NOTE(review): `connected_components`, `diameter`, and
/// `clustering_coefficient` are currently filled with placeholder values by
/// `analyze_connectivity` rather than computed from the graph.
#[derive(Debug, Clone)]
pub struct ConnectivityProperties {
    /// Largest interaction count of any single qubit (counts repeats).
    pub max_degree: usize,
    /// Average interaction count across qubits that appear in 2+-qubit gates.
    pub avg_degree: f64,
    /// Number of connected components (placeholder: always 1).
    pub connected_components: usize,
    /// Graph diameter estimate (placeholder: min(num_qubits, 6)).
    pub diameter: usize,
    /// Clustering coefficient (placeholder: always 0.5).
    pub clustering_coefficient: f64,
}
/// The optimizer's verdict: which backend to use and why.
#[derive(Debug, Clone)]
pub struct BackendRecommendation {
    /// Selected backend.
    pub backend_type: BackendType,
    /// Score/confidence in 0.0..=1.0 for the selection.
    pub confidence: f64,
    /// Expected relative improvement over a neutral (0.5-score) choice.
    pub expected_improvement: f64,
    /// Heuristic wall-clock estimate for executing the circuit.
    pub estimated_execution_time: Duration,
    /// Estimated peak memory usage in bytes.
    pub estimated_memory_usage: usize,
    /// Human-readable justification for the choice.
    pub reasoning: String,
    /// Runner-up backends with their scores (currently left empty).
    pub alternatives: Vec<(BackendType, f64)>,
    /// Name of the model/heuristic that produced this recommendation.
    pub prediction_model: String,
}
/// Measured (or placeholder) metrics for one circuit execution.
///
/// NOTE(review): `record_performance_metrics` currently fills
/// `memory_usage`, `cpu_utilization`, and `error_rate` with zeros and
/// `gpu_utilization` with `None` — only `execution_time` and `throughput`
/// are real measurements.
#[derive(Debug, Clone)]
pub struct PerformanceMetrics {
    /// Wall-clock execution time.
    pub execution_time: Duration,
    /// Peak memory usage in bytes (placeholder: 0).
    pub memory_usage: usize,
    /// CPU utilization 0.0..=1.0 (placeholder: 0.0).
    pub cpu_utilization: f64,
    /// GPU utilization if a GPU was used (placeholder: None).
    pub gpu_utilization: Option<f64>,
    /// Gates executed per second.
    pub throughput: f64,
    /// Observed error rate (placeholder: 0.0).
    pub error_rate: f64,
}
/// One entry in the optimizer's bounded performance-history cache.
#[derive(Debug, Clone)]
pub struct PerformanceHistory {
    /// Structural hash of the executed circuit (see `compute_circuit_hash`).
    pub circuit_hash: u64,
    /// Backend the circuit ran on.
    pub backend_type: BackendType,
    /// Metrics captured for that run.
    pub metrics: PerformanceMetrics,
    /// When the entry was recorded.
    pub timestamp: Instant,
}
/// Selects and drives the best simulator backend for a given circuit based on
/// static analysis, availability probing, and (eventually) cached history.
pub struct AutoOptimizer {
    // User-supplied configuration (budgets, preferences, profiling flag).
    config: AutoOptimizerConfig,
    // Circuit optimizer configured at construction.
    // NOTE(review): not invoked anywhere in this file — confirm intended use.
    circuit_optimizer: CircuitOptimizer,
    // Engine used to estimate a circuit's parallelization potential.
    parallel_engine: AutoParallelEngine,
    // Bounded FIFO of past execution records (trimmed to
    // `config.performance_cache_size`).
    performance_cache: Vec<PerformanceHistory>,
    // Backend → availability flag, refreshed by `update_backend_availability`.
    backend_availability: HashMap<BackendType, bool>,
    // SciRS2 analysis helper.
    // NOTE(review): constructed but never called in this file.
    scirs2_analyzer: SciRS2CircuitAnalyzer,
}
/// Thin wrapper for SciRS2-based circuit analysis (currently a stub).
struct SciRS2CircuitAnalyzer {
    // Flag reserved for toggling advanced analysis; not read yet.
    enable_advanced_features: bool,
}
impl AutoOptimizer {
    /// Creates an optimizer with the default configuration.
    #[must_use]
    pub fn new() -> Self {
        Self::with_config(AutoOptimizerConfig::default())
    }

    /// Creates an optimizer from an explicit configuration, wiring up the
    /// circuit optimizer and auto-parallelization engine it delegates to.
    #[must_use]
    pub fn with_config(config: AutoOptimizerConfig) -> Self {
        let optimization_config = OptimizationConfig {
            enable_gate_fusion: true,
            enable_redundant_elimination: true,
            enable_commutation_reordering: true,
            enable_single_qubit_optimization: true,
            enable_two_qubit_optimization: true,
            max_passes: 3,
            enable_depth_reduction: true,
        };
        let parallel_config = AutoParallelConfig {
            max_threads: current_num_threads(),
            min_gates_for_parallel: 20,
            strategy: crate::automatic_parallelization::ParallelizationStrategy::Hybrid,
            ..Default::default()
        };
        Self {
            config,
            circuit_optimizer: CircuitOptimizer::with_config(optimization_config),
            parallel_engine: AutoParallelEngine::new(parallel_config),
            performance_cache: Vec::new(),
            backend_availability: HashMap::new(),
            scirs2_analyzer: SciRS2CircuitAnalyzer {
                enable_advanced_features: true,
            },
        }
    }

    /// Statically analyzes `circuit` and gathers the characteristics used by
    /// the backend-selection heuristics (size, depth, gate mix, connectivity,
    /// parallelism, and memory estimates).
    ///
    /// # Errors
    /// Propagates errors from the parallelism analysis engine.
    pub fn analyze_circuit<const N: usize>(
        &self,
        circuit: &Circuit<N>,
    ) -> QuantRS2Result<CircuitCharacteristics> {
        let start_time = Instant::now();
        let num_qubits = circuit.num_qubits();
        let num_gates = circuit.num_gates();
        let circuit_depth = self.calculate_circuit_depth(circuit);
        let gate_distribution = self.analyze_gate_distribution(circuit);
        let parallelism_potential = self.analyze_parallelism_potential(circuit)?;
        let memory_requirement = self.estimate_memory_requirement(num_qubits, num_gates);
        let complexity_score = self.calculate_complexity_score(circuit)?;
        let two_qubit_density = self.calculate_two_qubit_density(circuit);
        let connectivity_properties = self.analyze_connectivity(circuit)?;
        let entanglement_depth = self.estimate_entanglement_depth(circuit)?;
        let noise_susceptibility = self.analyze_noise_susceptibility(circuit);
        let analysis_time = start_time.elapsed();
        if self.config.enable_profiling {
            println!("Circuit analysis completed in {analysis_time:?}");
        }
        Ok(CircuitCharacteristics {
            num_qubits,
            num_gates,
            circuit_depth,
            gate_distribution,
            parallelism_potential,
            memory_requirement,
            complexity_score,
            two_qubit_density,
            connectivity_properties,
            entanglement_depth,
            noise_susceptibility,
        })
    }

    /// Recommends the best available backend for `circuit`, consulting the
    /// performance cache first and falling back to heuristic scoring.
    ///
    /// # Errors
    /// Propagates errors from circuit analysis.
    pub fn recommend_backend<const N: usize>(
        &mut self,
        circuit: &Circuit<N>,
    ) -> QuantRS2Result<BackendRecommendation> {
        let characteristics = self.analyze_circuit(circuit)?;
        self.update_backend_availability()?;
        // Cache lookup is currently a no-op because similarity matching is a
        // stub (see `are_characteristics_similar`), but the plumbing is here.
        if let Some(cached_result) = self.check_performance_cache(&characteristics) {
            return Ok(self.build_recommendation_from_cache(cached_result));
        }
        let recommendation = self.generate_backend_recommendation(&characteristics)?;
        Ok(recommendation)
    }

    /// Selects a backend for `circuit`, executes it there, and (when
    /// profiling is on) records timing metrics into the history cache.
    ///
    /// # Errors
    /// Returns a [`SimulatorError`] if analysis or execution fails.
    pub fn execute_optimized<const N: usize>(
        &mut self,
        circuit: &Circuit<N>,
    ) -> Result<SimulatorResult<N>> {
        let recommendation = self
            .recommend_backend(circuit)
            .map_err(|e| SimulatorError::ComputationError(e.to_string()))?;
        if self.config.enable_profiling {
            println!(
                "Using {} backend (confidence: {:.2})",
                self.backend_type_name(recommendation.backend_type),
                recommendation.confidence
            );
            println!("Reasoning: {}", recommendation.reasoning);
        }
        let start_time = Instant::now();
        let register = self.execute_with_backend(circuit, recommendation.backend_type)?;
        let execution_time = start_time.elapsed();
        let result = self.register_to_simulator_result(register);
        if self.config.enable_profiling {
            self.record_performance_metrics(circuit, recommendation.backend_type, execution_time);
            println!("Execution completed in {execution_time:?}");
        }
        Ok(result)
    }

    /// Computes the critical-path depth: each gate's depth is one more than
    /// the deepest qubit it touches, and the circuit depth is the maximum.
    fn calculate_circuit_depth<const N: usize>(&self, circuit: &Circuit<N>) -> usize {
        let mut qubit_depths = HashMap::new();
        let mut max_depth = 0;
        for gate in circuit.gates() {
            let qubits = gate.qubits();
            let input_depth = qubits
                .iter()
                .map(|&q| qubit_depths.get(&q).copied().unwrap_or(0))
                .max()
                .unwrap_or(0);
            let new_depth = input_depth + 1;
            for &qubit in &qubits {
                qubit_depths.insert(qubit, new_depth);
            }
            max_depth = max_depth.max(new_depth);
        }
        max_depth
    }

    /// Counts occurrences of each gate by name.
    fn analyze_gate_distribution<const N: usize>(
        &self,
        circuit: &Circuit<N>,
    ) -> HashMap<String, usize> {
        let mut distribution = HashMap::new();
        for gate in circuit.gates() {
            let gate_name = gate.name().to_string();
            *distribution.entry(gate_name).or_insert(0) += 1;
        }
        distribution
    }

    /// Delegates to the auto-parallel engine and returns its efficiency
    /// estimate for this circuit.
    fn analyze_parallelism_potential<const N: usize>(
        &self,
        circuit: &Circuit<N>,
    ) -> QuantRS2Result<f64> {
        let analysis = self.parallel_engine.analyze_circuit(circuit)?;
        Ok(analysis.efficiency)
    }

    /// Estimates peak memory in bytes: 2^num_qubits amplitudes plus a flat
    /// 64-byte-per-gate overhead.
    ///
    /// All arithmetic saturates: shifting by `num_qubits >= usize::BITS`
    /// would otherwise overflow (panicking in debug builds), so such sizes
    /// clamp to `usize::MAX` — semantically "won't fit in memory anyway".
    const fn estimate_memory_requirement(&self, num_qubits: usize, num_gates: usize) -> usize {
        let amplitude_bytes = std::mem::size_of::<Complex64>();
        let state_vector_size = if num_qubits >= usize::BITS as usize {
            usize::MAX
        } else {
            (1usize << num_qubits).saturating_mul(amplitude_bytes)
        };
        let overhead = num_gates.saturating_mul(64);
        state_vector_size.saturating_add(overhead)
    }

    /// Heuristic complexity score combining gate count, depth, and an
    /// entanglement term, scaled down by 1000.
    fn calculate_complexity_score<const N: usize>(
        &self,
        circuit: &Circuit<N>,
    ) -> QuantRS2Result<f64> {
        let num_qubits = circuit.num_qubits() as f64;
        let num_gates = circuit.num_gates() as f64;
        let depth = self.calculate_circuit_depth(circuit) as f64;
        // `.max(1.0)` guards log2(0) = -inf for degenerate 0-qubit circuits;
        // it is a no-op for any real circuit (log2(1) == 0).
        let gate_complexity = num_gates * (num_qubits.max(1.0).log2() + 1.0);
        let depth_complexity = depth * num_qubits;
        let entanglement_complexity = self.estimate_entanglement_complexity(circuit)?;
        Ok((gate_complexity + depth_complexity + entanglement_complexity) / 1000.0)
    }

    /// Sums the squared arity of every multi-qubit gate as a crude proxy for
    /// how much entanglement the circuit can generate.
    fn estimate_entanglement_complexity<const N: usize>(
        &self,
        circuit: &Circuit<N>,
    ) -> QuantRS2Result<f64> {
        let mut entanglement_score = 0.0;
        for gate in circuit.gates() {
            let qubits = gate.qubits();
            if qubits.len() >= 2 {
                entanglement_score += qubits.len() as f64 * qubits.len() as f64;
            }
        }
        Ok(entanglement_score)
    }

    /// Fraction of gates that act on two or more qubits (0.0 for an empty
    /// circuit).
    fn calculate_two_qubit_density<const N: usize>(&self, circuit: &Circuit<N>) -> f64 {
        let total_gates = circuit.num_gates();
        if total_gates == 0 {
            return 0.0;
        }
        let two_qubit_gates = circuit
            .gates()
            .iter()
            .filter(|gate| gate.qubits().len() >= 2)
            .count();
        two_qubit_gates as f64 / total_gates as f64
    }

    /// Builds the qubit-interaction graph from multi-qubit gates and reports
    /// degree statistics. Components, diameter, and clustering coefficient
    /// are placeholders, not computed from the graph.
    fn analyze_connectivity<const N: usize>(
        &self,
        circuit: &Circuit<N>,
    ) -> QuantRS2Result<ConnectivityProperties> {
        let mut qubit_connections: HashMap<QubitId, Vec<QubitId>> = HashMap::new();
        for gate in circuit.gates() {
            let qubits = gate.qubits();
            if qubits.len() >= 2 {
                // Record every pair within the gate, in both directions.
                // Repeated interactions are counted again, so "degree" below
                // is an interaction count rather than a simple-graph degree.
                for i in 0..qubits.len() {
                    for j in (i + 1)..qubits.len() {
                        qubit_connections
                            .entry(qubits[i])
                            .or_default()
                            .push(qubits[j]);
                        qubit_connections
                            .entry(qubits[j])
                            .or_default()
                            .push(qubits[i]);
                    }
                }
            }
        }
        let max_degree = qubit_connections
            .values()
            .map(std::vec::Vec::len)
            .max()
            .unwrap_or(0);
        let avg_degree = if qubit_connections.is_empty() {
            0.0
        } else {
            qubit_connections
                .values()
                .map(std::vec::Vec::len)
                .sum::<usize>() as f64
                / qubit_connections.len() as f64
        };
        // Placeholder graph metrics; see ConnectivityProperties docs.
        let connected_components = 1;
        let diameter = circuit.num_qubits().min(6);
        let clustering_coefficient = 0.5;
        Ok(ConnectivityProperties {
            max_degree,
            avg_degree,
            connected_components,
            diameter,
            clustering_coefficient,
        })
    }

    /// Rough entangling-depth estimate: sqrt of the multi-qubit gate count,
    /// capped at the qubit count.
    fn estimate_entanglement_depth<const N: usize>(
        &self,
        circuit: &Circuit<N>,
    ) -> QuantRS2Result<usize> {
        let two_qubit_gates = circuit
            .gates()
            .iter()
            .filter(|gate| gate.qubits().len() >= 2)
            .count();
        let depth_estimate = (two_qubit_gates as f64).sqrt().ceil() as usize;
        Ok(depth_estimate.min(circuit.num_qubits()))
    }

    /// Heuristic noise susceptibility in 0.0..=1.0, increasing with depth
    /// and two-qubit gate density.
    fn analyze_noise_susceptibility<const N: usize>(&self, circuit: &Circuit<N>) -> f64 {
        let depth = self.calculate_circuit_depth(circuit) as f64;
        let two_qubit_density = self.calculate_two_qubit_density(circuit);
        (depth / 100.0 + two_qubit_density).min(1.0)
    }

    /// Refreshes the availability map. GPU availability is probed only when
    /// the `gpu` feature is enabled and the target is not macOS; the
    /// distributed backend is currently hard-coded as unavailable.
    fn update_backend_availability(&mut self) -> QuantRS2Result<()> {
        #[cfg(all(feature = "gpu", not(target_os = "macos")))]
        let gpu_available = SciRS2GpuStateVectorSimulator::is_available();
        #[cfg(any(not(feature = "gpu"), target_os = "macos"))]
        let gpu_available = false;
        self.backend_availability
            .insert(BackendType::SciRS2Gpu, gpu_available);
        self.backend_availability
            .insert(BackendType::StateVector, true);
        self.backend_availability
            .insert(BackendType::LargeScale, true);
        self.backend_availability
            .insert(BackendType::Distributed, false);
        Ok(())
    }

    /// Looks for a cached run of a similar circuit. Currently always misses
    /// because similarity matching is a stub.
    fn check_performance_cache(
        &self,
        characteristics: &CircuitCharacteristics,
    ) -> Option<&PerformanceHistory> {
        self.performance_cache
            .iter()
            .find(|&entry| self.are_characteristics_similar(characteristics, entry))
    }

    /// Placeholder similarity test: always `false`, so the performance cache
    /// is effectively disabled until a real metric is implemented.
    const fn are_characteristics_similar(
        &self,
        _characteristics: &CircuitCharacteristics,
        _entry: &PerformanceHistory,
    ) -> bool {
        false
    }

    /// Builds a recommendation directly from a cached history entry.
    fn build_recommendation_from_cache(
        &self,
        cache_entry: &PerformanceHistory,
    ) -> BackendRecommendation {
        BackendRecommendation {
            backend_type: cache_entry.backend_type,
            confidence: 0.9,
            expected_improvement: 0.0,
            estimated_execution_time: cache_entry.metrics.execution_time,
            estimated_memory_usage: cache_entry.metrics.memory_usage,
            reasoning: "Based on cached performance data for similar circuits".to_string(),
            alternatives: Vec::new(),
            prediction_model: "Cache-based".to_string(),
        }
    }

    /// Scores every available backend in preference order and packages the
    /// best one into a recommendation. Falls back to the CPU state-vector
    /// backend (score 0.5) when nothing is available.
    fn generate_backend_recommendation(
        &self,
        characteristics: &CircuitCharacteristics,
    ) -> QuantRS2Result<BackendRecommendation> {
        let mut scores: HashMap<BackendType, f64> = HashMap::new();
        for &backend_type in &self.config.backend_preferences {
            if !self
                .backend_availability
                .get(&backend_type)
                .unwrap_or(&false)
            {
                continue;
            }
            let score = self.score_backend_for_characteristics(backend_type, characteristics);
            scores.insert(backend_type, score);
        }
        let (best_backend, best_score) = scores
            .into_iter()
            .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
            .unwrap_or((BackendType::StateVector, 0.5));
        let reasoning = self.generate_recommendation_reasoning(best_backend, characteristics);
        let estimated_execution_time = self.estimate_execution_time(best_backend, characteristics);
        let estimated_memory_usage = characteristics.memory_requirement;
        Ok(BackendRecommendation {
            backend_type: best_backend,
            confidence: best_score,
            expected_improvement: (best_score - 0.5).max(0.0) * 2.0,
            estimated_execution_time,
            estimated_memory_usage,
            reasoning,
            alternatives: Vec::new(),
            prediction_model: "SciRS2-guided heuristic".to_string(),
        })
    }

    /// Heuristic fitness score (capped at 1.0) for running a circuit with
    /// the given characteristics on `backend_type`.
    fn score_backend_for_characteristics(
        &self,
        backend_type: BackendType,
        characteristics: &CircuitCharacteristics,
    ) -> f64 {
        let mut score: f64 = 0.5;
        match backend_type {
            BackendType::StateVector => {
                // Best for small, short circuits.
                if characteristics.num_qubits <= 20 {
                    score += 0.3;
                }
                if characteristics.num_gates <= 1000 {
                    score += 0.2;
                }
            }
            BackendType::SciRS2Gpu => {
                // GPU pays off for mid-size, parallel, entangling circuits.
                if characteristics.num_qubits >= 10 && characteristics.num_qubits <= 30 {
                    score += 0.4;
                }
                if characteristics.parallelism_potential > 0.5 {
                    score += 0.3;
                }
                if characteristics.two_qubit_density > 0.3 {
                    score += 0.2;
                }
            }
            BackendType::LargeScale => {
                // Favored for big or complex circuits.
                if characteristics.num_qubits >= 20 {
                    score += 0.4;
                }
                if characteristics.complexity_score > 0.5 {
                    score += 0.3;
                }
            }
            BackendType::Distributed => {
                // Only worthwhile when memory pressure or size demands it.
                if characteristics.num_qubits >= 30 {
                    score += 0.5;
                }
                if characteristics.memory_requirement > self.config.memory_budget / 2 {
                    score += 0.3;
                }
            }
            BackendType::Auto => {
                // "Auto" should never win an explicit comparison.
                score = 0.1;
            }
        }
        score.min(1.0)
    }

    /// Produces the human-readable justification string for a selection.
    fn generate_recommendation_reasoning(
        &self,
        backend_type: BackendType,
        characteristics: &CircuitCharacteristics,
    ) -> String {
        match backend_type {
            BackendType::StateVector => {
                format!("CPU state vector simulator recommended for {} qubits, {} gates. Suitable for small circuits with straightforward execution.",
                    characteristics.num_qubits, characteristics.num_gates)
            }
            BackendType::SciRS2Gpu => {
                format!("SciRS2 GPU simulator recommended for {} qubits, {} gates. High parallelism potential ({:.2}) and two-qubit gate density ({:.2}) make GPU acceleration beneficial.",
                    characteristics.num_qubits, characteristics.num_gates, characteristics.parallelism_potential, characteristics.two_qubit_density)
            }
            BackendType::LargeScale => {
                format!("Large-scale simulator recommended for {} qubits, {} gates. Circuit complexity ({:.2}) and depth ({}) require optimized memory management.",
                    characteristics.num_qubits, characteristics.num_gates, characteristics.complexity_score, characteristics.circuit_depth)
            }
            BackendType::Distributed => {
                format!("Distributed simulator recommended for {} qubits, {} gates. Memory requirement ({:.1} MB) exceeds single-node capacity.",
                    characteristics.num_qubits, characteristics.num_gates, characteristics.memory_requirement as f64 / (1024.0 * 1024.0))
            }
            BackendType::Auto => "Automatic backend selection".to_string(),
        }
    }

    /// Heuristic execution-time estimate: per-gate base cost (backend
    /// dependent) scaled by a complexity factor. Saturating to avoid
    /// overflow for enormous gate counts.
    fn estimate_execution_time(
        &self,
        backend_type: BackendType,
        characteristics: &CircuitCharacteristics,
    ) -> Duration {
        let base_time_ms = match backend_type {
            BackendType::StateVector => characteristics.num_gates as u64 * 10,
            BackendType::SciRS2Gpu => characteristics.num_gates as u64 * 2,
            BackendType::LargeScale => characteristics.num_gates as u64 * 5,
            BackendType::Distributed => characteristics.num_gates as u64 * 15,
            BackendType::Auto => characteristics.num_gates as u64 * 10,
        };
        // f64 -> u64 `as` casts saturate, so this is well-defined even for
        // very large complexity scores.
        let complexity_factor = characteristics.complexity_score.mul_add(2.0, 1.0) as u64;
        Duration::from_millis(base_time_ms.saturating_mul(complexity_factor))
    }

    /// Runs `circuit` on the requested backend. The GPU arm falls back to
    /// the CPU state vector when the `gpu` feature is off or on macOS;
    /// `Distributed` currently reuses the large-scale simulator.
    fn execute_with_backend<const N: usize>(
        &self,
        circuit: &Circuit<N>,
        backend_type: BackendType,
    ) -> Result<Register<N>> {
        match backend_type {
            BackendType::StateVector => {
                let simulator = StateVectorSimulator::new();
                simulator
                    .run(circuit)
                    .map_err(|e| SimulatorError::ComputationError(e.to_string()))
                    .and_then(|result| {
                        Register::with_amplitudes(result.amplitudes().to_vec())
                            .map_err(|e| SimulatorError::ComputationError(e.to_string()))
                    })
            }
            BackendType::SciRS2Gpu => {
                #[cfg(all(feature = "gpu", not(target_os = "macos")))]
                {
                    let mut simulator = SciRS2GpuStateVectorSimulator::new()
                        .map_err(|e| SimulatorError::ComputationError(e.to_string()))?;
                    use crate::simulator::Simulator;
                    simulator
                        .run(circuit)
                        .map_err(|e| SimulatorError::ComputationError(e.to_string()))
                        .and_then(|result| {
                            Register::with_amplitudes(result.amplitudes().to_vec())
                                .map_err(|e| SimulatorError::ComputationError(e.to_string()))
                        })
                }
                #[cfg(any(not(feature = "gpu"), target_os = "macos"))]
                {
                    // No GPU path on this build: transparently fall back to CPU.
                    let simulator = StateVectorSimulator::new();
                    simulator
                        .run(circuit)
                        .map_err(|e| SimulatorError::ComputationError(e.to_string()))
                        .and_then(|result| {
                            Register::with_amplitudes(result.amplitudes().to_vec())
                                .map_err(|e| SimulatorError::ComputationError(e.to_string()))
                        })
                }
            }
            BackendType::LargeScale => {
                let config = LargeScaleSimulatorConfig::default();
                let simulator = LargeScaleQuantumSimulator::new(config)
                    .map_err(|e| SimulatorError::ComputationError(e.to_string()))?;
                simulator
                    .run(circuit)
                    .map_err(|e| SimulatorError::ComputationError(e.to_string()))
            }
            BackendType::Distributed => {
                // TODO: wire up DistributedQuantumSimulator; large-scale is a
                // stand-in (availability is also reported as false upstream).
                let config = LargeScaleSimulatorConfig::default();
                let simulator = LargeScaleQuantumSimulator::new(config)
                    .map_err(|e| SimulatorError::ComputationError(e.to_string()))?;
                simulator
                    .run(circuit)
                    .map_err(|e| SimulatorError::ComputationError(e.to_string()))
            }
            BackendType::Auto => {
                let simulator = StateVectorSimulator::new();
                simulator
                    .run(circuit)
                    .map_err(|e| SimulatorError::ComputationError(e.to_string()))
            }
        }
    }

    /// Converts a register into the public `SimulatorResult` shape.
    fn register_to_simulator_result<const N: usize>(
        &self,
        register: Register<N>,
    ) -> SimulatorResult<N> {
        let amplitudes = register.amplitudes().to_vec();
        SimulatorResult {
            amplitudes,
            num_qubits: N,
        }
    }

    /// Appends a history entry for this run, trimming the cache to its
    /// configured bound (FIFO). Only timing/throughput are measured; the
    /// remaining metric fields are placeholders.
    fn record_performance_metrics<const N: usize>(
        &mut self,
        circuit: &Circuit<N>,
        backend_type: BackendType,
        execution_time: Duration,
    ) {
        // Guard against a zero-duration measurement, which would otherwise
        // produce an inf/NaN throughput.
        let secs = execution_time.as_secs_f64();
        let throughput = if secs > 0.0 {
            circuit.num_gates() as f64 / secs
        } else {
            0.0
        };
        let metrics = PerformanceMetrics {
            execution_time,
            memory_usage: 0,
            cpu_utilization: 0.0,
            gpu_utilization: None,
            throughput,
            error_rate: 0.0,
        };
        let history_entry = PerformanceHistory {
            circuit_hash: self.compute_circuit_hash(circuit),
            backend_type,
            metrics,
            timestamp: Instant::now(),
        };
        self.performance_cache.push(history_entry);
        // O(n) front removal is acceptable: the cache is bounded (default 1000).
        if self.performance_cache.len() > self.config.performance_cache_size {
            self.performance_cache.remove(0);
        }
    }

    /// Structural hash over gate/qubit counts plus each gate's name and
    /// arity. Note: gate parameters and target indices are not hashed, so
    /// distinct circuits can collide.
    fn compute_circuit_hash<const N: usize>(&self, circuit: &Circuit<N>) -> u64 {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};
        let mut hasher = DefaultHasher::new();
        circuit.num_gates().hash(&mut hasher);
        circuit.num_qubits().hash(&mut hasher);
        for gate in circuit.gates() {
            gate.name().hash(&mut hasher);
            gate.qubits().len().hash(&mut hasher);
        }
        hasher.finish()
    }

    /// Human-readable label for a backend (used in profiling output).
    const fn backend_type_name(&self, backend_type: BackendType) -> &'static str {
        match backend_type {
            BackendType::StateVector => "CPU StateVector",
            BackendType::SciRS2Gpu => "SciRS2 GPU",
            BackendType::LargeScale => "Large-Scale",
            BackendType::Distributed => "Distributed",
            BackendType::Auto => "Auto",
        }
    }

    /// Formats a multi-line summary of cached runs: totals, average timing,
    /// and per-backend usage percentages.
    #[must_use]
    pub fn get_performance_summary(&self) -> String {
        let total_circuits = self.performance_cache.len();
        if total_circuits == 0 {
            return "No performance data available".to_string();
        }
        let avg_execution_time = self
            .performance_cache
            .iter()
            .map(|entry| entry.metrics.execution_time.as_millis())
            .sum::<u128>()
            / total_circuits as u128;
        let backend_usage: HashMap<BackendType, usize> =
            self.performance_cache
                .iter()
                .fold(HashMap::new(), |mut acc, entry| {
                    *acc.entry(entry.backend_type).or_insert(0) += 1;
                    acc
                });
        let mut summary = "AutoOptimizer Performance Summary\n".to_string();
        writeln!(summary, "Total circuits processed: {total_circuits}")
            .expect("Writing to String should never fail");
        writeln!(summary, "Average execution time: {avg_execution_time}ms")
            .expect("Writing to String should never fail");
        summary.push_str("Backend usage:\n");
        for (backend, count) in backend_usage {
            let percentage = (count as f64 / total_circuits as f64) * 100.0;
            writeln!(
                summary,
                "  {}: {} ({:.1}%)",
                self.backend_type_name(backend),
                count,
                percentage
            )
            .expect("Writing to String should never fail");
        }
        summary
    }
}
impl Default for AutoOptimizer {
fn default() -> Self {
Self::new()
}
}
impl SciRS2CircuitAnalyzer {
    // Placeholder analysis hook: currently ignores the circuit and returns a
    // fixed score of 0.7. Not called anywhere in this file yet.
    const fn analyze_circuit_with_scirs2<const N: usize>(
        &self,
        _circuit: &Circuit<N>,
    ) -> QuantRS2Result<f64> {
        Ok(0.7)
    }
}
/// One-shot convenience: builds a default [`AutoOptimizer`], lets it pick a
/// backend for `circuit`, and executes it there.
///
/// # Errors
/// Returns a [`SimulatorError`] if analysis or execution fails.
pub fn execute_with_auto_optimization<const N: usize>(
    circuit: &Circuit<N>,
) -> Result<SimulatorResult<N>> {
    AutoOptimizer::default().execute_optimized(circuit)
}
/// One-shot convenience: builds a default [`AutoOptimizer`] and returns its
/// backend recommendation for `circuit` without executing anything.
///
/// # Errors
/// Propagates errors from circuit analysis.
pub fn recommend_backend_for_circuit<const N: usize>(
    circuit: &Circuit<N>,
) -> QuantRS2Result<BackendRecommendation> {
    AutoOptimizer::default().recommend_backend(circuit)
}
#[cfg(test)]
mod tests {
    use super::*;
    use quantrs2_circuit::builder::CircuitBuilder;

    // Construction uses the default config (profiling enabled).
    #[test]
    fn test_auto_optimizer_creation() {
        let optimizer = AutoOptimizer::new();
        assert!(optimizer.config.enable_profiling);
    }

    // Two disjoint Bell-pair preparations: 4 qubits, 4 gates, half of them
    // two-qubit, so density and depth must be nonzero.
    #[test]
    fn test_circuit_characteristics_analysis() {
        let optimizer = AutoOptimizer::new();
        let mut builder = CircuitBuilder::<4>::new();
        let _ = builder.h(0);
        let _ = builder.cnot(0, 1);
        let _ = builder.h(2);
        let _ = builder.cnot(2, 3);
        let circuit = builder.build();
        let characteristics = optimizer
            .analyze_circuit(&circuit)
            .expect("Failed to analyze circuit characteristics");
        assert_eq!(characteristics.num_qubits, 4);
        assert_eq!(characteristics.num_gates, 4);
        assert!(characteristics.circuit_depth > 0);
        assert!(characteristics.two_qubit_density > 0.0);
    }

    // Any recommendation must come with a positive confidence and a
    // non-empty human-readable justification.
    #[test]
    fn test_backend_recommendation() {
        let mut optimizer = AutoOptimizer::new();
        let mut builder = CircuitBuilder::<2>::new();
        let _ = builder.h(0);
        let _ = builder.cnot(0, 1);
        let circuit = builder.build();
        let recommendation = optimizer
            .recommend_backend(&circuit)
            .expect("Failed to get backend recommendation");
        assert!(recommendation.confidence > 0.0);
        assert!(!recommendation.reasoning.is_empty());
    }

    // End-to-end: a 2-qubit Bell circuit yields a 4-amplitude state vector.
    #[test]
    fn test_execute_with_optimization() {
        let mut optimizer = AutoOptimizer::new();
        let mut builder = CircuitBuilder::<2>::new();
        let _ = builder.h(0);
        let _ = builder.cnot(0, 1);
        let circuit = builder.build();
        let result = optimizer.execute_optimized(&circuit);
        assert!(result.is_ok());
        if let Ok(sim_result) = result {
            assert_eq!(sim_result.num_qubits, 2);
            assert_eq!(sim_result.amplitudes.len(), 4);
        }
    }

    // The free-function wrappers should behave like the methods they wrap.
    #[test]
    fn test_convenience_functions() {
        let mut builder = CircuitBuilder::<2>::new();
        let _ = builder.h(0);
        let _ = builder.cnot(0, 1);
        let circuit = builder.build();
        let recommendation = recommend_backend_for_circuit(&circuit);
        assert!(recommendation.is_ok());
        let result = execute_with_auto_optimization(&circuit);
        assert!(result.is_ok());
    }
}