quantrs2_core/optimization/lazy_evaluation.rs

//! Lazy evaluation system for gate sequence optimization
//!
//! This module provides a lazy evaluation framework that defers gate optimizations
//! until they're actually needed, improving performance for large circuits by avoiding
//! unnecessary computation and enabling more sophisticated optimization strategies.
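//!
//! # Example
//!
//! A minimal usage sketch (marked `ignore`, not compiled); the import paths and the
//! empty `OptimizationChain::new()` are assumptions based on this file's location and
//! the tests below, and `some_gate` stands in for any `Box<dyn GateOp>`:
//!
//! ```ignore
//! use quantrs2_core::optimization::lazy_evaluation::{
//!     LazyEvaluationConfig, LazyOptimizationPipeline,
//! };
//! use quantrs2_core::optimization::OptimizationChain;
//!
//! let pipeline = LazyOptimizationPipeline::new(
//!     LazyEvaluationConfig::default(),
//!     OptimizationChain::new(),
//! )?;
//!
//! // Gates are buffered and only optimized when a result is actually requested.
//! let gate_id = pipeline.add_gate(some_gate)?;
//! let result = pipeline.evaluate_gate(gate_id)?;
//! println!("gates after optimization: {}", result.stats.gates_after);
//! ```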

use crate::{
    error::{QuantRS2Error, QuantRS2Result},
    gate::GateOp,
    optimization::OptimizationChain,
    qubit::QubitId,
};
use serde::{Deserialize, Serialize};
use std::{
    collections::{HashMap, HashSet, VecDeque},
    sync::{Arc, RwLock},
    time::{Duration, Instant},
};

/// Configuration for lazy evaluation
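///
/// All fields are public, so a custom configuration can be built with struct-update
/// syntax; a small sketch (marked `ignore`), with field values chosen purely for
/// illustration:
///
/// ```ignore
/// let config = LazyEvaluationConfig {
///     max_buffer_size: 256,
///     num_optimization_threads: 2,
///     ..Default::default()
/// };
/// ```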
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LazyEvaluationConfig {
    /// Maximum number of gates to buffer before forced evaluation
    pub max_buffer_size: usize,
    /// Maximum time to defer evaluation
    pub max_defer_time: Duration,
    /// Enable dependency-based optimization ordering
    pub enable_dependency_optimization: bool,
    /// Enable speculative optimization
    pub enable_speculative_optimization: bool,
    /// Number of worker threads for async optimization
    pub num_optimization_threads: usize,
    /// Cache size for optimization results
    pub optimization_cache_size: usize,
}

impl Default for LazyEvaluationConfig {
    fn default() -> Self {
        Self {
            max_buffer_size: 1000,
            max_defer_time: Duration::from_millis(100),
            enable_dependency_optimization: true,
            enable_speculative_optimization: true,
            num_optimization_threads: 4,
            optimization_cache_size: 10000,
        }
    }
}

/// Lazy evaluation context for a gate
#[derive(Debug, Clone)]
pub struct LazyGateContext {
    /// Unique identifier for this gate in the pipeline
    pub gate_id: usize,
    /// The gate to be optimized
    pub gate: Box<dyn GateOp>,
    /// Dependencies (other gates that must be evaluated first)
    pub dependencies: HashSet<usize>,
    /// Dependents (gates that depend on this one)
    pub dependents: HashSet<usize>,
    /// Priority for evaluation (higher = more urgent)
    pub priority: f64,
    /// Timestamp when gate was added
    pub created_at: Instant,
    /// Whether this gate has been evaluated
    pub is_evaluated: bool,
    /// Cached optimization result
    pub cached_result: Option<OptimizationResult>,
}

/// Result of a lazy optimization
#[derive(Debug, Clone)]
pub struct OptimizationResult {
    /// Optimized gate sequence
    pub optimized_gates: Vec<Box<dyn GateOp>>,
    /// Optimization statistics
    pub stats: OptimizationStats,
    /// Time taken for optimization
    pub optimization_time: Duration,
}

/// Statistics from optimization
#[derive(Debug, Clone, Default)]
pub struct OptimizationStats {
    /// Number of gates before optimization
    pub gates_before: usize,
    /// Number of gates after optimization
    pub gates_after: usize,
    /// Number of optimization passes applied
    pub passes_applied: usize,
    /// Estimated performance improvement
    pub performance_improvement: f64,
    /// Memory savings achieved
    pub memory_savings: usize,
}

/// Lazy optimization pipeline
pub struct LazyOptimizationPipeline {
    /// Configuration
    config: LazyEvaluationConfig,
    /// Buffered gates awaiting optimization
    gate_buffer: Arc<RwLock<HashMap<usize, LazyGateContext>>>,
    /// Dependency graph
    dependency_graph: Arc<RwLock<DependencyGraph>>,
    /// Optimization chain to apply
    optimization_chain: OptimizationChain,
    /// Compilation cache for optimized results
    optimization_cache: Arc<RwLock<OptimizationCache>>,
    /// Next gate ID
    next_gate_id: Arc<RwLock<usize>>,
    /// Worker thread handles
    worker_handles: Vec<std::thread::JoinHandle<()>>,
    /// Shutdown signal
    shutdown_signal: Arc<RwLock<bool>>,
}

/// Dependency graph for managing gate relationships
#[derive(Debug, Default)]
struct DependencyGraph {
    /// Adjacency list representation
    edges: HashMap<usize, HashSet<usize>>,
    /// Reverse edges for quick lookup
    reverse_edges: HashMap<usize, HashSet<usize>>,
    /// Topological ordering cache
    topo_order_cache: Option<Vec<usize>>,
}

/// Cache for optimization results
struct OptimizationCache {
    /// Cache entries indexed by gate hash
    entries: HashMap<u64, CachedOptimization>,
    /// LRU queue for eviction
    lru_queue: VecDeque<u64>,
    /// Maximum cache size
    max_size: usize,
}

/// Cached optimization entry
#[derive(Debug, Clone)]
struct CachedOptimization {
    /// The optimization result
    result: OptimizationResult,
    /// Access count for LRU
    access_count: usize,
    /// Last access time
    last_accessed: Instant,
}

impl LazyOptimizationPipeline {
    /// Create a new lazy optimization pipeline
    pub fn new(
        config: LazyEvaluationConfig,
        optimization_chain: OptimizationChain,
    ) -> QuantRS2Result<Self> {
        let gate_buffer = Arc::new(RwLock::new(HashMap::new()));
        let dependency_graph = Arc::new(RwLock::new(DependencyGraph::default()));
        let optimization_cache = Arc::new(RwLock::new(OptimizationCache::new(
            config.optimization_cache_size,
        )));
        let next_gate_id = Arc::new(RwLock::new(0));
        let shutdown_signal = Arc::new(RwLock::new(false));

        // Start worker threads
        let mut worker_handles = Vec::new();
        for worker_id in 0..config.num_optimization_threads {
            let handle = Self::start_worker_thread(
                worker_id,
                Arc::clone(&gate_buffer),
                Arc::clone(&dependency_graph),
                Arc::clone(&optimization_cache),
                Arc::clone(&shutdown_signal),
                config.clone(),
            );
            worker_handles.push(handle);
        }

        Ok(Self {
            config,
            gate_buffer,
            dependency_graph,
            optimization_chain,
            optimization_cache,
            next_gate_id,
            worker_handles,
            shutdown_signal,
        })
    }

    /// Add a gate to the lazy evaluation pipeline
    pub fn add_gate(&self, gate: Box<dyn GateOp>) -> QuantRS2Result<usize> {
        let gate_id = {
            let mut next_id = self.next_gate_id.write().unwrap();
            let id = *next_id;
            *next_id += 1;
            id
        };

        // Analyze dependencies based on qubit overlap
        let dependencies = self.analyze_dependencies(gate.as_ref())?;

        // Calculate priority based on gate type and dependencies
        let priority = self.calculate_priority(gate.as_ref(), &dependencies);

        let context = LazyGateContext {
            gate_id,
            gate,
            dependencies: dependencies.clone(),
            dependents: HashSet::new(),
            priority,
            created_at: Instant::now(),
            is_evaluated: false,
            cached_result: None,
        };

        // Update dependency graph
        {
            let mut graph = self.dependency_graph.write().unwrap();
            graph.add_gate(gate_id, dependencies);
        }

        // Add to buffer
        {
            let mut buffer = self.gate_buffer.write().unwrap();
            buffer.insert(gate_id, context);
        }

        // Check if we need to force evaluation due to buffer size or time
        self.check_forced_evaluation()?;

        Ok(gate_id)
    }

    /// Evaluate a specific gate (force optimization)
    pub fn evaluate_gate(&self, gate_id: usize) -> QuantRS2Result<OptimizationResult> {
        // Check cache first
        if let Some(cached) = self.get_cached_result(gate_id)? {
            return Ok(cached);
        }

        // Get the gate context
        let context = {
            let buffer = self.gate_buffer.read().unwrap();
            buffer.get(&gate_id).cloned().ok_or_else(|| {
                QuantRS2Error::InvalidInput(format!("Gate {} not found in buffer", gate_id))
            })?
        };

        // Ensure dependencies are evaluated first
        self.evaluate_dependencies(&context.dependencies)?;

        // Perform the optimization
        let result = self.optimize_gate_context(&context)?;

        // Cache the result
        self.cache_optimization_result(gate_id, &result)?;

        // Mark as evaluated
        {
            let mut buffer = self.gate_buffer.write().unwrap();
            if let Some(ctx) = buffer.get_mut(&gate_id) {
                ctx.is_evaluated = true;
                ctx.cached_result = Some(result.clone());
            }
        }

        Ok(result)
    }

    /// Evaluate all buffered gates
    pub fn evaluate_all(&self) -> QuantRS2Result<Vec<OptimizationResult>> {
        // Get topological ordering of gates
        let ordered_gates = {
            let graph = self.dependency_graph.read().unwrap();
            graph.topological_sort()
        };

        let mut results = Vec::new();
        for gate_id in ordered_gates {
            if let Ok(result) = self.evaluate_gate(gate_id) {
                results.push(result);
            }
        }

        // Clear the buffer
        {
            let mut buffer = self.gate_buffer.write().unwrap();
            buffer.clear();
        }

        Ok(results)
    }

    /// Get optimization statistics
    pub fn get_statistics(&self) -> LazyEvaluationStats {
        let buffer = self.gate_buffer.read().unwrap();
        let cache = self.optimization_cache.read().unwrap();

        let total_gates = buffer.len();
        let evaluated_gates = buffer.values().filter(|ctx| ctx.is_evaluated).count();
        let pending_gates = total_gates - evaluated_gates;

        LazyEvaluationStats {
            total_gates,
            evaluated_gates,
            pending_gates,
            cache_hits: cache.get_hit_count(),
            cache_size: cache.entries.len(),
            average_optimization_time: cache.get_average_optimization_time(),
        }
    }

    /// Analyze dependencies for a gate based on qubit overlap
    fn analyze_dependencies(&self, gate: &dyn GateOp) -> QuantRS2Result<HashSet<usize>> {
        let gate_qubits: HashSet<QubitId> = gate.qubits().into_iter().collect();
        let mut dependencies = HashSet::new();

        let buffer = self.gate_buffer.read().unwrap();
        for (gate_id, context) in buffer.iter() {
            let context_qubits: HashSet<QubitId> = context.gate.qubits().into_iter().collect();

            // If there's qubit overlap, this gate depends on the previous one
            if !gate_qubits.is_disjoint(&context_qubits) {
                dependencies.insert(*gate_id);
            }
        }

        Ok(dependencies)
    }

    /// Calculate priority for a gate
    fn calculate_priority(&self, gate: &dyn GateOp, dependencies: &HashSet<usize>) -> f64 {
        let mut priority = 0.0;

        // Higher priority for gates with fewer qubits (simpler to optimize)
        priority += 10.0 / (gate.num_qubits() as f64 + 1.0);

        // Lower priority for gates with many dependencies
        priority -= dependencies.len() as f64 * 0.5;

        // Higher priority for common gate types
        match gate.name() {
            "H" | "X" | "Y" | "Z" => priority += 5.0,
            "CNOT" | "CZ" => priority += 3.0,
            "RX" | "RY" | "RZ" => priority += 2.0,
            _ => priority += 1.0,
        }

        priority.max(0.1)
    }

    /// Check if forced evaluation is needed
    fn check_forced_evaluation(&self) -> QuantRS2Result<()> {
        let buffer = self.gate_buffer.read().unwrap();

        // Check buffer size
        if buffer.len() >= self.config.max_buffer_size {
            drop(buffer);
            return self.force_oldest_evaluation();
        }

        // Check time-based forced evaluation; collect the verdict first so the read
        // guard can be released before forcing an evaluation
        let now = Instant::now();
        let has_expired = buffer
            .values()
            .any(|context| now.duration_since(context.created_at) > self.config.max_defer_time);
        drop(buffer);

        if has_expired {
            return self.force_oldest_evaluation();
        }

        Ok(())
    }

    /// Force evaluation of the oldest gate
    fn force_oldest_evaluation(&self) -> QuantRS2Result<()> {
        let oldest_gate_id = {
            let buffer = self.gate_buffer.read().unwrap();
            buffer
                .values()
                .filter(|ctx| !ctx.is_evaluated)
                .min_by_key(|ctx| ctx.created_at)
                .map(|ctx| ctx.gate_id)
        };

        if let Some(gate_id) = oldest_gate_id {
            self.evaluate_gate(gate_id)?;
        }

        Ok(())
    }

    /// Evaluate dependencies recursively
    fn evaluate_dependencies(&self, dependencies: &HashSet<usize>) -> QuantRS2Result<()> {
        for &dep_id in dependencies {
            if !self.is_gate_evaluated(dep_id) {
                self.evaluate_gate(dep_id)?;
            }
        }
        Ok(())
    }

    /// Check if a gate has been evaluated
    fn is_gate_evaluated(&self, gate_id: usize) -> bool {
        let buffer = self.gate_buffer.read().unwrap();
        buffer
            .get(&gate_id)
            .map(|ctx| ctx.is_evaluated)
            .unwrap_or(false)
    }

    /// Optimize a gate context
    fn optimize_gate_context(
        &self,
        context: &LazyGateContext,
    ) -> QuantRS2Result<OptimizationResult> {
        let start_time = Instant::now();

        // Apply optimization chain
        let input_gates = vec![context.gate.clone_gate()];
        let optimized_gates = self.optimization_chain.optimize(input_gates)?;

        let optimization_time = start_time.elapsed();

        // Calculate statistics
        let stats = OptimizationStats {
            gates_before: 1,
            gates_after: optimized_gates.len(),
            passes_applied: 1, // Would track actual passes in real implementation
            performance_improvement: self.estimate_performance_improvement(&optimized_gates),
            memory_savings: self.estimate_memory_savings(&optimized_gates),
        };

        Ok(OptimizationResult {
            optimized_gates,
            stats,
            optimization_time,
        })
    }

    /// Estimate performance improvement from optimization
    fn estimate_performance_improvement(&self, gates: &[Box<dyn GateOp>]) -> f64 {
        // Simple heuristic: fewer gates = better performance
        let base_improvement = 1.0 / (gates.len() as f64 + 1.0);

        // Bonus for single-qubit gates
        let single_qubit_bonus = gates.iter().filter(|g| g.num_qubits() == 1).count() as f64 * 0.1;

        base_improvement + single_qubit_bonus
    }

    /// Estimate memory savings from optimization
    fn estimate_memory_savings(&self, gates: &[Box<dyn GateOp>]) -> usize {
        // Simple heuristic: bytes needed for each gate's unitary, assuming 16-byte
        // complex elements in a 2^n x 2^n matrix for an n-qubit gate
        gates
            .iter()
            .map(|g| match g.num_qubits() {
                1 => 64,                  // 2x2 complex matrix (4 elements)
                2 => 256,                 // 4x4 complex matrix (16 elements)
                n => (1 << (2 * n)) * 16, // 2^n x 2^n complex matrix
            })
            .sum()
    }

    /// Get cached optimization result
    fn get_cached_result(&self, gate_id: usize) -> QuantRS2Result<Option<OptimizationResult>> {
        let buffer = self.gate_buffer.read().unwrap();
        if let Some(context) = buffer.get(&gate_id) {
            if let Some(ref result) = context.cached_result {
                return Ok(Some(result.clone()));
            }
        }
        // Release the buffer lock before `compute_gate_hash` re-acquires it, so a
        // queued writer cannot deadlock the two read acquisitions against each other
        drop(buffer);

        // Check optimization cache
        let gate_hash = self.compute_gate_hash(gate_id)?;
        let mut cache = self.optimization_cache.write().unwrap();
        if let Some(cached) = cache.get_mut(gate_hash) {
            return Ok(Some(cached.result.clone()));
        }

        Ok(None)
    }

    /// Cache optimization result
    fn cache_optimization_result(
        &self,
        gate_id: usize,
        result: &OptimizationResult,
    ) -> QuantRS2Result<()> {
        let gate_hash = self.compute_gate_hash(gate_id)?;
        let mut cache = self.optimization_cache.write().unwrap();

        let cached = CachedOptimization {
            result: result.clone(),
            access_count: 1,
            last_accessed: Instant::now(),
        };

        cache.insert(gate_hash, cached);
        Ok(())
    }

    /// Compute hash for a gate
    fn compute_gate_hash(&self, gate_id: usize) -> QuantRS2Result<u64> {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let buffer = self.gate_buffer.read().unwrap();
        let context = buffer
            .get(&gate_id)
            .ok_or_else(|| QuantRS2Error::InvalidInput(format!("Gate {} not found", gate_id)))?;

        let mut hasher = DefaultHasher::new();
        context.gate.name().hash(&mut hasher);

        // Hash the gate matrix
        if let Ok(matrix) = context.gate.matrix() {
            for elem in &matrix {
                elem.re.to_bits().hash(&mut hasher);
                elem.im.to_bits().hash(&mut hasher);
            }
        }

        Ok(hasher.finish())
    }

    /// Start a worker thread for async optimization
    fn start_worker_thread(
        _worker_id: usize,
        gate_buffer: Arc<RwLock<HashMap<usize, LazyGateContext>>>,
        _dependency_graph: Arc<RwLock<DependencyGraph>>,
        _optimization_cache: Arc<RwLock<OptimizationCache>>,
        shutdown_signal: Arc<RwLock<bool>>,
        config: LazyEvaluationConfig,
    ) -> std::thread::JoinHandle<()> {
        std::thread::spawn(move || {
            let sleep_duration = Duration::from_millis(10);

            loop {
                // Check shutdown signal
                {
                    let shutdown = shutdown_signal.read().unwrap();
                    if *shutdown {
                        break;
                    }
                }

                // Look for high-priority gates to optimize speculatively
                if config.enable_speculative_optimization {
                    let high_priority_gates = {
                        let buffer = gate_buffer.read().unwrap();
                        buffer
                            .values()
                            .filter(|ctx| !ctx.is_evaluated && ctx.priority > 5.0)
                            .map(|ctx| ctx.gate_id)
                            .collect::<Vec<_>>()
                    };

                    for gate_id in high_priority_gates {
                        // Placeholder: running the optimization chain here would need
                        // access to the pipeline's optimization methods, so for now we
                        // only nudge the gate's priority so it is picked up sooner
                        let mut buffer = gate_buffer.write().unwrap();
                        if let Some(ctx) = buffer.get_mut(&gate_id) {
                            ctx.priority += 0.1;
                        }
                    }
                }

                std::thread::sleep(sleep_duration);
            }
        })
    }
}

impl Drop for LazyOptimizationPipeline {
    fn drop(&mut self) {
        // Signal shutdown to worker threads
        {
            let mut shutdown = self.shutdown_signal.write().unwrap();
            *shutdown = true;
        }

        // Wait for all worker threads to finish
        while let Some(handle) = self.worker_handles.pop() {
            let _ = handle.join();
        }
    }
}

impl DependencyGraph {
    /// Add a gate with its dependencies
    fn add_gate(&mut self, gate_id: usize, dependencies: HashSet<usize>) {
        self.edges.insert(gate_id, dependencies.clone());

        // Update reverse edges
        for dep in dependencies {
            self.reverse_edges
                .entry(dep)
                .or_default()
                .insert(gate_id);
        }

        // Invalidate topological order cache
        self.topo_order_cache = None;
    }

    /// Get topological ordering of gates
    fn topological_sort(&self) -> Vec<usize> {
        if let Some(ref cached) = self.topo_order_cache {
            return cached.clone();
        }

        let mut result = Vec::new();
        let mut in_degree: HashMap<usize, usize> = HashMap::new();
        let mut queue = VecDeque::new();

        // A gate's in-degree is the number of gates it depends on
        for (&node, deps) in &self.edges {
            in_degree.insert(node, deps.len());
        }

        // Start with gates that have no unmet dependencies
        for (&node, &degree) in &in_degree {
            if degree == 0 {
                queue.push_back(node);
            }
        }

        // Process gates, releasing dependents as their dependencies complete
        while let Some(node) = queue.pop_front() {
            result.push(node);

            if let Some(dependents) = self.reverse_edges.get(&node) {
                for &dependent in dependents {
                    if let Some(degree) = in_degree.get_mut(&dependent) {
                        *degree -= 1;
                        if *degree == 0 {
                            queue.push_back(dependent);
                        }
                    }
                }
            }
        }

        result
    }
}

impl OptimizationCache {
    fn new(max_size: usize) -> Self {
        Self {
            entries: HashMap::new(),
            lru_queue: VecDeque::new(),
            max_size,
        }
    }

    fn get_mut(&mut self, hash: u64) -> Option<&mut CachedOptimization> {
        if let Some(cached) = self.entries.get_mut(&hash) {
            cached.access_count += 1;
            cached.last_accessed = Instant::now();

            // Update LRU
            self.lru_queue.retain(|&h| h != hash);
            self.lru_queue.push_front(hash);

            Some(cached)
        } else {
            None
        }
    }

    fn insert(&mut self, hash: u64, cached: CachedOptimization) {
        // Evict if necessary
        while self.entries.len() >= self.max_size {
            if let Some(oldest_hash) = self.lru_queue.pop_back() {
                self.entries.remove(&oldest_hash);
            } else {
                break;
            }
        }

        self.entries.insert(hash, cached);
        self.lru_queue.push_front(hash);
    }

    fn get_hit_count(&self) -> usize {
        self.entries.values().map(|c| c.access_count).sum()
    }

    fn get_average_optimization_time(&self) -> Duration {
        if self.entries.is_empty() {
            return Duration::ZERO;
        }

        let total_time: Duration = self
            .entries
            .values()
            .map(|c| c.result.optimization_time)
            .sum();

        total_time / self.entries.len() as u32
    }
}

/// Statistics for lazy evaluation
#[derive(Debug, Clone)]
pub struct LazyEvaluationStats {
    pub total_gates: usize,
    pub evaluated_gates: usize,
    pub pending_gates: usize,
    pub cache_hits: usize,
    pub cache_size: usize,
    pub average_optimization_time: Duration,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::gate::single::{Hadamard, PauliX, PauliZ};
    use crate::optimization::OptimizationChain;

    #[test]
    fn test_lazy_pipeline_creation() {
        let config = LazyEvaluationConfig::default();
        let chain = OptimizationChain::new();

        let pipeline = LazyOptimizationPipeline::new(config, chain).unwrap();
        let stats = pipeline.get_statistics();

        assert_eq!(stats.total_gates, 0);
        assert_eq!(stats.evaluated_gates, 0);
    }

    #[test]
    fn test_gate_addition() {
        let config = LazyEvaluationConfig::default();
        let chain = OptimizationChain::new();

        let pipeline = LazyOptimizationPipeline::new(config, chain).unwrap();

        let h_gate = Box::new(Hadamard {
            target: crate::qubit::QubitId(0),
        });
        let gate_id = pipeline.add_gate(h_gate).unwrap();

        assert_eq!(gate_id, 0);

        let stats = pipeline.get_statistics();
        assert_eq!(stats.total_gates, 1);
        assert_eq!(stats.pending_gates, 1);
    }
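
    // A sanity check (sketch) for buffer-size-triggered forced evaluation: with a small
    // `max_buffer_size`, adding more gates than the buffer allows should force at least
    // one early evaluation. The buffer size and qubit choices here are illustrative.
    #[test]
    fn test_forced_evaluation_on_small_buffer() {
        let config = LazyEvaluationConfig {
            max_buffer_size: 2,
            ..Default::default()
        };
        let chain = OptimizationChain::new();
        let pipeline = LazyOptimizationPipeline::new(config, chain).unwrap();

        // Use distinct qubits so the three gates are independent of each other
        pipeline
            .add_gate(Box::new(Hadamard {
                target: crate::qubit::QubitId(0),
            }))
            .unwrap();
        pipeline
            .add_gate(Box::new(PauliX {
                target: crate::qubit::QubitId(1),
            }))
            .unwrap();
        pipeline
            .add_gate(Box::new(PauliZ {
                target: crate::qubit::QubitId(2),
            }))
            .unwrap();

        let stats = pipeline.get_statistics();
        assert_eq!(stats.total_gates, 3);
        assert!(stats.evaluated_gates >= 1);
    }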

    #[test]
    fn test_gate_evaluation() {
        let config = LazyEvaluationConfig::default();
        let chain = OptimizationChain::new();

        let pipeline = LazyOptimizationPipeline::new(config, chain).unwrap();

        let h_gate = Box::new(Hadamard {
            target: crate::qubit::QubitId(0),
        });
        let gate_id = pipeline.add_gate(h_gate).unwrap();

        let result = pipeline.evaluate_gate(gate_id).unwrap();
        assert!(result.optimization_time > Duration::ZERO);

        let stats = pipeline.get_statistics();
        assert_eq!(stats.evaluated_gates, 1);
        assert_eq!(stats.pending_gates, 0);
    }

    #[test]
    fn test_dependency_analysis() {
        let config = LazyEvaluationConfig::default();
        let chain = OptimizationChain::new();

        let pipeline = LazyOptimizationPipeline::new(config, chain).unwrap();

        // Add gates that share qubits
        let h_gate = Box::new(Hadamard {
            target: crate::qubit::QubitId(0),
        });
        let x_gate = Box::new(PauliX {
            target: crate::qubit::QubitId(0),
        });
        let z_gate = Box::new(PauliZ {
            target: crate::qubit::QubitId(1),
        });

        let _h_id = pipeline.add_gate(h_gate).unwrap();
        let _x_id = pipeline.add_gate(x_gate).unwrap();
        let _z_id = pipeline.add_gate(z_gate).unwrap();

        // X gate should depend on H gate (same qubit)
        // Z gate should be independent (different qubit)

        let results = pipeline.evaluate_all().unwrap();
        // Results may be filtered or combined during optimization
        assert!(results.len() <= 3);
    }
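
    // A sanity check (sketch) for the dependency graph itself: the topological order
    // must contain every gate and place dependencies before their dependents.
    #[test]
    fn test_topological_order_respects_dependencies() {
        let mut graph = DependencyGraph::default();
        graph.add_gate(0, HashSet::new());
        graph.add_gate(1, [0].into_iter().collect());
        graph.add_gate(2, [1].into_iter().collect());

        let order = graph.topological_sort();
        assert_eq!(order.len(), 3);

        let pos = |id: usize| order.iter().position(|&g| g == id).unwrap();
        assert!(pos(0) < pos(1));
        assert!(pos(1) < pos(2));
    }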

    #[test]
    fn test_optimization_caching() {
        let config = LazyEvaluationConfig::default();
        let chain = OptimizationChain::new();

        let pipeline = LazyOptimizationPipeline::new(config, chain).unwrap();

        let h_gate = Box::new(Hadamard {
            target: crate::qubit::QubitId(0),
        });
        let gate_id = pipeline.add_gate(h_gate).unwrap();

        // First evaluation
        let result1 = pipeline.evaluate_gate(gate_id).unwrap();

        // Second evaluation should use cache
        let result2 = pipeline.evaluate_gate(gate_id).unwrap();

        // Results should be identical
        assert_eq!(result1.stats.gates_before, result2.stats.gates_before);
        assert_eq!(result1.stats.gates_after, result2.stats.gates_after);
    }
828}