Skip to main content

quantrs2_sim/tensor/
types.rs

1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use crate::adaptive_gate_fusion::QuantumGate;
6use crate::error::{Result, SimulatorError};
7use crate::scirs2_integration::SciRS2Backend;
8use quantrs2_circuit::prelude::*;
9use quantrs2_core::prelude::*;
10use scirs2_core::ndarray::{Array1, Array2, Array3, ArrayView1, ArrayView2, Axis};
11use scirs2_core::Complex64;
12use std::collections::{HashMap, HashSet};
13
14use super::functions::{
15    cnot_matrix, cz_gate, pauli_h, pauli_x, pauli_y, pauli_z, rotation_x, rotation_y, rotation_z,
16    s_gate, swap_gate, t_gate,
17};
18
/// Index of a tensor with dimension information.
///
/// Two indices with the same `id` are treated as the same logical leg when
/// tensors are contracted (see `TensorNetwork::contract_tensor_pair`).
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct TensorIndex {
    /// Unique identifier for this index; id equality is what links two
    /// tensor legs together during contraction.
    pub id: usize,
    /// Physical dimension of this index (e.g. 2 for a qubit leg).
    pub dimension: usize,
    /// Type of index (physical qubit, virtual bond, etc.)
    pub index_type: IndexType,
}
/// Namespace for advanced tensor-contraction algorithms (HOTQR, tree
/// contraction, MPS decomposition); carries no state of its own.
pub struct AdvancedContractionAlgorithms;
31impl AdvancedContractionAlgorithms {
32    /// Implement the HOTQR (Higher Order Tensor QR) decomposition
33    pub fn hotqr_decomposition(tensor: &Tensor) -> Result<(Tensor, Tensor)> {
34        let mut id_gen = 1000;
35        let q_data = Array3::from_shape_fn((2, 2, 1), |(i, j, _)| {
36            if i == j {
37                Complex64::new(1.0, 0.0)
38            } else {
39                Complex64::new(0.0, 0.0)
40            }
41        });
42        let r_data = Array3::from_shape_fn((2, 2, 1), |(i, j, _)| {
43            if i == j {
44                Complex64::new(1.0, 0.0)
45            } else {
46                Complex64::new(0.0, 0.0)
47            }
48        });
49        let q_indices = vec![
50            TensorIndex {
51                id: id_gen,
52                dimension: 2,
53                index_type: IndexType::Virtual,
54            },
55            TensorIndex {
56                id: id_gen + 1,
57                dimension: 2,
58                index_type: IndexType::Virtual,
59            },
60        ];
61        id_gen += 2;
62        let r_indices = vec![
63            TensorIndex {
64                id: id_gen,
65                dimension: 2,
66                index_type: IndexType::Virtual,
67            },
68            TensorIndex {
69                id: id_gen + 1,
70                dimension: 2,
71                index_type: IndexType::Virtual,
72            },
73        ];
74        let q_tensor = Tensor::new(q_data, q_indices, "Q".to_string());
75        let r_tensor = Tensor::new(r_data, r_indices, "R".to_string());
76        Ok((q_tensor, r_tensor))
77    }
78    /// Implement Tree Tensor Network contraction
79    pub fn tree_contraction(tensors: &[Tensor]) -> Result<Complex64> {
80        if tensors.is_empty() {
81            return Ok(Complex64::new(1.0, 0.0));
82        }
83        if tensors.len() == 1 {
84            return Ok(tensors[0].data[[0, 0, 0]]);
85        }
86        let mut current_level = tensors.to_vec();
87        while current_level.len() > 1 {
88            let mut next_level = Vec::new();
89            for chunk in current_level.chunks(2) {
90                if chunk.len() == 2 {
91                    let contracted = chunk[0].contract(&chunk[1], 0, 0)?;
92                    next_level.push(contracted);
93                } else {
94                    next_level.push(chunk[0].clone());
95                }
96            }
97            current_level = next_level;
98        }
99        Ok(current_level[0].data[[0, 0, 0]])
100    }
101    /// Implement Matrix Product State (MPS) decomposition
102    pub fn mps_decomposition(tensor: &Tensor, max_bond_dim: usize) -> Result<Vec<Tensor>> {
103        let mut mps_tensors = Vec::new();
104        let mut id_gen = 2000;
105        for i in 0..tensor.indices.len().min(4) {
106            let bond_dim = max_bond_dim.min(4);
107            let data = Array3::zeros((2, bond_dim, 1));
108            let mut mps_data = data;
109            mps_data[[0, 0, 0]] = Complex64::new(1.0, 0.0);
110            if bond_dim > 1 {
111                mps_data[[1, 1, 0]] = Complex64::new(1.0, 0.0);
112            }
113            let indices = vec![
114                TensorIndex {
115                    id: id_gen,
116                    dimension: 2,
117                    index_type: IndexType::Physical(i),
118                },
119                TensorIndex {
120                    id: id_gen + 1,
121                    dimension: bond_dim,
122                    index_type: IndexType::Virtual,
123                },
124            ];
125            id_gen += 2;
126            let mps_tensor = Tensor::new(mps_data, indices, format!("MPS_{i}"));
127            mps_tensors.push(mps_tensor);
128        }
129        Ok(mps_tensors)
130    }
131}
/// Tensor network representation of a quantum circuit.
#[derive(Debug, Clone)]
pub struct TensorNetwork {
    /// Collection of tensors in the network, keyed by the id handed out by
    /// `add_tensor`.
    pub tensors: HashMap<usize, Tensor>,
    /// Connections between tensor indices (pairs of equal-dimension legs,
    /// validated by `connect`).
    pub connections: Vec<(TensorIndex, TensorIndex)>,
    /// Number of physical qubits
    pub num_qubits: usize,
    /// Next available tensor ID (monotonically increasing, never reused)
    next_tensor_id: usize,
    /// Next available index ID
    next_index_id: usize,
    /// Maximum bond dimension for approximations
    pub max_bond_dimension: usize,
    /// Detected circuit type for optimization
    pub detected_circuit_type: CircuitType,
    /// Whether QFT optimization is enabled
    pub using_qft_optimization: bool,
    /// Whether QAOA optimization is enabled
    pub using_qaoa_optimization: bool,
    /// Whether linear optimization is enabled
    pub using_linear_optimization: bool,
    /// Whether star optimization is enabled
    pub using_star_optimization: bool,
}
158impl TensorNetwork {
159    /// Create a new empty tensor network
160    #[must_use]
161    pub fn new(num_qubits: usize) -> Self {
162        Self {
163            tensors: HashMap::new(),
164            connections: Vec::new(),
165            num_qubits,
166            next_tensor_id: 0,
167            next_index_id: 0,
168            max_bond_dimension: 16,
169            detected_circuit_type: CircuitType::General,
170            using_qft_optimization: false,
171            using_qaoa_optimization: false,
172            using_linear_optimization: false,
173            using_star_optimization: false,
174        }
175    }
176    /// Add a tensor to the network
177    pub fn add_tensor(&mut self, tensor: Tensor) -> usize {
178        let id = self.next_tensor_id;
179        self.tensors.insert(id, tensor);
180        self.next_tensor_id += 1;
181        id
182    }
183    /// Connect two tensor indices
184    pub fn connect(&mut self, idx1: TensorIndex, idx2: TensorIndex) -> Result<()> {
185        if idx1.dimension != idx2.dimension {
186            return Err(SimulatorError::DimensionMismatch(format!(
187                "Cannot connect indices with different dimensions: {} vs {}",
188                idx1.dimension, idx2.dimension
189            )));
190        }
191        self.connections.push((idx1, idx2));
192        Ok(())
193    }
194    /// Get all tensors connected to the given tensor
195    #[must_use]
196    pub fn get_neighbors(&self, tensor_id: usize) -> Vec<usize> {
197        let mut neighbors = HashSet::new();
198        if let Some(tensor) = self.tensors.get(&tensor_id) {
199            for connection in &self.connections {
200                let tensor_indices: HashSet<_> = tensor.indices.iter().map(|idx| idx.id).collect();
201                if tensor_indices.contains(&connection.0.id)
202                    || tensor_indices.contains(&connection.1.id)
203                {
204                    for (other_id, other_tensor) in &self.tensors {
205                        if *other_id != tensor_id {
206                            let other_indices: HashSet<_> =
207                                other_tensor.indices.iter().map(|idx| idx.id).collect();
208                            if other_indices.contains(&connection.0.id)
209                                || other_indices.contains(&connection.1.id)
210                            {
211                                neighbors.insert(*other_id);
212                            }
213                        }
214                    }
215                }
216            }
217        }
218        neighbors.into_iter().collect()
219    }
220    /// Contract all tensors to compute the final amplitude
221    pub fn contract_all(&self) -> Result<Complex64> {
222        if self.tensors.is_empty() {
223            return Ok(Complex64::new(1.0, 0.0));
224        }
225        if self.tensors.is_empty() {
226            return Ok(Complex64::new(1.0, 0.0));
227        }
228        let contraction_order = self.find_optimal_contraction_order()?;
229        let mut current_tensors: Vec<_> = self.tensors.values().cloned().collect();
230        while current_tensors.len() > 1 {
231            let (i, j, _cost) = self.find_lowest_cost_pair(&current_tensors)?;
232            let contracted = self.contract_tensor_pair(&current_tensors[i], &current_tensors[j])?;
233            let mut new_tensors = Vec::new();
234            for (idx, tensor) in current_tensors.iter().enumerate() {
235                if idx != i && idx != j {
236                    new_tensors.push(tensor.clone());
237                }
238            }
239            new_tensors.push(contracted);
240            current_tensors = new_tensors;
241        }
242        if let Some(final_tensor) = current_tensors.into_iter().next() {
243            if final_tensor.data.is_empty() {
244                Ok(Complex64::new(1.0, 0.0))
245            } else {
246                Ok(final_tensor.data[[0, 0, 0]])
247            }
248        } else {
249            Ok(Complex64::new(1.0, 0.0))
250        }
251    }
252    /// Get the total number of elements across all tensors
253    #[must_use]
254    pub fn total_elements(&self) -> usize {
255        self.tensors.values().map(Tensor::size).sum()
256    }
257    /// Estimate memory usage in bytes
258    #[must_use]
259    pub fn memory_usage(&self) -> usize {
260        self.total_elements() * std::mem::size_of::<Complex64>()
261    }
    /// Find optimal contraction order using dynamic programming
    ///
    /// NOTE(review): despite the name this is a greedy pairwise heuristic,
    /// not true dynamic programming — at each step it picks the cheapest
    /// remaining pair by `estimate_contraction_cost` and records both
    /// positions in the returned order.
    ///
    /// # Errors
    /// Currently never returns an error (every path yields `Ok`).
    pub fn find_optimal_contraction_order(&self) -> Result<Vec<usize>> {
        let tensor_ids: Vec<usize> = self.tensors.keys().copied().collect();
        if tensor_ids.len() <= 2 {
            return Ok(tensor_ids);
        }
        let mut order = Vec::new();
        let mut remaining = tensor_ids;
        while remaining.len() > 1 {
            let mut min_cost = f64::INFINITY;
            let mut best_pair = (0, 1);
            // Exhaustive O(n^2) scan over remaining candidate pairs.
            for i in 0..remaining.len() {
                for j in i + 1..remaining.len() {
                    if let (Some(tensor_a), Some(tensor_b)) = (
                        self.tensors.get(&remaining[i]),
                        self.tensors.get(&remaining[j]),
                    ) {
                        let cost = self.estimate_contraction_cost(tensor_a, tensor_b);
                        if cost < min_cost {
                            min_cost = cost;
                            best_pair = (i, j);
                        }
                    }
                }
            }
            // Record positions (not tensor ids) of the chosen pair, then drop
            // them; removing the larger index first keeps the smaller valid.
            order.push(best_pair.0);
            order.push(best_pair.1);
            remaining.remove(best_pair.1);
            remaining.remove(best_pair.0);
            if !remaining.is_empty() {
                // NOTE(review): this pushes a synthetic id
                // (`next_tensor_id + order.len()`) that is not a key in
                // `self.tensors`, so later cost lookups for it silently fail
                // the `if let` above — verify this is the intended stand-in
                // for the merged tensor.
                remaining.push(self.next_tensor_id + order.len());
            }
        }
        Ok(order)
    }
297    /// Find the pair of tensors with lowest contraction cost
298    pub fn find_lowest_cost_pair(&self, tensors: &[Tensor]) -> Result<(usize, usize, f64)> {
299        if tensors.len() < 2 {
300            return Err(SimulatorError::InvalidInput(
301                "Need at least 2 tensors to find contraction pair".to_string(),
302            ));
303        }
304        let mut min_cost = f64::INFINITY;
305        let mut best_pair = (0, 1);
306        for i in 0..tensors.len() {
307            for j in i + 1..tensors.len() {
308                let cost = self.estimate_contraction_cost(&tensors[i], &tensors[j]);
309                if cost < min_cost {
310                    min_cost = cost;
311                    best_pair = (i, j);
312                }
313            }
314        }
315        Ok((best_pair.0, best_pair.1, min_cost))
316    }
317    /// Estimate the computational cost of contracting two tensors
318    #[must_use]
319    pub fn estimate_contraction_cost(&self, tensor_a: &Tensor, tensor_b: &Tensor) -> f64 {
320        let size_a = tensor_a.size() as f64;
321        let size_b = tensor_b.size() as f64;
322        let mut common_dim_product = 1.0;
323        for idx_a in &tensor_a.indices {
324            for idx_b in &tensor_b.indices {
325                if idx_a.id == idx_b.id {
326                    common_dim_product *= idx_a.dimension as f64;
327                }
328            }
329        }
330        size_a * size_b / common_dim_product.max(1.0)
331    }
332    /// Contract two tensors optimally
333    pub fn contract_tensor_pair(&self, tensor_a: &Tensor, tensor_b: &Tensor) -> Result<Tensor> {
334        let mut contraction_pairs = Vec::new();
335        for (i, idx_a) in tensor_a.indices.iter().enumerate() {
336            for (j, idx_b) in tensor_b.indices.iter().enumerate() {
337                if idx_a.id == idx_b.id {
338                    contraction_pairs.push((i, j));
339                    break;
340                }
341            }
342        }
343        if contraction_pairs.is_empty() {
344            return self.tensor_outer_product(tensor_a, tensor_b);
345        }
346        let (self_idx, other_idx) = contraction_pairs[0];
347        tensor_a.contract(tensor_b, self_idx, other_idx)
348    }
349    /// Compute outer product of two tensors
350    pub(super) fn tensor_outer_product(
351        &self,
352        tensor_a: &Tensor,
353        tensor_b: &Tensor,
354    ) -> Result<Tensor> {
355        let mut result_indices = tensor_a.indices.clone();
356        result_indices.extend(tensor_b.indices.clone());
357        let result_shape = (
358            tensor_a.data.shape()[0].max(tensor_b.data.shape()[0]),
359            tensor_a.data.shape()[1].max(tensor_b.data.shape()[1]),
360            1,
361        );
362        let mut result_data = Array3::zeros(result_shape);
363        for i in 0..result_shape.0 {
364            for j in 0..result_shape.1 {
365                let a_val = if i < tensor_a.data.shape()[0] && j < tensor_a.data.shape()[1] {
366                    tensor_a.data[[i, j, 0]]
367                } else {
368                    Complex64::new(0.0, 0.0)
369                };
370                let b_val = if i < tensor_b.data.shape()[0] && j < tensor_b.data.shape()[1] {
371                    tensor_b.data[[i, j, 0]]
372                } else {
373                    Complex64::new(0.0, 0.0)
374                };
375                result_data[[i, j, 0]] = a_val * b_val;
376            }
377        }
378        Ok(Tensor::new(
379            result_data,
380            result_indices,
381            format!("{}_outer_{}", tensor_a.label, tensor_b.label),
382        ))
383    }
    /// Set boundary conditions for a specific computational basis state
    ///
    /// `basis_state` is interpreted bitwise: bit `qubit` selects |0⟩ or |1⟩
    /// for that qubit.
    ///
    /// NOTE(review): for every tensor carrying a matching physical index this
    /// writes only element `[0, 0, 0]` (1 for bit 0, 0 for bit 1) regardless
    /// of which axis the physical index occupies — verify this is the
    /// intended boundary projection rather than a per-axis slice (contrast
    /// with `set_tensor_boundary`).
    ///
    /// # Errors
    /// Currently always returns `Ok(())`.
    pub fn set_basis_state_boundary(&mut self, basis_state: usize) -> Result<()> {
        for qubit in 0..self.num_qubits {
            // Extract this qubit's bit from the basis-state bitmask.
            let qubit_value = (basis_state >> qubit) & 1;
            for tensor in self.tensors.values_mut() {
                for (idx_pos, idx) in tensor.indices.iter().enumerate() {
                    if let IndexType::Physical(qubit_id) = idx.index_type {
                        // `idx_pos < shape().len()` guards against tensors
                        // with more indices than data axes.
                        if qubit_id == qubit && idx_pos < tensor.data.shape().len() {
                            let mut slice = tensor.data.view_mut();
                            if let Some(elem) = slice.get_mut([0, 0, 0]) {
                                *elem = if qubit_value == 0 {
                                    Complex64::new(1.0, 0.0)
                                } else {
                                    Complex64::new(0.0, 0.0)
                                };
                            }
                        }
                    }
                }
            }
        }
        Ok(())
    }
407    /// Set boundary condition for a specific tensor index
408    pub(super) fn set_tensor_boundary(
409        &self,
410        tensor: &mut Tensor,
411        idx_pos: usize,
412        value: usize,
413    ) -> Result<()> {
414        let tensor_shape = tensor.data.shape();
415        if value >= tensor_shape[idx_pos.min(tensor_shape.len() - 1)] {
416            return Ok(());
417        }
418        let mut new_data = Array3::zeros((tensor_shape[0], tensor_shape[1], tensor_shape[2]));
419        match idx_pos {
420            0 => {
421                for j in 0..tensor_shape[1] {
422                    for k in 0..tensor_shape[2] {
423                        if value < tensor_shape[0] {
424                            new_data[[0, j, k]] = tensor.data[[value, j, k]];
425                        }
426                    }
427                }
428            }
429            1 => {
430                for i in 0..tensor_shape[0] {
431                    for k in 0..tensor_shape[2] {
432                        if value < tensor_shape[1] {
433                            new_data[[i, 0, k]] = tensor.data[[i, value, k]];
434                        }
435                    }
436                }
437            }
438            _ => {
439                for i in 0..tensor_shape[0] {
440                    for j in 0..tensor_shape[1] {
441                        if value < tensor_shape[2] {
442                            new_data[[i, j, 0]] = tensor.data[[i, j, value]];
443                        }
444                    }
445                }
446            }
447        }
448        tensor.data = new_data;
449        Ok(())
450    }
451    /// Apply a single-qubit gate to the tensor network
452    pub fn apply_gate(&mut self, gate_tensor: Tensor, target_qubit: usize) -> Result<()> {
453        if target_qubit >= self.num_qubits {
454            return Err(SimulatorError::InvalidInput(format!(
455                "Target qubit {} is out of range for {} qubits",
456                target_qubit, self.num_qubits
457            )));
458        }
459        let gate_id = self.add_tensor(gate_tensor);
460        let mut qubit_tensor_id = None;
461        for (id, tensor) in &self.tensors {
462            if tensor.label == format!("qubit_{target_qubit}") {
463                qubit_tensor_id = Some(*id);
464                break;
465            }
466        }
467        if qubit_tensor_id.is_none() {
468            let qubit_state = Tensor::identity(target_qubit, &mut self.next_index_id);
469            let state_id = self.add_tensor(qubit_state);
470            qubit_tensor_id = Some(state_id);
471        }
472        Ok(())
473    }
474    /// Apply a two-qubit gate to the tensor network
475    pub fn apply_two_qubit_gate(
476        &mut self,
477        gate_tensor: Tensor,
478        control_qubit: usize,
479        target_qubit: usize,
480    ) -> Result<()> {
481        if control_qubit >= self.num_qubits || target_qubit >= self.num_qubits {
482            return Err(SimulatorError::InvalidInput(format!(
483                "Qubit indices {}, {} are out of range for {} qubits",
484                control_qubit, target_qubit, self.num_qubits
485            )));
486        }
487        if control_qubit == target_qubit {
488            return Err(SimulatorError::InvalidInput(
489                "Control and target qubits must be different".to_string(),
490            ));
491        }
492        let gate_id = self.add_tensor(gate_tensor);
493        for &qubit in &[control_qubit, target_qubit] {
494            let mut qubit_exists = false;
495            for tensor in self.tensors.values() {
496                if tensor.label == format!("qubit_{qubit}") {
497                    qubit_exists = true;
498                    break;
499                }
500            }
501            if !qubit_exists {
502                let qubit_state = Tensor::identity(qubit, &mut self.next_index_id);
503                self.add_tensor(qubit_state);
504            }
505        }
506        Ok(())
507    }
508}
/// Contraction strategy for tensor networks
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ContractionStrategy {
    /// Contract from left to right
    Sequential,
    /// Use optimal contraction order
    Optimal,
    /// Greedy contraction based on cost
    Greedy,
    /// Custom user-defined order; entries are positions into the simulator's
    /// working tensor list at contraction time.
    Custom(Vec<usize>),
}
/// Statistics for tensor network simulation
///
/// Counters accumulate across repeated `TensorNetworkSimulator::contract`
/// calls; use `Default` for a zeroed instance.
#[derive(Debug, Clone, Default)]
pub struct TensorNetworkStats {
    /// Number of tensor contractions performed
    pub contractions: usize,
    /// Total contraction time in milliseconds
    pub contraction_time_ms: f64,
    /// Maximum bond dimension encountered
    pub max_bond_dimension: usize,
    /// Total memory usage in bytes
    pub memory_usage: usize,
    /// Contraction FLOP count
    pub flop_count: u64,
}
/// Tensor network simulator
#[derive(Debug)]
pub struct TensorNetworkSimulator {
    /// Current tensor network
    pub(crate) network: TensorNetwork,
    /// `SciRS2` backend for optimizations; `None` until `with_backend` is
    /// called.
    backend: Option<SciRS2Backend>,
    /// Contraction strategy
    strategy: ContractionStrategy,
    /// Maximum bond dimension for approximations
    max_bond_dim: usize,
    /// Simulation statistics (updated by `contract`)
    stats: TensorNetworkStats,
}
549impl TensorNetworkSimulator {
550    /// Create a new tensor network simulator
551    #[must_use]
552    pub fn new(num_qubits: usize) -> Self {
553        Self {
554            network: TensorNetwork::new(num_qubits),
555            backend: None,
556            strategy: ContractionStrategy::Greedy,
557            max_bond_dim: 256,
558            stats: TensorNetworkStats::default(),
559        }
560    }
561    /// Initialize with `SciRS2` backend
562    #[must_use]
563    pub fn with_backend(mut self) -> Result<Self> {
564        self.backend = Some(SciRS2Backend::new());
565        Ok(self)
566    }
567    /// Set contraction strategy
568    #[must_use]
569    pub fn with_strategy(mut self, strategy: ContractionStrategy) -> Self {
570        self.strategy = strategy;
571        self
572    }
573    /// Set maximum bond dimension
574    #[must_use]
575    pub const fn with_max_bond_dim(mut self, max_bond_dim: usize) -> Self {
576        self.max_bond_dim = max_bond_dim;
577        self
578    }
579    /// Create tensor network simulator optimized for QFT circuits
580    #[must_use]
581    pub fn qft() -> Self {
582        Self::new(5).with_strategy(ContractionStrategy::Greedy)
583    }
584    /// Initialize |0...0⟩ state
585    pub fn initialize_zero_state(&mut self) -> Result<()> {
586        self.network = TensorNetwork::new(self.network.num_qubits);
587        for qubit in 0..self.network.num_qubits {
588            let tensor = Tensor::identity(qubit, &mut self.network.next_index_id);
589            self.network.add_tensor(tensor);
590        }
591        Ok(())
592    }
593    /// Apply quantum gate to the tensor network
594    pub fn apply_gate(&mut self, gate: QuantumGate) -> Result<()> {
595        match &gate.gate_type {
596            crate::adaptive_gate_fusion::GateType::Hadamard => {
597                if gate.qubits.len() == 1 {
598                    self.apply_single_qubit_gate(&pauli_h(), gate.qubits[0])
599                } else {
600                    Err(SimulatorError::InvalidInput(
601                        "Hadamard gate requires exactly 1 qubit".to_string(),
602                    ))
603                }
604            }
605            crate::adaptive_gate_fusion::GateType::PauliX => {
606                if gate.qubits.len() == 1 {
607                    self.apply_single_qubit_gate(&pauli_x(), gate.qubits[0])
608                } else {
609                    Err(SimulatorError::InvalidInput(
610                        "Pauli-X gate requires exactly 1 qubit".to_string(),
611                    ))
612                }
613            }
614            crate::adaptive_gate_fusion::GateType::PauliY => {
615                if gate.qubits.len() == 1 {
616                    self.apply_single_qubit_gate(&pauli_y(), gate.qubits[0])
617                } else {
618                    Err(SimulatorError::InvalidInput(
619                        "Pauli-Y gate requires exactly 1 qubit".to_string(),
620                    ))
621                }
622            }
623            crate::adaptive_gate_fusion::GateType::PauliZ => {
624                if gate.qubits.len() == 1 {
625                    self.apply_single_qubit_gate(&pauli_z(), gate.qubits[0])
626                } else {
627                    Err(SimulatorError::InvalidInput(
628                        "Pauli-Z gate requires exactly 1 qubit".to_string(),
629                    ))
630                }
631            }
632            crate::adaptive_gate_fusion::GateType::CNOT => {
633                if gate.qubits.len() == 2 {
634                    self.apply_two_qubit_gate(&cnot_matrix(), gate.qubits[0], gate.qubits[1])
635                } else {
636                    Err(SimulatorError::InvalidInput(
637                        "CNOT gate requires exactly 2 qubits".to_string(),
638                    ))
639                }
640            }
641            crate::adaptive_gate_fusion::GateType::RotationX => {
642                if gate.qubits.len() == 1 && !gate.parameters.is_empty() {
643                    self.apply_single_qubit_gate(&rotation_x(gate.parameters[0]), gate.qubits[0])
644                } else {
645                    Err(SimulatorError::InvalidInput(
646                        "RX gate requires 1 qubit and 1 parameter".to_string(),
647                    ))
648                }
649            }
650            crate::adaptive_gate_fusion::GateType::RotationY => {
651                if gate.qubits.len() == 1 && !gate.parameters.is_empty() {
652                    self.apply_single_qubit_gate(&rotation_y(gate.parameters[0]), gate.qubits[0])
653                } else {
654                    Err(SimulatorError::InvalidInput(
655                        "RY gate requires 1 qubit and 1 parameter".to_string(),
656                    ))
657                }
658            }
659            crate::adaptive_gate_fusion::GateType::RotationZ => {
660                if gate.qubits.len() == 1 && !gate.parameters.is_empty() {
661                    self.apply_single_qubit_gate(&rotation_z(gate.parameters[0]), gate.qubits[0])
662                } else {
663                    Err(SimulatorError::InvalidInput(
664                        "RZ gate requires 1 qubit and 1 parameter".to_string(),
665                    ))
666                }
667            }
668            _ => Err(SimulatorError::UnsupportedOperation(format!(
669                "Gate {:?} not yet supported in tensor network simulator",
670                gate.gate_type
671            ))),
672        }
673    }
674    /// Apply single-qubit gate
675    pub(super) fn apply_single_qubit_gate(
676        &mut self,
677        matrix: &Array2<Complex64>,
678        qubit: usize,
679    ) -> Result<()> {
680        let gate_tensor = Tensor::from_gate(matrix, &[qubit], &mut self.network.next_index_id)?;
681        self.network.add_tensor(gate_tensor);
682        Ok(())
683    }
684    /// Apply two-qubit gate
685    pub(super) fn apply_two_qubit_gate(
686        &mut self,
687        matrix: &Array2<Complex64>,
688        control: usize,
689        target: usize,
690    ) -> Result<()> {
691        let gate_tensor =
692            Tensor::from_gate(matrix, &[control, target], &mut self.network.next_index_id)?;
693        self.network.add_tensor(gate_tensor);
694        Ok(())
695    }
696    /// Measure a qubit in the computational basis
697    pub fn measure(&mut self, qubit: usize) -> Result<bool> {
698        let prob_0 = self.get_probability_amplitude(&[false])?;
699        let random_val: f64 = fastrand::f64();
700        Ok(random_val < prob_0.norm())
701    }
    /// Get probability amplitude for a computational basis state
    ///
    /// `state[i]` is the measured value of qubit `i` (`false` = |0⟩).
    ///
    /// NOTE(review): placeholder — after validating the state length this
    /// always returns 1/√2 regardless of `state` or the network contents;
    /// the real amplitude should come from contracting the network.
    ///
    /// # Errors
    /// Returns `SimulatorError::DimensionMismatch` when `state.len()` differs
    /// from the number of qubits.
    pub fn get_probability_amplitude(&self, state: &[bool]) -> Result<Complex64> {
        if state.len() != self.network.num_qubits {
            return Err(SimulatorError::DimensionMismatch(format!(
                "State length mismatch: expected {}, got {}",
                self.network.num_qubits,
                state.len()
            )));
        }
        Ok(Complex64::new(1.0 / (2.0_f64.sqrt()), 0.0))
    }
713    /// Get all probability amplitudes
714    pub fn get_state_vector(&self) -> Result<Array1<Complex64>> {
715        let size = 1 << self.network.num_qubits;
716        let mut amplitudes = Array1::zeros(size);
717        let result = self.contract_network_to_state_vector()?;
718        amplitudes.assign(&result);
719        Ok(amplitudes)
720    }
721    /// Contract the tensor network using the specified strategy
722    pub fn contract(&mut self) -> Result<Complex64> {
723        let start_time = std::time::Instant::now();
724        let result = match &self.strategy {
725            ContractionStrategy::Sequential => self.contract_sequential(),
726            ContractionStrategy::Optimal => self.contract_optimal(),
727            ContractionStrategy::Greedy => self.contract_greedy(),
728            ContractionStrategy::Custom(order) => self.contract_custom(order),
729        }?;
730        self.stats.contraction_time_ms += start_time.elapsed().as_secs_f64() * 1000.0;
731        self.stats.contractions += 1;
732        Ok(result)
733    }
    /// Sequential strategy: delegates to the network's own `contract_all`
    /// (which itself picks pairs greedily via `find_lowest_cost_pair`, so
    /// the "sequential" name describes the strategy label, not the order).
    pub(super) fn contract_sequential(&self) -> Result<Complex64> {
        self.network.contract_all()
    }
    /// "Optimal" strategy contraction.
    ///
    /// NOTE(review): the order returned by `find_optimal_contraction_order`
    /// is only used for its length — each step contracts the first two
    /// tensors in the working list — and the final scalar is the *mean* of
    /// the final tensor's elements rather than a specific amplitude. Compare
    /// with `contract_greedy`, which reads element `[0, 0, 0]`; verify which
    /// behavior is intended.
    ///
    /// # Errors
    /// Propagates failures from ordering or pairwise contraction.
    pub(super) fn contract_optimal(&self) -> Result<Complex64> {
        let mut network_copy = self.network.clone();
        let optimal_order = network_copy.find_optimal_contraction_order()?;
        let mut result = Complex64::new(1.0, 0.0);
        let mut remaining_tensors: Vec<_> = network_copy.tensors.values().cloned().collect();
        // `pair_idx` is never consulted — the loop only runs once per entry.
        for &pair_idx in &optimal_order {
            if remaining_tensors.len() >= 2 {
                let tensor_a = remaining_tensors.remove(0);
                let tensor_b = remaining_tensors.remove(0);
                let contracted = network_copy.contract_tensor_pair(&tensor_a, &tensor_b)?;
                remaining_tensors.push(contracted);
            }
        }
        if let Some(final_tensor) = remaining_tensors.into_iter().next() {
            if !final_tensor.data.is_empty() {
                // Mean over all elements of the final tensor.
                result = final_tensor.data.iter().copied().sum::<Complex64>()
                    / (final_tensor.data.len() as f64);
            }
        }
        Ok(result)
    }
758    pub(super) fn contract_greedy(&self) -> Result<Complex64> {
759        let mut network_copy = self.network.clone();
760        let mut current_tensors: Vec<_> = network_copy.tensors.values().cloned().collect();
761        while current_tensors.len() > 1 {
762            let mut best_cost = f64::INFINITY;
763            let mut best_pair = (0, 1);
764            for i in 0..current_tensors.len() {
765                for j in i + 1..current_tensors.len() {
766                    let cost = network_copy
767                        .estimate_contraction_cost(&current_tensors[i], &current_tensors[j]);
768                    if cost < best_cost {
769                        best_cost = cost;
770                        best_pair = (i, j);
771                    }
772                }
773            }
774            let (i, j) = best_pair;
775            let contracted =
776                network_copy.contract_tensor_pair(&current_tensors[i], &current_tensors[j])?;
777            let mut new_tensors = Vec::new();
778            for (idx, tensor) in current_tensors.iter().enumerate() {
779                if idx != i && idx != j {
780                    new_tensors.push(tensor.clone());
781                }
782            }
783            new_tensors.push(contracted);
784            current_tensors = new_tensors;
785        }
786        if let Some(final_tensor) = current_tensors.into_iter().next() {
787            if final_tensor.data.is_empty() {
788                Ok(Complex64::new(1.0, 0.0))
789            } else {
790                Ok(final_tensor.data[[0, 0, 0]])
791            }
792        } else {
793            Ok(Complex64::new(1.0, 0.0))
794        }
795    }
    /// Contract the network following a caller-supplied order of tensor
    /// positions.
    ///
    /// Each entry of `order` names a tensor position that is contracted with
    /// its successor (wrapping to position 0 at the end of the list); any
    /// leftovers are then contracted pairwise front-to-back. Returns the
    /// `[0, 0, 0]` element of the final tensor, or `1.0` for an empty
    /// network / dataless result.
    pub(super) fn contract_custom(&self, order: &[usize]) -> Result<Complex64> {
        let mut network_copy = self.network.clone();
        let mut current_tensors: Vec<_> = network_copy.tensors.values().cloned().collect();
        for &tensor_id in order {
            // Skip stale entries once tensors have been consumed.
            if tensor_id < current_tensors.len() && current_tensors.len() > 1 {
                // Partner is the next position, wrapping to the front.
                let next_idx = if tensor_id + 1 < current_tensors.len() {
                    tensor_id + 1
                } else {
                    0
                };
                // Remove the lower position first so the higher one is still
                // valid, then shift the second removal index down by one to
                // compensate for the first removal.
                let tensor_a = current_tensors.remove(tensor_id.min(next_idx));
                let tensor_b = current_tensors.remove(if tensor_id < next_idx {
                    next_idx - 1
                } else {
                    tensor_id - 1
                });
                let contracted = network_copy.contract_tensor_pair(&tensor_a, &tensor_b)?;
                current_tensors.push(contracted);
            }
        }
        // Fold any remaining tensors pairwise until one is left.
        while current_tensors.len() > 1 {
            let tensor_a = current_tensors.remove(0);
            let tensor_b = current_tensors.remove(0);
            let contracted = network_copy.contract_tensor_pair(&tensor_a, &tensor_b)?;
            current_tensors.push(contracted);
        }
        if let Some(final_tensor) = current_tensors.into_iter().next() {
            if final_tensor.data.is_empty() {
                Ok(Complex64::new(1.0, 0.0))
            } else {
                Ok(final_tensor.data[[0, 0, 0]])
            }
        } else {
            Ok(Complex64::new(1.0, 0.0))
        }
    }
    /// Get simulation statistics
    ///
    /// Returns a shared reference to the accumulated contraction counters
    /// and timings; use `reset_stats` to clear them.
    #[must_use]
    pub const fn get_stats(&self) -> &TensorNetworkStats {
        &self.stats
    }
837    /// Contract the tensor network to obtain the full quantum state vector
838    pub fn contract_network_to_state_vector(&self) -> Result<Array1<Complex64>> {
839        let size = 1 << self.network.num_qubits;
840        let mut amplitudes = Array1::zeros(size);
841        if self.network.tensors.is_empty() {
842            amplitudes[0] = Complex64::new(1.0, 0.0);
843            return Ok(amplitudes);
844        }
845        for basis_state in 0..size {
846            let mut network_copy = self.network.clone();
847            network_copy.set_basis_state_boundary(basis_state)?;
848            let amplitude = network_copy.contract_all()?;
849            amplitudes[basis_state] = amplitude;
850        }
851        Ok(amplitudes)
852    }
853    /// Reset statistics
854    pub fn reset_stats(&mut self) {
855        self.stats = TensorNetworkStats::default();
856    }
857    /// Estimate contraction cost for current network
858    #[must_use]
859    pub fn estimate_contraction_cost(&self) -> u64 {
860        let num_tensors = self.network.tensors.len() as u64;
861        let avg_tensor_size = self.network.total_elements() as u64 / num_tensors.max(1);
862        num_tensors * avg_tensor_size * avg_tensor_size
863    }
864    /// Contract the tensor network to a state vector with specific size
865    pub(super) fn contract_to_state_vector<const N: usize>(&self) -> Result<Vec<Complex64>> {
866        let state_array = self.contract_network_to_state_vector()?;
867        let expected_size = 1 << N;
868        if state_array.len() != expected_size {
869            return Err(SimulatorError::DimensionMismatch(format!(
870                "Contracted state vector has size {}, expected {}",
871                state_array.len(),
872                expected_size
873            )));
874        }
875        Ok(state_array.to_vec())
876    }
877    /// Apply a circuit gate to the tensor network
878    pub(super) fn apply_circuit_gate(
879        &mut self,
880        gate: &dyn quantrs2_core::gate::GateOp,
881    ) -> Result<()> {
882        use quantrs2_core::gate::GateOp;
883        let qubits = gate.qubits();
884        let gate_name = format!("{gate:?}");
885        if gate_name.contains("Hadamard") || gate_name.contains('H') {
886            if qubits.len() == 1 {
887                self.apply_single_qubit_gate(&pauli_h(), qubits[0].0 as usize)
888            } else {
889                Err(SimulatorError::InvalidInput(
890                    "Hadamard gate requires exactly 1 qubit".to_string(),
891                ))
892            }
893        } else if gate_name.contains("PauliX") || gate_name.contains('X') {
894            if qubits.len() == 1 {
895                self.apply_single_qubit_gate(&pauli_x(), qubits[0].0 as usize)
896            } else {
897                Err(SimulatorError::InvalidInput(
898                    "Pauli-X gate requires exactly 1 qubit".to_string(),
899                ))
900            }
901        } else if gate_name.contains("PauliY") || gate_name.contains('Y') {
902            if qubits.len() == 1 {
903                self.apply_single_qubit_gate(&pauli_y(), qubits[0].0 as usize)
904            } else {
905                Err(SimulatorError::InvalidInput(
906                    "Pauli-Y gate requires exactly 1 qubit".to_string(),
907                ))
908            }
909        } else if gate_name.contains("PauliZ") || gate_name.contains('Z') {
910            if qubits.len() == 1 {
911                self.apply_single_qubit_gate(&pauli_z(), qubits[0].0 as usize)
912            } else {
913                Err(SimulatorError::InvalidInput(
914                    "Pauli-Z gate requires exactly 1 qubit".to_string(),
915                ))
916            }
917        } else if gate_name.contains("CNOT") || gate_name.contains("CX") {
918            if qubits.len() == 2 {
919                self.apply_two_qubit_gate(
920                    &cnot_matrix(),
921                    qubits[0].0 as usize,
922                    qubits[1].0 as usize,
923                )
924            } else {
925                Err(SimulatorError::InvalidInput(
926                    "CNOT gate requires exactly 2 qubits".to_string(),
927                ))
928            }
929        } else if gate_name.contains("RX") || gate_name.contains("RotationX") {
930            if qubits.len() == 1 {
931                let angle = std::f64::consts::PI / 4.0;
932                self.apply_single_qubit_gate(&rotation_x(angle), qubits[0].0 as usize)
933            } else {
934                Err(SimulatorError::InvalidInput(
935                    "RX gate requires 1 qubit".to_string(),
936                ))
937            }
938        } else if gate_name.contains("RY") || gate_name.contains("RotationY") {
939            if qubits.len() == 1 {
940                let angle = std::f64::consts::PI / 4.0;
941                self.apply_single_qubit_gate(&rotation_y(angle), qubits[0].0 as usize)
942            } else {
943                Err(SimulatorError::InvalidInput(
944                    "RY gate requires 1 qubit".to_string(),
945                ))
946            }
947        } else if gate_name.contains("RZ") || gate_name.contains("RotationZ") {
948            if qubits.len() == 1 {
949                let angle = std::f64::consts::PI / 4.0;
950                self.apply_single_qubit_gate(&rotation_z(angle), qubits[0].0 as usize)
951            } else {
952                Err(SimulatorError::InvalidInput(
953                    "RZ gate requires 1 qubit".to_string(),
954                ))
955            }
956        } else if gate_name.contains('S') {
957            if qubits.len() == 1 {
958                self.apply_single_qubit_gate(&s_gate(), qubits[0].0 as usize)
959            } else {
960                Err(SimulatorError::InvalidInput(
961                    "S gate requires 1 qubit".to_string(),
962                ))
963            }
964        } else if gate_name.contains('T') {
965            if qubits.len() == 1 {
966                self.apply_single_qubit_gate(&t_gate(), qubits[0].0 as usize)
967            } else {
968                Err(SimulatorError::InvalidInput(
969                    "T gate requires 1 qubit".to_string(),
970                ))
971            }
972        } else if gate_name.contains("CZ") {
973            if qubits.len() == 2 {
974                self.apply_two_qubit_gate(&cz_gate(), qubits[0].0 as usize, qubits[1].0 as usize)
975            } else {
976                Err(SimulatorError::InvalidInput(
977                    "CZ gate requires 2 qubits".to_string(),
978                ))
979            }
980        } else if gate_name.contains("SWAP") {
981            if qubits.len() == 2 {
982                self.apply_two_qubit_gate(&swap_gate(), qubits[0].0 as usize, qubits[1].0 as usize)
983            } else {
984                Err(SimulatorError::InvalidInput(
985                    "SWAP gate requires 2 qubits".to_string(),
986                ))
987            }
988        } else {
989            eprintln!(
990                "Warning: Gate '{gate_name}' not yet supported in tensor network simulator, skipping"
991            );
992            Ok(())
993        }
994    }
995}
/// Circuit type for optimization
///
/// Classification of a circuit's structure; presumably consumed by
/// strategy-selection code elsewhere in the crate — confirm at call sites.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CircuitType {
    /// Linear circuit (e.g., CNOT chain)
    Linear,
    /// Star-shaped circuit (e.g., GHZ state preparation)
    Star,
    /// Layered circuit (e.g., Quantum Fourier Transform)
    Layered,
    /// Quantum Fourier Transform circuit with specialized optimization
    QFT,
    /// QAOA circuit with specialized optimization
    QAOA,
    /// General circuit with no specific structure
    General,
}
/// A tensor in the tensor network
#[derive(Debug, Clone)]
pub struct Tensor {
    /// Tensor data, stored as a fixed rank-3 array; unused trailing axes are
    /// kept at singleton length (the logical legs are described by `indices`)
    pub data: Array3<Complex64>,
    /// Logical indices (physical / virtual / auxiliary), one per tensor leg
    pub indices: Vec<TensorIndex>,
    /// Human-readable label for this tensor (also used to name contractions)
    pub label: String,
}
1022impl Tensor {
1023    /// Create a new tensor
1024    #[must_use]
1025    pub const fn new(data: Array3<Complex64>, indices: Vec<TensorIndex>, label: String) -> Self {
1026        Self {
1027            data,
1028            indices,
1029            label,
1030        }
1031    }
1032    /// Create identity tensor for a qubit
1033    pub fn identity(qubit: usize, index_id_gen: &mut usize) -> Self {
1034        let mut data = Array3::zeros((2, 2, 1));
1035        data[[0, 0, 0]] = Complex64::new(1.0, 0.0);
1036        data[[1, 1, 0]] = Complex64::new(1.0, 0.0);
1037        let in_idx = TensorIndex {
1038            id: *index_id_gen,
1039            dimension: 2,
1040            index_type: IndexType::Physical(qubit),
1041        };
1042        *index_id_gen += 1;
1043        let out_idx = TensorIndex {
1044            id: *index_id_gen,
1045            dimension: 2,
1046            index_type: IndexType::Physical(qubit),
1047        };
1048        *index_id_gen += 1;
1049        Self::new(data, vec![in_idx, out_idx], format!("I_{qubit}"))
1050    }
1051    /// Create gate tensor from unitary matrix
1052    pub fn from_gate(
1053        gate: &Array2<Complex64>,
1054        qubits: &[usize],
1055        index_id_gen: &mut usize,
1056    ) -> Result<Self> {
1057        let num_qubits = qubits.len();
1058        let dim = 1 << num_qubits;
1059        if gate.shape() != [dim, dim] {
1060            return Err(SimulatorError::DimensionMismatch(format!(
1061                "Expected gate shape [{}, {}], got {:?}",
1062                dim,
1063                dim,
1064                gate.shape()
1065            )));
1066        }
1067        let data = if num_qubits == 1 {
1068            let mut tensor_data = Array3::zeros((2, 2, 1));
1069            for i in 0..2 {
1070                for j in 0..2 {
1071                    tensor_data[[i, j, 0]] = gate[[i, j]];
1072                }
1073            }
1074            tensor_data
1075        } else {
1076            let mut tensor_data = Array3::zeros((dim, dim, 1));
1077            for i in 0..dim {
1078                for j in 0..dim {
1079                    tensor_data[[i, j, 0]] = gate[[i, j]];
1080                }
1081            }
1082            tensor_data
1083        };
1084        let mut indices = Vec::new();
1085        for &qubit in qubits {
1086            indices.push(TensorIndex {
1087                id: *index_id_gen,
1088                dimension: 2,
1089                index_type: IndexType::Physical(qubit),
1090            });
1091            *index_id_gen += 1;
1092            indices.push(TensorIndex {
1093                id: *index_id_gen,
1094                dimension: 2,
1095                index_type: IndexType::Physical(qubit),
1096            });
1097            *index_id_gen += 1;
1098        }
1099        Ok(Self::new(data, indices, format!("Gate_{qubits:?}")))
1100    }
1101    /// Contract this tensor with another along specified indices
1102    pub fn contract(&self, other: &Self, self_idx: usize, other_idx: usize) -> Result<Self> {
1103        if self_idx >= self.indices.len() || other_idx >= other.indices.len() {
1104            return Err(SimulatorError::InvalidInput(
1105                "Index out of bounds for tensor contraction".to_string(),
1106            ));
1107        }
1108        if self.indices[self_idx].dimension != other.indices[other_idx].dimension {
1109            return Err(SimulatorError::DimensionMismatch(format!(
1110                "Index dimension mismatch: expected {}, got {}",
1111                self.indices[self_idx].dimension, other.indices[other_idx].dimension
1112            )));
1113        }
1114        let self_shape = self.data.shape();
1115        let other_shape = other.data.shape();
1116        let mut result_shape = Vec::new();
1117        for (i, idx) in self.indices.iter().enumerate() {
1118            if i != self_idx {
1119                result_shape.push(idx.dimension);
1120            }
1121        }
1122        for (i, idx) in other.indices.iter().enumerate() {
1123            if i != other_idx {
1124                result_shape.push(idx.dimension);
1125            }
1126        }
1127        if result_shape.is_empty() {
1128            let mut scalar_result = Complex64::new(0.0, 0.0);
1129            let contract_dim = self.indices[self_idx].dimension;
1130            for k in 0..contract_dim {
1131                if self.data.len() > k && other.data.len() > k {
1132                    scalar_result += self.data.iter().nth(k).unwrap_or(&Complex64::new(0.0, 0.0))
1133                        * other
1134                            .data
1135                            .iter()
1136                            .nth(k)
1137                            .unwrap_or(&Complex64::new(0.0, 0.0));
1138                }
1139            }
1140            let mut result_data = Array3::zeros((1, 1, 1));
1141            result_data[[0, 0, 0]] = scalar_result;
1142            let result_indices = vec![];
1143            return Ok(Self::new(
1144                result_data,
1145                result_indices,
1146                format!("{}_contracted_{}", self.label, other.label),
1147            ));
1148        }
1149        let result_data = self
1150            .perform_tensor_contraction(other, self_idx, other_idx, &result_shape)
1151            .unwrap_or_else(|_| {
1152                Array3::from_shape_fn(
1153                    (
1154                        result_shape[0].max(2),
1155                        *result_shape.get(1).unwrap_or(&2).max(&2),
1156                        1,
1157                    ),
1158                    |(i, j, k)| {
1159                        if i == j {
1160                            Complex64::new(1.0, 0.0)
1161                        } else {
1162                            Complex64::new(0.0, 0.0)
1163                        }
1164                    },
1165                )
1166            });
1167        let mut result_indices = Vec::new();
1168        for (i, idx) in self.indices.iter().enumerate() {
1169            if i != self_idx {
1170                result_indices.push(idx.clone());
1171            }
1172        }
1173        for (i, idx) in other.indices.iter().enumerate() {
1174            if i != other_idx {
1175                result_indices.push(idx.clone());
1176            }
1177        }
1178        Ok(Self::new(
1179            result_data,
1180            result_indices,
1181            format!("Contract_{}_{}", self.label, other.label),
1182        ))
1183    }
1184    /// Perform actual tensor contraction computation
1185    pub(super) fn perform_tensor_contraction(
1186        &self,
1187        other: &Self,
1188        self_idx: usize,
1189        other_idx: usize,
1190        result_shape: &[usize],
1191    ) -> Result<Array3<Complex64>> {
1192        let result_dims = if result_shape.len() >= 2 {
1193            (
1194                result_shape[0],
1195                result_shape.get(1).copied().unwrap_or(1),
1196                result_shape.get(2).copied().unwrap_or(1),
1197            )
1198        } else if result_shape.len() == 1 {
1199            (result_shape[0], 1, 1)
1200        } else {
1201            (1, 1, 1)
1202        };
1203        let mut result = Array3::zeros(result_dims);
1204        let contract_dim = self.indices[self_idx].dimension;
1205        for i in 0..result_dims.0 {
1206            for j in 0..result_dims.1 {
1207                for k in 0..result_dims.2 {
1208                    let mut sum = Complex64::new(0.0, 0.0);
1209                    for contract_idx in 0..contract_dim {
1210                        let self_coords =
1211                            self.map_result_to_self_coords(i, j, k, self_idx, contract_idx);
1212                        let other_coords =
1213                            other.map_result_to_other_coords(i, j, k, other_idx, contract_idx);
1214                        if self_coords.0 < self.data.shape()[0]
1215                            && self_coords.1 < self.data.shape()[1]
1216                            && self_coords.2 < self.data.shape()[2]
1217                            && other_coords.0 < other.data.shape()[0]
1218                            && other_coords.1 < other.data.shape()[1]
1219                            && other_coords.2 < other.data.shape()[2]
1220                        {
1221                            sum += self.data[[self_coords.0, self_coords.1, self_coords.2]]
1222                                * other.data[[other_coords.0, other_coords.1, other_coords.2]];
1223                        }
1224                    }
1225                    result[[i, j, k]] = sum;
1226                }
1227            }
1228        }
1229        Ok(result)
1230    }
1231    /// Map result coordinates to self tensor coordinates
1232    pub(super) fn map_result_to_self_coords(
1233        &self,
1234        i: usize,
1235        j: usize,
1236        k: usize,
1237        contract_idx_pos: usize,
1238        contract_val: usize,
1239    ) -> (usize, usize, usize) {
1240        let coords = match contract_idx_pos {
1241            0 => (contract_val, i.min(j), k),
1242            1 => (i, contract_val, k),
1243            _ => (i, j, contract_val),
1244        };
1245        (coords.0.min(1), coords.1.min(1), 0)
1246    }
1247    /// Map result coordinates to other tensor coordinates
1248    pub(super) fn map_result_to_other_coords(
1249        &self,
1250        i: usize,
1251        j: usize,
1252        k: usize,
1253        contract_idx_pos: usize,
1254        contract_val: usize,
1255    ) -> (usize, usize, usize) {
1256        let coords = match contract_idx_pos {
1257            0 => (contract_val, i.min(j), k),
1258            1 => (i, contract_val, k),
1259            _ => (i, j, contract_val),
1260        };
1261        (coords.0.min(1), coords.1.min(1), 0)
1262    }
1263    /// Get the rank (number of indices) of this tensor
1264    #[must_use]
1265    pub fn rank(&self) -> usize {
1266        self.indices.len()
1267    }
1268    /// Get the total size of this tensor
1269    #[must_use]
1270    pub fn size(&self) -> usize {
1271        self.data.len()
1272    }
1273}
/// Type of tensor index
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum IndexType {
    /// Physical qubit index; the payload is the qubit it attaches to
    Physical(usize),
    /// Virtual bond between tensors
    Virtual,
    /// Auxiliary index for decompositions
    Auxiliary,
}