//! Architecture encodings for quantum circuit search: a discrete, index-based
//! representation, a continuous probability-weight relaxation, fixed-size
//! architecture embeddings, and a decoder from encodings to gate operations.

use crate::error::{EncodingError, Result};
use crate::search_space::{GateOperation, GateType, LayerTemplate, SearchSpace};
use ndarray::{Array1, Array2};
use rand::prelude::*;
use rand_distr::Normal;
use serde::{Deserialize, Serialize};

/// A discrete architecture encoding: per-layer template, gate, and
/// entanglement-pattern choices stored as indices into a [`SearchSpace`].
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DiscreteEncoding {
    /// Number of layers in the circuit.
    pub depth: usize,
    /// Layer-template index for each layer.
    pub layer_templates: Vec<usize>,
    /// Gate index per qubit, per layer.
    pub gate_choices: Vec<Vec<usize>>,
    /// Entanglement-pattern index for each layer.
    pub entanglement_choices: Vec<usize>,
}

impl DiscreteEncoding {
    /// Builds an encoding from explicit choice vectors.
    pub fn new(
        depth: usize,
        layer_templates: Vec<usize>,
        gate_choices: Vec<Vec<usize>>,
        entanglement_choices: Vec<usize>,
    ) -> Self {
        DiscreteEncoding {
            depth,
            layer_templates,
            gate_choices,
            entanglement_choices,
        }
    }

    /// Samples a uniformly random encoding from `space`.
    pub fn random<R: Rng>(space: &SearchSpace, rng: &mut R) -> Self {
        let depth = rng.gen_range(space.min_depth..=space.max_depth);
        let num_templates = space.layer_templates.len();
        let num_gates = space.gate_set.len();

        let layer_templates: Vec<usize> = (0..depth)
            .map(|_| rng.gen_range(0..num_templates.max(1)))
            .collect();

        let gate_choices: Vec<Vec<usize>> = (0..depth)
            .map(|_| {
                (0..space.num_qubits)
                    .map(|_| rng.gen_range(0..num_gates.max(1)))
                    .collect()
            })
            .collect();

        // Six entanglement-pattern choices, matching `ContinuousEncoding::uniform`.
        let entanglement_choices: Vec<usize> =
            (0..depth).map(|_| rng.gen_range(0..6)).collect();

        DiscreteEncoding {
            depth,
            layer_templates,
            gate_choices,
            entanglement_choices,
        }
    }

    /// Validates every choice index against the bounds of `space`.
    pub fn validate(&self, space: &SearchSpace) -> Result<()> {
        if self.depth < space.min_depth || self.depth > space.max_depth {
            return Err(EncodingError::InvalidDimension {
                expected: space.max_depth,
                got: self.depth,
            }
            .into());
        }

        if self.layer_templates.len() != self.depth {
            return Err(EncodingError::InvalidDimension {
                expected: self.depth,
                got: self.layer_templates.len(),
            }
            .into());
        }

        let num_templates = space.layer_templates.len();
        for (i, &template_idx) in self.layer_templates.iter().enumerate() {
            if template_idx >= num_templates {
                return Err(EncodingError::InvalidChoice {
                    position: i,
                    choice: template_idx,
                    max_choices: num_templates,
                }
                .into());
            }
        }

        let num_gates = space.gate_set.len();
        for (layer_idx, layer_gates) in self.gate_choices.iter().enumerate() {
            for (pos, &gate_idx) in layer_gates.iter().enumerate() {
                if gate_idx >= num_gates {
                    return Err(EncodingError::InvalidChoice {
                        position: layer_idx * space.num_qubits + pos,
                        choice: gate_idx,
                        max_choices: num_gates,
                    }
                    .into());
                }
            }
        }

        Ok(())
    }

    /// Flattens the encoding as `[depth, templates.., gates.., entanglement..]`,
    /// zero-padded to [`Self::expected_flat_size`].
    pub fn to_flat_vector(&self, space: &SearchSpace) -> Vec<usize> {
        let mut flat = Vec::new();
        flat.push(self.depth);

        for &t in &self.layer_templates {
            flat.push(t);
        }

        for layer_gates in &self.gate_choices {
            for &g in layer_gates {
                flat.push(g);
            }
        }

        for &e in &self.entanglement_choices {
            flat.push(e);
        }

        let expected_size = Self::expected_flat_size(space);
        while flat.len() < expected_size {
            flat.push(0);
        }

        flat
    }

    /// Reconstructs an encoding from a flat vector, clamping the depth and
    /// wrapping out-of-range choices back into bounds.
    pub fn from_flat_vector(flat: &[usize], space: &SearchSpace) -> Result<Self> {
        if flat.is_empty() {
            return Err(EncodingError::DecodingFailed("Empty vector".to_string()).into());
        }

        let depth = flat[0].clamp(space.min_depth, space.max_depth);
        let mut idx = 1;

        let layer_templates: Vec<usize> = (0..depth)
            .map(|_| {
                let val = flat.get(idx).copied().unwrap_or(0);
                idx += 1;
                val % space.layer_templates.len().max(1)
            })
            .collect();

        let gate_choices: Vec<Vec<usize>> = (0..depth)
            .map(|_| {
                (0..space.num_qubits)
                    .map(|_| {
                        let val = flat.get(idx).copied().unwrap_or(0);
                        idx += 1;
                        val % space.gate_set.len().max(1)
                    })
                    .collect()
            })
            .collect();

        let entanglement_choices: Vec<usize> = (0..depth)
            .map(|_| {
                let val = flat.get(idx).copied().unwrap_or(0);
                idx += 1;
                val % 6
            })
            .collect();

        Ok(DiscreteEncoding {
            depth,
            layer_templates,
            gate_choices,
            entanglement_choices,
        })
    }

    /// Size of the flat representation: one depth slot, then per-layer template,
    /// per-layer-per-qubit gate, and per-layer entanglement slots at `max_depth`.
    pub fn expected_flat_size(space: &SearchSpace) -> usize {
        1 + space.max_depth + space.max_depth * space.num_qubits + space.max_depth
    }

    /// Mutates the encoding in place: each field is resampled independently
    /// with probability `mutation_rate`, and the depth may shift by one layer.
    pub fn mutate<R: Rng>(&mut self, space: &SearchSpace, mutation_rate: f64, rng: &mut R) {
        let num_templates = space.layer_templates.len().max(1);
        let num_gates = space.gate_set.len().max(1);

        if rng.gen::<f64>() < mutation_rate {
            let delta: i32 = rng.gen_range(-1..=1);
            let new_depth =
                (self.depth as i32 + delta).clamp(space.min_depth as i32, space.max_depth as i32)
                    as usize;

            if new_depth > self.depth {
                for _ in self.depth..new_depth {
                    self.layer_templates.push(rng.gen_range(0..num_templates));
                    self.gate_choices.push(
                        (0..space.num_qubits)
                            .map(|_| rng.gen_range(0..num_gates))
                            .collect(),
                    );
                    self.entanglement_choices.push(rng.gen_range(0..6));
                }
            } else if new_depth < self.depth {
                self.layer_templates.truncate(new_depth);
                self.gate_choices.truncate(new_depth);
                self.entanglement_choices.truncate(new_depth);
            }
            self.depth = new_depth;
        }

        for template in &mut self.layer_templates {
            if rng.gen::<f64>() < mutation_rate {
                *template = rng.gen_range(0..num_templates);
            }
        }

        for layer_gates in &mut self.gate_choices {
            for gate in layer_gates {
                if rng.gen::<f64>() < mutation_rate {
                    *gate = rng.gen_range(0..num_gates);
                }
            }
        }

        for ent in &mut self.entanglement_choices {
            if rng.gen::<f64>() < mutation_rate {
                *ent = rng.gen_range(0..6);
            }
        }
    }

    /// Produces a child by uniform crossover: each choice is taken from one of
    /// the two parents with equal probability.
    pub fn crossover<R: Rng>(&self, other: &Self, rng: &mut R) -> Self {
        let depth = if rng.gen_bool(0.5) {
            self.depth
        } else {
            other.depth
        };

        let layer_templates: Vec<usize> = (0..depth)
            .map(|i| {
                if rng.gen_bool(0.5) {
                    self.layer_templates.get(i).copied().unwrap_or(0)
                } else {
                    other.layer_templates.get(i).copied().unwrap_or(0)
                }
            })
            .collect();

        let gate_choices: Vec<Vec<usize>> = (0..depth)
            .map(|i| {
                let self_gates = self.gate_choices.get(i);
                let other_gates = other.gate_choices.get(i);

                let num_qubits = self_gates
                    .map(|g| g.len())
                    .or(other_gates.map(|g| g.len()))
                    .unwrap_or(0);

                (0..num_qubits)
                    .map(|j| {
                        if rng.gen_bool(0.5) {
                            self_gates.and_then(|g| g.get(j).copied()).unwrap_or(0)
                        } else {
                            other_gates.and_then(|g| g.get(j).copied()).unwrap_or(0)
                        }
                    })
                    .collect()
            })
            .collect();

        let entanglement_choices: Vec<usize> = (0..depth)
            .map(|i| {
                if rng.gen_bool(0.5) {
                    self.entanglement_choices.get(i).copied().unwrap_or(0)
                } else {
                    other.entanglement_choices.get(i).copied().unwrap_or(0)
                }
            })
            .collect();

        DiscreteEncoding {
            depth,
            layer_templates,
            gate_choices,
            entanglement_choices,
        }
    }
}

/// A continuous relaxation of [`DiscreteEncoding`]: probability weights over
/// every choice, which can be sampled, argmax-decoded, or updated from rewards.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContinuousEncoding {
    /// Probability weights over circuit depths (`min_depth..=max_depth`).
    pub depth_weights: Array1<f64>,
    /// Per-layer probability weights over layer templates.
    pub template_weights: Array2<f64>,
    /// Per-layer, per-qubit probability weights over gates.
    pub gate_weights: Vec<Array2<f64>>,
    /// Per-layer probability weights over entanglement patterns.
    pub entanglement_weights: Array2<f64>,
    /// Sampling temperature (clamped to at least 0.01).
    pub temperature: f64,
}

impl ContinuousEncoding {
    /// Creates an encoding with uniform probabilities over every choice.
    pub fn uniform(space: &SearchSpace) -> Self {
        let depth_range = space.max_depth - space.min_depth + 1;
        let num_templates = space.layer_templates.len().max(1);
        let num_gates = space.gate_set.len().max(1);
        let num_entanglements = 6;

        let depth_weights = Array1::from_elem(depth_range, 1.0 / depth_range as f64);
        let template_weights =
            Array2::from_elem((space.max_depth, num_templates), 1.0 / num_templates as f64);

        let gate_weights: Vec<Array2<f64>> = (0..space.max_depth)
            .map(|_| Array2::from_elem((space.num_qubits, num_gates), 1.0 / num_gates as f64))
            .collect();

        let entanglement_weights = Array2::from_elem(
            (space.max_depth, num_entanglements),
            1.0 / num_entanglements as f64,
        );

        ContinuousEncoding {
            depth_weights,
            template_weights,
            gate_weights,
            entanglement_weights,
            temperature: 1.0,
        }
    }

    /// Starts from [`Self::uniform`], perturbs every weight with Gaussian noise,
    /// and renormalizes each distribution.
    pub fn random<R: Rng>(space: &SearchSpace, rng: &mut R) -> Self {
        let mut encoding = Self::uniform(space);

        let normal = Normal::new(0.0, 0.1).unwrap();

        for w in encoding.depth_weights.iter_mut() {
            *w += rng.sample(normal);
        }
        Self::softmax_inplace(&mut encoding.depth_weights);

        for mut row in encoding.template_weights.rows_mut() {
            for w in row.iter_mut() {
                *w += rng.sample(normal);
            }
            let sum: f64 = row.iter().sum();
            if sum > 0.0 {
                row.mapv_inplace(|x| x / sum);
            }
        }

        for gate_w in &mut encoding.gate_weights {
            for mut row in gate_w.rows_mut() {
                for w in row.iter_mut() {
                    *w += rng.sample(normal);
                }
                let sum: f64 = row.iter().sum();
                if sum > 0.0 {
                    row.mapv_inplace(|x| x / sum);
                }
            }
        }

        for mut row in encoding.entanglement_weights.rows_mut() {
            for w in row.iter_mut() {
                *w += rng.sample(normal);
            }
            let sum: f64 = row.iter().sum();
            if sum > 0.0 {
                row.mapv_inplace(|x| x / sum);
            }
        }

        encoding
    }

    /// Numerically stable softmax, applied in place.
    fn softmax_inplace(arr: &mut Array1<f64>) {
        let max_val = arr.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
        arr.mapv_inplace(|x| (x - max_val).exp());
        let sum: f64 = arr.iter().sum();
        if sum > 0.0 {
            arr.mapv_inplace(|x| x / sum);
        }
    }

    /// Sets the sampling temperature, clamped to a 0.01 floor.
    pub fn with_temperature(mut self, temperature: f64) -> Self {
        self.temperature = temperature.max(0.01);
        self
    }

    /// Samples a [`DiscreteEncoding`] from the current probability weights.
    pub fn sample<R: Rng>(&self, space: &SearchSpace, rng: &mut R) -> DiscreteEncoding {
        let depth_probs: Vec<f64> = self.depth_weights.iter().copied().collect();
        let depth_idx = Self::sample_categorical(&depth_probs, rng);
        let depth = space.min_depth + depth_idx;

        let layer_templates: Vec<usize> = (0..depth)
            .map(|i| {
                let probs: Vec<f64> = self.template_weights.row(i).iter().copied().collect();
                Self::sample_categorical(&probs, rng)
            })
            .collect();

        let gate_choices: Vec<Vec<usize>> = (0..depth)
            .map(|i| {
                (0..space.num_qubits)
                    .map(|j| {
                        let probs: Vec<f64> =
                            self.gate_weights[i].row(j).iter().copied().collect();
                        Self::sample_categorical(&probs, rng)
                    })
                    .collect()
            })
            .collect();

        let entanglement_choices: Vec<usize> = (0..depth)
            .map(|i| {
                let probs: Vec<f64> = self.entanglement_weights.row(i).iter().copied().collect();
                Self::sample_categorical(&probs, rng)
            })
            .collect();

        DiscreteEncoding {
            depth,
            layer_templates,
            gate_choices,
            entanglement_choices,
        }
    }

    /// Draws an index from an unnormalized categorical distribution via
    /// inverse-CDF sampling; degenerate inputs fall back to index 0.
    fn sample_categorical<R: Rng>(probs: &[f64], rng: &mut R) -> usize {
        let sum: f64 = probs.iter().sum();
        if sum <= 0.0 || probs.is_empty() {
            return 0;
        }

        let threshold = rng.gen::<f64>() * sum;
        let mut cumsum = 0.0;
        for (i, &p) in probs.iter().enumerate() {
            cumsum += p;
            if cumsum >= threshold {
                return i;
            }
        }
        probs.len() - 1
    }

    /// Decodes greedily by taking the argmax of every distribution.
    pub fn to_discrete(&self, space: &SearchSpace) -> DiscreteEncoding {
        let depth_idx = Self::argmax(&self.depth_weights.to_vec());
        let depth = (space.min_depth + depth_idx).min(space.max_depth);

        let layer_templates: Vec<usize> = (0..depth)
            .map(|i| Self::argmax(&self.template_weights.row(i).to_vec()))
            .collect();

        let gate_choices: Vec<Vec<usize>> = (0..depth)
            .map(|i| {
                (0..space.num_qubits)
                    .map(|j| Self::argmax(&self.gate_weights[i].row(j).to_vec()))
                    .collect()
            })
            .collect();

        let entanglement_choices: Vec<usize> = (0..depth)
            .map(|i| Self::argmax(&self.entanglement_weights.row(i).to_vec()))
            .collect();

        DiscreteEncoding {
            depth,
            layer_templates,
            gate_choices,
            entanglement_choices,
        }
    }

    /// Index of the largest element (0 for an empty slice).
    fn argmax(arr: &[f64]) -> usize {
        arr.iter()
            .enumerate()
            .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
            .map(|(i, _)| i)
            .unwrap_or(0)
    }

    /// Reinforces the choices made in `discrete` by `learning_rate * reward`
    /// and renormalizes each distribution.
    ///
    /// Note: the depth index is `depth - 1`, which assumes the depth weights
    /// are indexed from a minimum depth of 1.
    pub fn update(&mut self, discrete: &DiscreteEncoding, reward: f64, learning_rate: f64) {
        let depth_idx = discrete.depth.saturating_sub(1);
        if depth_idx < self.depth_weights.len() {
            self.depth_weights[depth_idx] += learning_rate * reward;
            Self::softmax_inplace(&mut self.depth_weights);
        }

        for (i, &template) in discrete.layer_templates.iter().enumerate() {
            if i < self.template_weights.nrows() && template < self.template_weights.ncols() {
                self.template_weights[[i, template]] += learning_rate * reward;
            }
        }

        for mut row in self.template_weights.rows_mut() {
            let sum: f64 = row.iter().sum();
            if sum > 0.0 {
                row.mapv_inplace(|x| x / sum);
            }
        }

        for (i, layer_gates) in discrete.gate_choices.iter().enumerate() {
            if i < self.gate_weights.len() {
                for (j, &gate) in layer_gates.iter().enumerate() {
                    if j < self.gate_weights[i].nrows() && gate < self.gate_weights[i].ncols() {
                        self.gate_weights[i][[j, gate]] += learning_rate * reward;
                    }
                }
                for mut row in self.gate_weights[i].rows_mut() {
                    let sum: f64 = row.iter().sum();
                    if sum > 0.0 {
                        row.mapv_inplace(|x| x / sum);
                    }
                }
            }
        }

        for (i, &ent) in discrete.entanglement_choices.iter().enumerate() {
            if i < self.entanglement_weights.nrows() && ent < self.entanglement_weights.ncols() {
                self.entanglement_weights[[i, ent]] += learning_rate * reward;
            }
        }

        for mut row in self.entanglement_weights.rows_mut() {
            let sum: f64 = row.iter().sum();
            if sum > 0.0 {
                row.mapv_inplace(|x| x / sum);
            }
        }
    }
}

/// A fixed-size feature vector summarizing a [`DiscreteEncoding`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ArchitectureEmbedding {
    /// Length of the embedding vector.
    pub dimension: usize,
    /// The embedding vector itself.
    pub vector: Array1<f64>,
}

impl ArchitectureEmbedding {
    /// Creates a zero embedding of the given dimension.
    pub fn new(dimension: usize) -> Result<Self> {
        if dimension == 0 {
            return Err(EncodingError::InvalidEmbeddingDimension(dimension).into());
        }

        Ok(ArchitectureEmbedding {
            dimension,
            vector: Array1::zeros(dimension),
        })
    }

    /// Builds an embedding from the normalized depth plus template, gate, and
    /// entanglement usage histograms, packed into `dimension` slots.
    pub fn from_discrete(
        encoding: &DiscreteEncoding,
        space: &SearchSpace,
        dimension: usize,
    ) -> Result<Self> {
        if dimension == 0 {
            return Err(EncodingError::InvalidEmbeddingDimension(dimension).into());
        }

        let mut embedding = Self::new(dimension)?;

        // Guard against a zero-width depth range, which would otherwise put NaN
        // into the embedding; saturating_sub likewise protects against a depth
        // below min_depth.
        let depth_span = (space.max_depth - space.min_depth).max(1) as f64;
        let depth_normalized = encoding.depth.saturating_sub(space.min_depth) as f64 / depth_span;

        let mut template_counts = vec![0.0; space.layer_templates.len().max(1)];
        for &t in &encoding.layer_templates {
            if t < template_counts.len() {
                template_counts[t] += 1.0;
            }
        }
        let template_sum: f64 = template_counts.iter().sum();
        if template_sum > 0.0 {
            for c in &mut template_counts {
                *c /= template_sum;
            }
        }

        let mut gate_counts = vec![0.0; space.gate_set.len().max(1)];
        for layer_gates in &encoding.gate_choices {
            for &g in layer_gates {
                if g < gate_counts.len() {
                    gate_counts[g] += 1.0;
                }
            }
        }
        let gate_sum: f64 = gate_counts.iter().sum();
        if gate_sum > 0.0 {
            for c in &mut gate_counts {
                *c /= gate_sum;
            }
        }

        let mut idx = 0;

        if idx < dimension {
            embedding.vector[idx] = depth_normalized;
            idx += 1;
        }
        if idx < dimension {
            embedding.vector[idx] = encoding.depth as f64 / space.max_depth as f64;
            idx += 1;
        }

        for &t in template_counts.iter().take(dimension.saturating_sub(idx)) {
            if idx < dimension {
                embedding.vector[idx] = t;
                idx += 1;
            }
        }

        for &g in gate_counts.iter().take(dimension.saturating_sub(idx)) {
            if idx < dimension {
                embedding.vector[idx] = g;
                idx += 1;
            }
        }

        let mut ent_counts = vec![0.0; 6];
        for &e in &encoding.entanglement_choices {
            if e < 6 {
                ent_counts[e] += 1.0;
            }
        }
        let ent_sum: f64 = ent_counts.iter().sum();
        if ent_sum > 0.0 {
            for c in &mut ent_counts {
                *c /= ent_sum;
            }
        }

        for &e in ent_counts.iter().take(dimension.saturating_sub(idx)) {
            if idx < dimension {
                embedding.vector[idx] = e;
                idx += 1;
            }
        }

        Ok(embedding)
    }

    /// Cosine similarity with another embedding (0.0 if the dimensions differ
    /// or either vector has zero norm).
    pub fn cosine_similarity(&self, other: &Self) -> f64 {
        if self.dimension != other.dimension {
            return 0.0;
        }

        let dot: f64 = self.vector.iter().zip(other.vector.iter()).map(|(a, b)| a * b).sum();
        let norm_a: f64 = self.vector.iter().map(|x| x * x).sum::<f64>().sqrt();
        let norm_b: f64 = other.vector.iter().map(|x| x * x).sum::<f64>().sqrt();

        if norm_a > 0.0 && norm_b > 0.0 {
            dot / (norm_a * norm_b)
        } else {
            0.0
        }
    }

    /// Euclidean distance to another embedding (infinity if the dimensions differ).
    pub fn euclidean_distance(&self, other: &Self) -> f64 {
        if self.dimension != other.dimension {
            return f64::INFINITY;
        }

        self.vector
            .iter()
            .zip(other.vector.iter())
            .map(|(a, b)| (a - b).powi(2))
            .sum::<f64>()
            .sqrt()
    }
}

/// Decodes [`DiscreteEncoding`]s into concrete gate operations for a given
/// search space and produces architecture embeddings.
#[derive(Debug, Clone)]
pub struct ArchitectureEncoder {
    /// The search space architectures are decoded against.
    pub space: SearchSpace,
    /// Dimension used for architecture embeddings.
    pub embedding_dim: usize,
}

impl ArchitectureEncoder {
    /// Creates an encoder for `space` with the given embedding dimension.
    pub fn new(space: SearchSpace, embedding_dim: usize) -> Self {
        ArchitectureEncoder { space, embedding_dim }
    }

    /// Expands an encoding layer by layer into concrete gate operations,
    /// honoring the layer templates and hardware connectivity of the space.
    pub fn decode_to_operations(&self, encoding: &DiscreteEncoding) -> Vec<Vec<GateOperation>> {
        let mut layers = Vec::new();

        for layer_idx in 0..encoding.depth {
            let mut operations = Vec::new();
            let template_idx = encoding.layer_templates.get(layer_idx).copied().unwrap_or(0);

            let template = self
                .space
                .layer_templates
                .get(template_idx % self.space.layer_templates.len().max(1))
                .cloned()
                .unwrap_or_else(LayerTemplate::standard_hardware_efficient);

            match template {
                LayerTemplate::HardwareEfficient {
                    single_qubit_gates,
                    entangling_gate,
                    entanglement,
                } => {
                    let mut param_idx = 0;
                    for qubit in 0..self.space.num_qubits {
                        for gate in &single_qubit_gates {
                            let p_idx = if gate.is_parameterized() {
                                let idx = param_idx;
                                param_idx += 1;
                                Some(idx)
                            } else {
                                None
                            };
                            operations.push(GateOperation::single(*gate, qubit, p_idx));
                        }
                    }

                    for (ctrl, tgt) in entanglement.generate_pairs(self.space.num_qubits) {
                        if self.space.connectivity.is_connected(ctrl, tgt) {
                            let p_idx = if entangling_gate.is_parameterized() {
                                let idx = param_idx;
                                param_idx += 1;
                                Some(idx)
                            } else {
                                None
                            };
                            operations.push(GateOperation::two_qubit(
                                entangling_gate,
                                ctrl,
                                tgt,
                                p_idx,
                            ));
                        }
                    }
                }
                LayerTemplate::QaoaInspired {
                    mixer_gates,
                    cost_gates,
                } => {
                    let mut param_idx = 0;

                    for gate in &cost_gates {
                        if gate.is_two_qubit() {
                            for (i, j) in self.space.connectivity.edges() {
                                let p_idx = if gate.is_parameterized() {
                                    let idx = param_idx;
                                    param_idx += 1;
                                    Some(idx)
                                } else {
                                    None
                                };
                                operations.push(GateOperation::two_qubit(*gate, i, j, p_idx));
                            }
                        } else {
                            for qubit in 0..self.space.num_qubits {
                                let p_idx = if gate.is_parameterized() {
                                    let idx = param_idx;
                                    param_idx += 1;
                                    Some(idx)
                                } else {
                                    None
                                };
                                operations.push(GateOperation::single(*gate, qubit, p_idx));
                            }
                        }
                    }

                    for gate in &mixer_gates {
                        for qubit in 0..self.space.num_qubits {
                            let p_idx = if gate.is_parameterized() {
                                let idx = param_idx;
                                param_idx += 1;
                                Some(idx)
                            } else {
                                None
                            };
                            operations.push(GateOperation::single(*gate, qubit, p_idx));
                        }
                    }
                }
                LayerTemplate::StronglyEntangling {
                    rotation_gates,
                    entanglement_patterns,
                } => {
                    let mut param_idx = 0;

                    for (sublayer, pattern) in entanglement_patterns.iter().enumerate() {
                        // Cycle through the rotation gates across sublayers.
                        for qubit in 0..self.space.num_qubits {
                            let gate = rotation_gates[sublayer % rotation_gates.len()];
                            let p_idx = if gate.is_parameterized() {
                                let idx = param_idx;
                                param_idx += 1;
                                Some(idx)
                            } else {
                                None
                            };
                            operations.push(GateOperation::single(gate, qubit, p_idx));
                        }

                        for (ctrl, tgt) in pattern.generate_pairs(self.space.num_qubits) {
                            if self.space.connectivity.is_connected(ctrl, tgt) {
                                operations.push(GateOperation::two_qubit(
                                    GateType::CNOT,
                                    ctrl,
                                    tgt,
                                    None,
                                ));
                            }
                        }
                    }
                }
                LayerTemplate::Custom { operations: ops } => {
                    operations.extend(ops);
                }
            }

            layers.push(operations);
        }

        layers
    }

    /// Embeds an encoding using this encoder's search space and dimension.
    pub fn embed(&self, encoding: &DiscreteEncoding) -> Result<ArchitectureEmbedding> {
        ArchitectureEmbedding::from_discrete(encoding, &self.space, self.embedding_dim)
    }

    /// Number of trainable parameters in the decoded circuit.
    pub fn count_parameters(&self, encoding: &DiscreteEncoding) -> usize {
        let operations = self.decode_to_operations(encoding);
        operations
            .iter()
            .flat_map(|layer| layer.iter())
            .filter(|op| op.gate.is_parameterized())
            .count()
    }

    /// Total number of gates in the decoded circuit.
    pub fn count_gates(&self, encoding: &DiscreteEncoding) -> usize {
        let operations = self.decode_to_operations(encoding);
        operations.iter().map(|layer| layer.len()).sum()
    }

    /// Number of two-qubit gates in the decoded circuit.
    pub fn count_two_qubit_gates(&self, encoding: &DiscreteEncoding) -> usize {
        let operations = self.decode_to_operations(encoding);
        operations
            .iter()
            .flat_map(|layer| layer.iter())
            .filter(|op| op.gate.is_two_qubit())
            .count()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use rand::SeedableRng;
    use rand_chacha::ChaCha8Rng;

    fn test_space() -> SearchSpace {
        SearchSpace::hardware_efficient(4, 5).unwrap()
    }

    #[test]
    fn test_discrete_encoding_creation() {
        let encoding =
            DiscreteEncoding::new(3, vec![0, 1, 0], vec![vec![0, 1, 2, 3]], vec![0, 1, 2]);
        assert_eq!(encoding.depth, 3);
        assert_eq!(encoding.layer_templates.len(), 3);
    }

    #[test]
    fn test_discrete_encoding_random() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let encoding = DiscreteEncoding::random(&space, &mut rng);

        assert!(encoding.depth >= space.min_depth);
        assert!(encoding.depth <= space.max_depth);
        assert_eq!(encoding.layer_templates.len(), encoding.depth);
    }

    #[test]
    fn test_discrete_encoding_validation() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let encoding = DiscreteEncoding::random(&space, &mut rng);

        assert!(encoding.validate(&space).is_ok());

        let invalid = DiscreteEncoding::new(100, vec![0], vec![], vec![]);
        assert!(invalid.validate(&space).is_err());
    }

    #[test]
    fn test_flat_vector_roundtrip() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let original = DiscreteEncoding::random(&space, &mut rng);

        let flat = original.to_flat_vector(&space);
        let reconstructed = DiscreteEncoding::from_flat_vector(&flat, &space).unwrap();

        assert_eq!(original.depth, reconstructed.depth);
    }

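    // A minimal sketch of an extra invariant check, assuming the same
    // hardware-efficient test space as above: the padded flat vector should
    // always have exactly the size reported by expected_flat_size.
    #[test]
    fn test_flat_vector_length_matches_expected_size() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(7);
        let encoding = DiscreteEncoding::random(&space, &mut rng);

        let flat = encoding.to_flat_vector(&space);
        assert_eq!(flat.len(), DiscreteEncoding::expected_flat_size(&space));
    }
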
    #[test]
    fn test_mutation() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let mut encoding = DiscreteEncoding::random(&space, &mut rng);

        encoding.mutate(&space, 0.5, &mut rng);

        assert!(encoding.validate(&space).is_ok());
    }

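    // A minimal sketch: with a mutation rate of 0.0 every per-field coin flip
    // fails, so mutate should leave the encoding unchanged.
    #[test]
    fn test_mutation_rate_zero_is_identity() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let original = DiscreteEncoding::random(&space, &mut rng);
        let mut mutated = original.clone();

        mutated.mutate(&space, 0.0, &mut rng);

        assert_eq!(mutated, original);
    }
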
    #[test]
    fn test_crossover() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let parent1 = DiscreteEncoding::random(&space, &mut rng);
        let parent2 = DiscreteEncoding::random(&space, &mut rng);

        let child = parent1.crossover(&parent2, &mut rng);
        assert!(child.validate(&space).is_ok());
    }

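    // A minimal sketch: the uniform crossover copies its depth from one of the
    // two parents, so the child depth must match one of them.
    #[test]
    fn test_crossover_depth_comes_from_a_parent() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(7);
        let parent1 = DiscreteEncoding::random(&space, &mut rng);
        let parent2 = DiscreteEncoding::random(&space, &mut rng);

        let child = parent1.crossover(&parent2, &mut rng);
        assert!(child.depth == parent1.depth || child.depth == parent2.depth);
    }
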
    #[test]
    fn test_continuous_encoding_uniform() {
        let space = test_space();
        let encoding = ContinuousEncoding::uniform(&space);

        let depth_sum: f64 = encoding.depth_weights.iter().sum();
        assert!((depth_sum - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_continuous_encoding_sample() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let continuous = ContinuousEncoding::uniform(&space);

        let discrete = continuous.sample(&space, &mut rng);
        assert!(discrete.validate(&space).is_ok());
    }

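    // A minimal sketch: with_temperature should apply the 0.01 floor rather
    // than accept a non-positive temperature.
    #[test]
    fn test_with_temperature_floor() {
        let space = test_space();
        let encoding = ContinuousEncoding::uniform(&space).with_temperature(0.0);
        assert!(encoding.temperature >= 0.01);
    }
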
    #[test]
    fn test_continuous_to_discrete() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let continuous = ContinuousEncoding::random(&space, &mut rng);

        let discrete = continuous.to_discrete(&space);
        assert!(discrete.validate(&space).is_ok());
    }

    #[test]
    fn test_continuous_update() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let mut continuous = ContinuousEncoding::uniform(&space);
        let discrete = DiscreteEncoding::random(&space, &mut rng);

        continuous.update(&discrete, 1.0, 0.1);

        let depth_sum: f64 = continuous.depth_weights.iter().sum();
        assert!((depth_sum - 1.0).abs() < 1e-6);
    }

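    // A minimal sketch: the categorical sampler falls back to index 0 on
    // degenerate input (empty slice or all-zero mass) instead of panicking.
    #[test]
    fn test_sample_categorical_degenerate_input() {
        let mut rng = ChaCha8Rng::seed_from_u64(3);
        assert_eq!(ContinuousEncoding::sample_categorical(&[], &mut rng), 0);
        assert_eq!(ContinuousEncoding::sample_categorical(&[0.0, 0.0], &mut rng), 0);
    }
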
    #[test]
    fn test_architecture_embedding() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let encoding = DiscreteEncoding::random(&space, &mut rng);

        let embedding = ArchitectureEmbedding::from_discrete(&encoding, &space, 32).unwrap();
        assert_eq!(embedding.dimension, 32);
        assert_eq!(embedding.vector.len(), 32);
    }

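    // A minimal sketch: a freshly constructed embedding should be the zero
    // vector of the requested dimension.
    #[test]
    fn test_new_embedding_is_zeroed() {
        let embedding = ArchitectureEmbedding::new(8).unwrap();
        assert_eq!(embedding.vector.len(), 8);
        assert!(embedding.vector.iter().all(|&x| x == 0.0));
    }
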
    #[test]
    fn test_embedding_similarity() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let encoding1 = DiscreteEncoding::random(&space, &mut rng);
        let encoding2 = DiscreteEncoding::random(&space, &mut rng);

        let emb1 = ArchitectureEmbedding::from_discrete(&encoding1, &space, 32).unwrap();
        let emb2 = ArchitectureEmbedding::from_discrete(&encoding2, &space, 32).unwrap();

        let sim = emb1.cosine_similarity(&emb2);
        assert!(sim >= -1.0 && sim <= 1.0);

        let self_sim = emb1.cosine_similarity(&emb1);
        assert!((self_sim - 1.0).abs() < 1e-6);
    }

    #[test]
    fn test_embedding_distance() {
        let space = test_space();
        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let encoding = DiscreteEncoding::random(&space, &mut rng);

        let emb = ArchitectureEmbedding::from_discrete(&encoding, &space, 32).unwrap();

        let self_dist = emb.euclidean_distance(&emb);
        assert!(self_dist.abs() < 1e-10);
    }

    #[test]
    fn test_architecture_encoder() {
        let space = test_space();
        let encoder = ArchitectureEncoder::new(space.clone(), 32);

        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let encoding = DiscreteEncoding::random(&space, &mut rng);

        let operations = encoder.decode_to_operations(&encoding);
        assert_eq!(operations.len(), encoding.depth);
    }

    #[test]
    fn test_encoder_parameter_count() {
        let space = test_space();
        let encoder = ArchitectureEncoder::new(space.clone(), 32);

        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let encoding = DiscreteEncoding::random(&space, &mut rng);

        let param_count = encoder.count_parameters(&encoding);
        assert!(param_count > 0);
    }

    #[test]
    fn test_encoder_gate_counts() {
        let space = test_space();
        let encoder = ArchitectureEncoder::new(space.clone(), 32);

        let mut rng = ChaCha8Rng::seed_from_u64(42);
        let encoding = DiscreteEncoding::random(&space, &mut rng);

        let total_gates = encoder.count_gates(&encoding);
        let two_qubit_gates = encoder.count_two_qubit_gates(&encoding);

        assert!(total_gates >= two_qubit_gates);
    }

    #[test]
    fn test_invalid_embedding_dimension() {
        let result = ArchitectureEmbedding::new(0);
        assert!(result.is_err());
    }
}