//! Multi-chip parallel quantum annealing coordination.
//!
//! This module partitions large Ising problems across multiple quantum chips,
//! manages inter-chip communication and load balancing, and monitors
//! distributed execution performance.

use std::collections::{HashMap, HashSet, VecDeque};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::{Duration, Instant};

use crate::applications::{ApplicationError, ApplicationResult};
use crate::embedding::HardwareTopology;
use crate::ising::IsingModel;

/// Configuration for multi-chip parallel optimization.
#[derive(Debug, Clone)]
pub struct MultiChipConfig {
    /// Maximum number of chips to use for a single problem
    pub max_chips: usize,
    /// Minimum problem size (in variables) considered for distribution
    pub min_problem_size: usize,
    /// Maximum problem size (in variables) a single chip handles
    pub max_problem_size: usize,
    /// Strategy for balancing workload across chips
    pub load_balancing: LoadBalancingStrategy,
    /// Protocol used for inter-chip communication
    pub communication: CommunicationProtocol,
    /// Fault tolerance settings
    pub fault_tolerance: FaultToleranceConfig,
    /// Performance monitoring settings
    pub monitoring: MonitoringConfig,
    /// Overall timeout for distributed execution
    pub timeout: Duration,
}

impl Default for MultiChipConfig {
    fn default() -> Self {
        Self {
            max_chips: 4,
            min_problem_size: 100,
            max_problem_size: 2000,
            load_balancing: LoadBalancingStrategy::Dynamic,
            communication: CommunicationProtocol::Asynchronous,
            fault_tolerance: FaultToleranceConfig::default(),
            monitoring: MonitoringConfig::default(),
            timeout: Duration::from_secs(300),
        }
    }
}
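
// A minimal usage sketch (nothing beyond the types above is assumed):
// selected fields can be overridden while keeping the remaining defaults
// via struct-update syntax.
//
// let config = MultiChipConfig {
//     max_chips: 8,
//     load_balancing: LoadBalancingStrategy::ResourceAware,
//     ..MultiChipConfig::default()
// };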

/// Strategy for distributing work across chips.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum LoadBalancingStrategy {
    /// Split work equally across chips
    Equal,
    /// Rebalance dynamically based on observed load
    Dynamic,
    /// Weight assignments by available chip resources
    ResourceAware,
    /// Optimize assignments for the hardware topology
    TopologyOptimized,
}

/// Protocol for inter-chip communication.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CommunicationProtocol {
    /// Chips synchronize at fixed barriers
    Synchronous,
    /// Chips exchange messages without blocking
    Asynchronous,
    /// Explicit message passing between chips
    MessagePassing,
    /// Communication through shared memory
    SharedMemory,
}

/// Fault tolerance configuration for multi-chip execution.
#[derive(Debug, Clone)]
pub struct FaultToleranceConfig {
    /// Whether to run redundant copies of critical partitions
    pub enable_redundancy: bool,
    /// Number of chips held in reserve as backups
    pub backup_chips: usize,
    /// Maximum retries before a chip is considered failed
    pub max_retries: usize,
    /// Timeout for an individual chip to respond
    pub chip_timeout: Duration,
    /// Strategy used to recover from chip failures
    pub recovery_strategy: RecoveryStrategy,
}

impl Default for FaultToleranceConfig {
    fn default() -> Self {
        Self {
            enable_redundancy: true,
            backup_chips: 1,
            max_retries: 3,
            chip_timeout: Duration::from_secs(60),
            recovery_strategy: RecoveryStrategy::Failover,
        }
    }
}

/// Strategy for recovering from chip failures.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum RecoveryStrategy {
    /// Fail over to a backup chip
    Failover,
    /// Redistribute the failed chip's work among the remaining chips
    Redistribute,
    /// Restart the failed computation
    Restart,
    /// Continue with degraded capacity
    Degradation,
}

/// Performance monitoring configuration.
#[derive(Debug, Clone)]
pub struct MonitoringConfig {
    /// Whether monitoring is enabled
    pub enable_monitoring: bool,
    /// Interval between metric collections
    pub collection_interval: Duration,
    /// Whether to emit detailed logs
    pub detailed_logging: bool,
    /// Thresholds that flag degraded performance
    pub thresholds: PerformanceThresholds,
}

impl Default for MonitoringConfig {
    fn default() -> Self {
        Self {
            enable_monitoring: true,
            collection_interval: Duration::from_secs(10),
            detailed_logging: false,
            thresholds: PerformanceThresholds::default(),
        }
    }
}

/// Thresholds used to flag degraded performance.
#[derive(Debug, Clone)]
pub struct PerformanceThresholds {
    /// Maximum acceptable end-to-end latency
    pub max_latency: Duration,
    /// Minimum acceptable throughput (variables solved per second)
    pub min_throughput: f64,
    /// Maximum acceptable memory usage
    pub max_memory_usage: usize,
    /// Maximum acceptable CPU utilization (fraction of capacity)
    pub max_cpu_utilization: f64,
}

impl Default for PerformanceThresholds {
    fn default() -> Self {
        Self {
            max_latency: Duration::from_secs(120),
            min_throughput: 0.1,
            max_memory_usage: 1024,
            max_cpu_utilization: 0.9,
        }
    }
}

/// A single quantum chip in the multi-chip system.
#[derive(Debug, Clone)]
pub struct QuantumChip {
    /// Unique chip identifier
    pub id: String,
    /// Hardware topology of the chip
    pub topology: HardwareTopology,
    /// Current operational status
    pub status: ChipStatus,
    /// Most recent performance measurements
    pub performance: ChipPerformance,
    /// Work currently assigned to this chip, if any
    pub workload: Option<ChipWorkload>,
    /// Number of qubits currently available
    pub available_qubits: usize,
    /// Connection quality to other chips, keyed by chip ID
    pub connections: HashMap<String, f64>,
}

/// Operational status of a chip.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ChipStatus {
    /// Ready to accept work
    Available,
    /// Currently executing a workload
    Busy,
    /// Temporarily unreachable
    Unavailable,
    /// Undergoing maintenance
    Maintenance,
    /// Failed and requires recovery
    Failed,
}

/// Performance characteristics of a chip.
#[derive(Debug, Clone)]
pub struct ChipPerformance {
    /// Observed throughput
    pub throughput: f64,
    /// Typical time to complete a workload
    pub latency: Duration,
    /// Fraction of workloads completed successfully
    pub success_rate: f64,
    /// Average quality of returned solutions (0.0 to 1.0)
    pub solution_quality: f64,
    /// When these metrics were last updated
    pub last_update: Instant,
}

impl Default for ChipPerformance {
    fn default() -> Self {
        Self {
            throughput: 1.0,
            latency: Duration::from_secs(30),
            success_rate: 0.95,
            solution_quality: 0.8,
            last_update: Instant::now(),
        }
    }
}

/// Work currently assigned to a chip.
#[derive(Debug, Clone)]
pub struct ChipWorkload {
    /// Identifier of the partition being solved
    pub problem_id: String,
    /// Number of variables in the assigned subproblem
    pub num_variables: usize,
    /// When execution started
    pub start_time: Instant,
    /// Estimated completion time, if known
    pub estimated_completion: Option<Instant>,
    /// Progress toward completion (0.0 to 1.0)
    pub progress: f64,
}

/// A subproblem produced by partitioning a larger Ising model.
#[derive(Debug, Clone)]
pub struct ProblemPartition {
    /// Unique partition identifier
    pub id: String,
    /// Identifier of the problem this partition came from
    pub parent_problem_id: String,
    /// Original variable indices covered by this partition
    pub variables: Vec<usize>,
    /// Local Ising model restricted to this partition's variables
    pub local_model: IsingModel,
    /// IDs of partitions coupled to this one
    pub dependencies: Vec<String>,
    /// Scheduling priority
    pub priority: i32,
    /// Estimated solve time
    pub estimated_time: Duration,
}

/// Coordinates problem partitioning and execution across multiple chips.
#[derive(Debug)]
pub struct MultiChipCoordinator {
    /// Coordination configuration
    pub config: MultiChipConfig,
    /// Registry of known chips, keyed by chip ID
    pub chips: Arc<RwLock<HashMap<String, QuantumChip>>>,
    /// Active problem partitions, keyed by partition ID
    pub partitions: Arc<RwLock<HashMap<String, ProblemPartition>>>,
    /// Inter-chip communication channels, keyed by channel ID
    pub channels: Arc<Mutex<HashMap<String, CommunicationChannel>>>,
    /// System-wide performance monitor
    pub monitor: Arc<Mutex<PerformanceMonitor>>,
    /// Load balancer for redistributing work
    pub load_balancer: Arc<Mutex<LoadBalancer>>,
}

/// A point-to-point communication channel between two chips.
#[derive(Debug)]
pub struct CommunicationChannel {
    /// Unique channel identifier
    pub id: String,
    /// Source chip ID
    pub source: String,
    /// Target chip ID
    pub target: String,
    /// Queue of pending messages
    pub message_queue: VecDeque<Message>,
    /// Current connection status
    pub status: ConnectionStatus,
    /// Channel bandwidth
    pub bandwidth: f64,
    /// Channel latency
    pub latency: Duration,
}

/// A message exchanged between chips.
#[derive(Debug, Clone)]
pub struct Message {
    /// Unique message identifier
    pub id: String,
    /// Kind of message
    pub message_type: MessageType,
    /// Serialized message contents
    pub payload: Vec<u8>,
    /// When the message was created
    pub timestamp: Instant,
    /// Delivery priority (higher is more urgent)
    pub priority: u8,
}

/// Kinds of inter-chip messages.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MessageType {
    /// Assign a partition to a chip
    WorkAssignment,
    /// Partial solution from a chip
    PartialResult,
    /// Chip status report
    StatusUpdate,
    /// Error notification
    Error,
    /// Synchronization signal
    Sync,
    /// Request for additional resources
    ResourceRequest,
}

/// Status of a communication channel.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ConnectionStatus {
    /// Channel is operating normally
    Active,
    /// Channel is temporarily disconnected
    Disconnected,
    /// Channel has failed
    Failed,
    /// Channel is under maintenance
    Maintenance,
}

/// Tracks system-wide and per-chip performance.
#[derive(Debug)]
pub struct PerformanceMonitor {
    /// Aggregate metrics for the whole system
    pub system_metrics: SystemMetrics,
    /// Per-chip metrics, keyed by chip ID
    pub chip_metrics: HashMap<String, ChipMetrics>,
    /// Historical snapshots for trend analysis
    pub history: VecDeque<PerformanceSnapshot>,
    /// Alert thresholds
    pub thresholds: PerformanceThresholds,
}

/// Aggregate performance metrics for the whole system.
#[derive(Debug, Clone)]
pub struct SystemMetrics {
    /// Total throughput (variables solved per second)
    pub total_throughput: f64,
    /// Average end-to-end latency
    pub average_latency: Duration,
    /// Number of chips currently active
    pub active_chips: usize,
    /// Total memory in use
    pub total_memory: usize,
    /// Overall success rate
    pub success_rate: f64,
    /// How evenly load is spread across chips
    pub load_balance_factor: f64,
}

/// Performance metrics for a single chip.
#[derive(Debug, Clone)]
pub struct ChipMetrics {
    /// Chip identifier
    pub chip_id: String,
    /// Current load (fraction of capacity)
    pub current_load: f64,
    /// Number of queued work items
    pub queue_size: usize,
    /// Observed error rate
    pub error_rate: f64,
    /// Resource utilization breakdown
    pub resource_utilization: ResourceUtilization,
}

/// Resource utilization on a chip.
#[derive(Debug, Clone)]
pub struct ResourceUtilization {
    /// CPU utilization (fraction of capacity)
    pub cpu: f64,
    /// Memory in use
    pub memory: usize,
    /// Network utilization (fraction of capacity)
    pub network: f64,
    /// Fraction of qubits in use
    pub qubits: f64,
}

/// A point-in-time snapshot of system and chip metrics.
#[derive(Debug, Clone)]
pub struct PerformanceSnapshot {
    /// When the snapshot was taken
    pub timestamp: Instant,
    /// System-wide metrics at that time
    pub system_metrics: SystemMetrics,
    /// Per-chip metrics at that time
    pub chip_metrics: HashMap<String, ChipMetrics>,
}

/// Balances workload across chips according to a strategy.
#[derive(Debug)]
pub struct LoadBalancer {
    /// Active balancing strategy
    pub strategy: LoadBalancingStrategy,
    /// Current workload per chip, keyed by chip ID
    pub workloads: HashMap<String, f64>,
    /// Recent performance samples per chip
    pub performance_history: HashMap<String, VecDeque<f64>>,
    /// Log of balancing decisions
    pub decisions: VecDeque<LoadBalancingDecision>,
}

/// A single load balancing decision.
#[derive(Debug, Clone)]
pub struct LoadBalancingDecision {
    /// When the decision was made
    pub timestamp: Instant,
    /// Chip work was taken from
    pub source_chip: String,
    /// Chip work was moved to
    pub target_chip: String,
    /// Description of the transferred work
    pub work_transfer: WorkTransfer,
    /// Human-readable reason for the decision
    pub reason: String,
}

/// Work moved between chips by the load balancer.
#[derive(Debug, Clone)]
pub struct WorkTransfer {
    /// Partition being transferred
    pub partition_id: String,
    /// Expected transfer duration
    pub transfer_time: Duration,
    /// Transfer priority
    pub priority: u8,
}

impl MultiChipCoordinator {
    /// Create a new coordinator with the given configuration.
    #[must_use]
    pub fn new(config: MultiChipConfig) -> Self {
        Self {
            config: config.clone(),
            chips: Arc::new(RwLock::new(HashMap::new())),
            partitions: Arc::new(RwLock::new(HashMap::new())),
            channels: Arc::new(Mutex::new(HashMap::new())),
            monitor: Arc::new(Mutex::new(PerformanceMonitor::new())),
            load_balancer: Arc::new(Mutex::new(LoadBalancer::new(config.load_balancing))),
        }
    }

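    /// Register a quantum chip with the coordinator and open communication
    /// channels between it and every previously registered chip.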
    pub fn register_chip(&self, chip: QuantumChip) -> ApplicationResult<()> {
        let chip_id = chip.id.clone();
        let mut chips = self.chips.write().map_err(|_| {
            ApplicationError::OptimizationError("Failed to acquire chip registry lock".to_string())
        })?;

        chips.insert(chip_id.clone(), chip);

        // Open a channel between the new chip and every existing chip
        for existing_chip_id in chips.keys() {
            if existing_chip_id != &chip_id {
                self.create_communication_channel(&chip_id, existing_chip_id)?;
            }
        }

        println!("Registered quantum chip: {chip_id}");
        Ok(())
    }

    /// Create a point-to-point communication channel between two chips.
    fn create_communication_channel(&self, chip1: &str, chip2: &str) -> ApplicationResult<()> {
        let channel_id = format!("{chip1}_{chip2}");
        let channel = CommunicationChannel {
            id: channel_id.clone(),
            source: chip1.to_string(),
            target: chip2.to_string(),
            message_queue: VecDeque::new(),
            status: ConnectionStatus::Active,
            bandwidth: 100.0,
            latency: Duration::from_millis(10),
        };

        let mut channels = self.channels.lock().map_err(|_| {
            ApplicationError::OptimizationError("Failed to acquire channel lock".to_string())
        })?;

        channels.insert(channel_id, channel);
        Ok(())
    }

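    /// Distribute an Ising problem across the available chips: pick a chip
    /// count, partition the problem, select and assign chips, and set up
    /// inter-chip communication. Returns the IDs of the selected chips.
    ///
    /// A minimal usage sketch (paths assumed, not verified against the crate
    /// root):
    ///
    /// ```ignore
    /// let coordinator = create_example_multi_chip_system()?;
    /// let problem = IsingModel::new(200);
    /// let chips = coordinator.distribute_problem(&problem)?;
    /// ```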
    pub fn distribute_problem(&self, problem: &IsingModel) -> ApplicationResult<Vec<String>> {
        println!("Starting multi-chip problem distribution");

        // Step 1: decide how many chips to use
        let problem_size = problem.num_qubits;
        let optimal_chips = self.calculate_optimal_chip_count(problem_size)?;

        // Step 2: partition the problem
        let partitions = self.partition_problem(problem, optimal_chips)?;

        // Step 3: select chips for execution
        let selected_chips = self.select_chips(&partitions)?;

        // Step 4: assign partitions to the selected chips
        self.assign_partitions_to_chips(&partitions, &selected_chips)?;

        // Step 5: set up inter-chip communication
        self.initialize_inter_chip_communication(&selected_chips)?;

        println!("Problem distributed to {} chips", selected_chips.len());
        Ok(selected_chips)
    }

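    /// Choose how many chips to use for a problem of the given size: the
    /// ceiling of `problem_size / max_problem_size`, capped by the number of
    /// available chips and `max_chips`, with a floor of one. For example,
    /// with the default `max_problem_size` of 2000, a 4500-variable problem
    /// asks for ceil(4500 / 2000) = 3 chips.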
    fn calculate_optimal_chip_count(&self, problem_size: usize) -> ApplicationResult<usize> {
        let chips = self.chips.read().map_err(|_| {
            ApplicationError::OptimizationError("Failed to read chip registry".to_string())
        })?;

        let available_chips = chips
            .values()
            .filter(|chip| chip.status == ChipStatus::Available)
            .count();

        // Ceiling division: enough chips that no partition exceeds the
        // per-chip size limit
        let chips_needed = problem_size.div_ceil(self.config.max_problem_size);
        let optimal_chips = chips_needed.min(available_chips).min(self.config.max_chips);

        Ok(optimal_chips.max(1))
    }

    fn partition_problem(
        &self,
        problem: &IsingModel,
        num_partitions: usize,
    ) -> ApplicationResult<Vec<ProblemPartition>> {
        let mut partitions = Vec::new();
        let variables_per_partition = problem.num_qubits.div_ceil(num_partitions);

        for i in 0..num_partitions {
            let start_var = i * variables_per_partition;
            let end_var = ((i + 1) * variables_per_partition).min(problem.num_qubits);

            if start_var >= end_var {
                break;
            }

            let variables: Vec<usize> = (start_var..end_var).collect();
            let local_model = self.extract_subproblem(problem, &variables)?;

            let partition = ProblemPartition {
                id: format!("partition_{i}"),
                parent_problem_id: "main_problem".to_string(),
                variables,
                local_model,
                dependencies: Vec::new(),
                priority: 0,
                estimated_time: Duration::from_secs(60),
            };

            partitions.push(partition);
        }

        // Record which partitions are coupled to each other
        self.analyze_partition_dependencies(&mut partitions, problem)?;

        Ok(partitions)
    }

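    /// Build the Ising model restricted to the given variables, remapping
    /// original variable indices to a dense local index space (0..n) and
    /// keeping only biases and couplings that fall entirely inside the
    /// partition. Couplings that cross partition boundaries are dropped here
    /// and tracked separately as partition dependencies.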
    fn extract_subproblem(
        &self,
        problem: &IsingModel,
        variables: &[usize],
    ) -> ApplicationResult<IsingModel> {
        let num_vars = variables.len();
        let mut subproblem = IsingModel::new(num_vars);

        // Map original variable indices to local subproblem indices
        let var_map: HashMap<usize, usize> = variables
            .iter()
            .enumerate()
            .map(|(i, &var)| (var, i))
            .collect();

        // Copy biases for the variables in this partition
        let biases = problem.biases();
        for (qubit_index, bias_value) in biases {
            if let Some(&local_index) = var_map.get(&qubit_index) {
                subproblem.set_bias(local_index, bias_value)?;
            }
        }

        // Copy couplings whose endpoints both lie inside this partition
        let couplings = problem.couplings();
        for coupling in &couplings {
            if coupling.strength == 0.0 {
                continue;
            }
            if let (Some(&local_i), Some(&local_j)) =
                (var_map.get(&coupling.i), var_map.get(&coupling.j))
            {
                subproblem.set_coupling(local_i, local_j, coupling.strength)?;
            }
        }

        Ok(subproblem)
    }

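    /// Mark pairs of partitions as mutually dependent when the original
    /// problem contains a nonzero coupling that crosses between them.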
    fn analyze_partition_dependencies(
        &self,
        partitions: &mut [ProblemPartition],
        problem: &IsingModel,
    ) -> ApplicationResult<()> {
        for i in 0..partitions.len() {
            for j in (i + 1)..partitions.len() {
                let has_coupling =
                    self.check_partition_coupling(&partitions[i], &partitions[j], problem)?;

                if has_coupling {
                    let id_i = partitions[i].id.clone();
                    let id_j = partitions[j].id.clone();
                    partitions[i].dependencies.push(id_j);
                    partitions[j].dependencies.push(id_i);
                }
            }
        }

        Ok(())
    }

    /// Check whether any nonzero coupling links a variable in `partition1`
    /// to a variable in `partition2`.
    fn check_partition_coupling(
        &self,
        partition1: &ProblemPartition,
        partition2: &ProblemPartition,
        problem: &IsingModel,
    ) -> ApplicationResult<bool> {
        // Hash the variable sets so each coupling is checked in O(1)
        let set1: HashSet<usize> = partition1.variables.iter().copied().collect();
        let set2: HashSet<usize> = partition2.variables.iter().copied().collect();

        let couplings = problem.couplings();
        for coupling in &couplings {
            if coupling.strength == 0.0 {
                continue;
            }
            if (set1.contains(&coupling.i) && set2.contains(&coupling.j))
                || (set1.contains(&coupling.j) && set2.contains(&coupling.i))
            {
                return Ok(true);
            }
        }

        Ok(false)
    }

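    /// Select up to one available chip per partition, preferring chips with
    /// the highest measured throughput.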
    fn select_chips(&self, partitions: &[ProblemPartition]) -> ApplicationResult<Vec<String>> {
        let chips = self.chips.read().map_err(|_| {
            ApplicationError::OptimizationError("Failed to read chip registry".to_string())
        })?;

        let mut available_chips: Vec<_> = chips
            .values()
            .filter(|chip| chip.status == ChipStatus::Available)
            .collect();

        // Prefer chips with the highest throughput
        available_chips.sort_by(|a, b| {
            b.performance
                .throughput
                .total_cmp(&a.performance.throughput)
        });

        let num_chips_needed = partitions.len().min(available_chips.len());
        let selected_chips: Vec<String> = available_chips[..num_chips_needed]
            .iter()
            .map(|chip| chip.id.clone())
            .collect();

        if selected_chips.is_empty() {
            return Err(ApplicationError::ResourceLimitExceeded(
                "No available chips for execution".to_string(),
            ));
        }

        Ok(selected_chips)
    }

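    /// Assign partitions to chips round-robin, marking each chip busy and
    /// recording its workload, then store the partitions in the registry.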
    fn assign_partitions_to_chips(
        &self,
        partitions: &[ProblemPartition],
        chips: &[String],
    ) -> ApplicationResult<()> {
        let mut partitions_map = self.partitions.write().map_err(|_| {
            ApplicationError::OptimizationError("Failed to acquire partitions lock".to_string())
        })?;

        let mut chips_map = self.chips.write().map_err(|_| {
            ApplicationError::OptimizationError("Failed to acquire chips lock".to_string())
        })?;

        // Round-robin assignment of partitions to chips
        for (i, partition) in partitions.iter().enumerate() {
            let chip_id = &chips[i % chips.len()];

            if let Some(chip) = chips_map.get_mut(chip_id) {
                chip.status = ChipStatus::Busy;
                chip.workload = Some(ChipWorkload {
                    problem_id: partition.id.clone(),
                    num_variables: partition.variables.len(),
                    start_time: Instant::now(),
                    estimated_completion: Some(Instant::now() + partition.estimated_time),
                    progress: 0.0,
                });
            }

            partitions_map.insert(partition.id.clone(), partition.clone());
        }

        Ok(())
    }

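    /// Ensure every pair of participating chips has a channel, then broadcast
    /// a synchronization message so all chips start from a common point.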
    fn initialize_inter_chip_communication(&self, chips: &[String]) -> ApplicationResult<()> {
        // Make sure a channel exists for every pair of participating chips
        for i in 0..chips.len() {
            for j in (i + 1)..chips.len() {
                self.create_communication_channel(&chips[i], &chips[j])?;
            }
        }

        // Announce the start of distributed execution
        self.send_sync_messages(chips)?;

        Ok(())
    }

    fn send_sync_messages(&self, chips: &[String]) -> ApplicationResult<()> {
        let mut channels = self.channels.lock().map_err(|_| {
            ApplicationError::OptimizationError("Failed to acquire channels lock".to_string())
        })?;

        for chip_id in chips {
            let message = Message {
                id: format!("sync_{chip_id}"),
                message_type: MessageType::Sync,
                payload: Vec::new(),
                timestamp: Instant::now(),
                priority: 255, // Highest priority
            };

            // Queue the sync message on every channel touching this chip
            for channel in channels.values_mut() {
                if channel.source == *chip_id || channel.target == *chip_id {
                    channel.message_queue.push_back(message.clone());
                }
            }
        }

        Ok(())
    }

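    /// Execute the previously distributed partitions on the given chips and
    /// aggregate the per-chip solutions into one solution vector.
    ///
    /// A minimal usage sketch, continuing from `distribute_problem`:
    ///
    /// ```ignore
    /// let chips = coordinator.distribute_problem(&problem)?;
    /// let solution = coordinator.execute_distributed(&chips)?;
    /// ```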
    pub fn execute_distributed(&self, chips: &[String]) -> ApplicationResult<Vec<i32>> {
        println!("Starting distributed execution on {} chips", chips.len());

        let start_time = Instant::now();

        self.start_performance_monitoring()?;

        let results = self.execute_on_chips(chips)?;

        let final_result = self.aggregate_results(&results)?;

        let execution_time = start_time.elapsed();
        self.collect_execution_metrics(execution_time, &final_result)?;

        println!("Distributed execution completed in {execution_time:?}");
        Ok(final_result)
    }

    fn execute_on_chips(&self, chips: &[String]) -> ApplicationResult<HashMap<String, Vec<i32>>> {
        let mut results = HashMap::new();

        // Sequential placeholder; a production version would run chips in parallel
        for chip_id in chips {
            let result = self.execute_on_single_chip(chip_id)?;
            results.insert(chip_id.clone(), result);
        }

        Ok(results)
    }

    fn execute_on_single_chip(&self, chip_id: &str) -> ApplicationResult<Vec<i32>> {
        // Simulate chip execution time
        thread::sleep(Duration::from_millis(100));

        let partitions = self.partitions.read().map_err(|_| {
            ApplicationError::OptimizationError("Failed to read partitions".to_string())
        })?;

        // Placeholder: return an alternating spin configuration for the
        // first registered partition
        if let Some(partition) = partitions.values().next() {
            let solution_size = partition.variables.len();
            let solution: Vec<i32> = (0..solution_size)
                .map(|i| if i % 2 == 0 { -1 } else { 1 })
                .collect();

            println!("Chip {chip_id} produced a solution of {solution_size} variables");
            return Ok(solution);
        }

        Ok(vec![])
    }

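    /// Combine per-chip results into a single solution, ordered by partition
    /// ID. Matching results to partitions by length is a placeholder; a full
    /// implementation would track which chip solved which partition.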
    fn aggregate_results(
        &self,
        results: &HashMap<String, Vec<i32>>,
    ) -> ApplicationResult<Vec<i32>> {
        let mut final_solution = Vec::new();

        let partitions = self.partitions.read().map_err(|_| {
            ApplicationError::OptimizationError("Failed to read partitions".to_string())
        })?;

        // Process partitions in a deterministic order
        let mut sorted_partitions: Vec<_> = partitions.values().collect();
        sorted_partitions.sort_by(|a, b| a.id.cmp(&b.id));

        for partition in sorted_partitions {
            // Placeholder matching: take the first result whose length fits
            for result in results.values() {
                if result.len() == partition.variables.len() {
                    final_solution.extend_from_slice(result);
                    break;
                }
            }
        }

        Ok(final_solution)
    }

    fn start_performance_monitoring(&self) -> ApplicationResult<()> {
        let monitor = self.monitor.lock().map_err(|_| {
            ApplicationError::OptimizationError("Failed to acquire monitor lock".to_string())
        })?;

        monitor.start_monitoring();
        Ok(())
    }

    fn collect_execution_metrics(
        &self,
        execution_time: Duration,
        solution: &[i32],
    ) -> ApplicationResult<()> {
        let mut monitor = self.monitor.lock().map_err(|_| {
            ApplicationError::OptimizationError("Failed to acquire monitor lock".to_string())
        })?;

        monitor.record_execution(execution_time, solution.len());
        Ok(())
    }

    /// Return a snapshot of the current system-wide metrics.
    pub fn get_performance_metrics(&self) -> ApplicationResult<SystemMetrics> {
        let monitor = self.monitor.lock().map_err(|_| {
            ApplicationError::OptimizationError("Failed to acquire monitor lock".to_string())
        })?;

        Ok(monitor.system_metrics.clone())
    }
}

impl PerformanceMonitor {
    fn new() -> Self {
        Self {
            system_metrics: SystemMetrics {
                total_throughput: 0.0,
                average_latency: Duration::from_secs(0),
                active_chips: 0,
                total_memory: 0,
                success_rate: 1.0,
                load_balance_factor: 1.0,
            },
            chip_metrics: HashMap::new(),
            history: VecDeque::new(),
            thresholds: PerformanceThresholds::default(),
        }
    }

    fn start_monitoring(&self) {
        println!("Performance monitoring started");
    }

    fn record_execution(&mut self, execution_time: Duration, solution_size: usize) {
        self.system_metrics.total_throughput = solution_size as f64 / execution_time.as_secs_f64();
        self.system_metrics.average_latency = execution_time;

        println!("Recorded execution: {solution_size} variables in {execution_time:?}");
    }
}

impl LoadBalancer {
    fn new(strategy: LoadBalancingStrategy) -> Self {
        Self {
            strategy,
            workloads: HashMap::new(),
            performance_history: HashMap::new(),
            decisions: VecDeque::new(),
        }
    }
}

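/// Build an example coordinator with four pre-registered chips using the
/// default configuration.
///
/// A minimal usage sketch (paths assumed, not verified against the crate
/// root):
///
/// ```ignore
/// let coordinator = create_example_multi_chip_system()?;
/// let metrics = coordinator.get_performance_metrics()?;
/// ```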
pub fn create_example_multi_chip_system() -> ApplicationResult<MultiChipCoordinator> {
    let config = MultiChipConfig::default();
    let coordinator = MultiChipCoordinator::new(config);

    // Register four example chips with staggered qubit counts
    for i in 0..4 {
        let chip = QuantumChip {
            id: format!("chip_{i}"),
            topology: HardwareTopology::Pegasus(16),
            status: ChipStatus::Available,
            performance: ChipPerformance::default(),
            workload: None,
            available_qubits: 1000 + i * 200,
            connections: HashMap::new(),
        };

        coordinator.register_chip(chip)?;
    }

    Ok(coordinator)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_multi_chip_config() {
        let config = MultiChipConfig::default();
        assert_eq!(config.max_chips, 4);
        assert_eq!(config.load_balancing, LoadBalancingStrategy::Dynamic);
        assert_eq!(config.communication, CommunicationProtocol::Asynchronous);
    }

    #[test]
    fn test_coordinator_creation() {
        let config = MultiChipConfig::default();
        let coordinator = MultiChipCoordinator::new(config);

        let chips = coordinator
            .chips
            .read()
            .expect("failed to acquire read lock in test");
        assert!(chips.is_empty());
    }

    #[test]
    fn test_chip_registration() {
        let coordinator =
            create_example_multi_chip_system().expect("failed to create multi-chip system in test");

        let chips = coordinator
            .chips
            .read()
            .expect("failed to acquire read lock in test");
        assert_eq!(chips.len(), 4);

        for i in 0..4 {
            let chip_id = format!("chip_{i}");
            assert!(chips.contains_key(&chip_id));
            assert_eq!(chips[&chip_id].status, ChipStatus::Available);
        }
    }

    #[test]
    fn test_problem_distribution() {
        let coordinator =
            create_example_multi_chip_system().expect("failed to create multi-chip system in test");

        let problem = IsingModel::new(200);

        let result = coordinator.distribute_problem(&problem);
        assert!(result.is_ok());

        let selected_chips = result.expect("failed to distribute problem in test");
        assert!(!selected_chips.is_empty());
        assert!(selected_chips.len() <= 4);
    }

    #[test]
    fn test_performance_monitoring() {
        let coordinator =
            create_example_multi_chip_system().expect("failed to create multi-chip system in test");

        let result = coordinator.start_performance_monitoring();
        assert!(result.is_ok());

        let metrics = coordinator
            .get_performance_metrics()
            .expect("failed to get performance metrics in test");
        assert_eq!(metrics.total_throughput, 0.0);
        assert_eq!(metrics.active_chips, 0);
    }

    #[test]
    fn test_fault_tolerance_config() {
        let fault_config = FaultToleranceConfig::default();
        assert!(fault_config.enable_redundancy);
        assert_eq!(fault_config.backup_chips, 1);
        assert_eq!(fault_config.max_retries, 3);
        assert_eq!(fault_config.recovery_strategy, RecoveryStrategy::Failover);
    }
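
    /// End-to-end sketch: distribute a small problem, then run the
    /// placeholder executor and check that a solution comes back. This leans
    /// on the mock single-chip execution above, so it only verifies the
    /// plumbing, not solution quality.
    #[test]
    fn test_distributed_execution() {
        let coordinator =
            create_example_multi_chip_system().expect("failed to create multi-chip system in test");

        let problem = IsingModel::new(150);
        let chips = coordinator
            .distribute_problem(&problem)
            .expect("failed to distribute problem in test");

        let solution = coordinator
            .execute_distributed(&chips)
            .expect("failed to execute distributed problem in test");
        assert!(!solution.is_empty());
    }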
}