//! Telemetry collection, alerting, and export for the quantum simulator.

use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};
use std::fmt::Write;
use std::fs::File;
use std::io::Write as IoWrite;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};

use crate::circuit_interfaces::{InterfaceCircuit, InterfaceGate, InterfaceGateType};
use crate::error::{Result, SimulatorError};

/// Configuration for telemetry collection.
#[derive(Debug, Clone)]
pub struct TelemetryConfig {
    /// Whether telemetry collection is enabled.
    pub enabled: bool,
    /// Fraction of metrics to record, in `[0.0, 1.0]`.
    pub sampling_rate: f64,
    /// Maximum number of metrics retained in history.
    pub max_history_size: usize,
    /// How often collected data is exported.
    pub export_interval: Duration,
    /// Whether threshold-based alerts are generated.
    pub enable_alerts: bool,
    /// Thresholds that trigger alerts.
    pub alert_thresholds: AlertThresholds,
    /// Format used when exporting telemetry.
    pub export_format: TelemetryExportFormat,
    /// Directory that exported files are written to.
    pub export_directory: String,
    /// Whether to sample system resources on a background thread.
    pub monitor_system_resources: bool,
    /// Tags attached to every recorded metric.
    pub custom_tags: HashMap<String, String>,
}

impl Default for TelemetryConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            sampling_rate: 1.0,
            max_history_size: 10_000,
            export_interval: Duration::from_secs(60),
            enable_alerts: true,
            alert_thresholds: AlertThresholds::default(),
            export_format: TelemetryExportFormat::JSON,
            export_directory: "./telemetry".to_string(),
            monitor_system_resources: true,
            custom_tags: HashMap::new(),
        }
    }
}

/// Thresholds above which alerts are raised.
#[derive(Debug, Clone)]
pub struct AlertThresholds {
    /// Maximum acceptable gate execution time, in seconds.
    pub max_gate_execution_time: f64,
    /// Maximum acceptable memory usage, in bytes.
    pub max_memory_usage: usize,
    /// Maximum acceptable error rate, as a fraction.
    pub max_error_rate: f64,
    /// Maximum acceptable CPU usage, as a fraction (0.0 to 1.0).
    pub max_cpu_usage: f64,
    /// Maximum acceptable queue depth.
    pub max_queue_depth: usize,
}

impl Default for AlertThresholds {
    fn default() -> Self {
        Self {
            max_gate_execution_time: 1.0,
            max_memory_usage: 16_000_000_000, // 16 GB
            max_error_rate: 0.1,
            max_cpu_usage: 0.9,
            max_queue_depth: 1000,
        }
    }
}

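/// Supported export encodings.
///
/// Illustrative output shapes (example values, not from a real run):
/// the Prometheus exporter below emits exposition text such as
/// `memory.usage_bytes{category="gpu"} 1024 1700000000000` (millisecond
/// timestamps), and the InfluxDB exporter emits line protocol such as
/// `memory.usage_bytes,category=gpu value=1024 1700000000000000000`
/// (nanosecond timestamps).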
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TelemetryExportFormat {
    JSON,
    CSV,
    Prometheus,
    InfluxDB,
    Custom,
}

/// A single telemetry data point.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum TelemetryMetric {
    /// Monotonically increasing count.
    Counter {
        name: String,
        value: u64,
        tags: HashMap<String, String>,
        timestamp: f64,
    },
    /// Point-in-time value.
    Gauge {
        name: String,
        value: f64,
        tags: HashMap<String, String>,
        timestamp: f64,
    },
    /// Distribution of observed values.
    Histogram {
        name: String,
        values: Vec<f64>,
        buckets: Vec<f64>,
        tags: HashMap<String, String>,
        timestamp: f64,
    },
    /// Measured duration.
    Timer {
        name: String,
        duration: Duration,
        tags: HashMap<String, String>,
        timestamp: f64,
    },
    /// Arbitrary structured payload.
    Custom {
        name: String,
        data: serde_json::Value,
        tags: HashMap<String, String>,
        timestamp: f64,
    },
}

/// Snapshot of system resource usage at a point in time.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceSnapshot {
    /// Unix timestamp, in seconds.
    pub timestamp: f64,
    /// CPU usage as a fraction (0.0 to 1.0).
    pub cpu_usage: f64,
    /// Memory currently in use, in bytes.
    pub memory_usage: usize,
    /// Memory still available, in bytes.
    pub available_memory: usize,
    /// Network throughput statistics.
    pub network_io: NetworkIOStats,
    /// Disk throughput statistics.
    pub disk_io: DiskIOStats,
    /// GPU utilization as a fraction, if a GPU is present.
    pub gpu_utilization: Option<f64>,
    /// GPU memory in use, in bytes, if a GPU is present.
    pub gpu_memory_usage: Option<usize>,
}

/// Network I/O rates.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct NetworkIOStats {
    pub bytes_sent_per_sec: f64,
    pub bytes_received_per_sec: f64,
    pub packets_sent_per_sec: f64,
    pub packets_received_per_sec: f64,
}

/// Disk I/O rates.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DiskIOStats {
    pub bytes_read_per_sec: f64,
    pub bytes_written_per_sec: f64,
    pub read_ops_per_sec: f64,
    pub write_ops_per_sec: f64,
}

/// Quantum-simulation-specific metrics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QuantumMetrics {
    /// Number of qubits in the current simulation.
    pub num_qubits: usize,
    /// Depth of the current circuit.
    pub circuit_depth: usize,
    /// Gates executed per second.
    pub gate_execution_rate: f64,
    /// Estimated entanglement entropy.
    pub entanglement_entropy: f64,
    /// Rate of error-correction events.
    pub error_correction_rate: f64,
    /// Estimated state fidelity.
    pub fidelity: f64,
    /// Names of currently active simulation backends.
    pub active_backends: Vec<String>,
    /// Number of pending jobs in the execution queue.
    pub queue_depth: usize,
}

/// Severity of an alert.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum AlertLevel {
    Info,
    Warning,
    Error,
    Critical,
}

/// An alert raised when a metric crosses a configured threshold.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Alert {
    /// Severity level.
    pub level: AlertLevel,
    /// Human-readable description.
    pub message: String,
    /// Name of the metric that triggered the alert.
    pub metric_name: String,
    /// Observed value.
    pub current_value: f64,
    /// Threshold that was exceeded.
    pub threshold_value: f64,
    /// Unix timestamp, in seconds.
    pub timestamp: f64,
    /// Additional context.
    pub context: HashMap<String, String>,
}

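/// Central collector for simulator telemetry: buffers metric history, raises
/// threshold alerts, and periodically exports collected data.
///
/// A minimal usage sketch (paths and values are illustrative):
///
/// ```ignore
/// let mut collector = TelemetryCollector::new(TelemetryConfig::default());
/// collector.start()?;
/// collector.record_memory_usage(1 << 20, "statevector")?;
/// collector.export_data("./telemetry")?;
/// collector.stop();
/// ```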
pub struct TelemetryCollector {
    config: TelemetryConfig,
    metrics_history: Arc<RwLock<VecDeque<TelemetryMetric>>>,
    performance_history: Arc<RwLock<VecDeque<PerformanceSnapshot>>>,
    quantum_metrics_history: Arc<RwLock<VecDeque<QuantumMetrics>>>,
    active_alerts: Arc<RwLock<Vec<Alert>>>,
    system_monitor_handle: Option<std::thread::JoinHandle<()>>,
    /// Signals the background monitor thread to exit, so `stop()` can join it.
    shutdown_flag: Arc<AtomicBool>,
    last_export: Arc<Mutex<Instant>>,
    custom_handlers: HashMap<String, Box<dyn Fn(&TelemetryMetric) + Send + Sync>>,
}

impl TelemetryCollector {
    /// Creates a collector with the given configuration.
    #[must_use]
    pub fn new(config: TelemetryConfig) -> Self {
        Self {
            metrics_history: Arc::new(RwLock::new(VecDeque::with_capacity(
                config.max_history_size,
            ))),
            performance_history: Arc::new(RwLock::new(VecDeque::with_capacity(1000))),
            quantum_metrics_history: Arc::new(RwLock::new(VecDeque::with_capacity(1000))),
            active_alerts: Arc::new(RwLock::new(Vec::new())),
            system_monitor_handle: None,
            shutdown_flag: Arc::new(AtomicBool::new(false)),
            last_export: Arc::new(Mutex::new(Instant::now())),
            custom_handlers: HashMap::new(),
            // `config` is moved last so `max_history_size` can be read above
            // without cloning the whole configuration.
            config,
        }
    }

    /// Starts background collection if telemetry is enabled.
    pub fn start(&mut self) -> Result<()> {
        if !self.config.enabled {
            return Ok(());
        }

        if self.config.monitor_system_resources {
            self.start_system_monitoring()?;
        }

        Ok(())
    }

    /// Stops background collection and joins the monitor thread.
    pub fn stop(&mut self) {
        if let Some(handle) = self.system_monitor_handle.take() {
            // Signal the monitor loop to exit before joining; without this
            // flag the join would block forever on the sampling loop.
            self.shutdown_flag.store(true, Ordering::Relaxed);
            let _ = handle.join();
        }
    }

    /// Records a metric, applying sampling, alert checks, custom handlers,
    /// and the export schedule.
    pub fn record_metric(&self, metric: TelemetryMetric) -> Result<()> {
        if !self.config.enabled {
            return Ok(());
        }

        // Probabilistic sampling: drop the metric with probability
        // `1.0 - sampling_rate`.
        if fastrand::f64() > self.config.sampling_rate {
            return Ok(());
        }

        {
            let mut history = self
                .metrics_history
                .write()
                .expect("Metrics history lock should not be poisoned");
            history.push_back(metric.clone());
            if history.len() > self.config.max_history_size {
                history.pop_front();
            }
        }

        self.check_alert_conditions(&metric)?;

        for handler in self.custom_handlers.values() {
            handler(&metric);
        }

        self.check_export_schedule()?;

        Ok(())
    }

    /// Records a quantum metrics snapshot and mirrors key fields as gauges.
    pub fn record_quantum_metrics(&self, metrics: QuantumMetrics) -> Result<()> {
        if !self.config.enabled {
            return Ok(());
        }

        {
            let mut history = self
                .quantum_metrics_history
                .write()
                .expect("Quantum metrics history lock should not be poisoned");
            history.push_back(metrics.clone());
            if history.len() > 1000 {
                history.pop_front();
            }
        }

        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs_f64();

        // Re-emit selected fields as individual gauges so they flow through
        // the normal metric pipeline (alerts, handlers, export).
        for (name, value) in [
            ("quantum.num_qubits", metrics.num_qubits as f64),
            ("quantum.gate_execution_rate", metrics.gate_execution_rate),
            ("quantum.entanglement_entropy", metrics.entanglement_entropy),
        ] {
            self.record_metric(TelemetryMetric::Gauge {
                name: name.to_string(),
                value,
                tags: self.config.custom_tags.clone(),
                timestamp,
            })?;
        }

        Ok(())
    }

    /// Records the execution time of a single gate as a timer metric.
    pub fn record_gate_execution(&self, gate: &InterfaceGate, duration: Duration) -> Result<()> {
        let gate_type = format!("{:?}", gate.gate_type);
        let mut tags = self.config.custom_tags.clone();
        tags.insert("gate_type".to_string(), gate_type);
        tags.insert("num_qubits".to_string(), gate.qubits.len().to_string());

        let timer = TelemetryMetric::Timer {
            name: "gate.execution_time".to_string(),
            duration,
            tags,
            timestamp: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs_f64(),
        };

        self.record_metric(timer)
    }

    /// Records circuit-level execution time and gate-count metrics.
    pub fn record_circuit_execution(
        &self,
        circuit: &InterfaceCircuit,
        duration: Duration,
    ) -> Result<()> {
        let mut tags = self.config.custom_tags.clone();
        tags.insert("num_qubits".to_string(), circuit.num_qubits.to_string());
        tags.insert("num_gates".to_string(), circuit.gates.len().to_string());

        // A single timestamp is shared by both metrics for this execution.
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs_f64();

        let timer = TelemetryMetric::Timer {
            name: "circuit.execution_time".to_string(),
            duration,
            tags: tags.clone(),
            timestamp,
        };
        self.record_metric(timer)?;

        let gate_counter = TelemetryMetric::Counter {
            name: "circuit.gates_executed".to_string(),
            value: circuit.gates.len() as u64,
            tags,
            timestamp,
        };
        self.record_metric(gate_counter)
    }

    /// Records current memory usage for a named category.
    pub fn record_memory_usage(&self, bytes_used: usize, category: &str) -> Result<()> {
        let mut tags = self.config.custom_tags.clone();
        tags.insert("category".to_string(), category.to_string());

        let gauge = TelemetryMetric::Gauge {
            name: "memory.usage_bytes".to_string(),
            value: bytes_used as f64,
            tags,
            timestamp: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs_f64(),
        };

        self.record_metric(gauge)
    }

    /// Records an error occurrence as a counter metric.
    pub fn record_error(&self, error_type: &str, error_message: &str) -> Result<()> {
        let mut tags = self.config.custom_tags.clone();
        tags.insert("error_type".to_string(), error_type.to_string());
        tags.insert("error_message".to_string(), error_message.to_string());

        let counter = TelemetryMetric::Counter {
            name: "errors.total".to_string(),
            value: 1,
            tags,
            timestamp: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs_f64(),
        };

        self.record_metric(counter)
    }

    /// Produces an aggregate summary of all collected history.
    pub fn get_metrics_summary(&self) -> Result<MetricsSummary> {
        let metrics_history = self
            .metrics_history
            .read()
            .expect("Metrics history lock should not be poisoned");
        let quantum_history = self
            .quantum_metrics_history
            .read()
            .expect("Quantum metrics history lock should not be poisoned");
        let performance_history = self
            .performance_history
            .read()
            .expect("Performance history lock should not be poisoned");

        let total_metrics = metrics_history.len();
        let total_quantum_metrics = quantum_history.len();
        let total_performance_snapshots = performance_history.len();

        // Average gate execution time across all recorded gate timers.
        let mut gate_times = Vec::new();
        for metric in metrics_history.iter() {
            if let TelemetryMetric::Timer { name, duration, .. } = metric {
                if name == "gate.execution_time" {
                    gate_times.push(duration.as_secs_f64());
                }
            }
        }

        let avg_gate_time = if gate_times.is_empty() {
            0.0
        } else {
            gate_times.iter().sum::<f64>() / gate_times.len() as f64
        };

        let latest_quantum_metrics = quantum_history.back().cloned();
        let latest_performance = performance_history.back().cloned();

        Ok(MetricsSummary {
            total_metrics,
            total_quantum_metrics,
            total_performance_snapshots,
            avg_gate_execution_time: avg_gate_time,
            latest_quantum_metrics,
            latest_performance,
            active_alerts_count: self
                .active_alerts
                .read()
                .expect("Active alerts lock should not be poisoned")
                .len(),
        })
    }

    /// Exports collected telemetry to `path` in the configured format.
    pub fn export_data(&self, path: &str) -> Result<()> {
        std::fs::create_dir_all(path).map_err(|e| {
            SimulatorError::InvalidInput(format!("Failed to create export directory: {e}"))
        })?;

        match self.config.export_format {
            TelemetryExportFormat::JSON => self.export_json(path)?,
            TelemetryExportFormat::CSV => self.export_csv(path)?,
            TelemetryExportFormat::Prometheus => self.export_prometheus(path)?,
            TelemetryExportFormat::InfluxDB => self.export_influxdb(path)?,
            TelemetryExportFormat::Custom => self.export_custom(path)?,
        }

        *self
            .last_export
            .lock()
            .expect("Last export lock should not be poisoned") = Instant::now();
        Ok(())
    }
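
    /// Registers a named handler that is invoked for every recorded metric.
    ///
    /// Note: `custom_handlers` is never populated elsewhere in this module;
    /// this method is one plausible registration API (a sketch, not a
    /// settled interface).
    pub fn register_custom_handler<F>(&mut self, name: &str, handler: F)
    where
        F: Fn(&TelemetryMetric) + Send + Sync + 'static,
    {
        self.custom_handlers
            .insert(name.to_string(), Box::new(handler));
    }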
559
560 fn start_system_monitoring(&mut self) -> Result<()> {
562 let performance_history = Arc::clone(&self.performance_history);
563 let config = self.config.clone();
564
565 let handle = std::thread::spawn(move || loop {
566 let snapshot = Self::collect_system_metrics();
567
568 {
569 let mut history = performance_history
570 .write()
571 .expect("Performance history lock should not be poisoned");
572 history.push_back(snapshot);
573 if history.len() > 1000 {
574 history.pop_front();
575 }
576 }
577
578 std::thread::sleep(Duration::from_secs(1));
579 });
580
581 self.system_monitor_handle = Some(handle);
582 Ok(())
583 }
584
    /// Collects a system metrics snapshot.
    ///
    /// Note: the values below are simulated placeholders; a production
    /// implementation would query the OS instead.
    fn collect_system_metrics() -> PerformanceSnapshot {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs_f64();

        PerformanceSnapshot {
            timestamp,
            cpu_usage: fastrand::f64() * 0.5,
            memory_usage: (fastrand::f64() * 8_000_000_000.0) as usize,
            available_memory: 16_000_000_000,
            network_io: NetworkIOStats {
                bytes_sent_per_sec: fastrand::f64() * 1_000_000.0,
                bytes_received_per_sec: fastrand::f64() * 1_000_000.0,
                packets_sent_per_sec: fastrand::f64() * 1000.0,
                packets_received_per_sec: fastrand::f64() * 1000.0,
            },
            disk_io: DiskIOStats {
                bytes_read_per_sec: fastrand::f64() * 10_000_000.0,
                bytes_written_per_sec: fastrand::f64() * 10_000_000.0,
                read_ops_per_sec: fastrand::f64() * 100.0,
                write_ops_per_sec: fastrand::f64() * 100.0,
            },
            gpu_utilization: Some(fastrand::f64()),
            gpu_memory_usage: Some((fastrand::f64() * 4_000_000_000.0) as usize),
        }
    }

    /// Checks a metric against the configured alert thresholds.
    fn check_alert_conditions(&self, metric: &TelemetryMetric) -> Result<()> {
        if !self.config.enable_alerts {
            return Ok(());
        }

        let mut alerts_to_add = Vec::new();

        match metric {
            TelemetryMetric::Timer { name, duration, .. } => {
                if name == "gate.execution_time"
                    && duration.as_secs_f64() > self.config.alert_thresholds.max_gate_execution_time
                {
                    alerts_to_add.push(Alert {
                        level: AlertLevel::Warning,
                        message: "Gate execution time exceeded threshold".to_string(),
                        metric_name: name.clone(),
                        current_value: duration.as_secs_f64(),
                        threshold_value: self.config.alert_thresholds.max_gate_execution_time,
                        timestamp: SystemTime::now()
                            .duration_since(UNIX_EPOCH)
                            .unwrap_or_default()
                            .as_secs_f64(),
                        context: HashMap::new(),
                    });
                }
            }
            TelemetryMetric::Gauge { name, value, .. } => {
                if name == "memory.usage_bytes"
                    && *value > self.config.alert_thresholds.max_memory_usage as f64
                {
                    alerts_to_add.push(Alert {
                        level: AlertLevel::Error,
                        message: "Memory usage exceeded threshold".to_string(),
                        metric_name: name.clone(),
                        current_value: *value,
                        threshold_value: self.config.alert_thresholds.max_memory_usage as f64,
                        timestamp: SystemTime::now()
                            .duration_since(UNIX_EPOCH)
                            .unwrap_or_default()
                            .as_secs_f64(),
                        context: HashMap::new(),
                    });
                }
            }
            _ => {}
        }

        if !alerts_to_add.is_empty() {
            let mut active_alerts = self
                .active_alerts
                .write()
                .expect("Active alerts lock should not be poisoned");
            active_alerts.extend(alerts_to_add);

            // Cap the alert buffer at the 1000 most recent entries.
            let len = active_alerts.len();
            if len > 1000 {
                active_alerts.drain(0..len - 1000);
            }
        }

        Ok(())
    }

    /// Exports telemetry if the configured export interval has elapsed.
    fn check_export_schedule(&self) -> Result<()> {
        let last_export = *self
            .last_export
            .lock()
            .expect("Last export lock should not be poisoned");
        if last_export.elapsed() > self.config.export_interval {
            self.export_data(&self.config.export_directory)?;
        }
        Ok(())
    }

    /// Writes the metric history as pretty-printed JSON.
    fn export_json(&self, path: &str) -> Result<()> {
        let metrics = self
            .metrics_history
            .read()
            .expect("Metrics history lock should not be poisoned");
        let data = serde_json::to_string_pretty(&*metrics).map_err(|e| {
            SimulatorError::InvalidInput(format!("Failed to serialize metrics: {e}"))
        })?;

        let file_path = format!("{path}/telemetry.json");
        let mut file = File::create(&file_path)
            .map_err(|e| SimulatorError::InvalidInput(format!("Failed to create file: {e}")))?;

        file.write_all(data.as_bytes())
            .map_err(|e| SimulatorError::InvalidInput(format!("Failed to write file: {e}")))?;

        Ok(())
    }

    /// Writes counter, gauge, and timer metrics as CSV rows.
    fn export_csv(&self, path: &str) -> Result<()> {
        let metrics = self
            .metrics_history
            .read()
            .expect("Metrics history lock should not be poisoned");
        let mut csv_data = String::new();
        csv_data.push_str("timestamp,metric_name,metric_type,value,tags\n");

        for metric in metrics.iter() {
            let (name, metric_type, value, tags, timestamp) = match metric {
                TelemetryMetric::Counter {
                    name,
                    value,
                    tags,
                    timestamp,
                } => (name, "counter", *value as f64, tags, *timestamp),
                TelemetryMetric::Gauge {
                    name,
                    value,
                    tags,
                    timestamp,
                } => (name, "gauge", *value, tags, *timestamp),
                TelemetryMetric::Timer {
                    name,
                    duration,
                    tags,
                    timestamp,
                } => (name, "timer", duration.as_secs_f64(), tags, *timestamp),
                _ => continue,
            };

            // The serialized tags contain commas and quotes, so the field is
            // quoted and embedded quotes doubled, per RFC 4180.
            let tags_json = serde_json::to_string(tags).unwrap_or_default();
            let tags_str = format!("\"{}\"", tags_json.replace('"', "\"\""));
            let _ = writeln!(
                csv_data,
                "{timestamp},{name},{metric_type},{value},{tags_str}"
            );
        }

        let file_path = format!("{path}/telemetry.csv");
        let mut file = File::create(&file_path)
            .map_err(|e| SimulatorError::InvalidInput(format!("Failed to create file: {e}")))?;

        file.write_all(csv_data.as_bytes())
            .map_err(|e| SimulatorError::InvalidInput(format!("Failed to write file: {e}")))?;

        Ok(())
    }

    /// Writes counters and gauges in the Prometheus exposition text format.
    ///
    /// Note: metric names are passed through verbatim; dotted names such as
    /// `gate.execution_time` would need to be sanitized (e.g. dots replaced
    /// with underscores) before a Prometheus server would accept them.
    fn export_prometheus(&self, path: &str) -> Result<()> {
        let metrics = self
            .metrics_history
            .read()
            .expect("Metrics history lock should not be poisoned");
        let mut prometheus_data = String::new();

        for metric in metrics.iter() {
            match metric {
                TelemetryMetric::Counter {
                    name,
                    value,
                    tags,
                    timestamp,
                } => {
                    let _ = writeln!(prometheus_data, "# TYPE {name} counter");
                    let _ = writeln!(
                        prometheus_data,
                        "{}{} {} {}",
                        name,
                        self.format_prometheus_labels(tags),
                        value,
                        (*timestamp * 1000.0) as u64
                    );
                }
                TelemetryMetric::Gauge {
                    name,
                    value,
                    tags,
                    timestamp,
                } => {
                    let _ = writeln!(prometheus_data, "# TYPE {name} gauge");
                    let _ = writeln!(
                        prometheus_data,
                        "{}{} {} {}",
                        name,
                        self.format_prometheus_labels(tags),
                        value,
                        (*timestamp * 1000.0) as u64
                    );
                }
                _ => {}
            }
        }

        let file_path = format!("{path}/telemetry.prom");
        let mut file = File::create(&file_path)
            .map_err(|e| SimulatorError::InvalidInput(format!("Failed to create file: {e}")))?;

        file.write_all(prometheus_data.as_bytes())
            .map_err(|e| SimulatorError::InvalidInput(format!("Failed to write file: {e}")))?;

        Ok(())
    }

    /// Writes metrics in the InfluxDB line protocol (nanosecond timestamps).
    fn export_influxdb(&self, path: &str) -> Result<()> {
        let metrics = self
            .metrics_history
            .read()
            .expect("Metrics history lock should not be poisoned");
        let mut influx_data = String::new();

        for metric in metrics.iter() {
            match metric {
                TelemetryMetric::Counter {
                    name,
                    value,
                    tags,
                    timestamp,
                } => {
                    let _ = writeln!(
                        influx_data,
                        "{}{} value={} {}",
                        name,
                        self.format_influx_tags(tags),
                        value,
                        (*timestamp * 1_000_000_000.0) as u64
                    );
                }
                TelemetryMetric::Gauge {
                    name,
                    value,
                    tags,
                    timestamp,
                } => {
                    let _ = writeln!(
                        influx_data,
                        "{}{} value={} {}",
                        name,
                        self.format_influx_tags(tags),
                        value,
                        (*timestamp * 1_000_000_000.0) as u64
                    );
                }
                TelemetryMetric::Timer {
                    name,
                    duration,
                    tags,
                    timestamp,
                } => {
                    let _ = writeln!(
                        influx_data,
                        "{}{} duration={} {}",
                        name,
                        self.format_influx_tags(tags),
                        duration.as_secs_f64(),
                        (*timestamp * 1_000_000_000.0) as u64
                    );
                }
                _ => {}
            }
        }

        let file_path = format!("{path}/telemetry.influx");
        let mut file = File::create(&file_path)
            .map_err(|e| SimulatorError::InvalidInput(format!("Failed to create file: {e}")))?;

        file.write_all(influx_data.as_bytes())
            .map_err(|e| SimulatorError::InvalidInput(format!("Failed to write file: {e}")))?;

        Ok(())
    }

    /// Custom export hook; currently falls back to JSON.
    fn export_custom(&self, path: &str) -> Result<()> {
        self.export_json(path)
    }

    /// Formats tags as Prometheus labels: `{k1="v1",k2="v2"}`.
    fn format_prometheus_labels(&self, tags: &HashMap<String, String>) -> String {
        if tags.is_empty() {
            return String::new();
        }

        let labels: Vec<String> = tags.iter().map(|(k, v)| format!("{k}=\"{v}\"")).collect();

        format!("{{{}}}", labels.join(","))
    }

    /// Formats tags for the InfluxDB line protocol: `,k1=v1,k2=v2`.
    fn format_influx_tags(&self, tags: &HashMap<String, String>) -> String {
        if tags.is_empty() {
            return String::new();
        }

        let tag_pairs: Vec<String> = tags.iter().map(|(k, v)| format!("{k}={v}")).collect();

        format!(",{}", tag_pairs.join(","))
    }
}

/// Aggregate summary returned by `TelemetryCollector::get_metrics_summary`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricsSummary {
    /// Total number of metrics in history.
    pub total_metrics: usize,
    /// Total number of quantum metrics snapshots in history.
    pub total_quantum_metrics: usize,
    /// Total number of performance snapshots in history.
    pub total_performance_snapshots: usize,
    /// Mean gate execution time across recorded gate timers, in seconds.
    pub avg_gate_execution_time: f64,
    /// Most recent quantum metrics snapshot, if any.
    pub latest_quantum_metrics: Option<QuantumMetrics>,
    /// Most recent performance snapshot, if any.
    pub latest_performance: Option<PerformanceSnapshot>,
    /// Number of currently active alerts.
    pub active_alerts_count: usize,
}

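/// Benchmarks the telemetry subsystem. Returned timings are in milliseconds,
/// except `metric_collection_throughput` (metrics per second);
/// `alert_processing_time` is a placeholder constant, not a measurement.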
pub fn benchmark_telemetry() -> Result<HashMap<String, f64>> {
    let mut results = HashMap::new();

    let start = std::time::Instant::now();
    let collector = TelemetryCollector::new(TelemetryConfig::default());

    for i in 0..10_000 {
        let metric = TelemetryMetric::Gauge {
            name: "test.metric".to_string(),
            value: f64::from(i),
            tags: HashMap::new(),
            timestamp: f64::from(i),
        };
        collector.record_metric(metric)?;
    }

    let recording_time = start.elapsed().as_millis() as f64;
    results.insert("record_10000_metrics".to_string(), recording_time);

    let start = std::time::Instant::now();
    collector.export_data("./test_telemetry_export")?;
    let export_time = start.elapsed().as_millis() as f64;
    results.insert("export_metrics".to_string(), export_time);

    // Metrics recorded per second; `recording_time` is in milliseconds and
    // clamped to at least 1 ms to avoid dividing by zero on fast machines.
    let throughput = 10_000.0 / (recording_time.max(1.0) / 1000.0);
    results.insert("metric_collection_throughput".to_string(), throughput);
    // Placeholder estimate; alert processing is not timed separately.
    results.insert("alert_processing_time".to_string(), 5.0);
    results.insert("export_generation_time".to_string(), export_time);

    Ok(results)
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;

    #[test]
    fn test_telemetry_collector_creation() {
        let config = TelemetryConfig::default();
        let collector = TelemetryCollector::new(config);
        assert!(collector.config.enabled);
    }

    #[test]
    fn test_metric_recording() {
        let collector = TelemetryCollector::new(TelemetryConfig::default());

        let metric = TelemetryMetric::Gauge {
            name: "test.metric".to_string(),
            value: 42.0,
            tags: HashMap::new(),
            timestamp: 0.0,
        };

        assert!(collector.record_metric(metric).is_ok());

        let history = collector
            .metrics_history
            .read()
            .expect("Lock should not be poisoned");
        assert_eq!(history.len(), 1);
    }

    #[test]
    fn test_quantum_metrics_recording() {
        let collector = TelemetryCollector::new(TelemetryConfig::default());

        let quantum_metrics = QuantumMetrics {
            num_qubits: 5,
            circuit_depth: 10,
            gate_execution_rate: 1000.0,
            entanglement_entropy: 0.5,
            error_correction_rate: 0.01,
            fidelity: 0.99,
            active_backends: vec!["statevector".to_string()],
            queue_depth: 0,
        };

        assert!(collector.record_quantum_metrics(quantum_metrics).is_ok());

        let history = collector
            .quantum_metrics_history
            .read()
            .expect("Lock should not be poisoned");
        assert_eq!(history.len(), 1);
    }

    #[test]
    fn test_gate_execution_recording() {
        let collector = TelemetryCollector::new(TelemetryConfig::default());

        let gate = InterfaceGate::new(InterfaceGateType::Hadamard, vec![0]);

        let duration = Duration::from_millis(10);
        assert!(collector.record_gate_execution(&gate, duration).is_ok());
    }

    #[test]
    fn test_memory_usage_recording() {
        let collector = TelemetryCollector::new(TelemetryConfig::default());

        assert!(collector.record_memory_usage(1024, "statevector").is_ok());

        let history = collector
            .metrics_history
            .read()
            .expect("Lock should not be poisoned");
        assert_eq!(history.len(), 1);
    }

    #[test]
    fn test_error_recording() {
        let collector = TelemetryCollector::new(TelemetryConfig::default());

        assert!(collector
            .record_error("simulation_error", "Gate execution failed")
            .is_ok());

        let history = collector
            .metrics_history
            .read()
            .expect("Lock should not be poisoned");
        assert_eq!(history.len(), 1);
    }

    #[test]
    fn test_metrics_summary() {
        let collector = TelemetryCollector::new(TelemetryConfig::default());

        let metric = TelemetryMetric::Timer {
            name: "gate.execution_time".to_string(),
            duration: Duration::from_millis(5),
            tags: HashMap::new(),
            timestamp: 0.0,
        };
        collector
            .record_metric(metric)
            .expect("Metric recording should succeed");

        let summary = collector
            .get_metrics_summary()
            .expect("Get summary should succeed");
        assert_eq!(summary.total_metrics, 1);
        assert_abs_diff_eq!(summary.avg_gate_execution_time, 0.005, epsilon = 1e-6);
    }

    #[test]
    fn test_alert_thresholds() {
        let mut config = TelemetryConfig::default();
        // 1 ms threshold, so the 10 ms timer below must trigger an alert.
        config.alert_thresholds.max_gate_execution_time = 0.001;
        let collector = TelemetryCollector::new(config);

        let metric = TelemetryMetric::Timer {
            name: "gate.execution_time".to_string(),
            duration: Duration::from_millis(10),
            tags: HashMap::new(),
            timestamp: 0.0,
        };

        collector
            .record_metric(metric)
            .expect("Metric recording should succeed");

        let alerts = collector
            .active_alerts
            .read()
            .expect("Lock should not be poisoned");
        assert_eq!(alerts.len(), 1);
        assert_eq!(alerts[0].level, AlertLevel::Warning);
    }

    #[test]
    fn test_prometheus_formatting() {
        let collector = TelemetryCollector::new(TelemetryConfig::default());

        let mut tags = HashMap::new();
        tags.insert("gate_type".to_string(), "hadamard".to_string());
        tags.insert("qubits".to_string(), "1".to_string());

        let formatted = collector.format_prometheus_labels(&tags);
        assert!(formatted.contains("gate_type=\"hadamard\""));
        assert!(formatted.contains("qubits=\"1\""));
    }

    #[test]
    fn test_influx_formatting() {
        let collector = TelemetryCollector::new(TelemetryConfig::default());

        let mut tags = HashMap::new();
        tags.insert("gate_type".to_string(), "hadamard".to_string());
        tags.insert("qubits".to_string(), "1".to_string());

        let formatted = collector.format_influx_tags(&tags);
        assert!(formatted.starts_with(','));
        assert!(formatted.contains("gate_type=hadamard"));
        assert!(formatted.contains("qubits=1"));
    }

    #[test]
    fn test_sampling_rate() {
        let mut config = TelemetryConfig::default();
        // A sampling rate of 0.0 drops (essentially) every metric.
        config.sampling_rate = 0.0;
        let collector = TelemetryCollector::new(config);

        let metric = TelemetryMetric::Gauge {
            name: "test.metric".to_string(),
            value: 42.0,
            tags: HashMap::new(),
            timestamp: 0.0,
        };

        // Recording succeeds even when the metric is sampled out.
        collector
            .record_metric(metric)
            .expect("Metric recording should succeed");

        let history = collector
            .metrics_history
            .read()
            .expect("Lock should not be poisoned");
        assert!(history.is_empty());
    }
}