#![allow(dead_code)]
use crate::profiling::get_global_profiler;
use crate::{TorshDistributedError, TorshResult};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant, SystemTime};
/// Point-in-time snapshot of host resource usage.
///
/// NOTE(review): values are currently produced by the placeholder
/// `MetricsCollector::collect_system_metrics`, which returns fixed numbers;
/// only `timestamp` reflects reality until a real probe is wired in.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SystemMetrics {
/// CPU utilization, percent.
pub cpu_usage_pct: f64,
/// Memory currently in use, bytes.
pub memory_usage_bytes: u64,
/// Memory available, bytes.
pub memory_available_bytes: u64,
/// Memory utilization, percent.
pub memory_usage_pct: f64,
/// GPU memory in use, bytes; `None` when no GPU is tracked.
pub gpu_memory_usage_bytes: Option<u64>,
/// Total GPU memory, bytes; `None` when no GPU is tracked.
pub gpu_memory_total_bytes: Option<u64>,
/// GPU utilization, percent; `None` when no GPU is tracked.
pub gpu_usage_pct: Option<f64>,
/// Network bytes received.
pub network_bytes_rx: u64,
/// Network bytes transmitted.
pub network_bytes_tx: u64,
/// Bytes read from disk.
pub disk_bytes_read: u64,
/// Bytes written to disk.
pub disk_bytes_write: u64,
/// When this snapshot was taken.
pub timestamp: SystemTime,
/// Extra scirs2 profiling measurements (e.g. memory bandwidth/latency
/// estimates; see `collect_scirs2_system_metrics`).
#[cfg(feature = "scirs2-profiling")]
pub scirs2_profile: Option<HashMap<String, f64>>,
/// Extra scirs2 memory-profiling measurements.
#[cfg(feature = "scirs2-profiling")]
pub memory_profile: Option<HashMap<String, u64>>,
}
/// All-zero / `None` metrics stamped with the current time.
impl Default for SystemMetrics {
fn default() -> Self {
Self {
cpu_usage_pct: 0.0,
memory_usage_bytes: 0,
memory_available_bytes: 0,
memory_usage_pct: 0.0,
gpu_memory_usage_bytes: None,
gpu_memory_total_bytes: None,
gpu_usage_pct: None,
network_bytes_rx: 0,
network_bytes_tx: 0,
disk_bytes_read: 0,
disk_bytes_write: 0,
timestamp: SystemTime::now(),
#[cfg(feature = "scirs2-profiling")]
scirs2_profile: None,
#[cfg(feature = "scirs2-profiling")]
memory_profile: None,
}
}
}
/// Aggregated communication-layer statistics derived from the global
/// profiler (see `MetricsCollector::collect_communication_metrics`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CommunicationMetrics {
/// Total number of communication operations observed.
pub total_operations: u64,
/// Total payload bytes across all operations.
pub total_bytes: u64,
/// Mean latency per operation, milliseconds.
pub avg_latency_ms: f64,
/// Mean bandwidth, MB/s (averaged across operation kinds).
pub avg_bandwidth_mbps: f64,
/// Communication efficiency estimate (1.0 = ideal); currently a fixed
/// placeholder in the collector.
pub efficiency_ratio: f64,
/// Ratio of communication to compute time; currently a fixed placeholder.
pub communication_compute_ratio: f64,
/// Number of failed operations, as reported by the profiler.
pub failed_operations: u64,
/// Operation throughput, operations per second.
pub ops_per_second: f64,
/// When these metrics were computed.
pub timestamp: SystemTime,
}
/// Zeroed counters; `efficiency_ratio` defaults to the ideal 1.0.
impl Default for CommunicationMetrics {
fn default() -> Self {
Self {
total_operations: 0,
total_bytes: 0,
avg_latency_ms: 0.0,
avg_bandwidth_mbps: 0.0,
efficiency_ratio: 1.0,
communication_compute_ratio: 0.0,
failed_operations: 0,
ops_per_second: 0.0,
timestamp: SystemTime::now(),
}
}
}
/// Progress and throughput of a training run.
///
/// The background collector only produces defaults for this struct; real
/// values are supplied by callers via
/// `MetricsCollector::update_training_metrics`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrainingMetrics {
/// Epoch currently being trained.
pub current_epoch: u32,
/// Step counter within the run.
pub current_step: u64,
/// Most recent training loss, if reported.
pub training_loss: Option<f64>,
/// Most recent validation loss, if reported.
pub validation_loss: Option<f64>,
/// Current learning rate, if reported.
pub learning_rate: Option<f64>,
/// Norm of the most recent gradient, if reported.
pub gradient_norm: Option<f64>,
/// Training throughput, samples per second.
pub samples_per_second: f64,
/// Total number of model parameters.
pub model_parameters: u64,
/// Mean wall-clock time per step, milliseconds.
pub time_per_step_ms: f64,
/// Estimated seconds remaining, if computable.
pub eta_seconds: Option<u64>,
/// When these metrics were recorded.
pub timestamp: SystemTime,
}
/// Zeroed progress counters with all optional readings unset.
impl Default for TrainingMetrics {
fn default() -> Self {
Self {
current_epoch: 0,
current_step: 0,
training_loss: None,
validation_loss: None,
learning_rate: None,
gradient_norm: None,
samples_per_second: 0.0,
model_parameters: 0,
time_per_step_ms: 0.0,
eta_seconds: None,
timestamp: SystemTime::now(),
}
}
}
/// Combined system + communication + training snapshot recorded by the
/// collector on each collection pass.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceMetrics {
/// Host resource readings.
pub system: SystemMetrics,
/// Communication-layer statistics.
pub communication: CommunicationMetrics,
/// Training-run progress.
pub training: TrainingMetrics,
/// When the combined snapshot was assembled.
pub timestamp: SystemTime,
}
/// Defaults of all three sub-structs, stamped with the current time.
impl Default for PerformanceMetrics {
fn default() -> Self {
Self {
system: SystemMetrics::default(),
communication: CommunicationMetrics::default(),
training: TrainingMetrics::default(),
timestamp: SystemTime::now(),
}
}
}
/// Tuning knobs for the metrics collector.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricsConfig {
/// Master switch; when false, `start_collection` is a no-op. Default: true.
pub enabled: bool,
/// Seconds between background collection passes. Default: 5.
pub collection_interval_secs: u64,
/// Maximum points retained per history buffer. Default: 1000.
pub max_snapshots: usize,
/// Collect host resource metrics in the background pass. Default: true.
pub collect_system_metrics: bool,
/// Collect communication metrics in the background pass. Default: true.
pub collect_communication_metrics: bool,
/// Collect training metrics in the background pass. Default: true.
pub collect_training_metrics: bool,
/// Enable periodic export. Default: false.
/// NOTE(review): not referenced by the visible code — confirm before use.
pub enable_export: bool,
/// Seconds between exports when enabled. Default: 60.
/// NOTE(review): not referenced by the visible code — confirm before use.
pub export_interval_secs: u64,
}
impl Default for MetricsConfig {
fn default() -> Self {
Self {
enabled: true,
collection_interval_secs: 5,
max_snapshots: 1000,
collect_system_metrics: true,
collect_communication_metrics: true,
collect_training_metrics: true,
enable_export: false,
export_interval_secs: 60,
}
}
}
/// A single timestamped sample in a [`TimeSeries`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimeSeriesPoint<T> {
/// When the sample was recorded.
pub timestamp: SystemTime,
/// The sampled value.
pub value: T,
}
impl<T> TimeSeriesPoint<T> {
/// Wraps `value` with the current wall-clock time.
pub fn new(value: T) -> Self {
Self {
timestamp: SystemTime::now(),
value,
}
}
}
/// Bounded, append-only series of timestamped samples.
///
/// Once `max_size` points are stored, appending a new one evicts the
/// oldest (FIFO), keeping memory use constant.
#[derive(Debug)]
pub struct TimeSeries<T> {
    points: VecDeque<TimeSeriesPoint<T>>,
    max_size: usize,
}
impl<T> TimeSeries<T> {
    /// Creates an empty series that retains at most `max_size` points.
    pub fn new(max_size: usize) -> Self {
        Self {
            max_size,
            points: VecDeque::new(),
        }
    }
    /// Appends `value` stamped with the current time, evicting the oldest
    /// point whenever the capacity limit is exceeded.
    pub fn add_point(&mut self, value: T) {
        self.points.push_back(TimeSeriesPoint::new(value));
        while self.points.len() > self.max_size {
            self.points.pop_front();
        }
    }
    /// Borrows the underlying buffer, oldest point first.
    pub fn get_points(&self) -> &VecDeque<TimeSeriesPoint<T>> {
        &self.points
    }
    /// Returns the most recently appended point, if any.
    pub fn latest(&self) -> Option<&TimeSeriesPoint<T>> {
        self.points.back()
    }
    /// Number of points currently stored.
    pub fn len(&self) -> usize {
        self.points.len()
    }
    /// True when no points have been recorded.
    pub fn is_empty(&self) -> bool {
        self.points.is_empty()
    }
}
impl<T: Clone> TimeSeries<T> {
    /// Returns clones of all points whose timestamp lies in `[start, end]`
    /// (both bounds inclusive), oldest first.
    pub fn get_range(&self, start: SystemTime, end: SystemTime) -> Vec<TimeSeriesPoint<T>> {
        self.points
            .iter()
            .filter(|point| point.timestamp >= start && point.timestamp <= end)
            .cloned()
            .collect()
    }
    /// Returns clones of the last `n` points in chronological order; all
    /// points are returned when `n` exceeds the current length.
    ///
    /// Implemented as a single forward pass (skip the first `len - n`
    /// points) instead of the previous rev/collect/rev round-trip, which
    /// allocated an intermediate Vec.
    pub fn get_last_n(&self, n: usize) -> Vec<TimeSeriesPoint<T>> {
        let skip = self.points.len().saturating_sub(n);
        self.points.iter().skip(skip).cloned().collect()
    }
}
/// Collects and retains performance metrics for a distributed run.
///
/// Histories are bounded buffers sized by `MetricsConfig::max_snapshots`.
///
/// NOTE(review): `start_collection` spawns a thread that writes into
/// freshly allocated `Arc<Mutex<_>>` buffers rather than these fields, so
/// background samples never reach the getters; sharing would require these
/// history fields to be `Arc`-wrapped and cloned into the thread.
/// `last_collection` is never written by the visible code.
pub struct MetricsCollector {
/// Active configuration, read at collection start.
config: RwLock<MetricsConfig>,
/// Combined snapshots (fed only via direct update paths — see NOTE).
metrics_history: Mutex<TimeSeries<PerformanceMetrics>>,
/// System-only snapshots.
system_history: Mutex<TimeSeries<SystemMetrics>>,
/// Communication-only snapshots.
communication_history: Mutex<TimeSeries<CommunicationMetrics>>,
/// Training snapshots, fed by `update_training_metrics`.
training_history: Mutex<TimeSeries<TrainingMetrics>>,
/// Time of the last collection pass (currently never set).
last_collection: Mutex<Option<Instant>>,
/// Handle to the background collection thread, if running.
collection_thread: Mutex<Option<std::thread::JoinHandle<()>>>,
/// Shared flag telling the background thread to exit.
shutdown: Arc<Mutex<bool>>,
/// User-defined named metric values.
custom_metrics: RwLock<HashMap<String, f64>>,
}
impl MetricsCollector {
/// Creates a collector with the default [`MetricsConfig`].
pub fn new() -> Self {
    Self::with_config(MetricsConfig::default())
}
/// Creates a collector using `config`; every history buffer is sized to
/// `config.max_snapshots`.
pub fn with_config(config: MetricsConfig) -> Self {
    let capacity = config.max_snapshots;
    Self {
        config: RwLock::new(config),
        metrics_history: Mutex::new(TimeSeries::new(capacity)),
        system_history: Mutex::new(TimeSeries::new(capacity)),
        communication_history: Mutex::new(TimeSeries::new(capacity)),
        training_history: Mutex::new(TimeSeries::new(capacity)),
        last_collection: Mutex::new(None),
        collection_thread: Mutex::new(None),
        shutdown: Arc::new(Mutex::new(false)),
        custom_metrics: RwLock::new(HashMap::new()),
    }
}
/// Starts the background collection thread; returns immediately without
/// spawning when `config.enabled` is false.
///
/// BUG(review): the spawned thread pushes every sample into the four
/// `Arc<Mutex<TimeSeries>>` buffers allocated locally below — NOT into
/// `self.*_history`. Those buffers are owned solely by the thread, so all
/// data it collects is unreachable from the getters and dropped when the
/// thread exits. Fixing this requires the collector's history fields to be
/// `Arc<Mutex<_>>` so they can be cloned into the closure.
pub fn start_collection(&self) -> TorshResult<()> {
let config = self
.config
.read()
.map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?;
if !config.enabled {
return Ok(());
}
// Snapshot everything the thread needs, then release the config lock
// before spawning.
let interval = Duration::from_secs(config.collection_interval_secs);
let collect_system = config.collect_system_metrics;
let collect_communication = config.collect_communication_metrics;
let collect_training = config.collect_training_metrics;
drop(config);
let shutdown_flag = Arc::clone(&self.shutdown);
// These buffers shadow the collector's own histories — see BUG above.
let metrics_history = Arc::new(Mutex::new(TimeSeries::new(1000)));
let system_history = Arc::new(Mutex::new(TimeSeries::new(1000)));
let communication_history = Arc::new(Mutex::new(TimeSeries::new(1000)));
let training_history = Arc::new(Mutex::new(TimeSeries::new(1000)));
let handle = std::thread::spawn(move || {
loop {
// Re-check the shutdown flag each cycle; scoped so the lock is
// released before collection work starts.
{
let shutdown = shutdown_flag.lock().expect("lock should not be poisoned");
if *shutdown {
break;
}
}
let mut performance_metrics = PerformanceMetrics::default();
if collect_system {
let system_metrics = Self::collect_system_metrics();
performance_metrics.system = system_metrics.clone();
if let Ok(mut history) = system_history.lock() {
history.add_point(system_metrics);
}
}
if collect_communication {
let communication_metrics = Self::collect_communication_metrics();
performance_metrics.communication = communication_metrics.clone();
if let Ok(mut history) = communication_history.lock() {
history.add_point(communication_metrics);
}
}
if collect_training {
let training_metrics = Self::collect_training_metrics();
performance_metrics.training = training_metrics.clone();
if let Ok(mut history) = training_history.lock() {
history.add_point(training_metrics);
}
}
if let Ok(mut history) = metrics_history.lock() {
history.add_point(performance_metrics);
}
// Full-interval sleep: stop_collection may block for up to
// `interval` while joining this thread.
std::thread::sleep(interval);
}
});
let mut thread_handle = self
.collection_thread
.lock()
.map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?;
*thread_handle = Some(handle);
Ok(())
}
/// Signals the background thread to stop and waits for it to finish.
///
/// Safe to call when collection was never started (the join is skipped).
/// The shutdown flag is lowered afterwards so collection can be restarted.
pub fn stop_collection(&self) -> TorshResult<()> {
    let poisoned = || TorshDistributedError::backend_error("system", "Lock poisoned");
    // Raise the flag so the worker loop exits at its next wake-up.
    *self.shutdown.lock().map_err(|_| poisoned())? = true;
    // Take the handle (if any) and join while holding the slot lock.
    {
        let mut slot = self.collection_thread.lock().map_err(|_| poisoned())?;
        if let Some(handle) = slot.take() {
            handle.join().map_err(|_| {
                TorshDistributedError::backend_error("system", "Failed to join collection thread")
            })?;
        }
    }
    // Lower the flag again so a later start_collection works.
    *self.shutdown.lock().map_err(|_| poisoned())? = false;
    Ok(())
}
/// Produces a `SystemMetrics` snapshot.
///
/// NOTE: every value below is a hard-coded placeholder (50% CPU, 1 GiB of
/// 8 GiB memory, etc.); only `timestamp` is real. Wire up an actual system
/// probe before relying on these numbers.
fn collect_system_metrics() -> SystemMetrics {
    SystemMetrics {
        cpu_usage_pct: 50.0,
        memory_usage_bytes: 1024 * 1024 * 1024,               // 1 GiB
        memory_available_bytes: 8 * 1024 * 1024 * 1024,       // 8 GiB
        memory_usage_pct: 12.5,
        gpu_memory_usage_bytes: Some(512 * 1024 * 1024),      // 512 MiB
        gpu_memory_total_bytes: Some(4 * 1024 * 1024 * 1024), // 4 GiB
        gpu_usage_pct: Some(75.0),
        network_bytes_rx: 1024 * 1024,
        network_bytes_tx: 2 * 1024 * 1024,
        disk_bytes_read: 10 * 1024 * 1024,
        disk_bytes_write: 5 * 1024 * 1024,
        timestamp: SystemTime::now(),
        #[cfg(feature = "scirs2-profiling")]
        scirs2_profile: None,
        #[cfg(feature = "scirs2-profiling")]
        memory_profile: None,
    }
}
/// Aggregates communication statistics from the global profiler.
///
/// Falls back to `CommunicationMetrics::default()` when the profiler's
/// per-operation stats cannot be read.
fn collect_communication_metrics() -> CommunicationMetrics {
    let profiler = get_global_profiler();
    let all_stats = match profiler.get_all_operation_stats() {
        Ok(stats) => stats,
        Err(_) => return CommunicationMetrics::default(),
    };
    let mut total_ops = 0u64;
    let mut total_bytes = 0u64;
    let mut total_duration = Duration::ZERO;
    let mut total_bandwidth = 0.0;
    for stats in all_stats.values() {
        total_ops += stats.count;
        total_bytes += stats.total_bytes;
        total_duration += stats.total_duration;
        total_bandwidth += stats.avg_bandwidth_bps;
    }
    // Mean latency per operation, in milliseconds.
    let avg_latency_ms = if total_ops > 0 {
        total_duration.as_secs_f64() * 1000.0 / total_ops as f64
    } else {
        0.0
    };
    // Mean of the per-operation-kind bandwidths, converted from B/s to MB/s.
    let avg_bandwidth_mbps = if !all_stats.is_empty() {
        total_bandwidth / (all_stats.len() as f64 * 1024.0 * 1024.0)
    } else {
        0.0
    };
    CommunicationMetrics {
        total_operations: total_ops,
        total_bytes,
        avg_latency_ms,
        avg_bandwidth_mbps,
        // Placeholder estimates until real measurements are plumbed through.
        efficiency_ratio: 0.85,
        communication_compute_ratio: 0.2,
        // Fix: reuse the profiler handle fetched above instead of calling
        // get_global_profiler() a second time.
        failed_operations: profiler.get_failed_operations_count(),
        // Durations under 1 s are clamped to 1 s to avoid divide-by-zero
        // and inflated rates.
        ops_per_second: total_ops as f64 / total_duration.as_secs_f64().max(1.0),
        timestamp: SystemTime::now(),
    }
}
/// Produces training metrics for the background collector.
///
/// Stub: always returns `TrainingMetrics::default()`; real values arrive
/// via `update_training_metrics` instead.
fn collect_training_metrics() -> TrainingMetrics {
TrainingMetrics::default()
}
/// Records an externally produced training-metrics sample into the
/// training history.
pub fn update_training_metrics(&self, metrics: TrainingMetrics) -> TorshResult<()> {
    self.training_history
        .lock()
        .map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?
        .add_point(metrics);
    Ok(())
}
/// Stores (or overwrites) a named user-defined metric value.
pub fn add_custom_metric(&self, name: String, value: f64) -> TorshResult<()> {
    self.custom_metrics
        .write()
        .map_err(|_| TorshDistributedError::backend_error("metrics", "Lock poisoned"))?
        .insert(name, value);
    Ok(())
}
/// Looks up a user-defined metric by name; `Ok(None)` when it was never
/// recorded.
///
/// Fix: the lock-poisoned error now reports the "metrics" component,
/// matching `add_custom_metric` and `clear`, which guard the same lock
/// (it previously said "system").
pub fn get_custom_metric(&self, name: &str) -> TorshResult<Option<f64>> {
    let custom_metrics = self
        .custom_metrics
        .read()
        .map_err(|_| TorshDistributedError::backend_error("metrics", "Lock poisoned"))?;
    Ok(custom_metrics.get(name).copied())
}
/// Returns a clone of the newest combined snapshot, or `Ok(None)` when no
/// snapshot has been recorded yet.
pub fn get_latest_metrics(&self) -> TorshResult<Option<PerformanceMetrics>> {
    let snapshot = self
        .metrics_history
        .lock()
        .map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?
        .latest()
        .cloned();
    Ok(snapshot.map(|point| point.value))
}
/// Copies the full history of combined snapshots, oldest first.
pub fn get_metrics_history(&self) -> TorshResult<Vec<TimeSeriesPoint<PerformanceMetrics>>> {
    let history = self
        .metrics_history
        .lock()
        .map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?;
    Ok(history.get_points().clone().into())
}
/// Copies the full system-metrics history, oldest first.
pub fn get_system_history(&self) -> TorshResult<Vec<TimeSeriesPoint<SystemMetrics>>> {
    let history = self
        .system_history
        .lock()
        .map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?;
    Ok(history.get_points().clone().into())
}
/// Copies the full communication-metrics history, oldest first.
pub fn get_communication_history(
    &self,
) -> TorshResult<Vec<TimeSeriesPoint<CommunicationMetrics>>> {
    let history = self
        .communication_history
        .lock()
        .map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?;
    Ok(history.get_points().clone().into())
}
/// Copies the full training-metrics history, oldest first.
pub fn get_training_history(&self) -> TorshResult<Vec<TimeSeriesPoint<TrainingMetrics>>> {
    let history = self
        .training_history
        .lock()
        .map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?;
    Ok(history.get_points().clone().into())
}
/// Serializes the current configuration, all histories, and custom metrics
/// to pretty-printed JSON.
pub fn export_metrics_json(&self) -> TorshResult<String> {
    /// Shape of the exported JSON document.
    #[derive(Serialize)]
    struct MetricsExport {
        config: MetricsConfig,
        latest_metrics: Option<PerformanceMetrics>,
        system_history: Vec<TimeSeriesPoint<SystemMetrics>>,
        communication_history: Vec<TimeSeriesPoint<CommunicationMetrics>>,
        training_history: Vec<TimeSeriesPoint<TrainingMetrics>>,
        custom_metrics: HashMap<String, f64>,
    }
    // Fields are gathered in declaration order, each taking its lock only
    // for the duration of the clone.
    let export = MetricsExport {
        config: self
            .config
            .read()
            .map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?
            .clone(),
        latest_metrics: self.get_latest_metrics()?,
        system_history: self.get_system_history()?,
        communication_history: self.get_communication_history()?,
        training_history: self.get_training_history()?,
        custom_metrics: self
            .custom_metrics
            .read()
            .map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?
            .clone(),
    };
    serde_json::to_string_pretty(&export).map_err(|e| {
        TorshDistributedError::backend_error(
            "metrics",
            format!("JSON serialization failed: {}", e),
        )
    })
}
/// Renders a human-readable, plain-text summary of the latest snapshot and
/// any custom metrics.
///
/// The system/communication/training sections are omitted entirely when no
/// snapshot has been recorded; the custom-metrics section is omitted when
/// no custom metric was registered.
pub fn generate_summary(&self) -> TorshResult<String> {
let mut report = String::new();
report.push_str("=== Performance Metrics Summary ===\n\n");
if let Some(latest) = self.get_latest_metrics()? {
report.push_str("=== Latest System Metrics ===\n");
report.push_str(&format!("CPU Usage: {:.1}%\n", latest.system.cpu_usage_pct));
report.push_str(&format!(
"Memory Usage: {:.1}% ({:.1} GB / {:.1} GB)\n",
latest.system.memory_usage_pct,
latest.system.memory_usage_bytes as f64 / (1024.0 * 1024.0 * 1024.0),
latest.system.memory_available_bytes as f64 / (1024.0 * 1024.0 * 1024.0)
));
// GPU line only when a utilization reading is present.
if let Some(gpu_usage) = latest.system.gpu_usage_pct {
report.push_str(&format!("GPU Usage: {:.1}%\n", gpu_usage));
}
report.push_str("\n=== Latest Communication Metrics ===\n");
report.push_str(&format!(
"Total Operations: {}\n",
latest.communication.total_operations
));
report.push_str(&format!(
"Total Data: {:.1} MB\n",
latest.communication.total_bytes as f64 / (1024.0 * 1024.0)
));
report.push_str(&format!(
"Average Latency: {:.2} ms\n",
latest.communication.avg_latency_ms
));
report.push_str(&format!(
"Average Bandwidth: {:.2} MB/s\n",
latest.communication.avg_bandwidth_mbps
));
report.push_str(&format!(
"Operations/sec: {:.1}\n",
latest.communication.ops_per_second
));
report.push_str("\n=== Latest Training Metrics ===\n");
report.push_str(&format!(
"Current Epoch: {}\n",
latest.training.current_epoch
));
report.push_str(&format!("Current Step: {}\n", latest.training.current_step));
// Optional readings are printed only when present.
if let Some(loss) = latest.training.training_loss {
report.push_str(&format!("Training Loss: {:.6}\n", loss));
}
if let Some(lr) = latest.training.learning_rate {
report.push_str(&format!("Learning Rate: {:.2e}\n", lr));
}
report.push_str(&format!(
"Samples/sec: {:.1}\n",
latest.training.samples_per_second
));
}
let custom_metrics = self
.custom_metrics
.read()
.map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?;
if !custom_metrics.is_empty() {
report.push_str("\n=== Custom Metrics ===\n");
// HashMap iteration order is unspecified, so this section's line
// order is not stable between calls.
for (name, value) in custom_metrics.iter() {
report.push_str(&format!("{}: {:.6}\n", name, value));
}
}
Ok(report)
}
pub fn clear(&self) -> TorshResult<()> {
{
let mut history = self
.metrics_history
.lock()
.map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?;
*history = TimeSeries::new(history.max_size);
}
{
let mut history = self
.system_history
.lock()
.map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?;
*history = TimeSeries::new(history.max_size);
}
{
let mut history = self
.communication_history
.lock()
.map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?;
*history = TimeSeries::new(history.max_size);
}
{
let mut history = self
.training_history
.lock()
.map_err(|_| TorshDistributedError::backend_error("system", "Lock poisoned"))?;
*history = TimeSeries::new(history.max_size);
}
{
let mut custom_metrics = self
.custom_metrics
.write()
.map_err(|_| TorshDistributedError::backend_error("metrics", "Lock poisoned"))?;
custom_metrics.clear();
}
Ok(())
}
/// Collects the standard system snapshot and attaches scirs2-derived
/// memory-performance estimates under well-known keys.
#[cfg(feature = "scirs2-profiling")]
pub fn collect_scirs2_system_metrics(&self) -> TorshResult<SystemMetrics> {
    let mut metrics = Self::collect_system_metrics();
    metrics.scirs2_profile = Some(HashMap::from([
        (
            "memory_bandwidth_gbps".to_string(),
            self.estimate_memory_bandwidth(),
        ),
        (
            "memory_latency_ns".to_string(),
            self.estimate_memory_latency(),
        ),
    ]));
    Ok(metrics)
}
/// Runs performance micro-benchmarks.
///
/// Stub: currently returns an empty map. The `benchmark_*` helpers defined
/// below are implemented but never invoked — wiring their timings into
/// `results` here is the intended completion.
#[cfg(feature = "scirs2-profiling")]
pub fn run_performance_benchmarks(&self) -> TorshResult<HashMap<String, f64>> {
let results = HashMap::new();
Ok(results)
}
/// Builds a full `PerformanceMetrics` snapshot using the scirs2-enhanced
/// system collector alongside the standard communication/training probes.
#[cfg(feature = "scirs2-profiling")]
pub fn collect_enhanced_metrics(&self) -> TorshResult<PerformanceMetrics> {
    Ok(PerformanceMetrics {
        system: self.collect_scirs2_system_metrics()?,
        communication: Self::collect_communication_metrics(),
        training: Self::collect_training_metrics(),
        timestamp: SystemTime::now(),
    })
}
/// Times a sequential write pass plus a read-back over a 1 MiB buffer.
///
/// Fix: the read-back previously used `sum::<u8>()`, which overflows `u8`
/// after a handful of elements and panics in debug builds; it now
/// accumulates into a `u64`.
#[cfg(feature = "scirs2-profiling")]
fn benchmark_memory_throughput_sequential(&self) -> Duration {
    let start = Instant::now();
    let mut data = vec![0u8; 1024 * 1024]; // 1 MiB working set
    for (i, item) in data.iter_mut().enumerate() {
        *item = (i % 256) as u8;
    }
    // Read the buffer back so the writes cannot be optimized away.
    let _ = data.iter().map(|&b| b as u64).sum::<u64>();
    start.elapsed()
}
/// Times strided writes (one byte every 4 KiB) plus a read-back over a
/// 1 MiB buffer.
///
/// Fix: the read-back previously used `sum::<u8>()`, which overflows `u8`
/// and panics in debug builds; it now accumulates into a `u64`.
#[cfg(feature = "scirs2-profiling")]
fn benchmark_memory_throughput_random(&self) -> Duration {
    let start = Instant::now();
    let mut data = vec![0u8; 1024 * 1024]; // 1 MiB working set
    for i in (0..data.len()).step_by(4096) {
        data[i] = ((i * 7) % 256) as u8;
    }
    // Read the buffer back so the writes cannot be optimized away.
    let _ = data.iter().map(|&b| b as u64).sum::<u64>();
    start.elapsed()
}
/// Measures "network" latency.
///
/// Placeholder: sleeps 100 µs and reports the elapsed time instead of
/// performing a real network round-trip.
#[cfg(feature = "scirs2-profiling")]
fn benchmark_network_latency(&self) -> Duration {
let start = Instant::now();
std::thread::sleep(Duration::from_micros(100)); start.elapsed()
}
/// Times an element-wise product of two 1000-element f32 vectors as a tiny
/// stand-in for tensor work.
#[cfg(feature = "scirs2-profiling")]
fn benchmark_tensor_operations(&self) -> Duration {
    let start = Instant::now();
    let lhs = vec![1.0f32; 1000];
    let rhs = vec![2.0f32; 1000];
    let _product: Vec<f32> = lhs.iter().zip(rhs.iter()).map(|(x, y)| x * y).collect();
    start.elapsed()
}
/// Estimated memory bandwidth in GB/s (key `memory_bandwidth_gbps`).
///
/// Placeholder: returns a fixed 4.0 rather than a measured value.
#[cfg(feature = "scirs2-profiling")]
fn estimate_memory_bandwidth(&self) -> f64 {
4.0 }
/// Estimated memory latency in nanoseconds (key `memory_latency_ns`).
///
/// Placeholder: returns a fixed 50.0 rather than a measured value.
#[cfg(feature = "scirs2-profiling")]
fn estimate_memory_latency(&self) -> f64 {
50.0 }
}
impl Default for MetricsCollector {
fn default() -> Self {
Self::new()
}
}
impl Drop for MetricsCollector {
/// Best-effort shutdown of the background thread; any error (including a
/// poisoned lock) is deliberately ignored, since panicking in drop would
/// abort the process.
fn drop(&mut self) {
let _ = self.stop_collection();
}
}
// Process-wide collector instance, created on first access.
static GLOBAL_METRICS_COLLECTOR: std::sync::OnceLock<Arc<MetricsCollector>> =
std::sync::OnceLock::new();
/// Returns the global collector, lazily initializing it with the default
/// configuration on first use.
pub fn get_global_metrics_collector() -> &'static Arc<MetricsCollector> {
GLOBAL_METRICS_COLLECTOR.get_or_init(|| Arc::new(MetricsCollector::new()))
}
/// Installs a collector built from `config` as the process-wide instance.
///
/// Fails if the global collector was already initialized — either
/// explicitly through this function or lazily via
/// `get_global_metrics_collector`.
pub fn init_global_metrics_collector(config: MetricsConfig) -> TorshResult<()> {
    GLOBAL_METRICS_COLLECTOR
        .set(Arc::new(MetricsCollector::with_config(config)))
        .map_err(|_| {
            TorshDistributedError::backend_error(
                "metrics",
                "Global metrics collector already initialized",
            )
        })
}
/// Starts background collection on the global collector.
pub fn start_global_metrics_collection() -> TorshResult<()> {
get_global_metrics_collector().start_collection()
}
/// Stops background collection on the global collector.
pub fn stop_global_metrics_collection() -> TorshResult<()> {
get_global_metrics_collector().stop_collection()
}
#[cfg(test)]
mod tests {
use super::*;
// A fresh collector has recorded nothing, so the latest snapshot is None.
#[test]
fn test_metrics_collector_creation() {
let collector = MetricsCollector::new();
let latest = collector.get_latest_metrics().unwrap();
assert!(latest.is_none());
}
// A capacity-3 series evicts the oldest point on the fourth insert.
#[test]
fn test_time_series() {
let mut ts = TimeSeries::new(3);
ts.add_point(1.0);
ts.add_point(2.0);
ts.add_point(3.0);
ts.add_point(4.0);
assert_eq!(ts.len(), 3);
assert_eq!(ts.latest().unwrap().value, 4.0);
}
// Custom metrics round-trip through add/get by name.
#[test]
fn test_custom_metrics() {
let collector = MetricsCollector::new();
collector
.add_custom_metric("test_metric".to_string(), 42.0)
.unwrap();
let value = collector.get_custom_metric("test_metric").unwrap();
assert_eq!(value, Some(42.0));
}
// Direct training updates land in the training history verbatim.
#[test]
fn test_training_metrics_update() {
let collector = MetricsCollector::new();
let training_metrics = TrainingMetrics {
current_epoch: 5,
training_loss: Some(0.123),
..Default::default()
};
collector
.update_training_metrics(training_metrics.clone())
.unwrap();
let history = collector.get_training_history().unwrap();
assert_eq!(history.len(), 1);
assert_eq!(history[0].value.current_epoch, 5);
assert_eq!(history[0].value.training_loss, Some(0.123));
}
// The JSON export contains custom-metric names and values ("3.14" matches
// the serialized prefix of PI).
#[test]
fn test_metrics_export() {
let collector = MetricsCollector::new();
collector
.add_custom_metric("export_test".to_string(), std::f64::consts::PI)
.unwrap();
let json = collector.export_metrics_json().unwrap();
assert!(json.contains("export_test"));
assert!(json.contains("3.14"));
}
// The lazily created global collector accepts and returns custom metrics.
// Note: shares state with any other test touching the global instance.
#[test]
fn test_global_metrics_collector() {
let collector = get_global_metrics_collector();
collector
.add_custom_metric("global_test".to_string(), 7.5)
.unwrap();
let value = collector.get_custom_metric("global_test").unwrap();
assert_eq!(value, Some(7.5));
}
}