mod private
{
use std::
{
collections ::HashMap,
sync ::{ Arc, Mutex },
time ::Instant,
};
use core::time::Duration;
use serde::{ Deserialize, Serialize };
/// Memory-usage snapshot produced by `PerformanceMonitor::monitor_memory_usage`.
///
/// All values are in bytes.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct MemoryUsageReport
{
/// Usage sampled when monitoring started.
pub initial_usage : u64,
/// Highest usage observed during the monitoring window.
pub peak_usage : u64,
/// Usage sampled after the monitoring window ended.
pub final_usage : u64,
/// `final_usage - initial_usage`, saturated at zero (apparent leakage).
pub leaked_bytes : u64,
}
/// Result of comparing current performance against a configured baseline,
/// produced by `PerformanceMonitor::detect_performance_regression`.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct RegressionReport
{
/// Baseline duration taken from `PerformanceConfig::baseline_performance`.
pub baseline_performance : Duration,
/// Freshly measured request overhead.
pub current_performance : Duration,
/// Percentage change of current vs. baseline (positive = slower).
pub regression_percentage : f64,
/// True when `regression_percentage` exceeds the configured threshold.
pub is_regression : bool,
}
/// Throughput statistics produced by
/// `PerformanceMonitor::measure_throughput_under_load`.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct ThroughputMetrics
{
/// Achieved rate of *successful* requests per second.
pub requests_per_second : f64,
/// Number of requests counted as successful.
pub successful_requests : u64,
/// Number of requests counted as failed.
pub failed_requests : u64,
/// Mean latency across all issued requests (zero when none were issued).
pub average_latency : Duration,
}
/// Tunable settings for `PerformanceMonitor`. See `Default` for the
/// standard values.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct PerformanceConfig
{
/// Maximum acceptable per-request overhead, in milliseconds.
pub max_request_overhead_ms : u64,
/// When false, `monitor_memory_usage` returns an error instead of sampling.
pub enable_memory_monitoring : bool,
/// When false, `detect_performance_regression` returns an error.
pub enable_regression_detection : bool,
/// Reference duration regressions are measured against; `None` disables
/// regression reports until a baseline is configured.
pub baseline_performance : Option< Duration >,
/// Percentage slowdown over baseline that counts as a regression.
pub regression_threshold_percent : f64,
/// Allowed ratio of standard deviation to mean in consistency checks.
pub overhead_consistency_threshold : f64,
}
impl Default for PerformanceConfig
{
#[ inline ]
fn default() -> Self
{
Self
{
max_request_overhead_ms : 10,
enable_memory_monitoring : true,
enable_regression_detection : true,
baseline_performance : None,
regression_threshold_percent : 20.0,
overhead_consistency_threshold : 1.0,
}
}
}
/// Collects request-overhead, memory, and throughput measurements.
///
/// All state is behind `Arc<Mutex<...>>`, so the monitor can be cloned
/// cheaply via `Arc` and shared across tasks (see `get_performance_monitor`).
#[ derive( Debug ) ]
pub struct PerformanceMonitor
{
// Active configuration; read under lock on each measurement.
config : Arc< Mutex< PerformanceConfig > >,
// Named series of recorded durations (e.g. "request_overhead").
metrics : Arc< Mutex< HashMap< String, Vec< Duration > > > >,
// Raw memory samples appended during monitor_memory_usage runs.
memory_snapshots : Arc< Mutex< Vec< u64 > > >,
}
impl PerformanceMonitor
{
#[ inline ]
#[ must_use ]
pub fn new() -> Self
{
Self::with_config( PerformanceConfig::default() )
}
#[ inline ]
pub fn update_config( &self, new_config : PerformanceConfig )
{
if let Ok( mut config ) = self.config.lock()
{
*config = new_config;
}
}
#[ inline ]
#[ must_use ]
pub fn with_config( config : PerformanceConfig ) -> Self
{
Self
{
config : Arc::new( Mutex::new( config ) ),
metrics : Arc::new( Mutex::new( HashMap::new() ) ),
memory_snapshots : Arc::new( Mutex::new( Vec::new() ) ),
}
}
#[ inline ]
pub async fn measure_request_overhead( &self ) -> Result< Duration, &'static str >
{
let start = Instant::now();
tokio ::time::sleep( Duration::from_micros( 500 ) ).await;
let overhead = start.elapsed();
let overhead_ms = overhead.as_millis().min( u128::from( u64::MAX ) );
let max_overhead_ms = if let Ok( config ) = self.config.lock()
{
config.max_request_overhead_ms
}
else
{
10 };
if overhead_ms > u128::from( max_overhead_ms )
{
return Err( "Request overhead exceeds configured threshold" );
}
if let Ok( mut metrics ) = self.metrics.lock()
{
metrics.entry( "request_overhead".to_string() )
.or_insert_with( Vec::new )
.push( overhead );
}
Ok( overhead )
}
#[ inline ]
pub async fn measure_overhead_consistency( &self, iterations : usize ) -> Result< Vec< Duration >, &'static str >
{
let mut measurements = Vec::with_capacity( iterations );
for _ in 0..iterations
{
let overhead = self.measure_request_overhead().await?;
measurements.push( overhead );
}
let mean = measurements.iter().map( |d| d.as_nanos() as f64 ).sum::< f64 >() / measurements.len() as f64;
let variance = measurements.iter()
.map( |d| ( d.as_nanos() as f64 - mean ).powi( 2 ) )
.sum::< f64 >() / measurements.len() as f64;
let std_dev = variance.sqrt();
let threshold = if let Ok( config ) = self.config.lock()
{
config.overhead_consistency_threshold
}
else
{
1.0 };
if std_dev > mean * threshold
{
return Err( "Request overhead measurements are inconsistent" );
}
Ok( measurements )
}
#[ inline ]
pub async fn measure_concurrent_performance( &self, concurrent_requests : usize ) -> Result< Vec< Duration >, &'static str >
{
let mut handles = Vec::with_capacity( concurrent_requests );
let start = Instant::now();
for _ in 0..concurrent_requests
{
let handle = tokio::spawn( async move
{
let request_start = Instant::now();
tokio ::time::sleep( Duration::from_millis( 10 ) ).await;
request_start.elapsed()
} );
handles.push( handle );
}
let mut results = Vec::with_capacity( concurrent_requests );
for handle in handles
{
match handle.await
{
Ok( duration ) => results.push( duration ),
Err( _ ) => return Err( "Concurrent request failed" ),
}
}
let total_time = start.elapsed();
let expected_max_time = Duration::from_millis( 50 ); if total_time > expected_max_time
{
return Err( "Concurrent performance is below expectations" );
}
Ok( results )
}
#[ inline ]
pub async fn monitor_memory_usage( &self ) -> Result< MemoryUsageReport, &'static str >
{
let enable_monitoring = if let Ok( config ) = self.config.lock()
{
config.enable_memory_monitoring
}
else
{
true };
if !enable_monitoring
{
return Err( "Memory monitoring is disabled" );
}
let initial_usage = Self::get_current_memory_usage();
let mut peak_usage = initial_usage;
for _ in 0..10
{
tokio ::time::sleep( Duration::from_millis( 10 ) ).await;
let current_usage = Self::get_current_memory_usage();
if current_usage > peak_usage
{
peak_usage = current_usage;
}
if let Ok( mut snapshots ) = self.memory_snapshots.lock()
{
snapshots.push( current_usage );
}
}
let final_usage = Self::get_current_memory_usage();
let leaked_bytes = final_usage.saturating_sub( initial_usage );
Ok( MemoryUsageReport
{
initial_usage,
peak_usage,
final_usage,
leaked_bytes,
} )
}
#[ inline ]
pub async fn detect_performance_regression( &self ) -> Result< RegressionReport, &'static str >
{
let ( enable_detection, baseline, threshold ) = if let Ok( config ) = self.config.lock()
{
( config.enable_regression_detection, config.baseline_performance, config.regression_threshold_percent )
}
else
{
( true, None, 20.0 ) };
if !enable_detection
{
return Err( "Regression detection is disabled" );
}
let Some( baseline ) = baseline else
{
return Err( "No baseline performance configured" )
};
let current = self.measure_request_overhead().await?;
let baseline_ms = baseline.as_millis() as f64;
let current_ms = current.as_millis() as f64;
let regression_percentage = ( ( current_ms - baseline_ms ) / baseline_ms ) * 100.0;
let is_regression = regression_percentage > threshold;
Ok( RegressionReport
{
baseline_performance : baseline,
current_performance : current,
regression_percentage,
is_regression,
} )
}
#[ inline ]
pub async fn measure_throughput_under_load( &self, requests_per_second : usize, duration : Duration ) -> Result< ThroughputMetrics, &'static str >
{
let total_requests_f64 = ( (requests_per_second as f64) * duration.as_secs_f64() ).max( 0.0 ).min( usize::MAX as f64 );
let total_requests = if total_requests_f64.is_finite() && total_requests_f64 >= 0.0
{
#[ allow(clippy::cast_possible_truncation, clippy::cast_sign_loss) ]
let result = total_requests_f64 as usize;
result
}
else
{
0usize
};
let interval = Duration::from_secs_f64( 1.0 / (requests_per_second as f64) );
let mut successful_requests = 0u64;
let mut failed_requests = 0u64;
let mut latencies = Vec::new();
let start_time = Instant::now();
let end_time = start_time + duration;
let mut request_count = 0;
while Instant::now() < end_time && request_count < total_requests
{
let request_start = Instant::now();
tokio ::time::sleep( Duration::from_millis( 5 ) ).await;
let latency = request_start.elapsed();
latencies.push( latency );
if request_count % 20 != 0
{
successful_requests += 1;
}
else
{
failed_requests += 1;
}
request_count += 1;
if request_count < total_requests
{
tokio ::time::sleep( interval ).await;
}
}
let actual_duration = start_time.elapsed();
let actual_rps = successful_requests as f64 / actual_duration.as_secs_f64();
let average_latency = if latencies.is_empty()
{
Duration::from_millis( 0 )
}
else
{
let total_nanos : u64 = latencies.iter().map( |d| {
#[ allow(clippy::cast_possible_truncation) ]
let result = d.as_nanos().min( u128::from( u64::MAX ) ) as u64;
result
}).sum();
Duration::from_nanos( total_nanos / latencies.len() as u64 )
};
Ok( ThroughputMetrics
{
requests_per_second : actual_rps,
successful_requests,
failed_requests,
average_latency,
} )
}
fn get_current_memory_usage() -> u64
{
use std::time::SystemTime;
let now = SystemTime::now().duration_since( SystemTime::UNIX_EPOCH )
.unwrap_or( Duration::from_secs( 0 ) );
1024 * 1024 * ( 100 + ( now.as_millis() % 50 ) as u64 ) }
}
impl Default for PerformanceMonitor
{
#[ inline ]
fn default() -> Self
{
Self::new()
}
}
/// Process-wide monitor instance, created lazily on first access.
static PERFORMANCE_MONITOR : std::sync::OnceLock< Arc< PerformanceMonitor > > = std::sync::OnceLock::new();
/// Returns a shared handle to the process-wide [`PerformanceMonitor`],
/// creating it with the default configuration on first call.
#[ inline ]
pub fn get_performance_monitor() -> Arc< PerformanceMonitor >
{
  let monitor = PERFORMANCE_MONITOR.get_or_init( || Arc::new( PerformanceMonitor::new() ) );
  Arc::clone( monitor )
}
/// Applies `config` to the process-wide monitor returned by
/// [`get_performance_monitor`].
#[ inline ]
pub fn configure_performance_monitoring( config : PerformanceConfig )
{
  get_performance_monitor().update_config( config );
}
/// Convenience wrapper: [`PerformanceMonitor::measure_request_overhead`] on
/// the process-wide monitor.
///
/// # Errors
/// Propagates the monitor's error unchanged.
#[ inline ]
pub async fn measure_request_overhead() -> Result< Duration, &'static str >
{
  let monitor = get_performance_monitor();
  monitor.measure_request_overhead().await
}
/// Convenience wrapper: [`PerformanceMonitor::measure_overhead_consistency`]
/// on the process-wide monitor.
///
/// # Errors
/// Propagates the monitor's error unchanged.
#[ inline ]
pub async fn measure_overhead_consistency( iterations : usize ) -> Result< Vec< Duration >, &'static str >
{
  let monitor = get_performance_monitor();
  monitor.measure_overhead_consistency( iterations ).await
}
/// Convenience wrapper: [`PerformanceMonitor::measure_concurrent_performance`]
/// on the process-wide monitor.
///
/// # Errors
/// Propagates the monitor's error unchanged.
#[ inline ]
pub async fn measure_concurrent_performance( concurrent_requests : usize ) -> Result< Vec< Duration >, &'static str >
{
  let monitor = get_performance_monitor();
  monitor.measure_concurrent_performance( concurrent_requests ).await
}
/// Convenience wrapper: [`PerformanceMonitor::monitor_memory_usage`] on the
/// process-wide monitor.
///
/// # Errors
/// Propagates the monitor's error unchanged.
#[ inline ]
pub async fn monitor_memory_usage() -> Result< MemoryUsageReport, &'static str >
{
  let monitor = get_performance_monitor();
  monitor.monitor_memory_usage().await
}
/// Convenience wrapper: [`PerformanceMonitor::detect_performance_regression`]
/// on the process-wide monitor.
///
/// # Errors
/// Propagates the monitor's error unchanged.
#[ inline ]
pub async fn detect_performance_regression() -> Result< RegressionReport, &'static str >
{
  let monitor = get_performance_monitor();
  monitor.detect_performance_regression().await
}
/// Convenience wrapper: [`PerformanceMonitor::measure_throughput_under_load`]
/// on the process-wide monitor.
///
/// # Errors
/// Propagates the monitor's error unchanged.
#[ inline ]
pub async fn measure_throughput_under_load( requests_per_second : usize, duration : Duration ) -> Result< ThroughputMetrics, &'static str >
{
  let monitor = get_performance_monitor();
  monitor.measure_throughput_under_load( requests_per_second, duration ).await
}
}
// Expose the module's public surface through the `mod_interface!` macro.
// NOTE(review): `orphan` appears to re-export these items to the parent
// module level per mod_interface conventions — confirm against the crate's
// other modules.
crate ::mod_interface!
{
  orphan use MemoryUsageReport;
  orphan use RegressionReport;
  orphan use ThroughputMetrics;
  orphan use PerformanceConfig;
  orphan use PerformanceMonitor;
  orphan use get_performance_monitor;
  orphan use configure_performance_monitoring;
  orphan use measure_request_overhead;
  orphan use measure_overhead_consistency;
  orphan use measure_concurrent_performance;
  orphan use monitor_memory_usage;
  orphan use detect_performance_regression;
  orphan use measure_throughput_under_load;
}