use anyhow::{anyhow, Result};
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;
use tracing::info;
/// Tunables for the performance profiler.
///
/// NOTE(review): several fields are not read anywhere in this file and are
/// presumably consumed by an external sampling/collection loop — confirm
/// before relying on them (they are flagged per-field below).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfilerConfig {
    /// Toggle CPU usage sampling. NOTE(review): not consulted in this file.
    pub enable_cpu_profiling: bool,
    /// Toggle memory usage sampling. NOTE(review): not consulted in this file.
    pub enable_memory_profiling: bool,
    /// When true, `end_span` feeds span durations into the latency histogram.
    pub enable_latency_tracking: bool,
    /// Toggle throughput tracking. NOTE(review): not consulted in this file.
    pub enable_throughput_tracking: bool,
    /// Intended interval between samples. NOTE(review): not consulted in this
    /// file — presumably used by whatever loop drives `collect_sample`.
    pub sampling_interval: Duration,
    /// Maximum number of samples retained; oldest are evicted first.
    pub history_size: usize,
    /// Toggle recommendation generation. NOTE(review): not consulted in this
    /// file; `generate_recommendations` runs unconditionally.
    pub enable_recommendations: bool,
    /// Percentiles of interest. NOTE(review): not consulted in this file; the
    /// histogram reports a fixed p50/p90/p95/p99/p99.9 set.
    pub percentiles: Vec<f64>,
    /// Limits used by `check_warnings` when inspecting collected samples.
    pub warning_thresholds: WarningThresholds,
    /// Toggle flame-graph capture. NOTE(review): not consulted in this file.
    pub enable_flame_graph: bool,
    /// Span nesting cap. NOTE(review): not enforced in this file.
    pub max_span_depth: usize,
}
impl Default for ProfilerConfig {
fn default() -> Self {
Self {
enable_cpu_profiling: true,
enable_memory_profiling: true,
enable_latency_tracking: true,
enable_throughput_tracking: true,
sampling_interval: Duration::from_secs(1),
history_size: 3600, enable_recommendations: true,
percentiles: vec![50.0, 90.0, 95.0, 99.0, 99.9],
warning_thresholds: WarningThresholds::default(),
enable_flame_graph: false,
max_span_depth: 100,
}
}
}
/// Limits that `check_warnings` compares each sample against.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WarningThresholds {
    /// Warn when sampled CPU usage exceeds this percentage.
    pub cpu_usage_percent: f64,
    /// Warn when sampled memory usage exceeds this percentage.
    /// NOTE(review): defined but never checked by `check_warnings` — confirm
    /// whether a memory check was intended.
    pub memory_usage_percent: f64,
    /// Warn when the sample's p99 latency (microseconds) exceeds this value.
    pub p99_latency_us: u64,
    /// Warn when events/second falls below this value.
    pub min_throughput: f64,
    /// Warn when buffer usage exceeds this percentage.
    /// NOTE(review): defined but never checked by `check_warnings`.
    pub buffer_usage_percent: f64,
}
impl Default for WarningThresholds {
fn default() -> Self {
Self {
cpu_usage_percent: 80.0,
memory_usage_percent: 85.0,
p99_latency_us: 10000, min_throughput: 1000.0,
buffer_usage_percent: 90.0,
}
}
}
/// A named timing span, optionally linked into a parent/child tree.
#[derive(Debug, Clone)]
pub struct Span {
    pub name: String,
    pub start: Instant,
    pub end: Option<Instant>,
    pub parent_id: Option<u64>,
    pub id: u64,
    pub tags: HashMap<String, String>,
    pub children: Vec<u64>,
}

impl Span {
    /// Creates a root span (no parent, no children) whose clock starts now.
    pub fn new(name: &str, id: u64) -> Self {
        Span {
            name: name.to_owned(),
            start: Instant::now(),
            end: None,
            parent_id: None,
            id,
            tags: HashMap::new(),
            children: Vec::new(),
        }
    }

    /// Marks the span complete at the current instant.
    pub fn finish(&mut self) {
        self.end = Some(Instant::now());
    }

    /// Elapsed time: start→end when finished, start→now otherwise.
    pub fn duration(&self) -> Duration {
        match self.end {
            Some(end) => end.duration_since(self.start),
            None => self.start.elapsed(),
        }
    }

    /// Attaches a key/value annotation, overwriting any previous value.
    pub fn tag(&mut self, key: &str, value: &str) {
        self.tags.insert(key.to_owned(), value.to_owned());
    }
}
/// Lock-free fixed-bucket histogram of latency values in microseconds.
pub struct LatencyHistogram {
    /// (upper_bound_us, count) pairs, sorted ascending, ending at u64::MAX
    /// so every observation lands in exactly one bucket.
    buckets: Vec<(u64, AtomicU64)>,
    /// Number of observations recorded.
    total: AtomicU64,
    /// Sum of all observations, used for the mean.
    sum: AtomicU64,
    /// Largest observation seen (0 while empty).
    max: AtomicU64,
    /// Smallest observation seen (u64::MAX while empty).
    min: AtomicU64,
}
impl LatencyHistogram {
    /// Creates a histogram with fixed bucket upper bounds from 1us to 1s,
    /// plus a catch-all bucket (`u64::MAX`) for anything larger.
    pub fn new() -> Self {
        let bucket_bounds = vec![
            1, 10, 50, 100, 500, 1000, 5000, 10000, 50000, 100000, 500000, 1000000,
            u64::MAX,
        ];
        let buckets = bucket_bounds
            .into_iter()
            .map(|b| (b, AtomicU64::new(0)))
            .collect();
        Self {
            buckets,
            total: AtomicU64::new(0),
            sum: AtomicU64::new(0),
            max: AtomicU64::new(0),
            min: AtomicU64::new(u64::MAX),
        }
    }

    /// Records one latency observation (in microseconds).
    /// Lock-free; safe to call concurrently from multiple threads.
    pub fn record(&self, latency_us: u64) {
        self.total.fetch_add(1, Ordering::Relaxed);
        self.sum.fetch_add(latency_us, Ordering::Relaxed);
        // CAS loops keep `max`/`min` monotone under concurrent updates;
        // a lost race reloads the current value and retries.
        let mut current_max = self.max.load(Ordering::Relaxed);
        while latency_us > current_max {
            match self.max.compare_exchange_weak(
                current_max,
                latency_us,
                Ordering::SeqCst,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(v) => current_max = v,
            }
        }
        let mut current_min = self.min.load(Ordering::Relaxed);
        while latency_us < current_min {
            match self.min.compare_exchange_weak(
                current_min,
                latency_us,
                Ordering::SeqCst,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(v) => current_min = v,
            }
        }
        // Buckets are sorted ascending and end with u64::MAX, so the first
        // bound >= latency is the (single) bucket incremented.
        for (bound, count) in &self.buckets {
            if latency_us <= *bound {
                count.fetch_add(1, Ordering::Relaxed);
                break;
            }
        }
    }

    /// Returns an upper-bound estimate for the p-th percentile (p in 0-100),
    /// or 0 when nothing has been recorded.
    ///
    /// Fixes two defects in the previous implementation:
    /// - the target rank was truncated (`(total * p / 100.0) as u64`), so for
    ///   small counts it could be 0 and the first bucket "matched" even when
    ///   empty; the rank is now `ceil(total * p / 100)` clamped to >= 1;
    /// - a hit in the open-ended last bucket was reported as `u64::MAX`; the
    ///   result is now capped at the largest observed value.
    pub fn percentile(&self, p: f64) -> u64 {
        let total = self.total.load(Ordering::Relaxed);
        if total == 0 {
            return 0;
        }
        let target = (((total as f64) * p / 100.0).ceil() as u64).max(1);
        let mut cumulative = 0u64;
        for (bound, count) in &self.buckets {
            cumulative += count.load(Ordering::Relaxed);
            if cumulative >= target {
                // Cap at the observed max so bucket bounds (in particular the
                // u64::MAX catch-all) never exceed any real observation.
                return (*bound).min(self.max.load(Ordering::Relaxed));
            }
        }
        self.max.load(Ordering::Relaxed)
    }

    /// Arithmetic mean of all recorded values, or 0.0 when empty.
    pub fn mean(&self) -> f64 {
        let total = self.total.load(Ordering::Relaxed);
        if total == 0 {
            return 0.0;
        }
        self.sum.load(Ordering::Relaxed) as f64 / total as f64
    }

    /// Snapshot of count, mean, min/max, and the standard percentile set.
    /// `min` is u64::MAX when no values have been recorded.
    pub fn stats(&self) -> HistogramStats {
        HistogramStats {
            count: self.total.load(Ordering::Relaxed),
            mean: self.mean(),
            min: self.min.load(Ordering::Relaxed),
            max: self.max.load(Ordering::Relaxed),
            p50: self.percentile(50.0),
            p90: self.percentile(90.0),
            p95: self.percentile(95.0),
            p99: self.percentile(99.0),
            p999: self.percentile(99.9),
        }
    }

    /// Clears all counters back to the freshly-constructed state.
    /// Not atomic with respect to concurrent `record` calls.
    pub fn reset(&self) {
        self.total.store(0, Ordering::Relaxed);
        self.sum.store(0, Ordering::Relaxed);
        self.max.store(0, Ordering::Relaxed);
        self.min.store(u64::MAX, Ordering::Relaxed);
        for (_, count) in &self.buckets {
            count.store(0, Ordering::Relaxed);
        }
    }
}
impl Default for LatencyHistogram {
fn default() -> Self {
Self::new()
}
}
/// Point-in-time snapshot of a `LatencyHistogram`. All latency values are in
/// microseconds; percentiles are bucket-resolution estimates.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HistogramStats {
    /// Number of recorded observations.
    pub count: u64,
    /// Arithmetic mean (0.0 when `count` is 0).
    pub mean: f64,
    /// Smallest observation (u64::MAX when `count` is 0).
    pub min: u64,
    /// Largest observation (0 when `count` is 0).
    pub max: u64,
    /// 50th percentile estimate.
    pub p50: u64,
    /// 90th percentile estimate.
    pub p90: u64,
    /// 95th percentile estimate.
    pub p95: u64,
    /// 99th percentile estimate.
    pub p99: u64,
    /// 99.9th percentile estimate.
    pub p999: u64,
}
/// One periodic measurement produced by `collect_sample`.
///
/// NOTE(review): as produced in this file, `cpu_usage_percent`,
/// `memory_usage_bytes`, `memory_usage_percent`, and `buffer_usage_percent`
/// are always 0 — no system metrics are gathered here; confirm whether an
/// external collector is expected to fill them in.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceSample {
    /// Wall-clock time the sample was taken (UTC).
    pub timestamp: DateTime<Utc>,
    /// CPU usage percentage (placeholder, currently 0.0).
    pub cpu_usage_percent: f64,
    /// Resident memory in bytes (placeholder, currently 0).
    pub memory_usage_bytes: u64,
    /// Memory usage percentage (placeholder, currently 0.0).
    pub memory_usage_percent: f64,
    /// Average events/sec since the profiler started (not per-interval).
    pub events_per_second: f64,
    /// Average bytes/sec since the profiler started.
    pub bytes_per_second: u64,
    /// Number of spans currently held in the span map.
    pub active_operations: u64,
    /// p99 latency (microseconds) from the histogram at sample time.
    pub p99_latency_us: u64,
    /// Buffer usage percentage (placeholder, currently 0.0).
    pub buffer_usage_percent: f64,
}
/// A lightweight stopwatch for a named operation. Hand it back to
/// `PerformanceProfiler::record_operation` once the work completes.
pub struct OperationTimer {
    name: String,
    start: Instant,
    tags: HashMap<String, String>,
}

impl OperationTimer {
    /// Starts timing immediately.
    pub fn new(name: &str) -> Self {
        OperationTimer {
            name: name.to_owned(),
            start: Instant::now(),
            tags: HashMap::new(),
        }
    }

    /// Adds a key/value annotation, returning the timer for chaining.
    pub fn tag(mut self, key: &str, value: &str) -> Self {
        self.tags.insert(key.to_owned(), value.to_owned());
        self
    }

    /// Time elapsed since the timer was created.
    pub fn elapsed(&self) -> Duration {
        self.start.elapsed()
    }
}
/// Aggregates latency, throughput, span, warning, and recommendation data
/// for a running workload. Shared state sits behind `Arc`s so handles can be
/// cloned into background tasks.
pub struct PerformanceProfiler {
    /// Static configuration (thresholds, history size, feature toggles).
    config: ProfilerConfig,
    /// True between `start()` and `stop()`.
    running: Arc<AtomicBool>,
    /// Lock-free histogram of recorded latencies (microseconds).
    latency_histogram: Arc<LatencyHistogram>,
    /// Spans keyed by id. Entries are never removed by `end_span`, only by
    /// `reset` — so this includes finished spans.
    spans: Arc<RwLock<HashMap<u64, Span>>>,
    /// Rolling window of samples, capped at `config.history_size`.
    samples: Arc<RwLock<VecDeque<PerformanceSample>>>,
    /// Warnings accumulated by `check_warnings`.
    warnings: Arc<RwLock<Vec<PerformanceWarning>>>,
    /// Output of the most recent `generate_recommendations` call.
    recommendations: Arc<RwLock<Vec<Recommendation>>>,
    /// Aggregate counters used for reporting.
    stats: Arc<RwLock<ProfilerStats>>,
    /// Monotonically increasing source of span ids.
    next_span_id: AtomicU64,
    /// Instant captured by `start()`; `None` before the first start.
    start_time: Arc<RwLock<Option<Instant>>>,
    /// Total events recorded via `record_event`.
    events_counter: AtomicU64,
    /// Total bytes recorded via `record_event`.
    bytes_counter: AtomicU64,
}
/// A threshold violation detected while inspecting a sample.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceWarning {
    /// Which metric crossed its threshold.
    pub warning_type: WarningType,
    /// Human-readable description including the offending values.
    pub message: String,
    /// How serious the violation is.
    pub severity: WarningSeverity,
    /// Timestamp of the sample that triggered the warning.
    pub timestamp: DateTime<Utc>,
    /// Measured value at the time of the warning.
    pub current_value: f64,
    /// Configured threshold that was crossed.
    pub threshold: f64,
}
/// Categories of performance warnings.
///
/// NOTE(review): only `HighCpuUsage`, `HighLatency`, and `LowThroughput` are
/// produced by `check_warnings` in this file; the other variants appear to
/// be reserved for other producers — confirm.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum WarningType {
    /// CPU usage exceeded its threshold.
    HighCpuUsage,
    /// Memory usage exceeded its threshold.
    HighMemoryUsage,
    /// p99 latency exceeded its threshold.
    HighLatency,
    /// Throughput fell below its floor.
    LowThroughput,
    /// Buffer usage exceeded its threshold.
    BufferOverflow,
    /// Suspected memory leak.
    MemoryLeak,
    /// Excessive garbage-collection activity.
    GarbageCollection,
    /// Threads contending on shared resources.
    ThreadContention,
}
/// Severity attached to a `PerformanceWarning`, from informational to
/// requires-immediate-attention.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum WarningSeverity {
    /// Informational only.
    Info,
    /// Threshold crossed; worth investigating.
    Warning,
    /// Far past threshold; needs attention.
    Critical,
}
/// A tuning suggestion produced by `generate_recommendations`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Recommendation {
    /// Area of the system the suggestion applies to.
    pub category: RecommendationCategory,
    /// Short headline for the suggestion.
    pub title: String,
    /// Longer explanation of the observation and proposed change.
    pub description: String,
    /// Expected benefit if applied.
    pub impact: RecommendationImpact,
    /// Expected cost to implement.
    pub effort: RecommendationEffort,
    /// Relative ordering hint; higher values are produced for more
    /// impactful findings (9, 8, 7 in this file).
    pub priority: u8,
}
/// Area of the system a `Recommendation` targets.
///
/// NOTE(review): only `BatchSize`, `Parallelism`, and `MemoryManagement` are
/// emitted by `generate_recommendations` in this file.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum RecommendationCategory {
    BatchSize,
    BufferSize,
    Parallelism,
    MemoryManagement,
    CpuOptimization,
    NetworkOptimization,
    QueryOptimization,
    Configuration,
}
/// Expected benefit of applying a `Recommendation`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum RecommendationImpact {
    Low,
    Medium,
    High,
}
/// Expected implementation cost of a `Recommendation`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum RecommendationEffort {
    Low,
    Medium,
    High,
}
/// Aggregate counters maintained by the profiler across a run.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ProfilerStats {
    /// Events recorded via `record_event` (finalized by `stop`).
    pub total_events: u64,
    /// Bytes recorded via `record_event` (finalized by `stop`).
    pub total_bytes: u64,
    /// Wall-clock seconds between `start` and `stop`.
    pub total_duration_secs: f64,
    /// total_events / total_duration_secs, computed at `stop`.
    pub avg_throughput: f64,
    /// Highest events/sec throughput observed.
    pub peak_throughput: f64,
    /// Size of the warnings list after the latest `check_warnings` pass.
    pub warnings_generated: u64,
    /// Count of spans ever started.
    pub spans_recorded: u64,
    /// Count of samples ever collected.
    pub samples_collected: u64,
}
impl PerformanceProfiler {
pub fn builder() -> ProfilerBuilder {
ProfilerBuilder::new()
}
pub fn new(config: ProfilerConfig) -> Self {
Self {
config,
running: Arc::new(AtomicBool::new(false)),
latency_histogram: Arc::new(LatencyHistogram::new()),
spans: Arc::new(RwLock::new(HashMap::new())),
samples: Arc::new(RwLock::new(VecDeque::new())),
warnings: Arc::new(RwLock::new(Vec::new())),
recommendations: Arc::new(RwLock::new(Vec::new())),
stats: Arc::new(RwLock::new(ProfilerStats::default())),
next_span_id: AtomicU64::new(0),
start_time: Arc::new(RwLock::new(None)),
events_counter: AtomicU64::new(0),
bytes_counter: AtomicU64::new(0),
}
}
pub async fn start(&self) -> Result<()> {
if self.running.load(Ordering::Acquire) {
return Err(anyhow!("Profiler already running"));
}
self.running.store(true, Ordering::Release);
*self.start_time.write().await = Some(Instant::now());
info!("Performance profiler started");
Ok(())
}
pub async fn stop(&self) -> Result<()> {
self.running.store(false, Ordering::Release);
if let Some(start) = *self.start_time.read().await {
let duration = start.elapsed();
let mut stats = self.stats.write().await;
stats.total_duration_secs = duration.as_secs_f64();
stats.total_events = self.events_counter.load(Ordering::Relaxed);
stats.total_bytes = self.bytes_counter.load(Ordering::Relaxed);
if duration.as_secs_f64() > 0.0 {
stats.avg_throughput = stats.total_events as f64 / duration.as_secs_f64();
}
}
info!("Performance profiler stopped");
Ok(())
}
pub fn is_running(&self) -> bool {
self.running.load(Ordering::Acquire)
}
pub fn record_event(&self, bytes: u64) {
self.events_counter.fetch_add(1, Ordering::Relaxed);
self.bytes_counter.fetch_add(bytes, Ordering::Relaxed);
}
pub fn record_latency(&self, latency: Duration) {
self.latency_histogram.record(latency.as_micros() as u64);
}
pub async fn start_span(&self, name: &str) -> u64 {
let id = self.next_span_id.fetch_add(1, Ordering::SeqCst);
let span = Span::new(name, id);
let mut spans = self.spans.write().await;
spans.insert(id, span);
let mut stats = self.stats.write().await;
stats.spans_recorded += 1;
id
}
pub async fn end_span(&self, id: u64) -> Option<Duration> {
let mut spans = self.spans.write().await;
if let Some(span) = spans.get_mut(&id) {
span.finish();
let duration = span.duration();
if self.config.enable_latency_tracking {
self.record_latency(duration);
}
Some(duration)
} else {
None
}
}
pub fn time_operation(&self, name: &str) -> OperationTimer {
OperationTimer::new(name)
}
pub fn record_operation(&self, timer: OperationTimer) {
let duration = timer.elapsed();
self.record_latency(duration);
}
pub async fn collect_sample(&self) -> PerformanceSample {
let now = Utc::now();
let events = self.events_counter.load(Ordering::Relaxed);
let bytes = self.bytes_counter.load(Ordering::Relaxed);
let (events_per_second, bytes_per_second) =
if let Some(start) = *self.start_time.read().await {
let duration = start.elapsed().as_secs_f64();
if duration > 0.0 {
(events as f64 / duration, (bytes as f64 / duration) as u64)
} else {
(0.0, 0)
}
} else {
(0.0, 0)
};
let latency_stats = self.latency_histogram.stats();
let sample = PerformanceSample {
timestamp: now,
cpu_usage_percent: 0.0, memory_usage_bytes: 0, memory_usage_percent: 0.0,
events_per_second,
bytes_per_second,
active_operations: self.spans.read().await.len() as u64,
p99_latency_us: latency_stats.p99,
buffer_usage_percent: 0.0,
};
let mut samples = self.samples.write().await;
samples.push_back(sample.clone());
while samples.len() > self.config.history_size {
samples.pop_front();
}
drop(samples);
let mut stats = self.stats.write().await;
stats.samples_collected += 1;
drop(stats);
self.check_warnings(&sample).await;
sample
}
async fn check_warnings(&self, sample: &PerformanceSample) {
let mut warnings = self.warnings.write().await;
if sample.cpu_usage_percent > self.config.warning_thresholds.cpu_usage_percent {
warnings.push(PerformanceWarning {
warning_type: WarningType::HighCpuUsage,
message: format!(
"CPU usage {}% exceeds threshold {}%",
sample.cpu_usage_percent, self.config.warning_thresholds.cpu_usage_percent
),
severity: if sample.cpu_usage_percent > 95.0 {
WarningSeverity::Critical
} else {
WarningSeverity::Warning
},
timestamp: sample.timestamp,
current_value: sample.cpu_usage_percent,
threshold: self.config.warning_thresholds.cpu_usage_percent,
});
}
if sample.p99_latency_us > self.config.warning_thresholds.p99_latency_us {
warnings.push(PerformanceWarning {
warning_type: WarningType::HighLatency,
message: format!(
"P99 latency {}us exceeds threshold {}us",
sample.p99_latency_us, self.config.warning_thresholds.p99_latency_us
),
severity: if sample.p99_latency_us
> self.config.warning_thresholds.p99_latency_us * 2
{
WarningSeverity::Critical
} else {
WarningSeverity::Warning
},
timestamp: sample.timestamp,
current_value: sample.p99_latency_us as f64,
threshold: self.config.warning_thresholds.p99_latency_us as f64,
});
}
if sample.events_per_second < self.config.warning_thresholds.min_throughput {
warnings.push(PerformanceWarning {
warning_type: WarningType::LowThroughput,
message: format!(
"Throughput {:.2} events/sec below threshold {:.2}",
sample.events_per_second, self.config.warning_thresholds.min_throughput
),
severity: WarningSeverity::Warning,
timestamp: sample.timestamp,
current_value: sample.events_per_second,
threshold: self.config.warning_thresholds.min_throughput,
});
}
let mut stats = self.stats.write().await;
stats.warnings_generated = warnings.len() as u64;
}
pub async fn generate_recommendations(&self) -> Vec<Recommendation> {
let mut recommendations = Vec::new();
let latency_stats = self.latency_histogram.stats();
let stats = self.stats.read().await;
if latency_stats.p99 > 10000 {
recommendations.push(Recommendation {
category: RecommendationCategory::BatchSize,
title: "Increase batch size".to_string(),
description: "High P99 latency detected. Consider increasing batch size to amortize overhead.".to_string(),
impact: RecommendationImpact::High,
effort: RecommendationEffort::Low,
priority: 9,
});
}
if stats.avg_throughput < 1000.0 && stats.total_events > 100 {
recommendations.push(Recommendation {
category: RecommendationCategory::Parallelism,
title: "Increase parallelism".to_string(),
description:
"Low throughput detected. Consider increasing worker threads or partitions."
.to_string(),
impact: RecommendationImpact::High,
effort: RecommendationEffort::Medium,
priority: 8,
});
}
if latency_stats.max > latency_stats.p99 * 10 {
recommendations.push(Recommendation {
category: RecommendationCategory::MemoryManagement,
title: "Investigate latency spikes".to_string(),
description: "Large variance in latency detected. May indicate GC pressure or resource contention.".to_string(),
impact: RecommendationImpact::Medium,
effort: RecommendationEffort::High,
priority: 7,
});
}
*self.recommendations.write().await = recommendations.clone();
recommendations
}
pub fn get_latency_stats(&self) -> HistogramStats {
self.latency_histogram.stats()
}
pub async fn get_warnings(&self) -> Vec<PerformanceWarning> {
self.warnings.read().await.clone()
}
pub async fn get_samples(&self) -> Vec<PerformanceSample> {
self.samples.read().await.iter().cloned().collect()
}
pub async fn get_stats(&self) -> ProfilerStats {
self.stats.read().await.clone()
}
pub async fn generate_report(&self) -> PerformanceReport {
let stats = self.stats.read().await.clone();
let latency_stats = self.latency_histogram.stats();
let warnings = self.warnings.read().await.clone();
let recommendations = self.generate_recommendations().await;
let samples = self.samples.read().await.iter().cloned().collect();
let summary = self.generate_summary(&stats, &latency_stats).await;
PerformanceReport {
generated_at: Utc::now(),
duration_secs: stats.total_duration_secs,
total_events: stats.total_events,
total_bytes: stats.total_bytes,
avg_throughput: stats.avg_throughput,
peak_throughput: stats.peak_throughput,
latency_stats,
warnings,
recommendations,
samples,
summary,
}
}
async fn generate_summary(&self, stats: &ProfilerStats, latency: &HistogramStats) -> String {
let mut summary = String::new();
summary.push_str(&format!("Performance Summary\n{}\n", "=".repeat(50)));
summary.push_str(&format!("Duration: {:.2}s\n", stats.total_duration_secs));
summary.push_str(&format!("Events processed: {}\n", stats.total_events));
summary.push_str(&format!(
"Throughput: {:.2} events/sec\n",
stats.avg_throughput
));
summary.push_str(&format!(
"Latency P50/P99/Max: {}us / {}us / {}us\n",
latency.p50, latency.p99, latency.max
));
summary.push_str(&format!("Warnings: {}\n", stats.warnings_generated));
summary
}
pub async fn reset(&self) {
self.latency_histogram.reset();
self.spans.write().await.clear();
self.samples.write().await.clear();
self.warnings.write().await.clear();
self.recommendations.write().await.clear();
*self.stats.write().await = ProfilerStats::default();
self.events_counter.store(0, Ordering::Relaxed);
self.bytes_counter.store(0, Ordering::Relaxed);
*self.start_time.write().await = None;
info!("Performance profiler reset");
}
}
/// Complete snapshot of a profiling run, suitable for JSON export or
/// console printing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceReport {
    /// When the report was assembled (UTC).
    pub generated_at: DateTime<Utc>,
    /// Run duration in seconds (as recorded at `stop`).
    pub duration_secs: f64,
    /// Total events recorded during the run.
    pub total_events: u64,
    /// Total bytes recorded during the run.
    pub total_bytes: u64,
    /// Average events/sec over the run.
    pub avg_throughput: f64,
    /// Peak events/sec observed.
    pub peak_throughput: f64,
    /// Latency histogram snapshot at report time.
    pub latency_stats: HistogramStats,
    /// All warnings accumulated during the run.
    pub warnings: Vec<PerformanceWarning>,
    /// Tuning suggestions generated at report time.
    pub recommendations: Vec<Recommendation>,
    /// Retained sample history (oldest first).
    pub samples: Vec<PerformanceSample>,
    /// Human-readable summary text.
    pub summary: String,
}
impl PerformanceReport {
pub fn to_json(&self) -> Result<String> {
serde_json::to_string_pretty(self).map_err(|e| anyhow!("JSON error: {}", e))
}
pub fn print(&self) {
println!("{}", self.summary);
if !self.warnings.is_empty() {
println!("\nWarnings:");
for warning in &self.warnings {
println!(" [{:?}] {}", warning.severity, warning.message);
}
}
if !self.recommendations.is_empty() {
println!("\nRecommendations:");
for rec in &self.recommendations {
println!(
" [Priority {}] {} - {}",
rec.priority, rec.title, rec.description
);
}
}
}
}
/// Fluent builder for `PerformanceProfiler`; wraps a `ProfilerConfig` that
/// starts from `ProfilerConfig::default()`.
pub struct ProfilerBuilder {
    // Configuration being assembled; consumed by `build`.
    config: ProfilerConfig,
}
impl ProfilerBuilder {
pub fn new() -> Self {
Self {
config: ProfilerConfig::default(),
}
}
pub fn with_cpu_profiling(mut self) -> Self {
self.config.enable_cpu_profiling = true;
self
}
pub fn with_memory_tracking(mut self) -> Self {
self.config.enable_memory_profiling = true;
self
}
pub fn sampling_interval(mut self, interval: Duration) -> Self {
self.config.sampling_interval = interval;
self
}
pub fn history_size(mut self, size: usize) -> Self {
self.config.history_size = size;
self
}
pub fn warning_thresholds(mut self, thresholds: WarningThresholds) -> Self {
self.config.warning_thresholds = thresholds;
self
}
pub fn build(self) -> PerformanceProfiler {
PerformanceProfiler::new(self.config)
}
}
impl Default for ProfilerBuilder {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    use super::*;

    // A freshly built profiler starts in the stopped state.
    #[tokio::test]
    async fn test_profiler_creation() {
        let profiler = PerformanceProfiler::builder().build();
        assert!(!profiler.is_running());
    }

    // start()/stop() flip the running flag.
    #[tokio::test]
    async fn test_start_stop() {
        let profiler = PerformanceProfiler::builder().build();
        profiler.start().await.unwrap();
        assert!(profiler.is_running());
        profiler.stop().await.unwrap();
        assert!(!profiler.is_running());
    }

    // record_event accumulates event and byte counters into the final stats.
    #[tokio::test]
    async fn test_record_event() {
        let profiler = PerformanceProfiler::builder().build();
        profiler.start().await.unwrap();
        profiler.record_event(100);
        profiler.record_event(200);
        profiler.stop().await.unwrap();
        let stats = profiler.get_stats().await;
        assert_eq!(stats.total_events, 2);
        assert_eq!(stats.total_bytes, 300);
    }

    // The histogram tracks count and min/max across recorded values.
    #[tokio::test]
    async fn test_latency_histogram() {
        let histogram = LatencyHistogram::new();
        histogram.record(100);
        histogram.record(200);
        histogram.record(1000);
        histogram.record(5000);
        histogram.record(10000);
        let stats = histogram.stats();
        assert_eq!(stats.count, 5);
        assert!(stats.min <= 100);
        assert!(stats.max >= 10000);
    }

    // A started-then-ended span reports at least the slept duration.
    #[tokio::test]
    async fn test_spans() {
        let profiler = PerformanceProfiler::builder().build();
        profiler.start().await.unwrap();
        let span_id = profiler.start_span("test_operation").await;
        tokio::time::sleep(Duration::from_millis(10)).await;
        let duration = profiler.end_span(span_id).await;
        assert!(duration.is_some());
        assert!(duration.unwrap() >= Duration::from_millis(10));
    }

    // record_operation feeds the timer's elapsed time into the histogram.
    #[tokio::test]
    async fn test_operation_timer() {
        let profiler = PerformanceProfiler::builder().build();
        let timer = profiler.time_operation("test");
        tokio::time::sleep(Duration::from_millis(5)).await;
        profiler.record_operation(timer);
        let stats = profiler.get_latency_stats();
        assert!(stats.count > 0);
    }

    // collect_sample produces a non-negative throughput figure.
    #[tokio::test]
    async fn test_collect_sample() {
        let profiler = PerformanceProfiler::builder().build();
        profiler.start().await.unwrap();
        profiler.record_event(100);
        let sample = profiler.collect_sample().await;
        assert!(sample.events_per_second >= 0.0);
    }

    // 50ms latencies (50000us) exceed the hard-coded 10000us p99 cutoff,
    // so at least one recommendation is produced.
    #[tokio::test]
    async fn test_recommendations() {
        let profiler = PerformanceProfiler::builder().build();
        profiler.start().await.unwrap();
        for _ in 0..100 {
            profiler.record_latency(Duration::from_millis(50));
        }
        let recommendations = profiler.generate_recommendations().await;
        assert!(!recommendations.is_empty());
    }

    // A full report reflects the recorded events and carries a summary.
    #[tokio::test]
    async fn test_generate_report() {
        let profiler = PerformanceProfiler::builder().build();
        profiler.start().await.unwrap();
        for _ in 0..10 {
            profiler.record_event(100);
            profiler.record_latency(Duration::from_micros(500));
        }
        profiler.stop().await.unwrap();
        let report = profiler.generate_report().await;
        assert_eq!(report.total_events, 10);
        assert!(!report.summary.is_empty());
    }

    // With an unreachably high throughput floor, sampling after one event
    // must emit a LowThroughput warning.
    #[tokio::test]
    async fn test_warnings() {
        let thresholds = WarningThresholds {
            min_throughput: 10000.0,
            ..Default::default()
        };
        let profiler = PerformanceProfiler::builder()
            .warning_thresholds(thresholds)
            .build();
        profiler.start().await.unwrap();
        tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
        profiler.record_event(100);
        profiler.collect_sample().await;
        let warnings = profiler.get_warnings().await;
        assert!(warnings
            .iter()
            .any(|w| w.warning_type == WarningType::LowThroughput));
    }

    // reset() clears both the aggregate stats and the histogram.
    #[tokio::test]
    async fn test_reset() {
        let profiler = PerformanceProfiler::builder().build();
        profiler.start().await.unwrap();
        profiler.record_event(100);
        profiler.record_latency(Duration::from_micros(100));
        profiler.reset().await;
        let stats = profiler.get_stats().await;
        assert_eq!(stats.total_events, 0);
        let latency = profiler.get_latency_stats();
        assert_eq!(latency.count, 0);
    }

    // Percentiles are monotone: p50 never exceeds p99.
    #[test]
    fn test_histogram_percentiles() {
        let histogram = LatencyHistogram::new();
        for i in 1..=100 {
            histogram.record(i * 10);
        }
        let p50 = histogram.percentile(50.0);
        let p99 = histogram.percentile(99.0);
        assert!(p50 <= p99);
    }
}