use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::time::Duration;
/// Bucketed latency histogram with microsecond resolution.
///
/// Bucket bounds are exclusive upper limits in microseconds; an implicit
/// trailing overflow bucket collects samples at or above the largest bound.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LatencyHistogram {
    // Sorted, exclusive upper bucket bounds, in microseconds.
    buckets: Vec<u64>,
    // Per-bucket sample counts; length is `buckets.len() + 1`, the last
    // slot being the overflow bucket.
    counts: Vec<u64>,
    // Total number of samples recorded.
    total_samples: u64,
    // Smallest recorded latency; stays `u64::MAX` until the first sample.
    min_latency_us: u64,
    // Largest recorded latency in microseconds (0 before any sample).
    max_latency_us: u64,
    // Running sum of all recorded latencies, used by `avg`.
    sum_latency_us: u64,
}
impl LatencyHistogram {
    /// Creates a histogram with default bucket bounds spanning 10µs to 50ms.
    pub fn new() -> Self {
        Self::with_buckets(vec![10, 50, 100, 500, 1000, 5000, 10000, 50000])
    }

    /// Creates a histogram from caller-supplied bucket upper bounds
    /// (microseconds). Bounds are sorted and deduplicated; one extra
    /// overflow count slot is appended for samples above the largest bound.
    pub fn with_buckets(mut buckets: Vec<u64>) -> Self {
        buckets.sort_unstable();
        // Duplicate bounds would create buckets that can never be hit,
        // skewing percentile lookups.
        buckets.dedup();
        let counts = vec![0; buckets.len() + 1];
        Self {
            buckets,
            counts,
            total_samples: 0,
            min_latency_us: u64::MAX,
            max_latency_us: 0,
            sum_latency_us: 0,
        }
    }

    /// Records one latency sample.
    pub fn record(&mut self, latency: Duration) {
        // Saturate instead of silently truncating: `as u64` on the u128
        // microsecond count would wrap for absurdly large durations.
        let latency_us = u64::try_from(latency.as_micros()).unwrap_or(u64::MAX);
        self.min_latency_us = self.min_latency_us.min(latency_us);
        self.max_latency_us = self.max_latency_us.max(latency_us);
        // Saturating add keeps a pathological sample from panicking in
        // debug builds on overflow.
        self.sum_latency_us = self.sum_latency_us.saturating_add(latency_us);
        self.total_samples += 1;
        // First bucket whose exclusive upper bound exceeds the sample;
        // falls through to the trailing overflow slot.
        let bucket_idx = self
            .buckets
            .iter()
            .position(|&b| latency_us < b)
            .unwrap_or(self.buckets.len());
        self.counts[bucket_idx] += 1;
    }

    /// Mean recorded latency; zero when no samples have been recorded.
    pub fn avg(&self) -> Duration {
        if self.total_samples == 0 {
            Duration::from_micros(0)
        } else {
            Duration::from_micros(self.sum_latency_us / self.total_samples)
        }
    }

    /// Smallest recorded latency; zero when no samples have been recorded.
    pub fn min(&self) -> Duration {
        if self.min_latency_us == u64::MAX {
            Duration::from_micros(0)
        } else {
            Duration::from_micros(self.min_latency_us)
        }
    }

    /// Largest recorded latency; zero when no samples have been recorded.
    pub fn max(&self) -> Duration {
        Duration::from_micros(self.max_latency_us)
    }

    /// Estimates the latency at percentile `p` (0.0..=1.0) from bucket
    /// counts. The estimate is the matched bucket's upper bound, or the
    /// true maximum for samples in the overflow bucket.
    pub fn percentile(&self, p: f64) -> Duration {
        if self.total_samples == 0 {
            return Duration::from_micros(0);
        }
        // Rank of the target sample, clamped to [1, total]. The previous
        // truncating computation `(total * p) as u64` yielded rank 0 for
        // small sample counts, which matched the very first (possibly
        // empty) bucket and reported the lowest bound regardless of data.
        let target_count =
            ((self.total_samples as f64 * p).ceil() as u64).clamp(1, self.total_samples);
        let mut cumulative = 0u64;
        for (idx, &count) in self.counts.iter().enumerate() {
            cumulative += count;
            if cumulative >= target_count {
                let latency_us = if idx < self.buckets.len() {
                    self.buckets[idx]
                } else {
                    // Overflow bucket has no upper bound; report the max.
                    self.max_latency_us
                };
                return Duration::from_micros(latency_us);
            }
        }
        // Unreachable when counts are consistent, but stay total-safe.
        Duration::from_micros(self.max_latency_us)
    }

    /// Median latency estimate.
    pub fn p50(&self) -> Duration {
        self.percentile(0.50)
    }

    /// 90th-percentile latency estimate.
    pub fn p90(&self) -> Duration {
        self.percentile(0.90)
    }

    /// 95th-percentile latency estimate.
    pub fn p95(&self) -> Duration {
        self.percentile(0.95)
    }

    /// 99th-percentile latency estimate.
    pub fn p99(&self) -> Duration {
        self.percentile(0.99)
    }

    /// 99.9th-percentile latency estimate.
    pub fn p999(&self) -> Duration {
        self.percentile(0.999)
    }

    /// Total number of samples recorded.
    pub fn count(&self) -> u64 {
        self.total_samples
    }

    /// One-line human-readable summary of the main statistics.
    pub fn summary(&self) -> String {
        format!(
            "Samples: {}, Min: {:?}, Max: {:?}, Avg: {:?}, P50: {:?}, P90: {:?}, P95: {:?}, P99: {:?}",
            self.total_samples,
            self.min(),
            self.max(),
            self.avg(),
            self.p50(),
            self.p90(),
            self.p95(),
            self.p99()
        )
    }
}
impl Default for LatencyHistogram {
fn default() -> Self {
Self::new()
}
}
/// Collects one latency histogram per named operation.
///
/// `BTreeMap` keeps iteration (and therefore `report` output) in stable
/// alphabetical order of operation names.
#[derive(Debug, Clone, Default)]
pub struct PerformanceProfiler {
    histograms: BTreeMap<String, LatencyHistogram>,
}
impl PerformanceProfiler {
    /// Creates an empty profiler with no recorded operations.
    pub fn new() -> Self {
        Self {
            histograms: BTreeMap::new(),
        }
    }

    /// Records a latency sample for `operation`, lazily creating the
    /// operation's histogram on first use.
    pub fn record(&mut self, operation: &str, latency: Duration) {
        let histogram = self.histograms.entry(operation.to_string()).or_default();
        histogram.record(latency);
    }

    /// Returns the histogram for `operation`, or `None` if nothing has
    /// been recorded under that name.
    pub fn get_histogram(&self, operation: &str) -> Option<&LatencyHistogram> {
        self.histograms.get(operation)
    }

    /// Read-only view of every per-operation histogram, keyed by name.
    pub fn histograms(&self) -> &BTreeMap<String, LatencyHistogram> {
        &self.histograms
    }

    /// Renders a multi-line, human-readable report covering every
    /// operation in alphabetical order.
    pub fn report(&self) -> String {
        let body: String = self
            .histograms
            .iter()
            .map(|(operation, histogram)| {
                format!("Operation: {operation}\n  {}\n\n", histogram.summary())
            })
            .collect();
        format!("=== Performance Profile ===\n\n{body}")
    }

    /// Discards every recorded histogram.
    pub fn reset(&mut self) {
        self.histograms.clear();
    }
}
/// Tracks operation and byte throughput for one named operation, measured
/// from the instant the tracker was created.
#[derive(Debug, Clone)]
pub struct ThroughputTracker {
    // Label used in `summary` output.
    operation: String,
    // Operations recorded via `record_op`.
    total_ops: u64,
    // Bytes recorded via `record_bytes`.
    total_bytes: u64,
    // Start of the measurement window.
    start_time: std::time::Instant,
}

impl ThroughputTracker {
    /// Starts tracking throughput for `operation` from the current instant.
    pub fn new(operation: String) -> Self {
        Self {
            operation,
            total_ops: 0,
            total_bytes: 0,
            start_time: std::time::Instant::now(),
        }
    }

    /// Records one completed operation.
    pub fn record_op(&mut self) {
        self.total_ops += 1;
    }

    /// Adds `bytes` to the running byte total.
    pub fn record_bytes(&mut self, bytes: u64) {
        self.total_bytes += bytes;
    }

    /// Shared rate computation: `count` per elapsed second, or 0.0 when no
    /// measurable time has passed (guards the division by zero that both
    /// public rate accessors previously duplicated).
    fn per_second(&self, count: u64) -> f64 {
        let elapsed = self.start_time.elapsed().as_secs_f64();
        if elapsed > 0.0 {
            count as f64 / elapsed
        } else {
            0.0
        }
    }

    /// Operations per second since creation.
    pub fn ops_per_second(&self) -> f64 {
        self.per_second(self.total_ops)
    }

    /// Bytes per second since creation.
    pub fn bytes_per_second(&self) -> f64 {
        self.per_second(self.total_bytes)
    }

    /// Throughput in binary megabytes (MiB) per second.
    pub fn megabytes_per_second(&self) -> f64 {
        self.bytes_per_second() / (1024.0 * 1024.0)
    }

    /// Time elapsed since the tracker was created.
    pub fn elapsed(&self) -> Duration {
        self.start_time.elapsed()
    }

    /// One-line human-readable throughput summary.
    pub fn summary(&self) -> String {
        format!(
            "{}: {} ops in {:?} ({:.2} ops/s, {:.2} MB/s)",
            self.operation,
            self.total_ops,
            self.elapsed(),
            self.ops_per_second(),
            self.megabytes_per_second()
        )
    }
}
#[derive(Debug, Clone, Default)]
pub struct BatchProfiler {
total_batches: u64,
total_items: u64,
batch_sizes: LatencyHistogram,
batch_latencies: LatencyHistogram,
}
impl BatchProfiler {
    /// Creates a profiler with size buckets suited to typical batch item
    /// counts and the default latency buckets for batch durations.
    pub fn new() -> Self {
        Self {
            total_batches: 0,
            total_items: 0,
            batch_sizes: LatencyHistogram::with_buckets(vec![1, 10, 50, 100, 500, 1000]),
            batch_latencies: LatencyHistogram::new(),
        }
    }

    /// Records one batch of `batch_size` items that took `latency` in total.
    pub fn record_batch(&mut self, batch_size: usize, latency: Duration) {
        let size = batch_size as u64;
        self.total_batches += 1;
        self.total_items += size;
        // The size histogram stores item counts in its microseconds slot.
        self.batch_sizes.record(Duration::from_micros(size));
        self.batch_latencies.record(latency);
    }

    /// Mean items per batch; 0.0 before any batch is recorded.
    pub fn avg_batch_size(&self) -> f64 {
        match self.total_batches {
            0 => 0.0,
            batches => self.total_items as f64 / batches as f64,
        }
    }

    /// Mean latency attributed to a single item across all batches; zero
    /// before any item is recorded.
    pub fn avg_latency_per_item(&self) -> Duration {
        match self.total_items {
            0 => Duration::from_micros(0),
            items => Duration::from_micros(self.batch_latencies.sum_latency_us / items),
        }
    }

    /// One-line summary of batch counts and latency statistics.
    pub fn summary(&self) -> String {
        format!(
            "Batches: {}, Items: {}, Avg Batch Size: {:.2}, Avg Latency: {:?}, Avg per Item: {:?}",
            self.total_batches,
            self.total_items,
            self.avg_batch_size(),
            self.batch_latencies.avg(),
            self.avg_latency_per_item()
        )
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Sanity check: counts, min, and max track recorded samples.
    #[test]
    fn test_latency_histogram_basic() {
        let mut hist = LatencyHistogram::new();
        hist.record(Duration::from_micros(50));
        hist.record(Duration::from_micros(100));
        hist.record(Duration::from_micros(150));
        assert_eq!(hist.count(), 3);
        assert!(hist.min() <= Duration::from_micros(50));
        assert!(hist.max() >= Duration::from_micros(150));
    }

    // Percentiles are bucket-bound estimates; only monotonicity
    // (p50 <= p90 <= p99) is asserted, not exact values.
    #[test]
    fn test_latency_histogram_percentiles() {
        let mut hist = LatencyHistogram::new();
        // 100 evenly spaced samples: 100us, 200us, ..., 10000us.
        for i in 1..=100 {
            hist.record(Duration::from_micros(i * 100));
        }
        assert_eq!(hist.count(), 100);
        let p50 = hist.p50();
        let p90 = hist.p90();
        let p99 = hist.p99();
        assert!(p50 <= p90);
        assert!(p90 <= p99);
    }

    // Histograms are created lazily per operation name and show up in
    // the rendered report.
    #[test]
    fn test_performance_profiler() {
        let mut profiler = PerformanceProfiler::new();
        profiler.record("put", Duration::from_micros(100));
        profiler.record("put", Duration::from_micros(150));
        profiler.record("get", Duration::from_micros(50));
        assert!(profiler.get_histogram("put").is_some());
        assert!(profiler.get_histogram("get").is_some());
        assert!(profiler.get_histogram("delete").is_none());
        let put_hist = profiler.get_histogram("put").unwrap();
        assert_eq!(put_hist.count(), 2);
        let report = profiler.report();
        assert!(report.contains("put"));
        assert!(report.contains("get"));
    }

    // Totals are exact; rates depend on wall-clock elapsed time, so only
    // positivity and summary formatting are checked.
    #[test]
    fn test_throughput_tracker() {
        let mut tracker = ThroughputTracker::new("test".to_string());
        for _ in 0..100 {
            tracker.record_op();
            tracker.record_bytes(1024);
        }
        assert_eq!(tracker.total_ops, 100);
        assert_eq!(tracker.total_bytes, 102400);
        assert!(tracker.ops_per_second() > 0.0);
        let summary = tracker.summary();
        assert!(summary.contains("test"));
        assert!(summary.contains("100 ops"));
    }

    // Batch totals and the derived average batch size are exact.
    #[test]
    fn test_batch_profiler() {
        let mut profiler = BatchProfiler::new();
        profiler.record_batch(10, Duration::from_micros(1000));
        profiler.record_batch(20, Duration::from_micros(2000));
        profiler.record_batch(30, Duration::from_micros(3000));
        assert_eq!(profiler.total_batches, 3);
        assert_eq!(profiler.total_items, 60);
        assert_eq!(profiler.avg_batch_size(), 20.0);
        let summary = profiler.summary();
        assert!(summary.contains("Batches: 3"));
        assert!(summary.contains("Items: 60"));
    }
}