fastapi_core/bench.rs

//! Latency measurement and benchmarking utilities.
//!
//! Provides tools for measuring request processing latency with
//! percentile tracking, histogram distribution, and reporting.
//!
//! # Example
//!
//! ```ignore
//! use fastapi_core::bench::{LatencyHistogram, BenchmarkRunner, BenchmarkConfig};
//!
//! let config = BenchmarkConfig::new("simple_get")
//!     .warmup_iterations(100)
//!     .iterations(10_000);
//!
//! let report = BenchmarkRunner::run(&config, || {
//!     // exercise some code path
//! });
//!
//! println!("{report}");
//! ```

use std::fmt;
use std::time::{Duration, Instant};

/// Collects latency samples and computes percentile statistics.
///
/// Samples are stored unsorted for fast insertion. Sorting happens
/// lazily when percentiles or reports are requested.
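///
/// # Example
///
/// A minimal sketch of recording samples and reading statistics back
/// (the durations are illustrative values, not real measurements):
///
/// ```ignore
/// use std::time::Duration;
/// use fastapi_core::bench::LatencyHistogram;
///
/// let mut hist = LatencyHistogram::new();
/// hist.record(Duration::from_micros(120));
/// hist.record(Duration::from_micros(250));
/// hist.record(Duration::from_micros(90));
/// assert_eq!(hist.count(), 3);
/// assert_eq!(hist.min(), Some(Duration::from_micros(90)));
/// assert_eq!(hist.percentile(50.0), Some(Duration::from_micros(120)));
/// ```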
pub struct LatencyHistogram {
    samples: Vec<Duration>,
    sorted: bool,
}

impl Default for LatencyHistogram {
    fn default() -> Self {
        Self::new()
    }
}

impl LatencyHistogram {
    /// Create a new empty histogram.
    #[must_use]
    pub fn new() -> Self {
        Self {
            samples: Vec::new(),
            sorted: false,
        }
    }

    /// Create a histogram pre-allocated for the given capacity.
    #[must_use]
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            samples: Vec::with_capacity(capacity),
            sorted: false,
        }
    }

    /// Record a latency sample.
    pub fn record(&mut self, duration: Duration) {
        self.samples.push(duration);
        self.sorted = false;
    }

    /// Record latency by measuring the duration of a closure.
    pub fn measure<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        let start = Instant::now();
        let result = f();
        self.record(start.elapsed());
        result
    }

    /// Number of recorded samples.
    #[must_use]
    pub fn count(&self) -> usize {
        self.samples.len()
    }

    /// Returns true if no samples have been recorded.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.samples.is_empty()
    }

    /// Ensure samples are sorted for percentile computation.
    fn ensure_sorted(&mut self) {
        if !self.sorted {
            self.samples.sort_unstable();
            self.sorted = true;
        }
    }

    /// Compute the value at the given percentile (0.0 to 100.0).
    ///
    /// Returns `None` if no samples have been recorded.
    /// Uses the nearest-rank method for percentile computation.
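    ///
    /// # Example
    ///
    /// A small sketch of the nearest-rank behaviour (values are illustrative):
    /// with four samples of 1..=4 ms, `percentile(50.0)` selects rank
    /// `ceil(0.5 * 4) = 2`, i.e. the 2 ms sample.
    ///
    /// ```ignore
    /// use std::time::Duration;
    /// use fastapi_core::bench::LatencyHistogram;
    ///
    /// let mut hist = LatencyHistogram::new();
    /// for ms in 1..=4 {
    ///     hist.record(Duration::from_millis(ms));
    /// }
    /// assert_eq!(hist.percentile(50.0), Some(Duration::from_millis(2)));
    /// assert_eq!(hist.percentile(100.0), Some(Duration::from_millis(4)));
    /// ```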
    #[allow(
        clippy::cast_precision_loss,
        clippy::cast_possible_truncation,
        clippy::cast_sign_loss
    )]
    pub fn percentile(&mut self, p: f64) -> Option<Duration> {
        if self.samples.is_empty() {
            return None;
        }
        self.ensure_sorted();

        let clamped = p.clamp(0.0, 100.0);
        let rank = (clamped / 100.0 * self.samples.len() as f64).ceil() as usize;
        let index = rank.saturating_sub(1).min(self.samples.len() - 1);
        Some(self.samples[index])
    }

    /// Minimum recorded latency.
    pub fn min(&mut self) -> Option<Duration> {
        self.ensure_sorted();
        self.samples.first().copied()
    }

    /// Maximum recorded latency.
    pub fn max(&mut self) -> Option<Duration> {
        self.ensure_sorted();
        self.samples.last().copied()
    }

    /// Mean (average) latency.
    #[must_use]
    pub fn mean(&self) -> Option<Duration> {
        if self.samples.is_empty() {
            return None;
        }
        let total: Duration = self.samples.iter().sum();
        Some(total / self.samples.len() as u32)
    }

    /// Standard deviation of latency samples.
    #[must_use]
    #[allow(
        clippy::cast_precision_loss,
        clippy::cast_possible_truncation,
        clippy::cast_sign_loss
    )]
    pub fn std_dev(&self) -> Option<Duration> {
        if self.samples.len() < 2 {
            return None;
        }
        let mean_nanos = self.mean()?.as_nanos() as f64;
        let variance: f64 = self
            .samples
            .iter()
            .map(|s| {
                let diff = s.as_nanos() as f64 - mean_nanos;
                diff * diff
            })
            .sum::<f64>()
            / (self.samples.len() - 1) as f64;

        Some(Duration::from_nanos(variance.sqrt() as u64))
    }

    /// Generate a full latency report with all standard percentiles.
    pub fn report(&mut self) -> Option<LatencyReport> {
        if self.samples.is_empty() {
            return None;
        }

        Some(LatencyReport {
            count: self.count(),
            min: self.min().unwrap_or_default(),
            max: self.max().unwrap_or_default(),
            mean: self.mean().unwrap_or_default(),
            std_dev: self.std_dev().unwrap_or_default(),
            p50: self.percentile(50.0).unwrap_or_default(),
            p90: self.percentile(90.0).unwrap_or_default(),
            p95: self.percentile(95.0).unwrap_or_default(),
            p99: self.percentile(99.0).unwrap_or_default(),
            p999: self.percentile(99.9).unwrap_or_default(),
            histogram_buckets: self.histogram_buckets(10),
        })
    }

    /// Build histogram buckets with the specified number of bins.
    ///
    /// Returns a list of [`HistogramBucket`] entries (start inclusive, end exclusive, count).
    #[allow(
        clippy::cast_precision_loss,
        clippy::cast_possible_truncation,
        clippy::cast_sign_loss
    )]
    pub fn histogram_buckets(&mut self, num_buckets: usize) -> Vec<HistogramBucket> {
        if self.samples.is_empty() || num_buckets == 0 {
            return Vec::new();
        }
        self.ensure_sorted();

        let min_ns = self.samples.first().unwrap().as_nanos() as f64;
        let max_ns = self.samples.last().unwrap().as_nanos() as f64;

        if (max_ns - min_ns).abs() < f64::EPSILON {
            // All samples are the same value
            return vec![HistogramBucket {
                range_start: self.samples[0],
                range_end: self.samples[0],
                count: self.samples.len(),
            }];
        }

        let bucket_width = (max_ns - min_ns) / num_buckets as f64;
        let mut buckets = Vec::with_capacity(num_buckets);

        for i in 0..num_buckets {
            let start_ns = min_ns + (i as f64 * bucket_width);
            let end_ns = if i == num_buckets - 1 {
                max_ns + 1.0 // Include the maximum value
            } else {
                min_ns + ((i + 1) as f64 * bucket_width)
            };

            let count = self
                .samples
                .iter()
                .filter(|s| {
                    let ns = s.as_nanos() as f64;
                    ns >= start_ns && ns < end_ns
                })
                .count();

            buckets.push(HistogramBucket {
                range_start: Duration::from_nanos(start_ns as u64),
                range_end: Duration::from_nanos(end_ns as u64),
                count,
            });
        }

        buckets
    }

    /// Clear all recorded samples.
    pub fn clear(&mut self) {
        self.samples.clear();
        self.sorted = true;
    }
}

/// A single histogram bucket.
#[derive(Debug, Clone)]
pub struct HistogramBucket {
    /// Start of the bucket range (inclusive).
    pub range_start: Duration,
    /// End of the bucket range (exclusive).
    pub range_end: Duration,
    /// Number of samples in this bucket.
    pub count: usize,
}

/// Summary report of latency measurements.
#[derive(Debug, Clone)]
pub struct LatencyReport {
    /// Total number of samples.
    pub count: usize,
    /// Minimum latency.
    pub min: Duration,
    /// Maximum latency.
    pub max: Duration,
    /// Mean (average) latency.
    pub mean: Duration,
    /// Standard deviation.
    pub std_dev: Duration,
    /// 50th percentile (median).
    pub p50: Duration,
    /// 90th percentile.
    pub p90: Duration,
    /// 95th percentile.
    pub p95: Duration,
    /// 99th percentile.
    pub p99: Duration,
    /// 99.9th percentile.
    pub p999: Duration,
    /// Histogram distribution buckets.
    pub histogram_buckets: Vec<HistogramBucket>,
}

impl LatencyReport {
    /// Returns true if the p99 or p99.9 latency exceeds the given threshold.
    #[must_use]
    pub fn has_tail_latency_above(&self, threshold: Duration) -> bool {
        self.p99 > threshold || self.p999 > threshold
    }

    /// Compare against a baseline report and return the comparison.
    #[must_use]
    pub fn compare(&self, baseline: &Self) -> LatencyComparison {
        LatencyComparison {
            current: self.clone(),
            baseline: baseline.clone(),
        }
    }
}

impl fmt::Display for LatencyReport {
    #[allow(
        clippy::cast_precision_loss,
        clippy::cast_possible_truncation,
        clippy::cast_sign_loss
    )]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "Latency Report ({} samples)", self.count)?;
        writeln!(f, "  min:    {}", format_duration(self.min))?;
        writeln!(f, "  mean:   {}", format_duration(self.mean))?;
        writeln!(f, "  stddev: {}", format_duration(self.std_dev))?;
        writeln!(f, "  max:    {}", format_duration(self.max))?;
        writeln!(f)?;
        writeln!(f, "  Percentiles:")?;
        writeln!(f, "    p50:   {}", format_duration(self.p50))?;
        writeln!(f, "    p90:   {}", format_duration(self.p90))?;
        writeln!(f, "    p95:   {}", format_duration(self.p95))?;
        writeln!(f, "    p99:   {}", format_duration(self.p99))?;
        writeln!(f, "    p99.9: {}", format_duration(self.p999))?;

        if !self.histogram_buckets.is_empty() {
            writeln!(f)?;
            writeln!(f, "  Distribution:")?;
            let max_count = self
                .histogram_buckets
                .iter()
                .map(|b| b.count)
                .max()
                .unwrap_or(1);
            let bar_width: usize = 40;

            for bucket in &self.histogram_buckets {
                let bar_len = if max_count > 0 {
                    (bucket.count as f64 / max_count as f64 * bar_width as f64) as usize
                } else {
                    0
                };
                let bar: String = "#".repeat(bar_len);
                writeln!(
                    f,
                    "    [{:>8} - {:>8}] {:>6} |{bar}",
                    format_duration(bucket.range_start),
                    format_duration(bucket.range_end),
                    bucket.count,
                )?;
            }
        }

        Ok(())
    }
}

/// Comparison between current and baseline latency reports.
#[derive(Debug, Clone)]
pub struct LatencyComparison {
    /// Current measurement.
    pub current: LatencyReport,
    /// Baseline measurement to compare against.
    pub baseline: LatencyReport,
}

impl LatencyComparison {
    /// Returns true if any of p50, p95, p99, or p99.9 regressed beyond the given factor.
    ///
    /// A factor of 1.1 means a 10% regression threshold.
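    ///
    /// # Example
    ///
    /// A sketch of the intended use; `current` and `baseline` here stand in
    /// for `LatencyReport`s produced by two separate benchmark runs.
    ///
    /// ```ignore
    /// let comparison = current.compare(&baseline);
    /// if comparison.has_regression(1.10) {
    ///     eprintln!("latency regressed by more than 10%:\n{comparison}");
    /// }
    /// ```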
    #[must_use]
    #[allow(clippy::cast_precision_loss)]
    pub fn has_regression(&self, factor: f64) -> bool {
        let check = |current: Duration, baseline: Duration| -> bool {
            if baseline.is_zero() {
                return false;
            }
            let ratio = current.as_nanos() as f64 / baseline.as_nanos() as f64;
            ratio > factor
        };

        check(self.current.p50, self.baseline.p50)
            || check(self.current.p95, self.baseline.p95)
            || check(self.current.p99, self.baseline.p99)
            || check(self.current.p999, self.baseline.p999)
    }
}

impl fmt::Display for LatencyComparison {
    #[allow(clippy::cast_precision_loss)]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "Latency Comparison")?;
        writeln!(
            f,
            "  Samples: {} (current) vs {} (baseline)",
            self.current.count, self.baseline.count
        )?;
        writeln!(f)?;
        writeln!(
            f,
            "  {:>8}  {:>10}  {:>10}  {:>8}",
            "metric", "current", "baseline", "change"
        )?;
        writeln!(
            f,
            "  {:>8}  {:>10}  {:>10}  {:>8}",
            "------", "-------", "--------", "------"
        )?;

        for (label, cur, base) in [
            ("p50", self.current.p50, self.baseline.p50),
            ("p90", self.current.p90, self.baseline.p90),
            ("p95", self.current.p95, self.baseline.p95),
            ("p99", self.current.p99, self.baseline.p99),
            ("p99.9", self.current.p999, self.baseline.p999),
            ("mean", self.current.mean, self.baseline.mean),
        ] {
            let change = if base.is_zero() {
                "N/A".to_string()
            } else {
                let ratio = cur.as_nanos() as f64 / base.as_nanos() as f64;
                let pct = (ratio - 1.0) * 100.0;
                if pct >= 0.0 {
                    format!("+{pct:.1}%")
                } else {
                    format!("{pct:.1}%")
                }
            };

            writeln!(
                f,
                "  {:>8}  {:>10}  {:>10}  {:>8}",
                label,
                format_duration(cur),
                format_duration(base),
                change,
            )?;
        }

        Ok(())
    }
}

/// Configuration for a benchmark run.
#[derive(Debug, Clone)]
pub struct BenchmarkConfig {
    /// Name for this benchmark.
    pub name: String,
    /// Number of warmup iterations (discarded).
    pub warmup_iterations: usize,
    /// Number of measured iterations.
    pub iterations: usize,
}

impl BenchmarkConfig {
    /// Create a new benchmark configuration with the given name.
    #[must_use]
    pub fn new(name: impl Into<String>) -> Self {
        Self {
            name: name.into(),
            warmup_iterations: 100,
            iterations: 10_000,
        }
    }

    /// Set the number of warmup iterations.
    #[must_use]
    pub fn warmup_iterations(mut self, n: usize) -> Self {
        self.warmup_iterations = n;
        self
    }

    /// Set the number of measured iterations.
    #[must_use]
    pub fn iterations(mut self, n: usize) -> Self {
        self.iterations = n;
        self
    }
}

/// Runs benchmarks and collects latency data.
pub struct BenchmarkRunner;

impl BenchmarkRunner {
    /// Run a benchmark with the given configuration and return a latency report.
    ///
    /// The closure `f` is called `warmup_iterations + iterations` times.
    /// Only the last `iterations` calls are measured.
    pub fn run<F>(config: &BenchmarkConfig, mut f: F) -> LatencyReport
    where
        F: FnMut(),
    {
        // Warmup phase
        for _ in 0..config.warmup_iterations {
            f();
        }

        // Measurement phase
        let mut histogram = LatencyHistogram::with_capacity(config.iterations);
        for _ in 0..config.iterations {
            histogram.measure(|| f());
        }

        histogram.report().unwrap_or_else(|| LatencyReport {
            count: 0,
            min: Duration::ZERO,
            max: Duration::ZERO,
            mean: Duration::ZERO,
            std_dev: Duration::ZERO,
            p50: Duration::ZERO,
            p90: Duration::ZERO,
            p95: Duration::ZERO,
            p99: Duration::ZERO,
            p999: Duration::ZERO,
            histogram_buckets: Vec::new(),
        })
    }

    /// Run a benchmark whose closure returns a value; the result is passed through
    /// `std::hint::black_box` and discarded so the work is not optimized away.
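    ///
    /// # Example
    ///
    /// A minimal sketch; the summed range is an arbitrary illustrative workload.
    ///
    /// ```ignore
    /// use fastapi_core::bench::{BenchmarkConfig, BenchmarkRunner};
    ///
    /// let config = BenchmarkConfig::new("sum")
    ///     .warmup_iterations(10)
    ///     .iterations(1_000);
    /// let report = BenchmarkRunner::run_with_result(&config, || (0u64..100).sum::<u64>());
    /// assert_eq!(report.count, 1_000);
    /// ```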
    pub fn run_with_result<F, R>(config: &BenchmarkConfig, mut f: F) -> LatencyReport
    where
        F: FnMut() -> R,
    {
        Self::run(config, || {
            let _ = std::hint::black_box(f());
        })
    }

    /// Run multiple named benchmarks and return all reports.
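    ///
    /// # Example
    ///
    /// A sketch of building a small suite of boxed closures; the benchmark
    /// names and bodies are illustrative.
    ///
    /// ```ignore
    /// use fastapi_core::bench::{BenchmarkConfig, BenchmarkRunner, format_duration};
    ///
    /// let suite: Vec<(BenchmarkConfig, Box<dyn FnMut()>)> = vec![
    ///     (BenchmarkConfig::new("noop").iterations(1_000), Box::new(|| {})),
    ///     (BenchmarkConfig::new("alloc").iterations(1_000), Box::new(|| {
    ///         let _ = vec![0u8; 64];
    ///     })),
    /// ];
    /// for (name, report) in BenchmarkRunner::run_suite(suite) {
    ///     println!("{name}: p99 = {}", format_duration(report.p99));
    /// }
    /// ```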
    pub fn run_suite(
        suite: Vec<(BenchmarkConfig, Box<dyn FnMut()>)>,
    ) -> Vec<(String, LatencyReport)> {
        suite
            .into_iter()
            .map(|(config, mut f)| {
                let name = config.name.clone();
                let report = Self::run(&config, &mut *f);
                (name, report)
            })
            .collect()
    }
}

/// Format a duration in a human-readable way.
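///
/// # Example
///
/// A few illustrative conversions:
///
/// ```ignore
/// use std::time::Duration;
/// use fastapi_core::bench::format_duration;
///
/// assert_eq!(format_duration(Duration::from_nanos(750)), "750ns");
/// assert_eq!(format_duration(Duration::from_micros(250)), "250.0us");
/// assert_eq!(format_duration(Duration::from_millis(3)), "3.00ms");
/// assert_eq!(format_duration(Duration::from_secs(2)), "2.000s");
/// ```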
#[must_use]
#[allow(clippy::cast_precision_loss)]
pub fn format_duration(d: Duration) -> String {
    let nanos = d.as_nanos();
    if nanos < 1_000 {
        format!("{nanos}ns")
    } else if nanos < 1_000_000 {
        format!("{:.1}us", nanos as f64 / 1_000.0)
    } else if nanos < 1_000_000_000 {
        format!("{:.2}ms", nanos as f64 / 1_000_000.0)
    } else {
        format!("{:.3}s", nanos as f64 / 1_000_000_000.0)
    }
}

// ============================================================================
// Memory Tracking
// ============================================================================

/// A snapshot of memory usage at a point in time.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct MemorySnapshot {
    /// Resident set size in bytes (physical memory used).
    pub rss_bytes: usize,
    /// Virtual memory size in bytes.
    pub vms_bytes: usize,
}

impl MemorySnapshot {
    /// Take a snapshot of current process memory usage.
    ///
    /// On Linux, reads `/proc/self/status` for `VmRSS` and `VmSize`.
    /// On other platforms, returns zero values.
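    ///
    /// # Example
    ///
    /// A minimal sketch; the reported numbers depend on the platform and
    /// the process state at the time of the call.
    ///
    /// ```ignore
    /// use fastapi_core::bench::MemorySnapshot;
    ///
    /// let snap = MemorySnapshot::current();
    /// println!("resident: {}, virtual: {}", snap.rss_display(), snap.vms_display());
    /// ```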
    #[must_use]
    pub fn current() -> Self {
        read_proc_memory().unwrap_or(Self {
            rss_bytes: 0,
            vms_bytes: 0,
        })
    }

    /// Returns RSS formatted as a human-readable string.
    #[must_use]
    pub fn rss_display(&self) -> String {
        format_bytes_size(self.rss_bytes)
    }

    /// Returns VMS formatted as a human-readable string.
    #[must_use]
    pub fn vms_display(&self) -> String {
        format_bytes_size(self.vms_bytes)
    }
}

impl fmt::Display for MemorySnapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "RSS: {}, VMS: {}",
            self.rss_display(),
            self.vms_display()
        )
    }
}

/// Tracks memory usage over time with baseline, peak, and delta computation.
///
/// # Example
///
/// ```ignore
/// use fastapi_core::bench::MemoryTracker;
///
/// let mut tracker = MemoryTracker::new();
///
/// // ... do some work ...
/// tracker.sample();
///
/// // ... do more work ...
/// tracker.sample();
///
/// let report = tracker.report();
/// println!("{report}");
/// ```
pub struct MemoryTracker {
    baseline: MemorySnapshot,
    samples: Vec<MemorySnapshot>,
    peak_rss: usize,
}

impl Default for MemoryTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl MemoryTracker {
    /// Create a new memory tracker, recording the current RSS as baseline.
    #[must_use]
    pub fn new() -> Self {
        let baseline = MemorySnapshot::current();
        Self {
            baseline,
            samples: Vec::new(),
            peak_rss: baseline.rss_bytes,
        }
    }

    /// Take a memory sample at the current moment.
    pub fn sample(&mut self) {
        let snap = MemorySnapshot::current();
        if snap.rss_bytes > self.peak_rss {
            self.peak_rss = snap.rss_bytes;
        }
        self.samples.push(snap);
    }

    /// Execute a closure, take a memory sample afterwards, and return the closure's result.
    pub fn measure<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        let result = f();
        self.sample();
        result
    }

    /// Record N iterations of a closure, sampling after each.
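    ///
    /// # Example
    ///
    /// A sketch of sampling after each of several allocations (the buffer
    /// size and iteration count are arbitrary illustrative values):
    ///
    /// ```ignore
    /// use fastapi_core::bench::MemoryTracker;
    ///
    /// let mut tracker = MemoryTracker::new();
    /// tracker.measure_repeated(100, || {
    ///     let _buf = vec![0u8; 4096];
    /// });
    /// println!("{}", tracker.report());
    /// ```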
    pub fn measure_repeated<F>(&mut self, iterations: usize, mut f: F)
    where
        F: FnMut(),
    {
        for _ in 0..iterations {
            f();
            self.sample();
        }
    }

    /// Number of samples taken (excluding baseline).
    #[must_use]
    pub fn sample_count(&self) -> usize {
        self.samples.len()
    }

    /// Get the baseline memory snapshot.
    #[must_use]
    pub fn baseline(&self) -> MemorySnapshot {
        self.baseline
    }

    /// Get the peak RSS observed.
    #[must_use]
    pub fn peak_rss(&self) -> usize {
        self.peak_rss
    }

    /// Generate a memory usage report.
    #[must_use]
    pub fn report(&self) -> MemoryReport {
        let current = self.samples.last().copied().unwrap_or(self.baseline);

        let delta_rss = current.rss_bytes.saturating_sub(self.baseline.rss_bytes);

        let per_operation_bytes = if self.samples.len() > 1 {
            Some(delta_rss / self.samples.len())
        } else {
            None
        };

        let leak_suspect = self.detect_leak_trend();

        MemoryReport {
            baseline_rss: self.baseline.rss_bytes,
            current_rss: current.rss_bytes,
            peak_rss: self.peak_rss,
            delta_rss,
            per_operation_bytes,
            sample_count: self.samples.len(),
            leak_suspect,
        }
    }

    /// Detect if memory is trending upward (potential leak).
    ///
    /// Splits the RSS samples into two halves and compares their means. Returns
    /// `true` when the second half averages more than 1 KB above the first half.
    #[allow(clippy::cast_precision_loss)]
    fn detect_leak_trend(&self) -> bool {
        if self.samples.len() < 10 {
            return false;
        }

        // Split samples into two halves and compare means
        let mid = self.samples.len() / 2;
        let first_half_mean: f64 = self.samples[..mid]
            .iter()
            .map(|s| s.rss_bytes as f64)
            .sum::<f64>()
            / mid as f64;

        let second_half_mean: f64 = self.samples[mid..]
            .iter()
            .map(|s| s.rss_bytes as f64)
            .sum::<f64>()
            / (self.samples.len() - mid) as f64;

        // Suspect leak if second half is > 1KB higher than first half on average
        second_half_mean - first_half_mean > 1024.0
    }

    /// Reset the tracker with a new baseline.
    pub fn reset(&mut self) {
        self.baseline = MemorySnapshot::current();
        self.samples.clear();
        self.peak_rss = self.baseline.rss_bytes;
    }
}

/// Report summarizing memory usage measurements.
#[derive(Debug, Clone)]
pub struct MemoryReport {
    /// Baseline RSS at tracker creation.
    pub baseline_rss: usize,
    /// Most recent RSS measurement.
    pub current_rss: usize,
    /// Peak RSS observed during tracking.
    pub peak_rss: usize,
    /// RSS change from baseline to current.
    pub delta_rss: usize,
    /// Estimated bytes per operation (if multiple samples).
    pub per_operation_bytes: Option<usize>,
    /// Total number of samples taken.
    pub sample_count: usize,
    /// Whether a memory leak trend was detected.
    pub leak_suspect: bool,
}

impl MemoryReport {
    /// Compare against a baseline report.
    #[must_use]
    pub fn compare(&self, baseline: &Self) -> MemoryComparison {
        MemoryComparison {
            current: self.clone(),
            baseline: baseline.clone(),
        }
    }
}

impl fmt::Display for MemoryReport {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "Memory Report ({} samples)", self.sample_count)?;
        writeln!(f, "  baseline: {}", format_bytes_size(self.baseline_rss))?;
        writeln!(f, "  current:  {}", format_bytes_size(self.current_rss))?;
        writeln!(f, "  peak:     {}", format_bytes_size(self.peak_rss))?;
        writeln!(f, "  delta:    {}", format_bytes_size(self.delta_rss))?;

        if let Some(per_op) = self.per_operation_bytes {
            writeln!(f, "  per-op:   {}", format_bytes_size(per_op))?;
        }

        if self.leak_suspect {
            writeln!(f, "  WARNING: Potential memory leak detected!")?;
        }

        Ok(())
    }
}

/// Comparison between two memory reports.
#[derive(Debug, Clone)]
pub struct MemoryComparison {
    /// Current measurement.
    pub current: MemoryReport,
    /// Baseline measurement.
    pub baseline: MemoryReport,
}

impl MemoryComparison {
    /// Returns true if the ratio of current to baseline peak RSS exceeds the given factor.
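    ///
    /// # Example
    ///
    /// A sketch comparing two reports; `current` and `baseline` here stand in
    /// for `MemoryReport`s produced by two separate runs.
    ///
    /// ```ignore
    /// let comparison = current.compare(&baseline);
    /// if comparison.has_regression(1.25) {
    ///     eprintln!("peak RSS grew by more than 25%:\n{comparison}");
    /// }
    /// ```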
    #[must_use]
    #[allow(clippy::cast_precision_loss)]
    pub fn has_regression(&self, factor: f64) -> bool {
        if self.baseline.peak_rss == 0 {
            return false;
        }
        let ratio = self.current.peak_rss as f64 / self.baseline.peak_rss as f64;
        ratio > factor
    }
}

impl fmt::Display for MemoryComparison {
    #[allow(clippy::cast_precision_loss)]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "Memory Comparison")?;
        writeln!(f)?;
        writeln!(
            f,
            "  {:>10}  {:>10}  {:>10}  {:>8}",
            "metric", "current", "baseline", "change"
        )?;
        writeln!(
            f,
            "  {:>10}  {:>10}  {:>10}  {:>8}",
            "------", "-------", "--------", "------"
        )?;

        for (label, cur, base) in [
            ("peak RSS", self.current.peak_rss, self.baseline.peak_rss),
            ("delta RSS", self.current.delta_rss, self.baseline.delta_rss),
        ] {
            let change = if base == 0 {
                "N/A".to_string()
            } else {
                let ratio = cur as f64 / base as f64;
                let pct = (ratio - 1.0) * 100.0;
                if pct >= 0.0 {
                    format!("+{pct:.1}%")
                } else {
                    format!("{pct:.1}%")
                }
            };

            writeln!(
                f,
                "  {:>10}  {:>10}  {:>10}  {:>8}",
                label,
                format_bytes_size(cur),
                format_bytes_size(base),
                change,
            )?;
        }

        Ok(())
    }
}

/// Read memory info from `/proc/self/status` on Linux.
fn read_proc_memory() -> Option<MemorySnapshot> {
    let status = std::fs::read_to_string("/proc/self/status").ok()?;

    let mut rss_bytes = 0;
    let mut vms_bytes = 0;

    for line in status.lines() {
        if let Some(value) = line.strip_prefix("VmRSS:") {
            rss_bytes = parse_proc_kb(value)?;
        } else if let Some(value) = line.strip_prefix("VmSize:") {
            vms_bytes = parse_proc_kb(value)?;
        }
    }

    Some(MemorySnapshot {
        rss_bytes,
        vms_bytes,
    })
}

/// Parse a value like "  12345 kB" from /proc/self/status.
fn parse_proc_kb(value: &str) -> Option<usize> {
    let trimmed = value.trim();
    let num_str = trimmed
        .strip_suffix("kB")
        .or_else(|| trimmed.strip_suffix("KB"))?
        .trim();
    let kb: usize = num_str.parse().ok()?;
    Some(kb * 1024)
}

/// Format a byte count in human-readable form.
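///
/// # Example
///
/// A few illustrative conversions:
///
/// ```ignore
/// use fastapi_core::bench::format_bytes_size;
///
/// assert_eq!(format_bytes_size(512), "512B");
/// assert_eq!(format_bytes_size(1536), "1.5KB");
/// assert_eq!(format_bytes_size(8 * 1024 * 1024), "8.0MB");
/// ```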
#[must_use]
#[allow(clippy::cast_precision_loss)]
pub fn format_bytes_size(bytes: usize) -> String {
    if bytes < 1024 {
        format!("{bytes}B")
    } else if bytes < 1024 * 1024 {
        format!("{:.1}KB", bytes as f64 / 1024.0)
    } else if bytes < 1024 * 1024 * 1024 {
        format!("{:.1}MB", bytes as f64 / (1024.0 * 1024.0))
    } else {
        format!("{:.2}GB", bytes as f64 / (1024.0 * 1024.0 * 1024.0))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn histogram_empty() {
        let mut h = LatencyHistogram::new();
        assert!(h.is_empty());
        assert_eq!(h.count(), 0);
        assert!(h.percentile(50.0).is_none());
        assert!(h.min().is_none());
        assert!(h.max().is_none());
        assert!(h.mean().is_none());
        assert!(h.std_dev().is_none());
        assert!(h.report().is_none());
    }

    #[test]
    fn histogram_single_sample() {
        let mut h = LatencyHistogram::new();
        h.record(Duration::from_micros(100));
        assert_eq!(h.count(), 1);
        assert!(!h.is_empty());
        assert_eq!(h.percentile(50.0), Some(Duration::from_micros(100)));
        assert_eq!(h.min(), Some(Duration::from_micros(100)));
        assert_eq!(h.max(), Some(Duration::from_micros(100)));
        assert_eq!(h.mean(), Some(Duration::from_micros(100)));
    }

    #[test]
    fn histogram_multiple_samples() {
        let mut h = LatencyHistogram::new();
        for i in 1..=100 {
            h.record(Duration::from_micros(i));
        }
        assert_eq!(h.count(), 100);
        assert_eq!(h.min(), Some(Duration::from_micros(1)));
        assert_eq!(h.max(), Some(Duration::from_micros(100)));

        // p50 should be around 50
        let p50 = h.percentile(50.0).unwrap();
        assert!(
            p50.as_micros() >= 49 && p50.as_micros() <= 51,
            "p50 = {p50:?}"
        );

        // p99 should be around 99
        let p99 = h.percentile(99.0).unwrap();
        assert!(
            p99.as_micros() >= 98 && p99.as_micros() <= 100,
            "p99 = {p99:?}"
        );
    }

    #[test]
    fn histogram_percentile_boundary_values() {
        let mut h = LatencyHistogram::new();
        for i in 1..=1000 {
            h.record(Duration::from_micros(i));
        }

        // p0 should return the min
        let p0 = h.percentile(0.0).unwrap();
        assert_eq!(p0, Duration::from_micros(1));

        // p100 should return the max
        let p100 = h.percentile(100.0).unwrap();
        assert_eq!(p100, Duration::from_millis(1));

        // p99.9 should be close to 999
        let p999 = h.percentile(99.9).unwrap();
        assert!(
            p999.as_micros() >= 998 && p999.as_micros() <= 1000,
            "p99.9 = {p999:?}"
        );
    }

    #[test]
    fn histogram_mean_calculation() {
        let mut h = LatencyHistogram::new();
        h.record(Duration::from_micros(10));
        h.record(Duration::from_micros(20));
        h.record(Duration::from_micros(30));
        assert_eq!(h.mean(), Some(Duration::from_micros(20)));
    }

    #[test]
    fn histogram_std_dev() {
        let mut h = LatencyHistogram::new();
        // All same values => zero std dev
        for _ in 0..10 {
            h.record(Duration::from_micros(100));
        }
        let sd = h.std_dev().unwrap();
        assert_eq!(sd, Duration::ZERO);

        // Different values
        let mut h2 = LatencyHistogram::new();
        h2.record(Duration::from_micros(10));
        h2.record(Duration::from_micros(20));
        let sd2 = h2.std_dev().unwrap();
        assert!(sd2 > Duration::ZERO);
    }

    #[test]
    fn histogram_measure_closure() {
        let mut h = LatencyHistogram::new();
        let result = h.measure(|| 42);
        assert_eq!(result, 42);
        assert_eq!(h.count(), 1);
        // Duration should be very small (just overhead)
        assert!(h.min().unwrap() < Duration::from_millis(10));
    }

    #[test]
    fn histogram_clear() {
        let mut h = LatencyHistogram::new();
        h.record(Duration::from_micros(100));
        assert_eq!(h.count(), 1);
        h.clear();
        assert!(h.is_empty());
        assert_eq!(h.count(), 0);
    }

    #[test]
    fn histogram_buckets_empty() {
        let mut h = LatencyHistogram::new();
        assert!(h.histogram_buckets(10).is_empty());
    }

    #[test]
    fn histogram_buckets_uniform() {
        let mut h = LatencyHistogram::new();
        for i in 1..=100 {
            h.record(Duration::from_micros(i));
        }
        let buckets = h.histogram_buckets(10);
        assert_eq!(buckets.len(), 10);

        // Total across all buckets should equal sample count
        let total: usize = buckets.iter().map(|b| b.count).sum();
        assert_eq!(total, 100);
    }

    #[test]
    fn histogram_buckets_same_value() {
        let mut h = LatencyHistogram::new();
        for _ in 0..50 {
            h.record(Duration::from_micros(100));
        }
        let buckets = h.histogram_buckets(10);
        // When all values are the same, we get a single bucket
        assert_eq!(buckets.len(), 1);
        assert_eq!(buckets[0].count, 50);
    }

    #[test]
    fn report_generation() {
        let mut h = LatencyHistogram::new();
        for i in 1..=1000 {
            h.record(Duration::from_micros(i));
        }
        let report = h.report().unwrap();
        assert_eq!(report.count, 1000);
        assert_eq!(report.min, Duration::from_micros(1));
        assert_eq!(report.max, Duration::from_millis(1));
        assert!(report.p50 <= report.p90);
        assert!(report.p90 <= report.p95);
        assert!(report.p95 <= report.p99);
        assert!(report.p99 <= report.p999);
    }

    #[test]
    fn report_display_format() {
        let mut h = LatencyHistogram::new();
        for i in 1..=100 {
            h.record(Duration::from_micros(i));
        }
        let report = h.report().unwrap();
        let output = format!("{report}");
        assert!(output.contains("Latency Report"));
        assert!(output.contains("p50:"));
        assert!(output.contains("p95:"));
        assert!(output.contains("p99:"));
        assert!(output.contains("p99.9:"));
        assert!(output.contains("Distribution:"));
    }

    #[test]
    fn report_tail_latency_detection() {
        let mut h = LatencyHistogram::new();
        for i in 1..=100 {
            h.record(Duration::from_micros(i));
        }
        let report = h.report().unwrap();

        // Threshold below max should detect tail latency
        assert!(report.has_tail_latency_above(Duration::from_micros(50)));
        // Threshold above max should not
        assert!(!report.has_tail_latency_above(Duration::from_micros(200)));
    }

    #[test]
    fn comparison_no_regression() {
        let mut h1 = LatencyHistogram::new();
        let mut h2 = LatencyHistogram::new();
        for i in 1..=100 {
            h1.record(Duration::from_micros(i));
            h2.record(Duration::from_micros(i));
        }
        let r1 = h1.report().unwrap();
        let r2 = h2.report().unwrap();
        let cmp = r1.compare(&r2);
        assert!(!cmp.has_regression(1.1)); // 10% threshold
    }

    #[test]
    fn comparison_with_regression() {
        let mut baseline = LatencyHistogram::new();
        let mut current = LatencyHistogram::new();
        for i in 1..=100 {
            baseline.record(Duration::from_micros(i));
            current.record(Duration::from_micros(i * 2)); // 2x slower
        }
        let r_base = baseline.report().unwrap();
        let r_curr = current.report().unwrap();
        let cmp = r_curr.compare(&r_base);
        assert!(cmp.has_regression(1.1)); // Expect regression detected
    }

    #[test]
    fn comparison_display_format() {
        let mut h1 = LatencyHistogram::new();
        let mut h2 = LatencyHistogram::new();
        for i in 1..=100 {
            h1.record(Duration::from_micros(i));
            h2.record(Duration::from_micros(i));
        }
        let r1 = h1.report().unwrap();
        let r2 = h2.report().unwrap();
        let cmp = r1.compare(&r2);
        let output = format!("{cmp}");
        assert!(output.contains("Latency Comparison"));
        assert!(output.contains("current"));
        assert!(output.contains("baseline"));
        assert!(output.contains("change"));
    }

    #[test]
    fn benchmark_runner_basic() {
        let config = BenchmarkConfig::new("test_bench")
            .warmup_iterations(10)
            .iterations(100);

        let mut counter = 0u64;
        let report = BenchmarkRunner::run(&config, || {
            counter += 1;
        });

        // Warmup + measured iterations
        assert_eq!(counter, 110);
        assert_eq!(report.count, 100);
        assert!(report.min <= report.max);
    }

    #[test]
    fn benchmark_runner_with_result() {
        let config = BenchmarkConfig::new("result_bench")
            .warmup_iterations(5)
            .iterations(50);

        let report = BenchmarkRunner::run_with_result(&config, || 42);
        assert_eq!(report.count, 50);
    }

    #[test]
    fn benchmark_config_defaults() {
        let config = BenchmarkConfig::new("default");
        assert_eq!(config.name, "default");
        assert_eq!(config.warmup_iterations, 100);
        assert_eq!(config.iterations, 10_000);
    }

    #[test]
    fn benchmark_config_builder() {
        let config = BenchmarkConfig::new("custom")
            .warmup_iterations(50)
            .iterations(500);
        assert_eq!(config.name, "custom");
        assert_eq!(config.warmup_iterations, 50);
        assert_eq!(config.iterations, 500);
    }

    #[test]
    fn format_duration_nanos() {
        assert_eq!(format_duration(Duration::from_nanos(42)), "42ns");
        assert_eq!(format_duration(Duration::from_nanos(999)), "999ns");
    }

    #[test]
    fn format_duration_micros() {
        assert_eq!(format_duration(Duration::from_micros(1)), "1.0us");
        assert_eq!(format_duration(Duration::from_micros(500)), "500.0us");
    }

    #[test]
    fn format_duration_millis() {
        assert_eq!(format_duration(Duration::from_millis(1)), "1.00ms");
        assert_eq!(format_duration(Duration::from_millis(42)), "42.00ms");
    }

    #[test]
    fn format_duration_seconds() {
        assert_eq!(format_duration(Duration::from_secs(1)), "1.000s");
        assert_eq!(format_duration(Duration::from_millis(1500)), "1.500s");
    }

    #[test]
    fn benchmark_suite_runs_all() {
        let suite: Vec<(BenchmarkConfig, Box<dyn FnMut()>)> = vec![
            (
                BenchmarkConfig::new("a")
                    .warmup_iterations(1)
                    .iterations(10),
                Box::new(|| {}),
            ),
            (
                BenchmarkConfig::new("b")
                    .warmup_iterations(1)
                    .iterations(10),
                Box::new(|| {}),
            ),
        ];
        let results = BenchmarkRunner::run_suite(suite);
        assert_eq!(results.len(), 2);
        assert_eq!(results[0].0, "a");
        assert_eq!(results[1].0, "b");
    }

    #[test]
    fn histogram_with_capacity() {
        let mut h = LatencyHistogram::with_capacity(1000);
        assert!(h.is_empty());
        h.record(Duration::from_micros(1));
        assert_eq!(h.count(), 1);
    }

    #[test]
    fn percentile_clamping() {
        let mut h = LatencyHistogram::new();
        h.record(Duration::from_micros(10));
        h.record(Duration::from_micros(20));

        // Negative percentile clamps to 0
        let p_neg = h.percentile(-10.0);
        assert!(p_neg.is_some());

        // Percentile > 100 clamps to 100
        let p_over = h.percentile(200.0);
        assert_eq!(p_over, Some(Duration::from_micros(20)));
    }

    // ================================================================
    // Memory tracking tests
    // ================================================================

    #[test]
    fn memory_snapshot_current() {
        let snap = MemorySnapshot::current();
        // On Linux, RSS should be non-zero for a running process
        #[cfg(target_os = "linux")]
        assert!(snap.rss_bytes > 0, "RSS should be positive on Linux");
        #[cfg(target_os = "linux")]
        assert!(snap.vms_bytes > 0, "VMS should be positive on Linux");
        // On other platforms, gracefully returns zero
        let _ = snap;
    }

    #[test]
    fn memory_snapshot_display() {
        let snap = MemorySnapshot {
            rss_bytes: 10 * 1024 * 1024,
            vms_bytes: 100 * 1024 * 1024,
        };
        let display = format!("{snap}");
        assert!(display.contains("RSS:"));
        assert!(display.contains("VMS:"));
        assert!(display.contains("10.0MB"));
        assert!(display.contains("100.0MB"));
    }

    #[test]
    fn memory_snapshot_display_methods() {
        let snap = MemorySnapshot {
            rss_bytes: 2048,
            vms_bytes: 4096,
        };
        assert_eq!(snap.rss_display(), "2.0KB");
        assert_eq!(snap.vms_display(), "4.0KB");
    }

    #[test]
    fn memory_tracker_new_has_baseline() {
        let tracker = MemoryTracker::new();
        assert_eq!(tracker.sample_count(), 0);
        #[cfg(target_os = "linux")]
        assert!(tracker.baseline().rss_bytes > 0);
    }

    #[test]
    fn memory_tracker_sample() {
        let mut tracker = MemoryTracker::new();
        tracker.sample();
        assert_eq!(tracker.sample_count(), 1);
        tracker.sample();
        assert_eq!(tracker.sample_count(), 2);
    }

    #[test]
    fn memory_tracker_measure() {
        let mut tracker = MemoryTracker::new();
        let result = tracker.measure(|| 42);
        assert_eq!(result, 42);
        assert_eq!(tracker.sample_count(), 1);
    }

    #[test]
    fn memory_tracker_measure_repeated() {
        let mut tracker = MemoryTracker::new();
        let mut counter = 0;
        tracker.measure_repeated(5, || {
            counter += 1;
        });
        assert_eq!(counter, 5);
        assert_eq!(tracker.sample_count(), 5);
    }

    #[test]
    fn memory_tracker_peak_rss() {
        let tracker = MemoryTracker::new();
        assert!(tracker.peak_rss() >= tracker.baseline().rss_bytes);
    }

    #[test]
    fn memory_tracker_reset() {
        let mut tracker = MemoryTracker::new();
        tracker.sample();
        tracker.sample();
        assert_eq!(tracker.sample_count(), 2);
        tracker.reset();
        assert_eq!(tracker.sample_count(), 0);
    }

    #[test]
    fn memory_tracker_default() {
        let tracker = MemoryTracker::default();
        assert_eq!(tracker.sample_count(), 0);
    }

    #[test]
    fn memory_report_generation() {
        let mut tracker = MemoryTracker::new();
        tracker.sample();
        tracker.sample();
        let report = tracker.report();
        assert_eq!(report.sample_count, 2);
        assert!(report.peak_rss >= report.baseline_rss);
    }

    #[test]
    fn memory_report_display() {
        let report = MemoryReport {
            baseline_rss: 10 * 1024 * 1024,
            current_rss: 12 * 1024 * 1024,
            peak_rss: 15 * 1024 * 1024,
            delta_rss: 2 * 1024 * 1024,
            per_operation_bytes: Some(1024),
            sample_count: 100,
            leak_suspect: false,
        };
        let output = format!("{report}");
        assert!(output.contains("Memory Report"));
        assert!(output.contains("baseline:"));
        assert!(output.contains("current:"));
        assert!(output.contains("peak:"));
        assert!(output.contains("delta:"));
        assert!(output.contains("per-op:"));
        assert!(!output.contains("leak"));
    }

    #[test]
    fn memory_report_display_with_leak() {
        let report = MemoryReport {
            baseline_rss: 10 * 1024 * 1024,
            current_rss: 20 * 1024 * 1024,
            peak_rss: 20 * 1024 * 1024,
            delta_rss: 10 * 1024 * 1024,
            per_operation_bytes: None,
            sample_count: 1,
            leak_suspect: true,
        };
        let output = format!("{report}");
        assert!(output.contains("leak"));
    }

    #[test]
    fn memory_report_comparison() {
        let current = MemoryReport {
            baseline_rss: 10_000,
            current_rss: 20_000,
            peak_rss: 25_000,
            delta_rss: 10_000,
            per_operation_bytes: Some(100),
            sample_count: 100,
            leak_suspect: false,
        };
        let baseline = MemoryReport {
            baseline_rss: 10_000,
            current_rss: 12_000,
            peak_rss: 15_000,
            delta_rss: 2_000,
            per_operation_bytes: Some(50),
            sample_count: 100,
            leak_suspect: false,
        };
        let cmp = current.compare(&baseline);
        assert!(cmp.has_regression(1.1)); // 25K/15K > 1.1
    }

    #[test]
    fn memory_comparison_no_regression() {
        let report = MemoryReport {
            baseline_rss: 10_000,
            current_rss: 10_000,
            peak_rss: 10_000,
            delta_rss: 0,
            per_operation_bytes: None,
            sample_count: 1,
            leak_suspect: false,
        };
        let cmp = report.compare(&report);
        assert!(!cmp.has_regression(1.1));
    }

    #[test]
    fn memory_comparison_display() {
        let current = MemoryReport {
            baseline_rss: 1024,
            current_rss: 2048,
            peak_rss: 3072,
            delta_rss: 1024,
            per_operation_bytes: None,
            sample_count: 1,
            leak_suspect: false,
        };
        let baseline = current.clone();
        let cmp = current.compare(&baseline);
        let output = format!("{cmp}");
        assert!(output.contains("Memory Comparison"));
        assert!(output.contains("peak RSS"));
    }

    #[test]
    fn memory_comparison_zero_baseline() {
        let current = MemoryReport {
            baseline_rss: 0,
            current_rss: 0,
            peak_rss: 1024,
            delta_rss: 0,
            per_operation_bytes: None,
            sample_count: 0,
            leak_suspect: false,
        };
        let baseline = MemoryReport {
            baseline_rss: 0,
            current_rss: 0,
            peak_rss: 0,
            delta_rss: 0,
            per_operation_bytes: None,
            sample_count: 0,
            leak_suspect: false,
        };
        let cmp = current.compare(&baseline);
        assert!(!cmp.has_regression(1.1)); // Zero baseline should not flag
    }

    #[test]
    fn format_bytes_size_units() {
        assert_eq!(format_bytes_size(0), "0B");
        assert_eq!(format_bytes_size(512), "512B");
        assert_eq!(format_bytes_size(1024), "1.0KB");
        assert_eq!(format_bytes_size(1536), "1.5KB");
        assert_eq!(format_bytes_size(1024 * 1024), "1.0MB");
        assert_eq!(format_bytes_size(1024 * 1024 * 1024), "1.00GB");
    }

    #[test]
    fn parse_proc_kb_valid() {
        assert_eq!(parse_proc_kb("   12345 kB"), Some(12345 * 1024));
        assert_eq!(parse_proc_kb("  100 kB"), Some(100 * 1024));
    }

    #[test]
    fn parse_proc_kb_invalid() {
        assert_eq!(parse_proc_kb("not a number kB"), None);
        assert_eq!(parse_proc_kb("12345 MB"), None);
        assert_eq!(parse_proc_kb(""), None);
    }

    #[test]
    fn leak_detection_too_few_samples() {
        let tracker = MemoryTracker::new();
        // With 0 samples, no leak detected
        assert!(!tracker.report().leak_suspect);
    }
}