// dev_stress/latency.rs
//! Per-operation latency tracking for stress runs.
//!
//! [`LatencyTracker`] is a thread-local sampler. Each thread allocates
//! its own tracker; samples are merged at run finish to compute
//! [`LatencyStats`] (p50, p95, p99). No locking on the hot path.
use std::time::{Duration, Instant};
/// Collects per-operation latency samples on a single thread.
///
/// Deliberately thread-local: every worker thread owns one tracker and
/// the runner concatenates their sample vectors when the run finishes.
/// Because nothing is shared, recording a sample never synchronizes
/// and therefore cannot distort the measurement itself.
pub struct LatencyTracker {
    samples: Vec<Duration>,
    sample_rate: usize,
}

impl LatencyTracker {
    /// Build a tracker that keeps `1` out of every `rate` iterations.
    ///
    /// `rate = 1` keeps every iteration; `rate = 100` keeps 1%.
    /// Values below `1` are clamped up to `1`.
    pub fn new(rate: usize) -> Self {
        let sample_rate = if rate < 1 { 1 } else { rate };
        Self {
            samples: Vec::new(),
            sample_rate,
        }
    }

    /// Execute `f`, timing it when `iter_index` lands on the sampling
    /// schedule, i.e. when `iter_index % sample_rate == 0`.
    ///
    /// `iter_index` is the caller's 0-based iteration counter on this
    /// thread.
    ///
    /// # Example
    ///
    /// ```
    /// use dev_stress::LatencyTracker;
    ///
    /// let mut t = LatencyTracker::new(1);
    /// t.record(0, || std::hint::black_box(1 + 1));
    /// t.record(1, || std::hint::black_box(1 + 1));
    /// assert_eq!(t.samples_count(), 2);
    /// ```
    pub fn record<F, R>(&mut self, iter_index: usize, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        // Off-schedule iterations run the closure untouched — no timing
        // overhead on the hot path.
        if iter_index % self.sample_rate != 0 {
            return f();
        }
        let begin = Instant::now();
        let result = f();
        self.samples.push(begin.elapsed());
        result
    }

    /// How many samples this tracker currently holds.
    pub fn samples_count(&self) -> usize {
        self.samples.len()
    }

    /// Consume the tracker and hand back all of its samples.
    pub fn into_samples(self) -> Vec<Duration> {
        self.samples
    }
}
/// Aggregated latency statistics across a stress run.
///
/// Computed from the concatenation of every per-thread tracker's samples.
///
/// # Example
///
/// ```
/// use dev_stress::LatencyStats;
/// use std::time::Duration;
///
/// let stats = LatencyStats::from_samples(vec![
///     Duration::from_nanos(10),
///     Duration::from_nanos(20),
///     Duration::from_nanos(30),
///     Duration::from_nanos(40),
///     Duration::from_nanos(50),
/// ]);
/// assert_eq!(stats.samples_count, 5);
/// assert_eq!(stats.p50, Duration::from_nanos(30));
/// ```
// `Copy` is free here (all fields are `Copy`) and lets callers pass
// stats by value without thinking about moves.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct LatencyStats {
    /// 50th percentile sample.
    pub p50: Duration,
    /// 95th percentile sample.
    pub p95: Duration,
    /// 99th percentile sample.
    pub p99: Duration,
    /// Total number of samples used to compute the percentiles.
    pub samples_count: usize,
}

impl LatencyStats {
    /// Compute percentile statistics from a sample set.
    ///
    /// Returns zero-valued percentiles when `samples` is empty.
    pub fn from_samples(mut samples: Vec<Duration>) -> Self {
        let n = samples.len();
        if n == 0 {
            return Self {
                p50: Duration::ZERO,
                p95: Duration::ZERO,
                p99: Duration::ZERO,
                samples_count: 0,
            };
        }
        // Stability is irrelevant for plain `Duration` values, so use
        // the faster, non-allocating unstable sort.
        samples.sort_unstable();
        Self {
            p50: samples[Self::percentile_index(n, 50)],
            p95: samples[Self::percentile_index(n, 95)],
            p99: samples[Self::percentile_index(n, 99)],
            samples_count: n,
        }
    }

    /// Index of the `pct`-th percentile in a sorted slice of length
    /// `n` (`n > 0`): `floor(n * pct / 100)`, clamped into bounds.
    /// Exact integer arithmetic — no `f64` rounding involved.
    ///
    /// `n * pct` cannot overflow in practice: a `Vec<Duration>` long
    /// enough to make it overflow would not fit in memory.
    fn percentile_index(n: usize, pct: usize) -> usize {
        (n * pct / 100).min(n - 1)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn rate_one_records_every_iter() {
        let mut tracker = LatencyTracker::new(1);
        (0..10).for_each(|i| {
            tracker.record(i, || std::hint::black_box(i));
        });
        assert_eq!(10, tracker.samples_count());
    }

    #[test]
    fn rate_n_records_one_in_n() {
        let mut tracker = LatencyTracker::new(5);
        for i in 0..50 {
            tracker.record(i, || std::hint::black_box(i));
        }
        // Indices 0, 5, 10, ..., 45 hit the schedule: 10 samples total.
        assert_eq!(10, tracker.samples_count());
    }

    #[test]
    fn empty_samples_yield_zero_stats() {
        let stats = LatencyStats::from_samples(Vec::new());
        assert_eq!(stats.samples_count, 0);
        for percentile in [stats.p50, stats.p95, stats.p99] {
            assert_eq!(percentile, Duration::ZERO);
        }
    }

    #[test]
    fn percentiles_are_ordered() {
        let samples: Vec<Duration> =
            (1..=100u64).map(Duration::from_nanos).collect();
        let stats = LatencyStats::from_samples(samples);
        assert!(stats.p50 <= stats.p95 && stats.p95 <= stats.p99);
    }

    #[test]
    fn into_samples_moves_data() {
        let mut tracker = LatencyTracker::new(1);
        for i in 0..5 {
            tracker.record(i, || ());
        }
        assert_eq!(tracker.into_samples().len(), 5);
    }

    #[test]
    fn rate_zero_clamps_to_one() {
        let mut tracker = LatencyTracker::new(0);
        for i in 0..5 {
            tracker.record(i, || ());
        }
        assert_eq!(tracker.samples_count(), 5);
    }
}