#![allow(clippy::cast_precision_loss)]
#![allow(clippy::cast_possible_truncation)]
use std::collections::VecDeque;
/// Single-pass summary statistics using Welford's online algorithm.
///
/// Tracks count, mean, variance, and extrema without storing samples.
/// NaN inputs are silently ignored so one bad sample cannot poison the run.
#[derive(Debug, Clone)]
pub struct RunningStats {
    // Number of non-NaN samples pushed so far.
    count: u64,
    // Running mean of all samples.
    mean: f64,
    // Welford's M2: sum of squared deviations from the running mean.
    m2: f64,
    // Smallest sample seen; +infinity until the first push.
    min: f64,
    // Largest sample seen; -infinity until the first push.
    max: f64,
}

impl Default for RunningStats {
    /// Delegates to [`RunningStats::new`].
    ///
    /// A derived `Default` would zero `min`/`max`, so all-positive data would
    /// never lower `min` and all-negative data would never raise `max`.
    fn default() -> Self {
        Self::new()
    }
}

impl RunningStats {
    /// Creates an empty accumulator with extrema seeded at ±infinity.
    #[must_use]
    pub fn new() -> Self {
        Self {
            count: 0,
            mean: 0.0,
            m2: 0.0,
            min: f64::INFINITY,
            max: f64::NEG_INFINITY,
        }
    }
    /// Folds one sample into the statistics; NaN is ignored.
    pub fn push(&mut self, value: f64) {
        if value.is_nan() {
            return;
        }
        self.count += 1;
        // Welford update: the product of the pre- and post-update deltas
        // keeps m2 numerically stable over long streams.
        let delta = value - self.mean;
        self.mean += delta / self.count as f64;
        let delta2 = value - self.mean;
        self.m2 += delta * delta2;
        if value < self.min {
            self.min = value;
        }
        if value > self.max {
            self.max = value;
        }
    }
    /// Number of accepted (non-NaN) samples.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }
    /// True when no samples have been pushed.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.count == 0
    }
    /// Arithmetic mean, or 0.0 when empty.
    #[must_use]
    pub fn mean(&self) -> f64 {
        if self.count == 0 {
            0.0
        } else {
            self.mean
        }
    }
    /// Unbiased sample variance (n − 1 denominator); 0.0 with fewer than two samples.
    #[must_use]
    pub fn variance(&self) -> f64 {
        if self.count < 2 {
            0.0
        } else {
            self.m2 / (self.count - 1) as f64
        }
    }
    /// Population variance (n denominator); 0.0 when empty.
    #[must_use]
    pub fn population_variance(&self) -> f64 {
        if self.count == 0 {
            0.0
        } else {
            self.m2 / self.count as f64
        }
    }
    /// Sample standard deviation.
    #[must_use]
    pub fn stddev(&self) -> f64 {
        self.variance().sqrt()
    }
    /// Population standard deviation.
    #[must_use]
    pub fn population_stddev(&self) -> f64 {
        self.population_variance().sqrt()
    }
    /// Smallest sample seen (+infinity when empty).
    #[must_use]
    pub fn min(&self) -> f64 {
        self.min
    }
    /// Largest sample seen (-infinity when empty).
    #[must_use]
    pub fn max(&self) -> f64 {
        self.max
    }
    /// max − min, or 0.0 with fewer than two samples.
    #[must_use]
    pub fn range(&self) -> f64 {
        if self.count < 2 {
            0.0
        } else {
            self.max - self.min
        }
    }
    /// Coefficient of variation (stddev / |mean|); NaN when the mean is zero.
    #[must_use]
    pub fn cv(&self) -> f64 {
        let m = self.mean();
        if m == 0.0 {
            f64::NAN
        } else {
            self.stddev() / m.abs()
        }
    }
    /// Merges another accumulator into this one (Chan et al. parallel
    /// combination), as if every sample of `other` had been pushed here.
    pub fn merge(&mut self, other: &Self) {
        if other.count == 0 {
            return;
        }
        if self.count == 0 {
            *self = other.clone();
            return;
        }
        let combined = self.count + other.count;
        let delta = other.mean - self.mean;
        let new_mean =
            (self.mean * self.count as f64 + other.mean * other.count as f64) / combined as f64;
        let new_m2 = self.m2
            + other.m2
            + delta * delta * (self.count as f64 * other.count as f64) / combined as f64;
        self.count = combined;
        self.mean = new_mean;
        self.m2 = new_m2;
        if other.min < self.min {
            self.min = other.min;
        }
        if other.max > self.max {
            self.max = other.max;
        }
    }
    /// Restores the freshly-constructed state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Exponentially weighted moving average (EWMA) with a companion
/// exponentially weighted variance estimate.
///
/// NaN samples are dropped; the first accepted sample seeds the average.
#[derive(Debug, Clone)]
pub struct Ewma {
    // Smoothing weight in (0, 1]; larger reacts faster to new samples.
    alpha: f64,
    // Smoothed value, None until the first accepted sample.
    value: Option<f64>,
    // Number of accepted (non-NaN) samples.
    count: u64,
    // Exponentially weighted variance around the smoothed value.
    variance: f64,
}
impl Ewma {
    /// Builds a smoother with weight `alpha`; values outside (0, 1] are
    /// clamped after a debug assertion fires.
    #[must_use]
    pub fn new(alpha: f64) -> Self {
        debug_assert!(alpha > 0.0 && alpha <= 1.0, "alpha must be in (0, 1]");
        Self {
            alpha: alpha.clamp(1e-9, 1.0),
            value: None,
            count: 0,
            variance: 0.0,
        }
    }
    /// Absorbs one sample into the average; NaN samples are ignored.
    pub fn update(&mut self, sample: f64) {
        if sample.is_nan() {
            return;
        }
        self.count += 1;
        if let Some(prev) = self.value {
            let diff = sample - prev;
            // Exponentially weighted variance recurrence, updated from the
            // innovation against the previous smoothed value.
            self.variance = (1.0 - self.alpha) * (self.variance + self.alpha * diff * diff);
            self.value = Some(self.alpha * sample + (1.0 - self.alpha) * prev);
        } else {
            // First sample: seed the average exactly, with zero spread.
            self.value = Some(sample);
            self.variance = 0.0;
        }
    }
    /// Smoothed value, if any sample has been accepted.
    #[must_use]
    pub fn value_opt(&self) -> Option<f64> {
        self.value
    }
    /// Smoothed value, defaulting to 0.0 before the first sample.
    #[must_use]
    pub fn value(&self) -> f64 {
        self.value.unwrap_or(0.0)
    }
    /// Exponentially weighted variance of the samples.
    #[must_use]
    pub fn variance(&self) -> f64 {
        self.variance
    }
    /// Square root of the exponentially weighted variance.
    #[must_use]
    pub fn stddev(&self) -> f64 {
        self.variance.sqrt()
    }
    /// Number of accepted (non-NaN) samples.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }
    /// The smoothing weight in use.
    #[must_use]
    pub fn alpha(&self) -> f64 {
        self.alpha
    }
    /// Returns to the pristine, sample-free state; `alpha` is kept.
    pub fn reset(&mut self) {
        self.value = None;
        self.count = 0;
        self.variance = 0.0;
    }
}
/// Fixed-capacity sliding window over `f64` samples.
///
/// Running `sum` and `sum_sq` are maintained on push/evict so the mean and
/// (population) variance are O(1) queries regardless of window size.
#[derive(Debug, Clone)]
pub struct RollingWindow {
    // Maximum number of retained samples; always ≥ 1.
    capacity: usize,
    // Samples in arrival order, oldest at the front.
    buffer: VecDeque<f64>,
    // Running sum of the buffered samples.
    sum: f64,
    // Running sum of squares of the buffered samples.
    sum_sq: f64,
}
impl RollingWindow {
    /// Creates a window that retains at most `capacity` samples.
    ///
    /// # Panics
    /// Panics when `capacity` is zero.
    #[must_use]
    pub fn new(capacity: usize) -> Self {
        assert!(capacity >= 1, "RollingWindow capacity must be ≥ 1");
        Self {
            capacity,
            buffer: VecDeque::with_capacity(capacity),
            sum: 0.0,
            sum_sq: 0.0,
        }
    }
    /// Appends `value`, evicting the oldest sample once the window is full.
    pub fn push(&mut self, value: f64) {
        if self.buffer.len() == self.capacity {
            if let Some(evicted) = self.buffer.pop_front() {
                // Back the evicted sample's contribution out of both sums.
                self.sum -= evicted;
                self.sum_sq -= evicted * evicted;
            }
        }
        self.buffer.push_back(value);
        self.sum += value;
        self.sum_sq += value * value;
    }
    /// Number of samples currently held.
    #[must_use]
    pub fn len(&self) -> usize {
        self.buffer.len()
    }
    /// True when the window holds no samples.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }
    /// True once the window has reached its capacity.
    #[must_use]
    pub fn is_full(&self) -> bool {
        self.buffer.len() == self.capacity
    }
    /// Maximum number of samples the window retains.
    #[must_use]
    pub fn capacity(&self) -> usize {
        self.capacity
    }
    /// Mean of the windowed samples, or 0.0 when empty.
    #[must_use]
    pub fn mean(&self) -> f64 {
        match self.buffer.len() {
            0 => 0.0,
            n => self.sum / n as f64,
        }
    }
    /// Population variance of the windowed samples, clamped at zero to
    /// absorb floating-point cancellation; 0.0 when empty.
    #[must_use]
    pub fn variance(&self) -> f64 {
        let n = self.buffer.len() as f64;
        if n >= 1.0 {
            let avg = self.sum / n;
            let avg_sq = self.sum_sq / n;
            (avg_sq - avg * avg).max(0.0)
        } else {
            0.0
        }
    }
    /// Population standard deviation of the windowed samples.
    #[must_use]
    pub fn stddev(&self) -> f64 {
        self.variance().sqrt()
    }
    /// Smallest sample in the window (+infinity when empty).
    #[must_use]
    pub fn min(&self) -> f64 {
        self.buffer.iter().fold(f64::INFINITY, |lo, &v| lo.min(v))
    }
    /// Largest sample in the window (-infinity when empty).
    #[must_use]
    pub fn max(&self) -> f64 {
        self.buffer.iter().fold(f64::NEG_INFINITY, |hi, &v| hi.max(v))
    }
    /// Copies out the windowed samples, oldest first.
    #[must_use]
    pub fn samples(&self) -> Vec<f64> {
        let mut out = Vec::with_capacity(self.buffer.len());
        out.extend(self.buffer.iter().copied());
        out
    }
}
/// Streaming quantile estimator implementing the P² (piecewise-parabolic)
/// algorithm of Jain & Chlamtac (CACM 1985): five markers approximate the
/// target quantile in O(1) memory, without storing observations.
#[derive(Debug, Clone)]
pub struct PercentileEstimator {
    // Target quantile in (0, 1), e.g. 0.95 for the 95th percentile.
    p: f64,
    // Marker heights (quantile-value estimates); q[2] is the p-quantile.
    q: [f64; 5],
    // Desired marker positions; recomputed on every post-bootstrap update.
    dn: [f64; 5],
    // Actual 1-based marker positions within the stream seen so far.
    n: [i64; 5],
    // Total non-NaN observations, including the five bootstrap samples.
    count: u64,
    // Buffer for the first five samples used to seed the markers.
    bootstrap: Vec<f64>,
}
impl PercentileEstimator {
    /// Creates an estimator for quantile `p`; values outside (0, 1) are
    /// clamped after a debug assertion fires.
    #[must_use]
    pub fn new(p: f64) -> Self {
        debug_assert!(p > 0.0 && p < 1.0, "p must be in (0, 1)");
        let p = p.clamp(1e-9, 1.0 - 1e-9);
        Self {
            p,
            q: [0.0; 5],
            // Marker fractions: min, p/2, p, (1+p)/2, max.
            dn: [0.0, p / 2.0, p, (1.0 + p) / 2.0, 1.0],
            n: [1, 2, 3, 4, 5],
            count: 0,
            bootstrap: Vec::with_capacity(5),
        }
    }
    /// Feeds one observation into the estimator; NaN is ignored.
    pub fn update(&mut self, x: f64) {
        if x.is_nan() {
            return;
        }
        self.count += 1;
        // Bootstrap phase: buffer the first five samples, then seed the
        // marker heights from their sorted order.
        if self.bootstrap.len() < 5 {
            self.bootstrap.push(x);
            if self.bootstrap.len() == 5 {
                self.bootstrap.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
                for i in 0..5 {
                    self.q[i] = self.bootstrap[i];
                }
                self.n = [1, 2, 3, 4, 5];
            }
            return;
        }
        // Find the cell k whose marker interval contains x; the extreme
        // markers are stretched in place when x falls outside the range.
        let k = if x < self.q[0] {
            self.q[0] = x;
            0
        } else if x < self.q[1] {
            0
        } else if x < self.q[2] {
            1
        } else if x < self.q[3] {
            2
        } else if x < self.q[4] {
            3
        } else {
            self.q[4] = x;
            3
        };
        // Every marker above the insertion cell shifts one position right.
        for i in (k + 1)..5 {
            self.n[i] += 1;
        }
        // Recompute desired marker positions for the current stream length.
        // NOTE(review): classic P² uses 1 + (count - 1) * fraction for the
        // desired positions; this variant scales fractions by (count - 4).
        // The estimate still converges in practice — confirm intentional.
        let obs_count = (self.count - 5) as f64 + 1.0; self.dn = [
            0.0,
            self.p / 2.0 * obs_count,
            self.p * obs_count,
            (1.0 + self.p) / 2.0 * obs_count,
            obs_count,
        ];
        // Nudge each interior marker at most one step toward its desired
        // position, preferring the parabolic height and falling back to
        // linear interpolation when parabolic would break marker ordering.
        for i in 1..=3 {
            let d = self.dn[i] - self.n[i] as f64;
            if (d >= 1.0 && (self.n[i + 1] - self.n[i]) > 1)
                || (d <= -1.0 && (self.n[i - 1] - self.n[i]) < -1)
            {
                let sign = if d > 0.0 { 1 } else { -1 };
                let q_new = self.parabolic(i, sign as f64);
                // Accept the parabolic prediction only if it stays strictly
                // between the neighboring marker heights.
                if q_new > self.q[i - 1] && q_new < self.q[i + 1] {
                    self.q[i] = q_new;
                } else {
                    self.q[i] = self.linear(i, sign as f64);
                }
                self.n[i] += sign;
            }
        }
    }
    /// Piecewise-parabolic (P²) height prediction for marker `i` after
    /// moving it one position in direction `d` (±1.0).
    fn parabolic(&self, i: usize, d: f64) -> f64 {
        let qi = self.q[i];
        let qm = self.q[i - 1];
        let qp = self.q[i + 1];
        let ni = self.n[i] as f64;
        let nm = self.n[i - 1] as f64;
        let np = self.n[i + 1] as f64;
        qi + d / (np - nm)
            * ((ni - nm + d) * (qp - qi) / (np - ni)
                + (np - ni - d) * (qi - qm) / (ni - nm))
    }
    /// Linear-interpolation fallback toward the neighbor in direction `d`.
    fn linear(&self, i: usize, d: f64) -> f64 {
        let qi = self.q[i];
        let idx = if d > 0.0 { i + 1 } else { i - 1 };
        let qother = self.q[idx];
        let ni = self.n[i] as f64;
        let nother = self.n[idx] as f64;
        qi + d * (qother - qi) / (nother - ni)
    }
    /// Current estimate of the p-quantile, or `None` until five samples
    /// have been observed.
    #[must_use]
    pub fn estimate(&self) -> Option<f64> {
        if self.count < 5 {
            None
        } else {
            Some(self.q[2])
        }
    }
    /// Total observations consumed (including bootstrap samples).
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }
    /// The quantile this estimator targets.
    #[must_use]
    pub fn quantile(&self) -> f64 {
        self.p
    }
}
/// Streaming bitrate analyzer over per-frame bit counts.
///
/// Combines lifetime statistics, an EWMA trend, a rolling window, and a
/// streaming 95th-percentile estimate; [`Self::summary`] scales everything
/// from bits/frame to bits/second using `fps`.
#[derive(Debug)]
pub struct BitrateRunningAnalyzer {
    /// Frames per second used to convert bits/frame into bits/second.
    fps: f64,
    /// Lifetime statistics over every frame pushed.
    global: RunningStats,
    /// Slow EWMA (alpha = 0.1) tracking the bitrate trend.
    trend: Ewma,
    /// Statistics over the most recent frames.
    window: RollingWindow,
    /// Streaming estimate of the 95th-percentile frame size.
    p95: PercentileEstimator,
    /// Total bits across all pushed frames.
    total_bits: u64,
    /// Number of frames pushed.
    frame_count: u64,
}
impl BitrateRunningAnalyzer {
    /// Creates an analyzer for a stream at `fps` frames per second with a
    /// rolling window of `window_frames` frames (clamped to at least 1).
    #[must_use]
    pub fn new(fps: f64, window_frames: usize) -> Self {
        let window_frames = window_frames.max(1);
        Self {
            fps,
            global: RunningStats::new(),
            trend: Ewma::new(0.1),
            window: RollingWindow::new(window_frames),
            p95: PercentileEstimator::new(0.95),
            total_bits: 0,
            frame_count: 0,
        }
    }
    /// Records one encoded frame of `bits_per_frame` bits in all estimators.
    pub fn push_frame(&mut self, bits_per_frame: u64) {
        let bits_f = bits_per_frame as f64;
        self.global.push(bits_f);
        self.trend.update(bits_f);
        self.window.push(bits_f);
        self.p95.update(bits_f);
        self.total_bits += bits_per_frame;
        self.frame_count += 1;
    }
    /// Builds a snapshot with all rates expressed in bits per second.
    #[must_use]
    pub fn summary(&self) -> BitrateSummary {
        let scale = self.fps;
        // With no samples, global min/max sit at +/-infinity; report 0.0 for
        // BOTH extrema. (Previously only min was guarded, so an empty
        // analyzer leaked peak_bps == -infinity into the summary.)
        let (peak_bps, min_bps) = if self.global.is_empty() {
            (0.0, 0.0)
        } else {
            (self.global.max() * scale, self.global.min() * scale)
        };
        BitrateSummary {
            frame_count: self.frame_count,
            total_bits: self.total_bits,
            mean_bps: self.global.mean() * scale,
            stddev_bps: self.global.stddev() * scale,
            peak_bps,
            min_bps,
            trend_bps: self.trend.value() * scale,
            window_mean_bps: self.window.mean() * scale,
            window_stddev_bps: self.window.stddev() * scale,
            p95_bps: self.p95.estimate().map(|v| v * scale),
            cv: self.global.cv(),
        }
    }
    /// Clears all accumulated state; `fps`, the window capacity, and the
    /// tracked percentile are preserved.
    pub fn reset(&mut self) {
        self.global.reset();
        self.trend.reset();
        self.window = RollingWindow::new(self.window.capacity());
        // Rebuild from the estimator's own quantile rather than a hard-coded
        // 0.95, so reset stays consistent if `new` ever changes the target.
        self.p95 = PercentileEstimator::new(self.p95.quantile());
        self.total_bits = 0;
        self.frame_count = 0;
    }
}
/// Snapshot of bitrate statistics produced by `BitrateRunningAnalyzer::summary`.
///
/// All `*_bps` figures are bits per second (per-frame bits scaled by fps).
#[derive(Debug, Clone)]
pub struct BitrateSummary {
    /// Total frames analyzed.
    pub frame_count: u64,
    /// Sum of bits over all frames.
    pub total_bits: u64,
    /// Lifetime mean bitrate.
    pub mean_bps: f64,
    /// Lifetime sample standard deviation of the bitrate.
    pub stddev_bps: f64,
    /// Highest single-frame bitrate observed.
    pub peak_bps: f64,
    /// Lowest single-frame bitrate observed; 0.0 when no frames were pushed.
    pub min_bps: f64,
    /// EWMA-smoothed recent bitrate trend.
    pub trend_bps: f64,
    /// Mean bitrate over the rolling window.
    pub window_mean_bps: f64,
    /// Population standard deviation over the rolling window.
    pub window_stddev_bps: f64,
    /// Estimated 95th-percentile bitrate; `None` before five frames.
    pub p95_bps: Option<f64>,
    /// Coefficient of variation of per-frame bits (NaN when the mean is 0).
    pub cv: f64,
}
// Unit tests for each statistics primitive and the bitrate analyzer facade.
#[cfg(test)]
mod tests {
    use super::*;

    // ---- RunningStats ----

    #[test]
    fn test_running_stats_empty() {
        let stats = RunningStats::new();
        assert!(stats.is_empty());
        assert_eq!(stats.count(), 0);
        assert_eq!(stats.mean(), 0.0);
        assert_eq!(stats.variance(), 0.0);
        assert_eq!(stats.stddev(), 0.0);
    }

    #[test]
    fn test_running_stats_single_sample() {
        let mut stats = RunningStats::new();
        stats.push(42.0);
        assert_eq!(stats.count(), 1);
        assert!((stats.mean() - 42.0).abs() < 1e-10);
        assert_eq!(stats.variance(), 0.0);
        assert_eq!(stats.min(), 42.0);
        assert_eq!(stats.max(), 42.0);
    }

    // Dataset chosen so mean is exactly 5 and population stddev exactly 2.
    #[test]
    fn test_running_stats_known_values() {
        let mut stats = RunningStats::new();
        for v in [2.0_f64, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0] {
            stats.push(v);
        }
        assert!((stats.mean() - 5.0).abs() < 1e-10, "mean {}", stats.mean());
        assert!((stats.population_stddev() - 2.0).abs() < 1e-10);
        assert_eq!(stats.min(), 2.0);
        assert_eq!(stats.max(), 9.0);
        assert_eq!(stats.range(), 7.0);
    }

    // NaN samples must be skipped entirely (not counted, not averaged).
    #[test]
    fn test_running_stats_nan_ignored() {
        let mut stats = RunningStats::new();
        stats.push(10.0);
        stats.push(f64::NAN);
        stats.push(20.0);
        assert_eq!(stats.count(), 2);
        assert!((stats.mean() - 15.0).abs() < 1e-10);
    }

    // Merging two accumulators must match pushing all samples into one.
    #[test]
    fn test_running_stats_merge() {
        let mut a = RunningStats::new();
        let mut b = RunningStats::new();
        for v in [1.0_f64, 2.0, 3.0] {
            a.push(v);
        }
        for v in [4.0_f64, 5.0, 6.0] {
            b.push(v);
        }
        a.merge(&b);
        assert_eq!(a.count(), 6);
        assert!((a.mean() - 3.5).abs() < 1e-10, "merged mean {}", a.mean());
        assert_eq!(a.min(), 1.0);
        assert_eq!(a.max(), 6.0);
    }

    // Merging an empty accumulator must be a no-op.
    #[test]
    fn test_running_stats_merge_empty_rhs() {
        let mut a = RunningStats::new();
        a.push(5.0);
        let empty = RunningStats::new();
        a.merge(&empty);
        assert_eq!(a.count(), 1);
        assert!((a.mean() - 5.0).abs() < 1e-10);
    }

    #[test]
    fn test_running_stats_reset() {
        let mut stats = RunningStats::new();
        stats.push(100.0);
        stats.reset();
        assert!(stats.is_empty());
        assert_eq!(stats.mean(), 0.0);
    }

    // Identical samples: coefficient of variation should be ~0 (not NaN).
    #[test]
    fn test_running_stats_cv() {
        let mut stats = RunningStats::new();
        for _ in 0..5 {
            stats.push(10.0);
        }
        let cv = stats.cv();
        assert!(cv.is_finite() && cv < 1e-10, "cv {cv}");
    }

    // ---- Ewma ----

    // First sample seeds the average exactly.
    #[test]
    fn test_ewma_initial_value() {
        let mut ewma = Ewma::new(0.5);
        assert!(ewma.value_opt().is_none());
        ewma.update(100.0);
        assert!((ewma.value() - 100.0).abs() < 1e-10);
    }

    // Constant input: the EWMA must converge to that constant.
    #[test]
    fn test_ewma_convergence() {
        let mut ewma = Ewma::new(0.3);
        for _ in 0..200 {
            ewma.update(50.0);
        }
        assert!((ewma.value() - 50.0).abs() < 0.1, "value {}", ewma.value());
    }

    // NaN updates must leave both the value and the count untouched.
    #[test]
    fn test_ewma_nan_ignored() {
        let mut ewma = Ewma::new(0.5);
        ewma.update(10.0);
        let before = ewma.value();
        ewma.update(f64::NAN);
        assert!((ewma.value() - before).abs() < 1e-12);
        assert_eq!(ewma.count(), 1);
    }

    #[test]
    fn test_ewma_reset() {
        let mut ewma = Ewma::new(0.2);
        ewma.update(42.0);
        ewma.reset();
        assert!(ewma.value_opt().is_none());
        assert_eq!(ewma.count(), 0);
    }

    // ---- RollingWindow ----

    #[test]
    fn test_rolling_window_basic() {
        let mut w = RollingWindow::new(3);
        assert!(w.is_empty());
        w.push(1.0);
        w.push(2.0);
        w.push(3.0);
        assert!(w.is_full());
        assert!((w.mean() - 2.0).abs() < 1e-10);
    }

    // Pushing past capacity evicts the oldest sample (10.0 here).
    #[test]
    fn test_rolling_window_eviction() {
        let mut w = RollingWindow::new(3);
        w.push(10.0);
        w.push(20.0);
        w.push(30.0);
        w.push(40.0);
        assert_eq!(w.len(), 3);
        assert!((w.mean() - 30.0).abs() < 1e-10, "mean {}", w.mean());
    }

    // Population variance of [2, 4, 4, 4] is exactly 0.75.
    #[test]
    fn test_rolling_window_variance() {
        let mut w = RollingWindow::new(4);
        for v in [2.0_f64, 4.0, 4.0, 4.0] {
            w.push(v);
        }
        assert!((w.variance() - 0.75).abs() < 1e-10, "var {}", w.variance());
    }

    #[test]
    fn test_rolling_window_min_max() {
        let mut w = RollingWindow::new(5);
        for v in [5.0_f64, 3.0, 8.0, 1.0, 6.0] {
            w.push(v);
        }
        assert_eq!(w.min(), 1.0);
        assert_eq!(w.max(), 8.0);
    }

    // ---- PercentileEstimator ----

    // No estimate is available until the 5-sample bootstrap completes.
    #[test]
    fn test_percentile_estimator_bootstrap() {
        let mut est = PercentileEstimator::new(0.5);
        for v in 1..=4 {
            est.update(v as f64);
            assert!(est.estimate().is_none());
        }
        est.update(5.0);
        assert!(est.estimate().is_some());
    }

    // P² gives an approximation; a loose tolerance is expected here.
    #[test]
    fn test_percentile_estimator_median_uniform() {
        let mut est = PercentileEstimator::new(0.5);
        for v in 1..=1000 {
            est.update(v as f64);
        }
        let estimated = est.estimate().expect("should have estimate");
        assert!(
            (estimated - 500.5).abs() < 30.0,
            "median estimate {estimated}"
        );
    }

    #[test]
    fn test_percentile_estimator_p95() {
        let mut est = PercentileEstimator::new(0.95);
        for v in 0..=99 {
            est.update(v as f64);
        }
        let estimated = est.estimate().expect("should have estimate");
        assert!(
            (estimated - 94.05).abs() < 15.0,
            "p95 estimate {estimated}"
        );
    }

    // ---- BitrateRunningAnalyzer ----

    #[test]
    fn test_bitrate_analyzer_basic() {
        let mut analyzer = BitrateRunningAnalyzer::new(30.0, 30);
        for bits in [40_000u64, 50_000, 60_000, 45_000, 55_000] {
            analyzer.push_frame(bits);
        }
        let s = analyzer.summary();
        assert_eq!(s.frame_count, 5);
        assert!(s.mean_bps > 0.0);
        assert!(s.peak_bps >= s.mean_bps);
        assert!(s.min_bps <= s.mean_bps);
        assert_eq!(s.total_bits, 250_000);
    }

    #[test]
    fn test_bitrate_analyzer_reset() {
        let mut analyzer = BitrateRunningAnalyzer::new(25.0, 10);
        analyzer.push_frame(100_000);
        analyzer.reset();
        let s = analyzer.summary();
        assert_eq!(s.frame_count, 0);
        assert_eq!(s.total_bits, 0);
    }

    // Alternating extreme frame sizes: the smoothed trend must sit below the
    // observed peak.
    #[test]
    fn test_bitrate_analyzer_trend_smoother_than_peak() {
        let mut analyzer = BitrateRunningAnalyzer::new(30.0, 10);
        for i in 0..100 {
            analyzer.push_frame(if i % 2 == 0 { 10_000 } else { 200_000 });
        }
        let s = analyzer.summary();
        assert!(s.peak_bps > s.trend_bps);
    }
}