use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::time::{Duration, Instant};
// Global tracing switch; Release stores pair with Acquire loads in
// `is_tracing_enabled` so readers that see tracing enabled also see a
// fully written START_TIME_US.
static TRACE_ENABLED: AtomicBool = AtomicBool::new(false);
// Wall-clock time (µs since UNIX_EPOCH) captured when tracing was enabled;
// used by `TimingGuard`'s Drop to print relative timestamps.
static START_TIME_US: AtomicU64 = AtomicU64::new(0);
/// Turns on global tracing.
///
/// The wall-clock start time is stored *before* the enabled flag is set
/// with `Release` ordering, so any reader that observes tracing enabled
/// (via the paired `Acquire` load) also observes a valid start time.
pub fn enable_tracing() {
    START_TIME_US.store(
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default() // a clock before 1970 is treated as 0
            .as_micros() as u64,
        Ordering::Relaxed,
    );
    TRACE_ENABLED.store(true, Ordering::Release);
}
/// Turns off global tracing. Guards created while tracing was on still
/// report on drop (they captured their start eagerly).
pub fn disable_tracing() {
    TRACE_ENABLED.store(false, Ordering::Release);
}
/// Returns whether global tracing is currently enabled.
#[inline]
pub fn is_tracing_enabled() -> bool {
    TRACE_ENABLED.load(Ordering::Acquire)
}
/// RAII scope timer: measures elapsed time from construction to drop and
/// prints a trace line on drop when tracing was enabled at construction.
pub struct TimingGuard {
    name: &'static str,     // label printed in the trace line
    start: Option<Instant>, // None when tracing was disabled at creation
    budget_us: u64,         // soft deadline; exceeding it flags the line
}
impl TimingGuard {
    /// Creates a guard with an explicit budget in microseconds. When
    /// tracing is disabled the guard is inert: no clock read, no output.
    #[inline]
    pub fn new(name: &'static str, budget_us: u64) -> Self {
        if is_tracing_enabled() {
            Self {
                name,
                start: Some(Instant::now()),
                budget_us,
            }
        } else {
            Self {
                name,
                start: None,
                budget_us,
            }
        }
    }
    /// Guard with the 1 ms default budget.
    #[inline]
    pub fn with_default_budget(name: &'static str) -> Self {
        Self::new(name, 1000)
    }
    /// Guard budgeted for one 60 fps frame (16 ms).
    #[inline]
    pub fn render(name: &'static str) -> Self {
        Self::new(name, 16_000)
    }
    /// Guard budgeted for a data-collection pass (100 ms).
    #[inline]
    pub fn collect(name: &'static str) -> Self {
        Self::new(name, 100_000)
    }
}
impl Drop for TimingGuard {
    fn drop(&mut self) {
        // Only report when the guard was armed (tracing on at creation).
        if let Some(start) = self.start {
            let elapsed = start.elapsed();
            let elapsed_us = elapsed.as_micros() as u64;
            let exceeded = elapsed_us > self.budget_us;
            // Milliseconds since `enable_tracing` ran (wall clock);
            // saturates to 0 if the clock moved backwards.
            let relative_ms = (std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_micros() as u64)
                .saturating_sub(START_TIME_US.load(Ordering::Relaxed))
                / 1000;
            let status = if exceeded { "⚠️" } else { "✓" };
            eprintln!(
                "[+{:04}ms] [TRACE] {status} {} <- {:.2}ms (budget: {}μs)",
                relative_ms,
                self.name,
                elapsed_us as f64 / 1000.0,
                self.budget_us
            );
        }
    }
}
/// Running scalar statistics (count / sum / sum-of-squares / min / max),
/// cache-line aligned for use in hot paths.
#[repr(C, align(64))]
#[derive(Debug, Clone)]
pub struct SimdStats {
    pub count: u64,
    pub sum: f64,
    pub sum_sq: f64,
    pub min: f64,
    pub max: f64,
}
// FIX: `Default` used to be derived, which produced `min: 0.0, max: 0.0` —
// inconsistent with `new()` and wrong for min/max tracking (a defaulted
// instance could never report a minimum above 0.0). Default now matches
// `new()` exactly.
impl Default for SimdStats {
    fn default() -> Self {
        Self::new()
    }
}
impl SimdStats {
    /// Empty stats; min/max start at the extreme sentinels so the first
    /// sample always replaces them.
    pub fn new() -> Self {
        Self {
            count: 0,
            sum: 0.0,
            sum_sq: 0.0,
            min: f64::MAX,
            max: f64::MIN,
        }
    }
    /// Folds one sample into the running aggregates.
    #[inline]
    pub fn update(&mut self, value: f64) {
        self.count += 1;
        self.sum += value;
        self.sum_sq += value * value;
        self.min = self.min.min(value);
        self.max = self.max.max(value);
    }
    /// Arithmetic mean (0.0 when empty).
    #[inline]
    pub fn mean(&self) -> f64 {
        if self.count == 0 {
            0.0
        } else {
            self.sum / self.count as f64
        }
    }
    /// Sample variance via the one-pass sum-of-squares identity (0.0 for
    /// n < 2). NOTE: this formula can lose precision (catastrophic
    /// cancellation) when the mean is large relative to the spread.
    #[inline]
    pub fn variance(&self) -> f64 {
        if self.count < 2 {
            return 0.0;
        }
        let n = self.count as f64;
        (self.sum_sq - (self.sum * self.sum) / n) / (n - 1.0)
    }
    /// Sample standard deviation.
    #[inline]
    pub fn std_dev(&self) -> f64 {
        self.variance().sqrt()
    }
    /// Coefficient of variation in percent (0.0 for a near-zero mean).
    #[inline]
    pub fn cv_percent(&self) -> f64 {
        let mean = self.mean();
        if mean.abs() < 1e-9 {
            0.0
        } else {
            (self.std_dev() / mean) * 100.0
        }
    }
    /// Returns to the empty state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Category of work a profiled "brick" performs; determines its default
/// latency budget and acceptable timing variability.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum BrickType {
    Collect,
    Render,
    Compute,
    Network,
    Storage,
}
impl BrickType {
    /// Default latency budget in microseconds for this kind of work.
    #[must_use]
    pub fn default_budget_us(&self) -> u64 {
        match *self {
            BrickType::Compute => 1_000,
            BrickType::Render => 16_000,
            BrickType::Storage => 50_000,
            BrickType::Collect => 100_000,
            BrickType::Network => 500_000,
        }
    }
    /// Coefficient-of-variation ceiling (percent) above which timings of
    /// this kind of work are considered unstable.
    #[must_use]
    pub fn cv_threshold(&self) -> f64 {
        match *self {
            BrickType::Render => 10.0,
            BrickType::Compute => 15.0,
            BrickType::Collect => 25.0,
            BrickType::Storage => 30.0,
            BrickType::Network => 50.0,
        }
    }
}
/// Aggregates per-name timing statistics, grouped by `BrickType`.
#[derive(Debug)]
pub struct BrickProfiler {
    stats: std::collections::HashMap<String, (BrickType, SimdStats)>,
    enabled: bool,
}
impl Default for BrickProfiler {
    fn default() -> Self {
        Self::new()
    }
}
impl BrickProfiler {
    /// Creates a profiler; the enabled flag is a snapshot of the global
    /// tracing switch taken at construction time.
    #[must_use]
    pub fn new() -> Self {
        let enabled = is_tracing_enabled();
        Self {
            stats: std::collections::HashMap::new(),
            enabled,
        }
    }
    /// Runs `f`, timing it under `name` when the profiler is enabled;
    /// the brick type of the first recording for a name sticks.
    pub fn profile<F, R>(&mut self, name: &str, brick_type: BrickType, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        if !self.enabled {
            return f();
        }
        let started = std::time::Instant::now();
        let result = f();
        let micros = started.elapsed().as_micros() as f64;
        let entry = self
            .stats
            .entry(name.to_string())
            .or_insert_with(|| (brick_type, SimdStats::new()));
        entry.1.update(micros);
        result
    }
    /// True when `name`'s timing variability exceeds its type's threshold.
    #[must_use]
    pub fn should_escalate(&self, name: &str) -> bool {
        match self.stats.get(name) {
            Some((brick_type, stats)) => stats.cv_percent() > brick_type.cv_threshold(),
            None => false,
        }
    }
    /// Statistics recorded under `name`, if any.
    #[must_use]
    pub fn get_stats(&self, name: &str) -> Option<&SimdStats> {
        self.stats.get(name).map(|(_, s)| s)
    }
    /// Human-readable report, most expensive bricks (by total time) first.
    #[must_use]
    pub fn summary(&self) -> String {
        let mut lines = vec!["=== Brick Profiler Summary ===".to_string()];
        let mut entries: Vec<_> = self.stats.iter().collect();
        entries.sort_by(|lhs, rhs| {
            (rhs.1)
                .1
                .sum
                .partial_cmp(&(lhs.1).1.sum)
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        for (name, (brick_type, stats)) in entries {
            let budget = brick_type.default_budget_us();
            let avg = stats.mean();
            let cv = stats.cv_percent();
            let status = if avg > budget as f64 { "⚠️" } else { "✓" };
            let escalate = if self.should_escalate(name) {
                " [ESCALATE]"
            } else {
                ""
            };
            lines.push(format!(
                "{status} {name} ({brick_type:?}): avg={avg:.0}μs cv={cv:.1}% n={}{escalate}",
                stats.count
            ));
        }
        lines.join("\n")
    }
}
/// A single completed trace span.
#[derive(Debug, Clone)]
pub struct TraceEvent {
    pub name: String,           // span label
    pub duration: Duration,     // measured wall time of the span
    pub timestamp_us: u64,      // µs since the owning tracer was created
    pub budget_exceeded: bool,  // true when duration ran over budget
    pub budget_us: Option<u64>, // budget in effect, if one was set
}
/// Aggregated timing statistics for one trace name.
#[derive(Debug, Clone, Default)]
pub struct TraceStats {
    pub count: u64,
    pub total_duration: Duration,
    pub min_duration: Duration,
    pub max_duration: Duration,
    pub budget_violations: u64,
    pub budget_us: u64,
}
impl TraceStats {
    /// Seeds statistics from the first observation of a name.
    fn new(duration: Duration, budget_us: u64, exceeded: bool) -> Self {
        Self {
            count: 1,
            total_duration: duration,
            min_duration: duration,
            max_duration: duration,
            budget_violations: u64::from(exceeded),
            budget_us,
        }
    }
    /// Folds one more observation into the aggregate.
    fn update(&mut self, duration: Duration, exceeded: bool) {
        self.count += 1;
        self.total_duration += duration;
        if duration < self.min_duration {
            self.min_duration = duration;
        }
        if duration > self.max_duration {
            self.max_duration = duration;
        }
        self.budget_violations += u64::from(exceeded);
    }
    /// Mean span duration; zero when nothing has been recorded.
    pub fn avg_duration(&self) -> Duration {
        match self.count {
            0 => Duration::ZERO,
            n => self.total_duration / n as u32,
        }
    }
    /// Rough coefficient of variation (percent), estimated from the
    /// half-range (max - min) / 2 relative to the mean rather than a
    /// true standard deviation.
    pub fn cv_percent(&self) -> f64 {
        if self.count < 2 {
            return 0.0;
        }
        let avg = self.avg_duration().as_secs_f64();
        if avg < 1e-9 {
            return 0.0;
        }
        let spread = self
            .max_duration
            .checked_sub(self.min_duration)
            .unwrap_or_default()
            .as_secs_f64();
        (spread / avg) * 50.0
    }
    /// Budget divided by average duration, as a percentage capped at 100
    /// (a zero budget reports 100%).
    pub fn efficiency_percent(&self) -> f64 {
        if self.budget_us == 0 {
            return 100.0;
        }
        let avg_us = self.avg_duration().as_micros() as f64;
        ((self.budget_us as f64) / avg_us * 100.0).min(100.0)
    }
}
/// Tunable limits that decide when a trace name should be escalated.
#[derive(Debug, Clone, Copy)]
pub struct EscalationThresholds {
    pub cv_percent: f64,
    pub efficiency_percent: f64,
    pub max_traces_per_sec: u32,
}
impl Default for EscalationThresholds {
    /// CV above 15%, efficiency below 25%, at most 100 traces per second.
    fn default() -> Self {
        EscalationThresholds {
            max_traces_per_sec: 100,
            efficiency_percent: 25.0,
            cv_percent: 15.0,
        }
    }
}
/// Aggregating tracer: times closures, keeps per-name `TraceStats`, a
/// bounded list of recent events, and escalation heuristics.
#[derive(Debug)]
pub struct PerfTracer {
    stats: HashMap<String, TraceStats>,
    recent_events: Vec<TraceEvent>, // bounded FIFO of the latest events
    max_recent: usize,              // capacity of `recent_events`
    start_time: Instant,            // origin for event timestamps
    thresholds: EscalationThresholds,
    traces_this_second: u32, // events recorded in the current 1 s bucket
    last_second: u64,        // index of the current 1 s bucket
}
impl Default for PerfTracer {
    fn default() -> Self {
        Self::new()
    }
}
impl PerfTracer {
    /// Tracer with default thresholds and room for 100 recent events.
    #[must_use]
    pub fn new() -> Self {
        Self {
            stats: HashMap::new(),
            recent_events: Vec::with_capacity(100),
            max_recent: 100,
            start_time: Instant::now(),
            thresholds: EscalationThresholds::default(),
            traces_this_second: 0,
            last_second: 0,
        }
    }
    /// Tracer with caller-supplied escalation thresholds.
    #[must_use]
    pub fn with_thresholds(thresholds: EscalationThresholds) -> Self {
        Self {
            thresholds,
            ..Self::new()
        }
    }
    /// Runs `f`, recording its duration under `name` against `budget_us`.
    pub fn trace_with_budget<F, R>(&mut self, name: &str, budget_us: u64, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        let start = Instant::now();
        let result = f();
        let duration = start.elapsed();
        self.record_trace(name, duration, budget_us);
        result
    }
    /// Runs `f` with the default 1 ms budget.
    pub fn trace<F, R>(&mut self, name: &str, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        self.trace_with_budget(name, 1000, f)
    }
    // Records one measurement: updates per-name stats, appends to the
    // bounded recent-event list, and maintains the per-second counter.
    fn record_trace(&mut self, name: &str, duration: Duration, budget_us: u64) {
        let timestamp_us = self.start_time.elapsed().as_micros() as u64;
        let budget_exceeded = duration.as_micros() as u64 > budget_us;
        // Roll the per-second counter on entering a new 1 s bucket.
        // NOTE(review): the counter is maintained but not yet enforced
        // against `thresholds.max_traces_per_sec` anywhere in this block.
        let current_second = timestamp_us / 1_000_000;
        if current_second != self.last_second {
            self.traces_this_second = 0;
            self.last_second = current_second;
        }
        self.traces_this_second += 1;
        let event = TraceEvent {
            name: name.to_string(),
            duration,
            timestamp_us,
            budget_exceeded,
            budget_us: Some(budget_us),
        };
        // The budget recorded in stats is the one passed on the first
        // observation of this name; later budgets only affect the event.
        if let Some(stats) = self.stats.get_mut(name) {
            stats.update(duration, budget_exceeded);
        } else {
            self.stats.insert(
                name.to_string(),
                TraceStats::new(duration, budget_us, budget_exceeded),
            );
        }
        // Drop the oldest event once the bounded list is full.
        if self.recent_events.len() >= self.max_recent {
            self.recent_events.remove(0);
        }
        self.recent_events.push(event);
    }
    /// True when `name`'s stats breach either escalation threshold
    /// (variability too high, or efficiency too low).
    #[must_use]
    pub fn should_escalate(&self, name: &str) -> bool {
        if let Some(stats) = self.stats.get(name) {
            let cv = stats.cv_percent();
            let efficiency = stats.efficiency_percent();
            cv > self.thresholds.cv_percent || efficiency < self.thresholds.efficiency_percent
        } else {
            false
        }
    }
    /// Statistics for one trace name, if recorded.
    #[must_use]
    pub fn get_stats(&self, name: &str) -> Option<&TraceStats> {
        self.stats.get(name)
    }
    /// All per-name statistics.
    #[must_use]
    pub fn all_stats(&self) -> &HashMap<String, TraceStats> {
        &self.stats
    }
    /// Human-readable report, heaviest names (by total time) first.
    #[must_use]
    pub fn summary(&self) -> String {
        let mut lines = vec![
            "=== Performance Trace Summary ===".to_string(),
            String::new(),
        ];
        let mut sorted: Vec<_> = self.stats.iter().collect();
        sorted.sort_by(|a, b| b.1.total_duration.cmp(&a.1.total_duration));
        for (name, stats) in sorted {
            let avg_us = stats.avg_duration().as_micros();
            let max_us = stats.max_duration.as_micros();
            let cv = stats.cv_percent();
            let eff = stats.efficiency_percent();
            let status = if stats.budget_violations > 0 {
                "⚠️"
            } else {
                "✓"
            };
            lines.push(format!(
                "{status} {name}: avg={avg_us}μs max={max_us}μs count={} cv={cv:.1}% eff={eff:.0}%",
                stats.count
            ));
            if self.should_escalate(name) {
                lines.push(format!(
                    " └── ESCALATE: CV={cv:.1}% > {}% OR eff={eff:.0}% < {}%",
                    self.thresholds.cv_percent, self.thresholds.efficiency_percent
                ));
            }
        }
        lines.join("\n")
    }
    /// Machine-readable export, one `TRACE` line per name.
    #[must_use]
    pub fn export_renacer_format(&self) -> String {
        let mut lines = vec!["# renacer-compatible trace export".to_string()];
        lines.push(format!("# timestamp: {:?}", self.start_time.elapsed()));
        for (name, stats) in &self.stats {
            lines.push(format!(
                "TRACE {} count={} total_us={} avg_us={} max_us={} cv={:.2} eff={:.2} violations={}",
                name,
                stats.count,
                stats.total_duration.as_micros(),
                stats.avg_duration().as_micros(),
                stats.max_duration.as_micros(),
                stats.cv_percent(),
                stats.efficiency_percent(),
                stats.budget_violations
            ));
        }
        lines.join("\n")
    }
    /// Drops all stats and recent events (thresholds and start time kept).
    pub fn clear(&mut self) {
        self.stats.clear();
        self.recent_events.clear();
    }
}
/// Fixed-capacity, overwrite-on-full ring buffer backed by an inline array.
#[derive(Debug, Clone)]
pub struct RingBuffer<T, const N: usize> {
    data: [T; N],
    head: usize, // next write position
    len: usize,  // number of valid elements (<= N)
}
impl<T: Default + Copy, const N: usize> Default for RingBuffer<T, N> {
    fn default() -> Self {
        Self::new()
    }
}
impl<T: Default + Copy, const N: usize> RingBuffer<T, N> {
    /// Creates an empty buffer.
    #[must_use]
    pub fn new() -> Self {
        Self {
            data: [T::default(); N],
            head: 0,
            len: 0,
        }
    }
    /// Appends `value`, overwriting the oldest element when full.
    ///
    /// FIX: a zero-capacity buffer is now a no-op; previously `push` on
    /// `N == 0` panicked (out-of-bounds index and `% 0` division by zero).
    pub fn push(&mut self, value: T) {
        if N == 0 {
            return;
        }
        self.data[self.head] = value;
        self.head = (self.head + 1) % N;
        self.len = self.len.saturating_add(1).min(N);
    }
    /// Number of elements currently stored.
    #[must_use]
    pub fn len(&self) -> usize {
        self.len
    }
    /// True when no elements are stored.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
    /// True when the buffer holds `N` elements (new pushes overwrite).
    #[must_use]
    pub fn is_full(&self) -> bool {
        self.len == N
    }
    /// Compile-time capacity `N`.
    #[must_use]
    pub fn capacity(&self) -> usize {
        N
    }
    /// Most recently pushed element, if any.
    #[must_use]
    pub fn latest(&self) -> Option<&T> {
        if self.len == 0 {
            None
        } else {
            // `head` points at the next write slot; step back one, wrapping.
            let idx = if self.head == 0 { N - 1 } else { self.head - 1 };
            Some(&self.data[idx])
        }
    }
    /// Element at logical position `index` (0 = oldest).
    #[must_use]
    pub fn get(&self, index: usize) -> Option<&T> {
        if index >= self.len {
            return None;
        }
        // Before wrapping the oldest element is at 0; afterwards at `head`.
        let start = if self.len < N { 0 } else { self.head };
        let actual_idx = (start + index) % N;
        Some(&self.data[actual_idx])
    }
    /// Iterates the stored elements oldest → newest.
    pub fn iter(&self) -> impl Iterator<Item = &T> {
        let start = if self.len < N { 0 } else { self.head };
        (0..self.len).map(move |i| {
            let idx = (start + i) % N;
            &self.data[idx]
        })
    }
    /// Logically empties the buffer (slots are not overwritten).
    pub fn clear(&mut self) {
        self.head = 0;
        self.len = 0;
    }
}
impl<const N: usize> RingBuffer<f64, N> {
    /// Sum of stored samples.
    #[must_use]
    pub fn sum(&self) -> f64 {
        self.iter().sum()
    }
    /// Mean of stored samples (0.0 when empty).
    #[must_use]
    pub fn mean(&self) -> f64 {
        if self.len == 0 {
            0.0
        } else {
            self.sum() / self.len as f64
        }
    }
    /// Smallest stored sample (NaN ordering falls back to Equal).
    #[must_use]
    pub fn min(&self) -> Option<f64> {
        self.iter()
            .copied()
            .min_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
    }
    /// Largest stored sample (NaN ordering falls back to Equal).
    #[must_use]
    pub fn max(&self) -> Option<f64> {
        self.iter()
            .copied()
            .max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
    }
}
/// Seven-bin latency histogram with fixed millisecond-scale boundaries.
#[derive(Debug, Clone)]
pub struct LatencyHistogram {
    bins: [u64; 7],
    count: u64,
}
impl Default for LatencyHistogram {
    fn default() -> Self {
        Self::new()
    }
}
impl LatencyHistogram {
    /// Creates an empty histogram.
    #[must_use]
    pub fn new() -> Self {
        Self {
            bins: [0; 7],
            count: 0,
        }
    }
    /// Records one latency sample (microseconds) into its bin.
    pub fn record(&mut self, latency_us: u64) {
        let bin = if latency_us < 1_000 {
            0
        } else if latency_us < 5_000 {
            1
        } else if latency_us < 10_000 {
            2
        } else if latency_us < 50_000 {
            3
        } else if latency_us < 100_000 {
            4
        } else if latency_us < 500_000 {
            5
        } else {
            6
        };
        self.bins[bin] += 1;
        self.count += 1;
    }
    /// Total number of recorded samples.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }
    /// Samples recorded in `bin` (0 for an out-of-range bin index).
    #[must_use]
    pub fn bin_count(&self, bin: usize) -> u64 {
        self.bins.get(bin).copied().unwrap_or(0)
    }
    /// Per-bin share of all samples, in percent (all zeros when empty).
    #[must_use]
    pub fn percentages(&self) -> [f64; 7] {
        let mut pcts = [0.0; 7];
        if self.count > 0 {
            for (slot, &hits) in pcts.iter_mut().zip(self.bins.iter()) {
                *slot = (hits as f64 / self.count as f64) * 100.0;
            }
        }
        pcts
    }
    /// Static label for a bin index.
    #[must_use]
    pub fn bin_label(bin: usize) -> &'static str {
        match bin {
            0 => "0-1ms",
            1 => "1-5ms",
            2 => "5-10ms",
            3 => "10-50ms",
            4 => "50-100ms",
            5 => "100-500ms",
            6 => "500ms+",
            _ => "?",
        }
    }
    /// Renders the histogram as rows of `█` bars scaled to `width`.
    #[must_use]
    pub fn ascii_histogram(&self, width: usize) -> String {
        let pcts = self.percentages();
        let rows: Vec<String> = pcts
            .iter()
            .enumerate()
            .map(|(i, pct)| {
                let bar_len = ((*pct / 100.0) * width as f64) as usize;
                let bar: String = "█".repeat(bar_len);
                format!("{:>10} {:5.1}% {}", Self::bin_label(i), pct, bar)
            })
            .collect();
        rows.join("\n")
    }
    /// Clears all bins and the sample count.
    pub fn reset(&mut self) {
        self.bins = [0; 7];
        self.count = 0;
    }
}
// Declarative helper that generates a plain counter struct: every field is
// a zero-initialised integer, with a const `new()` constructor and a
// `reset()` that zeroes all fields. (Defined for use elsewhere; not
// invoked in this section of the file.)
macro_rules! define_tracker {
    (
        // Outer attributes, visibility, name, then a non-empty field list
        // (each field may carry its own attributes and visibility).
        $(#[$meta:meta])*
        $vis:vis struct $name:ident {
            $( $(#[$fmeta:meta])* $fvis:vis $fname:ident : $fty:ty ),+ $(,)?
        }
    ) => {
        $(#[$meta])*
        #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
        $vis struct $name {
            $( $(#[$fmeta])* $fvis $fname : $fty, )+
        }
        impl $name {
            #[inline]
            #[must_use]
            pub const fn new() -> Self {
                // All fields start at zero; field types must accept `0`.
                Self { $( $fname: 0, )+ }
            }
            #[inline]
            pub fn reset(&mut self) {
                *self = Self::new();
            }
        }
    };
}
/// Exponential moving average: value = α·sample + (1 − α)·value.
#[derive(Debug, Clone)]
pub struct EmaTracker {
    value: f64,
    alpha: f64,        // smoothing factor in [0, 1]
    initialized: bool, // first sample seeds the average directly
}
impl Default for EmaTracker {
    /// Slow-moving default (α = 0.1).
    fn default() -> Self {
        Self::new(0.1)
    }
}
impl EmaTracker {
    /// Creates a tracker; `alpha` is clamped into [0, 1].
    #[must_use]
    pub fn new(alpha: f64) -> Self {
        Self {
            value: 0.0,
            alpha: alpha.clamp(0.0, 1.0),
            initialized: false,
        }
    }
    /// Responsive smoothing for frame-rate tracking (α = 0.3).
    #[must_use]
    pub fn for_fps() -> Self {
        Self::new(0.3)
    }
    /// Heavy smoothing for load averages (α = 0.05).
    #[must_use]
    pub fn for_load() -> Self {
        Self::new(0.05)
    }
    /// Feeds one sample; the very first sample becomes the value as-is.
    pub fn update(&mut self, sample: f64) {
        if !self.initialized {
            self.initialized = true;
            self.value = sample;
            return;
        }
        self.value = self.alpha * sample + (1.0 - self.alpha) * self.value;
    }
    /// Current smoothed value (0.0 before any sample).
    #[must_use]
    pub fn value(&self) -> f64 {
        self.value
    }
    /// True once at least one sample has been fed.
    #[must_use]
    pub fn is_initialized(&self) -> bool {
        self.initialized
    }
    /// Current smoothing factor.
    #[must_use]
    pub fn alpha(&self) -> f64 {
        self.alpha
    }
    /// Forgets all samples; the smoothing factor is kept.
    pub fn reset(&mut self) {
        self.value = 0.0;
        self.initialized = false;
    }
    /// Replaces the smoothing factor, clamped into [0, 1].
    pub fn set_alpha(&mut self, alpha: f64) {
        self.alpha = alpha.clamp(0.0, 1.0);
    }
}
/// Wall-clock rate limiter: allows at most one event per interval.
#[derive(Debug, Clone)]
pub struct RateLimiter {
    last_allowed_us: u64, // epoch µs of the last allowed event (0 = never)
    interval_us: u64,     // minimum spacing between allowed events
}
impl Default for RateLimiter {
    /// 60 Hz limiter.
    fn default() -> Self {
        Self::new_hz(60)
    }
}
impl RateLimiter {
    // Current wall-clock time as µs since the UNIX epoch (0 before 1970).
    fn now_us() -> u64 {
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64
    }
    /// Limiter with an explicit interval in microseconds.
    #[must_use]
    pub fn new(interval_us: u64) -> Self {
        Self {
            last_allowed_us: 0,
            interval_us,
        }
    }
    /// Limiter allowing `hz` events per second (0 Hz falls back to 1/s).
    #[must_use]
    pub fn new_hz(hz: u32) -> Self {
        let interval_us = match hz {
            0 => 1_000_000,
            nonzero => 1_000_000 / u64::from(nonzero),
        };
        Self::new(interval_us)
    }
    /// Limiter with an interval in milliseconds.
    #[must_use]
    pub fn new_ms(ms: u64) -> Self {
        Self::new(ms * 1000)
    }
    /// Returns true and records the event when enough time has passed.
    pub fn check(&mut self) -> bool {
        let now = Self::now_us();
        if now < self.last_allowed_us + self.interval_us {
            return false;
        }
        self.last_allowed_us = now;
        true
    }
    /// Like `check`, but without recording the event.
    #[must_use]
    pub fn would_allow(&self) -> bool {
        Self::now_us() >= self.last_allowed_us + self.interval_us
    }
    /// Configured interval in microseconds.
    #[must_use]
    pub fn interval_us(&self) -> u64 {
        self.interval_us
    }
    /// Configured rate in events per second (0.0 for a zero interval).
    #[must_use]
    pub fn hz(&self) -> f64 {
        match self.interval_us {
            0 => 0.0,
            us => 1_000_000.0 / us as f64,
        }
    }
    /// Makes the next `check` succeed immediately.
    pub fn reset(&mut self) {
        self.last_allowed_us = 0;
    }
}
/// Two-level hysteresis detector: latches high above `high`, returns low
/// only after dropping below `low`.
#[derive(Debug, Clone)]
pub struct ThresholdDetector {
    low: f64,
    high: f64,
    is_high: bool,
}
impl ThresholdDetector {
    /// Creates a detector; `high` is raised to at least `low`.
    #[must_use]
    pub fn new(low: f64, high: f64) -> Self {
        Self {
            low,
            high: high.max(low),
            is_high: false,
        }
    }
    /// Detector over percentages; both thresholds clamped to [0, 100].
    #[must_use]
    pub fn percent(low: f64, high: f64) -> Self {
        Self::new(low.clamp(0.0, 100.0), high.clamp(0.0, 100.0))
    }
    /// 70 / 90 detector for resource utilisation.
    #[must_use]
    pub fn for_resource() -> Self {
        Self::new(70.0, 90.0)
    }
    /// 60 / 80 detector for temperatures.
    #[must_use]
    pub fn for_temperature() -> Self {
        Self::new(60.0, 80.0)
    }
    /// Feeds one value; returns true when the high/low state flipped.
    pub fn update(&mut self, value: f64) -> bool {
        let previous = self.is_high;
        self.is_high = if previous {
            // Leave the high state only once strictly below `low`.
            value >= self.low
        } else {
            // Enter the high state only once strictly above `high`.
            value > self.high
        };
        previous != self.is_high
    }
    /// True while latched high.
    #[must_use]
    pub fn is_high(&self) -> bool {
        self.is_high
    }
    /// True while latched low.
    #[must_use]
    pub fn is_low(&self) -> bool {
        !self.is_high
    }
    /// Lower (release) threshold.
    #[must_use]
    pub fn low_threshold(&self) -> f64 {
        self.low
    }
    /// Upper (trigger) threshold.
    #[must_use]
    pub fn high_threshold(&self) -> f64 {
        self.high
    }
    /// Forces the low state.
    pub fn reset(&mut self) {
        self.is_high = false;
    }
    /// Forces the high state.
    pub fn set_high(&mut self) {
        self.is_high = true;
    }
}
/// Monotonic event counter with wall-clock rate estimation between
/// successive `calculate_rate` calls.
#[derive(Debug, Clone)]
pub struct SampleCounter {
    count: u64,
    last_count: u64,   // count at the previous rate calculation
    last_time_us: u64, // epoch µs of the previous rate calculation (0 = none)
    rate: f64,         // most recently computed events per second
}
impl Default for SampleCounter {
    fn default() -> Self {
        Self::new()
    }
}
impl SampleCounter {
    /// Empty counter with no rate baseline.
    #[must_use]
    pub fn new() -> Self {
        Self {
            count: 0,
            last_count: 0,
            last_time_us: 0,
            rate: 0.0,
        }
    }
    /// Counts one event.
    pub fn increment(&mut self) {
        self.add(1);
    }
    /// Counts `n` events at once.
    pub fn add(&mut self, n: u64) {
        self.count += n;
    }
    /// Total events counted so far.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }
    /// Recomputes events/second since the previous call. The first call
    /// only establishes the baseline and leaves the rate unchanged.
    pub fn calculate_rate(&mut self) -> f64 {
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64;
        if self.last_time_us > 0 {
            let elapsed_us = now.saturating_sub(self.last_time_us);
            if elapsed_us > 0 {
                let delta = self.count.saturating_sub(self.last_count);
                self.rate = (delta as f64 * 1_000_000.0) / elapsed_us as f64;
            }
        }
        self.last_count = self.count;
        self.last_time_us = now;
        self.rate
    }
    /// Most recently computed rate (events per second).
    #[must_use]
    pub fn rate(&self) -> f64 {
        self.rate
    }
    /// Clears the count, the baseline, and the rate.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Tracks instantaneous and peak usage against a fixed budget.
#[derive(Debug, Clone)]
pub struct BudgetTracker {
    budget: f64, // non-negative ceiling
    usage: f64,  // most recent recorded usage
    peak: f64,   // highest usage recorded since the last reset
}
impl BudgetTracker {
    /// Creates a tracker; negative budgets are clamped to 0.
    #[must_use]
    pub fn new(budget: f64) -> Self {
        Self {
            budget: budget.max(0.0),
            usage: 0.0,
            peak: 0.0,
        }
    }
    /// 16 ms render budget (one 60 fps frame), in µs.
    #[must_use]
    pub fn for_render() -> Self {
        Self::new(16_000.0)
    }
    /// 1 ms compute budget, in µs.
    #[must_use]
    pub fn for_compute() -> Self {
        Self::new(1_000.0)
    }
    /// Records the latest usage sample and updates the peak.
    pub fn record(&mut self, usage: f64) {
        self.usage = usage;
        if usage > self.peak {
            self.peak = usage;
        }
    }
    /// Most recent usage sample.
    #[must_use]
    pub fn usage(&self) -> f64 {
        self.usage
    }
    /// Highest usage recorded since the last reset.
    #[must_use]
    pub fn peak(&self) -> f64 {
        self.peak
    }
    /// Configured budget.
    #[must_use]
    pub fn budget(&self) -> f64 {
        self.budget
    }
    /// Current usage as a percentage of the budget (0 for a zero budget).
    #[must_use]
    pub fn utilization(&self) -> f64 {
        if self.budget <= 0.0 {
            return 0.0;
        }
        (self.usage / self.budget) * 100.0
    }
    /// Peak usage as a percentage of the budget (0 for a zero budget).
    #[must_use]
    pub fn peak_utilization(&self) -> f64 {
        if self.budget <= 0.0 {
            return 0.0;
        }
        (self.peak / self.budget) * 100.0
    }
    /// True when the latest usage exceeds the budget.
    #[must_use]
    pub fn is_over_budget(&self) -> bool {
        self.usage > self.budget
    }
    /// Budget left after the latest usage, floored at zero.
    #[must_use]
    pub fn remaining(&self) -> f64 {
        (self.budget - self.usage).max(0.0)
    }
    /// Clears usage and peak; the budget is kept.
    pub fn reset(&mut self) {
        self.usage = 0.0;
        self.peak = 0.0;
    }
    /// Replaces the budget; negative values are clamped to 0.
    pub fn set_budget(&mut self, budget: f64) {
        self.budget = budget.max(0.0);
    }
}
/// Tracks the minimum and maximum of a value stream, with wall-clock
/// timestamps of when each extreme was last set.
#[derive(Debug, Clone)]
pub struct MinMaxTracker {
    min: f64,
    max: f64,
    min_time_us: u64, // epoch µs when `min` was set (0 = never)
    max_time_us: u64, // epoch µs when `max` was set (0 = never)
    count: u64,
}
impl Default for MinMaxTracker {
    fn default() -> Self {
        Self::new()
    }
}
impl MinMaxTracker {
    // Current wall-clock time as µs since the UNIX epoch (0 before 1970).
    fn now_us() -> u64 {
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64
    }
    /// Empty tracker: min/max report `None` until the first sample.
    #[must_use]
    pub fn new() -> Self {
        Self {
            min: f64::MAX,
            max: f64::MIN,
            min_time_us: 0,
            max_time_us: 0,
            count: 0,
        }
    }
    /// Feeds one sample, updating extremes and their timestamps.
    pub fn record(&mut self, value: f64) {
        let stamp = Self::now_us();
        if value < self.min {
            self.min = value;
            self.min_time_us = stamp;
        }
        if value > self.max {
            self.max = value;
            self.max_time_us = stamp;
        }
        self.count += 1;
    }
    /// Smallest sample seen, if any.
    #[must_use]
    pub fn min(&self) -> Option<f64> {
        (self.count > 0).then(|| self.min)
    }
    /// Largest sample seen, if any.
    #[must_use]
    pub fn max(&self) -> Option<f64> {
        (self.count > 0).then(|| self.max)
    }
    /// max − min, if any samples were recorded.
    #[must_use]
    pub fn range(&self) -> Option<f64> {
        (self.count > 0).then(|| self.max - self.min)
    }
    /// Number of samples recorded.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }
    /// µs elapsed since the minimum was set (0 when none recorded).
    #[must_use]
    pub fn time_since_min_us(&self) -> u64 {
        if self.min_time_us == 0 {
            return 0;
        }
        Self::now_us().saturating_sub(self.min_time_us)
    }
    /// µs elapsed since the maximum was set (0 when none recorded).
    #[must_use]
    pub fn time_since_max_us(&self) -> u64 {
        if self.max_time_us == 0 {
            return 0;
        }
        Self::now_us().saturating_sub(self.max_time_us)
    }
    /// Returns to the empty state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Two-bucket sliding-window accumulator: sums and counts are kept for the
/// current and the previous window and reported together, giving a cheap
/// smoothed approximation of a true sliding window.
#[derive(Debug, Clone)]
pub struct MovingWindow {
    current_sum: f64,
    current_count: u64,
    prev_sum: f64,        // totals of the last completed window
    prev_count: u64,
    window_us: u64,       // window length in µs
    bucket_start_us: u64, // wall-clock start (epoch µs) of the current window
}
impl MovingWindow {
    /// Creates a window of `window_ms` milliseconds starting now.
    #[must_use]
    pub fn new(window_ms: u64) -> Self {
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64;
        Self {
            current_sum: 0.0,
            current_count: 0,
            prev_sum: 0.0,
            prev_count: 0,
            window_us: window_ms * 1000,
            bucket_start_us: now,
        }
    }
    /// One-second window.
    #[must_use]
    pub fn one_second() -> Self {
        Self::new(1000)
    }
    /// One-minute window.
    #[must_use]
    pub fn one_minute() -> Self {
        Self::new(60_000)
    }
    /// Adds `value` to the current bucket (rotating first if it expired).
    pub fn record(&mut self, value: f64) {
        self.maybe_rotate();
        self.current_sum += value;
        self.current_count += 1;
    }
    /// Convenience for counting events: records 1.0.
    pub fn increment(&mut self) {
        self.record(1.0);
    }
    // Rotates buckets once the current window has fully elapsed; called by
    // every accessor so readings stay fresh without a background task.
    // NOTE(review): after a long idle gap only one rotation happens, so the
    // previous bucket can hold stale data — confirm this is acceptable.
    fn maybe_rotate(&mut self) {
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64;
        let elapsed = now.saturating_sub(self.bucket_start_us);
        if elapsed >= self.window_us {
            self.prev_sum = self.current_sum;
            self.prev_count = self.current_count;
            self.current_sum = 0.0;
            self.current_count = 0;
            self.bucket_start_us = now;
        }
    }
    /// Sum over the current plus the previous bucket.
    #[must_use]
    pub fn sum(&mut self) -> f64 {
        self.maybe_rotate();
        self.current_sum + self.prev_sum
    }
    /// Count over the current plus the previous bucket.
    #[must_use]
    pub fn count(&mut self) -> u64 {
        self.maybe_rotate();
        self.current_count + self.prev_count
    }
    /// Value sum per second. NOTE(review): the two-bucket total is divided
    /// by a single window length, so this can over-report by up to 2x just
    /// before a rotation — looks like intentional smoothing; confirm.
    #[must_use]
    pub fn rate_per_second(&mut self) -> f64 {
        self.maybe_rotate();
        let total = self.current_sum + self.prev_sum;
        let window_secs = (self.window_us as f64) / 1_000_000.0;
        if window_secs > 0.0 {
            total / window_secs
        } else {
            0.0
        }
    }
    /// Event count per second (same two-bucket caveat as `rate_per_second`).
    #[must_use]
    pub fn count_rate(&mut self) -> f64 {
        self.maybe_rotate();
        let total = self.current_count + self.prev_count;
        let window_secs = (self.window_us as f64) / 1_000_000.0;
        if window_secs > 0.0 {
            total as f64 / window_secs
        } else {
            0.0
        }
    }
    /// Empties both buckets and restarts the window now.
    pub fn reset(&mut self) {
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64;
        self.current_sum = 0.0;
        self.current_count = 0;
        self.prev_sum = 0.0;
        self.prev_count = 0;
        self.bucket_start_us = now;
    }
}
/// Bucketed latency percentile estimator (10 fixed boundaries, µs).
#[derive(Debug, Clone)]
pub struct PercentileTracker {
    buckets: [u64; 10],    // sample counts per bucket
    count: u64,            // total samples
    boundaries: [u64; 10], // exclusive upper bounds, ascending; last is u64::MAX
}
impl Default for PercentileTracker {
    fn default() -> Self {
        Self::new()
    }
}
impl PercentileTracker {
    /// Tracker with default µs boundaries from 1 ms up to an open-ended
    /// top bucket.
    #[must_use]
    pub fn new() -> Self {
        Self {
            buckets: [0; 10],
            count: 0,
            boundaries: [
                1_000, 5_000, 10_000, 25_000, 50_000, 100_000, 250_000, 500_000, 1_000_000,
                u64::MAX,
            ],
        }
    }
    /// Tracker with custom bucket upper bounds (ascending; the last should
    /// be `u64::MAX` to act as a catch-all).
    #[must_use]
    pub fn with_boundaries(boundaries: [u64; 10]) -> Self {
        Self {
            buckets: [0; 10],
            count: 0,
            boundaries,
        }
    }
    /// Records one sample in microseconds.
    pub fn record_us(&mut self, value_us: u64) {
        for (i, &boundary) in self.boundaries.iter().enumerate() {
            if value_us < boundary {
                self.buckets[i] += 1;
                self.count += 1;
                return;
            }
        }
        // value >= every boundary (possible when the last boundary is not
        // u64::MAX, or value == u64::MAX): count it in the top bucket.
        self.buckets[9] += 1;
        self.count += 1;
    }
    /// Records one sample in milliseconds.
    pub fn record_ms(&mut self, value_ms: f64) {
        self.record_us((value_ms * 1000.0) as u64);
    }
    /// Estimated percentile (bucket midpoint) in microseconds.
    ///
    /// FIX: the rank is now rounded up and clamped to at least 1 sample.
    /// The previous truncating rank (`as u64`) under-reported high
    /// percentiles — e.g. p99 of {500µs, 2000µs} returned the p50 bucket —
    /// and a rank of 0 matched an empty first bucket.
    #[must_use]
    pub fn percentile_us(&self, pct: f64) -> u64 {
        if self.count == 0 {
            return 0;
        }
        let target = (((pct / 100.0) * self.count as f64).ceil() as u64).max(1);
        let mut cumulative = 0u64;
        for (i, &bucket_count) in self.buckets.iter().enumerate() {
            cumulative += bucket_count;
            if cumulative >= target {
                let lower = if i == 0 { 0 } else { self.boundaries[i - 1] };
                let upper = self.boundaries[i];
                if upper == u64::MAX {
                    // Open-ended top bucket: report lower bound + 500 ms.
                    return lower + 500_000;
                }
                return (lower + upper) / 2;
            }
        }
        self.boundaries[8] // unreachable when bucket counts are consistent
    }
    /// Estimated percentile in milliseconds.
    #[must_use]
    pub fn percentile_ms(&self, pct: f64) -> f64 {
        self.percentile_us(pct) as f64 / 1000.0
    }
    /// Median estimate in milliseconds.
    #[must_use]
    pub fn p50_ms(&self) -> f64 {
        self.percentile_ms(50.0)
    }
    /// 90th-percentile estimate in milliseconds.
    #[must_use]
    pub fn p90_ms(&self) -> f64 {
        self.percentile_ms(90.0)
    }
    /// 99th-percentile estimate in milliseconds.
    #[must_use]
    pub fn p99_ms(&self) -> f64 {
        self.percentile_ms(99.0)
    }
    /// Total samples recorded.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }
    /// Clears all buckets (boundaries are kept).
    pub fn reset(&mut self) {
        self.buckets = [0; 10];
        self.count = 0;
    }
}
/// Time-in-state tracker over `N` integer-indexed states, using wall-clock
/// microseconds to attribute elapsed time to the state being left.
#[derive(Debug, Clone)]
pub struct StateTracker<const N: usize> {
    current: usize,        // index of the active state
    entered_us: u64,       // epoch µs when the active state was entered
    durations: [u64; N],   // accumulated µs per state (excluding current stay)
    transitions: [u64; N], // number of entries into each state
}
impl<const N: usize> Default for StateTracker<N> {
    fn default() -> Self {
        Self::new()
    }
}
impl<const N: usize> StateTracker<N> {
    // Current wall-clock time as µs since the UNIX epoch (0 before 1970).
    fn now_us() -> u64 {
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64
    }
    /// Starts in state 0; that initial entry counts as one transition.
    #[must_use]
    pub fn new() -> Self {
        let mut transitions = [0u64; N];
        if N > 0 {
            transitions[0] = 1;
        }
        Self {
            current: 0,
            entered_us: Self::now_us(),
            durations: [0u64; N],
            transitions,
        }
    }
    /// Moves to `new_state`, attributing the elapsed stay to the state
    /// being left. Out-of-range states are ignored.
    pub fn transition(&mut self, new_state: usize) {
        if new_state >= N {
            return;
        }
        let now = Self::now_us();
        self.durations[self.current] += now.saturating_sub(self.entered_us);
        self.current = new_state;
        self.entered_us = now;
        self.transitions[new_state] += 1;
    }
    /// Index of the active state.
    #[must_use]
    pub fn current(&self) -> usize {
        self.current
    }
    /// µs spent in the active state since it was entered.
    #[must_use]
    pub fn time_in_current_us(&self) -> u64 {
        Self::now_us().saturating_sub(self.entered_us)
    }
    /// Total µs attributed to `state`, including the ongoing stay when it
    /// is the active state. Out-of-range states report 0.
    #[must_use]
    pub fn total_time_in_state_us(&self, state: usize) -> u64 {
        if state >= N {
            return 0;
        }
        let mut total = self.durations[state];
        if state == self.current {
            total += self.time_in_current_us();
        }
        total
    }
    /// Number of entries into `state` (0 when out of range).
    #[must_use]
    pub fn transition_count(&self, state: usize) -> u64 {
        self.transitions.get(state).copied().unwrap_or(0)
    }
    /// Total state entries, including the initial entry into state 0.
    #[must_use]
    pub fn total_transitions(&self) -> u64 {
        self.transitions.iter().sum()
    }
    /// Returns to state 0 as if freshly constructed.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Flags samples that differ from the previous one by more than an
/// absolute threshold or a relative (percent) threshold.
#[derive(Debug, Clone)]
pub struct ChangeDetector {
    baseline: f64,      // reference point for *_from_baseline queries
    abs_threshold: f64, // absolute delta that always counts as a change
    rel_threshold: f64, // percent delta (vs. previous value) that counts
    last_value: f64,    // previous sample
    change_count: u64,  // number of detected changes so far
}
impl Default for ChangeDetector {
    fn default() -> Self {
        Self::new(0.0, 1.0, 5.0)
    }
}
impl ChangeDetector {
    /// Creates a detector; both thresholds are taken as absolute values.
    #[must_use]
    pub fn new(baseline: f64, abs_threshold: f64, rel_threshold: f64) -> Self {
        Self {
            baseline,
            abs_threshold: abs_threshold.abs(),
            rel_threshold: rel_threshold.abs(),
            last_value: baseline,
            change_count: 0,
        }
    }
    /// Preset for percentage-valued series (1 point / 5 %).
    #[must_use]
    pub fn for_percentage() -> Self {
        Self::new(0.0, 1.0, 5.0)
    }
    /// Preset for latency-valued series (1000 units / 10 %).
    #[must_use]
    pub fn for_latency() -> Self {
        Self::new(0.0, 1000.0, 10.0)
    }
    /// Would `value` count as a change relative to the previous sample?
    #[must_use]
    pub fn has_changed(&self, value: f64) -> bool {
        let delta = (value - self.last_value).abs();
        if delta >= self.abs_threshold {
            return true;
        }
        let prev_magnitude = self.last_value.abs();
        // Relative test is skipped for a near-zero previous value.
        prev_magnitude > f64::EPSILON && (delta / prev_magnitude) * 100.0 >= self.rel_threshold
    }
    /// Feeds `value`; returns (and counts) whether it was a change.
    pub fn update(&mut self, value: f64) -> bool {
        let changed = self.has_changed(value);
        self.change_count += u64::from(changed);
        self.last_value = value;
        changed
    }
    /// Re-anchors the baseline at the most recent sample.
    pub fn update_baseline(&mut self) {
        self.baseline = self.last_value;
    }
    /// Sets an explicit baseline.
    pub fn set_baseline(&mut self, baseline: f64) {
        self.baseline = baseline;
    }
    /// Current baseline.
    #[must_use]
    pub fn baseline(&self) -> f64 {
        self.baseline
    }
    /// Most recent sample.
    #[must_use]
    pub fn last_value(&self) -> f64 {
        self.last_value
    }
    /// Number of detected changes.
    #[must_use]
    pub fn change_count(&self) -> u64 {
        self.change_count
    }
    /// Signed difference between the last sample and the baseline.
    #[must_use]
    pub fn change_from_baseline(&self) -> f64 {
        self.last_value - self.baseline
    }
    /// Percent difference vs. the baseline (0 for a near-zero baseline).
    #[must_use]
    pub fn relative_change(&self) -> f64 {
        if self.baseline.abs() > f64::EPSILON {
            ((self.last_value - self.baseline) / self.baseline.abs()) * 100.0
        } else {
            0.0
        }
    }
    /// Returns the last value to the baseline and clears the change count.
    pub fn reset(&mut self) {
        self.last_value = self.baseline;
        self.change_count = 0;
    }
}
/// Accumulates a monotonically increasing raw counter, tolerating u64
/// wrap-around (counters that overflow and restart from a small value).
#[derive(Debug, Clone)]
pub struct Accumulator {
    value: u64,        // accumulated total of all observed deltas
    prev_raw: u64,     // last raw reading
    initialized: bool, // first reading only sets the baseline
    overflows: u64,    // number of wrap-arounds detected
}
impl Default for Accumulator {
    fn default() -> Self {
        Self::new()
    }
}
impl Accumulator {
    /// Empty accumulator; the first `update` establishes the baseline.
    #[must_use]
    pub fn new() -> Self {
        Self {
            value: 0,
            prev_raw: 0,
            initialized: false,
            overflows: 0,
        }
    }
    /// Feeds a raw counter reading; adds the delta since the previous
    /// reading, treating a decrease as a u64 wrap-around.
    pub fn update(&mut self, raw: u64) {
        if !self.initialized {
            self.initialized = true;
            self.prev_raw = raw;
            return;
        }
        if raw < self.prev_raw {
            self.overflows += 1;
        }
        // Modular subtraction: identical to (u64::MAX - prev) + raw + 1 in
        // the wrapped case and to raw - prev otherwise.
        self.value += raw.wrapping_sub(self.prev_raw);
        self.prev_raw = raw;
    }
    /// Adds a pre-computed delta directly.
    pub fn add(&mut self, delta: u64) {
        self.value += delta;
        self.initialized = true;
    }
    /// Accumulated total.
    #[must_use]
    pub fn value(&self) -> u64 {
        self.value
    }
    /// Wrap-arounds detected so far.
    #[must_use]
    pub fn overflows(&self) -> u64 {
        self.overflows
    }
    /// True once a baseline or delta has been recorded.
    #[must_use]
    pub fn is_initialized(&self) -> bool {
        self.initialized
    }
    /// Last raw reading fed to `update`.
    #[must_use]
    pub fn last_raw(&self) -> u64 {
        self.prev_raw
    }
    /// Returns to the empty, uninitialised state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Fixed-category event counter with percentage and mode queries.
#[derive(Debug, Clone)]
pub struct EventCounter<const N: usize> {
    counts: [u64; N],
    total: u64,
}
impl<const N: usize> Default for EventCounter<N> {
    fn default() -> Self {
        Self::new()
    }
}
impl<const N: usize> EventCounter<N> {
    /// Empty counter.
    #[must_use]
    pub fn new() -> Self {
        Self {
            counts: [0u64; N],
            total: 0,
        }
    }
    /// Counts one event in `category`; out-of-range is ignored.
    pub fn increment(&mut self, category: usize) {
        self.add(category, 1);
    }
    /// Counts `count` events in `category`; out-of-range is ignored.
    pub fn add(&mut self, category: usize, count: u64) {
        if let Some(slot) = self.counts.get_mut(category) {
            *slot += count;
            self.total += count;
        }
    }
    /// Events counted in `category` (0 when out of range).
    #[must_use]
    pub fn count(&self, category: usize) -> u64 {
        self.counts.get(category).copied().unwrap_or(0)
    }
    /// Events counted across all categories.
    #[must_use]
    pub fn total(&self) -> u64 {
        self.total
    }
    /// Share of `category` in percent (0 when empty or out of range).
    #[must_use]
    pub fn percentage(&self, category: usize) -> f64 {
        if self.total == 0 || category >= N {
            return 0.0;
        }
        (self.counts[category] as f64 / self.total as f64) * 100.0
    }
    /// Category with the highest count, if any events were recorded
    /// (ties resolve to the last such category, per `max_by_key`).
    #[must_use]
    pub fn dominant(&self) -> Option<usize> {
        if self.total == 0 {
            return None;
        }
        self.counts
            .iter()
            .enumerate()
            .max_by_key(|&(_, &count)| count)
            .map(|(idx, _)| idx)
    }
    /// Zeroes all categories and the total.
    pub fn reset(&mut self) {
        self.counts = [0u64; N];
        self.total = 0;
    }
}
/// Least-squares slope detector over a stream of samples, using the
/// arrival index (0, 1, 2, …) as the x coordinate.
#[derive(Debug, Clone)]
pub struct TrendDetector {
    sum: f64,       // Σ y
    sum_xy: f64,    // Σ x·y
    index: u64,     // x of the next sample
    count: u64,
    threshold: f64, // |slope| at or below this is considered flat
}
impl Default for TrendDetector {
    fn default() -> Self {
        Self::new(0.1)
    }
}
/// Direction classification produced by `TrendDetector::trend`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Trend {
    Up,
    Down,
    Flat,
    Unknown,
}
impl TrendDetector {
    /// Detector with an explicit slope threshold (absolute value is used).
    #[must_use]
    pub fn new(threshold: f64) -> Self {
        Self {
            sum: 0.0,
            sum_xy: 0.0,
            index: 0,
            count: 0,
            threshold: threshold.abs(),
        }
    }
    /// Threshold tuned for percentage series.
    #[must_use]
    pub fn for_percentage() -> Self {
        Self::new(0.5)
    }
    /// Threshold tuned for latency series.
    #[must_use]
    pub fn for_latency() -> Self {
        Self::new(1.0)
    }
    /// Feeds one sample.
    pub fn update(&mut self, value: f64) {
        self.sum += value;
        self.sum_xy += (self.index as f64) * value;
        self.index += 1;
        self.count += 1;
    }
    /// Ordinary least-squares slope over all samples (0 for fewer than 2).
    ///
    /// Uses the closed forms Σx = n(n−1)/2 and Σx² = (n−1)n(2n−1)/6 for
    /// x = 0..n−1 rather than storing the series.
    #[must_use]
    pub fn slope(&self) -> f64 {
        if self.count < 2 {
            return 0.0;
        }
        let n = self.count as f64;
        let sum_x = (self.count * (self.count - 1)) as f64 / 2.0;
        let sum_x2 = (self.count * (self.count - 1) * (2 * self.count - 1)) as f64 / 6.0;
        let sum_x_squared = sum_x.powi(2);
        let denominator = n * sum_x2 - sum_x_squared;
        if denominator.abs() < f64::EPSILON {
            return 0.0;
        }
        (n * self.sum_xy - sum_x * self.sum) / denominator
    }
    /// Classifies the slope; `Unknown` until at least 3 samples arrive.
    #[must_use]
    pub fn trend(&self) -> Trend {
        if self.count < 3 {
            return Trend::Unknown;
        }
        let slope = self.slope();
        if slope > self.threshold {
            Trend::Up
        } else if slope < -self.threshold {
            Trend::Down
        } else {
            Trend::Flat
        }
    }
    /// True when the trend is classified `Up`.
    #[must_use]
    pub fn is_trending_up(&self) -> bool {
        self.trend() == Trend::Up
    }
    /// True when the trend is classified `Down`.
    #[must_use]
    pub fn is_trending_down(&self) -> bool {
        self.trend() == Trend::Down
    }
    /// Number of samples seen.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }
    /// Forgets all samples; the threshold is kept.
    pub fn reset(&mut self) {
        self.sum = 0.0;
        self.sum_xy = 0.0;
        self.index = 0;
        self.count = 0;
    }
}
/// Welford-based streaming z-score anomaly detector.
///
/// Maintains the running mean and (via `m2`) the sum of squared deviations
/// of all samples, and flags a sample whose |z-score| against those
/// statistics exceeds `threshold` sigmas. Requires at least 10 samples
/// before flagging anything.
#[derive(Debug, Clone)]
pub struct AnomalyDetector {
    mean: f64,
    m2: f64,
    count: u64,
    threshold: f64,
    last_value: f64,
    anomaly_count: u64,
}

impl Default for AnomalyDetector {
    /// Defaults to the classic three-sigma rule.
    fn default() -> Self {
        Self::new(3.0)
    }
}

impl AnomalyDetector {
    /// Builds a detector flagging samples more than `threshold` (taken as an
    /// absolute value) standard deviations from the running mean.
    #[must_use]
    pub fn new(threshold: f64) -> Self {
        Self {
            mean: 0.0,
            m2: 0.0,
            count: 0,
            threshold: threshold.abs(),
            last_value: 0.0,
            anomaly_count: 0,
        }
    }

    /// Two-sigma preset (more sensitive).
    #[must_use]
    pub fn two_sigma() -> Self {
        Self::new(2.0)
    }

    /// Three-sigma preset (same as the default).
    #[must_use]
    pub fn three_sigma() -> Self {
        Self::new(3.0)
    }

    /// Feeds a sample; returns whether it was flagged as anomalous.
    pub fn update(&mut self, value: f64) -> bool {
        self.last_value = value;
        self.count += 1;
        if self.count == 1 {
            // First sample seeds the mean; no spread information exists yet.
            self.mean = value;
            return false;
        }
        // Classify against the statistics *before* folding the sample in,
        // so an extreme outlier cannot mask itself.
        let flagged = self.is_anomaly(value);
        self.anomaly_count += u64::from(flagged);
        // Welford's online update of mean and sum of squared deviations.
        let d1 = value - self.mean;
        self.mean += d1 / self.count as f64;
        self.m2 += d1 * (value - self.mean);
        flagged
    }

    /// Whether `value` would currently be considered anomalous (read-only).
    #[must_use]
    pub fn is_anomaly(&self, value: f64) -> bool {
        // Below 10 samples the variance estimate is too noisy to trust.
        self.count >= 10 && self.z_score(value).abs() > self.threshold
    }

    /// Standard score of `value` against the running statistics
    /// (0.0 when the spread is effectively zero).
    #[must_use]
    pub fn z_score(&self, value: f64) -> f64 {
        let sd = self.std_dev();
        if sd < f64::EPSILON {
            return 0.0;
        }
        (value - self.mean) / sd
    }

    /// Running mean of all samples.
    #[must_use]
    pub fn mean(&self) -> f64 {
        self.mean
    }

    /// Bessel-corrected sample variance (0.0 with fewer than 2 samples).
    #[must_use]
    pub fn variance(&self) -> f64 {
        match self.count {
            0 | 1 => 0.0,
            n => self.m2 / (n - 1) as f64,
        }
    }

    /// Sample standard deviation.
    #[must_use]
    pub fn std_dev(&self) -> f64 {
        self.variance().sqrt()
    }

    /// Number of samples observed.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Number of samples that were flagged.
    #[must_use]
    pub fn anomaly_count(&self) -> u64 {
        self.anomaly_count
    }

    /// Flagged samples as a percentage of all samples.
    #[must_use]
    pub fn anomaly_rate(&self) -> f64 {
        if self.count == 0 {
            return 0.0;
        }
        (self.anomaly_count as f64 / self.count as f64) * 100.0
    }

    /// Configured sigma threshold.
    #[must_use]
    pub fn threshold(&self) -> f64 {
        self.threshold
    }

    /// Clears all statistics; the threshold is kept.
    pub fn reset(&mut self) {
        *self = Self::new(self.threshold);
    }
}
/// Counts items and derives an instantaneous rate between successive calls
/// to [`calculate_rate`](Self::calculate_rate) using wall-clock time.
#[derive(Debug, Clone)]
pub struct ThroughputTracker {
    total: u64,
    prev_total: u64,
    last_time_us: u64,
    rate: f64,
    peak_rate: f64,
}

impl Default for ThroughputTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl ThroughputTracker {
    /// Creates an empty tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            total: 0,
            prev_total: 0,
            last_time_us: 0,
            rate: 0.0,
            peak_rate: 0.0,
        }
    }

    /// Accumulates `count` items into the running total.
    pub fn add(&mut self, count: u64) {
        self.total += count;
    }

    /// Recomputes the rate from items accumulated since the previous call,
    /// updates the peak, and returns the new rate (items/second).
    pub fn calculate_rate(&mut self) -> f64 {
        let now_us = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64;
        if self.last_time_us != 0 {
            let dt_us = now_us.saturating_sub(self.last_time_us);
            if dt_us != 0 {
                let produced = self.total.saturating_sub(self.prev_total);
                self.rate = (produced as f64 * 1_000_000.0) / dt_us as f64;
                self.peak_rate = self.peak_rate.max(self.rate);
            }
        }
        self.prev_total = self.total;
        self.last_time_us = now_us;
        self.rate
    }

    /// Last computed rate (items/second).
    #[must_use]
    pub fn rate(&self) -> f64 {
        self.rate
    }

    /// Highest rate ever computed.
    #[must_use]
    pub fn peak_rate(&self) -> f64 {
        self.peak_rate
    }

    /// Total items accumulated.
    #[must_use]
    pub fn total(&self) -> u64 {
        self.total
    }

    /// Formats `rate` against descending `(scale, suffix)` steps; values
    /// below every step use `fallback` with no decimals.
    fn humanize(rate: f64, steps: &[(f64, &str)], fallback: &str) -> String {
        for &(scale, suffix) in steps {
            if rate >= scale {
                return format!("{:.1}{}", rate / scale, suffix);
            }
        }
        format!("{rate:.0}{fallback}")
    }

    /// Human-readable item rate using decimal (K/M/G) scaling.
    #[must_use]
    pub fn format_rate(&self) -> String {
        Self::humanize(
            self.rate,
            &[
                (1_000_000_000.0, "G/s"),
                (1_000_000.0, "M/s"),
                (1_000.0, "K/s"),
            ],
            "/s",
        )
    }

    /// Human-readable byte rate using binary (KiB-sized) scaling.
    #[must_use]
    pub fn format_bytes_rate(&self) -> String {
        Self::humanize(
            self.rate,
            &[
                (1_073_741_824.0, "GB/s"),
                (1_048_576.0, "MB/s"),
                (1_024.0, "KB/s"),
            ],
            "B/s",
        )
    }

    /// Clears counters, rates, and timing state.
    pub fn reset(&mut self) {
        self.total = 0;
        self.prev_total = 0;
        self.last_time_us = 0;
        self.rate = 0.0;
        self.peak_rate = 0.0;
    }
}
/// Exponentially weighted moving average of the absolute difference between
/// consecutive samples ("jitter").
#[derive(Debug, Clone)]
pub struct JitterTracker {
    prev: f64,
    jitter: f64,
    peak_jitter: f64,
    count: u64,
    alpha: f64,
}

impl Default for JitterTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl JitterTracker {
    /// New tracker with the default smoothing gain of 1/16.
    #[must_use]
    pub fn new() -> Self {
        Self::with_alpha(1.0 / 16.0)
    }

    /// New tracker with a custom gain, clamped into `[0, 1]`.
    #[must_use]
    pub fn with_alpha(alpha: f64) -> Self {
        Self {
            prev: 0.0,
            jitter: 0.0,
            peak_jitter: 0.0,
            count: 0,
            alpha: alpha.clamp(0.0, 1.0),
        }
    }

    /// Feeds the next sample; the first sample only seeds the baseline.
    pub fn update(&mut self, sample: f64) {
        self.count += 1;
        if self.count == 1 {
            self.prev = sample;
            return;
        }
        let delta = (sample - self.prev).abs();
        self.prev = sample;
        // EWMA of successive |Δ|; peak tracks the smoothed value.
        self.jitter += self.alpha * (delta - self.jitter);
        self.peak_jitter = self.peak_jitter.max(self.jitter);
    }

    /// Current smoothed jitter.
    #[must_use]
    pub fn jitter(&self) -> f64 {
        self.jitter
    }

    /// Highest smoothed jitter observed.
    #[must_use]
    pub fn peak_jitter(&self) -> f64 {
        self.peak_jitter
    }

    /// Samples observed.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Whether the current jitter is above `threshold`.
    #[must_use]
    pub fn exceeds(&self, threshold: f64) -> bool {
        self.jitter > threshold
    }

    /// Clears all state; the gain is kept.
    pub fn reset(&mut self) {
        self.prev = 0.0;
        self.jitter = 0.0;
        self.peak_jitter = 0.0;
        self.count = 0;
    }
}
/// Estimates the rate of change of a sampled value, with an EWMA-smoothed
/// variant for noise rejection.
#[derive(Debug, Clone)]
pub struct DerivativeTracker {
    prev: f64,
    prev_time_us: u64,
    derivative: f64,
    smoothed: f64,
    alpha: f64,
    count: u64,
}

impl Default for DerivativeTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl DerivativeTracker {
    /// New tracker with smoothing gain 0.3.
    #[must_use]
    pub fn new() -> Self {
        Self::with_alpha(0.3)
    }

    /// New tracker with a custom gain, clamped into `[0, 1]`.
    #[must_use]
    pub fn with_alpha(alpha: f64) -> Self {
        Self {
            prev: 0.0,
            prev_time_us: 0,
            derivative: 0.0,
            smoothed: 0.0,
            alpha: alpha.clamp(0.0, 1.0),
            count: 0,
        }
    }

    /// Shared core: recompute the derivative over `dt_secs` (skipped when
    /// non-positive) and remember the sample.
    fn apply(&mut self, value: f64, dt_secs: f64) {
        if dt_secs > 0.0 {
            self.derivative = (value - self.prev) / dt_secs;
            self.smoothed = self.alpha * self.derivative + (1.0 - self.alpha) * self.smoothed;
        }
        self.prev = value;
    }

    /// Feeds a sample, deriving `dt` from the wall clock.
    pub fn update(&mut self, value: f64) {
        let now_us = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64;
        self.count += 1;
        if self.count == 1 {
            self.prev = value;
            self.prev_time_us = now_us;
            return;
        }
        let dt_secs = now_us.saturating_sub(self.prev_time_us) as f64 / 1_000_000.0;
        self.apply(value, dt_secs);
        self.prev_time_us = now_us;
    }

    /// Feeds a sample with an explicit time step in seconds.
    pub fn update_with_dt(&mut self, value: f64, dt_secs: f64) {
        self.count += 1;
        if self.count == 1 {
            self.prev = value;
            return;
        }
        self.apply(value, dt_secs);
    }

    /// Latest raw derivative (units per second).
    #[must_use]
    pub fn derivative(&self) -> f64 {
        self.derivative
    }

    /// EWMA-smoothed derivative.
    #[must_use]
    pub fn smoothed(&self) -> f64 {
        self.smoothed
    }

    /// Whether the smoothed derivative is positive.
    #[must_use]
    pub fn is_accelerating(&self) -> bool {
        self.smoothed > 0.0
    }

    /// Whether the smoothed derivative is negative.
    #[must_use]
    pub fn is_decelerating(&self) -> bool {
        self.smoothed < 0.0
    }

    /// Samples observed.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Clears all state; the gain is kept.
    pub fn reset(&mut self) {
        self.prev = 0.0;
        self.prev_time_us = 0;
        self.derivative = 0.0;
        self.smoothed = 0.0;
        self.count = 0;
    }
}
/// Trapezoidal-rule accumulator for the time integral of a sampled value.
#[derive(Debug, Clone)]
pub struct IntegralTracker {
    prev: f64,
    prev_time_us: u64,
    integral: f64,
    count: u64,
}

impl Default for IntegralTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl IntegralTracker {
    /// Creates an empty tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            prev: 0.0,
            prev_time_us: 0,
            integral: 0.0,
            count: 0,
        }
    }

    /// Feeds a sample, deriving the time step from the wall clock; the first
    /// sample only establishes the baseline.
    pub fn update(&mut self, value: f64) {
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64;
        self.count += 1;
        if self.count == 1 {
            self.prev = value;
            self.prev_time_us = now;
            return;
        }
        let dt = (now.saturating_sub(self.prev_time_us)) as f64 / 1_000_000.0;
        // Trapezoid between the previous and the current sample.
        self.integral += (self.prev + value) / 2.0 * dt;
        self.prev = value;
        self.prev_time_us = now;
    }

    /// Feeds a sample with an explicit time step in seconds.
    pub fn update_with_dt(&mut self, value: f64, dt_secs: f64) {
        self.count += 1;
        if self.count == 1 {
            self.prev = value;
            return;
        }
        self.integral += (self.prev + value) / 2.0 * dt_secs;
        self.prev = value;
    }

    /// Accumulated integral (value·seconds).
    #[must_use]
    pub fn integral(&self) -> f64 {
        self.integral
    }

    /// Returns the most recent sample.
    ///
    /// FIX: the previous `if self.count < 2` conditional was dead — both
    /// arms returned `self.prev` — so it has been collapsed; behavior is
    /// unchanged. NOTE(review): a true time-average (integral / elapsed
    /// time) would require the tracker to also record total elapsed time.
    #[must_use]
    pub fn average(&self) -> f64 {
        self.prev
    }

    /// Samples observed.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Clears all state.
    pub fn reset(&mut self) {
        self.prev = 0.0;
        self.prev_time_us = 0;
        self.integral = 0.0;
        self.count = 0;
    }
}
/// Online Pearson correlation between two paired streams, using the
/// single-pass co-moment recurrence.
#[derive(Debug, Clone)]
pub struct CorrelationTracker {
    mean_x: f64,
    mean_y: f64,
    cov_sum: f64,
    var_x_sum: f64,
    var_y_sum: f64,
    count: u64,
}

impl Default for CorrelationTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl CorrelationTracker {
    /// Creates an empty tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            mean_x: 0.0,
            mean_y: 0.0,
            cov_sum: 0.0,
            var_x_sum: 0.0,
            var_y_sum: 0.0,
            count: 0,
        }
    }

    /// Feeds one `(x, y)` observation.
    pub fn update(&mut self, x: f64, y: f64) {
        self.count += 1;
        let n = self.count as f64;
        let dx = x - self.mean_x;
        let dy = y - self.mean_y;
        self.mean_x += dx / n;
        self.mean_y += dy / n;
        // Co-moment recurrences pair the pre-update delta on one side with
        // the post-update delta on the other.
        self.cov_sum += dx * (y - self.mean_y);
        self.var_x_sum += dx * (x - self.mean_x);
        self.var_y_sum += dy * (y - self.mean_y);
    }

    /// Pearson correlation coefficient in `[-1, 1]` (0.0 with < 2 samples or
    /// when either stream has no spread).
    #[must_use]
    pub fn correlation(&self) -> f64 {
        if self.count < 2 {
            return 0.0;
        }
        let denom = (self.var_x_sum * self.var_y_sum).sqrt();
        if denom < f64::EPSILON {
            return 0.0;
        }
        (self.cov_sum / denom).clamp(-1.0, 1.0)
    }

    /// Correlation above +0.5.
    #[must_use]
    pub fn is_positive(&self) -> bool {
        self.correlation() > 0.5
    }

    /// Correlation below -0.5.
    #[must_use]
    pub fn is_negative(&self) -> bool {
        self.correlation() < -0.5
    }

    /// |correlation| above 0.7.
    #[must_use]
    pub fn is_strong(&self) -> bool {
        self.correlation().abs() > 0.7
    }

    /// Bessel-corrected sample covariance (0.0 with < 2 samples).
    #[must_use]
    pub fn covariance(&self) -> f64 {
        if self.count < 2 {
            return 0.0;
        }
        self.cov_sum / (self.count - 1) as f64
    }

    /// Observations seen.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Clears all state.
    pub fn reset(&mut self) {
        self.mean_x = 0.0;
        self.mean_y = 0.0;
        self.cov_sum = 0.0;
        self.var_x_sum = 0.0;
        self.var_y_sum = 0.0;
        self.count = 0;
    }
}
/// State of a [`CircuitBreaker`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CircuitState {
    Closed,
    Open,
    HalfOpen,
}

/// Classic three-state circuit breaker: consecutive failures open the
/// circuit, a timeout moves it to half-open, and enough successes in
/// half-open close it again.
#[derive(Debug, Clone)]
pub struct CircuitBreaker {
    state: CircuitState,
    failures: u64,
    successes: u64,
    failure_threshold: u64,
    success_threshold: u64,
    opened_at: u64,
    timeout_us: u64,
}

impl Default for CircuitBreaker {
    /// Same tuning as [`CircuitBreaker::for_network`].
    fn default() -> Self {
        Self::new(5, 3, 30_000_000)
    }
}

impl CircuitBreaker {
    /// Wall-clock time in microseconds since the Unix epoch.
    fn now_micros() -> u64 {
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64
    }

    /// Transition to `Open` and stamp the opening time.
    fn trip(&mut self) {
        self.state = CircuitState::Open;
        self.opened_at = Self::now_micros();
    }

    /// Creates a breaker that opens after `failure_threshold` consecutive
    /// failures, re-closes after `success_threshold` half-open successes,
    /// and probes again `timeout_us` after opening.
    #[must_use]
    pub fn new(failure_threshold: u64, success_threshold: u64, timeout_us: u64) -> Self {
        Self {
            state: CircuitState::Closed,
            failures: 0,
            successes: 0,
            failure_threshold,
            success_threshold,
            opened_at: 0,
            timeout_us,
        }
    }

    /// Network preset: 5 failures, 3 successes, 30 s timeout.
    #[must_use]
    pub fn for_network() -> Self {
        Self::new(5, 3, 30_000_000)
    }

    /// Fast-fail preset: 3 failures, 2 successes, 5 s timeout.
    #[must_use]
    pub fn for_fast_fail() -> Self {
        Self::new(3, 2, 5_000_000)
    }

    /// Whether a call may proceed right now. May transition `Open` →
    /// `HalfOpen` when the timeout has elapsed (hence `&mut self`).
    #[must_use]
    pub fn is_allowed(&mut self) -> bool {
        match self.state {
            CircuitState::Closed | CircuitState::HalfOpen => true,
            CircuitState::Open => {
                if Self::now_micros().saturating_sub(self.opened_at) < self.timeout_us {
                    return false;
                }
                self.state = CircuitState::HalfOpen;
                self.successes = 0;
                true
            }
        }
    }

    /// Records a successful call.
    pub fn record_success(&mut self) {
        match self.state {
            CircuitState::Closed => self.failures = 0,
            CircuitState::HalfOpen => {
                self.successes += 1;
                if self.successes >= self.success_threshold {
                    self.state = CircuitState::Closed;
                    self.failures = 0;
                }
            }
            CircuitState::Open => {}
        }
    }

    /// Records a failed call; any half-open failure re-opens immediately.
    pub fn record_failure(&mut self) {
        match self.state {
            CircuitState::Closed => {
                self.failures += 1;
                if self.failures >= self.failure_threshold {
                    self.trip();
                }
            }
            CircuitState::HalfOpen => self.trip(),
            CircuitState::Open => {}
        }
    }

    /// Current state.
    #[must_use]
    pub fn state(&self) -> CircuitState {
        self.state
    }

    /// Consecutive failures counted while closed.
    #[must_use]
    pub fn failures(&self) -> u64 {
        self.failures
    }

    /// Whether the circuit is open.
    #[must_use]
    pub fn is_open(&self) -> bool {
        self.state == CircuitState::Open
    }

    /// Whether the circuit is closed.
    #[must_use]
    pub fn is_closed(&self) -> bool {
        self.state == CircuitState::Closed
    }

    /// Force-closes the circuit and clears counters; thresholds are kept.
    pub fn reset(&mut self) {
        self.state = CircuitState::Closed;
        self.failures = 0;
        self.successes = 0;
    }
}
/// Capped exponential retry backoff with optional deterministic jitter.
#[derive(Debug, Clone)]
pub struct ExponentialBackoff {
    base_us: u64,
    max_us: u64,
    attempt: u64,
    multiplier: f64,
    jitter: bool,
}

impl Default for ExponentialBackoff {
    /// 100 ms base, 30 s cap, no jitter.
    fn default() -> Self {
        Self::new(100_000, 30_000_000)
    }
}

impl ExponentialBackoff {
    /// Creates a backoff starting at `base_us` and capped at `max_us`, with
    /// multiplier 2 and jitter disabled.
    #[must_use]
    pub fn new(base_us: u64, max_us: u64) -> Self {
        Self {
            base_us,
            max_us,
            attempt: 0,
            multiplier: 2.0,
            jitter: false,
        }
    }

    /// Enables deterministic jitter (builder style).
    #[must_use]
    pub fn with_jitter(mut self) -> Self {
        self.jitter = true;
        self
    }

    /// Overrides the growth multiplier, floored at 1.0 (builder style).
    #[must_use]
    pub fn with_multiplier(mut self, multiplier: f64) -> Self {
        self.multiplier = multiplier.max(1.0);
        self
    }

    /// Network preset: 100 ms base, 30 s cap, jitter on.
    #[must_use]
    pub fn for_network() -> Self {
        Self::new(100_000, 30_000_000).with_jitter()
    }

    /// Fast preset: 10 ms base, 1 s cap.
    #[must_use]
    pub fn for_fast() -> Self {
        Self::new(10_000, 1_000_000)
    }

    /// Returns the delay for the current attempt and advances the counter.
    pub fn next_delay(&mut self) -> u64 {
        let delay = self.current_delay();
        self.attempt += 1;
        delay
    }

    /// Delay in microseconds for the current attempt, capped at the maximum.
    #[must_use]
    pub fn current_delay(&self) -> u64 {
        let raw = (self.base_us as f64 * self.multiplier.powi(self.attempt as i32)) as u64;
        let capped = raw.min(self.max_us);
        if !self.jitter {
            return capped;
        }
        // Deterministic pseudo-jitter: factor cycles through [0.5, 0.95]
        // keyed off the attempt number (no RNG dependency).
        let factor = 0.5 + (self.attempt % 10) as f64 * 0.05;
        (capped as f64 * factor) as u64
    }

    /// Current delay in whole milliseconds.
    #[must_use]
    pub fn current_delay_ms(&self) -> u64 {
        self.current_delay() / 1000
    }

    /// Attempts consumed so far.
    #[must_use]
    pub fn attempt(&self) -> u64 {
        self.attempt
    }

    /// Whether the (possibly jittered) delay has reached the cap.
    #[must_use]
    pub fn is_at_max(&self) -> bool {
        self.current_delay() >= self.max_us
    }

    /// Restarts the attempt counter; configuration is kept.
    pub fn reset(&mut self) {
        self.attempt = 0;
    }
}
/// Bucketed percentile estimator over fixed boundaries.
///
/// Each sample increments the first bucket whose boundary it does not
/// exceed (values above the last boundary fall into the last bucket);
/// percentiles are reported as bucket midpoints.
#[derive(Debug, Clone)]
pub struct SlidingMedian {
    /// Occupancy per bucket.
    buckets: [u64; 10],
    /// Inclusive upper boundary of each bucket.
    boundaries: [f64; 10],
    count: u64,
    min: f64,
    max: f64,
}

impl Default for SlidingMedian {
    fn default() -> Self {
        Self::new()
    }
}

impl SlidingMedian {
    /// Uniform boundaries 100..=1000.
    #[must_use]
    pub fn new() -> Self {
        Self {
            buckets: [0; 10],
            boundaries: [
                100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0,
            ],
            count: 0,
            min: f64::MAX,
            max: f64::MIN,
        }
    }

    /// Log-ish boundaries suited to latency values in milliseconds.
    #[must_use]
    pub fn for_latency() -> Self {
        Self {
            buckets: [0; 10],
            boundaries: [1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0],
            count: 0,
            min: f64::MAX,
            max: f64::MIN,
        }
    }

    /// Decile boundaries for percentage values.
    #[must_use]
    pub fn for_percentage() -> Self {
        Self {
            buckets: [0; 10],
            boundaries: [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0],
            count: 0,
            min: f64::MAX,
            max: f64::MIN,
        }
    }

    /// Records one sample.
    pub fn update(&mut self, value: f64) {
        self.count += 1;
        self.min = self.min.min(value);
        self.max = self.max.max(value);
        for (i, &boundary) in self.boundaries.iter().enumerate() {
            if value <= boundary {
                self.buckets[i] += 1;
                return;
            }
        }
        // Above the last boundary: clamp into the last bucket.
        self.buckets[9] += 1;
    }

    /// Estimated median (the 50th percentile).
    #[must_use]
    pub fn median(&self) -> f64 {
        self.percentile(50)
    }

    /// Estimated `p`-th percentile as the midpoint of the bucket containing
    /// that rank (0.0 when no samples were recorded).
    #[must_use]
    pub fn percentile(&self, p: u8) -> f64 {
        if self.count == 0 {
            return 0.0;
        }
        // FIX: the rank is rounded *up* and floored at 1. The previous
        // truncation produced target = 0 for small counts (e.g. count = 1,
        // p = 50), making `cumulative >= target` fire on the first —
        // possibly empty — bucket regardless of where the sample landed.
        let target = ((self.count as f64 * f64::from(p) / 100.0).ceil() as u64).max(1);
        let mut cumulative = 0u64;
        for (i, &bucket) in self.buckets.iter().enumerate() {
            cumulative += bucket;
            if cumulative >= target {
                let lower = if i == 0 { 0.0 } else { self.boundaries[i - 1] };
                return (lower + self.boundaries[i]) / 2.0;
            }
        }
        self.boundaries[9]
    }

    /// Samples recorded.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Smallest sample seen (0.0 when empty).
    #[must_use]
    pub fn min(&self) -> f64 {
        if self.count == 0 {
            0.0
        } else {
            self.min
        }
    }

    /// Largest sample seen (0.0 when empty).
    #[must_use]
    pub fn max(&self) -> f64 {
        if self.count == 0 {
            0.0
        } else {
            self.max
        }
    }

    /// Clears buckets and extrema; boundaries are kept.
    pub fn reset(&mut self) {
        self.buckets = [0; 10];
        self.count = 0;
        self.min = f64::MAX;
        self.max = f64::MIN;
    }
}
/// Dead-band filter: the output only follows the input once it has moved
/// at least `dead_band` away from the last accepted value.
#[derive(Debug, Clone)]
pub struct HysteresisFilter {
    output: f64,
    dead_band: f64,
    count: u64,
}

impl Default for HysteresisFilter {
    fn default() -> Self {
        Self::new(1.0)
    }
}

impl HysteresisFilter {
    /// Creates a filter; `dead_band` is taken as an absolute value.
    #[must_use]
    pub fn new(dead_band: f64) -> Self {
        Self {
            output: 0.0,
            dead_band: dead_band.abs(),
            count: 0,
        }
    }

    /// Preset for percentage values (1.0 dead band).
    #[must_use]
    pub fn for_percentage() -> Self {
        Self::new(1.0)
    }

    /// Preset for latency values (0.5 dead band).
    #[must_use]
    pub fn for_latency() -> Self {
        Self::new(0.5)
    }

    /// Preset for temperature values (0.5 dead band).
    #[must_use]
    pub fn for_temperature() -> Self {
        Self::new(0.5)
    }

    /// Feeds a sample; returns `true` when the output changed. The first
    /// sample is always accepted.
    pub fn update(&mut self, value: f64) -> bool {
        self.count += 1;
        let first = self.count == 1;
        if first || (value - self.output).abs() >= self.dead_band {
            self.output = value;
            return true;
        }
        false
    }

    /// Last accepted value.
    #[must_use]
    pub fn output(&self) -> f64 {
        self.output
    }

    /// Current dead band.
    #[must_use]
    pub fn dead_band(&self) -> f64 {
        self.dead_band
    }

    /// Replaces the dead band (absolute value is used).
    pub fn set_dead_band(&mut self, dead_band: f64) {
        self.dead_band = dead_band.abs();
    }

    /// Samples observed.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Clears output and count; the dead band is kept.
    pub fn reset(&mut self) {
        self.output = 0.0;
        self.count = 0;
    }
}
/// Rejects samples that deviate from a slow EWMA baseline by more than a
/// fixed threshold, repeating the last accepted value instead.
#[derive(Debug, Clone)]
pub struct SpikeFilter {
    avg: f64,
    threshold: f64,
    alpha: f64,
    spikes: u64,
    count: u64,
    last_accepted: f64,
}

impl Default for SpikeFilter {
    fn default() -> Self {
        Self::new(3.0)
    }
}

impl SpikeFilter {
    /// Creates a filter rejecting samples more than `threshold` (absolute
    /// value) away from the running average; EWMA gain is 0.1.
    #[must_use]
    pub fn new(threshold: f64) -> Self {
        Self {
            avg: 0.0,
            threshold: threshold.abs(),
            alpha: 0.1,
            spikes: 0,
            count: 0,
            last_accepted: 0.0,
        }
    }

    /// Preset for percentage values (50-point threshold).
    #[must_use]
    pub fn for_percentage() -> Self {
        Self::new(50.0)
    }

    /// Preset for latency values (100-unit threshold).
    #[must_use]
    pub fn for_latency() -> Self {
        Self::new(100.0)
    }

    /// Feeds a sample and returns the filtered value. The first sample seeds
    /// the baseline and passes through unconditionally.
    pub fn update(&mut self, value: f64) -> f64 {
        self.count += 1;
        if self.count == 1 {
            self.avg = value;
            self.last_accepted = value;
            return value;
        }
        if (value - self.avg).abs() > self.threshold {
            // Spike: count it and repeat the last good sample.
            self.spikes += 1;
            return self.last_accepted;
        }
        self.avg = self.alpha * value + (1.0 - self.alpha) * self.avg;
        self.last_accepted = value;
        value
    }

    /// Current EWMA baseline.
    #[must_use]
    pub fn average(&self) -> f64 {
        self.avg
    }

    /// Samples rejected as spikes.
    #[must_use]
    pub fn spikes(&self) -> u64 {
        self.spikes
    }

    /// Rejected samples as a percentage of all samples.
    #[must_use]
    pub fn spike_rate(&self) -> f64 {
        if self.count == 0 {
            0.0
        } else {
            (self.spikes as f64 / self.count as f64) * 100.0
        }
    }

    /// Samples observed.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Most recent accepted sample.
    #[must_use]
    pub fn last_accepted(&self) -> f64 {
        self.last_accepted
    }

    /// Clears all state; the threshold and gain are kept.
    pub fn reset(&mut self) {
        self.avg = 0.0;
        self.spikes = 0;
        self.count = 0;
        self.last_accepted = 0.0;
    }
}
/// Point-in-time gauge that also tracks min/max/average over every value
/// it has been set to.
#[derive(Debug, Clone)]
pub struct GaugeTracker {
    current: f64,
    min: f64,
    max: f64,
    sum: f64,
    count: u64,
}

impl Default for GaugeTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl GaugeTracker {
    /// Creates an empty gauge.
    #[must_use]
    pub fn new() -> Self {
        Self {
            current: 0.0,
            min: f64::MAX,
            max: f64::MIN,
            sum: 0.0,
            count: 0,
        }
    }

    /// Sets the gauge and folds the value into the statistics.
    pub fn set(&mut self, value: f64) {
        self.current = value;
        self.min = self.min.min(value);
        self.max = self.max.max(value);
        self.sum += value;
        self.count += 1;
    }

    /// Increments the gauge by 1.
    pub fn inc(&mut self) {
        self.add(1.0);
    }

    /// Decrements the gauge by 1.
    pub fn dec(&mut self) {
        self.add(-1.0);
    }

    /// Adjusts the gauge by `delta`.
    pub fn add(&mut self, delta: f64) {
        self.set(self.current + delta);
    }

    /// Current value.
    #[must_use]
    pub fn current(&self) -> f64 {
        self.current
    }

    /// Smallest value ever set (0.0 when never set).
    #[must_use]
    pub fn min(&self) -> f64 {
        match self.count {
            0 => 0.0,
            _ => self.min,
        }
    }

    /// Largest value ever set (0.0 when never set).
    #[must_use]
    pub fn max(&self) -> f64 {
        match self.count {
            0 => 0.0,
            _ => self.max,
        }
    }

    /// Mean of every value the gauge has been set to.
    #[must_use]
    pub fn average(&self) -> f64 {
        match self.count {
            0 => 0.0,
            n => self.sum / n as f64,
        }
    }

    /// Spread between max and min (0.0 when never set).
    #[must_use]
    pub fn range(&self) -> f64 {
        match self.count {
            0 => 0.0,
            _ => self.max - self.min,
        }
    }

    /// Number of updates applied.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Clears the gauge and its statistics.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Success/failure tally with derived rate helpers. An empty pair reports a
/// 100% success rate (optimistic default).
#[derive(Debug, Clone)]
pub struct CounterPair {
    successes: u64,
    failures: u64,
}

impl Default for CounterPair {
    fn default() -> Self {
        Self::new()
    }
}

impl CounterPair {
    /// Creates a pair with both counters at zero.
    #[must_use]
    pub fn new() -> Self {
        Self {
            successes: 0,
            failures: 0,
        }
    }

    /// Records one success.
    pub fn success(&mut self) {
        self.add_successes(1);
    }

    /// Records one failure.
    pub fn failure(&mut self) {
        self.add_failures(1);
    }

    /// Records `count` successes.
    pub fn add_successes(&mut self, count: u64) {
        self.successes += count;
    }

    /// Records `count` failures.
    pub fn add_failures(&mut self, count: u64) {
        self.failures += count;
    }

    /// Successes recorded.
    #[must_use]
    pub fn successes(&self) -> u64 {
        self.successes
    }

    /// Failures recorded.
    #[must_use]
    pub fn failures(&self) -> u64 {
        self.failures
    }

    /// Total events recorded.
    #[must_use]
    pub fn total(&self) -> u64 {
        self.successes + self.failures
    }

    /// Success percentage; 100.0 when nothing has been recorded.
    #[must_use]
    pub fn success_rate(&self) -> f64 {
        match self.total() {
            0 => 100.0,
            total => (self.successes as f64 / total as f64) * 100.0,
        }
    }

    /// Failure percentage (complement of the success rate).
    #[must_use]
    pub fn failure_rate(&self) -> f64 {
        100.0 - self.success_rate()
    }

    /// Whether the success rate meets `threshold` percent.
    #[must_use]
    pub fn is_healthy(&self, threshold: f64) -> bool {
        self.success_rate() >= threshold
    }

    /// Zeroes both counters.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Weighted composite health score over up to 8 components, each scored
/// 0–100. `active` is one past the highest component index ever set.
#[derive(Debug, Clone)]
pub struct HealthScore {
    scores: [f64; 8],
    weights: [f64; 8],
    active: usize,
}

impl Default for HealthScore {
    fn default() -> Self {
        Self::new()
    }
}

impl HealthScore {
    /// Creates a score with all components at 100 and unit weights.
    #[must_use]
    pub fn new() -> Self {
        Self {
            scores: [100.0; 8],
            weights: [1.0; 8],
            active: 0,
        }
    }

    /// Sets component `index` (clamped to 0–100); out-of-range indices are
    /// ignored. Extends the active range to include `index`.
    pub fn set(&mut self, index: usize, score: f64) {
        if index < 8 {
            self.scores[index] = score.clamp(0.0, 100.0);
            if index >= self.active {
                self.active = index + 1;
            }
        }
    }

    /// Sets the weight for component `index` (floored at 0.0); out-of-range
    /// indices are ignored.
    pub fn set_weight(&mut self, index: usize, weight: f64) {
        if index < 8 {
            self.weights[index] = weight.max(0.0);
        }
    }

    /// Weighted mean of the active components, clamped to 0–100
    /// (100.0 when no component was set or all active weights are zero).
    #[must_use]
    pub fn score(&self) -> f64 {
        if self.active == 0 {
            return 100.0;
        }
        let mut weighted_sum = 0.0;
        let mut weight_sum = 0.0;
        for i in 0..self.active {
            weighted_sum += self.scores[i] * self.weights[i];
            weight_sum += self.weights[i];
        }
        if weight_sum < f64::EPSILON {
            100.0
        } else {
            (weighted_sum / weight_sum).clamp(0.0, 100.0)
        }
    }

    /// Maps the composite score onto coarse status bands.
    #[must_use]
    pub fn status(&self) -> HealthStatus {
        let score = self.score();
        if score >= 90.0 {
            HealthStatus::Healthy
        } else if score >= 70.0 {
            HealthStatus::Degraded
        } else if score >= 50.0 {
            HealthStatus::Warning
        } else {
            HealthStatus::Critical
        }
    }

    /// Whether the composite score is at least 90.
    #[must_use]
    pub fn is_healthy(&self) -> bool {
        self.score() >= 90.0
    }

    /// Lowest active component score (100.0 when none are active).
    #[must_use]
    pub fn min_score(&self) -> f64 {
        if self.active == 0 {
            return 100.0;
        }
        self.scores[..self.active]
            .iter()
            .fold(f64::MAX, |a, &b| a.min(b))
    }

    /// Number of active components.
    #[must_use]
    pub fn active_components(&self) -> usize {
        self.active
    }

    /// Restores the pristine state from `new()`.
    ///
    /// FIX: previously the weights were left at whatever `set_weight` had
    /// stored, so a reset detector silently kept stale weighting; they are
    /// now restored to 1.0 like every other field.
    pub fn reset(&mut self) {
        self.scores = [100.0; 8];
        self.weights = [1.0; 8];
        self.active = 0;
    }
}

/// Coarse health classification derived from [`HealthScore::score`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum HealthStatus {
    Healthy,
    Degraded,
    Warning,
    Critical,
}
/// Counts items toward fixed-size batches and signals when a batch fills.
#[derive(Debug, Clone)]
pub struct BatchProcessor {
    count: u64,
    batch_size: u64,
    batches_completed: u64,
    total_items: u64,
}

impl Default for BatchProcessor {
    fn default() -> Self {
        Self::new(100)
    }
}

impl BatchProcessor {
    /// Creates a processor; `batch_size` is floored at 1.
    #[must_use]
    pub fn new(batch_size: u64) -> Self {
        Self {
            count: 0,
            batch_size: batch_size.max(1),
            batches_completed: 0,
            total_items: 0,
        }
    }

    /// Network preset: batches of 1000.
    #[must_use]
    pub fn for_network() -> Self {
        Self::new(1000)
    }

    /// Disk preset: batches of 100.
    #[must_use]
    pub fn for_disk() -> Self {
        Self::new(100)
    }

    /// Metrics preset: batches of 50.
    #[must_use]
    pub fn for_metrics() -> Self {
        Self::new(50)
    }

    /// Adds one item; returns `true` when this item completes a batch.
    pub fn add(&mut self) -> bool {
        self.total_items += 1;
        self.count += 1;
        if self.count < self.batch_size {
            return false;
        }
        self.count = 0;
        self.batches_completed += 1;
        true
    }

    /// Adds `n` items at once; returns how many whole batches completed.
    pub fn add_many(&mut self, n: u64) -> u64 {
        self.total_items += n;
        let pending = self.count + n;
        let completed = pending / self.batch_size;
        self.count = pending % self.batch_size;
        self.batches_completed += completed;
        completed
    }

    /// Whether the pending count has reached the batch size.
    #[must_use]
    pub fn is_ready(&self) -> bool {
        self.count >= self.batch_size
    }

    /// Pending items as a percentage of the batch size.
    #[must_use]
    pub fn fill_percentage(&self) -> f64 {
        (self.count as f64 / self.batch_size as f64) * 100.0
    }

    /// Items still needed to complete the current batch.
    #[must_use]
    pub fn remaining(&self) -> u64 {
        self.batch_size.saturating_sub(self.count)
    }

    /// Whole batches completed.
    #[must_use]
    pub fn batches_completed(&self) -> u64 {
        self.batches_completed
    }

    /// All items ever added.
    #[must_use]
    pub fn total_items(&self) -> u64 {
        self.total_items
    }

    /// Forces a non-empty partial batch to count as completed.
    pub fn flush(&mut self) {
        if self.count > 0 {
            self.count = 0;
            self.batches_completed += 1;
        }
    }

    /// Clears counters; the batch size is kept.
    pub fn reset(&mut self) {
        self.count = 0;
        self.batches_completed = 0;
        self.total_items = 0;
    }
}
/// Tracks in-flight work, throughput, and latency for one pipeline stage.
#[derive(Debug, Clone)]
pub struct PipelineStage {
    in_flight: u64,
    peak_in_flight: u64,
    entered: u64,
    exited: u64,
    total_latency_us: u64,
}

impl Default for PipelineStage {
    fn default() -> Self {
        Self::new()
    }
}

impl PipelineStage {
    /// Creates an idle stage.
    #[must_use]
    pub fn new() -> Self {
        Self {
            in_flight: 0,
            peak_in_flight: 0,
            entered: 0,
            exited: 0,
            total_latency_us: 0,
        }
    }

    /// Marks one item entering the stage.
    pub fn enter(&mut self) {
        self.entered += 1;
        self.in_flight += 1;
        self.peak_in_flight = self.peak_in_flight.max(self.in_flight);
    }

    /// Marks one item leaving, recording its stage latency.
    pub fn exit(&mut self, latency_us: u64) {
        self.exit_simple();
        self.total_latency_us += latency_us;
    }

    /// Marks one item leaving without latency information.
    pub fn exit_simple(&mut self) {
        self.in_flight = self.in_flight.saturating_sub(1);
        self.exited += 1;
    }

    /// Items currently inside the stage.
    #[must_use]
    pub fn depth(&self) -> u64 {
        self.in_flight
    }

    /// Highest concurrent depth observed.
    #[must_use]
    pub fn peak_depth(&self) -> u64 {
        self.peak_in_flight
    }

    /// Mean recorded latency in microseconds (0.0 before any exit).
    #[must_use]
    pub fn avg_latency_us(&self) -> f64 {
        match self.exited {
            0 => 0.0,
            n => self.total_latency_us as f64 / n as f64,
        }
    }

    /// Mean recorded latency in milliseconds.
    #[must_use]
    pub fn avg_latency_ms(&self) -> f64 {
        self.avg_latency_us() / 1000.0
    }

    /// Items that have completed the stage.
    #[must_use]
    pub fn throughput(&self) -> u64 {
        self.exited
    }

    /// Items that have ever entered the stage.
    #[must_use]
    pub fn total_entered(&self) -> u64 {
        self.entered
    }

    /// Whether nothing is currently in flight.
    #[must_use]
    pub fn is_idle(&self) -> bool {
        self.in_flight == 0
    }

    /// Whether the in-flight depth exceeds `threshold`.
    #[must_use]
    pub fn is_backlogged(&self, threshold: u64) -> bool {
        self.in_flight > threshold
    }

    /// Clears all counters.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Bookkeeping for a work queue: size, peak, wait times, and an optional
/// capacity (0 means unbounded).
#[derive(Debug, Clone)]
pub struct WorkQueue {
    size: u64,
    peak_size: u64,
    enqueued: u64,
    dequeued: u64,
    total_wait_us: u64,
    capacity: u64,
}

impl Default for WorkQueue {
    fn default() -> Self {
        Self::new()
    }
}

impl WorkQueue {
    /// Creates an unbounded queue tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            size: 0,
            peak_size: 0,
            enqueued: 0,
            dequeued: 0,
            total_wait_us: 0,
            capacity: 0,
        }
    }

    /// Creates a bounded queue tracker (`capacity` of 0 means unbounded).
    #[must_use]
    pub fn with_capacity(capacity: u64) -> Self {
        Self {
            capacity,
            ..Self::new()
        }
    }

    /// Records an enqueue; returns `false` when the queue is at capacity.
    pub fn enqueue(&mut self) -> bool {
        if self.is_full() {
            return false;
        }
        self.size += 1;
        self.enqueued += 1;
        self.peak_size = self.peak_size.max(self.size);
        true
    }

    /// Records a dequeue with the item's wait time; `false` when empty.
    pub fn dequeue(&mut self, wait_us: u64) -> bool {
        if !self.dequeue_simple() {
            return false;
        }
        self.total_wait_us += wait_us;
        true
    }

    /// Records a dequeue without wait information; `false` when empty.
    pub fn dequeue_simple(&mut self) -> bool {
        if self.size == 0 {
            return false;
        }
        self.size -= 1;
        self.dequeued += 1;
        true
    }

    /// Current queue size.
    #[must_use]
    pub fn size(&self) -> u64 {
        self.size
    }

    /// Highest size observed.
    #[must_use]
    pub fn peak_size(&self) -> u64 {
        self.peak_size
    }

    /// Mean recorded wait in microseconds (0.0 before any dequeue).
    #[must_use]
    pub fn avg_wait_us(&self) -> f64 {
        match self.dequeued {
            0 => 0.0,
            n => self.total_wait_us as f64 / n as f64,
        }
    }

    /// Current size as a percentage of capacity (0.0 when unbounded).
    #[must_use]
    pub fn utilization(&self) -> f64 {
        match self.capacity {
            0 => 0.0,
            cap => (self.size as f64 / cap as f64) * 100.0,
        }
    }

    /// Whether the queue is empty.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    /// Whether a bounded queue has reached its capacity.
    #[must_use]
    pub fn is_full(&self) -> bool {
        self.capacity > 0 && self.size >= self.capacity
    }

    /// Free slots remaining (`u64::MAX` when unbounded).
    #[must_use]
    pub fn remaining_capacity(&self) -> u64 {
        match self.capacity {
            0 => u64::MAX,
            cap => cap.saturating_sub(self.size),
        }
    }

    /// Items ever enqueued.
    #[must_use]
    pub fn total_enqueued(&self) -> u64 {
        self.enqueued
    }

    /// Items ever dequeued.
    #[must_use]
    pub fn total_dequeued(&self) -> u64 {
        self.dequeued
    }

    /// Clears counters; the capacity is kept.
    pub fn reset(&mut self) {
        self.size = 0;
        self.peak_size = 0;
        self.enqueued = 0;
        self.dequeued = 0;
        self.total_wait_us = 0;
    }
}
/// Leaky-bucket rate limiter. Time is supplied by the caller (microseconds),
/// making the type fully deterministic and testable.
#[derive(Debug, Clone)]
pub struct LeakyBucket {
    level: f64,
    capacity: f64,
    leak_rate: f64,
    last_update_us: u64,
    overflows: u64,
}

impl Default for LeakyBucket {
    /// Capacity 100 tokens, leaking 10 tokens/second.
    fn default() -> Self {
        Self::new(100.0, 10.0)
    }
}

impl LeakyBucket {
    /// Creates a bucket; capacity is floored at 1.0 and the leak rate at
    /// 0.1 tokens/second.
    #[must_use]
    pub fn new(capacity: f64, leak_rate: f64) -> Self {
        Self {
            level: 0.0,
            capacity: capacity.max(1.0),
            leak_rate: leak_rate.max(0.1),
            last_update_us: 0,
            overflows: 0,
        }
    }

    /// API preset: 200-token burst, 100 tokens/second.
    #[must_use]
    pub fn for_api() -> Self {
        Self::new(200.0, 100.0)
    }

    /// Network preset: 5 M burst, 1 M tokens/second.
    #[must_use]
    pub fn for_network() -> Self {
        Self::new(5_000_000.0, 1_000_000.0)
    }

    /// Attempts to add `tokens` at time `now_us`; returns `false` (and
    /// counts an overflow) when the bucket would exceed capacity.
    pub fn add(&mut self, tokens: f64, now_us: u64) -> bool {
        self.leak(now_us);
        if self.level + tokens > self.capacity {
            self.overflows += 1;
            return false;
        }
        self.level += tokens;
        true
    }

    /// Drains tokens for the time elapsed since the last update.
    fn leak(&mut self, now_us: u64) {
        if self.last_update_us == 0 {
            // First touch just starts the clock; nothing to drain yet.
            self.last_update_us = now_us;
            return;
        }
        let dt_secs = now_us.saturating_sub(self.last_update_us) as f64 / 1_000_000.0;
        self.level = (self.level - dt_secs * self.leak_rate).max(0.0);
        self.last_update_us = now_us;
    }

    /// Current token level.
    #[must_use]
    pub fn level(&self) -> f64 {
        self.level
    }

    /// Level as a percentage of capacity.
    #[must_use]
    pub fn fill_percentage(&self) -> f64 {
        (self.level / self.capacity) * 100.0
    }

    /// Rejected additions so far.
    #[must_use]
    pub fn overflows(&self) -> u64 {
        self.overflows
    }

    /// Whether the bucket is fully drained.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.level <= 0.0
    }

    /// Empties the bucket and clears counters; configuration is kept.
    pub fn reset(&mut self) {
        self.level = 0.0;
        self.overflows = 0;
        self.last_update_us = 0;
    }

    /// Applies leakage up to `now_us` without adding tokens.
    pub fn update_with_time(&mut self, now_us: u64) {
        self.leak(now_us);
    }
}
/// Sliding-window rate limiter built from 10 rotating sub-windows. Time is
/// supplied by the caller in microseconds.
#[derive(Debug, Clone)]
pub struct SlidingWindowRate {
    windows: [u64; 10],
    current: usize,
    window_us: u64,
    last_rotate_us: u64,
    limit: u64,
    exceeded: u64,
}

impl Default for SlidingWindowRate {
    /// 100 events per second.
    fn default() -> Self {
        Self::new(1_000_000, 100)
    }
}

impl SlidingWindowRate {
    /// Creates a limiter over a window of `window_us` (floored at 10 ms)
    /// allowing `limit` events per window.
    #[must_use]
    pub fn new(window_us: u64, limit: u64) -> Self {
        Self {
            windows: [0; 10],
            current: 0,
            window_us: window_us.max(10_000),
            last_rotate_us: 0,
            limit,
            exceeded: 0,
        }
    }

    /// `limit` events per second.
    #[must_use]
    pub fn per_second(limit: u64) -> Self {
        Self::new(1_000_000, limit)
    }

    /// `limit` events per minute.
    #[must_use]
    pub fn per_minute(limit: u64) -> Self {
        Self::new(60_000_000, limit)
    }

    /// Attempts to record an event at `now_us`; `false` (and an exceeded
    /// count) when the window is already at its limit.
    pub fn record(&mut self, now_us: u64) -> bool {
        self.rotate(now_us);
        if self.would_exceed() {
            self.exceeded += 1;
            return false;
        }
        self.windows[self.current] += 1;
        true
    }

    /// Advances the ring of sub-windows to cover the elapsed time, clearing
    /// each sub-window it rotates into.
    fn rotate(&mut self, now_us: u64) {
        if self.last_rotate_us == 0 {
            self.last_rotate_us = now_us;
            return;
        }
        let sub_window_us = self.window_us / 10;
        let steps = (now_us.saturating_sub(self.last_rotate_us) / sub_window_us).min(10) as usize;
        if steps == 0 {
            return;
        }
        for _ in 0..steps {
            self.current = (self.current + 1) % 10;
            self.windows[self.current] = 0;
        }
        self.last_rotate_us = now_us;
    }

    /// Events currently inside the window.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.windows.iter().sum()
    }

    /// Window occupancy as a percentage of the limit.
    #[must_use]
    pub fn rate_percentage(&self) -> f64 {
        match self.limit {
            0 => 0.0,
            limit => (self.count() as f64 / limit as f64) * 100.0,
        }
    }

    /// Whether the next event would be rejected.
    #[must_use]
    pub fn would_exceed(&self) -> bool {
        self.count() >= self.limit
    }

    /// Rejected events so far.
    #[must_use]
    pub fn exceeded(&self) -> u64 {
        self.exceeded
    }

    /// Clears all window state; configuration is kept.
    pub fn reset(&mut self) {
        self.windows = [0; 10];
        self.current = 0;
        self.exceeded = 0;
        self.last_rotate_us = 0;
    }

    /// Rotates the window forward to `now_us` without recording an event.
    pub fn update_with_time(&mut self, now_us: u64) {
        self.rotate(now_us);
    }
}
/// Bookkeeping for a fixed-capacity resource pool: checkouts, peaks, wait
/// times, and failed (timed-out) acquisitions.
#[derive(Debug, Clone)]
pub struct ResourcePool {
    capacity: u64,
    in_use: u64,
    peak_in_use: u64,
    acquisitions: u64,
    releases: u64,
    timeouts: u64,
    total_wait_us: u64,
}

impl Default for ResourcePool {
    fn default() -> Self {
        Self::new(10)
    }
}

impl ResourcePool {
    /// Creates a pool tracker; `capacity` is floored at 1.
    #[must_use]
    pub fn new(capacity: u64) -> Self {
        Self {
            capacity: capacity.max(1),
            in_use: 0,
            peak_in_use: 0,
            acquisitions: 0,
            releases: 0,
            timeouts: 0,
            total_wait_us: 0,
        }
    }

    /// Database preset: 20 slots.
    #[must_use]
    pub fn for_database() -> Self {
        Self::new(20)
    }

    /// HTTP preset: 100 slots.
    #[must_use]
    pub fn for_http() -> Self {
        Self::new(100)
    }

    /// Records an acquisition that waited `wait_us`; returns `false` (and
    /// counts a timeout) when the pool is exhausted.
    pub fn acquire(&mut self, wait_us: u64) -> bool {
        if self.is_exhausted() {
            self.timeouts += 1;
            return false;
        }
        self.in_use += 1;
        self.acquisitions += 1;
        self.total_wait_us += wait_us;
        self.peak_in_use = self.peak_in_use.max(self.in_use);
        true
    }

    /// Records a release; extra releases are ignored.
    pub fn release(&mut self) {
        if self.in_use > 0 {
            self.in_use -= 1;
            self.releases += 1;
        }
    }

    /// In-use slots as a percentage of capacity.
    #[must_use]
    pub fn utilization(&self) -> f64 {
        (self.in_use as f64 / self.capacity as f64) * 100.0
    }

    /// Free slots.
    #[must_use]
    pub fn available(&self) -> u64 {
        self.capacity.saturating_sub(self.in_use)
    }

    /// Mean acquisition wait in microseconds (0.0 before any acquisition).
    #[must_use]
    pub fn avg_wait_us(&self) -> f64 {
        match self.acquisitions {
            0 => 0.0,
            n => self.total_wait_us as f64 / n as f64,
        }
    }

    /// Failed acquisitions as a percentage of all attempts.
    #[must_use]
    pub fn timeout_rate(&self) -> f64 {
        match self.acquisitions + self.timeouts {
            0 => 0.0,
            attempts => (self.timeouts as f64 / attempts as f64) * 100.0,
        }
    }

    /// Whether every slot is in use.
    #[must_use]
    pub fn is_exhausted(&self) -> bool {
        self.in_use >= self.capacity
    }

    /// Whether no slot is in use.
    #[must_use]
    pub fn is_idle(&self) -> bool {
        self.in_use == 0
    }

    /// Highest utilization ever observed, in percent.
    #[must_use]
    pub fn peak_utilization(&self) -> f64 {
        (self.peak_in_use as f64 / self.capacity as f64) * 100.0
    }

    /// Clears counters; the capacity is kept.
    pub fn reset(&mut self) {
        self.in_use = 0;
        self.peak_in_use = 0;
        self.acquisitions = 0;
        self.releases = 0;
        self.timeouts = 0;
        self.total_wait_us = 0;
    }
}
/// Fixed 10×10 two-dimensional histogram over a rectangular value domain.
/// Out-of-range observations clamp to the edge cells.
#[derive(Debug, Clone)]
pub struct Histogram2D {
    cells: [[u64; 10]; 10],
    x_min: f64,
    x_max: f64,
    y_min: f64,
    y_max: f64,
    count: u64,
}
impl Default for Histogram2D {
    /// Defaults to a 0–100 × 0–100 domain.
    fn default() -> Self {
        Self::new(0.0, 100.0, 0.0, 100.0)
    }
}
impl Histogram2D {
    /// Builds a histogram over `[x_min, x_max] × [y_min, y_max]`; each axis
    /// span is forced to be at least 1.0 to avoid a degenerate range.
    #[must_use]
    pub fn new(x_min: f64, x_max: f64, y_min: f64, y_max: f64) -> Self {
        Self {
            cells: [[0; 10]; 10],
            x_min,
            x_max: x_max.max(x_min + 1.0),
            y_min,
            y_max: y_max.max(y_min + 1.0),
            count: 0,
        }
    }
    /// Preset: latency (0–100) vs throughput (0–1000).
    #[must_use]
    pub fn for_latency_throughput() -> Self {
        Self::new(0.0, 100.0, 0.0, 1000.0)
    }
    /// Preset: CPU% vs memory%.
    #[must_use]
    pub fn for_cpu_memory() -> Self {
        Self::new(0.0, 100.0, 0.0, 100.0)
    }
    /// Records one (x, y) observation.
    pub fn add(&mut self, x: f64, y: f64) {
        let col = self.x_to_index(x);
        let row = self.y_to_index(y);
        self.cells[row][col] += 1;
        self.count += 1;
    }
    // Maps an x value to a column in 0..10, clamping out-of-range input.
    fn x_to_index(&self, x: f64) -> usize {
        let normalized = (x - self.x_min) / (self.x_max - self.x_min);
        (normalized * 10.0).clamp(0.0, 9.0) as usize
    }
    // Maps a y value to a row in 0..10, clamping out-of-range input.
    fn y_to_index(&self, y: f64) -> usize {
        let normalized = (y - self.y_min) / (self.y_max - self.y_min);
        (normalized * 10.0).clamp(0.0, 9.0) as usize
    }
    /// Raw count in cell (xi, yi); 0 for out-of-range indices.
    #[must_use]
    pub fn get(&self, xi: usize, yi: usize) -> u64 {
        match (xi, yi) {
            (0..=9, 0..=9) => self.cells[yi][xi],
            _ => 0,
        }
    }
    /// Cell count as a percentage of all observations; 0.0 when empty or
    /// out of range.
    #[must_use]
    pub fn density(&self, xi: usize, yi: usize) -> f64 {
        if self.count == 0 || xi >= 10 || yi >= 10 {
            return 0.0;
        }
        (self.cells[yi][xi] as f64 / self.count as f64) * 100.0
    }
    /// Largest single-cell count.
    #[must_use]
    pub fn max_count(&self) -> u64 {
        let mut best = 0;
        for row in &self.cells {
            for &c in row {
                best = best.max(c);
            }
        }
        best
    }
    /// (xi, yi) of the densest cell; the earliest cell in row-major scan
    /// order wins ties.
    #[must_use]
    pub fn hotspot(&self) -> (usize, usize) {
        let mut best_val = 0;
        let mut best_pos = (0, 0);
        for (yi, row) in self.cells.iter().enumerate() {
            for (xi, &val) in row.iter().enumerate() {
                if val > best_val {
                    best_val = val;
                    best_pos = (xi, yi);
                }
            }
        }
        best_pos
    }
    /// Total observations recorded.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }
    /// Clears all cells and the total count; the domain is kept.
    pub fn reset(&mut self) {
        self.cells = [[0; 10]; 10];
        self.count = 0;
    }
}
/// Algorithm-R reservoir sampler backed by a fixed 16-slot array, using a
/// deterministic LCG so that runs are reproducible.
#[derive(Debug, Clone)]
pub struct ReservoirSampler {
    samples: [f64; 16],
    size: usize,
    capacity: usize,
    seen: u64,
    rng_state: u64,
}
impl Default for ReservoirSampler {
    fn default() -> Self {
        Self::new(16)
    }
}
impl ReservoirSampler {
    /// Creates a sampler keeping at most `capacity` values (capped at 16).
    #[must_use]
    pub fn new(capacity: usize) -> Self {
        Self {
            samples: [0.0; 16],
            size: 0,
            capacity: capacity.min(16),
            seen: 0,
            rng_state: 12345,
        }
    }
    // One Knuth/MMIX LCG step — deterministic pseudo-randomness, not crypto.
    fn next_random(&mut self) -> u64 {
        let next = self
            .rng_state
            .wrapping_mul(6364136223846793005)
            .wrapping_add(1);
        self.rng_state = next;
        next
    }
    /// Offers a value to the reservoir; once full, it replaces a random slot
    /// with probability capacity/seen (classic reservoir sampling).
    pub fn add(&mut self, value: f64) {
        self.seen += 1;
        if self.size == self.capacity {
            let slot = (self.next_random() % self.seen) as usize;
            if slot < self.capacity {
                self.samples[slot] = value;
            }
        } else {
            self.samples[self.size] = value;
            self.size += 1;
        }
    }
    /// The sample at `index`, if one has been stored there.
    #[must_use]
    pub fn get(&self, index: usize) -> Option<f64> {
        self.samples[..self.size].get(index).copied()
    }
    /// Number of samples currently held.
    #[must_use]
    pub fn len(&self) -> usize {
        self.size
    }
    /// True while no samples have been stored.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }
    /// Total number of values offered (kept or not).
    #[must_use]
    pub fn total_seen(&self) -> u64 {
        self.seen
    }
    /// Mean of the held samples; 0.0 when empty.
    #[must_use]
    pub fn mean(&self) -> f64 {
        if self.size == 0 {
            0.0
        } else {
            self.samples[..self.size].iter().sum::<f64>() / self.size as f64
        }
    }
    /// Smallest held sample; 0.0 when empty.
    #[must_use]
    pub fn min(&self) -> f64 {
        if self.size == 0 {
            return 0.0;
        }
        self.samples[..self.size]
            .iter()
            .fold(f64::MAX, |a, &b| a.min(b))
    }
    /// Largest held sample; 0.0 when empty.
    #[must_use]
    pub fn max(&self) -> f64 {
        if self.size == 0 {
            return 0.0;
        }
        self.samples[..self.size]
            .iter()
            .fold(f64::MIN, |a, &b| a.max(b))
    }
    /// Restores the freshly-constructed state, including the RNG seed.
    pub fn reset(&mut self) {
        self.samples = [0.0; 16];
        self.size = 0;
        self.seen = 0;
        self.rng_state = 12345;
    }
}
/// Eight-bucket histogram with exponentially growing (power-of-two) bucket
/// boundaries starting at `base`. Bucket i covers `[base*2^i, base*2^(i+1))`,
/// with bucket 0 also absorbing everything below `base` and bucket 7
/// absorbing everything above.
#[derive(Debug, Clone)]
pub struct ExponentialHistogram {
    buckets: [u64; 8],
    base: f64,
    count: u64,
    sum: f64,
}
impl Default for ExponentialHistogram {
    fn default() -> Self {
        Self::new(1.0)
    }
}
impl ExponentialHistogram {
    /// Creates a histogram whose first boundary is `base` (floored at 0.001
    /// so bucket math never divides by zero).
    #[must_use]
    pub fn new(base: f64) -> Self {
        Self {
            buckets: [0; 8],
            base: base.max(0.001),
            count: 0,
            sum: 0.0,
        }
    }
    /// Preset for millisecond latencies (buckets 1, 2, 4, … ms).
    #[must_use]
    pub fn for_latency_ms() -> Self {
        Self::new(1.0)
    }
    /// Preset for byte sizes starting at 1 KiB.
    #[must_use]
    pub fn for_bytes_kb() -> Self {
        Self::new(1024.0)
    }
    /// Records one observation.
    pub fn add(&mut self, value: f64) {
        self.count += 1;
        self.sum += value;
        let bucket = self.value_to_bucket(value);
        self.buckets[bucket] += 1;
    }
    // Maps a value to its bucket index: floor(log2(value/base)), clamped
    // to 0..=7; values below `base` land in bucket 0.
    fn value_to_bucket(&self, value: f64) -> usize {
        if value < self.base {
            return 0;
        }
        let ratio = value / self.base;
        let bucket = ratio.log2().floor() as usize;
        bucket.min(7)
    }
    /// Count in the given bucket; 0 for out-of-range indices.
    #[must_use]
    pub fn bucket_count(&self, bucket: usize) -> u64 {
        if bucket < 8 {
            self.buckets[bucket]
        } else {
            0
        }
    }
    /// Exclusive upper bound of the given bucket; infinity for the last one.
    #[must_use]
    pub fn bucket_upper_bound(&self, bucket: usize) -> f64 {
        if bucket >= 7 {
            f64::INFINITY
        } else {
            self.base * 2.0_f64.powi(bucket as i32 + 1)
        }
    }
    /// Total observations recorded.
    #[must_use]
    pub fn count(&self) -> u64 {
        self.count
    }
    /// Mean of all recorded values; 0.0 when empty.
    #[must_use]
    pub fn mean(&self) -> f64 {
        if self.count == 0 {
            0.0
        } else {
            self.sum / self.count as f64
        }
    }
    /// Index of the most-populated bucket.
    ///
    /// Fix: an empty histogram now returns 0. Previously `max_by_key` kept
    /// the *last* maximal element, so an all-zero histogram reported
    /// bucket 7, which misleadingly pointed at the largest-value bucket.
    #[must_use]
    pub fn mode_bucket(&self) -> usize {
        if self.count == 0 {
            return 0;
        }
        self.buckets
            .iter()
            .enumerate()
            .max_by_key(|(_, &c)| c)
            .map(|(i, _)| i)
            .unwrap_or(0)
    }
    /// Clears counts and sum; `base` is preserved.
    pub fn reset(&mut self) {
        self.buckets = [0; 8];
        self.count = 0;
        self.sum = 0.0;
    }
}
/// Hit/miss/eviction accounting for a byte-bounded cache.
#[derive(Debug, Clone)]
pub struct CacheStats {
    hits: u64,
    misses: u64,
    evictions: u64,
    insertions: u64,
    bytes_cached: u64,
    capacity_bytes: u64,
}
impl Default for CacheStats {
    /// Zero-capacity stats; `fill_percentage` reports 0 in that case.
    fn default() -> Self {
        Self::new(0)
    }
}
impl CacheStats {
    /// Stats for a cache holding up to `capacity_bytes` bytes.
    #[must_use]
    pub fn new(capacity_bytes: u64) -> Self {
        Self {
            hits: 0,
            misses: 0,
            evictions: 0,
            insertions: 0,
            bytes_cached: 0,
            capacity_bytes,
        }
    }
    /// 32 KiB — typical L1 size.
    #[must_use]
    pub fn for_l1_cache() -> Self {
        Self::new(32 * 1024)
    }
    /// 256 KiB — typical L2 size.
    #[must_use]
    pub fn for_l2_cache() -> Self {
        Self::new(256 * 1024)
    }
    /// 16 MiB application-level cache.
    #[must_use]
    pub fn for_app_cache() -> Self {
        Self::new(16 * 1024 * 1024)
    }
    /// Records a cache hit.
    pub fn hit(&mut self) {
        self.hits += 1;
    }
    /// Records a cache miss.
    pub fn miss(&mut self) {
        self.misses += 1;
    }
    /// Records an eviction freeing `bytes`.
    pub fn evict(&mut self, bytes: u64) {
        self.evictions += 1;
        self.bytes_cached = self.bytes_cached.saturating_sub(bytes);
    }
    /// Records an insertion of `bytes`.
    pub fn insert(&mut self, bytes: u64) {
        self.insertions += 1;
        self.bytes_cached += bytes;
    }
    /// Hits as a percentage of all lookups; 0.0 with no traffic.
    #[must_use]
    pub fn hit_rate(&self) -> f64 {
        match self.hits + self.misses {
            0 => 0.0,
            total => (self.hits as f64 / total as f64) * 100.0,
        }
    }
    /// Complement of `hit_rate`.
    #[must_use]
    pub fn miss_rate(&self) -> f64 {
        100.0 - self.hit_rate()
    }
    /// Evictions per insertion; 0.0 before any insert.
    #[must_use]
    pub fn eviction_rate(&self) -> f64 {
        match self.insertions {
            0 => 0.0,
            n => self.evictions as f64 / n as f64,
        }
    }
    /// Cached bytes as a percentage of capacity; 0.0 for zero capacity.
    #[must_use]
    pub fn fill_percentage(&self) -> f64 {
        match self.capacity_bytes {
            0 => 0.0,
            cap => (self.bytes_cached as f64 / cap as f64) * 100.0,
        }
    }
    /// Total lookups (hits + misses).
    #[must_use]
    pub fn total_requests(&self) -> u64 {
        self.hits + self.misses
    }
    /// True when the hit rate meets `threshold` percent.
    #[must_use]
    pub fn is_effective(&self, threshold: f64) -> bool {
        self.hit_rate() >= threshold
    }
    /// Clears all counters; capacity is preserved.
    pub fn reset(&mut self) {
        self.hits = 0;
        self.misses = 0;
        self.evictions = 0;
        self.insertions = 0;
        self.bytes_cached = 0;
    }
}
/// Tiny fixed-size (1024-bit) Bloom filter over `u64` keys.
/// May report false positives, never false negatives.
#[derive(Debug, Clone)]
pub struct BloomFilter {
    bits: [u64; 16],
    hash_count: u32,
    items: u64,
}
impl Default for BloomFilter {
    fn default() -> Self {
        Self::new(3)
    }
}
impl BloomFilter {
    /// Creates a filter using `hash_count` hash functions (clamped to 1..=10).
    #[must_use]
    pub fn new(hash_count: u32) -> Self {
        Self {
            bits: [0; 16],
            hash_count: hash_count.clamp(1, 10),
            items: 0,
        }
    }
    /// Preset with 3 hashes for small sets.
    #[must_use]
    pub fn for_small() -> Self {
        Self::new(3)
    }
    /// Preset with 5 hashes for medium sets.
    #[must_use]
    pub fn for_medium() -> Self {
        Self::new(5)
    }
    // Mix `value` with `seed` into a bit index in 0..1024
    // (multiply-xor-shift mixing; constants must stay exact).
    fn hash(&self, value: u64, seed: u32) -> usize {
        let mut h = value.wrapping_mul(0x517cc1b727220a95);
        h = h.wrapping_add(seed as u64);
        h ^= h >> 33;
        h = h.wrapping_mul(0xff51afd7ed558ccd);
        (h as usize) % 1024
    }
    // Word index and bit mask for the `seed`-th hash of `value`.
    fn locate(&self, value: u64, seed: u32) -> (usize, u64) {
        let bit = self.hash(value, seed);
        (bit / 64, 1u64 << (bit % 64))
    }
    /// Inserts a key by setting all of its hash bits.
    pub fn add(&mut self, value: u64) {
        for seed in 0..self.hash_count {
            let (word, mask) = self.locate(value, seed);
            self.bits[word] |= mask;
        }
        self.items += 1;
    }
    /// True if the key *may* have been inserted; false means definitely not.
    #[must_use]
    pub fn might_contain(&self, value: u64) -> bool {
        (0..self.hash_count).all(|seed| {
            let (word, mask) = self.locate(value, seed);
            self.bits[word] & mask != 0
        })
    }
    /// Number of keys inserted.
    #[must_use]
    pub fn len(&self) -> u64 {
        self.items
    }
    /// True before any insertion.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.items == 0
    }
    /// Estimated false-positive probability: (1 - e^(-kn/m))^k.
    #[must_use]
    pub fn false_positive_rate(&self) -> f64 {
        let m = 1024.0;
        let k = self.hash_count as f64;
        let n = self.items as f64;
        if n == 0.0 {
            return 0.0;
        }
        (1.0 - (-k * n / m).exp()).powf(k)
    }
    /// Percentage of the 1024 bits that are set.
    #[must_use]
    pub fn fill_percentage(&self) -> f64 {
        let set_bits = self.bits.iter().fold(0u32, |acc, w| acc + w.count_ones());
        (set_bits as f64 / 1024.0) * 100.0
    }
    /// Clears every bit and the item count.
    pub fn reset(&mut self) {
        self.bits = [0; 16];
        self.items = 0;
    }
}
/// Smooth weighted round-robin load balancer over up to 8 backends.
/// Each selection adds every backend's weight to its running score, picks
/// the highest score, then subtracts the total weight from the winner —
/// spreading picks proportionally to weight without long runs on one backend.
#[derive(Debug, Clone)]
pub struct LoadBalancer {
// Static weight per backend (only the first `active` entries are used).
weights: [u32; 8],
// Running smooth-WRR score per backend.
current: [i32; 8],
// Number of configured backends (0..=8).
active: usize,
// Total selections made.
dispatched: u64,
// Selections routed to each backend.
per_backend: [u64; 8],
}
impl Default for LoadBalancer {
fn default() -> Self {
Self::new()
}
}
impl LoadBalancer {
/// Creates a balancer with no backends; `select_backend` returns `None`
/// until at least one is added.
#[must_use]
pub fn new() -> Self {
Self {
weights: [0; 8],
current: [0; 8],
active: 0,
dispatched: 0,
per_backend: [0; 8],
}
}
/// Creates a balancer with `n` (capped at 8) backends of weight 1 each.
#[must_use]
pub fn equal_weights(n: usize) -> Self {
let mut lb = Self::new();
for _ in 0..n.min(8) {
lb.add_backend(1);
}
lb
}
/// Registers a backend with the given weight (floored at 1).
/// Silently ignored once 8 backends are registered.
pub fn add_backend(&mut self, weight: u32) {
if self.active < 8 {
self.weights[self.active] = weight.max(1);
self.current[self.active] = 0;
self.active += 1;
}
}
/// Picks the next backend by smooth weighted round-robin; `None` when no
/// backends are registered. On tied scores the lowest index wins
/// (strict `>` comparison keeps the first maximum).
#[must_use]
pub fn select_backend(&mut self) -> Option<usize> {
if self.active == 0 {
return None;
}
let total_weight: i32 = self.weights[..self.active].iter().map(|&w| w as i32).sum();
// Every backend earns its weight each round...
for i in 0..self.active {
self.current[i] += self.weights[i] as i32;
}
// ...and the highest accumulated score wins.
let mut max_idx = 0;
let mut max_weight = self.current[0];
for i in 1..self.active {
if self.current[i] > max_weight {
max_weight = self.current[i];
max_idx = i;
}
}
// Winner pays back the total weight, so it cannot win every round.
self.current[max_idx] -= total_weight;
self.dispatched += 1;
self.per_backend[max_idx] += 1;
Some(max_idx)
}
/// Percentage of all selections routed to `backend`; 0.0 when nothing has
/// been dispatched or the index is out of range.
#[must_use]
pub fn distribution(&self, backend: usize) -> f64 {
if self.dispatched == 0 || backend >= self.active {
0.0
} else {
(self.per_backend[backend] as f64 / self.dispatched as f64) * 100.0
}
}
/// Total selections made.
#[must_use]
pub fn total_dispatched(&self) -> u64 {
self.dispatched
}
/// Number of registered backends.
#[must_use]
pub fn backend_count(&self) -> usize {
self.active
}
/// True when every backend's share is within `threshold` percent of the
/// even split. Always true with ≤1 backend or fewer than 10 dispatches
/// (too little data to judge).
#[must_use]
pub fn is_balanced(&self, threshold: f64) -> bool {
if self.active <= 1 || self.dispatched < 10 {
return true;
}
let avg = self.dispatched as f64 / self.active as f64;
for i in 0..self.active {
let deviation = ((self.per_backend[i] as f64 - avg) / avg).abs() * 100.0;
if deviation > threshold {
return false;
}
}
true
}
/// Clears dispatch statistics and WRR scores; registered backends and
/// their weights are kept.
pub fn reset(&mut self) {
self.current = [0; 8];
self.dispatched = 0;
self.per_backend = [0; 8];
}
}
/// Token-bucket limiter that also tracks burst statistics. A "burst" is a
/// streak of successful `consume` calls terminated by a rejection.
#[derive(Debug, Clone)]
pub struct BurstTracker {
    tokens: f64,
    capacity: f64,
    refill_rate: f64,
    last_update_us: u64,
    // Length of the burst currently in progress.
    burst_count: u64,
    max_burst: u64,
    total_bursts: u64,
    // Sum of the lengths of all *completed* bursts, so avg_burst() can
    // report a true average.
    completed_burst_sum: u64,
}
impl Default for BurstTracker {
    fn default() -> Self {
        Self::new(100.0, 10.0)
    }
}
impl BurstTracker {
    /// Creates a bucket of `capacity` tokens refilled at `refill_rate`
    /// tokens/second (floored at 1.0 and 0.1 respectively).
    #[must_use]
    pub fn new(capacity: f64, refill_rate: f64) -> Self {
        Self {
            tokens: capacity,
            capacity: capacity.max(1.0),
            refill_rate: refill_rate.max(0.1),
            last_update_us: 0,
            burst_count: 0,
            max_burst: 0,
            total_bursts: 0,
            completed_burst_sum: 0,
        }
    }
    /// Preset for API rate limiting.
    #[must_use]
    pub fn for_api() -> Self {
        Self::new(100.0, 50.0)
    }
    /// Preset for network traffic shaping.
    #[must_use]
    pub fn for_network() -> Self {
        Self::new(1000.0, 100.0)
    }
    /// Attempts to consume `tokens` at time `now_us`. Returns `true` on
    /// success; a failure ends (and records) any burst in progress.
    pub fn consume(&mut self, tokens: f64, now_us: u64) -> bool {
        self.refill(now_us);
        if tokens <= self.tokens {
            self.tokens -= tokens;
            self.burst_count += 1;
            if self.burst_count > self.max_burst {
                self.max_burst = self.burst_count;
            }
            true
        } else {
            if self.burst_count > 0 {
                self.total_bursts += 1;
                // Fix: record the finished burst's length; avg_burst()
                // previously returned max_burst instead of an average.
                self.completed_burst_sum += self.burst_count;
            }
            self.burst_count = 0;
            false
        }
    }
    // Adds refill_rate * elapsed-seconds tokens, capped at capacity.
    // The first call only anchors the clock.
    fn refill(&mut self, now_us: u64) {
        if self.last_update_us == 0 {
            self.last_update_us = now_us;
            return;
        }
        let elapsed_s = (now_us.saturating_sub(self.last_update_us)) as f64 / 1_000_000.0;
        let refill = elapsed_s * self.refill_rate;
        self.tokens = (self.tokens + refill).min(self.capacity);
        self.last_update_us = now_us;
    }
    /// Tokens currently available.
    #[must_use]
    pub fn tokens(&self) -> f64 {
        self.tokens
    }
    /// Available tokens as a percentage of capacity.
    #[must_use]
    pub fn fill_percentage(&self) -> f64 {
        (self.tokens / self.capacity) * 100.0
    }
    /// Longest burst (successful-consume streak) ever observed.
    #[must_use]
    pub fn max_burst(&self) -> u64 {
        self.max_burst
    }
    /// Number of completed bursts.
    #[must_use]
    pub fn total_bursts(&self) -> u64 {
        self.total_bursts
    }
    /// Mean length of completed bursts; 0.0 before any burst has completed.
    /// (Fixed: previously this returned `max_burst`, not an average.)
    #[must_use]
    pub fn avg_burst(&self) -> f64 {
        if self.total_bursts == 0 {
            0.0
        } else {
            self.completed_burst_sum as f64 / self.total_bursts as f64
        }
    }
    /// Restores a full bucket and clears all burst statistics.
    pub fn reset(&mut self) {
        self.tokens = self.capacity;
        self.burst_count = 0;
        self.max_burst = 0;
        self.total_bursts = 0;
        self.completed_burst_sum = 0;
        self.last_update_us = 0;
    }
}
/// Keeps the `k` largest values seen, in descending order, in a fixed
/// 32-slot array using insertion-sort shifts.
#[derive(Debug, Clone)]
pub struct TopKTracker {
// Descending-sorted storage; only the first `count` entries are live.
values: [f64; 32],
// Number of live entries (<= k).
count: usize,
// Maximum entries to keep (capped at 32).
k: usize,
}
impl Default for TopKTracker {
fn default() -> Self {
Self::new(10)
}
}
impl TopKTracker {
/// Creates a tracker keeping the top `k` values (capped at 32).
#[must_use]
pub fn new(k: usize) -> Self {
Self {
values: [f64::NEG_INFINITY; 32],
count: 0,
k: k.min(32),
}
}
/// Preset keeping the top 10 metric values.
#[must_use]
pub fn for_metrics() -> Self {
Self::new(10)
}
/// Preset keeping the top 20 process entries.
#[must_use]
pub fn for_processes() -> Self {
Self::new(20)
}
/// Offers a value. While fewer than `k` values are held it is always
/// inserted; afterwards it must beat the current minimum. Equal values
/// keep arrival order (strict `<` in the shift loop).
pub fn add(&mut self, value: f64) {
if self.count < self.k {
// Not yet full: shift smaller entries right and insert in order.
let mut i = self.count;
while i > 0 && self.values[i - 1] < value {
self.values[i] = self.values[i - 1];
i -= 1;
}
self.values[i] = value;
self.count += 1;
} else if value > self.values[self.k - 1] {
// Full: overwrite the minimum slot, then bubble into position.
let mut i = self.k - 1;
while i > 0 && self.values[i - 1] < value {
self.values[i] = self.values[i - 1];
i -= 1;
}
self.values[i] = value;
}
}
/// The held values, largest first.
#[must_use]
pub fn top(&self) -> &[f64] {
&self.values[..self.count]
}
/// Configured capacity `k`.
#[must_use]
pub fn k(&self) -> usize {
self.k
}
/// Number of values currently held.
#[must_use]
pub fn count(&self) -> usize {
self.count
}
/// Smallest held value (last slot), if any.
#[must_use]
pub fn minimum(&self) -> Option<f64> {
if self.count > 0 {
Some(self.values[self.count - 1])
} else {
None
}
}
/// Largest held value (first slot), if any.
#[must_use]
pub fn maximum(&self) -> Option<f64> {
if self.count > 0 {
Some(self.values[0])
} else {
None
}
}
/// Empties the tracker; `k` is preserved.
pub fn reset(&mut self) {
self.values = [f64::NEG_INFINITY; 32];
self.count = 0;
}
}
/// Simple consumable quota with a fixed limit and peak-usage tracking.
#[derive(Debug, Clone)]
pub struct QuotaTracker {
    limit: u64,
    used: u64,
    peak_usage: u64,
}
impl Default for QuotaTracker {
    fn default() -> Self {
        Self::new(1000)
    }
}
impl QuotaTracker {
    /// Creates a quota of `limit` units (floored at 1).
    #[must_use]
    pub fn new(limit: u64) -> Self {
        Self {
            limit: limit.max(1),
            used: 0,
            peak_usage: 0,
        }
    }
    /// Preset: 10k API calls per day.
    #[must_use]
    pub fn for_api_daily() -> Self {
        Self::new(10000)
    }
    /// Preset: 100 GB of storage.
    #[must_use]
    pub fn for_storage_gb() -> Self {
        Self::new(100)
    }
    /// Tries to consume `amount` units; returns `false` (without consuming)
    /// when the quota would be exceeded.
    ///
    /// Fix: uses `checked_add` so a huge `amount` can no longer overflow
    /// `used + amount` (panic in debug, silent wraparound — and a falsely
    /// granted request — in release).
    pub fn use_quota(&mut self, amount: u64) -> bool {
        match self.used.checked_add(amount) {
            Some(next) if next <= self.limit => {
                self.used = next;
                if self.used > self.peak_usage {
                    self.peak_usage = self.used;
                }
                true
            }
            _ => false,
        }
    }
    /// Returns `amount` units to the quota (floored at zero usage).
    pub fn release(&mut self, amount: u64) {
        self.used = self.used.saturating_sub(amount);
    }
    /// Configured limit.
    #[must_use]
    pub fn limit(&self) -> u64 {
        self.limit
    }
    /// Units still available.
    #[must_use]
    pub fn remaining(&self) -> u64 {
        self.limit.saturating_sub(self.used)
    }
    /// Current usage as a percentage of the limit.
    #[must_use]
    pub fn usage_percentage(&self) -> f64 {
        (self.used as f64 / self.limit as f64) * 100.0
    }
    /// True when no units remain.
    #[must_use]
    pub fn is_exhausted(&self) -> bool {
        self.used >= self.limit
    }
    /// Highest usage level ever reached.
    #[must_use]
    pub fn peak_usage(&self) -> u64 {
        self.peak_usage
    }
    /// Clears usage and peak; the limit is preserved.
    pub fn reset(&mut self) {
        self.used = 0;
        self.peak_usage = 0;
    }
}
/// Frequency counter over 16 fixed categories, with normalized Shannon
/// entropy as a spread measure.
#[derive(Debug, Clone)]
pub struct FrequencyCounter {
    counts: [u64; 16],
    total: u64,
}
impl Default for FrequencyCounter {
    fn default() -> Self {
        Self::new()
    }
}
impl FrequencyCounter {
    /// Creates an empty counter.
    #[must_use]
    pub fn new() -> Self {
        Self {
            counts: [0; 16],
            total: 0,
        }
    }
    /// Adds one observation to `category`; indices >= 16 are ignored.
    pub fn increment(&mut self, category: usize) {
        self.add(category, 1);
    }
    /// Adds `count` observations to `category`; indices >= 16 are ignored.
    pub fn add(&mut self, category: usize, count: u64) {
        if let Some(slot) = self.counts.get_mut(category) {
            *slot += count;
            self.total += count;
        }
    }
    /// Observations in `category`; 0 for out-of-range indices.
    #[must_use]
    pub fn count(&self, category: usize) -> u64 {
        self.counts.get(category).copied().unwrap_or(0)
    }
    /// Category share as a percentage of all observations.
    #[must_use]
    pub fn frequency(&self, category: usize) -> f64 {
        if self.total == 0 || category >= 16 {
            return 0.0;
        }
        (self.counts[category] as f64 / self.total as f64) * 100.0
    }
    /// Total observations across all categories.
    #[must_use]
    pub fn total(&self) -> u64 {
        self.total
    }
    /// Index of the busiest category; ties go to the lowest index, `None`
    /// when empty.
    #[must_use]
    pub fn most_frequent(&self) -> Option<usize> {
        if self.total == 0 {
            return None;
        }
        // Scan reversed so max_by_key's keep-last tie rule yields the
        // *lowest* index among equal maxima.
        self.counts
            .iter()
            .enumerate()
            .rev()
            .max_by_key(|&(_, &c)| c)
            .map(|(i, _)| i)
    }
    /// Number of categories with at least one observation.
    #[must_use]
    pub fn non_zero_count(&self) -> usize {
        self.counts.iter().filter(|&&c| c > 0).count()
    }
    /// Shannon entropy of the distribution, normalized to [0, 1] by dividing
    /// by log2(16) = 4.
    #[must_use]
    pub fn entropy(&self) -> f64 {
        if self.total == 0 {
            return 0.0;
        }
        let mut entropy = 0.0;
        for &count in &self.counts {
            if count > 0 {
                let p = count as f64 / self.total as f64;
                entropy -= p * p.log2();
            }
        }
        entropy / 4.0
    }
    /// Clears every category and the total.
    pub fn reset(&mut self) {
        self.counts = [0; 16];
        self.total = 0;
    }
}
/// Sliding-window min/max tracker over the last `window_size` values,
/// backed by a fixed 128-slot ring buffer.
#[derive(Debug, Clone)]
pub struct MovingRange {
    values: [f64; 128],
    window_size: usize,
    head: usize,
    count: usize,
    current_min: f64,
    current_max: f64,
}
impl Default for MovingRange {
    fn default() -> Self {
        Self::new(10)
    }
}
impl MovingRange {
    /// Creates a tracker over a window of `window_size` values.
    ///
    /// Fix: the size is clamped to `1..=128`. Previously `new(0)` produced
    /// `window_size == 0`, and the first `add` panicked with a
    /// remainder-by-zero in `self.head % self.window_size`.
    #[must_use]
    pub fn new(window_size: usize) -> Self {
        Self {
            values: [0.0; 128],
            window_size: window_size.clamp(1, 128),
            head: 0,
            count: 0,
            current_min: f64::INFINITY,
            current_max: f64::NEG_INFINITY,
        }
    }
    /// Preset: 20-sample price window.
    #[must_use]
    pub fn for_prices() -> Self {
        Self::new(20)
    }
    /// Preset: 100-sample latency window.
    #[must_use]
    pub fn for_latency() -> Self {
        Self::new(100)
    }
    /// Pushes a value, evicting the oldest once the window is full.
    pub fn add(&mut self, value: f64) {
        let idx = self.head;
        self.values[idx] = value;
        self.head = (self.head + 1) % self.window_size;
        if self.count < self.window_size {
            self.count += 1;
        }
        self.recalculate_minmax();
    }
    // Full O(window) rescan — simple and branch-light; fine for <=128 slots.
    fn recalculate_minmax(&mut self) {
        self.current_min = f64::INFINITY;
        self.current_max = f64::NEG_INFINITY;
        for i in 0..self.count {
            let v = self.values[i];
            if v < self.current_min {
                self.current_min = v;
            }
            if v > self.current_max {
                self.current_max = v;
            }
        }
    }
    /// Configured window size.
    #[must_use]
    pub fn window_size(&self) -> usize {
        self.window_size
    }
    /// Number of values currently in the window.
    #[must_use]
    pub fn count(&self) -> usize {
        self.count
    }
    /// Smallest value in the window, if any.
    #[must_use]
    pub fn min(&self) -> Option<f64> {
        if self.count > 0 {
            Some(self.current_min)
        } else {
            None
        }
    }
    /// Largest value in the window, if any.
    #[must_use]
    pub fn max(&self) -> Option<f64> {
        if self.count > 0 {
            Some(self.current_max)
        } else {
            None
        }
    }
    /// max - min over the window; 0.0 when empty.
    #[must_use]
    pub fn range(&self) -> f64 {
        if self.count > 0 {
            self.current_max - self.current_min
        } else {
            0.0
        }
    }
    /// Midpoint of min and max; 0.0 when empty.
    #[must_use]
    pub fn midrange(&self) -> f64 {
        if self.count > 0 {
            (self.current_max + self.current_min) / 2.0
        } else {
            0.0
        }
    }
    /// Range as a percentage of the midrange; 0.0 when the midrange is
    /// (near) zero to avoid a blow-up.
    #[must_use]
    pub fn volatility(&self) -> f64 {
        let mid = self.midrange();
        if mid.abs() < 0.0001 {
            0.0
        } else {
            (self.range() / mid) * 100.0
        }
    }
    /// Empties the window; the configured size is preserved.
    pub fn reset(&mut self) {
        self.values = [0.0; 128];
        self.head = 0;
        self.count = 0;
        self.current_min = f64::INFINITY;
        self.current_max = f64::NEG_INFINITY;
    }
}
/// Counts operations against a fixed timeout threshold and reports
/// timeout/success rates.
#[derive(Debug, Clone)]
pub struct TimeoutTracker {
    timeout_us: u64,
    total: u64,
    timed_out: u64,
    last_duration_us: u64,
    max_duration_us: u64,
}
impl Default for TimeoutTracker {
    /// One-second default threshold.
    fn default() -> Self {
        Self::new(1_000_000)
    }
}
impl TimeoutTracker {
    /// Creates a tracker with a `timeout_us` threshold (floored at 1µs).
    #[must_use]
    pub fn new(timeout_us: u64) -> Self {
        Self {
            timeout_us: timeout_us.max(1),
            total: 0,
            timed_out: 0,
            last_duration_us: 0,
            max_duration_us: 0,
        }
    }
    /// Preset: 5s network timeout.
    #[must_use]
    pub fn for_network() -> Self {
        Self::new(5_000_000)
    }
    /// Preset: 30s database timeout.
    #[must_use]
    pub fn for_database() -> Self {
        Self::new(30_000_000)
    }
    /// Preset: 100ms fast-path timeout.
    #[must_use]
    pub fn for_fast() -> Self {
        Self::new(100_000)
    }
    /// Records an operation that took `duration_us`; strictly exceeding the
    /// threshold counts as a timeout.
    pub fn record(&mut self, duration_us: u64) {
        self.total += 1;
        self.last_duration_us = duration_us;
        self.max_duration_us = self.max_duration_us.max(duration_us);
        if duration_us > self.timeout_us {
            self.timed_out += 1;
        }
    }
    /// Operations recorded.
    #[must_use]
    pub fn total(&self) -> u64 {
        self.total
    }
    /// Operations that exceeded the threshold.
    #[must_use]
    pub fn timed_out(&self) -> u64 {
        self.timed_out
    }
    /// Timeouts as a percentage of all operations; 0.0 with no traffic.
    #[must_use]
    pub fn timeout_rate(&self) -> f64 {
        match self.total {
            0 => 0.0,
            n => (self.timed_out as f64 / n as f64) * 100.0,
        }
    }
    /// Complement of `timeout_rate`.
    #[must_use]
    pub fn success_rate(&self) -> f64 {
        100.0 - self.timeout_rate()
    }
    /// True when the timeout rate is within `max_timeout_rate` percent.
    #[must_use]
    pub fn is_healthy(&self, max_timeout_rate: f64) -> bool {
        self.timeout_rate() <= max_timeout_rate
    }
    /// Longest duration ever recorded.
    #[must_use]
    pub fn max_duration_us(&self) -> u64 {
        self.max_duration_us
    }
    /// Configured threshold.
    #[must_use]
    pub fn timeout_threshold_us(&self) -> u64 {
        self.timeout_us
    }
    /// Clears all statistics; the threshold is preserved.
    pub fn reset(&mut self) {
        self.total = 0;
        self.timed_out = 0;
        self.last_duration_us = 0;
        self.max_duration_us = 0;
    }
}
/// Retry accounting with capped exponential backoff:
/// delay = base * 2^current_retry, clamped to `max_delay_ms`.
#[derive(Debug, Clone)]
pub struct RetryTracker {
    max_retries: u32,
    base_delay_ms: u64,
    max_delay_ms: u64,
    total_attempts: u64,
    total_retries: u64,
    successful_retries: u64,
    current_retry: u32,
}
impl Default for RetryTracker {
    fn default() -> Self {
        Self::new(3, 100, 10000)
    }
}
impl RetryTracker {
    /// Creates a tracker; the base delay is floored at 1ms and the max delay
    /// at the base delay.
    #[must_use]
    pub fn new(max_retries: u32, base_delay_ms: u64, max_delay_ms: u64) -> Self {
        Self {
            max_retries,
            base_delay_ms: base_delay_ms.max(1),
            max_delay_ms: max_delay_ms.max(base_delay_ms),
            total_attempts: 0,
            total_retries: 0,
            successful_retries: 0,
            current_retry: 0,
        }
    }
    /// Preset: 3 retries, 100ms base, 10s cap.
    #[must_use]
    pub fn for_api() -> Self {
        Self::new(3, 100, 10000)
    }
    /// Preset: 5 retries, 1s base, 30s cap.
    #[must_use]
    pub fn for_network() -> Self {
        Self::new(5, 1000, 30000)
    }
    /// Records a fresh attempt.
    pub fn attempt(&mut self) {
        self.total_attempts += 1;
    }
    /// Records a retry; the backoff level saturates at `max_retries`.
    pub fn retry(&mut self) {
        self.total_retries += 1;
        if self.current_retry < self.max_retries {
            self.current_retry += 1;
        }
    }
    /// Records a success, which resets the backoff level; a success after
    /// at least one retry counts as a successful retry.
    pub fn success(&mut self) {
        if self.current_retry > 0 {
            self.successful_retries += 1;
        }
        self.current_retry = 0;
    }
    /// Backoff delay before the next attempt, in milliseconds.
    ///
    /// Fix: uses `checked_shl` + `saturating_mul` so a `max_retries >= 64`
    /// configuration can no longer overflow the `1 << current_retry` shift
    /// (panic in debug, masked-shift wraparound in release) or the multiply;
    /// oversized results simply clamp to `max_delay_ms`.
    #[must_use]
    pub fn next_delay_ms(&self) -> u64 {
        let factor = 1u64.checked_shl(self.current_retry).unwrap_or(u64::MAX);
        self.base_delay_ms
            .saturating_mul(factor)
            .min(self.max_delay_ms)
    }
    /// True once the backoff level has reached `max_retries`.
    #[must_use]
    pub fn retries_exhausted(&self) -> bool {
        self.current_retry >= self.max_retries
    }
    /// Retries per attempt, as a percentage; 0.0 with no attempts.
    #[must_use]
    pub fn retry_rate(&self) -> f64 {
        if self.total_attempts == 0 {
            0.0
        } else {
            (self.total_retries as f64 / self.total_attempts as f64) * 100.0
        }
    }
    /// Share of retries that eventually succeeded, as a percentage.
    #[must_use]
    pub fn successful_retry_rate(&self) -> f64 {
        if self.total_retries == 0 {
            0.0
        } else {
            (self.successful_retries as f64 / self.total_retries as f64) * 100.0
        }
    }
    /// Current backoff level.
    #[must_use]
    pub fn current_retry(&self) -> u32 {
        self.current_retry
    }
    /// Clears all counters; the configuration is preserved.
    pub fn reset(&mut self) {
        self.total_attempts = 0;
        self.total_retries = 0;
        self.successful_retries = 0;
        self.current_retry = 0;
    }
}
/// Rotating time-slot scheduler: time is divided into `num_slots` slots of
/// `slot_duration_us` each, and executions are counted per slot.
#[derive(Debug, Clone)]
pub struct ScheduleSlot {
    slot_duration_us: u64,
    num_slots: usize,
    current_slot: usize,
    // Start time of the current slot; 0 means "not yet anchored".
    slot_start_us: u64,
    executions_per_slot: [u64; 16],
}
impl Default for ScheduleSlot {
    /// Ten one-second slots.
    fn default() -> Self {
        Self::new(1_000_000, 10)
    }
}
impl ScheduleSlot {
    /// Creates a scheduler; the duration is floored at 1µs and the slot
    /// count clamped to 1..=16.
    #[must_use]
    pub fn new(slot_duration_us: u64, num_slots: usize) -> Self {
        Self {
            slot_duration_us: slot_duration_us.max(1),
            num_slots: num_slots.clamp(1, 16),
            current_slot: 0,
            slot_start_us: 0,
            executions_per_slot: [0; 16],
        }
    }
    /// Preset: ten one-second slots.
    #[must_use]
    pub fn for_round_robin() -> Self {
        Self::new(1_000_000, 10)
    }
    /// Preset: five one-minute slots.
    #[must_use]
    pub fn for_minute() -> Self {
        Self::new(60_000_000, 5)
    }
    /// Advances the rotation to `now_us`; the first call only anchors the
    /// clock.
    ///
    /// Fix: the slot anchor now advances by whole slot durations
    /// (`slots_passed * slot_duration_us`) instead of being reset to
    /// `now_us`. Previously the partial-slot remainder was discarded on
    /// every rotation, so slot boundaries drifted later over time.
    pub fn update(&mut self, now_us: u64) {
        if self.slot_start_us == 0 {
            self.slot_start_us = now_us;
            return;
        }
        let elapsed = now_us.saturating_sub(self.slot_start_us);
        let slots_passed = elapsed / self.slot_duration_us;
        if slots_passed > 0 {
            // Modulo in u64 first so huge gaps can't truncate on 32-bit.
            self.current_slot =
                ((self.current_slot as u64 + slots_passed) % self.num_slots as u64) as usize;
            self.slot_start_us = self
                .slot_start_us
                .saturating_add(slots_passed.saturating_mul(self.slot_duration_us));
        }
    }
    /// Records one execution in the slot active at `now_us`.
    pub fn execute(&mut self, now_us: u64) {
        self.update(now_us);
        if self.current_slot < 16 {
            self.executions_per_slot[self.current_slot] += 1;
        }
    }
    /// Index of the active slot.
    #[must_use]
    pub fn current_slot(&self) -> usize {
        self.current_slot
    }
    /// Configured slot count.
    #[must_use]
    pub fn num_slots(&self) -> usize {
        self.num_slots
    }
    /// Executions recorded in `slot`; 0 for out-of-range indices.
    #[must_use]
    pub fn executions(&self, slot: usize) -> u64 {
        if slot < 16 {
            self.executions_per_slot[slot]
        } else {
            0
        }
    }
    /// Executions summed over all active slots.
    #[must_use]
    pub fn total_executions(&self) -> u64 {
        self.executions_per_slot[..self.num_slots].iter().sum()
    }
    /// True when every slot's count is within `threshold` percent of the
    /// even split (vacuously true when nothing has executed).
    #[must_use]
    pub fn is_balanced(&self, threshold: f64) -> bool {
        let total = self.total_executions();
        if total == 0 {
            return true;
        }
        let expected = total as f64 / self.num_slots as f64;
        for i in 0..self.num_slots {
            let diff = (self.executions_per_slot[i] as f64 - expected).abs();
            if diff / expected * 100.0 > threshold {
                return false;
            }
        }
        true
    }
    /// Clears rotation state and counts; the configuration is preserved.
    pub fn reset(&mut self) {
        self.current_slot = 0;
        self.slot_start_us = 0;
        self.executions_per_slot = [0; 16];
    }
}
/// Enforces a minimum interval between actions and counts blocked attempts.
#[derive(Debug, Clone)]
pub struct CooldownTimer {
    cooldown_us: u64,
    // Timestamp of the last action; 0 means "never acted" and is always ready.
    last_action_us: u64,
    total_actions: u64,
    blocked_attempts: u64,
}
impl Default for CooldownTimer {
    /// One-second default cooldown.
    fn default() -> Self {
        Self::new(1_000_000)
    }
}
impl CooldownTimer {
    /// Creates a timer with the given cooldown (floored at 1µs).
    #[must_use]
    pub fn new(cooldown_us: u64) -> Self {
        Self {
            cooldown_us: cooldown_us.max(1),
            last_action_us: 0,
            total_actions: 0,
            blocked_attempts: 0,
        }
    }
    /// 100ms cooldown preset.
    #[must_use]
    pub fn for_fast() -> Self {
        Self::new(100_000)
    }
    /// 1s cooldown preset.
    #[must_use]
    pub fn for_normal() -> Self {
        Self::new(1_000_000)
    }
    /// 10s cooldown preset.
    #[must_use]
    pub fn for_slow() -> Self {
        Self::new(10_000_000)
    }
    /// True when an action is permitted at `now_us` (always true before the
    /// first action).
    #[must_use]
    pub fn is_ready(&self, now_us: u64) -> bool {
        self.last_action_us == 0
            || now_us.saturating_sub(self.last_action_us) >= self.cooldown_us
    }
    /// Performs the action if the cooldown has elapsed; otherwise counts a
    /// blocked attempt and returns `false`.
    pub fn try_action(&mut self, now_us: u64) -> bool {
        let ready = self.is_ready(now_us);
        if ready {
            self.last_action_us = now_us;
            self.total_actions += 1;
        } else {
            self.blocked_attempts += 1;
        }
        ready
    }
    /// Performs the action unconditionally, restarting the cooldown.
    pub fn force_action(&mut self, now_us: u64) {
        self.last_action_us = now_us;
        self.total_actions += 1;
    }
    /// Microseconds until the next action is allowed (0 when ready).
    #[must_use]
    pub fn remaining_us(&self, now_us: u64) -> u64 {
        if self.is_ready(now_us) {
            return 0;
        }
        self.cooldown_us
            .saturating_sub(now_us.saturating_sub(self.last_action_us))
    }
    /// Configured cooldown.
    #[must_use]
    pub fn cooldown_us(&self) -> u64 {
        self.cooldown_us
    }
    /// Actions performed (including forced ones).
    #[must_use]
    pub fn total_actions(&self) -> u64 {
        self.total_actions
    }
    /// Attempts rejected by the cooldown.
    #[must_use]
    pub fn blocked_attempts(&self) -> u64 {
        self.blocked_attempts
    }
    /// Blocked attempts as a percentage of all attempts.
    #[must_use]
    pub fn block_rate(&self) -> f64 {
        match self.total_actions + self.blocked_attempts {
            0 => 0.0,
            total => (self.blocked_attempts as f64 / total as f64) * 100.0,
        }
    }
    /// Clears history; the cooldown length is preserved.
    pub fn reset(&mut self) {
        self.last_action_us = 0;
        self.total_actions = 0;
        self.blocked_attempts = 0;
    }
}
/// Tracks backpressure signals among operations, including the longest run
/// of consecutive signals.
#[derive(Debug, Clone)]
pub struct BackpressureMonitor {
    signals: u64,
    total_ops: u64,
    consecutive: u32,
    max_consecutive: u32,
    last_signal_us: u64,
}
impl Default for BackpressureMonitor {
    fn default() -> Self {
        Self::new()
    }
}
impl BackpressureMonitor {
    /// Creates an empty monitor.
    #[must_use]
    pub fn new() -> Self {
        Self {
            signals: 0,
            total_ops: 0,
            consecutive: 0,
            max_consecutive: 0,
            last_signal_us: 0,
        }
    }
    /// Records an operation that completed without backpressure, ending any
    /// streak of signals.
    pub fn success(&mut self) {
        self.total_ops += 1;
        self.consecutive = 0;
    }
    /// Records a backpressure signal observed at `now_us`.
    pub fn signal(&mut self, now_us: u64) {
        self.signals += 1;
        self.total_ops += 1;
        self.consecutive += 1;
        self.last_signal_us = now_us;
        self.max_consecutive = self.max_consecutive.max(self.consecutive);
    }
    /// Signals as a percentage of all operations; 0.0 with no traffic.
    #[must_use]
    pub fn pressure_rate(&self) -> f64 {
        match self.total_ops {
            0 => 0.0,
            n => (self.signals as f64 / n as f64) * 100.0,
        }
    }
    /// True while the current signal streak has reached `threshold`.
    #[must_use]
    pub fn is_under_pressure(&self, threshold: u32) -> bool {
        self.consecutive >= threshold
    }
    /// Length of the current signal streak.
    #[must_use]
    pub fn consecutive(&self) -> u32 {
        self.consecutive
    }
    /// Longest signal streak ever observed.
    #[must_use]
    pub fn max_consecutive(&self) -> u32 {
        self.max_consecutive
    }
    /// Total signals observed.
    #[must_use]
    pub fn total_signals(&self) -> u64 {
        self.signals
    }
    /// True when the overall pressure rate is within `max_rate` percent.
    #[must_use]
    pub fn is_healthy(&self, max_rate: f64) -> bool {
        self.pressure_rate() <= max_rate
    }
    /// Clears all state.
    pub fn reset(&mut self) {
        self.signals = 0;
        self.total_ops = 0;
        self.consecutive = 0;
        self.max_consecutive = 0;
        self.last_signal_us = 0;
    }
}
/// Utilization tracking against a fixed capacity, with peak, running
/// average, and last step-over-step growth rate.
#[derive(Debug, Clone)]
pub struct CapacityPlanner {
    capacity: u64,
    current: u64,
    peak: u64,
    samples: u32,
    sum_utilization: f64,
    growth_rate: f64,
}
impl Default for CapacityPlanner {
    fn default() -> Self {
        Self::new(1000)
    }
}
impl CapacityPlanner {
    /// Creates a planner for the given capacity (floored at 1).
    #[must_use]
    pub fn new(capacity: u64) -> Self {
        Self {
            capacity: capacity.max(1),
            current: 0,
            peak: 0,
            samples: 0,
            sum_utilization: 0.0,
            growth_rate: 0.0,
        }
    }
    /// Preset: 1000 connections.
    #[must_use]
    pub fn for_connections() -> Self {
        Self::new(1000)
    }
    /// Preset: 100 storage units.
    #[must_use]
    pub fn for_storage() -> Self {
        Self::new(100)
    }
    /// Records a new usage sample, updating peak, running average, and the
    /// growth rate relative to the previous (non-zero) sample.
    pub fn update(&mut self, current: u64) {
        let previous = self.current;
        self.current = current;
        self.peak = self.peak.max(current);
        self.samples += 1;
        // Running sum uses the *new* utilization.
        self.sum_utilization += self.utilization();
        if previous > 0 {
            self.growth_rate = (current as f64 - previous as f64) / previous as f64;
        }
    }
    /// Current usage as a percentage of capacity.
    #[must_use]
    pub fn utilization(&self) -> f64 {
        (self.current as f64 / self.capacity as f64) * 100.0
    }
    /// Highest usage ever seen, as a percentage of capacity.
    #[must_use]
    pub fn peak_utilization(&self) -> f64 {
        (self.peak as f64 / self.capacity as f64) * 100.0
    }
    /// Mean utilization over all samples; 0.0 before the first sample.
    #[must_use]
    pub fn avg_utilization(&self) -> f64 {
        match self.samples {
            0 => 0.0,
            n => self.sum_utilization / n as f64,
        }
    }
    /// Capacity not currently in use.
    #[must_use]
    pub fn remaining(&self) -> u64 {
        self.capacity.saturating_sub(self.current)
    }
    /// True when current utilization meets `threshold` percent.
    #[must_use]
    pub fn at_risk(&self, threshold: f64) -> bool {
        self.utilization() >= threshold
    }
    /// Fractional change between the last two samples (e.g. 0.5 = +50%).
    #[must_use]
    pub fn growth_rate(&self) -> f64 {
        self.growth_rate
    }
    /// Clears all samples; capacity is preserved.
    pub fn reset(&mut self) {
        self.current = 0;
        self.peak = 0;
        self.samples = 0;
        self.sum_utilization = 0.0;
        self.growth_rate = 0.0;
    }
}
/// Measures how far successive timestamps drift from an expected interval
/// (positive drift = late, negative = early).
#[derive(Debug, Clone)]
pub struct DriftTracker {
    expected_interval_us: u64,
    // Previous timestamp; 0 means "no anchor yet".
    last_timestamp_us: u64,
    total_drift_us: i64,
    samples: u64,
    max_drift_us: i64,
    min_drift_us: i64,
}
impl Default for DriftTracker {
    /// One-second expected interval.
    fn default() -> Self {
        Self::new(1_000_000)
    }
}
impl DriftTracker {
    /// Creates a tracker for the given expected interval (floored at 1µs).
    #[must_use]
    pub fn new(expected_interval_us: u64) -> Self {
        Self {
            expected_interval_us: expected_interval_us.max(1),
            last_timestamp_us: 0,
            total_drift_us: 0,
            samples: 0,
            // Sentinels so the first sample always wins both comparisons.
            max_drift_us: i64::MIN,
            min_drift_us: i64::MAX,
        }
    }
    /// Preset: ~16.667ms frame interval (60 fps).
    #[must_use]
    pub fn for_60fps() -> Self {
        Self::new(16_667)
    }
    /// Preset: one-second heartbeat.
    #[must_use]
    pub fn for_heartbeat() -> Self {
        Self::new(1_000_000)
    }
    /// Records a tick at `now_us`; the first call only anchors the clock.
    pub fn record(&mut self, now_us: u64) {
        if self.last_timestamp_us == 0 {
            self.last_timestamp_us = now_us;
            return;
        }
        let interval = now_us.saturating_sub(self.last_timestamp_us);
        let drift = interval as i64 - self.expected_interval_us as i64;
        self.total_drift_us += drift;
        self.samples += 1;
        self.max_drift_us = self.max_drift_us.max(drift);
        self.min_drift_us = self.min_drift_us.min(drift);
        self.last_timestamp_us = now_us;
    }
    /// Mean drift per interval; 0.0 with no samples.
    #[must_use]
    pub fn avg_drift_us(&self) -> f64 {
        match self.samples {
            0 => 0.0,
            n => self.total_drift_us as f64 / n as f64,
        }
    }
    /// Largest drift observed; 0 with no samples.
    #[must_use]
    pub fn max_drift_us(&self) -> i64 {
        match self.samples {
            0 => 0,
            _ => self.max_drift_us,
        }
    }
    /// Smallest drift observed; 0 with no samples.
    #[must_use]
    pub fn min_drift_us(&self) -> i64 {
        match self.samples {
            0 => 0,
            _ => self.min_drift_us,
        }
    }
    /// True when the mean drift magnitude is within `tolerance_us`.
    #[must_use]
    pub fn is_stable(&self, tolerance_us: i64) -> bool {
        self.avg_drift_us().abs() < tolerance_us as f64
    }
    /// Spread between the largest and smallest drift; 0 with no samples.
    #[must_use]
    pub fn drift_range_us(&self) -> i64 {
        match self.samples {
            0 => 0,
            _ => self.max_drift_us - self.min_drift_us,
        }
    }
    /// Number of intervals measured.
    #[must_use]
    pub fn samples(&self) -> u64 {
        self.samples
    }
    /// Clears all measurements; the expected interval is preserved.
    pub fn reset(&mut self) {
        self.last_timestamp_us = 0;
        self.total_drift_us = 0;
        self.samples = 0;
        self.max_drift_us = i64::MIN;
        self.min_drift_us = i64::MAX;
    }
}
/// Counting-semaphore bookkeeping: permits in use, peak, and contention.
/// Pure accounting — no actual synchronization.
#[derive(Debug, Clone)]
pub struct SemaphoreTracker {
    total_permits: u32,
    acquired: u32,
    peak_acquired: u32,
    acquisitions: u64,
    releases: u64,
    contentions: u64,
}
impl Default for SemaphoreTracker {
    fn default() -> Self {
        Self::new(10)
    }
}
impl SemaphoreTracker {
    /// Creates a tracker with `total_permits` permits (floored at 1).
    #[must_use]
    pub fn new(total_permits: u32) -> Self {
        Self {
            total_permits: total_permits.max(1),
            acquired: 0,
            peak_acquired: 0,
            acquisitions: 0,
            releases: 0,
            contentions: 0,
        }
    }
    /// Preset: 20 database permits.
    #[must_use]
    pub fn for_database() -> Self {
        Self::new(20)
    }
    /// Preset: 8 worker permits.
    #[must_use]
    pub fn for_workers() -> Self {
        Self::new(8)
    }
    /// Takes a permit if one is free; a failed attempt counts as contention.
    pub fn try_acquire(&mut self) -> bool {
        if self.acquired >= self.total_permits {
            self.contentions += 1;
            return false;
        }
        self.acquired += 1;
        self.acquisitions += 1;
        self.peak_acquired = self.peak_acquired.max(self.acquired);
        true
    }
    /// Returns a permit; no-op when none are held.
    pub fn release(&mut self) {
        if self.acquired == 0 {
            return;
        }
        self.acquired -= 1;
        self.releases += 1;
    }
    /// Permits currently free.
    #[must_use]
    pub fn available(&self) -> u32 {
        self.total_permits.saturating_sub(self.acquired)
    }
    /// Permits in use as a percentage of the total.
    #[must_use]
    pub fn utilization(&self) -> f64 {
        (self.acquired as f64 / self.total_permits as f64) * 100.0
    }
    /// Highest simultaneous usage ever seen, as a percentage of the total.
    #[must_use]
    pub fn peak_utilization(&self) -> f64 {
        (self.peak_acquired as f64 / self.total_permits as f64) * 100.0
    }
    /// Failed attempts as a percentage of all attempts; 0.0 with no traffic.
    #[must_use]
    pub fn contention_rate(&self) -> f64 {
        match self.acquisitions + self.contentions {
            0 => 0.0,
            total => (self.contentions as f64 / total as f64) * 100.0,
        }
    }
    /// True when contention is within `max_contention` percent.
    #[must_use]
    pub fn is_healthy(&self, max_contention: f64) -> bool {
        self.contention_rate() <= max_contention
    }
    /// Configured permit count.
    #[must_use]
    pub fn total_permits(&self) -> u32 {
        self.total_permits
    }
    /// Clears usage statistics; the permit count is preserved.
    pub fn reset(&mut self) {
        self.acquired = 0;
        self.peak_acquired = 0;
        self.acquisitions = 0;
        self.releases = 0;
        self.contentions = 0;
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread;
// --- PerfTracer: construction, tracing, budgets, summaries ---

#[test]
fn test_perf_tracer_new() {
    let tracer = PerfTracer::new();
    assert!(tracer.stats.is_empty());
}

#[test]
fn test_perf_tracer_trace() {
    let mut tracer = PerfTracer::new();
    let result = tracer.trace("test_op", || {
        thread::sleep(Duration::from_micros(100));
        42
    });
    // trace() passes the closure's return value through.
    assert_eq!(result, 42);
    assert!(tracer.get_stats("test_op").is_some());
    assert_eq!(tracer.get_stats("test_op").unwrap().count, 1);
}

#[test]
fn test_perf_tracer_multiple_traces() {
    let mut tracer = PerfTracer::new();
    for _ in 0..5 {
        tracer.trace("test_op", || {});
    }
    // Repeated traces under one name accumulate into a single entry.
    let stats = tracer.get_stats("test_op").unwrap();
    assert_eq!(stats.count, 5);
}

#[test]
fn test_perf_tracer_budget_exceeded() {
    let mut tracer = PerfTracer::new();
    // 1ms of sleep against a 1us budget guarantees a violation.
    tracer.trace_with_budget("slow_op", 1, || {
        thread::sleep(Duration::from_millis(1));
    });
    let stats = tracer.get_stats("slow_op").unwrap();
    assert_eq!(stats.budget_violations, 1);
}

#[test]
fn test_perf_tracer_summary() {
    let mut tracer = PerfTracer::new();
    tracer.trace("op1", || {});
    tracer.trace("op2", || {});
    let summary = tracer.summary();
    assert!(summary.contains("op1"));
    assert!(summary.contains("op2"));
}

#[test]
fn test_trace_stats_avg_duration() {
    let mut stats = TraceStats::new(Duration::from_micros(100), 1000, false);
    stats.update(Duration::from_micros(200), false);
    stats.update(Duration::from_micros(300), false);
    let avg = stats.avg_duration();
    // (100 + 200 + 300) / 3 = 200us.
    assert_eq!(avg.as_micros(), 200);
}

#[test]
fn test_trace_stats_cv() {
    let mut stats = TraceStats::new(Duration::from_micros(100), 1000, false);
    stats.update(Duration::from_micros(100), false);
    stats.update(Duration::from_micros(100), false);
    let cv = stats.cv_percent();
    // Identical samples => near-zero coefficient of variation.
    assert!(cv < 5.0);
}

#[test]
fn test_escalation_thresholds() {
    let mut tracer = PerfTracer::with_thresholds(EscalationThresholds {
        cv_percent: 10.0,
        efficiency_percent: 50.0,
        max_traces_per_sec: 100,
    });
    // Two wildly different durations over a tiny budget should trip both the
    // CV and efficiency thresholds configured above.
    tracer.trace_with_budget("variable_op", 10, || {
        thread::sleep(Duration::from_micros(100));
    });
    tracer.trace_with_budget("variable_op", 10, || {
        thread::sleep(Duration::from_micros(500));
    });
    assert!(tracer.should_escalate("variable_op"));
}

#[test]
fn test_export_renacer_format() {
    let mut tracer = PerfTracer::new();
    tracer.trace("test_op", || {});
    let export = tracer.export_renacer_format();
    assert!(export.contains("TRACE test_op"));
    assert!(export.contains("count=1"));
}

#[test]
fn test_clear() {
    let mut tracer = PerfTracer::new();
    tracer.trace("op1", || {});
    tracer.trace("op2", || {});
    tracer.clear();
    assert!(tracer.stats.is_empty());
}

#[test]
fn test_perf_tracer_default() {
    let tracer = PerfTracer::default();
    assert!(tracer.stats.is_empty());
}

#[test]
fn test_all_stats() {
    let mut tracer = PerfTracer::new();
    tracer.trace("op1", || {});
    tracer.trace("op2", || {});
    let all = tracer.all_stats();
    assert_eq!(all.len(), 2);
    assert!(all.contains_key("op1"));
    assert!(all.contains_key("op2"));
}

#[test]
fn test_get_stats_nonexistent() {
    let tracer = PerfTracer::new();
    assert!(tracer.get_stats("nonexistent").is_none());
}

#[test]
fn test_should_escalate_nonexistent() {
    let tracer = PerfTracer::new();
    assert!(!tracer.should_escalate("nonexistent"));
}
// --- TraceStats edge cases, TraceEvent, EscalationThresholds, history ---

#[test]
fn test_trace_stats_efficiency_percent() {
    // 500us used of a 1000us budget: efficiency in (50%, 100%].
    let stats = TraceStats::new(Duration::from_micros(500), 1000, false);
    let eff = stats.efficiency_percent();
    assert!(eff <= 100.0 && eff > 50.0);
}

#[test]
fn test_trace_stats_efficiency_zero_budget() {
    // Zero budget must not divide by zero; treated as fully efficient.
    let stats = TraceStats::new(Duration::from_micros(500), 0, false);
    assert_eq!(stats.efficiency_percent(), 100.0);
}

#[test]
fn test_trace_stats_avg_duration_zero_count() {
    let stats = TraceStats::default();
    assert_eq!(stats.avg_duration(), Duration::ZERO);
}

#[test]
fn test_trace_stats_cv_single_sample() {
    // A single sample has no spread.
    let stats = TraceStats::new(Duration::from_micros(100), 1000, false);
    assert_eq!(stats.cv_percent(), 0.0);
}

#[test]
fn test_trace_stats_cv_zero_avg() {
    // Zero average must not divide by zero.
    let stats = TraceStats::new(Duration::ZERO, 1000, false);
    assert_eq!(stats.cv_percent(), 0.0);
}

#[test]
fn test_trace_event_debug() {
    let event = TraceEvent {
        name: "test".to_string(),
        duration: Duration::from_micros(100),
        timestamp_us: 1000,
        budget_exceeded: false,
        budget_us: Some(200),
    };
    let debug = format!("{:?}", event);
    assert!(debug.contains("TraceEvent"));
    assert!(debug.contains("test"));
}

#[test]
fn test_trace_event_clone() {
    let event = TraceEvent {
        name: "test".to_string(),
        duration: Duration::from_micros(100),
        timestamp_us: 1000,
        budget_exceeded: true,
        budget_us: Some(50),
    };
    let cloned = event.clone();
    assert_eq!(cloned.name, "test");
    assert!(cloned.budget_exceeded);
}

#[test]
fn test_trace_stats_clone() {
    // `new` records the initial sample, hence count == 1.
    let stats = TraceStats::new(Duration::from_micros(100), 1000, false);
    let cloned = stats.clone();
    assert_eq!(cloned.count, 1);
}

#[test]
fn test_trace_stats_debug() {
    let stats = TraceStats::new(Duration::from_micros(100), 1000, false);
    let debug = format!("{:?}", stats);
    assert!(debug.contains("TraceStats"));
}

#[test]
fn test_escalation_thresholds_default() {
    let thresholds = EscalationThresholds::default();
    assert_eq!(thresholds.cv_percent, 15.0);
    assert_eq!(thresholds.efficiency_percent, 25.0);
    assert_eq!(thresholds.max_traces_per_sec, 100);
}

#[test]
fn test_escalation_thresholds_clone() {
    let thresholds = EscalationThresholds::default();
    let cloned = thresholds.clone();
    assert_eq!(cloned.cv_percent, 15.0);
}

#[test]
fn test_escalation_thresholds_copy() {
    let thresholds = EscalationThresholds::default();
    // Assignment is a copy, not a move — only compiles if the type is Copy.
    let copied = thresholds;
    assert_eq!(copied.cv_percent, 15.0);
}

#[test]
fn test_escalation_thresholds_debug() {
    let thresholds = EscalationThresholds::default();
    let debug = format!("{:?}", thresholds);
    assert!(debug.contains("EscalationThresholds"));
}

#[test]
fn test_perf_tracer_debug() {
    let tracer = PerfTracer::new();
    let debug = format!("{:?}", tracer);
    assert!(debug.contains("PerfTracer"));
}

#[test]
fn test_trace_stats_min_max() {
    let mut stats = TraceStats::new(Duration::from_micros(100), 1000, false);
    stats.update(Duration::from_micros(50), false);
    stats.update(Duration::from_micros(200), false);
    assert_eq!(stats.min_duration, Duration::from_micros(50));
    assert_eq!(stats.max_duration, Duration::from_micros(200));
}

#[test]
fn test_trace_stats_budget_violations() {
    // Initial sample (exceeded=true) plus one exceeded update => 2 violations.
    let mut stats = TraceStats::new(Duration::from_micros(100), 50, true);
    stats.update(Duration::from_micros(100), true);
    stats.update(Duration::from_micros(30), false);
    assert_eq!(stats.budget_violations, 2);
}

#[test]
fn test_recent_events_ring_buffer() {
    let mut tracer = PerfTracer::new();
    for i in 0..150 {
        tracer.trace(&format!("op_{}", i), || {});
    }
    // Recent-event history is capped at 100 entries.
    assert!(tracer.recent_events.len() <= 100);
}

#[test]
fn test_rate_limiting_reset() {
    let mut tracer = PerfTracer::new();
    tracer.trace("op1", || {});
    assert_eq!(tracer.traces_this_second, 1);
    tracer.trace("op2", || {});
    assert_eq!(tracer.traces_this_second, 2);
}
// --- TimingGuard: global enable flag and preset budgets ---
// NOTE(review): these tests mutate the process-global TRACE_ENABLED flag and
// can race with each other (and with the BrickProfiler tests) when the
// harness runs tests in parallel — consider `--test-threads=1` or a shared
// lock if they ever flake.

#[test]
fn test_timing_guard_disabled_by_default() {
    disable_tracing();
    assert!(!is_tracing_enabled());
    // With tracing off, the guard records no start time (zero-cost path).
    let guard = TimingGuard::new("test", 1000);
    assert!(guard.start.is_none());
}

#[test]
fn test_timing_guard_enabled() {
    enable_tracing();
    assert!(is_tracing_enabled());
    let guard = TimingGuard::new("test", 1000);
    assert!(guard.start.is_some());
    // Dropping the guard emits the timing line (Drop impl).
    drop(guard);
    disable_tracing();
    assert!(!is_tracing_enabled());
}

#[test]
fn test_timing_guard_with_default_budget() {
    let guard = TimingGuard::with_default_budget("test");
    assert_eq!(guard.budget_us, 1000);
}

#[test]
fn test_timing_guard_render() {
    // 16ms: one 60fps frame.
    let guard = TimingGuard::render("test");
    assert_eq!(guard.budget_us, 16_000);
}

#[test]
fn test_timing_guard_collect() {
    let guard = TimingGuard::collect("test");
    assert_eq!(guard.budget_us, 100_000);
}
// --- SimdStats: running sum/min/max and derived statistics ---

#[test]
fn test_simd_stats_new() {
    let stats = SimdStats::new();
    assert_eq!(stats.count, 0);
    assert_eq!(stats.sum, 0.0);
    assert_eq!(stats.mean(), 0.0);
}

#[test]
fn test_simd_stats_update() {
    let mut stats = SimdStats::new();
    stats.update(10.0);
    stats.update(20.0);
    stats.update(30.0);
    assert_eq!(stats.count, 3);
    assert_eq!(stats.sum, 60.0);
    assert_eq!(stats.mean(), 20.0);
    assert_eq!(stats.min, 10.0);
    assert_eq!(stats.max, 30.0);
}

#[test]
fn test_simd_stats_variance() {
    let mut stats = SimdStats::new();
    for &v in &[2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0] {
        stats.update(v);
    }
    let var = stats.variance();
    // Sample variance of this set: mean 5, sum of squared deviations 32,
    // 32 / (8 - 1) ≈ 4.571.
    assert!((var - 4.571).abs() < 0.01);
}

#[test]
fn test_simd_stats_std_dev() {
    let mut stats = SimdStats::new();
    for &v in &[2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0] {
        stats.update(v);
    }
    let std = stats.std_dev();
    // sqrt(4.571) ≈ 2.14.
    assert!(std > 2.0 && std < 2.2);
}

#[test]
fn test_simd_stats_cv_percent() {
    let mut stats = SimdStats::new();
    for &v in &[10.0, 10.0, 10.0, 10.0] {
        stats.update(v);
    }
    // No spread => zero coefficient of variation.
    assert_eq!(stats.cv_percent(), 0.0);
    let mut stats2 = SimdStats::new();
    for &v in &[10.0, 20.0, 30.0, 40.0] {
        stats2.update(v);
    }
    assert!(stats2.cv_percent() > 0.0);
}

#[test]
fn test_simd_stats_reset() {
    let mut stats = SimdStats::new();
    stats.update(100.0);
    stats.update(200.0);
    assert_eq!(stats.count, 2);
    stats.reset();
    assert_eq!(stats.count, 0);
    assert_eq!(stats.sum, 0.0);
}

#[test]
fn test_simd_stats_single_sample_variance() {
    // One sample: variance is defined as 0, not NaN.
    let mut stats = SimdStats::new();
    stats.update(42.0);
    assert_eq!(stats.variance(), 0.0);
}

#[test]
fn test_simd_stats_cv_zero_mean() {
    // Zero mean must not divide by zero.
    let mut stats = SimdStats::new();
    stats.update(0.0);
    stats.update(0.0);
    assert_eq!(stats.cv_percent(), 0.0);
}

#[test]
fn test_simd_stats_default() {
    let stats = SimdStats::default();
    assert_eq!(stats.count, 0);
}

#[test]
fn test_simd_stats_clone() {
    let mut stats = SimdStats::new();
    stats.update(42.0);
    let cloned = stats.clone();
    assert_eq!(cloned.count, 1);
    assert_eq!(cloned.sum, 42.0);
}

#[test]
fn test_simd_stats_debug() {
    let stats = SimdStats::new();
    let debug = format!("{:?}", stats);
    assert!(debug.contains("SimdStats"));
}

#[test]
fn test_simd_stats_cache_aligned() {
    // Matches the #[repr(C, align(64))] on SimdStats: one full cache line.
    assert_eq!(std::mem::align_of::<SimdStats>(), 64);
}
// --- BrickType: per-category budgets and CV thresholds ---

#[test]
fn f_brick_001_default_budgets_nonzero() {
    assert!(BrickType::Collect.default_budget_us() > 0);
    assert!(BrickType::Render.default_budget_us() > 0);
    assert!(BrickType::Compute.default_budget_us() > 0);
    assert!(BrickType::Network.default_budget_us() > 0);
    assert!(BrickType::Storage.default_budget_us() > 0);
}

#[test]
fn f_brick_002_render_budget_60fps() {
    // 16ms ≈ one 60fps frame.
    assert_eq!(BrickType::Render.default_budget_us(), 16_000);
}

#[test]
fn f_brick_003_compute_budget_strictest() {
    // Compute has the tightest time budget of all brick types.
    let compute = BrickType::Compute.default_budget_us();
    assert!(compute <= BrickType::Render.default_budget_us());
    assert!(compute <= BrickType::Collect.default_budget_us());
    assert!(compute <= BrickType::Storage.default_budget_us());
    assert!(compute <= BrickType::Network.default_budget_us());
}

#[test]
fn f_brick_004_cv_thresholds_bounded() {
    for brick_type in [
        BrickType::Collect,
        BrickType::Render,
        BrickType::Compute,
        BrickType::Network,
        BrickType::Storage,
    ] {
        let cv = brick_type.cv_threshold();
        assert!(cv > 0.0, "CV threshold must be positive");
        assert!(cv <= 100.0, "CV threshold must be <= 100%");
    }
}

#[test]
fn f_brick_005_render_cv_strictest() {
    // Render tolerates the least timing jitter.
    let render_cv = BrickType::Render.cv_threshold();
    assert!(render_cv <= BrickType::Compute.cv_threshold());
    assert!(render_cv <= BrickType::Collect.cv_threshold());
    assert!(render_cv <= BrickType::Network.cv_threshold());
    assert!(render_cv <= BrickType::Storage.cv_threshold());
}

#[test]
fn f_brick_006_brick_type_debug() {
    let debug = format!("{:?}", BrickType::Render);
    assert!(debug.contains("Render"));
}

#[test]
fn f_brick_007_brick_type_clone_copy() {
    let original = BrickType::Compute;
    let cloned = original.clone();
    // `original` stays usable after the move-assignment only because the
    // type is Copy.
    let copied = original;
    assert_eq!(original, cloned);
    assert_eq!(original, copied);
}

#[test]
fn f_brick_008_brick_type_equality() {
    assert_eq!(BrickType::Render, BrickType::Render);
    assert_ne!(BrickType::Render, BrickType::Compute);
}

#[test]
fn f_brick_009_brick_type_hash() {
    use std::collections::HashSet;
    let mut set = HashSet::new();
    set.insert(BrickType::Render);
    set.insert(BrickType::Compute);
    // Duplicate insert must not grow the set.
    set.insert(BrickType::Render);
    assert_eq!(set.len(), 2);
}
// --- BrickProfiler: gated profiling built on the global trace flag ---
// NOTE(review): several of these toggle the process-global TRACE_ENABLED
// flag and can interfere with the TimingGuard tests under parallel
// execution.

#[test]
fn f_profiler_001_new_empty() {
    disable_tracing();
    let profiler = BrickProfiler::new();
    assert!(profiler.get_stats("any").is_none());
}

#[test]
fn f_profiler_002_default() {
    let p1 = BrickProfiler::new();
    let p2 = BrickProfiler::default();
    assert!(p1.get_stats("x").is_none());
    assert!(p2.get_stats("x").is_none());
}

#[test]
fn f_profiler_003_profile_returns_result() {
    let mut profiler = BrickProfiler::new();
    // profile() passes the closure's return value through.
    let result = profiler.profile("test", BrickType::Compute, || 42);
    assert_eq!(result, 42);
}

#[test]
fn f_profiler_004_profile_records_stats() {
    enable_tracing();
    let mut profiler = BrickProfiler::new();
    profiler.enabled = true;
    profiler.profile("test_brick", BrickType::Render, || {
        std::thread::sleep(std::time::Duration::from_micros(50));
    });
    let stats = profiler.get_stats("test_brick");
    assert!(stats.is_some());
    assert_eq!(stats.unwrap().count, 1);
    disable_tracing();
}

#[test]
fn f_profiler_005_escalate_nonexistent() {
    let profiler = BrickProfiler::new();
    assert!(!profiler.should_escalate("nonexistent"));
}

#[test]
fn f_profiler_006_summary_contains_names() {
    enable_tracing();
    let mut profiler = BrickProfiler::new();
    profiler.enabled = true;
    profiler.profile("cpu_render", BrickType::Render, || {});
    profiler.profile("disk_collect", BrickType::Collect, || {});
    let summary = profiler.summary();
    assert!(summary.contains("cpu_render") || summary.contains("Brick Profiler"));
    disable_tracing();
}

#[test]
fn f_profiler_007_summary_brick_type() {
    enable_tracing();
    let mut profiler = BrickProfiler::new();
    profiler.enabled = true;
    profiler.profile("render_test", BrickType::Render, || {});
    let summary = profiler.summary();
    assert!(summary.contains("Render") || summary.contains("Brick Profiler"));
    disable_tracing();
}

#[test]
fn f_profiler_008_accumulate_stats() {
    enable_tracing();
    let mut profiler = BrickProfiler::new();
    profiler.enabled = true;
    for _ in 0..5 {
        profiler.profile("repeated", BrickType::Compute, || {});
    }
    let stats = profiler.get_stats("repeated");
    assert!(stats.is_some());
    assert_eq!(stats.unwrap().count, 5);
    disable_tracing();
}

#[test]
fn f_profiler_009_debug() {
    let profiler = BrickProfiler::new();
    let debug = format!("{:?}", profiler);
    assert!(debug.contains("BrickProfiler"));
}

#[test]
fn f_profiler_010_disabled_zero_cost() {
    disable_tracing();
    let mut profiler = BrickProfiler::new();
    profiler.enabled = false;
    // Disabled: result still flows through but nothing is recorded.
    let result = profiler.profile("test", BrickType::Render, || 123);
    assert_eq!(result, 123);
    assert!(profiler.get_stats("test").is_none());
}
// --- RingBuffer: fixed-capacity buffer with overwrite-on-full semantics ---

#[test]
fn f_ring_001_new_empty() {
    let rb: RingBuffer<f64, 10> = RingBuffer::new();
    assert!(rb.is_empty());
    assert_eq!(rb.len(), 0);
}

#[test]
fn f_ring_002_push_increments_len() {
    let mut rb: RingBuffer<f64, 10> = RingBuffer::new();
    rb.push(1.0);
    assert_eq!(rb.len(), 1);
    rb.push(2.0);
    assert_eq!(rb.len(), 2);
}

#[test]
fn f_ring_003_capacity() {
    // Capacity comes from the const generic parameter.
    let rb: RingBuffer<f64, 5> = RingBuffer::new();
    assert_eq!(rb.capacity(), 5);
}

#[test]
fn f_ring_004_is_full() {
    let mut rb: RingBuffer<f64, 3> = RingBuffer::new();
    assert!(!rb.is_full());
    rb.push(1.0);
    rb.push(2.0);
    rb.push(3.0);
    assert!(rb.is_full());
}

#[test]
fn f_ring_005_latest() {
    let mut rb: RingBuffer<f64, 10> = RingBuffer::new();
    rb.push(1.0);
    rb.push(2.0);
    rb.push(3.0);
    assert_eq!(rb.latest(), Some(&3.0));
}

#[test]
fn f_ring_006_latest_empty() {
    let rb: RingBuffer<f64, 10> = RingBuffer::new();
    assert_eq!(rb.latest(), None);
}

#[test]
fn f_ring_007_get_by_index() {
    let mut rb: RingBuffer<f64, 10> = RingBuffer::new();
    rb.push(1.0);
    rb.push(2.0);
    rb.push(3.0);
    // Index 0 is the oldest element.
    assert_eq!(rb.get(0), Some(&1.0));
    assert_eq!(rb.get(2), Some(&3.0));
}

#[test]
fn f_ring_008_get_out_of_bounds() {
    let mut rb: RingBuffer<f64, 10> = RingBuffer::new();
    rb.push(1.0);
    assert_eq!(rb.get(5), None);
}

#[test]
fn f_ring_009_wrap_around() {
    let mut rb: RingBuffer<i32, 3> = RingBuffer::new();
    rb.push(1);
    rb.push(2);
    rb.push(3);
    // Fourth push into a 3-slot buffer evicts the oldest element (1).
    rb.push(4);
    assert_eq!(rb.len(), 3);
    let values: Vec<_> = rb.iter().copied().collect();
    assert_eq!(values, vec![2, 3, 4]);
}

#[test]
fn f_ring_010_clear() {
    let mut rb: RingBuffer<f64, 10> = RingBuffer::new();
    rb.push(1.0);
    rb.push(2.0);
    rb.clear();
    assert!(rb.is_empty());
    assert_eq!(rb.len(), 0);
}

#[test]
fn f_ring_011_sum() {
    let mut rb: RingBuffer<f64, 10> = RingBuffer::new();
    rb.push(1.0);
    rb.push(2.0);
    rb.push(3.0);
    assert!((rb.sum() - 6.0).abs() < 0.001);
}

#[test]
fn f_ring_012_mean() {
    let mut rb: RingBuffer<f64, 10> = RingBuffer::new();
    rb.push(2.0);
    rb.push(4.0);
    rb.push(6.0);
    assert!((rb.mean() - 4.0).abs() < 0.001);
}

#[test]
fn f_ring_013_mean_empty() {
    // Empty buffer: mean is 0, not NaN.
    let rb: RingBuffer<f64, 10> = RingBuffer::new();
    assert_eq!(rb.mean(), 0.0);
}

#[test]
fn f_ring_014_min() {
    let mut rb: RingBuffer<f64, 10> = RingBuffer::new();
    rb.push(5.0);
    rb.push(2.0);
    rb.push(8.0);
    assert_eq!(rb.min(), Some(2.0));
}

#[test]
fn f_ring_015_max() {
    let mut rb: RingBuffer<f64, 10> = RingBuffer::new();
    rb.push(5.0);
    rb.push(2.0);
    rb.push(8.0);
    assert_eq!(rb.max(), Some(8.0));
}

#[test]
fn f_ring_016_default() {
    let rb: RingBuffer<f64, 10> = RingBuffer::default();
    assert!(rb.is_empty());
}

#[test]
fn f_ring_017_debug() {
    let rb: RingBuffer<f64, 10> = RingBuffer::new();
    let debug = format!("{:?}", rb);
    assert!(debug.contains("RingBuffer"));
}
// --- LatencyHistogram: fixed-bin latency distribution (input in us) ---

#[test]
fn f_hist_001_new_empty() {
    let h = LatencyHistogram::new();
    assert_eq!(h.count(), 0);
}

#[test]
fn f_hist_002_record_increments() {
    let mut h = LatencyHistogram::new();
    h.record(500);
    assert_eq!(h.count(), 1);
    h.record(1500);
    assert_eq!(h.count(), 2);
}

#[test]
fn f_hist_003_bin_0_1ms() {
    // Bin 0 covers [0, 1000) us.
    let mut h = LatencyHistogram::new();
    h.record(500);
    h.record(999);
    assert_eq!(h.bin_count(0), 2);
}

#[test]
fn f_hist_004_bin_1_5ms() {
    // Bin 1 covers [1000, 5000) us.
    let mut h = LatencyHistogram::new();
    h.record(1000);
    h.record(4999);
    assert_eq!(h.bin_count(1), 2);
}

#[test]
fn f_hist_005_bin_500ms_plus() {
    // Bin 6 is the open-ended 500ms+ bucket.
    let mut h = LatencyHistogram::new();
    h.record(500_000);
    h.record(1_000_000);
    assert_eq!(h.bin_count(6), 2);
}

#[test]
fn f_hist_006_percentages_sum() {
    let mut h = LatencyHistogram::new();
    h.record(500);
    h.record(2000);
    h.record(600_000);
    let pcts = h.percentages();
    let sum: f64 = pcts.iter().sum();
    assert!((sum - 100.0).abs() < 0.01);
}

#[test]
fn f_hist_007_percentages_empty() {
    // Empty histogram: all-zero percentages, no NaN from 0/0.
    let h = LatencyHistogram::new();
    let pcts = h.percentages();
    assert!(pcts.iter().all(|&p| p == 0.0));
}

#[test]
fn f_hist_008_bin_labels() {
    assert_eq!(LatencyHistogram::bin_label(0), "0-1ms");
    assert_eq!(LatencyHistogram::bin_label(6), "500ms+");
    // Out-of-range indices get a placeholder label.
    assert_eq!(LatencyHistogram::bin_label(99), "?");
}

#[test]
fn f_hist_009_ascii_histogram() {
    let mut h = LatencyHistogram::new();
    h.record(500);
    h.record(2000);
    let ascii = h.ascii_histogram(20);
    assert!(!ascii.is_empty());
    assert!(ascii.contains("0-1ms"));
}

#[test]
fn f_hist_010_reset() {
    let mut h = LatencyHistogram::new();
    h.record(500);
    h.record(2000);
    h.reset();
    assert_eq!(h.count(), 0);
    assert!(h.percentages().iter().all(|&p| p == 0.0));
}

#[test]
fn f_hist_011_default() {
    let h = LatencyHistogram::default();
    assert_eq!(h.count(), 0);
}

#[test]
fn f_hist_012_debug() {
    let h = LatencyHistogram::new();
    let debug = format!("{:?}", h);
    assert!(debug.contains("LatencyHistogram"));
}

#[test]
fn f_hist_013_clone() {
    let mut h = LatencyHistogram::new();
    h.record(500);
    h.record(2000);
    let cloned = h.clone();
    assert_eq!(cloned.count(), h.count());
    assert_eq!(cloned.bin_count(0), h.bin_count(0));
}

#[test]
fn f_hist_014_all_bins() {
    let mut h = LatencyHistogram::new();
    // One sample aimed at each of the seven bins.
    h.record(500);
    h.record(2000);
    h.record(7000);
    h.record(25000);
    h.record(75000);
    h.record(250000);
    h.record(750000);
    for i in 0..7 {
        assert!(h.bin_count(i) > 0, "Bin {} should have count", i);
    }
}

#[test]
fn f_hist_015_bin_out_of_range() {
    let h = LatencyHistogram::new();
    assert_eq!(h.bin_count(99), 0);
}
// --- EmaTracker: exponential moving average with clamped alpha ---

#[test]
fn f_ema_001_new_not_initialized() {
    let ema = EmaTracker::new(0.1);
    assert!(!ema.is_initialized());
    assert_eq!(ema.value(), 0.0);
}

#[test]
fn f_ema_002_first_update() {
    let mut ema = EmaTracker::new(0.1);
    ema.update(100.0);
    assert!(ema.is_initialized());
    // First sample seeds the average directly (no blend with 0).
    assert!((ema.value() - 100.0).abs() < 0.001);
}

#[test]
fn f_ema_003_smoothing() {
    let mut ema = EmaTracker::new(0.5);
    ema.update(100.0);
    ema.update(0.0);
    // alpha = 0.5: halfway between 100 and 0.
    assert!((ema.value() - 50.0).abs() < 0.001);
}

#[test]
fn f_ema_004_alpha_clamped() {
    // Alpha is clamped into [0, 1].
    let ema_low = EmaTracker::new(-0.5);
    assert_eq!(ema_low.alpha(), 0.0);
    let ema_high = EmaTracker::new(1.5);
    assert_eq!(ema_high.alpha(), 1.0);
}

#[test]
fn f_ema_005_for_fps() {
    let ema = EmaTracker::for_fps();
    assert!((ema.alpha() - 0.3).abs() < 0.001);
}

#[test]
fn f_ema_006_for_load() {
    let ema = EmaTracker::for_load();
    assert!((ema.alpha() - 0.05).abs() < 0.001);
}

#[test]
fn f_ema_007_reset() {
    let mut ema = EmaTracker::new(0.1);
    ema.update(100.0);
    assert!(ema.is_initialized());
    ema.reset();
    assert!(!ema.is_initialized());
    assert_eq!(ema.value(), 0.0);
}

#[test]
fn f_ema_008_set_alpha() {
    let mut ema = EmaTracker::new(0.1);
    ema.set_alpha(0.5);
    assert!((ema.alpha() - 0.5).abs() < 0.001);
}

#[test]
fn f_ema_009_default() {
    let ema = EmaTracker::default();
    assert!((ema.alpha() - 0.1).abs() < 0.001);
}

#[test]
fn f_ema_010_debug() {
    let ema = EmaTracker::new(0.1);
    let debug = format!("{:?}", ema);
    assert!(debug.contains("EmaTracker"));
}

#[test]
fn f_ema_011_clone() {
    let mut ema = EmaTracker::new(0.3);
    ema.update(50.0);
    let cloned = ema.clone();
    assert!((ema.value() - cloned.value()).abs() < 0.001);
    assert_eq!(ema.alpha(), cloned.alpha());
}

#[test]
fn f_ema_012_high_alpha_responsive() {
    let mut ema_high = EmaTracker::new(0.9);
    let mut ema_low = EmaTracker::new(0.1);
    ema_high.update(100.0);
    ema_low.update(100.0);
    ema_high.update(0.0);
    ema_low.update(0.0);
    // Higher alpha weights the newest sample more heavily.
    assert!(ema_high.value() < ema_low.value());
}
// --- RateLimiter: minimum-interval gating (interval in us) ---

#[test]
fn f_rate_001_first_check_allowed() {
    let mut rl = RateLimiter::new(1_000_000);
    assert!(rl.check());
}

#[test]
fn f_rate_002_immediate_denied() {
    let mut rl = RateLimiter::new(1_000_000);
    rl.check();
    // Second call inside the 1s interval is rejected.
    assert!(!rl.check());
}

#[test]
fn f_rate_003_new_hz() {
    let rl = RateLimiter::new_hz(60);
    assert!((rl.hz() - 60.0).abs() < 1.0);
}

#[test]
fn f_rate_004_new_ms() {
    let rl = RateLimiter::new_ms(100);
    assert_eq!(rl.interval_us(), 100_000);
}

#[test]
fn f_rate_005_would_allow_no_update() {
    let mut rl = RateLimiter::new(1_000_000);
    rl.check();
    // would_allow must be a pure peek: repeated calls agree.
    let peek1 = rl.would_allow();
    let peek2 = rl.would_allow();
    assert_eq!(peek1, peek2);
}

#[test]
fn f_rate_006_reset() {
    let mut rl = RateLimiter::new(1_000_000);
    rl.check();
    assert!(!rl.check());
    rl.reset();
    assert!(rl.check());
}

#[test]
fn f_rate_007_default() {
    let rl = RateLimiter::default();
    assert!((rl.hz() - 60.0).abs() < 1.0);
}

#[test]
fn f_rate_008_debug() {
    let rl = RateLimiter::new(1000);
    let debug = format!("{:?}", rl);
    assert!(debug.contains("RateLimiter"));
}

#[test]
fn f_rate_009_clone() {
    let rl = RateLimiter::new_hz(30);
    let cloned = rl.clone();
    assert_eq!(rl.interval_us(), cloned.interval_us());
}

#[test]
fn f_rate_010_zero_hz() {
    // 0 Hz falls back to a 1-second interval rather than dividing by zero.
    let rl = RateLimiter::new_hz(0);
    assert_eq!(rl.interval_us(), 1_000_000);
}

#[test]
fn f_rate_011_hz_zero_interval() {
    // Zero interval reports 0 Hz instead of infinity.
    let rl = RateLimiter::new(0);
    assert_eq!(rl.hz(), 0.0);
}

#[test]
fn f_rate_012_small_interval() {
    let mut rl = RateLimiter::new(1);
    rl.check();
    // Sleeping well past the 1us interval re-arms the limiter.
    thread::sleep(std::time::Duration::from_micros(10));
    assert!(rl.check());
}
// --- ThresholdDetector: two-threshold hysteresis state machine ---

#[test]
fn f_thresh_001_starts_low() {
    let td = ThresholdDetector::new(30.0, 70.0);
    assert!(td.is_low());
    assert!(!td.is_high());
}

#[test]
fn f_thresh_002_transition_high() {
    let mut td = ThresholdDetector::new(30.0, 70.0);
    // Crossing above the high threshold flips the state; update reports it.
    let changed = td.update(80.0);
    assert!(changed);
    assert!(td.is_high());
}

#[test]
fn f_thresh_003_hysteresis() {
    let mut td = ThresholdDetector::new(30.0, 70.0);
    td.update(80.0);
    // 50 sits between the thresholds: state holds (hysteresis band).
    let changed = td.update(50.0);
    assert!(!changed);
    assert!(td.is_high());
}

#[test]
fn f_thresh_004_transition_low() {
    let mut td = ThresholdDetector::new(30.0, 70.0);
    td.update(80.0);
    let changed = td.update(20.0);
    assert!(changed);
    assert!(td.is_low());
}

#[test]
fn f_thresh_005_for_resource() {
    let td = ThresholdDetector::for_resource();
    assert_eq!(td.low_threshold(), 70.0);
    assert_eq!(td.high_threshold(), 90.0);
}

#[test]
fn f_thresh_006_for_temperature() {
    let td = ThresholdDetector::for_temperature();
    assert_eq!(td.low_threshold(), 60.0);
    assert_eq!(td.high_threshold(), 80.0);
}

#[test]
fn f_thresh_007_percent_clamp() {
    // percent() clamps both thresholds into [0, 100].
    let td = ThresholdDetector::percent(-10.0, 150.0);
    assert_eq!(td.low_threshold(), 0.0);
    assert_eq!(td.high_threshold(), 100.0);
}

#[test]
fn f_thresh_008_reset() {
    let mut td = ThresholdDetector::new(30.0, 70.0);
    td.update(80.0);
    assert!(td.is_high());
    td.reset();
    assert!(td.is_low());
}

#[test]
fn f_thresh_009_set_high() {
    let mut td = ThresholdDetector::new(30.0, 70.0);
    td.set_high();
    assert!(td.is_high());
}

#[test]
fn f_thresh_010_debug() {
    let td = ThresholdDetector::new(30.0, 70.0);
    let debug = format!("{:?}", td);
    assert!(debug.contains("ThresholdDetector"));
}

#[test]
fn f_thresh_011_clone() {
    let td = ThresholdDetector::new(25.0, 75.0);
    let cloned = td.clone();
    assert_eq!(td.low_threshold(), cloned.low_threshold());
    assert_eq!(td.high_threshold(), cloned.high_threshold());
}

#[test]
fn f_thresh_012_high_clamp() {
    // Inverted arguments must be normalized so high >= low.
    let td = ThresholdDetector::new(80.0, 20.0);
    assert!(td.high_threshold() >= td.low_threshold());
}

#[test]
fn f_thresh_013_exact_threshold() {
    let mut td = ThresholdDetector::new(30.0, 70.0);
    // Exactly at the high threshold: strictly-greater semantics, no change.
    let changed = td.update(70.0);
    assert!(!changed);
    assert!(td.is_low());
}

#[test]
fn f_thresh_014_update_returns_change() {
    let mut td = ThresholdDetector::new(30.0, 70.0);
    assert!(!td.update(50.0));
    assert!(td.update(80.0));
    assert!(!td.update(85.0));
    assert!(td.update(20.0));
}
// --- SampleCounter: running count with rate estimation ---

#[test]
fn f_count_001_starts_zero() {
    let sc = SampleCounter::new();
    assert_eq!(sc.count(), 0);
    assert_eq!(sc.rate(), 0.0);
}

#[test]
fn f_count_002_increment() {
    let mut sc = SampleCounter::new();
    sc.increment();
    assert_eq!(sc.count(), 1);
    sc.increment();
    assert_eq!(sc.count(), 2);
}

#[test]
fn f_count_003_add() {
    let mut sc = SampleCounter::new();
    sc.add(10);
    assert_eq!(sc.count(), 10);
    sc.add(5);
    assert_eq!(sc.count(), 15);
}

#[test]
fn f_count_004_reset() {
    let mut sc = SampleCounter::new();
    sc.add(100);
    sc.reset();
    assert_eq!(sc.count(), 0);
    assert_eq!(sc.rate(), 0.0);
}

#[test]
fn f_count_005_default() {
    let sc = SampleCounter::default();
    assert_eq!(sc.count(), 0);
}

#[test]
fn f_count_006_debug() {
    let sc = SampleCounter::new();
    let debug = format!("{:?}", sc);
    assert!(debug.contains("SampleCounter"));
}

#[test]
fn f_count_007_clone() {
    let mut sc = SampleCounter::new();
    sc.add(42);
    let cloned = sc.clone();
    assert_eq!(sc.count(), cloned.count());
}

#[test]
fn f_count_008_rate_calculation() {
    let mut sc = SampleCounter::new();
    // First call establishes the baseline timestamp before accumulating.
    sc.calculate_rate();
    sc.add(100);
    thread::sleep(std::time::Duration::from_millis(10));
    let rate = sc.calculate_rate();
    assert!(rate > 0.0, "Rate should be positive after adding samples");
}

#[test]
fn f_count_009_first_rate_zero() {
    let mut sc = SampleCounter::new();
    let rate = sc.calculate_rate();
    assert!(rate >= 0.0);
}

#[test]
fn f_count_010_rate_cached() {
    let mut sc = SampleCounter::new();
    sc.calculate_rate();
    sc.add(50);
    thread::sleep(std::time::Duration::from_millis(10));
    let calculated = sc.calculate_rate();
    // rate() must return exactly the value cached by the last
    // calculate_rate(), so bitwise f64 equality is intended here.
    let cached = sc.rate();
    assert_eq!(calculated, cached);
}
// --- BudgetTracker: latest usage vs. a budget, with peak tracking ---

#[test]
fn f_budget_001_starts_zero() {
    let bt = BudgetTracker::new(100.0);
    assert_eq!(bt.usage(), 0.0);
    assert_eq!(bt.peak(), 0.0);
    assert_eq!(bt.budget(), 100.0);
}

#[test]
fn f_budget_002_record() {
    let mut bt = BudgetTracker::new(100.0);
    bt.record(50.0);
    assert_eq!(bt.usage(), 50.0);
    assert_eq!(bt.peak(), 50.0);
}

#[test]
fn f_budget_003_peak_max() {
    let mut bt = BudgetTracker::new(100.0);
    bt.record(80.0);
    bt.record(60.0);
    bt.record(70.0);
    // usage follows the latest sample; peak keeps the maximum seen.
    assert_eq!(bt.usage(), 70.0);
    assert_eq!(bt.peak(), 80.0);
}

#[test]
fn f_budget_004_for_render() {
    // 16ms frame budget, expressed in microseconds.
    let bt = BudgetTracker::for_render();
    assert_eq!(bt.budget(), 16_000.0);
}

#[test]
fn f_budget_005_for_compute() {
    let bt = BudgetTracker::for_compute();
    assert_eq!(bt.budget(), 1_000.0);
}

#[test]
fn f_budget_006_utilization() {
    let mut bt = BudgetTracker::new(100.0);
    bt.record(50.0);
    assert_eq!(bt.utilization(), 50.0);
}

#[test]
fn f_budget_007_peak_utilization() {
    let mut bt = BudgetTracker::new(100.0);
    bt.record(80.0);
    bt.record(40.0);
    assert_eq!(bt.utilization(), 40.0);
    assert_eq!(bt.peak_utilization(), 80.0);
}

#[test]
fn f_budget_008_over_budget() {
    let mut bt = BudgetTracker::new(100.0);
    bt.record(50.0);
    assert!(!bt.is_over_budget());
    bt.record(150.0);
    assert!(bt.is_over_budget());
}

#[test]
fn f_budget_009_remaining() {
    let mut bt = BudgetTracker::new(100.0);
    bt.record(30.0);
    assert_eq!(bt.remaining(), 70.0);
    bt.record(150.0);
    // Remaining clamps at zero when over budget (never negative).
    assert_eq!(bt.remaining(), 0.0);
}

#[test]
fn f_budget_010_reset() {
    let mut bt = BudgetTracker::new(100.0);
    bt.record(80.0);
    bt.reset();
    assert_eq!(bt.usage(), 0.0);
    assert_eq!(bt.peak(), 0.0);
}

#[test]
fn f_budget_011_set_budget() {
    let mut bt = BudgetTracker::new(100.0);
    bt.set_budget(200.0);
    assert_eq!(bt.budget(), 200.0);
}

#[test]
fn f_budget_012_negative_clamp() {
    // Negative budgets are clamped to zero.
    let bt = BudgetTracker::new(-50.0);
    assert_eq!(bt.budget(), 0.0);
}

#[test]
fn f_budget_013_zero_budget() {
    let mut bt = BudgetTracker::new(0.0);
    bt.record(50.0);
    // Zero budget reports 0% utilization instead of dividing by zero.
    assert_eq!(bt.utilization(), 0.0);
    assert_eq!(bt.peak_utilization(), 0.0);
}

#[test]
fn f_budget_014_debug() {
    let bt = BudgetTracker::new(100.0);
    let debug = format!("{:?}", bt);
    assert!(debug.contains("BudgetTracker"));
}

#[test]
fn f_budget_015_clone() {
    let mut bt = BudgetTracker::new(100.0);
    bt.record(60.0);
    let cloned = bt.clone();
    assert_eq!(bt.budget(), cloned.budget());
    assert_eq!(bt.usage(), cloned.usage());
    assert_eq!(bt.peak(), cloned.peak());
}
// --- MinMaxTracker: running min/max with record timestamps ---

#[test]
fn f_minmax_001_starts_empty() {
    let mm = MinMaxTracker::new();
    assert!(mm.min().is_none());
    assert!(mm.max().is_none());
    assert!(mm.range().is_none());
    assert_eq!(mm.count(), 0);
}

#[test]
fn f_minmax_002_single_value() {
    let mut mm = MinMaxTracker::new();
    mm.record(42.0);
    // One sample: it is both the min and the max, so range is 0.
    assert_eq!(mm.min(), Some(42.0));
    assert_eq!(mm.max(), Some(42.0));
    assert_eq!(mm.range(), Some(0.0));
    assert_eq!(mm.count(), 1);
}

#[test]
fn f_minmax_003_multiple_values() {
    let mut mm = MinMaxTracker::new();
    mm.record(50.0);
    mm.record(10.0);
    mm.record(90.0);
    mm.record(30.0);
    assert_eq!(mm.min(), Some(10.0));
    assert_eq!(mm.max(), Some(90.0));
    assert_eq!(mm.range(), Some(80.0));
    assert_eq!(mm.count(), 4);
}

#[test]
fn f_minmax_004_reset() {
    let mut mm = MinMaxTracker::new();
    mm.record(100.0);
    mm.reset();
    assert!(mm.min().is_none());
    assert!(mm.max().is_none());
    assert_eq!(mm.count(), 0);
}

#[test]
fn f_minmax_005_default() {
    let mm = MinMaxTracker::default();
    assert_eq!(mm.count(), 0);
}

#[test]
fn f_minmax_006_debug() {
    let mm = MinMaxTracker::new();
    let debug = format!("{:?}", mm);
    assert!(debug.contains("MinMaxTracker"));
}

#[test]
fn f_minmax_007_clone() {
    let mut mm = MinMaxTracker::new();
    mm.record(25.0);
    mm.record(75.0);
    let cloned = mm.clone();
    assert_eq!(mm.min(), cloned.min());
    assert_eq!(mm.max(), cloned.max());
    assert_eq!(mm.count(), cloned.count());
}

#[test]
fn f_minmax_008_time_since_min() {
    // No samples yet: elapsed time reports 0.
    let mm = MinMaxTracker::new();
    assert_eq!(mm.time_since_min_us(), 0);
}

#[test]
fn f_minmax_009_time_since_max() {
    let mm = MinMaxTracker::new();
    assert_eq!(mm.time_since_max_us(), 0);
}

#[test]
fn f_minmax_010_time_after_record() {
    let mut mm = MinMaxTracker::new();
    mm.record(50.0);
    // Recorded just now: well under one second ago.
    assert!(mm.time_since_min_us() < 1_000_000);
    assert!(mm.time_since_max_us() < 1_000_000);
}
// --- MovingWindow: time-windowed sum/count (window in us) ---

#[test]
fn f_window_001_starts_empty() {
    let mut mw = MovingWindow::new(1000);
    assert_eq!(mw.sum(), 0.0);
    assert_eq!(mw.count(), 0);
}

#[test]
fn f_window_002_record_sum() {
    let mut mw = MovingWindow::new(1000);
    mw.record(10.0);
    mw.record(20.0);
    mw.record(30.0);
    assert_eq!(mw.sum(), 60.0);
    assert_eq!(mw.count(), 3);
}

#[test]
fn f_window_003_increment() {
    // increment() is record(1.0): bumps both count and sum.
    let mut mw = MovingWindow::new(1000);
    mw.increment();
    mw.increment();
    mw.increment();
    assert_eq!(mw.count(), 3);
    assert_eq!(mw.sum(), 3.0);
}

#[test]
fn f_window_004_one_second() {
    let mw = MovingWindow::one_second();
    assert_eq!(mw.window_us, 1_000_000);
}

#[test]
fn f_window_005_one_minute() {
    let mw = MovingWindow::one_minute();
    assert_eq!(mw.window_us, 60_000_000);
}

#[test]
fn f_window_006_reset() {
    let mut mw = MovingWindow::new(1000);
    mw.record(100.0);
    mw.reset();
    assert_eq!(mw.sum(), 0.0);
    assert_eq!(mw.count(), 0);
}

#[test]
fn f_window_007_debug() {
    let mw = MovingWindow::new(1000);
    let debug = format!("{:?}", mw);
    assert!(debug.contains("MovingWindow"));
}

#[test]
fn f_window_008_clone() {
    let mut mw = MovingWindow::new(1000);
    mw.record(50.0);
    let cloned = mw.clone();
    assert_eq!(mw.window_us, cloned.window_us);
    assert_eq!(mw.current_sum, cloned.current_sum);
}

#[test]
fn f_window_009_rate_per_second() {
    let mut mw = MovingWindow::new(1000);
    mw.record(100.0);
    let rate = mw.rate_per_second();
    assert!(rate >= 0.0);
}

#[test]
fn f_window_010_count_rate() {
    let mut mw = MovingWindow::new(1000);
    for _ in 0..10 {
        mw.increment();
    }
    let rate = mw.count_rate();
    assert!(rate >= 0.0);
}
#[test]
fn f_pct_001_starts_empty() {
    // A fresh tracker has recorded nothing.
    let p = PercentileTracker::new();
    assert_eq!(p.count(), 0);
}
#[test]
fn f_pct_002_empty_percentile() {
    // All percentile queries on an empty tracker return zero.
    let p = PercentileTracker::new();
    assert_eq!(p.p50_ms(), 0.0);
    assert_eq!(p.p90_ms(), 0.0);
    assert_eq!(p.p99_ms(), 0.0);
}
#[test]
fn f_pct_003_record_count() {
    // Every millisecond sample bumps the count.
    let mut p = PercentileTracker::new();
    for &ms in &[5.0, 10.0, 15.0] {
        p.record_ms(ms);
    }
    assert_eq!(p.count(), 3);
}
#[test]
fn f_pct_004_record_us() {
    // Microsecond samples are counted the same way.
    let mut p = PercentileTracker::new();
    p.record_us(500);
    p.record_us(3000);
    assert_eq!(p.count(), 2);
}
#[test]
fn f_pct_005_reset() {
    // Reset empties the histogram.
    let mut p = PercentileTracker::new();
    p.record_ms(10.0);
    p.record_ms(20.0);
    p.reset();
    assert_eq!(p.count(), 0);
}
#[test]
fn f_pct_006_default() {
    // Default construction is equivalent to new().
    let p = PercentileTracker::default();
    assert_eq!(p.count(), 0);
}
#[test]
fn f_pct_007_debug() {
    // Debug output should name the type.
    let p = PercentileTracker::new();
    let rendered = format!("{:?}", p);
    assert!(rendered.contains("PercentileTracker"));
}
#[test]
fn f_pct_008_clone() {
    // Cloning preserves the sample count.
    let mut p = PercentileTracker::new();
    p.record_ms(5.0);
    let copy = p.clone();
    assert_eq!(p.count(), copy.count());
}
#[test]
fn f_pct_009_custom_boundaries() {
    // Custom bucket boundaries are accepted; tracker still starts empty.
    let boundaries = [100, 200, 300, 400, 500, 600, 700, 800, 900, u64::MAX];
    let p = PercentileTracker::with_boundaries(boundaries);
    assert_eq!(p.count(), 0);
}
#[test]
fn f_pct_010_p50() {
    // With a bimodal distribution the median is still positive.
    let mut p = PercentileTracker::new();
    for _ in 0..50 {
        p.record_ms(2.0);
    }
    for _ in 0..50 {
        p.record_ms(20.0);
    }
    let p50 = p.p50_ms();
    assert!(p50 > 0.0);
}
#[test]
fn f_pct_011_p90_higher() {
    // Percentiles are monotone: p90 >= p50 over a uniform ramp.
    let mut p = PercentileTracker::new();
    (1..=100).for_each(|i| p.record_ms(i as f64));
    let p50 = p.p50_ms();
    let p90 = p.p90_ms();
    assert!(p90 >= p50);
}
#[test]
fn f_pct_012_p99_higher() {
    // Percentiles are monotone: p99 >= p90 over a uniform ramp.
    let mut p = PercentileTracker::new();
    (1..=100).for_each(|i| p.record_ms(i as f64));
    let p90 = p.p90_ms();
    let p99 = p.p99_ms();
    assert!(p99 >= p90);
}
#[test]
fn f_pct_013_percentile_us() {
    // The microsecond and millisecond views agree up to the unit conversion.
    let mut p = PercentileTracker::new();
    p.record_ms(5.0);
    let p50_us = p.percentile_us(50.0);
    let p50_ms = p.percentile_ms(50.0);
    assert_eq!(p50_us, (p50_ms * 1000.0) as u64);
}
#[test]
fn f_pct_014_large_values() {
    // Samples beyond typical bucket ranges are still counted.
    let mut p = PercentileTracker::new();
    p.record_ms(2000.0);
    assert_eq!(p.count(), 1);
}
#[test]
fn f_pct_015_zero_value() {
    // A zero-duration sample lands in the lowest bucket.
    let mut p = PercentileTracker::new();
    p.record_us(0);
    assert_eq!(p.count(), 1);
    let p50 = p.percentile_us(50.0);
    assert!(p50 <= 1000);
}
#[test]
fn f_state_001_starts_zero() {
    // The machine begins in state 0.
    let s: StateTracker<4> = StateTracker::new();
    assert_eq!(s.current(), 0);
}
#[test]
fn f_state_002_transition() {
    // A valid transition changes the current state.
    let mut s: StateTracker<4> = StateTracker::new();
    s.transition(2);
    assert_eq!(s.current(), 2);
}
#[test]
fn f_state_003_invalid_transition() {
    // An out-of-range target is ignored; state is unchanged.
    let mut s: StateTracker<4> = StateTracker::new();
    s.transition(10);
    assert_eq!(s.current(), 0);
}
#[test]
fn f_state_004_transition_count() {
    // Per-state entry counts track how often each state was entered.
    let mut s: StateTracker<4> = StateTracker::new();
    s.transition(1);
    s.transition(2);
    s.transition(1);
    assert_eq!(s.transition_count(1), 2);
    assert_eq!(s.transition_count(2), 1);
}
#[test]
fn f_state_005_total_transitions() {
    // Total includes the implicit entry into the initial state.
    let mut s: StateTracker<4> = StateTracker::new();
    s.transition(1);
    s.transition(2);
    assert_eq!(s.total_transitions(), 3);
}
#[test]
fn f_state_006_time_in_current() {
    // A freshly built machine has spent well under a second in state 0.
    let s: StateTracker<4> = StateTracker::new();
    assert!(s.time_in_current_us() < 1_000_000);
}
#[test]
fn f_state_007_reset() {
    // Reset returns to state 0 and clears all counters.
    let mut s: StateTracker<4> = StateTracker::new();
    s.transition(2);
    s.reset();
    assert_eq!(s.current(), 0);
    assert_eq!(s.transition_count(2), 0);
}
#[test]
fn f_state_008_default() {
    // Default construction is equivalent to new().
    let s: StateTracker<4> = StateTracker::default();
    assert_eq!(s.current(), 0);
}
#[test]
fn f_state_009_debug() {
    // Debug output should name the type.
    let s: StateTracker<4> = StateTracker::new();
    let rendered = format!("{:?}", s);
    assert!(rendered.contains("StateTracker"));
}
#[test]
fn f_state_010_clone() {
    // Cloning preserves the current state.
    let mut s: StateTracker<4> = StateTracker::new();
    s.transition(2);
    let copy = s.clone();
    assert_eq!(s.current(), copy.current());
}
#[test]
fn f_state_011_transition_count_bounds() {
    // Querying an out-of-range state yields zero rather than panicking.
    let s: StateTracker<4> = StateTracker::new();
    assert_eq!(s.transition_count(100), 0);
}
#[test]
fn f_state_012_total_time() {
    // Cumulative time in the initial state is well under a second.
    let s: StateTracker<4> = StateTracker::new();
    let time = s.total_time_in_state_us(0);
    assert!(time < 1_000_000);
}
#[test]
fn f_change_001_new_baseline() {
    // The constructor seeds both baseline and last value.
    let det = ChangeDetector::new(50.0, 1.0, 5.0);
    assert_eq!(det.baseline(), 50.0);
    assert_eq!(det.last_value(), 50.0);
}
#[test]
fn f_change_002_abs_change() {
    // Only moves beyond the absolute threshold count as change.
    let mut det = ChangeDetector::new(0.0, 5.0, 100.0);
    assert!(!det.has_changed(3.0));
    assert!(det.has_changed(6.0));
}
#[test]
fn f_change_003_rel_change() {
    // With a loose absolute threshold, the relative threshold governs.
    let mut det = ChangeDetector::new(100.0, 100.0, 10.0);
    det.update(100.0);
    assert!(!det.has_changed(105.0));
    assert!(det.has_changed(115.0));
}
#[test]
fn f_change_004_update_returns() {
    // update() reports whether the new value crossed a threshold.
    let mut det = ChangeDetector::new(0.0, 5.0, 100.0);
    assert!(!det.update(2.0));
    assert!(det.update(10.0));
}
#[test]
fn f_change_005_change_count() {
    // Each threshold crossing increments the change counter.
    let mut det = ChangeDetector::new(0.0, 5.0, 100.0);
    det.update(1.0);
    det.update(10.0);
    det.update(20.0);
    assert_eq!(det.change_count(), 2);
}
#[test]
fn f_change_006_for_percentage() {
    // Preset constructor starts from a zero baseline.
    let det = ChangeDetector::for_percentage();
    assert_eq!(det.baseline(), 0.0);
}
#[test]
fn f_change_007_for_latency() {
    // Preset constructor starts from a zero baseline.
    let det = ChangeDetector::for_latency();
    assert_eq!(det.baseline(), 0.0);
}
#[test]
fn f_change_008_update_baseline() {
    // update_baseline() promotes the last value to the new baseline.
    let mut det = ChangeDetector::new(0.0, 1.0, 5.0);
    det.update(50.0);
    det.update_baseline();
    assert_eq!(det.baseline(), 50.0);
}
#[test]
fn f_change_009_set_baseline() {
    // set_baseline() overrides the baseline directly.
    let mut det = ChangeDetector::new(0.0, 1.0, 5.0);
    det.set_baseline(100.0);
    assert_eq!(det.baseline(), 100.0);
}
#[test]
fn f_change_010_change_from_baseline() {
    // Absolute delta is measured against the fixed baseline.
    let mut det = ChangeDetector::new(100.0, 1.0, 5.0);
    det.update(150.0);
    assert_eq!(det.change_from_baseline(), 50.0);
}
#[test]
fn f_change_011_relative_change() {
    // A move from 100 to 150 is a 50% relative change.
    let mut det = ChangeDetector::new(100.0, 1.0, 5.0);
    det.update(150.0);
    assert_eq!(det.relative_change(), 50.0);
}
#[test]
fn f_change_012_reset() {
    // Reset restores the original baseline and clears the counter.
    let mut det = ChangeDetector::new(50.0, 1.0, 5.0);
    det.update(100.0);
    det.reset();
    assert_eq!(det.last_value(), 50.0);
    assert_eq!(det.change_count(), 0);
}
#[test]
fn f_change_013_default() {
    // Default construction uses a zero baseline.
    let det = ChangeDetector::default();
    assert_eq!(det.baseline(), 0.0);
}
#[test]
fn f_change_014_debug() {
    // Debug output should name the type.
    let det = ChangeDetector::new(0.0, 1.0, 5.0);
    let rendered = format!("{:?}", det);
    assert!(rendered.contains("ChangeDetector"));
}
#[test]
fn f_change_015_clone() {
    // Cloning preserves the baseline.
    let det = ChangeDetector::new(50.0, 1.0, 5.0);
    let copy = det.clone();
    assert_eq!(det.baseline(), copy.baseline());
}
#[test]
fn f_accum_001_starts_zero() {
    // A new accumulator is empty and uninitialized.
    let a = Accumulator::new();
    assert_eq!(a.value(), 0);
    assert!(!a.is_initialized());
}
#[test]
fn f_accum_002_first_update() {
    // The first raw reading only establishes the baseline; no delta yet.
    let mut a = Accumulator::new();
    a.update(100);
    assert!(a.is_initialized());
    assert_eq!(a.value(), 0);
}
#[test]
fn f_accum_003_delta() {
    // Subsequent readings accumulate the difference from the previous one.
    let mut a = Accumulator::new();
    a.update(100);
    a.update(150);
    assert_eq!(a.value(), 50);
}
#[test]
fn f_accum_004_add() {
    // add() contributes directly to the accumulated total.
    let mut a = Accumulator::new();
    a.add(100);
    a.add(50);
    assert_eq!(a.value(), 150);
}
#[test]
fn f_accum_005_overflow() {
    // A raw counter wrapping past u64::MAX is detected as one overflow.
    let mut a = Accumulator::new();
    a.update(u64::MAX - 10);
    a.update(5);
    assert_eq!(a.overflows(), 1);
}
#[test]
fn f_accum_006_last_raw() {
    // The most recent raw reading is retained.
    let mut a = Accumulator::new();
    a.update(100);
    a.update(200);
    assert_eq!(a.last_raw(), 200);
}
#[test]
fn f_accum_007_reset() {
    // Reset clears the value, the initialized flag, and the overflow count.
    let mut a = Accumulator::new();
    a.update(100);
    a.update(200);
    a.reset();
    assert_eq!(a.value(), 0);
    assert!(!a.is_initialized());
    assert_eq!(a.overflows(), 0);
}
#[test]
fn f_accum_008_default() {
    // Default construction is equivalent to new().
    let a = Accumulator::default();
    assert_eq!(a.value(), 0);
}
#[test]
fn f_accum_009_debug() {
    // Debug output should name the type.
    let a = Accumulator::new();
    let rendered = format!("{:?}", a);
    assert!(rendered.contains("Accumulator"));
}
#[test]
fn f_accum_010_clone() {
    // Cloning preserves the accumulated value.
    let mut a = Accumulator::new();
    a.add(100);
    let copy = a.clone();
    assert_eq!(a.value(), copy.value());
}
#[test]
fn f_event_001_starts_zero() {
    // A fresh counter has no events in any category.
    let c: EventCounter<5> = EventCounter::new();
    assert_eq!(c.total(), 0);
}
#[test]
fn f_event_002_increment() {
    // Per-category counts and the grand total track every increment.
    let mut c: EventCounter<5> = EventCounter::new();
    c.increment(0);
    c.increment(0);
    c.increment(1);
    assert_eq!(c.count(0), 2);
    assert_eq!(c.count(1), 1);
    assert_eq!(c.total(), 3);
}
#[test]
fn f_event_003_add() {
    // add() bulk-records events in one category.
    let mut c: EventCounter<5> = EventCounter::new();
    c.add(2, 10);
    assert_eq!(c.count(2), 10);
    assert_eq!(c.total(), 10);
}
#[test]
fn f_event_004_invalid_category() {
    // An out-of-range category is ignored entirely.
    let mut c: EventCounter<5> = EventCounter::new();
    c.increment(100);
    assert_eq!(c.total(), 0);
}
#[test]
fn f_event_005_percentage() {
    // Percentages are taken over the grand total.
    let mut c: EventCounter<5> = EventCounter::new();
    c.add(0, 25);
    c.add(1, 75);
    assert_eq!(c.percentage(0), 25.0);
    assert_eq!(c.percentage(1), 75.0);
}
#[test]
fn f_event_006_dominant() {
    // dominant() picks the category with the highest count.
    let mut c: EventCounter<5> = EventCounter::new();
    c.add(0, 10);
    c.add(2, 50);
    c.add(4, 30);
    assert_eq!(c.dominant(), Some(2));
}
#[test]
fn f_event_007_empty_dominant() {
    // With no events there is no dominant category.
    let c: EventCounter<5> = EventCounter::new();
    assert_eq!(c.dominant(), None);
}
#[test]
fn f_event_008_reset() {
    // Reset zeroes every category and the total.
    let mut c: EventCounter<5> = EventCounter::new();
    c.add(0, 100);
    c.reset();
    assert_eq!(c.total(), 0);
    assert_eq!(c.count(0), 0);
}
#[test]
fn f_event_009_default() {
    // Default construction is equivalent to new().
    let c: EventCounter<5> = EventCounter::default();
    assert_eq!(c.total(), 0);
}
#[test]
fn f_event_010_debug() {
    // Debug output should name the type.
    let c: EventCounter<5> = EventCounter::new();
    let rendered = format!("{:?}", c);
    assert!(rendered.contains("EventCounter"));
}
#[test]
fn f_event_011_clone() {
    // Cloning preserves the total.
    let mut c: EventCounter<5> = EventCounter::new();
    c.add(0, 50);
    let copy = c.clone();
    assert_eq!(c.total(), copy.total());
}
#[test]
fn f_event_012_count_bounds() {
    // Out-of-range category lookups return zero rather than panicking.
    let c: EventCounter<5> = EventCounter::new();
    assert_eq!(c.count(100), 0);
}
#[test]
fn f_event_013_percentage_bounds() {
    // Out-of-range percentage lookups return zero rather than panicking.
    let mut c: EventCounter<5> = EventCounter::new();
    c.add(0, 100);
    assert_eq!(c.percentage(100), 0.0);
}
#[test]
fn f_trend_001_new_empty() {
    // With no samples, the trend is Unknown.
    let t = TrendDetector::new(0.1);
    assert_eq!(t.count(), 0);
    assert_eq!(t.trend(), Trend::Unknown);
}
#[test]
fn f_trend_002_default() {
    // Default construction is equivalent to an empty detector.
    let t = TrendDetector::default();
    assert_eq!(t.count(), 0);
}
#[test]
fn f_trend_003_upward_trend() {
    // A steadily rising series yields a positive slope and Trend::Up.
    let mut t = TrendDetector::new(0.1);
    (0..10).for_each(|i| t.update(i as f64 * 10.0));
    assert!(t.slope() > 0.0, "Slope should be positive");
    assert_eq!(t.trend(), Trend::Up);
    assert!(t.is_trending_up());
}
#[test]
fn f_trend_004_downward_trend() {
    // A steadily falling series yields a negative slope and Trend::Down.
    let mut t = TrendDetector::new(0.1);
    (0..10).for_each(|i| t.update(100.0 - i as f64 * 10.0));
    assert!(t.slope() < 0.0, "Slope should be negative");
    assert_eq!(t.trend(), Trend::Down);
    assert!(t.is_trending_down());
}
#[test]
fn f_trend_005_flat_trend() {
    // A constant series is classified as flat.
    let mut t = TrendDetector::new(0.1);
    for _ in 0..10 {
        t.update(50.0);
    }
    assert!(t.slope().abs() < 0.1, "Slope should be near zero");
    assert_eq!(t.trend(), Trend::Flat);
}
#[test]
fn f_trend_006_unknown_few_samples() {
    // Two samples are not enough to call a trend.
    let mut t = TrendDetector::new(0.1);
    t.update(10.0);
    t.update(20.0);
    assert_eq!(t.trend(), Trend::Unknown);
}
#[test]
fn f_trend_007_for_percentage() {
    // Preset constructor starts in the Unknown state.
    let t = TrendDetector::for_percentage();
    assert_eq!(t.trend(), Trend::Unknown);
}
#[test]
fn f_trend_008_for_latency() {
    // Preset constructor starts in the Unknown state.
    let t = TrendDetector::for_latency();
    assert_eq!(t.trend(), Trend::Unknown);
}
#[test]
fn f_trend_009_reset() {
    // Reset discards the series and zeroes the slope.
    let mut t = TrendDetector::new(0.1);
    (0..10).for_each(|i| t.update(i as f64));
    t.reset();
    assert_eq!(t.count(), 0);
    assert_eq!(t.slope(), 0.0);
}
#[test]
fn f_trend_010_slope_single() {
    // A single sample defines no slope.
    let mut t = TrendDetector::new(0.1);
    t.update(50.0);
    assert_eq!(t.slope(), 0.0);
}
#[test]
fn f_trend_011_debug() {
    // Debug output should name the type.
    let t = TrendDetector::new(0.1);
    let rendered = format!("{:?}", t);
    assert!(rendered.contains("TrendDetector"));
}
#[test]
fn f_trend_012_clone() {
    // Cloning preserves the sample count.
    let mut t = TrendDetector::new(0.1);
    t.update(10.0);
    t.update(20.0);
    let copy = t.clone();
    assert_eq!(t.count(), copy.count());
}
#[test]
fn f_anomaly_001_new_empty() {
    // A fresh detector has no samples and a zero mean.
    let det = AnomalyDetector::new(3.0);
    assert_eq!(det.count(), 0);
    assert_eq!(det.mean(), 0.0);
}
#[test]
fn f_anomaly_002_default() {
    // Default threshold is three sigma.
    let det = AnomalyDetector::default();
    assert_eq!(det.threshold(), 3.0);
}
#[test]
fn f_anomaly_003_two_sigma() {
    // two_sigma() preset uses a threshold of 2.
    let det = AnomalyDetector::two_sigma();
    assert_eq!(det.threshold(), 2.0);
}
#[test]
fn f_anomaly_004_three_sigma() {
    // three_sigma() preset uses a threshold of 3.
    let det = AnomalyDetector::three_sigma();
    assert_eq!(det.threshold(), 3.0);
}
#[test]
fn f_anomaly_005_mean_tracking() {
    // Mean of {10, 20, 30} is 20.
    let mut det = AnomalyDetector::new(3.0);
    for &v in &[10.0, 20.0, 30.0] {
        det.update(v);
    }
    assert!((det.mean() - 20.0).abs() < 0.01);
}
#[test]
fn f_anomaly_006_variance() {
    // Sample variance of {10, 20, 30} is 100.
    let mut det = AnomalyDetector::new(3.0);
    for &v in &[10.0, 20.0, 30.0] {
        det.update(v);
    }
    assert!((det.variance() - 100.0).abs() < 0.01);
}
#[test]
fn f_anomaly_007_std_dev() {
    // Standard deviation of {10, 20, 30} is 10.
    let mut det = AnomalyDetector::new(3.0);
    for &v in &[10.0, 20.0, 30.0] {
        det.update(v);
    }
    assert!((det.std_dev() - 10.0).abs() < 0.01);
}
#[test]
fn f_anomaly_008_first_not_anomaly() {
    // With no prior baseline, the first sample cannot be an anomaly.
    let mut det = AnomalyDetector::new(3.0);
    let flagged = det.update(1000.0);
    assert!(!flagged);
}
#[test]
fn f_anomaly_009_min_samples() {
    // Below the minimum sample count nothing is flagged, even an outlier.
    let mut det = AnomalyDetector::new(3.0);
    (0..9).for_each(|i| {
        det.update(50.0 + i as f64);
    });
    assert!(!det.is_anomaly(1000.0));
}
#[test]
fn f_anomaly_010_detect_outlier() {
    // Once the baseline is established, a far outlier is flagged.
    let mut det = AnomalyDetector::new(3.0);
    (0..20).for_each(|i| {
        det.update(50.0 + (i as f64 % 5.0));
    });
    assert!(det.is_anomaly(1000.0));
}
#[test]
fn f_anomaly_011_z_score() {
    // The z-score of the mean itself is ~0.
    let mut det = AnomalyDetector::new(3.0);
    (0..21).for_each(|i| {
        det.update(50.0 + (i as f64 % 3.0));
    });
    let z = det.z_score(det.mean());
    assert!(z.abs() < 0.01);
}
#[test]
fn f_anomaly_012_anomaly_count() {
    // Flagged samples are tallied.
    let mut det = AnomalyDetector::new(2.0);
    for _ in 0..15 {
        det.update(50.0);
    }
    det.update(51.0);
    det.update(49.0);
    det.update(1000.0);
    assert!(det.anomaly_count() >= 1);
}
#[test]
fn f_anomaly_013_anomaly_rate() {
    // An empty detector reports a zero anomaly rate.
    let det = AnomalyDetector::new(3.0);
    assert_eq!(det.anomaly_rate(), 0.0);
}
#[test]
fn f_anomaly_014_reset() {
    // Reset clears samples, mean, and the anomaly tally.
    let mut det = AnomalyDetector::new(3.0);
    (0..10).for_each(|i| det.update(i as f64));
    det.reset();
    assert_eq!(det.count(), 0);
    assert_eq!(det.mean(), 0.0);
    assert_eq!(det.anomaly_count(), 0);
}
#[test]
fn f_anomaly_015_clone() {
    // Cloning preserves count and mean.
    let mut det = AnomalyDetector::new(3.0);
    det.update(50.0);
    let copy = det.clone();
    assert_eq!(det.count(), copy.count());
    assert_eq!(det.mean(), copy.mean());
}
#[test]
fn f_thru_001_new_zero() {
    // A fresh tracker has no volume and a zero rate.
    let t = ThroughputTracker::new();
    assert_eq!(t.total(), 0);
    assert_eq!(t.rate(), 0.0);
}
#[test]
fn f_thru_002_default() {
    // Default construction is equivalent to new().
    let t = ThroughputTracker::default();
    assert_eq!(t.total(), 0);
}
#[test]
fn f_thru_003_add() {
    // add() accumulates into the running total.
    let mut t = ThroughputTracker::new();
    t.add(100);
    t.add(200);
    assert_eq!(t.total(), 300);
}
#[test]
fn f_thru_004_peak_rate() {
    // No traffic means no recorded peak.
    let t = ThroughputTracker::new();
    assert_eq!(t.peak_rate(), 0.0);
}
#[test]
fn f_thru_005_format_small() {
    // Sub-thousand rates print without a unit prefix.
    let mut t = ThroughputTracker::new();
    t.rate = 500.0;
    assert_eq!(t.format_rate(), "500/s");
}
#[test]
fn f_thru_006_format_k() {
    // Thousands use the K suffix.
    let mut t = ThroughputTracker::new();
    t.rate = 5_000.0;
    assert_eq!(t.format_rate(), "5.0K/s");
}
#[test]
fn f_thru_007_format_m() {
    // Millions use the M suffix.
    let mut t = ThroughputTracker::new();
    t.rate = 5_000_000.0;
    assert_eq!(t.format_rate(), "5.0M/s");
}
#[test]
fn f_thru_008_format_g() {
    // Billions use the G suffix.
    let mut t = ThroughputTracker::new();
    t.rate = 5_000_000_000.0;
    assert_eq!(t.format_rate(), "5.0G/s");
}
#[test]
fn f_thru_009_format_kb() {
    // Byte formatting uses binary (1024-based) units: 5120 B/s = 5.0KB/s.
    let mut t = ThroughputTracker::new();
    t.rate = 5_120.0;
    assert_eq!(t.format_bytes_rate(), "5.0KB/s");
}
#[test]
fn f_thru_010_format_mb() {
    // 5 * 1024 * 1024 B/s = 5.0MB/s.
    let mut t = ThroughputTracker::new();
    t.rate = 5_242_880.0;
    assert_eq!(t.format_bytes_rate(), "5.0MB/s");
}
#[test]
fn f_thru_011_reset() {
    // Reset clears total, current rate, and the recorded peak.
    let mut t = ThroughputTracker::new();
    t.add(1000);
    t.rate = 500.0;
    t.peak_rate = 1000.0;
    t.reset();
    assert_eq!(t.total(), 0);
    assert_eq!(t.rate(), 0.0);
    assert_eq!(t.peak_rate(), 0.0);
}
#[test]
fn f_thru_012_clone() {
    // Cloning preserves the total.
    let mut t = ThroughputTracker::new();
    t.add(500);
    let copy = t.clone();
    assert_eq!(t.total(), copy.total());
}
#[test]
fn f_jitter_001_new_zero() {
    // A fresh tracker reports no jitter and no samples.
    let j = JitterTracker::new();
    assert_eq!(j.jitter(), 0.0);
    assert_eq!(j.count(), 0);
}
#[test]
fn f_jitter_002_default() {
    // Default construction is equivalent to new().
    let j = JitterTracker::default();
    assert_eq!(j.jitter(), 0.0);
}
#[test]
fn f_jitter_003_alpha_clamped() {
    // Out-of-range smoothing factors are clamped; construction still succeeds.
    let below = JitterTracker::with_alpha(-0.5);
    let above = JitterTracker::with_alpha(2.0);
    assert_eq!(below.jitter(), 0.0);
    assert_eq!(above.jitter(), 0.0);
}
#[test]
fn f_jitter_004_first_update() {
    // A single sample defines no inter-sample variation.
    let mut j = JitterTracker::new();
    j.update(100.0);
    assert_eq!(j.jitter(), 0.0);
    assert_eq!(j.count(), 1);
}
#[test]
fn f_jitter_005_constant_zero_jitter() {
    // A perfectly steady signal produces (near) zero jitter.
    let mut j = JitterTracker::new();
    for _ in 0..10 {
        j.update(50.0);
    }
    assert!(j.jitter().abs() < 0.01);
}
#[test]
fn f_jitter_006_variable_jitter() {
    // An oscillating signal produces positive jitter.
    let mut j = JitterTracker::new();
    for &v in &[10.0, 50.0, 10.0, 50.0] {
        j.update(v);
    }
    assert!(j.jitter() > 0.0);
}
#[test]
fn f_jitter_007_peak_tracking() {
    // The peak does not collapse when subsequent samples are calm.
    let mut j = JitterTracker::new();
    j.update(10.0);
    j.update(100.0);
    let first_peak = j.peak_jitter();
    j.update(99.0);
    j.update(98.0);
    assert!(j.peak_jitter() >= first_peak * 0.9);
}
#[test]
fn f_jitter_008_exceeds() {
    // exceeds() compares current jitter against a caller threshold.
    let mut j = JitterTracker::new();
    j.update(0.0);
    j.update(100.0);
    assert!(j.exceeds(1.0));
    assert!(!j.exceeds(1000.0));
}
#[test]
fn f_jitter_009_reset() {
    // Reset clears jitter, peak, and sample count.
    let mut j = JitterTracker::new();
    j.update(10.0);
    j.update(50.0);
    j.reset();
    assert_eq!(j.jitter(), 0.0);
    assert_eq!(j.peak_jitter(), 0.0);
    assert_eq!(j.count(), 0);
}
#[test]
fn f_jitter_010_clone() {
    // Cloning preserves jitter and count.
    let mut j = JitterTracker::new();
    j.update(10.0);
    j.update(50.0);
    let copy = j.clone();
    assert_eq!(j.jitter(), copy.jitter());
    assert_eq!(j.count(), copy.count());
}
#[test]
fn f_deriv_001_new_zero() {
    // A fresh tracker has no derivative and no samples.
    let d = DerivativeTracker::new();
    assert_eq!(d.derivative(), 0.0);
    assert_eq!(d.count(), 0);
}
#[test]
fn f_deriv_002_default() {
    // Default construction is equivalent to new().
    let d = DerivativeTracker::default();
    assert_eq!(d.derivative(), 0.0);
}
#[test]
fn f_deriv_003_alpha_clamped() {
    // Out-of-range smoothing factors are clamped; construction still succeeds.
    let below = DerivativeTracker::with_alpha(-0.5);
    let above = DerivativeTracker::with_alpha(2.0);
    assert_eq!(below.derivative(), 0.0);
    assert_eq!(above.derivative(), 0.0);
}
#[test]
fn f_deriv_004_first_update() {
    // A single sample defines no rate of change.
    let mut d = DerivativeTracker::new();
    d.update_with_dt(100.0, 1.0);
    assert_eq!(d.derivative(), 0.0);
    assert_eq!(d.count(), 1);
}
#[test]
fn f_deriv_005_positive() {
    // A rising signal has a positive derivative (accelerating).
    let mut d = DerivativeTracker::new();
    d.update_with_dt(0.0, 1.0);
    d.update_with_dt(100.0, 1.0);
    assert!(d.derivative() > 0.0);
    assert!(d.is_accelerating());
}
#[test]
fn f_deriv_006_negative() {
    // A falling signal has a negative derivative (decelerating).
    let mut d = DerivativeTracker::new();
    d.update_with_dt(100.0, 1.0);
    d.update_with_dt(0.0, 1.0);
    assert!(d.derivative() < 0.0);
    assert!(d.is_decelerating());
}
#[test]
fn f_deriv_007_smoothed() {
    // A constant slope of 10/step pushes the smoothed estimate above 5.
    let mut d = DerivativeTracker::new();
    d.update_with_dt(0.0, 1.0);
    (1..10).for_each(|i| d.update_with_dt(i as f64 * 10.0, 1.0));
    assert!(d.smoothed() > 5.0);
}
#[test]
fn f_deriv_008_reset() {
    // Reset clears both raw and smoothed estimates and the count.
    let mut d = DerivativeTracker::new();
    d.update_with_dt(100.0, 1.0);
    d.update_with_dt(200.0, 1.0);
    d.reset();
    assert_eq!(d.derivative(), 0.0);
    assert_eq!(d.smoothed(), 0.0);
    assert_eq!(d.count(), 0);
}
#[test]
fn f_deriv_009_debug() {
    // Debug output should name the type.
    let d = DerivativeTracker::new();
    let rendered = format!("{:?}", d);
    assert!(rendered.contains("DerivativeTracker"));
}
#[test]
fn f_deriv_010_clone() {
    // Cloning preserves the sample count.
    let mut d = DerivativeTracker::new();
    d.update_with_dt(50.0, 1.0);
    let copy = d.clone();
    assert_eq!(d.count(), copy.count());
}
#[test]
fn f_integ_001_new_zero() {
    // A fresh tracker has integrated nothing.
    let acc = IntegralTracker::new();
    assert_eq!(acc.integral(), 0.0);
    assert_eq!(acc.count(), 0);
}
#[test]
fn f_integ_002_default() {
    // Default construction is equivalent to new().
    let acc = IntegralTracker::default();
    assert_eq!(acc.integral(), 0.0);
}
#[test]
fn f_integ_003_first_update() {
    // The first sample only sets the baseline; no area yet.
    let mut acc = IntegralTracker::new();
    acc.update_with_dt(100.0, 1.0);
    assert_eq!(acc.integral(), 0.0);
    assert_eq!(acc.count(), 1);
}
#[test]
fn f_integ_004_constant() {
    // Constant 10 over one step integrates to 10.
    let mut acc = IntegralTracker::new();
    acc.update_with_dt(10.0, 1.0);
    acc.update_with_dt(10.0, 1.0);
    assert!((acc.integral() - 10.0).abs() < 0.01);
}
#[test]
fn f_integ_005_trapezoidal() {
    // Ramp 0→10 over one step integrates to 5 (trapezoid rule).
    let mut acc = IntegralTracker::new();
    acc.update_with_dt(0.0, 1.0);
    acc.update_with_dt(10.0, 1.0);
    assert!((acc.integral() - 5.0).abs() < 0.01);
}
#[test]
fn f_integ_006_accumulate() {
    // Two constant-10 steps accumulate an area of 20.
    let mut acc = IntegralTracker::new();
    acc.update_with_dt(10.0, 1.0);
    acc.update_with_dt(10.0, 1.0);
    acc.update_with_dt(10.0, 1.0);
    assert!((acc.integral() - 20.0).abs() < 0.01);
}
#[test]
fn f_integ_007_reset() {
    // Reset discards the accumulated area and the count.
    let mut acc = IntegralTracker::new();
    acc.update_with_dt(100.0, 1.0);
    acc.update_with_dt(100.0, 1.0);
    acc.reset();
    assert_eq!(acc.integral(), 0.0);
    assert_eq!(acc.count(), 0);
}
#[test]
fn f_integ_008_debug() {
    // Debug output should name the type.
    let acc = IntegralTracker::new();
    let rendered = format!("{:?}", acc);
    assert!(rendered.contains("IntegralTracker"));
}
#[test]
fn f_integ_009_clone() {
    // Cloning preserves the sample count.
    let mut acc = IntegralTracker::new();
    acc.update_with_dt(50.0, 1.0);
    let copy = acc.clone();
    assert_eq!(acc.count(), copy.count());
}
#[test]
fn f_integ_010_average() {
    // With one sample the average equals that sample.
    let mut acc = IntegralTracker::new();
    acc.update_with_dt(42.0, 1.0);
    assert_eq!(acc.average(), 42.0);
}
#[test]
fn f_corr_001_new_zero() {
    // A fresh tracker has no correlation and no pairs.
    let c = CorrelationTracker::new();
    assert_eq!(c.correlation(), 0.0);
    assert_eq!(c.count(), 0);
}
#[test]
fn f_corr_002_default() {
    // Default construction is equivalent to new().
    let c = CorrelationTracker::default();
    assert_eq!(c.correlation(), 0.0);
}
#[test]
fn f_corr_003_perfect_positive() {
    // y = x gives correlation ≈ +1.
    let mut c = CorrelationTracker::new();
    (0..10).for_each(|i| c.update(i as f64, i as f64));
    assert!(c.correlation() > 0.99, "Should be ~1.0");
    assert!(c.is_positive());
    assert!(c.is_strong());
}
#[test]
fn f_corr_004_perfect_negative() {
    // y = -x gives correlation ≈ -1.
    let mut c = CorrelationTracker::new();
    (0..10).for_each(|i| c.update(i as f64, -(i as f64)));
    assert!(c.correlation() < -0.99, "Should be ~-1.0");
    assert!(c.is_negative());
    assert!(c.is_strong());
}
#[test]
fn f_corr_005_no_correlation() {
    // A constant y is uncorrelated with any x.
    let mut c = CorrelationTracker::new();
    (0..10).for_each(|i| c.update(i as f64, 5.0));
    assert!(c.correlation().abs() < 0.1);
}
#[test]
fn f_corr_006_covariance() {
    // y = 2x moves with x, so covariance is positive.
    let mut c = CorrelationTracker::new();
    for &(x, y) in &[(1.0, 2.0), (2.0, 4.0), (3.0, 6.0)] {
        c.update(x, y);
    }
    assert!(c.covariance() > 0.0);
}
#[test]
fn f_corr_007_insufficient() {
    // A single pair defines no correlation.
    let mut c = CorrelationTracker::new();
    c.update(1.0, 2.0);
    assert_eq!(c.correlation(), 0.0);
}
#[test]
fn f_corr_008_reset() {
    // Reset discards all pairs.
    let mut c = CorrelationTracker::new();
    (0..10).for_each(|i| c.update(i as f64, i as f64));
    c.reset();
    assert_eq!(c.correlation(), 0.0);
    assert_eq!(c.count(), 0);
}
#[test]
fn f_corr_009_debug() {
    // Debug output should name the type.
    let c = CorrelationTracker::new();
    let rendered = format!("{:?}", c);
    assert!(rendered.contains("CorrelationTracker"));
}
#[test]
fn f_corr_010_clone() {
    // Cloning preserves the pair count.
    let mut c = CorrelationTracker::new();
    c.update(1.0, 2.0);
    c.update(2.0, 4.0);
    let copy = c.clone();
    assert_eq!(c.count(), copy.count());
}
#[test]
fn f_corr_011_clamped() {
    // The coefficient stays inside [-1, 1] even under rounding.
    let mut c = CorrelationTracker::new();
    (0..100).for_each(|i| c.update(i as f64, i as f64 * 2.0));
    let r = c.correlation();
    assert!(r >= -1.0 && r <= 1.0);
}
#[test]
fn f_corr_012_weak() {
    // With no data, no strength/direction predicates hold.
    let c = CorrelationTracker::new();
    assert!(!c.is_strong());
    assert!(!c.is_positive());
    assert!(!c.is_negative());
}
#[test]
fn f_circuit_001_starts_closed() {
    // A new breaker begins in the Closed (passing) state.
    let breaker = CircuitBreaker::new(5, 3, 1_000_000);
    assert_eq!(breaker.state(), CircuitState::Closed);
    assert!(breaker.is_closed());
}
#[test]
fn f_circuit_002_default() {
    // Default construction starts closed.
    let breaker = CircuitBreaker::default();
    assert!(breaker.is_closed());
}
#[test]
fn f_circuit_003_for_network() {
    // Preset constructor starts closed.
    let breaker = CircuitBreaker::for_network();
    assert!(breaker.is_closed());
}
#[test]
fn f_circuit_004_for_fast_fail() {
    // Preset constructor starts closed.
    let breaker = CircuitBreaker::for_fast_fail();
    assert!(breaker.is_closed());
}
#[test]
fn f_circuit_005_opens() {
    // Hitting the failure threshold trips the breaker open.
    let mut breaker = CircuitBreaker::new(3, 2, 1_000_000);
    for _ in 0..3 {
        breaker.record_failure();
    }
    assert!(breaker.is_open());
    assert_eq!(breaker.state(), CircuitState::Open);
}
#[test]
fn f_circuit_006_closed_allows() {
    // A closed breaker allows requests through.
    let mut breaker = CircuitBreaker::new(3, 2, 1_000_000);
    assert!(breaker.is_allowed());
}
#[test]
fn f_circuit_007_success_resets() {
    // A success below the threshold clears the failure streak.
    let mut breaker = CircuitBreaker::new(3, 2, 1_000_000);
    breaker.record_failure();
    breaker.record_failure();
    breaker.record_success();
    assert_eq!(breaker.failures(), 0);
    assert!(breaker.is_closed());
}
#[test]
fn f_circuit_008_reset() {
    // Manual reset closes an open breaker and zeroes failures.
    let mut breaker = CircuitBreaker::new(3, 2, 1_000_000);
    for _ in 0..3 {
        breaker.record_failure();
    }
    assert!(breaker.is_open());
    breaker.reset();
    assert!(breaker.is_closed());
    assert_eq!(breaker.failures(), 0);
}
#[test]
fn f_circuit_009_debug() {
    // Debug output should name the type.
    let breaker = CircuitBreaker::new(3, 2, 1_000_000);
    let rendered = format!("{:?}", breaker);
    assert!(rendered.contains("CircuitBreaker"));
}
#[test]
fn f_circuit_010_clone() {
    // Cloning preserves the failure count.
    let mut breaker = CircuitBreaker::new(3, 2, 1_000_000);
    breaker.record_failure();
    let copy = breaker.clone();
    assert_eq!(breaker.failures(), copy.failures());
}
#[test]
fn f_circuit_011_state_derives() {
    // CircuitState supports PartialEq, Debug, and Clone.
    let s1 = CircuitState::Closed;
    let s2 = CircuitState::Closed;
    assert_eq!(s1, s2);
    let _ = format!("{:?}", s1);
    let _ = s1.clone();
}
#[test]
fn f_circuit_012_halfopen_fails() {
    // With a zero recovery timeout, probing moves Open -> HalfOpen,
    // and a failed probe reopens the breaker.
    let mut breaker = CircuitBreaker::new(3, 2, 0);
    for _ in 0..3 {
        breaker.record_failure();
    }
    assert!(breaker.is_open());
    assert!(breaker.is_allowed());
    assert_eq!(breaker.state(), CircuitState::HalfOpen);
    breaker.record_failure();
    assert!(breaker.is_open());
}
#[test]
fn f_backoff_001_starts_zero() {
    // A new backoff has made no attempts.
    let b = ExponentialBackoff::new(100_000, 30_000_000);
    assert_eq!(b.attempt(), 0);
}
#[test]
fn f_backoff_002_default() {
    // Default construction starts at attempt zero.
    let b = ExponentialBackoff::default();
    assert_eq!(b.attempt(), 0);
}
#[test]
fn f_backoff_003_for_network() {
    // Preset constructor starts at attempt zero.
    let b = ExponentialBackoff::for_network();
    assert_eq!(b.attempt(), 0);
}
#[test]
fn f_backoff_004_for_fast() {
    // Preset constructor starts at attempt zero.
    let b = ExponentialBackoff::for_fast();
    assert_eq!(b.attempt(), 0);
}
#[test]
fn f_backoff_005_first_delay() {
    // Before any attempt, the delay equals the configured base.
    let b = ExponentialBackoff::new(100_000, 30_000_000);
    assert_eq!(b.current_delay(), 100_000);
}
#[test]
fn f_backoff_006_doubles() {
    // The default multiplier doubles the delay on each attempt.
    let mut b = ExponentialBackoff::new(100_000, 30_000_000);
    let first = b.next_delay();
    let second = b.next_delay();
    assert_eq!(second, first * 2);
}
#[test]
fn f_backoff_007_capped() {
    // The delay saturates at the configured maximum.
    let mut b = ExponentialBackoff::new(100_000, 500_000);
    for _ in 0..20 {
        b.next_delay();
    }
    assert!(b.current_delay() <= 500_000);
    assert!(b.is_at_max());
}
#[test]
fn f_backoff_008_ms() {
    // 100_000 µs reads as 100 ms.
    let b = ExponentialBackoff::new(100_000, 30_000_000);
    assert_eq!(b.current_delay_ms(), 100);
}
#[test]
fn f_backoff_009_reset() {
    // Reset returns to attempt zero and the base delay.
    let mut b = ExponentialBackoff::new(100_000, 30_000_000);
    b.next_delay();
    b.next_delay();
    b.reset();
    assert_eq!(b.attempt(), 0);
    assert_eq!(b.current_delay(), 100_000);
}
#[test]
fn f_backoff_010_multiplier() {
    // A custom multiplier scales each successive delay.
    let mut b = ExponentialBackoff::new(100_000, 30_000_000).with_multiplier(3.0);
    let first = b.next_delay();
    let second = b.next_delay();
    assert_eq!(second, first * 3);
}
#[test]
fn f_backoff_011_debug() {
    // Debug output should name the type.
    let b = ExponentialBackoff::new(100_000, 30_000_000);
    let rendered = format!("{:?}", b);
    assert!(rendered.contains("ExponentialBackoff"));
}
#[test]
fn f_backoff_012_clone() {
    // Cloning preserves the attempt count.
    let mut b = ExponentialBackoff::new(100_000, 30_000_000);
    b.next_delay();
    let copy = b.clone();
    assert_eq!(b.attempt(), copy.attempt());
}
#[test]
fn f_median_001_new_empty() {
    // A fresh estimator has no samples and a zero median.
    let m = SlidingMedian::new();
    assert_eq!(m.count(), 0);
    assert_eq!(m.median(), 0.0);
}
#[test]
fn f_median_002_default() {
    // Default construction is equivalent to new().
    let m = SlidingMedian::default();
    assert_eq!(m.count(), 0);
}
#[test]
fn f_median_003_for_latency() {
    // Preset constructor starts empty.
    let m = SlidingMedian::for_latency();
    assert_eq!(m.count(), 0);
}
#[test]
fn f_median_004_for_percentage() {
    // Preset constructor starts empty.
    let m = SlidingMedian::for_percentage();
    assert_eq!(m.count(), 0);
}
#[test]
fn f_median_005_single() {
    // One positive sample yields a positive median estimate.
    let mut m = SlidingMedian::for_percentage();
    m.update(50.0);
    assert!(m.median() > 0.0);
}
#[test]
fn f_median_006_minmax() {
    // Min and max follow the extremes of the stream.
    let mut m = SlidingMedian::new();
    for &v in &[100.0, 500.0, 300.0] {
        m.update(v);
    }
    assert_eq!(m.min(), 100.0);
    assert_eq!(m.max(), 500.0);
}
#[test]
fn f_median_007_percentile() {
    // On a 1..=100 ramp, the p90 estimate lands in [80, 100].
    let mut m = SlidingMedian::for_percentage();
    (1..=100).for_each(|i| m.update(i as f64));
    let p90 = m.percentile(90);
    assert!(p90 >= 80.0 && p90 <= 100.0);
}
#[test]
fn f_median_008_reset() {
    // Reset discards all samples.
    let mut m = SlidingMedian::new();
    m.update(50.0);
    m.reset();
    assert_eq!(m.count(), 0);
}
#[test]
fn f_median_009_debug() {
    // Debug output should name the type.
    let m = SlidingMedian::new();
    let rendered = format!("{:?}", m);
    assert!(rendered.contains("SlidingMedian"));
}
#[test]
fn f_median_010_clone() {
    // Cloning preserves the sample count.
    let mut m = SlidingMedian::new();
    m.update(50.0);
    let copy = m.clone();
    assert_eq!(m.count(), copy.count());
}
#[test]
fn f_hyst_001_new_zero() {
    // A fresh filter outputs zero and has seen nothing.
    let f = HysteresisFilter::new(1.0);
    assert_eq!(f.output(), 0.0);
    assert_eq!(f.count(), 0);
}
#[test]
fn f_hyst_002_default() {
    // Default dead band is 1.0.
    let f = HysteresisFilter::default();
    assert_eq!(f.dead_band(), 1.0);
}
#[test]
fn f_hyst_003_for_percentage() {
    // Percentage preset uses a 1.0 dead band.
    let f = HysteresisFilter::for_percentage();
    assert_eq!(f.dead_band(), 1.0);
}
#[test]
fn f_hyst_004_for_latency() {
    // Latency preset uses a 0.5 dead band.
    let f = HysteresisFilter::for_latency();
    assert_eq!(f.dead_band(), 0.5);
}
#[test]
fn f_hyst_005_first_update() {
    // The first sample always changes the output.
    let mut f = HysteresisFilter::new(1.0);
    let changed = f.update(50.0);
    assert!(changed);
    assert_eq!(f.output(), 50.0);
}
#[test]
fn f_hyst_006_within_deadband() {
    // A move smaller than the dead band is suppressed.
    let mut f = HysteresisFilter::new(5.0);
    f.update(50.0);
    let changed = f.update(52.0);
    assert!(!changed);
    assert_eq!(f.output(), 50.0);
}
#[test]
fn f_hyst_007_outside_deadband() {
    // A move larger than the dead band passes through.
    let mut f = HysteresisFilter::new(5.0);
    f.update(50.0);
    let changed = f.update(60.0);
    assert!(changed);
    assert_eq!(f.output(), 60.0);
}
#[test]
fn f_hyst_008_reset() {
    // Reset clears output and count.
    let mut f = HysteresisFilter::new(1.0);
    f.update(50.0);
    f.reset();
    assert_eq!(f.output(), 0.0);
    assert_eq!(f.count(), 0);
}
#[test]
fn f_hyst_009_debug() {
    // Debug output should name the type.
    let f = HysteresisFilter::new(1.0);
    let rendered = format!("{:?}", f);
    assert!(rendered.contains("HysteresisFilter"));
}
#[test]
fn f_hyst_010_clone() {
    // Cloning preserves the current output.
    let mut f = HysteresisFilter::new(1.0);
    f.update(50.0);
    let copy = f.clone();
    assert_eq!(f.output(), copy.output());
}
#[test]
fn f_spike_001_new_zero() {
    // A fresh filter has no average and no samples.
    let filt = SpikeFilter::new(3.0);
    assert_eq!(filt.average(), 0.0);
    assert_eq!(filt.count(), 0);
}
#[test]
fn f_spike_002_default() {
    // Default construction starts empty.
    let filt = SpikeFilter::default();
    assert_eq!(filt.count(), 0);
}
#[test]
fn f_spike_003_first_accepted() {
    // The first sample is always accepted as-is.
    let mut filt = SpikeFilter::new(3.0);
    let out = filt.update(50.0);
    assert_eq!(out, 50.0);
    assert_eq!(filt.last_accepted(), 50.0);
}
#[test]
fn f_spike_004_normal_accepted() {
    // A value near the baseline passes through unchanged.
    let mut filt = SpikeFilter::new(10.0);
    filt.update(50.0);
    let out = filt.update(52.0);
    assert_eq!(out, 52.0);
}
#[test]
fn f_spike_005_spike_rejected() {
    // A wild outlier is replaced by the last accepted value and tallied.
    let mut filt = SpikeFilter::new(10.0);
    filt.update(50.0);
    let out = filt.update(1000.0);
    assert_eq!(out, 50.0);
    assert_eq!(filt.spikes(), 1);
}
#[test]
fn f_spike_006_spike_rate() {
    // With one spike among several samples, the rate is positive.
    let mut filt = SpikeFilter::new(10.0);
    filt.update(50.0);
    filt.update(51.0);
    filt.update(1000.0);
    filt.update(52.0);
    assert!(filt.spike_rate() > 0.0);
}
#[test]
fn f_spike_007_reset() {
    // Reset clears the spike tally and the sample count.
    let mut filt = SpikeFilter::new(3.0);
    filt.update(50.0);
    filt.update(1000.0);
    filt.reset();
    assert_eq!(filt.spikes(), 0);
    assert_eq!(filt.count(), 0);
}
#[test]
fn f_spike_008_debug() {
    // Debug output should name the type.
    let filt = SpikeFilter::new(3.0);
    let rendered = format!("{:?}", filt);
    assert!(rendered.contains("SpikeFilter"));
}
#[test]
fn f_spike_009_clone() {
    // Cloning preserves the sample count.
    let mut filt = SpikeFilter::new(3.0);
    filt.update(50.0);
    let copy = filt.clone();
    assert_eq!(filt.count(), copy.count());
}
#[test]
fn f_spike_010_for_percentage() {
    // Preset constructor starts empty.
    let filt = SpikeFilter::for_percentage();
    assert_eq!(filt.count(), 0);
}
// ---- GaugeTracker tests: a set/inc/dec gauge that also tracks
// min, max, and the average of all observed values ----
#[test]
fn f_gauge_001_new_zero() {
let gt = GaugeTracker::new();
assert_eq!(gt.current(), 0.0);
assert_eq!(gt.count(), 0);
}
#[test]
fn f_gauge_002_default() {
let gt = GaugeTracker::default();
assert_eq!(gt.current(), 0.0);
}
#[test]
fn f_gauge_003_set() {
let mut gt = GaugeTracker::new();
gt.set(50.0);
assert_eq!(gt.current(), 50.0);
}
// inc()/dec() step the gauge by exactly 1.0.
#[test]
fn f_gauge_004_inc() {
let mut gt = GaugeTracker::new();
gt.set(10.0);
gt.inc();
assert_eq!(gt.current(), 11.0);
}
#[test]
fn f_gauge_005_dec() {
let mut gt = GaugeTracker::new();
gt.set(10.0);
gt.dec();
assert_eq!(gt.current(), 9.0);
}
// min/max reflect the extremes over all set() calls, not just the last value.
#[test]
fn f_gauge_006_minmax() {
let mut gt = GaugeTracker::new();
gt.set(50.0);
gt.set(20.0);
gt.set(80.0);
assert_eq!(gt.min(), 20.0);
assert_eq!(gt.max(), 80.0);
}
// Average of 10, 20, 30 is exactly 20.
#[test]
fn f_gauge_007_average() {
let mut gt = GaugeTracker::new();
gt.set(10.0);
gt.set(20.0);
gt.set(30.0);
assert_eq!(gt.average(), 20.0);
}
// range() = max - min.
#[test]
fn f_gauge_008_range() {
let mut gt = GaugeTracker::new();
gt.set(20.0);
gt.set(80.0);
assert_eq!(gt.range(), 60.0);
}
#[test]
fn f_gauge_009_reset() {
let mut gt = GaugeTracker::new();
gt.set(50.0);
gt.reset();
assert_eq!(gt.current(), 0.0);
assert_eq!(gt.count(), 0);
}
#[test]
fn f_gauge_010_debug() {
let gt = GaugeTracker::new();
let debug = format!("{:?}", gt);
assert!(debug.contains("GaugeTracker"));
}
#[test]
fn f_gauge_011_clone() {
let mut gt = GaugeTracker::new();
gt.set(50.0);
let cloned = gt.clone();
assert_eq!(gt.current(), cloned.current());
}
// add() offsets the gauge by an arbitrary delta.
#[test]
fn f_gauge_012_add() {
let mut gt = GaugeTracker::new();
gt.set(50.0);
gt.add(10.0);
assert_eq!(gt.current(), 60.0);
}
// ---- CounterPair tests: paired success/failure counters with derived rates ----
#[test]
fn f_pair_001_new_zero() {
let cp = CounterPair::new();
assert_eq!(cp.successes(), 0);
assert_eq!(cp.failures(), 0);
}
#[test]
fn f_pair_002_default() {
let cp = CounterPair::default();
assert_eq!(cp.total(), 0);
}
#[test]
fn f_pair_003_success() {
let mut cp = CounterPair::new();
cp.success();
assert_eq!(cp.successes(), 1);
}
#[test]
fn f_pair_004_failure() {
let mut cp = CounterPair::new();
cp.failure();
assert_eq!(cp.failures(), 1);
}
// total() = successes + failures.
#[test]
fn f_pair_005_total() {
let mut cp = CounterPair::new();
cp.success();
cp.success();
cp.failure();
assert_eq!(cp.total(), 3);
}
// 80 successes out of 100 events → exactly 80% success rate.
#[test]
fn f_pair_006_success_rate() {
let mut cp = CounterPair::new();
cp.add_successes(80);
cp.add_failures(20);
assert_eq!(cp.success_rate(), 80.0);
}
#[test]
fn f_pair_007_failure_rate() {
let mut cp = CounterPair::new();
cp.add_successes(80);
cp.add_failures(20);
assert_eq!(cp.failure_rate(), 20.0);
}
// With zero events the pair reports 100% success (no divide-by-zero; empty
// is treated as healthy).
#[test]
fn f_pair_008_empty_healthy() {
let cp = CounterPair::new();
assert_eq!(cp.success_rate(), 100.0);
}
// 95% success is healthy against a 90% bar but not against a 99% bar.
#[test]
fn f_pair_009_is_healthy() {
let mut cp = CounterPair::new();
cp.add_successes(95);
cp.add_failures(5);
assert!(cp.is_healthy(90.0));
assert!(!cp.is_healthy(99.0));
}
#[test]
fn f_pair_010_reset() {
let mut cp = CounterPair::new();
cp.success();
cp.failure();
cp.reset();
assert_eq!(cp.total(), 0);
}
#[test]
fn f_pair_011_debug() {
let cp = CounterPair::new();
let debug = format!("{:?}", cp);
assert!(debug.contains("CounterPair"));
}
#[test]
fn f_pair_012_clone() {
let mut cp = CounterPair::new();
cp.success();
let cloned = cp.clone();
assert_eq!(cp.successes(), cloned.successes());
}
// ---- HealthScore tests: weighted multi-component score (0-100) mapped to
// a Healthy/Degraded/Warning/Critical status ----
#[test]
fn f_health_001_new_100() {
let hs = HealthScore::new();
assert_eq!(hs.score(), 100.0);
}
#[test]
fn f_health_002_default() {
let hs = HealthScore::default();
assert_eq!(hs.score(), 100.0);
}
// With a single component, the overall score equals that component's score.
#[test]
fn f_health_003_set() {
let mut hs = HealthScore::new();
hs.set(0, 80.0);
assert_eq!(hs.score(), 80.0);
}
// Weighted average: (100*2 + 50*1) / (2 + 1) ≈ 83.3, which lies in (80, 90).
#[test]
fn f_health_004_weighted() {
let mut hs = HealthScore::new();
hs.set(0, 100.0);
hs.set_weight(0, 2.0);
hs.set(1, 50.0);
hs.set_weight(1, 1.0);
let score = hs.score();
assert!(score > 80.0 && score < 90.0);
}
// Status thresholds implied by these tests: 100 → Healthy, 75 → Degraded,
// 55 → Warning, 30 → Critical.
#[test]
fn f_health_005_status_healthy() {
let hs = HealthScore::new();
assert_eq!(hs.status(), HealthStatus::Healthy);
}
#[test]
fn f_health_006_status_degraded() {
let mut hs = HealthScore::new();
hs.set(0, 75.0);
assert_eq!(hs.status(), HealthStatus::Degraded);
}
#[test]
fn f_health_007_status_warning() {
let mut hs = HealthScore::new();
hs.set(0, 55.0);
assert_eq!(hs.status(), HealthStatus::Warning);
}
#[test]
fn f_health_008_status_critical() {
let mut hs = HealthScore::new();
hs.set(0, 30.0);
assert_eq!(hs.status(), HealthStatus::Critical);
}
// min_score() reports the worst individual component.
#[test]
fn f_health_009_min_score() {
let mut hs = HealthScore::new();
hs.set(0, 90.0);
hs.set(1, 60.0);
hs.set(2, 80.0);
assert_eq!(hs.min_score(), 60.0);
}
// reset() restores the pristine 100.0 score.
#[test]
fn f_health_010_reset() {
let mut hs = HealthScore::new();
hs.set(0, 50.0);
hs.reset();
assert_eq!(hs.score(), 100.0);
}
#[test]
fn f_health_011_debug() {
let hs = HealthScore::new();
let debug = format!("{:?}", hs);
assert!(debug.contains("HealthScore"));
}
#[test]
fn f_health_012_clone() {
let mut hs = HealthScore::new();
hs.set(0, 75.0);
let cloned = hs.clone();
assert_eq!(hs.score(), cloned.score());
}
// ---- BatchProcessor tests: accumulates items and completes a batch each
// time the configured batch size is reached ----
#[test]
fn f_batch_001_new() {
let bp = BatchProcessor::new(10);
assert_eq!(bp.batches_completed(), 0);
assert_eq!(bp.total_items(), 0);
}
// Default batch size is 100 (remaining() reports slots left in the current batch).
#[test]
fn f_batch_002_default() {
let bp = BatchProcessor::default();
assert_eq!(bp.remaining(), 100);
}
// add() returns true only on the item that completes a batch.
#[test]
fn f_batch_003_add_partial() {
let mut bp = BatchProcessor::new(3);
assert!(!bp.add());
assert!(!bp.add());
assert!(bp.add()); }
// Completing a batch resets remaining() to the full batch size.
#[test]
fn f_batch_004_batch_complete() {
let mut bp = BatchProcessor::new(2);
bp.add();
bp.add();
assert_eq!(bp.batches_completed(), 1);
assert_eq!(bp.remaining(), 2);
}
// 25 items at batch size 10 → 2 full batches, 5 items carried into the next.
#[test]
fn f_batch_005_add_many() {
let mut bp = BatchProcessor::new(10);
let batches = bp.add_many(25);
assert_eq!(batches, 2);
assert_eq!(bp.remaining(), 5);
}
// 5 of 10 slots used → 50% fill.
#[test]
fn f_batch_006_fill_percentage() {
let mut bp = BatchProcessor::new(10);
bp.add_many(5);
assert!((bp.fill_percentage() - 50.0).abs() < 0.01);
}
// Preset constructors; these pin their batch sizes (1000 / 100 / 50).
#[test]
fn f_batch_007_for_network() {
let bp = BatchProcessor::for_network();
assert_eq!(bp.remaining(), 1000);
}
#[test]
fn f_batch_008_for_disk() {
let bp = BatchProcessor::for_disk();
assert_eq!(bp.remaining(), 100);
}
#[test]
fn f_batch_009_for_metrics() {
let bp = BatchProcessor::for_metrics();
assert_eq!(bp.remaining(), 50);
}
// flush() force-completes a partial batch and empties the buffer.
#[test]
fn f_batch_010_flush() {
let mut bp = BatchProcessor::new(10);
bp.add_many(5);
bp.flush();
assert_eq!(bp.batches_completed(), 1);
assert_eq!(bp.remaining(), 10);
}
#[test]
fn f_batch_011_reset() {
let mut bp = BatchProcessor::new(10);
bp.add_many(25);
bp.reset();
assert_eq!(bp.batches_completed(), 0);
assert_eq!(bp.total_items(), 0);
}
#[test]
fn f_batch_012_clone() {
let mut bp = BatchProcessor::new(10);
bp.add_many(5);
let cloned = bp.clone();
assert_eq!(bp.remaining(), cloned.remaining());
}
// ---- PipelineStage tests: tracks in-flight depth (enter/exit), peak depth,
// throughput, and per-item latency (exit() takes the latency in µs;
// exit_simple() records no latency) ----
#[test]
fn f_pipe_001_new() {
let ps = PipelineStage::new();
assert!(ps.is_idle());
assert_eq!(ps.depth(), 0);
}
#[test]
fn f_pipe_002_default() {
let ps = PipelineStage::default();
assert!(ps.is_idle());
}
#[test]
fn f_pipe_003_enter() {
let mut ps = PipelineStage::new();
ps.enter();
assert_eq!(ps.depth(), 1);
assert!(!ps.is_idle());
}
#[test]
fn f_pipe_004_exit() {
let mut ps = PipelineStage::new();
ps.enter();
ps.exit(1000);
assert_eq!(ps.depth(), 0);
}
// Peak depth is sticky: it stays at 3 even after an exit.
#[test]
fn f_pipe_005_peak() {
let mut ps = PipelineStage::new();
ps.enter();
ps.enter();
ps.enter();
ps.exit_simple();
assert_eq!(ps.peak_depth(), 3);
}
// Average of 1000 µs and 2000 µs latencies is 1500 µs.
#[test]
fn f_pipe_006_avg_latency() {
let mut ps = PipelineStage::new();
ps.enter();
ps.exit(1000);
ps.enter();
ps.exit(2000);
assert!((ps.avg_latency_us() - 1500.0).abs() < 0.01);
}
// 1000 µs = 1 ms.
#[test]
fn f_pipe_007_latency_ms() {
let mut ps = PipelineStage::new();
ps.enter();
ps.exit(1000);
assert!((ps.avg_latency_ms() - 1.0).abs() < 0.01);
}
// throughput() counts completed exits.
#[test]
fn f_pipe_008_throughput() {
let mut ps = PipelineStage::new();
ps.enter();
ps.exit_simple();
ps.enter();
ps.exit_simple();
assert_eq!(ps.throughput(), 2);
}
// total_entered() counts enters regardless of exits.
#[test]
fn f_pipe_009_total_entered() {
let mut ps = PipelineStage::new();
ps.enter();
ps.enter();
ps.exit_simple();
assert_eq!(ps.total_entered(), 2);
}
// Depth 3 exceeds the backlog threshold of 2.
#[test]
fn f_pipe_010_backlogged() {
let mut ps = PipelineStage::new();
ps.enter();
ps.enter();
ps.enter();
assert!(ps.is_backlogged(2));
}
#[test]
fn f_pipe_011_reset() {
let mut ps = PipelineStage::new();
ps.enter();
ps.exit(1000);
ps.reset();
assert!(ps.is_idle());
assert_eq!(ps.throughput(), 0);
}
#[test]
fn f_pipe_012_clone() {
let mut ps = PipelineStage::new();
ps.enter();
let cloned = ps.clone();
assert_eq!(ps.depth(), cloned.depth());
}
// ---- WorkQueue tests: bounded queue counters with peak size, wait-time
// averaging (dequeue() takes the wait in µs), and utilization ----
#[test]
fn f_queue_001_new() {
let wq = WorkQueue::new();
assert!(wq.is_empty());
assert_eq!(wq.size(), 0);
}
#[test]
fn f_queue_002_default() {
let wq = WorkQueue::default();
assert!(wq.is_empty());
}
#[test]
fn f_queue_003_with_capacity() {
let wq = WorkQueue::with_capacity(10);
assert_eq!(wq.remaining_capacity(), 10);
}
#[test]
fn f_queue_004_enqueue() {
let mut wq = WorkQueue::new();
assert!(wq.enqueue());
assert_eq!(wq.size(), 1);
}
#[test]
fn f_queue_005_dequeue() {
let mut wq = WorkQueue::new();
wq.enqueue();
assert!(wq.dequeue(100));
assert!(wq.is_empty());
}
// enqueue() returns false once the capacity is exhausted.
#[test]
fn f_queue_006_full() {
let mut wq = WorkQueue::with_capacity(1);
wq.enqueue();
assert!(!wq.enqueue());
assert!(wq.is_full());
}
// Dequeuing from an empty queue fails rather than panicking.
#[test]
fn f_queue_007_empty_dequeue() {
let mut wq = WorkQueue::new();
assert!(!wq.dequeue_simple());
}
// Peak size is sticky across later dequeues.
#[test]
fn f_queue_008_peak() {
let mut wq = WorkQueue::new();
wq.enqueue();
wq.enqueue();
wq.dequeue_simple();
assert_eq!(wq.peak_size(), 2);
}
// Average of 1000 µs and 2000 µs waits is 1500 µs.
#[test]
fn f_queue_009_avg_wait() {
let mut wq = WorkQueue::new();
wq.enqueue();
wq.dequeue(1000);
wq.enqueue();
wq.dequeue(2000);
assert!((wq.avg_wait_us() - 1500.0).abs() < 0.01);
}
// 5 of 10 slots in use → 50% utilization.
#[test]
fn f_queue_010_utilization() {
let mut wq = WorkQueue::with_capacity(10);
wq.enqueue();
wq.enqueue();
wq.enqueue();
wq.enqueue();
wq.enqueue();
assert!((wq.utilization() - 50.0).abs() < 0.01);
}
#[test]
fn f_queue_011_reset() {
let mut wq = WorkQueue::new();
wq.enqueue();
wq.dequeue(1000);
wq.reset();
assert!(wq.is_empty());
assert_eq!(wq.total_dequeued(), 0);
}
#[test]
fn f_queue_012_clone() {
let mut wq = WorkQueue::new();
wq.enqueue();
let cloned = wq.clone();
assert_eq!(wq.size(), cloned.size());
}
// ---- LeakyBucket tests: new(capacity, leak_rate_per_sec); add() and
// update_with_time() take timestamps in microseconds ----
#[test]
fn f_leak_001_new() {
let lb = LeakyBucket::new(100.0, 10.0);
assert!(lb.is_empty());
assert_eq!(lb.overflows(), 0);
}
#[test]
fn f_leak_002_default() {
let lb = LeakyBucket::default();
assert!(lb.is_empty());
}
#[test]
fn f_leak_003_add() {
let mut lb = LeakyBucket::new(100.0, 10.0);
assert!(lb.add(50.0, 0));
assert!((lb.level() - 50.0).abs() < 0.01);
}
// 80 + 50 would exceed the 100 capacity: the add fails and is counted
// as an overflow.
#[test]
fn f_leak_004_overflow() {
let mut lb = LeakyBucket::new(100.0, 10.0);
assert!(lb.add(80.0, 0));
assert!(!lb.add(50.0, 0)); assert_eq!(lb.overflows(), 1);
}
// 1 second elapses (1_001_000 µs - 1000 µs) at 10 units/s → level drops
// from 50 to about 40.
#[test]
fn f_leak_005_leak() {
let mut lb = LeakyBucket::new(100.0, 10.0);
lb.add(50.0, 1000); lb.update_with_time(1_001_000); assert!(lb.level() < 45.0);
}
#[test]
fn f_leak_006_fill_percentage() {
let mut lb = LeakyBucket::new(100.0, 10.0);
lb.add(50.0, 0);
assert!((lb.fill_percentage() - 50.0).abs() < 0.01);
}
#[test]
fn f_leak_007_for_api() {
let lb = LeakyBucket::for_api();
assert!(lb.is_empty());
}
#[test]
fn f_leak_008_for_network() {
let lb = LeakyBucket::for_network();
assert!(lb.is_empty());
}
// At 100 units/s, 1 second drains the full 50-unit level.
#[test]
fn f_leak_009_full_leak() {
let mut lb = LeakyBucket::new(100.0, 100.0);
lb.add(50.0, 1000); lb.update_with_time(1_001_000); assert!(lb.is_empty());
}
// reset() clears both the level and the overflow counter.
#[test]
fn f_leak_010_reset() {
let mut lb = LeakyBucket::new(100.0, 10.0);
lb.add(50.0, 0);
lb.add(200.0, 0); lb.reset();
assert!(lb.is_empty());
assert_eq!(lb.overflows(), 0);
}
#[test]
fn f_leak_011_debug() {
let lb = LeakyBucket::new(100.0, 10.0);
let debug = format!("{:?}", lb);
assert!(debug.contains("LeakyBucket"));
}
#[test]
fn f_leak_012_clone() {
let mut lb = LeakyBucket::new(100.0, 10.0);
lb.add(50.0, 0);
let cloned = lb.clone();
assert!((lb.level() - cloned.level()).abs() < 0.01);
}
// ---- SlidingWindowRate tests: new(window_us, max_events); record() and
// update_with_time() take timestamps in microseconds ----
#[test]
fn f_slide_001_new() {
let sw = SlidingWindowRate::new(1_000_000, 100);
assert_eq!(sw.count(), 0);
assert_eq!(sw.exceeded(), 0);
}
#[test]
fn f_slide_002_default() {
let sw = SlidingWindowRate::default();
assert_eq!(sw.count(), 0);
}
#[test]
fn f_slide_003_record() {
let mut sw = SlidingWindowRate::new(1_000_000, 100);
assert!(sw.record(0));
assert_eq!(sw.count(), 1);
}
// The 4th event in a 3-event window is refused and counted as exceeded.
#[test]
fn f_slide_004_exceed() {
let mut sw = SlidingWindowRate::new(1_000_000, 3);
sw.record(0);
sw.record(0);
sw.record(0);
assert!(!sw.record(0)); assert_eq!(sw.exceeded(), 1);
}
// Advancing time past the 1 s window (events at t=1000 µs, update at
// t=2_001_000 µs) rotates the window and drops the old events.
#[test]
fn f_slide_005_rotation() {
let mut sw = SlidingWindowRate::new(1_000_000, 100);
sw.record(1000); sw.record(1000);
sw.update_with_time(2_001_000); assert_eq!(sw.count(), 0);
}
// 50 events against a limit of 100 → 50% of the allowed rate.
#[test]
fn f_slide_006_rate_percentage() {
let mut sw = SlidingWindowRate::new(1_000_000, 100);
for _ in 0..50 {
sw.record(0);
}
assert!((sw.rate_percentage() - 50.0).abs() < 0.01);
}
// At the limit (2 of 2), the next record would exceed.
#[test]
fn f_slide_007_would_exceed() {
let mut sw = SlidingWindowRate::new(1_000_000, 2);
sw.record(0);
sw.record(0);
assert!(sw.would_exceed());
}
#[test]
fn f_slide_008_per_second() {
let sw = SlidingWindowRate::per_second(100);
assert_eq!(sw.count(), 0);
}
#[test]
fn f_slide_009_per_minute() {
let sw = SlidingWindowRate::per_minute(100);
assert_eq!(sw.count(), 0);
}
#[test]
fn f_slide_010_reset() {
let mut sw = SlidingWindowRate::new(1_000_000, 100);
sw.record(0);
sw.reset();
assert_eq!(sw.count(), 0);
assert_eq!(sw.exceeded(), 0);
}
#[test]
fn f_slide_011_debug() {
let sw = SlidingWindowRate::new(1_000_000, 100);
let debug = format!("{:?}", sw);
assert!(debug.contains("SlidingWindowRate"));
}
#[test]
fn f_slide_012_clone() {
let mut sw = SlidingWindowRate::new(1_000_000, 100);
sw.record(0);
let cloned = sw.clone();
assert_eq!(sw.count(), cloned.count());
}
// ---- ResourcePool tests: acquire()/release() against a fixed pool size;
// acquire() takes the wait time in µs ----
#[test]
fn f_pool_001_new() {
let pool = ResourcePool::new(10);
assert!(pool.is_idle());
assert_eq!(pool.available(), 10);
}
#[test]
fn f_pool_002_default() {
let pool = ResourcePool::default();
assert_eq!(pool.available(), 10);
}
#[test]
fn f_pool_003_acquire() {
let mut pool = ResourcePool::new(10);
assert!(pool.acquire(100));
assert_eq!(pool.available(), 9);
}
#[test]
fn f_pool_004_release() {
let mut pool = ResourcePool::new(10);
pool.acquire(100);
pool.release();
assert_eq!(pool.available(), 10);
}
// Acquiring from an empty pool fails and marks the pool exhausted.
#[test]
fn f_pool_005_exhausted() {
let mut pool = ResourcePool::new(1);
pool.acquire(100);
assert!(!pool.acquire(100));
assert!(pool.is_exhausted());
}
// 5 of 10 resources held → 50% utilization.
#[test]
fn f_pool_006_utilization() {
let mut pool = ResourcePool::new(10);
for _ in 0..5 {
pool.acquire(100);
}
assert!((pool.utilization() - 50.0).abs() < 0.01);
}
// Average of 1000 µs and 2000 µs waits is 1500 µs.
#[test]
fn f_pool_007_avg_wait() {
let mut pool = ResourcePool::new(10);
pool.acquire(1000);
pool.acquire(2000);
assert!((pool.avg_wait_us() - 1500.0).abs() < 0.01);
}
// 2 failed acquires out of 3 attempts ≈ 66.7% > 60%.
#[test]
fn f_pool_008_timeout_rate() {
let mut pool = ResourcePool::new(1);
pool.acquire(100);
pool.acquire(100); pool.acquire(100); assert!(pool.timeout_rate() > 60.0);
}
// Peak utilization (3 of 10 → 30%) is sticky after a release.
#[test]
fn f_pool_009_peak() {
let mut pool = ResourcePool::new(10);
pool.acquire(100);
pool.acquire(100);
pool.acquire(100);
pool.release();
assert!((pool.peak_utilization() - 30.0).abs() < 0.01);
}
// Preset constructors pin their pool sizes (20 / 100).
#[test]
fn f_pool_010_for_database() {
let pool = ResourcePool::for_database();
assert_eq!(pool.available(), 20);
}
#[test]
fn f_pool_011_for_http() {
let pool = ResourcePool::for_http();
assert_eq!(pool.available(), 100);
}
#[test]
fn f_pool_012_reset() {
let mut pool = ResourcePool::new(10);
pool.acquire(100);
pool.reset();
assert!(pool.is_idle());
}
// ---- Histogram2D tests: new(x_min, x_max, y_min, y_max). The value
// (50, 50) on a 0-100 range landing in cell (5, 5) suggests a 10x10
// bucket grid — confirm against the Histogram2D implementation.
#[test]
fn f_hist2d_001_new() {
let h = Histogram2D::new(0.0, 100.0, 0.0, 100.0);
assert_eq!(h.count(), 0);
}
#[test]
fn f_hist2d_002_default() {
let h = Histogram2D::default();
assert_eq!(h.count(), 0);
}
#[test]
fn f_hist2d_003_add() {
let mut h = Histogram2D::new(0.0, 100.0, 0.0, 100.0);
h.add(50.0, 50.0);
assert_eq!(h.count(), 1);
}
#[test]
fn f_hist2d_004_get() {
let mut h = Histogram2D::new(0.0, 100.0, 0.0, 100.0);
h.add(50.0, 50.0);
assert_eq!(h.get(5, 5), 1);
}
// Both samples in one cell → that cell holds 100% of the mass.
#[test]
fn f_hist2d_005_density() {
let mut h = Histogram2D::new(0.0, 100.0, 0.0, 100.0);
h.add(50.0, 50.0);
h.add(50.0, 50.0);
assert!((h.density(5, 5) - 100.0).abs() < 0.01);
}
#[test]
fn f_hist2d_006_max_count() {
let mut h = Histogram2D::new(0.0, 100.0, 0.0, 100.0);
h.add(50.0, 50.0);
h.add(50.0, 50.0);
h.add(10.0, 10.0);
assert_eq!(h.max_count(), 2);
}
// hotspot() returns the (x, y) cell with the highest count.
#[test]
fn f_hist2d_007_hotspot() {
let mut h = Histogram2D::new(0.0, 100.0, 0.0, 100.0);
h.add(50.0, 50.0);
h.add(50.0, 50.0);
assert_eq!(h.hotspot(), (5, 5));
}
#[test]
fn f_hist2d_008_for_latency() {
let h = Histogram2D::for_latency_throughput();
assert_eq!(h.count(), 0);
}
#[test]
fn f_hist2d_009_for_cpu() {
let h = Histogram2D::for_cpu_memory();
assert_eq!(h.count(), 0);
}
#[test]
fn f_hist2d_010_reset() {
let mut h = Histogram2D::new(0.0, 100.0, 0.0, 100.0);
h.add(50.0, 50.0);
h.reset();
assert_eq!(h.count(), 0);
}
#[test]
fn f_hist2d_011_debug() {
let h = Histogram2D::new(0.0, 100.0, 0.0, 100.0);
let debug = format!("{:?}", h);
assert!(debug.contains("Histogram2D"));
}
#[test]
fn f_hist2d_012_clone() {
let mut h = Histogram2D::new(0.0, 100.0, 0.0, 100.0);
h.add(50.0, 50.0);
let cloned = h.clone();
assert_eq!(h.count(), cloned.count());
}
// ---- ReservoirSampler tests: keeps at most `capacity` samples while
// counting every value seen ----
#[test]
fn f_reservoir_001_new() {
let s = ReservoirSampler::new(10);
assert!(s.is_empty());
assert_eq!(s.len(), 0);
}
#[test]
fn f_reservoir_002_default() {
let s = ReservoirSampler::default();
assert!(s.is_empty());
}
#[test]
fn f_reservoir_003_add() {
let mut s = ReservoirSampler::new(10);
s.add(1.0);
s.add(2.0);
assert_eq!(s.len(), 2);
}
#[test]
fn f_reservoir_004_get() {
let mut s = ReservoirSampler::new(10);
s.add(42.0);
assert_eq!(s.get(0), Some(42.0));
}
// With capacity 2, a third add keeps len() at 2 but total_seen() at 3.
#[test]
fn f_reservoir_005_total_seen() {
let mut s = ReservoirSampler::new(2);
s.add(1.0);
s.add(2.0);
s.add(3.0);
assert_eq!(s.total_seen(), 3);
assert_eq!(s.len(), 2);
}
#[test]
fn f_reservoir_006_mean() {
let mut s = ReservoirSampler::new(10);
s.add(10.0);
s.add(20.0);
assert!((s.mean() - 15.0).abs() < 0.01);
}
#[test]
fn f_reservoir_007_min() {
let mut s = ReservoirSampler::new(10);
s.add(30.0);
s.add(10.0);
s.add(20.0);
assert!((s.min() - 10.0).abs() < 0.01);
}
#[test]
fn f_reservoir_008_max() {
let mut s = ReservoirSampler::new(10);
s.add(10.0);
s.add(30.0);
s.add(20.0);
assert!((s.max() - 30.0).abs() < 0.01);
}
// get() is a checked lookup: out-of-bounds yields None, not a panic.
#[test]
fn f_reservoir_009_oob() {
let s = ReservoirSampler::new(10);
assert_eq!(s.get(0), None);
}
#[test]
fn f_reservoir_010_reset() {
let mut s = ReservoirSampler::new(10);
s.add(1.0);
s.reset();
assert!(s.is_empty());
}
#[test]
fn f_reservoir_011_debug() {
let s = ReservoirSampler::new(10);
let debug = format!("{:?}", s);
assert!(debug.contains("ReservoirSampler"));
}
#[test]
fn f_reservoir_012_clone() {
let mut s = ReservoirSampler::new(10);
s.add(42.0);
let cloned = s.clone();
assert_eq!(s.len(), cloned.len());
}
// ---- ExponentialHistogram tests: new(base_width). Bucket upper bounds
// double per bucket (2.0, 4.0, ... for base 1.0, per test 009).
#[test]
fn f_exphist_001_new() {
let h = ExponentialHistogram::new(1.0);
assert_eq!(h.count(), 0);
}
#[test]
fn f_exphist_002_default() {
let h = ExponentialHistogram::default();
assert_eq!(h.count(), 0);
}
#[test]
fn f_exphist_003_add() {
let mut h = ExponentialHistogram::new(1.0);
h.add(5.0);
assert_eq!(h.count(), 1);
}
// At least the 0.5 sample lands in bucket 0 (upper bound 2.0).
#[test]
fn f_exphist_004_bucket() {
let mut h = ExponentialHistogram::new(1.0);
h.add(0.5); h.add(1.5); h.add(3.0); h.add(5.0); assert!(h.bucket_count(0) >= 1);
}
// mean() is computed from the raw values, not bucket midpoints.
#[test]
fn f_exphist_005_mean() {
let mut h = ExponentialHistogram::new(1.0);
h.add(10.0);
h.add(20.0);
assert!((h.mean() - 15.0).abs() < 0.01);
}
// Three small samples vs one large → bucket 0 is the mode.
#[test]
fn f_exphist_006_mode() {
let mut h = ExponentialHistogram::new(1.0);
h.add(0.5);
h.add(0.6);
h.add(0.7);
h.add(10.0);
assert_eq!(h.mode_bucket(), 0);
}
#[test]
fn f_exphist_007_for_latency() {
let h = ExponentialHistogram::for_latency_ms();
assert_eq!(h.count(), 0);
}
#[test]
fn f_exphist_008_for_bytes() {
let h = ExponentialHistogram::for_bytes_kb();
assert_eq!(h.count(), 0);
}
#[test]
fn f_exphist_009_upper_bound() {
let h = ExponentialHistogram::new(1.0);
assert!((h.bucket_upper_bound(0) - 2.0).abs() < 0.01);
assert!((h.bucket_upper_bound(1) - 4.0).abs() < 0.01);
}
#[test]
fn f_exphist_010_reset() {
let mut h = ExponentialHistogram::new(1.0);
h.add(5.0);
h.reset();
assert_eq!(h.count(), 0);
}
#[test]
fn f_exphist_011_debug() {
let h = ExponentialHistogram::new(1.0);
let debug = format!("{:?}", h);
assert!(debug.contains("ExponentialHistogram"));
}
#[test]
fn f_exphist_012_clone() {
let mut h = ExponentialHistogram::new(1.0);
h.add(5.0);
let cloned = h.clone();
assert_eq!(h.count(), cloned.count());
}
// ---- CacheStats tests: new(capacity_bytes); hit/miss/insert/evict
// counters with derived rates ----
#[test]
fn f_cache_001_new() {
let cs = CacheStats::new(1024);
assert_eq!(cs.total_requests(), 0);
}
#[test]
fn f_cache_002_default() {
let cs = CacheStats::default();
assert_eq!(cs.total_requests(), 0);
}
// Both hits and misses count toward total_requests().
#[test]
fn f_cache_003_hit() {
let mut cs = CacheStats::new(1024);
cs.hit();
assert_eq!(cs.total_requests(), 1);
}
#[test]
fn f_cache_004_miss() {
let mut cs = CacheStats::new(1024);
cs.miss();
assert_eq!(cs.total_requests(), 1);
}
// 2 hits of 3 requests ≈ 66.7% > 60%.
#[test]
fn f_cache_005_hit_rate() {
let mut cs = CacheStats::new(1024);
cs.hit();
cs.hit();
cs.miss();
assert!(cs.hit_rate() > 60.0);
}
#[test]
fn f_cache_006_miss_rate() {
let mut cs = CacheStats::new(1024);
cs.hit();
cs.miss();
assert!((cs.miss_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_cache_007_eviction() {
let mut cs = CacheStats::new(1024);
cs.insert(512);
cs.evict(256);
assert!(cs.eviction_rate() > 0.0);
}
// 512 of 1024 bytes occupied → 50% fill.
#[test]
fn f_cache_008_fill() {
let mut cs = CacheStats::new(1024);
cs.insert(512);
assert!((cs.fill_percentage() - 50.0).abs() < 0.01);
}
#[test]
fn f_cache_009_for_l1() {
let cs = CacheStats::for_l1_cache();
assert_eq!(cs.total_requests(), 0);
}
#[test]
fn f_cache_010_for_app() {
let cs = CacheStats::for_app_cache();
assert_eq!(cs.total_requests(), 0);
}
// is_effective(threshold): hit rate ≈ 66.7% clears a 60% bar.
#[test]
fn f_cache_011_effective() {
let mut cs = CacheStats::new(1024);
cs.hit();
cs.hit();
cs.miss();
assert!(cs.is_effective(60.0));
}
#[test]
fn f_cache_012_reset() {
let mut cs = CacheStats::new(1024);
cs.hit();
cs.reset();
assert_eq!(cs.total_requests(), 0);
}
// ---- BloomFilter tests: probabilistic membership — might_contain() has
// no false negatives, so an added key must always test positive ----
#[test]
fn f_bloom_001_new() {
let bf = BloomFilter::new(3);
assert!(bf.is_empty());
}
#[test]
fn f_bloom_002_default() {
let bf = BloomFilter::default();
assert!(bf.is_empty());
}
#[test]
fn f_bloom_003_add() {
let mut bf = BloomFilter::new(3);
bf.add(42);
assert_eq!(bf.len(), 1);
}
// Inserted keys are guaranteed to report present.
#[test]
fn f_bloom_004_contains() {
let mut bf = BloomFilter::new(3);
bf.add(42);
assert!(bf.might_contain(42));
}
// An empty filter reports nothing as present.
#[test]
fn f_bloom_005_not_contains() {
let bf = BloomFilter::new(3);
assert!(!bf.might_contain(12345));
}
#[test]
fn f_bloom_006_fill() {
let mut bf = BloomFilter::new(3);
bf.add(1);
bf.add(2);
bf.add(3);
assert!(bf.fill_percentage() > 0.0);
}
// With 100 entries the estimated false-positive rate becomes nonzero.
#[test]
fn f_bloom_007_fpr() {
let mut bf = BloomFilter::new(3);
for i in 0..100 {
bf.add(i);
}
assert!(bf.false_positive_rate() > 0.0);
}
#[test]
fn f_bloom_008_for_small() {
let bf = BloomFilter::for_small();
assert!(bf.is_empty());
}
#[test]
fn f_bloom_009_for_medium() {
let bf = BloomFilter::for_medium();
assert!(bf.is_empty());
}
#[test]
fn f_bloom_010_reset() {
let mut bf = BloomFilter::new(3);
bf.add(42);
bf.reset();
assert!(bf.is_empty());
}
#[test]
fn f_bloom_011_debug() {
let bf = BloomFilter::new(3);
let debug = format!("{:?}", bf);
assert!(debug.contains("BloomFilter"));
}
#[test]
fn f_bloom_012_clone() {
let mut bf = BloomFilter::new(3);
bf.add(42);
let cloned = bf.clone();
assert_eq!(bf.len(), cloned.len());
}
// ---- LoadBalancer tests: select_backend() returns a backend index and
// tracks the per-backend dispatch distribution ----
#[test]
fn f_lb_001_new() {
let lb = LoadBalancer::new();
assert_eq!(lb.backend_count(), 0);
}
#[test]
fn f_lb_002_default() {
let lb = LoadBalancer::default();
assert_eq!(lb.backend_count(), 0);
}
#[test]
fn f_lb_003_add_backend() {
let mut lb = LoadBalancer::new();
lb.add_backend(1);
assert_eq!(lb.backend_count(), 1);
}
#[test]
fn f_lb_004_next() {
let mut lb = LoadBalancer::new();
lb.add_backend(1);
assert_eq!(lb.select_backend(), Some(0));
}
// With no backends registered there is nothing to select.
#[test]
fn f_lb_005_empty_next() {
let mut lb = LoadBalancer::new();
assert_eq!(lb.select_backend(), None);
}
// Equal weights over 2 backends: each should receive roughly half of
// 10 dispatches (> 40% allows rounding slack).
#[test]
fn f_lb_006_equal_weights() {
let mut lb = LoadBalancer::equal_weights(2);
for _ in 0..10 {
lb.select_backend();
}
assert!(lb.distribution(0) > 40.0);
assert!(lb.distribution(1) > 40.0);
}
#[test]
fn f_lb_007_dispatched() {
let mut lb = LoadBalancer::equal_weights(2);
lb.select_backend();
lb.select_backend();
lb.select_backend();
assert_eq!(lb.total_dispatched(), 3);
}
// is_balanced(tolerance_pct): after 100 even dispatches the skew is
// within 20%.
#[test]
fn f_lb_008_balanced() {
let mut lb = LoadBalancer::equal_weights(2);
for _ in 0..100 {
lb.select_backend();
}
assert!(lb.is_balanced(20.0));
}
// A single backend trivially receives 100% of traffic.
#[test]
fn f_lb_009_distribution() {
let mut lb = LoadBalancer::equal_weights(1);
lb.select_backend();
assert!((lb.distribution(0) - 100.0).abs() < 0.01);
}
#[test]
fn f_lb_010_reset() {
let mut lb = LoadBalancer::equal_weights(2);
lb.select_backend();
lb.reset();
assert_eq!(lb.total_dispatched(), 0);
}
#[test]
fn f_lb_011_debug() {
let lb = LoadBalancer::new();
let debug = format!("{:?}", lb);
assert!(debug.contains("LoadBalancer"));
}
#[test]
fn f_lb_012_clone() {
let mut lb = LoadBalancer::equal_weights(2);
lb.select_backend();
let cloned = lb.clone();
assert_eq!(lb.total_dispatched(), cloned.total_dispatched());
}
// ---- BurstTracker tests: token bucket — new(capacity, refill_per_sec);
// consume() takes (tokens, timestamp_us) and refills from elapsed time ----
#[test]
fn f_burst_001_new() {
let bt = BurstTracker::new(100.0, 10.0);
assert!((bt.tokens() - 100.0).abs() < 0.01);
}
#[test]
fn f_burst_002_default() {
let bt = BurstTracker::default();
assert!((bt.tokens() - 100.0).abs() < 0.01);
}
#[test]
fn f_burst_003_consume() {
let mut bt = BurstTracker::new(100.0, 10.0);
assert!(bt.consume(10.0, 1000));
assert!((bt.tokens() - 90.0).abs() < 0.01);
}
// A second draw at the same timestamp finds the bucket empty and fails.
#[test]
fn f_burst_004_empty() {
let mut bt = BurstTracker::new(10.0, 1.0);
bt.consume(10.0, 1000);
assert!(!bt.consume(10.0, 1000));
}
// Three consumes at the identical timestamp count as a burst of 3.
#[test]
fn f_burst_005_max_burst() {
let mut bt = BurstTracker::new(100.0, 10.0);
bt.consume(1.0, 1000);
bt.consume(1.0, 1000);
bt.consume(1.0, 1000);
assert_eq!(bt.max_burst(), 3);
}
#[test]
fn f_burst_006_fill() {
let mut bt = BurstTracker::new(100.0, 10.0);
bt.consume(50.0, 1000);
assert!((bt.fill_percentage() - 50.0).abs() < 0.01);
}
#[test]
fn f_burst_007_for_api() {
let bt = BurstTracker::for_api();
assert!(bt.tokens() > 0.0);
}
#[test]
fn f_burst_008_for_network() {
let bt = BurstTracker::for_network();
assert!(bt.tokens() > 0.0);
}
// 1 second elapsed at 100 tokens/s refills above the post-consume level
// of 50; consuming 0 just triggers the refill.
#[test]
fn f_burst_009_refill() {
let mut bt = BurstTracker::new(100.0, 100.0);
bt.consume(50.0, 1000);
bt.consume(0.0, 1_001_000); assert!(bt.tokens() > 50.0);
}
#[test]
fn f_burst_010_reset() {
let mut bt = BurstTracker::new(100.0, 10.0);
bt.consume(50.0, 1000);
bt.reset();
assert!((bt.tokens() - 100.0).abs() < 0.01);
}
#[test]
fn f_burst_011_debug() {
let bt = BurstTracker::new(100.0, 10.0);
let debug = format!("{:?}", bt);
assert!(debug.contains("BurstTracker"));
}
#[test]
fn f_burst_012_clone() {
let mut bt = BurstTracker::new(100.0, 10.0);
bt.consume(50.0, 1000);
let cloned = bt.clone();
assert!((bt.tokens() - cloned.tokens()).abs() < 0.01);
}
// ---- TopKTracker tests: retains the k largest values seen, sorted
// descending in top() ----
#[test]
fn f_topk_001_new() {
let tk = TopKTracker::new(5);
assert_eq!(tk.count(), 0);
}
#[test]
fn f_topk_002_default() {
let tk = TopKTracker::default();
assert_eq!(tk.k(), 10);
}
#[test]
fn f_topk_003_add() {
let mut tk = TopKTracker::new(5);
tk.add(10.0);
assert_eq!(tk.count(), 1);
}
// top()[0] is the largest value regardless of insertion order.
#[test]
fn f_topk_004_top() {
let mut tk = TopKTracker::new(3);
tk.add(10.0);
tk.add(30.0);
tk.add(20.0);
let top = tk.top();
assert!((top[0] - 30.0).abs() < 0.01);
}
// Only k values are retained even after 10 adds.
#[test]
fn f_topk_005_limit() {
let mut tk = TopKTracker::new(3);
for i in 0..10 {
tk.add(i as f64);
}
assert_eq!(tk.top().len(), 3);
}
// minimum()/maximum() return the smallest/largest of the retained set.
#[test]
fn f_topk_006_minimum() {
let mut tk = TopKTracker::new(3);
tk.add(100.0);
tk.add(200.0);
tk.add(300.0);
assert!((tk.minimum().unwrap() - 100.0).abs() < 0.01);
}
#[test]
fn f_topk_007_maximum() {
let mut tk = TopKTracker::new(3);
tk.add(100.0);
tk.add(200.0);
tk.add(300.0);
assert!((tk.maximum().unwrap() - 300.0).abs() < 0.01);
}
// Preset constructors pin their k values (10 / 20).
#[test]
fn f_topk_008_for_metrics() {
let tk = TopKTracker::for_metrics();
assert_eq!(tk.k(), 10);
}
#[test]
fn f_topk_009_for_processes() {
let tk = TopKTracker::for_processes();
assert_eq!(tk.k(), 20);
}
#[test]
fn f_topk_010_reset() {
let mut tk = TopKTracker::new(5);
tk.add(10.0);
tk.reset();
assert_eq!(tk.count(), 0);
}
#[test]
fn f_topk_011_debug() {
let tk = TopKTracker::new(5);
let debug = format!("{:?}", tk);
assert!(debug.contains("TopKTracker"));
}
#[test]
fn f_topk_012_clone() {
let mut tk = TopKTracker::new(5);
tk.add(10.0);
let cloned = tk.clone();
assert_eq!(tk.count(), cloned.count());
}
// ---- QuotaTracker tests: fixed limit with use_quota()/release() and
// usage-based exhaustion checks ----
#[test]
fn f_quota_001_new() {
let qt = QuotaTracker::new(1000);
assert_eq!(qt.limit(), 1000);
}
#[test]
fn f_quota_002_default() {
let qt = QuotaTracker::default();
assert_eq!(qt.limit(), 1000);
}
#[test]
fn f_quota_003_use() {
let mut qt = QuotaTracker::new(100);
qt.use_quota(30);
assert_eq!(qt.remaining(), 70);
}
// Requests larger than the remaining quota are rejected.
#[test]
fn f_quota_004_exceeded() {
let mut qt = QuotaTracker::new(100);
assert!(!qt.use_quota(150));
}
#[test]
fn f_quota_005_usage() {
let mut qt = QuotaTracker::new(100);
qt.use_quota(50);
assert!((qt.usage_percentage() - 50.0).abs() < 0.01);
}
// Consuming exactly the limit exhausts the quota.
#[test]
fn f_quota_006_exhausted() {
let mut qt = QuotaTracker::new(100);
qt.use_quota(100);
assert!(qt.is_exhausted());
}
// Preset constructors pin their limits (10000 daily API calls, 100 GB).
#[test]
fn f_quota_007_for_api() {
let qt = QuotaTracker::for_api_daily();
assert_eq!(qt.limit(), 10000);
}
#[test]
fn f_quota_008_for_storage() {
let qt = QuotaTracker::for_storage_gb();
assert_eq!(qt.limit(), 100);
}
// release() returns previously used quota: 100 - 50 + 20 = 70 remaining.
#[test]
fn f_quota_009_release() {
let mut qt = QuotaTracker::new(100);
qt.use_quota(50);
qt.release(20);
assert_eq!(qt.remaining(), 70);
}
#[test]
fn f_quota_010_reset() {
let mut qt = QuotaTracker::new(100);
qt.use_quota(50);
qt.reset();
assert_eq!(qt.remaining(), 100);
}
#[test]
fn f_quota_011_debug() {
let qt = QuotaTracker::new(100);
let debug = format!("{:?}", qt);
assert!(debug.contains("QuotaTracker"));
}
#[test]
fn f_quota_012_clone() {
let mut qt = QuotaTracker::new(100);
qt.use_quota(30);
let cloned = qt.clone();
assert_eq!(qt.remaining(), cloned.remaining());
}
// ---- FrequencyCounter tests: slot-indexed counters (slots 0..16 used
// here) with frequency percentages and a normalized entropy measure ----
#[test]
fn f_freq_001_new() {
let fc = FrequencyCounter::new();
assert_eq!(fc.total(), 0);
}
#[test]
fn f_freq_002_default() {
let fc = FrequencyCounter::default();
assert_eq!(fc.total(), 0);
}
#[test]
fn f_freq_003_increment() {
let mut fc = FrequencyCounter::new();
fc.increment(0);
assert_eq!(fc.count(0), 1);
}
// Slot 0 holds 2 of 3 events ≈ 66.7%.
#[test]
fn f_freq_004_frequency() {
let mut fc = FrequencyCounter::new();
fc.increment(0);
fc.increment(0);
fc.increment(1);
assert!((fc.frequency(0) - 66.666).abs() < 1.0);
}
#[test]
fn f_freq_005_most_frequent() {
let mut fc = FrequencyCounter::new();
fc.increment(0);
fc.increment(1);
fc.increment(1);
assert_eq!(fc.most_frequent(), Some(1));
}
// 16 distinct slots can each be incremented independently.
#[test]
fn f_freq_006_slots() {
let mut fc = FrequencyCounter::new();
for i in 0..16 {
fc.increment(i);
}
assert_eq!(fc.total(), 16);
}
#[test]
fn f_freq_007_non_zero() {
let mut fc = FrequencyCounter::new();
fc.increment(0);
fc.increment(5);
assert_eq!(fc.non_zero_count(), 2);
}
// add() bumps a slot by an arbitrary amount.
#[test]
fn f_freq_008_add() {
let mut fc = FrequencyCounter::new();
fc.add(0, 10);
assert_eq!(fc.count(0), 10);
}
// A uniform distribution over 16 slots yields near-maximal entropy
// (> 0.9 implies entropy() is normalized to [0, 1]).
#[test]
fn f_freq_009_entropy() {
let mut fc = FrequencyCounter::new();
for i in 0..16 {
fc.add(i, 10);
}
assert!(fc.entropy() > 0.9);
}
#[test]
fn f_freq_010_reset() {
let mut fc = FrequencyCounter::new();
fc.increment(0);
fc.reset();
assert_eq!(fc.total(), 0);
}
#[test]
fn f_freq_011_debug() {
let fc = FrequencyCounter::new();
let debug = format!("{:?}", fc);
assert!(debug.contains("FrequencyCounter"));
}
#[test]
fn f_freq_012_clone() {
let mut fc = FrequencyCounter::new();
fc.increment(0);
let cloned = fc.clone();
assert_eq!(fc.total(), cloned.total());
}
// ---- MovingRange tests: min/max/range over a sliding window of the
// last `window_size` samples ----
#[test]
fn f_range_001_new() {
let mr = MovingRange::new(10);
assert_eq!(mr.count(), 0);
}
#[test]
fn f_range_002_default() {
let mr = MovingRange::default();
assert_eq!(mr.window_size(), 10);
}
// A single sample is simultaneously the min and the max.
#[test]
fn f_range_003_add() {
let mut mr = MovingRange::new(10);
mr.add(50.0);
assert!((mr.min().unwrap() - 50.0).abs() < 0.01);
assert!((mr.max().unwrap() - 50.0).abs() < 0.01);
}
// range() = max - min = 30 - 10.
#[test]
fn f_range_004_range() {
let mut mr = MovingRange::new(10);
mr.add(10.0);
mr.add(30.0);
assert!((mr.range() - 20.0).abs() < 0.01);
}
// midrange() = (min + max) / 2.
#[test]
fn f_range_005_midrange() {
let mut mr = MovingRange::new(10);
mr.add(10.0);
mr.add(30.0);
assert!((mr.midrange() - 20.0).abs() < 0.01);
}
// volatility() matches range/midrange * 100 here: (20 / 20) * 100 = 100.
#[test]
fn f_range_006_volatility() {
let mut mr = MovingRange::new(10);
mr.add(10.0);
mr.add(30.0);
assert!((mr.volatility() - 100.0).abs() < 0.01);
}
// With window size 3, the initial 100.0 falls out after three more adds,
// leaving max = 30.
#[test]
fn f_range_007_window() {
let mut mr = MovingRange::new(3);
mr.add(100.0);
mr.add(10.0);
mr.add(20.0);
mr.add(30.0);
assert!((mr.max().unwrap() - 30.0).abs() < 0.01);
}
// Preset constructors pin their window sizes (20 / 100).
#[test]
fn f_range_008_for_prices() {
let mr = MovingRange::for_prices();
assert_eq!(mr.window_size(), 20);
}
#[test]
fn f_range_009_for_latency() {
let mr = MovingRange::for_latency();
assert_eq!(mr.window_size(), 100);
}
#[test]
fn f_range_010_reset() {
let mut mr = MovingRange::new(10);
mr.add(50.0);
mr.reset();
assert_eq!(mr.count(), 0);
}
#[test]
fn f_range_011_debug() {
let mr = MovingRange::new(10);
let debug = format!("{:?}", mr);
assert!(debug.contains("MovingRange"));
}
#[test]
fn f_range_012_clone() {
let mut mr = MovingRange::new(10);
mr.add(50.0);
let cloned = mr.clone();
assert_eq!(mr.count(), cloned.count());
}
// --- TimeoutTracker: classifying recorded durations against a threshold ---
#[test]
fn f_timeout_001_new() {
let tt = TimeoutTracker::new(1_000_000);
assert_eq!(tt.total(), 0);
}
#[test]
fn f_timeout_002_default() {
let tt = TimeoutTracker::default();
assert_eq!(tt.timeout_threshold_us(), 1_000_000);
}
#[test]
fn f_timeout_003_record() {
let mut tt = TimeoutTracker::new(1_000_000);
tt.record(500_000);
assert_eq!(tt.total(), 1);
}
#[test]
fn f_timeout_004_timeout() {
// A duration above the threshold counts as timed out.
let mut tt = TimeoutTracker::new(1_000_000);
tt.record(1_500_000); assert_eq!(tt.timed_out(), 1);
}
#[test]
fn f_timeout_005_rate() {
let mut tt = TimeoutTracker::new(1_000_000);
tt.record(500_000); tt.record(1_500_000); assert!((tt.timeout_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_timeout_006_success() {
let mut tt = TimeoutTracker::new(1_000_000);
tt.record(500_000);
assert!((tt.success_rate() - 100.0).abs() < 0.01);
}
#[test]
fn f_timeout_007_for_network() {
let tt = TimeoutTracker::for_network();
assert_eq!(tt.timeout_threshold_us(), 5_000_000);
}
#[test]
fn f_timeout_008_for_database() {
let tt = TimeoutTracker::for_database();
assert_eq!(tt.timeout_threshold_us(), 30_000_000);
}
#[test]
fn f_timeout_009_max() {
let mut tt = TimeoutTracker::new(1_000_000);
tt.record(100_000);
tt.record(500_000);
assert_eq!(tt.max_duration_us(), 500_000);
}
#[test]
fn f_timeout_010_reset() {
let mut tt = TimeoutTracker::new(1_000_000);
tt.record(500_000);
tt.reset();
assert_eq!(tt.total(), 0);
}
#[test]
fn f_timeout_011_debug() {
let tt = TimeoutTracker::new(1_000_000);
let debug = format!("{:?}", tt);
assert!(debug.contains("TimeoutTracker"));
}
#[test]
fn f_timeout_012_clone() {
let mut tt = TimeoutTracker::new(1_000_000);
tt.record(500_000);
let cloned = tt.clone();
assert_eq!(tt.total(), cloned.total());
}
// --- RetryTracker: retry counting with exponential backoff delays ---
#[test]
fn f_retry_001_new() {
let rt = RetryTracker::new(3, 100, 10000);
assert_eq!(rt.current_retry(), 0);
}
#[test]
fn f_retry_002_default() {
let rt = RetryTracker::default();
assert!(!rt.retries_exhausted());
}
#[test]
fn f_retry_003_retry() {
let mut rt = RetryTracker::new(3, 100, 10000);
rt.retry();
assert_eq!(rt.current_retry(), 1);
}
#[test]
fn f_retry_004_success() {
// A success resets the retry counter back to zero.
let mut rt = RetryTracker::new(3, 100, 10000);
rt.retry();
rt.success();
assert_eq!(rt.current_retry(), 0);
}
#[test]
fn f_retry_005_exhausted() {
let mut rt = RetryTracker::new(3, 100, 10000);
rt.retry();
rt.retry();
rt.retry();
assert!(rt.retries_exhausted());
}
#[test]
fn f_retry_006_delay() {
// Delay doubles per retry: 100 → 200 → 400 (exponential backoff).
let mut rt = RetryTracker::new(3, 100, 10000);
assert_eq!(rt.next_delay_ms(), 100);
rt.retry();
assert_eq!(rt.next_delay_ms(), 200);
rt.retry();
assert_eq!(rt.next_delay_ms(), 400);
}
#[test]
fn f_retry_007_for_api() {
let rt = RetryTracker::for_api();
assert_eq!(rt.next_delay_ms(), 100);
}
#[test]
fn f_retry_008_for_network() {
let rt = RetryTracker::for_network();
assert_eq!(rt.next_delay_ms(), 1000);
}
#[test]
fn f_retry_009_max_delay() {
// Backoff is capped at the configured max delay.
let mut rt = RetryTracker::new(10, 1000, 5000);
for _ in 0..10 {
rt.retry();
}
assert!(rt.next_delay_ms() <= 5000);
}
#[test]
fn f_retry_010_reset() {
let mut rt = RetryTracker::new(3, 100, 10000);
rt.retry();
rt.reset();
assert_eq!(rt.current_retry(), 0);
}
#[test]
fn f_retry_011_debug() {
let rt = RetryTracker::new(3, 100, 10000);
let debug = format!("{:?}", rt);
assert!(debug.contains("RetryTracker"));
}
#[test]
fn f_retry_012_clone() {
let mut rt = RetryTracker::new(3, 100, 10000);
rt.retry();
let cloned = rt.clone();
assert_eq!(rt.current_retry(), cloned.current_retry());
}
// --- ScheduleSlot: time-sliced execution slots that advance and wrap ---
#[test]
fn f_sched_001_new() {
let ss = ScheduleSlot::new(1_000_000, 10);
assert_eq!(ss.current_slot(), 0);
}
#[test]
fn f_sched_002_default() {
let ss = ScheduleSlot::default();
assert_eq!(ss.num_slots(), 10);
}
#[test]
fn f_sched_003_execute() {
let mut ss = ScheduleSlot::new(1_000_000, 10);
ss.execute(1000);
assert_eq!(ss.executions(0), 1);
}
#[test]
fn f_sched_004_advance() {
// Advancing past the slot duration moves to a later slot.
let mut ss = ScheduleSlot::new(1_000_000, 10);
ss.update(1000);
ss.update(2_001_000); assert!(ss.current_slot() > 0);
}
#[test]
fn f_sched_005_total() {
let mut ss = ScheduleSlot::new(1_000_000, 10);
ss.execute(1000);
ss.execute(1000);
assert_eq!(ss.total_executions(), 2);
}
#[test]
fn f_sched_006_wrap() {
// Slot index stays within num_slots even after advancing past the end.
let mut ss = ScheduleSlot::new(100_000, 3);
ss.update(1000);
ss.update(401_000); assert!(ss.current_slot() < 3);
}
#[test]
fn f_sched_007_for_round_robin() {
let ss = ScheduleSlot::for_round_robin();
assert_eq!(ss.num_slots(), 10);
}
#[test]
fn f_sched_008_for_minute() {
let ss = ScheduleSlot::for_minute();
assert_eq!(ss.num_slots(), 5);
}
#[test]
fn f_sched_009_balanced() {
let ss = ScheduleSlot::new(1_000_000, 10);
assert!(ss.is_balanced(50.0));
}
#[test]
fn f_sched_010_reset() {
let mut ss = ScheduleSlot::new(1_000_000, 10);
ss.execute(1000);
ss.reset();
assert_eq!(ss.total_executions(), 0);
}
#[test]
fn f_sched_011_debug() {
let ss = ScheduleSlot::new(1_000_000, 10);
let debug = format!("{:?}", ss);
assert!(debug.contains("ScheduleSlot"));
}
#[test]
fn f_sched_012_clone() {
let mut ss = ScheduleSlot::new(1_000_000, 10);
ss.execute(1000);
let cloned = ss.clone();
assert_eq!(ss.total_executions(), cloned.total_executions());
}
// --- CooldownTimer: rate limiting actions by a minimum interval ---
#[test]
fn f_cool_001_new() {
let ct = CooldownTimer::new(1_000_000);
assert_eq!(ct.cooldown_us(), 1_000_000);
}
#[test]
fn f_cool_002_default() {
let ct = CooldownTimer::default();
assert_eq!(ct.cooldown_us(), 1_000_000);
}
#[test]
fn f_cool_003_first_ready() {
// With no prior action the timer is immediately ready.
let ct = CooldownTimer::new(1_000_000);
assert!(ct.is_ready(1000));
}
#[test]
fn f_cool_004_blocked() {
let mut ct = CooldownTimer::new(1_000_000);
ct.try_action(1000);
assert!(!ct.try_action(500_000)); }
#[test]
fn f_cool_005_ready_after() {
let mut ct = CooldownTimer::new(1_000_000);
ct.try_action(1000);
assert!(ct.is_ready(1_001_000)); }
#[test]
fn f_cool_006_block_rate() {
// 1 allowed + 1 blocked attempt → 50% block rate.
let mut ct = CooldownTimer::new(1_000_000);
ct.try_action(1000);
ct.try_action(500_000); assert!((ct.block_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_cool_007_for_fast() {
let ct = CooldownTimer::for_fast();
assert_eq!(ct.cooldown_us(), 100_000);
}
#[test]
fn f_cool_008_for_slow() {
let ct = CooldownTimer::for_slow();
assert_eq!(ct.cooldown_us(), 10_000_000);
}
#[test]
fn f_cool_009_remaining() {
let mut ct = CooldownTimer::new(1_000_000);
ct.try_action(1000);
assert!(ct.remaining_us(500_000) > 0);
}
#[test]
fn f_cool_010_reset() {
let mut ct = CooldownTimer::new(1_000_000);
ct.try_action(1000);
ct.reset();
assert_eq!(ct.total_actions(), 0);
}
#[test]
fn f_cool_011_debug() {
let ct = CooldownTimer::new(1_000_000);
let debug = format!("{:?}", ct);
assert!(debug.contains("CooldownTimer"));
}
#[test]
fn f_cool_012_clone() {
let mut ct = CooldownTimer::new(1_000_000);
ct.try_action(1000);
let cloned = ct.clone();
assert_eq!(ct.total_actions(), cloned.total_actions());
}
// --- BackpressureMonitor: consecutive-pressure-signal tracking ---
#[test]
fn f_bp_001_new() {
let bp = BackpressureMonitor::new();
assert_eq!(bp.total_signals(), 0);
}
#[test]
fn f_bp_002_default() {
let bp = BackpressureMonitor::default();
assert_eq!(bp.consecutive(), 0);
}
#[test]
fn f_bp_003_success() {
// A success breaks the consecutive-signal streak.
let mut bp = BackpressureMonitor::new();
bp.signal(1000);
bp.success();
assert_eq!(bp.consecutive(), 0);
}
#[test]
fn f_bp_004_signal() {
let mut bp = BackpressureMonitor::new();
bp.signal(1000);
bp.signal(2000);
assert_eq!(bp.consecutive(), 2);
}
#[test]
fn f_bp_005_rate() {
let mut bp = BackpressureMonitor::new();
bp.success();
bp.signal(1000);
assert!((bp.pressure_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_bp_006_max_consecutive() {
// The longest streak is remembered even after a success resets it.
let mut bp = BackpressureMonitor::new();
bp.signal(1000);
bp.signal(2000);
bp.success();
bp.signal(3000);
assert_eq!(bp.max_consecutive(), 2);
}
#[test]
fn f_bp_007_under_pressure() {
let mut bp = BackpressureMonitor::new();
bp.signal(1000);
bp.signal(2000);
bp.signal(3000);
assert!(bp.is_under_pressure(3));
}
#[test]
fn f_bp_008_healthy() {
let mut bp = BackpressureMonitor::new();
bp.success();
bp.success();
assert!(bp.is_healthy(10.0));
}
#[test]
fn f_bp_009_total() {
let mut bp = BackpressureMonitor::new();
bp.signal(1000);
bp.signal(2000);
assert_eq!(bp.total_signals(), 2);
}
#[test]
fn f_bp_010_reset() {
let mut bp = BackpressureMonitor::new();
bp.signal(1000);
bp.reset();
assert_eq!(bp.total_signals(), 0);
}
#[test]
fn f_bp_011_debug() {
let bp = BackpressureMonitor::new();
let debug = format!("{:?}", bp);
assert!(debug.contains("BackpressureMonitor"));
}
#[test]
fn f_bp_012_clone() {
let mut bp = BackpressureMonitor::new();
bp.signal(1000);
let cloned = bp.clone();
assert_eq!(bp.total_signals(), cloned.total_signals());
}
// --- CapacityPlanner: utilization tracking against a fixed capacity ---
#[test]
fn f_cap_001_new() {
let cp = CapacityPlanner::new(1000);
assert_eq!(cp.remaining(), 1000);
}
#[test]
fn f_cap_002_default() {
let cp = CapacityPlanner::default();
assert_eq!(cp.remaining(), 1000);
}
#[test]
fn f_cap_003_update() {
let mut cp = CapacityPlanner::new(1000);
cp.update(500);
assert_eq!(cp.remaining(), 500);
}
#[test]
fn f_cap_004_peak() {
// Peak utilization sticks at the highest observed usage (80%).
let mut cp = CapacityPlanner::new(1000);
cp.update(800);
cp.update(500);
assert!((cp.peak_utilization() - 80.0).abs() < 0.01);
}
#[test]
fn f_cap_005_utilization() {
let mut cp = CapacityPlanner::new(100);
cp.update(50);
assert!((cp.utilization() - 50.0).abs() < 0.01);
}
#[test]
fn f_cap_006_at_risk() {
let mut cp = CapacityPlanner::new(100);
cp.update(90);
assert!(cp.at_risk(80.0));
}
#[test]
fn f_cap_007_for_connections() {
let cp = CapacityPlanner::for_connections();
assert_eq!(cp.remaining(), 1000);
}
#[test]
fn f_cap_008_for_storage() {
let cp = CapacityPlanner::for_storage();
assert_eq!(cp.remaining(), 100);
}
#[test]
fn f_cap_009_avg() {
let mut cp = CapacityPlanner::new(100);
cp.update(50);
cp.update(50);
assert!((cp.avg_utilization() - 50.0).abs() < 0.01);
}
#[test]
fn f_cap_010_reset() {
let mut cp = CapacityPlanner::new(100);
cp.update(50);
cp.reset();
assert_eq!(cp.remaining(), 100);
}
#[test]
fn f_cap_011_debug() {
let cp = CapacityPlanner::new(100);
let debug = format!("{:?}", cp);
assert!(debug.contains("CapacityPlanner"));
}
#[test]
fn f_cap_012_clone() {
let mut cp = CapacityPlanner::new(100);
cp.update(50);
let cloned = cp.clone();
assert_eq!(cp.remaining(), cloned.remaining());
}
// --- DriftTracker: deviation of event arrivals from an expected period ---
#[test]
fn f_drift_001_new() {
let dt = DriftTracker::new(1_000_000);
assert_eq!(dt.samples(), 0);
}
#[test]
fn f_drift_002_default() {
let dt = DriftTracker::default();
assert_eq!(dt.samples(), 0);
}
#[test]
fn f_drift_003_baseline() {
// The first record only establishes the baseline; no drift sample yet.
let mut dt = DriftTracker::new(1_000_000);
dt.record(1000);
assert_eq!(dt.samples(), 0); }
#[test]
fn f_drift_004_drift() {
// Arrival exactly one period later → essentially zero drift.
let mut dt = DriftTracker::new(1_000_000);
dt.record(1000);
dt.record(1_001_000); assert!(dt.avg_drift_us().abs() < 1.0);
}
#[test]
fn f_drift_005_late() {
let mut dt = DriftTracker::new(1_000_000);
dt.record(1000);
dt.record(1_100_000); assert!(dt.avg_drift_us() > 0.0);
}
#[test]
fn f_drift_006_early() {
let mut dt = DriftTracker::new(1_000_000);
dt.record(1000);
dt.record(901_000); assert!(dt.avg_drift_us() < 0.0);
}
#[test]
fn f_drift_007_for_60fps() {
let dt = DriftTracker::for_60fps();
assert_eq!(dt.samples(), 0);
}
#[test]
fn f_drift_008_for_heartbeat() {
let dt = DriftTracker::for_heartbeat();
assert_eq!(dt.samples(), 0);
}
#[test]
fn f_drift_009_stable() {
let mut dt = DriftTracker::new(1_000_000);
dt.record(1000);
dt.record(1_001_000);
assert!(dt.is_stable(10_000));
}
#[test]
fn f_drift_010_reset() {
let mut dt = DriftTracker::new(1_000_000);
dt.record(1000);
dt.record(2_000_000);
dt.reset();
assert_eq!(dt.samples(), 0);
}
#[test]
fn f_drift_011_debug() {
let dt = DriftTracker::new(1_000_000);
let debug = format!("{:?}", dt);
assert!(debug.contains("DriftTracker"));
}
#[test]
fn f_drift_012_clone() {
let mut dt = DriftTracker::new(1_000_000);
dt.record(1000);
dt.record(2_000_000);
let cloned = dt.clone();
assert_eq!(dt.samples(), cloned.samples());
}
// --- SemaphoreTracker: permit accounting and contention statistics ---
#[test]
fn f_sem_001_new() {
let st = SemaphoreTracker::new(10);
assert_eq!(st.available(), 10);
}
#[test]
fn f_sem_002_default() {
let st = SemaphoreTracker::default();
assert_eq!(st.available(), 10);
}
#[test]
fn f_sem_003_acquire() {
let mut st = SemaphoreTracker::new(10);
st.try_acquire();
assert_eq!(st.available(), 9);
}
#[test]
fn f_sem_004_release() {
let mut st = SemaphoreTracker::new(10);
st.try_acquire();
st.release();
assert_eq!(st.available(), 10);
}
#[test]
fn f_sem_005_contention() {
// Acquiring with no permits left must fail.
let mut st = SemaphoreTracker::new(1);
st.try_acquire();
assert!(!st.try_acquire()); }
#[test]
fn f_sem_006_contention_rate() {
// 1 success + 1 failure → 50% contention.
let mut st = SemaphoreTracker::new(1);
st.try_acquire();
st.try_acquire(); assert!((st.contention_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_sem_007_for_database() {
let st = SemaphoreTracker::for_database();
assert_eq!(st.total_permits(), 20);
}
#[test]
fn f_sem_008_for_workers() {
let st = SemaphoreTracker::for_workers();
assert_eq!(st.total_permits(), 8);
}
#[test]
fn f_sem_009_peak() {
// Peak holds at 2/10 = 20% even after one permit is released.
let mut st = SemaphoreTracker::new(10);
st.try_acquire();
st.try_acquire();
st.release();
assert!((st.peak_utilization() - 20.0).abs() < 0.01);
}
#[test]
fn f_sem_010_reset() {
let mut st = SemaphoreTracker::new(10);
st.try_acquire();
st.reset();
assert_eq!(st.available(), 10);
}
#[test]
fn f_sem_011_debug() {
let st = SemaphoreTracker::new(10);
let debug = format!("{:?}", st);
assert!(debug.contains("SemaphoreTracker"));
}
#[test]
fn f_sem_012_clone() {
let mut st = SemaphoreTracker::new(10);
st.try_acquire();
let cloned = st.clone();
assert_eq!(st.available(), cloned.available());
}
}
/// Aggregates garbage-collection pause statistics and estimates the share
/// of wall-clock time spent paused (GC overhead).
#[derive(Debug, Clone)]
pub struct GCTracker {
    /// Number of GC events recorded so far.
    gc_count: u64,
    /// Sum of all pause durations, in microseconds.
    total_pause_us: u64,
    /// Sum of the forward time gaps between consecutive recorded events.
    total_time_us: u64,
    /// Longest single pause observed.
    max_pause_us: u64,
    /// Timestamp of the most recent event; 0 means "no event yet".
    last_gc_us: u64,
}
impl Default for GCTracker {
    fn default() -> Self {
        Self::new()
    }
}
impl GCTracker {
    /// Creates a tracker with no recorded GC events.
    #[must_use]
    pub fn new() -> Self {
        Self {
            gc_count: 0,
            total_pause_us: 0,
            total_time_us: 0,
            max_pause_us: 0,
            last_gc_us: 0,
        }
    }
    /// Records one GC event: a pause of `pause_us` microseconds occurring at
    /// timestamp `now_us`. Wall-clock time is only accumulated once a
    /// previous event exists and the timestamp moved forward.
    pub fn record_gc(&mut self, pause_us: u64, now_us: u64) {
        self.gc_count += 1;
        self.total_pause_us += pause_us;
        self.max_pause_us = self.max_pause_us.max(pause_us);
        if self.last_gc_us > 0 && now_us > self.last_gc_us {
            self.total_time_us += now_us - self.last_gc_us;
        }
        self.last_gc_us = now_us;
    }
    /// Percentage of tracked wall-clock time spent in GC pauses. Returns 0.0
    /// until at least one inter-event interval has been observed.
    #[must_use]
    pub fn overhead_percentage(&self) -> f64 {
        match self.total_time_us {
            0 => 0.0,
            total => (self.total_pause_us as f64 / total as f64) * 100.0,
        }
    }
    /// Mean pause duration in microseconds (0.0 before any event).
    #[must_use]
    pub fn avg_pause_us(&self) -> f64 {
        match self.gc_count {
            0 => 0.0,
            n => self.total_pause_us as f64 / n as f64,
        }
    }
    /// Longest single pause seen so far, in microseconds.
    #[must_use]
    pub fn max_pause_us(&self) -> u64 {
        self.max_pause_us
    }
    /// Number of GC events recorded.
    #[must_use]
    pub fn gc_count(&self) -> u64 {
        self.gc_count
    }
    /// True while the measured overhead stays strictly below `max_overhead`
    /// percent.
    #[must_use]
    pub fn is_healthy(&self, max_overhead: f64) -> bool {
        self.overhead_percentage() < max_overhead
    }
    /// Clears all recorded state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Unit tests for [`GCTracker`].
#[cfg(test)]
mod gc_tracker_tests {
use super::*;
#[test]
fn f_gc_001_new() {
let gc = GCTracker::new();
assert_eq!(gc.gc_count(), 0);
}
#[test]
fn f_gc_002_default() {
let gc = GCTracker::default();
assert_eq!(gc.gc_count(), 0);
}
#[test]
fn f_gc_003_record() {
let mut gc = GCTracker::new();
gc.record_gc(1000, 10000);
assert_eq!(gc.gc_count(), 1);
}
#[test]
fn f_gc_004_max_pause() {
let mut gc = GCTracker::new();
gc.record_gc(500, 10000);
gc.record_gc(2000, 20000);
gc.record_gc(800, 30000);
assert_eq!(gc.max_pause_us(), 2000);
}
#[test]
fn f_gc_005_avg_pause() {
let mut gc = GCTracker::new();
gc.record_gc(1000, 10000);
gc.record_gc(2000, 20000);
assert!((gc.avg_pause_us() - 1500.0).abs() < 0.01);
}
#[test]
fn f_gc_006_overhead() {
// Overhead becomes non-zero once an inter-GC interval is observed.
let mut gc = GCTracker::new();
gc.record_gc(100, 1000);
gc.record_gc(100, 2000); assert!(gc.overhead_percentage() > 0.0);
}
#[test]
fn f_gc_007_healthy() {
let mut gc = GCTracker::new();
gc.record_gc(10, 1000);
gc.record_gc(10, 2000);
assert!(gc.is_healthy(10.0));
}
#[test]
fn f_gc_008_unhealthy() {
// 1000μs of pause over a 1000μs interval → 100% overhead.
let mut gc = GCTracker::new();
gc.record_gc(500, 1000);
gc.record_gc(500, 2000); assert!(!gc.is_healthy(10.0));
}
#[test]
fn f_gc_009_reset() {
let mut gc = GCTracker::new();
gc.record_gc(1000, 10000);
gc.reset();
assert_eq!(gc.gc_count(), 0);
}
#[test]
fn f_gc_010_zero_overhead() {
let gc = GCTracker::new();
assert!((gc.overhead_percentage() - 0.0).abs() < 0.01);
}
#[test]
fn f_gc_011_debug() {
let gc = GCTracker::new();
let debug = format!("{:?}", gc);
assert!(debug.contains("GCTracker"));
}
#[test]
fn f_gc_012_clone() {
let mut gc = GCTracker::new();
gc.record_gc(1000, 10000);
let cloned = gc.clone();
assert_eq!(gc.gc_count(), cloned.gc_count());
}
}
/// Measures compaction runs: how many completed, how long they took, and how
/// many bytes they processed.
#[derive(Debug, Clone)]
pub struct CompactionTracker {
    /// Completed compactions.
    compactions: u64,
    /// Sum of completed-run durations, in microseconds.
    total_duration_us: u64,
    /// Sum of bytes processed by completed runs.
    total_bytes: u64,
    /// Longest completed run.
    max_duration_us: u64,
    /// Whether a run is currently in progress.
    active: bool,
    /// Timestamp at which the in-progress run started.
    start_us: u64,
}
impl Default for CompactionTracker {
    fn default() -> Self {
        Self::new()
    }
}
impl CompactionTracker {
    /// Creates a tracker with no recorded compactions.
    #[must_use]
    pub fn new() -> Self {
        Self {
            compactions: 0,
            total_duration_us: 0,
            total_bytes: 0,
            max_duration_us: 0,
            active: false,
            start_us: 0,
        }
    }
    /// Preset for database compaction (currently identical to `new`).
    #[must_use]
    pub fn for_database() -> Self {
        Self::new()
    }
    /// Preset for log compaction (currently identical to `new`).
    #[must_use]
    pub fn for_logs() -> Self {
        Self::new()
    }
    /// Marks a compaction run as started at `now_us`.
    pub fn start(&mut self, now_us: u64) {
        self.active = true;
        self.start_us = now_us;
    }
    /// Finishes the current run, crediting `bytes` processed. Ignored when no
    /// run is active or the clock appears to have gone backwards; in every
    /// case the tracker ends up idle.
    pub fn complete(&mut self, bytes: u64, now_us: u64) {
        if self.active {
            // checked_sub is None exactly when now_us < start_us.
            if let Some(duration) = now_us.checked_sub(self.start_us) {
                self.compactions += 1;
                self.total_duration_us += duration;
                self.total_bytes += bytes;
                self.max_duration_us = self.max_duration_us.max(duration);
            }
        }
        self.active = false;
    }
    /// Number of completed compactions.
    #[must_use]
    pub fn compaction_count(&self) -> u64 {
        self.compactions
    }
    /// Average processing rate over all completed runs, in bytes per second
    /// (0.0 when no time has been accumulated).
    #[must_use]
    pub fn throughput_bytes_per_sec(&self) -> f64 {
        match self.total_duration_us {
            0 => 0.0,
            total => (self.total_bytes as f64 / total as f64) * 1_000_000.0,
        }
    }
    /// Mean duration of a completed run, in microseconds.
    #[must_use]
    pub fn avg_duration_us(&self) -> f64 {
        match self.compactions {
            0 => 0.0,
            n => self.total_duration_us as f64 / n as f64,
        }
    }
    /// Longest completed run, in microseconds.
    #[must_use]
    pub fn max_duration_us(&self) -> u64 {
        self.max_duration_us
    }
    /// True while a run is in progress.
    #[must_use]
    pub fn is_active(&self) -> bool {
        self.active
    }
    /// Clears all recorded state and aborts any in-progress run.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Unit tests for [`CompactionTracker`].
#[cfg(test)]
mod compaction_tracker_tests {
use super::*;
#[test]
fn f_compact_001_new() {
let ct = CompactionTracker::new();
assert_eq!(ct.compaction_count(), 0);
}
#[test]
fn f_compact_002_default() {
let ct = CompactionTracker::default();
assert_eq!(ct.compaction_count(), 0);
}
#[test]
fn f_compact_003_start() {
let mut ct = CompactionTracker::new();
ct.start(1000);
assert!(ct.is_active());
}
#[test]
fn f_compact_004_complete() {
let mut ct = CompactionTracker::new();
ct.start(1000);
ct.complete(1024, 2000);
assert_eq!(ct.compaction_count(), 1);
}
#[test]
fn f_compact_005_throughput() {
// 1MB over 1 second → 1,000,000 bytes/sec.
let mut ct = CompactionTracker::new();
ct.start(0);
ct.complete(1_000_000, 1_000_000); assert!((ct.throughput_bytes_per_sec() - 1_000_000.0).abs() < 1.0);
}
#[test]
fn f_compact_006_max_duration() {
let mut ct = CompactionTracker::new();
ct.start(0);
ct.complete(100, 1000);
ct.start(2000);
ct.complete(100, 5000); assert_eq!(ct.max_duration_us(), 3000);
}
#[test]
fn f_compact_007_for_database() {
let ct = CompactionTracker::for_database();
assert_eq!(ct.compaction_count(), 0);
}
#[test]
fn f_compact_008_for_logs() {
let ct = CompactionTracker::for_logs();
assert_eq!(ct.compaction_count(), 0);
}
#[test]
fn f_compact_009_avg_duration() {
let mut ct = CompactionTracker::new();
ct.start(0);
ct.complete(100, 1000);
ct.start(2000);
ct.complete(100, 4000);
assert!((ct.avg_duration_us() - 1500.0).abs() < 0.01);
}
#[test]
fn f_compact_010_reset() {
let mut ct = CompactionTracker::new();
ct.start(0);
ct.complete(100, 1000);
ct.reset();
assert_eq!(ct.compaction_count(), 0);
}
#[test]
fn f_compact_011_debug() {
let ct = CompactionTracker::new();
let debug = format!("{:?}", ct);
assert!(debug.contains("CompactionTracker"));
}
#[test]
fn f_compact_012_clone() {
let mut ct = CompactionTracker::new();
ct.start(0);
ct.complete(100, 1000);
let cloned = ct.clone();
assert_eq!(ct.compaction_count(), cloned.compaction_count());
}
}
/// Records buffer flush events: counts, byte volumes, and the smallest
/// interval between consecutive flushes (to detect bursts).
#[derive(Debug, Clone)]
pub struct FlushTracker {
    /// Flushes recorded so far.
    flushes: u64,
    /// Total bytes written across all flushes.
    total_bytes: u64,
    /// Largest single flush.
    max_bytes: u64,
    /// Timestamp of the most recent flush; 0 means "none yet".
    last_flush_us: u64,
    /// Smallest gap between two consecutive flushes; u64::MAX until an
    /// interval has been observed.
    min_interval_us: u64,
}
impl Default for FlushTracker {
    fn default() -> Self {
        Self::new()
    }
}
impl FlushTracker {
    /// Creates a tracker with no recorded flushes.
    #[must_use]
    pub fn new() -> Self {
        Self {
            flushes: 0,
            total_bytes: 0,
            max_bytes: 0,
            last_flush_us: 0,
            min_interval_us: u64::MAX,
        }
    }
    /// Preset for a write buffer (currently identical to `new`).
    #[must_use]
    pub fn for_write_buffer() -> Self {
        Self::new()
    }
    /// Preset for a network buffer (currently identical to `new`).
    #[must_use]
    pub fn for_network() -> Self {
        Self::new()
    }
    /// Records a flush of `bytes` at timestamp `now_us`. The inter-flush
    /// interval is only tracked once a previous flush exists and time moved
    /// forward.
    pub fn flush(&mut self, bytes: u64, now_us: u64) {
        self.flushes += 1;
        self.total_bytes += bytes;
        self.max_bytes = self.max_bytes.max(bytes);
        if self.last_flush_us > 0 && now_us > self.last_flush_us {
            self.min_interval_us = self.min_interval_us.min(now_us - self.last_flush_us);
        }
        self.last_flush_us = now_us;
    }
    /// Number of flushes recorded.
    #[must_use]
    pub fn flush_count(&self) -> u64 {
        self.flushes
    }
    /// Total bytes written across all flushes.
    #[must_use]
    pub fn total_bytes(&self) -> u64 {
        self.total_bytes
    }
    /// Mean flush size in bytes (0.0 before any flush).
    #[must_use]
    pub fn avg_bytes(&self) -> f64 {
        match self.flushes {
            0 => 0.0,
            n => self.total_bytes as f64 / n as f64,
        }
    }
    /// Largest single flush in bytes.
    #[must_use]
    pub fn max_bytes(&self) -> u64 {
        self.max_bytes
    }
    /// Smallest observed gap between flushes, or 0 when fewer than two
    /// flushes have been seen.
    #[must_use]
    pub fn min_interval_us(&self) -> u64 {
        match self.min_interval_us {
            u64::MAX => 0,
            interval => interval,
        }
    }
    /// True once two flushes have occurred closer together than
    /// `threshold_us` (uses the raw sentinel, so it is false until an
    /// interval exists).
    #[must_use]
    pub fn is_bursty(&self, threshold_us: u64) -> bool {
        self.min_interval_us < threshold_us
    }
    /// Clears all recorded state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Unit tests for [`FlushTracker`].
#[cfg(test)]
mod flush_tracker_tests {
use super::*;
#[test]
fn f_flush_001_new() {
let ft = FlushTracker::new();
assert_eq!(ft.flush_count(), 0);
}
#[test]
fn f_flush_002_default() {
let ft = FlushTracker::default();
assert_eq!(ft.flush_count(), 0);
}
#[test]
fn f_flush_003_flush() {
let mut ft = FlushTracker::new();
ft.flush(1024, 1000);
assert_eq!(ft.flush_count(), 1);
}
#[test]
fn f_flush_004_total_bytes() {
let mut ft = FlushTracker::new();
ft.flush(1024, 1000);
ft.flush(2048, 2000);
assert_eq!(ft.total_bytes(), 3072);
}
#[test]
fn f_flush_005_max_bytes() {
let mut ft = FlushTracker::new();
ft.flush(1024, 1000);
ft.flush(4096, 2000);
ft.flush(2048, 3000);
assert_eq!(ft.max_bytes(), 4096);
}
#[test]
fn f_flush_006_avg_bytes() {
let mut ft = FlushTracker::new();
ft.flush(1000, 1000);
ft.flush(2000, 2000);
assert!((ft.avg_bytes() - 1500.0).abs() < 0.01);
}
#[test]
fn f_flush_007_for_write_buffer() {
let ft = FlushTracker::for_write_buffer();
assert_eq!(ft.flush_count(), 0);
}
#[test]
fn f_flush_008_for_network() {
let ft = FlushTracker::for_network();
assert_eq!(ft.flush_count(), 0);
}
#[test]
fn f_flush_009_min_interval() {
// Gaps are 100μs then 900μs → minimum is 100μs.
let mut ft = FlushTracker::new();
ft.flush(100, 1000);
ft.flush(100, 1100); ft.flush(100, 2000); assert_eq!(ft.min_interval_us(), 100);
}
#[test]
fn f_flush_010_bursty() {
let mut ft = FlushTracker::new();
ft.flush(100, 1000);
ft.flush(100, 1050); assert!(ft.is_bursty(100));
}
#[test]
fn f_flush_011_reset() {
let mut ft = FlushTracker::new();
ft.flush(1024, 1000);
ft.reset();
assert_eq!(ft.flush_count(), 0);
}
#[test]
fn f_flush_012_clone() {
let mut ft = FlushTracker::new();
ft.flush(1024, 1000);
let cloned = ft.clone();
assert_eq!(ft.flush_count(), cloned.flush_count());
}
}
/// Tracks a level against low/high watermarks and counts upward/downward
/// crossing events.
#[derive(Debug, Clone)]
pub struct WatermarkTracker {
    /// Values at or below this are "low".
    low_watermark: u64,
    /// Values at or above this are "high".
    high_watermark: u64,
    /// Most recently observed level.
    current: u64,
    /// Highest level ever observed.
    peak: u64,
    /// Number of transitions into the high region.
    high_events: u64,
    /// Number of transitions into the low region.
    low_events: u64,
}
impl Default for WatermarkTracker {
    fn default() -> Self {
        Self::new(25, 75)
    }
}
impl WatermarkTracker {
    /// Creates a tracker with the given low/high watermark levels; the
    /// current level starts at 0.
    #[must_use]
    pub fn new(low: u64, high: u64) -> Self {
        Self {
            low_watermark: low,
            high_watermark: high,
            current: 0,
            peak: 0,
            high_events: 0,
            low_events: 0,
        }
    }
    /// Preset for buffer occupancy (25/75).
    #[must_use]
    pub fn for_buffer() -> Self {
        Self::new(25, 75)
    }
    /// Preset for queue depth (10/90).
    #[must_use]
    pub fn for_queue() -> Self {
        Self::new(10, 90)
    }
    /// Observes a new level, updating the peak and counting a high/low event
    /// only when the level *enters* that region (no double-counting while it
    /// stays there).
    pub fn update(&mut self, value: u64) {
        let entered_high = self.current < self.high_watermark && value >= self.high_watermark;
        let entered_low = self.current > self.low_watermark && value <= self.low_watermark;
        self.current = value;
        self.peak = self.peak.max(value);
        if entered_high {
            self.high_events += 1;
        }
        if entered_low {
            self.low_events += 1;
        }
    }
    /// Most recently observed level.
    #[must_use]
    pub fn current(&self) -> u64 {
        self.current
    }
    /// Highest level ever observed.
    #[must_use]
    pub fn peak(&self) -> u64 {
        self.peak
    }
    /// True when the current level is at or above the high watermark.
    #[must_use]
    pub fn is_high(&self) -> bool {
        self.current >= self.high_watermark
    }
    /// True when the current level is at or below the low watermark.
    #[must_use]
    pub fn is_low(&self) -> bool {
        self.current <= self.low_watermark
    }
    /// Number of entries into the high region.
    #[must_use]
    pub fn high_events(&self) -> u64 {
        self.high_events
    }
    /// Number of entries into the low region.
    #[must_use]
    pub fn low_events(&self) -> u64 {
        self.low_events
    }
    /// True when the level sits strictly between the two watermarks.
    #[must_use]
    pub fn is_normal(&self) -> bool {
        !self.is_low() && !self.is_high()
    }
    /// Clears observed state while keeping the configured watermarks.
    pub fn reset(&mut self) {
        *self = Self::new(self.low_watermark, self.high_watermark);
    }
}
/// Unit tests for [`WatermarkTracker`].
#[cfg(test)]
mod watermark_tracker_tests {
use super::*;
#[test]
fn f_water_001_new() {
let wt = WatermarkTracker::new(25, 75);
assert_eq!(wt.current(), 0);
}
#[test]
fn f_water_002_default() {
// The initial level 0 sits at or below the default low watermark (25).
let wt = WatermarkTracker::default();
assert!(wt.is_low()); }
#[test]
fn f_water_003_update() {
let mut wt = WatermarkTracker::new(25, 75);
wt.update(50);
assert_eq!(wt.current(), 50);
}
#[test]
fn f_water_004_peak() {
let mut wt = WatermarkTracker::new(25, 75);
wt.update(80);
wt.update(30);
assert_eq!(wt.peak(), 80);
}
#[test]
fn f_water_005_is_high() {
let mut wt = WatermarkTracker::new(25, 75);
wt.update(80);
assert!(wt.is_high());
}
#[test]
fn f_water_006_is_low() {
let mut wt = WatermarkTracker::new(25, 75);
wt.update(20);
assert!(wt.is_low());
}
#[test]
fn f_water_007_for_buffer() {
let wt = WatermarkTracker::for_buffer();
assert_eq!(wt.current(), 0);
}
#[test]
fn f_water_008_for_queue() {
let wt = WatermarkTracker::for_queue();
assert_eq!(wt.current(), 0);
}
#[test]
fn f_water_009_high_events() {
// Only the transition into the high region counts as an event.
let mut wt = WatermarkTracker::new(25, 75);
wt.update(50);
wt.update(80); assert_eq!(wt.high_events(), 1);
}
#[test]
fn f_water_010_low_events() {
let mut wt = WatermarkTracker::new(25, 75);
wt.update(50);
wt.update(20); assert_eq!(wt.low_events(), 1);
}
#[test]
fn f_water_011_normal() {
let mut wt = WatermarkTracker::new(25, 75);
wt.update(50);
assert!(wt.is_normal());
}
#[test]
fn f_water_012_reset() {
let mut wt = WatermarkTracker::new(25, 75);
wt.update(80);
wt.reset();
assert_eq!(wt.current(), 0);
}
}
/// Tracks snapshot events: how many were taken, their sizes, and the running
/// average interval between consecutive snapshots.
#[derive(Debug, Clone)]
pub struct SnapshotTracker {
    /// Snapshots recorded so far.
    snapshot_count: u64,
    /// Timestamp of the most recent snapshot; 0 means "none yet".
    last_snapshot_us: u64,
    /// Sum of all snapshot sizes, in bytes.
    total_size_bytes: u64,
    /// Largest single snapshot.
    max_size_bytes: u64,
    /// Running mean of the gaps between consecutive snapshots.
    avg_interval_us: f64,
}
impl Default for SnapshotTracker {
    fn default() -> Self {
        Self::new()
    }
}
impl SnapshotTracker {
    /// Creates a tracker with no recorded snapshots.
    #[must_use]
    pub fn new() -> Self {
        Self {
            snapshot_count: 0,
            last_snapshot_us: 0,
            total_size_bytes: 0,
            max_size_bytes: 0,
            avg_interval_us: 0.0,
        }
    }
    /// Preset for database snapshots (currently identical to `new`).
    #[must_use]
    pub fn for_database() -> Self {
        Self::new()
    }
    /// Preset for state snapshots (currently identical to `new`).
    #[must_use]
    pub fn for_state() -> Self {
        Self::new()
    }
    /// Records a snapshot of `size_bytes` taken at timestamp `now_us`.
    /// The first snapshot only establishes the baseline timestamp; from the
    /// second onward the inter-snapshot interval feeds the running mean.
    pub fn snapshot(&mut self, size_bytes: u64, now_us: u64) {
        if self.last_snapshot_us > 0 && now_us > self.last_snapshot_us {
            let interval = (now_us - self.last_snapshot_us) as f64;
            // Weight the running mean by the number of *intervals* observed
            // so far, which is one less than the snapshot count. (Weighting
            // by the snapshot count — as an earlier version did — biases the
            // mean low: a single interval would average to interval/2.)
            let intervals = self.snapshot_count.saturating_sub(1) as f64;
            self.avg_interval_us =
                (self.avg_interval_us * intervals + interval) / (intervals + 1.0);
        }
        self.snapshot_count += 1;
        self.total_size_bytes += size_bytes;
        if size_bytes > self.max_size_bytes {
            self.max_size_bytes = size_bytes;
        }
        self.last_snapshot_us = now_us;
    }
    /// Number of snapshots recorded.
    #[must_use]
    pub fn snapshot_count(&self) -> u64 {
        self.snapshot_count
    }
    /// Sum of all snapshot sizes, in bytes.
    #[must_use]
    pub fn total_bytes(&self) -> u64 {
        self.total_size_bytes
    }
    /// Mean snapshot size in bytes (0.0 before any snapshot).
    #[must_use]
    pub fn avg_size_bytes(&self) -> f64 {
        if self.snapshot_count == 0 {
            0.0
        } else {
            self.total_size_bytes as f64 / self.snapshot_count as f64
        }
    }
    /// Largest single snapshot in bytes.
    #[must_use]
    pub fn max_size_bytes(&self) -> u64 {
        self.max_size_bytes
    }
    /// Running mean of the gaps between consecutive snapshots, in
    /// microseconds (0.0 until two snapshots have been taken).
    #[must_use]
    pub fn avg_interval_us(&self) -> f64 {
        self.avg_interval_us
    }
    /// Timestamp of the most recent snapshot (0 when none).
    #[must_use]
    pub fn last_snapshot_us(&self) -> u64 {
        self.last_snapshot_us
    }
    /// Clears all recorded state.
    pub fn reset(&mut self) {
        self.snapshot_count = 0;
        self.last_snapshot_us = 0;
        self.total_size_bytes = 0;
        self.max_size_bytes = 0;
        self.avg_interval_us = 0.0;
    }
}
/// Unit tests for [`SnapshotTracker`].
#[cfg(test)]
mod snapshot_tracker_tests {
use super::*;
#[test]
fn f_snap_001_new() {
let st = SnapshotTracker::new();
assert_eq!(st.snapshot_count(), 0);
}
#[test]
fn f_snap_002_default() {
let st = SnapshotTracker::default();
assert_eq!(st.snapshot_count(), 0);
}
#[test]
fn f_snap_003_snapshot() {
let mut st = SnapshotTracker::new();
st.snapshot(1024, 1000);
assert_eq!(st.snapshot_count(), 1);
}
#[test]
fn f_snap_004_total_bytes() {
let mut st = SnapshotTracker::new();
st.snapshot(1024, 1000);
st.snapshot(2048, 2000);
assert_eq!(st.total_bytes(), 3072);
}
#[test]
fn f_snap_005_max_size() {
let mut st = SnapshotTracker::new();
st.snapshot(1024, 1000);
st.snapshot(4096, 2000);
st.snapshot(2048, 3000);
assert_eq!(st.max_size_bytes(), 4096);
}
#[test]
fn f_snap_006_avg_size() {
let mut st = SnapshotTracker::new();
st.snapshot(1000, 1000);
st.snapshot(2000, 2000);
assert!((st.avg_size_bytes() - 1500.0).abs() < 0.01);
}
#[test]
fn f_snap_007_for_database() {
let st = SnapshotTracker::for_database();
assert_eq!(st.snapshot_count(), 0);
}
#[test]
fn f_snap_008_for_state() {
let st = SnapshotTracker::for_state();
assert_eq!(st.snapshot_count(), 0);
}
#[test]
fn f_snap_009_avg_interval() {
// Only checks positivity, so it stays valid regardless of how the
// running mean weights its samples.
let mut st = SnapshotTracker::new();
st.snapshot(100, 1000);
st.snapshot(100, 2000); assert!(st.avg_interval_us() > 0.0);
}
#[test]
fn f_snap_010_last_snapshot() {
let mut st = SnapshotTracker::new();
st.snapshot(100, 5000);
assert_eq!(st.last_snapshot_us(), 5000);
}
#[test]
fn f_snap_011_reset() {
let mut st = SnapshotTracker::new();
st.snapshot(1024, 1000);
st.reset();
assert_eq!(st.snapshot_count(), 0);
}
#[test]
fn f_snap_012_clone() {
let mut st = SnapshotTracker::new();
st.snapshot(1024, 1000);
let cloned = st.clone();
assert_eq!(st.snapshot_count(), cloned.snapshot_count());
}
}
/// Optimistic-concurrency version counter: compare-and-swap style updates
/// that count successes and conflicts.
#[derive(Debug, Clone)]
pub struct VersionTracker {
    /// Current version number, bumped on every successful update.
    current_version: u64,
    /// Successful updates (including forced ones).
    updates: u64,
    /// Rejected updates whose expected version did not match.
    conflicts: u64,
    /// Timestamp of the most recent successful update.
    last_update_us: u64,
}
impl Default for VersionTracker {
    fn default() -> Self {
        Self::new()
    }
}
impl VersionTracker {
    /// Creates a tracker at version 0 with no history.
    #[must_use]
    pub fn new() -> Self {
        Self {
            current_version: 0,
            updates: 0,
            conflicts: 0,
            last_update_us: 0,
        }
    }
    /// Preset for record versioning (currently identical to `new`).
    #[must_use]
    pub fn for_record() -> Self {
        Self::new()
    }
    /// Preset for cache versioning (currently identical to `new`).
    #[must_use]
    pub fn for_cache() -> Self {
        Self::new()
    }
    /// Attempts a compare-and-swap update: succeeds (bumping the version)
    /// only when `expected_version` matches the current version; otherwise
    /// records a conflict and returns `false`.
    pub fn try_update(&mut self, expected_version: u64, now_us: u64) -> bool {
        if self.current_version != expected_version {
            self.conflicts += 1;
            return false;
        }
        self.force_update(now_us);
        true
    }
    /// Unconditionally bumps the version (no conflict check).
    pub fn force_update(&mut self, now_us: u64) {
        self.current_version += 1;
        self.updates += 1;
        self.last_update_us = now_us;
    }
    /// Current version number.
    #[must_use]
    pub fn version(&self) -> u64 {
        self.current_version
    }
    /// Number of successful updates.
    #[must_use]
    pub fn updates(&self) -> u64 {
        self.updates
    }
    /// Number of rejected (conflicting) update attempts.
    #[must_use]
    pub fn conflicts(&self) -> u64 {
        self.conflicts
    }
    /// Fraction of attempts that conflicted, in [0, 1] (0.0 with no history).
    #[must_use]
    pub fn conflict_rate(&self) -> f64 {
        match self.updates + self.conflicts {
            0 => 0.0,
            total => self.conflicts as f64 / total as f64,
        }
    }
    /// True while the conflict rate does not exceed `max_conflict_rate`.
    #[must_use]
    pub fn is_healthy(&self, max_conflict_rate: f64) -> bool {
        self.conflict_rate() <= max_conflict_rate
    }
    /// Clears the version and all counters.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod version_tracker_tests {
    use super::*;

    #[test]
    fn f_ver_001_new() {
        let tracker = VersionTracker::new();
        assert_eq!(tracker.version(), 0);
    }

    #[test]
    fn f_ver_002_default() {
        let tracker = VersionTracker::default();
        assert_eq!(tracker.version(), 0);
    }

    #[test]
    fn f_ver_003_try_update_success() {
        let mut tracker = VersionTracker::new();
        assert!(tracker.try_update(0, 1000));
        assert_eq!(tracker.version(), 1);
    }

    #[test]
    fn f_ver_004_try_update_conflict() {
        let mut tracker = VersionTracker::new();
        tracker.try_update(0, 1000);
        assert!(!tracker.try_update(0, 2000));
    }

    #[test]
    fn f_ver_005_force_update() {
        let mut tracker = VersionTracker::new();
        tracker.force_update(1000);
        assert_eq!(tracker.version(), 1);
    }

    #[test]
    fn f_ver_006_conflicts() {
        let mut tracker = VersionTracker::new();
        tracker.try_update(0, 1000);
        tracker.try_update(0, 2000);
        assert_eq!(tracker.conflicts(), 1);
    }

    #[test]
    fn f_ver_007_for_record() {
        let tracker = VersionTracker::for_record();
        assert_eq!(tracker.version(), 0);
    }

    #[test]
    fn f_ver_008_for_cache() {
        let tracker = VersionTracker::for_cache();
        assert_eq!(tracker.version(), 0);
    }

    #[test]
    fn f_ver_009_conflict_rate() {
        let mut tracker = VersionTracker::new();
        tracker.try_update(0, 1000);
        tracker.try_update(0, 2000);
        assert!((tracker.conflict_rate() - 0.5).abs() < 0.01);
    }

    #[test]
    fn f_ver_010_healthy() {
        let mut tracker = VersionTracker::new();
        tracker.try_update(0, 1000);
        assert!(tracker.is_healthy(0.1));
    }

    #[test]
    fn f_ver_011_reset() {
        let mut tracker = VersionTracker::new();
        tracker.try_update(0, 1000);
        tracker.reset();
        assert_eq!(tracker.version(), 0);
    }

    #[test]
    fn f_ver_012_clone() {
        let mut tracker = VersionTracker::new();
        tracker.try_update(0, 1000);
        let copy = tracker.clone();
        assert_eq!(tracker.version(), copy.version());
    }
}
/// Token-bucket traffic shaper: a bucket of `bucket_size` tokens refills
/// continuously at a configured rate, and traffic is admitted only while
/// enough tokens remain.
#[derive(Debug, Clone)]
pub struct TokenBucketShaper {
    bucket_size: u64,
    tokens: u64,
    fill_rate_per_us: f64,
    // Timestamp (µs) up to which refill credit has been accounted for.
    last_fill_us: u64,
    bytes_shaped: u64,
    drops: u64,
}

impl Default for TokenBucketShaper {
    fn default() -> Self {
        Self::for_network()
    }
}

impl TokenBucketShaper {
    /// Creates a shaper with a full bucket of `bucket_size` tokens that
    /// refills at `fill_rate_per_sec` tokens per second.
    #[must_use]
    pub fn new(bucket_size: u64, fill_rate_per_sec: u64) -> Self {
        Self {
            bucket_size,
            tokens: bucket_size,
            fill_rate_per_us: fill_rate_per_sec as f64 / 1_000_000.0,
            last_fill_us: 0,
            bytes_shaped: 0,
            drops: 0,
        }
    }

    /// Preset: 1,000,000-token burst, 100,000 tokens/s sustained.
    #[must_use]
    pub fn for_network() -> Self {
        Self::new(1_000_000, 100_000)
    }

    /// Preset: 10,000-token burst, 1,000 tokens/s sustained.
    #[must_use]
    pub fn for_api() -> Self {
        Self::new(10_000, 1_000)
    }

    /// Credits tokens for the time elapsed since `last_fill_us`.
    ///
    /// Fixes two defects of the previous implementation:
    /// (a) sub-token fractions are no longer discarded on every call —
    ///     frequent polling against a slow fill rate previously truncated
    ///     the remainder each time and accrued nothing, ever;
    /// (b) timestamp 0 is treated as a valid instant instead of a
    ///     "never filled" sentinel, so a stream of events starting at
    ///     t = 0 still refills (the bucket starts full, and the `min`
    ///     cap makes the eager first refill harmless).
    fn refill(&mut self, now_us: u64) {
        if now_us <= self.last_fill_us {
            return; // no time has passed (or time went backwards)
        }
        let elapsed = now_us - self.last_fill_us;
        let new_tokens = (elapsed as f64 * self.fill_rate_per_us) as u64;
        if new_tokens == 0 {
            // Less than one whole token accrued: keep `last_fill_us`
            // unchanged so the fractional credit keeps accumulating.
            return;
        }
        self.tokens = self.tokens.saturating_add(new_tokens).min(self.bucket_size);
        if self.tokens == self.bucket_size {
            // Bucket is full; any residual fraction is irrelevant.
            self.last_fill_us = now_us;
        } else {
            // Advance only by the time worth of the whole tokens granted,
            // preserving the fractional remainder for the next refill.
            // (`fill_rate_per_us` is non-zero here, since new_tokens > 0.)
            self.last_fill_us += (new_tokens as f64 / self.fill_rate_per_us) as u64;
        }
    }

    /// Attempts to send `bytes` at time `now_us`; returns `false` (and
    /// counts a drop) when the bucket lacks sufficient tokens.
    pub fn try_consume(&mut self, bytes: u64, now_us: u64) -> bool {
        self.refill(now_us);
        if self.tokens >= bytes {
            self.tokens -= bytes;
            self.bytes_shaped += bytes;
            true
        } else {
            self.drops += 1;
            false
        }
    }

    /// Tokens currently available (as of the last refill).
    #[must_use]
    pub fn tokens(&self) -> u64 {
        self.tokens
    }

    /// Total bytes admitted through the shaper.
    #[must_use]
    pub fn bytes_shaped(&self) -> u64 {
        self.bytes_shaped
    }

    /// Number of rejected consume attempts.
    #[must_use]
    pub fn drops(&self) -> u64 {
        self.drops
    }

    /// Bucket fullness as a percentage of capacity (0.0 for a zero-size bucket).
    #[must_use]
    pub fn fill_percentage(&self) -> f64 {
        if self.bucket_size == 0 {
            0.0
        } else {
            (self.tokens as f64 / self.bucket_size as f64) * 100.0
        }
    }

    /// Restores a full bucket and clears all counters.
    pub fn reset(&mut self) {
        self.tokens = self.bucket_size;
        self.last_fill_us = 0;
        self.bytes_shaped = 0;
        self.drops = 0;
    }
}
#[cfg(test)]
mod token_bucket_shaper_tests {
    use super::*;

    #[test]
    fn f_shape_001_new() {
        let shaper = TokenBucketShaper::new(1000, 100);
        assert_eq!(shaper.tokens(), 1000);
    }

    #[test]
    fn f_shape_002_default() {
        let shaper = TokenBucketShaper::default();
        assert_eq!(shaper.tokens(), 1_000_000);
    }

    #[test]
    fn f_shape_003_consume() {
        let mut shaper = TokenBucketShaper::new(1000, 100);
        shaper.try_consume(100, 1000);
        assert_eq!(shaper.tokens(), 900);
    }

    #[test]
    fn f_shape_004_consume_fail() {
        let mut shaper = TokenBucketShaper::new(100, 10);
        assert!(!shaper.try_consume(200, 1000));
    }

    #[test]
    fn f_shape_005_drops() {
        let mut shaper = TokenBucketShaper::new(100, 10);
        shaper.try_consume(200, 1000);
        assert_eq!(shaper.drops(), 1);
    }

    #[test]
    fn f_shape_006_bytes_shaped() {
        let mut shaper = TokenBucketShaper::new(1000, 100);
        shaper.try_consume(100, 1000);
        shaper.try_consume(200, 2000);
        assert_eq!(shaper.bytes_shaped(), 300);
    }

    #[test]
    fn f_shape_007_for_network() {
        let shaper = TokenBucketShaper::for_network();
        assert_eq!(shaper.tokens(), 1_000_000);
    }

    #[test]
    fn f_shape_008_for_api() {
        let shaper = TokenBucketShaper::for_api();
        assert_eq!(shaper.tokens(), 10_000);
    }

    #[test]
    fn f_shape_009_fill_percentage() {
        let mut shaper = TokenBucketShaper::new(1000, 100);
        shaper.try_consume(500, 1000);
        assert!((shaper.fill_percentage() - 50.0).abs() < 0.01);
    }

    #[test]
    fn f_shape_010_refill() {
        let mut shaper = TokenBucketShaper::new(1000, 1_000_000);
        shaper.try_consume(500, 0);
        shaper.try_consume(0, 250);
        assert!(shaper.tokens() >= 500);
    }

    #[test]
    fn f_shape_011_reset() {
        let mut shaper = TokenBucketShaper::new(1000, 100);
        shaper.try_consume(500, 1000);
        shaper.reset();
        assert_eq!(shaper.tokens(), 1000);
    }

    #[test]
    fn f_shape_012_clone() {
        let mut shaper = TokenBucketShaper::new(1000, 100);
        shaper.try_consume(100, 1000);
        let copy = shaper.clone();
        assert_eq!(shaper.tokens(), copy.tokens());
    }
}
/// Election role of a single node.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ElectionState {
    /// Passive node following a leader.
    Follower,
    /// Node currently soliciting votes.
    Candidate,
    /// Node that won the most recent election.
    Leader,
}

/// Tracks election rounds, term numbers and leadership tenure for one node.
#[derive(Debug, Clone)]
pub struct LeaderElection {
    state: ElectionState,
    term: u64,
    elections: u64,
    terms_as_leader: u64,
    last_heartbeat_us: u64,
}

impl Default for LeaderElection {
    fn default() -> Self {
        Self::new()
    }
}

impl LeaderElection {
    /// Starts as a follower in term 0.
    #[must_use]
    pub fn new() -> Self {
        Self {
            state: ElectionState::Follower,
            term: 0,
            elections: 0,
            terms_as_leader: 0,
            last_heartbeat_us: 0,
        }
    }

    /// Convenience constructor for cluster membership.
    #[must_use]
    pub fn for_cluster() -> Self {
        Self::new()
    }

    /// Becomes a candidate in a fresh term and records the attempt.
    pub fn start_election(&mut self, now_us: u64) {
        self.state = ElectionState::Candidate;
        self.term += 1;
        self.elections += 1;
        self.last_heartbeat_us = now_us;
    }

    /// Promotes a candidate to leader; ignored unless currently a candidate.
    pub fn win_election(&mut self, now_us: u64) {
        if !matches!(self.state, ElectionState::Candidate) {
            return;
        }
        self.state = ElectionState::Leader;
        self.terms_as_leader += 1;
        self.last_heartbeat_us = now_us;
    }

    /// Reverts to follower, adopting `new_term` when it is more recent.
    pub fn step_down(&mut self, new_term: u64) {
        self.term = self.term.max(new_term);
        self.state = ElectionState::Follower;
    }

    /// Records a heartbeat timestamp.
    pub fn heartbeat(&mut self, now_us: u64) {
        self.last_heartbeat_us = now_us;
    }

    /// Current election role.
    #[must_use]
    pub fn state(&self) -> ElectionState {
        self.state
    }

    /// Current term number.
    #[must_use]
    pub fn term(&self) -> u64 {
        self.term
    }

    /// True while this node holds leadership.
    #[must_use]
    pub fn is_leader(&self) -> bool {
        matches!(self.state, ElectionState::Leader)
    }

    /// Number of elections this node has started.
    #[must_use]
    pub fn elections(&self) -> u64 {
        self.elections
    }

    /// Number of terms this node has won.
    #[must_use]
    pub fn terms_as_leader(&self) -> u64 {
        self.terms_as_leader
    }

    /// Returns to the initial follower/term-0 state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod leader_election_tests {
    use super::*;

    #[test]
    fn f_elect_001_new() {
        let election = LeaderElection::new();
        assert_eq!(election.state(), ElectionState::Follower);
    }

    #[test]
    fn f_elect_002_default() {
        let election = LeaderElection::default();
        assert_eq!(election.state(), ElectionState::Follower);
    }

    #[test]
    fn f_elect_003_start_election() {
        let mut election = LeaderElection::new();
        election.start_election(1000);
        assert_eq!(election.state(), ElectionState::Candidate);
    }

    #[test]
    fn f_elect_004_term_increment() {
        let mut election = LeaderElection::new();
        election.start_election(1000);
        assert_eq!(election.term(), 1);
    }

    #[test]
    fn f_elect_005_win_election() {
        let mut election = LeaderElection::new();
        election.start_election(1000);
        election.win_election(2000);
        assert!(election.is_leader());
    }

    #[test]
    fn f_elect_006_step_down() {
        let mut election = LeaderElection::new();
        election.start_election(1000);
        election.win_election(2000);
        election.step_down(2);
        assert_eq!(election.state(), ElectionState::Follower);
    }

    #[test]
    fn f_elect_007_for_cluster() {
        let election = LeaderElection::for_cluster();
        assert_eq!(election.term(), 0);
    }

    #[test]
    fn f_elect_008_elections() {
        let mut election = LeaderElection::new();
        election.start_election(1000);
        election.start_election(2000);
        assert_eq!(election.elections(), 2);
    }

    #[test]
    fn f_elect_009_terms_as_leader() {
        let mut election = LeaderElection::new();
        election.start_election(1000);
        election.win_election(2000);
        assert_eq!(election.terms_as_leader(), 1);
    }

    #[test]
    fn f_elect_010_win_requires_candidate() {
        let mut election = LeaderElection::new();
        election.win_election(1000);
        assert!(!election.is_leader());
    }

    #[test]
    fn f_elect_011_reset() {
        let mut election = LeaderElection::new();
        election.start_election(1000);
        election.win_election(2000);
        election.reset();
        assert_eq!(election.state(), ElectionState::Follower);
    }

    #[test]
    fn f_elect_012_clone() {
        let mut election = LeaderElection::new();
        election.start_election(1000);
        let copy = election.clone();
        assert_eq!(election.state(), copy.state());
    }
}
/// Aggregates checkpoint durations, sizes and failures.
#[derive(Debug, Clone)]
pub struct CheckpointTracker {
    checkpoints: u64,
    total_duration_us: u64,
    last_checkpoint_us: u64,
    bytes_written: u64,
    failures: u64,
}

impl Default for CheckpointTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl CheckpointTracker {
    /// Creates an empty tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            checkpoints: 0,
            total_duration_us: 0,
            last_checkpoint_us: 0,
            bytes_written: 0,
            failures: 0,
        }
    }

    /// Convenience constructor for database checkpoints.
    #[must_use]
    pub fn for_database() -> Self {
        Self::new()
    }

    /// Convenience constructor for write-ahead-log checkpoints.
    #[must_use]
    pub fn for_wal() -> Self {
        Self::new()
    }

    /// Records a successful checkpoint: its duration, bytes flushed and
    /// completion timestamp.
    pub fn checkpoint(&mut self, duration_us: u64, bytes: u64, now_us: u64) {
        self.checkpoints += 1;
        self.total_duration_us += duration_us;
        self.bytes_written += bytes;
        self.last_checkpoint_us = now_us;
    }

    /// Records a failed checkpoint attempt.
    pub fn fail(&mut self) {
        self.failures += 1;
    }

    /// Number of successful checkpoints.
    #[must_use]
    pub fn checkpoint_count(&self) -> u64 {
        self.checkpoints
    }

    /// Mean checkpoint duration in microseconds (0.0 before the first one).
    #[must_use]
    pub fn avg_duration_us(&self) -> f64 {
        match self.checkpoints {
            0 => 0.0,
            n => self.total_duration_us as f64 / n as f64,
        }
    }

    /// Total bytes flushed across all checkpoints.
    #[must_use]
    pub fn bytes_written(&self) -> u64 {
        self.bytes_written
    }

    /// Fraction of attempts (success + failure) that failed.
    #[must_use]
    pub fn failure_rate(&self) -> f64 {
        match self.checkpoints + self.failures {
            0 => 0.0,
            total => self.failures as f64 / total as f64,
        }
    }

    /// True while the failure rate does not exceed `max_failure_rate`.
    #[must_use]
    pub fn is_healthy(&self, max_failure_rate: f64) -> bool {
        self.failure_rate() <= max_failure_rate
    }

    /// Microseconds since the last checkpoint (0 before the first, and
    /// saturating when `now_us` precedes it).
    #[must_use]
    pub fn time_since_checkpoint(&self, now_us: u64) -> u64 {
        match self.last_checkpoint_us {
            0 => 0,
            last => now_us.saturating_sub(last),
        }
    }

    /// Returns the tracker to its freshly-constructed state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod checkpoint_tracker_tests {
    use super::*;

    #[test]
    fn f_ckpt_001_new() {
        let tracker = CheckpointTracker::new();
        assert_eq!(tracker.checkpoint_count(), 0);
    }

    #[test]
    fn f_ckpt_002_default() {
        let tracker = CheckpointTracker::default();
        assert_eq!(tracker.checkpoint_count(), 0);
    }

    #[test]
    fn f_ckpt_003_checkpoint() {
        let mut tracker = CheckpointTracker::new();
        tracker.checkpoint(1000, 1024, 10000);
        assert_eq!(tracker.checkpoint_count(), 1);
    }

    #[test]
    fn f_ckpt_004_bytes_written() {
        let mut tracker = CheckpointTracker::new();
        tracker.checkpoint(1000, 1024, 10000);
        tracker.checkpoint(1000, 2048, 20000);
        assert_eq!(tracker.bytes_written(), 3072);
    }

    #[test]
    fn f_ckpt_005_avg_duration() {
        let mut tracker = CheckpointTracker::new();
        tracker.checkpoint(1000, 100, 10000);
        tracker.checkpoint(2000, 100, 20000);
        assert!((tracker.avg_duration_us() - 1500.0).abs() < 0.01);
    }

    #[test]
    fn f_ckpt_006_failures() {
        let mut tracker = CheckpointTracker::new();
        tracker.checkpoint(1000, 100, 10000);
        tracker.fail();
        assert!((tracker.failure_rate() - 0.5).abs() < 0.01);
    }

    #[test]
    fn f_ckpt_007_for_database() {
        let tracker = CheckpointTracker::for_database();
        assert_eq!(tracker.checkpoint_count(), 0);
    }

    #[test]
    fn f_ckpt_008_for_wal() {
        let tracker = CheckpointTracker::for_wal();
        assert_eq!(tracker.checkpoint_count(), 0);
    }

    #[test]
    fn f_ckpt_009_healthy() {
        let mut tracker = CheckpointTracker::new();
        tracker.checkpoint(1000, 100, 10000);
        assert!(tracker.is_healthy(0.1));
    }

    #[test]
    fn f_ckpt_010_time_since() {
        let mut tracker = CheckpointTracker::new();
        tracker.checkpoint(1000, 100, 10000);
        assert_eq!(tracker.time_since_checkpoint(15000), 5000);
    }

    #[test]
    fn f_ckpt_011_reset() {
        let mut tracker = CheckpointTracker::new();
        tracker.checkpoint(1000, 100, 10000);
        tracker.reset();
        assert_eq!(tracker.checkpoint_count(), 0);
    }

    #[test]
    fn f_ckpt_012_clone() {
        let mut tracker = CheckpointTracker::new();
        tracker.checkpoint(1000, 100, 10000);
        let copy = tracker.clone();
        assert_eq!(tracker.checkpoint_count(), copy.checkpoint_count());
    }
}
/// Tracks replica lag samples against a fixed breach threshold.
#[derive(Debug, Clone)]
pub struct ReplicationLag {
    samples: u64,
    total_lag_us: u64,
    max_lag_us: u64,
    current_lag_us: u64,
    threshold_us: u64,
    breaches: u64,
}

impl Default for ReplicationLag {
    fn default() -> Self {
        Self::for_database()
    }
}

impl ReplicationLag {
    /// Creates a tracker that flags lag samples above `threshold_us`.
    #[must_use]
    pub fn new(threshold_us: u64) -> Self {
        Self {
            samples: 0,
            total_lag_us: 0,
            max_lag_us: 0,
            current_lag_us: 0,
            threshold_us,
            breaches: 0,
        }
    }

    /// Preset: 1 s threshold (database replication).
    #[must_use]
    pub fn for_database() -> Self {
        Self::new(1_000_000)
    }

    /// Preset: 100 ms threshold (cache replication).
    #[must_use]
    pub fn for_cache() -> Self {
        Self::new(100_000)
    }

    /// Records one lag observation, updating the running aggregates and
    /// counting a breach when the sample exceeds the threshold.
    pub fn record(&mut self, lag_us: u64) {
        self.samples += 1;
        self.total_lag_us += lag_us;
        self.current_lag_us = lag_us;
        self.max_lag_us = self.max_lag_us.max(lag_us);
        self.breaches += u64::from(lag_us > self.threshold_us);
    }

    /// Most recently observed lag.
    #[must_use]
    pub fn current_lag_us(&self) -> u64 {
        self.current_lag_us
    }

    /// Mean lag across all samples (0.0 before the first sample).
    #[must_use]
    pub fn avg_lag_us(&self) -> f64 {
        match self.samples {
            0 => 0.0,
            n => self.total_lag_us as f64 / n as f64,
        }
    }

    /// Largest lag ever observed.
    #[must_use]
    pub fn max_lag_us(&self) -> u64 {
        self.max_lag_us
    }

    /// Number of samples that exceeded the threshold.
    #[must_use]
    pub fn breaches(&self) -> u64 {
        self.breaches
    }

    /// Healthy while the most recent sample is within the threshold.
    #[must_use]
    pub fn is_healthy(&self) -> bool {
        self.current_lag_us <= self.threshold_us
    }

    /// Fraction of samples that breached the threshold.
    #[must_use]
    pub fn breach_rate(&self) -> f64 {
        match self.samples {
            0 => 0.0,
            n => self.breaches as f64 / n as f64,
        }
    }

    /// Clears all samples; the configured threshold is kept.
    pub fn reset(&mut self) {
        *self = Self::new(self.threshold_us);
    }
}
#[cfg(test)]
mod replication_lag_tests {
    use super::*;

    #[test]
    fn f_repl_001_new() {
        let lag = ReplicationLag::new(1000);
        assert_eq!(lag.current_lag_us(), 0);
    }

    #[test]
    fn f_repl_002_default() {
        let lag = ReplicationLag::default();
        assert!(lag.is_healthy());
    }

    #[test]
    fn f_repl_003_record() {
        let mut lag = ReplicationLag::new(1000);
        lag.record(500);
        assert_eq!(lag.current_lag_us(), 500);
    }

    #[test]
    fn f_repl_004_max_lag() {
        let mut lag = ReplicationLag::new(10000);
        lag.record(500);
        lag.record(2000);
        lag.record(800);
        assert_eq!(lag.max_lag_us(), 2000);
    }

    #[test]
    fn f_repl_005_avg_lag() {
        let mut lag = ReplicationLag::new(10000);
        lag.record(1000);
        lag.record(2000);
        assert!((lag.avg_lag_us() - 1500.0).abs() < 0.01);
    }

    #[test]
    fn f_repl_006_breaches() {
        let mut lag = ReplicationLag::new(1000);
        lag.record(500);
        lag.record(1500);
        assert_eq!(lag.breaches(), 1);
    }

    #[test]
    fn f_repl_007_for_database() {
        let lag = ReplicationLag::for_database();
        assert_eq!(lag.current_lag_us(), 0);
    }

    #[test]
    fn f_repl_008_for_cache() {
        let lag = ReplicationLag::for_cache();
        assert_eq!(lag.current_lag_us(), 0);
    }

    #[test]
    fn f_repl_009_healthy() {
        let mut lag = ReplicationLag::new(1000);
        lag.record(500);
        assert!(lag.is_healthy());
    }

    #[test]
    fn f_repl_010_unhealthy() {
        let mut lag = ReplicationLag::new(1000);
        lag.record(1500);
        assert!(!lag.is_healthy());
    }

    #[test]
    fn f_repl_011_reset() {
        let mut lag = ReplicationLag::new(1000);
        lag.record(500);
        lag.reset();
        assert_eq!(lag.current_lag_us(), 0);
    }

    #[test]
    fn f_repl_012_clone() {
        let mut lag = ReplicationLag::new(1000);
        lag.record(500);
        let copy = lag.clone();
        assert_eq!(lag.current_lag_us(), copy.current_lag_us());
    }
}
/// Majority-vote bookkeeping for a fixed-size cluster.
#[derive(Debug, Clone)]
pub struct QuorumTracker {
    total_nodes: u32,
    votes_received: u32,
    quorum_threshold: u32,
    rounds: u64,
    quorum_achieved: u64,
}

impl Default for QuorumTracker {
    fn default() -> Self {
        Self::for_cluster(3)
    }
}

impl QuorumTracker {
    /// Creates a tracker for `total_nodes` voters; quorum is a strict
    /// majority (`total_nodes / 2 + 1`).
    #[must_use]
    pub fn new(total_nodes: u32) -> Self {
        let quorum_threshold = total_nodes / 2 + 1;
        Self {
            total_nodes,
            votes_received: 0,
            quorum_threshold,
            rounds: 0,
            quorum_achieved: 0,
        }
    }

    /// Convenience constructor for a cluster of `nodes` members.
    #[must_use]
    pub fn for_cluster(nodes: u32) -> Self {
        Self::new(nodes)
    }

    /// Begins a new voting round, discarding votes from the previous one.
    pub fn start_round(&mut self) {
        self.rounds += 1;
        self.votes_received = 0;
    }

    /// Registers one vote, capped at the cluster size; the round counts as
    /// achieved the moment the threshold is crossed.
    pub fn vote(&mut self) {
        if self.votes_received >= self.total_nodes {
            return;
        }
        self.votes_received += 1;
        if self.votes_received == self.quorum_threshold {
            self.quorum_achieved += 1;
        }
    }

    /// True once the current round has reached a majority.
    #[must_use]
    pub fn has_quorum(&self) -> bool {
        self.votes_received >= self.quorum_threshold
    }

    /// Votes received in the current round.
    #[must_use]
    pub fn votes(&self) -> u32 {
        self.votes_received
    }

    /// Additional votes still required for quorum (0 once reached).
    #[must_use]
    pub fn votes_needed(&self) -> u32 {
        self.quorum_threshold.saturating_sub(self.votes_received)
    }

    /// Number of rounds started.
    #[must_use]
    pub fn rounds(&self) -> u64 {
        self.rounds
    }

    /// Fraction of started rounds that achieved quorum.
    #[must_use]
    pub fn success_rate(&self) -> f64 {
        match self.rounds {
            0 => 0.0,
            n => self.quorum_achieved as f64 / n as f64,
        }
    }

    /// Clears votes and round statistics; cluster size is kept.
    pub fn reset(&mut self) {
        *self = Self::new(self.total_nodes);
    }
}
#[cfg(test)]
mod quorum_tracker_tests {
    use super::*;

    #[test]
    fn f_quorum_001_new() {
        let quorum = QuorumTracker::new(5);
        assert_eq!(quorum.votes(), 0);
    }

    #[test]
    fn f_quorum_002_default() {
        let quorum = QuorumTracker::default();
        assert!(!quorum.has_quorum());
    }

    #[test]
    fn f_quorum_003_vote() {
        let mut quorum = QuorumTracker::new(5);
        quorum.vote();
        assert_eq!(quorum.votes(), 1);
    }

    #[test]
    fn f_quorum_004_quorum() {
        let mut quorum = QuorumTracker::new(5);
        quorum.vote();
        quorum.vote();
        quorum.vote();
        assert!(quorum.has_quorum());
    }

    #[test]
    fn f_quorum_005_no_quorum() {
        let mut quorum = QuorumTracker::new(5);
        quorum.vote();
        quorum.vote();
        assert!(!quorum.has_quorum());
    }

    #[test]
    fn f_quorum_006_votes_needed() {
        let mut quorum = QuorumTracker::new(5);
        quorum.vote();
        assert_eq!(quorum.votes_needed(), 2);
    }

    #[test]
    fn f_quorum_007_for_cluster() {
        let quorum = QuorumTracker::for_cluster(7);
        assert_eq!(quorum.votes_needed(), 4);
    }

    #[test]
    fn f_quorum_008_start_round() {
        let mut quorum = QuorumTracker::new(5);
        quorum.vote();
        quorum.vote();
        quorum.start_round();
        assert_eq!(quorum.votes(), 0);
    }

    #[test]
    fn f_quorum_009_rounds() {
        let mut quorum = QuorumTracker::new(5);
        quorum.start_round();
        quorum.start_round();
        assert_eq!(quorum.rounds(), 2);
    }

    #[test]
    fn f_quorum_010_success_rate() {
        let mut quorum = QuorumTracker::new(3);
        quorum.start_round();
        quorum.vote();
        quorum.vote();
        quorum.start_round();
        assert!((quorum.success_rate() - 0.5).abs() < 0.01);
    }

    #[test]
    fn f_quorum_011_reset() {
        let mut quorum = QuorumTracker::new(5);
        quorum.vote();
        quorum.reset();
        assert_eq!(quorum.votes(), 0);
    }

    #[test]
    fn f_quorum_012_clone() {
        let mut quorum = QuorumTracker::new(5);
        quorum.vote();
        let copy = quorum.clone();
        assert_eq!(quorum.votes(), copy.votes());
    }
}
/// Tracks partition assignment and health for a consumer/shard owner.
///
/// Invariant maintained by all mutators: `healthy <= assigned <= total_partitions`.
#[derive(Debug, Clone)]
pub struct PartitionTracker {
    total_partitions: u32,
    assigned: u32,
    healthy: u32,
    rebalances: u64,
    last_rebalance_us: u64,
}

impl Default for PartitionTracker {
    fn default() -> Self {
        Self::for_kafka()
    }
}

impl PartitionTracker {
    /// Creates a tracker over a topology of `total_partitions` partitions,
    /// none of which are assigned yet.
    #[must_use]
    pub fn new(total_partitions: u32) -> Self {
        Self {
            total_partitions,
            assigned: 0,
            healthy: 0,
            rebalances: 0,
            last_rebalance_us: 0,
        }
    }

    /// Preset: 12 partitions.
    #[must_use]
    pub fn for_kafka() -> Self {
        Self::new(12)
    }

    /// Preset: 8 shards.
    #[must_use]
    pub fn for_shards() -> Self {
        Self::new(8)
    }

    /// Sets the number of assigned partitions, capped at the total.
    ///
    /// Also clamps the healthy count: previously, shrinking the assignment
    /// could leave `healthy > assigned`, producing a health rate above
    /// 100% and breaking `is_fully_healthy`.
    pub fn assign(&mut self, count: u32) {
        self.assigned = count.min(self.total_partitions);
        self.healthy = self.healthy.min(self.assigned);
    }

    /// Sets the healthy count, capped at the current assignment.
    pub fn mark_healthy(&mut self, count: u32) {
        self.healthy = count.min(self.assigned);
    }

    /// Records a rebalance event at `now_us`.
    pub fn rebalance(&mut self, now_us: u64) {
        self.rebalances += 1;
        self.last_rebalance_us = now_us;
    }

    /// Partitions currently assigned.
    #[must_use]
    pub fn assigned(&self) -> u32 {
        self.assigned
    }

    /// Assigned partitions currently marked healthy.
    #[must_use]
    pub fn healthy(&self) -> u32 {
        self.healthy
    }

    /// Assigned partitions as a percentage of the total topology.
    #[must_use]
    pub fn assignment_rate(&self) -> f64 {
        if self.total_partitions == 0 {
            0.0
        } else {
            (self.assigned as f64 / self.total_partitions as f64) * 100.0
        }
    }

    /// Healthy partitions as a percentage of those assigned.
    #[must_use]
    pub fn health_rate(&self) -> f64 {
        if self.assigned == 0 {
            0.0
        } else {
            (self.healthy as f64 / self.assigned as f64) * 100.0
        }
    }

    /// True when every assigned partition is healthy (and at least one is assigned).
    #[must_use]
    pub fn is_fully_healthy(&self) -> bool {
        self.healthy == self.assigned && self.assigned > 0
    }

    /// Number of rebalances recorded.
    #[must_use]
    pub fn rebalances(&self) -> u64 {
        self.rebalances
    }

    /// Clears assignment state and rebalance history; topology size is kept.
    pub fn reset(&mut self) {
        self.assigned = 0;
        self.healthy = 0;
        self.rebalances = 0;
        self.last_rebalance_us = 0;
    }
}
#[cfg(test)]
mod partition_tracker_tests {
    use super::*;

    #[test]
    fn f_part_001_new() {
        let tracker = PartitionTracker::new(10);
        assert_eq!(tracker.assigned(), 0);
    }

    #[test]
    fn f_part_002_default() {
        let tracker = PartitionTracker::default();
        assert_eq!(tracker.assigned(), 0);
    }

    #[test]
    fn f_part_003_assign() {
        let mut tracker = PartitionTracker::new(10);
        tracker.assign(5);
        assert_eq!(tracker.assigned(), 5);
    }

    #[test]
    fn f_part_004_assign_cap() {
        let mut tracker = PartitionTracker::new(10);
        tracker.assign(15);
        assert_eq!(tracker.assigned(), 10);
    }

    #[test]
    fn f_part_005_mark_healthy() {
        let mut tracker = PartitionTracker::new(10);
        tracker.assign(5);
        tracker.mark_healthy(3);
        assert_eq!(tracker.healthy(), 3);
    }

    #[test]
    fn f_part_006_health_rate() {
        let mut tracker = PartitionTracker::new(10);
        tracker.assign(10);
        tracker.mark_healthy(5);
        assert!((tracker.health_rate() - 50.0).abs() < 0.01);
    }

    #[test]
    fn f_part_007_for_kafka() {
        let tracker = PartitionTracker::for_kafka();
        assert_eq!(tracker.assigned(), 0);
    }

    #[test]
    fn f_part_008_for_shards() {
        let tracker = PartitionTracker::for_shards();
        assert_eq!(tracker.assigned(), 0);
    }

    #[test]
    fn f_part_009_fully_healthy() {
        let mut tracker = PartitionTracker::new(10);
        tracker.assign(5);
        tracker.mark_healthy(5);
        assert!(tracker.is_fully_healthy());
    }

    #[test]
    fn f_part_010_rebalances() {
        let mut tracker = PartitionTracker::new(10);
        tracker.rebalance(1000);
        tracker.rebalance(2000);
        assert_eq!(tracker.rebalances(), 2);
    }

    #[test]
    fn f_part_011_reset() {
        let mut tracker = PartitionTracker::new(10);
        tracker.assign(5);
        tracker.reset();
        assert_eq!(tracker.assigned(), 0);
    }

    #[test]
    fn f_part_012_clone() {
        let mut tracker = PartitionTracker::new(10);
        tracker.assign(5);
        let copy = tracker.clone();
        assert_eq!(tracker.assigned(), copy.assigned());
    }
}
/// Bookkeeping model of a bounded connection pool (active + idle <= max_size).
#[derive(Debug, Clone)]
pub struct ConnectionPool {
    max_size: u32,
    active: u32,
    idle: u32,
    created: u64,
    destroyed: u64,
    wait_count: u64,
}

impl Default for ConnectionPool {
    fn default() -> Self {
        Self::for_database()
    }
}

impl ConnectionPool {
    /// Creates an empty pool bounded at `max_size` connections.
    #[must_use]
    pub fn new(max_size: u32) -> Self {
        Self {
            max_size,
            active: 0,
            idle: 0,
            created: 0,
            destroyed: 0,
            wait_count: 0,
        }
    }

    /// Preset: pool of 20 (database connections).
    #[must_use]
    pub fn for_database() -> Self {
        Self::new(20)
    }

    /// Preset: pool of 100 (HTTP connections).
    #[must_use]
    pub fn for_http() -> Self {
        Self::new(100)
    }

    /// Checks out a connection: reuses an idle one, creates a new one if
    /// under capacity, or records a wait and returns `false`.
    pub fn acquire(&mut self) -> bool {
        if self.idle > 0 {
            // Reuse an idle connection.
            self.idle -= 1;
        } else if self.active + self.idle < self.max_size {
            // Room to grow: open a brand-new connection.
            self.created += 1;
        } else {
            // Pool exhausted; caller has to wait.
            self.wait_count += 1;
            return false;
        }
        self.active += 1;
        true
    }

    /// Returns an active connection to the idle set.
    pub fn release(&mut self) {
        if self.active == 0 {
            return;
        }
        self.active -= 1;
        self.idle += 1;
    }

    /// Closes one idle connection, if any.
    pub fn destroy(&mut self) {
        if self.idle == 0 {
            return;
        }
        self.idle -= 1;
        self.destroyed += 1;
    }

    /// Connections currently checked out.
    #[must_use]
    pub fn active(&self) -> u32 {
        self.active
    }

    /// Connections currently idle in the pool.
    #[must_use]
    pub fn idle(&self) -> u32 {
        self.idle
    }

    /// Active connections as a percentage of capacity.
    #[must_use]
    pub fn utilization(&self) -> f64 {
        match self.max_size {
            0 => 0.0,
            cap => f64::from(self.active) / f64::from(cap) * 100.0,
        }
    }

    /// True when capacity is saturated and nothing is idle.
    #[must_use]
    pub fn is_exhausted(&self) -> bool {
        self.active >= self.max_size && self.idle == 0
    }

    /// Number of acquire attempts that had to wait.
    #[must_use]
    pub fn wait_count(&self) -> u64 {
        self.wait_count
    }

    /// Empties the pool statistics; capacity is kept.
    pub fn reset(&mut self) {
        *self = Self::new(self.max_size);
    }
}
#[cfg(test)]
mod connection_pool_tests {
    use super::*;

    #[test]
    fn f_cpool_001_new() {
        let pool = ConnectionPool::new(10);
        assert_eq!(pool.active(), 0);
    }

    #[test]
    fn f_cpool_002_default() {
        let pool = ConnectionPool::default();
        assert_eq!(pool.active(), 0);
    }

    #[test]
    fn f_cpool_003_acquire() {
        let mut pool = ConnectionPool::new(10);
        assert!(pool.acquire());
        assert_eq!(pool.active(), 1);
    }

    #[test]
    fn f_cpool_004_release() {
        let mut pool = ConnectionPool::new(10);
        pool.acquire();
        pool.release();
        assert_eq!(pool.idle(), 1);
    }

    #[test]
    fn f_cpool_005_acquire_idle() {
        let mut pool = ConnectionPool::new(10);
        pool.acquire();
        pool.release();
        pool.acquire();
        assert_eq!(pool.active(), 1);
        assert_eq!(pool.idle(), 0);
    }

    #[test]
    fn f_cpool_006_exhausted() {
        let mut pool = ConnectionPool::new(2);
        pool.acquire();
        pool.acquire();
        assert!(pool.is_exhausted());
    }

    #[test]
    fn f_cpool_007_for_database() {
        let pool = ConnectionPool::for_database();
        assert_eq!(pool.active(), 0);
    }

    #[test]
    fn f_cpool_008_for_http() {
        let pool = ConnectionPool::for_http();
        assert_eq!(pool.active(), 0);
    }

    #[test]
    fn f_cpool_009_utilization() {
        let mut pool = ConnectionPool::new(10);
        pool.acquire();
        pool.acquire();
        assert!((pool.utilization() - 20.0).abs() < 0.01);
    }

    #[test]
    fn f_cpool_010_wait_count() {
        let mut pool = ConnectionPool::new(1);
        pool.acquire();
        pool.acquire();
        assert_eq!(pool.wait_count(), 1);
    }

    #[test]
    fn f_cpool_011_reset() {
        let mut pool = ConnectionPool::new(10);
        pool.acquire();
        pool.reset();
        assert_eq!(pool.active(), 0);
    }

    #[test]
    fn f_cpool_012_clone() {
        let mut pool = ConnectionPool::new(10);
        pool.acquire();
        let copy = pool.clone();
        assert_eq!(pool.active(), copy.active());
    }
}
/// Request-level success/error/latency accounting with an in-flight gauge.
#[derive(Debug, Clone)]
pub struct RequestTracker {
    total: u64,
    success: u64,
    errors: u64,
    total_latency_us: u64,
    max_latency_us: u64,
    in_flight: u32,
}

impl Default for RequestTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl RequestTracker {
    /// Creates an empty tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            total: 0,
            success: 0,
            errors: 0,
            total_latency_us: 0,
            max_latency_us: 0,
            in_flight: 0,
        }
    }

    /// Convenience constructor for API endpoints.
    #[must_use]
    pub fn for_api() -> Self {
        Self::new()
    }

    /// Convenience constructor for database queries.
    #[must_use]
    pub fn for_queries() -> Self {
        Self::new()
    }

    /// Marks one request as started (in flight).
    pub fn start(&mut self) {
        self.in_flight += 1;
    }

    /// Shared bookkeeping for any finished request: totals, latency
    /// aggregates and the in-flight gauge (which never underflows).
    /// Consolidates logic previously duplicated in `complete` and `fail`.
    fn finish(&mut self, latency_us: u64) {
        self.total += 1;
        self.total_latency_us += latency_us;
        self.max_latency_us = self.max_latency_us.max(latency_us);
        self.in_flight = self.in_flight.saturating_sub(1);
    }

    /// Records a successful request and its latency.
    pub fn complete(&mut self, latency_us: u64) {
        self.success += 1;
        self.finish(latency_us);
    }

    /// Records a failed request and its latency.
    pub fn fail(&mut self, latency_us: u64) {
        self.errors += 1;
        self.finish(latency_us);
    }

    /// Total finished requests (successes + failures).
    #[must_use]
    pub fn total(&self) -> u64 {
        self.total
    }

    /// Successful requests as a percentage of the total (0.0 when idle).
    #[must_use]
    pub fn success_rate(&self) -> f64 {
        if self.total == 0 {
            0.0
        } else {
            (self.success as f64 / self.total as f64) * 100.0
        }
    }

    /// Failed requests as a percentage of the total (0.0 when idle).
    #[must_use]
    pub fn error_rate(&self) -> f64 {
        if self.total == 0 {
            0.0
        } else {
            (self.errors as f64 / self.total as f64) * 100.0
        }
    }

    /// Mean latency across all finished requests.
    #[must_use]
    pub fn avg_latency_us(&self) -> f64 {
        if self.total == 0 {
            0.0
        } else {
            self.total_latency_us as f64 / self.total as f64
        }
    }

    /// Requests currently in flight.
    #[must_use]
    pub fn in_flight(&self) -> u32 {
        self.in_flight
    }

    /// True while the error rate (percent) does not exceed `max_error_rate`.
    #[must_use]
    pub fn is_healthy(&self, max_error_rate: f64) -> bool {
        self.error_rate() <= max_error_rate
    }

    /// Returns the tracker to its freshly-constructed state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod request_tracker_tests {
    use super::*;

    #[test]
    fn f_req_001_new() {
        let tracker = RequestTracker::new();
        assert_eq!(tracker.total(), 0);
    }

    #[test]
    fn f_req_002_default() {
        let tracker = RequestTracker::default();
        assert_eq!(tracker.total(), 0);
    }

    #[test]
    fn f_req_003_start() {
        let mut tracker = RequestTracker::new();
        tracker.start();
        assert_eq!(tracker.in_flight(), 1);
    }

    #[test]
    fn f_req_004_complete() {
        let mut tracker = RequestTracker::new();
        tracker.start();
        tracker.complete(1000);
        assert_eq!(tracker.total(), 1);
        assert!((tracker.success_rate() - 100.0).abs() < 0.01);
    }

    #[test]
    fn f_req_005_fail() {
        let mut tracker = RequestTracker::new();
        tracker.start();
        tracker.fail(1000);
        assert!((tracker.error_rate() - 100.0).abs() < 0.01);
    }

    #[test]
    fn f_req_006_avg_latency() {
        let mut tracker = RequestTracker::new();
        tracker.complete(1000);
        tracker.complete(2000);
        assert!((tracker.avg_latency_us() - 1500.0).abs() < 0.01);
    }

    #[test]
    fn f_req_007_for_api() {
        let tracker = RequestTracker::for_api();
        assert_eq!(tracker.total(), 0);
    }

    #[test]
    fn f_req_008_for_queries() {
        let tracker = RequestTracker::for_queries();
        assert_eq!(tracker.total(), 0);
    }

    #[test]
    fn f_req_009_healthy() {
        let mut tracker = RequestTracker::new();
        tracker.complete(1000);
        assert!(tracker.is_healthy(1.0));
    }

    #[test]
    fn f_req_010_unhealthy() {
        let mut tracker = RequestTracker::new();
        tracker.fail(1000);
        assert!(!tracker.is_healthy(1.0));
    }

    #[test]
    fn f_req_011_reset() {
        let mut tracker = RequestTracker::new();
        tracker.complete(1000);
        tracker.reset();
        assert_eq!(tracker.total(), 0);
    }

    #[test]
    fn f_req_012_clone() {
        let mut tracker = RequestTracker::new();
        tracker.complete(1000);
        let copy = tracker.clone();
        assert_eq!(tracker.total(), copy.total());
    }
}
/// Tracks session lifecycle counts: active, peak, ended and expired.
#[derive(Debug, Clone)]
pub struct SessionTracker {
    active: u64,
    created: u64,
    expired: u64,
    peak: u64,
    total_duration_us: u64,
}

impl Default for SessionTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl SessionTracker {
    /// Creates an empty tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            active: 0,
            created: 0,
            expired: 0,
            peak: 0,
            total_duration_us: 0,
        }
    }

    /// Convenience constructor for user sessions.
    #[must_use]
    pub fn for_users() -> Self {
        Self::new()
    }

    /// Convenience constructor for API sessions.
    #[must_use]
    pub fn for_api() -> Self {
        Self::new()
    }

    /// Opens a new session, updating the high-water mark.
    pub fn create(&mut self) {
        self.active += 1;
        self.created += 1;
        self.peak = self.peak.max(self.active);
    }

    /// Closes one session normally, accumulating its duration; a no-op
    /// when nothing is active.
    pub fn end(&mut self, duration_us: u64) {
        if self.active == 0 {
            return;
        }
        self.active -= 1;
        self.total_duration_us += duration_us;
    }

    /// Closes one session by expiry, accumulating its duration; a no-op
    /// when nothing is active.
    pub fn expire(&mut self, duration_us: u64) {
        if self.active == 0 {
            return;
        }
        self.active -= 1;
        self.expired += 1;
        self.total_duration_us += duration_us;
    }

    /// Sessions currently open.
    #[must_use]
    pub fn active(&self) -> u64 {
        self.active
    }

    /// Total sessions ever created.
    #[must_use]
    pub fn created(&self) -> u64 {
        self.created
    }

    /// Highest concurrent session count observed.
    #[must_use]
    pub fn peak(&self) -> u64 {
        self.peak
    }

    /// Percentage of ended sessions that ended by expiry.
    #[must_use]
    pub fn expiration_rate(&self) -> f64 {
        match self.created - self.active {
            0 => 0.0,
            ended => self.expired as f64 / ended as f64 * 100.0,
        }
    }

    /// Mean duration of ended sessions in microseconds.
    #[must_use]
    pub fn avg_duration_us(&self) -> f64 {
        match self.created - self.active {
            0 => 0.0,
            ended => self.total_duration_us as f64 / ended as f64,
        }
    }

    /// Returns the tracker to its freshly-constructed state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod session_tracker_tests {
    use super::*;

    #[test]
    fn f_sess_001_new() {
        let tracker = SessionTracker::new();
        assert_eq!(tracker.active(), 0);
    }

    #[test]
    fn f_sess_002_default() {
        let tracker = SessionTracker::default();
        assert_eq!(tracker.active(), 0);
    }

    #[test]
    fn f_sess_003_create() {
        let mut tracker = SessionTracker::new();
        tracker.create();
        assert_eq!(tracker.active(), 1);
    }

    #[test]
    fn f_sess_004_end() {
        let mut tracker = SessionTracker::new();
        tracker.create();
        tracker.end(1000);
        assert_eq!(tracker.active(), 0);
    }

    #[test]
    fn f_sess_005_expire() {
        let mut tracker = SessionTracker::new();
        tracker.create();
        tracker.expire(1000);
        assert!(tracker.expiration_rate() > 0.0);
    }

    #[test]
    fn f_sess_006_peak() {
        let mut tracker = SessionTracker::new();
        tracker.create();
        tracker.create();
        tracker.end(1000);
        assert_eq!(tracker.peak(), 2);
    }

    #[test]
    fn f_sess_007_for_users() {
        let tracker = SessionTracker::for_users();
        assert_eq!(tracker.active(), 0);
    }

    #[test]
    fn f_sess_008_for_api() {
        let tracker = SessionTracker::for_api();
        assert_eq!(tracker.active(), 0);
    }

    #[test]
    fn f_sess_009_avg_duration() {
        let mut tracker = SessionTracker::new();
        tracker.create();
        tracker.end(1000);
        tracker.create();
        tracker.end(2000);
        assert!((tracker.avg_duration_us() - 1500.0).abs() < 0.01);
    }

    #[test]
    fn f_sess_010_created() {
        let mut tracker = SessionTracker::new();
        tracker.create();
        tracker.create();
        assert_eq!(tracker.created(), 2);
    }

    #[test]
    fn f_sess_011_reset() {
        let mut tracker = SessionTracker::new();
        tracker.create();
        tracker.reset();
        assert_eq!(tracker.active(), 0);
    }

    #[test]
    fn f_sess_012_clone() {
        let mut tracker = SessionTracker::new();
        tracker.create();
        let copy = tracker.clone();
        assert_eq!(tracker.active(), copy.active());
    }
}
/// Tracks transaction outcomes: commits, rollbacks, deadlocks and durations.
#[derive(Debug, Clone)]
pub struct TransactionTracker {
    active: u32,
    committed: u64,
    rolled_back: u64,
    deadlocks: u64,
    total_duration_us: u64,
}

impl Default for TransactionTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl TransactionTracker {
    /// Creates an empty tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            active: 0,
            committed: 0,
            rolled_back: 0,
            deadlocks: 0,
            total_duration_us: 0,
        }
    }

    /// Convenience constructor for local database transactions.
    #[must_use]
    pub fn for_database() -> Self {
        Self::new()
    }

    /// Convenience constructor for distributed transactions.
    #[must_use]
    pub fn for_distributed() -> Self {
        Self::new()
    }

    /// Marks one transaction as started.
    pub fn begin(&mut self) {
        self.active += 1;
    }

    /// Shared completion bookkeeping; returns `false` (a no-op) when no
    /// transaction is active, so stray commits/rollbacks are ignored.
    /// Consolidates logic previously duplicated in `commit`/`rollback`.
    fn finish(&mut self, duration_us: u64) -> bool {
        if self.active == 0 {
            return false;
        }
        self.active -= 1;
        self.total_duration_us += duration_us;
        true
    }

    /// Records a committed transaction and its duration.
    pub fn commit(&mut self, duration_us: u64) {
        if self.finish(duration_us) {
            self.committed += 1;
        }
    }

    /// Records a rolled-back transaction and its duration.
    pub fn rollback(&mut self, duration_us: u64) {
        if self.finish(duration_us) {
            self.rolled_back += 1;
        }
    }

    /// Records a detected deadlock.
    pub fn deadlock(&mut self) {
        self.deadlocks += 1;
    }

    /// Transactions currently open.
    #[must_use]
    pub fn active(&self) -> u32 {
        self.active
    }

    /// Total committed transactions.
    #[must_use]
    pub fn committed(&self) -> u64 {
        self.committed
    }

    /// Commits as a percentage of finished transactions (0.0 when idle).
    #[must_use]
    pub fn commit_rate(&self) -> f64 {
        let total = self.committed + self.rolled_back;
        if total == 0 {
            0.0
        } else {
            (self.committed as f64 / total as f64) * 100.0
        }
    }

    /// Rollbacks as a percentage of finished transactions (0.0 when idle).
    #[must_use]
    pub fn rollback_rate(&self) -> f64 {
        let total = self.committed + self.rolled_back;
        if total == 0 {
            0.0
        } else {
            (self.rolled_back as f64 / total as f64) * 100.0
        }
    }

    /// Number of deadlocks recorded.
    #[must_use]
    pub fn deadlocks(&self) -> u64 {
        self.deadlocks
    }

    /// True while the rollback rate (percent) does not exceed `max_rollback_rate`.
    #[must_use]
    pub fn is_healthy(&self, max_rollback_rate: f64) -> bool {
        self.rollback_rate() <= max_rollback_rate
    }

    /// Returns the tracker to its freshly-constructed state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod transaction_tracker_tests {
    use super::*;

    #[test]
    fn f_txn_001_new() {
        let tracker = TransactionTracker::new();
        assert_eq!(tracker.active(), 0);
    }

    #[test]
    fn f_txn_002_default() {
        let tracker = TransactionTracker::default();
        assert_eq!(tracker.active(), 0);
    }

    #[test]
    fn f_txn_003_begin() {
        let mut tracker = TransactionTracker::new();
        tracker.begin();
        assert_eq!(tracker.active(), 1);
    }

    #[test]
    fn f_txn_004_commit() {
        let mut tracker = TransactionTracker::new();
        tracker.begin();
        tracker.commit(1000);
        assert_eq!(tracker.committed(), 1);
    }

    #[test]
    fn f_txn_005_rollback() {
        let mut tracker = TransactionTracker::new();
        tracker.begin();
        tracker.rollback(1000);
        assert!((tracker.rollback_rate() - 100.0).abs() < 0.01);
    }

    #[test]
    fn f_txn_006_commit_rate() {
        let mut tracker = TransactionTracker::new();
        tracker.begin();
        tracker.commit(1000);
        tracker.begin();
        tracker.rollback(1000);
        assert!((tracker.commit_rate() - 50.0).abs() < 0.01);
    }

    #[test]
    fn f_txn_007_for_database() {
        let tracker = TransactionTracker::for_database();
        assert_eq!(tracker.active(), 0);
    }

    #[test]
    fn f_txn_008_for_distributed() {
        let tracker = TransactionTracker::for_distributed();
        assert_eq!(tracker.active(), 0);
    }

    #[test]
    fn f_txn_009_deadlocks() {
        let mut tracker = TransactionTracker::new();
        tracker.deadlock();
        tracker.deadlock();
        assert_eq!(tracker.deadlocks(), 2);
    }

    #[test]
    fn f_txn_010_healthy() {
        let mut tracker = TransactionTracker::new();
        tracker.begin();
        tracker.commit(1000);
        assert!(tracker.is_healthy(10.0));
    }

    #[test]
    fn f_txn_011_reset() {
        let mut tracker = TransactionTracker::new();
        tracker.begin();
        tracker.commit(1000);
        tracker.reset();
        assert_eq!(tracker.committed(), 0);
    }

    #[test]
    fn f_txn_012_clone() {
        let mut tracker = TransactionTracker::new();
        tracker.begin();
        let snapshot = tracker.clone();
        assert_eq!(tracker.active(), snapshot.active());
    }
}
/// Counts emitted, delivered and dropped events, plus subscriber churn.
#[derive(Debug, Clone)]
pub struct EventEmitter {
    events_emitted: u64,
    events_delivered: u64,
    events_dropped: u64,
    subscribers: u32,
    max_subscribers: u32,
}

impl Default for EventEmitter {
    fn default() -> Self {
        Self::new()
    }
}

impl EventEmitter {
    /// Creates an emitter with no subscribers and no recorded events.
    #[must_use]
    pub fn new() -> Self {
        Self {
            events_emitted: 0,
            events_delivered: 0,
            events_dropped: 0,
            subscribers: 0,
            max_subscribers: 0,
        }
    }

    /// Preset for UI events (same defaults as `new`).
    #[must_use]
    pub fn for_ui() -> Self {
        Self::new()
    }

    /// Preset for system events (same defaults as `new`).
    #[must_use]
    pub fn for_system() -> Self {
        Self::new()
    }

    /// Registers one subscriber and tracks the high-water mark.
    pub fn subscribe(&mut self) {
        self.subscribers += 1;
        if self.subscribers > self.max_subscribers {
            self.max_subscribers = self.subscribers;
        }
    }

    /// Removes one subscriber; saturates at zero.
    pub fn unsubscribe(&mut self) {
        self.subscribers = self.subscribers.saturating_sub(1);
    }

    /// Records one emission that reached `delivered` subscribers; every
    /// current subscriber that did not receive it counts as a drop.
    pub fn emit(&mut self, delivered: u32) {
        self.events_emitted += 1;
        self.events_delivered += u64::from(delivered);
        // saturating_sub yields 0 when delivered >= subscribers, which is
        // exactly "no drops" — same arithmetic as the guarded subtraction.
        self.events_dropped += u64::from(self.subscribers.saturating_sub(delivered));
    }

    /// Total `emit` calls.
    #[must_use]
    pub fn emitted(&self) -> u64 {
        self.events_emitted
    }

    /// Current subscriber count.
    #[must_use]
    pub fn subscribers(&self) -> u32 {
        self.subscribers
    }

    /// Delivered / (delivered + dropped) as a percentage; 100.0 before any
    /// delivery or drop is recorded.
    #[must_use]
    pub fn delivery_rate(&self) -> f64 {
        let total = self.events_delivered + self.events_dropped;
        if total == 0 {
            return 100.0;
        }
        (self.events_delivered as f64 / total as f64) * 100.0
    }

    /// True when the delivery rate meets the supplied minimum.
    #[must_use]
    pub fn is_healthy(&self, min_delivery_rate: f64) -> bool {
        self.delivery_rate() >= min_delivery_rate
    }

    /// Clears the event counters; the current subscriber count is kept and
    /// becomes the new high-water mark.
    pub fn reset(&mut self) {
        self.events_emitted = 0;
        self.events_delivered = 0;
        self.events_dropped = 0;
        self.max_subscribers = self.subscribers;
    }
}
/// Bounded-queue occupancy tracker with peak and throughput counters.
#[derive(Debug, Clone)]
pub struct QueueDepth {
    capacity: u64,
    current: u64,
    peak: u64,
    enqueued: u64,
    dequeued: u64,
}

impl Default for QueueDepth {
    fn default() -> Self {
        Self::new(1000)
    }
}

impl QueueDepth {
    /// Creates an empty tracker for a queue of the given capacity.
    #[must_use]
    pub fn new(capacity: u64) -> Self {
        Self {
            capacity,
            current: 0,
            peak: 0,
            enqueued: 0,
            dequeued: 0,
        }
    }

    /// Preset sized for message queues (capacity 10 000).
    #[must_use]
    pub fn for_messages() -> Self {
        Self::new(10000)
    }

    /// Preset sized for task queues (capacity 1 000).
    #[must_use]
    pub fn for_tasks() -> Self {
        Self::new(1000)
    }

    /// Records an enqueue; returns `false` when the queue is already full.
    pub fn enqueue(&mut self) -> bool {
        if self.is_full() {
            return false;
        }
        self.current += 1;
        self.enqueued += 1;
        self.peak = self.peak.max(self.current);
        true
    }

    /// Records a dequeue; returns `false` when the queue is empty.
    pub fn dequeue(&mut self) -> bool {
        if self.is_empty() {
            return false;
        }
        self.current -= 1;
        self.dequeued += 1;
        true
    }

    /// Current number of queued items.
    #[must_use]
    pub fn depth(&self) -> u64 {
        self.current
    }

    /// Occupancy as a percentage of capacity (0.0 for zero capacity).
    #[must_use]
    pub fn utilization(&self) -> f64 {
        if self.capacity == 0 {
            return 0.0;
        }
        (self.current as f64 / self.capacity as f64) * 100.0
    }

    /// True when no further items can be enqueued.
    #[must_use]
    pub fn is_full(&self) -> bool {
        self.current >= self.capacity
    }

    /// True when nothing is queued.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.current == 0
    }

    /// Total items dequeued so far.
    #[must_use]
    pub fn throughput(&self) -> u64 {
        self.dequeued
    }

    /// Clears the counters; current occupancy is kept and becomes the
    /// new peak.
    pub fn reset(&mut self) {
        self.peak = self.current;
        self.enqueued = 0;
        self.dequeued = 0;
    }
}
/// Counters for a task scheduler: scheduled, executed, missed and cancelled
/// tasks, plus cumulative execution latency.
#[derive(Debug, Clone)]
pub struct TaskScheduler {
    scheduled: u64,
    executed: u64,
    missed: u64,
    cancelled: u64,
    total_latency_us: u64,
}

impl Default for TaskScheduler {
    fn default() -> Self {
        Self::new()
    }
}

impl TaskScheduler {
    /// Creates a scheduler tracker with every counter at zero.
    #[must_use]
    pub fn new() -> Self {
        Self {
            scheduled: 0,
            executed: 0,
            missed: 0,
            cancelled: 0,
            total_latency_us: 0,
        }
    }

    /// Preset for periodic tasks (same defaults as `new`).
    #[must_use]
    pub fn for_periodic() -> Self {
        Self::new()
    }

    /// Preset for one-shot tasks (same defaults as `new`).
    #[must_use]
    pub fn for_oneshot() -> Self {
        Self::new()
    }

    /// Records that a task was scheduled.
    pub fn schedule(&mut self) {
        self.scheduled += 1;
    }

    /// Records a completed execution and its latency.
    pub fn execute(&mut self, latency_us: u64) {
        self.executed += 1;
        self.total_latency_us += latency_us;
    }

    /// Records a missed execution.
    pub fn miss(&mut self) {
        self.missed += 1;
    }

    /// Records a cancellation.
    pub fn cancel(&mut self) {
        self.cancelled += 1;
    }

    /// Executed / scheduled as a percentage; 100.0 before anything is
    /// scheduled.
    #[must_use]
    pub fn execution_rate(&self) -> f64 {
        if self.scheduled == 0 {
            return 100.0;
        }
        (self.executed as f64 / self.scheduled as f64) * 100.0
    }

    /// Missed / (executed + missed) as a percentage; 0.0 when neither has
    /// been recorded.
    #[must_use]
    pub fn miss_rate(&self) -> f64 {
        let finished = self.executed + self.missed;
        if finished == 0 {
            return 0.0;
        }
        (self.missed as f64 / finished as f64) * 100.0
    }

    /// Mean execution latency in microseconds (integer division; 0 before
    /// anything has executed).
    #[must_use]
    pub fn avg_latency_us(&self) -> u64 {
        if self.executed == 0 {
            return 0;
        }
        self.total_latency_us / self.executed
    }

    /// True when the miss rate does not exceed the supplied threshold.
    #[must_use]
    pub fn is_healthy(&self, max_miss_rate: f64) -> bool {
        self.miss_rate() <= max_miss_rate
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Bounded dead-letter queue statistics: letters added, successfully
/// reprocessed, and expired.
#[derive(Debug, Clone)]
pub struct DeadletterQueue {
    capacity: u64,
    current: u64,
    added: u64,
    reprocessed: u64,
    expired: u64,
}

impl Default for DeadletterQueue {
    fn default() -> Self {
        Self::new(1000)
    }
}

impl DeadletterQueue {
    /// Creates an empty dead-letter tracker with the given capacity.
    #[must_use]
    pub fn new(capacity: u64) -> Self {
        Self {
            capacity,
            current: 0,
            added: 0,
            reprocessed: 0,
            expired: 0,
        }
    }

    /// Preset sized for message dead-letters (capacity 10 000).
    #[must_use]
    pub fn for_messages() -> Self {
        Self::new(10000)
    }

    /// Preset sized for event dead-letters (capacity 1 000).
    #[must_use]
    pub fn for_events() -> Self {
        Self::new(1000)
    }

    /// Adds a letter; returns `false` when the queue is full.
    pub fn add(&mut self) -> bool {
        if self.is_full() {
            return false;
        }
        self.current += 1;
        self.added += 1;
        true
    }

    /// Removes one letter as successfully reprocessed; `false` when empty.
    pub fn reprocess(&mut self) -> bool {
        if self.current == 0 {
            return false;
        }
        self.current -= 1;
        self.reprocessed += 1;
        true
    }

    /// Removes one letter as expired; `false` when empty.
    pub fn expire(&mut self) -> bool {
        if self.current == 0 {
            return false;
        }
        self.current -= 1;
        self.expired += 1;
        true
    }

    /// Letters currently held.
    #[must_use]
    pub fn size(&self) -> u64 {
        self.current
    }

    /// Reprocessed / (reprocessed + expired) as a percentage; 100.0 before
    /// anything has been removed.
    #[must_use]
    pub fn recovery_rate(&self) -> f64 {
        let removed = self.reprocessed + self.expired;
        if removed == 0 {
            return 100.0;
        }
        (self.reprocessed as f64 / removed as f64) * 100.0
    }

    /// True when the recovery rate meets the supplied minimum.
    #[must_use]
    pub fn is_healthy(&self, min_recovery_rate: f64) -> bool {
        self.recovery_rate() >= min_recovery_rate
    }

    /// True when no further letters can be added.
    #[must_use]
    pub fn is_full(&self) -> bool {
        self.current >= self.capacity
    }

    /// Clears the lifetime counters.
    /// NOTE(review): `current` is left untouched, so letters still queued
    /// survive a reset — presumably intentional (matches `QueueDepth::reset`
    /// keeping occupancy); confirm against callers.
    pub fn reset(&mut self) {
        self.added = 0;
        self.reprocessed = 0;
        self.expired = 0;
    }
}
#[cfg(test)]
mod event_emitter_tests {
    use super::*;

    #[test]
    fn f_emit_001_new() {
        let emitter = EventEmitter::new();
        assert_eq!(emitter.emitted(), 0);
    }

    #[test]
    fn f_emit_002_default() {
        let emitter = EventEmitter::default();
        assert_eq!(emitter.subscribers(), 0);
    }

    #[test]
    fn f_emit_003_subscribe() {
        let mut emitter = EventEmitter::new();
        emitter.subscribe();
        assert_eq!(emitter.subscribers(), 1);
    }

    #[test]
    fn f_emit_004_unsubscribe() {
        let mut emitter = EventEmitter::new();
        emitter.subscribe();
        emitter.unsubscribe();
        assert_eq!(emitter.subscribers(), 0);
    }

    #[test]
    fn f_emit_005_emit() {
        let mut emitter = EventEmitter::new();
        emitter.subscribe();
        emitter.emit(1);
        assert_eq!(emitter.emitted(), 1);
    }

    #[test]
    fn f_emit_006_delivery_rate() {
        let mut emitter = EventEmitter::new();
        emitter.subscribe();
        emitter.subscribe();
        emitter.emit(1);
        assert!((emitter.delivery_rate() - 50.0).abs() < 0.01);
    }

    #[test]
    fn f_emit_007_for_ui() {
        let emitter = EventEmitter::for_ui();
        assert_eq!(emitter.emitted(), 0);
    }

    #[test]
    fn f_emit_008_for_system() {
        let emitter = EventEmitter::for_system();
        assert_eq!(emitter.subscribers(), 0);
    }

    #[test]
    fn f_emit_009_healthy() {
        let mut emitter = EventEmitter::new();
        emitter.subscribe();
        emitter.emit(1);
        assert!(emitter.is_healthy(90.0));
    }

    #[test]
    fn f_emit_010_unhealthy() {
        let mut emitter = EventEmitter::new();
        emitter.subscribe();
        emitter.subscribe();
        emitter.emit(0);
        assert!(!emitter.is_healthy(50.0));
    }

    #[test]
    fn f_emit_011_reset() {
        let mut emitter = EventEmitter::new();
        emitter.emit(0);
        emitter.reset();
        assert_eq!(emitter.emitted(), 0);
    }

    #[test]
    fn f_emit_012_clone() {
        let mut emitter = EventEmitter::new();
        emitter.subscribe();
        let snapshot = emitter.clone();
        assert_eq!(emitter.subscribers(), snapshot.subscribers());
    }
}
#[cfg(test)]
mod queue_depth_tests {
    use super::*;

    #[test]
    fn f_qdepth_001_new() {
        let queue = QueueDepth::new(100);
        assert_eq!(queue.depth(), 0);
    }

    #[test]
    fn f_qdepth_002_default() {
        let queue = QueueDepth::default();
        assert!(queue.is_empty());
    }

    #[test]
    fn f_qdepth_003_enqueue() {
        let mut queue = QueueDepth::new(100);
        assert!(queue.enqueue());
        assert_eq!(queue.depth(), 1);
    }

    #[test]
    fn f_qdepth_004_dequeue() {
        let mut queue = QueueDepth::new(100);
        queue.enqueue();
        assert!(queue.dequeue());
        assert_eq!(queue.depth(), 0);
    }

    #[test]
    fn f_qdepth_005_utilization() {
        let mut queue = QueueDepth::new(100);
        for _ in 0..50 {
            queue.enqueue();
        }
        assert!((queue.utilization() - 50.0).abs() < 0.01);
    }

    #[test]
    fn f_qdepth_006_full() {
        let mut queue = QueueDepth::new(2);
        queue.enqueue();
        queue.enqueue();
        assert!(queue.is_full());
    }

    #[test]
    fn f_qdepth_007_for_messages() {
        let queue = QueueDepth::for_messages();
        assert_eq!(queue.capacity, 10000);
    }

    #[test]
    fn f_qdepth_008_for_tasks() {
        let queue = QueueDepth::for_tasks();
        assert_eq!(queue.capacity, 1000);
    }

    #[test]
    fn f_qdepth_009_throughput() {
        let mut queue = QueueDepth::new(100);
        queue.enqueue();
        queue.dequeue();
        assert_eq!(queue.throughput(), 1);
    }

    #[test]
    fn f_qdepth_010_enqueue_full() {
        let mut queue = QueueDepth::new(1);
        queue.enqueue();
        assert!(!queue.enqueue());
    }

    #[test]
    fn f_qdepth_011_reset() {
        let mut queue = QueueDepth::new(100);
        queue.enqueue();
        queue.dequeue();
        queue.reset();
        assert_eq!(queue.throughput(), 0);
    }

    #[test]
    fn f_qdepth_012_clone() {
        let mut queue = QueueDepth::new(100);
        queue.enqueue();
        let snapshot = queue.clone();
        assert_eq!(queue.depth(), snapshot.depth());
    }
}
#[cfg(test)]
mod task_scheduler_tests {
    use super::*;

    #[test]
    fn f_tsched_001_new() {
        let scheduler = TaskScheduler::new();
        assert_eq!(scheduler.scheduled, 0);
    }

    #[test]
    fn f_tsched_002_default() {
        let scheduler = TaskScheduler::default();
        assert_eq!(scheduler.executed, 0);
    }

    #[test]
    fn f_tsched_003_schedule() {
        let mut scheduler = TaskScheduler::new();
        scheduler.schedule();
        assert_eq!(scheduler.scheduled, 1);
    }

    #[test]
    fn f_tsched_004_execute() {
        let mut scheduler = TaskScheduler::new();
        scheduler.schedule();
        scheduler.execute(1000);
        assert_eq!(scheduler.executed, 1);
    }

    #[test]
    fn f_tsched_005_miss() {
        let mut scheduler = TaskScheduler::new();
        scheduler.schedule();
        scheduler.miss();
        assert!((scheduler.miss_rate() - 100.0).abs() < 0.01);
    }

    #[test]
    fn f_tsched_006_execution_rate() {
        let mut scheduler = TaskScheduler::new();
        scheduler.schedule();
        scheduler.execute(1000);
        assert!((scheduler.execution_rate() - 100.0).abs() < 0.01);
    }

    #[test]
    fn f_tsched_007_for_periodic() {
        let scheduler = TaskScheduler::for_periodic();
        assert_eq!(scheduler.scheduled, 0);
    }

    #[test]
    fn f_tsched_008_for_oneshot() {
        let scheduler = TaskScheduler::for_oneshot();
        assert_eq!(scheduler.executed, 0);
    }

    #[test]
    fn f_tsched_009_avg_latency() {
        let mut scheduler = TaskScheduler::new();
        scheduler.execute(1000);
        scheduler.execute(2000);
        assert_eq!(scheduler.avg_latency_us(), 1500);
    }

    #[test]
    fn f_tsched_010_healthy() {
        let mut scheduler = TaskScheduler::new();
        scheduler.execute(1000);
        assert!(scheduler.is_healthy(5.0));
    }

    #[test]
    fn f_tsched_011_reset() {
        let mut scheduler = TaskScheduler::new();
        scheduler.schedule();
        scheduler.execute(1000);
        scheduler.reset();
        assert_eq!(scheduler.scheduled, 0);
    }

    #[test]
    fn f_tsched_012_clone() {
        let mut scheduler = TaskScheduler::new();
        scheduler.schedule();
        let snapshot = scheduler.clone();
        assert_eq!(scheduler.scheduled, snapshot.scheduled);
    }
}
#[cfg(test)]
mod deadletter_queue_tests {
    use super::*;

    #[test]
    fn f_dlq_001_new() {
        let queue = DeadletterQueue::new(100);
        assert_eq!(queue.size(), 0);
    }

    #[test]
    fn f_dlq_002_default() {
        let queue = DeadletterQueue::default();
        assert!(!queue.is_full());
    }

    #[test]
    fn f_dlq_003_add() {
        let mut queue = DeadletterQueue::new(100);
        assert!(queue.add());
        assert_eq!(queue.size(), 1);
    }

    #[test]
    fn f_dlq_004_reprocess() {
        let mut queue = DeadletterQueue::new(100);
        queue.add();
        assert!(queue.reprocess());
        assert_eq!(queue.size(), 0);
    }

    #[test]
    fn f_dlq_005_expire() {
        let mut queue = DeadletterQueue::new(100);
        queue.add();
        assert!(queue.expire());
        assert_eq!(queue.size(), 0);
    }

    #[test]
    fn f_dlq_006_recovery_rate() {
        let mut queue = DeadletterQueue::new(100);
        queue.add();
        queue.add();
        queue.reprocess();
        queue.expire();
        assert!((queue.recovery_rate() - 50.0).abs() < 0.01);
    }

    #[test]
    fn f_dlq_007_for_messages() {
        let queue = DeadletterQueue::for_messages();
        assert_eq!(queue.capacity, 10000);
    }

    #[test]
    fn f_dlq_008_for_events() {
        let queue = DeadletterQueue::for_events();
        assert_eq!(queue.capacity, 1000);
    }

    #[test]
    fn f_dlq_009_full() {
        let mut queue = DeadletterQueue::new(1);
        queue.add();
        assert!(queue.is_full());
    }

    #[test]
    fn f_dlq_010_healthy() {
        let mut queue = DeadletterQueue::new(100);
        queue.add();
        queue.reprocess();
        assert!(queue.is_healthy(90.0));
    }

    #[test]
    fn f_dlq_011_reset() {
        let mut queue = DeadletterQueue::new(100);
        queue.add();
        queue.reprocess();
        queue.reset();
        assert_eq!(queue.reprocessed, 0);
    }

    #[test]
    fn f_dlq_012_clone() {
        let mut queue = DeadletterQueue::new(100);
        queue.add();
        let snapshot = queue.clone();
        assert_eq!(queue.size(), snapshot.size());
    }
}
/// Record and byte counters for a streaming pipeline stage, plus the current
/// event-time watermark.
#[derive(Debug, Clone)]
pub struct StreamProcessor {
    records_in: u64,
    records_out: u64,
    records_dropped: u64,
    bytes_processed: u64,
    watermark_us: u64,
}

impl Default for StreamProcessor {
    fn default() -> Self {
        Self::new()
    }
}

impl StreamProcessor {
    /// Creates a processor tracker with all counters at zero.
    #[must_use]
    pub fn new() -> Self {
        Self {
            records_in: 0,
            records_out: 0,
            records_dropped: 0,
            bytes_processed: 0,
            watermark_us: 0,
        }
    }

    /// Preset for Kafka sources (same defaults as `new`).
    #[must_use]
    pub fn for_kafka() -> Self {
        Self::new()
    }

    /// Preset for event streams (same defaults as `new`).
    #[must_use]
    pub fn for_events() -> Self {
        Self::new()
    }

    /// Records one incoming record of the given size.
    pub fn process_in(&mut self, bytes: u64) {
        self.records_in += 1;
        self.bytes_processed += bytes;
    }

    /// Records one outgoing record.
    pub fn emit(&mut self) {
        self.records_out += 1;
    }

    /// Records a dropped record.
    pub fn drop_record(&mut self) {
        self.records_dropped += 1;
    }

    /// Overwrites the watermark (no monotonicity check is performed).
    pub fn update_watermark(&mut self, timestamp_us: u64) {
        self.watermark_us = timestamp_us;
    }

    /// records_out / records_in; 1.0 before any input is seen.
    #[must_use]
    pub fn processing_ratio(&self) -> f64 {
        if self.records_in == 0 {
            return 1.0;
        }
        self.records_out as f64 / self.records_in as f64
    }

    /// Dropped records as a percentage of input; 0.0 with no input.
    #[must_use]
    pub fn drop_rate(&self) -> f64 {
        if self.records_in == 0 {
            return 0.0;
        }
        (self.records_dropped as f64 / self.records_in as f64) * 100.0
    }

    /// True when the drop rate does not exceed the supplied threshold.
    #[must_use]
    pub fn is_healthy(&self, max_drop_rate: f64) -> bool {
        self.drop_rate() <= max_drop_rate
    }

    /// Zeroes the record/byte counters.
    /// NOTE(review): `watermark_us` is not reset — presumably stream
    /// progress should outlive a counter reset; confirm this is intentional.
    pub fn reset(&mut self) {
        self.records_in = 0;
        self.records_out = 0;
        self.records_dropped = 0;
        self.bytes_processed = 0;
    }
}
/// Accumulates items into fixed-size batches and counts flushes.
#[derive(Debug, Clone)]
pub struct BatchAggregator {
    batch_size: u64,
    current_count: u64,
    batches_flushed: u64,
    total_items: u64,
    flush_trigger_size: u64,
}

impl Default for BatchAggregator {
    fn default() -> Self {
        Self::new(100)
    }
}

impl BatchAggregator {
    /// Creates an empty aggregator that auto-flushes at `batch_size` items.
    #[must_use]
    pub fn new(batch_size: u64) -> Self {
        Self {
            batch_size,
            current_count: 0,
            batches_flushed: 0,
            total_items: 0,
            flush_trigger_size: 0,
        }
    }

    /// Preset for write batching (batch size 1 000).
    #[must_use]
    pub fn for_writes() -> Self {
        Self::new(1000)
    }

    /// Preset for small batches (batch size 10).
    #[must_use]
    pub fn for_small() -> Self {
        Self::new(10)
    }

    /// Closes out the in-progress batch: folds its item count into the
    /// flushed-item total and bumps the batch counter.
    fn seal_batch(&mut self) {
        self.flush_trigger_size += self.current_count;
        self.batches_flushed += 1;
        self.current_count = 0;
    }

    /// Adds one item; returns `true` when this add triggered an auto-flush.
    pub fn add(&mut self) -> bool {
        self.current_count += 1;
        self.total_items += 1;
        if self.current_count < self.batch_size {
            return false;
        }
        self.seal_batch();
        true
    }

    /// Flushes a partially filled batch; a no-op when nothing is pending.
    pub fn flush(&mut self) {
        if self.current_count > 0 {
            self.seal_batch();
        }
    }

    /// Fill level of the in-progress batch as a percentage (0.0 for a zero
    /// batch size).
    #[must_use]
    pub fn fill_level(&self) -> f64 {
        if self.batch_size == 0 {
            return 0.0;
        }
        (self.current_count as f64 / self.batch_size as f64) * 100.0
    }

    /// Mean items per flushed batch (integer division; 0 with no flushes).
    #[must_use]
    pub fn avg_batch_size(&self) -> u64 {
        if self.batches_flushed == 0 {
            return 0;
        }
        self.flush_trigger_size / self.batches_flushed
    }

    /// Number of batches flushed so far.
    #[must_use]
    pub fn batches(&self) -> u64 {
        self.batches_flushed
    }

    /// Clears all counters; the configured batch size is kept.
    pub fn reset(&mut self) {
        self.current_count = 0;
        self.batches_flushed = 0;
        self.total_items = 0;
        self.flush_trigger_size = 0;
    }
}
/// Event counter for time windows; tumbling when the slide interval equals
/// the window size, sliding otherwise.
#[derive(Debug, Clone)]
pub struct WindowTracker {
    window_size_us: u64,
    slide_interval_us: u64,
    windows_completed: u64,
    current_count: u64,
    last_window_start_us: u64,
}

impl Default for WindowTracker {
    fn default() -> Self {
        // One-minute tumbling window.
        Self::new(60_000_000, 60_000_000)
    }
}

impl WindowTracker {
    /// Creates a tracker with the given window size and slide interval.
    #[must_use]
    pub fn new(window_size_us: u64, slide_interval_us: u64) -> Self {
        Self {
            window_size_us,
            slide_interval_us,
            windows_completed: 0,
            current_count: 0,
            last_window_start_us: 0,
        }
    }

    /// One-minute tumbling window preset.
    #[must_use]
    pub fn for_minute_tumbling() -> Self {
        Self::new(60_000_000, 60_000_000)
    }

    /// Ten-second window sliding every second.
    #[must_use]
    pub fn for_10s_sliding() -> Self {
        Self::new(10_000_000, 1_000_000)
    }

    /// Counts one event into the open window.
    pub fn add_event(&mut self) {
        self.current_count += 1;
    }

    /// Completes the open window and starts a fresh one at `timestamp_us`.
    pub fn close_window(&mut self, timestamp_us: u64) {
        self.windows_completed += 1;
        self.current_count = 0;
        self.last_window_start_us = timestamp_us;
    }

    /// Events counted in the open window.
    #[must_use]
    pub fn current_count(&self) -> u64 {
        self.current_count
    }

    /// Windows completed so far.
    #[must_use]
    pub fn windows(&self) -> u64 {
        self.windows_completed
    }

    /// True for tumbling semantics (slide interval equals window size).
    #[must_use]
    pub fn is_tumbling(&self) -> bool {
        self.window_size_us == self.slide_interval_us
    }

    /// True for sliding semantics (slide interval differs from window size).
    #[must_use]
    pub fn is_sliding(&self) -> bool {
        !self.is_tumbling()
    }

    /// Clears counts and window history; keeps the configured geometry.
    pub fn reset(&mut self) {
        self.windows_completed = 0;
        self.current_count = 0;
        self.last_window_start_us = 0;
    }
}
/// Bounded priority-queue statistics: occupancy plus the priority
/// distribution of everything ever enqueued.
#[derive(Debug, Clone)]
pub struct PriorityQueueTracker {
    capacity: u64,
    current: u64,
    enqueued: u64,
    dequeued: u64,
    priority_sum: u64,
    max_priority: u64,
}

impl Default for PriorityQueueTracker {
    fn default() -> Self {
        Self::new(1000)
    }
}

impl PriorityQueueTracker {
    /// Creates an empty tracker with the given capacity.
    #[must_use]
    pub fn new(capacity: u64) -> Self {
        Self {
            capacity,
            current: 0,
            enqueued: 0,
            dequeued: 0,
            priority_sum: 0,
            max_priority: 0,
        }
    }

    /// Preset sized for task queues (capacity 1 000).
    #[must_use]
    pub fn for_tasks() -> Self {
        Self::new(1000)
    }

    /// Preset sized for event queues (capacity 10 000).
    #[must_use]
    pub fn for_events() -> Self {
        Self::new(10000)
    }

    /// Records an enqueue at `priority`; returns `false` (and records
    /// nothing) when the queue is full.
    pub fn enqueue(&mut self, priority: u64) -> bool {
        if self.is_full() {
            return false;
        }
        self.current += 1;
        self.enqueued += 1;
        self.priority_sum += priority;
        if priority > self.max_priority {
            self.max_priority = priority;
        }
        true
    }

    /// Records a dequeue; returns `false` when empty.
    pub fn dequeue(&mut self) -> bool {
        if self.is_empty() {
            return false;
        }
        self.current -= 1;
        self.dequeued += 1;
        true
    }

    /// Items currently queued.
    #[must_use]
    pub fn size(&self) -> u64 {
        self.current
    }

    /// Mean priority across everything ever enqueued (0.0 before the first
    /// enqueue).
    #[must_use]
    pub fn avg_priority(&self) -> f64 {
        if self.enqueued == 0 {
            return 0.0;
        }
        self.priority_sum as f64 / self.enqueued as f64
    }

    /// True when no further items fit.
    #[must_use]
    pub fn is_full(&self) -> bool {
        self.current >= self.capacity
    }

    /// True when nothing is queued.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.current == 0
    }

    /// Occupancy as a percentage of capacity (0.0 for zero capacity).
    #[must_use]
    pub fn utilization(&self) -> f64 {
        if self.capacity == 0 {
            return 0.0;
        }
        (self.current as f64 / self.capacity as f64) * 100.0
    }

    /// Empties the tracker; the configured capacity is kept.
    pub fn reset(&mut self) {
        self.current = 0;
        self.enqueued = 0;
        self.dequeued = 0;
        self.priority_sum = 0;
        self.max_priority = 0;
    }
}
#[cfg(test)]
mod stream_processor_tests {
    use super::*;

    #[test]
    fn f_stream_001_new() {
        let processor = StreamProcessor::new();
        assert_eq!(processor.records_in, 0);
    }

    #[test]
    fn f_stream_002_default() {
        let processor = StreamProcessor::default();
        assert_eq!(processor.records_out, 0);
    }

    #[test]
    fn f_stream_003_process() {
        let mut processor = StreamProcessor::new();
        processor.process_in(100);
        assert_eq!(processor.records_in, 1);
        assert_eq!(processor.bytes_processed, 100);
    }

    #[test]
    fn f_stream_004_emit() {
        let mut processor = StreamProcessor::new();
        processor.emit();
        assert_eq!(processor.records_out, 1);
    }

    #[test]
    fn f_stream_005_drop() {
        let mut processor = StreamProcessor::new();
        processor.process_in(100);
        processor.drop_record();
        assert!((processor.drop_rate() - 100.0).abs() < 0.01);
    }

    #[test]
    fn f_stream_006_ratio() {
        let mut processor = StreamProcessor::new();
        processor.process_in(100);
        processor.process_in(100);
        processor.emit();
        assert!((processor.processing_ratio() - 0.5).abs() < 0.01);
    }

    #[test]
    fn f_stream_007_for_kafka() {
        let processor = StreamProcessor::for_kafka();
        assert_eq!(processor.records_in, 0);
    }

    #[test]
    fn f_stream_008_for_events() {
        let processor = StreamProcessor::for_events();
        assert_eq!(processor.records_out, 0);
    }

    #[test]
    fn f_stream_009_watermark() {
        let mut processor = StreamProcessor::new();
        processor.update_watermark(1000);
        assert_eq!(processor.watermark_us, 1000);
    }

    #[test]
    fn f_stream_010_healthy() {
        let mut processor = StreamProcessor::new();
        processor.process_in(100);
        processor.emit();
        assert!(processor.is_healthy(5.0));
    }

    #[test]
    fn f_stream_011_reset() {
        let mut processor = StreamProcessor::new();
        processor.process_in(100);
        processor.reset();
        assert_eq!(processor.records_in, 0);
    }

    #[test]
    fn f_stream_012_clone() {
        let mut processor = StreamProcessor::new();
        processor.process_in(100);
        let snapshot = processor.clone();
        assert_eq!(processor.records_in, snapshot.records_in);
    }
}
#[cfg(test)]
mod batch_aggregator_tests {
    use super::*;

    #[test]
    fn f_batch_001_new() {
        let aggregator = BatchAggregator::new(100);
        assert_eq!(aggregator.current_count, 0);
    }

    #[test]
    fn f_batch_002_default() {
        let aggregator = BatchAggregator::default();
        assert_eq!(aggregator.batch_size, 100);
    }

    #[test]
    fn f_batch_003_add() {
        let mut aggregator = BatchAggregator::new(100);
        aggregator.add();
        assert_eq!(aggregator.current_count, 1);
    }

    #[test]
    fn f_batch_004_auto_flush() {
        let mut aggregator = BatchAggregator::new(2);
        aggregator.add();
        let flushed = aggregator.add();
        assert!(flushed);
        assert_eq!(aggregator.batches(), 1);
    }

    #[test]
    fn f_batch_005_manual_flush() {
        let mut aggregator = BatchAggregator::new(100);
        aggregator.add();
        aggregator.flush();
        assert_eq!(aggregator.batches(), 1);
    }

    #[test]
    fn f_batch_006_fill_level() {
        let mut aggregator = BatchAggregator::new(100);
        for _ in 0..50 {
            aggregator.add();
        }
        assert!((aggregator.fill_level() - 50.0).abs() < 0.01);
    }

    #[test]
    fn f_batch_007_for_writes() {
        let aggregator = BatchAggregator::for_writes();
        assert_eq!(aggregator.batch_size, 1000);
    }

    #[test]
    fn f_batch_008_for_small() {
        let aggregator = BatchAggregator::for_small();
        assert_eq!(aggregator.batch_size, 10);
    }

    #[test]
    fn f_batch_009_avg_batch() {
        let mut aggregator = BatchAggregator::new(10);
        for _ in 0..10 {
            aggregator.add();
        }
        assert_eq!(aggregator.avg_batch_size(), 10);
    }

    #[test]
    fn f_batch_010_total() {
        let mut aggregator = BatchAggregator::new(100);
        aggregator.add();
        aggregator.add();
        assert_eq!(aggregator.total_items, 2);
    }

    #[test]
    fn f_batch_011_reset() {
        let mut aggregator = BatchAggregator::new(100);
        aggregator.add();
        aggregator.flush();
        aggregator.reset();
        assert_eq!(aggregator.batches(), 0);
    }

    #[test]
    fn f_batch_012_clone() {
        let mut aggregator = BatchAggregator::new(100);
        aggregator.add();
        let snapshot = aggregator.clone();
        assert_eq!(aggregator.current_count, snapshot.current_count);
    }
}
#[cfg(test)]
mod window_tracker_tests {
    use super::*;

    #[test]
    fn f_window_001_new() {
        let tracker = WindowTracker::new(60_000_000, 60_000_000);
        assert_eq!(tracker.current_count(), 0);
    }

    #[test]
    fn f_window_002_default() {
        let tracker = WindowTracker::default();
        assert!(tracker.is_tumbling());
    }

    #[test]
    fn f_window_003_add() {
        let mut tracker = WindowTracker::new(60_000_000, 60_000_000);
        tracker.add_event();
        assert_eq!(tracker.current_count(), 1);
    }

    #[test]
    fn f_window_004_close() {
        let mut tracker = WindowTracker::new(60_000_000, 60_000_000);
        tracker.add_event();
        tracker.close_window(1000);
        assert_eq!(tracker.windows(), 1);
        assert_eq!(tracker.current_count(), 0);
    }

    #[test]
    fn f_window_005_tumbling() {
        let tracker = WindowTracker::for_minute_tumbling();
        assert!(tracker.is_tumbling());
    }

    #[test]
    fn f_window_006_sliding() {
        let tracker = WindowTracker::for_10s_sliding();
        assert!(tracker.is_sliding());
    }

    #[test]
    fn f_window_007_for_minute() {
        let tracker = WindowTracker::for_minute_tumbling();
        assert_eq!(tracker.window_size_us, 60_000_000);
    }

    #[test]
    fn f_window_008_for_10s() {
        let tracker = WindowTracker::for_10s_sliding();
        assert_eq!(tracker.window_size_us, 10_000_000);
        assert_eq!(tracker.slide_interval_us, 1_000_000);
    }

    #[test]
    fn f_window_009_last_start() {
        let mut tracker = WindowTracker::new(60_000_000, 60_000_000);
        tracker.close_window(5000);
        assert_eq!(tracker.last_window_start_us, 5000);
    }

    #[test]
    fn f_window_010_multiple() {
        let mut tracker = WindowTracker::new(60_000_000, 60_000_000);
        tracker.close_window(1000);
        tracker.close_window(2000);
        assert_eq!(tracker.windows(), 2);
    }

    #[test]
    fn f_window_011_reset() {
        let mut tracker = WindowTracker::new(60_000_000, 60_000_000);
        tracker.add_event();
        tracker.close_window(1000);
        tracker.reset();
        assert_eq!(tracker.windows(), 0);
    }

    #[test]
    fn f_window_012_clone() {
        let mut tracker = WindowTracker::new(60_000_000, 60_000_000);
        tracker.add_event();
        let snapshot = tracker.clone();
        assert_eq!(tracker.current_count(), snapshot.current_count());
    }
}
#[cfg(test)]
mod priority_queue_tracker_tests {
    use super::*;

    #[test]
    fn f_pqueue_001_new() {
        let tracker = PriorityQueueTracker::new(100);
        assert_eq!(tracker.size(), 0);
    }

    #[test]
    fn f_pqueue_002_default() {
        let tracker = PriorityQueueTracker::default();
        assert!(tracker.is_empty());
    }

    #[test]
    fn f_pqueue_003_enqueue() {
        let mut tracker = PriorityQueueTracker::new(100);
        assert!(tracker.enqueue(5));
        assert_eq!(tracker.size(), 1);
    }

    #[test]
    fn f_pqueue_004_dequeue() {
        let mut tracker = PriorityQueueTracker::new(100);
        tracker.enqueue(5);
        assert!(tracker.dequeue());
        assert_eq!(tracker.size(), 0);
    }

    #[test]
    fn f_pqueue_005_priority() {
        let mut tracker = PriorityQueueTracker::new(100);
        tracker.enqueue(5);
        tracker.enqueue(10);
        assert!((tracker.avg_priority() - 7.5).abs() < 0.01);
    }

    #[test]
    fn f_pqueue_006_full() {
        let mut tracker = PriorityQueueTracker::new(2);
        tracker.enqueue(1);
        tracker.enqueue(2);
        assert!(tracker.is_full());
    }

    #[test]
    fn f_pqueue_007_for_tasks() {
        let tracker = PriorityQueueTracker::for_tasks();
        assert_eq!(tracker.capacity, 1000);
    }

    #[test]
    fn f_pqueue_008_for_events() {
        let tracker = PriorityQueueTracker::for_events();
        assert_eq!(tracker.capacity, 10000);
    }

    #[test]
    fn f_pqueue_009_utilization() {
        let mut tracker = PriorityQueueTracker::new(100);
        for i in 0..50 {
            tracker.enqueue(i);
        }
        assert!((tracker.utilization() - 50.0).abs() < 0.01);
    }

    #[test]
    fn f_pqueue_010_full_enqueue() {
        let mut tracker = PriorityQueueTracker::new(1);
        tracker.enqueue(1);
        assert!(!tracker.enqueue(2));
    }

    #[test]
    fn f_pqueue_011_reset() {
        let mut tracker = PriorityQueueTracker::new(100);
        tracker.enqueue(5);
        tracker.reset();
        assert_eq!(tracker.size(), 0);
    }

    #[test]
    fn f_pqueue_012_clone() {
        let mut tracker = PriorityQueueTracker::new(100);
        tracker.enqueue(5);
        let snapshot = tracker.clone();
        assert_eq!(tracker.size(), snapshot.size());
    }
}
/// Counts registered metric instruments and collection passes.
#[derive(Debug, Clone)]
pub struct MetricRegistry {
    counters: u32,
    gauges: u32,
    histograms: u32,
    collections: u64,
    last_collection_us: u64,
}

impl Default for MetricRegistry {
    fn default() -> Self {
        Self::new()
    }
}

impl MetricRegistry {
    /// Creates an empty registry.
    #[must_use]
    pub fn new() -> Self {
        Self {
            counters: 0,
            gauges: 0,
            histograms: 0,
            collections: 0,
            last_collection_us: 0,
        }
    }

    /// Preset for application metrics (same defaults as `new`).
    #[must_use]
    pub fn for_application() -> Self {
        Self::new()
    }

    /// Preset for system metrics (same defaults as `new`).
    #[must_use]
    pub fn for_system() -> Self {
        Self::new()
    }

    /// Registers one counter instrument.
    pub fn register_counter(&mut self) {
        self.counters += 1;
    }

    /// Registers one gauge instrument.
    pub fn register_gauge(&mut self) {
        self.gauges += 1;
    }

    /// Registers one histogram instrument.
    pub fn register_histogram(&mut self) {
        self.histograms += 1;
    }

    /// Records a collection pass at the given timestamp.
    pub fn collect(&mut self, timestamp_us: u64) {
        self.collections += 1;
        self.last_collection_us = timestamp_us;
    }

    /// Total instruments of all kinds.
    #[must_use]
    pub fn total_metrics(&self) -> u32 {
        self.counters + self.gauges + self.histograms
    }

    /// Collection passes performed so far.
    #[must_use]
    pub fn collections(&self) -> u64 {
        self.collections
    }

    /// Returns the registry to its empty state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Alert lifecycle counters: fired, acknowledged, resolved and suppressed.
#[derive(Debug, Clone)]
pub struct AlertManager {
    active: u32,
    fired: u64,
    acknowledged: u64,
    resolved: u64,
    suppressed: u64,
}

impl Default for AlertManager {
    fn default() -> Self {
        Self::new()
    }
}

impl AlertManager {
    /// Creates a manager with no alert history.
    #[must_use]
    pub fn new() -> Self {
        Self {
            active: 0,
            fired: 0,
            acknowledged: 0,
            resolved: 0,
            suppressed: 0,
        }
    }

    /// Preset for critical alerts (same defaults as `new`).
    #[must_use]
    pub fn for_critical() -> Self {
        Self::new()
    }

    /// Preset for warning alerts (same defaults as `new`).
    #[must_use]
    pub fn for_warnings() -> Self {
        Self::new()
    }

    /// Raises a new active alert.
    pub fn fire(&mut self) {
        self.active += 1;
        self.fired += 1;
    }

    /// Acknowledges an alert (does not change the active count).
    pub fn acknowledge(&mut self) {
        self.acknowledged += 1;
    }

    /// Resolves one active alert; a no-op when none are active.
    pub fn resolve(&mut self) {
        if self.active == 0 {
            return;
        }
        self.active -= 1;
        self.resolved += 1;
    }

    /// Suppresses one active alert; a no-op when none are active.
    pub fn suppress(&mut self) {
        if self.active == 0 {
            return;
        }
        self.active -= 1;
        self.suppressed += 1;
    }

    /// Alerts currently active.
    #[must_use]
    pub fn active(&self) -> u32 {
        self.active
    }

    /// Resolved / fired as a percentage; 100.0 before any alert has fired.
    #[must_use]
    pub fn resolution_rate(&self) -> f64 {
        if self.fired == 0 {
            return 100.0;
        }
        (self.resolved as f64 / self.fired as f64) * 100.0
    }

    /// True while the active-alert count stays within the supplied bound.
    #[must_use]
    pub fn is_healthy(&self, max_active: u32) -> bool {
        self.active <= max_active
    }

    /// Discards all alert history.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Index-build statistics: entries/bytes indexed, segments built, merges
/// completed, and cumulative build time.
#[derive(Debug, Clone)]
pub struct IndexBuilder {
    entries_indexed: u64,
    bytes_indexed: u64,
    segments_built: u64,
    merges_completed: u64,
    build_time_us: u64,
}

impl Default for IndexBuilder {
    fn default() -> Self {
        Self::new()
    }
}

impl IndexBuilder {
    /// Creates a builder tracker with all counters at zero.
    #[must_use]
    pub fn new() -> Self {
        Self {
            entries_indexed: 0,
            bytes_indexed: 0,
            segments_built: 0,
            merges_completed: 0,
            build_time_us: 0,
        }
    }

    /// Preset for search indexes (same defaults as `new`).
    #[must_use]
    pub fn for_search() -> Self {
        Self::new()
    }

    /// Preset for database indexes (same defaults as `new`).
    #[must_use]
    pub fn for_database() -> Self {
        Self::new()
    }

    /// Records one indexed entry of the given size.
    pub fn index_entry(&mut self, bytes: u64) {
        self.entries_indexed += 1;
        self.bytes_indexed += bytes;
    }

    /// Records one built segment and the time it took.
    pub fn build_segment(&mut self, duration_us: u64) {
        self.segments_built += 1;
        self.build_time_us += duration_us;
    }

    /// Records one completed segment merge.
    pub fn complete_merge(&mut self) {
        self.merges_completed += 1;
    }

    /// Entries indexed per second of build time; 0.0 before any build time
    /// has been recorded.
    #[must_use]
    pub fn throughput(&self) -> f64 {
        if self.build_time_us == 0 {
            return 0.0;
        }
        (self.entries_indexed as f64 / self.build_time_us as f64) * 1_000_000.0
    }

    /// Mean build time per segment in microseconds (integer division; 0
    /// with no segments).
    #[must_use]
    pub fn avg_segment_time_us(&self) -> u64 {
        if self.segments_built == 0 {
            return 0;
        }
        self.build_time_us / self.segments_built
    }

    /// Zeroes every counter.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Compaction-policy statistics: evaluations, trigger decisions, bytes
/// reclaimed and the current space-amplification factor.
#[derive(Debug, Clone)]
pub struct CompactionPolicy {
    evaluations: u64,
    triggered: u64,
    skipped: u64,
    bytes_reclaimed: u64,
    space_amplification: f64,
}

impl Default for CompactionPolicy {
    fn default() -> Self {
        Self::new()
    }
}

impl CompactionPolicy {
    /// Creates a policy tracker with no history and amplification 1.0.
    #[must_use]
    pub fn new() -> Self {
        Self {
            evaluations: 0,
            triggered: 0,
            skipped: 0,
            bytes_reclaimed: 0,
            space_amplification: 1.0,
        }
    }

    /// Preset for leveled compaction (same defaults as `new`).
    #[must_use]
    pub fn for_leveled() -> Self {
        Self::new()
    }

    /// Preset for size-tiered compaction (same defaults as `new`).
    #[must_use]
    pub fn for_size_tiered() -> Self {
        Self::new()
    }

    /// Records one policy evaluation and its decision.
    pub fn evaluate(&mut self, should_compact: bool) {
        self.evaluations += 1;
        if should_compact {
            self.triggered += 1;
        } else {
            self.skipped += 1;
        }
    }

    /// Adds reclaimed bytes from a finished compaction.
    pub fn reclaim(&mut self, bytes: u64) {
        self.bytes_reclaimed += bytes;
    }

    /// Overwrites the current space-amplification factor.
    pub fn set_amplification(&mut self, factor: f64) {
        self.space_amplification = factor;
    }

    /// Triggered / evaluations as a percentage; 0.0 before any evaluation.
    #[must_use]
    pub fn trigger_rate(&self) -> f64 {
        if self.evaluations == 0 {
            return 0.0;
        }
        (self.triggered as f64 / self.evaluations as f64) * 100.0
    }

    /// True while space amplification stays within the supplied bound.
    #[must_use]
    pub fn is_effective(&self, max_amplification: f64) -> bool {
        self.space_amplification <= max_amplification
    }

    /// Total bytes reclaimed.
    #[must_use]
    pub fn reclaimed(&self) -> u64 {
        self.bytes_reclaimed
    }

    /// Restores the initial state (amplification back to 1.0).
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod metric_registry_tests {
    use super::*;

    #[test]
    fn f_mreg_001_new() {
        let registry = MetricRegistry::new();
        assert_eq!(registry.total_metrics(), 0);
    }

    #[test]
    fn f_mreg_002_default() {
        let registry = MetricRegistry::default();
        assert_eq!(registry.collections(), 0);
    }

    #[test]
    fn f_mreg_003_counter() {
        let mut registry = MetricRegistry::new();
        registry.register_counter();
        assert_eq!(registry.counters, 1);
    }

    #[test]
    fn f_mreg_004_gauge() {
        let mut registry = MetricRegistry::new();
        registry.register_gauge();
        assert_eq!(registry.gauges, 1);
    }

    #[test]
    fn f_mreg_005_histogram() {
        let mut registry = MetricRegistry::new();
        registry.register_histogram();
        assert_eq!(registry.histograms, 1);
    }

    #[test]
    fn f_mreg_006_total() {
        let mut registry = MetricRegistry::new();
        registry.register_counter();
        registry.register_gauge();
        registry.register_histogram();
        assert_eq!(registry.total_metrics(), 3);
    }

    #[test]
    fn f_mreg_007_for_app() {
        let registry = MetricRegistry::for_application();
        assert_eq!(registry.total_metrics(), 0);
    }

    #[test]
    fn f_mreg_008_for_system() {
        let registry = MetricRegistry::for_system();
        assert_eq!(registry.collections(), 0);
    }

    #[test]
    fn f_mreg_009_collect() {
        let mut registry = MetricRegistry::new();
        registry.collect(1000);
        assert_eq!(registry.collections(), 1);
        assert_eq!(registry.last_collection_us, 1000);
    }

    #[test]
    fn f_mreg_010_multi_collect() {
        let mut registry = MetricRegistry::new();
        registry.collect(1000);
        registry.collect(2000);
        assert_eq!(registry.collections(), 2);
    }

    #[test]
    fn f_mreg_011_reset() {
        let mut registry = MetricRegistry::new();
        registry.register_counter();
        registry.collect(1000);
        registry.reset();
        assert_eq!(registry.total_metrics(), 0);
    }

    #[test]
    fn f_mreg_012_clone() {
        let mut registry = MetricRegistry::new();
        registry.register_counter();
        let snapshot = registry.clone();
        assert_eq!(registry.counters, snapshot.counters);
    }
}
#[cfg(test)]
mod alert_manager_tests {
use super::*;
// Behavioral tests for AlertManager: fire/resolve/acknowledge/suppress
// life-cycle plus derived rates.
#[test]
fn f_alert_001_new() {
let am = AlertManager::new();
assert_eq!(am.active(), 0);
}
#[test]
fn f_alert_002_default() {
let am = AlertManager::default();
assert_eq!(am.fired, 0);
}
#[test]
fn f_alert_003_fire() {
let mut am = AlertManager::new();
am.fire();
assert_eq!(am.active(), 1);
}
#[test]
fn f_alert_004_resolve() {
let mut am = AlertManager::new();
am.fire();
am.resolve();
assert_eq!(am.active(), 0);
}
#[test]
fn f_alert_005_ack() {
let mut am = AlertManager::new();
am.fire();
am.acknowledge();
assert_eq!(am.acknowledged, 1);
}
#[test]
fn f_alert_006_resolution_rate() {
let mut am = AlertManager::new();
am.fire();
am.resolve();
assert!((am.resolution_rate() - 100.0).abs() < 0.01);
}
#[test]
fn f_alert_007_for_critical() {
let am = AlertManager::for_critical();
assert_eq!(am.active(), 0);
}
#[test]
fn f_alert_008_for_warnings() {
let am = AlertManager::for_warnings();
assert_eq!(am.fired, 0);
}
#[test]
fn f_alert_009_suppress() {
let mut am = AlertManager::new();
am.fire();
am.suppress();
assert_eq!(am.active(), 0);
assert_eq!(am.suppressed, 1);
}
#[test]
fn f_alert_010_healthy() {
// One active alert is still within a threshold of 5.
let mut am = AlertManager::new();
am.fire();
assert!(am.is_healthy(5));
}
#[test]
fn f_alert_011_reset() {
let mut am = AlertManager::new();
am.fire();
am.reset();
assert_eq!(am.active(), 0);
}
#[test]
fn f_alert_012_clone() {
let mut am = AlertManager::new();
am.fire();
let cloned = am.clone();
assert_eq!(am.active(), cloned.active());
}
}
#[cfg(test)]
mod index_builder_tests {
use super::*;
// Behavioral tests for IndexBuilder: entry indexing, segment builds,
// merges, and derived throughput/average metrics.
#[test]
fn f_idxb_001_new() {
let ib = IndexBuilder::new();
assert_eq!(ib.entries_indexed, 0);
}
#[test]
fn f_idxb_002_default() {
let ib = IndexBuilder::default();
assert_eq!(ib.segments_built, 0);
}
#[test]
fn f_idxb_003_index() {
let mut ib = IndexBuilder::new();
ib.index_entry(100);
assert_eq!(ib.entries_indexed, 1);
assert_eq!(ib.bytes_indexed, 100);
}
#[test]
fn f_idxb_004_segment() {
let mut ib = IndexBuilder::new();
ib.build_segment(1000);
assert_eq!(ib.segments_built, 1);
}
#[test]
fn f_idxb_005_merge() {
let mut ib = IndexBuilder::new();
ib.complete_merge();
assert_eq!(ib.merges_completed, 1);
}
#[test]
fn f_idxb_006_throughput() {
let mut ib = IndexBuilder::new();
ib.index_entry(100);
// 100 bytes over a 1_000_000 segment argument yields 1.0; presumably
// bytes per time unit — confirm against IndexBuilder::throughput.
ib.build_segment(1_000_000); assert!((ib.throughput() - 1.0).abs() < 0.01);
}
#[test]
fn f_idxb_007_for_search() {
let ib = IndexBuilder::for_search();
assert_eq!(ib.entries_indexed, 0);
}
#[test]
fn f_idxb_008_for_database() {
let ib = IndexBuilder::for_database();
assert_eq!(ib.segments_built, 0);
}
#[test]
fn f_idxb_009_avg_segment() {
let mut ib = IndexBuilder::new();
ib.build_segment(1000);
ib.build_segment(2000);
assert_eq!(ib.avg_segment_time_us(), 1500);
}
#[test]
fn f_idxb_010_multi_entry() {
let mut ib = IndexBuilder::new();
ib.index_entry(100);
ib.index_entry(200);
assert_eq!(ib.bytes_indexed, 300);
}
#[test]
fn f_idxb_011_reset() {
let mut ib = IndexBuilder::new();
ib.index_entry(100);
ib.reset();
assert_eq!(ib.entries_indexed, 0);
}
#[test]
fn f_idxb_012_clone() {
let mut ib = IndexBuilder::new();
ib.index_entry(100);
let cloned = ib.clone();
assert_eq!(ib.entries_indexed, cloned.entries_indexed);
}
}
#[cfg(test)]
mod compaction_policy_tests {
use super::*;
// Behavioral tests for CompactionPolicy: evaluation bookkeeping,
// reclaimed-byte totals, and the amplification-based health check.
#[test]
fn f_cpol_001_new() {
let cp = CompactionPolicy::new();
assert_eq!(cp.evaluations, 0);
}
#[test]
fn f_cpol_002_default() {
let cp = CompactionPolicy::default();
assert_eq!(cp.triggered, 0);
}
#[test]
fn f_cpol_003_trigger() {
let mut cp = CompactionPolicy::new();
cp.evaluate(true);
assert_eq!(cp.triggered, 1);
}
#[test]
fn f_cpol_004_skip() {
let mut cp = CompactionPolicy::new();
cp.evaluate(false);
assert_eq!(cp.skipped, 1);
}
#[test]
fn f_cpol_005_reclaim() {
let mut cp = CompactionPolicy::new();
cp.reclaim(1000);
assert_eq!(cp.reclaimed(), 1000);
}
#[test]
fn f_cpol_006_trigger_rate() {
let mut cp = CompactionPolicy::new();
cp.evaluate(true);
cp.evaluate(false);
assert!((cp.trigger_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_cpol_007_for_leveled() {
let cp = CompactionPolicy::for_leveled();
assert_eq!(cp.evaluations, 0);
}
#[test]
fn f_cpol_008_for_size_tiered() {
let cp = CompactionPolicy::for_size_tiered();
assert_eq!(cp.triggered, 0);
}
#[test]
fn f_cpol_009_amplification() {
let mut cp = CompactionPolicy::new();
cp.set_amplification(2.5);
assert!((cp.space_amplification - 2.5).abs() < 0.01);
}
#[test]
fn f_cpol_010_effective() {
// A fresh policy starts at amplification 1.0, below the 2.0 ceiling.
let cp = CompactionPolicy::new();
assert!(cp.is_effective(2.0));
}
#[test]
fn f_cpol_011_reset() {
let mut cp = CompactionPolicy::new();
cp.evaluate(true);
cp.reclaim(1000);
cp.reset();
assert_eq!(cp.evaluations, 0);
}
#[test]
fn f_cpol_012_clone() {
let mut cp = CompactionPolicy::new();
cp.evaluate(true);
let cloned = cp.clone();
assert_eq!(cp.triggered, cloned.triggered);
}
}
/// Write-amplification accounting: compares the bytes the user asked to
/// persist against the bytes that actually reached storage, including
/// compaction rewrites.
#[derive(Debug, Clone)]
pub struct WriteAmplification {
    user_bytes: u64,
    actual_bytes: u64,
    writes: u64,
    compaction_bytes: u64,
}

impl Default for WriteAmplification {
    fn default() -> Self {
        Self::new()
    }
}

impl WriteAmplification {
    /// Creates a tracker with all counters at zero.
    #[must_use]
    pub fn new() -> Self {
        Self {
            compaction_bytes: 0,
            writes: 0,
            actual_bytes: 0,
            user_bytes: 0,
        }
    }

    /// Preset for LSM-tree engines; currently identical to `new()`.
    #[must_use]
    pub fn for_lsm() -> Self {
        Self::new()
    }

    /// Preset for B-tree engines; currently identical to `new()`.
    #[must_use]
    pub fn for_btree() -> Self {
        Self::new()
    }

    /// Records a logical write of `bytes` issued by the user.
    pub fn user_write(&mut self, bytes: u64) {
        self.writes += 1;
        self.user_bytes += bytes;
    }

    /// Records `bytes` physically written to the device.
    pub fn disk_write(&mut self, bytes: u64) {
        self.actual_bytes += bytes;
    }

    /// Records a compaction rewrite; it counts toward physical bytes too.
    pub fn compaction_write(&mut self, bytes: u64) {
        self.actual_bytes += bytes;
        self.compaction_bytes += bytes;
    }

    /// Ratio of physical to logical bytes; 1.0 before any user write.
    #[must_use]
    pub fn amplification(&self) -> f64 {
        match self.user_bytes {
            0 => 1.0,
            user => self.actual_bytes as f64 / user as f64,
        }
    }

    /// True while the current amplification does not exceed `max_amp`.
    #[must_use]
    pub fn is_acceptable(&self, max_amp: f64) -> bool {
        max_amp >= self.amplification()
    }

    /// Number of logical writes recorded so far.
    #[must_use]
    pub fn writes(&self) -> u64 {
        self.writes
    }

    /// Clears every counter back to the freshly-constructed state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Read-amplification accounting: physical reads performed per logical
/// read requested, plus cache and bloom-filter hit counters.
#[derive(Debug, Clone)]
pub struct ReadAmplification {
    logical_reads: u64,
    physical_reads: u64,
    cache_hits: u64,
    bloom_filter_hits: u64,
}

impl Default for ReadAmplification {
    fn default() -> Self {
        Self::new()
    }
}

impl ReadAmplification {
    /// Creates a tracker with every counter at zero.
    #[must_use]
    pub fn new() -> Self {
        Self {
            bloom_filter_hits: 0,
            cache_hits: 0,
            physical_reads: 0,
            logical_reads: 0,
        }
    }

    /// Preset for LSM-tree engines; currently identical to `new()`.
    #[must_use]
    pub fn for_lsm() -> Self {
        Self::new()
    }

    /// Preset for B-tree engines; currently identical to `new()`.
    #[must_use]
    pub fn for_btree() -> Self {
        Self::new()
    }

    /// Records one logical (caller-visible) read.
    pub fn logical_read(&mut self) {
        self.logical_reads += 1;
    }

    /// Records one physical (device) read.
    pub fn physical_read(&mut self) {
        self.physical_reads += 1;
    }

    /// Records a read served from cache.
    pub fn cache_hit(&mut self) {
        self.cache_hits += 1;
    }

    /// Records a lookup short-circuited by a bloom filter.
    pub fn bloom_hit(&mut self) {
        self.bloom_filter_hits += 1;
    }

    /// Physical reads per logical read; 1.0 before any logical read.
    #[must_use]
    pub fn amplification(&self) -> f64 {
        match self.logical_reads {
            0 => 1.0,
            _ => self.physical_reads as f64 / self.logical_reads as f64,
        }
    }

    /// Cache hits as a percentage of cache hits plus physical reads.
    #[must_use]
    pub fn cache_hit_rate(&self) -> f64 {
        let total = self.cache_hits + self.physical_reads;
        if total == 0 {
            return 0.0;
        }
        (self.cache_hits as f64 / total as f64) * 100.0
    }

    /// True while amplification does not exceed `max_amp`.
    #[must_use]
    pub fn is_acceptable(&self, max_amp: f64) -> bool {
        max_amp >= self.amplification()
    }

    /// Clears every counter back to the freshly-constructed state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Lock acquisition statistics: contention, deadlocks, cumulative wait
/// time, and the number of locks currently held.
#[derive(Debug, Clone)]
pub struct LockManager {
    acquisitions: u64,
    contentions: u64,
    deadlocks: u64,
    total_wait_us: u64,
    held_count: u32,
}

impl Default for LockManager {
    fn default() -> Self {
        Self::new()
    }
}

impl LockManager {
    /// Creates a tracker with all counters at zero.
    #[must_use]
    pub fn new() -> Self {
        Self {
            held_count: 0,
            total_wait_us: 0,
            deadlocks: 0,
            contentions: 0,
            acquisitions: 0,
        }
    }

    /// Preset for mutexes; currently identical to `new()`.
    #[must_use]
    pub fn for_mutex() -> Self {
        Self::new()
    }

    /// Preset for reader-writer locks; currently identical to `new()`.
    #[must_use]
    pub fn for_rwlock() -> Self {
        Self::new()
    }

    /// Records a successful acquisition that waited `wait_us`
    /// microseconds; any nonzero wait counts as contention.
    pub fn acquire(&mut self, wait_us: u64) {
        self.acquisitions += 1;
        self.held_count += 1;
        self.total_wait_us += wait_us;
        if wait_us != 0 {
            self.contentions += 1;
        }
    }

    /// Records a release; the held count saturates at zero if unbalanced.
    pub fn release(&mut self) {
        self.held_count = self.held_count.saturating_sub(1);
    }

    /// Records a detected deadlock.
    pub fn deadlock(&mut self) {
        self.deadlocks += 1;
    }

    /// Percentage of acquisitions that had to wait; 0.0 with none yet.
    #[must_use]
    pub fn contention_rate(&self) -> f64 {
        match self.acquisitions {
            0 => 0.0,
            _ => (self.contentions as f64 / self.acquisitions as f64) * 100.0,
        }
    }

    /// Mean wait per acquisition in microseconds (integer division).
    #[must_use]
    pub fn avg_wait_us(&self) -> u64 {
        self.total_wait_us.checked_div(self.acquisitions).unwrap_or(0)
    }

    /// Healthy means no deadlocks and contention at or below the cap.
    #[must_use]
    pub fn is_healthy(&self, max_contention_rate: f64) -> bool {
        self.deadlocks == 0 && self.contention_rate() <= max_contention_rate
    }

    /// Clears every counter back to the freshly-constructed state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
/// Tracks allocation against a fixed byte limit, counting pressure
/// events (usage above 80% of the limit), GC triggers, and evictions.
#[derive(Debug, Clone)]
pub struct MemoryPressure {
    allocated_bytes: u64,
    limit_bytes: u64,
    pressure_events: u64,
    gc_triggers: u64,
    evictions: u64,
}

impl Default for MemoryPressure {
    /// Defaults to a 1 GiB limit.
    fn default() -> Self {
        Self::new(1024 * 1024 * 1024)
    }
}

impl MemoryPressure {
    /// Creates a tracker with the given byte limit and zeroed counters.
    #[must_use]
    pub fn new(limit_bytes: u64) -> Self {
        Self {
            allocated_bytes: 0,
            limit_bytes,
            pressure_events: 0,
            gc_triggers: 0,
            evictions: 0,
        }
    }

    /// Preset with an 8 GiB limit, intended for a process heap.
    #[must_use]
    pub fn for_heap() -> Self {
        Self::new(8 * 1024 * 1024 * 1024)
    }

    /// Preset with a 1 GiB limit, intended for a cache.
    #[must_use]
    pub fn for_cache() -> Self {
        Self::new(1024 * 1024 * 1024)
    }

    /// Records an allocation and counts a pressure event whenever usage
    /// rises strictly above 80% of the limit.
    pub fn allocate(&mut self, bytes: u64) {
        self.allocated_bytes += bytes;
        // Widen to u128: the previous `limit_bytes * 80 / 100` wrapped in
        // u64 for limits above u64::MAX / 80, corrupting the threshold.
        // For integers, `a > limit * 80 / 100` ⟺ `100 * a > 80 * limit`,
        // so this is exactly equivalent for all non-overflowing inputs.
        if u128::from(self.allocated_bytes) * 100 > u128::from(self.limit_bytes) * 80 {
            self.pressure_events += 1;
        }
    }

    /// Records a free; the allocation total saturates at zero.
    pub fn free(&mut self, bytes: u64) {
        self.allocated_bytes = self.allocated_bytes.saturating_sub(bytes);
    }

    /// Records a garbage-collection cycle being triggered.
    pub fn trigger_gc(&mut self) {
        self.gc_triggers += 1;
    }

    /// Records an eviction that reclaimed `bytes`.
    pub fn evict(&mut self, bytes: u64) {
        self.evictions += 1;
        self.allocated_bytes = self.allocated_bytes.saturating_sub(bytes);
    }

    /// Allocated bytes as a percentage of the limit; 0.0 when the limit
    /// is zero.
    #[must_use]
    pub fn utilization(&self) -> f64 {
        if self.limit_bytes == 0 {
            0.0
        } else {
            (self.allocated_bytes as f64 / self.limit_bytes as f64) * 100.0
        }
    }

    /// True above 80% utilization.
    #[must_use]
    pub fn is_under_pressure(&self) -> bool {
        self.utilization() > 80.0
    }

    /// True while utilization is at or below `max_utilization` percent.
    #[must_use]
    pub fn is_healthy(&self, max_utilization: f64) -> bool {
        self.utilization() <= max_utilization
    }

    /// Clears the counters; the configured limit is kept.
    pub fn reset(&mut self) {
        self.allocated_bytes = 0;
        self.pressure_events = 0;
        self.gc_triggers = 0;
        self.evictions = 0;
    }
}
#[cfg(test)]
mod write_amplification_tests {
use super::*;
// Behavioral tests for WriteAmplification; private fields like
// `user_bytes` are readable here via same-module access.
#[test]
fn f_wamp_001_new() {
let wa = WriteAmplification::new();
assert_eq!(wa.writes(), 0);
}
#[test]
fn f_wamp_002_default() {
let wa = WriteAmplification::default();
assert!((wa.amplification() - 1.0).abs() < 0.01);
}
#[test]
fn f_wamp_003_user_write() {
let mut wa = WriteAmplification::new();
wa.user_write(100);
assert_eq!(wa.user_bytes, 100);
}
#[test]
fn f_wamp_004_disk_write() {
let mut wa = WriteAmplification::new();
wa.disk_write(200);
assert_eq!(wa.actual_bytes, 200);
}
#[test]
fn f_wamp_005_amplification() {
let mut wa = WriteAmplification::new();
wa.user_write(100);
wa.disk_write(300);
assert!((wa.amplification() - 3.0).abs() < 0.01);
}
#[test]
fn f_wamp_006_compaction() {
let mut wa = WriteAmplification::new();
wa.compaction_write(500);
assert_eq!(wa.compaction_bytes, 500);
}
#[test]
fn f_wamp_007_for_lsm() {
let wa = WriteAmplification::for_lsm();
assert_eq!(wa.writes(), 0);
}
#[test]
fn f_wamp_008_for_btree() {
let wa = WriteAmplification::for_btree();
assert_eq!(wa.user_bytes, 0);
}
#[test]
fn f_wamp_009_acceptable() {
let mut wa = WriteAmplification::new();
wa.user_write(100);
wa.disk_write(150);
assert!(wa.is_acceptable(2.0));
}
#[test]
fn f_wamp_010_not_acceptable() {
let mut wa = WriteAmplification::new();
wa.user_write(100);
wa.disk_write(500);
assert!(!wa.is_acceptable(2.0));
}
#[test]
fn f_wamp_011_reset() {
let mut wa = WriteAmplification::new();
wa.user_write(100);
wa.reset();
assert_eq!(wa.writes(), 0);
}
#[test]
fn f_wamp_012_clone() {
let mut wa = WriteAmplification::new();
wa.user_write(100);
let cloned = wa.clone();
assert_eq!(wa.user_bytes, cloned.user_bytes);
}
}
#[cfg(test)]
mod read_amplification_tests {
use super::*;
// Behavioral tests for ReadAmplification, covering logical/physical
// reads, cache hits, bloom hits, and the derived ratios.
#[test]
fn f_ramp_001_new() {
let ra = ReadAmplification::new();
assert_eq!(ra.logical_reads, 0);
}
#[test]
fn f_ramp_002_default() {
let ra = ReadAmplification::default();
assert!((ra.amplification() - 1.0).abs() < 0.01);
}
#[test]
fn f_ramp_003_logical() {
let mut ra = ReadAmplification::new();
ra.logical_read();
assert_eq!(ra.logical_reads, 1);
}
#[test]
fn f_ramp_004_physical() {
let mut ra = ReadAmplification::new();
ra.physical_read();
assert_eq!(ra.physical_reads, 1);
}
#[test]
fn f_ramp_005_amplification() {
let mut ra = ReadAmplification::new();
ra.logical_read();
ra.physical_read();
ra.physical_read();
ra.physical_read();
assert!((ra.amplification() - 3.0).abs() < 0.01);
}
#[test]
fn f_ramp_006_cache() {
let mut ra = ReadAmplification::new();
ra.cache_hit();
assert_eq!(ra.cache_hits, 1);
}
#[test]
fn f_ramp_007_for_lsm() {
let ra = ReadAmplification::for_lsm();
assert_eq!(ra.logical_reads, 0);
}
#[test]
fn f_ramp_008_for_btree() {
let ra = ReadAmplification::for_btree();
assert_eq!(ra.physical_reads, 0);
}
#[test]
fn f_ramp_009_cache_rate() {
let mut ra = ReadAmplification::new();
ra.cache_hit();
ra.physical_read();
assert!((ra.cache_hit_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_ramp_010_bloom() {
let mut ra = ReadAmplification::new();
ra.bloom_hit();
assert_eq!(ra.bloom_filter_hits, 1);
}
#[test]
fn f_ramp_011_reset() {
let mut ra = ReadAmplification::new();
ra.logical_read();
ra.reset();
assert_eq!(ra.logical_reads, 0);
}
#[test]
fn f_ramp_012_clone() {
let mut ra = ReadAmplification::new();
ra.logical_read();
let cloned = ra.clone();
assert_eq!(ra.logical_reads, cloned.logical_reads);
}
}
#[cfg(test)]
mod lock_manager_tests {
use super::*;
// Behavioral tests for LockManager: acquisition/release bookkeeping,
// contention classification (any nonzero wait), and health checks.
#[test]
fn f_lock_001_new() {
let lm = LockManager::new();
assert_eq!(lm.acquisitions, 0);
}
#[test]
fn f_lock_002_default() {
let lm = LockManager::default();
assert_eq!(lm.contentions, 0);
}
#[test]
fn f_lock_003_acquire() {
let mut lm = LockManager::new();
lm.acquire(0);
assert_eq!(lm.acquisitions, 1);
}
#[test]
fn f_lock_004_release() {
let mut lm = LockManager::new();
lm.acquire(0);
lm.release();
assert_eq!(lm.held_count, 0);
}
#[test]
fn f_lock_005_contention() {
let mut lm = LockManager::new();
lm.acquire(100); assert_eq!(lm.contentions, 1);
}
#[test]
fn f_lock_006_rate() {
let mut lm = LockManager::new();
lm.acquire(0);
lm.acquire(100);
assert!((lm.contention_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_lock_007_for_mutex() {
let lm = LockManager::for_mutex();
assert_eq!(lm.acquisitions, 0);
}
#[test]
fn f_lock_008_for_rwlock() {
let lm = LockManager::for_rwlock();
assert_eq!(lm.contentions, 0);
}
#[test]
fn f_lock_009_deadlock() {
let mut lm = LockManager::new();
lm.deadlock();
assert_eq!(lm.deadlocks, 1);
}
#[test]
fn f_lock_010_healthy() {
let mut lm = LockManager::new();
lm.acquire(0);
assert!(lm.is_healthy(50.0));
}
#[test]
fn f_lock_011_reset() {
let mut lm = LockManager::new();
lm.acquire(100);
lm.reset();
assert_eq!(lm.acquisitions, 0);
}
#[test]
fn f_lock_012_clone() {
let mut lm = LockManager::new();
lm.acquire(100);
let cloned = lm.clone();
assert_eq!(lm.contentions, cloned.contentions);
}
}
#[cfg(test)]
mod memory_pressure_tests {
use super::*;
// Behavioral tests for MemoryPressure: allocation/free/evict flows,
// utilization math, and the 80% pressure threshold.
#[test]
fn f_mpress_001_new() {
let mp = MemoryPressure::new(1000);
assert_eq!(mp.allocated_bytes, 0);
}
#[test]
fn f_mpress_002_default() {
let mp = MemoryPressure::default();
assert!(mp.limit_bytes > 0);
}
#[test]
fn f_mpress_003_allocate() {
let mut mp = MemoryPressure::new(1000);
mp.allocate(100);
assert_eq!(mp.allocated_bytes, 100);
}
#[test]
fn f_mpress_004_free() {
let mut mp = MemoryPressure::new(1000);
mp.allocate(100);
mp.free(50);
assert_eq!(mp.allocated_bytes, 50);
}
#[test]
fn f_mpress_005_utilization() {
let mut mp = MemoryPressure::new(100);
mp.allocate(50);
assert!((mp.utilization() - 50.0).abs() < 0.01);
}
#[test]
fn f_mpress_006_pressure() {
let mut mp = MemoryPressure::new(100);
mp.allocate(90);
assert!(mp.is_under_pressure());
}
#[test]
fn f_mpress_007_for_heap() {
let mp = MemoryPressure::for_heap();
assert!(mp.limit_bytes > 1024 * 1024 * 1024);
}
#[test]
fn f_mpress_008_for_cache() {
let mp = MemoryPressure::for_cache();
assert_eq!(mp.limit_bytes, 1024 * 1024 * 1024);
}
#[test]
fn f_mpress_009_gc() {
let mut mp = MemoryPressure::new(1000);
mp.trigger_gc();
assert_eq!(mp.gc_triggers, 1);
}
#[test]
fn f_mpress_010_evict() {
let mut mp = MemoryPressure::new(1000);
mp.allocate(100);
mp.evict(50);
assert_eq!(mp.evictions, 1);
assert_eq!(mp.allocated_bytes, 50);
}
#[test]
fn f_mpress_011_reset() {
let mut mp = MemoryPressure::new(1000);
mp.allocate(100);
mp.reset();
assert_eq!(mp.allocated_bytes, 0);
}
#[test]
fn f_mpress_012_clone() {
let mut mp = MemoryPressure::new(1000);
mp.allocate(100);
let cloned = mp.clone();
assert_eq!(mp.allocated_bytes, cloned.allocated_bytes);
}
}
/// Counts open/close/leak events for file descriptors against a
/// configured descriptor limit.
#[derive(Debug, Clone)]
pub struct FileDescriptorTracker {
    pub open_fds: u32,
    pub max_fds: u32,
    pub opens: u64,
    pub closes: u64,
    pub leaks: u64,
    pub peak_open: u32,
}

impl Default for FileDescriptorTracker {
    fn default() -> Self {
        Self::for_process()
    }
}

impl FileDescriptorTracker {
    /// Creates a tracker for a limit of `max_fds` descriptors.
    #[must_use]
    pub fn new(max_fds: u32) -> Self {
        Self {
            open_fds: 0,
            max_fds,
            opens: 0,
            closes: 0,
            leaks: 0,
            peak_open: 0,
        }
    }

    /// Process preset: 1024 descriptors.
    #[must_use]
    pub fn for_process() -> Self {
        Self::new(1024)
    }

    /// Server preset: 65536 descriptors.
    #[must_use]
    pub fn for_server() -> Self {
        Self::new(65536)
    }

    /// Records a descriptor being opened and updates the high-water mark.
    pub fn open(&mut self) {
        self.opens += 1;
        self.open_fds += 1;
        self.peak_open = self.peak_open.max(self.open_fds);
    }

    /// Records a descriptor being closed; saturates at zero if unbalanced.
    pub fn close(&mut self) {
        self.open_fds = self.open_fds.saturating_sub(1);
        self.closes += 1;
    }

    /// Records a leaked descriptor.
    pub fn leak(&mut self) {
        self.leaks += 1;
    }

    /// Open descriptors as a percentage of the limit; 0.0 when the
    /// limit is zero.
    #[must_use]
    pub fn utilization(&self) -> f64 {
        match self.max_fds {
            0 => 0.0,
            _ => (self.open_fds as f64 / self.max_fds as f64) * 100.0,
        }
    }

    /// True once more than 80% of the descriptor budget is in use.
    #[must_use]
    pub fn is_at_risk(&self) -> bool {
        self.utilization() > 80.0
    }

    /// Leaks as a percentage of opens; 0.0 before any open.
    #[must_use]
    pub fn leak_rate(&self) -> f64 {
        match self.opens {
            0 => 0.0,
            _ => (self.leaks as f64 / self.opens as f64) * 100.0,
        }
    }

    /// Clears all counters; the configured `max_fds` is kept.
    pub fn reset(&mut self) {
        *self = Self::new(self.max_fds);
    }
}
#[cfg(test)]
mod fd_tracker_tests {
use super::*;
// Behavioral tests for FileDescriptorTracker: open/close bookkeeping,
// utilization, risk threshold, and leak-rate math.
#[test]
fn f_fd_001_new() {
let fd = FileDescriptorTracker::new(1024);
assert_eq!(fd.max_fds, 1024);
}
#[test]
fn f_fd_002_default() {
let fd = FileDescriptorTracker::default();
assert_eq!(fd.max_fds, 1024);
}
#[test]
fn f_fd_003_open() {
let mut fd = FileDescriptorTracker::new(100);
fd.open();
assert_eq!(fd.open_fds, 1);
assert_eq!(fd.opens, 1);
}
#[test]
fn f_fd_004_close() {
let mut fd = FileDescriptorTracker::new(100);
fd.open();
fd.close();
assert_eq!(fd.open_fds, 0);
assert_eq!(fd.closes, 1);
}
#[test]
fn f_fd_005_utilization() {
let mut fd = FileDescriptorTracker::new(100);
for _ in 0..50 {
fd.open();
}
assert!((fd.utilization() - 50.0).abs() < 0.01);
}
#[test]
fn f_fd_006_risk() {
// 85 of 100 descriptors exceeds the 80% risk threshold.
let mut fd = FileDescriptorTracker::new(100);
for _ in 0..85 {
fd.open();
}
assert!(fd.is_at_risk());
}
#[test]
fn f_fd_007_for_process() {
let fd = FileDescriptorTracker::for_process();
assert_eq!(fd.max_fds, 1024);
}
#[test]
fn f_fd_008_for_server() {
let fd = FileDescriptorTracker::for_server();
assert_eq!(fd.max_fds, 65536);
}
#[test]
fn f_fd_009_leak() {
let mut fd = FileDescriptorTracker::new(100);
fd.leak();
assert_eq!(fd.leaks, 1);
}
#[test]
fn f_fd_010_leak_rate() {
let mut fd = FileDescriptorTracker::new(100);
fd.open();
fd.open();
fd.leak();
assert!((fd.leak_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_fd_011_reset() {
let mut fd = FileDescriptorTracker::new(100);
fd.open();
fd.reset();
assert_eq!(fd.open_fds, 0);
}
#[test]
fn f_fd_012_clone() {
let mut fd = FileDescriptorTracker::new(100);
fd.open();
let cloned = fd.clone();
assert_eq!(fd.open_fds, cloned.open_fds);
}
}
/// Tracks active sockets, the TIME_WAIT backlog, and connection errors
/// against a configured socket budget.
#[derive(Debug, Clone)]
pub struct SocketTracker {
    pub active: u32,
    pub max_sockets: u32,
    pub time_wait: u32,
    pub connections: u64,
    pub accepts: u64,
    pub errors: u64,
}

impl Default for SocketTracker {
    fn default() -> Self {
        Self::for_server()
    }
}

impl SocketTracker {
    /// Creates a tracker allowing up to `max_sockets` sockets.
    #[must_use]
    pub fn new(max_sockets: u32) -> Self {
        Self {
            active: 0,
            max_sockets,
            time_wait: 0,
            connections: 0,
            accepts: 0,
            errors: 0,
        }
    }

    /// Server preset: 10000 sockets.
    #[must_use]
    pub fn for_server() -> Self {
        Self::new(10000)
    }

    /// Client preset: 100 sockets.
    #[must_use]
    pub fn for_client() -> Self {
        Self::new(100)
    }

    /// Records an outbound connection becoming active.
    pub fn connect(&mut self) {
        self.active += 1;
        self.connections += 1;
    }

    /// Records an inbound connection becoming active.
    pub fn accept(&mut self) {
        self.active += 1;
        self.accepts += 1;
    }

    /// Records a close: the socket leaves the active set and enters the
    /// TIME_WAIT pool.
    pub fn close(&mut self) {
        self.time_wait += 1;
        self.active = self.active.saturating_sub(1);
    }

    /// Records one TIME_WAIT socket expiring.
    pub fn expire_time_wait(&mut self) {
        self.time_wait = self.time_wait.saturating_sub(1);
    }

    /// Records a socket error.
    pub fn error(&mut self) {
        self.errors += 1;
    }

    /// Active plus TIME_WAIT sockets as a percentage of the budget;
    /// 0.0 when the budget is zero.
    #[must_use]
    pub fn utilization(&self) -> f64 {
        match self.max_sockets {
            0 => 0.0,
            _ => ((self.active + self.time_wait) as f64 / self.max_sockets as f64) * 100.0,
        }
    }

    /// True when TIME_WAIT sockets outnumber active ones by more than 2x.
    #[must_use]
    pub fn has_time_wait_issue(&self) -> bool {
        self.active * 2 < self.time_wait
    }

    /// Errors as a percentage of all connects plus accepts; 0.0 with none.
    #[must_use]
    pub fn error_rate(&self) -> f64 {
        match self.connections + self.accepts {
            0 => 0.0,
            total => (self.errors as f64 / total as f64) * 100.0,
        }
    }

    /// Clears all counters; the configured `max_sockets` is kept.
    pub fn reset(&mut self) {
        *self = Self::new(self.max_sockets);
    }
}
#[cfg(test)]
mod socket_tracker_tests {
use super::*;
// Behavioral tests for SocketTracker: connect/accept/close flow,
// TIME_WAIT expiry, utilization, and error-rate math.
#[test]
fn f_sock_001_new() {
let sock = SocketTracker::new(1000);
assert_eq!(sock.max_sockets, 1000);
}
#[test]
fn f_sock_002_default() {
let sock = SocketTracker::default();
assert_eq!(sock.max_sockets, 10000);
}
#[test]
fn f_sock_003_connect() {
let mut sock = SocketTracker::new(100);
sock.connect();
assert_eq!(sock.active, 1);
assert_eq!(sock.connections, 1);
}
#[test]
fn f_sock_004_accept() {
let mut sock = SocketTracker::new(100);
sock.accept();
assert_eq!(sock.active, 1);
assert_eq!(sock.accepts, 1);
}
#[test]
fn f_sock_005_close() {
let mut sock = SocketTracker::new(100);
sock.connect();
sock.close();
assert_eq!(sock.active, 0);
assert_eq!(sock.time_wait, 1);
}
#[test]
fn f_sock_006_expire() {
let mut sock = SocketTracker::new(100);
sock.connect();
sock.close();
sock.expire_time_wait();
assert_eq!(sock.time_wait, 0);
}
#[test]
fn f_sock_007_for_server() {
let sock = SocketTracker::for_server();
assert_eq!(sock.max_sockets, 10000);
}
#[test]
fn f_sock_008_for_client() {
let sock = SocketTracker::for_client();
assert_eq!(sock.max_sockets, 100);
}
#[test]
fn f_sock_009_utilization() {
// 10 active + 20 TIME_WAIT out of 100 → 30% utilization.
let mut sock = SocketTracker::new(100);
for _ in 0..30 {
sock.connect();
}
for _ in 0..20 {
sock.close();
}
assert!((sock.utilization() - 30.0).abs() < 0.01);
}
#[test]
fn f_sock_010_time_wait_issue() {
let mut sock = SocketTracker::new(100);
sock.active = 10;
sock.time_wait = 30;
assert!(sock.has_time_wait_issue());
}
#[test]
fn f_sock_011_error_rate() {
let mut sock = SocketTracker::new(100);
sock.connect();
sock.connect();
sock.error();
assert!((sock.error_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_sock_012_clone() {
let mut sock = SocketTracker::new(100);
sock.connect();
let cloned = sock.clone();
assert_eq!(sock.active, cloned.active);
}
}
/// Tracks queueing, execution, and rejection of tasks across a
/// fixed-size worker pool.
#[derive(Debug, Clone)]
pub struct ThreadPoolTracker {
    pub workers: u32,
    pub active: u32,
    pub queued: u64,
    pub completed: u64,
    pub rejected: u64,
    pub peak_queued: u64,
}

impl Default for ThreadPoolTracker {
    fn default() -> Self {
        Self::for_cpu()
    }
}

impl ThreadPoolTracker {
    /// Creates a tracker for a pool of `workers` threads.
    #[must_use]
    pub fn new(workers: u32) -> Self {
        Self {
            workers,
            active: 0,
            queued: 0,
            completed: 0,
            rejected: 0,
            peak_queued: 0,
        }
    }

    /// CPU-bound preset: 8 workers.
    #[must_use]
    pub fn for_cpu() -> Self {
        Self::new(8)
    }

    /// I/O-bound preset: 64 workers.
    #[must_use]
    pub fn for_io() -> Self {
        Self::new(64)
    }

    /// Records a task entering the queue and updates the queue
    /// high-water mark.
    pub fn submit(&mut self) {
        self.queued += 1;
        self.peak_queued = self.peak_queued.max(self.queued);
    }

    /// Records a task leaving the queue (if any) and starting to run.
    pub fn start(&mut self) {
        self.queued = self.queued.saturating_sub(1);
        self.active += 1;
    }

    /// Records a running task finishing.
    pub fn complete(&mut self) {
        self.completed += 1;
        self.active = self.active.saturating_sub(1);
    }

    /// Records a task the pool refused to accept.
    pub fn reject(&mut self) {
        self.rejected += 1;
    }

    /// Busy workers as a percentage of the pool size; 0.0 with no workers.
    #[must_use]
    pub fn utilization(&self) -> f64 {
        match self.workers {
            0 => 0.0,
            _ => (self.active as f64 / self.workers as f64) * 100.0,
        }
    }

    /// True when every worker is busy.
    #[must_use]
    pub fn is_saturated(&self) -> bool {
        self.active >= self.workers
    }

    /// Rejections as a percentage of completed + rejected + still-queued
    /// tasks (tasks currently running are not included in the base).
    #[must_use]
    pub fn rejection_rate(&self) -> f64 {
        match self.completed + self.rejected + self.queued {
            0 => 0.0,
            submitted => (self.rejected as f64 / submitted as f64) * 100.0,
        }
    }

    /// Total tasks completed so far.
    #[must_use]
    pub fn throughput(&self) -> u64 {
        self.completed
    }

    /// Clears all counters; the configured worker count is kept.
    pub fn reset(&mut self) {
        *self = Self::new(self.workers);
    }
}
#[cfg(test)]
mod thread_pool_tests {
use super::*;
// Behavioral tests for ThreadPoolTracker: submit/start/complete flow,
// saturation, and rejection-rate math.
#[test]
fn f_tpool_001_new() {
let tp = ThreadPoolTracker::new(8);
assert_eq!(tp.workers, 8);
}
#[test]
fn f_tpool_002_default() {
let tp = ThreadPoolTracker::default();
assert_eq!(tp.workers, 8);
}
#[test]
fn f_tpool_003_submit() {
let mut tp = ThreadPoolTracker::new(8);
tp.submit();
assert_eq!(tp.queued, 1);
}
#[test]
fn f_tpool_004_start() {
let mut tp = ThreadPoolTracker::new(8);
tp.submit();
tp.start();
assert_eq!(tp.active, 1);
assert_eq!(tp.queued, 0);
}
#[test]
fn f_tpool_005_complete() {
let mut tp = ThreadPoolTracker::new(8);
tp.submit();
tp.start();
tp.complete();
assert_eq!(tp.active, 0);
assert_eq!(tp.completed, 1);
}
#[test]
fn f_tpool_006_utilization() {
let mut tp = ThreadPoolTracker::new(8);
tp.active = 4;
assert!((tp.utilization() - 50.0).abs() < 0.01);
}
#[test]
fn f_tpool_007_saturated() {
let mut tp = ThreadPoolTracker::new(8);
tp.active = 8;
assert!(tp.is_saturated());
}
#[test]
fn f_tpool_008_for_cpu() {
let tp = ThreadPoolTracker::for_cpu();
assert_eq!(tp.workers, 8);
}
#[test]
fn f_tpool_009_for_io() {
let tp = ThreadPoolTracker::for_io();
assert_eq!(tp.workers, 64);
}
#[test]
fn f_tpool_010_reject() {
let mut tp = ThreadPoolTracker::new(8);
tp.reject();
assert_eq!(tp.rejected, 1);
}
#[test]
fn f_tpool_011_rejection_rate() {
// 1 rejection out of 10 submitted (9 completed + 1 rejected) → 10%.
let mut tp = ThreadPoolTracker::new(8);
tp.completed = 9;
tp.rejected = 1;
assert!((tp.rejection_rate() - 10.0).abs() < 0.01);
}
#[test]
fn f_tpool_012_clone() {
let mut tp = ThreadPoolTracker::new(8);
tp.submit();
let cloned = tp.clone();
assert_eq!(tp.queued, cloned.queued);
}
}
/// Aggregates read/write operation counts, byte volumes, cumulative
/// latency, and error counts for an I/O channel.
#[derive(Debug, Clone)]
pub struct IoCostTracker {
    pub reads: u64,
    pub writes: u64,
    pub read_bytes: u64,
    pub write_bytes: u64,
    pub total_latency_us: u64,
    pub errors: u64,
}

impl Default for IoCostTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl IoCostTracker {
    /// Creates a tracker with every counter at zero.
    #[must_use]
    pub fn new() -> Self {
        Self {
            reads: 0,
            writes: 0,
            read_bytes: 0,
            write_bytes: 0,
            total_latency_us: 0,
            errors: 0,
        }
    }

    /// Disk preset; currently identical to `new()`.
    #[must_use]
    pub fn for_disk() -> Self {
        Self::new()
    }

    /// Network preset; currently identical to `new()`.
    #[must_use]
    pub fn for_network() -> Self {
        Self::new()
    }

    /// Records one read of `bytes` that took `latency_us` microseconds.
    pub fn read(&mut self, bytes: u64, latency_us: u64) {
        self.total_latency_us += latency_us;
        self.read_bytes += bytes;
        self.reads += 1;
    }

    /// Records one write of `bytes` that took `latency_us` microseconds.
    pub fn write(&mut self, bytes: u64, latency_us: u64) {
        self.total_latency_us += latency_us;
        self.write_bytes += bytes;
        self.writes += 1;
    }

    /// Records a failed operation.
    pub fn error(&mut self) {
        self.errors += 1;
    }

    /// Reads plus writes.
    #[must_use]
    pub fn total_ops(&self) -> u64 {
        self.reads + self.writes
    }

    /// Bytes read plus bytes written.
    #[must_use]
    pub fn total_bytes(&self) -> u64 {
        self.read_bytes + self.write_bytes
    }

    /// Mean latency per operation in microseconds; 0 with no operations.
    #[must_use]
    pub fn avg_latency_us(&self) -> u64 {
        self.total_latency_us.checked_div(self.total_ops()).unwrap_or(0)
    }

    /// Reads as a percentage of all operations; 0.0 with no operations.
    #[must_use]
    pub fn read_ratio(&self) -> f64 {
        match self.total_ops() {
            0 => 0.0,
            ops => (self.reads as f64 / ops as f64) * 100.0,
        }
    }

    /// Errors as a percentage of all operations; 0.0 with no operations.
    #[must_use]
    pub fn error_rate(&self) -> f64 {
        match self.total_ops() {
            0 => 0.0,
            ops => (self.errors as f64 / ops as f64) * 100.0,
        }
    }

    /// Healthy means the error rate stays below 1%.
    #[must_use]
    pub fn is_healthy(&self) -> bool {
        self.error_rate() < 1.0
    }

    /// Clears every counter back to the freshly-constructed state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod io_cost_tests {
use super::*;
// Behavioral tests for IoCostTracker: read/write accounting, latency
// averaging, ratios, and the 1% error-rate health check.
#[test]
fn f_io_001_new() {
let io = IoCostTracker::new();
assert_eq!(io.total_ops(), 0);
}
#[test]
fn f_io_002_default() {
let io = IoCostTracker::default();
assert_eq!(io.total_ops(), 0);
}
#[test]
fn f_io_003_read() {
let mut io = IoCostTracker::new();
io.read(1024, 100);
assert_eq!(io.reads, 1);
assert_eq!(io.read_bytes, 1024);
}
#[test]
fn f_io_004_write() {
let mut io = IoCostTracker::new();
io.write(2048, 200);
assert_eq!(io.writes, 1);
assert_eq!(io.write_bytes, 2048);
}
#[test]
fn f_io_005_total_ops() {
let mut io = IoCostTracker::new();
io.read(1024, 100);
io.write(1024, 100);
assert_eq!(io.total_ops(), 2);
}
#[test]
fn f_io_006_avg_latency() {
let mut io = IoCostTracker::new();
io.read(1024, 100);
io.write(1024, 200);
assert_eq!(io.avg_latency_us(), 150);
}
#[test]
fn f_io_007_for_disk() {
let io = IoCostTracker::for_disk();
assert_eq!(io.total_ops(), 0);
}
#[test]
fn f_io_008_for_network() {
let io = IoCostTracker::for_network();
assert_eq!(io.total_ops(), 0);
}
#[test]
fn f_io_009_read_ratio() {
let mut io = IoCostTracker::new();
io.read(1024, 100);
io.write(1024, 100);
assert!((io.read_ratio() - 50.0).abs() < 0.01);
}
#[test]
fn f_io_010_error() {
let mut io = IoCostTracker::new();
io.error();
assert_eq!(io.errors, 1);
}
#[test]
fn f_io_011_healthy() {
// 100 reads with zero errors keeps the error rate below 1%.
let mut io = IoCostTracker::new();
io.reads = 100;
assert!(io.is_healthy());
}
#[test]
fn f_io_012_clone() {
let mut io = IoCostTracker::new();
io.read(1024, 100);
let cloned = io.clone();
assert_eq!(io.reads, cloned.reads);
}
}
/// Hit/miss/eviction accounting for a page cache; `total_pages` tracks
/// the number of pages currently resident.
#[derive(Debug, Clone)]
pub struct PageCacheTracker {
    pub hits: u64,
    pub misses: u64,
    pub evictions: u64,
    pub writebacks: u64,
    pub total_pages: u64,
}

impl Default for PageCacheTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl PageCacheTracker {
    /// Creates a tracker with every counter at zero.
    #[must_use]
    pub fn new() -> Self {
        Self {
            hits: 0,
            misses: 0,
            evictions: 0,
            writebacks: 0,
            total_pages: 0,
        }
    }

    /// File-cache preset; currently identical to `new()`.
    #[must_use]
    pub fn for_file_cache() -> Self {
        Self::new()
    }

    /// Memory-mapped preset; currently identical to `new()`.
    #[must_use]
    pub fn for_mmap() -> Self {
        Self::new()
    }

    /// Records a lookup served from the cache.
    pub fn hit(&mut self) {
        self.hits += 1;
    }

    /// Records a miss; the faulted-in page becomes resident.
    pub fn miss(&mut self) {
        self.total_pages += 1;
        self.misses += 1;
    }

    /// Records an eviction; the resident page count saturates at zero.
    pub fn evict(&mut self) {
        self.evictions += 1;
        self.total_pages = self.total_pages.saturating_sub(1);
    }

    /// Records a dirty page written back to storage.
    pub fn writeback(&mut self) {
        self.writebacks += 1;
    }

    /// Hits as a percentage of all lookups; 0.0 with no lookups.
    #[must_use]
    pub fn hit_rate(&self) -> f64 {
        match self.hits + self.misses {
            0 => 0.0,
            total => (self.hits as f64 / total as f64) * 100.0,
        }
    }

    /// Evictions as a percentage of all lookups; 0.0 with no lookups.
    #[must_use]
    pub fn eviction_rate(&self) -> f64 {
        match self.hits + self.misses {
            0 => 0.0,
            total => (self.evictions as f64 / total as f64) * 100.0,
        }
    }

    /// The cache is considered effective above an 80% hit rate.
    #[must_use]
    pub fn is_effective(&self) -> bool {
        self.hit_rate() > 80.0
    }

    /// Clears every counter back to the freshly-constructed state.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod page_cache_tests {
use super::*;
// Behavioral tests for PageCacheTracker: hit/miss/evict/writeback
// accounting and the 80% effectiveness threshold.
#[test]
fn f_pcache_001_new() {
let pc = PageCacheTracker::new();
assert_eq!(pc.hits, 0);
}
#[test]
fn f_pcache_002_default() {
let pc = PageCacheTracker::default();
assert_eq!(pc.hits, 0);
}
#[test]
fn f_pcache_003_hit() {
let mut pc = PageCacheTracker::new();
pc.hit();
assert_eq!(pc.hits, 1);
}
#[test]
fn f_pcache_004_miss() {
let mut pc = PageCacheTracker::new();
pc.miss();
assert_eq!(pc.misses, 1);
assert_eq!(pc.total_pages, 1);
}
#[test]
fn f_pcache_005_hit_rate() {
let mut pc = PageCacheTracker::new();
pc.hit();
pc.miss();
assert!((pc.hit_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_pcache_006_evict() {
let mut pc = PageCacheTracker::new();
pc.miss();
pc.evict();
assert_eq!(pc.evictions, 1);
}
#[test]
fn f_pcache_007_for_file_cache() {
let pc = PageCacheTracker::for_file_cache();
assert_eq!(pc.hits, 0);
}
#[test]
fn f_pcache_008_for_mmap() {
let pc = PageCacheTracker::for_mmap();
assert_eq!(pc.hits, 0);
}
#[test]
fn f_pcache_009_writeback() {
let mut pc = PageCacheTracker::new();
pc.writeback();
assert_eq!(pc.writebacks, 1);
}
#[test]
fn f_pcache_010_effective() {
// 9 hits out of 10 lookups (90%) clears the 80% effectiveness bar.
let mut pc = PageCacheTracker::new();
for _ in 0..9 {
pc.hit();
}
pc.miss();
assert!(pc.is_effective());
}
#[test]
fn f_pcache_011_reset() {
let mut pc = PageCacheTracker::new();
pc.hit();
pc.reset();
assert_eq!(pc.hits, 0);
}
#[test]
fn f_pcache_012_clone() {
let mut pc = PageCacheTracker::new();
pc.hit();
let cloned = pc.clone();
assert_eq!(pc.hits, cloned.hits);
}
}
/// Tracks checkout/return traffic for a fixed-capacity buffer pool.
#[derive(Debug, Clone)]
pub struct BufferPoolTracker {
    /// Number of buffers the pool can hand out before fresh allocations start.
    pub capacity: u32,
    /// Buffers currently checked out.
    pub allocated: u32,
    /// Checkouts served within capacity (counted as pool reuses).
    pub reuses: u64,
    /// Checkouts beyond capacity (counted as fresh allocations).
    pub allocations: u64,
    /// High-water mark of simultaneously checked-out buffers.
    pub peak_allocated: u32,
}

impl Default for BufferPoolTracker {
    /// Defaults to the small-pool preset (capacity 64).
    fn default() -> Self {
        Self::for_small()
    }
}

impl BufferPoolTracker {
    /// Creates a tracker for a pool holding `capacity` buffers.
    #[must_use]
    pub fn new(capacity: u32) -> Self {
        Self {
            capacity,
            allocated: 0,
            reuses: 0,
            allocations: 0,
            peak_allocated: 0,
        }
    }

    /// Preset: 64-buffer pool.
    #[must_use]
    pub fn for_small() -> Self {
        Self::new(64)
    }

    /// Preset: 1024-buffer pool.
    #[must_use]
    pub fn for_large() -> Self {
        Self::new(1024)
    }

    /// Records a checkout, classifying it as a reuse (within capacity) or a
    /// fresh allocation (overflow), and refreshes the peak.
    pub fn get(&mut self) {
        self.allocated += 1;
        self.peak_allocated = self.peak_allocated.max(self.allocated);
        if self.allocated <= self.capacity {
            self.reuses += 1;
        } else {
            self.allocations += 1;
        }
    }

    /// Records a buffer being returned (floored at zero).
    pub fn put(&mut self) {
        self.allocated = self.allocated.saturating_sub(1);
    }

    /// Percentage of capacity currently checked out; 0.0 when capacity is 0.
    #[must_use]
    pub fn utilization(&self) -> f64 {
        if self.capacity == 0 {
            0.0
        } else {
            (self.allocated as f64 / self.capacity as f64) * 100.0
        }
    }

    /// Percentage of checkouts served from the pool; 0.0 before any checkout.
    #[must_use]
    pub fn reuse_rate(&self) -> f64 {
        match self.reuses + self.allocations {
            0 => 0.0,
            total => (self.reuses as f64 / total as f64) * 100.0,
        }
    }

    /// True when more than 90% of checkouts were reuses.
    #[must_use]
    pub fn is_efficient(&self) -> bool {
        self.reuse_rate() > 90.0
    }

    /// True when demand has ever exceeded the configured capacity.
    #[must_use]
    pub fn needs_expansion(&self) -> bool {
        self.peak_allocated > self.capacity
    }

    /// Clears all counters; the configured capacity is preserved.
    pub fn reset(&mut self) {
        self.allocated = 0;
        self.reuses = 0;
        self.allocations = 0;
        self.peak_allocated = 0;
    }
}
#[cfg(test)]
mod buffer_pool_tests {
// Behavioral spec for BufferPoolTracker: presets, checkout accounting, rates.
use super::*;
#[test]
fn f_bpool_001_new() {
let bp = BufferPoolTracker::new(100);
assert_eq!(bp.capacity, 100);
}
#[test]
fn f_bpool_002_default() {
let bp = BufferPoolTracker::default();
assert_eq!(bp.capacity, 64);
}
#[test]
fn f_bpool_003_get() {
let mut bp = BufferPoolTracker::new(100);
bp.get();
assert_eq!(bp.allocated, 1);
}
#[test]
fn f_bpool_004_put() {
let mut bp = BufferPoolTracker::new(100);
bp.get();
bp.put();
assert_eq!(bp.allocated, 0);
}
#[test]
fn f_bpool_005_utilization() {
let mut bp = BufferPoolTracker::new(100);
for _ in 0..50 {
bp.get();
}
assert!((bp.utilization() - 50.0).abs() < 0.01);
}
#[test]
fn f_bpool_006_reuse() {
let mut bp = BufferPoolTracker::new(100);
bp.get();
assert_eq!(bp.reuses, 1);
}
#[test]
fn f_bpool_007_for_small() {
let bp = BufferPoolTracker::for_small();
assert_eq!(bp.capacity, 64);
}
#[test]
fn f_bpool_008_for_large() {
let bp = BufferPoolTracker::for_large();
assert_eq!(bp.capacity, 1024);
}
#[test]
fn f_bpool_009_reuse_rate() {
let mut bp = BufferPoolTracker::new(10);
for _ in 0..10 {
bp.get();
}
assert!((bp.reuse_rate() - 100.0).abs() < 0.01);
}
#[test]
fn f_bpool_010_needs_expansion() {
let mut bp = BufferPoolTracker::new(10);
for _ in 0..15 {
bp.get();
}
assert!(bp.needs_expansion());
}
#[test]
fn f_bpool_011_reset() {
let mut bp = BufferPoolTracker::new(100);
bp.get();
bp.reset();
assert_eq!(bp.allocated, 0);
}
#[test]
fn f_bpool_012_clone() {
let mut bp = BufferPoolTracker::new(100);
bp.get();
let cloned = bp.clone();
assert_eq!(bp.allocated, cloned.allocated);
}
}
/// Lifecycle counters for async tasks: spawn → start → complete | fail.
#[derive(Debug, Clone)]
pub struct AsyncTaskTracker {
    /// Tasks spawned but not yet started.
    pub pending: u64,
    /// Tasks currently executing.
    pub running: u64,
    /// Tasks that finished successfully.
    pub completed: u64,
    /// Tasks that finished with an error.
    pub failed: u64,
    /// High-water mark of `pending + running`, sampled on each start.
    pub peak_concurrent: u64,
}

impl Default for AsyncTaskTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl AsyncTaskTracker {
    /// Creates an empty tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            pending: 0,
            running: 0,
            completed: 0,
            failed: 0,
            peak_concurrent: 0,
        }
    }

    /// Preset for I/O-bound task pools (no preset-specific state).
    #[must_use]
    pub fn for_io() -> Self {
        Self::new()
    }

    /// Preset for CPU-bound task pools (no preset-specific state).
    #[must_use]
    pub fn for_cpu() -> Self {
        Self::new()
    }

    /// Records a newly spawned (queued) task.
    pub fn spawn(&mut self) {
        self.pending += 1;
    }

    /// Moves one task from pending to running and refreshes the concurrency peak.
    pub fn start(&mut self) {
        self.pending = self.pending.saturating_sub(1);
        self.running += 1;
        self.peak_concurrent = self.peak_concurrent.max(self.pending + self.running);
    }

    /// Records a running task finishing successfully.
    pub fn complete(&mut self) {
        self.running = self.running.saturating_sub(1);
        self.completed += 1;
    }

    /// Records a running task finishing with an error.
    pub fn fail(&mut self) {
        self.running = self.running.saturating_sub(1);
        self.failed += 1;
    }

    /// Completions as a percentage of finished tasks; 0.0 when none finished.
    #[must_use]
    pub fn success_rate(&self) -> f64 {
        match self.completed + self.failed {
            0 => 0.0,
            finished => (self.completed as f64 / finished as f64) * 100.0,
        }
    }

    /// Tasks not yet finished: queued plus running.
    #[must_use]
    pub fn active(&self) -> u64 {
        self.pending + self.running
    }

    /// True when more than 95% of finished tasks succeeded.
    #[must_use]
    pub fn is_healthy(&self) -> bool {
        self.success_rate() > 95.0
    }

    /// Zeroes every counter.
    pub fn reset(&mut self) {
        self.pending = 0;
        self.running = 0;
        self.completed = 0;
        self.failed = 0;
        self.peak_concurrent = 0;
    }
}
#[cfg(test)]
mod async_task_tests {
// Behavioral spec for AsyncTaskTracker: lifecycle transitions and rates.
use super::*;
#[test]
fn f_async_001_new() {
let at = AsyncTaskTracker::new();
assert_eq!(at.active(), 0);
}
#[test]
fn f_async_002_default() {
let at = AsyncTaskTracker::default();
assert_eq!(at.active(), 0);
}
#[test]
fn f_async_003_spawn() {
let mut at = AsyncTaskTracker::new();
at.spawn();
assert_eq!(at.pending, 1);
}
#[test]
fn f_async_004_start() {
let mut at = AsyncTaskTracker::new();
at.spawn();
at.start();
assert_eq!(at.pending, 0);
assert_eq!(at.running, 1);
}
#[test]
fn f_async_005_complete() {
let mut at = AsyncTaskTracker::new();
at.spawn();
at.start();
at.complete();
assert_eq!(at.running, 0);
assert_eq!(at.completed, 1);
}
#[test]
fn f_async_006_fail() {
let mut at = AsyncTaskTracker::new();
at.spawn();
at.start();
at.fail();
assert_eq!(at.running, 0);
assert_eq!(at.failed, 1);
}
#[test]
fn f_async_007_for_io() {
let at = AsyncTaskTracker::for_io();
assert_eq!(at.active(), 0);
}
#[test]
fn f_async_008_for_cpu() {
let at = AsyncTaskTracker::for_cpu();
assert_eq!(at.active(), 0);
}
#[test]
fn f_async_009_success_rate() {
let mut at = AsyncTaskTracker::new();
at.completed = 9;
at.failed = 1;
assert!((at.success_rate() - 90.0).abs() < 0.01);
}
#[test]
fn f_async_010_healthy() {
let mut at = AsyncTaskTracker::new();
at.completed = 100;
assert!(at.is_healthy());
}
#[test]
fn f_async_011_reset() {
let mut at = AsyncTaskTracker::new();
at.spawn();
at.reset();
assert_eq!(at.pending, 0);
}
#[test]
fn f_async_012_clone() {
let mut at = AsyncTaskTracker::new();
at.spawn();
let cloned = at.clone();
assert_eq!(at.pending, cloned.pending);
}
}
/// Counts voluntary vs. involuntary context switches, with per-interval peaks.
#[derive(Debug, Clone)]
pub struct ContextSwitchTracker {
    /// Switches where the task yielded on its own.
    pub voluntary: u64,
    /// Switches forced by the scheduler (preemption).
    pub involuntary: u64,
    /// All switches, voluntary plus involuntary.
    pub total: u64,
    /// Largest switch count observed in any completed interval.
    pub peak_rate: u64,
    /// Switches accumulated in the interval currently being measured.
    pub last_interval: u64,
}

impl Default for ContextSwitchTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl ContextSwitchTracker {
    /// Creates an empty tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            voluntary: 0,
            involuntary: 0,
            total: 0,
            peak_rate: 0,
            last_interval: 0,
        }
    }

    /// Preset for process-level accounting (no preset-specific state).
    #[must_use]
    pub fn for_process() -> Self {
        Self::new()
    }

    /// Preset for thread-level accounting (no preset-specific state).
    #[must_use]
    pub fn for_thread() -> Self {
        Self::new()
    }

    /// Bookkeeping shared by both switch kinds.
    fn record(&mut self) {
        self.total += 1;
        self.last_interval += 1;
    }

    /// Records a switch where the task yielded voluntarily.
    pub fn voluntary_switch(&mut self) {
        self.voluntary += 1;
        self.record();
    }

    /// Records a scheduler-forced (preemptive) switch.
    pub fn involuntary_switch(&mut self) {
        self.involuntary += 1;
        self.record();
    }

    /// Closes the current measurement interval, folding its count into the peak.
    pub fn end_interval(&mut self) {
        self.peak_rate = self.peak_rate.max(self.last_interval);
        self.last_interval = 0;
    }

    /// Voluntary switches as a percentage of all switches; 0.0 when none recorded.
    #[must_use]
    pub fn voluntary_rate(&self) -> f64 {
        if self.total == 0 {
            0.0
        } else {
            (self.voluntary as f64 / self.total as f64) * 100.0
        }
    }

    /// True when fewer than 70% of recorded switches were voluntary.
    #[must_use]
    pub fn has_preemption_issue(&self) -> bool {
        self.total > 0 && self.voluntary_rate() < 70.0
    }

    /// Switch count of the open (not yet closed) interval.
    #[must_use]
    pub fn rate(&self) -> u64 {
        self.last_interval
    }

    /// Zeroes every counter, including the interval peak.
    pub fn reset(&mut self) {
        self.voluntary = 0;
        self.involuntary = 0;
        self.total = 0;
        self.peak_rate = 0;
        self.last_interval = 0;
    }
}
#[cfg(test)]
mod context_switch_tests {
// Behavioral spec for ContextSwitchTracker: counters, intervals, preemption check.
use super::*;
#[test]
fn f_ctxsw_001_new() {
let cs = ContextSwitchTracker::new();
assert_eq!(cs.total, 0);
}
#[test]
fn f_ctxsw_002_default() {
let cs = ContextSwitchTracker::default();
assert_eq!(cs.total, 0);
}
#[test]
fn f_ctxsw_003_voluntary() {
let mut cs = ContextSwitchTracker::new();
cs.voluntary_switch();
assert_eq!(cs.voluntary, 1);
assert_eq!(cs.total, 1);
}
#[test]
fn f_ctxsw_004_involuntary() {
let mut cs = ContextSwitchTracker::new();
cs.involuntary_switch();
assert_eq!(cs.involuntary, 1);
assert_eq!(cs.total, 1);
}
#[test]
fn f_ctxsw_005_voluntary_rate() {
let mut cs = ContextSwitchTracker::new();
cs.voluntary_switch();
cs.involuntary_switch();
assert!((cs.voluntary_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_ctxsw_006_end_interval() {
let mut cs = ContextSwitchTracker::new();
cs.voluntary_switch();
cs.voluntary_switch();
cs.end_interval();
assert_eq!(cs.peak_rate, 2);
assert_eq!(cs.last_interval, 0);
}
#[test]
fn f_ctxsw_007_for_process() {
let cs = ContextSwitchTracker::for_process();
assert_eq!(cs.total, 0);
}
#[test]
fn f_ctxsw_008_for_thread() {
let cs = ContextSwitchTracker::for_thread();
assert_eq!(cs.total, 0);
}
#[test]
fn f_ctxsw_009_preemption() {
let mut cs = ContextSwitchTracker::new();
cs.voluntary = 3;
cs.involuntary = 7;
cs.total = 10;
assert!(cs.has_preemption_issue());
}
#[test]
fn f_ctxsw_010_rate() {
let mut cs = ContextSwitchTracker::new();
cs.voluntary_switch();
assert_eq!(cs.rate(), 1);
}
#[test]
fn f_ctxsw_011_reset() {
let mut cs = ContextSwitchTracker::new();
cs.voluntary_switch();
cs.reset();
assert_eq!(cs.total, 0);
}
#[test]
fn f_ctxsw_012_clone() {
let mut cs = ContextSwitchTracker::new();
cs.voluntary_switch();
let cloned = cs.clone();
assert_eq!(cs.total, cloned.total);
}
}
/// Byte- and event-level accounting for heap allocation and fragmentation.
#[derive(Debug, Clone)]
pub struct HeapFragmentationTracker {
    /// Total bytes ever allocated.
    pub allocated: u64,
    /// Total bytes ever freed.
    pub freed: u64,
    /// Number of allocation calls.
    pub allocations: u64,
    /// Number of free calls.
    pub frees: u64,
    /// High-water mark of live bytes (allocated minus freed).
    pub peak_allocated: u64,
    /// Observed fragmentation events.
    pub fragmentation_events: u64,
}

impl Default for HeapFragmentationTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl HeapFragmentationTracker {
    /// Creates an empty tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            allocated: 0,
            freed: 0,
            allocations: 0,
            frees: 0,
            peak_allocated: 0,
            fragmentation_events: 0,
        }
    }

    /// Preset for a jemalloc-backed heap (no preset-specific state).
    #[must_use]
    pub fn for_jemalloc() -> Self {
        Self::new()
    }

    /// Preset for the system allocator (no preset-specific state).
    #[must_use]
    pub fn for_system() -> Self {
        Self::new()
    }

    /// Records an allocation of `bytes` and refreshes the live-bytes peak.
    pub fn allocate(&mut self, bytes: u64) {
        self.allocated += bytes;
        self.allocations += 1;
        self.peak_allocated = self.peak_allocated.max(self.in_use());
    }

    /// Records a free of `bytes`.
    pub fn free(&mut self, bytes: u64) {
        self.freed += bytes;
        self.frees += 1;
    }

    /// Records one fragmentation event.
    pub fn fragment(&mut self) {
        self.fragmentation_events += 1;
    }

    /// Live bytes: allocated minus freed, floored at zero.
    #[must_use]
    pub fn in_use(&self) -> u64 {
        self.allocated.saturating_sub(self.freed)
    }

    /// Fragmentation events per allocation as a percentage; 0.0 with no allocations.
    #[must_use]
    pub fn fragmentation_rate(&self) -> f64 {
        match self.allocations {
            0 => 0.0,
            n => (self.fragmentation_events as f64 / n as f64) * 100.0,
        }
    }

    /// True when more than 5% of allocations produced fragmentation events.
    #[must_use]
    pub fn is_fragmented(&self) -> bool {
        self.fragmentation_rate() > 5.0
    }

    /// Zeroes every counter.
    pub fn reset(&mut self) {
        self.allocated = 0;
        self.freed = 0;
        self.allocations = 0;
        self.frees = 0;
        self.peak_allocated = 0;
        self.fragmentation_events = 0;
    }
}
#[cfg(test)]
mod heap_frag_tests {
// Behavioral spec for HeapFragmentationTracker: byte accounting and rates.
use super::*;
#[test]
fn f_heap_001_new() {
let hf = HeapFragmentationTracker::new();
assert_eq!(hf.in_use(), 0);
}
#[test]
fn f_heap_002_default() {
let hf = HeapFragmentationTracker::default();
assert_eq!(hf.in_use(), 0);
}
#[test]
fn f_heap_003_allocate() {
let mut hf = HeapFragmentationTracker::new();
hf.allocate(1024);
assert_eq!(hf.allocated, 1024);
assert_eq!(hf.allocations, 1);
}
#[test]
fn f_heap_004_free() {
let mut hf = HeapFragmentationTracker::new();
hf.allocate(1024);
hf.free(512);
assert_eq!(hf.in_use(), 512);
}
#[test]
fn f_heap_005_peak() {
let mut hf = HeapFragmentationTracker::new();
hf.allocate(1024);
hf.free(512);
hf.allocate(256);
assert_eq!(hf.peak_allocated, 1024);
}
#[test]
fn f_heap_006_fragment() {
let mut hf = HeapFragmentationTracker::new();
hf.fragment();
assert_eq!(hf.fragmentation_events, 1);
}
#[test]
fn f_heap_007_for_jemalloc() {
let hf = HeapFragmentationTracker::for_jemalloc();
assert_eq!(hf.in_use(), 0);
}
#[test]
fn f_heap_008_for_system() {
let hf = HeapFragmentationTracker::for_system();
assert_eq!(hf.in_use(), 0);
}
#[test]
fn f_heap_009_frag_rate() {
let mut hf = HeapFragmentationTracker::new();
hf.allocations = 100;
hf.fragmentation_events = 10;
assert!((hf.fragmentation_rate() - 10.0).abs() < 0.01);
}
#[test]
fn f_heap_010_is_fragmented() {
let mut hf = HeapFragmentationTracker::new();
hf.allocations = 100;
hf.fragmentation_events = 10;
assert!(hf.is_fragmented());
}
#[test]
fn f_heap_011_reset() {
let mut hf = HeapFragmentationTracker::new();
hf.allocate(1024);
hf.reset();
assert_eq!(hf.in_use(), 0);
}
#[test]
fn f_heap_012_clone() {
let mut hf = HeapFragmentationTracker::new();
hf.allocate(1024);
let cloned = hf.clone();
assert_eq!(hf.allocated, cloned.allocated);
}
}
/// Tracks call-stack depth against a configurable warning threshold.
#[derive(Debug, Clone)]
pub struct StackDepthTracker {
    /// Current nesting depth.
    pub depth: u32,
    /// Deepest nesting observed.
    pub peak_depth: u32,
    /// Total number of `enter` calls.
    pub calls: u64,
    /// Number of enters that pushed depth beyond the threshold.
    pub warnings: u64,
    /// Depth at which enters start raising warnings.
    pub threshold: u32,
}

impl Default for StackDepthTracker {
    /// Defaults to the standard preset (threshold 100).
    fn default() -> Self {
        Self::for_default()
    }
}

impl StackDepthTracker {
    /// Creates a tracker warning above `threshold` nested calls.
    #[must_use]
    pub fn new(threshold: u32) -> Self {
        Self {
            depth: 0,
            peak_depth: 0,
            calls: 0,
            warnings: 0,
            threshold,
        }
    }

    /// Preset: warn above 100 frames.
    #[must_use]
    pub fn for_default() -> Self {
        Self::new(100)
    }

    /// Preset: warn above 1000 frames (deep-recursion workloads).
    #[must_use]
    pub fn for_deep() -> Self {
        Self::new(1000)
    }

    /// Records entering one frame: bumps depth/calls, refreshes the peak,
    /// and raises a warning when the threshold is exceeded.
    pub fn enter(&mut self) {
        self.depth += 1;
        self.calls += 1;
        self.peak_depth = self.peak_depth.max(self.depth);
        if self.depth > self.threshold {
            self.warnings += 1;
        }
    }

    /// Records leaving one frame (floored at zero).
    pub fn exit(&mut self) {
        self.depth = self.depth.saturating_sub(1);
    }

    /// Current nesting depth.
    #[must_use]
    pub fn current(&self) -> u32 {
        self.depth
    }

    /// Current depth as a percentage of the threshold; 0.0 when threshold is 0.
    #[must_use]
    pub fn utilization(&self) -> f64 {
        if self.threshold == 0 {
            0.0
        } else {
            (self.depth as f64 / self.threshold as f64) * 100.0
        }
    }

    /// True when the stack is more than 80% of the way to the threshold.
    #[must_use]
    pub fn is_at_risk(&self) -> bool {
        self.utilization() > 80.0
    }

    /// Clears all counters; the configured threshold is preserved.
    pub fn reset(&mut self) {
        self.depth = 0;
        self.peak_depth = 0;
        self.calls = 0;
        self.warnings = 0;
    }
}
#[cfg(test)]
mod stack_depth_tests {
// Behavioral spec for StackDepthTracker: depth accounting and thresholding.
use super::*;
#[test]
fn f_stack_001_new() {
let sd = StackDepthTracker::new(100);
assert_eq!(sd.threshold, 100);
}
#[test]
fn f_stack_002_default() {
let sd = StackDepthTracker::default();
assert_eq!(sd.threshold, 100);
}
#[test]
fn f_stack_003_enter() {
let mut sd = StackDepthTracker::new(100);
sd.enter();
assert_eq!(sd.depth, 1);
assert_eq!(sd.calls, 1);
}
#[test]
fn f_stack_004_exit() {
let mut sd = StackDepthTracker::new(100);
sd.enter();
sd.exit();
assert_eq!(sd.depth, 0);
}
#[test]
fn f_stack_005_peak() {
let mut sd = StackDepthTracker::new(100);
sd.enter();
sd.enter();
sd.exit();
assert_eq!(sd.peak_depth, 2);
}
#[test]
fn f_stack_006_warning() {
let mut sd = StackDepthTracker::new(2);
sd.enter();
sd.enter();
sd.enter();
assert_eq!(sd.warnings, 1);
}
#[test]
fn f_stack_007_for_default() {
let sd = StackDepthTracker::for_default();
assert_eq!(sd.threshold, 100);
}
#[test]
fn f_stack_008_for_deep() {
let sd = StackDepthTracker::for_deep();
assert_eq!(sd.threshold, 1000);
}
#[test]
fn f_stack_009_utilization() {
let mut sd = StackDepthTracker::new(100);
for _ in 0..50 {
sd.enter();
}
assert!((sd.utilization() - 50.0).abs() < 0.01);
}
#[test]
fn f_stack_010_at_risk() {
let mut sd = StackDepthTracker::new(100);
for _ in 0..85 {
sd.enter();
}
assert!(sd.is_at_risk());
}
#[test]
fn f_stack_011_reset() {
let mut sd = StackDepthTracker::new(100);
sd.enter();
sd.reset();
assert_eq!(sd.depth, 0);
}
#[test]
fn f_stack_012_clone() {
let mut sd = StackDepthTracker::new(100);
sd.enter();
let cloned = sd.clone();
assert_eq!(sd.depth, cloned.depth);
}
}
/// Counts syscalls by class (read/write/other) with aggregate latency and errors.
#[derive(Debug, Clone)]
pub struct SyscallTracker {
    /// All syscalls recorded.
    pub total: u64,
    /// Read-class syscalls.
    pub reads: u64,
    /// Write-class syscalls.
    pub writes: u64,
    /// Syscalls outside the read/write classes.
    /// NOTE: deliberately shares its name with the `other()` method; both are
    /// public interface and kept as-is for compatibility.
    pub other: u64,
    /// Sum of recorded latencies, in microseconds.
    pub total_latency_us: u64,
    /// Syscalls that returned an error.
    pub errors: u64,
}

impl Default for SyscallTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl SyscallTracker {
    /// Creates an empty tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            total: 0,
            reads: 0,
            writes: 0,
            other: 0,
            total_latency_us: 0,
            errors: 0,
        }
    }

    /// Preset for I/O-heavy workloads (no preset-specific state).
    #[must_use]
    pub fn for_io() -> Self {
        Self::new()
    }

    /// Preset for general workloads (no preset-specific state).
    #[must_use]
    pub fn for_general() -> Self {
        Self::new()
    }

    /// Bookkeeping shared by every syscall class.
    fn record(&mut self, latency_us: u64) {
        self.total += 1;
        self.total_latency_us += latency_us;
    }

    /// Records a read syscall taking `latency_us` microseconds.
    pub fn read(&mut self, latency_us: u64) {
        self.record(latency_us);
        self.reads += 1;
    }

    /// Records a write syscall taking `latency_us` microseconds.
    pub fn write(&mut self, latency_us: u64) {
        self.record(latency_us);
        self.writes += 1;
    }

    /// Records a non-read/write syscall taking `latency_us` microseconds.
    pub fn other(&mut self, latency_us: u64) {
        self.record(latency_us);
        self.other += 1;
    }

    /// Records a syscall error; does not touch the latency totals.
    pub fn error(&mut self) {
        self.errors += 1;
    }

    /// Integer mean latency in microseconds; 0 before any syscall.
    #[must_use]
    pub fn avg_latency_us(&self) -> u64 {
        match self.total {
            0 => 0,
            n => self.total_latency_us / n,
        }
    }

    /// Reads plus writes as a percentage of all syscalls; 0.0 before any syscall.
    #[must_use]
    pub fn io_percentage(&self) -> f64 {
        if self.total == 0 {
            0.0
        } else {
            ((self.reads + self.writes) as f64 / self.total as f64) * 100.0
        }
    }

    /// Errors as a percentage of all syscalls; 0.0 before any syscall.
    #[must_use]
    pub fn error_rate(&self) -> f64 {
        if self.total == 0 {
            0.0
        } else {
            (self.errors as f64 / self.total as f64) * 100.0
        }
    }

    /// Zeroes every counter.
    pub fn reset(&mut self) {
        self.total = 0;
        self.reads = 0;
        self.writes = 0;
        self.other = 0;
        self.total_latency_us = 0;
        self.errors = 0;
    }
}
#[cfg(test)]
mod syscall_tests {
// Behavioral spec for SyscallTracker: class counters, latency, and rates.
use super::*;
#[test]
fn f_syscall_001_new() {
let sc = SyscallTracker::new();
assert_eq!(sc.total, 0);
}
#[test]
fn f_syscall_002_default() {
let sc = SyscallTracker::default();
assert_eq!(sc.total, 0);
}
#[test]
fn f_syscall_003_read() {
let mut sc = SyscallTracker::new();
sc.read(100);
assert_eq!(sc.reads, 1);
assert_eq!(sc.total, 1);
}
#[test]
fn f_syscall_004_write() {
let mut sc = SyscallTracker::new();
sc.write(100);
assert_eq!(sc.writes, 1);
assert_eq!(sc.total, 1);
}
#[test]
fn f_syscall_005_other() {
let mut sc = SyscallTracker::new();
sc.other(100);
assert_eq!(sc.other, 1);
assert_eq!(sc.total, 1);
}
#[test]
fn f_syscall_006_avg_latency() {
let mut sc = SyscallTracker::new();
sc.read(100);
sc.write(200);
assert_eq!(sc.avg_latency_us(), 150);
}
#[test]
fn f_syscall_007_for_io() {
let sc = SyscallTracker::for_io();
assert_eq!(sc.total, 0);
}
#[test]
fn f_syscall_008_for_general() {
let sc = SyscallTracker::for_general();
assert_eq!(sc.total, 0);
}
#[test]
fn f_syscall_009_io_percentage() {
let mut sc = SyscallTracker::new();
sc.read(100);
sc.write(100);
sc.other(100);
sc.other(100);
assert!((sc.io_percentage() - 50.0).abs() < 0.01);
}
#[test]
fn f_syscall_010_error() {
let mut sc = SyscallTracker::new();
sc.error();
assert_eq!(sc.errors, 1);
}
#[test]
fn f_syscall_011_reset() {
let mut sc = SyscallTracker::new();
sc.read(100);
sc.reset();
assert_eq!(sc.total, 0);
}
#[test]
fn f_syscall_012_clone() {
let mut sc = SyscallTracker::new();
sc.read(100);
let cloned = sc.clone();
assert_eq!(sc.total, cloned.total);
}
}
/// Counts delivered signals by disposition: handled, ignored, or fatal.
#[derive(Debug, Clone)]
pub struct SignalTracker {
    /// All signals delivered.
    pub received: u64,
    /// Signals dispatched to a handler.
    pub handled: u64,
    /// Signals deliberately ignored.
    pub ignored: u64,
    /// Signals treated as fatal.
    /// NOTE: deliberately shares its name with the `fatal()` method; both are
    /// public interface and kept as-is for compatibility.
    pub fatal: u64,
    /// Number of the most recently delivered signal (0 when none yet).
    pub last_signal: u32,
}

impl Default for SignalTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl SignalTracker {
    /// Creates an empty tracker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            received: 0,
            handled: 0,
            ignored: 0,
            fatal: 0,
            last_signal: 0,
        }
    }

    /// Preset for an ordinary process (no preset-specific state).
    #[must_use]
    pub fn for_process() -> Self {
        Self::new()
    }

    /// Preset for a daemon (no preset-specific state).
    #[must_use]
    pub fn for_daemon() -> Self {
        Self::new()
    }

    /// Bookkeeping common to every delivered signal.
    fn record(&mut self, signal: u32) {
        self.received += 1;
        self.last_signal = signal;
    }

    /// Records `signal` being dispatched to a handler.
    pub fn handle(&mut self, signal: u32) {
        self.record(signal);
        self.handled += 1;
    }

    /// Records `signal` being deliberately ignored.
    pub fn ignore(&mut self, signal: u32) {
        self.record(signal);
        self.ignored += 1;
    }

    /// Records `signal` being treated as fatal.
    pub fn fatal(&mut self, signal: u32) {
        self.record(signal);
        self.fatal += 1;
    }

    /// Handled signals as a percentage of all received; 0.0 before any signal.
    #[must_use]
    pub fn handling_rate(&self) -> f64 {
        match self.received {
            0 => 0.0,
            n => (self.handled as f64 / n as f64) * 100.0,
        }
    }

    /// True when at least one fatal signal was recorded.
    #[must_use]
    pub fn has_fatal(&self) -> bool {
        self.fatal > 0
    }

    /// Total signals received.
    #[must_use]
    pub fn total(&self) -> u64 {
        self.received
    }

    /// Zeroes every counter, including the last-signal number.
    pub fn reset(&mut self) {
        self.received = 0;
        self.handled = 0;
        self.ignored = 0;
        self.fatal = 0;
        self.last_signal = 0;
    }
}
#[cfg(test)]
mod signal_tests {
// Behavioral spec for SignalTracker: dispositions, last-signal, handling rate.
use super::*;
#[test]
fn f_signal_001_new() {
let sig = SignalTracker::new();
assert_eq!(sig.total(), 0);
}
#[test]
fn f_signal_002_default() {
let sig = SignalTracker::default();
assert_eq!(sig.total(), 0);
}
#[test]
fn f_signal_003_handle() {
let mut sig = SignalTracker::new();
sig.handle(15); assert_eq!(sig.handled, 1);
assert_eq!(sig.received, 1);
}
#[test]
fn f_signal_004_ignore() {
let mut sig = SignalTracker::new();
sig.ignore(1); assert_eq!(sig.ignored, 1);
assert_eq!(sig.received, 1);
}
#[test]
fn f_signal_005_fatal() {
let mut sig = SignalTracker::new();
sig.fatal(9); assert_eq!(sig.fatal, 1);
assert!(sig.has_fatal());
}
#[test]
fn f_signal_006_handling_rate() {
let mut sig = SignalTracker::new();
sig.handle(15);
sig.ignore(1);
assert!((sig.handling_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_signal_007_for_process() {
let sig = SignalTracker::for_process();
assert_eq!(sig.total(), 0);
}
#[test]
fn f_signal_008_for_daemon() {
let sig = SignalTracker::for_daemon();
assert_eq!(sig.total(), 0);
}
#[test]
fn f_signal_009_last_signal() {
let mut sig = SignalTracker::new();
sig.handle(15);
assert_eq!(sig.last_signal, 15);
}
#[test]
fn f_signal_010_has_fatal() {
let mut sig = SignalTracker::new();
sig.handle(15);
assert!(!sig.has_fatal());
}
#[test]
fn f_signal_011_reset() {
let mut sig = SignalTracker::new();
sig.handle(15);
sig.reset();
assert_eq!(sig.total(), 0);
}
#[test]
fn f_signal_012_clone() {
let mut sig = SignalTracker::new();
sig.handle(15);
let cloned = sig.clone();
assert_eq!(sig.received, cloned.received);
}
}
/// Counters for futex traffic: waits, wakes, requeues, and timeouts.
#[derive(Debug, Clone)]
pub struct FutexTracker {
    /// Wait operations recorded.
    pub waits: u64,
    /// Wake operations recorded.
    pub wakes: u64,
    /// Requeue operations recorded.
    pub requeues: u64,
    /// Waits that ended in a timeout.
    pub timeouts: u64,
    /// Sum of wait durations, in microseconds.
    pub total_wait_us: u64,
    /// Largest waiter count passed to a single wake.
    pub peak_waiters: u64,
}

impl Default for FutexTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl FutexTracker {
    /// Creates an empty tracker (usable in const contexts).
    #[must_use]
    pub const fn new() -> Self {
        Self {
            waits: 0,
            wakes: 0,
            requeues: 0,
            timeouts: 0,
            total_wait_us: 0,
            peak_waiters: 0,
        }
    }

    /// Preset for mutex-backed futexes (no preset-specific state).
    #[must_use]
    pub const fn for_mutex() -> Self {
        Self::new()
    }

    /// Preset for condvar-backed futexes (no preset-specific state).
    #[must_use]
    pub const fn for_condvar() -> Self {
        Self::new()
    }

    /// Records one wait lasting `duration_us` microseconds.
    pub fn wait(&mut self, duration_us: u64) {
        self.waits += 1;
        self.total_wait_us += duration_us;
    }

    /// Records one wake releasing `count` waiters, refreshing the peak.
    pub fn wake(&mut self, count: u64) {
        self.wakes += 1;
        self.peak_waiters = self.peak_waiters.max(count);
    }

    /// Records one requeue operation.
    pub fn requeue(&mut self) {
        self.requeues += 1;
    }

    /// Records one wait ending in a timeout.
    pub fn timeout(&mut self) {
        self.timeouts += 1;
    }

    /// Integer mean wait duration in microseconds; 0 before any wait.
    #[must_use]
    pub fn avg_wait_us(&self) -> u64 {
        match self.waits {
            0 => 0,
            n => self.total_wait_us / n,
        }
    }

    /// Timeouts as a percentage of waits; 0.0 before any wait.
    #[must_use]
    pub fn timeout_rate(&self) -> f64 {
        match self.waits {
            0 => 0.0,
            n => (self.timeouts as f64 / n as f64) * 100.0,
        }
    }

    /// Total operations: waits + wakes + requeues (timeouts excluded).
    #[must_use]
    pub fn total(&self) -> u64 {
        self.waits + self.wakes + self.requeues
    }

    /// Zeroes every counter.
    pub fn reset(&mut self) {
        self.waits = 0;
        self.wakes = 0;
        self.requeues = 0;
        self.timeouts = 0;
        self.total_wait_us = 0;
        self.peak_waiters = 0;
    }
}
#[cfg(test)]
mod futex_tests {
// Behavioral spec for FutexTracker: operation counters, latency, timeout rate.
use super::*;
#[test]
fn f_futex_001_new() {
let ft = FutexTracker::new();
assert_eq!(ft.total(), 0);
}
#[test]
fn f_futex_002_default() {
let ft = FutexTracker::default();
assert_eq!(ft.total(), 0);
}
#[test]
fn f_futex_003_wait() {
let mut ft = FutexTracker::new();
ft.wait(100);
assert_eq!(ft.waits, 1);
assert_eq!(ft.total_wait_us, 100);
}
#[test]
fn f_futex_004_wake() {
let mut ft = FutexTracker::new();
ft.wake(5);
assert_eq!(ft.wakes, 1);
assert_eq!(ft.peak_waiters, 5);
}
#[test]
fn f_futex_005_requeue() {
let mut ft = FutexTracker::new();
ft.requeue();
assert_eq!(ft.requeues, 1);
}
#[test]
fn f_futex_006_timeout() {
let mut ft = FutexTracker::new();
ft.timeout();
assert_eq!(ft.timeouts, 1);
}
#[test]
fn f_futex_007_avg_wait() {
let mut ft = FutexTracker::new();
ft.wait(100);
ft.wait(200);
assert_eq!(ft.avg_wait_us(), 150);
}
#[test]
fn f_futex_008_timeout_rate() {
let mut ft = FutexTracker::new();
ft.wait(100);
ft.timeout();
ft.wait(100);
assert!((ft.timeout_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_futex_009_for_mutex() {
let ft = FutexTracker::for_mutex();
assert_eq!(ft.total(), 0);
}
#[test]
fn f_futex_010_for_condvar() {
let ft = FutexTracker::for_condvar();
assert_eq!(ft.total(), 0);
}
#[test]
fn f_futex_011_reset() {
let mut ft = FutexTracker::new();
ft.wait(100);
ft.reset();
assert_eq!(ft.total(), 0);
}
#[test]
fn f_futex_012_clone() {
let mut ft = FutexTracker::new();
ft.wait(100);
let cloned = ft.clone();
assert_eq!(ft.waits, cloned.waits);
}
}
/// Counters for epoll-style event loops: waits, delivered events, and timeouts.
#[derive(Debug, Clone)]
pub struct EpollTracker {
    /// Wait calls recorded.
    pub waits: u64,
    /// Events delivered across all waits.
    pub events: u64,
    /// Waits that returned zero events.
    pub empty_waits: u64,
    /// Timeouts recorded.
    pub timeouts: u64,
    /// Largest event batch returned by a single wait.
    pub peak_events: u64,
    /// Sum of wait durations, in microseconds.
    pub total_wait_us: u64,
}

impl Default for EpollTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl EpollTracker {
    /// Creates an empty tracker (usable in const contexts).
    #[must_use]
    pub const fn new() -> Self {
        Self {
            waits: 0,
            events: 0,
            empty_waits: 0,
            timeouts: 0,
            peak_events: 0,
            total_wait_us: 0,
        }
    }

    /// Preset for network event loops (no preset-specific state).
    #[must_use]
    pub const fn for_network() -> Self {
        Self::new()
    }

    /// Preset for file-I/O event loops (no preset-specific state).
    #[must_use]
    pub const fn for_file_io() -> Self {
        Self::new()
    }

    /// Records one wait that returned `event_count` events after `duration_us`
    /// microseconds, updating empty-wait and peak statistics.
    pub fn wait(&mut self, event_count: u64, duration_us: u64) {
        self.waits += 1;
        self.events += event_count;
        self.total_wait_us += duration_us;
        self.empty_waits += u64::from(event_count == 0);
        self.peak_events = self.peak_events.max(event_count);
    }

    /// Records one timed-out wait.
    pub fn timeout(&mut self) {
        self.timeouts += 1;
    }

    /// Mean events delivered per wait; 0.0 before any wait.
    #[must_use]
    pub fn avg_events_per_wait(&self) -> f64 {
        if self.waits == 0 {
            0.0
        } else {
            self.events as f64 / self.waits as f64
        }
    }

    /// Zero-event waits as a percentage of all waits; 0.0 before any wait.
    #[must_use]
    pub fn empty_rate(&self) -> f64 {
        if self.waits == 0 {
            0.0
        } else {
            (self.empty_waits as f64 / self.waits as f64) * 100.0
        }
    }

    /// Integer mean wait duration in microseconds; 0 before any wait.
    #[must_use]
    pub fn avg_wait_us(&self) -> u64 {
        match self.waits {
            0 => 0,
            n => self.total_wait_us / n,
        }
    }

    /// Zeroes every counter.
    pub fn reset(&mut self) {
        self.waits = 0;
        self.events = 0;
        self.empty_waits = 0;
        self.timeouts = 0;
        self.peak_events = 0;
        self.total_wait_us = 0;
    }
}
#[cfg(test)]
mod epoll_tests {
// Behavioral spec for EpollTracker: wait accounting, peaks, and rates.
use super::*;
#[test]
fn f_epoll_001_new() {
let ep = EpollTracker::new();
assert_eq!(ep.waits, 0);
}
#[test]
fn f_epoll_002_default() {
let ep = EpollTracker::default();
assert_eq!(ep.waits, 0);
}
#[test]
fn f_epoll_003_wait_events() {
let mut ep = EpollTracker::new();
ep.wait(5, 100);
assert_eq!(ep.events, 5);
assert_eq!(ep.waits, 1);
}
#[test]
fn f_epoll_004_empty_wait() {
let mut ep = EpollTracker::new();
ep.wait(0, 100);
assert_eq!(ep.empty_waits, 1);
}
#[test]
fn f_epoll_005_peak_events() {
let mut ep = EpollTracker::new();
ep.wait(5, 100);
ep.wait(10, 100);
ep.wait(3, 100);
assert_eq!(ep.peak_events, 10);
}
#[test]
fn f_epoll_006_timeout() {
let mut ep = EpollTracker::new();
ep.timeout();
assert_eq!(ep.timeouts, 1);
}
#[test]
fn f_epoll_007_avg_events() {
let mut ep = EpollTracker::new();
ep.wait(5, 100);
ep.wait(15, 100);
assert!((ep.avg_events_per_wait() - 10.0).abs() < 0.01);
}
#[test]
fn f_epoll_008_empty_rate() {
let mut ep = EpollTracker::new();
ep.wait(0, 100);
ep.wait(5, 100);
assert!((ep.empty_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_epoll_009_for_network() {
let ep = EpollTracker::for_network();
assert_eq!(ep.waits, 0);
}
#[test]
fn f_epoll_010_for_file_io() {
let ep = EpollTracker::for_file_io();
assert_eq!(ep.waits, 0);
}
#[test]
fn f_epoll_011_reset() {
let mut ep = EpollTracker::new();
ep.wait(5, 100);
ep.reset();
assert_eq!(ep.waits, 0);
}
#[test]
fn f_epoll_012_clone() {
let mut ep = EpollTracker::new();
ep.wait(5, 100);
let cloned = ep.clone();
assert_eq!(ep.events, cloned.events);
}
}
/// Counters for memory-mapping activity: maps, unmaps, bytes, and failures.
#[derive(Debug, Clone)]
pub struct MmapTracker {
    /// Mappings currently live.
    pub active: u64,
    /// Map operations recorded.
    pub maps: u64,
    /// Unmap operations recorded.
    pub unmaps: u64,
    /// Bytes currently mapped.
    pub mapped_bytes: u64,
    /// High-water mark of mapped bytes.
    pub peak_mapped_bytes: u64,
    /// Failed map attempts.
    pub failures: u64,
}

impl Default for MmapTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl MmapTracker {
    /// Creates an empty tracker (usable in const contexts).
    #[must_use]
    pub const fn new() -> Self {
        Self {
            active: 0,
            maps: 0,
            unmaps: 0,
            mapped_bytes: 0,
            peak_mapped_bytes: 0,
            failures: 0,
        }
    }

    /// Preset for file-backed mappings (no preset-specific state).
    #[must_use]
    pub const fn for_file() -> Self {
        Self::new()
    }

    /// Preset for anonymous mappings (no preset-specific state).
    #[must_use]
    pub const fn for_anonymous() -> Self {
        Self::new()
    }

    /// Records a successful mapping of `size` bytes, refreshing the byte peak.
    pub fn map(&mut self, size: u64) {
        self.maps += 1;
        self.active += 1;
        self.mapped_bytes += size;
        self.peak_mapped_bytes = self.peak_mapped_bytes.max(self.mapped_bytes);
    }

    /// Records an unmapping of `size` bytes (both counts floored at zero).
    pub fn unmap(&mut self, size: u64) {
        self.unmaps += 1;
        self.active = self.active.saturating_sub(1);
        self.mapped_bytes = self.mapped_bytes.saturating_sub(size);
    }

    /// Records a failed map attempt.
    pub fn failure(&mut self) {
        self.failures += 1;
    }

    /// Failures as a percentage of all map attempts; 0.0 before any attempt.
    #[must_use]
    pub fn failure_rate(&self) -> f64 {
        match self.maps + self.failures {
            0 => 0.0,
            total => (self.failures as f64 / total as f64) * 100.0,
        }
    }

    /// Heuristic leak check: maps exceed unmaps by more than 10.
    #[must_use]
    pub fn has_leak(&self) -> bool {
        self.maps > self.unmaps + 10
    }

    /// Zeroes every counter.
    pub fn reset(&mut self) {
        self.active = 0;
        self.maps = 0;
        self.unmaps = 0;
        self.mapped_bytes = 0;
        self.peak_mapped_bytes = 0;
        self.failures = 0;
    }
}
#[cfg(test)]
mod mmap_tests {
// Behavioral spec for MmapTracker: map/unmap accounting, peaks, leak heuristic.
use super::*;
#[test]
fn f_mmap_001_new() {
let mm = MmapTracker::new();
assert_eq!(mm.active, 0);
}
#[test]
fn f_mmap_002_default() {
let mm = MmapTracker::default();
assert_eq!(mm.active, 0);
}
#[test]
fn f_mmap_003_map() {
let mut mm = MmapTracker::new();
mm.map(4096);
assert_eq!(mm.maps, 1);
assert_eq!(mm.active, 1);
assert_eq!(mm.mapped_bytes, 4096);
}
#[test]
fn f_mmap_004_unmap() {
let mut mm = MmapTracker::new();
mm.map(4096);
mm.unmap(4096);
assert_eq!(mm.unmaps, 1);
assert_eq!(mm.active, 0);
}
#[test]
fn f_mmap_005_peak() {
let mut mm = MmapTracker::new();
mm.map(4096);
mm.map(4096);
mm.unmap(4096);
assert_eq!(mm.peak_mapped_bytes, 8192);
}
#[test]
fn f_mmap_006_failure() {
let mut mm = MmapTracker::new();
mm.failure();
assert_eq!(mm.failures, 1);
}
#[test]
fn f_mmap_007_failure_rate() {
let mut mm = MmapTracker::new();
mm.map(4096);
mm.failure();
assert!((mm.failure_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_mmap_008_leak() {
let mut mm = MmapTracker::new();
for _ in 0..20 {
mm.map(4096);
}
assert!(mm.has_leak());
}
#[test]
fn f_mmap_009_for_file() {
let mm = MmapTracker::for_file();
assert_eq!(mm.active, 0);
}
#[test]
fn f_mmap_010_for_anonymous() {
let mm = MmapTracker::for_anonymous();
assert_eq!(mm.active, 0);
}
#[test]
fn f_mmap_011_reset() {
let mut mm = MmapTracker::new();
mm.map(4096);
mm.reset();
assert_eq!(mm.active, 0);
}
#[test]
fn f_mmap_012_clone() {
let mut mm = MmapTracker::new();
mm.map(4096);
let cloned = mm.clone();
assert_eq!(mm.mapped_bytes, cloned.mapped_bytes);
}
}
/// Tracks cgroup limits (CPU shares, memory, I/O weight) and pressure events.
#[derive(Debug, Clone)]
pub struct CgroupTracker {
    /// CPU weight (cgroup default 1024).
    pub cpu_shares: u64,
    /// Memory ceiling in bytes; 0 means unlimited/unset.
    pub memory_limit: u64,
    /// Most recently observed memory usage in bytes.
    pub memory_usage: u64,
    /// CPU throttling events recorded.
    pub cpu_throttled: u64,
    /// Out-of-memory events recorded.
    pub oom_events: u64,
    /// I/O weight (default 100).
    pub io_weight: u64,
}

impl Default for CgroupTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl CgroupTracker {
    /// Creates a tracker with default weights and no memory limit.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            cpu_shares: 1024,
            memory_limit: 0,
            memory_usage: 0,
            cpu_throttled: 0,
            oom_events: 0,
            io_weight: 100,
        }
    }

    /// Preset for a container: defaults plus a 1 GiB memory limit.
    #[must_use]
    pub const fn for_container() -> Self {
        Self {
            cpu_shares: 1024,
            memory_limit: 1024 * 1024 * 1024,
            memory_usage: 0,
            cpu_throttled: 0,
            oom_events: 0,
            io_weight: 100,
        }
    }

    /// Preset for a service: identical to `new()`.
    #[must_use]
    pub const fn for_service() -> Self {
        Self::new()
    }

    /// Overwrites the CPU weight.
    pub fn set_cpu_shares(&mut self, shares: u64) {
        self.cpu_shares = shares;
    }

    /// Overwrites the memory ceiling (bytes).
    pub fn set_memory_limit(&mut self, limit: u64) {
        self.memory_limit = limit;
    }

    /// Records the latest observed memory usage (bytes).
    pub fn update_memory(&mut self, usage: u64) {
        self.memory_usage = usage;
    }

    /// Records one CPU throttling event.
    pub fn throttle(&mut self) {
        self.cpu_throttled += 1;
    }

    /// Records one out-of-memory event.
    pub fn oom(&mut self) {
        self.oom_events += 1;
    }

    /// Memory usage as a percentage of the limit; 0.0 when no limit is set.
    #[must_use]
    pub fn memory_utilization(&self) -> f64 {
        if self.memory_limit == 0 {
            0.0
        } else {
            (self.memory_usage as f64 / self.memory_limit as f64) * 100.0
        }
    }

    /// True when memory utilization exceeds 90% of the limit.
    #[must_use]
    pub fn is_memory_pressure(&self) -> bool {
        self.memory_utilization() > 90.0
    }

    /// Clears only the event counters; configured limits and usage are kept.
    pub fn reset(&mut self) {
        self.cpu_throttled = 0;
        self.oom_events = 0;
    }
}
#[cfg(test)]
mod cgroup_tests {
// Behavioral spec for CgroupTracker: limits, pressure detection, event counters.
use super::*;
#[test]
fn f_cgroup_001_new() {
let cg = CgroupTracker::new();
assert_eq!(cg.cpu_shares, 1024);
}
#[test]
fn f_cgroup_002_default() {
let cg = CgroupTracker::default();
assert_eq!(cg.cpu_shares, 1024);
}
#[test]
fn f_cgroup_003_cpu_shares() {
let mut cg = CgroupTracker::new();
cg.set_cpu_shares(2048);
assert_eq!(cg.cpu_shares, 2048);
}
#[test]
fn f_cgroup_004_memory_limit() {
let mut cg = CgroupTracker::new();
cg.set_memory_limit(1024 * 1024 * 1024);
assert_eq!(cg.memory_limit, 1024 * 1024 * 1024);
}
#[test]
fn f_cgroup_005_memory_usage() {
let mut cg = CgroupTracker::new();
cg.update_memory(512 * 1024 * 1024);
assert_eq!(cg.memory_usage, 512 * 1024 * 1024);
}
#[test]
fn f_cgroup_006_throttle() {
let mut cg = CgroupTracker::new();
cg.throttle();
assert_eq!(cg.cpu_throttled, 1);
}
#[test]
fn f_cgroup_007_oom() {
let mut cg = CgroupTracker::new();
cg.oom();
assert_eq!(cg.oom_events, 1);
}
#[test]
fn f_cgroup_008_memory_util() {
let mut cg = CgroupTracker::new();
cg.set_memory_limit(1000);
cg.update_memory(500);
assert!((cg.memory_utilization() - 50.0).abs() < 0.01);
}
#[test]
fn f_cgroup_009_memory_pressure() {
let mut cg = CgroupTracker::new();
cg.set_memory_limit(1000);
cg.update_memory(950);
assert!(cg.is_memory_pressure());
}
#[test]
fn f_cgroup_010_for_container() {
let cg = CgroupTracker::for_container();
assert_eq!(cg.memory_limit, 1024 * 1024 * 1024);
}
#[test]
fn f_cgroup_011_reset() {
let mut cg = CgroupTracker::new();
cg.throttle();
cg.oom();
cg.reset();
assert_eq!(cg.cpu_throttled, 0);
assert_eq!(cg.oom_events, 0);
}
#[test]
fn f_cgroup_012_clone() {
let mut cg = CgroupTracker::new();
cg.throttle();
let cloned = cg.clone();
assert_eq!(cg.cpu_throttled, cloned.cpu_throttled);
}
}
/// Counters for packet verdicts issued by a netfilter-style
/// firewall/NAT layer, plus a conntrack-table size gauge.
#[derive(Debug, Clone)]
pub struct NetfilterTracker {
    pub accepted: u64,
    pub dropped: u64,
    pub rejected: u64,
    pub nated: u64,
    pub rule_matches: u64,
    pub conntrack_entries: u64,
}

impl Default for NetfilterTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl NetfilterTracker {
    /// All counters start at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            accepted: 0,
            dropped: 0,
            rejected: 0,
            nated: 0,
            rule_matches: 0,
            conntrack_entries: 0,
        }
    }

    /// Firewall preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_firewall() -> Self {
        Self::new()
    }

    /// NAT preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_nat() -> Self {
        Self::new()
    }

    /// Records an ACCEPT verdict (also counted as a rule match).
    pub fn accept(&mut self) {
        self.rule_matches += 1;
        self.accepted += 1;
    }

    /// Records a DROP verdict (also counted as a rule match).
    pub fn record_drop(&mut self) {
        self.rule_matches += 1;
        self.dropped += 1;
    }

    /// Records a REJECT verdict (also counted as a rule match).
    pub fn reject(&mut self) {
        self.rule_matches += 1;
        self.rejected += 1;
    }

    /// Records a NAT translation; NAT does not bump `rule_matches`.
    pub fn nat(&mut self) {
        self.nated += 1;
    }

    /// Overwrites the conntrack-table size gauge.
    pub fn set_conntrack(&mut self, entries: u64) {
        self.conntrack_entries = entries;
    }

    /// Packets that received a terminal verdict (accept + drop + reject).
    #[must_use]
    pub fn total_packets(&self) -> u64 {
        self.accepted + self.dropped + self.rejected
    }

    /// Dropped packets as a percentage of all verdicts; 0.0 when idle.
    #[must_use]
    pub fn drop_rate(&self) -> f64 {
        match self.total_packets() {
            0 => 0.0,
            total => self.dropped as f64 / total as f64 * 100.0,
        }
    }

    /// Zeroes the verdict counters; the conntrack-table gauge is
    /// left unchanged.
    pub fn reset(&mut self) {
        *self = Self {
            conntrack_entries: self.conntrack_entries,
            ..Self::new()
        };
    }
}
#[cfg(test)]
mod netfilter_tests {
// Unit tests for NetfilterTracker: verdict counters and derived rates.
use super::*;
#[test]
fn f_nf_001_new() {
let nf = NetfilterTracker::new();
assert_eq!(nf.total_packets(), 0);
}
#[test]
fn f_nf_002_default() {
let nf = NetfilterTracker::default();
assert_eq!(nf.total_packets(), 0);
}
#[test]
fn f_nf_003_accept() {
let mut nf = NetfilterTracker::new();
nf.accept();
assert_eq!(nf.accepted, 1);
assert_eq!(nf.rule_matches, 1);
}
#[test]
fn f_nf_004_drop() {
let mut nf = NetfilterTracker::new();
nf.record_drop();
assert_eq!(nf.dropped, 1);
}
#[test]
fn f_nf_005_reject() {
let mut nf = NetfilterTracker::new();
nf.reject();
assert_eq!(nf.rejected, 1);
}
#[test]
fn f_nf_006_nat() {
let mut nf = NetfilterTracker::new();
nf.nat();
assert_eq!(nf.nated, 1);
}
#[test]
fn f_nf_007_drop_rate() {
let mut nf = NetfilterTracker::new();
nf.accept();
nf.record_drop();
assert!((nf.drop_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_nf_008_conntrack() {
let mut nf = NetfilterTracker::new();
nf.set_conntrack(1000);
assert_eq!(nf.conntrack_entries, 1000);
}
#[test]
fn f_nf_009_for_firewall() {
let nf = NetfilterTracker::for_firewall();
assert_eq!(nf.total_packets(), 0);
}
#[test]
fn f_nf_010_for_nat() {
let nf = NetfilterTracker::for_nat();
assert_eq!(nf.total_packets(), 0);
}
#[test]
fn f_nf_011_reset() {
let mut nf = NetfilterTracker::new();
nf.accept();
nf.record_drop();
nf.reset();
assert_eq!(nf.total_packets(), 0);
}
#[test]
fn f_nf_012_clone() {
let mut nf = NetfilterTracker::new();
nf.accept();
let cloned = nf.clone();
assert_eq!(nf.accepted, cloned.accepted);
}
}
/// Counters for eBPF program/map lifecycle and runtime activity.
#[derive(Debug, Clone)]
pub struct BpfTracker {
    pub programs: u64,
    pub maps: u64,
    pub runs: u64,
    pub map_lookups: u64,
    pub map_updates: u64,
    pub verification_fails: u64,
}

impl Default for BpfTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl BpfTracker {
    /// All counters start at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            programs: 0,
            maps: 0,
            runs: 0,
            map_lookups: 0,
            map_updates: 0,
            verification_fails: 0,
        }
    }

    /// Tracing preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_tracing() -> Self {
        Self::new()
    }

    /// XDP preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_xdp() -> Self {
        Self::new()
    }

    /// Counts one successfully loaded program.
    pub fn load_program(&mut self) {
        self.programs += 1;
    }

    /// Counts one created map.
    pub fn create_map(&mut self) {
        self.maps += 1;
    }

    /// Counts one program execution.
    pub fn run(&mut self) {
        self.runs += 1;
    }

    /// Counts one map lookup.
    pub fn map_lookup(&mut self) {
        self.map_lookups += 1;
    }

    /// Counts one map update.
    pub fn map_update(&mut self) {
        self.map_updates += 1;
    }

    /// Counts one program rejected by the verifier.
    pub fn verification_fail(&mut self) {
        self.verification_fails += 1;
    }

    /// Map lookups plus map updates.
    #[must_use]
    pub fn total_map_ops(&self) -> u64 {
        self.map_lookups + self.map_updates
    }

    /// Verifier rejections as a percentage of all load attempts
    /// (successful loads + rejections); 0.0 before any attempt.
    #[must_use]
    pub fn failure_rate(&self) -> f64 {
        let attempts = self.programs + self.verification_fails;
        if attempts == 0 {
            0.0
        } else {
            self.verification_fails as f64 / attempts as f64 * 100.0
        }
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod bpf_tests {
// Unit tests for BpfTracker: lifecycle counters and derived metrics.
use super::*;
#[test]
fn f_bpf_001_new() {
let bpf = BpfTracker::new();
assert_eq!(bpf.programs, 0);
}
#[test]
fn f_bpf_002_default() {
let bpf = BpfTracker::default();
assert_eq!(bpf.programs, 0);
}
#[test]
fn f_bpf_003_load() {
let mut bpf = BpfTracker::new();
bpf.load_program();
assert_eq!(bpf.programs, 1);
}
#[test]
fn f_bpf_004_map() {
let mut bpf = BpfTracker::new();
bpf.create_map();
assert_eq!(bpf.maps, 1);
}
#[test]
fn f_bpf_005_run() {
let mut bpf = BpfTracker::new();
bpf.run();
assert_eq!(bpf.runs, 1);
}
#[test]
fn f_bpf_006_lookup() {
let mut bpf = BpfTracker::new();
bpf.map_lookup();
assert_eq!(bpf.map_lookups, 1);
}
#[test]
fn f_bpf_007_update() {
let mut bpf = BpfTracker::new();
bpf.map_update();
assert_eq!(bpf.map_updates, 1);
}
#[test]
fn f_bpf_008_total_ops() {
let mut bpf = BpfTracker::new();
bpf.map_lookup();
bpf.map_update();
assert_eq!(bpf.total_map_ops(), 2);
}
#[test]
fn f_bpf_009_for_tracing() {
let bpf = BpfTracker::for_tracing();
assert_eq!(bpf.programs, 0);
}
#[test]
fn f_bpf_010_for_xdp() {
let bpf = BpfTracker::for_xdp();
assert_eq!(bpf.programs, 0);
}
#[test]
fn f_bpf_011_reset() {
let mut bpf = BpfTracker::new();
bpf.load_program();
bpf.run();
bpf.reset();
assert_eq!(bpf.programs, 0);
}
#[test]
fn f_bpf_012_clone() {
let mut bpf = BpfTracker::new();
bpf.load_program();
let cloned = bpf.clone();
assert_eq!(bpf.programs, cloned.programs);
}
}
/// Counters for perf_event-style hardware/software event monitoring.
#[derive(Debug, Clone)]
pub struct PerfEventTracker {
    pub events: u64,
    pub samples: u64,
    pub lost: u64,
    pub context_switches: u64,
    pub cycles: u64,
    pub instructions: u64,
}

impl Default for PerfEventTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl PerfEventTracker {
    /// All counters start at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            events: 0,
            samples: 0,
            lost: 0,
            context_switches: 0,
            cycles: 0,
            instructions: 0,
        }
    }

    /// Sampling-mode preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_sampling() -> Self {
        Self::new()
    }

    /// Counting-mode preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_counting() -> Self {
        Self::new()
    }

    /// Counts one opened event.
    pub fn open_event(&mut self) {
        self.events += 1;
    }

    /// Counts one delivered sample.
    pub fn sample(&mut self) {
        self.samples += 1;
    }

    /// Counts one lost sample.
    pub fn lost_sample(&mut self) {
        self.lost += 1;
    }

    /// Counts one observed context switch.
    pub fn context_switch(&mut self) {
        self.context_switches += 1;
    }

    /// Accumulates `count` CPU cycles.
    pub fn add_cycles(&mut self, count: u64) {
        self.cycles += count;
    }

    /// Accumulates `count` retired instructions.
    pub fn add_instructions(&mut self, count: u64) {
        self.instructions += count;
    }

    /// Instructions per cycle; 0.0 before any cycles are recorded.
    #[must_use]
    pub fn ipc(&self) -> f64 {
        if self.cycles == 0 {
            0.0
        } else {
            self.instructions as f64 / self.cycles as f64
        }
    }

    /// Lost samples as a percentage of all produced samples
    /// (delivered + lost); 0.0 when nothing was produced.
    #[must_use]
    pub fn loss_rate(&self) -> f64 {
        let produced = self.samples + self.lost;
        if produced == 0 {
            0.0
        } else {
            self.lost as f64 / produced as f64 * 100.0
        }
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod perfevent_tests {
// Unit tests for PerfEventTracker: counters, IPC, and loss rate.
use super::*;
#[test]
fn f_perf_001_new() {
let pe = PerfEventTracker::new();
assert_eq!(pe.events, 0);
}
#[test]
fn f_perf_002_default() {
let pe = PerfEventTracker::default();
assert_eq!(pe.events, 0);
}
#[test]
fn f_perf_003_open() {
let mut pe = PerfEventTracker::new();
pe.open_event();
assert_eq!(pe.events, 1);
}
#[test]
fn f_perf_004_sample() {
let mut pe = PerfEventTracker::new();
pe.sample();
assert_eq!(pe.samples, 1);
}
#[test]
fn f_perf_005_lost() {
let mut pe = PerfEventTracker::new();
pe.lost_sample();
assert_eq!(pe.lost, 1);
}
#[test]
fn f_perf_006_ctxsw() {
let mut pe = PerfEventTracker::new();
pe.context_switch();
assert_eq!(pe.context_switches, 1);
}
#[test]
fn f_perf_007_cycles() {
let mut pe = PerfEventTracker::new();
pe.add_cycles(1000);
assert_eq!(pe.cycles, 1000);
}
#[test]
fn f_perf_008_ipc() {
let mut pe = PerfEventTracker::new();
pe.add_cycles(1000);
pe.add_instructions(2000);
assert!((pe.ipc() - 2.0).abs() < 0.01);
}
#[test]
fn f_perf_009_loss_rate() {
let mut pe = PerfEventTracker::new();
pe.sample();
pe.lost_sample();
assert!((pe.loss_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_perf_010_for_sampling() {
let pe = PerfEventTracker::for_sampling();
assert_eq!(pe.events, 0);
}
#[test]
fn f_perf_011_reset() {
let mut pe = PerfEventTracker::new();
pe.sample();
pe.add_cycles(1000);
pe.reset();
assert_eq!(pe.samples, 0);
}
#[test]
fn f_perf_012_clone() {
let mut pe = PerfEventTracker::new();
pe.sample();
let cloned = pe.clone();
assert_eq!(pe.samples, cloned.samples);
}
}
/// Counters for kernel-probe (kprobe) registration and firing.
#[derive(Debug, Clone)]
pub struct KprobeTracker {
    pub probes: u64,
    pub hits: u64,
    pub misses: u64,
    pub reg_failures: u64,
    pub total_latency_ns: u64,
    pub peak_hits_per_sec: u64,
}

impl Default for KprobeTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl KprobeTracker {
    /// All counters start at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            probes: 0,
            hits: 0,
            misses: 0,
            reg_failures: 0,
            total_latency_ns: 0,
            peak_hits_per_sec: 0,
        }
    }

    /// Tracing preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_tracing() -> Self {
        Self::new()
    }

    /// Debugging preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_debugging() -> Self {
        Self::new()
    }

    /// Counts one successfully registered probe.
    pub fn register(&mut self) {
        self.probes += 1;
    }

    /// Counts one failed probe registration.
    pub fn reg_failure(&mut self) {
        self.reg_failures += 1;
    }

    /// Counts one probe hit and accumulates its handler latency.
    pub fn hit(&mut self, latency_ns: u64) {
        self.hits += 1;
        self.total_latency_ns += latency_ns;
    }

    /// Counts one probe miss.
    pub fn miss(&mut self) {
        self.misses += 1;
    }

    /// Raises the hit-rate high-water mark if `hits_per_sec` exceeds it.
    pub fn update_peak(&mut self, hits_per_sec: u64) {
        self.peak_hits_per_sec = self.peak_hits_per_sec.max(hits_per_sec);
    }

    /// Mean handler latency per hit (integer division); 0 with no hits.
    #[must_use]
    pub fn avg_latency_ns(&self) -> u64 {
        if self.hits == 0 {
            0
        } else {
            self.total_latency_ns / self.hits
        }
    }

    /// Hits as a percentage of all firings (hits + misses).
    #[must_use]
    pub fn hit_rate(&self) -> f64 {
        let fired = self.hits + self.misses;
        if fired == 0 {
            0.0
        } else {
            self.hits as f64 / fired as f64 * 100.0
        }
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod kprobe_tests {
// Unit tests for KprobeTracker: registration, hit/miss, and latency stats.
use super::*;
#[test]
fn f_kprobe_001_new() {
let kp = KprobeTracker::new();
assert_eq!(kp.probes, 0);
}
#[test]
fn f_kprobe_002_default() {
let kp = KprobeTracker::default();
assert_eq!(kp.probes, 0);
}
#[test]
fn f_kprobe_003_register() {
let mut kp = KprobeTracker::new();
kp.register();
assert_eq!(kp.probes, 1);
}
#[test]
fn f_kprobe_004_reg_failure() {
let mut kp = KprobeTracker::new();
kp.reg_failure();
assert_eq!(kp.reg_failures, 1);
}
#[test]
fn f_kprobe_005_hit() {
let mut kp = KprobeTracker::new();
kp.hit(100);
assert_eq!(kp.hits, 1);
assert_eq!(kp.total_latency_ns, 100);
}
#[test]
fn f_kprobe_006_miss() {
let mut kp = KprobeTracker::new();
kp.miss();
assert_eq!(kp.misses, 1);
}
#[test]
fn f_kprobe_007_avg_latency() {
let mut kp = KprobeTracker::new();
kp.hit(100);
kp.hit(200);
assert_eq!(kp.avg_latency_ns(), 150);
}
#[test]
fn f_kprobe_008_hit_rate() {
let mut kp = KprobeTracker::new();
kp.hit(100);
kp.miss();
assert!((kp.hit_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_kprobe_009_peak() {
let mut kp = KprobeTracker::new();
kp.update_peak(1000);
kp.update_peak(500);
assert_eq!(kp.peak_hits_per_sec, 1000);
}
#[test]
fn f_kprobe_010_for_tracing() {
let kp = KprobeTracker::for_tracing();
assert_eq!(kp.probes, 0);
}
#[test]
fn f_kprobe_011_reset() {
let mut kp = KprobeTracker::new();
kp.register();
kp.hit(100);
kp.reset();
assert_eq!(kp.probes, 0);
}
#[test]
fn f_kprobe_012_clone() {
let mut kp = KprobeTracker::new();
kp.hit(100);
let cloned = kp.clone();
assert_eq!(kp.hits, cloned.hits);
}
}
/// Counters for an io_uring-style submission/completion queue pair.
#[derive(Debug, Clone)]
pub struct IoUringTracker {
    pub submissions: u64,
    pub completions: u64,
    pub overflows: u64,
    pub sq_full: u64,
    pub bytes_transferred: u64,
    pub peak_depth: u64,
}

impl Default for IoUringTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl IoUringTracker {
    /// All counters start at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            submissions: 0,
            completions: 0,
            overflows: 0,
            sq_full: 0,
            bytes_transferred: 0,
            peak_depth: 0,
        }
    }

    /// File-I/O preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_file_io() -> Self {
        Self::new()
    }

    /// Network preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_network() -> Self {
        Self::new()
    }

    /// Counts one submission carrying `bytes` of payload and refreshes
    /// the in-flight high-water mark.
    pub fn submit(&mut self, bytes: u64) {
        self.submissions += 1;
        self.bytes_transferred += bytes;
        let in_flight = self.submissions.saturating_sub(self.completions);
        self.peak_depth = self.peak_depth.max(in_flight);
    }

    /// Counts one completion.
    pub fn complete(&mut self) {
        self.completions += 1;
    }

    /// Counts one completion-queue overflow.
    pub fn overflow(&mut self) {
        self.overflows += 1;
    }

    /// Counts one submission attempt that found the queue full.
    /// (Shares its name with the `sq_full` field; fields and methods
    /// live in separate namespaces in Rust, so this is unambiguous.)
    pub fn sq_full(&mut self) {
        self.sq_full += 1;
    }

    /// Operations submitted but not yet completed.
    #[must_use]
    pub fn pending(&self) -> u64 {
        self.submissions.saturating_sub(self.completions)
    }

    /// Completions as a percentage of submissions; 0.0 when idle.
    #[must_use]
    pub fn completion_rate(&self) -> f64 {
        if self.submissions == 0 {
            0.0
        } else {
            self.completions as f64 / self.submissions as f64 * 100.0
        }
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod iouring_tests {
// Unit tests for IoUringTracker: submit/complete flow and queue metrics.
use super::*;
#[test]
fn f_iouring_001_new() {
let io = IoUringTracker::new();
assert_eq!(io.submissions, 0);
}
#[test]
fn f_iouring_002_default() {
let io = IoUringTracker::default();
assert_eq!(io.submissions, 0);
}
#[test]
fn f_iouring_003_submit() {
let mut io = IoUringTracker::new();
io.submit(4096);
assert_eq!(io.submissions, 1);
assert_eq!(io.bytes_transferred, 4096);
}
#[test]
fn f_iouring_004_complete() {
let mut io = IoUringTracker::new();
io.submit(4096);
io.complete();
assert_eq!(io.completions, 1);
}
#[test]
fn f_iouring_005_pending() {
let mut io = IoUringTracker::new();
io.submit(4096);
io.submit(4096);
io.complete();
assert_eq!(io.pending(), 1);
}
#[test]
fn f_iouring_006_peak() {
let mut io = IoUringTracker::new();
io.submit(4096);
io.submit(4096);
io.complete();
io.complete();
assert_eq!(io.peak_depth, 2);
}
#[test]
fn f_iouring_007_overflow() {
let mut io = IoUringTracker::new();
io.overflow();
assert_eq!(io.overflows, 1);
}
#[test]
fn f_iouring_008_sq_full() {
let mut io = IoUringTracker::new();
io.sq_full();
assert_eq!(io.sq_full, 1);
}
#[test]
fn f_iouring_009_for_file_io() {
let io = IoUringTracker::for_file_io();
assert_eq!(io.submissions, 0);
}
#[test]
fn f_iouring_010_for_network() {
let io = IoUringTracker::for_network();
assert_eq!(io.submissions, 0);
}
#[test]
fn f_iouring_011_reset() {
let mut io = IoUringTracker::new();
io.submit(4096);
io.reset();
assert_eq!(io.submissions, 0);
}
#[test]
fn f_iouring_012_clone() {
let mut io = IoUringTracker::new();
io.submit(4096);
let cloned = io.clone();
assert_eq!(io.submissions, cloned.submissions);
}
}
/// NUMA allocation-locality counters across a configurable node count.
#[derive(Debug, Clone)]
pub struct NumaTracker {
    pub local_allocs: u64,
    pub remote_allocs: u64,
    pub local_bytes: u64,
    pub remote_bytes: u64,
    pub migrations: u64,
    pub nodes: u32,
}

impl Default for NumaTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl NumaTracker {
    /// Single-node topology with zeroed counters.
    #[must_use]
    pub const fn new() -> Self {
        Self::for_multinode(1)
    }

    /// Topology with `nodes` NUMA nodes and zeroed counters.
    #[must_use]
    pub const fn for_multinode(nodes: u32) -> Self {
        Self {
            local_allocs: 0,
            remote_allocs: 0,
            local_bytes: 0,
            remote_bytes: 0,
            migrations: 0,
            nodes,
        }
    }

    /// Single-node preset; identical to [`Self::new`].
    #[must_use]
    pub const fn for_single_node() -> Self {
        Self::new()
    }

    /// Counts one node-local allocation of `bytes`.
    pub fn alloc_local(&mut self, bytes: u64) {
        self.local_allocs += 1;
        self.local_bytes += bytes;
    }

    /// Counts one cross-node (remote) allocation of `bytes`.
    pub fn alloc_remote(&mut self, bytes: u64) {
        self.remote_allocs += 1;
        self.remote_bytes += bytes;
    }

    /// Counts one migration event.
    pub fn migrate(&mut self) {
        self.migrations += 1;
    }

    /// Allocations of either kind.
    #[must_use]
    pub fn total_allocs(&self) -> u64 {
        self.local_allocs + self.remote_allocs
    }

    /// Percentage of allocations that were node-local.
    /// Before any allocation this optimistically reports 100%.
    #[must_use]
    pub fn locality(&self) -> f64 {
        let total = self.total_allocs();
        if total == 0 {
            100.0
        } else {
            self.local_allocs as f64 / total as f64 * 100.0
        }
    }

    /// True when fewer than 80% of allocations are local.
    #[must_use]
    pub fn is_remote_heavy(&self) -> bool {
        self.locality() < 80.0
    }

    /// Zeroes the counters while keeping the configured node count.
    pub fn reset(&mut self) {
        *self = Self::for_multinode(self.nodes);
    }
}
#[cfg(test)]
mod numa_tests {
// Unit tests for NumaTracker: locality accounting and node configuration.
use super::*;
#[test]
fn f_numa_001_new() {
let numa = NumaTracker::new();
assert_eq!(numa.total_allocs(), 0);
}
#[test]
fn f_numa_002_default() {
let numa = NumaTracker::default();
assert_eq!(numa.total_allocs(), 0);
}
#[test]
fn f_numa_003_local() {
let mut numa = NumaTracker::new();
numa.alloc_local(4096);
assert_eq!(numa.local_allocs, 1);
assert_eq!(numa.local_bytes, 4096);
}
#[test]
fn f_numa_004_remote() {
let mut numa = NumaTracker::new();
numa.alloc_remote(4096);
assert_eq!(numa.remote_allocs, 1);
}
#[test]
fn f_numa_005_migrate() {
let mut numa = NumaTracker::new();
numa.migrate();
assert_eq!(numa.migrations, 1);
}
#[test]
fn f_numa_006_locality() {
let mut numa = NumaTracker::new();
numa.alloc_local(4096);
numa.alloc_remote(4096);
assert!((numa.locality() - 50.0).abs() < 0.01);
}
#[test]
fn f_numa_007_remote_heavy() {
let mut numa = NumaTracker::new();
numa.alloc_local(1);
numa.alloc_remote(9);
assert!(numa.is_remote_heavy());
}
#[test]
fn f_numa_008_multinode() {
let numa = NumaTracker::for_multinode(4);
assert_eq!(numa.nodes, 4);
}
#[test]
fn f_numa_009_single_node() {
let numa = NumaTracker::for_single_node();
assert_eq!(numa.nodes, 1);
}
#[test]
fn f_numa_010_total() {
let mut numa = NumaTracker::new();
numa.alloc_local(4096);
numa.alloc_remote(4096);
assert_eq!(numa.total_allocs(), 2);
}
#[test]
fn f_numa_011_reset() {
let mut numa = NumaTracker::new();
numa.alloc_local(4096);
numa.reset();
assert_eq!(numa.total_allocs(), 0);
}
#[test]
fn f_numa_012_clone() {
let mut numa = NumaTracker::new();
numa.alloc_local(4096);
let cloned = numa.clone();
assert_eq!(numa.local_allocs, cloned.local_allocs);
}
}
/// Huge-page allocation counters (2 MiB and 1 GiB pages, plus THP).
#[derive(Debug, Clone)]
pub struct HugepageTracker {
    pub pages_2mb: u64,
    pub pages_1gb: u64,
    pub failures: u64,
    pub bytes: u64,
    pub peak_pages: u64,
    pub thp_promotions: u64,
}

impl Default for HugepageTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl HugepageTracker {
    /// All counters start at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            pages_2mb: 0,
            pages_1gb: 0,
            failures: 0,
            bytes: 0,
            peak_pages: 0,
            thp_promotions: 0,
        }
    }

    /// Database preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_database() -> Self {
        Self::new()
    }

    /// HPC preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_hpc() -> Self {
        Self::new()
    }

    /// Counts one 2 MiB page allocation.
    pub fn alloc_2mb(&mut self) {
        self.pages_2mb += 1;
        self.bytes += 2 << 20; // 2 MiB
        self.update_peak();
    }

    /// Counts one 1 GiB page allocation.
    pub fn alloc_1gb(&mut self) {
        self.pages_1gb += 1;
        self.bytes += 1 << 30; // 1 GiB
        self.update_peak();
    }

    /// Counts one failed huge-page allocation attempt.
    pub fn failure(&mut self) {
        self.failures += 1;
    }

    /// Counts one transparent-huge-page promotion.
    pub fn thp_promote(&mut self) {
        self.thp_promotions += 1;
    }

    /// Refreshes the high-water mark of allocated huge pages.
    fn update_peak(&mut self) {
        self.peak_pages = self.peak_pages.max(self.total_pages());
    }

    /// Allocated huge pages of both sizes.
    #[must_use]
    pub fn total_pages(&self) -> u64 {
        self.pages_2mb + self.pages_1gb
    }

    /// Failed attempts as a percentage of all attempts
    /// (allocated pages + failures); 0.0 before any attempt.
    #[must_use]
    pub fn failure_rate(&self) -> f64 {
        let attempts = self.total_pages() + self.failures;
        if attempts == 0 {
            0.0
        } else {
            self.failures as f64 / attempts as f64 * 100.0
        }
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod hugepage_tests {
// Unit tests for HugepageTracker: allocation sizes, peak, and failure rate.
use super::*;
#[test]
fn f_huge_001_new() {
let hp = HugepageTracker::new();
assert_eq!(hp.total_pages(), 0);
}
#[test]
fn f_huge_002_default() {
let hp = HugepageTracker::default();
assert_eq!(hp.total_pages(), 0);
}
#[test]
fn f_huge_003_2mb() {
let mut hp = HugepageTracker::new();
hp.alloc_2mb();
assert_eq!(hp.pages_2mb, 1);
assert_eq!(hp.bytes, 2 * 1024 * 1024);
}
#[test]
fn f_huge_004_1gb() {
let mut hp = HugepageTracker::new();
hp.alloc_1gb();
assert_eq!(hp.pages_1gb, 1);
}
#[test]
fn f_huge_005_failure() {
let mut hp = HugepageTracker::new();
hp.failure();
assert_eq!(hp.failures, 1);
}
#[test]
fn f_huge_006_thp() {
let mut hp = HugepageTracker::new();
hp.thp_promote();
assert_eq!(hp.thp_promotions, 1);
}
#[test]
fn f_huge_007_peak() {
let mut hp = HugepageTracker::new();
hp.alloc_2mb();
hp.alloc_2mb();
assert_eq!(hp.peak_pages, 2);
}
#[test]
fn f_huge_008_failure_rate() {
let mut hp = HugepageTracker::new();
hp.alloc_2mb();
hp.failure();
assert!((hp.failure_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_huge_009_database() {
let hp = HugepageTracker::for_database();
assert_eq!(hp.total_pages(), 0);
}
#[test]
fn f_huge_010_hpc() {
let hp = HugepageTracker::for_hpc();
assert_eq!(hp.total_pages(), 0);
}
#[test]
fn f_huge_011_reset() {
let mut hp = HugepageTracker::new();
hp.alloc_2mb();
hp.reset();
assert_eq!(hp.total_pages(), 0);
}
#[test]
fn f_huge_012_clone() {
let mut hp = HugepageTracker::new();
hp.alloc_2mb();
let cloned = hp.clone();
assert_eq!(hp.pages_2mb, cloned.pages_2mb);
}
}
/// TLB hit/miss counters plus flush and shootdown events.
#[derive(Debug, Clone)]
pub struct TlbTracker {
    pub hits: u64,
    pub misses: u64,
    pub flushes: u64,
    pub shootdowns: u64,
    pub page_walks: u64,
    pub peak_miss_rate: f64,
}

impl Default for TlbTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl TlbTracker {
    /// All counters start at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            hits: 0,
            misses: 0,
            flushes: 0,
            shootdowns: 0,
            page_walks: 0,
            peak_miss_rate: 0.0,
        }
    }

    /// Memory-intensive preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_memory_intensive() -> Self {
        Self::new()
    }

    /// Context-switch preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_context_switch() -> Self {
        Self::new()
    }

    /// Counts one TLB hit.
    pub fn hit(&mut self) {
        self.hits += 1;
    }

    /// Counts one TLB miss (every miss implies a page-table walk) and
    /// refreshes the peak miss rate.
    /// NOTE(review): the peak is sampled right after each miss, so if the
    /// very first access is a miss, a 100% peak is recorded that only a
    /// reset can clear — confirm that is the intended semantics.
    pub fn miss(&mut self) {
        self.misses += 1;
        self.page_walks += 1;
        self.peak_miss_rate = self.peak_miss_rate.max(self.miss_rate());
    }

    /// Counts one TLB flush.
    pub fn flush(&mut self) {
        self.flushes += 1;
    }

    /// Counts one TLB shootdown.
    pub fn shootdown(&mut self) {
        self.shootdowns += 1;
    }

    /// Hits plus misses.
    #[must_use]
    pub fn total_accesses(&self) -> u64 {
        self.hits + self.misses
    }

    /// Misses as a percentage of all accesses; 0.0 when idle.
    #[must_use]
    pub fn miss_rate(&self) -> f64 {
        let total = self.total_accesses();
        if total == 0 {
            0.0
        } else {
            self.misses as f64 / total as f64 * 100.0
        }
    }

    /// True when more than 10% of accesses miss.
    #[must_use]
    pub fn is_thrashing(&self) -> bool {
        self.miss_rate() > 10.0
    }

    /// Returns every counter (and the peak rate) to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod tlb_tests {
// Unit tests for TlbTracker: access counters, miss rate, and thrashing.
use super::*;
#[test]
fn f_tlb_001_new() {
let tlb = TlbTracker::new();
assert_eq!(tlb.total_accesses(), 0);
}
#[test]
fn f_tlb_002_default() {
let tlb = TlbTracker::default();
assert_eq!(tlb.total_accesses(), 0);
}
#[test]
fn f_tlb_003_hit() {
let mut tlb = TlbTracker::new();
tlb.hit();
assert_eq!(tlb.hits, 1);
}
#[test]
fn f_tlb_004_miss() {
let mut tlb = TlbTracker::new();
tlb.miss();
assert_eq!(tlb.misses, 1);
assert_eq!(tlb.page_walks, 1);
}
#[test]
fn f_tlb_005_flush() {
let mut tlb = TlbTracker::new();
tlb.flush();
assert_eq!(tlb.flushes, 1);
}
#[test]
fn f_tlb_006_shootdown() {
let mut tlb = TlbTracker::new();
tlb.shootdown();
assert_eq!(tlb.shootdowns, 1);
}
#[test]
fn f_tlb_007_miss_rate() {
let mut tlb = TlbTracker::new();
tlb.hit();
tlb.miss();
assert!((tlb.miss_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_tlb_008_thrashing() {
let mut tlb = TlbTracker::new();
for _ in 0..9 {
tlb.hit();
}
for _ in 0..2 {
tlb.miss();
}
assert!(tlb.is_thrashing());
}
#[test]
fn f_tlb_009_memory() {
let tlb = TlbTracker::for_memory_intensive();
assert_eq!(tlb.total_accesses(), 0);
}
#[test]
fn f_tlb_010_ctxsw() {
let tlb = TlbTracker::for_context_switch();
assert_eq!(tlb.total_accesses(), 0);
}
#[test]
fn f_tlb_011_reset() {
let mut tlb = TlbTracker::new();
tlb.hit();
tlb.miss();
tlb.reset();
assert_eq!(tlb.total_accesses(), 0);
}
#[test]
fn f_tlb_012_clone() {
let mut tlb = TlbTracker::new();
tlb.hit();
let cloned = tlb.clone();
assert_eq!(tlb.hits, cloned.hits);
}
}
/// Scheduler activity counters: wakeups, migrations, run-queue stats.
#[derive(Debug, Clone)]
pub struct SchedTracker {
    pub wakeups: u64,
    pub migrations: u64,
    pub wait_events: u64,
    pub runq_latency_us: u64,
    pub sched_events: u64,
    pub peak_runq_len: u64,
}

impl Default for SchedTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl SchedTracker {
    /// All counters start at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            wakeups: 0,
            migrations: 0,
            wait_events: 0,
            runq_latency_us: 0,
            sched_events: 0,
            peak_runq_len: 0,
        }
    }

    /// Real-time preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_realtime() -> Self {
        Self::new()
    }

    /// Batch preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_batch() -> Self {
        Self::new()
    }

    /// Counts one task wakeup (also a scheduling event).
    pub fn wakeup(&mut self) {
        self.sched_events += 1;
        self.wakeups += 1;
    }

    /// Counts one migration (also a scheduling event).
    pub fn migrate(&mut self) {
        self.sched_events += 1;
        self.migrations += 1;
    }

    /// Counts one wait event; not counted as a scheduling event.
    pub fn wait(&mut self) {
        self.wait_events += 1;
    }

    /// Accumulates time spent waiting on the run queue.
    pub fn runq_wait(&mut self, latency_us: u64) {
        self.runq_latency_us += latency_us;
    }

    /// Raises the run-queue-length high-water mark if `len` exceeds it.
    pub fn update_runq_len(&mut self, len: u64) {
        self.peak_runq_len = self.peak_runq_len.max(len);
    }

    /// Mean run-queue latency per scheduling event (integer division).
    /// NOTE(review): the divisor is `sched_events` (wakeups + migrations),
    /// not the number of `runq_wait` samples — confirm that is intended.
    #[must_use]
    pub fn avg_runq_latency_us(&self) -> u64 {
        if self.sched_events == 0 {
            0
        } else {
            self.runq_latency_us / self.sched_events
        }
    }

    /// Migrations as a percentage of wakeups; 0.0 with no wakeups.
    #[must_use]
    pub fn migration_rate(&self) -> f64 {
        if self.wakeups == 0 {
            0.0
        } else {
            self.migrations as f64 / self.wakeups as f64 * 100.0
        }
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod sched_tests {
// Unit tests for SchedTracker: events, run-queue stats, migration rate.
use super::*;
#[test]
fn f_sched_001_new() {
let sched = SchedTracker::new();
assert_eq!(sched.sched_events, 0);
}
#[test]
fn f_sched_002_default() {
let sched = SchedTracker::default();
assert_eq!(sched.sched_events, 0);
}
#[test]
fn f_sched_003_wakeup() {
let mut sched = SchedTracker::new();
sched.wakeup();
assert_eq!(sched.wakeups, 1);
assert_eq!(sched.sched_events, 1);
}
#[test]
fn f_sched_004_migrate() {
let mut sched = SchedTracker::new();
sched.migrate();
assert_eq!(sched.migrations, 1);
}
#[test]
fn f_sched_005_wait() {
let mut sched = SchedTracker::new();
sched.wait();
assert_eq!(sched.wait_events, 1);
}
#[test]
fn f_sched_006_runq() {
let mut sched = SchedTracker::new();
sched.runq_wait(100);
assert_eq!(sched.runq_latency_us, 100);
}
#[test]
fn f_sched_007_peak() {
let mut sched = SchedTracker::new();
sched.update_runq_len(10);
sched.update_runq_len(5);
assert_eq!(sched.peak_runq_len, 10);
}
#[test]
fn f_sched_008_mig_rate() {
let mut sched = SchedTracker::new();
sched.wakeup();
sched.wakeup();
sched.migrate();
assert!((sched.migration_rate() - 50.0).abs() < 0.01);
}
#[test]
fn f_sched_009_realtime() {
let sched = SchedTracker::for_realtime();
assert_eq!(sched.sched_events, 0);
}
#[test]
fn f_sched_010_batch() {
let sched = SchedTracker::for_batch();
assert_eq!(sched.sched_events, 0);
}
#[test]
fn f_sched_011_reset() {
let mut sched = SchedTracker::new();
sched.wakeup();
sched.reset();
assert_eq!(sched.sched_events, 0);
}
#[test]
fn f_sched_012_clone() {
let mut sched = SchedTracker::new();
sched.wakeup();
let cloned = sched.clone();
assert_eq!(sched.wakeups, cloned.wakeups);
}
}
/// Hardware-interrupt counters broken down by source.
#[derive(Debug, Clone)]
pub struct IrqTracker {
    pub total: u64,
    pub timer: u64,
    pub network: u64,
    pub storage: u64,
    pub handler_time_us: u64,
    pub peak_rate: u64,
}

impl Default for IrqTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl IrqTracker {
    /// All counters start at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            total: 0,
            timer: 0,
            network: 0,
            storage: 0,
            handler_time_us: 0,
            peak_rate: 0,
        }
    }

    /// Server preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_server() -> Self {
        Self::new()
    }

    /// Embedded preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_embedded() -> Self {
        Self::new()
    }

    /// Bookkeeping shared by every IRQ flavor: one more interrupt,
    /// `handler_us` more time spent in handlers.
    fn account(&mut self, handler_us: u64) {
        self.total += 1;
        self.handler_time_us += handler_us;
    }

    /// Counts one timer interrupt whose handler ran for `handler_us`.
    pub fn timer_irq(&mut self, handler_us: u64) {
        self.timer += 1;
        self.account(handler_us);
    }

    /// Counts one network interrupt whose handler ran for `handler_us`.
    pub fn network_irq(&mut self, handler_us: u64) {
        self.network += 1;
        self.account(handler_us);
    }

    /// Counts one storage interrupt whose handler ran for `handler_us`.
    pub fn storage_irq(&mut self, handler_us: u64) {
        self.storage += 1;
        self.account(handler_us);
    }

    /// Raises the interrupt-rate high-water mark if `rate` exceeds it.
    pub fn update_rate(&mut self, rate: u64) {
        self.peak_rate = self.peak_rate.max(rate);
    }

    /// Mean handler time per interrupt (integer division); 0 when idle.
    #[must_use]
    pub fn avg_handler_us(&self) -> u64 {
        if self.total == 0 {
            0
        } else {
            self.handler_time_us / self.total
        }
    }

    /// Network interrupts as a percentage of all interrupts.
    #[must_use]
    pub fn network_percentage(&self) -> f64 {
        if self.total == 0 {
            0.0
        } else {
            self.network as f64 / self.total as f64 * 100.0
        }
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod irq_tests {
// Unit tests for IrqTracker: per-source counters and handler-time stats.
use super::*;
#[test]
fn f_irq_001_new() {
let irq = IrqTracker::new();
assert_eq!(irq.total, 0);
}
#[test]
fn f_irq_002_default() {
let irq = IrqTracker::default();
assert_eq!(irq.total, 0);
}
#[test]
fn f_irq_003_timer() {
let mut irq = IrqTracker::new();
irq.timer_irq(10);
assert_eq!(irq.timer, 1);
assert_eq!(irq.total, 1);
}
#[test]
fn f_irq_004_network() {
let mut irq = IrqTracker::new();
irq.network_irq(10);
assert_eq!(irq.network, 1);
}
#[test]
fn f_irq_005_storage() {
let mut irq = IrqTracker::new();
irq.storage_irq(10);
assert_eq!(irq.storage, 1);
}
#[test]
fn f_irq_006_handler() {
let mut irq = IrqTracker::new();
irq.timer_irq(100);
irq.network_irq(200);
assert_eq!(irq.handler_time_us, 300);
}
#[test]
fn f_irq_007_avg() {
let mut irq = IrqTracker::new();
irq.timer_irq(100);
irq.network_irq(200);
assert_eq!(irq.avg_handler_us(), 150);
}
#[test]
fn f_irq_008_peak() {
let mut irq = IrqTracker::new();
irq.update_rate(1000);
irq.update_rate(500);
assert_eq!(irq.peak_rate, 1000);
}
#[test]
fn f_irq_009_server() {
let irq = IrqTracker::for_server();
assert_eq!(irq.total, 0);
}
#[test]
fn f_irq_010_embedded() {
let irq = IrqTracker::for_embedded();
assert_eq!(irq.total, 0);
}
#[test]
fn f_irq_011_reset() {
let mut irq = IrqTracker::new();
irq.timer_irq(10);
irq.reset();
assert_eq!(irq.total, 0);
}
#[test]
fn f_irq_012_clone() {
let mut irq = IrqTracker::new();
irq.timer_irq(10);
let cloned = irq.clone();
assert_eq!(irq.timer, cloned.timer);
}
}
/// Softirq counters broken down by vector (net RX/TX, block, timer).
#[derive(Debug, Clone)]
pub struct SoftirqTracker {
    pub total: u64,
    pub net_rx: u64,
    pub net_tx: u64,
    pub block: u64,
    pub timer: u64,
    pub exec_time_us: u64,
}

impl Default for SoftirqTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl SoftirqTracker {
    /// All counters start at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            total: 0,
            net_rx: 0,
            net_tx: 0,
            block: 0,
            timer: 0,
            exec_time_us: 0,
        }
    }

    /// Network preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_network() -> Self {
        Self::new()
    }

    /// Storage preset; currently identical to [`Self::new`].
    #[must_use]
    pub const fn for_storage() -> Self {
        Self::new()
    }

    /// Bookkeeping shared by every vector: one more softirq,
    /// `exec_us` more execution time.
    fn charge(&mut self, exec_us: u64) {
        self.total += 1;
        self.exec_time_us += exec_us;
    }

    /// Counts one net-RX softirq that ran for `exec_us`.
    /// (Methods here share names with their fields; Rust keeps field and
    /// method namespaces separate, so this is unambiguous.)
    pub fn net_rx(&mut self, exec_us: u64) {
        self.net_rx += 1;
        self.charge(exec_us);
    }

    /// Counts one net-TX softirq that ran for `exec_us`.
    pub fn net_tx(&mut self, exec_us: u64) {
        self.net_tx += 1;
        self.charge(exec_us);
    }

    /// Counts one block softirq that ran for `exec_us`.
    pub fn block(&mut self, exec_us: u64) {
        self.block += 1;
        self.charge(exec_us);
    }

    /// Counts one timer softirq that ran for `exec_us`.
    pub fn timer(&mut self, exec_us: u64) {
        self.timer += 1;
        self.charge(exec_us);
    }

    /// Net RX + TX softirqs as a percentage of all softirqs.
    #[must_use]
    pub fn network_percentage(&self) -> f64 {
        if self.total == 0 {
            0.0
        } else {
            (self.net_rx + self.net_tx) as f64 / self.total as f64 * 100.0
        }
    }

    /// Mean execution time per softirq (integer division); 0 when idle.
    #[must_use]
    pub fn avg_exec_us(&self) -> u64 {
        if self.total == 0 {
            0
        } else {
            self.exec_time_us / self.total
        }
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod softirq_tests {
// Unit tests for SoftirqTracker: per-vector counters and derived stats.
use super::*;
#[test]
fn f_softirq_001_new() {
let si = SoftirqTracker::new();
assert_eq!(si.total, 0);
}
#[test]
fn f_softirq_002_default() {
let si = SoftirqTracker::default();
assert_eq!(si.total, 0);
}
#[test]
fn f_softirq_003_net_rx() {
let mut si = SoftirqTracker::new();
si.net_rx(10);
assert_eq!(si.net_rx, 1);
assert_eq!(si.total, 1);
}
#[test]
fn f_softirq_004_net_tx() {
let mut si = SoftirqTracker::new();
si.net_tx(10);
assert_eq!(si.net_tx, 1);
}
#[test]
fn f_softirq_005_block() {
let mut si = SoftirqTracker::new();
si.block(10);
assert_eq!(si.block, 1);
}
#[test]
fn f_softirq_006_timer() {
let mut si = SoftirqTracker::new();
si.timer(10);
assert_eq!(si.timer, 1);
}
#[test]
fn f_softirq_007_exec() {
let mut si = SoftirqTracker::new();
si.net_rx(100);
si.block(200);
assert_eq!(si.exec_time_us, 300);
}
#[test]
fn f_softirq_008_net_pct() {
let mut si = SoftirqTracker::new();
si.net_rx(10);
si.net_tx(10);
si.block(10);
si.timer(10);
assert!((si.network_percentage() - 50.0).abs() < 0.01);
}
#[test]
fn f_softirq_009_network() {
let si = SoftirqTracker::for_network();
assert_eq!(si.total, 0);
}
#[test]
fn f_softirq_010_storage() {
let si = SoftirqTracker::for_storage();
assert_eq!(si.total, 0);
}
#[test]
fn f_softirq_011_reset() {
let mut si = SoftirqTracker::new();
si.net_rx(10);
si.reset();
assert_eq!(si.total, 0);
}
#[test]
fn f_softirq_012_clone() {
let mut si = SoftirqTracker::new();
si.net_rx(10);
let cloned = si.clone();
assert_eq!(si.net_rx, cloned.net_rx);
}
}
/// Counters describing work items flowing through a kernel-style workqueue.
#[derive(Debug, Clone)]
pub struct WorkqueueTracker {
    /// Items ever queued.
    pub queued: u64,
    /// Items whose work function has run.
    pub executed: u64,
    /// Items cancelled before execution.
    pub cancelled: u64,
    /// Accumulated execution time of executed items, in microseconds.
    pub exec_time_us: u64,
    /// Highest queue depth observed at enqueue time.
    pub peak_depth: u64,
    /// Items queued with a delay.
    pub delayed: u64,
}

impl Default for WorkqueueTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl WorkqueueTracker {
    /// Returns a tracker with all counters zeroed.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            queued: 0,
            executed: 0,
            cancelled: 0,
            exec_time_us: 0,
            peak_depth: 0,
            delayed: 0,
        }
    }

    /// Named constructor for the system workqueue; identical to `new()`.
    #[must_use]
    pub const fn for_system() -> Self {
        Self::new()
    }

    /// Named constructor for the high-priority workqueue; identical to `new()`.
    #[must_use]
    pub const fn for_highpri() -> Self {
        Self::new()
    }

    /// Records one enqueue and updates the observed peak depth.
    pub fn queue(&mut self) {
        self.queued += 1;
        // NOTE(review): depth here deliberately ignores `cancelled`, while
        // `pending()` subtracts it — confirm whether cancelled items should
        // count toward peak depth.
        let depth = self.queued.saturating_sub(self.executed);
        if depth > self.peak_depth {
            self.peak_depth = depth;
        }
    }

    /// Records one executed item that ran for `exec_us` microseconds.
    pub fn execute(&mut self, exec_us: u64) {
        self.executed += 1;
        self.exec_time_us += exec_us;
    }

    /// Records one cancelled item.
    pub fn cancel(&mut self) {
        self.cancelled += 1;
    }

    /// Records one delayed enqueue.
    pub fn delay(&mut self) {
        self.delayed += 1;
    }

    /// Items queued but neither executed nor cancelled.
    #[must_use]
    pub fn pending(&self) -> u64 {
        // Chained saturating subtraction: the previous `executed + cancelled`
        // sum could overflow u64 (panic in debug builds) before the
        // saturating subtraction ever ran.
        self.queued
            .saturating_sub(self.executed)
            .saturating_sub(self.cancelled)
    }

    /// Mean execution time per executed item, microseconds (0 when empty).
    #[must_use]
    pub fn avg_exec_us(&self) -> u64 {
        if self.executed == 0 {
            return 0;
        }
        self.exec_time_us / self.executed
    }

    /// Returns every counter, including the peak depth, to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod workqueue_tests {
    use super::*;

    #[test]
    fn f_wq_001_new() {
        let t = WorkqueueTracker::new();
        assert_eq!(t.queued, 0);
    }

    #[test]
    fn f_wq_002_default() {
        let t = WorkqueueTracker::default();
        assert_eq!(t.queued, 0);
    }

    #[test]
    fn f_wq_003_queue() {
        let mut t = WorkqueueTracker::new();
        t.queue();
        assert_eq!(t.queued, 1);
    }

    #[test]
    fn f_wq_004_execute() {
        let mut t = WorkqueueTracker::new();
        t.execute(100);
        assert_eq!(t.executed, 1);
        assert_eq!(t.exec_time_us, 100);
    }

    #[test]
    fn f_wq_005_cancel() {
        let mut t = WorkqueueTracker::new();
        t.cancel();
        assert_eq!(t.cancelled, 1);
    }

    #[test]
    fn f_wq_006_delay() {
        let mut t = WorkqueueTracker::new();
        t.delay();
        assert_eq!(t.delayed, 1);
    }

    #[test]
    fn f_wq_007_pending() {
        let mut t = WorkqueueTracker::new();
        t.queue();
        t.queue();
        t.execute(100);
        assert_eq!(t.pending(), 1);
    }

    #[test]
    fn f_wq_008_peak() {
        let mut t = WorkqueueTracker::new();
        t.queue();
        t.queue();
        t.execute(100);
        t.execute(100);
        assert_eq!(t.peak_depth, 2);
    }

    #[test]
    fn f_wq_009_system() {
        let t = WorkqueueTracker::for_system();
        assert_eq!(t.queued, 0);
    }

    #[test]
    fn f_wq_010_highpri() {
        let t = WorkqueueTracker::for_highpri();
        assert_eq!(t.queued, 0);
    }

    #[test]
    fn f_wq_011_reset() {
        let mut t = WorkqueueTracker::new();
        t.queue();
        t.reset();
        assert_eq!(t.queued, 0);
    }

    #[test]
    fn f_wq_012_clone() {
        let mut t = WorkqueueTracker::new();
        t.queue();
        let copy = t.clone();
        assert_eq!(t.queued, copy.queued);
    }
}
/// Statistics for RCU grace periods and callback processing.
#[derive(Debug, Clone)]
pub struct RcuTracker {
    /// Completed grace periods.
    pub grace_periods: u64,
    /// Callbacks queued.
    pub callbacks_queued: u64,
    /// Callbacks retired.
    pub callbacks_executed: u64,
    /// Expedited grace-period requests.
    pub expedited: u64,
    /// Sum of all grace-period durations, in microseconds.
    pub total_gp_duration_us: u64,
    /// Highest number of callbacks outstanding at once.
    pub peak_callbacks: u64,
}

impl Default for RcuTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl RcuTracker {
    /// Returns a tracker with all counters zeroed.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            grace_periods: 0,
            callbacks_queued: 0,
            callbacks_executed: 0,
            expedited: 0,
            total_gp_duration_us: 0,
            peak_callbacks: 0,
        }
    }

    /// Named constructor for classic kernel RCU; identical to `new()`.
    #[must_use]
    pub const fn for_kernel() -> Self {
        Self::new()
    }

    /// Named constructor for sleepable RCU (SRCU); identical to `new()`.
    #[must_use]
    pub const fn for_srcu() -> Self {
        Self::new()
    }

    /// Records one completed grace period that lasted `duration_us`.
    pub fn grace_period(&mut self, duration_us: u64) {
        self.total_gp_duration_us += duration_us;
        self.grace_periods += 1;
    }

    /// Records a newly queued callback and refreshes the peak backlog.
    pub fn queue_callback(&mut self) {
        self.callbacks_queued += 1;
        let backlog = self
            .callbacks_queued
            .saturating_sub(self.callbacks_executed);
        if backlog > self.peak_callbacks {
            self.peak_callbacks = backlog;
        }
    }

    /// Records one retired callback.
    pub fn execute_callback(&mut self) {
        self.callbacks_executed += 1;
    }

    /// Records one expedited grace-period request.
    pub fn expedite(&mut self) {
        self.expedited += 1;
    }

    /// Mean grace-period duration, in microseconds (0 when none recorded).
    #[must_use]
    pub fn avg_gp_duration_us(&self) -> u64 {
        match self.grace_periods {
            0 => 0,
            n => self.total_gp_duration_us / n,
        }
    }

    /// Callbacks queued but not yet executed.
    #[must_use]
    pub fn pending_callbacks(&self) -> u64 {
        self.callbacks_queued
            .saturating_sub(self.callbacks_executed)
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod rcu_tests {
    use super::*;

    #[test]
    fn f_rcu_001_new() {
        let t = RcuTracker::new();
        assert_eq!(t.grace_periods, 0);
    }

    #[test]
    fn f_rcu_002_default() {
        let t = RcuTracker::default();
        assert_eq!(t.grace_periods, 0);
    }

    #[test]
    fn f_rcu_003_gp() {
        let mut t = RcuTracker::new();
        t.grace_period(100);
        assert_eq!(t.grace_periods, 1);
    }

    #[test]
    fn f_rcu_004_queue() {
        let mut t = RcuTracker::new();
        t.queue_callback();
        assert_eq!(t.callbacks_queued, 1);
    }

    #[test]
    fn f_rcu_005_execute() {
        let mut t = RcuTracker::new();
        t.execute_callback();
        assert_eq!(t.callbacks_executed, 1);
    }

    #[test]
    fn f_rcu_006_expedite() {
        let mut t = RcuTracker::new();
        t.expedite();
        assert_eq!(t.expedited, 1);
    }

    #[test]
    fn f_rcu_007_avg_gp() {
        let mut t = RcuTracker::new();
        t.grace_period(100);
        t.grace_period(200);
        assert_eq!(t.avg_gp_duration_us(), 150);
    }

    #[test]
    fn f_rcu_008_pending() {
        let mut t = RcuTracker::new();
        t.queue_callback();
        t.queue_callback();
        t.execute_callback();
        assert_eq!(t.pending_callbacks(), 1);
    }

    #[test]
    fn f_rcu_009_kernel() {
        let t = RcuTracker::for_kernel();
        assert_eq!(t.grace_periods, 0);
    }

    #[test]
    fn f_rcu_010_srcu() {
        let t = RcuTracker::for_srcu();
        assert_eq!(t.grace_periods, 0);
    }

    #[test]
    fn f_rcu_011_reset() {
        let mut t = RcuTracker::new();
        t.grace_period(100);
        t.reset();
        assert_eq!(t.grace_periods, 0);
    }

    #[test]
    fn f_rcu_012_clone() {
        let mut t = RcuTracker::new();
        t.grace_period(100);
        let copy = t.clone();
        assert_eq!(t.grace_periods, copy.grace_periods);
    }
}
/// Allocation statistics for a slab-style memory cache.
#[derive(Debug, Clone)]
pub struct SlabTracker {
    /// Objects allocated.
    pub allocs: u64,
    /// Objects freed.
    pub frees: u64,
    /// Allocations that missed the cache.
    pub cache_misses: u64,
    /// Objects currently live.
    pub objects_in_use: u64,
    /// Bytes currently accounted to live objects.
    pub memory_used: u64,
    /// Number of slabs backing the cache.
    pub slabs: u64,
}

impl Default for SlabTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl SlabTracker {
    /// Returns a tracker with all counters zeroed.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            allocs: 0,
            frees: 0,
            cache_misses: 0,
            objects_in_use: 0,
            memory_used: 0,
            slabs: 0,
        }
    }

    /// Named constructor for kmalloc-style caches; identical to `new()`.
    #[must_use]
    pub const fn for_kmalloc() -> Self {
        Self::new()
    }

    /// Named constructor for dedicated caches; identical to `new()`.
    #[must_use]
    pub const fn for_cache() -> Self {
        Self::new()
    }

    /// Records an allocation of `size` bytes.
    pub fn alloc(&mut self, size: u64) {
        self.allocs += 1;
        self.objects_in_use += 1;
        self.memory_used += size;
    }

    /// Records a free of `size` bytes; live counters never underflow.
    pub fn free(&mut self, size: u64) {
        self.frees += 1;
        self.objects_in_use = self.objects_in_use.saturating_sub(1);
        self.memory_used = self.memory_used.saturating_sub(size);
    }

    /// Records one cache miss.
    pub fn cache_miss(&mut self) {
        self.cache_misses += 1;
    }

    /// Overwrites the backing-slab count.
    pub fn set_slabs(&mut self, count: u64) {
        self.slabs = count;
    }

    /// Cache hit percentage (0.0–100.0); 100.0 when nothing was allocated.
    #[must_use]
    pub fn cache_hit_rate(&self) -> f64 {
        if self.allocs == 0 {
            return 100.0;
        }
        let hits = self.allocs.saturating_sub(self.cache_misses);
        (hits as f64 / self.allocs as f64) * 100.0
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod slab_tests {
    use super::*;

    #[test]
    fn f_slab_001_new() {
        let t = SlabTracker::new();
        assert_eq!(t.allocs, 0);
    }

    #[test]
    fn f_slab_002_default() {
        let t = SlabTracker::default();
        assert_eq!(t.allocs, 0);
    }

    #[test]
    fn f_slab_003_alloc() {
        let mut t = SlabTracker::new();
        t.alloc(64);
        assert_eq!(t.allocs, 1);
        assert_eq!(t.objects_in_use, 1);
    }

    #[test]
    fn f_slab_004_free() {
        let mut t = SlabTracker::new();
        t.alloc(64);
        t.free(64);
        assert_eq!(t.frees, 1);
        assert_eq!(t.objects_in_use, 0);
    }

    #[test]
    fn f_slab_005_miss() {
        let mut t = SlabTracker::new();
        t.cache_miss();
        assert_eq!(t.cache_misses, 1);
    }

    #[test]
    fn f_slab_006_memory() {
        let mut t = SlabTracker::new();
        t.alloc(64);
        t.alloc(128);
        assert_eq!(t.memory_used, 192);
    }

    #[test]
    fn f_slab_007_hit_rate() {
        let mut t = SlabTracker::new();
        t.alloc(64);
        t.alloc(64);
        t.cache_miss();
        assert!((t.cache_hit_rate() - 50.0).abs() < 0.01);
    }

    #[test]
    fn f_slab_008_slabs() {
        let mut t = SlabTracker::new();
        t.set_slabs(10);
        assert_eq!(t.slabs, 10);
    }

    #[test]
    fn f_slab_009_kmalloc() {
        let t = SlabTracker::for_kmalloc();
        assert_eq!(t.allocs, 0);
    }

    #[test]
    fn f_slab_010_cache() {
        let t = SlabTracker::for_cache();
        assert_eq!(t.allocs, 0);
    }

    #[test]
    fn f_slab_011_reset() {
        let mut t = SlabTracker::new();
        t.alloc(64);
        t.reset();
        assert_eq!(t.allocs, 0);
    }

    #[test]
    fn f_slab_012_clone() {
        let mut t = SlabTracker::new();
        t.alloc(64);
        let copy = t.clone();
        assert_eq!(t.allocs, copy.allocs);
    }
}
/// Virtual-memory event counters: page faults, swap traffic, page alloc/free.
#[derive(Debug, Clone)]
pub struct VmstatTracker {
    /// Faults satisfied without disk I/O.
    pub minor_faults: u64,
    /// Faults that required disk I/O.
    pub major_faults: u64,
    /// Pages swapped in.
    pub swap_in: u64,
    /// Pages swapped out.
    pub swap_out: u64,
    /// Pages allocated.
    pub pgalloc: u64,
    /// Pages freed.
    pub pgfree: u64,
}

impl Default for VmstatTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl VmstatTracker {
    /// Returns a tracker with all counters zeroed.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            minor_faults: 0,
            major_faults: 0,
            swap_in: 0,
            swap_out: 0,
            pgalloc: 0,
            pgfree: 0,
        }
    }

    /// Named constructor for per-process stats; identical to `new()`.
    #[must_use]
    pub const fn for_process() -> Self {
        Self::new()
    }

    /// Named constructor for system-wide stats; identical to `new()`.
    #[must_use]
    pub const fn for_system() -> Self {
        Self::new()
    }

    /// Counts one minor (soft) page fault.
    pub fn minor_fault(&mut self) {
        self.minor_faults += 1;
    }

    /// Counts one major (hard) page fault.
    pub fn major_fault(&mut self) {
        self.major_faults += 1;
    }

    /// Adds `pages` to the swap-in total.
    pub fn swap_in(&mut self, pages: u64) {
        self.swap_in += pages;
    }

    /// Adds `pages` to the swap-out total.
    pub fn swap_out(&mut self, pages: u64) {
        self.swap_out += pages;
    }

    /// Adds `pages` to the page-allocation total.
    pub fn pgalloc(&mut self, pages: u64) {
        self.pgalloc += pages;
    }

    /// Adds `pages` to the page-free total.
    pub fn pgfree(&mut self, pages: u64) {
        self.pgfree += pages;
    }

    /// Minor plus major faults.
    #[must_use]
    pub fn total_faults(&self) -> u64 {
        self.minor_faults + self.major_faults
    }

    /// Percentage (0.0–100.0) of faults that were major; 0.0 when no faults.
    #[must_use]
    pub fn major_fault_ratio(&self) -> f64 {
        let total = self.total_faults();
        if total == 0 {
            return 0.0;
        }
        (self.major_faults as f64 / total as f64) * 100.0
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod vmstat_tests {
    use super::*;

    #[test]
    fn f_vmstat_001_new() {
        let t = VmstatTracker::new();
        assert_eq!(t.total_faults(), 0);
    }

    #[test]
    fn f_vmstat_002_default() {
        let t = VmstatTracker::default();
        assert_eq!(t.total_faults(), 0);
    }

    #[test]
    fn f_vmstat_003_minor() {
        let mut t = VmstatTracker::new();
        t.minor_fault();
        assert_eq!(t.minor_faults, 1);
    }

    #[test]
    fn f_vmstat_004_major() {
        let mut t = VmstatTracker::new();
        t.major_fault();
        assert_eq!(t.major_faults, 1);
    }

    #[test]
    fn f_vmstat_005_swap_in() {
        let mut t = VmstatTracker::new();
        t.swap_in(10);
        assert_eq!(t.swap_in, 10);
    }

    #[test]
    fn f_vmstat_006_swap_out() {
        let mut t = VmstatTracker::new();
        t.swap_out(10);
        assert_eq!(t.swap_out, 10);
    }

    #[test]
    fn f_vmstat_007_total() {
        let mut t = VmstatTracker::new();
        t.minor_fault();
        t.major_fault();
        assert_eq!(t.total_faults(), 2);
    }

    #[test]
    fn f_vmstat_008_ratio() {
        let mut t = VmstatTracker::new();
        t.minor_fault();
        t.major_fault();
        assert!((t.major_fault_ratio() - 50.0).abs() < 0.01);
    }

    #[test]
    fn f_vmstat_009_process() {
        let t = VmstatTracker::for_process();
        assert_eq!(t.total_faults(), 0);
    }

    #[test]
    fn f_vmstat_010_system() {
        let t = VmstatTracker::for_system();
        assert_eq!(t.total_faults(), 0);
    }

    #[test]
    fn f_vmstat_011_reset() {
        let mut t = VmstatTracker::new();
        t.minor_fault();
        t.reset();
        assert_eq!(t.total_faults(), 0);
    }

    #[test]
    fn f_vmstat_012_clone() {
        let mut t = VmstatTracker::new();
        t.minor_fault();
        let copy = t.clone();
        assert_eq!(t.minor_faults, copy.minor_faults);
    }
}
/// Memory-zone state: free pages, watermarks, and reclaim/compaction activity.
#[derive(Debug, Clone)]
pub struct ZoneTracker {
    /// Pages currently free in the zone.
    pub free_pages: u64,
    /// Low watermark (below this the zone is under pressure).
    pub watermark_low: u64,
    /// High watermark (above this the zone has plenty of memory).
    pub watermark_high: u64,
    /// Pages examined during reclaim scans.
    pub pages_scanned: u64,
    /// Reclaim passes attempted.
    pub reclaim_attempts: u64,
    /// Compaction passes attempted.
    pub compaction_attempts: u64,
}

impl Default for ZoneTracker {
    fn default() -> Self {
        Self::new()
    }
}

impl ZoneTracker {
    /// Returns a tracker with all fields zeroed.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            free_pages: 0,
            watermark_low: 0,
            watermark_high: 0,
            pages_scanned: 0,
            reclaim_attempts: 0,
            compaction_attempts: 0,
        }
    }

    /// Named constructor for the DMA zone; identical to `new()`.
    #[must_use]
    pub const fn for_dma() -> Self {
        Self::new()
    }

    /// Named constructor for the normal zone; identical to `new()`.
    #[must_use]
    pub const fn for_normal() -> Self {
        Self::new()
    }

    /// Overwrites the free-page count.
    pub fn set_free_pages(&mut self, pages: u64) {
        self.free_pages = pages;
    }

    /// Overwrites both watermarks.
    pub fn set_watermarks(&mut self, low: u64, high: u64) {
        self.watermark_low = low;
        self.watermark_high = high;
    }

    /// Adds `pages` to the scan total.
    pub fn scan(&mut self, pages: u64) {
        self.pages_scanned += pages;
    }

    /// Counts one reclaim attempt.
    pub fn reclaim(&mut self) {
        self.reclaim_attempts += 1;
    }

    /// Counts one compaction attempt.
    pub fn compact(&mut self) {
        self.compaction_attempts += 1;
    }

    /// True when the free-page count has fallen below the low watermark.
    #[must_use]
    pub fn is_low(&self) -> bool {
        self.free_pages < self.watermark_low
    }

    /// True when the free-page count is above the high watermark.
    #[must_use]
    pub fn is_high(&self) -> bool {
        self.free_pages > self.watermark_high
    }

    /// Zeroes only the activity counters; free pages and watermarks are
    /// configuration-like state and are deliberately preserved.
    pub fn reset(&mut self) {
        self.pages_scanned = 0;
        self.reclaim_attempts = 0;
        self.compaction_attempts = 0;
    }
}
#[cfg(test)]
mod zone_tests {
    use super::*;

    #[test]
    fn f_zone_001_new() {
        let t = ZoneTracker::new();
        assert_eq!(t.free_pages, 0);
    }

    #[test]
    fn f_zone_002_default() {
        let t = ZoneTracker::default();
        assert_eq!(t.free_pages, 0);
    }

    #[test]
    fn f_zone_003_free() {
        let mut t = ZoneTracker::new();
        t.set_free_pages(1000);
        assert_eq!(t.free_pages, 1000);
    }

    #[test]
    fn f_zone_004_watermarks() {
        let mut t = ZoneTracker::new();
        t.set_watermarks(100, 500);
        assert_eq!(t.watermark_low, 100);
        assert_eq!(t.watermark_high, 500);
    }

    #[test]
    fn f_zone_005_scan() {
        let mut t = ZoneTracker::new();
        t.scan(100);
        assert_eq!(t.pages_scanned, 100);
    }

    #[test]
    fn f_zone_006_reclaim() {
        let mut t = ZoneTracker::new();
        t.reclaim();
        assert_eq!(t.reclaim_attempts, 1);
    }

    #[test]
    fn f_zone_007_compact() {
        let mut t = ZoneTracker::new();
        t.compact();
        assert_eq!(t.compaction_attempts, 1);
    }

    #[test]
    fn f_zone_008_is_low() {
        let mut t = ZoneTracker::new();
        t.set_watermarks(100, 500);
        t.set_free_pages(50);
        assert!(t.is_low());
    }

    #[test]
    fn f_zone_009_dma() {
        let t = ZoneTracker::for_dma();
        assert_eq!(t.free_pages, 0);
    }

    #[test]
    fn f_zone_010_normal() {
        let t = ZoneTracker::for_normal();
        assert_eq!(t.free_pages, 0);
    }

    #[test]
    fn f_zone_011_reset() {
        let mut t = ZoneTracker::new();
        t.scan(100);
        t.reset();
        assert_eq!(t.pages_scanned, 0);
    }

    #[test]
    fn f_zone_012_clone() {
        let mut t = ZoneTracker::new();
        t.set_free_pages(1000);
        let copy = t.clone();
        assert_eq!(t.free_pages, copy.free_pages);
    }
}
/// Byte and operation counters for the block I/O layer.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct BlockLayerTracker {
    /// Bytes read.
    pub read_bytes: u64,
    /// Bytes written.
    pub write_bytes: u64,
    /// Read operations issued.
    pub read_ops: u64,
    /// Write operations issued.
    pub write_ops: u64,
    /// Flush (cache-sync) operations.
    pub flushes: u64,
    /// Discard (TRIM) operations.
    pub discards: u64,
}

impl BlockLayerTracker {
    /// Returns a tracker with all counters zeroed.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            read_bytes: 0,
            write_bytes: 0,
            read_ops: 0,
            write_ops: 0,
            flushes: 0,
            discards: 0,
        }
    }

    /// Named constructor for NVMe devices; identical to `new()`.
    #[must_use]
    pub const fn for_nvme() -> Self {
        Self::new()
    }

    /// Named constructor for SCSI devices; identical to `new()`.
    #[must_use]
    pub const fn for_scsi() -> Self {
        Self::new()
    }

    /// Records one read of `bytes` bytes.
    pub fn read(&mut self, bytes: u64) {
        self.read_ops += 1;
        self.read_bytes += bytes;
    }

    /// Records one write of `bytes` bytes.
    pub fn write(&mut self, bytes: u64) {
        self.write_ops += 1;
        self.write_bytes += bytes;
    }

    /// Records one flush.
    pub fn flush(&mut self) {
        self.flushes += 1;
    }

    /// Records one discard.
    pub fn discard(&mut self) {
        self.discards += 1;
    }

    /// Bytes moved in either direction.
    #[must_use]
    pub fn total_bytes(&self) -> u64 {
        self.read_bytes + self.write_bytes
    }

    /// All operations: reads, writes, flushes, and discards.
    #[must_use]
    pub fn total_ops(&self) -> u64 {
        self.read_ops + self.write_ops + self.flushes + self.discards
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod block_layer_tests {
    use super::*;

    #[test]
    fn f_blk_001_new() {
        let t = BlockLayerTracker::new();
        assert_eq!(t.read_bytes, 0);
    }

    #[test]
    fn f_blk_002_default() {
        let t = BlockLayerTracker::default();
        assert_eq!(t.read_bytes, 0);
    }

    #[test]
    fn f_blk_003_read() {
        let mut t = BlockLayerTracker::new();
        t.read(4096);
        assert_eq!(t.read_bytes, 4096);
        assert_eq!(t.read_ops, 1);
    }

    #[test]
    fn f_blk_004_write() {
        let mut t = BlockLayerTracker::new();
        t.write(8192);
        assert_eq!(t.write_bytes, 8192);
        assert_eq!(t.write_ops, 1);
    }

    #[test]
    fn f_blk_005_flush() {
        let mut t = BlockLayerTracker::new();
        t.flush();
        assert_eq!(t.flushes, 1);
    }

    #[test]
    fn f_blk_006_discard() {
        let mut t = BlockLayerTracker::new();
        t.discard();
        assert_eq!(t.discards, 1);
    }

    #[test]
    fn f_blk_007_total_bytes() {
        let mut t = BlockLayerTracker::new();
        t.read(1000);
        t.write(2000);
        assert_eq!(t.total_bytes(), 3000);
    }

    #[test]
    fn f_blk_008_total_ops() {
        let mut t = BlockLayerTracker::new();
        t.read(100);
        t.write(100);
        t.flush();
        t.discard();
        assert_eq!(t.total_ops(), 4);
    }

    #[test]
    fn f_blk_009_nvme() {
        let t = BlockLayerTracker::for_nvme();
        assert_eq!(t.read_bytes, 0);
    }

    #[test]
    fn f_blk_010_scsi() {
        let t = BlockLayerTracker::for_scsi();
        assert_eq!(t.read_bytes, 0);
    }

    #[test]
    fn f_blk_011_reset() {
        let mut t = BlockLayerTracker::new();
        t.read(4096);
        t.reset();
        assert_eq!(t.read_bytes, 0);
    }

    #[test]
    fn f_blk_012_clone() {
        let mut t = BlockLayerTracker::new();
        t.read(4096);
        let copy = t.clone();
        assert_eq!(t.read_bytes, copy.read_bytes);
    }
}
/// Submission/completion statistics for an NVMe queue pair.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct NvmeTracker {
    /// Commands submitted.
    pub submissions: u64,
    /// Commands completed.
    pub completions: u64,
    /// Admin-queue commands.
    pub admin_cmds: u64,
    /// I/O-queue commands.
    pub io_cmds: u64,
    /// Commands currently in flight.
    pub queue_depth: u32,
    /// Highest in-flight count observed.
    pub max_queue_depth: u32,
}

impl NvmeTracker {
    /// Returns a tracker with all counters zeroed.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            submissions: 0,
            completions: 0,
            admin_cmds: 0,
            io_cmds: 0,
            queue_depth: 0,
            max_queue_depth: 0,
        }
    }

    /// Named constructor for PCIe Gen3 devices; identical to `new()`.
    #[must_use]
    pub const fn for_gen3() -> Self {
        Self::new()
    }

    /// Named constructor for PCIe Gen4 devices; identical to `new()`.
    #[must_use]
    pub const fn for_gen4() -> Self {
        Self::new()
    }

    /// Clamps a u64 event count to the u32 range of `queue_depth`.
    /// The previous `count as u32` cast silently dropped the high bits for
    /// counts above `u32::MAX`.
    const fn clamp_depth(count: u64) -> u32 {
        if count > u32::MAX as u64 {
            u32::MAX
        } else {
            count as u32
        }
    }

    /// Records `count` submitted commands and updates the peak queue depth.
    pub fn submit(&mut self, count: u64) {
        self.submissions += count;
        self.queue_depth = self.queue_depth.saturating_add(Self::clamp_depth(count));
        if self.queue_depth > self.max_queue_depth {
            self.max_queue_depth = self.queue_depth;
        }
    }

    /// Records `count` completed commands and lowers the queue depth.
    pub fn complete(&mut self, count: u64) {
        self.completions += count;
        self.queue_depth = self.queue_depth.saturating_sub(Self::clamp_depth(count));
    }

    /// Counts one admin command.
    pub fn admin(&mut self) {
        self.admin_cmds += 1;
    }

    /// Counts one I/O command.
    pub fn io(&mut self) {
        self.io_cmds += 1;
    }

    /// Commands submitted but not yet completed.
    #[must_use]
    pub fn pending(&self) -> u64 {
        self.submissions.saturating_sub(self.completions)
    }

    /// True when the current queue depth has reached `threshold`.
    #[must_use]
    pub fn is_saturated(&self, threshold: u32) -> bool {
        self.queue_depth >= threshold
    }

    /// Returns every counter to zero, including `max_queue_depth`, which the
    /// previous version left stale after all other state was cleared.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod nvme_tests {
    use super::*;

    #[test]
    fn f_nvme_001_new() {
        let t = NvmeTracker::new();
        assert_eq!(t.submissions, 0);
    }

    #[test]
    fn f_nvme_002_default() {
        let t = NvmeTracker::default();
        assert_eq!(t.submissions, 0);
    }

    #[test]
    fn f_nvme_003_submit() {
        let mut t = NvmeTracker::new();
        t.submit(4);
        assert_eq!(t.submissions, 4);
        assert_eq!(t.queue_depth, 4);
    }

    #[test]
    fn f_nvme_004_complete() {
        let mut t = NvmeTracker::new();
        t.submit(4);
        t.complete(2);
        assert_eq!(t.completions, 2);
        assert_eq!(t.queue_depth, 2);
    }

    #[test]
    fn f_nvme_005_admin() {
        let mut t = NvmeTracker::new();
        t.admin();
        assert_eq!(t.admin_cmds, 1);
    }

    #[test]
    fn f_nvme_006_io() {
        let mut t = NvmeTracker::new();
        t.io();
        assert_eq!(t.io_cmds, 1);
    }

    #[test]
    fn f_nvme_007_pending() {
        let mut t = NvmeTracker::new();
        t.submit(10);
        t.complete(3);
        assert_eq!(t.pending(), 7);
    }

    #[test]
    fn f_nvme_008_max_depth() {
        let mut t = NvmeTracker::new();
        t.submit(10);
        t.complete(5);
        t.submit(2);
        assert_eq!(t.max_queue_depth, 10);
    }

    #[test]
    fn f_nvme_009_gen3() {
        let t = NvmeTracker::for_gen3();
        assert_eq!(t.submissions, 0);
    }

    #[test]
    fn f_nvme_010_gen4() {
        let t = NvmeTracker::for_gen4();
        assert_eq!(t.submissions, 0);
    }

    #[test]
    fn f_nvme_011_reset() {
        let mut t = NvmeTracker::new();
        t.submit(10);
        t.reset();
        assert_eq!(t.submissions, 0);
    }

    #[test]
    fn f_nvme_012_clone() {
        let mut t = NvmeTracker::new();
        t.submit(10);
        let copy = t.clone();
        assert_eq!(t.submissions, copy.submissions);
    }
}
/// Command and completion-status counters for a SCSI host.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct ScsiTracker {
    /// Commands issued.
    pub commands: u64,
    /// Completions with GOOD status.
    pub good_status: u64,
    /// Completions with CHECK CONDITION status.
    pub check_condition: u64,
    /// Completions with BUSY status.
    pub busy: u64,
    /// Commands that timed out.
    pub timeouts: u64,
    /// Device resets issued.
    pub resets: u64,
}

impl ScsiTracker {
    /// Returns a tracker with all counters zeroed.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            commands: 0,
            good_status: 0,
            check_condition: 0,
            busy: 0,
            timeouts: 0,
            resets: 0,
        }
    }

    /// Named constructor for SAS transports; identical to `new()`.
    #[must_use]
    pub const fn for_sas() -> Self {
        Self::new()
    }

    /// Named constructor for SATA transports; identical to `new()`.
    #[must_use]
    pub const fn for_sata() -> Self {
        Self::new()
    }

    /// Counts one issued command.
    pub fn command(&mut self) {
        self.commands += 1;
    }

    /// Counts one GOOD completion.
    pub fn complete_good(&mut self) {
        self.good_status += 1;
    }

    /// Counts one CHECK CONDITION completion.
    pub fn check(&mut self) {
        self.check_condition += 1;
    }

    /// Counts one BUSY completion.
    pub fn busy(&mut self) {
        self.busy += 1;
    }

    /// Counts one command timeout.
    pub fn timeout(&mut self) {
        self.timeouts += 1;
    }

    /// Counts one device reset.
    pub fn reset_device(&mut self) {
        self.resets += 1;
    }

    /// Fraction (0.0–1.0) of commands that ended in check/busy/timeout.
    /// Returns 0.0 when no commands have been issued.
    #[must_use]
    pub fn error_rate(&self) -> f64 {
        if self.commands == 0 {
            return 0.0;
        }
        let errors = self.check_condition + self.busy + self.timeouts;
        (errors as f64) / (self.commands as f64)
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod scsi_tests {
    use super::*;

    #[test]
    fn f_scsi_001_new() {
        let t = ScsiTracker::new();
        assert_eq!(t.commands, 0);
    }

    #[test]
    fn f_scsi_002_default() {
        let t = ScsiTracker::default();
        assert_eq!(t.commands, 0);
    }

    #[test]
    fn f_scsi_003_command() {
        let mut t = ScsiTracker::new();
        t.command();
        assert_eq!(t.commands, 1);
    }

    #[test]
    fn f_scsi_004_good() {
        let mut t = ScsiTracker::new();
        t.complete_good();
        assert_eq!(t.good_status, 1);
    }

    #[test]
    fn f_scsi_005_check() {
        let mut t = ScsiTracker::new();
        t.check();
        assert_eq!(t.check_condition, 1);
    }

    #[test]
    fn f_scsi_006_busy() {
        let mut t = ScsiTracker::new();
        t.busy();
        assert_eq!(t.busy, 1);
    }

    #[test]
    fn f_scsi_007_timeout() {
        let mut t = ScsiTracker::new();
        t.timeout();
        assert_eq!(t.timeouts, 1);
    }

    #[test]
    fn f_scsi_008_error_rate() {
        let mut t = ScsiTracker::new();
        for _ in 0..100 {
            t.command();
        }
        t.check();
        t.timeout();
        assert!((t.error_rate() - 0.02).abs() < 0.001);
    }

    #[test]
    fn f_scsi_009_sas() {
        let t = ScsiTracker::for_sas();
        assert_eq!(t.commands, 0);
    }

    #[test]
    fn f_scsi_010_sata() {
        let t = ScsiTracker::for_sata();
        assert_eq!(t.commands, 0);
    }

    #[test]
    fn f_scsi_011_reset() {
        let mut t = ScsiTracker::new();
        t.command();
        t.reset();
        assert_eq!(t.commands, 0);
    }

    #[test]
    fn f_scsi_012_clone() {
        let mut t = ScsiTracker::new();
        t.command();
        let copy = t.clone();
        assert_eq!(t.commands, copy.commands);
    }
}
/// Health and sync state of an MD (software RAID) array.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct MdTracker {
    /// Members configured in the array.
    pub total_members: u32,
    /// Members currently active.
    pub active_members: u32,
    /// Resync progress, 0–100.
    pub sync_percent: u8,
    /// True while a resync is in progress.
    pub syncing: bool,
    /// Read errors observed.
    pub read_errors: u64,
    /// Write errors observed.
    pub write_errors: u64,
}

impl MdTracker {
    /// Returns a tracker for a fully synced, empty array.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            total_members: 0,
            active_members: 0,
            sync_percent: 100,
            syncing: false,
            read_errors: 0,
            write_errors: 0,
        }
    }

    /// Named constructor for RAID0 arrays; identical to `new()`.
    #[must_use]
    pub const fn for_raid0() -> Self {
        Self::new()
    }

    /// Named constructor for RAID1 arrays; identical to `new()`.
    #[must_use]
    pub const fn for_raid1() -> Self {
        Self::new()
    }

    /// Overwrites the configured and active member counts.
    pub fn set_members(&mut self, total: u32, active: u32) {
        self.total_members = total;
        self.active_members = active;
    }

    /// Updates resync progress; values above 100 are clamped, and the
    /// syncing flag clears once progress reaches 100.
    pub fn sync_progress(&mut self, percent: u8) {
        let clamped = percent.min(100);
        self.sync_percent = clamped;
        self.syncing = clamped < 100;
    }

    /// Counts one read error.
    pub fn read_error(&mut self) {
        self.read_errors += 1;
    }

    /// Counts one write error.
    pub fn write_error(&mut self) {
        self.write_errors += 1;
    }

    /// True when fewer members are active than configured.
    #[must_use]
    pub fn is_degraded(&self) -> bool {
        self.active_members < self.total_members
    }

    /// True when all members are active and no resync is running.
    #[must_use]
    pub fn is_healthy(&self) -> bool {
        self.active_members == self.total_members && !self.syncing
    }

    /// Read plus write errors.
    #[must_use]
    pub fn total_errors(&self) -> u64 {
        self.read_errors + self.write_errors
    }

    /// Zeroes only the error counters; membership and sync state persist.
    pub fn reset(&mut self) {
        self.read_errors = 0;
        self.write_errors = 0;
    }
}
#[cfg(test)]
mod md_tests {
    use super::*;

    #[test]
    fn f_md_001_new() {
        let t = MdTracker::new();
        assert_eq!(t.total_members, 0);
    }

    #[test]
    fn f_md_002_default() {
        let t = MdTracker::default();
        assert_eq!(t.total_members, 0);
    }

    #[test]
    fn f_md_003_members() {
        let mut t = MdTracker::new();
        t.set_members(4, 4);
        assert_eq!(t.total_members, 4);
        assert_eq!(t.active_members, 4);
    }

    #[test]
    fn f_md_004_sync() {
        let mut t = MdTracker::new();
        t.sync_progress(50);
        assert_eq!(t.sync_percent, 50);
        assert!(t.syncing);
    }

    #[test]
    fn f_md_005_read_error() {
        let mut t = MdTracker::new();
        t.read_error();
        assert_eq!(t.read_errors, 1);
    }

    #[test]
    fn f_md_006_write_error() {
        let mut t = MdTracker::new();
        t.write_error();
        assert_eq!(t.write_errors, 1);
    }

    #[test]
    fn f_md_007_degraded() {
        let mut t = MdTracker::new();
        t.set_members(4, 3);
        assert!(t.is_degraded());
    }

    #[test]
    fn f_md_008_healthy() {
        let mut t = MdTracker::new();
        t.set_members(4, 4);
        t.sync_progress(100);
        assert!(t.is_healthy());
    }

    #[test]
    fn f_md_009_raid0() {
        let t = MdTracker::for_raid0();
        assert_eq!(t.total_members, 0);
    }

    #[test]
    fn f_md_010_raid1() {
        let t = MdTracker::for_raid1();
        assert_eq!(t.total_members, 0);
    }

    #[test]
    fn f_md_011_reset() {
        let mut t = MdTracker::new();
        t.read_error();
        t.reset();
        assert_eq!(t.read_errors, 0);
    }

    #[test]
    fn f_md_012_clone() {
        let mut t = MdTracker::new();
        t.set_members(4, 4);
        let copy = t.clone();
        assert_eq!(t.total_members, copy.total_members);
    }
}
/// Operation counters for the virtual filesystem layer.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct VfsTracker {
    /// Name lookups.
    pub lookups: u64,
    /// File/directory creations.
    pub creates: u64,
    /// Unlink (delete) operations.
    pub unlinks: u64,
    /// Rename operations.
    pub renames: u64,
    /// File opens.
    pub opens: u64,
    /// File closes.
    pub closes: u64,
}

impl VfsTracker {
    /// Returns a tracker with all counters zeroed.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            lookups: 0,
            creates: 0,
            unlinks: 0,
            renames: 0,
            opens: 0,
            closes: 0,
        }
    }

    /// Named constructor for ext4 filesystems; identical to `new()`.
    #[must_use]
    pub const fn for_ext4() -> Self {
        Self::new()
    }

    /// Named constructor for XFS filesystems; identical to `new()`.
    #[must_use]
    pub const fn for_xfs() -> Self {
        Self::new()
    }

    /// Counts one lookup.
    pub fn lookup(&mut self) {
        self.lookups += 1;
    }

    /// Counts one create.
    pub fn create(&mut self) {
        self.creates += 1;
    }

    /// Counts one unlink.
    pub fn unlink(&mut self) {
        self.unlinks += 1;
    }

    /// Counts one rename.
    pub fn rename(&mut self) {
        self.renames += 1;
    }

    /// Counts one open.
    pub fn open(&mut self) {
        self.opens += 1;
    }

    /// Counts one close.
    pub fn close(&mut self) {
        self.closes += 1;
    }

    /// Sum of all six operation counters.
    #[must_use]
    pub fn total_ops(&self) -> u64 {
        self.lookups + self.creates + self.unlinks + self.renames + self.opens + self.closes
    }

    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod vfs_tests {
    use super::*;

    #[test]
    fn f_vfs_001_new() {
        let t = VfsTracker::new();
        assert_eq!(t.lookups, 0);
    }

    #[test]
    fn f_vfs_002_default() {
        let t = VfsTracker::default();
        assert_eq!(t.lookups, 0);
    }

    #[test]
    fn f_vfs_003_lookup() {
        let mut t = VfsTracker::new();
        t.lookup();
        assert_eq!(t.lookups, 1);
    }

    #[test]
    fn f_vfs_004_create() {
        let mut t = VfsTracker::new();
        t.create();
        assert_eq!(t.creates, 1);
    }

    #[test]
    fn f_vfs_005_unlink() {
        let mut t = VfsTracker::new();
        t.unlink();
        assert_eq!(t.unlinks, 1);
    }

    #[test]
    fn f_vfs_006_rename() {
        let mut t = VfsTracker::new();
        t.rename();
        assert_eq!(t.renames, 1);
    }

    #[test]
    fn f_vfs_007_open() {
        let mut t = VfsTracker::new();
        t.open();
        assert_eq!(t.opens, 1);
    }

    #[test]
    fn f_vfs_008_close() {
        let mut t = VfsTracker::new();
        t.close();
        assert_eq!(t.closes, 1);
    }

    #[test]
    fn f_vfs_009_ext4() {
        let t = VfsTracker::for_ext4();
        assert_eq!(t.lookups, 0);
    }

    #[test]
    fn f_vfs_010_xfs() {
        let t = VfsTracker::for_xfs();
        assert_eq!(t.lookups, 0);
    }

    #[test]
    fn f_vfs_011_reset() {
        let mut t = VfsTracker::new();
        t.lookup();
        t.reset();
        assert_eq!(t.lookups, 0);
    }

    #[test]
    fn f_vfs_012_clone() {
        let mut t = VfsTracker::new();
        t.lookup();
        let copy = t.clone();
        assert_eq!(t.lookups, copy.lookups);
    }
}
/// Lifecycle counters for inodes in a filesystem.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct InodeTracker {
    /// Inodes allocated.
    pub allocs: u64,
    /// Inodes freed.
    pub frees: u64,
    /// Inodes currently live.
    pub in_use: u64,
    /// Highest live count observed.
    pub peak_in_use: u64,
    /// Maximum inodes the filesystem can hold.
    pub capacity: u64,
    /// Inodes evicted from the cache.
    pub evictions: u64,
}

impl InodeTracker {
    /// Returns a tracker with all counters zeroed.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            allocs: 0,
            frees: 0,
            in_use: 0,
            peak_in_use: 0,
            capacity: 0,
            evictions: 0,
        }
    }

    /// Named constructor for ext4 filesystems; identical to `new()`.
    #[must_use]
    pub const fn for_ext4() -> Self {
        Self::new()
    }

    /// Named constructor for btrfs filesystems; identical to `new()`.
    #[must_use]
    pub const fn for_btrfs() -> Self {
        Self::new()
    }

    /// Records one allocation and refreshes the peak live count.
    pub fn alloc(&mut self) {
        self.allocs += 1;
        self.in_use += 1;
        if self.in_use > self.peak_in_use {
            self.peak_in_use = self.in_use;
        }
    }

    /// Records one free; the live count never underflows.
    pub fn free(&mut self) {
        self.frees += 1;
        self.in_use = self.in_use.saturating_sub(1);
    }

    /// Counts one cache eviction.
    pub fn evict(&mut self) {
        self.evictions += 1;
    }

    /// Overwrites the capacity.
    pub fn set_capacity(&mut self, cap: u64) {
        self.capacity = cap;
    }

    /// Percentage (0.0–100.0) of capacity in use; 0.0 when capacity is unset.
    #[must_use]
    pub fn utilization(&self) -> f64 {
        if self.capacity == 0 {
            return 0.0;
        }
        (self.in_use as f64) / (self.capacity as f64) * 100.0
    }

    /// Zeroes only the event counters; live count, peak, and capacity persist.
    pub fn reset(&mut self) {
        self.allocs = 0;
        self.frees = 0;
        self.evictions = 0;
    }
}
#[cfg(test)]
mod inode_tests {
    use super::*;

    #[test]
    fn f_inode_001_new() {
        let t = InodeTracker::new();
        assert_eq!(t.allocs, 0);
    }

    #[test]
    fn f_inode_002_default() {
        let t = InodeTracker::default();
        assert_eq!(t.allocs, 0);
    }

    #[test]
    fn f_inode_003_alloc() {
        let mut t = InodeTracker::new();
        t.alloc();
        assert_eq!(t.allocs, 1);
        assert_eq!(t.in_use, 1);
    }

    #[test]
    fn f_inode_004_free() {
        let mut t = InodeTracker::new();
        t.alloc();
        t.free();
        assert_eq!(t.frees, 1);
        assert_eq!(t.in_use, 0);
    }

    #[test]
    fn f_inode_005_evict() {
        let mut t = InodeTracker::new();
        t.evict();
        assert_eq!(t.evictions, 1);
    }

    #[test]
    fn f_inode_006_peak() {
        let mut t = InodeTracker::new();
        t.alloc();
        t.alloc();
        t.free();
        assert_eq!(t.peak_in_use, 2);
    }

    #[test]
    fn f_inode_007_capacity() {
        let mut t = InodeTracker::new();
        t.set_capacity(1000);
        assert_eq!(t.capacity, 1000);
    }

    #[test]
    fn f_inode_008_utilization() {
        let mut t = InodeTracker::new();
        t.set_capacity(100);
        t.alloc();
        assert!((t.utilization() - 1.0).abs() < 0.01);
    }

    #[test]
    fn f_inode_009_ext4() {
        let t = InodeTracker::for_ext4();
        assert_eq!(t.allocs, 0);
    }

    #[test]
    fn f_inode_010_btrfs() {
        let t = InodeTracker::for_btrfs();
        assert_eq!(t.allocs, 0);
    }

    #[test]
    fn f_inode_011_reset() {
        let mut t = InodeTracker::new();
        t.alloc();
        t.reset();
        assert_eq!(t.allocs, 0);
    }

    #[test]
    fn f_inode_012_clone() {
        let mut t = InodeTracker::new();
        t.alloc();
        let copy = t.clone();
        assert_eq!(t.allocs, copy.allocs);
    }
}
/// Hit/miss statistics for the dentry (directory entry) cache.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct DentryTracker {
    /// Lookup hits.
    pub hits: u64,
    /// Lookup misses.
    pub misses: u64,
    /// Negative entries recorded.
    pub negative: u64,
    /// Entries currently cached.
    pub cached: u64,
    /// Reclaim passes performed.
    pub reclaims: u64,
    /// Highest cached count observed.
    pub peak_cached: u64,
}

impl DentryTracker {
    /// Returns a tracker with all counters zeroed.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            hits: 0,
            misses: 0,
            negative: 0,
            cached: 0,
            reclaims: 0,
            peak_cached: 0,
        }
    }

    /// Named constructor for the dcache; identical to `new()`.
    #[must_use]
    pub const fn for_dcache() -> Self {
        Self::new()
    }

    /// Named constructor for path-walk tracking; identical to `new()`.
    #[must_use]
    pub const fn for_pathwalk() -> Self {
        Self::new()
    }

    /// Counts one lookup hit.
    pub fn lookup_hit(&mut self) {
        self.hits += 1;
    }

    /// Counts one lookup miss.
    pub fn lookup_miss(&mut self) {
        self.misses += 1;
    }

    /// Counts one negative entry.
    pub fn negative_entry(&mut self) {
        self.negative += 1;
    }

    /// Overwrites the cached count and refreshes the observed peak.
    pub fn set_cached(&mut self, count: u64) {
        self.cached = count;
        if count > self.peak_cached {
            self.peak_cached = count;
        }
    }

    /// Counts one reclaim pass.
    pub fn reclaim(&mut self) {
        self.reclaims += 1;
    }

    /// Hit percentage (0.0–100.0) over all lookups; 0.0 when no lookups.
    #[must_use]
    pub fn hit_rate(&self) -> f64 {
        let total = self.hits + self.misses;
        if total == 0 {
            return 0.0;
        }
        (self.hits as f64) / (total as f64) * 100.0
    }

    /// Zeroes only the event counters; cached count and its peak persist.
    pub fn reset(&mut self) {
        self.hits = 0;
        self.misses = 0;
        self.negative = 0;
        self.reclaims = 0;
    }
}
#[cfg(test)]
mod dentry_tests {
    use super::*;

    #[test]
    fn f_dentry_001_new() {
        let t = DentryTracker::new();
        assert_eq!(t.hits, 0);
    }

    #[test]
    fn f_dentry_002_default() {
        let t = DentryTracker::default();
        assert_eq!(t.hits, 0);
    }

    #[test]
    fn f_dentry_003_hit() {
        let mut t = DentryTracker::new();
        t.lookup_hit();
        assert_eq!(t.hits, 1);
    }

    #[test]
    fn f_dentry_004_miss() {
        let mut t = DentryTracker::new();
        t.lookup_miss();
        assert_eq!(t.misses, 1);
    }

    #[test]
    fn f_dentry_005_negative() {
        let mut t = DentryTracker::new();
        t.negative_entry();
        assert_eq!(t.negative, 1);
    }

    #[test]
    fn f_dentry_006_cached() {
        let mut t = DentryTracker::new();
        t.set_cached(1000);
        assert_eq!(t.cached, 1000);
    }

    #[test]
    fn f_dentry_007_hit_rate() {
        let mut t = DentryTracker::new();
        t.lookup_hit();
        t.lookup_miss();
        assert!((t.hit_rate() - 50.0).abs() < 0.01);
    }

    #[test]
    fn f_dentry_008_reclaim() {
        let mut t = DentryTracker::new();
        t.reclaim();
        assert_eq!(t.reclaims, 1);
    }

    #[test]
    fn f_dentry_009_dcache() {
        let t = DentryTracker::for_dcache();
        assert_eq!(t.hits, 0);
    }

    #[test]
    fn f_dentry_010_pathwalk() {
        let t = DentryTracker::for_pathwalk();
        assert_eq!(t.hits, 0);
    }

    #[test]
    fn f_dentry_011_reset() {
        let mut t = DentryTracker::new();
        t.lookup_hit();
        t.reset();
        assert_eq!(t.hits, 0);
    }

    #[test]
    fn f_dentry_012_clone() {
        let mut t = DentryTracker::new();
        t.lookup_hit();
        let copy = t.clone();
        assert_eq!(t.hits, copy.hits);
    }
}
/// Counters for extent allocation behavior (allocation sizes, merges,
/// splits) in extent-based filesystems.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct ExtentTracker {
    /// Number of extent allocations.
    pub allocs: u64,
    /// Number of extent merges.
    pub merges: u64,
    /// Number of extent splits.
    pub splits: u64,
    /// Total blocks allocated across all extents.
    pub blocks: u64,
    /// Mean extent size in blocks (integer division of `blocks / allocs`).
    pub avg_size: u32,
    /// Largest single extent allocated, in blocks.
    pub max_size: u32,
}
impl ExtentTracker {
    /// Creates a tracker with every counter at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            allocs: 0,
            merges: 0,
            splits: 0,
            blocks: 0,
            avg_size: 0,
            max_size: 0,
        }
    }
    /// Preset for ext4.
    #[must_use]
    pub const fn for_ext4() -> Self {
        Self::new()
    }
    /// Preset for XFS.
    #[must_use]
    pub const fn for_xfs() -> Self {
        Self::new()
    }
    /// Records an allocation of `blocks` blocks, maintaining the running
    /// maximum and (truncating) average extent size.
    pub fn alloc(&mut self, blocks: u32) {
        self.allocs += 1;
        self.blocks += blocks as u64;
        if blocks > self.max_size {
            self.max_size = blocks;
        }
        // `allocs` was just incremented, so this division is never by zero.
        self.avg_size = (self.blocks / self.allocs) as u32;
    }
    /// Records an extent merge.
    pub fn merge(&mut self) {
        self.merges += 1;
    }
    /// Records an extent split.
    pub fn split(&mut self) {
        self.splits += 1;
    }
    /// Splits per allocation; 0.0 before any allocation.
    #[must_use]
    pub fn fragmentation(&self) -> f64 {
        if self.allocs == 0 {
            return 0.0;
        }
        (self.splits as f64) / (self.allocs as f64)
    }
    /// Returns every field to zero.
    ///
    /// Fix: previously `max_size` was the only field not cleared, so a stale
    /// maximum from before the reset leaked into later readings even though
    /// the related `avg_size` was zeroed.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod extent_tests {
    use super::*;
    // One observable effect per test, on a fresh tracker.
    #[test]
    fn f_extent_001_new() {
        assert_eq!(ExtentTracker::new().allocs, 0);
    }
    #[test]
    fn f_extent_002_default() {
        assert_eq!(ExtentTracker::default().allocs, 0);
    }
    #[test]
    fn f_extent_003_alloc() {
        let mut t = ExtentTracker::new();
        t.alloc(16);
        assert_eq!(t.allocs, 1);
        assert_eq!(t.blocks, 16);
    }
    #[test]
    fn f_extent_004_merge() {
        let mut t = ExtentTracker::new();
        t.merge();
        assert_eq!(t.merges, 1);
    }
    #[test]
    fn f_extent_005_split() {
        let mut t = ExtentTracker::new();
        t.split();
        assert_eq!(t.splits, 1);
    }
    #[test]
    fn f_extent_006_max_size() {
        let mut t = ExtentTracker::new();
        t.alloc(8);
        t.alloc(32);
        t.alloc(16);
        assert_eq!(t.max_size, 32);
    }
    #[test]
    fn f_extent_007_avg_size() {
        let mut t = ExtentTracker::new();
        t.alloc(10);
        t.alloc(20);
        assert_eq!(t.avg_size, 15);
    }
    #[test]
    fn f_extent_008_fragmentation() {
        let mut t = ExtentTracker::new();
        t.alloc(16);
        t.alloc(16);
        t.split();
        assert!((t.fragmentation() - 0.5).abs() < 0.01);
    }
    #[test]
    fn f_extent_009_ext4() {
        assert_eq!(ExtentTracker::for_ext4().allocs, 0);
    }
    #[test]
    fn f_extent_010_xfs() {
        assert_eq!(ExtentTracker::for_xfs().allocs, 0);
    }
    #[test]
    fn f_extent_011_reset() {
        let mut t = ExtentTracker::new();
        t.alloc(16);
        t.reset();
        assert_eq!(t.allocs, 0);
    }
    #[test]
    fn f_extent_012_clone() {
        let mut t = ExtentTracker::new();
        t.alloc(16);
        let copy = t.clone();
        assert_eq!(t.allocs, copy.allocs);
    }
}
/// Counters describing TCP connection lifecycle and transmit volume.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct TcpTracker {
    pub connections: u64,
    pub established: u64,
    pub retransmits: u64,
    pub resets: u64,
    pub timeouts: u64,
    pub bytes_tx: u64,
}
impl TcpTracker {
    /// Fresh tracker with every counter at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            connections: 0,
            established: 0,
            retransmits: 0,
            resets: 0,
            timeouts: 0,
            bytes_tx: 0,
        }
    }
    /// Preset for IPv4 sockets.
    #[must_use]
    pub const fn for_ipv4() -> Self {
        Self::new()
    }
    /// Preset for IPv6 sockets.
    #[must_use]
    pub const fn for_ipv6() -> Self {
        Self::new()
    }
    /// Counts a connection attempt.
    pub fn connect(&mut self) {
        self.connections += 1;
    }
    /// Counts a connection reaching the established state.
    pub fn established(&mut self) {
        self.established += 1;
    }
    /// Counts one retransmitted segment.
    pub fn retransmit(&mut self) {
        self.retransmits += 1;
    }
    /// Counts a connection torn down by reset.
    pub fn reset_conn(&mut self) {
        self.resets += 1;
    }
    /// Counts a connection timeout.
    pub fn timeout(&mut self) {
        self.timeouts += 1;
    }
    /// Adds `nbytes` to the transmitted-byte total.
    pub fn transmit(&mut self, nbytes: u64) {
        self.bytes_tx += nbytes;
    }
    /// Retransmits per connection attempt; 0.0 before any attempt.
    #[must_use]
    pub fn retransmit_rate(&self) -> f64 {
        match self.connections {
            0 => 0.0,
            conns => (self.retransmits as f64) / (conns as f64),
        }
    }
    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod tcp_tests {
    use super::*;
    // One observable effect per test, on a fresh tracker.
    #[test]
    fn f_tcp_001_new() {
        assert_eq!(TcpTracker::new().connections, 0);
    }
    #[test]
    fn f_tcp_002_default() {
        assert_eq!(TcpTracker::default().connections, 0);
    }
    #[test]
    fn f_tcp_003_connect() {
        let mut t = TcpTracker::new();
        t.connect();
        assert_eq!(t.connections, 1);
    }
    #[test]
    fn f_tcp_004_established() {
        let mut t = TcpTracker::new();
        t.established();
        assert_eq!(t.established, 1);
    }
    #[test]
    fn f_tcp_005_retransmit() {
        let mut t = TcpTracker::new();
        t.retransmit();
        assert_eq!(t.retransmits, 1);
    }
    #[test]
    fn f_tcp_006_reset() {
        let mut t = TcpTracker::new();
        t.reset_conn();
        assert_eq!(t.resets, 1);
    }
    #[test]
    fn f_tcp_007_timeout() {
        let mut t = TcpTracker::new();
        t.timeout();
        assert_eq!(t.timeouts, 1);
    }
    #[test]
    fn f_tcp_008_bytes() {
        let mut t = TcpTracker::new();
        t.transmit(1000);
        assert_eq!(t.bytes_tx, 1000);
    }
    #[test]
    fn f_tcp_009_ipv4() {
        assert_eq!(TcpTracker::for_ipv4().connections, 0);
    }
    #[test]
    fn f_tcp_010_ipv6() {
        assert_eq!(TcpTracker::for_ipv6().connections, 0);
    }
    #[test]
    fn f_tcp_011_reset() {
        let mut t = TcpTracker::new();
        t.connect();
        t.reset();
        assert_eq!(t.connections, 0);
    }
    #[test]
    fn f_tcp_012_clone() {
        let mut t = TcpTracker::new();
        t.connect();
        let copy = t.clone();
        assert_eq!(t.connections, copy.connections);
    }
}
/// Counters for UDP datagram traffic, drops and buffer errors.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct UdpTracker {
    pub packets_tx: u64,
    pub packets_rx: u64,
    pub bytes_tx: u64,
    pub bytes_rx: u64,
    pub drops: u64,
    pub buf_errors: u64,
}
impl UdpTracker {
    /// Fresh tracker with every counter at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            packets_tx: 0,
            packets_rx: 0,
            bytes_tx: 0,
            bytes_rx: 0,
            drops: 0,
            buf_errors: 0,
        }
    }
    /// Preset for IPv4 sockets.
    #[must_use]
    pub const fn for_ipv4() -> Self {
        Self::new()
    }
    /// Preset for IPv6 sockets.
    #[must_use]
    pub const fn for_ipv6() -> Self {
        Self::new()
    }
    /// Counts one transmitted datagram of `nbytes` bytes.
    pub fn send(&mut self, nbytes: u64) {
        self.packets_tx += 1;
        self.bytes_tx += nbytes;
    }
    /// Counts one received datagram of `nbytes` bytes.
    pub fn recv(&mut self, nbytes: u64) {
        self.packets_rx += 1;
        self.bytes_rx += nbytes;
    }
    /// Counts a dropped packet.
    pub fn drop_pkt(&mut self) {
        self.drops += 1;
    }
    /// Counts a buffer error.
    pub fn buf_error(&mut self) {
        self.buf_errors += 1;
    }
    /// Drops per packet (tx + rx); 0.0 before any traffic.
    #[must_use]
    pub fn drop_rate(&self) -> f64 {
        match self.packets_tx + self.packets_rx {
            0 => 0.0,
            total => (self.drops as f64) / (total as f64),
        }
    }
    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod udp_tests {
    use super::*;
    // One observable effect per test, on a fresh tracker.
    #[test]
    fn f_udp_001_new() {
        assert_eq!(UdpTracker::new().packets_tx, 0);
    }
    #[test]
    fn f_udp_002_default() {
        assert_eq!(UdpTracker::default().packets_tx, 0);
    }
    #[test]
    fn f_udp_003_send() {
        let mut t = UdpTracker::new();
        t.send(1000);
        assert_eq!(t.packets_tx, 1);
        assert_eq!(t.bytes_tx, 1000);
    }
    #[test]
    fn f_udp_004_recv() {
        let mut t = UdpTracker::new();
        t.recv(500);
        assert_eq!(t.packets_rx, 1);
        assert_eq!(t.bytes_rx, 500);
    }
    #[test]
    fn f_udp_005_drop() {
        let mut t = UdpTracker::new();
        t.drop_pkt();
        assert_eq!(t.drops, 1);
    }
    #[test]
    fn f_udp_006_buf_error() {
        let mut t = UdpTracker::new();
        t.buf_error();
        assert_eq!(t.buf_errors, 1);
    }
    #[test]
    fn f_udp_007_drop_rate() {
        let mut t = UdpTracker::new();
        t.send(100);
        t.recv(100);
        t.drop_pkt();
        assert!((t.drop_rate() - 0.5).abs() < 0.01);
    }
    #[test]
    fn f_udp_008_total_bytes() {
        let mut t = UdpTracker::new();
        t.send(1000);
        t.recv(500);
        assert_eq!(t.bytes_tx + t.bytes_rx, 1500);
    }
    #[test]
    fn f_udp_009_ipv4() {
        assert_eq!(UdpTracker::for_ipv4().packets_tx, 0);
    }
    #[test]
    fn f_udp_010_ipv6() {
        assert_eq!(UdpTracker::for_ipv6().packets_tx, 0);
    }
    #[test]
    fn f_udp_011_reset() {
        let mut t = UdpTracker::new();
        t.send(1000);
        t.reset();
        assert_eq!(t.packets_tx, 0);
    }
    #[test]
    fn f_udp_012_clone() {
        let mut t = UdpTracker::new();
        t.send(1000);
        let copy = t.clone();
        assert_eq!(t.packets_tx, copy.packets_tx);
    }
}
/// Counters for socket-buffer (skb) allocation activity.
///
/// `in_flight` is a gauge of currently-live buffers; `peak_in_flight` is its
/// high-water mark. The remaining fields are event counters.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct SkbTracker {
    /// Buffers allocated.
    pub allocs: u64,
    /// Buffers freed.
    pub frees: u64,
    /// Buffers cloned.
    pub clones: u64,
    /// Total bytes requested across all allocations.
    pub bytes_alloc: u64,
    /// Buffers currently live (gauge).
    pub in_flight: u64,
    /// Highest value `in_flight` has reached.
    pub peak_in_flight: u64,
}
impl SkbTracker {
    /// Creates a tracker with every counter at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            allocs: 0,
            frees: 0,
            clones: 0,
            bytes_alloc: 0,
            in_flight: 0,
            peak_in_flight: 0,
        }
    }
    /// Preset for the receive path.
    #[must_use]
    pub const fn for_rx() -> Self {
        Self::new()
    }
    /// Preset for the transmit path.
    #[must_use]
    pub const fn for_tx() -> Self {
        Self::new()
    }
    /// Updates the high-water mark after `in_flight` grew.
    fn update_peak(&mut self) {
        if self.in_flight > self.peak_in_flight {
            self.peak_in_flight = self.in_flight;
        }
    }
    /// Records an allocation of `bytes` bytes.
    pub fn alloc(&mut self, bytes: u64) {
        self.allocs += 1;
        self.bytes_alloc += bytes;
        self.in_flight += 1;
        self.update_peak();
    }
    /// Records a free; the gauge saturates at zero rather than underflowing.
    pub fn free(&mut self) {
        self.frees += 1;
        self.in_flight = self.in_flight.saturating_sub(1);
    }
    /// Records a clone (the clone counts as another live buffer).
    pub fn clone_skb(&mut self) {
        self.clones += 1;
        self.in_flight += 1;
        self.update_peak();
    }
    /// Mean allocation size in bytes (truncating); 0 before any allocation.
    #[must_use]
    pub fn avg_size(&self) -> u64 {
        if self.allocs == 0 {
            return 0;
        }
        self.bytes_alloc / self.allocs
    }
    /// Clears all event counters.
    ///
    /// `in_flight` is a gauge of live buffers, so it is preserved. Fix:
    /// `peak_in_flight` is re-based to the current gauge value; previously
    /// it was left untouched and could report a peak from before the reset.
    pub fn reset(&mut self) {
        self.allocs = 0;
        self.frees = 0;
        self.clones = 0;
        self.bytes_alloc = 0;
        self.peak_in_flight = self.in_flight;
    }
}
#[cfg(test)]
mod skb_tests {
    use super::*;
    // One observable effect per test, on a fresh tracker.
    #[test]
    fn f_skb_001_new() {
        assert_eq!(SkbTracker::new().allocs, 0);
    }
    #[test]
    fn f_skb_002_default() {
        assert_eq!(SkbTracker::default().allocs, 0);
    }
    #[test]
    fn f_skb_003_alloc() {
        let mut t = SkbTracker::new();
        t.alloc(1500);
        assert_eq!(t.allocs, 1);
        assert_eq!(t.bytes_alloc, 1500);
    }
    #[test]
    fn f_skb_004_free() {
        let mut t = SkbTracker::new();
        t.alloc(1500);
        t.free();
        assert_eq!(t.frees, 1);
        assert_eq!(t.in_flight, 0);
    }
    #[test]
    fn f_skb_005_clone() {
        let mut t = SkbTracker::new();
        t.clone_skb();
        assert_eq!(t.clones, 1);
    }
    #[test]
    fn f_skb_006_in_flight() {
        let mut t = SkbTracker::new();
        t.alloc(1500);
        t.alloc(1500);
        assert_eq!(t.in_flight, 2);
    }
    #[test]
    fn f_skb_007_peak() {
        let mut t = SkbTracker::new();
        t.alloc(1500);
        t.alloc(1500);
        t.free();
        assert_eq!(t.peak_in_flight, 2);
    }
    #[test]
    fn f_skb_008_avg_size() {
        let mut t = SkbTracker::new();
        t.alloc(1000);
        t.alloc(2000);
        assert_eq!(t.avg_size(), 1500);
    }
    #[test]
    fn f_skb_009_rx() {
        assert_eq!(SkbTracker::for_rx().allocs, 0);
    }
    #[test]
    fn f_skb_010_tx() {
        assert_eq!(SkbTracker::for_tx().allocs, 0);
    }
    #[test]
    fn f_skb_011_reset() {
        let mut t = SkbTracker::new();
        t.alloc(1500);
        t.reset();
        assert_eq!(t.allocs, 0);
    }
    #[test]
    fn f_skb_012_clone() {
        let mut t = SkbTracker::new();
        t.alloc(1500);
        let copy = t.clone();
        assert_eq!(t.allocs, copy.allocs);
    }
}
/// Per-interface packet/byte/error counters, rx and tx.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct NetDevTracker {
    pub rx_packets: u64,
    pub tx_packets: u64,
    pub rx_bytes: u64,
    pub tx_bytes: u64,
    pub rx_errors: u64,
    pub tx_errors: u64,
}
impl NetDevTracker {
    /// Fresh tracker with every counter at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            rx_packets: 0,
            tx_packets: 0,
            rx_bytes: 0,
            tx_bytes: 0,
            rx_errors: 0,
            tx_errors: 0,
        }
    }
    /// Preset for an Ethernet device.
    #[must_use]
    pub const fn for_eth() -> Self {
        Self::new()
    }
    /// Preset for the loopback device.
    #[must_use]
    pub const fn for_lo() -> Self {
        Self::new()
    }
    /// Counts one received packet of `nbytes` bytes.
    pub fn rx(&mut self, nbytes: u64) {
        self.rx_packets += 1;
        self.rx_bytes += nbytes;
    }
    /// Counts one transmitted packet of `nbytes` bytes.
    pub fn tx(&mut self, nbytes: u64) {
        self.tx_packets += 1;
        self.tx_bytes += nbytes;
    }
    /// Counts a receive error.
    pub fn rx_error(&mut self) {
        self.rx_errors += 1;
    }
    /// Counts a transmit error.
    pub fn tx_error(&mut self) {
        self.tx_errors += 1;
    }
    /// Packets in both directions combined.
    #[must_use]
    pub fn total_packets(&self) -> u64 {
        self.rx_packets + self.tx_packets
    }
    /// Bytes in both directions combined.
    #[must_use]
    pub fn total_bytes(&self) -> u64 {
        self.rx_bytes + self.tx_bytes
    }
    /// Errors per packet across both directions; 0.0 before any traffic.
    #[must_use]
    pub fn error_rate(&self) -> f64 {
        match self.total_packets() {
            0 => 0.0,
            total => {
                let errors = self.rx_errors + self.tx_errors;
                (errors as f64) / (total as f64)
            }
        }
    }
    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod netdev_tests {
    use super::*;
    // One observable effect per test, on a fresh tracker.
    #[test]
    fn f_netdev_001_new() {
        assert_eq!(NetDevTracker::new().rx_packets, 0);
    }
    #[test]
    fn f_netdev_002_default() {
        assert_eq!(NetDevTracker::default().rx_packets, 0);
    }
    #[test]
    fn f_netdev_003_rx() {
        let mut t = NetDevTracker::new();
        t.rx(1500);
        assert_eq!(t.rx_packets, 1);
        assert_eq!(t.rx_bytes, 1500);
    }
    #[test]
    fn f_netdev_004_tx() {
        let mut t = NetDevTracker::new();
        t.tx(1000);
        assert_eq!(t.tx_packets, 1);
        assert_eq!(t.tx_bytes, 1000);
    }
    #[test]
    fn f_netdev_005_rx_error() {
        let mut t = NetDevTracker::new();
        t.rx_error();
        assert_eq!(t.rx_errors, 1);
    }
    #[test]
    fn f_netdev_006_tx_error() {
        let mut t = NetDevTracker::new();
        t.tx_error();
        assert_eq!(t.tx_errors, 1);
    }
    #[test]
    fn f_netdev_007_total_packets() {
        let mut t = NetDevTracker::new();
        t.rx(1500);
        t.tx(1000);
        assert_eq!(t.total_packets(), 2);
    }
    #[test]
    fn f_netdev_008_total_bytes() {
        let mut t = NetDevTracker::new();
        t.rx(1500);
        t.tx(1000);
        assert_eq!(t.total_bytes(), 2500);
    }
    #[test]
    fn f_netdev_009_eth() {
        assert_eq!(NetDevTracker::for_eth().rx_packets, 0);
    }
    #[test]
    fn f_netdev_010_lo() {
        assert_eq!(NetDevTracker::for_lo().rx_packets, 0);
    }
    #[test]
    fn f_netdev_011_reset() {
        let mut t = NetDevTracker::new();
        t.rx(1500);
        t.reset();
        assert_eq!(t.rx_packets, 0);
    }
    #[test]
    fn f_netdev_012_clone() {
        let mut t = NetDevTracker::new();
        t.rx(1500);
        let copy = t.clone();
        assert_eq!(t.rx_packets, copy.rx_packets);
    }
}
/// Counters for kernel-timer activity.
///
/// `active` is a gauge of currently-armed timers; `peak_active` is its
/// high-water mark. The remaining fields are event counters.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct TimerTracker {
    /// Timers started (armed).
    pub starts: u64,
    /// Timers cancelled before expiry.
    pub cancels: u64,
    /// Timers that expired.
    pub expirations: u64,
    /// Callback invocations recorded.
    pub callbacks: u64,
    /// Timers currently armed (gauge).
    pub active: u64,
    /// Highest value `active` has reached.
    pub peak_active: u64,
}
impl TimerTracker {
    /// Creates a tracker with every counter at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            starts: 0,
            cancels: 0,
            expirations: 0,
            callbacks: 0,
            active: 0,
            peak_active: 0,
        }
    }
    /// Preset for softirq-context timers.
    #[must_use]
    pub const fn for_softirq() -> Self {
        Self::new()
    }
    /// Preset for workqueue-context timers.
    #[must_use]
    pub const fn for_workqueue() -> Self {
        Self::new()
    }
    /// Records a timer being armed, maintaining the active high-water mark.
    pub fn start(&mut self) {
        self.starts += 1;
        self.active += 1;
        if self.active > self.peak_active {
            self.peak_active = self.active;
        }
    }
    /// Records a cancellation; the gauge saturates at zero.
    pub fn cancel(&mut self) {
        self.cancels += 1;
        self.active = self.active.saturating_sub(1);
    }
    /// Records an expiration; the gauge saturates at zero.
    pub fn expire(&mut self) {
        self.expirations += 1;
        self.active = self.active.saturating_sub(1);
    }
    /// Records a callback invocation.
    pub fn callback(&mut self) {
        self.callbacks += 1;
    }
    /// Cancellations per start; 0.0 before any start.
    #[must_use]
    pub fn cancel_rate(&self) -> f64 {
        if self.starts == 0 {
            return 0.0;
        }
        (self.cancels as f64) / (self.starts as f64)
    }
    /// Clears all event counters.
    ///
    /// `active` is a gauge of currently-armed timers, so it is preserved.
    /// Fix: `peak_active` is re-based to the current gauge value; previously
    /// it was left untouched and could report a peak from before the reset.
    pub fn reset(&mut self) {
        self.starts = 0;
        self.cancels = 0;
        self.expirations = 0;
        self.callbacks = 0;
        self.peak_active = self.active;
    }
}
#[cfg(test)]
mod timer_tests {
    use super::*;
    // One observable effect per test, on a fresh tracker.
    #[test]
    fn f_timer_001_new() {
        assert_eq!(TimerTracker::new().starts, 0);
    }
    #[test]
    fn f_timer_002_default() {
        assert_eq!(TimerTracker::default().starts, 0);
    }
    #[test]
    fn f_timer_003_start() {
        let mut t = TimerTracker::new();
        t.start();
        assert_eq!(t.starts, 1);
        assert_eq!(t.active, 1);
    }
    #[test]
    fn f_timer_004_cancel() {
        let mut t = TimerTracker::new();
        t.start();
        t.cancel();
        assert_eq!(t.cancels, 1);
        assert_eq!(t.active, 0);
    }
    #[test]
    fn f_timer_005_expire() {
        let mut t = TimerTracker::new();
        t.start();
        t.expire();
        assert_eq!(t.expirations, 1);
    }
    #[test]
    fn f_timer_006_callback() {
        let mut t = TimerTracker::new();
        t.callback();
        assert_eq!(t.callbacks, 1);
    }
    #[test]
    fn f_timer_007_peak() {
        let mut t = TimerTracker::new();
        t.start();
        t.start();
        t.expire();
        assert_eq!(t.peak_active, 2);
    }
    #[test]
    fn f_timer_008_cancel_rate() {
        let mut t = TimerTracker::new();
        t.start();
        t.start();
        t.cancel();
        assert!((t.cancel_rate() - 0.5).abs() < 0.01);
    }
    #[test]
    fn f_timer_009_softirq() {
        assert_eq!(TimerTracker::for_softirq().starts, 0);
    }
    #[test]
    fn f_timer_010_workqueue() {
        assert_eq!(TimerTracker::for_workqueue().starts, 0);
    }
    #[test]
    fn f_timer_011_reset() {
        let mut t = TimerTracker::new();
        t.start();
        t.reset();
        assert_eq!(t.starts, 0);
    }
    #[test]
    fn f_timer_012_clone() {
        let mut t = TimerTracker::new();
        t.start();
        let copy = t.clone();
        assert_eq!(t.starts, copy.starts);
    }
}
/// Counters for high-resolution timer activity and expiry latency.
///
/// `active` is a gauge of currently-armed timers; the latency fields are
/// accumulated per expiration in nanoseconds.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct HrTimerTracker {
    /// Timers started (armed).
    pub starts: u64,
    /// Timers that expired.
    pub expirations: u64,
    /// Timers restarted from their callback.
    pub restarts: u64,
    /// Sum of expiry latencies, in nanoseconds.
    pub total_latency_ns: u64,
    /// Largest single expiry latency observed, in nanoseconds.
    pub max_latency_ns: u64,
    /// Timers currently armed (gauge).
    pub active: u64,
}
impl HrTimerTracker {
    /// Creates a tracker with every counter at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            starts: 0,
            expirations: 0,
            restarts: 0,
            total_latency_ns: 0,
            max_latency_ns: 0,
            active: 0,
        }
    }
    /// Preset for the monotonic clock base.
    #[must_use]
    pub const fn for_monotonic() -> Self {
        Self::new()
    }
    /// Preset for the realtime clock base.
    #[must_use]
    pub const fn for_realtime() -> Self {
        Self::new()
    }
    /// Records a timer being armed.
    pub fn start(&mut self) {
        self.starts += 1;
        self.active += 1;
    }
    /// Records an expiration with the observed latency in nanoseconds;
    /// the active gauge saturates at zero.
    pub fn expire_ns(&mut self, latency_ns: u64) {
        self.expirations += 1;
        self.active = self.active.saturating_sub(1);
        self.total_latency_ns += latency_ns;
        if latency_ns > self.max_latency_ns {
            self.max_latency_ns = latency_ns;
        }
    }
    /// Records a restart.
    pub fn restart(&mut self) {
        self.restarts += 1;
    }
    /// Mean expiry latency in nanoseconds (truncating); 0 before any expiry.
    #[must_use]
    pub fn avg_latency_ns(&self) -> u64 {
        if self.expirations == 0 {
            return 0;
        }
        self.total_latency_ns / self.expirations
    }
    /// Clears all event counters and latency statistics.
    ///
    /// Fix: `max_latency_ns` is now cleared along with `total_latency_ns`;
    /// previously the high-water mark survived reset and misreported maxima
    /// from before the reset. `active` is a gauge of armed timers and is
    /// preserved.
    pub fn reset(&mut self) {
        self.starts = 0;
        self.expirations = 0;
        self.restarts = 0;
        self.total_latency_ns = 0;
        self.max_latency_ns = 0;
    }
}
#[cfg(test)]
mod hrtimer_tests {
    use super::*;
    // One observable effect per test, on a fresh tracker.
    #[test]
    fn f_hrt_001_new() {
        assert_eq!(HrTimerTracker::new().starts, 0);
    }
    #[test]
    fn f_hrt_002_default() {
        assert_eq!(HrTimerTracker::default().starts, 0);
    }
    #[test]
    fn f_hrt_003_start() {
        let mut t = HrTimerTracker::new();
        t.start();
        assert_eq!(t.starts, 1);
        assert_eq!(t.active, 1);
    }
    #[test]
    fn f_hrt_004_expire() {
        let mut t = HrTimerTracker::new();
        t.start();
        t.expire_ns(1000);
        assert_eq!(t.expirations, 1);
        assert_eq!(t.total_latency_ns, 1000);
    }
    #[test]
    fn f_hrt_005_restart() {
        let mut t = HrTimerTracker::new();
        t.restart();
        assert_eq!(t.restarts, 1);
    }
    #[test]
    fn f_hrt_006_max_latency() {
        let mut t = HrTimerTracker::new();
        t.start();
        t.expire_ns(500);
        t.start();
        t.expire_ns(1500);
        assert_eq!(t.max_latency_ns, 1500);
    }
    #[test]
    fn f_hrt_007_avg_latency() {
        let mut t = HrTimerTracker::new();
        t.start();
        t.expire_ns(1000);
        t.start();
        t.expire_ns(2000);
        assert_eq!(t.avg_latency_ns(), 1500);
    }
    #[test]
    fn f_hrt_008_active() {
        let mut t = HrTimerTracker::new();
        t.start();
        t.start();
        t.expire_ns(100);
        assert_eq!(t.active, 1);
    }
    #[test]
    fn f_hrt_009_monotonic() {
        assert_eq!(HrTimerTracker::for_monotonic().starts, 0);
    }
    #[test]
    fn f_hrt_010_realtime() {
        assert_eq!(HrTimerTracker::for_realtime().starts, 0);
    }
    #[test]
    fn f_hrt_011_reset() {
        let mut t = HrTimerTracker::new();
        t.start();
        t.reset();
        assert_eq!(t.starts, 0);
    }
    #[test]
    fn f_hrt_012_clone() {
        let mut t = HrTimerTracker::new();
        t.start();
        let copy = t.clone();
        assert_eq!(t.starts, copy.starts);
    }
}
/// Counters for clocksource activity: reads, frequency adjustments,
/// NTP syncs, counter wraps, and instability events.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct ClockTracker {
    pub reads: u64,
    pub adjustments: u64,
    pub total_adj_ppb: i64,
    pub ntp_syncs: u64,
    pub wraps: u64,
    pub unstable_events: u64,
}
impl ClockTracker {
    /// Fresh tracker with every counter at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            reads: 0,
            adjustments: 0,
            total_adj_ppb: 0,
            ntp_syncs: 0,
            wraps: 0,
            unstable_events: 0,
        }
    }
    /// Preset for the TSC clocksource.
    #[must_use]
    pub const fn for_tsc() -> Self {
        Self::new()
    }
    /// Preset for the HPET clocksource.
    #[must_use]
    pub const fn for_hpet() -> Self {
        Self::new()
    }
    /// Counts one clock read.
    pub fn read(&mut self) {
        self.reads += 1;
    }
    /// Records a frequency adjustment of `ppb` parts-per-billion
    /// (may be negative); the signed total accumulates.
    pub fn adjust(&mut self, ppb: i64) {
        self.adjustments += 1;
        self.total_adj_ppb += ppb;
    }
    /// Counts an NTP synchronization.
    pub fn ntp_sync(&mut self) {
        self.ntp_syncs += 1;
    }
    /// Counts a counter wrap-around.
    pub fn wrap(&mut self) {
        self.wraps += 1;
    }
    /// Counts a clocksource-instability event.
    pub fn unstable(&mut self) {
        self.unstable_events += 1;
    }
    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod clock_tests {
    use super::*;
    // One observable effect per test, on a fresh tracker.
    #[test]
    fn f_clock_001_new() {
        assert_eq!(ClockTracker::new().reads, 0);
    }
    #[test]
    fn f_clock_002_default() {
        assert_eq!(ClockTracker::default().reads, 0);
    }
    #[test]
    fn f_clock_003_read() {
        let mut t = ClockTracker::new();
        t.read();
        assert_eq!(t.reads, 1);
    }
    #[test]
    fn f_clock_004_adjust() {
        let mut t = ClockTracker::new();
        t.adjust(100);
        assert_eq!(t.adjustments, 1);
        assert_eq!(t.total_adj_ppb, 100);
    }
    #[test]
    fn f_clock_005_ntp() {
        let mut t = ClockTracker::new();
        t.ntp_sync();
        assert_eq!(t.ntp_syncs, 1);
    }
    #[test]
    fn f_clock_006_wrap() {
        let mut t = ClockTracker::new();
        t.wrap();
        assert_eq!(t.wraps, 1);
    }
    #[test]
    fn f_clock_007_unstable() {
        let mut t = ClockTracker::new();
        t.unstable();
        assert_eq!(t.unstable_events, 1);
    }
    #[test]
    fn f_clock_008_negative_adj() {
        let mut t = ClockTracker::new();
        t.adjust(-50);
        assert_eq!(t.total_adj_ppb, -50);
    }
    #[test]
    fn f_clock_009_tsc() {
        assert_eq!(ClockTracker::for_tsc().reads, 0);
    }
    #[test]
    fn f_clock_010_hpet() {
        assert_eq!(ClockTracker::for_hpet().reads, 0);
    }
    #[test]
    fn f_clock_011_reset() {
        let mut t = ClockTracker::new();
        t.read();
        t.reset();
        assert_eq!(t.reads, 0);
    }
    #[test]
    fn f_clock_012_clone() {
        let mut t = ClockTracker::new();
        t.read();
        let copy = t.clone();
        assert_eq!(t.reads, copy.reads);
    }
}
/// Counters for timekeeping-core events: updates, leap seconds, jumps,
/// suspend/resume cycles, clocksource switches, and corrections.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct TimeKeepingTracker {
    pub updates: u64,
    pub leap_seconds: u64,
    pub time_jumps: u64,
    pub suspend_cycles: u64,
    pub clock_switches: u64,
    pub corrections: u64,
}
impl TimeKeepingTracker {
    /// Fresh tracker with every counter at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            updates: 0,
            leap_seconds: 0,
            time_jumps: 0,
            suspend_cycles: 0,
            clock_switches: 0,
            corrections: 0,
        }
    }
    /// Preset for system (wall-clock) time.
    #[must_use]
    pub const fn for_system() -> Self {
        Self::new()
    }
    /// Preset for boot time.
    #[must_use]
    pub const fn for_boot() -> Self {
        Self::new()
    }
    /// Counts a timekeeping update.
    pub fn update(&mut self) {
        self.updates += 1;
    }
    /// Counts a leap-second insertion.
    pub fn leap_second(&mut self) {
        self.leap_seconds += 1;
    }
    /// Counts a discontinuous time jump.
    pub fn time_jump(&mut self) {
        self.time_jumps += 1;
    }
    /// Counts a suspend/resume cycle.
    pub fn suspend_resume(&mut self) {
        self.suspend_cycles += 1;
    }
    /// Counts a clocksource switch.
    pub fn clock_switch(&mut self) {
        self.clock_switches += 1;
    }
    /// Counts a correction event.
    pub fn correction(&mut self) {
        self.corrections += 1;
    }
    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod timekeeping_tests {
    use super::*;
    // One observable effect per test, on a fresh tracker.
    #[test]
    fn f_tk_001_new() {
        assert_eq!(TimeKeepingTracker::new().updates, 0);
    }
    #[test]
    fn f_tk_002_default() {
        assert_eq!(TimeKeepingTracker::default().updates, 0);
    }
    #[test]
    fn f_tk_003_update() {
        let mut t = TimeKeepingTracker::new();
        t.update();
        assert_eq!(t.updates, 1);
    }
    #[test]
    fn f_tk_004_leap() {
        let mut t = TimeKeepingTracker::new();
        t.leap_second();
        assert_eq!(t.leap_seconds, 1);
    }
    #[test]
    fn f_tk_005_jump() {
        let mut t = TimeKeepingTracker::new();
        t.time_jump();
        assert_eq!(t.time_jumps, 1);
    }
    #[test]
    fn f_tk_006_suspend() {
        let mut t = TimeKeepingTracker::new();
        t.suspend_resume();
        assert_eq!(t.suspend_cycles, 1);
    }
    #[test]
    fn f_tk_007_switch() {
        let mut t = TimeKeepingTracker::new();
        t.clock_switch();
        assert_eq!(t.clock_switches, 1);
    }
    #[test]
    fn f_tk_008_correction() {
        let mut t = TimeKeepingTracker::new();
        t.correction();
        assert_eq!(t.corrections, 1);
    }
    #[test]
    fn f_tk_009_system() {
        assert_eq!(TimeKeepingTracker::for_system().updates, 0);
    }
    #[test]
    fn f_tk_010_boot() {
        assert_eq!(TimeKeepingTracker::for_boot().updates, 0);
    }
    #[test]
    fn f_tk_011_reset() {
        let mut t = TimeKeepingTracker::new();
        t.update();
        t.reset();
        assert_eq!(t.updates, 0);
    }
    #[test]
    fn f_tk_012_clone() {
        let mut t = TimeKeepingTracker::new();
        t.update();
        let copy = t.clone();
        assert_eq!(t.updates, copy.updates);
    }
}
/// Counters for asynchronous-I/O submission and completion.
///
/// `queue_depth` is a gauge of currently outstanding requests (clamped to
/// `u32`); `peak_queue_depth` is its high-water mark.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct AioTracker {
    /// Requests submitted.
    pub submissions: u64,
    /// Requests completed.
    pub completions: u64,
    /// Requests cancelled.
    pub cancels: u64,
    /// Bytes transferred.
    pub bytes: u64,
    /// Outstanding requests (gauge, saturating).
    pub queue_depth: u32,
    /// Highest value `queue_depth` has reached.
    pub peak_queue_depth: u32,
}
impl AioTracker {
    /// Creates a tracker with every counter at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            submissions: 0,
            completions: 0,
            cancels: 0,
            bytes: 0,
            queue_depth: 0,
            peak_queue_depth: 0,
        }
    }
    /// Preset for read requests.
    #[must_use]
    pub const fn for_read() -> Self {
        Self::new()
    }
    /// Preset for write requests.
    #[must_use]
    pub const fn for_write() -> Self {
        Self::new()
    }
    /// Converts a `u64` count to `u32`, clamping instead of truncating.
    ///
    /// Fix: the previous `count as u32` cast silently truncated large
    /// counts (e.g. 2^32 became 0), corrupting the queue-depth gauge.
    fn clamp_u32(count: u64) -> u32 {
        u32::try_from(count).unwrap_or(u32::MAX)
    }
    /// Records `count` submissions, maintaining the depth high-water mark.
    pub fn submit(&mut self, count: u64) {
        self.submissions += count;
        self.queue_depth = self.queue_depth.saturating_add(Self::clamp_u32(count));
        if self.queue_depth > self.peak_queue_depth {
            self.peak_queue_depth = self.queue_depth;
        }
    }
    /// Records `count` completions; the gauge saturates at zero.
    pub fn complete(&mut self, count: u64) {
        self.completions += count;
        self.queue_depth = self.queue_depth.saturating_sub(Self::clamp_u32(count));
    }
    /// Records one cancellation; the gauge saturates at zero.
    pub fn cancel(&mut self) {
        self.cancels += 1;
        self.queue_depth = self.queue_depth.saturating_sub(1);
    }
    /// Adds `bytes` to the transferred total.
    pub fn transfer(&mut self, bytes: u64) {
        self.bytes += bytes;
    }
    /// Submissions not yet completed or cancelled (saturating at zero).
    #[must_use]
    pub fn pending(&self) -> u64 {
        self.submissions
            .saturating_sub(self.completions + self.cancels)
    }
    /// Returns every field to zero.
    ///
    /// Fix: `peak_queue_depth` is now cleared too; previously the gauge was
    /// zeroed but its high-water mark survived the reset.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod aio_tests {
    use super::*;
    // One observable effect per test, on a fresh tracker.
    #[test]
    fn f_aio_001_new() {
        assert_eq!(AioTracker::new().submissions, 0);
    }
    #[test]
    fn f_aio_002_default() {
        assert_eq!(AioTracker::default().submissions, 0);
    }
    #[test]
    fn f_aio_003_submit() {
        let mut t = AioTracker::new();
        t.submit(4);
        assert_eq!(t.submissions, 4);
        assert_eq!(t.queue_depth, 4);
    }
    #[test]
    fn f_aio_004_complete() {
        let mut t = AioTracker::new();
        t.submit(4);
        t.complete(2);
        assert_eq!(t.completions, 2);
        assert_eq!(t.queue_depth, 2);
    }
    #[test]
    fn f_aio_005_cancel() {
        let mut t = AioTracker::new();
        t.submit(4);
        t.cancel();
        assert_eq!(t.cancels, 1);
    }
    #[test]
    fn f_aio_006_bytes() {
        let mut t = AioTracker::new();
        t.transfer(4096);
        assert_eq!(t.bytes, 4096);
    }
    #[test]
    fn f_aio_007_peak() {
        let mut t = AioTracker::new();
        t.submit(10);
        t.complete(5);
        t.submit(2);
        assert_eq!(t.peak_queue_depth, 10);
    }
    #[test]
    fn f_aio_008_pending() {
        let mut t = AioTracker::new();
        t.submit(10);
        t.complete(3);
        t.cancel();
        assert_eq!(t.pending(), 6);
    }
    #[test]
    fn f_aio_009_read() {
        assert_eq!(AioTracker::for_read().submissions, 0);
    }
    #[test]
    fn f_aio_010_write() {
        assert_eq!(AioTracker::for_write().submissions, 0);
    }
    #[test]
    fn f_aio_011_reset() {
        let mut t = AioTracker::new();
        t.submit(4);
        t.reset();
        assert_eq!(t.submissions, 0);
    }
    #[test]
    fn f_aio_012_clone() {
        let mut t = AioTracker::new();
        t.submit(4);
        let copy = t.clone();
        assert_eq!(t.submissions, copy.submissions);
    }
}
/// Counters for direct (O_DIRECT-style) I/O, including alignment failures
/// and fallbacks to the buffered path.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct DirectIoTracker {
    pub reads: u64,
    pub writes: u64,
    pub bytes_read: u64,
    pub bytes_written: u64,
    pub alignment_fails: u64,
    pub fallbacks: u64,
}
impl DirectIoTracker {
    /// Fresh tracker with every counter at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            reads: 0,
            writes: 0,
            bytes_read: 0,
            bytes_written: 0,
            alignment_fails: 0,
            fallbacks: 0,
        }
    }
    /// Preset for ext4.
    #[must_use]
    pub const fn for_ext4() -> Self {
        Self::new()
    }
    /// Preset for XFS.
    #[must_use]
    pub const fn for_xfs() -> Self {
        Self::new()
    }
    /// Counts one read of `nbytes` bytes.
    pub fn read(&mut self, nbytes: u64) {
        self.reads += 1;
        self.bytes_read += nbytes;
    }
    /// Counts one write of `nbytes` bytes.
    pub fn write(&mut self, nbytes: u64) {
        self.writes += 1;
        self.bytes_written += nbytes;
    }
    /// Counts an alignment failure.
    pub fn alignment_fail(&mut self) {
        self.alignment_fails += 1;
    }
    /// Counts a fallback to buffered I/O.
    pub fn fallback(&mut self) {
        self.fallbacks += 1;
    }
    /// Bytes moved in both directions combined.
    #[must_use]
    pub fn total_bytes(&self) -> u64 {
        self.bytes_read + self.bytes_written
    }
    /// Operations in both directions combined.
    #[must_use]
    pub fn total_ops(&self) -> u64 {
        self.reads + self.writes
    }
    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod dio_tests {
    use super::*;
    // One observable effect per test, on a fresh tracker.
    #[test]
    fn f_dio_001_new() {
        assert_eq!(DirectIoTracker::new().reads, 0);
    }
    #[test]
    fn f_dio_002_default() {
        assert_eq!(DirectIoTracker::default().reads, 0);
    }
    #[test]
    fn f_dio_003_read() {
        let mut t = DirectIoTracker::new();
        t.read(4096);
        assert_eq!(t.reads, 1);
        assert_eq!(t.bytes_read, 4096);
    }
    #[test]
    fn f_dio_004_write() {
        let mut t = DirectIoTracker::new();
        t.write(8192);
        assert_eq!(t.writes, 1);
        assert_eq!(t.bytes_written, 8192);
    }
    #[test]
    fn f_dio_005_alignment() {
        let mut t = DirectIoTracker::new();
        t.alignment_fail();
        assert_eq!(t.alignment_fails, 1);
    }
    #[test]
    fn f_dio_006_fallback() {
        let mut t = DirectIoTracker::new();
        t.fallback();
        assert_eq!(t.fallbacks, 1);
    }
    #[test]
    fn f_dio_007_total_bytes() {
        let mut t = DirectIoTracker::new();
        t.read(1000);
        t.write(2000);
        assert_eq!(t.total_bytes(), 3000);
    }
    #[test]
    fn f_dio_008_total_ops() {
        let mut t = DirectIoTracker::new();
        t.read(1000);
        t.write(2000);
        assert_eq!(t.total_ops(), 2);
    }
    #[test]
    fn f_dio_009_ext4() {
        assert_eq!(DirectIoTracker::for_ext4().reads, 0);
    }
    #[test]
    fn f_dio_010_xfs() {
        assert_eq!(DirectIoTracker::for_xfs().reads, 0);
    }
    #[test]
    fn f_dio_011_reset() {
        let mut t = DirectIoTracker::new();
        t.read(4096);
        t.reset();
        assert_eq!(t.reads, 0);
    }
    #[test]
    fn f_dio_012_clone() {
        let mut t = DirectIoTracker::new();
        t.read(4096);
        let copy = t.clone();
        assert_eq!(t.reads, copy.reads);
    }
}
/// Counters for buffered (page-cache) I/O: hits, misses, writes and
/// writeback events.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct BufferedIoTracker {
    pub cache_hits: u64,
    pub cache_misses: u64,
    pub writes: u64,
    pub writebacks: u64,
    pub bytes_read: u64,
    pub bytes_written: u64,
}
impl BufferedIoTracker {
    /// Fresh tracker with every counter at zero.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            cache_hits: 0,
            cache_misses: 0,
            writes: 0,
            writebacks: 0,
            bytes_read: 0,
            bytes_written: 0,
        }
    }
    /// Preset for a read-heavy workload.
    #[must_use]
    pub const fn for_read_heavy() -> Self {
        Self::new()
    }
    /// Preset for a write-heavy workload.
    #[must_use]
    pub const fn for_write_heavy() -> Self {
        Self::new()
    }
    /// Counts a read served from cache.
    pub fn read_hit(&mut self, nbytes: u64) {
        self.cache_hits += 1;
        self.bytes_read += nbytes;
    }
    /// Counts a read that missed the cache.
    pub fn read_miss(&mut self, nbytes: u64) {
        self.cache_misses += 1;
        self.bytes_read += nbytes;
    }
    /// Counts one write of `nbytes` bytes.
    pub fn write(&mut self, nbytes: u64) {
        self.writes += 1;
        self.bytes_written += nbytes;
    }
    /// Counts a writeback event.
    pub fn writeback(&mut self) {
        self.writebacks += 1;
    }
    /// Cache hit rate as a percentage; 0.0 before any read.
    #[must_use]
    pub fn hit_rate(&self) -> f64 {
        let total = self.cache_hits + self.cache_misses;
        match total {
            0 => 0.0,
            _ => (self.cache_hits as f64) / (total as f64) * 100.0,
        }
    }
    /// Returns every counter to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod buffered_io_tests {
    use super::*;
    // One observable effect per test, on a fresh tracker.
    #[test]
    fn f_bio_001_new() {
        assert_eq!(BufferedIoTracker::new().cache_hits, 0);
    }
    #[test]
    fn f_bio_002_default() {
        assert_eq!(BufferedIoTracker::default().cache_hits, 0);
    }
    #[test]
    fn f_bio_003_hit() {
        let mut t = BufferedIoTracker::new();
        t.read_hit(4096);
        assert_eq!(t.cache_hits, 1);
        assert_eq!(t.bytes_read, 4096);
    }
    #[test]
    fn f_bio_004_miss() {
        let mut t = BufferedIoTracker::new();
        t.read_miss(4096);
        assert_eq!(t.cache_misses, 1);
    }
    #[test]
    fn f_bio_005_write() {
        let mut t = BufferedIoTracker::new();
        t.write(8192);
        assert_eq!(t.writes, 1);
        assert_eq!(t.bytes_written, 8192);
    }
    #[test]
    fn f_bio_006_writeback() {
        let mut t = BufferedIoTracker::new();
        t.writeback();
        assert_eq!(t.writebacks, 1);
    }
    #[test]
    fn f_bio_007_hit_rate() {
        let mut t = BufferedIoTracker::new();
        t.read_hit(1000);
        t.read_miss(1000);
        assert!((t.hit_rate() - 50.0).abs() < 0.01);
    }
    #[test]
    fn f_bio_008_total_bytes() {
        let mut t = BufferedIoTracker::new();
        t.read_hit(1000);
        t.write(2000);
        assert_eq!(t.bytes_read + t.bytes_written, 3000);
    }
    #[test]
    fn f_bio_009_read_heavy() {
        assert_eq!(BufferedIoTracker::for_read_heavy().cache_hits, 0);
    }
    #[test]
    fn f_bio_010_write_heavy() {
        assert_eq!(BufferedIoTracker::for_write_heavy().cache_hits, 0);
    }
    #[test]
    fn f_bio_011_reset() {
        let mut t = BufferedIoTracker::new();
        t.read_hit(4096);
        t.reset();
        assert_eq!(t.cache_hits, 0);
    }
    #[test]
    fn f_bio_012_clone() {
        let mut t = BufferedIoTracker::new();
        t.read_hit(4096);
        let copy = t.clone();
        assert_eq!(t.cache_hits, copy.cache_hits);
    }
}
/// Counters for zero-copy transfer primitives (`splice`, `sendfile`,
/// `copy_file_range`-style ops) and fallbacks to ordinary copies.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct SpliceTracker {
/// Splice operations recorded via `splice`.
pub splices: u64,
/// Sendfile operations recorded via `sendfile`.
pub sendfiles: u64,
/// Copy-range operations recorded via `copy_range`.
pub copy_ranges: u64,
/// Bytes moved by splice operations.
pub splice_bytes: u64,
/// Bytes moved by sendfile operations.
pub sendfile_bytes: u64,
/// Times a zero-copy path fell back (see `fallback`).
pub fallbacks: u64,
}
impl SpliceTracker {
    /// Creates a tracker with every counter zeroed.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            splices: 0,
            sendfiles: 0,
            copy_ranges: 0,
            splice_bytes: 0,
            sendfile_bytes: 0,
            fallbacks: 0,
        }
    }
    /// Preset for pipe-backed transfers; currently identical to `new`.
    #[must_use]
    pub const fn for_pipe() -> Self {
        Self::new()
    }
    /// Preset for socket-backed transfers; currently identical to `new`.
    #[must_use]
    pub const fn for_socket() -> Self {
        Self::new()
    }
    /// Records one splice operation moving `bytes` bytes.
    ///
    /// Counters saturate at `u64::MAX` instead of overflowing (plain `+=`
    /// panics in debug builds), matching the `saturating_add` convention
    /// used by the other trackers in this file.
    pub fn splice(&mut self, bytes: u64) {
        self.splices = self.splices.saturating_add(1);
        self.splice_bytes = self.splice_bytes.saturating_add(bytes);
    }
    /// Records one sendfile operation moving `bytes` bytes.
    pub fn sendfile(&mut self, bytes: u64) {
        self.sendfiles = self.sendfiles.saturating_add(1);
        self.sendfile_bytes = self.sendfile_bytes.saturating_add(bytes);
    }
    /// Records one copy-range operation.
    pub fn copy_range(&mut self) {
        self.copy_ranges = self.copy_ranges.saturating_add(1);
    }
    /// Records one fallback to a non-zero-copy path.
    pub fn fallback(&mut self) {
        self.fallbacks = self.fallbacks.saturating_add(1);
    }
    /// Total bytes moved via splice + sendfile.
    #[must_use]
    pub fn total_zero_copy_bytes(&self) -> u64 {
        self.splice_bytes.saturating_add(self.sendfile_bytes)
    }
    /// Total operations (fallbacks excluded).
    #[must_use]
    pub fn total_ops(&self) -> u64 {
        self.splices
            .saturating_add(self.sendfiles)
            .saturating_add(self.copy_ranges)
    }
    /// Clears every counter back to zero.
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}
#[cfg(test)]
mod splice_tests {
    //! Unit coverage for `SpliceTracker`.
    use super::*;
    #[test]
    fn f_splice_001_new() {
        assert_eq!(SpliceTracker::new().splices, 0);
    }
    #[test]
    fn f_splice_002_default() {
        assert_eq!(SpliceTracker::default().splices, 0);
    }
    #[test]
    fn f_splice_003_splice() {
        let mut t = SpliceTracker::new();
        t.splice(1024);
        assert_eq!(t.splices, 1);
        assert_eq!(t.splice_bytes, 1024);
    }
    #[test]
    fn f_splice_004_sendfile() {
        let mut t = SpliceTracker::new();
        t.sendfile(2048);
        assert_eq!(t.sendfiles, 1);
        assert_eq!(t.sendfile_bytes, 2048);
    }
    #[test]
    fn f_splice_005_copy_range() {
        let mut t = SpliceTracker::new();
        t.copy_range();
        assert_eq!(t.copy_ranges, 1);
    }
    #[test]
    fn f_splice_006_fallback() {
        let mut t = SpliceTracker::new();
        t.fallback();
        assert_eq!(t.fallbacks, 1);
    }
    #[test]
    fn f_splice_007_total_bytes() {
        let mut t = SpliceTracker::new();
        t.splice(1000);
        t.sendfile(2000);
        assert_eq!(t.total_zero_copy_bytes(), 3000);
    }
    #[test]
    fn f_splice_008_total_ops() {
        let mut t = SpliceTracker::new();
        t.splice(1000);
        t.sendfile(1000);
        t.copy_range();
        assert_eq!(t.total_ops(), 3);
    }
    #[test]
    fn f_splice_009_pipe() {
        assert_eq!(SpliceTracker::for_pipe().splices, 0);
    }
    #[test]
    fn f_splice_010_socket() {
        assert_eq!(SpliceTracker::for_socket().splices, 0);
    }
    #[test]
    fn f_splice_011_reset() {
        let mut t = SpliceTracker::new();
        t.splice(1024);
        t.reset();
        assert_eq!(t.splices, 0);
    }
    #[test]
    fn f_splice_012_clone() {
        let mut t = SpliceTracker::new();
        t.splice(1024);
        assert_eq!(t.clone().splices, t.splices);
    }
}
// Per-task CPU-time and context-switch counters. Field names mirror the
// utime/stime/ctxt-switch entries of Linux /proc — presumably sourced from
// /proc/<pid>/stat|status; confirm with callers. Generated by
// `define_tracker!` (defined elsewhere in this file); the tests below rely
// on the expansion providing `new`, `Default`, `Clone`, and `reset`.
// Plain `//` comments are used here because doc comments inside a macro
// invocation become `#[doc]` tokens the macro may not accept.
define_tracker! {
pub struct TaskAccountingTracker {
// user-mode CPU time (ticks; see `add_utime`)
pub utime: u64,
// kernel-mode CPU time (ticks; see `add_stime`)
pub stime: u64,
// child user time — semantics assumed from /proc naming; TODO confirm
pub cutime: u64,
// child system time — semantics assumed from /proc naming; TODO confirm
pub cstime: u64,
// switches where the task yielded (see `voluntary_switch`)
pub voluntary_ctxt_switches: u64,
// switches where the task was preempted (see `involuntary_switch`)
pub nonvoluntary_ctxt_switches: u64,
}
}
impl TaskAccountingTracker {
    /// Seeds a tracker from already-known user/system CPU time.
    #[inline]
    #[must_use]
    pub fn for_proc(utime: u64, stime: u64) -> Self {
        Self {
            utime,
            stime,
            ..Self::new()
        }
    }
    /// Adds user-mode CPU ticks (saturating at `u64::MAX`).
    #[inline]
    pub fn add_utime(&mut self, ticks: u64) {
        self.utime = self.utime.saturating_add(ticks);
    }
    /// Adds kernel-mode CPU ticks (saturating at `u64::MAX`).
    #[inline]
    pub fn add_stime(&mut self, ticks: u64) {
        self.stime = self.stime.saturating_add(ticks);
    }
    /// Counts one voluntary context switch.
    #[inline]
    pub fn voluntary_switch(&mut self) {
        self.voluntary_ctxt_switches = self.voluntary_ctxt_switches.saturating_add(1);
    }
    /// Counts one involuntary (preemption) context switch.
    #[inline]
    pub fn involuntary_switch(&mut self) {
        self.nonvoluntary_ctxt_switches = self.nonvoluntary_ctxt_switches.saturating_add(1);
    }
    /// Combined user + system CPU time.
    #[inline]
    #[must_use]
    pub const fn total_cpu(&self) -> u64 {
        // Saturating sum: plain `+` would overflow (debug-build panic) when
        // both counters are near u64::MAX, which they can reach because the
        // add_* methods themselves saturate there. `saturating_add` is
        // const-stable, so the `const fn` signature is preserved.
        self.utime.saturating_add(self.stime)
    }
    /// Combined voluntary + involuntary context switches.
    #[inline]
    #[must_use]
    pub const fn total_switches(&self) -> u64 {
        self.voluntary_ctxt_switches
            .saturating_add(self.nonvoluntary_ctxt_switches)
    }
}
// Per-task I/O accounting: syscall-level read/write byte and call counts,
// plus device-level byte counts. Field names resemble /proc/<pid>/io —
// presumably that is the data source; confirm with callers. Generated by
// `define_tracker!`; the tests below rely on the expansion providing
// `new`, `Default`, `Clone`, and `reset`.
define_tracker! {
pub struct IoAccountingTracker {
// bytes passed through read syscalls (see `read`)
pub read_bytes: u64,
// bytes passed through write syscalls (see `write`)
pub write_bytes: u64,
// number of read syscalls
pub read_syscalls: u64,
// number of write syscalls
pub write_syscalls: u64,
// bytes actually read from storage (see `disk_read`)
pub disk_read_bytes: u64,
// bytes actually written to storage (see `disk_write`)
pub disk_write_bytes: u64,
}
}
impl IoAccountingTracker {
    /// Seeds a tracker from already-known read/write byte totals.
    #[inline]
    #[must_use]
    pub fn for_proc_io(read_bytes: u64, write_bytes: u64) -> Self {
        Self {
            read_bytes,
            write_bytes,
            ..Self::new()
        }
    }
    /// Records one read syscall transferring `bytes` bytes.
    #[inline]
    pub fn read(&mut self, bytes: u64) {
        self.read_bytes = self.read_bytes.saturating_add(bytes);
        self.read_syscalls = self.read_syscalls.saturating_add(1);
    }
    /// Records one write syscall transferring `bytes` bytes.
    #[inline]
    pub fn write(&mut self, bytes: u64) {
        self.write_bytes = self.write_bytes.saturating_add(bytes);
        self.write_syscalls = self.write_syscalls.saturating_add(1);
    }
    /// Adds bytes read from the storage device.
    #[inline]
    pub fn disk_read(&mut self, bytes: u64) {
        self.disk_read_bytes = self.disk_read_bytes.saturating_add(bytes);
    }
    /// Adds bytes written to the storage device.
    #[inline]
    pub fn disk_write(&mut self, bytes: u64) {
        self.disk_write_bytes = self.disk_write_bytes.saturating_add(bytes);
    }
    /// Combined syscall-level bytes read + written.
    #[inline]
    #[must_use]
    pub const fn total_bytes(&self) -> u64 {
        // Saturating sum: plain `+` could overflow (debug-build panic)
        // because both inputs saturate at u64::MAX; `saturating_add` is
        // const-stable, so the `const fn` signature is preserved.
        self.read_bytes.saturating_add(self.write_bytes)
    }
    /// Combined read + write syscall count.
    #[inline]
    #[must_use]
    pub const fn total_syscalls(&self) -> u64 {
        self.read_syscalls.saturating_add(self.write_syscalls)
    }
}
// Scheduler accounting: context switches, execution runtime, runqueue wait
// statistics, timeslices, and priority inversions. Field names resemble
// /proc/<pid>/sched — presumably that is the data source; confirm with
// callers. Generated by `define_tracker!`; the tests below rely on the
// expansion providing `new`, `Default`, `Clone`, and `reset`.
define_tracker! {
pub struct SchedAccountingTracker {
// context switches (see `switch`)
pub nr_switches: u64,
// cumulative execution time, nanoseconds
pub sum_exec_runtime: u64,
// cumulative wait time, nanoseconds (see `wait`)
pub wait_sum: u64,
// longest single wait, nanoseconds
pub wait_max: u64,
// consumed timeslices (see `timeslice`)
pub timeslices: u64,
// observed priority inversions (see `prio_inversion`)
pub prio_inversions: u64,
}
}
impl SchedAccountingTracker {
    /// Seeds a tracker from known switch and runtime totals.
    #[inline]
    #[must_use]
    pub fn for_sched(switches: u64, runtime_ns: u64) -> Self {
        Self {
            nr_switches: switches,
            sum_exec_runtime: runtime_ns,
            ..Self::new()
        }
    }
    /// Records one context switch that ran for `runtime_ns` nanoseconds.
    #[inline]
    pub fn switch(&mut self, runtime_ns: u64) {
        self.sum_exec_runtime = self.sum_exec_runtime.saturating_add(runtime_ns);
        self.nr_switches = self.nr_switches.saturating_add(1);
    }
    /// Accumulates wait time and keeps the worst single wait.
    #[inline]
    pub fn wait(&mut self, wait_ns: u64) {
        self.wait_sum = self.wait_sum.saturating_add(wait_ns);
        self.wait_max = self.wait_max.max(wait_ns);
    }
    /// Counts one consumed timeslice.
    #[inline]
    pub fn timeslice(&mut self) {
        self.timeslices = self.timeslices.saturating_add(1);
    }
    /// Counts one observed priority inversion.
    #[inline]
    pub fn prio_inversion(&mut self) {
        self.prio_inversions = self.prio_inversions.saturating_add(1);
    }
    /// Mean runtime per switch in nanoseconds; 0 when nothing has switched.
    #[inline]
    #[must_use]
    pub fn avg_runtime(&self) -> u64 {
        match self.nr_switches {
            0 => 0,
            n => self.sum_exec_runtime / n,
        }
    }
}
// Memory accounting snapshot: virtual size, resident set, shared/text/data
// segments, and the RSS high-water mark. Field names resemble
// /proc/<pid>/statm (see `for_statm`) — presumably that is the data source;
// confirm with callers. Generated by `define_tracker!`; the tests below
// rely on the expansion providing `new`, `Default`, `Clone`, and `reset`.
define_tracker! {
pub struct MemAccountingTracker {
// virtual memory size (units unspecified here; statm uses pages — TODO confirm)
pub vsize: u64,
// resident set size
pub rss: u64,
// shared portion (see `set_shared`)
pub shared: u64,
// text/code portion (see `set_text`)
pub text: u64,
// data portion (see `set_data`)
pub data: u64,
// high-water mark of `rss`, maintained by `update`
pub peak_rss: u64,
}
}
impl MemAccountingTracker {
    /// Seeds a tracker from a statm-style vsize/rss snapshot; `peak_rss`
    /// starts at the initial `rss`.
    #[inline]
    #[must_use]
    pub fn for_statm(vsize: u64, rss: u64) -> Self {
        Self {
            vsize,
            rss,
            peak_rss: rss,
            ..Self::new()
        }
    }
    /// Records a fresh vsize/rss sample, keeping the RSS high-water mark.
    #[inline]
    pub fn update(&mut self, vsize: u64, rss: u64) {
        self.peak_rss = self.peak_rss.max(rss);
        self.rss = rss;
        self.vsize = vsize;
    }
    /// Sets the shared-memory figure.
    #[inline]
    pub fn set_shared(&mut self, shared: u64) {
        self.shared = shared;
    }
    /// Sets the text (code) figure.
    #[inline]
    pub fn set_text(&mut self, text: u64) {
        self.text = text;
    }
    /// Sets the data figure.
    #[inline]
    pub fn set_data(&mut self, data: u64) {
        self.data = data;
    }
    /// RSS minus shared memory, floored at zero.
    #[inline]
    #[must_use]
    pub fn private_mem(&self) -> u64 {
        self.rss.saturating_sub(self.shared)
    }
}
#[cfg(test)]
mod task_acct_tests {
    //! Unit coverage for `TaskAccountingTracker`.
    use super::*;
    #[test]
    fn f_tacct_001_new() {
        let t = TaskAccountingTracker::new();
        assert_eq!(t.utime, 0);
        assert_eq!(t.stime, 0);
    }
    #[test]
    fn f_tacct_002_default() {
        assert_eq!(TaskAccountingTracker::default().total_cpu(), 0);
    }
    #[test]
    fn f_tacct_003_factory() {
        let t = TaskAccountingTracker::for_proc(100, 50);
        assert_eq!(t.utime, 100);
        assert_eq!(t.stime, 50);
    }
    #[test]
    fn f_tacct_004_add_utime() {
        let mut t = TaskAccountingTracker::new();
        t.add_utime(100);
        assert_eq!(t.utime, 100);
    }
    #[test]
    fn f_tacct_005_add_stime() {
        let mut t = TaskAccountingTracker::new();
        t.add_stime(50);
        assert_eq!(t.stime, 50);
    }
    #[test]
    fn f_tacct_006_voluntary() {
        let mut t = TaskAccountingTracker::new();
        t.voluntary_switch();
        assert_eq!(t.voluntary_ctxt_switches, 1);
    }
    #[test]
    fn f_tacct_007_involuntary() {
        let mut t = TaskAccountingTracker::new();
        t.involuntary_switch();
        assert_eq!(t.nonvoluntary_ctxt_switches, 1);
    }
    #[test]
    fn f_tacct_008_total_cpu() {
        assert_eq!(TaskAccountingTracker::for_proc(100, 50).total_cpu(), 150);
    }
    #[test]
    fn f_tacct_009_total_switches() {
        let mut t = TaskAccountingTracker::new();
        t.voluntary_switch();
        t.involuntary_switch();
        assert_eq!(t.total_switches(), 2);
    }
    #[test]
    fn f_tacct_010_saturating() {
        let mut t = TaskAccountingTracker::for_proc(u64::MAX - 1, 0);
        t.add_utime(10);
        assert_eq!(t.utime, u64::MAX);
    }
    #[test]
    fn f_tacct_011_reset() {
        let mut t = TaskAccountingTracker::for_proc(100, 50);
        t.reset();
        assert_eq!(t.total_cpu(), 0);
    }
    #[test]
    fn f_tacct_012_clone() {
        let t = TaskAccountingTracker::for_proc(100, 50);
        assert_eq!(t.clone().utime, t.utime);
    }
}
#[cfg(test)]
mod io_acct_tests {
    //! Unit coverage for `IoAccountingTracker`.
    use super::*;
    #[test]
    fn f_ioacct_001_new() {
        assert_eq!(IoAccountingTracker::new().read_bytes, 0);
    }
    #[test]
    fn f_ioacct_002_default() {
        assert_eq!(IoAccountingTracker::default().total_bytes(), 0);
    }
    #[test]
    fn f_ioacct_003_factory() {
        let t = IoAccountingTracker::for_proc_io(1000, 500);
        assert_eq!(t.read_bytes, 1000);
        assert_eq!(t.write_bytes, 500);
    }
    #[test]
    fn f_ioacct_004_read() {
        let mut t = IoAccountingTracker::new();
        t.read(1024);
        assert_eq!(t.read_bytes, 1024);
        assert_eq!(t.read_syscalls, 1);
    }
    #[test]
    fn f_ioacct_005_write() {
        let mut t = IoAccountingTracker::new();
        t.write(2048);
        assert_eq!(t.write_bytes, 2048);
        assert_eq!(t.write_syscalls, 1);
    }
    #[test]
    fn f_ioacct_006_disk_read() {
        let mut t = IoAccountingTracker::new();
        t.disk_read(4096);
        assert_eq!(t.disk_read_bytes, 4096);
    }
    #[test]
    fn f_ioacct_007_disk_write() {
        let mut t = IoAccountingTracker::new();
        t.disk_write(8192);
        assert_eq!(t.disk_write_bytes, 8192);
    }
    #[test]
    fn f_ioacct_008_total_bytes() {
        assert_eq!(IoAccountingTracker::for_proc_io(1000, 500).total_bytes(), 1500);
    }
    #[test]
    fn f_ioacct_009_total_syscalls() {
        let mut t = IoAccountingTracker::new();
        t.read(1024);
        t.write(1024);
        assert_eq!(t.total_syscalls(), 2);
    }
    #[test]
    fn f_ioacct_010_saturating() {
        let mut t = IoAccountingTracker::for_proc_io(u64::MAX - 1, 0);
        t.read(10);
        assert_eq!(t.read_bytes, u64::MAX);
    }
    #[test]
    fn f_ioacct_011_reset() {
        let mut t = IoAccountingTracker::for_proc_io(1000, 500);
        t.reset();
        assert_eq!(t.total_bytes(), 0);
    }
    #[test]
    fn f_ioacct_012_clone() {
        let t = IoAccountingTracker::for_proc_io(1000, 500);
        assert_eq!(t.clone().read_bytes, t.read_bytes);
    }
}
#[cfg(test)]
mod sched_acct_tests {
    //! Unit coverage for `SchedAccountingTracker`.
    use super::*;
    #[test]
    fn f_schedacct_001_new() {
        assert_eq!(SchedAccountingTracker::new().nr_switches, 0);
    }
    #[test]
    fn f_schedacct_002_default() {
        assert_eq!(SchedAccountingTracker::default().sum_exec_runtime, 0);
    }
    #[test]
    fn f_schedacct_003_factory() {
        let t = SchedAccountingTracker::for_sched(100, 1_000_000);
        assert_eq!(t.nr_switches, 100);
        assert_eq!(t.sum_exec_runtime, 1_000_000);
    }
    #[test]
    fn f_schedacct_004_switch() {
        let mut t = SchedAccountingTracker::new();
        t.switch(10_000);
        assert_eq!(t.nr_switches, 1);
        assert_eq!(t.sum_exec_runtime, 10_000);
    }
    #[test]
    fn f_schedacct_005_wait() {
        let mut t = SchedAccountingTracker::new();
        t.wait(5000);
        t.wait(10000);
        assert_eq!(t.wait_sum, 15000);
        assert_eq!(t.wait_max, 10000);
    }
    #[test]
    fn f_schedacct_006_timeslice() {
        let mut t = SchedAccountingTracker::new();
        t.timeslice();
        assert_eq!(t.timeslices, 1);
    }
    #[test]
    fn f_schedacct_007_prio_inversion() {
        let mut t = SchedAccountingTracker::new();
        t.prio_inversion();
        assert_eq!(t.prio_inversions, 1);
    }
    #[test]
    fn f_schedacct_008_avg_runtime() {
        assert_eq!(SchedAccountingTracker::for_sched(10, 100_000).avg_runtime(), 10_000);
    }
    #[test]
    fn f_schedacct_009_avg_zero() {
        assert_eq!(SchedAccountingTracker::new().avg_runtime(), 0);
    }
    #[test]
    fn f_schedacct_010_saturating() {
        let mut t = SchedAccountingTracker::for_sched(u64::MAX - 1, 0);
        t.switch(0);
        assert_eq!(t.nr_switches, u64::MAX);
    }
    #[test]
    fn f_schedacct_011_reset() {
        let mut t = SchedAccountingTracker::for_sched(100, 1_000_000);
        t.reset();
        assert_eq!(t.nr_switches, 0);
    }
    #[test]
    fn f_schedacct_012_clone() {
        let t = SchedAccountingTracker::for_sched(100, 1_000_000);
        assert_eq!(t.clone().nr_switches, t.nr_switches);
    }
}
#[cfg(test)]
mod mem_acct_tests {
    //! Unit coverage for `MemAccountingTracker`.
    use super::*;
    #[test]
    fn f_memacct_001_new() {
        let t = MemAccountingTracker::new();
        assert_eq!(t.vsize, 0);
        assert_eq!(t.rss, 0);
    }
    #[test]
    fn f_memacct_002_default() {
        assert_eq!(MemAccountingTracker::default().peak_rss, 0);
    }
    #[test]
    fn f_memacct_003_factory() {
        let t = MemAccountingTracker::for_statm(1000, 500);
        assert_eq!(t.vsize, 1000);
        assert_eq!(t.rss, 500);
        assert_eq!(t.peak_rss, 500);
    }
    #[test]
    fn f_memacct_004_update() {
        let mut t = MemAccountingTracker::new();
        t.update(2000, 1000);
        assert_eq!(t.vsize, 2000);
        assert_eq!(t.rss, 1000);
    }
    #[test]
    fn f_memacct_005_peak() {
        let mut t = MemAccountingTracker::for_statm(1000, 500);
        t.update(1000, 600);
        t.update(1000, 400);
        assert_eq!(t.peak_rss, 600);
    }
    #[test]
    fn f_memacct_006_shared() {
        let mut t = MemAccountingTracker::new();
        t.set_shared(100);
        assert_eq!(t.shared, 100);
    }
    #[test]
    fn f_memacct_007_text() {
        let mut t = MemAccountingTracker::new();
        t.set_text(50);
        assert_eq!(t.text, 50);
    }
    #[test]
    fn f_memacct_008_data() {
        let mut t = MemAccountingTracker::new();
        t.set_data(200);
        assert_eq!(t.data, 200);
    }
    #[test]
    fn f_memacct_009_private() {
        let mut t = MemAccountingTracker::for_statm(1000, 500);
        t.set_shared(100);
        assert_eq!(t.private_mem(), 400);
    }
    #[test]
    fn f_memacct_010_private_saturate() {
        let mut t = MemAccountingTracker::for_statm(1000, 100);
        t.set_shared(200);
        assert_eq!(t.private_mem(), 0);
    }
    #[test]
    fn f_memacct_011_reset() {
        let mut t = MemAccountingTracker::for_statm(1000, 500);
        t.reset();
        assert_eq!(t.vsize, 0);
    }
    #[test]
    fn f_memacct_012_clone() {
        let t = MemAccountingTracker::for_statm(1000, 500);
        assert_eq!(t.clone().rss, t.rss);
    }
}
// PID allocation accounting: live/peak counts plus allocation, recycle,
// wraparound, and failure totals. Generated by `define_tracker!`; the tests
// below rely on the expansion providing `new`, `Default`, `Clone`, and
// `reset`.
define_tracker! {
pub struct PidTracker {
// currently live PIDs (changed by `allocate`/`free`)
pub active_pids: u32,
// high-water mark of `active_pids`
pub peak_pids: u32,
// total successful allocations
pub allocated: u64,
// total frees (see `free`)
pub recycled: u64,
// PID-counter wraparounds (see `wrap`)
pub wraps: u64,
// allocations rejected when `active_pids` hit u32::MAX
pub failures: u64,
}
}
impl PidTracker {
    /// Seeds a tracker that starts with `active` live PIDs; the peak starts
    /// at the same value.
    #[inline]
    #[must_use]
    pub fn for_namespace(active: u32) -> Self {
        Self {
            active_pids: active,
            peak_pids: active,
            ..Self::new()
        }
    }
    /// Accounts for one new PID; returns `false` (and counts a failure)
    /// once the active count has reached `u32::MAX`.
    #[inline]
    pub fn allocate(&mut self) -> bool {
        if self.active_pids == u32::MAX {
            self.failures = self.failures.saturating_add(1);
            return false;
        }
        // The guard above makes this increment overflow-free.
        self.active_pids += 1;
        self.allocated = self.allocated.saturating_add(1);
        self.peak_pids = self.peak_pids.max(self.active_pids);
        true
    }
    /// Releases one PID; a no-op when none are active.
    #[inline]
    pub fn free(&mut self) {
        if self.active_pids == 0 {
            return;
        }
        self.active_pids -= 1;
        self.recycled = self.recycled.saturating_add(1);
    }
    /// Counts one wraparound of the PID counter.
    #[inline]
    pub fn wrap(&mut self) {
        self.wraps = self.wraps.saturating_add(1);
    }
    /// Active PIDs as a percentage of `max_pids`; 0 when `max_pids` is 0.
    #[inline]
    #[must_use]
    pub fn utilization(&self, max_pids: u32) -> f32 {
        if max_pids == 0 {
            return 0.0;
        }
        self.active_pids as f32 / max_pids as f32 * 100.0
    }
}
// UID-mapping accounting for a user namespace: mapping counts split by
// root vs unprivileged, plus lookup/translation/failure totals. Generated
// by `define_tracker!`; the tests below rely on the expansion providing
// `new`, `Default`, `Clone`, and `reset`.
define_tracker! {
pub struct UidTracker {
// total mappings installed (see `add_mapping`)
pub mappings: u32,
// lookup attempts (see `lookup`)
pub lookups: u64,
// successful translations
pub translations: u64,
// failed lookups
pub failures: u64,
// mappings classified as root
pub root_mappings: u32,
// mappings classified as unprivileged
pub unpriv_mappings: u32,
}
}
impl UidTracker {
    /// Seeds a tracker with a preexisting mapping count.
    #[inline]
    #[must_use]
    pub fn for_userns(mappings: u32) -> Self {
        Self {
            mappings,
            ..Self::new()
        }
    }
    /// Records a new UID mapping, classified as root or unprivileged.
    #[inline]
    pub fn add_mapping(&mut self, is_root: bool) {
        self.mappings = self.mappings.saturating_add(1);
        let bucket = if is_root {
            &mut self.root_mappings
        } else {
            &mut self.unpriv_mappings
        };
        *bucket = bucket.saturating_add(1);
    }
    /// Records one lookup and whether it translated successfully.
    #[inline]
    pub fn lookup(&mut self, success: bool) {
        self.lookups = self.lookups.saturating_add(1);
        let bucket = if success {
            &mut self.translations
        } else {
            &mut self.failures
        };
        *bucket = bucket.saturating_add(1);
    }
    /// Percentage of lookups that translated; 100 when none were attempted.
    #[inline]
    #[must_use]
    pub fn success_rate(&self) -> f32 {
        if self.lookups == 0 {
            return 100.0;
        }
        self.translations as f32 / self.lookups as f32 * 100.0
    }
}
// Namespace lifecycle accounting: active count plus create/destroy and
// setns/unshare/clone operation totals. Generated by `define_tracker!`;
// the tests below rely on the expansion providing `new`, `Default`,
// `Clone`, and `reset`.
define_tracker! {
pub struct NamespaceTracker {
// currently active namespaces (floored at 0 by `destroy`)
pub active: u32,
// total creations (also bumped by `unshare`/`clone_ns`)
pub created: u64,
// total destructions
pub destroyed: u64,
// setns operations (see `setns`)
pub setns_ops: u64,
// unshare operations (see `unshare`)
pub unshare_ops: u64,
// clone-with-new-namespace operations (see `clone_ns`)
pub clone_newns: u64,
}
}
impl NamespaceTracker {
    /// Seeds a tracker with `active` preexisting namespaces.
    #[inline]
    #[must_use]
    pub fn for_system(active: u32) -> Self {
        Self {
            active,
            ..Self::new()
        }
    }
    /// Accounts for one newly created namespace.
    #[inline]
    pub fn create(&mut self) {
        self.created = self.created.saturating_add(1);
        self.active = self.active.saturating_add(1);
    }
    /// Accounts for one destroyed namespace; `active` floors at zero.
    #[inline]
    pub fn destroy(&mut self) {
        self.destroyed = self.destroyed.saturating_add(1);
        self.active = self.active.saturating_sub(1);
    }
    /// Counts one setns operation.
    #[inline]
    pub fn setns(&mut self) {
        self.setns_ops = self.setns_ops.saturating_add(1);
    }
    /// Counts one unshare, which also creates a namespace.
    #[inline]
    pub fn unshare(&mut self) {
        self.unshare_ops = self.unshare_ops.saturating_add(1);
        self.create();
    }
    /// Counts one clone-with-new-namespace, which also creates a namespace.
    #[inline]
    pub fn clone_ns(&mut self) {
        self.clone_newns = self.clone_newns.saturating_add(1);
        self.create();
    }
}
// Seccomp filter accounting: installed filter count, per-check verdicts,
// and audit events. Generated by `define_tracker!`; the tests below rely
// on the expansion providing `new`, `Default`, `Clone`, and `reset`.
define_tracker! {
pub struct SeccompTracker {
// currently installed filters (see `add_filter`)
pub filters: u32,
// syscall checks performed (see `check`)
pub checks: u64,
// checks that were allowed
pub allowed: u64,
// checks that were denied
pub denied: u64,
// cumulative filter installations
pub filter_adds: u64,
// audit events emitted (see `audit`)
pub audit_events: u64,
}
}
impl SeccompTracker {
    /// Seeds a tracker with `filters` already-installed filters.
    #[inline]
    #[must_use]
    pub fn for_process(filters: u32) -> Self {
        Self {
            filters,
            ..Self::new()
        }
    }
    /// Accounts for one newly attached filter.
    #[inline]
    pub fn add_filter(&mut self) {
        self.filter_adds = self.filter_adds.saturating_add(1);
        self.filters = self.filters.saturating_add(1);
    }
    /// Records one syscall check and its verdict.
    #[inline]
    pub fn check(&mut self, allowed: bool) {
        self.checks = self.checks.saturating_add(1);
        let bucket = if allowed {
            &mut self.allowed
        } else {
            &mut self.denied
        };
        *bucket = bucket.saturating_add(1);
    }
    /// Counts one audit event.
    #[inline]
    pub fn audit(&mut self) {
        self.audit_events = self.audit_events.saturating_add(1);
    }
    /// Percentage of checks allowed; 100 before any checks run.
    #[inline]
    #[must_use]
    pub fn allow_rate(&self) -> f32 {
        if self.checks == 0 {
            return 100.0;
        }
        self.allowed as f32 / self.checks as f32 * 100.0
    }
    /// Percentage of checks denied; 0 before any checks run.
    #[inline]
    #[must_use]
    pub fn deny_rate(&self) -> f32 {
        if self.checks == 0 {
            return 0.0;
        }
        self.denied as f32 / self.checks as f32 * 100.0
    }
}
#[cfg(test)]
mod pid_tests {
    //! Unit coverage for `PidTracker`.
    use super::*;
    #[test]
    fn f_pid_001_new() {
        assert_eq!(PidTracker::new().active_pids, 0);
    }
    #[test]
    fn f_pid_002_default() {
        assert_eq!(PidTracker::default().allocated, 0);
    }
    #[test]
    fn f_pid_003_factory() {
        let t = PidTracker::for_namespace(100);
        assert_eq!(t.active_pids, 100);
        assert_eq!(t.peak_pids, 100);
    }
    #[test]
    fn f_pid_004_allocate() {
        let mut t = PidTracker::new();
        assert!(t.allocate());
        assert_eq!(t.active_pids, 1);
        assert_eq!(t.allocated, 1);
    }
    #[test]
    fn f_pid_005_free() {
        let mut t = PidTracker::for_namespace(5);
        t.free();
        assert_eq!(t.active_pids, 4);
        assert_eq!(t.recycled, 1);
    }
    #[test]
    fn f_pid_006_peak() {
        let mut t = PidTracker::new();
        t.allocate();
        t.allocate();
        t.free();
        assert_eq!(t.peak_pids, 2);
    }
    #[test]
    fn f_pid_007_wrap() {
        let mut t = PidTracker::new();
        t.wrap();
        assert_eq!(t.wraps, 1);
    }
    #[test]
    fn f_pid_008_utilization() {
        assert!((PidTracker::for_namespace(50).utilization(100) - 50.0).abs() < 0.1);
    }
    #[test]
    fn f_pid_009_util_zero() {
        assert_eq!(PidTracker::for_namespace(10).utilization(0), 0.0);
    }
    #[test]
    fn f_pid_010_free_underflow() {
        let mut t = PidTracker::new();
        t.free();
        assert_eq!(t.active_pids, 0);
    }
    #[test]
    fn f_pid_011_reset() {
        let mut t = PidTracker::for_namespace(100);
        t.reset();
        assert_eq!(t.active_pids, 0);
    }
    #[test]
    fn f_pid_012_clone() {
        let t = PidTracker::for_namespace(100);
        assert_eq!(t.clone().active_pids, t.active_pids);
    }
}
#[cfg(test)]
mod uid_tests {
    //! Unit coverage for `UidTracker`.
    use super::*;
    #[test]
    fn f_uid_001_new() {
        assert_eq!(UidTracker::new().mappings, 0);
    }
    #[test]
    fn f_uid_002_default() {
        assert_eq!(UidTracker::default().lookups, 0);
    }
    #[test]
    fn f_uid_003_factory() {
        assert_eq!(UidTracker::for_userns(5).mappings, 5);
    }
    #[test]
    fn f_uid_004_root_mapping() {
        let mut t = UidTracker::new();
        t.add_mapping(true);
        assert_eq!(t.mappings, 1);
        assert_eq!(t.root_mappings, 1);
    }
    #[test]
    fn f_uid_005_unpriv_mapping() {
        let mut t = UidTracker::new();
        t.add_mapping(false);
        assert_eq!(t.mappings, 1);
        assert_eq!(t.unpriv_mappings, 1);
    }
    #[test]
    fn f_uid_006_lookup_success() {
        let mut t = UidTracker::new();
        t.lookup(true);
        assert_eq!(t.lookups, 1);
        assert_eq!(t.translations, 1);
    }
    #[test]
    fn f_uid_007_lookup_failure() {
        let mut t = UidTracker::new();
        t.lookup(false);
        assert_eq!(t.lookups, 1);
        assert_eq!(t.failures, 1);
    }
    #[test]
    fn f_uid_008_success_rate() {
        let mut t = UidTracker::new();
        t.lookup(true);
        t.lookup(false);
        assert!((t.success_rate() - 50.0).abs() < 0.1);
    }
    #[test]
    fn f_uid_009_default_rate() {
        assert_eq!(UidTracker::new().success_rate(), 100.0);
    }
    #[test]
    fn f_uid_010_mixed() {
        let mut t = UidTracker::new();
        t.add_mapping(true);
        t.add_mapping(false);
        t.add_mapping(false);
        assert_eq!(t.mappings, 3);
        assert_eq!(t.root_mappings, 1);
        assert_eq!(t.unpriv_mappings, 2);
    }
    #[test]
    fn f_uid_011_reset() {
        let mut t = UidTracker::for_userns(5);
        t.reset();
        assert_eq!(t.mappings, 0);
    }
    #[test]
    fn f_uid_012_clone() {
        let t = UidTracker::for_userns(5);
        assert_eq!(t.clone().mappings, t.mappings);
    }
}
#[cfg(test)]
mod namespace_tests {
    //! Unit coverage for `NamespaceTracker`.
    use super::*;
    #[test]
    fn f_ns_001_new() {
        assert_eq!(NamespaceTracker::new().active, 0);
    }
    #[test]
    fn f_ns_002_default() {
        assert_eq!(NamespaceTracker::default().created, 0);
    }
    #[test]
    fn f_ns_003_factory() {
        assert_eq!(NamespaceTracker::for_system(10).active, 10);
    }
    #[test]
    fn f_ns_004_create() {
        let mut ns = NamespaceTracker::new();
        ns.create();
        assert_eq!(ns.active, 1);
        assert_eq!(ns.created, 1);
    }
    #[test]
    fn f_ns_005_destroy() {
        let mut ns = NamespaceTracker::for_system(5);
        ns.destroy();
        assert_eq!(ns.active, 4);
        assert_eq!(ns.destroyed, 1);
    }
    #[test]
    fn f_ns_006_setns() {
        let mut ns = NamespaceTracker::new();
        ns.setns();
        assert_eq!(ns.setns_ops, 1);
    }
    #[test]
    fn f_ns_007_unshare() {
        let mut ns = NamespaceTracker::new();
        ns.unshare();
        assert_eq!(ns.unshare_ops, 1);
        assert_eq!(ns.active, 1);
        assert_eq!(ns.created, 1);
    }
    #[test]
    fn f_ns_008_clone_ns() {
        let mut ns = NamespaceTracker::new();
        ns.clone_ns();
        assert_eq!(ns.clone_newns, 1);
        assert_eq!(ns.active, 1);
    }
    #[test]
    fn f_ns_009_destroy_underflow() {
        let mut ns = NamespaceTracker::new();
        ns.destroy();
        assert_eq!(ns.active, 0);
    }
    #[test]
    fn f_ns_010_multiple() {
        let mut ns = NamespaceTracker::new();
        ns.create();
        ns.unshare();
        ns.clone_ns();
        assert_eq!(ns.active, 3);
        assert_eq!(ns.created, 3);
    }
    #[test]
    fn f_ns_011_reset() {
        let mut ns = NamespaceTracker::for_system(10);
        ns.reset();
        assert_eq!(ns.active, 0);
    }
    #[test]
    fn f_ns_012_clone() {
        let ns = NamespaceTracker::for_system(10);
        assert_eq!(ns.clone().active, ns.active);
    }
}
#[cfg(test)]
mod seccomp_tests {
    //! Unit coverage for `SeccompTracker`.
    use super::*;
    #[test]
    fn f_seccomp_001_new() {
        assert_eq!(SeccompTracker::new().filters, 0);
    }
    #[test]
    fn f_seccomp_002_default() {
        assert_eq!(SeccompTracker::default().checks, 0);
    }
    #[test]
    fn f_seccomp_003_factory() {
        assert_eq!(SeccompTracker::for_process(3).filters, 3);
    }
    #[test]
    fn f_seccomp_004_add_filter() {
        let mut t = SeccompTracker::new();
        t.add_filter();
        assert_eq!(t.filters, 1);
        assert_eq!(t.filter_adds, 1);
    }
    #[test]
    fn f_seccomp_005_check_allow() {
        let mut t = SeccompTracker::new();
        t.check(true);
        assert_eq!(t.checks, 1);
        assert_eq!(t.allowed, 1);
    }
    #[test]
    fn f_seccomp_006_check_deny() {
        let mut t = SeccompTracker::new();
        t.check(false);
        assert_eq!(t.checks, 1);
        assert_eq!(t.denied, 1);
    }
    #[test]
    fn f_seccomp_007_audit() {
        let mut t = SeccompTracker::new();
        t.audit();
        assert_eq!(t.audit_events, 1);
    }
    #[test]
    fn f_seccomp_008_allow_rate() {
        let mut t = SeccompTracker::new();
        t.check(true);
        t.check(false);
        assert!((t.allow_rate() - 50.0).abs() < 0.1);
    }
    #[test]
    fn f_seccomp_009_default_rate() {
        assert_eq!(SeccompTracker::new().allow_rate(), 100.0);
    }
    #[test]
    fn f_seccomp_010_deny_rate() {
        let mut t = SeccompTracker::new();
        t.check(false);
        assert!((t.deny_rate() - 100.0).abs() < 0.1);
    }
    #[test]
    fn f_seccomp_011_reset() {
        let mut t = SeccompTracker::for_process(3);
        t.reset();
        assert_eq!(t.filters, 0);
    }
    #[test]
    fn f_seccomp_012_clone() {
        let t = SeccompTracker::for_process(3);
        assert_eq!(t.clone().filters, t.filters);
    }
}
// Capability accounting: per-check verdicts plus set/drop/ambient-raise
// operation totals. Generated by `define_tracker!`; the tests below rely
// on the expansion providing `new`, `Default`, `Clone`, and `reset`.
define_tracker! {
pub struct CapabilitiesTracker {
// capability checks performed (see `check`)
pub checks: u64,
// checks that were granted
pub granted: u64,
// checks that were denied
pub denied: u64,
// capability-set operations (see `set_cap`)
pub set_ops: u64,
// dropped capabilities (see `drop_cap`)
pub drops: u64,
// ambient-capability raises (see `raise_ambient`)
pub ambient_raises: u64,
}
}
impl CapabilitiesTracker {
    /// Preset for a fresh process; currently identical to `new`.
    #[inline]
    #[must_use]
    pub fn for_process() -> Self {
        Self::new()
    }
    /// Records one capability check and its outcome.
    #[inline]
    pub fn check(&mut self, has_cap: bool) {
        self.checks = self.checks.saturating_add(1);
        let bucket = if has_cap {
            &mut self.granted
        } else {
            &mut self.denied
        };
        *bucket = bucket.saturating_add(1);
    }
    /// Counts one capability-set operation.
    #[inline]
    pub fn set_cap(&mut self) {
        self.set_ops = self.set_ops.saturating_add(1);
    }
    /// Counts one dropped capability.
    #[inline]
    pub fn drop_cap(&mut self) {
        self.drops = self.drops.saturating_add(1);
    }
    /// Counts one ambient-capability raise.
    #[inline]
    pub fn raise_ambient(&mut self) {
        self.ambient_raises = self.ambient_raises.saturating_add(1);
    }
    /// Percentage of checks granted; 100 before any checks run.
    #[inline]
    #[must_use]
    pub fn grant_rate(&self) -> f32 {
        if self.checks == 0 {
            return 100.0;
        }
        self.granted as f32 / self.checks as f32 * 100.0
    }
}
// LSM (Linux Security Module) accounting: hook verdicts, audits, policy
// loads, and transitions. Generated by `define_tracker!`; the tests below
// rely on the expansion providing `new`, `Default`, `Clone`, and `reset`.
define_tracker! {
pub struct LsmTracker {
// hook evaluations (see `hook`)
pub hooks: u64,
// hooks that allowed the operation
pub allowed: u64,
// hooks that denied the operation
pub denied: u64,
// audit emissions (see `audit`)
pub audits: u64,
// policy (re)loads (see `load_policy`)
pub policy_loads: u64,
// transitions recorded via `transition`
pub transitions: u64,
}
}
impl LsmTracker {
    /// Preset for an SELinux-style LSM; currently identical to `new`.
    #[inline]
    #[must_use]
    pub fn for_selinux() -> Self {
        Self::new()
    }
    /// Records one hook evaluation and its verdict.
    #[inline]
    pub fn hook(&mut self, allowed: bool) {
        self.hooks = self.hooks.saturating_add(1);
        let bucket = if allowed {
            &mut self.allowed
        } else {
            &mut self.denied
        };
        *bucket = bucket.saturating_add(1);
    }
    /// Counts one audit emission.
    #[inline]
    pub fn audit(&mut self) {
        self.audits = self.audits.saturating_add(1);
    }
    /// Counts one policy (re)load.
    #[inline]
    pub fn load_policy(&mut self) {
        self.policy_loads = self.policy_loads.saturating_add(1);
    }
    /// Counts one transition.
    #[inline]
    pub fn transition(&mut self) {
        self.transitions = self.transitions.saturating_add(1);
    }
    /// Percentage of hooks that allowed; 100 before any hook fires.
    #[inline]
    #[must_use]
    pub fn allow_rate(&self) -> f32 {
        if self.hooks == 0 {
            return 100.0;
        }
        self.allowed as f32 / self.hooks as f32 * 100.0
    }
}
// Audit-subsystem accounting: generated/written/dropped record totals plus
// a live backlog gauge with its high-water mark. Generated by
// `define_tracker!`; the tests below rely on the expansion providing
// `new`, `Default`, `Clone`, and `reset`.
define_tracker! {
pub struct AuditTracker {
// records generated (see `generate`)
pub records: u64,
// records successfully written (see `write`)
pub written: u64,
// records dropped (see `drop_record`)
pub dropped: u64,
// records currently queued; rises in `generate`, drains in `write`/`drop_record`
pub backlog: u32,
// high-water mark of `backlog`
pub peak_backlog: u32,
// installed audit rules (see `add_rule`)
pub rules: u32,
}
}
impl AuditTracker {
    /// Seeds a tracker with `rules` preloaded audit rules.
    #[inline]
    #[must_use]
    pub fn for_auditd(rules: u32) -> Self {
        Self {
            rules,
            ..Self::new()
        }
    }
    /// Queues a new record and updates the backlog high-water mark.
    #[inline]
    pub fn generate(&mut self) {
        self.records = self.records.saturating_add(1);
        self.backlog = self.backlog.saturating_add(1);
        self.peak_backlog = self.peak_backlog.max(self.backlog);
    }
    /// Drains one record from the backlog as successfully written.
    #[inline]
    pub fn write(&mut self) {
        self.written = self.written.saturating_add(1);
        self.backlog = self.backlog.saturating_sub(1);
    }
    /// Drains one record from the backlog as dropped.
    #[inline]
    pub fn drop_record(&mut self) {
        self.dropped = self.dropped.saturating_add(1);
        self.backlog = self.backlog.saturating_sub(1);
    }
    /// Counts one newly installed audit rule.
    #[inline]
    pub fn add_rule(&mut self) {
        self.rules = self.rules.saturating_add(1);
    }
    /// Percentage of generated records that were dropped; 0 with no records.
    #[inline]
    #[must_use]
    pub fn drop_rate(&self) -> f32 {
        if self.records == 0 {
            return 0.0;
        }
        self.dropped as f32 / self.records as f32 * 100.0
    }
}
// Integrity-measurement accounting (IMA-style, per `for_ima`): measurement,
// verification, appraisal, signature, and violation totals. Generated by
// `define_tracker!`; the tests elsewhere in this file rely on the
// expansion providing `new`, `Default`, `Clone`, and `reset`.
define_tracker! {
pub struct IntegrityTracker {
// measurements taken (see `measure`)
pub measurements: u64,
// verifications that passed (see `verify`)
pub verified: u64,
// verifications that failed
pub failed: u64,
// appraisal passes (see `appraise`)
pub appraisals: u64,
// signature validations (see `validate_sig`)
pub signatures: u64,
// integrity violations (see `violation`)
pub violations: u64,
}
}
impl IntegrityTracker {
    /// Preset for IMA-style measurement; currently identical to `new`.
    #[inline]
    #[must_use]
    pub fn for_ima() -> Self {
        Self::new()
    }
    /// Counts one file measurement.
    #[inline]
    pub fn measure(&mut self) {
        self.measurements = self.measurements.saturating_add(1);
    }
    /// Records one verification outcome.
    #[inline]
    pub fn verify(&mut self, success: bool) {
        if success {
            self.verified = self.verified.saturating_add(1);
        } else {
            self.failed = self.failed.saturating_add(1);
        }
    }
    /// Counts one appraisal pass.
    #[inline]
    pub fn appraise(&mut self) {
        self.appraisals = self.appraisals.saturating_add(1);
    }
    /// Counts one signature validation.
    #[inline]
    pub fn validate_sig(&mut self) {
        self.signatures = self.signatures.saturating_add(1);
    }
    /// Counts one integrity violation.
    #[inline]
    pub fn violation(&mut self) {
        self.violations = self.violations.saturating_add(1);
    }
    /// Percentage of verifications that passed; 100 before any verification.
    #[inline]
    #[must_use]
    pub fn success_rate(&self) -> f32 {
        // Saturating sum: plain `+` could overflow (debug-build panic) since
        // both counters independently saturate at u64::MAX.
        let total = self.verified.saturating_add(self.failed);
        if total == 0 {
            return 100.0;
        }
        (self.verified as f32 / total as f32) * 100.0
    }
}
#[cfg(test)]
mod cap_tests {
    //! Unit coverage for `CapabilitiesTracker`.
    use super::*;
    #[test]
    fn f_cap_001_new() {
        assert_eq!(CapabilitiesTracker::new().checks, 0);
    }
    #[test]
    fn f_cap_002_default() {
        assert_eq!(CapabilitiesTracker::default().granted, 0);
    }
    #[test]
    fn f_cap_003_factory() {
        assert_eq!(CapabilitiesTracker::for_process().checks, 0);
    }
    #[test]
    fn f_cap_004_check_granted() {
        let mut t = CapabilitiesTracker::new();
        t.check(true);
        assert_eq!(t.checks, 1);
        assert_eq!(t.granted, 1);
    }
    #[test]
    fn f_cap_005_check_denied() {
        let mut t = CapabilitiesTracker::new();
        t.check(false);
        assert_eq!(t.checks, 1);
        assert_eq!(t.denied, 1);
    }
    #[test]
    fn f_cap_006_set_cap() {
        let mut t = CapabilitiesTracker::new();
        t.set_cap();
        assert_eq!(t.set_ops, 1);
    }
    #[test]
    fn f_cap_007_drop_cap() {
        let mut t = CapabilitiesTracker::new();
        t.drop_cap();
        assert_eq!(t.drops, 1);
    }
    #[test]
    fn f_cap_008_ambient() {
        let mut t = CapabilitiesTracker::new();
        t.raise_ambient();
        assert_eq!(t.ambient_raises, 1);
    }
    #[test]
    fn f_cap_009_grant_rate() {
        let mut t = CapabilitiesTracker::new();
        t.check(true);
        t.check(false);
        assert!((t.grant_rate() - 50.0).abs() < 0.1);
    }
    #[test]
    fn f_cap_010_default_rate() {
        assert_eq!(CapabilitiesTracker::new().grant_rate(), 100.0);
    }
    #[test]
    fn f_cap_011_reset() {
        let mut t = CapabilitiesTracker::new();
        t.check(true);
        t.reset();
        assert_eq!(t.checks, 0);
    }
    #[test]
    fn f_cap_012_clone() {
        let mut t = CapabilitiesTracker::new();
        t.check(true);
        assert_eq!(t.clone().checks, t.checks);
    }
}
#[cfg(test)]
mod lsm_tests {
    //! Unit tests for `LsmTracker`: construction, hook accounting,
    //! allow-rate math, reset, and clone semantics.
    use super::*;

    #[test]
    fn f_lsm_001_new() {
        let t = LsmTracker::new();
        assert_eq!(t.hooks, 0);
    }

    #[test]
    fn f_lsm_002_default() {
        let t = LsmTracker::default();
        assert_eq!(t.allowed, 0);
    }

    #[test]
    fn f_lsm_003_factory() {
        let t = LsmTracker::for_selinux();
        assert_eq!(t.hooks, 0);
    }

    #[test]
    fn f_lsm_004_hook_allowed() {
        let mut t = LsmTracker::new();
        t.hook(true);
        assert_eq!(t.hooks, 1);
        assert_eq!(t.allowed, 1);
    }

    #[test]
    fn f_lsm_005_hook_denied() {
        let mut t = LsmTracker::new();
        t.hook(false);
        assert_eq!(t.hooks, 1);
        assert_eq!(t.denied, 1);
    }

    #[test]
    fn f_lsm_006_audit() {
        let mut t = LsmTracker::new();
        t.audit();
        assert_eq!(t.audits, 1);
    }

    #[test]
    fn f_lsm_007_policy() {
        let mut t = LsmTracker::new();
        t.load_policy();
        assert_eq!(t.policy_loads, 1);
    }

    #[test]
    fn f_lsm_008_transition() {
        let mut t = LsmTracker::new();
        t.transition();
        assert_eq!(t.transitions, 1);
    }

    #[test]
    fn f_lsm_009_allow_rate() {
        // One allowed out of two hooks -> 50%.
        let mut t = LsmTracker::new();
        t.hook(true);
        t.hook(false);
        assert!((t.allow_rate() - 50.0).abs() < 0.1);
    }

    #[test]
    fn f_lsm_010_default_rate() {
        // No hooks recorded defaults to 100%.
        let t = LsmTracker::new();
        assert_eq!(t.allow_rate(), 100.0);
    }

    #[test]
    fn f_lsm_011_reset() {
        let mut t = LsmTracker::new();
        t.hook(true);
        t.reset();
        assert_eq!(t.hooks, 0);
    }

    #[test]
    fn f_lsm_012_clone() {
        let mut t = LsmTracker::new();
        t.hook(true);
        let snapshot = t.clone();
        assert_eq!(t.hooks, snapshot.hooks);
    }
}
#[cfg(test)]
mod audit_tests {
    //! Unit tests for `AuditTracker`: record lifecycle (generate /
    //! write / drop), backlog and peak tracking, rules, and drop rate.
    use super::*;

    #[test]
    fn f_audit_001_new() {
        let t = AuditTracker::new();
        assert_eq!(t.records, 0);
    }

    #[test]
    fn f_audit_002_default() {
        let t = AuditTracker::default();
        assert_eq!(t.written, 0);
    }

    #[test]
    fn f_audit_003_factory() {
        let t = AuditTracker::for_auditd(10);
        assert_eq!(t.rules, 10);
    }

    #[test]
    fn f_audit_004_generate() {
        // Generating a record also grows the backlog.
        let mut t = AuditTracker::new();
        t.generate();
        assert_eq!(t.records, 1);
        assert_eq!(t.backlog, 1);
    }

    #[test]
    fn f_audit_005_write() {
        // Writing drains one record from the backlog.
        let mut t = AuditTracker::new();
        t.generate();
        t.write();
        assert_eq!(t.written, 1);
        assert_eq!(t.backlog, 0);
    }

    #[test]
    fn f_audit_006_drop() {
        // Dropping also drains one record from the backlog.
        let mut t = AuditTracker::new();
        t.generate();
        t.drop_record();
        assert_eq!(t.dropped, 1);
        assert_eq!(t.backlog, 0);
    }

    #[test]
    fn f_audit_007_add_rule() {
        let mut t = AuditTracker::new();
        t.add_rule();
        assert_eq!(t.rules, 1);
    }

    #[test]
    fn f_audit_008_peak() {
        // Peak backlog is retained after the backlog shrinks.
        let mut t = AuditTracker::new();
        t.generate();
        t.generate();
        t.write();
        assert_eq!(t.peak_backlog, 2);
    }

    #[test]
    fn f_audit_009_drop_rate() {
        // One drop out of one record -> 100%.
        let mut t = AuditTracker::new();
        t.generate();
        t.drop_record();
        assert!((t.drop_rate() - 100.0).abs() < 0.1);
    }

    #[test]
    fn f_audit_010_default_rate() {
        // No records recorded defaults to 0%.
        let t = AuditTracker::new();
        assert_eq!(t.drop_rate(), 0.0);
    }

    #[test]
    fn f_audit_011_reset() {
        let mut t = AuditTracker::for_auditd(10);
        t.reset();
        assert_eq!(t.rules, 0);
    }

    #[test]
    fn f_audit_012_clone() {
        let t = AuditTracker::for_auditd(10);
        let snapshot = t.clone();
        assert_eq!(t.rules, snapshot.rules);
    }
}
#[cfg(test)]
mod integrity_tests {
    //! Unit tests for `IntegrityTracker`: construction, each counter
    //! mutator, success-rate math, reset, and clone semantics.
    use super::*;

    #[test]
    fn f_integrity_001_new() {
        let t = IntegrityTracker::new();
        assert_eq!(t.measurements, 0);
    }

    #[test]
    fn f_integrity_002_default() {
        let t = IntegrityTracker::default();
        assert_eq!(t.verified, 0);
    }

    #[test]
    fn f_integrity_003_factory() {
        let t = IntegrityTracker::for_ima();
        assert_eq!(t.measurements, 0);
    }

    #[test]
    fn f_integrity_004_measure() {
        let mut t = IntegrityTracker::new();
        t.measure();
        assert_eq!(t.measurements, 1);
    }

    #[test]
    fn f_integrity_005_verify_success() {
        let mut t = IntegrityTracker::new();
        t.verify(true);
        assert_eq!(t.verified, 1);
    }

    #[test]
    fn f_integrity_006_verify_fail() {
        let mut t = IntegrityTracker::new();
        t.verify(false);
        assert_eq!(t.failed, 1);
    }

    #[test]
    fn f_integrity_007_appraise() {
        let mut t = IntegrityTracker::new();
        t.appraise();
        assert_eq!(t.appraisals, 1);
    }

    #[test]
    fn f_integrity_008_signature() {
        let mut t = IntegrityTracker::new();
        t.validate_sig();
        assert_eq!(t.signatures, 1);
    }

    #[test]
    fn f_integrity_009_violation() {
        let mut t = IntegrityTracker::new();
        t.violation();
        assert_eq!(t.violations, 1);
    }

    #[test]
    fn f_integrity_010_success_rate() {
        // One success out of two verifications -> 50%.
        let mut t = IntegrityTracker::new();
        t.verify(true);
        t.verify(false);
        assert!((t.success_rate() - 50.0).abs() < 0.1);
    }

    #[test]
    fn f_integrity_011_reset() {
        let mut t = IntegrityTracker::new();
        t.measure();
        t.reset();
        assert_eq!(t.measurements, 0);
    }

    #[test]
    fn f_integrity_012_clone() {
        let mut t = IntegrityTracker::new();
        t.measure();
        let snapshot = t.clone();
        assert_eq!(t.measurements, snapshot.measurements);
    }
}