use std::sync::atomic::Ordering::Relaxed;
use std::sync::atomic::{AtomicU64, AtomicUsize};
/// Lock-free I/O and cache statistics for table files and their blocks.
///
/// All counters are atomics read/written with `Ordering::Relaxed` (see the
/// `impl` below): each counter is an independent statistic, so no
/// cross-counter ordering guarantees are required.
#[derive(Debug, Default)]
pub struct Metrics {
    // Table-file opens that missed the file cache (required a real open).
    pub(crate) table_file_opened_uncached: AtomicUsize,
    // Table-file opens served from the file cache.
    pub(crate) table_file_opened_cached: AtomicUsize,
    // Block loads that required an I/O, per block kind.
    pub(crate) index_block_load_io: AtomicUsize,
    pub(crate) filter_block_load_io: AtomicUsize,
    pub(crate) data_block_load_io: AtomicUsize,
    // Block loads served from the block cache, per block kind.
    pub(crate) index_block_load_cached: AtomicUsize,
    pub(crate) filter_block_load_cached: AtomicUsize,
    pub(crate) data_block_load_cached: AtomicUsize,
    pub(crate) range_tombstone_block_load_io: AtomicUsize,
    pub(crate) range_tombstone_block_load_cached: AtomicUsize,
    // Filter probe count and how many of those probes avoided an I/O.
    pub(crate) filter_queries: AtomicUsize,
    pub(crate) io_skipped_by_filter: AtomicUsize,
    // Skips attributed to the prefix bloom filter specifically.
    pub(crate) prefix_bloom_skips: AtomicUsize,
    // Raw I/O request counts per block kind (u64: these can grow large).
    pub(crate) data_block_io_requested: AtomicU64,
    pub(crate) index_block_io_requested: AtomicU64,
    pub(crate) filter_block_io_requested: AtomicU64,
    pub(crate) range_tombstone_block_io_requested: AtomicU64,
}
#[expect(
    clippy::cast_precision_loss,
    reason = "metrics can accept precision loss"
)]
impl Metrics {
    /// Shared ratio helper: `hits / total`, defined as `1.0` when `total`
    /// is zero so that an idle system reports a perfect (not zero) rate.
    ///
    /// Previously this pattern was duplicated across every hit-rate method.
    fn hit_rate(hits: f64, total: f64) -> f64 {
        if total == 0.0 { 1.0 } else { hits / total }
    }

    /// Fraction of table-file opens served from the file cache
    /// (`1.0` when no table file has been opened yet).
    pub fn table_file_cache_hit_rate(&self) -> f64 {
        let uncached = self.table_file_opened_uncached.load(Relaxed) as f64;
        let cached = self.table_file_opened_cached.load(Relaxed) as f64;
        Self::hit_rate(cached, cached + uncached)
    }

    /// Number of data block I/O requests issued.
    pub fn data_block_io(&self) -> u64 {
        self.data_block_io_requested.load(Relaxed)
    }

    /// Number of index block I/O requests issued.
    pub fn index_block_io(&self) -> u64 {
        self.index_block_io_requested.load(Relaxed)
    }

    /// Number of filter block I/O requests issued.
    pub fn filter_block_io(&self) -> u64 {
        self.filter_block_io_requested.load(Relaxed)
    }

    /// Number of range-tombstone block I/O requests issued.
    pub fn range_tombstone_block_io(&self) -> u64 {
        self.range_tombstone_block_io_requested.load(Relaxed)
    }

    /// Total block I/O requests across all block kinds.
    ///
    /// NOTE: the four counters are read with independent relaxed loads, so
    /// the sum is only approximate under concurrent updates.
    pub fn block_io(&self) -> u64 {
        self.data_block_io()
            + self.index_block_io()
            + self.filter_block_io()
            + self.range_tombstone_block_io()
    }

    /// Data block loads, cached and uncached combined.
    pub fn data_block_load_count(&self) -> usize {
        self.data_block_load_cached.load(Relaxed) + self.data_block_load_io.load(Relaxed)
    }

    /// Index block loads, cached and uncached combined.
    pub fn index_block_load_count(&self) -> usize {
        self.index_block_load_cached.load(Relaxed) + self.index_block_load_io.load(Relaxed)
    }

    /// Filter block loads, cached and uncached combined.
    pub fn filter_block_load_count(&self) -> usize {
        self.filter_block_load_cached.load(Relaxed) + self.filter_block_load_io.load(Relaxed)
    }

    /// Range-tombstone block loads, cached and uncached combined.
    pub fn range_tombstone_block_load_count(&self) -> usize {
        self.range_tombstone_block_load_cached.load(Relaxed)
            + self.range_tombstone_block_load_io.load(Relaxed)
    }

    /// Block loads that required an I/O, across all block kinds.
    pub fn block_load_io_count(&self) -> usize {
        self.data_block_load_io.load(Relaxed)
            + self.index_block_load_io.load(Relaxed)
            + self.filter_block_load_io.load(Relaxed)
            + self.range_tombstone_block_load_io.load(Relaxed)
    }

    /// Data block loads served from the block cache.
    pub fn data_block_load_cached_count(&self) -> usize {
        self.data_block_load_cached.load(Relaxed)
    }

    /// Index block loads served from the block cache.
    pub fn index_block_load_cached_count(&self) -> usize {
        self.index_block_load_cached.load(Relaxed)
    }

    /// Filter block loads served from the block cache.
    pub fn filter_block_load_cached_count(&self) -> usize {
        self.filter_block_load_cached.load(Relaxed)
    }

    /// Range-tombstone block loads served from the block cache.
    pub fn range_tombstone_block_load_cached_count(&self) -> usize {
        self.range_tombstone_block_load_cached.load(Relaxed)
    }

    /// Block loads served from the block cache, across all block kinds.
    pub fn block_load_cached_count(&self) -> usize {
        self.data_block_load_cached.load(Relaxed)
            + self.index_block_load_cached.load(Relaxed)
            + self.filter_block_load_cached.load(Relaxed)
            + self.range_tombstone_block_load_cached.load(Relaxed)
    }

    /// Total block loads (cached + I/O), across all block kinds.
    pub fn block_loads(&self) -> usize {
        self.block_load_io_count() + self.block_load_cached_count()
    }

    /// Data block cache hit rate (`1.0` when there were no loads).
    pub fn data_block_cache_hit_rate(&self) -> f64 {
        Self::hit_rate(
            self.data_block_load_cached_count() as f64,
            self.data_block_load_count() as f64,
        )
    }

    /// Filter block cache hit rate (`1.0` when there were no loads).
    pub fn filter_block_cache_hit_rate(&self) -> f64 {
        Self::hit_rate(
            self.filter_block_load_cached_count() as f64,
            self.filter_block_load_count() as f64,
        )
    }

    /// Index block cache hit rate (`1.0` when there were no loads).
    pub fn index_block_cache_hit_rate(&self) -> f64 {
        Self::hit_rate(
            self.index_block_load_cached_count() as f64,
            self.index_block_load_count() as f64,
        )
    }

    /// Range-tombstone block cache hit rate (`1.0` when there were no loads).
    pub fn range_tombstone_block_cache_hit_rate(&self) -> f64 {
        Self::hit_rate(
            self.range_tombstone_block_load_cached_count() as f64,
            self.range_tombstone_block_load_count() as f64,
        )
    }

    /// Overall block cache hit rate (`1.0` when there were no loads).
    pub fn block_cache_hit_rate(&self) -> f64 {
        Self::hit_rate(
            self.block_load_cached_count() as f64,
            self.block_loads() as f64,
        )
    }

    /// Fraction of filter queries that skipped an I/O
    /// (`1.0` when no filter has been queried yet).
    pub fn filter_efficiency(&self) -> f64 {
        let queries = self.filter_queries.load(Relaxed) as f64;
        let io_skipped = self.io_skipped_by_filter.load(Relaxed) as f64;
        Self::hit_rate(io_skipped, queries)
    }

    /// Number of filter queries performed.
    pub fn filter_queries(&self) -> usize {
        self.filter_queries.load(Relaxed)
    }

    /// Number of I/Os avoided thanks to filters.
    pub fn io_skipped_by_filter(&self) -> usize {
        self.io_skipped_by_filter.load(Relaxed)
    }

    /// Number of skips attributed to the prefix bloom filter.
    pub fn prefix_bloom_skips(&self) -> usize {
        self.prefix_bloom_skips.load(Relaxed)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::atomic::Ordering::Relaxed;

    #[test]
    fn range_tombstone_counters_default_zero() {
        // Freshly-constructed metrics report no range-tombstone activity.
        let metrics = Metrics::default();
        assert_eq!(metrics.range_tombstone_block_load_count(), 0);
        assert_eq!(metrics.range_tombstone_block_load_cached_count(), 0);
        assert_eq!(metrics.range_tombstone_block_io(), 0);
    }

    #[test]
    fn range_tombstone_block_load_count_sums_cached_and_io() {
        let metrics = Metrics::default();
        metrics.range_tombstone_block_load_cached.store(3, Relaxed);
        metrics.range_tombstone_block_load_io.store(7, Relaxed);
        assert_eq!(metrics.range_tombstone_block_load_count(), 10);
    }

    #[test]
    fn range_tombstone_cache_hit_rate_no_loads_returns_one() {
        let metrics = Metrics::default();
        let rate = metrics.range_tombstone_block_cache_hit_rate();
        assert!((rate - 1.0).abs() < f64::EPSILON);
    }

    #[test]
    fn range_tombstone_cache_hit_rate_mixed_loads() {
        // 3 cached out of 4 total loads -> 0.75 hit rate.
        let metrics = Metrics::default();
        metrics.range_tombstone_block_load_cached.store(3, Relaxed);
        metrics.range_tombstone_block_load_io.store(1, Relaxed);
        let rate = metrics.range_tombstone_block_cache_hit_rate();
        assert!((rate - 0.75).abs() < f64::EPSILON);
    }

    #[test]
    fn zero_query_cache_hit_rates_return_one() {
        // With no loads recorded, every per-kind hit rate is defined as 1.0.
        let metrics = Metrics::default();
        for rate in [
            metrics.data_block_cache_hit_rate(),
            metrics.filter_block_cache_hit_rate(),
            metrics.index_block_cache_hit_rate(),
        ] {
            assert!((rate - 1.0).abs() < f64::EPSILON);
        }
    }

    #[test]
    fn block_io_includes_range_tombstone() {
        let metrics = Metrics::default();
        metrics.data_block_io_requested.store(10, Relaxed);
        metrics.index_block_io_requested.store(20, Relaxed);
        metrics.filter_block_io_requested.store(30, Relaxed);
        metrics.range_tombstone_block_io_requested.store(40, Relaxed);
        assert_eq!(metrics.block_io(), 100);
    }

    #[test]
    fn block_load_io_count_includes_range_tombstone() {
        let metrics = Metrics::default();
        metrics.data_block_load_io.store(1, Relaxed);
        metrics.index_block_load_io.store(2, Relaxed);
        metrics.filter_block_load_io.store(3, Relaxed);
        metrics.range_tombstone_block_load_io.store(4, Relaxed);
        assert_eq!(metrics.block_load_io_count(), 10);
    }

    #[test]
    fn block_load_cached_count_includes_range_tombstone() {
        let metrics = Metrics::default();
        metrics.data_block_load_cached.store(5, Relaxed);
        metrics.index_block_load_cached.store(6, Relaxed);
        metrics.filter_block_load_cached.store(7, Relaxed);
        metrics.range_tombstone_block_load_cached.store(8, Relaxed);
        assert_eq!(metrics.block_load_cached_count(), 26);
    }
}