use std::sync::Arc;
use std::time::Instant;

use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};

use bench_support as common;
use bench_support::for_each_policy;
use common::metrics::{
    BenchmarkConfig, measure_adaptation_speed, measure_scan_resistance, run_benchmark,
    standard_workload_suite,
};
use common::operation::{ReadThrough, run_operations};
use common::registry::STANDARD_WORKLOADS;
use common::workload::WorkloadSpec;
// Shared parameters for every benchmark group below.
// Entry capacity handed to each `make_cache` call.
const CAPACITY: usize = 4096;
// Size of the key universe fed to the workload generators (4x CAPACITY).
const UNIVERSE: u64 = 16_384;
// Operations executed inside each timed iteration.
const OPS: usize = 200_000;
// Fixed RNG seed so generated workloads are reproducible across runs.
const SEED: u64 = 42;
/// Benchmarks raw operation throughput for every (policy, workload) pair in
/// the standard workload suite ("hit_rate" group).
///
/// Each timed iteration builds a fresh cache and a deterministic key
/// generator (fixed `SEED`), so timing always starts from a cold cache and
/// runs are reproducible. Only the operation loop is inside the timed region;
/// setup happens before `Instant::now()`.
fn bench_hit_rates(c: &mut Criterion) {
    let mut group = c.benchmark_group("hit_rate");
    // Report results as elements (operations) per second.
    group.throughput(Throughput::Elements(OPS as u64));
    for workload_case in STANDARD_WORKLOADS {
        let workload = workload_case.workload;
        let workload_id = workload_case.id;
        for_each_policy! {
            with |policy_id, _display_name, make_cache| {
                group.bench_with_input(
                    BenchmarkId::new(policy_id, workload_id),
                    &workload,
                    |b, &wl| {
                        b.iter_custom(|iters| {
                            let mut total = std::time::Duration::ZERO;
                            for _ in 0..iters {
                                // Fresh cache + generator per iteration: cold start.
                                let mut cache = make_cache(CAPACITY);
                                let mut generator = WorkloadSpec {
                                    universe: UNIVERSE,
                                    workload: wl,
                                    seed: SEED,
                                }
                                .generator();
                                let mut op_model = ReadThrough::new(1.0, SEED);
                                let start = Instant::now();
                                // black_box prevents the optimizer from eliding
                                // the measured work when its result is unused.
                                let _ = std::hint::black_box(run_operations(
                                    &mut cache,
                                    &mut generator,
                                    OPS,
                                    &mut op_model,
                                    Arc::new,
                                ));
                                total += start.elapsed();
                            }
                            total
                        });
                    },
                );
            }
        }
    }
    group.finish();
}
/// Benchmarks how quickly each policy handles the scan-resistance scenario
/// ("scan_resistance" group).
///
/// Each timed iteration starts from a fresh cold cache; the scenario itself
/// is driven by `measure_scan_resistance`. Cache construction happens outside
/// the timed region.
fn bench_scan_resistance(c: &mut Criterion) {
    let mut group = c.benchmark_group("scan_resistance");
    for_each_policy! {
        with |policy_id, _display_name, make_cache| {
            group.bench_function(policy_id, |b| {
                b.iter_custom(|iters| {
                    let mut total = std::time::Duration::ZERO;
                    for _ in 0..iters {
                        let mut cache = make_cache(CAPACITY);
                        let start = Instant::now();
                        // black_box prevents the optimizer from eliding the
                        // measured work when its result is unused.
                        let _ = std::hint::black_box(measure_scan_resistance(
                            &mut cache, CAPACITY, UNIVERSE, Arc::new,
                        ));
                        total += start.elapsed();
                    }
                    total
                });
            });
        }
    }
    group.finish();
}
/// Benchmarks how quickly each policy handles the adaptation-speed scenario
/// ("adaptation_speed" group).
///
/// Each timed iteration starts from a fresh cold cache; the scenario itself
/// is driven by `measure_adaptation_speed`. Cache construction happens
/// outside the timed region.
fn bench_adaptation_speed(c: &mut Criterion) {
    let mut group = c.benchmark_group("adaptation_speed");
    for_each_policy! {
        with |policy_id, _display_name, make_cache| {
            group.bench_function(policy_id, |b| {
                b.iter_custom(|iters| {
                    let mut total = std::time::Duration::ZERO;
                    for _ in 0..iters {
                        let mut cache = make_cache(CAPACITY);
                        let start = Instant::now();
                        // black_box prevents the optimizer from eliding the
                        // measured work when its result is unused.
                        let _ = std::hint::black_box(measure_adaptation_speed(
                            &mut cache, CAPACITY, UNIVERSE, Arc::new,
                        ));
                        total += start.elapsed();
                    }
                    total
                });
            });
        }
    }
    group.finish();
}
/// Runs the full `run_benchmark` pipeline for every (policy, workload) pair
/// in the standard suite ("comprehensive" group).
///
/// A `BenchmarkConfig` is built once per workload (warmup of `CAPACITY` ops,
/// latency sampling every 100th op, capped at 10k samples) and shared across
/// policies via `bench_with_input`. Each timed iteration starts from a fresh
/// cold cache built outside the timed region.
fn bench_comprehensive(c: &mut Criterion) {
    let mut group = c.benchmark_group("comprehensive");
    let suite = standard_workload_suite(UNIVERSE, SEED);
    for (workload_name, spec) in &suite {
        let config = BenchmarkConfig {
            name: workload_name.to_string(),
            capacity: CAPACITY,
            operations: OPS,
            warmup_ops: CAPACITY,
            workload: *spec,
            latency_sample_rate: 100,
            max_latency_samples: 10_000,
        };
        for_each_policy! {
            with |policy_id, _display_name, make_cache| {
                group.bench_with_input(
                    BenchmarkId::new(policy_id, workload_name),
                    &config,
                    |b, cfg| {
                        b.iter_custom(|iters| {
                            let mut total = std::time::Duration::ZERO;
                            for _ in 0..iters {
                                let mut cache = make_cache(CAPACITY);
                                let start = Instant::now();
                                // black_box prevents the optimizer from eliding
                                // the measured work when its result is unused.
                                let _ = std::hint::black_box(run_benchmark(
                                    policy_id, &mut cache, cfg, Arc::new,
                                ));
                                total += start.elapsed();
                            }
                            total
                        });
                    },
                );
            }
        }
    }
    group.finish();
}
// Register every benchmark function under one Criterion group and generate
// the binary's `main` entry point.
criterion_group!(
benches,
bench_hit_rates,
bench_scan_resistance,
bench_adaptation_speed,
bench_comprehensive,
);
criterion_main!(benches);