use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use alloc_counter::{count_alloc_future, AllocCounterSystem};
use criterion::async_executor::FuturesExecutor;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use ethereum_types::H256;
use keyvaluedb::{DBTransaction, IoStatsKind, KeyValueDB};
use keyvaluedb_sqlite::{Database, DatabaseConfig};
use once_cell::sync::Lazy;
use rand::distributions::Uniform;
use rand::{Rng, RngCore, SeedableRng};
use rand_chacha::ChaCha20Rng;
/// Path prefix for the benchmark's SQLite database file; override with the
/// `TEMPDIR` env var. The `-journal`/`-wal`/`-shm` companions share this prefix.
static TEMPDIR: Lazy<String> = Lazy::new(|| {
    // `unwrap_or_else` defers building the default String to the miss path
    // (clippy::or_fun_call); `unwrap_or` would allocate it even when the var is set.
    std::env::var("TEMPDIR").unwrap_or_else(|_| "./benches/_sqlite_bench_put".into())
});
/// Deterministic RNG seed for the benchmark; override via the `SEED` env var.
static SEED: Lazy<u64> = Lazy::new(|| match std::env::var("SEED") {
    // An unparsable explicit override is a user error worth aborting on.
    Ok(raw) => raw.parse::<u64>().expect("invalid SEED env var"),
    Err(_) => 12345678901234567890,
});
/// Approximate value size in bytes for each `put`; override via the
/// `WRITE_SIZE` env var. `n_random_bytes` jitters the actual length around it.
static WRITE_SIZE: Lazy<usize> = Lazy::new(|| {
    std::env::var("WRITE_SIZE")
        // Renamed the closure binding: this is the WRITE_SIZE string, not a seed
        // (copy-paste from the SEED static above).
        .map(|size| size.parse::<usize>().expect("invalid WRITE_SIZE env var"))
        .unwrap_or(140)
});
// Install the counting allocator process-wide so `count_alloc_future` (used in
// `put_delete`) can report how many allocations the timed section performs.
#[global_allocator]
static A: AllocCounterSystem = AllocCounterSystem;
// Register the benchmark with criterion and generate the binary's `main`.
criterion_group!(benches, put_delete);
criterion_main!(benches);
/// Remove any SQLite artifacts left by a previous run (the DB file plus its
/// `-journal`/`-wal`/`-shm` companions), then open a fresh single-column DB.
fn open_db() -> Database {
    let config = DatabaseConfig::new().with_columns(1);
    for suffix in ["", "-journal", "-wal", "-shm"] {
        let path = format!("{}{}", &*TEMPDIR, suffix);
        if let Err(err) = std::fs::remove_file(&path) {
            // "Already absent" is success; any other I/O failure aborts the bench.
            if err.kind() != std::io::ErrorKind::NotFound {
                panic!("{}", err);
            }
        }
    }
    Database::open(&*TEMPDIR, config).expect("sqlite works")
}
/// Produce roughly `n` random bytes: the length is `n` jittered by up to
/// ±`n / 5`, with the jitter's sign derived from the jitter value's parity.
///
/// For `n < 5` the jitter bucket `n / 5` is zero, so exactly `n` bytes are
/// returned (the original code panicked here: `gen_range` on an empty range).
fn n_random_bytes<R: RngCore>(rng: &mut R, n: usize) -> Vec<u8> {
    // Guard the empty-range panic for tiny `n`; no jitter is the sensible limit.
    let variability: i64 = if n >= 5 { rng.gen_range(0..(n / 5) as i64) } else { 0 };
    let plus_or_minus: i64 = if variability % 2 == 0 { 1 } else { -1 };
    // Inclusive range: the half-open `0..u8::MAX` silently excluded byte 255.
    let range = Uniform::from(0..=u8::MAX);
    rng.sample_iter(&range)
        .take((n as i64 + plus_or_minus * variability) as usize)
        .collect()
}
/// Criterion benchmark: each measured iteration runs 10 transactions, each
/// deleting one previously inserted key (once any exist) and inserting 4 new
/// random H256 keys with ~WRITE_SIZE-byte values. Allocation counts are
/// accumulated via `count_alloc_future`, and I/O statistics are printed after
/// the run.
fn put_delete(c: &mut Criterion) {
    // Seeded RNG cloned at the start of every `iter_custom` batch, so each
    // batch replays the same key/value sequence (deterministic inputs).
    let rng = ChaCha20Rng::seed_from_u64(*SEED);
    let db = open_db();
    {
        // Totals across all criterion batches, reported after the bench completes.
        let total_iterations = Arc::new(AtomicU64::new(0));
        let total_allocs = Arc::new(AtomicU64::new(0));
        c.bench_function("put and delete key", |b| {
            b.to_async(FuturesExecutor).iter_custom(|iterations| {
                let mut rng = rng.clone();
                let db = &db;
                // Keys inserted during this batch; deletions draw from here.
                let mut keys = Vec::with_capacity(1024);
                let total_iterations = total_iterations.clone();
                let total_allocs = total_allocs.clone();
                async move {
                    total_iterations.fetch_add(iterations, Ordering::Relaxed);
                    let mut elapsed = Duration::new(0, 0);
                    // Count allocations only inside the timed section.
                    let (alloc_stats, _) = count_alloc_future(async {
                        let start = Instant::now();
                        for _ in 0..(iterations * 10) {
                            let mut tx = DBTransaction::with_capacity(4);
                            if !keys.is_empty() {
                                // Delete a random existing key; `swap_remove` is
                                // O(1) since key order doesn't matter here.
                                let index = rng.gen_range(0..keys.len());
                                let key = keys.swap_remove(index);
                                tx.delete(0, key);
                            }
                            for _ in 0..4 {
                                let key = H256::random_using(&mut rng);
                                let value = n_random_bytes(&mut rng, *WRITE_SIZE);
                                tx.put(0, key, value);
                                keys.push(key);
                            }
                            // black_box keeps the optimizer from dropping the write.
                            #[allow(clippy::unit_arg)]
                            black_box(db.write(tx).await.unwrap());
                        }
                        elapsed = start.elapsed();
                    })
                    .await;
                    // alloc_stats.0 is treated as the allocation count reported
                    // by alloc_counter — NOTE(review): confirm field meaning.
                    total_allocs.fetch_add(alloc_stats.0 as u64, Ordering::Relaxed);
                    // `iter_custom` expects the measured duration as the return value.
                    elapsed
                }
            });
        });
        let total_iterations = total_iterations.load(Ordering::Relaxed);
        let total_allocs = total_allocs.load(Ordering::Relaxed);
        if total_iterations > 0 {
            println!(
                "[put and delete key] total: iterations={}, allocations={}; allocations per iter={:.2}",
                total_iterations,
                total_allocs,
                total_allocs as f64 / total_iterations as f64
            );
        }
        // DB-level I/O stats accumulated over the whole run.
        let stats = db.io_stats(IoStatsKind::Overall);
        println!(
            "[put and delete key] total: writes={}, deletes={}, prefix_deletes={}",
            stats.writes, stats.deletes, stats.prefix_deletes,
        );
        println!();
        println!("write size count");
        println!("---------- -----");
        // Sort bucket entries by size so the table prints in a stable order.
        let mut write_stats = stats.write_size_buckets.iter().collect::<Vec<_>>();
        write_stats.sort_by_key(|(size, _)| *size);
        for (size, count) in write_stats.iter() {
            println!("{size:10} {count:5}")
        }
        println!();
        println!("tx write size count avg duration ms");
        println!("------------- ----- ---------------");
        let mut tx_write_stats = stats.tx_write_size_buckets.iter().collect::<Vec<_>>();
        tx_write_stats.sort_by_key(|(size, _)| *size);
        for (size, (count, duration)) in tx_write_stats {
            // duration / 1000 rendered as ms — presumably the bucket stores
            // microseconds; TODO confirm units against keyvaluedb's IoStats.
            println!("{size:13} {count:5} {:15.3}", duration / 1000.)
        }
    }
}