use criterion::{BenchmarkId, Criterion, Throughput, black_box, criterion_group, criterion_main};
use r2d2::Pool as R2d2Pool;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::Connection;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
mod bench_utils;
use bench_utils::create_benchmark_temp_dir;
// Criterion timing budget shared by every benchmark group in this file.
const WARM_UP_TIME: Duration = Duration::from_secs(2);
const MEASURE_TIME: Duration = Duration::from_secs(5);
// A thread-shareable handle to an r2d2 pool of SQLite connections; cloned
// (cheaply, via Arc) into each worker thread in the concurrent benchmarks.
type SharedPool = Arc<R2d2Pool<SqliteConnectionManager>>;
/// Creates the `test` table at `db_path` and seeds it with 100 rows
/// (`value_0` .. `value_99`).
///
/// All inserts run inside a single transaction with one prepared statement:
/// the original version issued 100 autocommit INSERTs, each paying a
/// statement compilation plus an implicit transaction commit (fsync), which
/// made benchmark setup needlessly slow. Panics on any SQLite error, which
/// is acceptable for benchmark setup code.
fn setup_test_db(db_path: &std::path::Path) {
    // `mut` is required because `transaction()` takes `&mut self`.
    let mut conn = Connection::open(db_path).unwrap();
    conn.execute(
        "CREATE TABLE IF NOT EXISTS test (id INTEGER PRIMARY KEY, value TEXT)",
        [],
    )
    .unwrap();
    let tx = conn.transaction().unwrap();
    {
        // Prepare once, bind 100 times — instead of compiling the statement
        // on every iteration.
        let mut stmt = tx.prepare("INSERT INTO test (value) VALUES (?)").unwrap();
        for i in 0..100 {
            stmt.execute([format!("value_{}", i)]).unwrap();
        }
    }
    tx.commit().unwrap();
}
/// Builds an r2d2 pool of at most `max_size` SQLite connections backed by
/// the database file at `db_path`, wrapped in an [`Arc`] so benchmark
/// threads can share it. Panics if the pool cannot be built.
fn create_shared_pool(db_path: &std::path::Path, max_size: u32) -> SharedPool {
    Arc::new(
        R2d2Pool::builder()
            .max_size(max_size)
            .build(SqliteConnectionManager::file(db_path))
            .unwrap(),
    )
}
fn bench_checkout_latency(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("pool_checkout_latency");
group.warm_up_time(WARM_UP_TIME);
group.measurement_time(MEASURE_TIME);
let temp_dir = create_benchmark_temp_dir();
let db_path = temp_dir.path().join("benchmark.db");
setup_test_db(&db_path);
group.bench_function("direct_open", |b| {
b.iter(|| {
let conn = Connection::open(&db_path).unwrap();
black_box(&conn);
drop(conn);
})
});
group.bench_function("pool_checkout_first", |b| {
b.iter(|| {
let temp_dir = create_benchmark_temp_dir();
let db_path = temp_dir.path().join("first.db");
setup_test_db(&db_path);
let pool = create_shared_pool(&db_path, 5);
let conn = pool.get().unwrap();
black_box(&conn);
drop(conn);
})
});
group.bench_function("pool_checkout_warm", |b| {
let temp_dir = create_benchmark_temp_dir();
let db_path = temp_dir.path().join("warm.db");
setup_test_db(&db_path);
let pool = create_shared_pool(&db_path, 5);
{
let _conn = pool.get().unwrap();
let _conn2 = pool.get().unwrap();
}
b.iter(|| {
let conn = pool.get().unwrap();
black_box(&conn);
drop(conn);
})
});
group.finish();
}
/// Benchmarks pool access under 1, 2, 4 and 8 concurrent threads, each
/// performing 10 checkout → single-row SELECT → return cycles against a
/// 10-connection pool built fresh per batch.
fn bench_concurrent_access(criterion: &mut Criterion) {
    let mut group = criterion.benchmark_group("pool_concurrent_access");
    group.warm_up_time(WARM_UP_TIME);
    group.measurement_time(MEASURE_TIME);
    for &threads in &[1, 2, 4, 8] {
        group.bench_with_input(
            BenchmarkId::from_parameter(threads),
            &threads,
            |b, &threads| {
                b.iter_batched(
                    || {
                        // Fresh database + pool per batch; the TempDir is
                        // returned so it outlives the timed routine.
                        let dir = create_benchmark_temp_dir();
                        let path = dir.path().join("concurrent.db");
                        setup_test_db(&path);
                        (create_shared_pool(&path, 10), dir)
                    },
                    |(pool, _dir)| {
                        let workers: Vec<_> = (0..threads)
                            .map(|_| {
                                let pool = Arc::clone(&pool);
                                thread::spawn(move || {
                                    for _ in 0..10 {
                                        let conn = pool.get().unwrap();
                                        let rows: Result<Vec<i64>, _> = conn
                                            .prepare("SELECT id FROM test LIMIT 1")
                                            .unwrap()
                                            .query_map([], |row| row.get(0))
                                            .unwrap()
                                            .collect();
                                        rows.unwrap();
                                        drop(conn);
                                    }
                                })
                            })
                            .collect();
                        for worker in workers {
                            worker.join().unwrap();
                        }
                    },
                    criterion::BatchSize::LargeInput,
                );
            },
        );
    }
    group.finish();
}
fn bench_query_throughput(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("pool_query_throughput");
group.warm_up_time(WARM_UP_TIME);
group.measurement_time(MEASURE_TIME);
for query_count in [100, 500, 1000].iter() {
group.throughput(Throughput::Elements(*query_count as u64));
group.bench_with_input(
BenchmarkId::new("pooled", query_count),
query_count,
|b, &query_count| {
b.iter_batched(
|| {
let temp_dir = create_benchmark_temp_dir();
let db_path = temp_dir.path().join("throughput.db");
setup_test_db(&db_path);
let pool = create_shared_pool(&db_path, 5);
(pool, temp_dir)
},
|(pool, _temp_dir)| {
for _ in 0..query_count {
let conn = pool.get().unwrap();
let _: Vec<i64> = conn
.prepare("SELECT id FROM test LIMIT 1")
.unwrap()
.query_map([], |row| row.get(0))
.unwrap()
.collect::<Result<_, _>>()
.unwrap();
drop(conn);
}
},
criterion::BatchSize::LargeInput,
);
},
);
group.throughput(Throughput::Elements(*query_count as u64));
group.bench_with_input(
BenchmarkId::new("direct", query_count),
query_count,
|b, &query_count| {
b.iter_batched(
|| {
let temp_dir = create_benchmark_temp_dir();
let db_path = temp_dir.path().join("throughput.db");
setup_test_db(&db_path);
(db_path, temp_dir)
},
|(db_path, _temp_dir)| {
for _ in 0..query_count {
let conn = Connection::open(&db_path).unwrap();
let _: Vec<i64> = conn
.prepare("SELECT id FROM test LIMIT 1")
.unwrap()
.query_map([], |row| row.get(0))
.unwrap()
.collect::<Result<_, _>>()
.unwrap();
drop(conn);
}
},
criterion::BatchSize::LargeInput,
);
},
);
}
group.finish();
}
/// Runs a fixed 8-thread workload (50 keyed SELECTs per thread) against
/// pools of capacity 1, 2, 5, 10 and 20 to show how pool size affects
/// contention.
fn bench_pool_sizes(criterion: &mut Criterion) {
    let mut group = criterion.benchmark_group("pool_size_comparison");
    group.warm_up_time(WARM_UP_TIME);
    group.measurement_time(MEASURE_TIME);
    let concurrent_threads = 8;
    for &size in &[1, 2, 5, 10, 20] {
        group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
            b.iter_batched(
                || {
                    // Fresh database + pool of the requested size per batch.
                    let dir = create_benchmark_temp_dir();
                    let path = dir.path().join("size_test.db");
                    setup_test_db(&path);
                    (create_shared_pool(&path, size), dir)
                },
                |(pool, _dir)| {
                    let handles: Vec<_> = (0..concurrent_threads)
                        .map(|_| {
                            let pool = Arc::clone(&pool);
                            thread::spawn(move || {
                                for _ in 0..50 {
                                    let conn = pool.get().unwrap();
                                    let values: Result<Vec<String>, _> = conn
                                        .prepare("SELECT value FROM test WHERE id = ?")
                                        .unwrap()
                                        .query_map([1], |row| row.get(0))
                                        .unwrap()
                                        .collect();
                                    values.unwrap();
                                    drop(conn);
                                }
                            })
                        })
                        .collect();
                    for handle in handles {
                        handle.join().unwrap();
                    }
                },
                criterion::BatchSize::LargeInput,
            );
        });
    }
    group.finish();
}
// Register all benchmark functions under the `benches` group and generate
// the binary's `main` that runs them via criterion's harness.
criterion_group!(
    benches,
    bench_checkout_latency,
    bench_concurrent_access,
    bench_query_throughput,
    bench_pool_sizes
);
criterion_main!(benches);