use criterion::{BenchmarkId, Criterion, Throughput, black_box, criterion_group, criterion_main};
use sqlitegraph::{GraphEdgeCreate, GraphEntityCreate, SqliteGraph};
use std::sync::Arc;
use std::time::Duration;
fn insert_entity(
graph: &SqliteGraph,
create: GraphEntityCreate,
) -> Result<i64, sqlitegraph::SqliteGraphError> {
let entity = sqlitegraph::GraphEntity {
id: 0,
kind: create.kind,
name: create.name,
file_path: create.file_path,
data: create.data,
};
graph.insert_entity(&entity)
}
fn insert_edge(
graph: &SqliteGraph,
create: GraphEdgeCreate,
) -> Result<i64, sqlitegraph::SqliteGraphError> {
let edge = sqlitegraph::GraphEdge {
id: 0,
from_id: create.from_id,
to_id: create.to_id,
edge_type: create.edge_type,
data: create.data,
};
graph.insert_edge(&edge)
}
/// Touches the outgoing and incoming adjacency of every entity so that any
/// lazily populated query caches are filled before a benchmark starts timing.
fn warm_cache(graph: &SqliteGraph) -> Result<(), sqlitegraph::SqliteGraphError> {
    for id in graph.list_entity_ids()? {
        // Results are discarded on purpose — only the lookup side effect matters.
        let _ = graph.query().outgoing(id);
        let _ = graph.query().incoming(id);
    }
    Ok(())
}
/// Builds an in-memory benchmark graph with `size` entities, where each
/// entity has two outgoing "connects" edges to the next two entities
/// (indices wrap around, forming a ring).
fn create_benchmark_graph(size: usize) -> SqliteGraph {
    let graph = SqliteGraph::open_in_memory().expect("Failed to create graph");
    for idx in 0..size {
        insert_entity(
            &graph,
            GraphEntityCreate {
                kind: "bench".to_string(),
                name: format!("bench_node_{}", idx),
                file_path: Some(format!("bench_{}.rs", idx)),
                data: serde_json::json!({"index": idx}),
            },
        )
        .expect("Failed to insert entity");
    }
    let ids: Vec<i64> = graph.list_entity_ids().expect("Failed to get IDs");
    for (pos, &from) in ids.iter().enumerate() {
        // Link each node to its two successors, wrapping at the end.
        for offset in 1..=2 {
            let to = ids[(pos + offset) % ids.len()];
            insert_edge(
                &graph,
                GraphEdgeCreate {
                    from_id: from,
                    to_id: to,
                    edge_type: "connects".to_string(),
                    data: serde_json::json!({}),
                },
            )
            .expect("Failed to insert edge");
        }
    }
    graph
}
/// Times snapshot acquisition (plus a single `node_count` read) against
/// warmed graphs of several sizes.
fn bench_snapshot_acquisition(c: &mut Criterion) {
    let mut group = c.benchmark_group("snapshot_acquisition");
    for size in &[100usize, 1_000, 5_000, 10_000] {
        let graph = create_benchmark_graph(*size);
        warm_cache(&graph).expect("Failed to warm cache");
        group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, _| {
            b.iter(|| {
                let snap = black_box(graph.acquire_snapshot().unwrap());
                black_box(snap.node_count())
            })
        });
    }
    group.finish();
}
/// Times `Arc::clone` of a shared snapshot — i.e. the cost of handing an
/// existing snapshot to another consumer (a refcount bump, not a data copy).
fn bench_snapshot_clone(c: &mut Criterion) {
    let mut group = c.benchmark_group("snapshot_clone");
    let graph = create_benchmark_graph(1_000);
    warm_cache(&graph).expect("Failed to warm cache");
    let shared = Arc::new(graph.acquire_snapshot().unwrap());
    group.bench_function("arc_clone_1000", |b| {
        b.iter(|| {
            // The clone is dropped inside the closure so the drop cost is measured too.
            let _handle = black_box(Arc::clone(&shared));
        })
    });
    group.finish();
}
/// Times reading `node_count` from a snapshot that was acquired outside the
/// timed loop, over several graph sizes.
fn bench_snapshot_iteration(c: &mut Criterion) {
    let mut group = c.benchmark_group("snapshot_iteration");
    for size in &[100usize, 1_000, 10_000] {
        let graph = create_benchmark_graph(*size);
        warm_cache(&graph).expect("Failed to warm cache");
        let snap = graph.acquire_snapshot().unwrap();
        group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, _| {
            b.iter(|| {
                let n = black_box(snap.node_count());
                black_box(n)
            })
        });
    }
    group.finish();
}
/// Times bare snapshot acquisition (snapshot dropped immediately) at several
/// graph sizes.
///
/// NOTE(review): unlike the other benches, `warm_cache` is not called here —
/// presumably to measure acquisition with cold query caches; confirm intent.
fn bench_snapshot_update(c: &mut Criterion) {
    let mut group = c.benchmark_group("snapshot_update");
    for size in &[100usize, 1_000, 5_000, 10_000] {
        let graph = create_benchmark_graph(*size);
        group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, _| {
            b.iter(|| {
                let _snap = black_box(graph.acquire_snapshot().unwrap());
            })
        });
    }
    group.finish();
}
/// Measures snapshot acquisition under contention: `num_threads` threads are
/// released together by a barrier and each acquires 100 snapshots from a
/// shared `SnapshotManager`, counting non-empty snapshots via a relaxed
/// atomic so the work cannot be optimized away.
fn bench_concurrent_snapshot_acquisition(c: &mut Criterion) {
    let mut group = c.benchmark_group("concurrent_snapshots");
    use sqlitegraph::mvcc::SnapshotManager;
    use std::collections::HashMap;
    use std::sync::Barrier;
    use std::sync::atomic::{AtomicU64, Ordering};
    // PERF FIX: the adjacency maps are identical on every benchmark
    // iteration, so build them once here instead of inside `b.iter`, where
    // their construction cost was previously folded into the measurement.
    let mut outgoing = HashMap::new();
    let mut incoming = HashMap::new();
    for i in 0..1_000 {
        outgoing.insert(i, vec![]);
        incoming.insert(i, vec![]);
    }
    for thread_count in [1, 2, 4, 8].iter() {
        group.bench_with_input(
            BenchmarkId::from_parameter(thread_count),
            thread_count,
            |b, &num_threads| {
                b.iter(|| {
                    // Manager, barrier and counter are per-iteration state:
                    // each timed iteration runs a full spawn/join cycle.
                    let manager = Arc::new(SnapshotManager::with_state(&outgoing, &incoming));
                    let barrier = Arc::new(Barrier::new(num_threads));
                    let counter = Arc::new(AtomicU64::new(0));
                    let handles: Vec<_> = (0..num_threads)
                        .map(|_| {
                            let manager = manager.clone();
                            let barrier = barrier.clone();
                            let counter = counter.clone();
                            std::thread::spawn(move || {
                                // Wait until all threads are ready so the
                                // acquisitions genuinely contend.
                                barrier.wait();
                                for _ in 0..100 {
                                    let snapshot = manager.acquire_snapshot();
                                    if snapshot.node_count() > 0 {
                                        counter.fetch_add(1, Ordering::Relaxed);
                                    }
                                }
                            })
                        })
                        .collect();
                    for h in handles {
                        h.join().unwrap();
                    }
                    black_box(counter.load(Ordering::Relaxed))
                })
            },
        );
    }
    group.finish();
}
/// Times acquiring a snapshot and reading both its node and edge counts;
/// throughput is reported per element (one snapshot per iteration).
fn bench_memory_overhead(c: &mut Criterion) {
    let mut group = c.benchmark_group("memory_overhead");
    group.throughput(Throughput::Elements(1));
    for size in &[100usize, 1_000, 10_000] {
        let graph = create_benchmark_graph(*size);
        warm_cache(&graph).expect("Failed to warm cache");
        group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, _| {
            b.iter(|| {
                let snap = black_box(graph.acquire_snapshot().unwrap());
                let node_total = black_box(snap.node_count());
                let edge_total = black_box(snap.edge_count());
                (node_total, edge_total)
            })
        });
    }
    group.finish();
}
/// Stress-tests rapid snapshot create/drop cycles: each timed iteration
/// acquires and immediately discards 100 snapshots of a small warmed graph.
fn bench_rapid_snapshot_lifecycle(c: &mut Criterion) {
    let mut group = c.benchmark_group("snapshot_lifecycle");
    let graph = create_benchmark_graph(100);
    warm_cache(&graph).expect("Failed to warm cache");
    // BUG FIX: this benchmark was labeled "create_drop_1000" but the loop
    // performs only 100 create/drop cycles per iteration; the label is
    // corrected so reported results match the measured work.
    group.bench_function("create_drop_100", |b| {
        b.iter(|| {
            for _ in 0..100 {
                let snapshot = black_box(graph.acquire_snapshot().unwrap());
                black_box(snapshot.node_count());
            }
        })
    });
    group.finish();
}
/// Compares per-lookup cost of querying the live graph against reading from
/// a previously acquired snapshot, over the first 100 entity ids.
fn bench_snapshot_vs_direct_access(c: &mut Criterion) {
    let mut group = c.benchmark_group("snapshot_vs_direct");
    let graph = create_benchmark_graph(1_000);
    warm_cache(&graph).expect("Failed to warm cache");
    let snapshot = graph.acquire_snapshot().unwrap();
    let entity_ids = graph.list_entity_ids().expect("Failed to get IDs");
    group.bench_function("direct_access", |b| {
        b.iter(|| {
            for id in entity_ids.iter().take(100).copied() {
                black_box(graph.query().outgoing(id).unwrap());
            }
        })
    });
    group.bench_function("snapshot_access", |b| {
        b.iter(|| {
            for id in entity_ids.iter().take(100).copied() {
                black_box(snapshot.get_outgoing(id));
            }
        })
    });
    group.finish();
}
/// Sustained-acquisition throughput: repeated snapshot creation on a warmed
/// 1 000-node graph, measured over a longer 5 s window with 100 samples.
fn bench_snapshot_throughput(c: &mut Criterion) {
    let graph = create_benchmark_graph(1_000);
    warm_cache(&graph).expect("Failed to warm cache");
    let mut group = c.benchmark_group("throughput");
    group.sample_size(100);
    group.measurement_time(Duration::from_secs(5));
    group.bench_function("sustained_acquisition", |b| {
        b.iter(|| {
            let snap = black_box(graph.acquire_snapshot().unwrap());
            black_box(snap.node_count());
        })
    });
    group.finish();
}
// Registers every MVCC snapshot benchmark under a shared Criterion
// configuration: 100 samples, 3 s of measurement, and a 500 ms warm-up
// per benchmark (bench_snapshot_throughput overrides sample size and
// measurement time locally).
criterion_group! {
name = mvcc_benchmarks;
config = Criterion::default()
.sample_size(100)
.measurement_time(Duration::from_secs(3))
.warm_up_time(Duration::from_millis(500));
targets =
bench_snapshot_acquisition,
bench_snapshot_clone,
bench_snapshot_iteration,
bench_snapshot_update,
bench_concurrent_snapshot_acquisition,
bench_memory_overhead,
bench_rapid_snapshot_lifecycle,
bench_snapshot_vs_direct_access,
bench_snapshot_throughput
}
// Expands to the benchmark harness `main` entry point.
criterion_main!(mvcc_benchmarks);