use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use std::time::Instant;
use tokio::runtime::Runtime as TokioRuntime;
use ringkernel_core::hlc::HlcTimestamp;
use ringkernel_core::message::{CorrelationId, MessageEnvelope, MessageHeader, Priority};
use ringkernel_core::queue::{MessageQueue, SpscQueue};
use ringkernel_cpu::CpuRuntime;
/// Measures the cost of constructing message envelopes.
///
/// Covers two cases: building a `MessageHeader` alone (including HLC
/// timestamp acquisition) and assembling a full `MessageEnvelope` with a
/// cloned payload at several payload sizes.
fn bench_envelope_creation(c: &mut Criterion) {
    let mut group = c.benchmark_group("latency/envelope_creation");

    // Header construction only; the timestamp call is part of the measurement.
    group.bench_function("header_only", |b| {
        b.iter(|| {
            black_box(MessageHeader::new(1, 0, 1, 256, HlcTimestamp::now(1)));
        });
    });

    for &payload_size in &[64usize, 256, 1024, 4096] {
        group.throughput(Throughput::Bytes(payload_size as u64));
        group.bench_with_input(
            BenchmarkId::new("with_payload", payload_size),
            &payload_size,
            |b, &size| {
                // Template buffer allocated once; each iteration clones it so
                // the payload copy is included in the measured time.
                let template = vec![0u8; size];
                b.iter(|| {
                    let envelope = MessageEnvelope {
                        header: MessageHeader::new(1, 0, 1, size, HlcTimestamp::now(1)),
                        payload: template.clone(),
                    };
                    black_box(envelope);
                });
            },
        );
    }
    group.finish();
}
/// Measures single enqueue/dequeue round-trip latency on an `SpscQueue`
/// across several payload sizes.
fn bench_queue_latency(c: &mut Criterion) {
    let mut group = c.benchmark_group("latency/queue");
    group.sample_size(200);

    for &payload_size in &[64usize, 256, 1024, 4096] {
        group.throughput(Throughput::Bytes(payload_size as u64));
        group.bench_with_input(
            BenchmarkId::new("single_roundtrip", payload_size),
            &payload_size,
            |b, &size| {
                let queue = SpscQueue::new(1024);
                let template = MessageEnvelope {
                    header: MessageHeader::new(1, 0, 1, size, HlcTimestamp::now(1)),
                    payload: vec![0u8; size],
                };
                // NOTE(review): the clone sits inside the timed closure, so the
                // reported latency includes one envelope clone per iteration.
                b.iter(|| {
                    queue.try_enqueue(template.clone()).unwrap();
                    black_box(queue.try_dequeue().unwrap());
                });
            },
        );
    }
    group.finish();
}
/// Measures the full message pipeline per iteration: envelope construction
/// (fresh timestamp + payload clone), enqueue, dequeue, and header
/// validation, for several payload sizes.
fn bench_end_to_end_latency(c: &mut Criterion) {
    let mut group = c.benchmark_group("latency/end_to_end");
    group.sample_size(200);

    for &payload_size in &[64usize, 256, 1024, 4096] {
        group.throughput(Throughput::Bytes(payload_size as u64));
        group.bench_with_input(
            BenchmarkId::new("serialize_queue_deserialize", payload_size),
            &payload_size,
            |b, &size| {
                let queue = SpscQueue::new(1024);
                let source_bytes = vec![42u8; size];
                b.iter(|| {
                    // Build a fresh envelope each iteration so timestamping and
                    // payload copy are part of the measured path.
                    let envelope = MessageEnvelope {
                        header: MessageHeader::new(1, 0, 1, size, HlcTimestamp::now(1)),
                        payload: source_bytes.clone(),
                    };
                    queue.try_enqueue(envelope).unwrap();
                    let received = queue.try_dequeue().unwrap();
                    // Sanity-check the header survived the round trip.
                    assert!(received.header.validate());
                    black_box(received);
                });
            },
        );
    }
    group.finish();
}
/// Collects a large sample (500) of 256-byte round-trip timings via
/// `iter_custom` so Criterion can report latency percentiles.
fn bench_latency_percentiles(c: &mut Criterion) {
    let mut group = c.benchmark_group("latency/percentiles");
    group.sample_size(500);

    group.bench_function("message_roundtrip_256b", |b| {
        let queue = SpscQueue::new(1024);
        let template = MessageEnvelope {
            header: MessageHeader::new(1, 0, 1, 256, HlcTimestamp::now(1)),
            payload: vec![0u8; 256],
        };
        // Manual timing: one Instant pair wraps the whole batch of iterations.
        b.iter_custom(|iters| {
            let begin = Instant::now();
            for _ in 0..iters {
                queue.try_enqueue(template.clone()).unwrap();
                black_box(queue.try_dequeue().unwrap());
            }
            begin.elapsed()
        });
    });
    group.finish();
}
/// Measures burst behavior: enqueue `burst` 256-byte messages back-to-back,
/// then drain them all, for burst sizes of 10, 100, and 1000.
fn bench_burst_latency(c: &mut Criterion) {
    let mut group = c.benchmark_group("latency/burst");

    for &burst_size in &[10usize, 100, 1000] {
        group.bench_with_input(
            BenchmarkId::new("messages", burst_size),
            &burst_size,
            |b, &burst| {
                // Capacity 2048 comfortably holds the largest burst (1000).
                let queue = SpscQueue::new(2048);
                let template = MessageEnvelope {
                    header: MessageHeader::new(1, 0, 1, 256, HlcTimestamp::now(1)),
                    payload: vec![0u8; 256],
                };
                b.iter(|| {
                    // Fill phase, then drain phase — measured together.
                    for _ in 0..burst {
                        queue.try_enqueue(template.clone()).unwrap();
                    }
                    for _ in 0..burst {
                        black_box(queue.try_dequeue().unwrap());
                    }
                });
            },
        );
    }
    group.finish();
}
/// Validation benchmarks targeting the project's latency budget
/// (names suggest a 500µs target — the benchmark itself only reports timings).
///
/// Two scenarios: a plain 256-byte queue round trip, and the same round trip
/// with a live `CpuRuntime` instantiated (held alive for the duration) plus a
/// fresh envelope and timestamp per iteration.
fn bench_latency_validation(c: &mut Criterion) {
    let mut group = c.benchmark_group("latency_validation");
    group.sample_size(500);

    group.bench_function("target_500us_message_roundtrip", |b| {
        let queue = SpscQueue::new(1024);
        let template = MessageEnvelope {
            header: MessageHeader::new(1, 0, 1, 256, HlcTimestamp::now(1)),
            payload: vec![0u8; 256],
        };
        b.iter_custom(|iters| {
            let begin = Instant::now();
            for _ in 0..iters {
                queue.try_enqueue(template.clone()).unwrap();
                black_box(queue.try_dequeue().unwrap());
            }
            begin.elapsed()
        });
    });

    group.bench_function("target_500us_simulated_kernel_message", |b| {
        let rt = TokioRuntime::new().unwrap();
        // Kept alive (not otherwise used) so the runtime exists while timing.
        let _runtime = rt.block_on(CpuRuntime::new()).unwrap();
        let queue = SpscQueue::new(1024);
        b.iter_custom(|iters| {
            let begin = Instant::now();
            for _ in 0..iters {
                // Fresh timestamp + envelope each pass: allocation and
                // timestamping are part of the measured path.
                let stamp = HlcTimestamp::now(1);
                let envelope = MessageEnvelope {
                    header: MessageHeader::new(1, 0, 1, 256, stamp),
                    payload: vec![0u8; 256],
                };
                queue.try_enqueue(envelope).unwrap();
                let received = queue.try_dequeue().unwrap();
                assert!(received.header.validate());
                black_box(received);
            }
            begin.elapsed()
        });
    });
    group.finish();
}
/// Micro-benchmarks for `MessageHeader` construction and its builder-style
/// configuration methods, plus `validate()` on a prebuilt header.
fn bench_header_operations(c: &mut Criterion) {
    let mut group = c.benchmark_group("latency/header_ops");

    // Bare constructor.
    group.bench_function("create_header", |b| {
        b.iter(|| {
            black_box(MessageHeader::new(42, 0, 1, 1024, HlcTimestamp::now(1)));
        });
    });

    // Constructor + correlation id generation.
    group.bench_function("header_with_correlation", |b| {
        b.iter(|| {
            let h = MessageHeader::new(42, 0, 1, 1024, HlcTimestamp::now(1))
                .with_correlation(CorrelationId::generate());
            black_box(h);
        });
    });

    // Constructor + priority override.
    group.bench_function("header_with_priority", |b| {
        b.iter(|| {
            let h = MessageHeader::new(42, 0, 1, 1024, HlcTimestamp::now(1))
                .with_priority(Priority::High);
            black_box(h);
        });
    });

    // Constructor + every builder option chained.
    group.bench_function("header_full_config", |b| {
        b.iter(|| {
            let h = MessageHeader::new(42, 0, 1, 1024, HlcTimestamp::now(1))
                .with_correlation(CorrelationId::generate())
                .with_priority(Priority::Critical)
                .with_deadline(HlcTimestamp::now(1));
            black_box(h);
        });
    });

    // Validation cost on a header built once outside the timed loop.
    group.bench_function("header_validate", |b| {
        let prebuilt = MessageHeader::new(42, 0, 1, 1024, HlcTimestamp::now(1));
        b.iter(|| {
            black_box(prebuilt.validate());
        });
    });
    group.finish();
}
// Register every latency benchmark under one Criterion group and emit the
// harness entry point (`criterion_main!` generates `main`).
criterion_group!(
    benches,
    bench_envelope_creation,
    bench_queue_latency,
    bench_end_to_end_latency,
    bench_latency_percentiles,
    bench_burst_latency,
    bench_latency_validation,
    bench_header_operations,
);
criterion_main!(benches);