use arrow::{
array::{Float32Array, Float64Array},
datatypes::{DataType, Field, Schema},
record_batch::RecordBatch,
};
use criterion::{criterion_group, criterion_main, Criterion};
use datafusion::from_slice::FromSlice;
use datafusion::prelude::ExecutionContext;
use datafusion::{datasource::MemTable, error::Result};
use futures::executor::block_on;
use std::sync::Arc;
use tokio::runtime::Runtime;
/// Plan and execute `sql` against `ctx`, materializing all result batches.
///
/// The collected output is passed through `criterion::black_box` so the
/// optimizer cannot elide the work being benchmarked.
///
/// NOTE(review): a fresh Tokio runtime is built on every call, so runtime
/// construction is included in each measured iteration — confirm this is
/// intentional before comparing absolute numbers across benchmarks.
async fn query(ctx: &mut ExecutionContext, sql: &str) {
    let runtime = Runtime::new().unwrap();
    let dataframe = runtime.block_on(ctx.sql(sql)).unwrap();
    let batches = runtime.block_on(dataframe.collect()).unwrap();
    criterion::black_box(batches);
}
/// Build an `ExecutionContext` with a single in-memory table `t`.
///
/// The table has two non-nullable columns, `f32` (Float32) and `f64`
/// (Float64), split into `array_len / batch_size` record batches; every
/// row in batch `i` holds the value `i` cast to the column's type.
///
/// # Errors
/// Returns any error raised while constructing the `MemTable` or while
/// registering it with the context.
fn create_context(array_len: usize, batch_size: usize) -> Result<ExecutionContext> {
    // Rows beyond the last full batch would be silently dropped by the
    // integer division below; catch that misuse early in debug builds.
    debug_assert_eq!(
        array_len % batch_size,
        0,
        "array_len must be a multiple of batch_size"
    );

    let schema = Arc::new(Schema::new(vec![
        Field::new("f32", DataType::Float32, false),
        Field::new("f64", DataType::Float64, false),
    ]));

    // One batch per chunk, each column filled with the chunk index as a
    // constant value.
    let batches = (0..array_len / batch_size)
        .map(|i| {
            RecordBatch::try_new(
                schema.clone(),
                vec![
                    Arc::new(Float32Array::from_slice(&vec![i as f32; batch_size])),
                    Arc::new(Float64Array::from_slice(&vec![i as f64; batch_size])),
                ],
            )
            .unwrap()
        })
        .collect::<Vec<_>>();

    let mut ctx = ExecutionContext::new();
    let provider = MemTable::try_new(schema, vec![batches])?;
    ctx.register_table("t", Arc::new(provider))?;
    Ok(ctx)
}
/// Register the filter benchmarks with Criterion.
///
/// Each case builds its own fresh context over the same generated table
/// and times a single SQL filter query end-to-end.
fn criterion_benchmark(c: &mut Criterion) {
    const ARRAY_LEN: usize = 524_288;
    const BATCH_SIZE: usize = 4096;

    // (benchmark name, SQL text) pairs — one `bench_function` per entry.
    let cases = [
        ("filter_array", "select f32, f64 from t where f32 >= f64"),
        (
            "filter_scalar",
            "select f32, f64 from t where f32 >= 250 and f64 > 250",
        ),
        (
            "filter_scalar in list",
            "select f32, f64 from t where f32 in (10, 20, 30, 40)",
        ),
    ];

    for &(name, sql) in cases.iter() {
        c.bench_function(name, |b| {
            let mut ctx = create_context(ARRAY_LEN, BATCH_SIZE).unwrap();
            b.iter(|| block_on(query(&mut ctx, sql)))
        });
    }
}
// Wire the benchmark group into the `main` function Criterion generates.
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);