use std::sync::Arc;
use aisle::PruneRequest;
use arrow_array::{RecordBatch, StringArray};
use arrow_schema::{DataType, Field, Schema};
use bytes::Bytes;
use datafusion_expr::{Expr, col, expr::Like, lit};
use parquet::{
arrow::ArrowWriter,
file::{
metadata::{PageIndexPolicy, ParquetMetaData, ParquetMetaDataReader},
properties::{EnabledStatistics, WriterProperties},
},
};
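
/// Builds a single-column `RecordBatch` of UTF-8 strings over `schema`.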
fn make_batch(schema: &Schema, values: &[&str]) -> RecordBatch {
let array = StringArray::from(values.to_vec());
RecordBatch::try_new(Arc::new(schema.clone()), vec![Arc::new(array)]).unwrap()
}
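
/// Serializes `batches` into an in-memory Parquet file using the given
/// writer properties, returning the encoded bytes.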
fn write_parquet(batches: &[RecordBatch], props: WriterProperties) -> Vec<u8> {
let mut buffer: Vec<u8> = Vec::new();
let schema = batches[0].schema();
let mut writer = ArrowWriter::try_new(&mut buffer, schema, Some(props)).unwrap();
for batch in batches {
writer.write(batch).unwrap();
}
writer.close().unwrap();
buffer
}
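
/// Parses the Parquet footer from `bytes`, skipping the page index since
/// these tests exercise row-group statistics only.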
fn load_metadata(bytes: &[u8]) -> ParquetMetaData {
let bytes = Bytes::copy_from_slice(bytes);
ParquetMetaDataReader::new()
.with_page_index_policy(PageIndexPolicy::Skip)
.parse_and_finish(&bytes)
.unwrap()
}
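
/// Two two-row batches written with `set_max_row_group_size(2)` land in
/// separate row groups; `s LIKE 'foo%'` should keep only the first.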
#[test]
fn prunes_row_groups_with_like_prefix() {
let schema = Schema::new(vec![Field::new("s", DataType::Utf8, false)]);
let batch1 = make_batch(&schema, &["foo1", "foo2"]);
let batch2 = make_batch(&schema, &["bar1", "baz"]);
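    // Chunk-level statistics record min/max per column chunk (row group),
    // which is what row-group pruning consults.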
let props = WriterProperties::builder()
.set_statistics_enabled(EnabledStatistics::Chunk)
.set_max_row_group_size(2)
.build();
let bytes = write_parquet(&[batch1, batch2], props);
let metadata = load_metadata(&bytes);
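    // `s LIKE 'foo%'`: not negated, no escape character, case-sensitive.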
let expr = Expr::Like(Like::new(
false,
Box::new(col("s")),
Box::new(lit("foo%")),
None,
false,
));
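    // Prune against row-group statistics only; the page index was skipped
    // when loading the metadata above.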
let result = PruneRequest::new(&metadata, &schema)
.with_predicate(&expr)
.enable_page_index(false)
.emit_roaring(false)
.prune();
assert_eq!(result.row_groups(), &[0]);
}
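
// A companion sketch, not part of the original suite: it reuses the same
// PruneRequest calls as above and assumes aisle derives a prefix range from
// the LIKE pattern, so `s LIKE 'bar%'` should keep only the second row group
// ("foo1".."foo2" cannot start with "bar").
#[test]
fn prunes_row_groups_with_like_prefix_complement() {
    let schema = Schema::new(vec![Field::new("s", DataType::Utf8, false)]);
    let batch1 = make_batch(&schema, &["foo1", "foo2"]);
    let batch2 = make_batch(&schema, &["bar1", "baz"]);
    let props = WriterProperties::builder()
        .set_statistics_enabled(EnabledStatistics::Chunk)
        .set_max_row_group_size(2)
        .build();
    let bytes = write_parquet(&[batch1, batch2], props);
    let metadata = load_metadata(&bytes);
    let expr = Expr::Like(Like::new(
        false,
        Box::new(col("s")),
        Box::new(lit("bar%")),
        None,
        false,
    ));
    let result = PruneRequest::new(&metadata, &schema)
        .with_predicate(&expr)
        .enable_page_index(false)
        .emit_roaring(false)
        .prune();
    assert_eq!(result.row_groups(), &[1]);
}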