use std::{
borrow::Cow, collections::HashMap, fmt::Debug, marker::PhantomData, mem::size_of,
sync::Arc, vec,
};
use super::{get_projected_output_ordering, statistics::MinMaxStatistics};
use crate::datasource::{listing::PartitionedFile, object_store::ObjectStoreUrl};
use crate::{error::Result, scalar::ScalarValue};
use arrow::array::{ArrayData, BufferBuilder};
use arrow::buffer::Buffer;
use arrow::datatypes::{ArrowNativeType, UInt16Type};
use arrow_array::{ArrayRef, DictionaryArray, RecordBatch, RecordBatchOptions};
use arrow_schema::{DataType, Field, Schema, SchemaRef};
use datafusion_common::stats::Precision;
use datafusion_common::{
exec_err, ColumnStatistics, Constraints, DataFusionError, Statistics,
};
use datafusion_physical_expr::LexOrdering;
use log::warn;
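/// Converts a type to one suitable for use as a partition column.
/// Returns `Dictionary(UInt16, val_type)`, a more memory-efficient encoding
/// for a column whose value repeats for every row of a file.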
pub fn wrap_partition_type_in_dict(val_type: DataType) -> DataType {
DataType::Dictionary(Box::new(DataType::UInt16), Box::new(val_type))
}
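/// Wraps a partition value in a dictionary `ScalarValue`, matching the
/// encoding produced by [`wrap_partition_type_in_dict`].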
pub fn wrap_partition_value_in_dict(val: ScalarValue) -> ScalarValue {
ScalarValue::Dictionary(Box::new(DataType::UInt16), Box::new(val))
}
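/// The base configuration to provide when creating a physical plan for
/// scanning files of any given format.
///
/// Built with [`FileScanConfig::new`] and the `with_*` methods. A minimal
/// sketch (file names and byte sizes below are illustrative only):
///
/// ```
/// # use std::sync::Arc;
/// # use arrow_schema::Schema;
/// # use datafusion::datasource::listing::PartitionedFile;
/// # use datafusion::datasource::object_store::ObjectStoreUrl;
/// # use datafusion::datasource::physical_plan::FileScanConfig;
/// # let file_schema = Arc::new(Schema::empty());
/// // create a config for reading data from file://
/// let object_store_url = ObjectStoreUrl::local_filesystem();
/// let config = FileScanConfig::new(object_store_url, file_schema)
///     .with_limit(Some(1000))            // read only the first 1000 records
///     .with_projection(Some(vec![2, 3])) // project columns 2 and 3
///     // read file1.parquet (1234 bytes) as a single group
///     .with_file(PartitionedFile::new("file1.parquet", 1234))
///     // read file2.parquet (56 bytes) and file3.parquet (78 bytes)
///     // together as another group
///     .with_file_group(vec![
///         PartitionedFile::new("file2.parquet", 56),
///         PartitionedFile::new("file3.parquet", 78),
///     ]);
/// ```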
#[derive(Clone)]
pub struct FileScanConfig {
/// Object store URL, used to get an `ObjectStore` instance at execution time
pub object_store_url: ObjectStoreUrl,
/// Schema before `projection` is applied. It contains all the columns that
/// may appear in the files, but not the table partition columns.
pub file_schema: SchemaRef,
/// List of files to be processed, grouped into partitions
pub file_groups: Vec<Vec<PartitionedFile>>,
/// Table constraints
pub constraints: Constraints,
/// Estimated overall statistics of the files. Defaults to [`Statistics::new_unknown`].
pub statistics: Statistics,
/// Columns on which to project the data. Indexes that are higher than the
/// number of columns in `file_schema` refer to `table_partition_cols`.
pub projection: Option<Vec<usize>>,
/// The maximum number of records to read from this plan. If `None`, all
/// records after filtering are returned.
pub limit: Option<usize>,
/// The partitioning columns
pub table_partition_cols: Vec<Field>,
/// All equivalent lexicographical orderings that describe the schema
pub output_ordering: Vec<LexOrdering>,
}
impl FileScanConfig {
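/// Creates a new `FileScanConfig` with default settings: no file groups, no
/// projection, no limit, no partition columns, and unknown statistics. Use
/// the `with_*` methods to populate it (see the example on [`FileScanConfig`]).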
pub fn new(object_store_url: ObjectStoreUrl, file_schema: SchemaRef) -> Self {
let statistics = Statistics::new_unknown(&file_schema);
Self {
object_store_url,
file_schema,
file_groups: vec![],
constraints: Constraints::empty(),
statistics,
projection: None,
limit: None,
table_partition_cols: vec![],
output_ordering: vec![],
}
}
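/// Sets the table constraints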
pub fn with_constraints(mut self, constraints: Constraints) -> Self {
self.constraints = constraints;
self
}
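/// Sets the overall file statistics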
pub fn with_statistics(mut self, statistics: Statistics) -> Self {
self.statistics = statistics;
self
}
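/// Sets the column projection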
pub fn with_projection(mut self, projection: Option<Vec<usize>>) -> Self {
self.projection = projection;
self
}
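/// Sets the maximum number of records to read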
pub fn with_limit(mut self, limit: Option<usize>) -> Self {
self.limit = limit;
self
}
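/// Adds a single file as its own group. See [`Self::file_groups`].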
pub fn with_file(self, file: PartitionedFile) -> Self {
self.with_file_group(vec![file])
}
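/// Appends multiple new file groups. See [`Self::file_groups`].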
pub fn with_file_groups(
mut self,
mut file_groups: Vec<Vec<PartitionedFile>>,
) -> Self {
self.file_groups.append(&mut file_groups);
self
}
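/// Adds a new file group. See [`Self::file_groups`].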
pub fn with_file_group(mut self, file_group: Vec<PartitionedFile>) -> Self {
self.file_groups.push(file_group);
self
}
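/// Sets the table partition columns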
pub fn with_table_partition_cols(mut self, table_partition_cols: Vec<Field>) -> Self {
self.table_partition_cols = table_partition_cols;
self
}
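/// Sets the known output orderings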
pub fn with_output_ordering(mut self, output_ordering: Vec<LexOrdering>) -> Self {
self.output_ordering = output_ordering;
self
}
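/// Projects the schema, constraints, statistics, and output orderings onto
/// the configured column indices. Indices past the end of `file_schema`
/// refer to `table_partition_cols`, whose statistics are unknown.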
pub fn project(&self) -> (SchemaRef, Constraints, Statistics, Vec<LexOrdering>) {
if self.projection.is_none() && self.table_partition_cols.is_empty() {
return (
Arc::clone(&self.file_schema),
self.constraints.clone(),
self.statistics.clone(),
self.output_ordering.clone(),
);
}
let proj_indices = if let Some(proj) = &self.projection {
proj
} else {
let len = self.file_schema.fields().len() + self.table_partition_cols.len();
&(0..len).collect::<Vec<_>>()
};
let mut table_fields = vec![];
let mut table_cols_stats = vec![];
for idx in proj_indices {
if *idx < self.file_schema.fields().len() {
let field = self.file_schema.field(*idx);
table_fields.push(field.clone());
table_cols_stats.push(self.statistics.column_statistics[*idx].clone())
} else {
let partition_idx = idx - self.file_schema.fields().len();
table_fields.push(self.table_partition_cols[partition_idx].to_owned());
table_cols_stats.push(ColumnStatistics::new_unknown())
}
}
let table_stats = Statistics {
num_rows: self.statistics.num_rows,
total_byte_size: Precision::Absent,
column_statistics: table_cols_stats,
};
let projected_schema = Arc::new(Schema::new_with_metadata(
table_fields,
self.file_schema.metadata().clone(),
));
let projected_constraints = self
.constraints
.project(proj_indices)
.unwrap_or_else(Constraints::empty);
let projected_output_ordering =
get_projected_output_ordering(self, &projected_schema);
(
projected_schema,
projected_constraints,
table_stats,
projected_output_ordering,
)
}
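/// Returns the names of the file columns (partition columns excluded)
/// selected by the projection, if a projection is set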
#[cfg_attr(not(feature = "avro"), allow(unused))]
pub(crate) fn projected_file_column_names(&self) -> Option<Vec<String>> {
self.projection.as_ref().map(|p| {
p.iter()
.filter(|col_idx| **col_idx < self.file_schema.fields().len())
.map(|col_idx| self.file_schema.field(*col_idx).name())
.cloned()
.collect()
})
}
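/// Projects only the file schema, ignoring any partition columns; returns
/// the full file schema when no projection is set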
pub(crate) fn projected_file_schema(&self) -> SchemaRef {
let fields = self.file_column_projection_indices().map(|indices| {
indices
.iter()
.map(|col_idx| self.file_schema.field(*col_idx))
.cloned()
.collect::<Vec<_>>()
});
fields.map_or_else(
|| Arc::clone(&self.file_schema),
|f| {
Arc::new(Schema::new_with_metadata(
f,
self.file_schema.metadata.clone(),
))
},
)
}
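/// Returns the projection indices that fall within the file schema,
/// dropping any that refer to partition columns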
pub(crate) fn file_column_projection_indices(&self) -> Option<Vec<usize>> {
self.projection.as_ref().map(|p| {
p.iter()
.filter(|col_idx| **col_idx < self.file_schema.fields().len())
.copied()
.collect()
})
}
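/// Splits file groups into new groups by bin-packing on statistics, such
/// that any two files within a group are ordered and non-overlapping with
/// respect to `sort_order`. Uses a greedy first-fit pass over the files
/// sorted by their minimum values, producing as few groups as it can.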
pub fn split_groups_by_statistics(
table_schema: &SchemaRef,
file_groups: &[Vec<PartitionedFile>],
sort_order: &LexOrdering,
) -> Result<Vec<Vec<PartitionedFile>>> {
let flattened_files = file_groups.iter().flatten().collect::<Vec<_>>();
if flattened_files.is_empty() {
return Ok(vec![]);
}
let statistics = MinMaxStatistics::new_from_files(
sort_order,
table_schema,
None,
flattened_files.iter().copied(),
)
.map_err(|e| {
e.context("construct min/max statistics for split_groups_by_statistics")
})?;
let indices_sorted_by_min = statistics.min_values_sorted();
let mut file_groups_indices: Vec<Vec<usize>> = vec![];
for (idx, min) in indices_sorted_by_min {
let file_group_to_insert = file_groups_indices.iter_mut().find(|group| {
min > statistics.max(
*group
.last()
.expect("groups should be nonempty at construction"),
)
});
match file_group_to_insert {
Some(group) => group.push(idx),
None => file_groups_indices.push(vec![idx]),
}
}
Ok(file_groups_indices
.into_iter()
.map(|file_group_indices| {
file_group_indices
.into_iter()
.map(|idx| flattened_files[idx].clone())
.collect()
})
.collect())
}
}
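/// A helper that projects partition columns into the file record batches.
///
/// One interesting trick is the usage of a cache for the key buffers of the
/// partition column dictionaries: the partition value is the same for the
/// whole batch, so the same buffer of all-zero keys can be reused across
/// batches.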
pub struct PartitionColumnProjector {
/// Cache of all-zero key buffers (partition columns are materialized as
/// dictionary arrays with a single dictionary value, so every key is zero)
key_buffer_cache: ZeroBufferGenerators,
/// Mapping from the index in the list of partition columns to the index in
/// the target schema, sorted by target-schema index so the partition columns
/// can be inserted in order
projected_partition_indexes: Vec<(usize, usize)>,
/// The schema of the table once the projection is applied
projected_schema: SchemaRef,
}
impl PartitionColumnProjector {
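/// - `projected_schema`: the target schema with both file and partition columns
/// - `table_partition_cols`: all the partition column names, in the same
///   order as in the table schema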
pub fn new(projected_schema: SchemaRef, table_partition_cols: &[String]) -> Self {
let mut idx_map = HashMap::new();
for (partition_idx, partition_name) in table_partition_cols.iter().enumerate() {
if let Ok(schema_idx) = projected_schema.index_of(partition_name) {
idx_map.insert(partition_idx, schema_idx);
}
}
let mut projected_partition_indexes: Vec<_> = idx_map.into_iter().collect();
projected_partition_indexes.sort_by(|(_, a), (_, b)| a.cmp(b));
Self {
projected_partition_indexes,
key_buffer_cache: Default::default(),
projected_schema,
}
}
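/// Transforms a `file_batch` into the `projected_schema` by inserting, for
/// each projected partition column, an array of `file_batch.num_rows()`
/// repetitions of the corresponding value from `partition_values`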
pub fn project(
&mut self,
file_batch: RecordBatch,
partition_values: &[ScalarValue],
) -> Result<RecordBatch> {
let expected_cols =
self.projected_schema.fields().len() - self.projected_partition_indexes.len();
if file_batch.columns().len() != expected_cols {
return exec_err!(
"Unexpected batch schema from file, expected {} cols but got {}",
expected_cols,
file_batch.columns().len()
);
}
let mut cols = file_batch.columns().to_vec();
for &(pidx, sidx) in &self.projected_partition_indexes {
let p_value =
partition_values
.get(pidx)
.ok_or(DataFusionError::Execution(
"Invalid partitioning found on disk".to_string(),
))?;
let mut partition_value = Cow::Borrowed(p_value);
let field = self.projected_schema.field(sidx);
let expected_data_type = field.data_type();
let actual_data_type = partition_value.data_type();
if let DataType::Dictionary(key_type, _) = expected_data_type {
if !matches!(actual_data_type, DataType::Dictionary(_, _)) {
warn!("Partition value for column {} was not dictionary-encoded, applied auto-fix.", field.name());
partition_value = Cow::Owned(ScalarValue::Dictionary(
key_type.clone(),
Box::new(partition_value.as_ref().clone()),
));
}
}
cols.insert(
sidx,
create_output_array(
&mut self.key_buffer_cache,
partition_value.as_ref(),
file_batch.num_rows(),
)?,
)
}
RecordBatch::try_new_with_options(
Arc::clone(&self.projected_schema),
cols,
&RecordBatchOptions::new().with_row_count(Some(file_batch.num_rows())),
)
.map_err(Into::into)
}
}
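/// Per-key-type cache of all-zero key buffers, so partition-column
/// dictionary keys can be reused across batches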
#[derive(Debug, Default)]
struct ZeroBufferGenerators {
gen_i8: ZeroBufferGenerator<i8>,
gen_i16: ZeroBufferGenerator<i16>,
gen_i32: ZeroBufferGenerator<i32>,
gen_i64: ZeroBufferGenerator<i64>,
gen_u8: ZeroBufferGenerator<u8>,
gen_u16: ZeroBufferGenerator<u16>,
gen_u32: ZeroBufferGenerator<u32>,
gen_u64: ZeroBufferGenerator<u64>,
}
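/// Lazily builds and caches an all-zero `Buffer` of native type `T`; the
/// cached buffer is only rebuilt when a batch needs more rows than any
/// batch seen before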
#[derive(Debug, Default)]
struct ZeroBufferGenerator<T>
where
T: ArrowNativeType,
{
cache: Option<Buffer>,
_t: PhantomData<T>,
}
impl<T> ZeroBufferGenerator<T>
where
T: ArrowNativeType,
{
const SIZE: usize = size_of::<T>();
fn get_buffer(&mut self, n_vals: usize) -> Buffer {
match &mut self.cache {
Some(buf) if buf.len() >= n_vals * Self::SIZE => {
buf.slice_with_length(0, n_vals * Self::SIZE)
}
_ => {
let mut key_buffer_builder = BufferBuilder::<T>::new(n_vals);
// `advance` zero-fills the newly reserved slots
key_buffer_builder.advance(n_vals);
self.cache.insert(key_buffer_builder.finish()).clone()
}
}
}
}
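/// Builds a dictionary array of `len` rows whose keys are all zero and whose
/// single dictionary value is `dict_val`, reusing the cached zero-key buffer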
fn create_dict_array<T>(
buffer_gen: &mut ZeroBufferGenerator<T>,
dict_val: &ScalarValue,
len: usize,
data_type: DataType,
) -> Result<ArrayRef>
where
T: ArrowNativeType,
{
let dict_vals = dict_val.to_array()?;
let sliced_key_buffer = buffer_gen.get_buffer(len);
let mut builder = ArrayData::builder(data_type)
.len(len)
.add_buffer(sliced_key_buffer);
builder = builder.add_child_data(dict_vals.to_data());
Ok(Arc::new(DictionaryArray::<UInt16Type>::from(
builder.build().unwrap(),
)))
}
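/// Creates an array of `len` repetitions of `val`: a zero-key dictionary
/// array when `val` is dictionary-encoded with a supported key type,
/// otherwise via `ScalarValue::to_array_of_size`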
fn create_output_array(
key_buffer_cache: &mut ZeroBufferGenerators,
val: &ScalarValue,
len: usize,
) -> Result<ArrayRef> {
if let ScalarValue::Dictionary(key_type, dict_val) = &val {
match key_type.as_ref() {
DataType::Int8 => {
return create_dict_array(
&mut key_buffer_cache.gen_i8,
dict_val,
len,
val.data_type(),
);
}
DataType::Int16 => {
return create_dict_array(
&mut key_buffer_cache.gen_i16,
dict_val,
len,
val.data_type(),
);
}
DataType::Int32 => {
return create_dict_array(
&mut key_buffer_cache.gen_i32,
dict_val,
len,
val.data_type(),
);
}
DataType::Int64 => {
return create_dict_array(
&mut key_buffer_cache.gen_i64,
dict_val,
len,
val.data_type(),
);
}
DataType::UInt8 => {
return create_dict_array(
&mut key_buffer_cache.gen_u8,
dict_val,
len,
val.data_type(),
);
}
DataType::UInt16 => {
return create_dict_array(
&mut key_buffer_cache.gen_u16,
dict_val,
len,
val.data_type(),
);
}
DataType::UInt32 => {
return create_dict_array(
&mut key_buffer_cache.gen_u32,
dict_val,
len,
val.data_type(),
);
}
DataType::UInt64 => {
return create_dict_array(
&mut key_buffer_cache.gen_u64,
dict_val,
len,
val.data_type(),
);
}
_ => {}
}
}
val.to_array_of_size(len)
}
#[cfg(test)]
mod tests {
use arrow_array::Int32Array;
use super::*;
use crate::{test::columns, test_util::aggr_test_schema};
#[test]
fn physical_plan_config_no_projection() {
let file_schema = aggr_test_schema();
let conf = config_for_projection(
Arc::clone(&file_schema),
None,
Statistics::new_unknown(&file_schema),
to_partition_cols(vec![(
"date".to_owned(),
wrap_partition_type_in_dict(DataType::Utf8),
)]),
);
let (proj_schema, _, proj_statistics, _) = conf.project();
assert_eq!(proj_schema.fields().len(), file_schema.fields().len() + 1);
assert_eq!(
proj_schema.field(file_schema.fields().len()).name(),
"date",
"partition columns are the last columns"
);
assert_eq!(
proj_statistics.column_statistics.len(),
file_schema.fields().len() + 1
);
let col_names = conf.projected_file_column_names();
assert_eq!(col_names, None);
let col_indices = conf.file_column_projection_indices();
assert_eq!(col_indices, None);
}
#[test]
fn physical_plan_config_no_projection_tab_cols_as_field() {
let file_schema = aggr_test_schema();
let table_partition_col =
Field::new("date", wrap_partition_type_in_dict(DataType::Utf8), true)
.with_metadata(HashMap::from_iter(vec![(
"key_whatever".to_owned(),
"value_whatever".to_owned(),
)]));
let conf = config_for_projection(
Arc::clone(&file_schema),
None,
Statistics::new_unknown(&file_schema),
vec![table_partition_col.clone()],
);
let (proj_schema, _, _, _) = conf.project();
assert_eq!(proj_schema.fields().len(), file_schema.fields().len() + 1);
assert_eq!(
*proj_schema.field(file_schema.fields().len()),
table_partition_col,
"partition columns are the last columns and ust have all values defined in created field"
);
}
#[test]
fn physical_plan_config_with_projection() {
let file_schema = aggr_test_schema();
let conf = config_for_projection(
Arc::clone(&file_schema),
Some(vec![file_schema.fields().len(), 0]),
Statistics {
num_rows: Precision::Inexact(10),
column_statistics: (0..file_schema.fields().len())
.map(|i| ColumnStatistics {
distinct_count: Precision::Inexact(i),
..Default::default()
})
.collect(),
total_byte_size: Precision::Absent,
},
to_partition_cols(vec![(
"date".to_owned(),
wrap_partition_type_in_dict(DataType::Utf8),
)]),
);
let (proj_schema, _, proj_statistics, _) = conf.project();
assert_eq!(
columns(&proj_schema),
vec!["date".to_owned(), "c1".to_owned()]
);
let proj_stat_cols = proj_statistics.column_statistics;
assert_eq!(proj_stat_cols.len(), 2);
assert_eq!(proj_stat_cols[1].distinct_count, Precision::Inexact(0));
let col_names = conf.projected_file_column_names();
assert_eq!(col_names, Some(vec!["c1".to_owned()]));
let col_indices = conf.file_column_projection_indices();
assert_eq!(col_indices, Some(vec![0]));
}
#[test]
fn partition_column_projector() {
let file_batch = build_table_i32(
("a", &vec![0, 1, 2]),
("b", &vec![-2, -1, 0]),
("c", &vec![10, 11, 12]),
);
let partition_cols = vec![
(
"year".to_owned(),
wrap_partition_type_in_dict(DataType::Utf8),
),
(
"month".to_owned(),
wrap_partition_type_in_dict(DataType::Utf8),
),
(
"day".to_owned(),
wrap_partition_type_in_dict(DataType::Utf8),
),
];
let conf = config_for_projection(
file_batch.schema(),
Some(vec![
0,
1,
2,
file_batch.schema().fields().len(),
file_batch.schema().fields().len() + 2,
]),
Statistics::new_unknown(&file_batch.schema()),
to_partition_cols(partition_cols.clone()),
);
let (proj_schema, ..) = conf.project();
let mut proj = PartitionColumnProjector::new(
proj_schema,
&partition_cols
.iter()
.map(|x| x.0.clone())
.collect::<Vec<_>>(),
);
let projected_batch = proj
.project(
file_batch,
&[
wrap_partition_value_in_dict(ScalarValue::from("2021")),
wrap_partition_value_in_dict(ScalarValue::from("10")),
wrap_partition_value_in_dict(ScalarValue::from("26")),
],
)
.expect("Projection of partition columns into record batch failed");
let expected = [
"+---+----+----+------+-----+",
"| a | b | c | year | day |",
"+---+----+----+------+-----+",
"| 0 | -2 | 10 | 2021 | 26 |",
"| 1 | -1 | 11 | 2021 | 26 |",
"| 2 | 0 | 12 | 2021 | 26 |",
"+---+----+----+------+-----+",
];
crate::assert_batches_eq!(expected, &[projected_batch]);
let file_batch = build_table_i32(
("a", &vec![5, 6, 7, 8, 9]),
("b", &vec![-10, -9, -8, -7, -6]),
("c", &vec![12, 13, 14, 15, 16]),
);
let projected_batch = proj
.project(
file_batch,
&[
wrap_partition_value_in_dict(ScalarValue::from("2021")),
wrap_partition_value_in_dict(ScalarValue::from("10")),
wrap_partition_value_in_dict(ScalarValue::from("27")),
],
)
.expect("Projection of partition columns into record batch failed");
let expected = [
"+---+-----+----+------+-----+",
"| a | b | c | year | day |",
"+---+-----+----+------+-----+",
"| 5 | -10 | 12 | 2021 | 27 |",
"| 6 | -9 | 13 | 2021 | 27 |",
"| 7 | -8 | 14 | 2021 | 27 |",
"| 8 | -7 | 15 | 2021 | 27 |",
"| 9 | -6 | 16 | 2021 | 27 |",
"+---+-----+----+------+-----+",
];
crate::assert_batches_eq!(expected, &[projected_batch]);
let file_batch = build_table_i32(
("a", &vec![0, 1, 3]),
("b", &vec![2, 3, 4]),
("c", &vec![4, 5, 6]),
);
let projected_batch = proj
.project(
file_batch,
&[
wrap_partition_value_in_dict(ScalarValue::from("2021")),
wrap_partition_value_in_dict(ScalarValue::from("10")),
wrap_partition_value_in_dict(ScalarValue::from("28")),
],
)
.expect("Projection of partition columns into record batch failed");
let expected = [
"+---+---+---+------+-----+",
"| a | b | c | year | day |",
"+---+---+---+------+-----+",
"| 0 | 2 | 4 | 2021 | 28 |",
"| 1 | 3 | 5 | 2021 | 28 |",
"| 3 | 4 | 6 | 2021 | 28 |",
"+---+---+---+------+-----+",
];
crate::assert_batches_eq!(expected, &[projected_batch]);
let file_batch = build_table_i32(
("a", &vec![0, 1, 2]),
("b", &vec![-2, -1, 0]),
("c", &vec![10, 11, 12]),
);
let projected_batch = proj
.project(
file_batch,
&[
ScalarValue::from("2021"),
ScalarValue::from("10"),
ScalarValue::from("26"),
],
)
.expect("Projection of partition columns into record batch failed");
let expected = [
"+---+----+----+------+-----+",
"| a | b | c | year | day |",
"+---+----+----+------+-----+",
"| 0 | -2 | 10 | 2021 | 26 |",
"| 1 | -1 | 11 | 2021 | 26 |",
"| 2 | 0 | 12 | 2021 | 26 |",
"+---+----+----+------+-----+",
];
crate::assert_batches_eq!(expected, &[projected_batch]);
}
#[test]
fn test_projected_file_schema_with_partition_col() {
let schema = aggr_test_schema();
let partition_cols = vec![
(
"part1".to_owned(),
wrap_partition_type_in_dict(DataType::Utf8),
),
(
"part2".to_owned(),
wrap_partition_type_in_dict(DataType::Utf8),
),
];
let projection = config_for_projection(
schema.clone(),
Some(vec![0, 3, 5, schema.fields().len()]),
Statistics::new_unknown(&schema),
to_partition_cols(partition_cols),
)
.projected_file_schema();
let expected_columns = vec!["c1", "c4", "c6"];
let actual_columns = projection
.fields()
.iter()
.map(|f| f.name().clone())
.collect::<Vec<_>>();
assert_eq!(expected_columns, actual_columns);
}
#[test]
fn test_projected_file_schema_without_projection() {
let schema = aggr_test_schema();
let partition_cols = vec![
(
"part1".to_owned(),
wrap_partition_type_in_dict(DataType::Utf8),
),
(
"part2".to_owned(),
wrap_partition_type_in_dict(DataType::Utf8),
),
];
let projection = config_for_projection(
schema.clone(),
None,
Statistics::new_unknown(&schema),
to_partition_cols(partition_cols),
)
.projected_file_schema();
assert_eq!(projection.fields(), schema.fields());
}
#[test]
fn test_split_groups_by_statistics() -> Result<()> {
use chrono::TimeZone;
use datafusion_common::DFSchema;
use datafusion_expr::execution_props::ExecutionProps;
use object_store::{path::Path, ObjectMeta};
struct File {
name: &'static str,
date: &'static str,
statistics: Vec<Option<(f64, f64)>>,
}
impl File {
fn new(
name: &'static str,
date: &'static str,
statistics: Vec<Option<(f64, f64)>>,
) -> Self {
Self {
name,
date,
statistics,
}
}
}
struct TestCase {
name: &'static str,
file_schema: Schema,
files: Vec<File>,
sort: Vec<datafusion_expr::SortExpr>,
expected_result: Result<Vec<Vec<&'static str>>, &'static str>,
}
use datafusion_expr::col;
let cases = vec![
TestCase {
name: "test sort",
file_schema: Schema::new(vec![Field::new(
"value".to_string(),
DataType::Float64,
false,
)]),
files: vec![
File::new("0", "2023-01-01", vec![Some((0.00, 0.49))]),
File::new("1", "2023-01-01", vec![Some((0.50, 1.00))]),
File::new("2", "2023-01-02", vec![Some((0.00, 1.00))]),
],
sort: vec![col("value").sort(true, false)],
expected_result: Ok(vec![vec!["0", "1"], vec!["2"]]),
},
TestCase {
name: "test sort with files ordered differently",
file_schema: Schema::new(vec![Field::new(
"value".to_string(),
DataType::Float64,
false,
)]),
files: vec![
File::new("0", "2023-01-01", vec![Some((0.00, 0.49))]),
File::new("2", "2023-01-02", vec![Some((0.00, 1.00))]),
File::new("1", "2023-01-01", vec![Some((0.50, 1.00))]),
],
sort: vec![col("value").sort(true, false)],
expected_result: Ok(vec![vec!["0", "1"], vec!["2"]]),
},
TestCase {
name: "reverse sort",
file_schema: Schema::new(vec![Field::new(
"value".to_string(),
DataType::Float64,
false,
)]),
files: vec![
File::new("0", "2023-01-01", vec![Some((0.00, 0.49))]),
File::new("1", "2023-01-01", vec![Some((0.50, 1.00))]),
File::new("2", "2023-01-02", vec![Some((0.00, 1.00))]),
],
sort: vec![col("value").sort(false, true)],
expected_result: Ok(vec![vec!["1", "0"], vec!["2"]]),
},
TestCase {
name: "no nullable sort columns",
file_schema: Schema::new(vec![Field::new(
"value".to_string(),
DataType::Float64,
true,
)]),
files: vec![
File::new("0", "2023-01-01", vec![Some((0.00, 0.49))]),
File::new("1", "2023-01-01", vec![Some((0.50, 1.00))]),
File::new("2", "2023-01-02", vec![Some((0.00, 1.00))]),
],
sort: vec![col("value").sort(true, false)],
expected_result: Err("construct min/max statistics for split_groups_by_statistics\ncaused by\nbuild min rows\ncaused by\ncreate sorting columns\ncaused by\nError during planning: cannot sort by nullable column")
},
TestCase {
name: "all three non-overlapping",
file_schema: Schema::new(vec![Field::new(
"value".to_string(),
DataType::Float64,
false,
)]),
files: vec![
File::new("0", "2023-01-01", vec![Some((0.00, 0.49))]),
File::new("1", "2023-01-01", vec![Some((0.50, 0.99))]),
File::new("2", "2023-01-02", vec![Some((1.00, 1.49))]),
],
sort: vec![col("value").sort(true, false)],
expected_result: Ok(vec![vec!["0", "1", "2"]]),
},
TestCase {
name: "all three overlapping",
file_schema: Schema::new(vec![Field::new(
"value".to_string(),
DataType::Float64,
false,
)]),
files: vec![
File::new("0", "2023-01-01", vec![Some((0.00, 0.49))]),
File::new("1", "2023-01-01", vec![Some((0.00, 0.49))]),
File::new("2", "2023-01-02", vec![Some((0.00, 0.49))]),
],
sort: vec![col("value").sort(true, false)],
expected_result: Ok(vec![vec!["0"], vec!["1"], vec!["2"]]),
},
TestCase {
name: "empty input",
file_schema: Schema::new(vec![Field::new(
"value".to_string(),
DataType::Float64,
false,
)]),
files: vec![],
sort: vec![col("value").sort(true, false)],
expected_result: Ok(vec![]),
},
TestCase {
name: "one file missing statistics",
file_schema: Schema::new(vec![Field::new(
"value".to_string(),
DataType::Float64,
false,
)]),
files: vec![
File::new("0", "2023-01-01", vec![Some((0.00, 0.49))]),
File::new("1", "2023-01-01", vec![Some((0.00, 0.49))]),
File::new("2", "2023-01-02", vec![None]),
],
sort: vec![col("value").sort(true, false)],
expected_result: Err("construct min/max statistics for split_groups_by_statistics\ncaused by\ncollect min/max values\ncaused by\nget min/max for column: 'value'\ncaused by\nError during planning: statistics not found"),
},
];
for case in cases {
let table_schema = Arc::new(Schema::new(
case.file_schema
.fields()
.clone()
.into_iter()
.cloned()
.chain(Some(Arc::new(Field::new(
"date".to_string(),
DataType::Utf8,
false,
))))
.collect::<Vec<_>>(),
));
let sort_order = LexOrdering::from(
case.sort
.into_iter()
.map(|expr| {
crate::physical_planner::create_physical_sort_expr(
&expr,
&DFSchema::try_from(table_schema.as_ref().clone())?,
&ExecutionProps::default(),
)
})
.collect::<Result<Vec<_>>>()?,
);
let partitioned_files =
case.files.into_iter().map(From::from).collect::<Vec<_>>();
let result = FileScanConfig::split_groups_by_statistics(
&table_schema,
&[partitioned_files.clone()],
&sort_order,
);
let results_by_name = result
.as_ref()
.map(|file_groups| {
file_groups
.iter()
.map(|file_group| {
file_group
.iter()
.map(|file| {
partitioned_files
.iter()
.find_map(|f| {
if f.object_meta == file.object_meta {
Some(
f.object_meta
.location
.as_ref()
.rsplit('/')
.next()
.unwrap()
.trim_end_matches(".parquet"),
)
} else {
None
}
})
.unwrap()
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>()
})
.map_err(|e| e.strip_backtrace().leak() as &'static str);
assert_eq!(results_by_name, case.expected_result, "{}", case.name);
}
return Ok(());
impl From<File> for PartitionedFile {
fn from(file: File) -> Self {
PartitionedFile {
object_meta: ObjectMeta {
location: Path::from(format!(
"data/date={}/{}.parquet",
file.date, file.name
)),
last_modified: chrono::Utc.timestamp_nanos(0),
size: 0,
e_tag: None,
version: None,
},
partition_values: vec![ScalarValue::from(file.date)],
range: None,
statistics: Some(Statistics {
num_rows: Precision::Absent,
total_byte_size: Precision::Absent,
column_statistics: file
.statistics
.into_iter()
.map(|stats| {
stats
.map(|(min, max)| ColumnStatistics {
min_value: Precision::Exact(ScalarValue::from(
min,
)),
max_value: Precision::Exact(ScalarValue::from(
max,
)),
..Default::default()
})
.unwrap_or_default()
})
.collect::<Vec<_>>(),
}),
extensions: None,
metadata_size_hint: None,
}
}
}
}
fn config_for_projection(
file_schema: SchemaRef,
projection: Option<Vec<usize>>,
statistics: Statistics,
table_partition_cols: Vec<Field>,
) -> FileScanConfig {
FileScanConfig::new(ObjectStoreUrl::parse("test:///").unwrap(), file_schema)
.with_projection(projection)
.with_statistics(statistics)
.with_table_partition_cols(table_partition_cols)
}
fn to_partition_cols(table_partition_cols: Vec<(String, DataType)>) -> Vec<Field> {
table_partition_cols
.iter()
.map(|(name, dtype)| Field::new(name, dtype.clone(), false))
.collect::<Vec<_>>()
}
pub fn build_table_i32(
a: (&str, &Vec<i32>),
b: (&str, &Vec<i32>),
c: (&str, &Vec<i32>),
) -> RecordBatch {
let schema = Schema::new(vec![
Field::new(a.0, DataType::Int32, false),
Field::new(b.0, DataType::Int32, false),
Field::new(c.0, DataType::Int32, false),
]);
RecordBatch::try_new(
Arc::new(schema),
vec![
Arc::new(Int32Array::from(a.1.clone())),
Arc::new(Int32Array::from(b.1.clone())),
Arc::new(Int32Array::from(c.1.clone())),
],
)
.unwrap()
}
}