use std::str::FromStr;
use std::{any::Any, sync::Arc};
use arrow::compute::SortOptions;
use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
use async_trait::async_trait;
use dashmap::DashMap;
use datafusion_common::ToDFSchema;
use datafusion_expr::expr::Sort;
use datafusion_optimizer::utils::conjunction;
use datafusion_physical_expr::{create_physical_expr, PhysicalSortExpr};
use futures::{future, stream, StreamExt, TryStreamExt};
use object_store::path::Path;
use object_store::ObjectMeta;
use crate::datasource::file_format::file_type::{FileCompressionType, FileType};
use crate::datasource::{
file_format::{
avro::AvroFormat, csv::CsvFormat, json::JsonFormat, parquet::ParquetFormat,
FileFormat,
},
get_statistics_with_limit,
listing::ListingTableUrl,
TableProvider, TableType,
};
use crate::logical_expr::TableProviderFilterPushDown;
use crate::physical_plan;
use crate::physical_plan::file_format::partition_type_wrap;
use crate::{
error::{DataFusionError, Result},
execution::context::SessionState,
logical_expr::Expr,
physical_plan::{
empty::EmptyExec, file_format::FileScanConfig, project_schema, ExecutionPlan,
Statistics,
},
};
use super::PartitionedFile;
use super::helpers::{expr_applicable_for_cols, pruned_partition_list, split_files};
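/// Configuration for creating a [`ListingTable`]: one or more table paths
/// plus an optional file schema and [`ListingOptions`], both of which can
/// be inferred from the files on object storage.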
#[derive(Debug, Clone)]
pub struct ListingTableConfig {
pub table_paths: Vec<ListingTableUrl>,
pub file_schema: Option<SchemaRef>,
pub options: Option<ListingOptions>,
}
impl ListingTableConfig {
pub fn new(table_path: ListingTableUrl) -> Self {
let table_paths = vec![table_path];
Self {
table_paths,
file_schema: None,
options: None,
}
}
pub fn new_with_multi_paths(table_paths: Vec<ListingTableUrl>) -> Self {
Self {
table_paths,
file_schema: None,
options: None,
}
}
pub fn with_schema(self, schema: SchemaRef) -> Self {
Self {
table_paths: self.table_paths,
file_schema: Some(schema),
options: self.options,
}
}
pub fn with_listing_options(self, listing_options: ListingOptions) -> Self {
Self {
table_paths: self.table_paths,
file_schema: self.file_schema,
options: Some(listing_options),
}
}
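/// Infer the [`FileFormat`] and canonical file extension from a path,
/// taking an optional trailing compression extension into account
/// (e.g. `data.csv.gz` is detected as gzip-compressed CSV).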
fn infer_format(path: &str) -> Result<(Arc<dyn FileFormat>, String)> {
let err_msg = format!("Unable to infer file type from path: {path}");
// Walk the extensions right to left: an optional compression extension
// first (e.g. the `gz` in `data.csv.gz`), then the file type itself.
let mut exts = path.rsplit('.');
let mut split = exts.next().unwrap_or("");
let file_compression_type = FileCompressionType::from_str(split)
.unwrap_or(FileCompressionType::UNCOMPRESSED);
if file_compression_type.is_compressed() {
split = exts.next().unwrap_or("");
}
let file_type = FileType::from_str(split)
.map_err(|_| DataFusionError::Internal(err_msg.clone()))?;
let ext = file_type
.get_ext_with_compression(file_compression_type.to_owned())
.map_err(|_| DataFusionError::Internal(err_msg))?;
// Only the CSV and JSON formats accept a compression type; compressed
// Avro or Parquet would have failed `get_ext_with_compression` above.
let file_format: Arc<dyn FileFormat> = match file_type {
FileType::AVRO => Arc::new(AvroFormat::default()),
FileType::CSV => Arc::new(
CsvFormat::default().with_file_compression_type(file_compression_type),
),
FileType::JSON => Arc::new(
JsonFormat::default().with_file_compression_type(file_compression_type),
),
FileType::PARQUET => Arc::new(ParquetFormat::default()),
};
Ok((file_format, ext))
}
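/// Infer the [`ListingOptions`] (file format and extension) from the first
/// file found under the first table path.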
pub async fn infer_options(self, state: &SessionState) -> Result<Self> {
// Inference is based on the first table path: pick its object store
// and look at the first file listed under it.
let store = state
.runtime_env()
.object_store(self.table_paths.get(0).unwrap())?;
let file = self
.table_paths
.get(0)
.unwrap()
.list_all_files(store.as_ref(), "")
.next()
.await
.ok_or_else(|| DataFusionError::Internal("No files for table".into()))??;
let (format, file_extension) =
ListingTableConfig::infer_format(file.location.as_ref())?;
let listing_options = ListingOptions::new(format)
.with_file_extension(file_extension)
.with_target_partitions(state.config().target_partitions());
Ok(Self {
table_paths: self.table_paths,
file_schema: self.file_schema,
options: Some(listing_options),
})
}
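/// Infer the schema of the files at the first table path using the
/// previously set [`ListingOptions`]; errors if no options are set.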
pub async fn infer_schema(self, state: &SessionState) -> Result<Self> {
match self.options {
Some(options) => {
let schema = options
.infer_schema(state, self.table_paths.get(0).unwrap())
.await?;
Ok(Self {
table_paths: self.table_paths,
file_schema: Some(schema),
options: Some(options),
})
}
None => Err(DataFusionError::Internal(
"No `ListingOptions` set for inferring schema".into(),
)),
}
}
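/// Convenience method that infers the options and then the schema.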
pub async fn infer(self, state: &SessionState) -> Result<Self> {
self.infer_options(state).await?.infer_schema(state).await
}
}
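/// Options controlling how a [`ListingTable`] lists, prunes, and reads its
/// files.
///
/// A minimal, illustrative sketch of the builder style (the extension and
/// partition count below are arbitrary choices):
///
/// ```no_run
/// # use std::sync::Arc;
/// # use datafusion::datasource::file_format::parquet::ParquetFormat;
/// # use datafusion::datasource::listing::ListingOptions;
/// let listing_options = ListingOptions::new(Arc::new(ParquetFormat::default()))
///     .with_file_extension(".parquet")
///     .with_target_partitions(8)
///     .with_collect_stat(true);
/// ```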
#[derive(Clone, Debug)]
pub struct ListingOptions {
/// Suffix used to filter files (an empty string keeps all files).
pub file_extension: String,
/// Format of the files to read.
pub format: Arc<dyn FileFormat>,
/// Partition columns encoded in the directory structure, in order,
/// together with their data types.
pub table_partition_cols: Vec<(String, DataType)>,
/// Whether to collect per-file statistics while listing files.
pub collect_stat: bool,
/// Number of file groups (scan partitions) to split the files into.
pub target_partitions: usize,
/// A sort order the files are already known to satisfy, if any.
pub file_sort_order: Option<Vec<Expr>>,
/// Whether the source is unbounded (e.g. a FIFO) and should be treated
/// as an infinite stream.
pub infinite_source: bool,
}
impl ListingOptions {
pub fn new(format: Arc<dyn FileFormat>) -> Self {
Self {
file_extension: String::new(),
format,
table_partition_cols: vec![],
collect_stat: true,
target_partitions: 1,
file_sort_order: None,
infinite_source: false,
}
}
pub fn with_infinite_source(mut self, infinite_source: bool) -> Self {
self.infinite_source = infinite_source;
self
}
pub fn with_file_extension(mut self, file_extension: impl Into<String>) -> Self {
self.file_extension = file_extension.into();
self
}
pub fn with_table_partition_cols(
mut self,
table_partition_cols: Vec<(String, DataType)>,
) -> Self {
self.table_partition_cols = table_partition_cols;
self
}
pub fn with_collect_stat(mut self, collect_stat: bool) -> Self {
self.collect_stat = collect_stat;
self
}
pub fn with_target_partitions(mut self, target_partitions: usize) -> Self {
self.target_partitions = target_partitions;
self
}
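/// Declare that the files are already sorted by the given expressions, so
/// the resulting scan can report an output ordering instead of requiring a
/// re-sort.
///
/// Only [`Expr::Sort`] wrappers around bare column references are accepted
/// when the ordering is later converted to physical sort expressions. A
/// sketch, assuming a column named "a" exists:
///
/// ```no_run
/// # use std::sync::Arc;
/// # use datafusion::datasource::file_format::parquet::ParquetFormat;
/// # use datafusion::datasource::listing::ListingOptions;
/// # use datafusion::prelude::col;
/// let listing_options = ListingOptions::new(Arc::new(ParquetFormat::default()))
///     .with_file_sort_order(Some(vec![col("a").sort(true, true)]));
/// ```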
pub fn with_file_sort_order(mut self, file_sort_order: Option<Vec<Expr>>) -> Self {
self.file_sort_order = file_sort_order;
self
}
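/// Infer the schema of the files located at `table_path`, listing all
/// files that match the configured extension and delegating to the file
/// format's schema inference.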
pub async fn infer_schema<'a>(
&'a self,
state: &SessionState,
table_path: &'a ListingTableUrl,
) -> Result<SchemaRef> {
let store = state.runtime_env().object_store(table_path)?;
let files: Vec<_> = table_path
.list_all_files(store.as_ref(), &self.file_extension)
.try_collect()
.await?;
self.format.infer_schema(state, &store, &files).await
}
}
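/// A cache of file-level [`Statistics`], keyed by file path and invalidated
/// whenever a file's size or modification time changes.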
#[derive(Default)]
struct StatisticsCache {
statistics: DashMap<Path, (ObjectMeta, Statistics)>,
}
impl StatisticsCache {
fn get(&self, meta: &ObjectMeta) -> Option<Statistics> {
self.statistics.get(&meta.location).and_then(|s| {
let (saved_meta, statistics) = s.value();
// Only reuse cached statistics if the file is unchanged since they
// were collected.
if saved_meta.size == meta.size
&& saved_meta.last_modified == meta.last_modified
{
Some(statistics.clone())
} else {
None
}
})
}
fn save(&self, meta: ObjectMeta, statistics: Statistics) {
self.statistics
.insert(meta.location.clone(), (meta, statistics));
}
}
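/// A [`TableProvider`] that scans a set of files on object storage as a
/// single table, optionally hive-partitioned by its directory structure.
///
/// A minimal sketch of building one over a Parquet directory (the
/// `/tmp/my_table` path is assumed for illustration):
///
/// ```no_run
/// # use std::sync::Arc;
/// # use datafusion::prelude::SessionContext;
/// # use datafusion::error::Result;
/// # use datafusion::datasource::file_format::parquet::ParquetFormat;
/// # use datafusion::datasource::listing::{
/// #     ListingOptions, ListingTable, ListingTableConfig, ListingTableUrl,
/// # };
/// # #[tokio::main]
/// # async fn main() -> Result<()> {
/// let ctx = SessionContext::new();
/// let state = ctx.state();
/// let table_path = ListingTableUrl::parse("/tmp/my_table")?;
/// let options = ListingOptions::new(Arc::new(ParquetFormat::default()))
///     .with_file_extension(".parquet");
/// let schema = options.infer_schema(&state, &table_path).await?;
/// let config = ListingTableConfig::new(table_path)
///     .with_listing_options(options)
///     .with_schema(schema);
/// let table = ListingTable::try_new(config)?;
/// let df = ctx.read_table(Arc::new(table))?;
/// # let _ = df;
/// # Ok(())
/// # }
/// ```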
pub struct ListingTable {
table_paths: Vec<ListingTableUrl>,
/// The schema of the files themselves, without partition columns.
file_schema: SchemaRef,
/// `file_schema` with the partition columns appended.
table_schema: SchemaRef,
options: ListingOptions,
definition: Option<String>,
collected_statistics: StatisticsCache,
infinite_source: bool,
}
impl ListingTable {
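/// Create a new [`ListingTable`] from the given config; errors if the
/// config is missing a schema or [`ListingOptions`].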
pub fn try_new(config: ListingTableConfig) -> Result<Self> {
let file_schema = config
.file_schema
.ok_or_else(|| DataFusionError::Internal("No schema provided.".into()))?;
let options = config.options.ok_or_else(|| {
DataFusionError::Internal("No ListingOptions provided".into())
})?;
// The table schema is the file schema with the partition columns
// appended; partition values are encoded as dictionary columns.
let mut table_fields = file_schema.fields().clone();
for (part_col_name, part_col_type) in &options.table_partition_cols {
table_fields.push(Field::new(
part_col_name,
partition_type_wrap(part_col_type.clone()),
false,
));
}
let infinite_source = options.infinite_source;
let table = Self {
table_paths: config.table_paths,
file_schema,
table_schema: Arc::new(Schema::new(table_fields)),
options,
definition: None,
collected_statistics: Default::default(),
infinite_source,
};
Ok(table)
}
/// Specify the SQL definition for this table, if any.
pub fn with_definition(mut self, definition: Option<String>) -> Self {
self.definition = definition;
self
}
pub fn table_paths(&self) -> &Vec<ListingTableUrl> {
&self.table_paths
}
pub fn options(&self) -> &ListingOptions {
&self.options
}
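/// Convert the logical `file_sort_order`, if one was declared, into
/// physical sort expressions over the table schema.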
fn try_create_output_ordering(&self) -> Result<Option<Vec<PhysicalSortExpr>>> {
let file_sort_order = match self.options.file_sort_order.as_ref() {
Some(file_sort_order) => file_sort_order,
None => return Ok(None),
};
let sort_exprs = file_sort_order
.iter()
.map(|expr| {
// Only `Expr::Sort` over a bare column reference can be converted
// into a physical sort expression here.
if let Expr::Sort(Sort { expr, asc, nulls_first }) = expr {
if let Expr::Column(col) = expr.as_ref() {
let expr = physical_plan::expressions::col(&col.name, self.table_schema.as_ref())?;
Ok(PhysicalSortExpr {
expr,
options: SortOptions {
descending: !asc,
nulls_first: *nulls_first,
},
})
} else {
Err(DataFusionError::Plan(
format!("Only support single column references in output_ordering, got {expr:?}")
))
}
} else {
Err(DataFusionError::Plan(
format!("Expected Expr::Sort in output_ordering, but got {expr:?}")
))
}
})
.collect::<Result<Vec<_>>>()?;
Ok(Some(sort_exprs))
}
}
#[async_trait]
impl TableProvider for ListingTable {
fn as_any(&self) -> &dyn Any {
self
}
fn schema(&self) -> SchemaRef {
Arc::clone(&self.table_schema)
}
fn table_type(&self) -> TableType {
TableType::Base
}
async fn scan(
&self,
state: &SessionState,
projection: Option<&Vec<usize>>,
filters: &[Expr],
limit: Option<usize>,
) -> Result<Arc<dyn ExecutionPlan>> {
let (partitioned_file_lists, statistics) =
self.list_files_for_scan(state, filters, limit).await?;
// If there is nothing to read, short-circuit with an `EmptyExec`.
if partitioned_file_lists.is_empty() {
let schema = self.schema();
let projected_schema = project_schema(&schema, projection)?;
return Ok(Arc::new(EmptyExec::new(false, projected_schema)));
}
// Resolve each partition column against the table schema to get its
// (dictionary-wrapped) data type.
let table_partition_cols = self
.options
.table_partition_cols
.iter()
.map(|col| {
Ok((
col.0.to_owned(),
self.table_schema
.field_with_name(&col.0)?
.data_type()
.clone(),
))
})
.collect::<Result<Vec<_>>>()?;
// Combine all pushed-down filters into a single physical predicate,
// if there are any.
let filters = if let Some(expr) = conjunction(filters.to_vec()) {
let table_df_schema = self.table_schema.as_ref().clone().to_dfschema()?;
let filters = create_physical_expr(
&expr,
&table_df_schema,
&self.table_schema,
state.execution_props(),
)?;
Some(filters)
} else {
None
};
self.options
.format
.create_physical_plan(
state,
FileScanConfig {
object_store_url: self.table_paths.get(0).unwrap().object_store(),
file_schema: Arc::clone(&self.file_schema),
file_groups: partitioned_file_lists,
statistics,
projection: projection.cloned(),
limit,
output_ordering: self.try_create_output_ordering()?,
table_partition_cols,
infinite_source: self.infinite_source,
},
filters.as_ref(),
)
.await
}
fn supports_filter_pushdown(
&self,
filter: &Expr,
) -> Result<TableProviderFilterPushDown> {
if expr_applicable_for_cols(
&self
.options
.table_partition_cols
.iter()
.map(|x| x.0.clone())
.collect::<Vec<_>>(),
filter,
) {
// A filter that references only partition columns is applied exactly
// while listing files, so no further filtering is needed.
Ok(TableProviderFilterPushDown::Exact)
} else {
// Otherwise the scan must re-apply the filter.
Ok(TableProviderFilterPushDown::Inexact)
}
}
fn get_table_definition(&self) -> Option<&str> {
self.definition.as_deref()
}
}
impl ListingTable {
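/// List the files (with partition values parsed from their paths) relevant
/// to this scan, split into `target_partitions` groups, together with the
/// combined statistics.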
async fn list_files_for_scan<'a>(
&'a self,
ctx: &'a SessionState,
filters: &'a [Expr],
limit: Option<usize>,
) -> Result<(Vec<Vec<PartitionedFile>>, Statistics)> {
let store = ctx
.runtime_env()
.object_store(self.table_paths.get(0).unwrap())?;
// List files under every table path concurrently, pruning directories
// that cannot match the partition filters.
let file_list = future::try_join_all(self.table_paths.iter().map(|table_path| {
pruned_partition_list(
store.as_ref(),
table_path,
filters,
&self.options.file_extension,
&self.options.table_partition_cols,
)
}))
.await?;
let file_list = stream::iter(file_list).flatten();
// Fetch statistics for each file from the cache, or infer and cache
// them, when statistics collection is enabled.
let files = file_list.then(|part_file| async {
let part_file = part_file?;
let statistics = if self.options.collect_stat {
match self.collected_statistics.get(&part_file.object_meta) {
Some(statistics) => statistics,
None => {
let statistics = self
.options
.format
.infer_stats(
ctx,
&store,
self.file_schema.clone(),
&part_file.object_meta,
)
.await?;
self.collected_statistics
.save(part_file.object_meta.clone(), statistics.clone());
statistics
}
}
} else {
Statistics::default()
};
Ok((part_file, statistics)) as Result<(PartitionedFile, Statistics)>
});
let (files, statistics) =
get_statistics_with_limit(files, self.schema(), limit).await?;
Ok((
split_files(files, self.options.target_partitions),
statistics,
))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::datasource::file_format::file_type::GetExt;
use crate::prelude::*;
use crate::{
datasource::file_format::{avro::AvroFormat, parquet::ParquetFormat},
execution::options::ReadOptions,
logical_expr::{col, lit},
test::{columns, object_store::register_test_store},
};
use arrow::datatypes::DataType;
use chrono::DateTime;
use datafusion_common::assert_contains;
use rstest::*;
use std::fs::File;
use tempfile::TempDir;
async fn unbounded_table_helper(
file_type: FileType,
listing_option: ListingOptions,
infinite_data: bool,
) -> Result<()> {
let ctx = SessionContext::new();
register_test_store(
&ctx,
&[(&format!("table/file{}", file_type.get_ext()), 100)],
);
let schema = Schema::new(vec![Field::new("a", DataType::Boolean, false)]);
let table_path = ListingTableUrl::parse("test:///table/").unwrap();
let config = ListingTableConfig::new(table_path)
.with_listing_options(listing_option)
.with_schema(Arc::new(schema));
let table = ListingTable::try_new(config)?;
let source_exec = table.scan(&ctx.state(), None, &[], None).await?;
assert_eq!(source_exec.unbounded_output(&[])?, infinite_data);
Ok(())
}
#[tokio::test]
async fn read_single_file() -> Result<()> {
let ctx = SessionContext::new();
let table = load_table(&ctx, "alltypes_plain.parquet").await?;
let projection = None;
let exec = table
.scan(&ctx.state(), projection, &[], None)
.await
.expect("Scan table");
assert_eq!(exec.children().len(), 0);
assert_eq!(exec.output_partitioning().partition_count(), 1);
assert_eq!(exec.statistics().num_rows, Some(8));
assert_eq!(exec.statistics().total_byte_size, Some(671));
Ok(())
}
#[tokio::test]
async fn load_table_stats_by_default() -> Result<()> {
let testdata = crate::test_util::parquet_test_data();
let filename = format!("{}/{}", testdata, "alltypes_plain.parquet");
let table_path = ListingTableUrl::parse(filename).unwrap();
let ctx = SessionContext::new();
let state = ctx.state();
let opt = ListingOptions::new(Arc::new(ParquetFormat::default()));
let schema = opt.infer_schema(&state, &table_path).await?;
let config = ListingTableConfig::new(table_path)
.with_listing_options(opt)
.with_schema(schema);
let table = ListingTable::try_new(config)?;
let exec = table.scan(&state, None, &[], None).await?;
assert_eq!(exec.statistics().num_rows, Some(8));
assert_eq!(exec.statistics().total_byte_size, Some(671));
Ok(())
}
#[tokio::test]
async fn load_table_stats_when_no_stats() -> Result<()> {
let testdata = crate::test_util::parquet_test_data();
let filename = format!("{}/{}", testdata, "alltypes_plain.parquet");
let table_path = ListingTableUrl::parse(filename).unwrap();
let ctx = SessionContext::new();
let state = ctx.state();
let opt = ListingOptions::new(Arc::new(ParquetFormat::default()))
.with_collect_stat(false);
let schema = opt.infer_schema(&state, &table_path).await?;
let config = ListingTableConfig::new(table_path)
.with_listing_options(opt)
.with_schema(schema);
let table = ListingTable::try_new(config)?;
let exec = table.scan(&state, None, &[], None).await?;
assert_eq!(exec.statistics().num_rows, None);
assert_eq!(exec.statistics().total_byte_size, None);
Ok(())
}
#[tokio::test]
async fn test_try_create_output_ordering() {
let testdata = crate::test_util::parquet_test_data();
let filename = format!("{}/{}", testdata, "alltypes_plain.parquet");
let table_path = ListingTableUrl::parse(filename).unwrap();
let ctx = SessionContext::new();
let state = ctx.state();
let options = ListingOptions::new(Arc::new(ParquetFormat::default()));
let schema = options.infer_schema(&state, &table_path).await.unwrap();
use crate::physical_plan::expressions::col as physical_col;
use std::ops::Add;
let cases = vec![
(None, Ok(None)),
(Some(vec![]), Ok(Some(vec![]))),
(
Some(vec![col("string_col")]),
Err("Expected Expr::Sort in output_ordering, but got string_col"),
),
(
Some(vec![
col("int_col").add(lit(1)).sort(true, true),
]),
Err("Only support single column references in output_ordering, got int_col + Int32(1)"),
),
(
Some(vec![col("string_col").sort(true, false)]),
Ok(Some(vec![PhysicalSortExpr {
expr: physical_col("string_col", &schema).unwrap(),
options: SortOptions {
descending: false,
nulls_first: false,
},
}]))
),
(
Some(vec![
col("string_col").sort(true, false),
col("int_col").sort(false, true),
]),
Ok(Some(vec![
PhysicalSortExpr {
expr: physical_col("string_col", &schema).unwrap(),
options: SortOptions {
descending: false,
nulls_first: false,
},
},
PhysicalSortExpr {
expr: physical_col("int_col", &schema).unwrap(),
options: SortOptions {
descending: true,
nulls_first: true,
},
},
]))
),
];
for (file_sort_order, expected_result) in cases {
let options = options.clone().with_file_sort_order(file_sort_order);
let config = ListingTableConfig::new(table_path.clone())
.with_listing_options(options)
.with_schema(schema.clone());
let table =
ListingTable::try_new(config.clone()).expect("Creating the table");
let ordering_result = table.try_create_output_ordering();
match (expected_result, ordering_result) {
(Ok(expected), Ok(result)) => {
assert_eq!(expected, result);
}
(Err(expected), Err(result)) => {
// Error messages are compared by substring.
let result = result.to_string();
let expected = expected.to_string();
assert_contains!(result, expected);
}
(expected_result, ordering_result) => {
panic!(
"expected: {expected_result:#?}\n\nactual:{ordering_result:#?}"
);
}
}
}
}
#[tokio::test]
async fn read_empty_table() -> Result<()> {
let ctx = SessionContext::new();
let path = String::from("table/p1=v1/file.avro");
register_test_store(&ctx, &[(&path, 100)]);
let opt = ListingOptions::new(Arc::new(AvroFormat {}))
.with_file_extension(FileType::AVRO.get_ext())
.with_table_partition_cols(vec![(
String::from("p1"),
partition_type_wrap(DataType::Utf8),
)])
.with_target_partitions(4);
let table_path = ListingTableUrl::parse("test:///table/").unwrap();
let file_schema =
Arc::new(Schema::new(vec![Field::new("a", DataType::Boolean, false)]));
let config = ListingTableConfig::new(table_path)
.with_listing_options(opt)
.with_schema(file_schema);
let table = ListingTable::try_new(config)?;
assert_eq!(
columns(&table.schema()),
vec!["a".to_owned(), "p1".to_owned()]
);
let filter = Expr::not_eq(col("p1"), lit("v1"));
let scan = table
.scan(&ctx.state(), None, &[filter], None)
.await
.expect("Empty execution plan");
assert!(scan.as_any().is::<EmptyExec>());
assert_eq!(
columns(&scan.schema()),
vec!["a".to_owned(), "p1".to_owned()]
);
Ok(())
}
#[tokio::test]
async fn unbounded_csv_table_without_schema() -> Result<()> {
let tmp_dir = TempDir::new()?;
let file_path = tmp_dir.path().join("dummy.csv");
File::create(file_path)?;
let ctx = SessionContext::new();
let error = ctx
.register_csv(
"test",
tmp_dir.path().to_str().unwrap(),
CsvReadOptions::new().mark_infinite(true),
)
.await
.unwrap_err();
match error {
DataFusionError::Plan(_) => Ok(()),
val => Err(val),
}
}
#[tokio::test]
async fn unbounded_json_table_without_schema() -> Result<()> {
let tmp_dir = TempDir::new()?;
let file_path = tmp_dir.path().join("dummy.json");
File::create(file_path)?;
let ctx = SessionContext::new();
let error = ctx
.register_json(
"test",
tmp_dir.path().to_str().unwrap(),
NdJsonReadOptions::default().mark_infinite(true),
)
.await
.unwrap_err();
match error {
DataFusionError::Plan(_) => Ok(()),
val => Err(val),
}
}
#[tokio::test]
async fn unbounded_avro_table_without_schema() -> Result<()> {
let tmp_dir = TempDir::new()?;
let file_path = tmp_dir.path().join("dummy.avro");
File::create(file_path)?;
let ctx = SessionContext::new();
let error = ctx
.register_avro(
"test",
tmp_dir.path().to_str().unwrap(),
AvroReadOptions::default().mark_infinite(true),
)
.await
.unwrap_err();
match error {
DataFusionError::Plan(_) => Ok(()),
val => Err(val),
}
}
#[rstest]
#[tokio::test]
async fn unbounded_csv_table(
#[values(true, false)] infinite_data: bool,
) -> Result<()> {
let config = CsvReadOptions::new().mark_infinite(infinite_data);
let session_config = SessionConfig::new().with_target_partitions(1);
let listing_options = config.to_listing_options(&session_config);
unbounded_table_helper(FileType::CSV, listing_options, infinite_data).await
}
#[rstest]
#[tokio::test]
async fn unbounded_json_table(
#[values(true, false)] infinite_data: bool,
) -> Result<()> {
let config = NdJsonReadOptions::default().mark_infinite(infinite_data);
let session_config = SessionConfig::new().with_target_partitions(1);
let listing_options = config.to_listing_options(&session_config);
unbounded_table_helper(FileType::JSON, listing_options, infinite_data).await
}
#[rstest]
#[tokio::test]
async fn unbounded_avro_table(
#[values(true, false)] infinite_data: bool,
) -> Result<()> {
let config = AvroReadOptions::default().mark_infinite(infinite_data);
let session_config = SessionConfig::new().with_target_partitions(1);
let listing_options = config.to_listing_options(&session_config);
unbounded_table_helper(FileType::AVRO, listing_options, infinite_data).await
}
#[tokio::test]
async fn test_assert_list_files_for_scan_grouping() -> Result<()> {
assert_list_files_for_scan_grouping(
&[
"bucket/key-prefix/file0",
"bucket/key-prefix/file1",
"bucket/key-prefix/file2",
"bucket/key-prefix/file3",
"bucket/key-prefix/file4",
],
"test:///bucket/key-prefix/",
12,
5,
)
.await?;
assert_list_files_for_scan_grouping(
&[
"bucket/key-prefix/file0",
"bucket/key-prefix/file1",
"bucket/key-prefix/file2",
"bucket/key-prefix/file3",
],
"test:///bucket/key-prefix/",
4,
4,
)
.await?;
assert_list_files_for_scan_grouping(
&[
"bucket/key-prefix/file0",
"bucket/key-prefix/file1",
"bucket/key-prefix/file2",
"bucket/key-prefix/file3",
"bucket/key-prefix/file4",
],
"test:///bucket/key-prefix/",
2,
2,
)
.await?;
assert_list_files_for_scan_grouping(&[], "test:///bucket/key-prefix/", 2, 0)
.await?;
assert_list_files_for_scan_grouping(
&[
"bucket/key-prefix/file0",
"bucket/key-prefix/file1",
"bucket/other-prefix/roguefile",
],
"test:///bucket/key-prefix/",
10,
2,
)
.await?;
Ok(())
}
#[tokio::test]
async fn test_assert_list_files_for_multi_path() -> Result<()> {
assert_list_files_for_multi_paths(
&[
"bucket/key1/file0",
"bucket/key1/file1",
"bucket/key1/file2",
"bucket/key2/file3",
"bucket/key2/file4",
"bucket/key3/file5",
],
&["test:///bucket/key1/", "test:///bucket/key2/"],
12,
5,
)
.await?;
assert_list_files_for_multi_paths(
&[
"bucket/key1/file0",
"bucket/key1/file1",
"bucket/key1/file2",
"bucket/key2/file3",
"bucket/key2/file4",
"bucket/key3/file5",
],
&["test:///bucket/key1/", "test:///bucket/key2/"],
5,
5,
)
.await?;
assert_list_files_for_multi_paths(
&[
"bucket/key1/file0",
"bucket/key1/file1",
"bucket/key1/file2",
"bucket/key2/file3",
"bucket/key2/file4",
"bucket/key3/file5",
],
&["test:///bucket/key1/"],
2,
2,
)
.await?;
assert_list_files_for_multi_paths(&[], &["test:///bucket/key1/"], 2, 0).await?;
assert_list_files_for_multi_paths(
&[
"bucket/key1/file0",
"bucket/key1/file1",
"bucket/key1/file2",
"bucket/key2/file3",
"bucket/key2/file4",
"bucket/key3/file5",
],
&["test:///bucket/key3/"],
2,
1,
)
.await?;
Ok(())
}
async fn load_table(
ctx: &SessionContext,
name: &str,
) -> Result<Arc<dyn TableProvider>> {
let testdata = crate::test_util::parquet_test_data();
let filename = format!("{testdata}/{name}");
let table_path = ListingTableUrl::parse(filename).unwrap();
let config = ListingTableConfig::new(table_path)
.infer(&ctx.state())
.await?;
let table = ListingTable::try_new(config)?;
Ok(Arc::new(table))
}
async fn assert_list_files_for_scan_grouping(
files: &[&str],
table_prefix: &str,
target_partitions: usize,
output_partitioning: usize,
) -> Result<()> {
let ctx = SessionContext::new();
register_test_store(&ctx, &files.iter().map(|f| (*f, 10)).collect::<Vec<_>>());
let format = AvroFormat {};
let opt = ListingOptions::new(Arc::new(format))
.with_file_extension("")
.with_target_partitions(target_partitions);
let schema = Schema::new(vec![Field::new("a", DataType::Boolean, false)]);
let table_path = ListingTableUrl::parse(table_prefix).unwrap();
let config = ListingTableConfig::new(table_path)
.with_listing_options(opt)
.with_schema(Arc::new(schema));
let table = ListingTable::try_new(config)?;
let (file_list, _) = table.list_files_for_scan(&ctx.state(), &[], None).await?;
assert_eq!(file_list.len(), output_partitioning);
Ok(())
}
async fn assert_list_files_for_multi_paths(
files: &[&str],
table_prefix: &[&str],
target_partitions: usize,
output_partitioning: usize,
) -> Result<()> {
let ctx = SessionContext::new();
register_test_store(&ctx, &files.iter().map(|f| (*f, 10)).collect::<Vec<_>>());
let format = AvroFormat {};
let opt = ListingOptions::new(Arc::new(format))
.with_file_extension("")
.with_target_partitions(target_partitions);
let schema = Schema::new(vec![Field::new("a", DataType::Boolean, false)]);
let table_paths = table_prefix
.iter()
.map(|t| ListingTableUrl::parse(t).unwrap())
.collect();
let config = ListingTableConfig::new_with_multi_paths(table_paths)
.with_listing_options(opt)
.with_schema(Arc::new(schema));
let table = ListingTable::try_new(config)?;
let (file_list, _) = table.list_files_for_scan(&ctx.state(), &[], None).await?;
assert_eq!(file_list.len(), output_partitioning);
Ok(())
}
#[test]
fn test_statistics_cache() {
let meta = ObjectMeta {
location: Path::from("test"),
last_modified: DateTime::parse_from_rfc3339("2022-09-27T22:36:00+02:00")
.unwrap()
.into(),
size: 1024,
};
let cache = StatisticsCache::default();
assert!(cache.get(&meta).is_none());
cache.save(meta.clone(), Statistics::default());
assert!(cache.get(&meta).is_some());
let mut meta2 = meta.clone();
meta2.size = 2048;
assert!(cache.get(&meta2).is_none());
let mut meta2 = meta.clone();
meta2.last_modified = DateTime::parse_from_rfc3339("2022-09-27T22:40:00+02:00")
.unwrap()
.into();
assert!(cache.get(&meta2).is_none());
let mut meta2 = meta;
meta2.location = Path::from("test2");
assert!(cache.get(&meta2).is_none());
}
}