use std::sync::Arc;
use super::ColumnarValue;
use crate::error::{DataFusionError, Result};
use crate::physical_plan::PhysicalExpr;
use arrow::compute::kernels::sort::{SortColumn, SortOptions};
use arrow::record_batch::RecordBatch;
mod average;
#[macro_use]
mod binary;
mod case;
mod cast;
mod coercion;
mod column;
mod count;
mod in_list;
mod is_not_null;
mod is_null;
mod literal;
mod min_max;
mod negative;
mod not;
mod nullif;
mod sum;
mod try_cast;
pub use average::{avg_return_type, Avg, AvgAccumulator};
pub use binary::{binary, binary_operator_data_type, BinaryExpr};
pub use case::{case, CaseExpr};
pub use cast::{cast, cast_with_options, CastExpr};
pub use column::{col, Column};
pub use count::Count;
pub use in_list::{in_list, InListExpr};
pub use is_not_null::{is_not_null, IsNotNullExpr};
pub use is_null::{is_null, IsNullExpr};
pub use literal::{lit, Literal};
pub use min_max::{Max, Min};
pub use negative::{negative, NegativeExpr};
pub use not::{not, NotExpr};
pub use nullif::{nullif_func, SUPPORTED_NULLIF_TYPES};
pub use sum::{sum_return_type, Sum};
pub use try_cast::{try_cast, TryCastExpr};
/// Builds the canonical field name for an aggregate's intermediate state,
/// in the form `"<name>[<state_name>]"` (e.g. `"avg[count]"`).
pub fn format_state_name(name: &str, state_name: &str) -> String {
    // Pre-size for both parts plus the two bracket characters.
    let mut formatted = String::with_capacity(name.len() + state_name.len() + 2);
    formatted.push_str(name);
    formatted.push('[');
    formatted.push_str(state_name);
    formatted.push(']');
    formatted
}
/// One sort key of a sort operation: a physical expression to evaluate
/// per batch, paired with the options (ascending/descending, null
/// ordering) to apply when sorting on it.
#[derive(Clone, Debug)]
pub struct PhysicalSortExpr {
    /// The physical expression whose evaluated values are sorted on
    pub expr: Arc<dyn PhysicalExpr>,
    /// Sort direction and null-placement options forwarded to the
    /// arrow sort kernel (see `evaluate_to_sort_column`)
    pub options: SortOptions,
}
impl PhysicalSortExpr {
    /// Evaluates the sort expression against `batch` and packages the
    /// resulting array together with this key's `SortOptions` into a
    /// `SortColumn` suitable for the arrow sort kernel.
    ///
    /// Returns an internal error if the expression produces a scalar,
    /// since sorting is only defined over arrays.
    pub fn evaluate_to_sort_column(&self, batch: &RecordBatch) -> Result<SortColumn> {
        match self.expr.evaluate(batch)? {
            ColumnarValue::Array(values) => Ok(SortColumn {
                values,
                options: Some(self.options),
            }),
            ColumnarValue::Scalar(scalar) => Err(DataFusionError::Internal(format!(
                "Sort operation is not applicable to scalar value {}",
                scalar
            ))),
        }
    }
}
/// Shared test utilities for the aggregate-expression submodules
/// (`average`, `count`, `min_max`, `sum`, ...).
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{error::Result, physical_plan::AggregateExpr, scalar::ScalarValue};
    /// Runs a full aggregation of `$OP` (an `AggregateExpr` type such as
    /// `Sum` or `Avg`) over column "a" built from `$ARRAY`/`$DATATYPE`,
    /// and asserts the result equals `ScalarValue::from($EXPECTED)`.
    /// `$EXPECTED_DATATYPE` is the aggregate's declared output type.
    /// Exported so the sibling aggregate modules' tests can reuse it;
    /// callers must have `Schema`, `Field`, `RecordBatch`, `Arc`, `col`
    /// and `aggregate` in scope, and return `Result<()>`.
    #[macro_export]
    macro_rules! generic_test_op {
        ($ARRAY:expr, $DATATYPE:expr, $OP:ident, $EXPECTED:expr, $EXPECTED_DATATYPE:expr) => {{
            let schema = Schema::new(vec![Field::new("a", $DATATYPE, false)]);
            let batch = RecordBatch::try_new(Arc::new(schema.clone()), vec![$ARRAY])?;
            let agg =
                Arc::new(<$OP>::new(col("a"), "bla".to_string(), $EXPECTED_DATATYPE));
            let actual = aggregate(&batch, agg)?;
            let expected = ScalarValue::from($EXPECTED);
            assert_eq!(expected, actual);
            Ok(())
        }};
    }
    /// Evaluates `agg`'s input expressions over `batch`, feeds the
    /// resulting arrays through a fresh accumulator in a single
    /// `update_batch` call, and returns the final aggregated scalar.
    pub fn aggregate(
        batch: &RecordBatch,
        agg: Arc<dyn AggregateExpr>,
    ) -> Result<ScalarValue> {
        let mut accum = agg.create_accumulator()?;
        let expr = agg.expressions();
        // Evaluate each input expression; scalars are materialized to
        // arrays of the batch's row count so update_batch sees uniform
        // array inputs. collect() short-circuits on the first error.
        let values = expr
            .iter()
            .map(|e| e.evaluate(batch))
            .map(|r| r.map(|v| v.into_array(batch.num_rows())))
            .collect::<Result<Vec<_>>>()?;
        accum.update_batch(&values)?;
        accum.evaluate()
    }
}