use std::sync::Arc;
use crate::PhysicalOptimizerRule;
use datafusion_common::config::ConfigOptions;
use datafusion_common::tree_node::{Transformed, TransformedResult, TreeNode};
use datafusion_common::{Result, Statistics};
use datafusion_execution::TaskContext;
use datafusion_physical_expr::{Distribution, LexRequirement, PhysicalSortRequirement};
use datafusion_physical_plan::projection::{
make_with_child, update_expr, ProjectionExec,
};
use datafusion_physical_plan::sorts::sort::SortExec;
use datafusion_physical_plan::sorts::sort_preserving_merge::SortPreservingMergeExec;
use datafusion_physical_plan::{
DisplayAs, DisplayFormatType, ExecutionPlan, SendableRecordBatchStream,
};
use datafusion_physical_plan::{ExecutionPlanProperties, PlanProperties};
/// Physical optimizer rule that materializes the plan's top-level output
/// requirements (ordering / distribution) as an auxiliary
/// [`OutputRequirementExec`] node, or strips such nodes, depending on `mode`.
#[derive(Debug)]
pub struct OutputRequirements {
    // Whether this instance adds or removes requirement nodes.
    mode: RuleMode,
}
impl OutputRequirements {
    /// Creates a rule instance that inserts an `OutputRequirementExec`
    /// node at the top of the plan.
    pub fn new_add_mode() -> Self {
        Self { mode: RuleMode::Add }
    }

    /// Creates a rule instance that strips `OutputRequirementExec`
    /// nodes from the plan.
    pub fn new_remove_mode() -> Self {
        Self { mode: RuleMode::Remove }
    }
}
/// Controls whether [`OutputRequirements`] inserts or removes
/// [`OutputRequirementExec`] nodes when it runs.
#[derive(Debug, Ord, PartialOrd, PartialEq, Eq, Hash)]
enum RuleMode {
    // Insert an `OutputRequirementExec` at the top of the plan.
    Add,
    // Strip every `OutputRequirementExec` from the plan.
    Remove,
}
/// An auxiliary plan node that records the ordering and distribution
/// requirements of its child without performing any computation itself.
/// It is inserted by [`OutputRequirements`] in `Add` mode so that these
/// requirements survive later optimizer passes, and removed again in
/// `Remove` mode — it is never meant to be executed (see `execute`).
#[derive(Debug)]
pub struct OutputRequirementExec {
    // The wrapped child plan whose output requirements are being tracked.
    input: Arc<dyn ExecutionPlan>,
    // Optional sort-order requirement imposed on `input`'s output.
    order_requirement: Option<LexRequirement>,
    // Distribution (partitioning) requirement imposed on `input`'s output.
    dist_requirement: Distribution,
    // Cached plan properties, mirroring those of `input`.
    cache: PlanProperties,
}
impl OutputRequirementExec {
    /// Builds a requirement node wrapping `input`, recording the given
    /// ordering (`requirements`) and distribution requirements.
    pub fn new(
        input: Arc<dyn ExecutionPlan>,
        requirements: Option<LexRequirement>,
        dist_requirement: Distribution,
    ) -> Self {
        // The cache field is written first so that `input` is still
        // borrowable before it moves into the struct.
        Self {
            cache: Self::compute_properties(&input),
            order_requirement: requirements,
            dist_requirement,
            input,
        }
    }

    /// Returns a cloned handle to the wrapped child plan.
    pub fn input(&self) -> Arc<dyn ExecutionPlan> {
        Arc::clone(&self.input)
    }

    /// Mirrors the child's plan properties; this node adds no computation
    /// of its own.
    fn compute_properties(input: &Arc<dyn ExecutionPlan>) -> PlanProperties {
        PlanProperties::new(
            input.equivalence_properties().clone(),
            input.output_partitioning().clone(),
            input.pipeline_behavior(),
            input.boundedness(),
        )
    }
}
impl DisplayAs for OutputRequirementExec {
    /// Renders this node's name; the output is the same for every
    /// display format type.
    fn fmt_as(
        &self,
        _t: DisplayFormatType,
        f: &mut std::fmt::Formatter,
    ) -> std::fmt::Result {
        f.write_str("OutputRequirementExec")
    }
}
impl ExecutionPlan for OutputRequirementExec {
    fn name(&self) -> &'static str {
        "OutputRequirementExec"
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    // Properties are computed once at construction and mirror the child's.
    fn properties(&self) -> &PlanProperties {
        &self.cache
    }

    // This node performs no computation, so repartitioning its input
    // cannot speed it up.
    fn benefits_from_input_partitioning(&self) -> Vec<bool> {
        vec![false]
    }

    // Surfaces the recorded distribution requirement to the optimizer.
    fn required_input_distribution(&self) -> Vec<Distribution> {
        vec![self.dist_requirement.clone()]
    }

    // Pure pass-through: the child's output ordering is preserved.
    fn maintains_input_order(&self) -> Vec<bool> {
        vec![true]
    }

    fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
        vec![&self.input]
    }

    // Surfaces the recorded sort-order requirement to the optimizer.
    fn required_input_ordering(&self) -> Vec<Option<LexRequirement>> {
        vec![self.order_requirement.clone()]
    }

    fn with_new_children(
        self: Arc<Self>,
        mut children: Vec<Arc<dyn ExecutionPlan>>,
    ) -> Result<Arc<dyn ExecutionPlan>> {
        // Single-child node: rebuild around the (only) new child while
        // keeping the recorded requirements.
        Ok(Arc::new(Self::new(
            children.remove(0), self.order_requirement.clone(),
            self.dist_requirement.clone(),
        )))
    }

    fn execute(
        &self,
        _partition: usize,
        _context: Arc<TaskContext>,
    ) -> Result<SendableRecordBatchStream> {
        // This node is stripped from the plan (see `RuleMode::Remove`)
        // before execution; reaching here indicates a broken optimizer
        // pipeline, hence the hard panic.
        unreachable!();
    }

    fn statistics(&self) -> Result<Statistics> {
        // Statistics are delegated unchanged to the wrapped child.
        self.input.statistics()
    }

    /// Attempts to push `projection` down below this node, rewriting the
    /// stored ordering/distribution requirements in terms of the
    /// projection's output columns. Returns `Ok(None)` whenever the swap
    /// is not beneficial or an expression cannot be rewritten.
    fn try_swapping_with_projection(
        &self,
        projection: &ProjectionExec,
    ) -> Result<Option<Arc<dyn ExecutionPlan>>> {
        // Only push down projections that narrow the schema; otherwise
        // the swap cannot reduce work.
        if projection.expr().len() >= projection.input().schema().fields().len() {
            return Ok(None);
        }
        // Rewrite each sort requirement against the projection's
        // expressions; bail out if any of them cannot be rewritten.
        let mut updated_sort_reqs = LexRequirement::new(vec![]);
        if let Some(reqs) = &self.required_input_ordering()[0] {
            for req in &reqs.inner {
                let Some(new_expr) = update_expr(&req.expr, projection.expr(), false)?
                else {
                    return Ok(None);
                };
                updated_sort_reqs.push(PhysicalSortRequirement {
                    expr: new_expr,
                    options: req.options,
                });
            }
        }
        // Hash partitioning references concrete columns, so its
        // expressions must be rewritten too; other distribution kinds are
        // column-agnostic and can be cloned as-is.
        let dist_req = match &self.required_input_distribution()[0] {
            Distribution::HashPartitioned(exprs) => {
                let mut updated_exprs = vec![];
                for expr in exprs {
                    let Some(new_expr) = update_expr(expr, projection.expr(), false)?
                    else {
                        return Ok(None);
                    };
                    updated_exprs.push(new_expr);
                }
                Distribution::HashPartitioned(updated_exprs)
            }
            dist => dist.clone(),
        };
        // Rebuild: projection below, requirement node (with the rewritten
        // requirements) back on top.
        make_with_child(projection, &self.input())
            .map(|input| {
                OutputRequirementExec::new(
                    input,
                    (!updated_sort_reqs.is_empty()).then_some(updated_sort_reqs),
                    dist_req,
                )
            })
            .map(|e| Some(Arc::new(e) as _))
    }
}
impl PhysicalOptimizerRule for OutputRequirements {
    /// In `Add` mode, wraps the plan's top with an `OutputRequirementExec`
    /// capturing its global ordering/distribution requirements; in
    /// `Remove` mode, replaces every such node with its child.
    fn optimize(
        &self,
        plan: Arc<dyn ExecutionPlan>,
        _config: &ConfigOptions,
    ) -> Result<Arc<dyn ExecutionPlan>> {
        match self.mode {
            RuleMode::Add => require_top_ordering(plan),
            RuleMode::Remove => {
                // Bottom-up traversal: whenever an OutputRequirementExec is
                // found, splice its child into its place.
                let stripped = plan.transform_up(|node| {
                    match node.as_any().downcast_ref::<OutputRequirementExec>() {
                        Some(req_node) => Ok(Transformed::yes(req_node.input())),
                        None => Ok(Transformed::no(node)),
                    }
                });
                stripped.data()
            }
        }
    }

    fn name(&self) -> &str {
        "OutputRequirements"
    }

    /// This rule never changes the plan's schema.
    fn schema_check(&self) -> bool {
        true
    }
}
/// Ensures the plan's top-level requirements are captured in an
/// `OutputRequirementExec`. If the helper already inserted one (because it
/// found a `SortExec`/`SortPreservingMergeExec`), the plan is returned
/// unchanged; otherwise a node with no specific requirements is added.
fn require_top_ordering(plan: Arc<dyn ExecutionPlan>) -> Result<Arc<dyn ExecutionPlan>> {
    let (new_plan, is_changed) = require_top_ordering_helper(plan)?;
    Ok(if is_changed {
        new_plan
    } else {
        // No ordering-defining node was found: record "no requirements".
        Arc::new(OutputRequirementExec::new(
            new_plan,
            None,
            Distribution::UnspecifiedDistribution,
        )) as _
    })
}
/// Walks down through order-preserving unary nodes looking for the node
/// that defines the plan's output ordering (`SortExec` or
/// `SortPreservingMergeExec`). When found, wraps the plan above it in an
/// `OutputRequirementExec` capturing that ordering and returns `true`;
/// otherwise returns the plan unchanged with `false`.
fn require_top_ordering_helper(
    plan: Arc<dyn ExecutionPlan>,
) -> Result<(Arc<dyn ExecutionPlan>, bool)> {
    // Only unary nodes can be descended through; anything else stops the
    // search immediately.
    let mut children = plan.children();
    if children.len() != 1 {
        return Ok((plan, false));
    }
    if let Some(sort_exec) = plan.as_any().downcast_ref::<SortExec>() {
        // A SortExec defines the output ordering; capture it (and the
        // sort's own distribution requirement) before `plan` moves.
        let reqs = LexRequirement::from(sort_exec.expr().clone());
        let dist = sort_exec.required_input_distribution()[0].clone();
        return Ok((
            Arc::new(OutputRequirementExec::new(plan, Some(reqs), dist)) as _,
            true,
        ));
    }
    if let Some(spm) = plan.as_any().downcast_ref::<SortPreservingMergeExec>() {
        // An SPM merges sorted partitions into one; its output must be a
        // single partition with the merge ordering.
        let reqs = LexRequirement::from(spm.expr().clone());
        return Ok((
            Arc::new(OutputRequirementExec::new(
                plan,
                Some(reqs),
                Distribution::SinglePartition,
            )) as _,
            true,
        ));
    }
    if plan.maintains_input_order()[0] && plan.required_input_ordering()[0].is_none() {
        // Transparent node: recurse into the only child, then rebuild this
        // node on top of the (possibly wrapped) result.
        let (new_child, is_changed) =
            require_top_ordering_helper(Arc::clone(children.swap_remove(0)))?;
        return Ok((plan.with_new_children(vec![new_child])?, is_changed));
    }
    Ok((plan, false))
}