// datafusion_physical_optimizer/enforce_distribution.rs

1// Licensed to the Apache Software Foundation (ASF) under one
2// or more contributor license agreements.  See the NOTICE file
3// distributed with this work for additional information
4// regarding copyright ownership.  The ASF licenses this file
5// to you under the Apache License, Version 2.0 (the
6// "License"); you may not use this file except in compliance
7// with the License.  You may obtain a copy of the License at
8//
9//   http://www.apache.org/licenses/LICENSE-2.0
10//
11// Unless required by applicable law or agreed to in writing,
12// software distributed under the License is distributed on an
13// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14// KIND, either express or implied.  See the License for the
15// specific language governing permissions and limitations
16// under the License.
17
18//! EnforceDistribution optimizer rule inspects the physical plan with respect
19//! to distribution requirements and adds [`RepartitionExec`]s to satisfy them
20//! when necessary. If increasing parallelism is beneficial (and also desirable
21//! according to the configuration), this rule increases partition counts in
22//! the physical plan.
23
24use std::fmt::Debug;
25use std::sync::Arc;
26
27use crate::optimizer::PhysicalOptimizerRule;
28use crate::output_requirements::OutputRequirementExec;
29use crate::utils::{
30    add_sort_above_with_check, is_coalesce_partitions, is_repartition,
31    is_sort_preserving_merge,
32};
33
34use arrow::compute::SortOptions;
35use datafusion_common::config::ConfigOptions;
36use datafusion_common::error::Result;
37use datafusion_common::stats::Precision;
38use datafusion_common::tree_node::{Transformed, TransformedResult, TreeNode};
39use datafusion_expr::logical_plan::JoinType;
40use datafusion_physical_expr::expressions::{Column, NoOp};
41use datafusion_physical_expr::utils::map_columns_before_projection;
42use datafusion_physical_expr::{
43    EquivalenceProperties, PhysicalExpr, PhysicalExprRef, physical_exprs_equal,
44};
45use datafusion_physical_plan::ExecutionPlanProperties;
46use datafusion_physical_plan::aggregates::{
47    AggregateExec, AggregateMode, PhysicalGroupBy,
48};
49use datafusion_physical_plan::coalesce_partitions::CoalescePartitionsExec;
50use datafusion_physical_plan::execution_plan::EmissionType;
51use datafusion_physical_plan::joins::{
52    CrossJoinExec, HashJoinExec, PartitionMode, SortMergeJoinExec,
53};
54use datafusion_physical_plan::projection::{ProjectionExec, ProjectionExpr};
55use datafusion_physical_plan::repartition::RepartitionExec;
56use datafusion_physical_plan::sorts::sort_preserving_merge::SortPreservingMergeExec;
57use datafusion_physical_plan::tree_node::PlanContext;
58use datafusion_physical_plan::union::{InterleaveExec, UnionExec, can_interleave};
59use datafusion_physical_plan::windows::WindowAggExec;
60use datafusion_physical_plan::windows::{BoundedWindowAggExec, get_best_fitting_window};
61use datafusion_physical_plan::{Distribution, ExecutionPlan, Partitioning};
62
63use itertools::izip;
64
65/// The `EnforceDistribution` rule ensures that distribution requirements are
66/// met. In doing so, this rule will increase the parallelism in the plan by
67/// introducing repartitioning operators to the physical plan.
68///
69/// For example, given an input such as:
70///
71///
72/// ```text
73/// ┌─────────────────────────────────┐
74/// │                                 │
75/// │          ExecutionPlan          │
76/// │                                 │
77/// └─────────────────────────────────┘
78///             ▲         ▲
79///             │         │
80///       ┌─────┘         └─────┐
81///       │                     │
82///       │                     │
83///       │                     │
84/// ┌───────────┐         ┌───────────┐
85/// │           │         │           │
86/// │ batch A1  │         │ batch B1  │
87/// │           │         │           │
88/// ├───────────┤         ├───────────┤
89/// │           │         │           │
90/// │ batch A2  │         │ batch B2  │
91/// │           │         │           │
92/// ├───────────┤         ├───────────┤
93/// │           │         │           │
94/// │ batch A3  │         │ batch B3  │
95/// │           │         │           │
96/// └───────────┘         └───────────┘
97///
98///      Input                 Input
99///        A                     B
100/// ```
101///
102/// This rule will attempt to add a `RepartitionExec` to increase parallelism
103/// (to 3, in this case) and create the following arrangement:
104///
105/// ```text
106///     ┌─────────────────────────────────┐
107///     │                                 │
108///     │          ExecutionPlan          │
109///     │                                 │
110///     └─────────────────────────────────┘
111///               ▲      ▲       ▲            Input now has 3
112///               │      │       │             partitions
113///       ┌───────┘      │       └───────┐
114///       │              │               │
115///       │              │               │
116/// ┌───────────┐  ┌───────────┐   ┌───────────┐
117/// │           │  │           │   │           │
118/// │ batch A1  │  │ batch A3  │   │ batch B3  │
119/// │           │  │           │   │           │
120/// ├───────────┤  ├───────────┤   ├───────────┤
121/// │           │  │           │   │           │
122/// │ batch B2  │  │ batch B1  │   │ batch A2  │
123/// │           │  │           │   │           │
124/// └───────────┘  └───────────┘   └───────────┘
125///       ▲              ▲               ▲
126///       │              │               │
127///       └─────────┐    │    ┌──────────┘
128///                 │    │    │
129///                 │    │    │
130///     ┌─────────────────────────────────┐   batches are
131///     │       RepartitionExec(3)        │   repartitioned
132///     │           RoundRobin            │
133///     │                                 │
134///     └─────────────────────────────────┘
135///                 ▲         ▲
136///                 │         │
137///           ┌─────┘         └─────┐
138///           │                     │
139///           │                     │
140///           │                     │
141///     ┌───────────┐         ┌───────────┐
142///     │           │         │           │
143///     │ batch A1  │         │ batch B1  │
144///     │           │         │           │
145///     ├───────────┤         ├───────────┤
146///     │           │         │           │
147///     │ batch A2  │         │ batch B2  │
148///     │           │         │           │
149///     ├───────────┤         ├───────────┤
150///     │           │         │           │
151///     │ batch A3  │         │ batch B3  │
152///     │           │         │           │
153///     └───────────┘         └───────────┘
154///
155///
156///      Input                 Input
157///        A                     B
158/// ```
159///
160/// The `EnforceDistribution` rule
161/// - is idempotent; i.e. it can be applied multiple times, each time producing
162///   the same result.
163/// - always produces a valid plan in terms of distribution requirements. Its
164///   input plan can be valid or invalid with respect to distribution requirements,
165///   but the output plan will always be valid.
166/// - produces a valid plan in terms of ordering requirements, *if* its input is
167///   a valid plan in terms of ordering requirements. If the input plan is invalid,
168///   this rule does not attempt to fix it as doing so is the responsibility of the
169///   `EnforceSorting` rule.
170///
171/// Note that distribution requirements are met in the strictest way. This may
172/// result in more than strictly necessary [`RepartitionExec`]s in the plan, but
173/// meeting the requirements in the strictest way may help avoid possible data
174/// skew in joins.
175///
176/// For example for a hash join with keys (a, b, c), the required Distribution(a, b, c)
177/// can be satisfied by several alternative partitioning ways: (a, b, c), (a, b),
178/// (a, c), (b, c), (a), (b), (c) and ( ).
179///
180/// This rule only chooses the exact match and satisfies the Distribution(a, b, c)
181/// by a HashPartition(a, b, c).
// Stateless marker type: all of the rule's behavior lives in its
// `PhysicalOptimizerRule` implementation.
#[derive(Default, Debug)]
pub struct EnforceDistribution {}
184
185impl EnforceDistribution {
186    #[expect(missing_docs)]
187    pub fn new() -> Self {
188        Self {}
189    }
190}
191
192impl PhysicalOptimizerRule for EnforceDistribution {
193    fn optimize(
194        &self,
195        plan: Arc<dyn ExecutionPlan>,
196        config: &ConfigOptions,
197    ) -> Result<Arc<dyn ExecutionPlan>> {
198        let top_down_join_key_reordering = config.optimizer.top_down_join_key_reordering;
199
200        let adjusted = if top_down_join_key_reordering {
201            // Run a top-down process to adjust input key ordering recursively
202            let plan_requirements = PlanWithKeyRequirements::new_default(plan);
203            let adjusted = plan_requirements
204                .transform_down(adjust_input_keys_ordering)
205                .data()?;
206            adjusted.plan
207        } else {
208            // Run a bottom-up process
209            plan.transform_up(|plan| {
210                Ok(Transformed::yes(reorder_join_keys_to_inputs(plan)?))
211            })
212            .data()?
213        };
214
215        let distribution_context = DistributionContext::new_default(adjusted);
216        // Distribution enforcement needs to be applied bottom-up.
217        let distribution_context = distribution_context
218            .transform_up(|distribution_context| {
219                ensure_distribution(distribution_context, config)
220            })
221            .data()?;
222        Ok(distribution_context.plan)
223    }
224
225    fn name(&self) -> &str {
226        "EnforceDistribution"
227    }
228
229    fn schema_check(&self) -> bool {
230        true
231    }
232}
233
/// Parallel lists of equi-join key expressions: `left_keys[i]` is joined
/// against `right_keys[i]`. The two vectors are always kept the same length
/// and are permuted in lock-step.
#[derive(Debug, Clone)]
struct JoinKeyPairs {
    left_keys: Vec<Arc<dyn PhysicalExpr>>,
    right_keys: Vec<Arc<dyn PhysicalExpr>>,
}
239
/// Plan tree where each node is annotated with the key expressions (in order)
/// that its parent requires; an empty vector means no requirement.
pub type PlanWithKeyRequirements = PlanContext<Vec<Arc<dyn PhysicalExpr>>>;
242
243/// When the physical planner creates the Joins, the ordering of join keys is from the original query.
244/// That might not match with the output partitioning of the join node's children
245/// A Top-Down process will use this method to adjust children's output partitioning based on the parent key reordering requirements:
246///
247/// Example:
248///     TopJoin on (a, b, c)
249///         bottom left join on(b, a, c)
250///         bottom right join on(c, b, a)
251///
252///  Will be adjusted to:
253///     TopJoin on (a, b, c)
254///         bottom left join on(a, b, c)
255///         bottom right join on(a, b, c)
256///
257/// Example:
258///     TopJoin on (a, b, c)
259///         Agg1 group by (b, a, c)
260///         Agg2 group by (c, b, a)
261///
262/// Will be adjusted to:
263///     TopJoin on (a, b, c)
264///          Projection(b, a, c)
265///             Agg1 group by (a, b, c)
266///          Projection(c, b, a)
267///             Agg2 group by (a, b, c)
268///
269/// Following is the explanation of the reordering process:
270///
271/// 1) If the current plan is Partitioned HashJoin, SortMergeJoin, check whether the requirements can be satisfied by adjusting join keys ordering:
272///    Requirements can not be satisfied, clear the current requirements, generate new requirements(to pushdown) based on the current join keys, return the unchanged plan.
273///    Requirements is already satisfied, clear the current requirements, generate new requirements(to pushdown) based on the current join keys, return the unchanged plan.
274///    Requirements can be satisfied by adjusting keys ordering, clear the current requirements, generate new requirements(to pushdown) based on the adjusted join keys, return the changed plan.
275///
276/// 2) If the current plan is Aggregation, check whether the requirements can be satisfied by adjusting group by keys ordering:
277///    Requirements can not be satisfied, clear all the requirements, return the unchanged plan.
278///    Requirements is already satisfied, clear all the requirements, return the unchanged plan.
279///    Requirements can be satisfied by adjusting keys ordering, clear all the requirements, return the changed plan.
280///
281/// 3) If the current plan is RepartitionExec, CoalescePartitionsExec or WindowAggExec, clear all the requirements, return the unchanged plan
282/// 4) If the current plan is Projection, transform the requirements to the columns before the Projection and push down requirements
283/// 5) For other types of operators, by default, pushdown the parent requirements to children.
pub fn adjust_input_keys_ordering(
    mut requirements: PlanWithKeyRequirements,
) -> Result<Transformed<PlanWithKeyRequirements>> {
    // Clone the node handle so we can inspect the operator while mutating
    // `requirements` (including its children's `data`).
    let plan = Arc::clone(&requirements.plan);

    if let Some(HashJoinExec {
        left,
        right,
        on,
        filter,
        join_type,
        projection,
        mode,
        null_equality,
        ..
    }) = plan.as_any().downcast_ref::<HashJoinExec>()
    {
        match mode {
            PartitionMode::Partitioned => {
                // Rebuilds the hash join with reordered equi-join conditions.
                // Hash joins carry no sort options, so the second tuple
                // element of `new_conditions` is ignored here.
                let join_constructor = |new_conditions: (
                    Vec<(PhysicalExprRef, PhysicalExprRef)>,
                    Vec<SortOptions>,
                )| {
                    HashJoinExec::try_new(
                        Arc::clone(left),
                        Arc::clone(right),
                        new_conditions.0,
                        filter.clone(),
                        join_type,
                        // TODO: although projection is not used in the join here, because projection pushdown is after enforce_distribution. Maybe we need to handle it later. Same as filter.
                        projection.clone(),
                        PartitionMode::Partitioned,
                        *null_equality,
                    )
                    .map(|e| Arc::new(e) as _)
                };
                return reorder_partitioned_join_keys(
                    requirements,
                    on,
                    &[],
                    &join_constructor,
                )
                .map(Transformed::yes);
            }
            PartitionMode::CollectLeft => {
                // Push down requirements to the right side
                requirements.children[1].data = match join_type {
                    // Inner/Right joins: rebase the required columns onto the
                    // right child's schema; give up if any come from the left.
                    JoinType::Inner | JoinType::Right => shift_right_required(
                        &requirements.data,
                        left.schema().fields().len(),
                    )
                    .unwrap_or_default(),
                    // Right semi/anti/mark joins output the right schema
                    // unchanged, so the requirement passes through as-is.
                    JoinType::RightSemi | JoinType::RightAnti | JoinType::RightMark => {
                        requirements.data.clone()
                    }
                    JoinType::Left
                    | JoinType::LeftSemi
                    | JoinType::LeftAnti
                    | JoinType::Full
                    | JoinType::LeftMark => vec![],
                };
            }
            PartitionMode::Auto => {
                // Can not satisfy, clear the current requirements and generate new empty requirements
                requirements.data.clear();
            }
        }
    } else if let Some(CrossJoinExec { left, .. }) =
        plan.as_any().downcast_ref::<CrossJoinExec>()
    {
        let left_columns_len = left.schema().fields().len();
        // Push down requirements to the right side
        requirements.children[1].data =
            shift_right_required(&requirements.data, left_columns_len)
                .unwrap_or_default();
    } else if let Some(SortMergeJoinExec {
        left,
        right,
        on,
        filter,
        join_type,
        sort_options,
        null_equality,
        ..
    }) = plan.as_any().downcast_ref::<SortMergeJoinExec>()
    {
        // Rebuilds the sort-merge join with reordered conditions; the sort
        // options must be permuted in lock-step with the keys.
        let join_constructor = |new_conditions: (
            Vec<(PhysicalExprRef, PhysicalExprRef)>,
            Vec<SortOptions>,
        )| {
            SortMergeJoinExec::try_new(
                Arc::clone(left),
                Arc::clone(right),
                new_conditions.0,
                filter.clone(),
                *join_type,
                new_conditions.1,
                *null_equality,
            )
            .map(|e| Arc::new(e) as _)
        };
        return reorder_partitioned_join_keys(
            requirements,
            on,
            sort_options,
            &join_constructor,
        )
        .map(Transformed::yes);
    } else if let Some(aggregate_exec) = plan.as_any().downcast_ref::<AggregateExec>() {
        if !requirements.data.is_empty() {
            // Only a final-partitioned aggregate can have its group-by keys
            // permuted to satisfy the parent; other modes absorb the
            // requirement.
            if aggregate_exec.mode() == &AggregateMode::FinalPartitioned {
                return reorder_aggregate_keys(requirements, aggregate_exec)
                    .map(Transformed::yes);
            } else {
                requirements.data.clear();
            }
        } else {
            // Keep everything unchanged
            return Ok(Transformed::no(requirements));
        }
    } else if let Some(proj) = plan.as_any().downcast_ref::<ProjectionExec>() {
        let expr = proj.expr();
        // For Projection, we need to transform the requirements to the columns before the Projection
        // And then to push down the requirements
        // Construct a mapping from new name to the original Column
        let proj_exprs: Vec<_> = expr
            .iter()
            .map(|p| (Arc::clone(&p.expr), p.alias.clone()))
            .collect();
        let new_required = map_columns_before_projection(&requirements.data, &proj_exprs);
        // Only push down when every required expression could be mapped back
        // through the projection.
        if new_required.len() == requirements.data.len() {
            requirements.children[0].data = new_required;
        } else {
            // Can not satisfy, clear the current requirements and generate new empty requirements
            requirements.data.clear();
        }
    } else if plan.as_any().downcast_ref::<RepartitionExec>().is_some()
        || plan
            .as_any()
            .downcast_ref::<CoalescePartitionsExec>()
            .is_some()
        || plan.as_any().downcast_ref::<WindowAggExec>().is_some()
    {
        // These operators change or consume partitioning, so parent key
        // requirements cannot travel through them.
        requirements.data.clear();
    } else {
        // By default, push down the parent requirements to children
        for child in requirements.children.iter_mut() {
            child.data.clone_from(&requirements.data);
        }
    }
    Ok(Transformed::yes(requirements))
}
436
437pub fn reorder_partitioned_join_keys<F>(
438    mut join_plan: PlanWithKeyRequirements,
439    on: &[(PhysicalExprRef, PhysicalExprRef)],
440    sort_options: &[SortOptions],
441    join_constructor: &F,
442) -> Result<PlanWithKeyRequirements>
443where
444    F: Fn(
445        (Vec<(PhysicalExprRef, PhysicalExprRef)>, Vec<SortOptions>),
446    ) -> Result<Arc<dyn ExecutionPlan>>,
447{
448    let parent_required = &join_plan.data;
449    let join_key_pairs = extract_join_keys(on);
450    let eq_properties = join_plan.plan.equivalence_properties();
451
452    let (
453        JoinKeyPairs {
454            left_keys,
455            right_keys,
456        },
457        positions,
458    ) = try_reorder(join_key_pairs, parent_required, eq_properties);
459
460    if let Some(positions) = positions
461        && !positions.is_empty()
462    {
463        let new_join_on = new_join_conditions(&left_keys, &right_keys);
464        let new_sort_options = (0..sort_options.len())
465            .map(|idx| sort_options[positions[idx]])
466            .collect();
467        join_plan.plan = join_constructor((new_join_on, new_sort_options))?;
468    }
469
470    join_plan.children[0].data = left_keys;
471    join_plan.children[1].data = right_keys;
472    Ok(join_plan)
473}
474
/// Permutes the group-by keys of a Partial/FinalPartitioned aggregate pair so
/// the final aggregate's output key order matches the parent's requirement,
/// then adds a projection on top to restore the original column order.
///
/// The node is returned unchanged when any precondition fails: requirement
/// length mismatch, grouping has null expressions, the keys already match,
/// no valid permutation exists, or the input is not a Partial aggregate.
pub fn reorder_aggregate_keys(
    mut agg_node: PlanWithKeyRequirements,
    agg_exec: &AggregateExec,
) -> Result<PlanWithKeyRequirements> {
    let parent_required = &agg_node.data;
    // Columns produced by the grouping, in the aggregate's output order.
    let output_columns = agg_exec
        .group_expr()
        .expr()
        .iter()
        .enumerate()
        .map(|(index, (_, name))| Column::new(name, index))
        .collect::<Vec<_>>();

    let output_exprs = output_columns
        .iter()
        .map(|c| Arc::new(c.clone()) as _)
        .collect::<Vec<_>>();

    if parent_required.len() == output_exprs.len()
        && agg_exec.group_expr().null_expr().is_empty()
        && !physical_exprs_equal(&output_exprs, parent_required)
        && let Some(positions) = expected_expr_positions(&output_exprs, parent_required)
        // Note: `agg_exec` is shadowed below by the child (Partial) aggregate.
        && let Some(agg_exec) = agg_exec.input().as_any().downcast_ref::<AggregateExec>()
        && matches!(agg_exec.mode(), &AggregateMode::Partial)
    {
        // Rebuild the partial aggregate with its group expressions permuted
        // into the parent-required order.
        let group_exprs = agg_exec.group_expr().expr();
        let new_group_exprs = positions
            .into_iter()
            .map(|idx| group_exprs[idx].clone())
            .collect();
        let partial_agg = Arc::new(AggregateExec::try_new(
            AggregateMode::Partial,
            PhysicalGroupBy::new_single(new_group_exprs),
            agg_exec.aggr_expr().to_vec(),
            agg_exec.filter_expr().to_vec(),
            Arc::clone(agg_exec.input()),
            Arc::clone(&agg_exec.input_schema),
        )?);
        // Build new group expressions that correspond to the output
        // of the "reordered" aggregator:
        let group_exprs = partial_agg.group_expr().expr();
        let new_group_by = PhysicalGroupBy::new_single(
            partial_agg
                .output_group_expr()
                .into_iter()
                .enumerate()
                .map(|(idx, expr)| (expr, group_exprs[idx].1.clone()))
                .collect(),
        );
        let new_final_agg = Arc::new(AggregateExec::try_new(
            AggregateMode::FinalPartitioned,
            new_group_by,
            agg_exec.aggr_expr().to_vec(),
            agg_exec.filter_expr().to_vec(),
            Arc::clone(&partial_agg) as _,
            agg_exec.input_schema(),
        )?);

        // Replace this node with the rebuilt pair; the requirement is now
        // satisfied, so clear it and re-attach the grandchildren.
        agg_node.plan = Arc::clone(&new_final_agg) as _;
        agg_node.data.clear();
        agg_node.children = vec![PlanWithKeyRequirements::new(
            partial_agg as _,
            vec![],
            agg_node.children.swap_remove(0).children,
        )];

        // Need to create a new projection to change the expr ordering back
        let agg_schema = new_final_agg.schema();
        let mut proj_exprs = output_columns
            .iter()
            .map(|col| {
                let name = col.name();
                let index = agg_schema.index_of(name)?;
                Ok(ProjectionExpr {
                    expr: Arc::new(Column::new(name, index)) as _,
                    alias: name.to_owned(),
                })
            })
            .collect::<Result<Vec<_>>>()?;
        let agg_fields = agg_schema.fields();
        // Pass the remaining (non-grouping) output columns through unchanged.
        for (idx, field) in agg_fields.iter().enumerate().skip(output_columns.len()) {
            let name = field.name();
            let plan = Arc::new(Column::new(name, idx)) as _;
            proj_exprs.push(ProjectionExpr {
                expr: plan,
                alias: name.clone(),
            })
        }
        return ProjectionExec::try_new(proj_exprs, new_final_agg)
            .map(|p| PlanWithKeyRequirements::new(Arc::new(p), vec![], vec![agg_node]));
    }
    Ok(agg_node)
}
568
569fn shift_right_required(
570    parent_required: &[Arc<dyn PhysicalExpr>],
571    left_columns_len: usize,
572) -> Option<Vec<Arc<dyn PhysicalExpr>>> {
573    let new_right_required = parent_required
574        .iter()
575        .filter_map(|r| {
576            r.as_any().downcast_ref::<Column>().and_then(|col| {
577                col.index()
578                    .checked_sub(left_columns_len)
579                    .map(|index| Arc::new(Column::new(col.name(), index)) as _)
580            })
581        })
582        .collect::<Vec<_>>();
583
584    // if the parent required are all coming from the right side, the requirements can be pushdown
585    (new_right_required.len() == parent_required.len()).then_some(new_right_required)
586}
587
588/// When the physical planner creates the Joins, the ordering of join keys is from the original query.
589/// That might not match with the output partitioning of the join node's children
590/// This method will try to change the ordering of the join keys to match with the
591/// partitioning of the join nodes' children. If it can not match with both sides, it will try to
592/// match with one, either the left side or the right side.
593///
594/// Example:
595///     TopJoin on (a, b, c)
596///         bottom left join on(b, a, c)
597///         bottom right join on(c, b, a)
598///
599///  Will be adjusted to:
600///     TopJoin on (b, a, c)
601///         bottom left join on(b, a, c)
602///         bottom right join on(c, b, a)
603///
604/// Compared to the Top-Down reordering process, this Bottom-Up approach is much simpler, but might not reach a best result.
605/// The Bottom-Up approach will be useful in future if we plan to support storage partition-wised Joins.
606/// In that case, the datasources/tables might be pre-partitioned and we can't adjust the key ordering of the datasources
607/// and then can't apply the Top-Down reordering process.
pub fn reorder_join_keys_to_inputs(
    plan: Arc<dyn ExecutionPlan>,
) -> Result<Arc<dyn ExecutionPlan>> {
    let plan_any = plan.as_any();
    if let Some(HashJoinExec {
        left,
        right,
        on,
        filter,
        join_type,
        projection,
        mode,
        null_equality,
        ..
    }) = plan_any.downcast_ref::<HashJoinExec>()
    {
        // Only partitioned hash joins are candidates: the reordering is
        // driven by the children's (hash) output partitioning.
        if matches!(mode, PartitionMode::Partitioned) {
            let (join_keys, positions) = reorder_current_join_keys(
                extract_join_keys(on),
                Some(left.output_partitioning()),
                Some(right.output_partitioning()),
                left.equivalence_properties(),
                right.equivalence_properties(),
            );
            // A non-empty position vector means the keys were actually
            // permuted, so the join node must be rebuilt.
            if positions.is_some_and(|idxs| !idxs.is_empty()) {
                let JoinKeyPairs {
                    left_keys,
                    right_keys,
                } = join_keys;
                let new_join_on = new_join_conditions(&left_keys, &right_keys);
                return Ok(Arc::new(HashJoinExec::try_new(
                    Arc::clone(left),
                    Arc::clone(right),
                    new_join_on,
                    filter.clone(),
                    join_type,
                    projection.clone(),
                    PartitionMode::Partitioned,
                    *null_equality,
                )?));
            }
        }
    } else if let Some(SortMergeJoinExec {
        left,
        right,
        on,
        filter,
        join_type,
        sort_options,
        null_equality,
        ..
    }) = plan_any.downcast_ref::<SortMergeJoinExec>()
    {
        let (join_keys, positions) = reorder_current_join_keys(
            extract_join_keys(on),
            Some(left.output_partitioning()),
            Some(right.output_partitioning()),
            left.equivalence_properties(),
            right.equivalence_properties(),
        );
        if let Some(positions) = positions
            && !positions.is_empty()
        {
            let JoinKeyPairs {
                left_keys,
                right_keys,
            } = join_keys;
            let new_join_on = new_join_conditions(&left_keys, &right_keys);
            // Sort options must be permuted in lock-step with the keys.
            let new_sort_options = (0..sort_options.len())
                .map(|idx| sort_options[positions[idx]])
                .collect();
            return SortMergeJoinExec::try_new(
                Arc::clone(left),
                Arc::clone(right),
                new_join_on,
                filter.clone(),
                *join_type,
                new_sort_options,
                *null_equality,
            )
            .map(|smj| Arc::new(smj) as _);
        }
    }
    // Any other operator, or nothing to reorder: return the plan untouched.
    Ok(plan)
}
693
/// Reorder the current join keys ordering based on either left partition or right partition.
///
/// The left child's hash partitioning is tried first; if it yields no valid
/// reordering, the function recurses with the left partitioning masked out so
/// the right child's hash partitioning gets a chance. Children that are not
/// hash-partitioned cannot drive a reordering, in which case `(join_keys,
/// None)` is returned unchanged.
fn reorder_current_join_keys(
    join_keys: JoinKeyPairs,
    left_partition: Option<&Partitioning>,
    right_partition: Option<&Partitioning>,
    left_equivalence_properties: &EquivalenceProperties,
    right_equivalence_properties: &EquivalenceProperties,
) -> (JoinKeyPairs, Option<Vec<usize>>) {
    match (left_partition, right_partition) {
        (Some(Partitioning::Hash(left_exprs, _)), _) => {
            match try_reorder(join_keys, left_exprs, left_equivalence_properties) {
                // No match against the left side: retry with `None` for the
                // left partition so the right side is considered.
                (join_keys, None) => reorder_current_join_keys(
                    join_keys,
                    None,
                    right_partition,
                    left_equivalence_properties,
                    right_equivalence_properties,
                ),
                result => result,
            }
        }
        (_, Some(Partitioning::Hash(right_exprs, _))) => {
            try_reorder(join_keys, right_exprs, right_equivalence_properties)
        }
        _ => (join_keys, None),
    }
}
721
/// Tries to permute `join_keys` so they line up with the `expected` expressions.
///
/// Returns the (possibly reordered) keys together with:
/// - `None` when no valid permutation exists (including a length mismatch),
/// - `Some(vec![])` when the keys already match `expected` — directly or
///   after normalization through the equivalence groups — so nothing changed,
/// - `Some(positions)` when the keys were permuted; `positions[i]` is the
///   index in the original key lists whose entry moved to slot `i`.
fn try_reorder(
    join_keys: JoinKeyPairs,
    expected: &[Arc<dyn PhysicalExpr>],
    equivalence_properties: &EquivalenceProperties,
) -> (JoinKeyPairs, Option<Vec<usize>>) {
    let eq_groups = equivalence_properties.eq_group();
    let mut normalized_expected = vec![];
    let mut normalized_left_keys = vec![];
    let mut normalized_right_keys = vec![];
    // A permutation is only possible when both lists have the same length.
    if join_keys.left_keys.len() != expected.len() {
        return (join_keys, None);
    }
    if physical_exprs_equal(expected, &join_keys.left_keys)
        || physical_exprs_equal(expected, &join_keys.right_keys)
    {
        return (join_keys, Some(vec![]));
    } else if !equivalence_properties.eq_group().is_empty() {
        // Normalize everything through the equivalence groups so that
        // expressions known to be equal still compare as matching.
        normalized_expected = expected
            .iter()
            .map(|e| eq_groups.normalize_expr(Arc::clone(e)))
            .collect();

        normalized_left_keys = join_keys
            .left_keys
            .iter()
            .map(|e| eq_groups.normalize_expr(Arc::clone(e)))
            .collect();

        normalized_right_keys = join_keys
            .right_keys
            .iter()
            .map(|e| eq_groups.normalize_expr(Arc::clone(e)))
            .collect();

        if physical_exprs_equal(&normalized_expected, &normalized_left_keys)
            || physical_exprs_equal(&normalized_expected, &normalized_right_keys)
        {
            return (join_keys, Some(vec![]));
        }
    }

    // Look for a permutation: raw left keys, raw right keys, then the
    // normalized forms (which are empty vectors unless computed above, in
    // which case `expected_expr_positions` returns `None` for them).
    let Some(positions) = expected_expr_positions(&join_keys.left_keys, expected)
        .or_else(|| expected_expr_positions(&join_keys.right_keys, expected))
        .or_else(|| expected_expr_positions(&normalized_left_keys, &normalized_expected))
        .or_else(|| {
            expected_expr_positions(&normalized_right_keys, &normalized_expected)
        })
    else {
        return (join_keys, None);
    };

    // Apply the permutation to both key lists in lock-step so pairs stay
    // aligned.
    let mut new_left_keys = vec![];
    let mut new_right_keys = vec![];
    for pos in positions.iter() {
        new_left_keys.push(Arc::clone(&join_keys.left_keys[*pos]));
        new_right_keys.push(Arc::clone(&join_keys.right_keys[*pos]));
    }
    let pairs = JoinKeyPairs {
        left_keys: new_left_keys,
        right_keys: new_right_keys,
    };

    (pairs, Some(positions))
}
786
787/// Return the expected expressions positions.
788/// For example, the current expressions are ['c', 'a', 'a', b'], the expected expressions are ['b', 'c', 'a', 'a'],
789///
790/// This method will return a Vec [3, 0, 1, 2]
791fn expected_expr_positions(
792    current: &[Arc<dyn PhysicalExpr>],
793    expected: &[Arc<dyn PhysicalExpr>],
794) -> Option<Vec<usize>> {
795    if current.is_empty() || expected.is_empty() {
796        return None;
797    }
798    let mut indexes: Vec<usize> = vec![];
799    let mut current = current.to_vec();
800    for expr in expected.iter() {
801        // Find the position of the expected expr in the current expressions
802        if let Some(expected_position) = current.iter().position(|e| e.eq(expr)) {
803            current[expected_position] = Arc::new(NoOp::new());
804            indexes.push(expected_position);
805        } else {
806            return None;
807        }
808    }
809    Some(indexes)
810}
811
812fn extract_join_keys(on: &[(PhysicalExprRef, PhysicalExprRef)]) -> JoinKeyPairs {
813    let (left_keys, right_keys) = on
814        .iter()
815        .map(|(l, r)| (Arc::clone(l) as _, Arc::clone(r) as _))
816        .unzip();
817    JoinKeyPairs {
818        left_keys,
819        right_keys,
820    }
821}
822
823fn new_join_conditions(
824    new_left_keys: &[Arc<dyn PhysicalExpr>],
825    new_right_keys: &[Arc<dyn PhysicalExpr>],
826) -> Vec<(PhysicalExprRef, PhysicalExprRef)> {
827    new_left_keys
828        .iter()
829        .zip(new_right_keys.iter())
830        .map(|(l_key, r_key)| (Arc::clone(l_key), Arc::clone(r_key)))
831        .collect()
832}
833
834/// Adds RoundRobin repartition operator to the plan increase parallelism.
835///
836/// # Arguments
837///
838/// * `input`: Current node.
839/// * `n_target`: desired target partition number, if partition number of the
840///   current executor is less than this value. Partition number will be increased.
841///
842/// # Returns
843///
844/// A [`Result`] object that contains new execution plan where the desired
845/// partition number is achieved by adding a RoundRobin repartition.
846fn add_roundrobin_on_top(
847    input: DistributionContext,
848    n_target: usize,
849) -> Result<DistributionContext> {
850    // Adding repartition is helpful:
851    if input.plan.output_partitioning().partition_count() < n_target {
852        // When there is an existing ordering, we preserve ordering
853        // during repartition. This will be un-done in the future
854        // If any of the following conditions is true
855        // - Preserving ordering is not helpful in terms of satisfying ordering requirements
856        // - Usage of order preserving variants is not desirable
857        // (determined by flag `config.optimizer.prefer_existing_sort`)
858        let partitioning = Partitioning::RoundRobinBatch(n_target);
859        let repartition =
860            RepartitionExec::try_new(Arc::clone(&input.plan), partitioning)?
861                .with_preserve_order();
862
863        let new_plan = Arc::new(repartition) as _;
864
865        Ok(DistributionContext::new(new_plan, true, vec![input]))
866    } else {
867        // Partition is not helpful, we already have desired number of partitions.
868        Ok(input)
869    }
870}
871
872/// Adds a hash repartition operator:
873/// - to increase parallelism, and/or
874/// - to satisfy requirements of the subsequent operators.
875///
876/// Repartition(Hash) is added on top of operator `input`.
877///
878/// # Arguments
879///
880/// * `input`: Current node.
881/// * `hash_exprs`: Stores Physical Exprs that are used during hashing.
882/// * `n_target`: desired target partition number, if partition number of the
883///   current executor is less than this value. Partition number will be increased.
884/// * `allow_subset_satisfy_partitioning`: Whether to allow subset partitioning logic in satisfaction checks.
885///   Set to `false` for partitioned hash joins to ensure exact hash matching.
886///
887/// # Returns
888///
889/// A [`Result`] object that contains new execution plan where the desired
890/// distribution is satisfied by adding a Hash repartition.
891fn add_hash_on_top(
892    input: DistributionContext,
893    hash_exprs: Vec<Arc<dyn PhysicalExpr>>,
894    n_target: usize,
895    allow_subset_satisfy_partitioning: bool,
896) -> Result<DistributionContext> {
897    // Early return if hash repartition is unnecessary
898    // `RepartitionExec: partitioning=Hash([...], 1), input_partitions=1` is unnecessary.
899    if n_target == 1 && input.plan.output_partitioning().partition_count() == 1 {
900        return Ok(input);
901    }
902
903    let dist = Distribution::HashPartitioned(hash_exprs);
904    let satisfaction = input.plan.output_partitioning().satisfaction(
905        &dist,
906        input.plan.equivalence_properties(),
907        allow_subset_satisfy_partitioning,
908    );
909
910    // Add hash repartitioning when:
911    // - When subset satisfaction is enabled (current >= threshold): only repartition if not satisfied
912    // - When below threshold (current < threshold): repartition if expressions don't match OR to increase parallelism
913    let needs_repartition = if allow_subset_satisfy_partitioning {
914        !satisfaction.is_satisfied()
915    } else {
916        !satisfaction.is_satisfied()
917            || n_target > input.plan.output_partitioning().partition_count()
918    };
919
920    if needs_repartition {
921        // When there is an existing ordering, we preserve ordering during
922        // repartition. This will be rolled back in the future if any of the
923        // following conditions is true:
924        // - Preserving ordering is not helpful in terms of satisfying ordering
925        //   requirements.
926        // - Usage of order preserving variants is not desirable (per the flag
927        //   `config.optimizer.prefer_existing_sort`).
928        let partitioning = dist.create_partitioning(n_target);
929        let repartition =
930            RepartitionExec::try_new(Arc::clone(&input.plan), partitioning)?
931                .with_preserve_order();
932        let plan = Arc::new(repartition) as _;
933
934        return Ok(DistributionContext::new(plan, true, vec![input]));
935    }
936
937    Ok(input)
938}
939
940/// Adds a [`SortPreservingMergeExec`] or a [`CoalescePartitionsExec`] operator
941/// on top of the given plan node to satisfy a single partition requirement
942/// while preserving ordering constraints.
943///
944/// # Parameters
945///
946/// * `input`: Current node.
947///
948/// # Returns
949///
950/// Updated node with an execution plan, where the desired single distribution
951/// requirement is satisfied.
952fn add_merge_on_top(input: DistributionContext) -> DistributionContext {
953    // Apply only when the partition count is larger than one.
954    if input.plan.output_partitioning().partition_count() > 1 {
955        // When there is an existing ordering, we preserve ordering
956        // when decreasing partitions. This will be un-done in the future
957        // if any of the following conditions is true
958        // - Preserving ordering is not helpful in terms of satisfying ordering requirements
959        // - Usage of order preserving variants is not desirable
960        // (determined by flag `config.optimizer.prefer_existing_sort`)
961        let new_plan = if let Some(req) = input.plan.output_ordering() {
962            Arc::new(SortPreservingMergeExec::new(
963                req.clone(),
964                Arc::clone(&input.plan),
965            )) as _
966        } else {
967            // If there is no input order, we can simply coalesce partitions:
968            Arc::new(CoalescePartitionsExec::new(Arc::clone(&input.plan))) as _
969        };
970
971        DistributionContext::new(new_plan, true, vec![input])
972    } else {
973        input
974    }
975}
976
977/// Updates the physical plan inside [`DistributionContext`] so that distribution
978/// changing operators are removed from the top. If they are necessary, they will
979/// be added in subsequent stages.
980///
981/// Assume that following plan is given:
982/// ```text
983/// "RepartitionExec: partitioning=RoundRobinBatch(10), input_partitions=10",
984/// "  RepartitionExec: partitioning=RoundRobinBatch(10), input_partitions=2",
985/// "    DataSourceExec: file_groups={2 groups: \[\[x], \[y]]}, projection=\[a, b, c, d, e], output_ordering=\[a@0 ASC], file_type=parquet",
986/// ```
987///
988/// Since `RepartitionExec`s change the distribution, this function removes
989/// them and returns following plan:
990///
991/// ```text
992/// "DataSourceExec: file_groups={2 groups: \[\[x], \[y]]}, projection=\[a, b, c, d, e], output_ordering=\[a@0 ASC], file_type=parquet",
993/// ```
994fn remove_dist_changing_operators(
995    mut distribution_context: DistributionContext,
996) -> Result<DistributionContext> {
997    while is_repartition(&distribution_context.plan)
998        || is_coalesce_partitions(&distribution_context.plan)
999        || is_sort_preserving_merge(&distribution_context.plan)
1000    {
1001        // All of above operators have a single child. First child is only child.
1002        // Remove any distribution changing operators at the beginning:
1003        distribution_context = distribution_context.children.swap_remove(0);
1004        // Note that they will be re-inserted later on if necessary or helpful.
1005    }
1006
1007    Ok(distribution_context)
1008}
1009
1010/// Updates the [`DistributionContext`] if preserving ordering while changing partitioning is not helpful or desirable.
1011///
1012/// Assume that following plan is given:
1013/// ```text
1014/// "SortPreservingMergeExec: \[a@0 ASC]"
1015/// "  RepartitionExec: partitioning=RoundRobinBatch(10), input_partitions=10, preserve_order=true",
1016/// "    RepartitionExec: partitioning=RoundRobinBatch(10), input_partitions=2, preserve_order=true",
1017/// "      DataSourceExec: file_groups={2 groups: \[\[x], \[y]]}, projection=\[a, b, c, d, e], output_ordering=\[a@0 ASC], file_type=parquet",
1018/// ```
1019///
1020/// This function converts plan above to the following:
1021///
1022/// ```text
1023/// "CoalescePartitionsExec"
1024/// "  RepartitionExec: partitioning=RoundRobinBatch(10), input_partitions=10",
1025/// "    RepartitionExec: partitioning=RoundRobinBatch(10), input_partitions=2",
1026/// "      DataSourceExec: file_groups={2 groups: \[\[x], \[y]]}, projection=\[a, b, c, d, e], output_ordering=\[a@0 ASC], file_type=parquet",
1027/// ```
1028pub fn replace_order_preserving_variants(
1029    mut context: DistributionContext,
1030) -> Result<DistributionContext> {
1031    context.children = context
1032        .children
1033        .into_iter()
1034        .map(|child| {
1035            if child.data {
1036                replace_order_preserving_variants(child)
1037            } else {
1038                Ok(child)
1039            }
1040        })
1041        .collect::<Result<Vec<_>>>()?;
1042
1043    if is_sort_preserving_merge(&context.plan) {
1044        let child_plan = Arc::clone(&context.children[0].plan);
1045        context.plan = Arc::new(
1046            CoalescePartitionsExec::new(child_plan).with_fetch(context.plan.fetch()),
1047        );
1048        return Ok(context);
1049    } else if let Some(repartition) =
1050        context.plan.as_any().downcast_ref::<RepartitionExec>()
1051        && repartition.preserve_order()
1052    {
1053        context.plan = Arc::new(RepartitionExec::try_new(
1054            Arc::clone(&context.children[0].plan),
1055            repartition.partitioning().clone(),
1056        )?);
1057        return Ok(context);
1058    }
1059
1060    context.update_plan_from_children()
1061}
1062
/// A struct to keep track of repartition requirements for each child node.
/// Instances are computed (and aligned across children) by
/// [`get_repartition_requirement_status`].
struct RepartitionRequirementStatus {
    /// The distribution requirement for the node.
    requirement: Distribution,
    /// Designates whether round robin partitioning is theoretically beneficial;
    /// i.e. the operator can actually utilize parallelism.
    roundrobin_beneficial: bool,
    /// Designates whether round robin partitioning is beneficial according to
    /// the statistical information we have on the number of rows.
    roundrobin_beneficial_stats: bool,
    /// Designates whether hash partitioning is necessary.
    hash_necessary: bool,
}
1076
1077/// Calculates the `RepartitionRequirementStatus` for each children to generate
1078/// consistent and sensible (in terms of performance) distribution requirements.
1079/// As an example, a hash join's left (build) child might produce
1080///
1081/// ```text
1082/// RepartitionRequirementStatus {
1083///     ..,
1084///     hash_necessary: true
1085/// }
1086/// ```
1087///
1088/// while its right (probe) child might have very few rows and produce:
1089///
1090/// ```text
1091/// RepartitionRequirementStatus {
1092///     ..,
1093///     hash_necessary: false
1094/// }
1095/// ```
1096///
1097/// These statuses are not consistent as all children should agree on hash
1098/// partitioning. This function aligns the statuses to generate consistent
1099/// hash partitions for each children. After alignment, the right child's
1100/// status would turn into:
1101///
1102/// ```text
1103/// RepartitionRequirementStatus {
1104///     ..,
1105///     hash_necessary: true
1106/// }
1107/// ```
1108fn get_repartition_requirement_status(
1109    plan: &Arc<dyn ExecutionPlan>,
1110    batch_size: usize,
1111    should_use_estimates: bool,
1112) -> Result<Vec<RepartitionRequirementStatus>> {
1113    let mut needs_alignment = false;
1114    let children = plan.children();
1115    let rr_beneficial = plan.benefits_from_input_partitioning();
1116    let requirements = plan.required_input_distribution();
1117    let mut repartition_status_flags = vec![];
1118    for (child, requirement, roundrobin_beneficial) in
1119        izip!(children.into_iter(), requirements, rr_beneficial)
1120    {
1121        // Decide whether adding a round robin is beneficial depending on
1122        // the statistical information we have on the number of rows:
1123        let roundrobin_beneficial_stats = match child.partition_statistics(None)?.num_rows
1124        {
1125            Precision::Exact(n_rows) => n_rows > batch_size,
1126            Precision::Inexact(n_rows) => !should_use_estimates || (n_rows > batch_size),
1127            Precision::Absent => true,
1128        };
1129        let is_hash = matches!(requirement, Distribution::HashPartitioned(_));
1130        // Hash re-partitioning is necessary when the input has more than one
1131        // partitions:
1132        let multi_partitions = child.output_partitioning().partition_count() > 1;
1133        let roundrobin_sensible = roundrobin_beneficial && roundrobin_beneficial_stats;
1134        needs_alignment |= is_hash && (multi_partitions || roundrobin_sensible);
1135        repartition_status_flags.push((
1136            is_hash,
1137            RepartitionRequirementStatus {
1138                requirement,
1139                roundrobin_beneficial,
1140                roundrobin_beneficial_stats,
1141                hash_necessary: is_hash && multi_partitions,
1142            },
1143        ));
1144    }
1145    // Align hash necessary flags for hash partitions to generate consistent
1146    // hash partitions at each children:
1147    if needs_alignment {
1148        // When there is at least one hash requirement that is necessary or
1149        // beneficial according to statistics, make all children require hash
1150        // repartitioning:
1151        for (is_hash, status) in &mut repartition_status_flags {
1152            if *is_hash {
1153                status.hash_necessary = true;
1154            }
1155        }
1156    }
1157    Ok(repartition_status_flags
1158        .into_iter()
1159        .map(|(_, status)| status)
1160        .collect())
1161}
1162
/// This function checks whether we need to add additional data exchange
/// operators to satisfy distribution requirements. Since this function
/// takes care of such requirements, we should avoid manually adding data
/// exchange operators in other places.
///
/// This function is intended to be used in a bottom up traversal, as it
/// can first repartition (or newly partition) at the datasources -- these
/// source partitions may be later repartitioned with additional data exchange operators.
pub fn ensure_distribution(
    dist_context: DistributionContext,
    config: &ConfigOptions,
) -> Result<Transformed<DistributionContext>> {
    // Refresh the per-child tracking flags that mark subtrees containing
    // distribution-changing operators (see `update_children`):
    let dist_context = update_children(dist_context)?;

    // Leaf nodes have no children whose distribution needs enforcing:
    if dist_context.plan.children().is_empty() {
        return Ok(Transformed::no(dist_context));
    }

    let target_partitions = config.execution.target_partitions;
    // When `false`, round robin repartition will not be added to increase parallelism
    let enable_round_robin = config.optimizer.enable_round_robin_repartition;
    let repartition_file_scans = config.optimizer.repartition_file_scans;
    let batch_size = config.execution.batch_size;
    let should_use_estimates = config
        .execution
        .use_row_number_estimates_to_optimize_partitioning;
    let subset_satisfaction_threshold = config.optimizer.subset_repartition_threshold;
    let unbounded_and_pipeline_friendly = dist_context.plan.boundedness().is_unbounded()
        && matches!(
            dist_context.plan.pipeline_behavior(),
            EmissionType::Incremental | EmissionType::Both
        );
    // Use order preserving variants either of the conditions true
    // - it is desired according to config
    // - when plan is unbounded
    // - when it is pipeline friendly (can incrementally produce results)
    let order_preserving_variants_desirable =
        unbounded_and_pipeline_friendly || config.optimizer.prefer_existing_sort;

    // Remove unnecessary repartition from the physical plan if any
    let DistributionContext {
        mut plan,
        data,
        children,
    } = remove_dist_changing_operators(dist_context)?;

    // If a better-fitting window operator variant exists for the current
    // partitioning/ordering, substitute it before enforcing distribution:
    if let Some(exec) = plan.as_any().downcast_ref::<WindowAggExec>() {
        if let Some(updated_window) = get_best_fitting_window(
            exec.window_expr(),
            exec.input(),
            &exec.partition_keys(),
        )? {
            plan = updated_window;
        }
    } else if let Some(exec) = plan.as_any().downcast_ref::<BoundedWindowAggExec>()
        && let Some(updated_window) = get_best_fitting_window(
            exec.window_expr(),
            exec.input(),
            &exec.partition_keys(),
        )?
    {
        plan = updated_window;
    };

    // For joins in partitioned mode, we need exact hash matching between
    // both sides, so subset partitioning logic must be disabled.
    //
    // Why: Different hash expressions produce different hash values, causing
    // rows with the same join key to land in different partitions. Since
    // partitioned joins match partition N left with partition N right, rows
    // that should match may be in different partitions and miss each other.
    //
    // Example JOIN ON left.a = right.a:
    //
    // Left: Hash([a])
    //  Partition 1: a=1
    //  Partition 2: a=2
    //
    // Right: Hash([a, b])
    //  Partition 1: (a=1, b=1) -> Same a=1
    //  Partition 2: (a=2, b=2)
    //  Partition 3: (a=1, b=2) -> Same a=1
    //
    // Partitioned join execution:
    //  P1 left (a=1) joins P1 right (a=1, b=1) -> Match
    //  P2 left (a=2) joins P2 right (a=2, b=2) -> Match
    //  P3 left (empty) joins P3 right (a=1, b=2) -> Missing, errors
    //
    // The row (a=1, b=2) should match left.a=1 but they're in different
    // partitions, causing panics.
    //
    // CollectLeft/CollectRight modes are safe because one side is collected
    // to a single partition which eliminates partition-to-partition mapping.
    let is_partitioned_join = plan
        .as_any()
        .downcast_ref::<HashJoinExec>()
        .is_some_and(|join| matches!(join.mode, PartitionMode::Partitioned))
        || plan.as_any().is::<SortMergeJoinExec>();

    let repartition_status_flags =
        get_repartition_requirement_status(&plan, batch_size, should_use_estimates)?;
    // This loop iterates over all the children to:
    // - Increase parallelism for every child if it is beneficial.
    // - Satisfy the distribution requirements of every child, if it is not
    //   already satisfied.
    // We store the updated children in `new_children`.
    let children = izip!(
        children.into_iter(),
        plan.required_input_ordering(),
        plan.maintains_input_order(),
        repartition_status_flags.into_iter()
    )
    .map(
        |(
            mut child,
            required_input_ordering,
            maintains,
            RepartitionRequirementStatus {
                requirement,
                roundrobin_beneficial,
                roundrobin_beneficial_stats,
                hash_necessary,
            },
        )| {
            let increases_partition_count =
                child.plan.output_partitioning().partition_count() < target_partitions;

            let add_roundrobin = enable_round_robin
                // Operator benefits from partitioning (e.g. filter):
                && roundrobin_beneficial
                && roundrobin_beneficial_stats
                // Unless partitioning increases the partition count, it is not beneficial:
                && increases_partition_count;

            // Allow subset satisfaction when:
            // 1. Current partition count >= threshold
            // 2. Not a partitioned join since must use exact hash matching for joins
            let current_partitions = child.plan.output_partitioning().partition_count();
            let allow_subset_satisfy_partitioning = current_partitions
                >= subset_satisfaction_threshold
                && !is_partitioned_join;

            // When `repartition_file_scans` is set, attempt to increase
            // parallelism at the source.
            //
            // If repartitioning is not possible (a.k.a. None is returned from `ExecutionPlan::repartitioned`)
            // then no repartitioning will have occurred. As the default implementation returns None, it is only
            // specific physical plan nodes, such as certain datasources, which are repartitioned.
            if repartition_file_scans
                && roundrobin_beneficial_stats
                && let Some(new_child) =
                    child.plan.repartitioned(target_partitions, config)?
            {
                child.plan = new_child;
            }

            // Satisfy the distribution requirement if it is unmet.
            match &requirement {
                Distribution::SinglePartition => {
                    child = add_merge_on_top(child);
                }
                Distribution::HashPartitioned(exprs) => {
                    // See https://github.com/apache/datafusion/issues/18341#issuecomment-3503238325 for background
                    // When inserting hash is necessary to satisfy hash requirement, insert hash repartition.
                    if hash_necessary {
                        child = add_hash_on_top(
                            child,
                            exprs.to_vec(),
                            target_partitions,
                            allow_subset_satisfy_partitioning,
                        )?;
                    }
                }
                Distribution::UnspecifiedDistribution => {
                    if add_roundrobin {
                        // Add round-robin repartitioning on top of the operator
                        // to increase parallelism.
                        child = add_roundrobin_on_top(child, target_partitions)?;
                    }
                }
            };

            // There is an ordering requirement of the operator:
            if let Some(required_input_ordering) = required_input_ordering {
                // Either:
                // - Ordering requirement cannot be satisfied by preserving ordering through repartitions, or
                // - using order preserving variant is not desirable.
                let sort_req = required_input_ordering.into_single();
                let ordering_satisfied = child
                    .plan
                    .equivalence_properties()
                    .ordering_satisfy_requirement(sort_req.clone())?;

                if (!ordering_satisfied || !order_preserving_variants_desirable)
                    && child.data
                {
                    child = replace_order_preserving_variants(child)?;
                    // If ordering requirements were satisfied before repartitioning,
                    // make sure ordering requirements are still satisfied after.
                    if ordering_satisfied {
                        // Make sure to satisfy ordering requirement:
                        child = add_sort_above_with_check(
                            child,
                            sort_req,
                            plan.as_any()
                                .downcast_ref::<OutputRequirementExec>()
                                .map(|output| output.fetch())
                                .unwrap_or(None),
                        )?;
                    }
                }
                // Stop tracking distribution changing operators
                child.data = false;
            } else {
                // no ordering requirement
                match requirement {
                    // Operator requires specific distribution.
                    Distribution::SinglePartition | Distribution::HashPartitioned(_) => {
                        // Since there is no ordering requirement, preserving ordering is pointless
                        child = replace_order_preserving_variants(child)?;
                    }
                    Distribution::UnspecifiedDistribution => {
                        // Since ordering is lost, trying to preserve ordering is pointless
                        if !maintains || plan.as_any().is::<OutputRequirementExec>() {
                            child = replace_order_preserving_variants(child)?;
                        }
                    }
                }
            }
            Ok(child)
        },
    )
    .collect::<Result<Vec<_>>>()?;

    let children_plans = children
        .iter()
        .map(|c| Arc::clone(&c.plan))
        .collect::<Vec<_>>();

    plan = if plan.as_any().is::<UnionExec>()
        && !config.optimizer.prefer_existing_union
        && can_interleave(children_plans.iter())
    {
        // Add a special case for [`UnionExec`] since we want to "bubble up"
        // hash-partitioned data. So instead of
        //
        // Agg:
        //   Repartition (hash):
        //     Union:
        //       - Agg:
        //           Repartition (hash):
        //             Data
        //       - Agg:
        //           Repartition (hash):
        //             Data
        //
        // we can use:
        //
        // Agg:
        //   Interleave:
        //     - Agg:
        //         Repartition (hash):
        //           Data
        //     - Agg:
        //         Repartition (hash):
        //           Data
        Arc::new(InterleaveExec::try_new(children_plans)?)
    } else {
        plan.with_new_children(children_plans)?
    };

    Ok(Transformed::yes(DistributionContext::new(
        plan, data, children,
    )))
}
1438
/// Keeps track of distribution changing operators (like `RepartitionExec`,
/// `SortPreservingMergeExec`, `CoalescePartitionsExec`) and their ancestors.
/// Using this information, we can optimize distribution of the plan if/when
/// necessary.
///
/// The `bool` payload flags nodes whose subtree contains such a
/// distribution-changing operator; it is maintained by `update_children` and
/// consulted (e.g. in `ensure_distribution`) before rolling back
/// order-preserving variants.
pub type DistributionContext = PlanContext<bool>;
1444
1445fn update_children(mut dist_context: DistributionContext) -> Result<DistributionContext> {
1446    for child_context in dist_context.children.iter_mut() {
1447        let child_plan_any = child_context.plan.as_any();
1448        child_context.data =
1449            if let Some(repartition) = child_plan_any.downcast_ref::<RepartitionExec>() {
1450                !matches!(
1451                    repartition.partitioning(),
1452                    Partitioning::UnknownPartitioning(_)
1453                )
1454            } else {
1455                child_plan_any.is::<SortPreservingMergeExec>()
1456                    || child_plan_any.is::<CoalescePartitionsExec>()
1457                    || child_context.plan.children().is_empty()
1458                    || child_context.children[0].data
1459                    || child_context
1460                        .plan
1461                        .required_input_distribution()
1462                        .iter()
1463                        .zip(child_context.children.iter())
1464                        .any(|(required_dist, child_context)| {
1465                            child_context.data
1466                                && matches!(
1467                                    required_dist,
1468                                    Distribution::UnspecifiedDistribution
1469                                )
1470                        })
1471            }
1472    }
1473
1474    dist_context.data = false;
1475    Ok(dist_context)
1476}
1477
1478// See tests in datafusion/core/tests/physical_optimizer