Skip to main content

datafusion_physical_plan/windows/
window_agg_exec.rs

1// Licensed to the Apache Software Foundation (ASF) under one
2// or more contributor license agreements.  See the NOTICE file
3// distributed with this work for additional information
4// regarding copyright ownership.  The ASF licenses this file
5// to you under the Apache License, Version 2.0 (the
6// "License"); you may not use this file except in compliance
7// with the License.  You may obtain a copy of the License at
8//
9//   http://www.apache.org/licenses/LICENSE-2.0
10//
11// Unless required by applicable law or agreed to in writing,
12// software distributed under the License is distributed on an
13// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14// KIND, either express or implied.  See the License for the
15// specific language governing permissions and limitations
16// under the License.
17
18//! Stream and channel implementations for window function expressions.
19
20use std::any::Any;
21use std::pin::Pin;
22use std::sync::Arc;
23use std::task::{Context, Poll};
24
25use super::utils::create_schema;
26use crate::execution_plan::EmissionType;
27use crate::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
28use crate::windows::{
29    calc_requirements, get_ordered_partition_by_indices, get_partition_by_sort_exprs,
30    window_equivalence_properties,
31};
32use crate::{
33    ColumnStatistics, DisplayAs, DisplayFormatType, Distribution, ExecutionPlan,
34    ExecutionPlanProperties, PhysicalExpr, PlanProperties, RecordBatchStream,
35    SendableRecordBatchStream, Statistics, WindowExpr, check_if_same_properties,
36};
37
38use arrow::array::ArrayRef;
39use arrow::compute::{concat, concat_batches};
40use arrow::datatypes::SchemaRef;
41use arrow::error::ArrowError;
42use arrow::record_batch::RecordBatch;
43use datafusion_common::stats::Precision;
44use datafusion_common::utils::{evaluate_partition_ranges, transpose};
45use datafusion_common::{Result, assert_eq_or_internal_err};
46use datafusion_execution::TaskContext;
47use datafusion_physical_expr_common::sort_expr::{
48    OrderingRequirements, PhysicalSortExpr,
49};
50
51use futures::{Stream, StreamExt, ready};
52
/// Window execution plan: buffers its input, evaluates the window function
/// expressions, and appends their results as new columns after the input
/// columns.
#[derive(Debug, Clone)]
pub struct WindowAggExec {
    /// Input plan
    pub(crate) input: Arc<dyn ExecutionPlan>,
    /// Window function expressions; their results become the trailing output columns
    window_expr: Vec<Arc<dyn WindowExpr>>,
    /// Schema after the window is run (input fields plus one field per window expression)
    schema: SchemaRef,
    /// Execution metrics
    metrics: ExecutionPlanMetricsSet,
    /// Partition by indices that defines preset for existing ordering
    // see `get_ordered_partition_by_indices` for more details.
    ordered_partition_by_indices: Vec<usize>,
    /// Cache holding plan properties like equivalences, output partitioning etc.
    cache: Arc<PlanProperties>,
    /// If `can_repartition` is false, `partition_keys()` always returns an empty `Vec`.
    can_repartition: bool,
}
72
73impl WindowAggExec {
74    /// Create a new execution plan for window aggregates
75    pub fn try_new(
76        window_expr: Vec<Arc<dyn WindowExpr>>,
77        input: Arc<dyn ExecutionPlan>,
78        can_repartition: bool,
79    ) -> Result<Self> {
80        let schema = create_schema(&input.schema(), &window_expr)?;
81        let schema = Arc::new(schema);
82
83        let ordered_partition_by_indices =
84            get_ordered_partition_by_indices(window_expr[0].partition_by(), &input)?;
85        let cache = Self::compute_properties(&schema, &input, &window_expr)?;
86        Ok(Self {
87            input,
88            window_expr,
89            schema,
90            metrics: ExecutionPlanMetricsSet::new(),
91            ordered_partition_by_indices,
92            cache: Arc::new(cache),
93            can_repartition,
94        })
95    }
96
97    /// Window expressions
98    pub fn window_expr(&self) -> &[Arc<dyn WindowExpr>] {
99        &self.window_expr
100    }
101
102    /// Input plan
103    pub fn input(&self) -> &Arc<dyn ExecutionPlan> {
104        &self.input
105    }
106
107    /// Return the output sort order of partition keys: For example
108    /// OVER(PARTITION BY a, ORDER BY b) -> would give sorting of the column a
109    // We are sure that partition by columns are always at the beginning of sort_keys
110    // Hence returned `PhysicalSortExpr` corresponding to `PARTITION BY` columns can be used safely
111    // to calculate partition separation points
112    pub fn partition_by_sort_keys(&self) -> Result<Vec<PhysicalSortExpr>> {
113        let partition_by = self.window_expr()[0].partition_by();
114        get_partition_by_sort_exprs(
115            &self.input,
116            partition_by,
117            &self.ordered_partition_by_indices,
118        )
119    }
120
121    /// This function creates the cache object that stores the plan properties such as schema, equivalence properties, ordering, partitioning, etc.
122    fn compute_properties(
123        schema: &SchemaRef,
124        input: &Arc<dyn ExecutionPlan>,
125        window_exprs: &[Arc<dyn WindowExpr>],
126    ) -> Result<PlanProperties> {
127        // Calculate equivalence properties:
128        let eq_properties = window_equivalence_properties(schema, input, window_exprs)?;
129
130        // Get output partitioning:
131        // Because we can have repartitioning using the partition keys this
132        // would be either 1 or more than 1 depending on the presence of repartitioning.
133        let output_partitioning = input.output_partitioning().clone();
134
135        // Construct properties cache:
136        Ok(PlanProperties::new(
137            eq_properties,
138            output_partitioning,
139            // TODO: Emission type and boundedness information can be enhanced here
140            EmissionType::Final,
141            input.boundedness(),
142        ))
143    }
144
145    pub fn partition_keys(&self) -> Vec<Arc<dyn PhysicalExpr>> {
146        if !self.can_repartition {
147            vec![]
148        } else {
149            let all_partition_keys = self
150                .window_expr()
151                .iter()
152                .map(|expr| expr.partition_by().to_vec())
153                .collect::<Vec<_>>();
154
155            all_partition_keys
156                .into_iter()
157                .min_by_key(|s| s.len())
158                .unwrap_or_else(Vec::new)
159        }
160    }
161
162    fn with_new_children_and_same_properties(
163        &self,
164        mut children: Vec<Arc<dyn ExecutionPlan>>,
165    ) -> Self {
166        Self {
167            input: children.swap_remove(0),
168            metrics: ExecutionPlanMetricsSet::new(),
169            ..Self::clone(self)
170        }
171    }
172}
173
174impl DisplayAs for WindowAggExec {
175    fn fmt_as(
176        &self,
177        t: DisplayFormatType,
178        f: &mut std::fmt::Formatter,
179    ) -> std::fmt::Result {
180        match t {
181            DisplayFormatType::Default | DisplayFormatType::Verbose => {
182                write!(f, "WindowAggExec: ")?;
183                let g: Vec<String> = self
184                    .window_expr
185                    .iter()
186                    .map(|e| {
187                        format!(
188                            "{}: {:?}, frame: {:?}",
189                            e.name().to_owned(),
190                            e.field(),
191                            e.get_window_frame()
192                        )
193                    })
194                    .collect();
195                write!(f, "wdw=[{}]", g.join(", "))?;
196            }
197            DisplayFormatType::TreeRender => {
198                let g: Vec<String> = self
199                    .window_expr
200                    .iter()
201                    .map(|e| e.name().to_owned().to_string())
202                    .collect();
203                writeln!(f, "select_list={}", g.join(", "))?;
204            }
205        }
206        Ok(())
207    }
208}
209
210impl ExecutionPlan for WindowAggExec {
211    fn name(&self) -> &'static str {
212        "WindowAggExec"
213    }
214
215    /// Return a reference to Any that can be used for downcasting
216    fn as_any(&self) -> &dyn Any {
217        self
218    }
219
220    fn properties(&self) -> &Arc<PlanProperties> {
221        &self.cache
222    }
223
224    fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
225        vec![&self.input]
226    }
227
228    fn maintains_input_order(&self) -> Vec<bool> {
229        vec![true]
230    }
231
232    fn required_input_ordering(&self) -> Vec<Option<OrderingRequirements>> {
233        let partition_bys = self.window_expr()[0].partition_by();
234        let order_keys = self.window_expr()[0].order_by();
235        if self.ordered_partition_by_indices.len() < partition_bys.len() {
236            vec![calc_requirements(partition_bys, order_keys)]
237        } else {
238            let partition_bys = self
239                .ordered_partition_by_indices
240                .iter()
241                .map(|idx| &partition_bys[*idx]);
242            vec![calc_requirements(partition_bys, order_keys)]
243        }
244    }
245
246    fn required_input_distribution(&self) -> Vec<Distribution> {
247        if self.partition_keys().is_empty() {
248            vec![Distribution::SinglePartition]
249        } else {
250            vec![Distribution::HashPartitioned(self.partition_keys())]
251        }
252    }
253
254    fn with_new_children(
255        self: Arc<Self>,
256        mut children: Vec<Arc<dyn ExecutionPlan>>,
257    ) -> Result<Arc<dyn ExecutionPlan>> {
258        check_if_same_properties!(self, children);
259        Ok(Arc::new(WindowAggExec::try_new(
260            self.window_expr.clone(),
261            children.swap_remove(0),
262            true,
263        )?))
264    }
265
266    fn execute(
267        &self,
268        partition: usize,
269        context: Arc<TaskContext>,
270    ) -> Result<SendableRecordBatchStream> {
271        let input = self.input.execute(partition, context)?;
272        let stream = Box::pin(WindowAggStream::new(
273            Arc::clone(&self.schema),
274            self.window_expr.clone(),
275            input,
276            BaselineMetrics::new(&self.metrics, partition),
277            self.partition_by_sort_keys()?,
278            self.ordered_partition_by_indices.clone(),
279        )?);
280        Ok(stream)
281    }
282
283    fn metrics(&self) -> Option<MetricsSet> {
284        Some(self.metrics.clone_inner())
285    }
286
287    fn partition_statistics(&self, partition: Option<usize>) -> Result<Statistics> {
288        let input_stat = self.input.partition_statistics(partition)?;
289        let win_cols = self.window_expr.len();
290        let input_cols = self.input.schema().fields().len();
291        // TODO stats: some windowing function will maintain invariants such as min, max...
292        let mut column_statistics = Vec::with_capacity(win_cols + input_cols);
293        // copy stats of the input to the beginning of the schema.
294        column_statistics.extend(input_stat.column_statistics);
295        for _ in 0..win_cols {
296            column_statistics.push(ColumnStatistics::new_unknown())
297        }
298        Ok(Statistics {
299            num_rows: input_stat.num_rows,
300            column_statistics,
301            total_byte_size: Precision::Absent,
302        })
303    }
304}
305
306/// Compute the window aggregate columns
307fn compute_window_aggregates(
308    window_expr: &[Arc<dyn WindowExpr>],
309    batch: &RecordBatch,
310) -> Result<Vec<ArrayRef>> {
311    window_expr
312        .iter()
313        .map(|window_expr| window_expr.evaluate(batch))
314        .collect()
315}
316
/// stream for window aggregation plan
///
/// Buffers all input batches, then computes the window results in a single
/// pass once the input is exhausted (see `compute_aggregates`).
pub struct WindowAggStream {
    /// Output schema: input columns followed by one column per window expression.
    schema: SchemaRef,
    /// Input stream whose batches are buffered until it is exhausted.
    input: SendableRecordBatchStream,
    /// Batches collected from `input` so far.
    batches: Vec<RecordBatch>,
    /// True once the final result batch has been emitted.
    finished: bool,
    /// Window function expressions to evaluate per partition.
    window_expr: Vec<Arc<dyn WindowExpr>>,
    /// Sort expressions corresponding to the PARTITION BY columns.
    partition_by_sort_keys: Vec<PhysicalSortExpr>,
    /// Execution metrics (compute time is recorded in `compute_aggregates`).
    baseline_metrics: BaselineMetrics,
    /// Indices selecting (and ordering) entries of `partition_by_sort_keys`;
    /// must have the same length as the first expression's PARTITION BY list.
    ordered_partition_by_indices: Vec<usize>,
}
328
329impl WindowAggStream {
330    /// Create a new WindowAggStream
331    pub fn new(
332        schema: SchemaRef,
333        window_expr: Vec<Arc<dyn WindowExpr>>,
334        input: SendableRecordBatchStream,
335        baseline_metrics: BaselineMetrics,
336        partition_by_sort_keys: Vec<PhysicalSortExpr>,
337        ordered_partition_by_indices: Vec<usize>,
338    ) -> Result<Self> {
339        // In WindowAggExec all partition by columns should be ordered.
340        assert_eq_or_internal_err!(
341            window_expr[0].partition_by().len(),
342            ordered_partition_by_indices.len(),
343            "All partition by columns should have an ordering"
344        );
345        Ok(Self {
346            schema,
347            input,
348            batches: vec![],
349            finished: false,
350            window_expr,
351            baseline_metrics,
352            partition_by_sort_keys,
353            ordered_partition_by_indices,
354        })
355    }
356
357    fn compute_aggregates(&self) -> Result<Option<RecordBatch>> {
358        // record compute time on drop
359        let _timer = self.baseline_metrics.elapsed_compute().timer();
360
361        let batch = concat_batches(&self.input.schema(), &self.batches)?;
362        if batch.num_rows() == 0 {
363            return Ok(None);
364        }
365
366        let partition_by_sort_keys = self
367            .ordered_partition_by_indices
368            .iter()
369            .map(|idx| self.partition_by_sort_keys[*idx].evaluate_to_sort_column(&batch))
370            .collect::<Result<Vec<_>>>()?;
371        let partition_points =
372            evaluate_partition_ranges(batch.num_rows(), &partition_by_sort_keys)?;
373
374        let mut partition_results = vec![];
375        // Calculate window cols
376        for partition_point in partition_points {
377            let length = partition_point.end - partition_point.start;
378            partition_results.push(compute_window_aggregates(
379                &self.window_expr,
380                &batch.slice(partition_point.start, length),
381            )?)
382        }
383        let columns = transpose(partition_results)
384            .iter()
385            .map(|elems| concat(&elems.iter().map(|x| x.as_ref()).collect::<Vec<_>>()))
386            .collect::<Vec<_>>()
387            .into_iter()
388            .collect::<Result<Vec<ArrayRef>, ArrowError>>()?;
389
390        // combine with the original cols
391        // note the setup of window aggregates is that they newly calculated window
392        // expression results are always appended to the columns
393        let mut batch_columns = batch.columns().to_vec();
394        // calculate window cols
395        batch_columns.extend_from_slice(&columns);
396        Ok(Some(RecordBatch::try_new(
397            Arc::clone(&self.schema),
398            batch_columns,
399        )?))
400    }
401}
402
403impl Stream for WindowAggStream {
404    type Item = Result<RecordBatch>;
405
406    fn poll_next(
407        mut self: Pin<&mut Self>,
408        cx: &mut Context<'_>,
409    ) -> Poll<Option<Self::Item>> {
410        let poll = self.poll_next_inner(cx);
411        self.baseline_metrics.record_poll(poll)
412    }
413}
414
415impl WindowAggStream {
416    #[inline]
417    fn poll_next_inner(
418        &mut self,
419        cx: &mut Context<'_>,
420    ) -> Poll<Option<Result<RecordBatch>>> {
421        if self.finished {
422            return Poll::Ready(None);
423        }
424
425        loop {
426            return Poll::Ready(Some(match ready!(self.input.poll_next_unpin(cx)) {
427                Some(Ok(batch)) => {
428                    self.batches.push(batch);
429                    continue;
430                }
431                Some(Err(e)) => Err(e),
432                None => {
433                    let Some(result) = self.compute_aggregates()? else {
434                        return Poll::Ready(None);
435                    };
436                    self.finished = true;
437                    // Empty record batches should not be emitted.
438                    // They need to be treated as  [`Option<RecordBatch>`]es and handled separately
439                    debug_assert!(result.num_rows() > 0);
440                    Ok(result)
441                }
442            }));
443        }
444    }
445}
446
impl RecordBatchStream for WindowAggStream {
    /// Get the output schema of this stream (shared, cheap `Arc` clone).
    fn schema(&self) -> SchemaRef {
        Arc::clone(&self.schema)
    }
}