datafusion_physical_expr/window/aggregate.rs

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

//! Physical exec for aggregate window function expressions.

use std::any::Any;
use std::ops::Range;
use std::sync::Arc;

use crate::aggregate::AggregateFunctionExpr;
use crate::window::standard::add_new_ordering_expr_with_partition_by;
use crate::window::window_expr::{filter_array, AggregateWindowExpr, WindowFn};
use crate::window::{
    PartitionBatches, PartitionWindowAggStates, SlidingAggregateWindowExpr, WindowExpr,
};
use crate::{EquivalenceProperties, PhysicalExpr};

use arrow::array::ArrayRef;
use arrow::array::BooleanArray;
use arrow::datatypes::FieldRef;
use arrow::record_batch::RecordBatch;
use datafusion_common::{exec_datafusion_err, Result, ScalarValue};
use datafusion_expr::{Accumulator, WindowFrame, WindowFrameBound, WindowFrameUnits};
use datafusion_physical_expr_common::sort_expr::PhysicalSortExpr;

/// A window expr that takes the form of an aggregate function.
///
/// See comments on [`WindowExpr`] for more details.
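///
/// For example, a SQL aggregate over a window such as
/// `SUM(amount) OVER (PARTITION BY region ORDER BY ts ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)`
/// can be evaluated by this expression: the frame only ever grows, so the accumulator
/// never needs to retract rows. (The column names above are illustrative only.)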
#[derive(Debug)]
pub struct PlainAggregateWindowExpr {
    /// The aggregate function to evaluate over each window frame
    aggregate: Arc<AggregateFunctionExpr>,
    /// PARTITION BY expressions
    partition_by: Vec<Arc<dyn PhysicalExpr>>,
    /// ORDER BY expressions
    order_by: Vec<PhysicalSortExpr>,
    /// Window frame specification
    window_frame: Arc<WindowFrame>,
    /// Whether every row in the partition shares the same window frame
    /// (see `is_window_constant_in_partition`)
    is_constant_in_partition: bool,
    /// Optional filter predicate applied to input rows before accumulation
    filter: Option<Arc<dyn PhysicalExpr>>,
}

impl PlainAggregateWindowExpr {
    /// Create a new aggregate window function expression
    pub fn new(
        aggregate: Arc<AggregateFunctionExpr>,
        partition_by: &[Arc<dyn PhysicalExpr>],
        order_by: &[PhysicalSortExpr],
        window_frame: Arc<WindowFrame>,
        filter: Option<Arc<dyn PhysicalExpr>>,
    ) -> Self {
        let is_constant_in_partition =
            Self::is_window_constant_in_partition(order_by, &window_frame);
        Self {
            aggregate,
            partition_by: partition_by.to_vec(),
            order_by: order_by.to_vec(),
            window_frame,
            is_constant_in_partition,
            filter,
        }
    }

    /// Get the aggregate expression backing this window expression
    pub fn get_aggregate_expr(&self) -> &AggregateFunctionExpr {
        &self.aggregate
    }

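    /// If the aggregate's result is ordered within each partition (for example, a running
    /// count over an ever-growing frame is non-decreasing), register that ordering for the
    /// output column so downstream operators can take advantage of it.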
    pub fn add_equal_orderings(
        &self,
        eq_properties: &mut EquivalenceProperties,
        window_expr_index: usize,
    ) -> Result<()> {
        if let Some(expr) = self
            .get_aggregate_expr()
            .get_result_ordering(window_expr_index)
        {
            add_new_ordering_expr_with_partition_by(
                eq_properties,
                expr,
                &self.partition_by,
            )?;
        }
        Ok(())
    }

    // Returns true if every row in the partition has the same window frame. In that case,
    // we can avoid recomputing the frame bounds and the aggregate for each row, since the
    // result is identical across the partition.
    //
    // This holds when both bounds satisfy either condition below:
    //  1. The bound is unbounded (`Preceding` or `Following`)
    //  2. The bound is `CurrentRow` with `Range` units and no ORDER BY clause. Without an
    //     ORDER BY, the range is not well defined; following PostgreSQL's convention, we
    //     interpret this as the entire partition being used for the current window frame.
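    //
    // For example (illustrative frame clauses):
    //   ROWS  BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING        -> constant
    //   RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW (no ORDER BY)  -> constant
    //   ROWS  BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW                -> not constant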
    fn is_window_constant_in_partition(
        order_by: &[PhysicalSortExpr],
        window_frame: &WindowFrame,
    ) -> bool {
        let is_constant_bound = |bound: &WindowFrameBound| match bound {
            WindowFrameBound::CurrentRow => {
                window_frame.units == WindowFrameUnits::Range && order_by.is_empty()
            }
            _ => bound.is_unbounded(),
        };

        is_constant_bound(&window_frame.start_bound)
            && is_constant_bound(&window_frame.end_bound)
    }
}

/// Peer-based evaluation: the batch is pre-sorted on the sort columns, so at each partition
/// point we evaluate the peer group (e.g. SUM or MAX yields the same result for all peers)
/// and concatenate the results.
impl WindowExpr for PlainAggregateWindowExpr {
    /// Return a reference to Any that can be used for downcasting
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn field(&self) -> Result<FieldRef> {
        Ok(self.aggregate.field())
    }

    fn name(&self) -> &str {
        self.aggregate.name()
    }

    fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> {
        self.aggregate.expressions()
    }

    fn evaluate(&self, batch: &RecordBatch) -> Result<ArrayRef> {
        self.aggregate_evaluate(batch)
    }

    fn evaluate_stateful(
        &self,
        partition_batches: &PartitionBatches,
        window_agg_state: &mut PartitionWindowAggStates,
    ) -> Result<()> {
        self.aggregate_evaluate_stateful(partition_batches, window_agg_state)?;

        // Update window frame range for each partition. As we know that
        // non-sliding aggregations will never call `retract_batch`, this value
        // can safely increase, and we can remove "old" parts of the state.
        // This enables us to run queries involving UNBOUNDED PRECEDING frames
        // using bounded memory for suitable aggregations.
        for partition_row in partition_batches.keys() {
            let window_state = window_agg_state
                .get_mut(partition_row)
                .ok_or_else(|| exec_datafusion_err!("Cannot find state"))?;
            let state = &mut window_state.state;
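            // When the frame start is unbounded, every row before the current frame end is
            // already folded into the accumulator, so we only keep the tail of the range;
            // the state pruning logic can then drop the earlier input rows.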
            if self.window_frame.start_bound.is_unbounded() {
                state.window_frame_range.start =
                    state.window_frame_range.end.saturating_sub(1);
            }
        }
        Ok(())
    }

    fn partition_by(&self) -> &[Arc<dyn PhysicalExpr>] {
        &self.partition_by
    }

    fn order_by(&self) -> &[PhysicalSortExpr] {
        &self.order_by
    }

    fn get_window_frame(&self) -> &Arc<WindowFrame> {
        &self.window_frame
    }

    fn get_reverse_expr(&self) -> Option<Arc<dyn WindowExpr>> {
        self.aggregate.reverse_expr().map(|reverse_expr| {
            let reverse_window_frame = self.window_frame.reverse();
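            // If the reversed frame still only ever grows, the plain (non-retracting)
            // variant can evaluate it; otherwise we need the sliding variant, whose
            // accumulator must support retracting rows as the frame start advances.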
            if reverse_window_frame.is_ever_expanding() {
                Arc::new(PlainAggregateWindowExpr::new(
                    Arc::new(reverse_expr),
                    &self.partition_by.clone(),
                    &self
                        .order_by
                        .iter()
                        .map(|e| e.reverse())
                        .collect::<Vec<_>>(),
                    Arc::new(self.window_frame.reverse()),
                    self.filter.clone(),
                )) as _
            } else {
                Arc::new(SlidingAggregateWindowExpr::new(
                    Arc::new(reverse_expr),
                    &self.partition_by.clone(),
                    &self
                        .order_by
                        .iter()
                        .map(|e| e.reverse())
                        .collect::<Vec<_>>(),
                    Arc::new(self.window_frame.reverse()),
                    self.filter.clone(),
                )) as _
            }
        })
    }

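    /// The operator can use bounded memory only when the frame end is bounded; with an
    /// unbounded end (e.g. UNBOUNDED FOLLOWING), each row's result depends on the entire
    /// partition, so results cannot be emitted incrementally.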
    fn uses_bounded_memory(&self) -> bool {
        !self.window_frame.end_bound.is_unbounded()
    }

    fn create_window_fn(&self) -> Result<WindowFn> {
        Ok(WindowFn::Aggregate(self.get_accumulator()?))
    }
}

impl AggregateWindowExpr for PlainAggregateWindowExpr {
    fn get_accumulator(&self) -> Result<Box<dyn Accumulator>> {
        self.aggregate.create_accumulator()
    }

    fn filter_expr(&self) -> Option<&Arc<dyn PhysicalExpr>> {
        self.filter.as_ref()
    }

    /// For a given range, calculate the accumulation result inside the range on
    /// `value_slice` and update the accumulator state.
    // We assume that `cur_range` contains `last_range` and that their start points
    // are the same. In other words, if `last_range` is `Range{start: a, end: b}` and
    // `cur_range` is `Range{start: a1, end: b1}`, it is guaranteed that a1 = a and b1 >= b.
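    // For example, with `last_range = 0..3` and `cur_range = 0..5` (illustrative values),
    // only rows 3..5 are sliced out and passed to `update_batch` below.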
    fn get_aggregate_result_inside_range(
        &self,
        last_range: &Range<usize>,
        cur_range: &Range<usize>,
        value_slice: &[ArrayRef],
        accumulator: &mut Box<dyn Accumulator>,
        filter_mask: Option<&BooleanArray>,
    ) -> Result<ScalarValue> {
        if cur_range.start == cur_range.end {
            self.aggregate
                .default_value(self.aggregate.field().data_type())
        } else {
            // Accumulate any new rows that have entered the window:
            let update_bound = cur_range.end - last_range.end;
            // A non-sliding aggregation only processes new data; it never
            // deals with expiring data, as its starting point is always the
            // same (i.e. the beginning of the table/frame). Hence, we do not
            // call `retract_batch`.
            if update_bound > 0 {
                let slice_mask =
                    filter_mask.map(|m| m.slice(last_range.end, update_bound));
                let update: Vec<ArrayRef> = value_slice
                    .iter()
                    .map(|v| v.slice(last_range.end, update_bound))
                    .map(|arr| match &slice_mask {
                        Some(m) => filter_array(&arr, m),
                        None => Ok(arr),
                    })
                    .collect::<Result<Vec<_>>>()?;
                accumulator.update_batch(&update)?
            }
            accumulator.evaluate()
        }
    }

    fn is_constant_in_partition(&self) -> bool {
        self.is_constant_in_partition
    }
}