// grafeo_engine/query/executor/mod.rs
1//! Query executor.
2//!
3//! Executes physical plans and produces results.
4
5use crate::config::AdaptiveConfig;
6use crate::database::QueryResult;
7use grafeo_common::types::{LogicalType, Value};
8use grafeo_common::utils::error::{Error, Result};
9use grafeo_core::execution::operators::{Operator, OperatorError};
10use grafeo_core::execution::{
11    AdaptiveContext, AdaptiveSummary, CardinalityTrackingWrapper, DataChunk, SharedAdaptiveContext,
12};
13
/// Executes a physical operator tree and collects results.
///
/// Column types left as `LogicalType::Any` are refined from the first
/// non-empty chunk observed during execution; column names are not refined.
pub struct Executor {
    /// Column names for the result.
    columns: Vec<String>,
    /// Column types for the result. `LogicalType::Any` entries act as
    /// placeholders to be replaced by types captured from the data.
    column_types: Vec<LogicalType>,
}
21
22impl Executor {
23    /// Creates a new executor.
24    #[must_use]
25    pub fn new() -> Self {
26        Self {
27            columns: Vec::new(),
28            column_types: Vec::new(),
29        }
30    }
31
32    /// Creates an executor with specified column names.
33    #[must_use]
34    pub fn with_columns(columns: Vec<String>) -> Self {
35        let len = columns.len();
36        Self {
37            columns,
38            column_types: vec![LogicalType::Any; len],
39        }
40    }
41
42    /// Creates an executor with specified column names and types.
43    #[must_use]
44    pub fn with_columns_and_types(columns: Vec<String>, column_types: Vec<LogicalType>) -> Self {
45        Self {
46            columns,
47            column_types,
48        }
49    }
50
51    /// Executes a physical operator and collects all results.
52    ///
53    /// # Errors
54    ///
55    /// Returns an error if operator execution fails.
56    pub fn execute(&self, operator: &mut dyn Operator) -> Result<QueryResult> {
57        let mut result = QueryResult::with_types(self.columns.clone(), self.column_types.clone());
58        let mut types_captured = !result.column_types.iter().all(|t| *t == LogicalType::Any);
59
60        loop {
61            match operator.next() {
62                Ok(Some(chunk)) => {
63                    // Capture column types from first non-empty chunk
64                    if !types_captured && chunk.column_count() > 0 {
65                        self.capture_column_types(&chunk, &mut result);
66                        types_captured = true;
67                    }
68                    self.collect_chunk(&chunk, &mut result)?;
69                }
70                Ok(None) => break,
71                Err(err) => return Err(convert_operator_error(err)),
72            }
73        }
74
75        Ok(result)
76    }
77
78    /// Executes and returns at most `limit` rows.
79    ///
80    /// # Errors
81    ///
82    /// Returns an error if operator execution fails.
83    pub fn execute_with_limit(
84        &self,
85        operator: &mut dyn Operator,
86        limit: usize,
87    ) -> Result<QueryResult> {
88        let mut result = QueryResult::with_types(self.columns.clone(), self.column_types.clone());
89        let mut collected = 0;
90        let mut types_captured = !result.column_types.iter().all(|t| *t == LogicalType::Any);
91
92        loop {
93            if collected >= limit {
94                break;
95            }
96
97            match operator.next() {
98                Ok(Some(chunk)) => {
99                    // Capture column types from first non-empty chunk
100                    if !types_captured && chunk.column_count() > 0 {
101                        self.capture_column_types(&chunk, &mut result);
102                        types_captured = true;
103                    }
104                    let remaining = limit - collected;
105                    collected += self.collect_chunk_limited(&chunk, &mut result, remaining)?;
106                }
107                Ok(None) => break,
108                Err(err) => return Err(convert_operator_error(err)),
109            }
110        }
111
112        Ok(result)
113    }
114
115    /// Captures column types from a DataChunk.
116    fn capture_column_types(&self, chunk: &DataChunk, result: &mut QueryResult) {
117        let col_count = chunk.column_count();
118        result.column_types = Vec::with_capacity(col_count);
119        for col_idx in 0..col_count {
120            let col_type = chunk
121                .column(col_idx)
122                .map_or(LogicalType::Any, |col| col.data_type().clone());
123            result.column_types.push(col_type);
124        }
125    }
126
127    /// Collects all rows from a DataChunk into the result.
128    ///
129    /// Uses `selected_indices()` to correctly handle chunks with selection vectors
130    /// (e.g., after filtering operations).
131    fn collect_chunk(&self, chunk: &DataChunk, result: &mut QueryResult) -> Result<usize> {
132        let col_count = chunk.column_count();
133        let mut collected = 0;
134
135        for row_idx in chunk.selected_indices() {
136            let mut row = Vec::with_capacity(col_count);
137            for col_idx in 0..col_count {
138                let value = chunk
139                    .column(col_idx)
140                    .and_then(|col| col.get_value(row_idx))
141                    .unwrap_or(Value::Null);
142                row.push(value);
143            }
144            result.rows.push(row);
145            collected += 1;
146        }
147
148        Ok(collected)
149    }
150
151    /// Collects up to `limit` rows from a DataChunk.
152    ///
153    /// Uses `selected_indices()` to correctly handle chunks with selection vectors
154    /// (e.g., after filtering operations).
155    fn collect_chunk_limited(
156        &self,
157        chunk: &DataChunk,
158        result: &mut QueryResult,
159        limit: usize,
160    ) -> Result<usize> {
161        let col_count = chunk.column_count();
162        let mut collected = 0;
163
164        for row_idx in chunk.selected_indices() {
165            if collected >= limit {
166                break;
167            }
168            let mut row = Vec::with_capacity(col_count);
169            for col_idx in 0..col_count {
170                let value = chunk
171                    .column(col_idx)
172                    .and_then(|col| col.get_value(row_idx))
173                    .unwrap_or(Value::Null);
174                row.push(value);
175            }
176            result.rows.push(row);
177            collected += 1;
178        }
179
180        Ok(collected)
181    }
182
183    /// Executes a physical operator with adaptive cardinality tracking.
184    ///
185    /// This wraps the operator in a cardinality tracking layer and monitors
186    /// deviation from estimates during execution. The adaptive summary is
187    /// returned alongside the query result.
188    ///
189    /// # Arguments
190    ///
191    /// * `operator` - The root physical operator to execute
192    /// * `adaptive_context` - Context with cardinality estimates from planning
193    /// * `config` - Adaptive execution configuration
194    ///
195    /// # Errors
196    ///
197    /// Returns an error if operator execution fails.
198    pub fn execute_adaptive(
199        &self,
200        operator: Box<dyn Operator>,
201        adaptive_context: Option<AdaptiveContext>,
202        config: &AdaptiveConfig,
203    ) -> Result<(QueryResult, Option<AdaptiveSummary>)> {
204        // If adaptive is disabled or no context, fall back to normal execution
205        if !config.enabled {
206            let mut op = operator;
207            let result = self.execute(op.as_mut())?;
208            return Ok((result, None));
209        }
210
211        let Some(ctx) = adaptive_context else {
212            let mut op = operator;
213            let result = self.execute(op.as_mut())?;
214            return Ok((result, None));
215        };
216
217        // Create shared context for tracking
218        let shared_ctx = SharedAdaptiveContext::from_context(AdaptiveContext::with_thresholds(
219            config.threshold,
220            config.min_rows,
221        ));
222
223        // Copy estimates from the planning context to the shared tracking context
224        for (op_id, checkpoint) in ctx.all_checkpoints() {
225            if let Some(mut inner) = shared_ctx.snapshot() {
226                inner.set_estimate(op_id, checkpoint.estimated);
227            }
228        }
229
230        // Wrap operator with tracking
231        let mut wrapped = CardinalityTrackingWrapper::new(operator, "root", shared_ctx.clone());
232
233        // Execute with tracking
234        let mut result = QueryResult::with_types(self.columns.clone(), self.column_types.clone());
235        let mut types_captured = !result.column_types.iter().all(|t| *t == LogicalType::Any);
236        let mut total_rows: u64 = 0;
237        let check_interval = config.min_rows;
238
239        loop {
240            match wrapped.next() {
241                Ok(Some(chunk)) => {
242                    let chunk_rows = chunk.row_count();
243                    total_rows += chunk_rows as u64;
244
245                    // Capture column types from first non-empty chunk
246                    if !types_captured && chunk.column_count() > 0 {
247                        self.capture_column_types(&chunk, &mut result);
248                        types_captured = true;
249                    }
250                    self.collect_chunk(&chunk, &mut result)?;
251
252                    // Periodically check for significant deviation
253                    if total_rows >= check_interval
254                        && total_rows.is_multiple_of(check_interval)
255                        && shared_ctx.should_reoptimize()
256                    {
257                        // For now, just log/note that re-optimization would trigger
258                        // Full re-optimization would require plan regeneration
259                        // which is a more invasive change
260                    }
261                }
262                Ok(None) => break,
263                Err(err) => return Err(convert_operator_error(err)),
264            }
265        }
266
267        // Get final summary
268        let summary = shared_ctx.snapshot().map(|ctx| ctx.summary());
269
270        Ok((result, summary))
271    }
272}
273
274impl Default for Executor {
275    fn default() -> Self {
276        Self::new()
277    }
278}
279
280/// Converts an operator error to a common error.
281fn convert_operator_error(err: OperatorError) -> Error {
282    match err {
283        OperatorError::TypeMismatch { expected, found } => Error::TypeMismatch { expected, found },
284        OperatorError::ColumnNotFound(name) => {
285            Error::InvalidValue(format!("Column not found: {name}"))
286        }
287        OperatorError::Execution(msg) => Error::Internal(msg),
288    }
289}
290
#[cfg(test)]
mod tests {
    use super::*;
    use grafeo_common::types::LogicalType;
    use grafeo_core::execution::DataChunk;

    /// Mock operator that emits a fixed sequence of integers, split into
    /// chunks of at most `chunk_size` values each.
    struct MockIntOperator {
        values: Vec<i64>,
        cursor: usize,
        chunk_size: usize,
    }

    impl MockIntOperator {
        fn new(values: Vec<i64>, chunk_size: usize) -> Self {
            Self {
                values,
                cursor: 0,
                chunk_size,
            }
        }
    }

    impl Operator for MockIntOperator {
        fn next(&mut self) -> grafeo_core::execution::operators::OperatorResult {
            let pending = &self.values[self.cursor..];
            if pending.is_empty() {
                return Ok(None);
            }

            let take = pending.len().min(self.chunk_size);
            let mut chunk = DataChunk::with_capacity(&[LogicalType::Int64], self.chunk_size);
            {
                let col = chunk.column_mut(0).unwrap();
                for &value in &pending[..take] {
                    col.push_int64(value);
                }
            }
            chunk.set_count(take);
            self.cursor += take;

            Ok(Some(chunk))
        }

        fn reset(&mut self) {
            self.cursor = 0;
        }

        fn name(&self) -> &'static str {
            "MockInt"
        }
    }

    /// Operator that reports exhaustion immediately; used for empty results.
    struct EmptyOperator;

    impl Operator for EmptyOperator {
        fn next(&mut self) -> grafeo_core::execution::operators::OperatorResult {
            Ok(None)
        }

        fn reset(&mut self) {}

        fn name(&self) -> &'static str {
            "Empty"
        }
    }

    #[test]
    fn test_executor_empty() {
        let executor = Executor::with_columns(vec!["a".to_string()]);
        let mut source = EmptyOperator;

        let result = executor.execute(&mut source).unwrap();
        assert!(result.is_empty());
        assert_eq!(result.column_count(), 1);
    }

    #[test]
    fn test_executor_single_chunk() {
        let executor = Executor::with_columns(vec!["value".to_string()]);
        let mut source = MockIntOperator::new(vec![1, 2, 3], 10);

        let result = executor.execute(&mut source).unwrap();
        assert_eq!(result.row_count(), 3);
        for (idx, expected) in [1_i64, 2, 3].into_iter().enumerate() {
            assert_eq!(result.rows[idx][0], Value::Int64(expected));
        }
    }

    #[test]
    fn test_executor_with_limit() {
        let executor = Executor::with_columns(vec!["value".to_string()]);
        let mut source = MockIntOperator::new((0..10).collect(), 100);

        let result = executor.execute_with_limit(&mut source, 5).unwrap();
        assert_eq!(result.row_count(), 5);
    }
}