// grafeo_engine/query/executor/mod.rs

//! Query executor.
//!
//! Executes physical plans and produces results.

use crate::config::AdaptiveConfig;
use crate::database::QueryResult;
use grafeo_common::types::{LogicalType, Value};
use grafeo_common::utils::error::{Error, Result};
use grafeo_core::execution::operators::{Operator, OperatorError};
use grafeo_core::execution::{
    AdaptiveContext, AdaptiveSummary, CardinalityTrackingWrapper, DataChunk, SharedAdaptiveContext,
};
13
/// Executes a physical operator tree and collects results.
///
/// Holds the output schema (column names and types) that labels the rows
/// produced by the operator tree.
pub struct Executor {
    /// Column names for the result.
    columns: Vec<String>,
    /// Column types for the result. Entries left as `LogicalType::Any` are
    /// replaced with concrete types captured from the first non-empty chunk
    /// during execution.
    column_types: Vec<LogicalType>,
}
21
22impl Executor {
23    /// Creates a new executor.
24    #[must_use]
25    pub fn new() -> Self {
26        Self {
27            columns: Vec::new(),
28            column_types: Vec::new(),
29        }
30    }
31
32    /// Creates an executor with specified column names.
33    #[must_use]
34    pub fn with_columns(columns: Vec<String>) -> Self {
35        let len = columns.len();
36        Self {
37            columns,
38            column_types: vec![LogicalType::Any; len],
39        }
40    }
41
42    /// Creates an executor with specified column names and types.
43    #[must_use]
44    pub fn with_columns_and_types(columns: Vec<String>, column_types: Vec<LogicalType>) -> Self {
45        Self {
46            columns,
47            column_types,
48        }
49    }
50
51    /// Executes a physical operator and collects all results.
52    ///
53    /// # Errors
54    ///
55    /// Returns an error if operator execution fails.
56    pub fn execute(&self, operator: &mut dyn Operator) -> Result<QueryResult> {
57        let mut result = QueryResult::with_types(self.columns.clone(), self.column_types.clone());
58        let mut types_captured = !result.column_types.iter().all(|t| *t == LogicalType::Any);
59
60        loop {
61            match operator.next() {
62                Ok(Some(chunk)) => {
63                    // Capture column types from first non-empty chunk
64                    if !types_captured && chunk.column_count() > 0 {
65                        self.capture_column_types(&chunk, &mut result);
66                        types_captured = true;
67                    }
68                    self.collect_chunk(&chunk, &mut result)?;
69                }
70                Ok(None) => break,
71                Err(err) => return Err(convert_operator_error(err)),
72            }
73        }
74
75        Ok(result)
76    }
77
78    /// Executes and returns at most `limit` rows.
79    ///
80    /// # Errors
81    ///
82    /// Returns an error if operator execution fails.
83    pub fn execute_with_limit(
84        &self,
85        operator: &mut dyn Operator,
86        limit: usize,
87    ) -> Result<QueryResult> {
88        let mut result = QueryResult::with_types(self.columns.clone(), self.column_types.clone());
89        let mut collected = 0;
90        let mut types_captured = !result.column_types.iter().all(|t| *t == LogicalType::Any);
91
92        loop {
93            if collected >= limit {
94                break;
95            }
96
97            match operator.next() {
98                Ok(Some(chunk)) => {
99                    // Capture column types from first non-empty chunk
100                    if !types_captured && chunk.column_count() > 0 {
101                        self.capture_column_types(&chunk, &mut result);
102                        types_captured = true;
103                    }
104                    let remaining = limit - collected;
105                    collected += self.collect_chunk_limited(&chunk, &mut result, remaining)?;
106                }
107                Ok(None) => break,
108                Err(err) => return Err(convert_operator_error(err)),
109            }
110        }
111
112        Ok(result)
113    }
114
115    /// Captures column types from a DataChunk.
116    fn capture_column_types(&self, chunk: &DataChunk, result: &mut QueryResult) {
117        let col_count = chunk.column_count();
118        result.column_types = Vec::with_capacity(col_count);
119        for col_idx in 0..col_count {
120            let col_type = chunk
121                .column(col_idx)
122                .map(|col| col.data_type().clone())
123                .unwrap_or(LogicalType::Any);
124            result.column_types.push(col_type);
125        }
126    }
127
128    /// Collects all rows from a DataChunk into the result.
129    ///
130    /// Uses `selected_indices()` to correctly handle chunks with selection vectors
131    /// (e.g., after filtering operations).
132    fn collect_chunk(&self, chunk: &DataChunk, result: &mut QueryResult) -> Result<usize> {
133        let col_count = chunk.column_count();
134        let mut collected = 0;
135
136        for row_idx in chunk.selected_indices() {
137            let mut row = Vec::with_capacity(col_count);
138            for col_idx in 0..col_count {
139                let value = chunk
140                    .column(col_idx)
141                    .and_then(|col| col.get_value(row_idx))
142                    .unwrap_or(Value::Null);
143                row.push(value);
144            }
145            result.rows.push(row);
146            collected += 1;
147        }
148
149        Ok(collected)
150    }
151
152    /// Collects up to `limit` rows from a DataChunk.
153    ///
154    /// Uses `selected_indices()` to correctly handle chunks with selection vectors
155    /// (e.g., after filtering operations).
156    fn collect_chunk_limited(
157        &self,
158        chunk: &DataChunk,
159        result: &mut QueryResult,
160        limit: usize,
161    ) -> Result<usize> {
162        let col_count = chunk.column_count();
163        let mut collected = 0;
164
165        for row_idx in chunk.selected_indices() {
166            if collected >= limit {
167                break;
168            }
169            let mut row = Vec::with_capacity(col_count);
170            for col_idx in 0..col_count {
171                let value = chunk
172                    .column(col_idx)
173                    .and_then(|col| col.get_value(row_idx))
174                    .unwrap_or(Value::Null);
175                row.push(value);
176            }
177            result.rows.push(row);
178            collected += 1;
179        }
180
181        Ok(collected)
182    }
183
184    /// Executes a physical operator with adaptive cardinality tracking.
185    ///
186    /// This wraps the operator in a cardinality tracking layer and monitors
187    /// deviation from estimates during execution. The adaptive summary is
188    /// returned alongside the query result.
189    ///
190    /// # Arguments
191    ///
192    /// * `operator` - The root physical operator to execute
193    /// * `adaptive_context` - Context with cardinality estimates from planning
194    /// * `config` - Adaptive execution configuration
195    ///
196    /// # Errors
197    ///
198    /// Returns an error if operator execution fails.
199    pub fn execute_adaptive(
200        &self,
201        operator: Box<dyn Operator>,
202        adaptive_context: Option<AdaptiveContext>,
203        config: &AdaptiveConfig,
204    ) -> Result<(QueryResult, Option<AdaptiveSummary>)> {
205        // If adaptive is disabled or no context, fall back to normal execution
206        if !config.enabled {
207            let mut op = operator;
208            let result = self.execute(op.as_mut())?;
209            return Ok((result, None));
210        }
211
212        let Some(ctx) = adaptive_context else {
213            let mut op = operator;
214            let result = self.execute(op.as_mut())?;
215            return Ok((result, None));
216        };
217
218        // Create shared context for tracking
219        let shared_ctx = SharedAdaptiveContext::from_context(AdaptiveContext::with_thresholds(
220            config.threshold,
221            config.min_rows,
222        ));
223
224        // Copy estimates from the planning context to the shared tracking context
225        for (op_id, checkpoint) in ctx.all_checkpoints() {
226            if let Some(mut inner) = shared_ctx.snapshot() {
227                inner.set_estimate(op_id, checkpoint.estimated);
228            }
229        }
230
231        // Wrap operator with tracking
232        let mut wrapped = CardinalityTrackingWrapper::new(operator, "root", shared_ctx.clone());
233
234        // Execute with tracking
235        let mut result = QueryResult::with_types(self.columns.clone(), self.column_types.clone());
236        let mut types_captured = !result.column_types.iter().all(|t| *t == LogicalType::Any);
237        let mut total_rows: u64 = 0;
238        let check_interval = config.min_rows;
239
240        loop {
241            match wrapped.next() {
242                Ok(Some(chunk)) => {
243                    let chunk_rows = chunk.row_count();
244                    total_rows += chunk_rows as u64;
245
246                    // Capture column types from first non-empty chunk
247                    if !types_captured && chunk.column_count() > 0 {
248                        self.capture_column_types(&chunk, &mut result);
249                        types_captured = true;
250                    }
251                    self.collect_chunk(&chunk, &mut result)?;
252
253                    // Periodically check for significant deviation
254                    if total_rows >= check_interval
255                        && total_rows.is_multiple_of(check_interval)
256                        && shared_ctx.should_reoptimize()
257                    {
258                        // For now, just log/note that re-optimization would trigger
259                        // Full re-optimization would require plan regeneration
260                        // which is a more invasive change
261                    }
262                }
263                Ok(None) => break,
264                Err(err) => return Err(convert_operator_error(err)),
265            }
266        }
267
268        // Get final summary
269        let summary = shared_ctx.snapshot().map(|ctx| ctx.summary());
270
271        Ok((result, summary))
272    }
273}
274
275impl Default for Executor {
276    fn default() -> Self {
277        Self::new()
278    }
279}
280
281/// Converts an operator error to a common error.
282fn convert_operator_error(err: OperatorError) -> Error {
283    match err {
284        OperatorError::TypeMismatch { expected, found } => Error::TypeMismatch { expected, found },
285        OperatorError::ColumnNotFound(name) => {
286            Error::InvalidValue(format!("Column not found: {name}"))
287        }
288        OperatorError::Execution(msg) => Error::Internal(msg),
289    }
290}
291
#[cfg(test)]
mod tests {
    use super::*;
    use grafeo_common::types::LogicalType;
    use grafeo_core::execution::DataChunk;

    /// A mock operator that generates chunks with integer data on demand.
    struct MockIntOperator {
        // Values to emit, in order.
        values: Vec<i64>,
        // Index of the next value to emit.
        position: usize,
        // Maximum number of rows per produced chunk.
        chunk_size: usize,
    }

    impl MockIntOperator {
        fn new(values: Vec<i64>, chunk_size: usize) -> Self {
            Self {
                values,
                position: 0,
                chunk_size,
            }
        }
    }

    impl Operator for MockIntOperator {
        fn next(&mut self) -> grafeo_core::execution::operators::OperatorResult {
            if self.position >= self.values.len() {
                return Ok(None);
            }

            let end = (self.position + self.chunk_size).min(self.values.len());
            let mut chunk = DataChunk::with_capacity(&[LogicalType::Int64], self.chunk_size);

            {
                let col = chunk.column_mut(0).unwrap();
                // Iterate the pending slice directly instead of indexing by
                // position (avoids per-element bounds checks and reads better).
                for &value in &self.values[self.position..end] {
                    col.push_int64(value);
                }
            }
            chunk.set_count(end - self.position);
            self.position = end;

            Ok(Some(chunk))
        }

        fn reset(&mut self) {
            self.position = 0;
        }

        fn name(&self) -> &'static str {
            "MockInt"
        }
    }

    /// Empty mock operator for testing empty results.
    struct EmptyOperator;

    impl Operator for EmptyOperator {
        fn next(&mut self) -> grafeo_core::execution::operators::OperatorResult {
            Ok(None)
        }

        fn reset(&mut self) {}

        fn name(&self) -> &'static str {
            "Empty"
        }
    }

    #[test]
    fn test_executor_empty() {
        let executor = Executor::with_columns(vec!["a".to_string()]);
        let mut op = EmptyOperator;

        let result = executor.execute(&mut op).unwrap();
        assert!(result.is_empty());
        assert_eq!(result.column_count(), 1);
    }

    #[test]
    fn test_executor_single_chunk() {
        let executor = Executor::with_columns(vec!["value".to_string()]);
        let mut op = MockIntOperator::new(vec![1, 2, 3], 10);

        let result = executor.execute(&mut op).unwrap();
        assert_eq!(result.row_count(), 3);
        assert_eq!(result.rows[0][0], Value::Int64(1));
        assert_eq!(result.rows[1][0], Value::Int64(2));
        assert_eq!(result.rows[2][0], Value::Int64(3));
    }

    #[test]
    fn test_executor_multiple_chunks() {
        // chunk_size 4 forces the operator to emit 3 chunks (4 + 4 + 2),
        // exercising the executor's multi-chunk collection path.
        let executor = Executor::with_columns(vec!["value".to_string()]);
        let mut op = MockIntOperator::new((0..10).collect(), 4);

        let result = executor.execute(&mut op).unwrap();
        assert_eq!(result.row_count(), 10);
        assert_eq!(result.rows[0][0], Value::Int64(0));
        assert_eq!(result.rows[9][0], Value::Int64(9));
    }

    #[test]
    fn test_executor_with_limit() {
        let executor = Executor::with_columns(vec!["value".to_string()]);
        let mut op = MockIntOperator::new((0..10).collect(), 100);

        let result = executor.execute_with_limit(&mut op, 5).unwrap();
        assert_eq!(result.row_count(), 5);
    }
}