1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
// SPDX-License-Identifier: Apache-2.0
//! The `SqlPlan` enum — top-level plan produced by the SQL planner.
use crate::fts_types::FtsQuery;
use crate::temporal::TemporalScope;
use crate::types_array;
use crate::types_expr::{SqlExpr, SqlPayloadAtom, SqlValue};
pub use nodedb_types::vector_distance::DistanceMetric;
use crate::types::filter::Filter;
use crate::types::query::{
AggregateExpr, EngineType, JoinType, Projection, SortKey, SpatialPredicate, WindowSpec,
};
use super::merge_types::MergePlanClause;
use super::row_types::{KvInsertIntent, VectorPrimaryRow};
use super::vector_opts::{ArrayPrefilter, VectorAnnOptions};
/// The top-level plan produced by the SQL planner.
#[derive(Debug, Clone)]
pub enum SqlPlan {
    // ── Constant ──
    /// Query with no FROM clause: SELECT 1, SELECT 'hello' AS name, etc.
    /// Produces a single row with evaluated constant expressions.
    ConstantResult {
        /// Output column names, one per value.
        columns: Vec<String>,
        /// Pre-evaluated constant values, positionally matching `columns`.
        values: Vec<SqlValue>,
    },
    // ── Reads ──
    /// Plain collection scan. The executor applies `filters`, then
    /// `projection`, `sort_keys`, `offset`/`limit`, DISTINCT, and any
    /// window functions.
    Scan {
        collection: String,
        /// Table alias from the FROM clause, if any.
        alias: Option<String>,
        engine: EngineType,
        filters: Vec<Filter>,
        projection: Vec<Projection>,
        sort_keys: Vec<SortKey>,
        /// `None` = no LIMIT clause.
        limit: Option<usize>,
        offset: usize,
        distinct: bool,
        window_functions: Vec<WindowSpec>,
        /// Bitemporal qualifier extracted from `FOR SYSTEM_TIME` /
        /// `FOR VALID_TIME`. Default when the scan is current-state.
        temporal: TemporalScope,
    },
    /// Single-row fetch by equality on one key column.
    PointGet {
        collection: String,
        alias: Option<String>,
        engine: EngineType,
        /// Column the equality predicate targets.
        key_column: String,
        /// Equality value from the WHERE clause.
        key_value: SqlValue,
    },
    /// Document fetch via a secondary index: equality predicate on an
    /// indexed field. The executor performs an index lookup to resolve
    /// matching document IDs, reads each document, and applies any
    /// remaining filters, projection, sort, and limit.
    ///
    /// Emitted by `document_schemaless::plan_scan` /
    /// `document_strict::plan_scan` when the WHERE clause contains a
    /// single equality predicate on a `Ready` indexed field. Any
    /// additional predicates fall through as post-filters.
    DocumentIndexLookup {
        collection: String,
        alias: Option<String>,
        engine: EngineType,
        /// Indexed field path used for the lookup.
        field: String,
        /// Equality value from the WHERE clause.
        value: SqlValue,
        /// Remaining filters after extracting the equality used for lookup.
        filters: Vec<Filter>,
        projection: Vec<Projection>,
        sort_keys: Vec<SortKey>,
        limit: Option<usize>,
        offset: usize,
        distinct: bool,
        window_functions: Vec<WindowSpec>,
        /// Whether the chosen index is COLLATE NOCASE — the executor
        /// lowercases the lookup value before probing.
        case_insensitive: bool,
        /// Bitemporal qualifier — mirrors `Scan::temporal`. Document
        /// engines must honor it at the Ceiling stage.
        temporal: TemporalScope,
    },
    /// Range scan on a single field with optional lower/upper bounds.
    /// NOTE(review): bound inclusivity is not encoded here — presumably
    /// decided by the executor; confirm against the range-scan handler.
    RangeScan {
        collection: String,
        field: String,
        /// `None` = unbounded below.
        lower: Option<SqlValue>,
        /// `None` = unbounded above.
        upper: Option<SqlValue>,
        limit: usize,
    },
    // ── Writes ──
    /// Plain `INSERT INTO ... VALUES ...`.
    Insert {
        collection: String,
        engine: EngineType,
        /// Row literals: each row is a list of `(column, value)` pairs.
        rows: Vec<Vec<(String, SqlValue)>>,
        /// Column defaults from schema: `(column_name, default_expr)`.
        /// Used to auto-generate values for missing columns (e.g. `id` with `UUID_V7`).
        column_defaults: Vec<(String, String)>,
        /// `ON CONFLICT DO NOTHING` semantics: when true, duplicate-PK rows
        /// are silently skipped instead of raising `unique_violation`. Plain
        /// `INSERT` (no `ON CONFLICT` clause) sets this to `false`.
        if_absent: bool,
    },
    /// KV INSERT: key and value are fundamentally separate.
    /// Each entry is `(key, value_columns)`.
    KvInsert {
        collection: String,
        entries: Vec<(SqlValue, Vec<(String, SqlValue)>)>,
        /// TTL in seconds (0 = no expiry). Extracted from `ttl` column if present.
        ttl_secs: u64,
        /// INSERT-vs-UPSERT distinction. `KvOp::Put` is a Redis-SET-style
        /// upsert by design; to honor SQL `INSERT` semantics the planner must
        /// tell the converter whether a duplicate key should raise (plain
        /// `INSERT`, `Insert`), be silently skipped (`ON CONFLICT DO NOTHING`,
        /// `InsertIfAbsent`), or overwrite (`UPSERT` / `ON CONFLICT DO
        /// UPDATE`, `Put`).
        intent: KvInsertIntent,
        /// `ON CONFLICT (key) DO UPDATE SET field = expr` assignments, carried
        /// through when `intent == Put` via the ON-CONFLICT-DO-UPDATE path.
        /// Empty for plain UPSERT (whole-value overwrite) and for INSERT
        /// variants.
        on_conflict_updates: Vec<(String, SqlExpr)>,
    },
    /// UPSERT: insert or merge if document exists.
    Upsert {
        collection: String,
        engine: EngineType,
        /// Row literals: each row is a list of `(column, value)` pairs.
        rows: Vec<Vec<(String, SqlValue)>>,
        /// Column defaults from schema — mirrors `Insert::column_defaults`.
        column_defaults: Vec<(String, String)>,
        /// `ON CONFLICT (...) DO UPDATE SET field = expr` assignments.
        /// When empty, upsert is a plain merge: new columns overwrite existing.
        /// When non-empty, the engine applies these per-row against the
        /// *existing* document instead of merging the inserted values.
        on_conflict_updates: Vec<(String, SqlExpr)>,
    },
    /// `INSERT INTO target SELECT ...`: execute `source`, insert its
    /// output rows into `target`.
    InsertSelect {
        target: String,
        /// The SELECT side — any read plan.
        source: Box<SqlPlan>,
        limit: usize,
    },
    /// `UPDATE collection SET ... [WHERE ...]`.
    Update {
        collection: String,
        engine: EngineType,
        /// SET assignments: `(column, rhs_expression)`.
        assignments: Vec<(String, SqlExpr)>,
        filters: Vec<Filter>,
        /// Pre-resolved target keys. NOTE(review): presumably populated
        /// when the WHERE clause pins key equality (empty = filter-driven);
        /// confirm in the planner.
        target_keys: Vec<SqlValue>,
        /// `RETURNING` clause present.
        returning: bool,
    },
    /// `UPDATE target SET col = src.col2 FROM src WHERE target.id = src.id`
    ///
    /// Two-phase execution: scan `source` with `source_filters`, then for
    /// each matched source row that satisfies the join predicates against a
    /// target row, apply `assignments` (which may reference source columns
    /// via qualified names `src.col`).
    ///
    /// `join_predicates` are equality pairs `(target_col, source_col)` extracted
    /// from the WHERE clause linking the two tables. `target_filters` are
    /// remaining WHERE predicates that reference only `target`.
    UpdateFrom {
        collection: String,
        engine: EngineType,
        /// The FROM source: a `Scan`, `Join`, or other read plan.
        source: Box<SqlPlan>,
        /// Column name used as the target's join key (e.g. `"id"`).
        target_join_col: String,
        /// Column name used as the source's join key (e.g. `"id"`).
        source_join_col: String,
        /// SET assignments — RHS may be `SqlExpr::Column { table: Some("src"), .. }`.
        assignments: Vec<(String, SqlExpr)>,
        /// Filters that apply only to the target collection.
        target_filters: Vec<Filter>,
        returning: bool,
    },
    /// `DELETE FROM collection [WHERE ...]`.
    Delete {
        collection: String,
        engine: EngineType,
        filters: Vec<Filter>,
        /// Pre-resolved target keys — mirrors `Update::target_keys`.
        target_keys: Vec<SqlValue>,
    },
    /// `TRUNCATE collection`.
    Truncate {
        collection: String,
        /// `RESTART IDENTITY` modifier.
        restart_identity: bool,
    },
    // ── Joins ──
    /// Two-input join of arbitrary sub-plans.
    Join {
        left: Box<SqlPlan>,
        right: Box<SqlPlan>,
        /// Equi-join column pairs. NOTE(review): assumed `(left_col,
        /// right_col)` ordering — confirm against the join planner.
        on: Vec<(String, String)>,
        join_type: JoinType,
        /// Residual (non-equi) ON condition, when present.
        condition: Option<SqlExpr>,
        limit: usize,
        /// Post-join projection: column names to keep (empty = all columns).
        projection: Vec<Projection>,
        /// Post-join filters (from WHERE clause).
        filters: Vec<Filter>,
    },
    // ── Aggregation ──
    /// GROUP BY / aggregate over the rows produced by `input`.
    Aggregate {
        input: Box<SqlPlan>,
        /// Grouping key expressions (empty = single global group).
        group_by: Vec<SqlExpr>,
        aggregates: Vec<AggregateExpr>,
        /// HAVING predicates applied to finalized group rows.
        having: Vec<Filter>,
        limit: usize,
        /// When the GROUP BY contains ROLLUP/CUBE/GROUPING SETS, this field holds
        /// the expansion. Each inner `Vec<usize>` is one grouping set — the indices
        /// into `group_by` (the canonical key list) that are *present* (non-NULL)
        /// for rows in that set. `None` = plain single-set GROUP BY.
        grouping_sets: Option<Vec<Vec<usize>>>,
        /// ORDER BY applied to the aggregated rows. Empty = no sort
        /// (executor returns groups in hash-map iteration order).
        /// Populated by `apply_order_by` when an outer ORDER BY
        /// targets a GROUP BY result; the Aggregate executor sorts the
        /// finalized group rows before returning.
        sort_keys: Vec<SortKey>,
    },
    // ── Timeseries ──
    /// Bucketed timeseries read with per-bucket aggregation.
    TimeseriesScan {
        collection: String,
        /// `(start, end)` time window. Presumably epoch-milliseconds to
        /// match `bucket_interval_ms` — confirm; inclusivity not encoded.
        time_range: (i64, i64),
        bucket_interval_ms: i64,
        group_by: Vec<String>,
        aggregates: Vec<AggregateExpr>,
        filters: Vec<Filter>,
        projection: Vec<Projection>,
        /// Gap-fill strategy, carried as raw text from the query.
        gap_fill: String,
        limit: usize,
        tiered: bool,
        /// Bitemporal system-time / valid-time scope. Only non-default
        /// on collections created `WITH BITEMPORAL`; `TimeseriesRules::plan_scan`
        /// rejects temporal scopes otherwise.
        temporal: TemporalScope,
    },
    /// Row ingestion into a timeseries collection.
    TimeseriesIngest {
        collection: String,
        /// Row literals: each row is a list of `(column, value)` pairs.
        rows: Vec<Vec<(String, SqlValue)>>,
    },
    // ── Search (first-class) ──
    /// ANN vector search, e.g. `ORDER BY vector_distance(field, query) LIMIT k`.
    VectorSearch {
        collection: String,
        /// Vector field the search runs against.
        field: String,
        query_vector: Vec<f32>,
        /// Result count (the `LIMIT k`).
        top_k: usize,
        /// ANN search-breadth knob (name suggests HNSW `ef_search`;
        /// see also `VectorAnnOptions`).
        ef_search: usize,
        /// Distance metric requested by the query operator (`<->`, `<=>`, `<#>`).
        /// Overrides the collection-default metric at search time.
        metric: DistanceMetric,
        filters: Vec<Filter>,
        /// Optional cross-engine prefilter: when set, the ND-array slice
        /// runs first and its output cells' surrogates form a bitmap that
        /// gates the HNSW candidate set. Set by the planner when an
        /// `ORDER BY vector_distance(...) LIMIT k` query is JOINed against
        /// `ARRAY_SLICE(...)`. The convert layer lowers this to
        /// `VectorOp::Search { inline_prefilter_plan: Some(ArrayOp::SurrogateBitmapScan) }`.
        array_prefilter: Option<ArrayPrefilter>,
        /// ANN knobs parsed from the optional third JSON-string argument
        /// to `vector_distance(field, query, '{...}')`.
        ann_options: VectorAnnOptions,
        /// When `true`, the projection contains only the surrogate/PK column
        /// and/or `vector_distance(...)` — no payload fields. The Data Plane
        /// can skip the document-body fetch entirely for vector-primary
        /// collections. Always `false` for non-vector-primary collections
        /// (document body is the primary result).
        skip_payload_fetch: bool,
        /// Predicates against payload-indexed columns on a vector-primary
        /// collection. Each atom is `Eq(field, value)`, `In(field, values)`,
        /// or `Range(field, ...)`. The convert layer translates SqlValue →
        /// nodedb_types::Value and emits them as
        /// `VectorOp::Search::payload_filters`. The Data Plane intersects
        /// the resulting bitmap with the HNSW candidate set via the
        /// per-collection `PayloadIndexSet::pre_filter`.
        payload_filters: Vec<SqlPayloadAtom>,
    },
    /// Reduced vector search carrying no field, filters, or ANN options.
    /// NOTE(review): "multi-vector" semantics are not visible here —
    /// confirm against the executor.
    MultiVectorSearch {
        collection: String,
        query_vector: Vec<f32>,
        top_k: usize,
        ef_search: usize,
    },
    /// Full-text (BM25) search.
    TextSearch {
        collection: String,
        /// Structured FTS query. Use `FtsQuery::Plain { text, fuzzy }` for
        /// simple keyword search. `FtsQuery::And/Or/Prefix` are supported;
        /// `FtsQuery::Phrase` and `FtsQuery::Not` are represented but rejected
        /// by the executor with `Unsupported`.
        query: FtsQuery,
        top_k: usize,
        filters: Vec<Filter>,
        /// When set, the SELECT list contains `bm25_score(field, term)` and the
        /// caller wants a full-collection scan with the score injected under this
        /// alias. The converter emits `TextOp::BM25ScoreScan` instead of
        /// `TextOp::Search` so that all documents — including non-matching ones —
        /// appear in the response with `null` for the score when they do not
        /// contain the term.
        score_alias: Option<String>,
    },
    /// Two-source hybrid search: vector + text, fused via RRF
    /// (cf. `HybridSearchTriple` for the three-source form).
    HybridSearch {
        collection: String,
        query_vector: Vec<f32>,
        query_text: String,
        top_k: usize,
        ef_search: usize,
        /// Relative weight of the vector leg in the fused score.
        vector_weight: f32,
        /// Fuzzy matching for the text leg.
        fuzzy: bool,
        /// SELECT-list alias the response should use for the RRF score
        /// column. `None` means the executor falls back to the fixed
        /// internal field name `rrf_score`. Set by the planner from the
        /// SELECT projection's `AS <alias>` for the `rrf_score(...)` call.
        score_alias: Option<String>,
    },
    /// Three-source hybrid search: vector + BM25 text + graph BFS, fused via weighted RRF.
    ///
    /// Produced when the planner detects `rrf_score(vector_distance(...),
    /// bm25_score(...), graph_score(...))` with three source arguments.
    HybridSearchTriple {
        collection: String,
        query_vector: Vec<f32>,
        query_text: String,
        /// Node id used as the BFS seed for the graph leg.
        graph_seed_id: String,
        /// Maximum BFS depth from the seed node.
        graph_depth: usize,
        /// Edge label filter for graph BFS. `None` = all edges.
        graph_edge_label: Option<String>,
        top_k: usize,
        ef_search: usize,
        fuzzy: bool,
        /// Per-source RRF k constants: (vector_k, text_k, graph_k).
        rrf_k: (f64, f64, f64),
        /// SELECT-list alias for the fused RRF score column.
        score_alias: Option<String>,
    },
    /// Spatial predicate scan over a geometry field.
    SpatialScan {
        collection: String,
        /// Geometry field the predicate targets.
        field: String,
        predicate: SpatialPredicate,
        query_geometry: nodedb_types::geometry::Geometry,
        /// Distance threshold in meters (relevant to distance predicates).
        distance_meters: f64,
        /// Non-spatial post-filters from the WHERE clause.
        attribute_filters: Vec<Filter>,
        limit: usize,
        projection: Vec<Projection>,
    },
    // ── Composite ──
    /// `UNION [ALL]` of N inputs.
    Union {
        inputs: Vec<SqlPlan>,
        /// `true` = UNION (deduplicate); `false` = UNION ALL.
        distinct: bool,
    },
    /// `INTERSECT [ALL]`.
    Intersect {
        left: Box<SqlPlan>,
        right: Box<SqlPlan>,
        /// `true` = INTERSECT ALL (keep duplicates).
        all: bool,
    },
    /// `EXCEPT [ALL]`.
    Except {
        left: Box<SqlPlan>,
        right: Box<SqlPlan>,
        /// `true` = EXCEPT ALL (keep duplicates).
        all: bool,
    },
    /// Collection-backed recursive CTE (tree traversal); contrast with
    /// the storage-free `RecursiveValue`.
    RecursiveScan {
        collection: String,
        /// Filters for the anchor (non-recursive) term.
        base_filters: Vec<Filter>,
        /// Filters applied on each recursive iteration.
        recursive_filters: Vec<Filter>,
        /// Equi-join link for tree-traversal recursion:
        /// `(collection_field, working_table_field)`.
        /// e.g. `("parent_id", "id")` means each iteration finds rows
        /// where `collection.parent_id` matches a `working_table.id`.
        join_link: Option<(String, String)>,
        /// Iteration cap before the executor stops.
        max_iterations: usize,
        /// `true` = UNION (deduplicate); `false` = UNION ALL.
        distinct: bool,
        limit: usize,
    },
    /// Value-generating recursive CTE (`WITH RECURSIVE name(cols) AS (anchor UNION [ALL] step)`).
    ///
    /// Unlike `RecursiveScan`, this variant carries no collection reference — the anchor
    /// row is produced entirely from literal expressions and each iteration applies the
    /// step expressions to the previous row. The executor evaluates this iteratively
    /// in the Data Plane without touching storage.
    ///
    /// All expressions are stored as raw SQL text so they can be serialised across the
    /// SPSC bridge without requiring `SqlExpr` to implement `Serialize`. The executor
    /// parses them at execution time via the same lightweight expression evaluator used
    /// by the procedural executor.
    RecursiveValue {
        /// CTE name (used in error messages).
        cte_name: String,
        /// Column names declared on the CTE (e.g. `(n)` in `c(n) AS ...`).
        columns: Vec<String>,
        /// Anchor SELECT expressions as raw SQL text (one per column).
        init_exprs: Vec<String>,
        /// Recursive step SELECT expressions as raw SQL text (one per column).
        /// May reference column names from `columns`.
        step_exprs: Vec<String>,
        /// Optional WHERE condition as raw SQL text applied to each new row
        /// to decide whether to continue. `None` → run until fixed point.
        condition: Option<String>,
        /// Maximum iterations before a `RecursionDepthExceeded` error is raised.
        max_depth: usize,
        /// `false` → UNION ALL (keep duplicates); `true` → UNION (deduplicate).
        distinct: bool,
    },
    /// Non-recursive CTE: execute each definition, then the outer query.
    Cte {
        /// CTE definitions: `(name, subquery_plan)`.
        definitions: Vec<(String, SqlPlan)>,
        /// The outer query that references CTE names.
        outer: Box<SqlPlan>,
    },
    // ── Array (ND sparse) ─────────────────────────────────────
    /// `CREATE ARRAY <name> DIMS (...) ATTRS (...) TILE_EXTENTS (...)`.
    /// AST is engine-agnostic — the Origin converter builds the typed
    /// `nodedb_array::ArraySchema` and persists the catalog row.
    CreateArray {
        name: String,
        dims: Vec<types_array::ArrayDimAst>,
        attrs: Vec<types_array::ArrayAttrAst>,
        tile_extents: Vec<i64>,
        cell_order: types_array::ArrayCellOrderAst,
        tile_order: types_array::ArrayTileOrderAst,
        /// Hilbert-prefix bits for vShard routing (1–16, default 8).
        prefix_bits: u8,
        /// Audit-retention horizon in milliseconds. `None` = non-bitemporal.
        audit_retain_ms: Option<u64>,
        /// Compliance floor for `audit_retain_ms`. `None` = no floor.
        minimum_audit_retain_ms: Option<u64>,
    },
    /// `DROP ARRAY [IF EXISTS] <name>` — pure Control-Plane catalog
    /// mutation. Per-core array store cleanup happens lazily.
    DropArray { name: String, if_exists: bool },
    /// `ALTER ARRAY <name> SET (audit_retain_ms = N, ...)`.
    ///
    /// Double-`Option` semantics for each diff field:
    /// - `None` = key was absent from SET clause → field unchanged.
    /// - `Some(None)` = key present with value `NULL` → field set to NULL.
    /// - `Some(Some(v))` = key present with integer value → field set to v.
    AlterArray {
        name: String,
        /// New value for `audit_retain_ms`. `Some(None)` unregisters
        /// the array from the bitemporal retention registry.
        audit_retain_ms: Option<Option<i64>>,
        /// New value for `minimum_audit_retain_ms`. Cannot be NULL.
        minimum_audit_retain_ms: Option<u64>,
    },
    /// `INSERT INTO ARRAY <name> COORDS (...) VALUES (...) [, ...]`.
    InsertArray {
        name: String,
        rows: Vec<types_array::ArrayInsertRow>,
    },
    /// `DELETE FROM ARRAY <name> WHERE COORDS IN ((...), (...))`.
    DeleteArray {
        name: String,
        coords: Vec<Vec<types_array::ArrayCoordLiteral>>,
    },
    /// `SELECT * FROM ARRAY_SLICE(name, {dim:[lo,hi],..}, [attrs], limit)`.
    ArraySlice {
        name: String,
        slice: types_array::ArraySliceAst,
        /// Attribute names. Empty = all attrs.
        attr_projection: Vec<String>,
        /// 0 = unlimited.
        limit: u32,
        /// Bitemporal qualifier. When both axes are `None` / `Any`, the Data
        /// Plane returns the live (current) state — the default fast path.
        /// Populated from `AS OF SYSTEM TIME` / `AS OF VALID TIME` clauses.
        temporal: TemporalScope,
    },
    /// `SELECT * FROM ARRAY_PROJECT(name, [attrs])`.
    ArrayProject {
        name: String,
        /// Attribute names. Must be non-empty.
        attr_projection: Vec<String>,
    },
    /// `SELECT * FROM ARRAY_AGG(name, attr, reducer [, group_by_dim])`.
    ArrayAgg {
        name: String,
        attr: String,
        reducer: types_array::ArrayReducerAst,
        /// `None` = scalar fold; `Some(name)` = group by that dim.
        group_by_dim: Option<String>,
        /// Bitemporal qualifier. When both axes are `None` / `Any`, the Data
        /// Plane aggregates against the live (current) state — the default
        /// fast path. Populated from `AS OF SYSTEM TIME` / `AS OF VALID TIME`.
        temporal: TemporalScope,
    },
    /// `SELECT * FROM ARRAY_ELEMENTWISE(left, right, op, attr)`.
    ArrayElementwise {
        left: String,
        right: String,
        op: types_array::ArrayBinaryOpAst,
        attr: String,
    },
    /// `SELECT ARRAY_FLUSH(name)` — returns one row `{result: BOOL}`.
    ArrayFlush { name: String },
    /// `SELECT ARRAY_COMPACT(name)` — returns one row `{result: BOOL}`.
    ArrayCompact { name: String },
    // ── MERGE ──────────────────────────────────────────────────────────
    /// `MERGE INTO target USING source ON ... WHEN ... THEN ...`
    ///
    /// Supported only for `document_schemaless` and `document_strict` engines.
    /// The Data Plane handler evaluates WHEN arms in declaration order and
    /// applies the first matching action to each joined or unmatched row.
    Merge {
        target: String,
        engine: EngineType,
        /// Source plan (Scan, DocumentIndexLookup, or Join of a sub-select).
        source: Box<SqlPlan>,
        /// Column in the target used for the equi-join (from ON clause).
        target_join_col: String,
        /// Column in the source used for the equi-join (from ON clause).
        source_join_col: String,
        /// Alias used to qualify source columns in expressions (e.g. `src.col`).
        source_alias: String,
        /// WHEN arms in declaration order.
        clauses: Vec<MergePlanClause>,
        returning: bool,
    },
    // ── Lateral joins ───────────────────────────────────────────────────
    /// LATERAL subquery that is equi-correlated and has ORDER BY + LIMIT k.
    ///
    /// Emitted when the inner subquery has an equi-key correlation to the outer
    /// table plus an `ORDER BY ... LIMIT k` clause. The Data Plane scans the
    /// inner collection once per outer row applying the equi-filter, sorts by
    /// `inner_order_by`, and retains at most `inner_limit` rows.
    ///
    /// `correlation_keys` is `(outer_col, inner_col)` — the equi-join pairs
    /// that correlate inner to outer.
    LateralTopK {
        /// Plan producing outer rows.
        outer: Box<SqlPlan>,
        /// Alias used to qualify outer table columns (e.g. `"u"`).
        outer_alias: Option<String>,
        /// Inner collection to scan.
        inner_collection: String,
        /// Pre-filter applied to inner rows (non-correlated filters).
        inner_filters: Vec<Filter>,
        /// Sort keys for the inner per-outer-row result.
        inner_order_by: Vec<SortKey>,
        /// Maximum number of inner rows per outer row.
        inner_limit: usize,
        /// Equi-join pairs `(outer_col, inner_col)`.
        correlation_keys: Vec<(String, String)>,
        /// Alias under which inner rows are presented.
        lateral_alias: String,
        /// Post-lateral projection.
        projection: Vec<Projection>,
        /// LEFT join semantics: preserve outer rows even when inner is empty.
        left_join: bool,
    },
    /// General LATERAL subquery — per-outer-row correlated nested loop.
    ///
    /// Emitted for LATERAL subqueries that cannot be rewritten as equi-join
    /// hash joins or `LateralTopK`. The Control Plane drives execution: it
    /// materialises outer rows, then for each row substitutes the correlation
    /// values as additional filters on the inner plan and re-dispatches it.
    ///
    /// Bounded by `outer_row_cap`; queries that exceed the cap receive a typed
    /// `SqlError::Unsupported` before any data is returned.
    LateralLoop {
        /// Plan producing outer rows.
        outer: Box<SqlPlan>,
        /// Alias used to qualify outer table columns.
        outer_alias: Option<String>,
        /// Inner subquery plan (correlation predicates are injected at runtime).
        inner: Box<SqlPlan>,
        /// Correlated predicates extracted from the inner WHERE that reference
        /// outer columns. Each entry is `(inner_field, outer_field)`.
        correlation_predicates: Vec<(String, String)>,
        /// Alias under which inner rows are presented.
        lateral_alias: String,
        /// Post-lateral projection.
        projection: Vec<Projection>,
        /// Maximum outer rows allowed. Queries exceeding this return an error.
        outer_row_cap: usize,
        /// LEFT join semantics: preserve outer rows even when inner is empty.
        left_join: bool,
    },
    // ── Vector-primary ──────────────────────────────────────────────────
    /// INSERT into a vector-primary collection.
    ///
    /// Emitted by the planner instead of the generic `Insert` variant when the
    /// target collection has `primary = PrimaryEngine::Vector`. The Data Plane
    /// routes each row through `VectorOp::DirectUpsert`, bypassing full-document
    /// MessagePack encoding.
    VectorPrimaryInsert {
        collection: String,
        /// Vector column name (matches `VectorPrimaryConfig::vector_field`).
        /// Plumbed to `VectorOp::DirectUpsert` so the Data Plane keys its
        /// HNSW index by `(tid, collection, field)` — the same key the SELECT
        /// path uses.
        field: String,
        /// Collection-level quantization. Applied via `set_quantization` on
        /// the first DirectUpsert so subsequent seals trigger codec-dispatch
        /// rebuilds against the configured codec.
        quantization: nodedb_types::VectorQuantization,
        /// Payload field names that get equality bitmap indexes. Registered
        /// via `payload.add_index` on the first DirectUpsert.
        payload_indexes: Vec<(String, nodedb_types::PayloadIndexKind)>,
        rows: Vec<VectorPrimaryRow>,
    },
}