// omnigraph/exec/mutation.rs
1use super::*;
2
3use super::query::literal_to_sql;
4
5// ─── Mutation helpers ────────────────────────────────────────────────────────
6
7/// Resolve an IRExpr to a concrete Literal value at runtime.
8fn resolve_expr_value(expr: &IRExpr, params: &ParamMap) -> Result<Literal> {
9    match expr {
10        IRExpr::Literal(lit) => Ok(lit.clone()),
11        IRExpr::Param(name) => params
12            .get(name)
13            .cloned()
14            .ok_or_else(|| OmniError::manifest(format!("parameter '{}' not provided", name))),
15        other => Err(OmniError::manifest(format!(
16            "unsupported expression in mutation: {:?}",
17            other
18        ))),
19    }
20}
21
22/// Create a single-element or N-element array from a Literal, matching the target DataType.
23fn literal_to_typed_array(
24    lit: &Literal,
25    data_type: &DataType,
26    num_rows: usize,
27) -> Result<ArrayRef> {
28    Ok(match (lit, data_type) {
29        (Literal::Null, _) => arrow_array::new_null_array(data_type, num_rows),
30        (Literal::String(s), DataType::Utf8) => {
31            Arc::new(StringArray::from(vec![s.as_str(); num_rows])) as ArrayRef
32        }
33        (Literal::Integer(n), DataType::Int32) => {
34            Arc::new(Int32Array::from(vec![*n as i32; num_rows]))
35        }
36        (Literal::Integer(n), DataType::Int64) => Arc::new(Int64Array::from(vec![*n; num_rows])),
37        (Literal::Integer(n), DataType::UInt32) => {
38            Arc::new(UInt32Array::from(vec![*n as u32; num_rows]))
39        }
40        (Literal::Integer(n), DataType::UInt64) => {
41            Arc::new(UInt64Array::from(vec![*n as u64; num_rows]))
42        }
43        (Literal::Float(f), DataType::Float32) => {
44            Arc::new(Float32Array::from(vec![*f as f32; num_rows]))
45        }
46        (Literal::Float(f), DataType::Float64) => Arc::new(Float64Array::from(vec![*f; num_rows])),
47        (Literal::Bool(b), DataType::Boolean) => Arc::new(BooleanArray::from(vec![*b; num_rows])),
48        (Literal::Date(s), DataType::Date32) => {
49            let days = crate::loader::parse_date32_literal(s)?;
50            Arc::new(Date32Array::from(vec![days; num_rows]))
51        }
52        (Literal::DateTime(s), DataType::Date64) => Arc::new(Date64Array::from(vec![
53            crate::loader::parse_date64_literal(s)?;
54            num_rows
55        ])),
56        (Literal::List(items), DataType::List(field)) => {
57            typed_list_literal_to_array(items, field.data_type(), num_rows)?
58        }
59        (Literal::List(items), DataType::FixedSizeList(field, dim))
60            if field.data_type() == &DataType::Float32 =>
61        {
62            if items.len() != *dim as usize {
63                return Err(OmniError::manifest(format!(
64                    "vector property expects {} dimensions, got {}",
65                    dim,
66                    items.len()
67                )));
68            }
69            let mut builder = FixedSizeListBuilder::with_capacity(
70                Float32Builder::with_capacity(num_rows * (*dim as usize)),
71                *dim,
72                num_rows,
73            )
74            .with_field(field.clone());
75            for _ in 0..num_rows {
76                for item in items {
77                    match item {
78                        Literal::Integer(value) => builder.values().append_value(*value as f32),
79                        Literal::Float(value) => builder.values().append_value(*value as f32),
80                        _ => {
81                            return Err(OmniError::manifest(
82                                "vector elements must be numeric".to_string(),
83                            ));
84                        }
85                    }
86                }
87                builder.append(true);
88            }
89            Arc::new(builder.finish())
90        }
91        _ => {
92            return Err(OmniError::manifest(format!(
93                "cannot convert {:?} to {:?}",
94                lit, data_type
95            )));
96        }
97    })
98}
99
/// Convert a list literal into an Arrow `List` array with `num_rows`
/// identical rows, using `item_type` to select the element builder.
///
/// Element coercion rules (visible in the arms below):
/// - Elements of the wrong `Literal` variant for the target type are
///   appended as NULLs (lenient coercion), with one exception: integer
///   narrowing into Int32/UInt32/UInt64, where an out-of-range value is
///   a hard error via checked `try_from`.
/// - `Float32`/`Float64` arms accept both Integer and Float elements.
/// - Date/DateTime strings are parsed by the loader helpers; a parse
///   failure propagates as an error.
/// - An unsupported element type is rejected outright.
fn typed_list_literal_to_array(
    items: &[Literal],
    item_type: &DataType,
    num_rows: usize,
) -> Result<ArrayRef> {
    match item_type {
        DataType::Utf8 => {
            let mut builder = ListBuilder::new(StringBuilder::new());
            for _ in 0..num_rows {
                for item in items {
                    match item {
                        Literal::String(value) => builder.values().append_value(value),
                        // Non-string element: lenient — append NULL.
                        _ => builder.values().append_null(),
                    }
                }
                builder.append(true);
            }
            Ok(Arc::new(builder.finish()))
        }
        DataType::Boolean => {
            let mut builder = ListBuilder::new(BooleanBuilder::new());
            for _ in 0..num_rows {
                for item in items {
                    match item {
                        Literal::Bool(value) => builder.values().append_value(*value),
                        _ => builder.values().append_null(),
                    }
                }
                builder.append(true);
            }
            Ok(Arc::new(builder.finish()))
        }
        DataType::Int32 => {
            let mut builder = ListBuilder::new(Int32Builder::new());
            for _ in 0..num_rows {
                for item in items {
                    match item {
                        Literal::Integer(value) => {
                            // Checked narrowing: out-of-range is an error,
                            // not a NULL — unlike wrong-variant elements.
                            let value = i32::try_from(*value).map_err(|_| {
                                OmniError::manifest(format!(
                                    "list value {} exceeds Int32 range",
                                    value
                                ))
                            })?;
                            builder.values().append_value(value);
                        }
                        _ => builder.values().append_null(),
                    }
                }
                builder.append(true);
            }
            Ok(Arc::new(builder.finish()))
        }
        DataType::Int64 => {
            let mut builder = ListBuilder::new(Int64Builder::new());
            for _ in 0..num_rows {
                for item in items {
                    match item {
                        Literal::Integer(value) => builder.values().append_value(*value),
                        _ => builder.values().append_null(),
                    }
                }
                builder.append(true);
            }
            Ok(Arc::new(builder.finish()))
        }
        DataType::UInt32 => {
            let mut builder = ListBuilder::new(UInt32Builder::new());
            for _ in 0..num_rows {
                for item in items {
                    match item {
                        Literal::Integer(value) => {
                            // Checked: rejects negatives and overflow.
                            let value = u32::try_from(*value).map_err(|_| {
                                OmniError::manifest(format!(
                                    "list value {} exceeds UInt32 range",
                                    value
                                ))
                            })?;
                            builder.values().append_value(value);
                        }
                        _ => builder.values().append_null(),
                    }
                }
                builder.append(true);
            }
            Ok(Arc::new(builder.finish()))
        }
        DataType::UInt64 => {
            let mut builder = ListBuilder::new(UInt64Builder::new());
            for _ in 0..num_rows {
                for item in items {
                    match item {
                        Literal::Integer(value) => {
                            // Checked: rejects negative values.
                            let value = u64::try_from(*value).map_err(|_| {
                                OmniError::manifest(format!(
                                    "list value {} exceeds UInt64 range",
                                    value
                                ))
                            })?;
                            builder.values().append_value(value);
                        }
                        _ => builder.values().append_null(),
                    }
                }
                builder.append(true);
            }
            Ok(Arc::new(builder.finish()))
        }
        DataType::Float32 => {
            let mut builder = ListBuilder::new(Float32Builder::new());
            for _ in 0..num_rows {
                for item in items {
                    match item {
                        // Integers are accepted and widened to float.
                        Literal::Integer(value) => builder.values().append_value(*value as f32),
                        Literal::Float(value) => builder.values().append_value(*value as f32),
                        _ => builder.values().append_null(),
                    }
                }
                builder.append(true);
            }
            Ok(Arc::new(builder.finish()))
        }
        DataType::Float64 => {
            let mut builder = ListBuilder::new(Float64Builder::new());
            for _ in 0..num_rows {
                for item in items {
                    match item {
                        Literal::Integer(value) => builder.values().append_value(*value as f64),
                        Literal::Float(value) => builder.values().append_value(*value),
                        _ => builder.values().append_null(),
                    }
                }
                builder.append(true);
            }
            Ok(Arc::new(builder.finish()))
        }
        DataType::Date32 => {
            let mut builder = ListBuilder::new(Date32Builder::new());
            for _ in 0..num_rows {
                for item in items {
                    match item {
                        // Parse failure propagates as an error; wrong
                        // variant is appended as NULL.
                        Literal::Date(value) => builder
                            .values()
                            .append_value(crate::loader::parse_date32_literal(value)?),
                        _ => builder.values().append_null(),
                    }
                }
                builder.append(true);
            }
            Ok(Arc::new(builder.finish()))
        }
        DataType::Date64 => {
            let mut builder = ListBuilder::new(Date64Builder::new());
            for _ in 0..num_rows {
                for item in items {
                    match item {
                        Literal::DateTime(value) => builder
                            .values()
                            .append_value(crate::loader::parse_date64_literal(value)?),
                        _ => builder.values().append_null(),
                    }
                }
                builder.append(true);
            }
            Ok(Arc::new(builder.finish()))
        }
        other => Err(OmniError::manifest(format!(
            "cannot convert list literal to {:?}",
            other
        ))),
    }
}
272
273/// Build a single-element blob array from a URI or base64 value string.
274fn build_blob_array_from_value(value: &str) -> Result<ArrayRef> {
275    let mut builder = BlobArrayBuilder::new(1);
276    crate::loader::append_blob_value(&mut builder, value)?;
277    builder
278        .finish()
279        .map_err(|e| OmniError::Lance(e.to_string()))
280}
281
282/// Build a null blob array with one element.
283fn build_null_blob_array() -> Result<ArrayRef> {
284    let mut builder = BlobArrayBuilder::new(1);
285    builder
286        .push_null()
287        .map_err(|e| OmniError::Lance(e.to_string()))?;
288    builder
289        .finish()
290        .map_err(|e| OmniError::Lance(e.to_string()))
291}
292
293/// Build a single-row RecordBatch from resolved assignments.
294fn build_insert_batch(
295    schema: &SchemaRef,
296    id: &str,
297    assignments: &HashMap<String, Literal>,
298    blob_properties: &HashSet<String>,
299) -> Result<RecordBatch> {
300    let mut columns: Vec<ArrayRef> = Vec::with_capacity(schema.fields().len());
301
302    for field in schema.fields() {
303        if field.name() == "id" {
304            columns.push(Arc::new(StringArray::from(vec![id])));
305        } else if blob_properties.contains(field.name()) {
306            if let Some(Literal::String(uri)) = assignments.get(field.name()) {
307                columns.push(build_blob_array_from_value(uri)?);
308            } else if field.is_nullable() {
309                columns.push(build_null_blob_array()?);
310            } else {
311                return Err(OmniError::manifest(format!(
312                    "missing required blob property '{}'",
313                    field.name()
314                )));
315            }
316        } else if field.name() == "src" {
317            let lit = assignments.get("from").ok_or_else(|| {
318                OmniError::manifest("missing required edge endpoint 'from'".to_string())
319            })?;
320            columns.push(literal_to_typed_array(lit, field.data_type(), 1)?);
321        } else if field.name() == "dst" {
322            let lit = assignments.get("to").ok_or_else(|| {
323                OmniError::manifest("missing required edge endpoint 'to'".to_string())
324            })?;
325            columns.push(literal_to_typed_array(lit, field.data_type(), 1)?);
326        } else if let Some(lit) = assignments.get(field.name()) {
327            columns.push(literal_to_typed_array(lit, field.data_type(), 1)?);
328        } else if field.is_nullable() {
329            columns.push(arrow_array::new_null_array(field.data_type(), 1));
330        } else {
331            return Err(OmniError::manifest(format!(
332                "missing required property '{}'",
333                field.name()
334            )));
335        }
336    }
337
338    RecordBatch::try_new(schema.clone(), columns).map_err(|e| OmniError::Lance(e.to_string()))
339}
340
341async fn validate_edge_insert_endpoints(
342    db: &Omnigraph,
343    staging: &MutationStaging,
344    branch: Option<&str>,
345    edge_name: &str,
346    assignments: &HashMap<String, Literal>,
347) -> Result<()> {
348    let catalog = db.catalog();
349    let edge_type = catalog
350        .edge_types
351        .get(edge_name)
352        .ok_or_else(|| OmniError::manifest(format!("unknown edge type '{}'", edge_name)))?;
353    let from = match assignments.get("from") {
354        Some(Literal::String(value)) => value.as_str(),
355        Some(other) => {
356            return Err(OmniError::manifest(format!(
357                "edge {} from endpoint must be a string id, got {}",
358                edge_name,
359                literal_to_sql(other)
360            )));
361        }
362        None => {
363            return Err(OmniError::manifest(format!(
364                "edge {} missing 'from' endpoint",
365                edge_name
366            )));
367        }
368    };
369    let to = match assignments.get("to") {
370        Some(Literal::String(value)) => value.as_str(),
371        Some(other) => {
372            return Err(OmniError::manifest(format!(
373                "edge {} to endpoint must be a string id, got {}",
374                edge_name,
375                literal_to_sql(other)
376            )));
377        }
378        None => {
379            return Err(OmniError::manifest(format!(
380                "edge {} missing 'to' endpoint",
381                edge_name
382            )));
383        }
384    };
385
386    ensure_node_id_exists(db, staging, branch, &edge_type.from_type, from, "src").await?;
387    ensure_node_id_exists(db, staging, branch, &edge_type.to_type, to, "dst").await?;
388    Ok(())
389}
390
391/// Quick scan of pending batches for an `id` value match. Used by the
392/// mutation path's edge endpoint validation to satisfy read-your-writes
393/// for same-query inserts before they're committed to Lance.
394fn pending_batches_contain_id(batches: &[RecordBatch], id: &str) -> bool {
395    for batch in batches {
396        let Some(col) = batch.column_by_name("id") else {
397            continue;
398        };
399        let Some(arr) = col.as_any().downcast_ref::<StringArray>() else {
400            continue;
401        };
402        for i in 0..arr.len() {
403            if arr.is_valid(i) && arr.value(i) == id {
404                return true;
405            }
406        }
407    }
408    false
409}
410
411async fn ensure_node_id_exists(
412    db: &Omnigraph,
413    staging: &MutationStaging,
414    branch: Option<&str>,
415    node_type: &str,
416    id: &str,
417    label: &str,
418) -> Result<()> {
419    let table_key = format!("node:{}", node_type);
420
421    // Prefer the in-query pending accumulator so a same-query insert of
422    // the referenced node is visible to this validation. Fall back to
423    // the pre-mutation manifest snapshot when nothing pending matches.
424    let pending = staging.pending_batches(&table_key);
425    if pending_batches_contain_id(pending, id) {
426        return Ok(());
427    }
428
429    let filter = format!("id = '{}'", id.replace('\'', "''"));
430    let snapshot = db.snapshot_for_branch(branch).await?;
431    let ds = snapshot.open(&table_key).await?;
432    let exists = ds
433        .count_rows(Some(filter))
434        .await
435        .map_err(|e| OmniError::Lance(e.to_string()))?
436        > 0;
437
438    if exists {
439        Ok(())
440    } else {
441        Err(OmniError::manifest(format!(
442            "{} '{}' not found in {}",
443            label, id, node_type
444        )))
445    }
446}
447
448/// Convert an IRMutationPredicate to a Lance SQL filter string.
449fn predicate_to_sql(
450    predicate: &IRMutationPredicate,
451    params: &ParamMap,
452    is_edge: bool,
453) -> Result<String> {
454    let column = if is_edge {
455        match predicate.property.as_str() {
456            "from" => "src".to_string(),
457            "to" => "dst".to_string(),
458            other => other.to_string(),
459        }
460    } else {
461        predicate.property.clone()
462    };
463
464    let value = resolve_expr_value(&predicate.value, params)?;
465    let value_sql = literal_to_sql(&value);
466
467    let op = match predicate.op {
468        CompOp::Eq => "=",
469        CompOp::Ne => "!=",
470        CompOp::Gt => ">",
471        CompOp::Lt => "<",
472        CompOp::Ge => ">=",
473        CompOp::Le => "<=",
474        CompOp::Contains => {
475            return Err(OmniError::manifest(
476                "contains predicate not supported in mutations".to_string(),
477            ));
478        }
479    };
480
481    Ok(format!("{} {} {}", column, op, value_sql))
482}
483
/// Replace specific columns in a RecordBatch with new literal values.
///
/// Non-blob columns: assigned columns are rebuilt from the literal
/// (repeated for every row of `batch`); unassigned columns must be
/// present in the scan result and are copied through.
///
/// Blob columns may or may not be present in `batch` depending on the
/// caller's scan projection:
/// - If `batch` does NOT contain a blob column AND it has no assignment,
///   the column is OMITTED from the output. `merge_insert` leaves it
///   untouched.
/// - If `batch` DOES contain a blob column AND it has no assignment, the
///   column is COPIED to the output. This enables coalescing of
///   different-shape updates into a single full-schema merge batch (the
///   per-table accumulator in `MutationStaging` requires consistent
///   schemas across pending batches for `concat_batches`). The
///   round-tripping cost is acceptable for typical agent-driven
///   mutations; tables with large blobs and unassigned-blob updates may
///   want to be split into separate queries.
/// - If a blob column has a string-URI assignment, build the blob array
///   inline.
fn apply_assignments(
    full_schema: &SchemaRef,
    batch: &RecordBatch,
    assignments: &HashMap<String, Literal>,
    blob_properties: &HashSet<String>,
) -> Result<RecordBatch> {
    // Output schema is built alongside the columns because blob omission
    // means it may be narrower than `full_schema`.
    let mut columns: Vec<ArrayRef> = Vec::with_capacity(full_schema.fields().len());
    let mut out_fields: Vec<Field> = Vec::with_capacity(full_schema.fields().len());

    for field in full_schema.fields().iter() {
        if blob_properties.contains(field.name()) {
            if let Some(Literal::String(uri)) = assignments.get(field.name()) {
                // Assigned: build a single blob column from the URI,
                // repeated once per row in the update batch.
                let mut builder = BlobArrayBuilder::new(batch.num_rows());
                for _ in 0..batch.num_rows() {
                    crate::loader::append_blob_value(&mut builder, uri)?;
                }
                let blob_field = lance::blob::blob_field(field.name(), true);
                out_fields.push(blob_field);
                columns.push(
                    builder
                        .finish()
                        .map_err(|e| OmniError::Lance(e.to_string()))?,
                );
            } else if let Some(col) = batch.column_by_name(field.name()) {
                // Unassigned but scan included it: copy through (writes
                // back the same blob, no observable change but uniform
                // schema for the accumulator).
                let blob_field = lance::blob::blob_field(field.name(), field.is_nullable());
                out_fields.push(blob_field);
                columns.push(col.clone());
            }
            // else: scan did not include this blob column and no
            // assignment — omit. Caller's accumulator must accept the
            // narrower schema (legacy single-merge_insert path).
        } else if let Some(lit) = assignments.get(field.name()) {
            // Assigned non-blob column: rebuild from the literal.
            out_fields.push(field.as_ref().clone());
            columns.push(literal_to_typed_array(
                lit,
                field.data_type(),
                batch.num_rows(),
            )?);
        } else {
            // Unassigned non-blob column: must have come through the
            // scan projection; copy it unchanged.
            let col = batch.column_by_name(field.name()).ok_or_else(|| {
                OmniError::Lance(format!(
                    "column '{}' not found in scan result",
                    field.name()
                ))
            })?;
            out_fields.push(field.as_ref().clone());
            columns.push(col.clone());
        }
    }

    RecordBatch::try_new(Arc::new(Schema::new(out_fields)), columns)
        .map_err(|e| OmniError::Lance(e.to_string()))
}
558
559// ─── Mutation execution ──────────────────────────────────────────────────────
560
561use super::staging::{MutationStaging, PendingMode};
562
/// Open a sub-table dataset for read or inline-commit-write within the
/// current mutation query, capturing pre-write metadata in `staging` on
/// first touch. The captured version is the publisher's CAS fence at
/// end-of-query (per-table OCC).
///
/// On first touch, opens the dataset at HEAD on the requested branch
/// via `open_for_mutation_on_branch`, which compares Lance HEAD against
/// the manifest's pinned version — that fence is the engine's
/// publisher-style OCC catching cross-writer drift before we make any
/// changes.
///
/// On subsequent touches *within the same query*, behavior depends on
/// whether the table has already been inline-committed by a delete op:
///
/// - **Insert / update path (no inline commit between touches).** Lance
///   HEAD has not moved since first touch, so a fresh
///   `open_for_mutation_on_branch` would still match the manifest
///   pinned version. We just go through it again; `ensure_path` is a
///   no-op (idempotent on the captured `expected_version`).
/// - **Delete cascade or multi-delete on the same table.** A prior
///   `delete_where` on this table has already advanced Lance HEAD past
///   the manifest's pinned version (the manifest doesn't move until
///   end-of-query). Going through `open_for_mutation_on_branch` again
///   would trip its `ensure_expected_version` equality check
///   (`actual = pinned + 1` vs `expected = pinned`). Instead we route
///   through `reopen_for_mutation` at the post-inline-commit Lance
///   version captured in `staging.inline_committed[table_key]`, which
///   is the source of truth for "where is Lance HEAD right now on
///   this table within this query."
///
/// The `inline_committed` reopen branch closes the multi-delete-on-same-table
/// failure path that pre-staged-write engines inherited. The branch goes
/// away once Lance exposes a two-phase delete API
/// ([lance-format/lance#6658](https://github.com/lance-format/lance/issues/6658))
/// and we can stage deletes on the same path as inserts/updates.
///
/// Returns the open dataset plus the table's full path and resolved
/// table branch.
async fn open_table_for_mutation(
    db: &Omnigraph,
    staging: &mut MutationStaging,
    branch: Option<&str>,
    table_key: &str,
    op_kind: crate::db::MutationOpKind,
) -> Result<(Dataset, String, Option<String>)> {
    // Reopen path: table already inline-committed within this query.
    if let Some(prior) = staging.inline_committed.get(table_key) {
        // A missing `paths` entry here is an internal invariant
        // violation: inline_committed is only ever recorded after
        // ensure_path populated paths for the same key.
        let path = staging.paths.get(table_key).ok_or_else(|| {
            OmniError::manifest_internal(format!(
                "open_table_for_mutation: inline_committed[{}] without paths entry",
                table_key
            ))
        })?;
        let ds = db
            .reopen_for_mutation(
                table_key,
                &path.full_path,
                path.table_branch.as_deref(),
                prior.table_version,
                op_kind,
            )
            .await?;
        return Ok((ds, path.full_path.clone(), path.table_branch.clone()));
    }
    // First-touch (or repeat constructive-touch) path.
    let (ds, full_path, table_branch) = db
        .open_for_mutation_on_branch(branch, table_key, op_kind)
        .await?;
    let expected_version = ds.version().version;
    // Record the pre-write version as the end-of-query CAS fence;
    // idempotent on repeat touches.
    staging.ensure_path(
        table_key,
        full_path.clone(),
        table_branch.clone(),
        expected_version,
        op_kind,
    );
    Ok((ds, full_path, table_branch))
}
636
637/// D₂ parse-time check: a single mutation query is either insert/update-only
638/// or delete-only. Mixed → reject before any I/O.
639///
640/// Reason: under the staged-write writer, inserts and updates
641/// accumulate in memory and commit at end-of-query, while deletes still
642/// inline-commit (Lance lacks a public two-phase delete in 4.0.0).
643/// Mixing creates ordering hazards (same-row insert→delete becomes a no-op
644/// because the staged insert isn't visible to delete; cascading deletes
645/// of just-inserted edges break referential integrity by silent design).
646/// Until Lance exposes `DeleteJob::execute_uncommitted`, the parse-time
647/// rejection keeps both paths atomic and correct.
648fn enforce_no_mixed_destructive_constructive(
649    ir: &omnigraph_compiler::ir::MutationIR,
650) -> Result<()> {
651    let mut has_constructive = false;
652    let mut has_delete = false;
653    for op in &ir.ops {
654        match op {
655            MutationOpIR::Insert { .. } | MutationOpIR::Update { .. } => {
656                has_constructive = true;
657            }
658            MutationOpIR::Delete { .. } => {
659                has_delete = true;
660            }
661        }
662    }
663    if has_constructive && has_delete {
664        return Err(OmniError::manifest(format!(
665            "mutation '{}' on the same query mixes inserts/updates and deletes; \
666             split into separate mutations: (1) inserts and updates, then (2) deletes. \
667             This restriction lifts when Lance exposes a two-phase delete API \
668             (tracked: lance-format/lance#6658).",
669            ir.name
670        )));
671    }
672    Ok(())
673}
674
675impl Omnigraph {
    /// Execute the named mutation `query_name` from `query_source` on
    /// `branch` with no explicit actor attribution.
    ///
    /// Thin wrapper over [`Self::mutate_as`] with `actor_id = None`.
    pub async fn mutate(
        &self,
        branch: &str,
        query_source: &str,
        query_name: &str,
        params: &ParamMap,
    ) -> Result<MutationResult> {
        self.mutate_as(branch, query_source, query_name, params, None)
            .await
    }
686
    /// Execute the named mutation `query_name` from `query_source` on
    /// `branch`, attributing the write to `actor_id` when provided.
    ///
    /// Delegates to the internal `mutate_with_current_actor` driver,
    /// which performs branch validation, staging, and the final
    /// manifest publish.
    pub async fn mutate_as(
        &self,
        branch: &str,
        query_source: &str,
        query_name: &str,
        params: &ParamMap,
        actor_id: Option<&str>,
    ) -> Result<MutationResult> {
        self.mutate_with_current_actor(branch, query_source, query_name, params, actor_id)
            .await
    }
698
699    async fn mutate_with_current_actor(
700        &self,
701        branch: &str,
702        query_source: &str,
703        query_name: &str,
704        params: &ParamMap,
705        actor_id: Option<&str>,
706    ) -> Result<MutationResult> {
707        self.ensure_schema_state_valid().await?;
708        let requested = Self::normalize_branch_name(branch)?;
709        // Reject internal `__run__*` / system-prefixed branches at the
710        // public write boundary. Direct-publish paths assert this
711        // explicitly so a caller can't write to legacy or system
712        // staging branches by passing the prefix verbatim.
713        if let Some(name) = requested.as_deref() {
714            crate::db::ensure_public_branch_ref(name, "mutate")?;
715        }
716        let resolved_params = enrich_mutation_params(params)?;
717
718        // Per-query staging accumulator. Inserts and updates push batches
719        // into `pending`; deletes still inline-commit and record into
720        // `inline_committed`. At end-of-query, `finalize` issues one
721        // `stage_*` + `commit_staged` per pending table, then the
722        // publisher commits the manifest atomically across all touched
723        // tables. Branch is threaded explicitly — no coordinator swap.
724        let mut staging = MutationStaging::default();
725
726        let exec_result = self
727            .execute_named_mutation(
728                query_source,
729                query_name,
730                &resolved_params,
731                requested.as_deref(),
732                &mut staging,
733            )
734            .await;
735
736        match exec_result {
737            Err(e) => Err(e),
738            Ok(total) if staging.is_empty() => Ok(total),
739            Ok(total) => {
740                let staged = staging.stage_all(self, requested.as_deref()).await?;
741                // `_queue_guards` holds per-(table_key, branch) write
742                // queues acquired inside `commit_all`. Held across the
743                // manifest publish below so no concurrent writer can
744                // interleave between our commit_staged and our publish
745                // (which would correctly fail our CAS but leave Lance
746                // HEAD advanced — the residual class MR-870 recovers).
747                let (updates, expected_versions, sidecar_handle, _queue_guards) = staged
748                    .commit_all(
749                        self,
750                        requested.as_deref(),
751                        crate::db::manifest::SidecarKind::Mutation,
752                        actor_id,
753                    )
754                    .await?;
755                // Failpoint that wedges the documented finalize→publisher
756                // residual: per-table `commit_staged` calls already
757                // advanced Lance HEAD on every touched table; a failure
758                // injected here mirrors the production-rare case where
759                // the publisher's CAS pre-check rejects (or the manifest
760                // write throws) after staged commits succeeded. The
761                // sidecar written inside `staging.finalize()` persists
762                // across this failure so the next `Omnigraph::open`'s
763                // recovery sweep can roll forward — see
764                // `tests/failpoints.rs::recovery_rolls_forward_after_finalize_publisher_failure`.
765                crate::failpoints::maybe_fail("mutation.post_finalize_pre_publisher")?;
766                self.commit_updates_on_branch_with_expected(
767                    requested.as_deref(),
768                    &updates,
769                    &expected_versions,
770                    actor_id,
771                )
772                .await?;
773                // Phase C succeeded — sidecar can be deleted. If this
774                // delete fails, the next open's sweep classifies every
775                // table as NoMovement (manifest pin == Lance HEAD ==
776                // post_commit_pin) and the sidecar is treated as a
777                // stale artifact (cleaned up via the Phase 2 logic).
778                if let Some(handle) = sidecar_handle {
779                    // Best-effort cleanup: the manifest publish already
780                    // succeeded, so the user's mutation is durable. A
781                    // failed delete leaves the sidecar on disk; the
782                    // next open's recovery sweep classifies every table
783                    // as `NoMovement` (manifest pin == Lance HEAD ==
784                    // post_commit_pin) and tidies up. Failing the user
785                    // here would return an error for a write that
786                    // already landed.
787                    if let Err(err) = crate::db::manifest::delete_sidecar(
788                        &handle,
789                        self.storage_adapter(),
790                    )
791                    .await
792                    {
793                        tracing::warn!(
794                            error = %err,
795                            operation_id = handle.operation_id.as_str(),
796                            "recovery sidecar cleanup failed; the next open's recovery sweep will resolve it"
797                        );
798                    }
799                }
800                Ok(total)
801            }
802        }
803    }
804
805    async fn execute_named_mutation(
806        &self,
807        query_source: &str,
808        query_name: &str,
809        params: &ParamMap,
810        branch: Option<&str>,
811        staging: &mut MutationStaging,
812    ) -> Result<MutationResult> {
813        let query_decl = omnigraph_compiler::find_named_query(query_source, query_name)
814            .map_err(|e| OmniError::manifest(e.to_string()))?;
815
816        let checked = typecheck_query_decl(&self.catalog(), &query_decl)?;
817        match checked {
818            CheckedQuery::Mutation(_) => {}
819            CheckedQuery::Read(_) => {
820                return Err(OmniError::manifest(
821                    "mutation execution called on a read query; use query instead".to_string(),
822                ));
823            }
824        }
825
826        let ir = lower_mutation_query(&query_decl)?;
827        // D₂: reject mixed insert/update + delete before any I/O.
828        enforce_no_mixed_destructive_constructive(&ir)?;
829
830        let mut total = MutationResult::default();
831        for op in &ir.ops {
832            let result = match op {
833                MutationOpIR::Insert {
834                    type_name,
835                    assignments,
836                } => {
837                    self.execute_insert(type_name, assignments, params, branch, staging)
838                        .await?
839                }
840                MutationOpIR::Update {
841                    type_name,
842                    assignments,
843                    predicate,
844                } => {
845                    self.execute_update(
846                        type_name,
847                        assignments,
848                        predicate,
849                        params,
850                        branch,
851                        staging,
852                    )
853                    .await?
854                }
855                MutationOpIR::Delete {
856                    type_name,
857                    predicate,
858                } => {
859                    self.execute_delete(type_name, predicate, params, branch, staging)
860                        .await?
861                }
862            };
863            total.affected_nodes += result.affected_nodes;
864            total.affected_edges += result.affected_edges;
865        }
866        Ok(total)
867    }
868
869    async fn execute_insert(
870        &self,
871        type_name: &str,
872        assignments: &[IRAssignment],
873        params: &ParamMap,
874        branch: Option<&str>,
875        staging: &mut MutationStaging,
876    ) -> Result<MutationResult> {
877        let mut resolved: HashMap<String, Literal> = HashMap::new();
878        for a in assignments {
879            resolved.insert(a.property.clone(), resolve_expr_value(&a.value, params)?);
880        }
881
882        let is_node = self.catalog().node_types.contains_key(type_name);
883        let is_edge = self.catalog().edge_types.contains_key(type_name);
884
885        if is_node {
886            let node_type = &self.catalog().node_types[type_name];
887            let schema = node_type.arrow_schema.clone();
888            let blob_props = node_type.blob_properties.clone();
889            let id = if let Some(key_prop) = node_type.key_property() {
890                match resolved.get(key_prop) {
891                    Some(Literal::String(s)) => s.clone(),
892                    Some(other) => literal_to_sql(other).trim_matches('\'').to_string(),
893                    None => {
894                        return Err(OmniError::manifest(format!(
895                            "insert missing @key property '{}'",
896                            key_prop
897                        )));
898                    }
899                }
900            } else {
901                ulid::Ulid::new().to_string()
902            };
903
904            let batch = build_insert_batch(&schema, &id, &resolved, &blob_props)?;
905            crate::loader::validate_value_constraints(&batch, node_type)?;
906            crate::loader::validate_enum_constraints(&batch, &node_type.properties, type_name)?;
907            let unique_props = crate::loader::unique_property_names_for_node(node_type);
908            if !unique_props.is_empty() {
909                crate::loader::enforce_unique_constraints_intra_batch(
910                    &batch,
911                    type_name,
912                    &unique_props,
913                )?;
914            }
915            let has_key = node_type.key_property().is_some();
916            let table_key = format!("node:{}", type_name);
917            // Capture pre-write metadata on first touch (no Lance write).
918            let insert_kind = if has_key {
919                crate::db::MutationOpKind::Merge
920            } else {
921                crate::db::MutationOpKind::Insert
922            };
923            let (_ds, _full_path, _table_branch) =
924                open_table_for_mutation(self, staging, branch, &table_key, insert_kind).await?;
925            // Accumulate. @key inserts go into the Merge stream (so a
926            // later update on the same id coalesces correctly); no-key
927            // inserts go into the Append stream.
928            let mode = if has_key {
929                PendingMode::Merge
930            } else {
931                PendingMode::Append
932            };
933            staging.append_batch(&table_key, schema, mode, batch)?;
934
935            Ok(MutationResult {
936                affected_nodes: 1,
937                affected_edges: 0,
938            })
939        } else if is_edge {
940            let edge_type = &self.catalog().edge_types[type_name];
941            let schema = edge_type.arrow_schema.clone();
942            let blob_props = edge_type.blob_properties.clone();
943            let id = ulid::Ulid::new().to_string();
944
945            let batch = build_insert_batch(&schema, &id, &resolved, &blob_props)?;
946            validate_edge_insert_endpoints(self, staging, branch, type_name, &resolved).await?;
947            crate::loader::validate_enum_constraints(&batch, &edge_type.properties, type_name)?;
948            let unique_props = crate::loader::unique_property_names_for_edge(edge_type);
949            if !unique_props.is_empty() {
950                crate::loader::enforce_unique_constraints_intra_batch(
951                    &batch,
952                    type_name,
953                    &unique_props,
954                )?;
955            }
956            let table_key = format!("edge:{}", type_name);
957            // Capture pre-write metadata on first touch (no Lance write).
958            let (ds, _full_path, _table_branch) = open_table_for_mutation(
959                self,
960                staging,
961                branch,
962                &table_key,
963                crate::db::MutationOpKind::Insert,
964            )
965            .await?;
966            // Accumulate the new edge row. Edge IDs are ULID-generated so
967            // Append mode is correct (no key-based dedup needed).
968            staging.append_batch(&table_key, schema, PendingMode::Append, batch.clone())?;
969
970            // Edge cardinality validation: scan committed edges via Lance
971            // + iterate pending edges in-memory for the `src` column,
972            // group-by-src. The pending side already includes the row
973            // we just appended (above).
974            validate_edge_cardinality_with_pending(
975                self,
976                &ds,
977                staging,
978                &table_key,
979                edge_type,
980            )
981            .await?;
982
983            self.invalidate_graph_index().await;
984
985            Ok(MutationResult {
986                affected_nodes: 0,
987                affected_edges: 1,
988            })
989        } else {
990            Err(OmniError::manifest(format!("unknown type '{}'", type_name)))
991        }
992    }
993
    /// Execute a single `update` op on a node table.
    ///
    /// Matching rows are gathered from both the committed table (Lance
    /// scan) and this query's pending batches (read-your-writes), the
    /// assignments are applied to produce full replacement rows, and the
    /// result is accumulated into the Merge pending stream — no Lance
    /// write happens here.
    async fn execute_update(
        &self,
        type_name: &str,
        assignments: &[IRAssignment],
        predicate: &IRMutationPredicate,
        params: &ParamMap,
        branch: Option<&str>,
        staging: &mut MutationStaging,
    ) -> Result<MutationResult> {
        // Defense in depth: ensure this is a node type
        if !self.catalog().node_types.contains_key(type_name) {
            return Err(OmniError::manifest(format!(
                "update is only supported for node types, not '{}'",
                type_name
            )));
        }

        // Reject updates to @key properties — identity is immutable
        if let Some(key_prop) = self.catalog().node_types[type_name].key_property() {
            if assignments.iter().any(|a| a.property == key_prop) {
                return Err(OmniError::manifest(format!(
                    "cannot update @key property '{}' — delete and re-insert instead",
                    key_prop
                )));
            }
        }

        // Third arg `false`: node-table predicate rendering (edge deletes
        // pass `true` — NOTE(review): presumably toggles edge-column
        // handling in the SQL; confirm in `predicate_to_sql`).
        let pred_sql = predicate_to_sql(predicate, params, false)?;
        let schema = self.catalog().node_types[type_name].arrow_schema.clone();
        let blob_props = self.catalog().node_types[type_name].blob_properties.clone();

        let table_key = format!("node:{}", type_name);
        // First touch captures pre-write metadata in `staging`; only the
        // dataset handle is needed here.
        let (ds, _full_path, _table_branch) = open_table_for_mutation(
            self,
            staging,
            branch,
            &table_key,
            crate::db::MutationOpKind::Update,
        )
        .await?;

        // Scan committed via Lance + apply the same predicate to pending
        // batches via DataFusion `MemTable` (read-your-writes for prior
        // ops in this query). The pending side may include rows from
        // earlier `insert` / `update` ops on the same table.
        //
        // For blob tables we project away the blob columns: Lance's
        // scanner doesn't accept the standard projection path on blob
        // descriptors and would panic with a `Field::project` assertion.
        // The downstream `apply_assignments` synthesizes blob columns
        // from explicit assignments and omits unassigned blobs (Lance's
        // merge_insert leaves them untouched). Tables without blob
        // columns scan the full schema unprojected.
        let non_blob_cols: Vec<&str> = schema
            .fields()
            .iter()
            .filter(|f| !blob_props.contains(f.name()))
            .map(|f| f.name().as_str())
            .collect();
        // `None` projection = full schema; Some(..) only for blob tables.
        let projection: Option<&[&str]> =
            (!blob_props.is_empty()).then_some(non_blob_cols.as_slice());
        let pending_batches = staging.pending_batches(&table_key);
        let pending_schema = staging.pending_schema(&table_key);
        // Use merge semantics on the union: a committed row whose `id`
        // also appears in pending has been logically updated by an
        // earlier op in this query and is shadowed from the scan,
        // otherwise the predicate runs against stale committed values
        // and a chained `update where <pred>` can match a row whose
        // pending value no longer satisfies <pred>.
        let batches = self
            .table_store()
            .scan_with_pending(
                &ds,
                pending_batches,
                pending_schema,
                projection,
                Some(&pred_sql),
                Some("id"),
            )
            .await?;

        // No matches on either side: report zero affected rows.
        if batches.is_empty() || batches.iter().all(|b| b.num_rows() == 0) {
            return Ok(MutationResult {
                affected_nodes: 0,
                affected_edges: 0,
            });
        }

        // Concat the matched batches (committed + pending) into one. The
        // helper trusts that both sides share a schema — Lance returns
        // dataset-schema-ordered columns and DataFusion returns
        // MemTable-schema-ordered columns; both should match the catalog's
        // arrow_schema when the projection is consistent. If they
        // diverge (typically a blob-table mid-schema-shift), the helper
        // surfaces a clear error directing the caller to split the
        // mutation.
        let matched = concat_match_batches_to_schema(&schema, &blob_props, batches)?;

        // Every matched row is rewritten, so each one counts as affected.
        let affected_count = matched.num_rows();

        // Resolve assignment RHS values (literal or parameter).
        let mut resolved: HashMap<String, Literal> = HashMap::new();
        for a in assignments {
            resolved.insert(a.property.clone(), resolve_expr_value(&a.value, params)?);
        }
        let updated = apply_assignments(&schema, &matched, &resolved, &blob_props)?;
        // Re-validate the rewritten rows with the same checks inserts get.
        let node_type = &self.catalog().node_types[type_name];
        crate::loader::validate_value_constraints(&updated, node_type)?;
        crate::loader::validate_enum_constraints(&updated, &node_type.properties, type_name)?;
        let unique_props = crate::loader::unique_property_names_for_node(node_type);
        if !unique_props.is_empty() {
            crate::loader::enforce_unique_constraints_intra_batch(
                &updated,
                type_name,
                &unique_props,
            )?;
        }

        // Accumulate the updated batch into the Merge-mode pending stream.
        // The accumulator may now contain entries with the same id as a
        // prior insert or update on this table; `MutationStaging::finalize`
        // dedupes by id (last-occurrence wins) before issuing the single
        // `stage_merge_insert` call at end-of-query.
        let updated_schema = updated.schema();
        staging.append_batch(&table_key, updated_schema, PendingMode::Merge, updated)?;

        Ok(MutationResult {
            affected_nodes: affected_count,
            affected_edges: 0,
        })
    }
1124
1125    async fn execute_delete(
1126        &self,
1127        type_name: &str,
1128        predicate: &IRMutationPredicate,
1129        params: &ParamMap,
1130        branch: Option<&str>,
1131        staging: &mut MutationStaging,
1132    ) -> Result<MutationResult> {
1133        let is_node = self.catalog().node_types.contains_key(type_name);
1134        if is_node {
1135            self.execute_delete_node(type_name, predicate, params, branch, staging)
1136                .await
1137        } else {
1138            self.execute_delete_edge(type_name, predicate, params, branch, staging)
1139                .await
1140        }
1141    }
1142
    /// Delete nodes matching `predicate`, then cascade-delete edges whose
    /// `src`/`dst` referenced a deleted node.
    ///
    /// Unlike insert/update (which only accumulate into `staging`), node
    /// deletes commit inline — Lance's delete is not a two-phase op here —
    /// and the resulting table versions are recorded on `staging` via
    /// `record_inline` so the end-of-query publisher can pin them.
    async fn execute_delete_node(
        &self,
        type_name: &str,
        predicate: &IRMutationPredicate,
        params: &ParamMap,
        branch: Option<&str>,
        staging: &mut MutationStaging,
    ) -> Result<MutationResult> {
        // Third arg `false`: node-table predicate rendering (the edge
        // delete path passes `true`).
        let pred_sql = predicate_to_sql(predicate, params, false)?;

        let table_key = format!("node:{}", type_name);
        let (ds, full_path, table_branch) = open_table_for_mutation(
            self,
            staging,
            branch,
            &table_key,
            crate::db::MutationOpKind::Delete,
        )
        .await?;
        // Version at scan time; handed to `reopen_for_mutation` below —
        // NOTE(review): presumably used to detect concurrent HEAD movement
        // between scan and delete; confirm against the db module.
        let initial_version = ds.version().version;

        // Scan matching IDs for cascade. Per D₂ this never overlaps with
        // staged inserts (mixed insert/delete in one query is rejected at
        // parse time), so we scan committed only.
        let batches = self
            .table_store()
            .scan(&ds, Some(&["id"]), Some(&pred_sql), None)
            .await?;

        // Flatten the matched ids; column 0 is the projected `id` column.
        let deleted_ids: Vec<String> = batches
            .iter()
            .flat_map(|batch| {
                let ids = batch
                    .column(0)
                    .as_any()
                    .downcast_ref::<StringArray>()
                    .unwrap();
                (0..ids.len())
                    .map(|i| ids.value(i).to_string())
                    .collect::<Vec<_>>()
            })
            .collect();

        // Nothing matched: no delete, no cascade, no version movement.
        if deleted_ids.is_empty() {
            return Ok(MutationResult {
                affected_nodes: 0,
                affected_edges: 0,
            });
        }

        let affected_nodes = deleted_ids.len();

        // Delete nodes — still inline-commit (Lance's `Dataset::delete` is
        // not exposed as a two-phase op in 4.0.0). D₂ keeps inserts and
        // deletes from coexisting in one query, so this advance of Lance
        // HEAD is the only HEAD movement during the query and the
        // publisher's CAS captures it intact.
        let mut ds = self
            .reopen_for_mutation(
                &table_key,
                &full_path,
                table_branch.as_deref(),
                initial_version,
                crate::db::MutationOpKind::Delete,
            )
            .await?;
        crate::failpoints::maybe_fail("mutation.delete_node_pre_primary_delete")?;
        let delete_state = self
            .table_store()
            .delete_where(&full_path, &mut ds, &pred_sql)
            .await?;

        // Record the inline commit so the publisher pins this version.
        staging.record_inline(crate::db::SubTableUpdate {
            table_key: table_key.clone(),
            table_version: delete_state.version,
            table_branch: table_branch.clone(),
            row_count: delete_state.row_count,
            version_metadata: delete_state.version_metadata,
        });

        let mut affected_edges = 0usize;
        // Build a SQL `IN (…)` list; single quotes are doubled to escape
        // them inside SQL string literals.
        let escaped: Vec<String> = deleted_ids
            .iter()
            .map(|id| format!("'{}'", id.replace('\'', "''")))
            .collect();
        let id_list = escaped.join(", ");

        // Snapshot (name, from_type, to_type) into owned values — likely
        // so the catalog borrow does not live across the awaits below.
        let edge_info: Vec<(String, String, String)> = self
            .catalog()
            .edge_types
            .iter()
            .map(|(name, et)| (name.clone(), et.from_type.clone(), et.to_type.clone()))
            .collect();

        for (edge_name, from_type, to_type) in &edge_info {
            // An edge table needs a cascade if this node type appears on
            // either endpoint; self-referential types match both filters.
            let mut cascade_filters = Vec::new();
            if from_type == type_name {
                cascade_filters.push(format!("src IN ({})", id_list));
            }
            if to_type == type_name {
                cascade_filters.push(format!("dst IN ({})", id_list));
            }
            if cascade_filters.is_empty() {
                continue;
            }

            let edge_table_key = format!("edge:{}", edge_name);
            let cascade_filter = cascade_filters.join(" OR ");
            let (mut edge_ds, edge_full_path, edge_table_branch) = open_table_for_mutation(
                self,
                staging,
                branch,
                &edge_table_key,
                crate::db::MutationOpKind::Delete,
            )
            .await?;

            let edge_delete = self
                .table_store()
                .delete_where(&edge_full_path, &mut edge_ds, &cascade_filter)
                .await?;

            affected_edges += edge_delete.deleted_rows;

            // Only record a version update when the cascade actually
            // removed rows from this edge table.
            if edge_delete.deleted_rows > 0 {
                staging.record_inline(crate::db::SubTableUpdate {
                    table_key: edge_table_key,
                    table_version: edge_delete.version,
                    table_branch: edge_table_branch,
                    row_count: edge_delete.row_count,
                    version_metadata: edge_delete.version_metadata,
                });
            }
        }

        // Cascaded edge deletes invalidate the in-memory graph topology.
        if affected_edges > 0 {
            self.invalidate_graph_index().await;
        }

        Ok(MutationResult {
            affected_nodes,
            affected_edges,
        })
    }
1287
1288    async fn execute_delete_edge(
1289        &self,
1290        type_name: &str,
1291        predicate: &IRMutationPredicate,
1292        params: &ParamMap,
1293        branch: Option<&str>,
1294        staging: &mut MutationStaging,
1295    ) -> Result<MutationResult> {
1296        let pred_sql = predicate_to_sql(predicate, params, true)?;
1297
1298        let table_key = format!("edge:{}", type_name);
1299        let (mut ds, full_path, table_branch) = open_table_for_mutation(
1300            self,
1301            staging,
1302            branch,
1303            &table_key,
1304            crate::db::MutationOpKind::Delete,
1305        )
1306        .await?;
1307
1308        let delete_state = self
1309            .table_store()
1310            .delete_where(&full_path, &mut ds, &pred_sql)
1311            .await?;
1312        let affected = delete_state.deleted_rows;
1313
1314        if affected > 0 {
1315            staging.record_inline(crate::db::SubTableUpdate {
1316                table_key,
1317                table_version: delete_state.version,
1318                table_branch,
1319                row_count: delete_state.row_count,
1320                version_metadata: delete_state.version_metadata,
1321            });
1322            self.invalidate_graph_index().await;
1323        }
1324
1325        Ok(MutationResult {
1326            affected_nodes: 0,
1327            affected_edges: affected,
1328        })
1329    }
1330}
1331
1332/// Concat the matched batches from `scan_with_pending` into a single batch.
1333/// `scan_with_pending` returns committed-side and pending-side batches in
1334/// order; both should share a schema if pending was produced through
1335/// `apply_assignments` with full-schema scan input. If schemas drift,
1336/// surface a clear error so the user can split the query.
1337fn concat_match_batches_to_schema(
1338    _schema: &SchemaRef,
1339    _blob_properties: &HashSet<String>,
1340    batches: Vec<RecordBatch>,
1341) -> Result<RecordBatch> {
1342    if batches.len() == 1 {
1343        return Ok(batches.into_iter().next().unwrap());
1344    }
1345    let common = batches[0].schema();
1346    arrow_select::concat::concat_batches(&common, &batches).map_err(|e| {
1347        OmniError::Lance(format!(
1348            "scan_with_pending returned batches with mismatched schemas \
1349             across the committed/pending boundary; this typically indicates \
1350             a blob-column shape mismatch between the committed table and a \
1351             prior in-query insert/update. Split blob-touching mutations \
1352             into separate queries. ({})",
1353            e
1354        ))
1355    })
1356}
1357
1358/// Validate `@card` bounds against committed (Lance) + pending (in-memory)
1359/// edges for one edge table. Engine path: each insert produces a fresh
1360/// ULID id, so committed and pending cannot share a primary key — no
1361/// dedup needed (`dedupe_key_column = None`).
1362async fn validate_edge_cardinality_with_pending(
1363    db: &Omnigraph,
1364    committed_ds: &Dataset,
1365    staging: &MutationStaging,
1366    table_key: &str,
1367    edge_type: &omnigraph_compiler::catalog::EdgeType,
1368) -> Result<()> {
1369    if edge_type.cardinality.is_default() {
1370        return Ok(());
1371    }
1372    let counts = super::staging::count_src_per_edge(
1373        db,
1374        committed_ds,
1375        table_key,
1376        staging,
1377        None,
1378    )
1379    .await?;
1380    super::staging::enforce_cardinality_bounds(edge_type, &counts)
1381}
1382
1383fn enrich_mutation_params(params: &ParamMap) -> Result<ParamMap> {
1384    let mut resolved = params.clone();
1385    if !resolved.contains_key(NOW_PARAM_NAME) {
1386        let now = OffsetDateTime::now_utc()
1387            .format(&Rfc3339)
1388            .map_err(|e| OmniError::manifest(format!("failed to format now(): {}", e)))?;
1389        resolved.insert(NOW_PARAM_NAME.to_string(), Literal::DateTime(now));
1390    }
1391    Ok(resolved)
1392}