// nodedb_cluster/distributed_array/local_executor.rs
// SPDX-License-Identifier: BUSL-1.1

//! Local Data-Plane execution trait for array shard operations.
//!
//! `ArrayLocalExecutor` is defined here in `nodedb-cluster` and
//! implemented in the main `nodedb` binary, which has access to the
//! SPSC bridge and the Data Plane array engine. The shard-side handler
//! (`handler.rs`) holds an `Arc<dyn ArrayLocalExecutor>` and calls
//! through it to execute slice and surrogate-bitmap-scan operations
//! on the local node.
//!
//! This keeps `nodedb-cluster` free of a compile-time dependency on
//! `nodedb` while still producing real results from the Data Plane.
use crate::distributed_array::merge::ArrayAggPartial;
use crate::distributed_array::wire::{ArrayShardAggReq, ArrayShardPutReq, ArrayShardSliceReq};
use crate::error::Result;
19/// Execute array operations against the local Data Plane.
20///
21/// The implementor (in `nodedb`) routes the call through the SPSC bridge
22/// to the appropriate TPC core and returns the serialised zerompk result
23/// rows or bitmap bytes.
24#[async_trait::async_trait]
25pub trait ArrayLocalExecutor: Send + Sync + 'static {
26 /// Execute a coord-range slice and return zerompk-encoded row bytes.
27 ///
28 /// `array_id_msgpack` — zerompk encoding of `nodedb_array::types::ArrayId`.
29 /// `slice_msgpack` — zerompk encoding of `nodedb_array::query::Slice`.
30 /// `attr_projection` — attribute index list; empty means all attributes.
31 /// `limit` — maximum rows to return per shard (0 = unlimited).
32 /// `cell_filter_msgpack` — zerompk encoding of `SurrogateBitmap`; empty
33 /// means no filter.
34 /// `shard_hilbert_range` — optional `[lo, hi]` Hilbert-prefix range; when
35 /// set only tiles whose prefix falls in this range are returned, preventing
36 /// duplicate rows in single-node harnesses where all vShards share one
37 /// Data Plane. `None` = no Hilbert filter.
38 ///
39 /// Returns `Vec<Vec<u8>>` — one element per matching row, each element
40 /// being the zerompk encoding of that row.
41 async fn exec_slice(&self, req: &ArrayShardSliceReq) -> Result<Vec<Vec<u8>>>;
42
43 /// Execute a surrogate-bitmap scan and return the zerompk-encoded
44 /// `SurrogateBitmap` bytes for matching cells.
45 ///
46 /// `array_id_msgpack` — zerompk encoding of `nodedb_array::types::ArrayId`.
47 /// `slice_msgpack` — zerompk encoding of `nodedb_array::query::Slice`.
48 async fn exec_surrogate_bitmap_scan(
49 &self,
50 array_id_msgpack: &[u8],
51 slice_msgpack: &[u8],
52 ) -> Result<Vec<u8>>;
53
54 /// Execute a partial aggregate on this shard and return the partial states.
55 ///
56 /// The Data Plane computes the aggregate with `return_partial = true`, so it
57 /// returns `Vec<ArrayAggPartial>` rather than finalized scalars. The
58 /// coordinator merges partials from all shards before finalizing.
59 async fn exec_agg(&self, req: &ArrayShardAggReq) -> Result<Vec<ArrayAggPartial>>;
60
61 /// Apply a cell-batch write to the local array engine.
62 ///
63 /// `req.cells_msgpack` is a zerompk encoding of
64 /// `Vec<nodedb::engine::array::wal::ArrayPutCell>`. All cells belong to
65 /// the same Hilbert-prefix tile. The shard handler has already validated
66 /// that this shard owns the tile; the executor dispatches directly to the
67 /// Data Plane without further routing checks.
68 async fn exec_put(&self, req: &ArrayShardPutReq) -> Result<u64>;
69
70 /// Delete cells by exact coordinates from the local array engine.
71 ///
72 /// `array_id_msgpack` — zerompk encoding of `nodedb_array::types::ArrayId`.
73 /// `coords_msgpack` — zerompk encoding of `Vec<Vec<CoordValue>>`.
74 /// `wal_lsn` — WAL sequence number allocated by the Control Plane.
75 ///
76 /// Returns the `applied_lsn` (equal to `wal_lsn` on success).
77 async fn exec_delete(
78 &self,
79 array_id_msgpack: &[u8],
80 coords_msgpack: &[u8],
81 wal_lsn: u64,
82 ) -> Result<u64>;
83}