Skip to main content

TableStore

Struct TableStore 

Source
pub struct TableStore { /* private fields */ }

Implementations§

Source§

impl TableStore

Source

pub fn new(root_uri: &str) -> Self

Source

pub fn root_uri(&self) -> &str

Source

pub fn dataset_uri(&self, table_path: &str) -> String

Source

pub async fn open_snapshot_table( &self, snapshot: &Snapshot, table_key: &str, ) -> Result<Dataset>

Source

pub async fn open_at_entry(&self, entry: &SubTableEntry) -> Result<Dataset>

Source

pub async fn open_dataset_head( &self, dataset_uri: &str, branch: Option<&str>, ) -> Result<Dataset>

Source

pub async fn open_dataset_head_for_write( &self, table_key: &str, dataset_uri: &str, branch: Option<&str>, ) -> Result<Dataset>

Source

pub async fn delete_branch(&self, dataset_uri: &str, branch: &str) -> Result<()>

Source

pub async fn open_dataset_at_state( &self, table_path: &str, branch: Option<&str>, version: u64, ) -> Result<Dataset>

Source

pub fn ensure_expected_version( &self, ds: &Dataset, table_key: &str, expected_version: u64, ) -> Result<()>

Source

pub async fn reopen_for_mutation( &self, dataset_uri: &str, branch: Option<&str>, table_key: &str, expected_version: u64, ) -> Result<Dataset>

Source

pub async fn fork_branch_from_state( &self, dataset_uri: &str, source_branch: Option<&str>, table_key: &str, source_version: u64, target_branch: &str, ) -> Result<Dataset>

Source

pub async fn scan_batches(&self, ds: &Dataset) -> Result<Vec<RecordBatch>>

Source

pub async fn scan_batches_for_rewrite( &self, ds: &Dataset, ) -> Result<Vec<RecordBatch>>

Source

pub async fn scan_stream( ds: &Dataset, projection: Option<&[&str]>, filter: Option<&str>, order_by: Option<Vec<ColumnOrdering>>, with_row_id: bool, ) -> Result<DatasetRecordBatchStream>

Source

pub async fn scan_stream_with<F>( ds: &Dataset, projection: Option<&[&str]>, filter: Option<&str>, order_by: Option<Vec<ColumnOrdering>>, with_row_id: bool, configure: F, ) -> Result<DatasetRecordBatchStream>
where F: FnOnce(&mut Scanner) -> Result<()>,

Source

pub async fn scan( &self, ds: &Dataset, projection: Option<&[&str]>, filter: Option<&str>, order_by: Option<Vec<ColumnOrdering>>, ) -> Result<Vec<RecordBatch>>

Source

pub async fn scan_with<F>( &self, ds: &Dataset, projection: Option<&[&str]>, filter: Option<&str>, order_by: Option<Vec<ColumnOrdering>>, with_row_id: bool, configure: F, ) -> Result<Vec<RecordBatch>>
where F: FnOnce(&mut Scanner) -> Result<()>,

Source

pub async fn count_rows( &self, ds: &Dataset, filter: Option<String>, ) -> Result<usize>

Source

pub fn dataset_version(&self, ds: &Dataset) -> u64

Source

pub async fn table_state( &self, dataset_uri: &str, ds: &Dataset, ) -> Result<TableState>

Source

pub async fn append_batch( &self, dataset_uri: &str, ds: &mut Dataset, batch: RecordBatch, ) -> Result<TableState>

Source

pub async fn append_or_create_batch( dataset_uri: &str, dataset: Option<Dataset>, batch: RecordBatch, ) -> Result<Dataset>

Source

pub async fn overwrite_batch( &self, dataset_uri: &str, ds: &mut Dataset, batch: RecordBatch, ) -> Result<TableState>

Source

pub async fn overwrite_dataset( dataset_uri: &str, batch: RecordBatch, ) -> Result<Dataset>

Source

pub async fn merge_insert_batch( &self, dataset_uri: &str, ds: Dataset, batch: RecordBatch, key_columns: Vec<String>, when_matched: WhenMatched, when_not_matched: WhenNotMatched, ) -> Result<TableState>

Source

pub async fn merge_insert_batches( &self, dataset_uri: &str, ds: Dataset, batches: Vec<RecordBatch>, key_columns: Vec<String>, when_matched: WhenMatched, when_not_matched: WhenNotMatched, ) -> Result<TableState>

Source

pub async fn delete_where( &self, dataset_uri: &str, ds: &mut Dataset, filter: &str, ) -> Result<DeleteState>

Source

pub async fn stage_append( &self, ds: &Dataset, batch: RecordBatch, prior_stages: &[StagedWrite], ) -> Result<StagedWrite>

Stage an append: write fragment files for batch, return the uncommitted Lance transaction plus the new fragments for read-your-writes.

prior_stages is the slice of staged writes already accumulated against the same dataset in the same query. Pass &[] for the first call; pass the accumulated stages for subsequent calls. The primitive uses this to offset row-ID assignment so chained stage_append calls don’t produce overlapping _rowid ranges. Mirrors scan_with_staged’s &[StagedWrite] shape — the same slice gets passed to both.

On stable-row-id datasets we manually populate row_id_meta on the cloned new_fragments we expose for scan_with_staged. Lance’s InsertBuilder::execute_uncommitted produces fragments with row_id_meta = None; row IDs are normally assigned by Transaction::assign_row_ids during commit. Because scan_with_staged reads the staged fragments before commit, the scanner trips on a stable-row-id dataset (Error::internal("Missing row id meta") from dataset/rowids.rs:22). The transaction’s internal fragment copy stays untouched — Lance assigns IDs there independently at commit time, and the two ID assignments don’t have to agree because no caller threads _rowid from the staged scan into the commit path.

Contract: prior_stages must contain only previous stage_append results against the same dataset. Mixing stage_merge_insert into prior_stages would over-count because merge_insert’s new_fragments include rewrites that don’t add rows. The engine’s parse-time D₂′ check (per touched table: all stage_append OR exactly one stage_merge_insert) guarantees this upstream; on the primitive layer it’s the caller’s responsibility.

Source

pub async fn stage_merge_insert( &self, ds: Dataset, batch: RecordBatch, key_columns: Vec<String>, when_matched: WhenMatched, when_not_matched: WhenNotMatched, ) -> Result<StagedWrite>

Stage a merge_insert (upsert): write fragment files describing the merge result, return the uncommitted transaction plus the new fragments. The transaction’s Operation::Update carries the fragments-to-remove and fragments-to-add; for read-your-writes we expose new_fragments (rows that will be visible after commit).

Contract: do not chain stage_merge_insert calls on the same table within one query. Each call’s MergeInsertBuilder runs against the supplied dataset’s committed view — it does not see fragments produced by a previous staged merge on the same table. Two chained stage_merge_inserts whose source rows share keys will each independently produce Operation::Update transactions whose new_fragments contain a row for the shared key. scan_with_staged (and count_rows_with_staged) will then return both — i.e. duplicates by key.

This is intrinsic to the underlying Lance API: there is no public way to make MergeInsertBuilder see uncommitted fragments. The engine’s MutationStaging accumulator works around this by concatenating per-table batches in memory and issuing exactly one stage_merge_insert per touched table at end-of-query (with last-write-wins dedupe by id) — see exec/staging.rs. Direct callers of this primitive must respect the contract themselves.

Lift path: either a Lance API extension that lets MergeInsertBuilder accept additional staged fragments, or an in-memory pre-merge here that folds prior staged batches into the input stream. See docs/runs.md.

Source

pub async fn commit_staged( &self, ds: Arc<Dataset>, transaction: Transaction, ) -> Result<Dataset>

Commit a previously-staged transaction onto ds, returning the new dataset (with HEAD advanced). Wraps CommitBuilder::execute. Used by the publisher at end-of-query to materialize all staged writes before the meta-manifest commit.

Source

pub async fn stage_overwrite( &self, ds: &Dataset, batch: RecordBatch, ) -> Result<StagedWrite>

Stage an overwrite (write_fragments + Operation::Overwrite { schema, fragments }). Returns a StagedWrite carrying the replacement fragments. HEAD does NOT advance.

Lance shape: InsertBuilder::with_params(WriteParams { mode: Overwrite, .. }) .execute_uncommitted(vec![batch]) produces a Transaction whose Operation::Overwrite carries the new schema + fragments. The transaction is committed via commit_staged (same call as stage_append).

MR-793 Phase 2: introduces this for the schema_apply rewrite path. Lance API verified in .context/mr-793-design.md Appendix A.1.

Source

pub async fn stage_create_btree_index( &self, ds: &Dataset, columns: &[&str], ) -> Result<StagedWrite>

Stage a BTREE scalar index build. Returns a StagedWrite whose transaction commits via commit_staged. HEAD does NOT advance.

Lance shape: CreateIndexBuilder::execute_uncommitted returns IndexMetadata; we manually wrap it in Operation::CreateIndex { new_indices, removed_indices } via the public TransactionBuilder, replicating the simple (non-segment-commit-path) branch of Lance’s CreateIndexBuilder::execute (lance-4.0.0 src/index/create.rs:502-512).

removed_indices mirrors execute() lines 466-476: when the build replaces an existing same-named index, those entries are listed for tombstoning by the manifest commit.

MR-793 Phase 2: scalar index types (BTree, Inverted) are stage-able. Vector indices are NOT (segment-commit-path requires build_index_metadata_from_segments which is pub(crate) in lance-4.0.0); see create_vector_index and Appendix A.3.

Source

pub async fn stage_create_inverted_index( &self, ds: &Dataset, column: &str, ) -> Result<StagedWrite>

Stage an INVERTED (FTS) scalar index build. Same shape as stage_create_btree_index; see its docs for the Lance API citation and contract notes.

Source

pub async fn scan_with_staged( &self, ds: &Dataset, staged: &[StagedWrite], projection: Option<&[&str]>, filter: Option<&str>, ) -> Result<Vec<RecordBatch>>

Run a scan with optional uncommitted staged writes visible alongside the committed snapshot. When staged is empty this is identical to scan(...).

Composes the visible fragment list as committed - removed + new: the committed manifest’s fragments, minus any fragment IDs that staged Operation::Updates (merge_insert rewrites) have superseded, plus the staged new/updated fragments. Without the removed filter, a merge_insert that rewrites an existing fragment would surface twice — once via the original committed fragment, once via the rewrite in new_fragments.

Filter contract is incomplete on staged fragments. When filter is Some(...), Lance pushes the predicate to per-fragment scans with stats-based pruning. Uncommitted fragments produced by write_fragments_internal lack the per-column statistics that committed fragments carry; Lance’s optimizer drops them from the filtered scan even when their data would match. Staged-fragment rows are silently absent from the result. scanner.use_stats(false) does not fix this in lance 4.0.0. Callers needing correct filtered reads against staged data should use a different strategy — the engine’s MutationStaging accumulator unions in-memory pending batches with the committed scan via DataFusion MemTable (see scan_with_pending).

This method remains on the surface for primitive-level testing (basic stage + scan correctness without filters works) and for callers that don’t need filter pushdown.

Source

pub async fn scan_with_pending( &self, committed_ds: &Dataset, pending_batches: &[RecordBatch], pending_schema: Option<SchemaRef>, projection: Option<&[&str]>, filter: Option<&str>, key_column: Option<&str>, ) -> Result<Vec<RecordBatch>>

Scan committed via Lance + apply the same filter to in-memory pending batches via DataFusion MemTable, concat the two result streams. The replacement for scan_with_staged in engine code: the staged-write writer accumulates input batches in memory and unions them with the committed snapshot at read time, sidestepping the Scanner::with_fragments filter-pushdown limitation documented on scan_with_staged.

committed_ds should be opened at the pre-mutation expected_version (the same version captured in MutationStaging::expected_versions at first touch of the table). pending_batches are the per-table accumulator’s batches in their input shape. pending_schema is the schema of the accumulated batches; passing None falls back to the schema of the first pending batch.

filter is the Lance / DataFusion SQL predicate. It is applied to both sides — Lance pushes it down on the committed side; the pending side runs it through a fresh DataFusion SessionContext with the batches registered as a MemTable named pending.

key_column controls how committed and pending are unioned:

  • None (union semantics): every committed row that matches the filter and every pending row that matches the filter is returned. Correct when committed and pending cannot share a primary key — e.g., Append-mode loads with ULID-generated ids, or any read where pending hasn’t been used to update committed rows.
  • Some(col) (merge / shadow semantics): committed rows whose col value appears in any pending batch are EXCLUDED from the result; only pending’s view of those rows is returned. Required for Merge-mode reads (e.g., execute_update on the engine path) so a chained update doesn’t see stale committed values that a prior op already updated in pending. Without this, a predicate like where age > 30 can match a row that an earlier set age = 20 already moved out of range.

When pending_batches is empty this delegates to the regular scan path.

Source

pub async fn count_rows_with_staged( &self, ds: &Dataset, staged: &[StagedWrite], filter: Option<String>, ) -> Result<usize>

count_rows variant that respects staged writes. Used for edge-cardinality validation that needs to see staged edges before commit. Same committed - removed + new composition as scan_with_staged.

Source

pub async fn has_btree_index(&self, ds: &Dataset, column: &str) -> Result<bool>

Source

pub async fn has_fts_index(&self, ds: &Dataset, column: &str) -> Result<bool>

Source

pub async fn has_vector_index(&self, ds: &Dataset, column: &str) -> Result<bool>

Source

pub async fn create_btree_index( &self, ds: &mut Dataset, columns: &[&str], ) -> Result<()>

Source

pub async fn create_inverted_index( &self, ds: &mut Dataset, column: &str, ) -> Result<()>

Source

pub async fn create_vector_index( &self, ds: &mut Dataset, column: &str, ) -> Result<()>

Source

pub async fn create_empty_dataset( dataset_uri: &str, schema: &SchemaRef, ) -> Result<Dataset>

Source

pub async fn first_row_id_for_filter( &self, ds: &Dataset, filter: &str, ) -> Result<Option<u64>>

Source

pub async fn write_dataset( dataset_uri: &str, batch: RecordBatch, ) -> Result<Dataset>

Trait Implementations§

Source§

impl Clone for TableStore

Source§

fn clone(&self) -> TableStore

Returns a duplicate of the value. Read more
1.0.0 (const: unstable) · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl Debug for TableStore

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl TableStorage for TableStore

Source§

fn open_snapshot_at_entry<'life0, 'life1, 'async_trait>( &'life0 self, entry: &'life1 SubTableEntry, ) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait,

Source§

fn open_snapshot_at_table<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, snapshot: &'life1 Snapshot, table_key: &'life2 str, ) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn open_dataset_head<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, dataset_uri: &'life1 str, branch: Option<&'life2 str>, ) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn open_dataset_head_for_write<'life0, 'life1, 'life2, 'life3, 'async_trait>( &'life0 self, table_key: &'life1 str, dataset_uri: &'life2 str, branch: Option<&'life3 str>, ) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait, 'life3: 'async_trait,

Source§

fn open_dataset_at_state<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, table_path: &'life1 str, branch: Option<&'life2 str>, version: u64, ) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn fork_branch_from_state<'life0, 'life1, 'life2, 'life3, 'life4, 'async_trait>( &'life0 self, dataset_uri: &'life1 str, source_branch: Option<&'life2 str>, table_key: &'life3 str, source_version: u64, target_branch: &'life4 str, ) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait, 'life3: 'async_trait, 'life4: 'async_trait,

Source§

fn delete_branch<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, dataset_uri: &'life1 str, branch: &'life2 str, ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn reopen_for_mutation<'life0, 'life1, 'life2, 'life3, 'async_trait>( &'life0 self, dataset_uri: &'life1 str, branch: Option<&'life2 str>, table_key: &'life3 str, expected_version: u64, ) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait, 'life3: 'async_trait,

Source§

fn ensure_expected_version( &self, snapshot: &SnapshotHandle, table_key: &str, expected_version: u64, ) -> Result<()>

Source§

fn scan<'life0, 'life1, 'life2, 'life3, 'life4, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, projection: Option<&'life2 [&'life3 str]>, filter: Option<&'life4 str>, order_by: Option<Vec<ColumnOrdering>>, ) -> Pin<Box<dyn Future<Output = Result<Vec<RecordBatch>>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait, 'life3: 'async_trait, 'life4: 'async_trait,

Source§

fn scan_with_row_id<'life0, 'life1, 'life2, 'life3, 'life4, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, projection: Option<&'life2 [&'life3 str]>, filter: Option<&'life4 str>, order_by: Option<Vec<ColumnOrdering>>, with_row_id: bool, ) -> Pin<Box<dyn Future<Output = Result<Vec<RecordBatch>>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait, 'life3: 'async_trait, 'life4: 'async_trait,

Source§

fn scan_batches<'life0, 'life1, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, ) -> Pin<Box<dyn Future<Output = Result<Vec<RecordBatch>>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait,

Source§

fn scan_batches_for_rewrite<'life0, 'life1, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, ) -> Pin<Box<dyn Future<Output = Result<Vec<RecordBatch>>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait,

Source§

fn count_rows<'life0, 'life1, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, filter: Option<String>, ) -> Pin<Box<dyn Future<Output = Result<usize>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait,

Source§

fn count_rows_with_staged<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, staged: &'life2 [StagedHandle], filter: Option<String>, ) -> Pin<Box<dyn Future<Output = Result<usize>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn scan_with_staged<'life0, 'life1, 'life2, 'life3, 'life4, 'life5, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, staged: &'life2 [StagedHandle], projection: Option<&'life3 [&'life4 str]>, filter: Option<&'life5 str>, ) -> Pin<Box<dyn Future<Output = Result<Vec<RecordBatch>>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait, 'life3: 'async_trait, 'life4: 'async_trait, 'life5: 'async_trait,

Source§

fn scan_with_pending<'life0, 'life1, 'life2, 'life3, 'life4, 'life5, 'life6, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, pending: &'life2 [RecordBatch], pending_schema: Option<SchemaRef>, projection: Option<&'life3 [&'life4 str]>, filter: Option<&'life5 str>, key_column: Option<&'life6 str>, ) -> Pin<Box<dyn Future<Output = Result<Vec<RecordBatch>>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait, 'life3: 'async_trait, 'life4: 'async_trait, 'life5: 'async_trait, 'life6: 'async_trait,

Source§

fn first_row_id_for_filter<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, filter: &'life2 str, ) -> Pin<Box<dyn Future<Output = Result<Option<u64>>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn table_state<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, dataset_uri: &'life1 str, snapshot: &'life2 SnapshotHandle, ) -> Pin<Box<dyn Future<Output = Result<TableState>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn stage_append<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, batch: RecordBatch, prior_stages: &'life2 [StagedHandle], ) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn stage_merge_insert<'life0, 'async_trait>( &'life0 self, snapshot: SnapshotHandle, batch: RecordBatch, key_columns: Vec<String>, when_matched: WhenMatched, when_not_matched: WhenNotMatched, ) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait,

Source§

fn commit_staged<'life0, 'async_trait>( &'life0 self, snapshot: SnapshotHandle, staged: StagedHandle, ) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait,

Source§

fn stage_overwrite<'life0, 'life1, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, batch: RecordBatch, ) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait,

Stage an overwrite (Operation::Overwrite). MR-793 Phase 2.
Source§

fn stage_create_btree_index<'life0, 'life1, 'life2, 'life3, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, columns: &'life2 [&'life3 str], ) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait, 'life3: 'async_trait,

Stage a BTREE scalar index build. MR-793 Phase 2.
Source§

fn stage_create_inverted_index<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, column: &'life2 str, ) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Stage an INVERTED (FTS) scalar index build. MR-793 Phase 2.
Source§

fn append_batch<'life0, 'life1, 'async_trait>( &'life0 self, dataset_uri: &'life1 str, snapshot: SnapshotHandle, batch: RecordBatch, ) -> Pin<Box<dyn Future<Output = Result<(SnapshotHandle, TableState)>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait,

Source§

fn merge_insert_batches<'life0, 'life1, 'async_trait>( &'life0 self, dataset_uri: &'life1 str, snapshot: SnapshotHandle, batches: Vec<RecordBatch>, key_columns: Vec<String>, when_matched: WhenMatched, when_not_matched: WhenNotMatched, ) -> Pin<Box<dyn Future<Output = Result<TableState>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait,

Source§

fn overwrite_batch<'life0, 'life1, 'async_trait>( &'life0 self, dataset_uri: &'life1 str, snapshot: SnapshotHandle, batch: RecordBatch, ) -> Pin<Box<dyn Future<Output = Result<(SnapshotHandle, TableState)>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait,

Source§

fn delete_where<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, dataset_uri: &'life1 str, snapshot: SnapshotHandle, filter: &'life2 str, ) -> Pin<Box<dyn Future<Output = Result<(SnapshotHandle, DeleteState)>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn has_btree_index<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, column: &'life2 str, ) -> Pin<Box<dyn Future<Output = Result<bool>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn has_fts_index<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, column: &'life2 str, ) -> Pin<Box<dyn Future<Output = Result<bool>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn has_vector_index<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, column: &'life2 str, ) -> Pin<Box<dyn Future<Output = Result<bool>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn create_btree_index<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, snapshot: SnapshotHandle, columns: &'life1 [&'life2 str], ) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Source§

fn create_inverted_index<'life0, 'life1, 'async_trait>( &'life0 self, snapshot: SnapshotHandle, column: &'life1 str, ) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait,

Source§

fn create_vector_index<'life0, 'life1, 'async_trait>( &'life0 self, snapshot: SnapshotHandle, column: &'life1 str, ) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait,

Source§

fn root_uri(&self) -> &str

Source§

fn dataset_uri(&self, table_path: &str) -> String

Source§

fn scan_stream<'life0, 'life1, 'life2, 'life3, 'life4, 'async_trait>( &'life0 self, snapshot: &'life1 SnapshotHandle, projection: Option<&'life2 [&'life3 str]>, filter: Option<&'life4 str>, order_by: Option<Vec<ColumnOrdering>>, with_row_id: bool, ) -> Pin<Box<dyn Future<Output = Result<DatasetRecordBatchStream>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait, 'life3: 'async_trait, 'life4: 'async_trait,

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> Conv for T

Source§

fn conv<T>(self) -> T
where Self: Into<T>,

Converts self into T using Into<T>. Read more
Source§

impl<T> Downcast for T
where T: Any,

Source§

fn into_any(self: Box<T>) -> Box<dyn Any>

Converts Box<dyn Trait> (where Trait: Downcast) to Box<dyn Any>, which can then be downcast into Box<dyn ConcreteType> where ConcreteType implements Trait.
Source§

fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>

Converts Rc<Trait> (where Trait: Downcast) to Rc<Any>, which can then be further downcast into Rc<ConcreteType> where ConcreteType implements Trait.
Source§

fn as_any(&self) -> &(dyn Any + 'static)

Converts &Trait (where Trait: Downcast) to &Any. This is needed since Rust cannot generate &Any’s vtable from &Trait’s.
Source§

fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)

Converts &mut Trait (where Trait: Downcast) to &mut Any. This is needed since Rust cannot generate &mut Any’s vtable from &mut Trait’s.
Source§

impl<T> DowncastSend for T
where T: Any + Send,

Source§

fn into_any_send(self: Box<T>) -> Box<dyn Any + Send>

Converts Box<Trait> (where Trait: DowncastSend) to Box<dyn Any + Send>, which can then be downcast into Box<ConcreteType> where ConcreteType implements Trait.
Source§

impl<T> DowncastSync for T
where T: Any + Send + Sync,

Source§

fn into_any_sync(self: Box<T>) -> Box<dyn Any + Sync + Send>

Converts Box<Trait> (where Trait: DowncastSync) to Box<dyn Any + Send + Sync>, which can then be downcast into Box<ConcreteType> where ConcreteType implements Trait.
Source§

fn into_any_arc(self: Arc<T>) -> Arc<dyn Any + Sync + Send>

Converts Arc<Trait> (where Trait: DowncastSync) to Arc<Any>, which can then be downcast into Arc<ConcreteType> where ConcreteType implements Trait.
Source§

impl<T> FmtForward for T

Source§

fn fmt_binary(self) -> FmtBinary<Self>
where Self: Binary,

Causes self to use its Binary implementation when Debug-formatted.
Source§

fn fmt_display(self) -> FmtDisplay<Self>
where Self: Display,

Causes self to use its Display implementation when Debug-formatted.
Source§

fn fmt_lower_exp(self) -> FmtLowerExp<Self>
where Self: LowerExp,

Causes self to use its LowerExp implementation when Debug-formatted.
Source§

fn fmt_lower_hex(self) -> FmtLowerHex<Self>
where Self: LowerHex,

Causes self to use its LowerHex implementation when Debug-formatted.
Source§

fn fmt_octal(self) -> FmtOctal<Self>
where Self: Octal,

Causes self to use its Octal implementation when Debug-formatted.
Source§

fn fmt_pointer(self) -> FmtPointer<Self>
where Self: Pointer,

Causes self to use its Pointer implementation when Debug-formatted.
Source§

fn fmt_upper_exp(self) -> FmtUpperExp<Self>
where Self: UpperExp,

Causes self to use its UpperExp implementation when Debug-formatted.
Source§

fn fmt_upper_hex(self) -> FmtUpperHex<Self>
where Self: UpperHex,

Causes self to use its UpperHex implementation when Debug-formatted.
Source§

fn fmt_list(self) -> FmtList<Self>
where &'a Self: for<'a> IntoIterator,

Formats each item in a sequence. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

impl<Unshared, Shared> IntoShared<Shared> for Unshared
where Shared: FromUnshared<Unshared>,

Source§

fn into_shared(self) -> Shared

Creates a shared type from an unshared type.
Source§

impl<T> Pipe for T
where T: ?Sized,

Source§

fn pipe<R>(self, func: impl FnOnce(Self) -> R) -> R
where Self: Sized,

Pipes by value. This is generally the method you want to use. Read more
Source§

fn pipe_ref<'a, R>(&'a self, func: impl FnOnce(&'a Self) -> R) -> R
where R: 'a,

Borrows self and passes that borrow into the pipe function. Read more
Source§

fn pipe_ref_mut<'a, R>(&'a mut self, func: impl FnOnce(&'a mut Self) -> R) -> R
where R: 'a,

Mutably borrows self and passes that borrow into the pipe function. Read more
Source§

fn pipe_borrow<'a, B, R>(&'a self, func: impl FnOnce(&'a B) -> R) -> R
where Self: Borrow<B>, B: 'a + ?Sized, R: 'a,

Borrows self, then passes self.borrow() into the pipe function. Read more
Source§

fn pipe_borrow_mut<'a, B, R>( &'a mut self, func: impl FnOnce(&'a mut B) -> R, ) -> R
where Self: BorrowMut<B>, B: 'a + ?Sized, R: 'a,

Mutably borrows self, then passes self.borrow_mut() into the pipe function. Read more
Source§

fn pipe_as_ref<'a, U, R>(&'a self, func: impl FnOnce(&'a U) -> R) -> R
where Self: AsRef<U>, U: 'a + ?Sized, R: 'a,

Borrows self, then passes self.as_ref() into the pipe function.
Source§

fn pipe_as_mut<'a, U, R>(&'a mut self, func: impl FnOnce(&'a mut U) -> R) -> R
where Self: AsMut<U>, U: 'a + ?Sized, R: 'a,

Mutably borrows self, then passes self.as_mut() into the pipe function.
Source§

fn pipe_deref<'a, T, R>(&'a self, func: impl FnOnce(&'a T) -> R) -> R
where Self: Deref<Target = T>, T: 'a + ?Sized, R: 'a,

Borrows self, then passes self.deref() into the pipe function.
Source§

fn pipe_deref_mut<'a, T, R>( &'a mut self, func: impl FnOnce(&'a mut T) -> R, ) -> R
where Self: DerefMut<Target = T> + Deref, T: 'a + ?Sized, R: 'a,

Mutably borrows self, then passes self.deref_mut() into the pipe function.
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of the pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a value with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> PolicyExt for T
where T: ?Sized,

Source§

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow. Read more
Source§

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow. Read more
Source§

impl<T> Same for T

Source§

type Output = T

Should always be Self
Source§

impl<T> Tap for T

Source§

fn tap(self, func: impl FnOnce(&Self)) -> Self

Immutable access to a value. Read more
Source§

fn tap_mut(self, func: impl FnOnce(&mut Self)) -> Self

Mutable access to a value. Read more
Source§

fn tap_borrow<B>(self, func: impl FnOnce(&B)) -> Self
where Self: Borrow<B>, B: ?Sized,

Immutable access to the Borrow<B> of a value. Read more
Source§

fn tap_borrow_mut<B>(self, func: impl FnOnce(&mut B)) -> Self
where Self: BorrowMut<B>, B: ?Sized,

Mutable access to the BorrowMut<B> of a value. Read more
Source§

fn tap_ref<R>(self, func: impl FnOnce(&R)) -> Self
where Self: AsRef<R>, R: ?Sized,

Immutable access to the AsRef<R> view of a value. Read more
Source§

fn tap_ref_mut<R>(self, func: impl FnOnce(&mut R)) -> Self
where Self: AsMut<R>, R: ?Sized,

Mutable access to the AsMut<R> view of a value. Read more
Source§

fn tap_deref<T>(self, func: impl FnOnce(&T)) -> Self
where Self: Deref<Target = T>, T: ?Sized,

Immutable access to the Deref::Target of a value. Read more
Source§

fn tap_deref_mut<T>(self, func: impl FnOnce(&mut T)) -> Self
where Self: DerefMut<Target = T> + Deref, T: ?Sized,

Mutable access to the Deref::Target of a value. Read more
Source§

fn tap_dbg(self, func: impl FnOnce(&Self)) -> Self

Calls .tap() only in debug builds, and is erased in release builds.
Source§

fn tap_mut_dbg(self, func: impl FnOnce(&mut Self)) -> Self

Calls .tap_mut() only in debug builds, and is erased in release builds.
Source§

fn tap_borrow_dbg<B>(self, func: impl FnOnce(&B)) -> Self
where Self: Borrow<B>, B: ?Sized,

Calls .tap_borrow() only in debug builds, and is erased in release builds.
Source§

fn tap_borrow_mut_dbg<B>(self, func: impl FnOnce(&mut B)) -> Self
where Self: BorrowMut<B>, B: ?Sized,

Calls .tap_borrow_mut() only in debug builds, and is erased in release builds.
Source§

fn tap_ref_dbg<R>(self, func: impl FnOnce(&R)) -> Self
where Self: AsRef<R>, R: ?Sized,

Calls .tap_ref() only in debug builds, and is erased in release builds.
Source§

fn tap_ref_mut_dbg<R>(self, func: impl FnOnce(&mut R)) -> Self
where Self: AsMut<R>, R: ?Sized,

Calls .tap_ref_mut() only in debug builds, and is erased in release builds.
Source§

fn tap_deref_dbg<T>(self, func: impl FnOnce(&T)) -> Self
where Self: Deref<Target = T>, T: ?Sized,

Calls .tap_deref() only in debug builds, and is erased in release builds.
Source§

fn tap_deref_mut_dbg<T>(self, func: impl FnOnce(&mut T)) -> Self
where Self: DerefMut<Target = T> + Deref, T: ?Sized,

Calls .tap_deref_mut() only in debug builds, and is erased in release builds.
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T> TryConv for T

Source§

fn try_conv<T>(self) -> Result<T, Self::Error>
where Self: TryInto<T>,

Attempts to convert self into T using TryInto<T>. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V

Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

impl<T> Allocation for T
where T: RefUnwindSafe + Send + Sync,

Source§

impl<T> Fruit for T
where T: Send + Downcast,

Source§

impl<T> MaybeSend for T
where T: Send,