pub struct TableStore { /* private fields */ }

Implementations§
Source§impl TableStore
impl TableStore
pub fn new(root_uri: &str) -> Self
pub fn root_uri(&self) -> &str
pub fn dataset_uri(&self, table_path: &str) -> String
pub async fn open_snapshot_table( &self, snapshot: &Snapshot, table_key: &str, ) -> Result<Dataset>
pub async fn open_at_entry(&self, entry: &SubTableEntry) -> Result<Dataset>
pub async fn open_dataset_head( &self, dataset_uri: &str, branch: Option<&str>, ) -> Result<Dataset>
pub async fn open_dataset_head_for_write( &self, table_key: &str, dataset_uri: &str, branch: Option<&str>, ) -> Result<Dataset>
pub async fn delete_branch(&self, dataset_uri: &str, branch: &str) -> Result<()>
pub async fn open_dataset_at_state( &self, table_path: &str, branch: Option<&str>, version: u64, ) -> Result<Dataset>
pub fn ensure_expected_version( &self, ds: &Dataset, table_key: &str, expected_version: u64, ) -> Result<()>
pub async fn reopen_for_mutation( &self, dataset_uri: &str, branch: Option<&str>, table_key: &str, expected_version: u64, ) -> Result<Dataset>
pub async fn fork_branch_from_state( &self, dataset_uri: &str, source_branch: Option<&str>, table_key: &str, source_version: u64, target_branch: &str, ) -> Result<Dataset>
pub async fn scan_batches(&self, ds: &Dataset) -> Result<Vec<RecordBatch>>
pub async fn scan_batches_for_rewrite( &self, ds: &Dataset, ) -> Result<Vec<RecordBatch>>
pub async fn scan_stream( ds: &Dataset, projection: Option<&[&str]>, filter: Option<&str>, order_by: Option<Vec<ColumnOrdering>>, with_row_id: bool, ) -> Result<DatasetRecordBatchStream>
pub async fn scan_stream_with<F>( ds: &Dataset, projection: Option<&[&str]>, filter: Option<&str>, order_by: Option<Vec<ColumnOrdering>>, with_row_id: bool, configure: F, ) -> Result<DatasetRecordBatchStream>
pub async fn scan( &self, ds: &Dataset, projection: Option<&[&str]>, filter: Option<&str>, order_by: Option<Vec<ColumnOrdering>>, ) -> Result<Vec<RecordBatch>>
pub async fn scan_with<F>( &self, ds: &Dataset, projection: Option<&[&str]>, filter: Option<&str>, order_by: Option<Vec<ColumnOrdering>>, with_row_id: bool, configure: F, ) -> Result<Vec<RecordBatch>>
pub async fn count_rows( &self, ds: &Dataset, filter: Option<String>, ) -> Result<usize>
pub fn dataset_version(&self, ds: &Dataset) -> u64
pub async fn table_state( &self, dataset_uri: &str, ds: &Dataset, ) -> Result<TableState>
pub async fn append_batch( &self, dataset_uri: &str, ds: &mut Dataset, batch: RecordBatch, ) -> Result<TableState>
pub async fn append_or_create_batch( dataset_uri: &str, dataset: Option<Dataset>, batch: RecordBatch, ) -> Result<Dataset>
pub async fn overwrite_batch( &self, dataset_uri: &str, ds: &mut Dataset, batch: RecordBatch, ) -> Result<TableState>
pub async fn overwrite_dataset( dataset_uri: &str, batch: RecordBatch, ) -> Result<Dataset>
pub async fn merge_insert_batch( &self, dataset_uri: &str, ds: Dataset, batch: RecordBatch, key_columns: Vec<String>, when_matched: WhenMatched, when_not_matched: WhenNotMatched, ) -> Result<TableState>
pub async fn merge_insert_batches( &self, dataset_uri: &str, ds: Dataset, batches: Vec<RecordBatch>, key_columns: Vec<String>, when_matched: WhenMatched, when_not_matched: WhenNotMatched, ) -> Result<TableState>
pub async fn delete_where( &self, dataset_uri: &str, ds: &mut Dataset, filter: &str, ) -> Result<DeleteState>
Source§
pub async fn stage_append(
&self,
ds: &Dataset,
batch: RecordBatch,
prior_stages: &[StagedWrite],
) -> Result<StagedWrite>
pub async fn stage_append( &self, ds: &Dataset, batch: RecordBatch, prior_stages: &[StagedWrite], ) -> Result<StagedWrite>
Stage an append: write fragment files for batch, return the
uncommitted Lance transaction plus the new fragments for
read-your-writes.
prior_stages is the slice of staged writes already accumulated
against the same dataset in the same query. Pass &[] for the
first call; pass the accumulated stages for subsequent calls. The
primitive uses this to offset row-ID assignment so chained
stage_append calls don’t produce overlapping _rowid ranges.
Mirrors scan_with_staged’s &[StagedWrite] shape — the same
slice gets passed to both.
On stable-row-id datasets we manually populate row_id_meta on
the cloned new_fragments we expose for scan_with_staged.
Lance’s InsertBuilder::execute_uncommitted produces fragments
with row_id_meta = None; row IDs are normally assigned by
Transaction::assign_row_ids during commit. Because
scan_with_staged reads the staged fragments before commit,
the scanner trips on a stable-row-id dataset
(Error::internal("Missing row id meta") from
dataset/rowids.rs:22). The transaction’s internal fragment copy
stays untouched — Lance assigns IDs there independently at commit
time, and the two ID assignments don’t have to agree because no
caller threads _rowid from the staged scan into the commit
path.
Contract: prior_stages must contain only previous
stage_append results against the same dataset. Mixing
stage_merge_insert into prior_stages would over-count because
merge_insert’s new_fragments include rewrites that don’t add
rows. The engine’s parse-time D₂′ check (per touched table: all
stage_append OR exactly one stage_merge_insert) guarantees this
upstream; on the primitive layer it’s the caller’s responsibility.
Source§
pub async fn stage_merge_insert(
&self,
ds: Dataset,
batch: RecordBatch,
key_columns: Vec<String>,
when_matched: WhenMatched,
when_not_matched: WhenNotMatched,
) -> Result<StagedWrite>
pub async fn stage_merge_insert( &self, ds: Dataset, batch: RecordBatch, key_columns: Vec<String>, when_matched: WhenMatched, when_not_matched: WhenNotMatched, ) -> Result<StagedWrite>
Stage a merge_insert (upsert): write fragment files describing the
merge result, return the uncommitted transaction plus the new
fragments. The transaction’s Operation::Update carries the
fragments-to-remove and fragments-to-add; for read-your-writes we
expose new_fragments (rows that will be visible after commit).
Contract: do not chain stage_merge_insert calls on the same
table within one query. Each call’s MergeInsertBuilder runs
against the supplied dataset’s committed view — it does not see
fragments produced by a previous staged merge on the same table.
Two chained stage_merge_inserts whose source rows share keys will
each independently produce Operation::Update transactions whose
new_fragments contain a row for the shared key. scan_with_staged
(and count_rows_with_staged) will then return both — i.e.
duplicates by key.
This is intrinsic to the underlying Lance API: there is no public
way to make MergeInsertBuilder see uncommitted fragments. The
engine’s MutationStaging accumulator works around this by
concatenating per-table batches in memory and issuing exactly
one stage_merge_insert per touched table at end-of-query (with
last-write-wins dedupe by id) — see exec/staging.rs. Direct
callers of this primitive must respect the contract themselves.
Lift path: either a Lance API extension that lets
MergeInsertBuilder accept additional staged fragments, or an
in-memory pre-merge here that folds prior staged batches into the
input stream. See docs/runs.md.
Source§
pub async fn commit_staged(
&self,
ds: Arc<Dataset>,
transaction: Transaction,
) -> Result<Dataset>
pub async fn commit_staged( &self, ds: Arc<Dataset>, transaction: Transaction, ) -> Result<Dataset>
Commit a previously-staged transaction onto ds, returning the new
dataset (with HEAD advanced). Wraps CommitBuilder::execute. Used by
the publisher at end-of-query to materialize all staged writes before
the meta-manifest commit.
Source§
pub async fn stage_overwrite(
&self,
ds: &Dataset,
batch: RecordBatch,
) -> Result<StagedWrite>
pub async fn stage_overwrite( &self, ds: &Dataset, batch: RecordBatch, ) -> Result<StagedWrite>
Stage an overwrite (write_fragments + Operation::Overwrite { schema, fragments }). Returns a StagedWrite carrying the replacement fragments. HEAD does NOT advance.
Lance shape: InsertBuilder::with_params(WriteParams { mode: Overwrite, .. }) .execute_uncommitted(vec![batch]) produces a Transaction whose
Operation::Overwrite carries the new schema + fragments. The
transaction is committed via commit_staged (same call as
stage_append).
MR-793 Phase 2: introduces this for the schema_apply rewrite path.
Lance API verified in .context/mr-793-design.md Appendix A.1.
Source§
pub async fn stage_create_btree_index(
&self,
ds: &Dataset,
columns: &[&str],
) -> Result<StagedWrite>
pub async fn stage_create_btree_index( &self, ds: &Dataset, columns: &[&str], ) -> Result<StagedWrite>
Stage a BTREE scalar index build. Returns a StagedWrite whose
transaction commits via commit_staged. HEAD does NOT advance.
Lance shape: CreateIndexBuilder::execute_uncommitted returns
IndexMetadata; we manually wrap it in Operation::CreateIndex { new_indices, removed_indices } via the public TransactionBuilder,
replicating the simple (non-segment-commit-path) branch of Lance’s
CreateIndexBuilder::execute (lance-4.0.0 src/index/create.rs:502-512).
removed_indices mirrors execute() lines 466-476: when the
build replaces an existing same-named index, those entries are
listed for tombstoning by the manifest commit.
MR-793 Phase 2: scalar index types (BTree, Inverted) are
stage-able. Vector indices are NOT (segment-commit-path requires
build_index_metadata_from_segments which is pub(crate) in
lance-4.0.0); see create_vector_index and Appendix A.3.
Source§
pub async fn stage_create_inverted_index(
&self,
ds: &Dataset,
column: &str,
) -> Result<StagedWrite>
pub async fn stage_create_inverted_index( &self, ds: &Dataset, column: &str, ) -> Result<StagedWrite>
Stage an INVERTED (FTS) scalar index build. Same shape as
stage_create_btree_index; see its docs for the Lance API
citation and contract notes.
Source§
pub async fn scan_with_staged(
&self,
ds: &Dataset,
staged: &[StagedWrite],
projection: Option<&[&str]>,
filter: Option<&str>,
) -> Result<Vec<RecordBatch>>
pub async fn scan_with_staged( &self, ds: &Dataset, staged: &[StagedWrite], projection: Option<&[&str]>, filter: Option<&str>, ) -> Result<Vec<RecordBatch>>
Run a scan with optional uncommitted staged writes visible
alongside the committed snapshot. When staged is empty this is
identical to scan(...).
Composes the visible fragment list as committed - removed + new:
the committed manifest’s fragments, minus any fragment IDs that
staged Operation::Updates (merge_insert rewrites) have superseded,
plus the staged new/updated fragments. Without the removed
filter, a merge_insert that rewrites an existing fragment would
surface twice — once via the original committed fragment, once via
the rewrite in new_fragments.
Filter contract is incomplete on staged fragments. When filter
is Some(...), Lance pushes the predicate to per-fragment scans
with stats-based pruning. Uncommitted fragments produced by
write_fragments_internal lack the per-column statistics that
committed fragments carry; Lance’s optimizer drops them from the
filtered scan even when their data would match. Staged-fragment
rows are silently absent from the result. scanner.use_stats(false)
does not fix this in lance 4.0.0. Callers needing correct filtered
reads against staged data should use a different strategy — the
engine’s MutationStaging accumulator unions in-memory pending
batches with the committed scan via DataFusion MemTable (see
scan_with_pending).
This method remains on the surface for primitive-level testing (basic stage + scan correctness without filters works) and for callers that don’t need filter pushdown.
Source§
pub async fn scan_with_pending(
&self,
committed_ds: &Dataset,
pending_batches: &[RecordBatch],
pending_schema: Option<SchemaRef>,
projection: Option<&[&str]>,
filter: Option<&str>,
key_column: Option<&str>,
) -> Result<Vec<RecordBatch>>
pub async fn scan_with_pending( &self, committed_ds: &Dataset, pending_batches: &[RecordBatch], pending_schema: Option<SchemaRef>, projection: Option<&[&str]>, filter: Option<&str>, key_column: Option<&str>, ) -> Result<Vec<RecordBatch>>
Scan committed via Lance + apply the same filter to in-memory
pending batches via DataFusion MemTable, concat the two result
streams. The replacement for scan_with_staged in engine code:
the staged-write writer accumulates input batches in memory and
unions them with the committed snapshot at read time,
sidestepping the Scanner::with_fragments filter-pushdown
limitation documented on scan_with_staged.
committed_ds should be opened at the pre-mutation
expected_version (the same version captured in MutationStaging::expected_versions
at first touch of the table). pending_batches are the per-table
accumulator’s batches in their input shape. pending_schema is
the schema of the accumulated batches; passing None falls back
to the schema of the first pending batch.
filter is the Lance / DataFusion SQL predicate. It is applied
to both sides — Lance pushes it down on the committed side; the
pending side runs it through a fresh DataFusion SessionContext
with the batches registered as a MemTable named pending.
key_column controls how committed and pending are unioned:
None (union semantics): every committed row that matches the filter and every pending row that matches the filter is returned. Correct when committed and pending cannot share a primary key — e.g., Append-mode loads with ULID-generated ids, or any read where pending hasn’t been used to update committed rows.

Some(col) (merge / shadow semantics): committed rows whose col value appears in any pending batch are EXCLUDED from the result; only pending’s view of those rows is returned. Required for Merge-mode reads (e.g., execute_update on the engine path) so a chained update doesn’t see stale committed values that a prior op already updated in pending. Without this, a predicate like where age > 30 can match a row that an earlier set age = 20 already moved out of range.
When pending_batches is empty this delegates to the regular
scan path.
Source§
pub async fn count_rows_with_staged(
&self,
ds: &Dataset,
staged: &[StagedWrite],
filter: Option<String>,
) -> Result<usize>
pub async fn count_rows_with_staged( &self, ds: &Dataset, staged: &[StagedWrite], filter: Option<String>, ) -> Result<usize>
count_rows variant that respects staged writes. Used for
edge-cardinality validation that needs to see staged edges before
commit. Same committed - removed + new composition as
scan_with_staged.
pub async fn has_btree_index(&self, ds: &Dataset, column: &str) -> Result<bool>
pub async fn has_fts_index(&self, ds: &Dataset, column: &str) -> Result<bool>
pub async fn has_vector_index(&self, ds: &Dataset, column: &str) -> Result<bool>
pub async fn create_btree_index( &self, ds: &mut Dataset, columns: &[&str], ) -> Result<()>
pub async fn create_inverted_index( &self, ds: &mut Dataset, column: &str, ) -> Result<()>
pub async fn create_vector_index( &self, ds: &mut Dataset, column: &str, ) -> Result<()>
pub async fn create_empty_dataset( dataset_uri: &str, schema: &SchemaRef, ) -> Result<Dataset>
pub async fn first_row_id_for_filter( &self, ds: &Dataset, filter: &str, ) -> Result<Option<u64>>
pub async fn write_dataset( dataset_uri: &str, batch: RecordBatch, ) -> Result<Dataset>
Trait Implementations§
Source§impl Clone for TableStore
impl Clone for TableStore
Source§fn clone(&self) -> TableStore
fn clone(&self) -> TableStore
1.0.0 (const: unstable) · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Source§
impl Debug for TableStore
impl Debug for TableStore
Source§impl TableStorage for TableStore
impl TableStorage for TableStore
fn open_snapshot_at_entry<'life0, 'life1, 'async_trait>(
&'life0 self,
entry: &'life1 SubTableEntry,
) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn open_snapshot_at_table<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
snapshot: &'life1 Snapshot,
table_key: &'life2 str,
) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn open_dataset_head<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
dataset_uri: &'life1 str,
branch: Option<&'life2 str>,
) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn open_dataset_head_for_write<'life0, 'life1, 'life2, 'life3, 'async_trait>(
&'life0 self,
table_key: &'life1 str,
dataset_uri: &'life2 str,
branch: Option<&'life3 str>,
) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
fn open_dataset_at_state<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
table_path: &'life1 str,
branch: Option<&'life2 str>,
version: u64,
) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn fork_branch_from_state<'life0, 'life1, 'life2, 'life3, 'life4, 'async_trait>(
&'life0 self,
dataset_uri: &'life1 str,
source_branch: Option<&'life2 str>,
table_key: &'life3 str,
source_version: u64,
target_branch: &'life4 str,
) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
'life4: 'async_trait,
fn delete_branch<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
dataset_uri: &'life1 str,
branch: &'life2 str,
) -> Pin<Box<dyn Future<Output = Result<()>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn reopen_for_mutation<'life0, 'life1, 'life2, 'life3, 'async_trait>(
&'life0 self,
dataset_uri: &'life1 str,
branch: Option<&'life2 str>,
table_key: &'life3 str,
expected_version: u64,
) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
fn ensure_expected_version( &self, snapshot: &SnapshotHandle, table_key: &str, expected_version: u64, ) -> Result<()>
fn scan<'life0, 'life1, 'life2, 'life3, 'life4, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
projection: Option<&'life2 [&'life3 str]>,
filter: Option<&'life4 str>,
order_by: Option<Vec<ColumnOrdering>>,
) -> Pin<Box<dyn Future<Output = Result<Vec<RecordBatch>>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
'life4: 'async_trait,
fn scan_with_row_id<'life0, 'life1, 'life2, 'life3, 'life4, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
projection: Option<&'life2 [&'life3 str]>,
filter: Option<&'life4 str>,
order_by: Option<Vec<ColumnOrdering>>,
with_row_id: bool,
) -> Pin<Box<dyn Future<Output = Result<Vec<RecordBatch>>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
'life4: 'async_trait,
fn scan_batches<'life0, 'life1, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
) -> Pin<Box<dyn Future<Output = Result<Vec<RecordBatch>>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn scan_batches_for_rewrite<'life0, 'life1, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
) -> Pin<Box<dyn Future<Output = Result<Vec<RecordBatch>>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn count_rows<'life0, 'life1, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
filter: Option<String>,
) -> Pin<Box<dyn Future<Output = Result<usize>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn count_rows_with_staged<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
staged: &'life2 [StagedHandle],
filter: Option<String>,
) -> Pin<Box<dyn Future<Output = Result<usize>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn scan_with_staged<'life0, 'life1, 'life2, 'life3, 'life4, 'life5, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
staged: &'life2 [StagedHandle],
projection: Option<&'life3 [&'life4 str]>,
filter: Option<&'life5 str>,
) -> Pin<Box<dyn Future<Output = Result<Vec<RecordBatch>>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
'life4: 'async_trait,
'life5: 'async_trait,
fn scan_with_pending<'life0, 'life1, 'life2, 'life3, 'life4, 'life5, 'life6, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
pending: &'life2 [RecordBatch],
pending_schema: Option<SchemaRef>,
projection: Option<&'life3 [&'life4 str]>,
filter: Option<&'life5 str>,
key_column: Option<&'life6 str>,
) -> Pin<Box<dyn Future<Output = Result<Vec<RecordBatch>>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
'life4: 'async_trait,
'life5: 'async_trait,
'life6: 'async_trait,
fn first_row_id_for_filter<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
filter: &'life2 str,
) -> Pin<Box<dyn Future<Output = Result<Option<u64>>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn table_state<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
dataset_uri: &'life1 str,
snapshot: &'life2 SnapshotHandle,
) -> Pin<Box<dyn Future<Output = Result<TableState>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn stage_append<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
batch: RecordBatch,
prior_stages: &'life2 [StagedHandle],
) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn stage_merge_insert<'life0, 'async_trait>(
&'life0 self,
snapshot: SnapshotHandle,
batch: RecordBatch,
key_columns: Vec<String>,
when_matched: WhenMatched,
when_not_matched: WhenNotMatched,
) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
fn commit_staged<'life0, 'async_trait>(
&'life0 self,
snapshot: SnapshotHandle,
staged: StagedHandle,
) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
Source§fn stage_overwrite<'life0, 'life1, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
batch: RecordBatch,
) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn stage_overwrite<'life0, 'life1, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
batch: RecordBatch,
) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
Source§fn stage_create_btree_index<'life0, 'life1, 'life2, 'life3, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
columns: &'life2 [&'life3 str],
) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
fn stage_create_btree_index<'life0, 'life1, 'life2, 'life3, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
columns: &'life2 [&'life3 str],
) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
Source§fn stage_create_inverted_index<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
column: &'life2 str,
) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn stage_create_inverted_index<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
column: &'life2 str,
) -> Pin<Box<dyn Future<Output = Result<StagedHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn append_batch<'life0, 'life1, 'async_trait>(
&'life0 self,
dataset_uri: &'life1 str,
snapshot: SnapshotHandle,
batch: RecordBatch,
) -> Pin<Box<dyn Future<Output = Result<(SnapshotHandle, TableState)>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn merge_insert_batches<'life0, 'life1, 'async_trait>(
&'life0 self,
dataset_uri: &'life1 str,
snapshot: SnapshotHandle,
batches: Vec<RecordBatch>,
key_columns: Vec<String>,
when_matched: WhenMatched,
when_not_matched: WhenNotMatched,
) -> Pin<Box<dyn Future<Output = Result<TableState>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn overwrite_batch<'life0, 'life1, 'async_trait>(
&'life0 self,
dataset_uri: &'life1 str,
snapshot: SnapshotHandle,
batch: RecordBatch,
) -> Pin<Box<dyn Future<Output = Result<(SnapshotHandle, TableState)>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn delete_where<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
dataset_uri: &'life1 str,
snapshot: SnapshotHandle,
filter: &'life2 str,
) -> Pin<Box<dyn Future<Output = Result<(SnapshotHandle, DeleteState)>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn has_btree_index<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
column: &'life2 str,
) -> Pin<Box<dyn Future<Output = Result<bool>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn has_fts_index<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
column: &'life2 str,
) -> Pin<Box<dyn Future<Output = Result<bool>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn has_vector_index<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
column: &'life2 str,
) -> Pin<Box<dyn Future<Output = Result<bool>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn create_btree_index<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
snapshot: SnapshotHandle,
columns: &'life1 [&'life2 str],
) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn create_inverted_index<'life0, 'life1, 'async_trait>(
&'life0 self,
snapshot: SnapshotHandle,
column: &'life1 str,
) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn create_vector_index<'life0, 'life1, 'async_trait>(
&'life0 self,
snapshot: SnapshotHandle,
column: &'life1 str,
) -> Pin<Box<dyn Future<Output = Result<SnapshotHandle>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn root_uri(&self) -> &str
fn dataset_uri(&self, table_path: &str) -> String
fn scan_stream<'life0, 'life1, 'life2, 'life3, 'life4, 'async_trait>(
&'life0 self,
snapshot: &'life1 SnapshotHandle,
projection: Option<&'life2 [&'life3 str]>,
filter: Option<&'life4 str>,
order_by: Option<Vec<ColumnOrdering>>,
with_row_id: bool,
) -> Pin<Box<dyn Future<Output = Result<DatasetRecordBatchStream>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
'life4: 'async_trait,
Auto Trait Implementations§
impl Freeze for TableStore
impl RefUnwindSafe for TableStore
impl Send for TableStore
impl Sync for TableStore
impl Unpin for TableStore
impl UnsafeUnpin for TableStore
impl UnwindSafe for TableStore
Blanket Implementations§
Source§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for Twhere
T: Clone,
impl<T> CloneToUninit for Twhere
T: Clone,
Source§impl<T> Downcast for Twhere
T: Any,
impl<T> Downcast for Twhere
T: Any,
Source§fn into_any(self: Box<T>) -> Box<dyn Any>
fn into_any(self: Box<T>) -> Box<dyn Any>
Converts Box<dyn Trait> (where Trait: Downcast) to Box<dyn Any>, which can then be
downcast into Box<dyn ConcreteType> where ConcreteType implements Trait.
Source§
fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
Converts Rc<Trait> (where Trait: Downcast) to Rc<Any>, which can then be further
downcast into Rc<ConcreteType> where ConcreteType implements Trait.
Source§
fn as_any(&self) -> &(dyn Any + 'static)
fn as_any(&self) -> &(dyn Any + 'static)
Converts &Trait (where Trait: Downcast) to &Any. This is needed since Rust cannot
generate &Any’s vtable from &Trait’s.
Source§
fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
Converts &mut Trait (where Trait: Downcast) to &mut Any. This is needed since Rust cannot
generate &mut Any’s vtable from &mut Trait’s.
Source§
impl<T> DowncastSend for T
impl<T> DowncastSend for T
Source§impl<T> DowncastSync for T
impl<T> DowncastSync for T
Source§impl<T> FmtForward for T
impl<T> FmtForward for T
Source§fn fmt_binary(self) -> FmtBinary<Self>where
Self: Binary,
fn fmt_binary(self) -> FmtBinary<Self>where
Self: Binary,
self to use its Binary implementation when Debug-formatted.Source§fn fmt_display(self) -> FmtDisplay<Self>where
Self: Display,
fn fmt_display(self) -> FmtDisplay<Self>where
Self: Display,
self to use its Display implementation when
Debug-formatted.Source§fn fmt_lower_exp(self) -> FmtLowerExp<Self>where
Self: LowerExp,
fn fmt_lower_exp(self) -> FmtLowerExp<Self>where
Self: LowerExp,
self to use its LowerExp implementation when
Debug-formatted.Source§fn fmt_lower_hex(self) -> FmtLowerHex<Self>where
Self: LowerHex,
fn fmt_lower_hex(self) -> FmtLowerHex<Self>where
Self: LowerHex,
self to use its LowerHex implementation when
Debug-formatted.Source§fn fmt_octal(self) -> FmtOctal<Self>where
Self: Octal,
fn fmt_octal(self) -> FmtOctal<Self>where
Self: Octal,
self to use its Octal implementation when Debug-formatted.Source§fn fmt_pointer(self) -> FmtPointer<Self>where
Self: Pointer,
fn fmt_pointer(self) -> FmtPointer<Self>where
Self: Pointer,
self to use its Pointer implementation when
Debug-formatted.Source§fn fmt_upper_exp(self) -> FmtUpperExp<Self>where
Self: UpperExp,
fn fmt_upper_exp(self) -> FmtUpperExp<Self>where
Self: UpperExp,
self to use its UpperExp implementation when
Debug-formatted.Source§fn fmt_upper_hex(self) -> FmtUpperHex<Self>where
Self: UpperHex,
fn fmt_upper_hex(self) -> FmtUpperHex<Self>where
Self: UpperHex,
self to use its UpperHex implementation when
Debug-formatted.Source§impl<T> Instrument for T
impl<T> Instrument for T
Source§fn instrument(self, span: Span) -> Instrumented<Self>
fn instrument(self, span: Span) -> Instrumented<Self>
Source§fn in_current_span(self) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read moreSource§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read moreSource§impl<T> Pipe for Twhere
T: ?Sized,
impl<T> Pipe for Twhere
T: ?Sized,
Source§fn pipe<R>(self, func: impl FnOnce(Self) -> R) -> Rwhere
Self: Sized,
fn pipe<R>(self, func: impl FnOnce(Self) -> R) -> Rwhere
Self: Sized,
Source§fn pipe_ref<'a, R>(&'a self, func: impl FnOnce(&'a Self) -> R) -> Rwhere
R: 'a,
fn pipe_ref<'a, R>(&'a self, func: impl FnOnce(&'a Self) -> R) -> Rwhere
R: 'a,
self and passes that borrow into the pipe function. Read moreSource§fn pipe_ref_mut<'a, R>(&'a mut self, func: impl FnOnce(&'a mut Self) -> R) -> Rwhere
R: 'a,
fn pipe_ref_mut<'a, R>(&'a mut self, func: impl FnOnce(&'a mut Self) -> R) -> Rwhere
R: 'a,
self and passes that borrow into the pipe function. Read moreSource§fn pipe_borrow<'a, B, R>(&'a self, func: impl FnOnce(&'a B) -> R) -> R
fn pipe_borrow<'a, B, R>(&'a self, func: impl FnOnce(&'a B) -> R) -> R
Source§fn pipe_borrow_mut<'a, B, R>(
&'a mut self,
func: impl FnOnce(&'a mut B) -> R,
) -> R
fn pipe_borrow_mut<'a, B, R>( &'a mut self, func: impl FnOnce(&'a mut B) -> R, ) -> R
Source§fn pipe_as_ref<'a, U, R>(&'a self, func: impl FnOnce(&'a U) -> R) -> R
fn pipe_as_ref<'a, U, R>(&'a self, func: impl FnOnce(&'a U) -> R) -> R
self, then passes self.as_ref() into the pipe function.Source§fn pipe_as_mut<'a, U, R>(&'a mut self, func: impl FnOnce(&'a mut U) -> R) -> R
fn pipe_as_mut<'a, U, R>(&'a mut self, func: impl FnOnce(&'a mut U) -> R) -> R
self, then passes self.as_mut() into the pipe
function.Source§fn pipe_deref<'a, T, R>(&'a self, func: impl FnOnce(&'a T) -> R) -> R
fn pipe_deref<'a, T, R>(&'a self, func: impl FnOnce(&'a T) -> R) -> R
self, then passes self.deref() into the pipe function.Source§impl<T> Pointable for T
impl<T> Pointable for T
Source§impl<T> PolicyExt for Twhere
T: ?Sized,
impl<T> PolicyExt for Twhere
T: ?Sized,
Source§impl<T> Tap for T
impl<T> Tap for T
Source§fn tap_borrow<B>(self, func: impl FnOnce(&B)) -> Self
fn tap_borrow<B>(self, func: impl FnOnce(&B)) -> Self
Borrow<B> of a value. Read moreSource§fn tap_borrow_mut<B>(self, func: impl FnOnce(&mut B)) -> Self
fn tap_borrow_mut<B>(self, func: impl FnOnce(&mut B)) -> Self
BorrowMut<B> of a value. Read moreSource§fn tap_ref<R>(self, func: impl FnOnce(&R)) -> Self
fn tap_ref<R>(self, func: impl FnOnce(&R)) -> Self
AsRef<R> view of a value. Read moreSource§fn tap_ref_mut<R>(self, func: impl FnOnce(&mut R)) -> Self
fn tap_ref_mut<R>(self, func: impl FnOnce(&mut R)) -> Self
AsMut<R> view of a value. Read moreSource§fn tap_deref<T>(self, func: impl FnOnce(&T)) -> Self
fn tap_deref<T>(self, func: impl FnOnce(&T)) -> Self
Deref::Target of a value. Read moreSource§fn tap_deref_mut<T>(self, func: impl FnOnce(&mut T)) -> Self
fn tap_deref_mut<T>(self, func: impl FnOnce(&mut T)) -> Self
Deref::Target of a value. Read moreSource§fn tap_dbg(self, func: impl FnOnce(&Self)) -> Self
fn tap_dbg(self, func: impl FnOnce(&Self)) -> Self
.tap() only in debug builds, and is erased in release builds.Source§fn tap_mut_dbg(self, func: impl FnOnce(&mut Self)) -> Self
fn tap_mut_dbg(self, func: impl FnOnce(&mut Self)) -> Self
.tap_mut() only in debug builds, and is erased in release
builds.Source§fn tap_borrow_dbg<B>(self, func: impl FnOnce(&B)) -> Self
fn tap_borrow_dbg<B>(self, func: impl FnOnce(&B)) -> Self
.tap_borrow() only in debug builds, and is erased in release
builds.Source§fn tap_borrow_mut_dbg<B>(self, func: impl FnOnce(&mut B)) -> Self
fn tap_borrow_mut_dbg<B>(self, func: impl FnOnce(&mut B)) -> Self
.tap_borrow_mut() only in debug builds, and is erased in release
builds.Source§fn tap_ref_dbg<R>(self, func: impl FnOnce(&R)) -> Self
fn tap_ref_dbg<R>(self, func: impl FnOnce(&R)) -> Self
.tap_ref() only in debug builds, and is erased in release
builds.Source§fn tap_ref_mut_dbg<R>(self, func: impl FnOnce(&mut R)) -> Self
fn tap_ref_mut_dbg<R>(self, func: impl FnOnce(&mut R)) -> Self
.tap_ref_mut() only in debug builds, and is erased in release
builds.Source§fn tap_deref_dbg<T>(self, func: impl FnOnce(&T)) -> Self
fn tap_deref_dbg<T>(self, func: impl FnOnce(&T)) -> Self
.tap_deref() only in debug builds, and is erased in release
builds.