manifold/table.rs

use crate::db::TransactionGuard;
use crate::sealed::Sealed;
use crate::tree_store::{
    AccessGuardMutInPlace, Btree, BtreeExtractIf, BtreeHeader, BtreeMut, BtreeRangeIter,
    MAX_PAIR_LENGTH, MAX_VALUE_LENGTH, PageHint, PageNumber, PageTrackerPolicy, RawBtree,
    TransactionalMemory,
};
use crate::types::{Key, MutInPlaceValue, Value};
use crate::{AccessGuard, AccessGuardMut, StorageError, WriteTransaction};
use crate::{Result, TableHandle};
use std::borrow::Borrow;
use std::fmt::{Debug, Formatter};
use std::marker::PhantomData;
use std::ops::RangeBounds;
use std::sync::{Arc, Mutex};

// Chunk size for bulk operations - balances memory usage and performance
const BULK_INSERT_CHUNK_SIZE: usize = 10_000;

/// Informational storage stats about a table
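///
/// # Examples
///
/// A brief sketch of reading these stats, mirroring the setup used by the other examples in
/// this module (and assuming `ReadableTableMetadata`, which provides `stats()`, is re-exported
/// at the crate root):
///
/// ```rust,no_run
/// # use manifold::*;
/// # const TABLE: TableDefinition<u64, &str> = TableDefinition::new("data");
/// # fn example() -> Result<(), Error> {
/// # let db = Database::create("example.db")?;
/// # let txn = db.begin_write()?;
/// # let table = txn.open_table(TABLE)?;
/// let stats = table.stats()?;
/// println!("height: {}", stats.tree_height());
/// println!("stored bytes: {}", stats.stored_bytes());
/// # Ok(())
/// # }
/// ```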
#[derive(Debug)]
pub struct TableStats {
    pub(crate) tree_height: u32,
    pub(crate) leaf_pages: u64,
    pub(crate) branch_pages: u64,
    pub(crate) stored_leaf_bytes: u64,
    pub(crate) metadata_bytes: u64,
    pub(crate) fragmented_bytes: u64,
}

impl TableStats {
    /// Maximum traversal distance to reach the deepest (key, value) pair in the table
    pub fn tree_height(&self) -> u32 {
        self.tree_height
    }

    /// Number of leaf pages that store user data
    pub fn leaf_pages(&self) -> u64 {
        self.leaf_pages
    }

    /// Number of branch pages in the btree that store user data
    pub fn branch_pages(&self) -> u64 {
        self.branch_pages
    }

    /// Number of bytes consumed by keys and values that have been inserted.
    /// Does not include indexing overhead
    pub fn stored_bytes(&self) -> u64 {
        self.stored_leaf_bytes
    }

    /// Number of bytes consumed by keys in internal branch pages, plus other metadata
    pub fn metadata_bytes(&self) -> u64 {
        self.metadata_bytes
    }

    /// Number of bytes consumed by fragmentation, both in data pages and internal metadata tables
    pub fn fragmented_bytes(&self) -> u64 {
        self.fragmented_bytes
    }
}

/// A table containing key-value mappings
pub struct Table<'txn, K: Key + 'static, V: Value + 'static> {
    name: String,
    transaction: &'txn WriteTransaction,
    tree: BtreeMut<'txn, K, V>,
}

impl<K: Key + 'static, V: Value + 'static> TableHandle for Table<'_, K, V> {
    fn name(&self) -> &str {
        &self.name
    }
}

impl<'txn, K: Key + 'static, V: Value + 'static> Table<'txn, K, V> {
    pub(crate) fn new(
        name: &str,
        table_root: Option<BtreeHeader>,
        freed_pages: Arc<Mutex<Vec<PageNumber>>>,
        allocated_pages: Arc<Mutex<PageTrackerPolicy>>,
        mem: Arc<TransactionalMemory>,
        transaction: &'txn WriteTransaction,
    ) -> Table<'txn, K, V> {
        Table {
            name: name.to_string(),
            transaction,
            tree: BtreeMut::new(
                table_root,
                transaction.transaction_guard(),
                mem,
                freed_pages,
                allocated_pages,
            ),
        }
    }

    #[allow(dead_code)]
    pub(crate) fn print_debug(&self, include_values: bool) -> Result {
        self.tree.print_debug(include_values)
    }

    /// Returns an accessor, which allows mutation, to the value corresponding to the given key
    pub fn get_mut<'k>(
        &mut self,
        key: impl Borrow<K::SelfType<'k>>,
    ) -> Result<Option<AccessGuardMut<'_, V>>> {
        self.tree.get_mut(key.borrow())
    }

    /// Removes and returns the first key-value pair in the table
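    ///
    /// # Examples
    ///
    /// A minimal sketch, following the setup used by the other examples in this module:
    ///
    /// ```rust,no_run
    /// # use manifold::{Database, TableDefinition, Error};
    /// # const TABLE: TableDefinition<u64, &str> = TableDefinition::new("data");
    /// # fn example() -> Result<(), Error> {
    /// # let db = Database::create("example.db")?;
    /// # let txn = db.begin_write()?;
    /// # let mut table = txn.open_table(TABLE)?;
    /// table.insert(&1, "one")?;
    /// table.insert(&2, "two")?;
    /// if let Some((key, value)) = table.pop_first()? {
    ///     assert_eq!(key.value(), 1);
    ///     assert_eq!(value.value(), "one");
    /// }
    /// # Ok(())
    /// # }
    /// ```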
    pub fn pop_first(&mut self) -> Result<Option<(AccessGuard<'_, K>, AccessGuard<'_, V>)>> {
        // TODO: optimize this
        let first = self
            .iter()?
            .next()
            .map(|x| x.map(|(key, _)| K::as_bytes(&key.value()).as_ref().to_vec()));
        if let Some(owned_key) = first {
            let owned_key = owned_key?;
            let key = K::from_bytes(&owned_key);
            let value = self.remove(&key)?.unwrap();
            drop(key);
            Ok(Some((AccessGuard::with_owned_value(owned_key), value)))
        } else {
            Ok(None)
        }
    }

    /// Removes and returns the last key-value pair in the table
    pub fn pop_last(&mut self) -> Result<Option<(AccessGuard<'_, K>, AccessGuard<'_, V>)>> {
        // TODO: optimize this
        let last = self
            .iter()?
            .next_back()
            .map(|x| x.map(|(key, _)| K::as_bytes(&key.value()).as_ref().to_vec()));
        if let Some(owned_key) = last {
            let owned_key = owned_key?;
            let key = K::from_bytes(&owned_key);
            let value = self.remove(&key)?.unwrap();
            drop(key);
            Ok(Some((AccessGuard::with_owned_value(owned_key), value)))
        } else {
            Ok(None)
        }
    }

    /// Applies `predicate` to all key-value pairs. All entries for which
    /// `predicate` evaluates to `true` are returned in an iterator, and those which are read from the iterator are removed
    ///
    /// Note: values not read from the iterator will not be removed
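    ///
    /// # Examples
    ///
    /// A minimal sketch; only the entries actually drained from the iterator are removed:
    ///
    /// ```rust,no_run
    /// # use manifold::{Database, TableDefinition, Error};
    /// # const TABLE: TableDefinition<u64, &str> = TableDefinition::new("data");
    /// # fn example() -> Result<(), Error> {
    /// # let db = Database::create("example.db")?;
    /// # let txn = db.begin_write()?;
    /// # let mut table = txn.open_table(TABLE)?;
    /// table.insert(&1, "one")?;
    /// table.insert(&2, "two")?;
    /// // Drain (and thereby remove) every entry with an odd key
    /// for entry in table.extract_if(|key, _value| key % 2 == 1)? {
    ///     let (key, value) = entry?;
    ///     println!("removed {} = {}", key.value(), value.value());
    /// }
    /// # Ok(())
    /// # }
    /// ```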
    pub fn extract_if<F: for<'f> FnMut(K::SelfType<'f>, V::SelfType<'f>) -> bool>(
        &mut self,
        predicate: F,
    ) -> Result<ExtractIf<'_, K, V, F>> {
        self.extract_from_if::<K::SelfType<'_>, F>(.., predicate)
    }

    /// Applies `predicate` to all key-value pairs in the specified range. All entries for which
    /// `predicate` evaluates to `true` are returned in an iterator, and those which are read from the iterator are removed
    ///
    /// Note: values not read from the iterator will not be removed
    pub fn extract_from_if<'a, KR, F: for<'f> FnMut(K::SelfType<'f>, V::SelfType<'f>) -> bool>(
        &mut self,
        range: impl RangeBounds<KR> + 'a,
        predicate: F,
    ) -> Result<ExtractIf<'_, K, V, F>>
    where
        KR: Borrow<K::SelfType<'a>> + 'a,
    {
        self.tree
            .extract_from_if(&range, predicate)
            .map(ExtractIf::new)
    }

    /// Applies `predicate` to all key-value pairs. All entries for which
    /// `predicate` evaluates to `false` are removed.
    ///
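    /// # Examples
    ///
    /// A minimal sketch: after the call, only entries for which the predicate returned `true` remain.
    ///
    /// ```rust,no_run
    /// # use manifold::{Database, TableDefinition, Error};
    /// # const TABLE: TableDefinition<u64, &str> = TableDefinition::new("data");
    /// # fn example() -> Result<(), Error> {
    /// # let db = Database::create("example.db")?;
    /// # let txn = db.begin_write()?;
    /// # let mut table = txn.open_table(TABLE)?;
    /// table.insert(&1, "one")?;
    /// table.insert(&2, "two")?;
    /// // Keep only entries with even keys
    /// table.retain(|key, _value| key % 2 == 0)?;
    /// # Ok(())
    /// # }
    /// ```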
    pub fn retain<F: for<'f> FnMut(K::SelfType<'f>, V::SelfType<'f>) -> bool>(
        &mut self,
        predicate: F,
    ) -> Result {
        self.tree.retain_in::<K::SelfType<'_>, F>(predicate, ..)
    }

    /// Applies `predicate` to all key-value pairs in the specified range. All entries for which
    /// `predicate` evaluates to `false` are removed.
    pub fn retain_in<'a, KR, F: for<'f> FnMut(K::SelfType<'f>, V::SelfType<'f>) -> bool>(
        &mut self,
        range: impl RangeBounds<KR> + 'a,
        predicate: F,
    ) -> Result
    where
        KR: Borrow<K::SelfType<'a>> + 'a,
    {
        self.tree.retain_in(predicate, range)
    }

    /// Inserts a mapping from the given key to the given value
    ///
    /// If the key is already present, its value is replaced
    ///
    /// Returns the old value if the key was already present in the table, otherwise `None`
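    ///
    /// # Examples
    ///
    /// A minimal sketch of the replace semantics:
    ///
    /// ```rust,no_run
    /// # use manifold::{Database, TableDefinition, Error};
    /// # const TABLE: TableDefinition<u64, &str> = TableDefinition::new("data");
    /// # fn example() -> Result<(), Error> {
    /// # let db = Database::create("example.db")?;
    /// # let txn = db.begin_write()?;
    /// # let mut table = txn.open_table(TABLE)?;
    /// assert!(table.insert(&1, "one")?.is_none());
    /// let old = table.insert(&1, "uno")?;
    /// assert_eq!(old.unwrap().value(), "one");
    /// # Ok(())
    /// # }
    /// ```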
    pub fn insert<'k, 'v>(
        &mut self,
        key: impl Borrow<K::SelfType<'k>>,
        value: impl Borrow<V::SelfType<'v>>,
    ) -> Result<Option<AccessGuard<'_, V>>> {
        let value_len = V::as_bytes(value.borrow()).as_ref().len();
        if value_len > MAX_VALUE_LENGTH {
            return Err(StorageError::ValueTooLarge(value_len));
        }
        let key_len = K::as_bytes(key.borrow()).as_ref().len();
        if key_len > MAX_VALUE_LENGTH {
            return Err(StorageError::ValueTooLarge(key_len));
        }
        if value_len + key_len > MAX_PAIR_LENGTH {
            return Err(StorageError::ValueTooLarge(value_len + key_len));
        }
        self.tree.insert(key.borrow(), value.borrow())
    }

    /// Removes the given key
    ///
    /// Returns the old value, if the key was present in the table
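    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```rust,no_run
    /// # use manifold::{Database, TableDefinition, Error};
    /// # const TABLE: TableDefinition<u64, &str> = TableDefinition::new("data");
    /// # fn example() -> Result<(), Error> {
    /// # let db = Database::create("example.db")?;
    /// # let txn = db.begin_write()?;
    /// # let mut table = txn.open_table(TABLE)?;
    /// table.insert(&1, "one")?;
    /// assert_eq!(table.remove(&1)?.unwrap().value(), "one");
    /// assert!(table.remove(&1)?.is_none());
    /// # Ok(())
    /// # }
    /// ```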
    pub fn remove<'a>(
        &mut self,
        key: impl Borrow<K::SelfType<'a>>,
    ) -> Result<Option<AccessGuard<'_, V>>> {
        self.tree.remove(key.borrow())
    }

    /// Bulk insert optimized for loading large datasets
    ///
    /// This method provides significant performance improvements over individual
    /// `insert()` calls for bulk data loading scenarios.
    ///
    /// # Arguments
    ///
    /// * `items` - Iterator of (key, value) pairs to insert
    /// * `sorted` - Hint indicating whether items are already sorted by key.
    ///   Set to `true` if you know the data is sorted for optimal performance.
    ///
    /// # Performance
    ///
    /// - **Sorted data**: 2-3x faster than individual inserts (uses optimized bulk construction)
    /// - **Unsorted data**: 1.5-2x faster than individual inserts (uses batched insertion with sorting)
    ///
    /// # Returns
    ///
    /// Returns the total number of items inserted
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Any key or value exceeds maximum size limits
    /// - Storage errors occur during insertion
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// # use manifold::{Database, TableDefinition, Error};
    /// # const TABLE: TableDefinition<u64, &str> = TableDefinition::new("data");
    /// # fn example() -> Result<(), Error> {
    /// # let db = Database::create("example.db")?;
    /// # let txn = db.begin_write()?;
    /// # let mut table = txn.open_table(TABLE)?;
    /// // Pre-sorted data (best performance)
    /// let sorted_data = vec![(1u64, "one"), (2u64, "two"), (3u64, "three")];
    /// let count = table.insert_bulk(sorted_data.into_iter(), true)?;
    ///
    /// // Unsorted data (still faster than individual inserts)
    /// let unsorted_data = vec![(10u64, "ten"), (5u64, "five"), (7u64, "seven")];
    /// let count = table.insert_bulk(unsorted_data.into_iter(), false)?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn insert_bulk<'i, I>(&mut self, items: I, sorted: bool) -> Result<usize>
    where
        I: IntoIterator<Item = (K::SelfType<'i>, V::SelfType<'i>)>,
    {
        if sorted {
            self.insert_bulk_sorted(items)
        } else {
            self.insert_bulk_unsorted(items)
        }
    }

    /// Bulk insert for pre-sorted data (internal implementation)
    ///
    /// This uses an optimized insertion strategy that assumes data arrives
    /// in sorted order, reducing tree rebalancing overhead.
    fn insert_bulk_sorted<'i, I>(&mut self, items: I) -> Result<usize>
    where
        I: IntoIterator<Item = (K::SelfType<'i>, V::SelfType<'i>)>,
    {
        let mut count = 0usize;

        // For sorted data, we can insert directly without buffering
        // The B-tree will naturally build from left to right with minimal rebalancing
        for (key, value) in items {
            self.insert(&key, &value)?;
            count += 1;
        }

        Ok(count)
    }

    /// Bulk insert for unsorted data (internal implementation)
    ///
    /// This collects items into chunks, sorts each chunk, then inserts
    /// in order to improve cache locality and reduce random tree traversals.
    fn insert_bulk_unsorted<'i, I>(&mut self, items: I) -> Result<usize>
    where
        I: IntoIterator<Item = (K::SelfType<'i>, V::SelfType<'i>)>,
    {
        let mut total_count = 0usize;
        let mut chunk = Vec::with_capacity(BULK_INSERT_CHUNK_SIZE);

        for (key, value) in items {
            // Serialize key and value to owned bytes for sorting
            let key_bytes = K::as_bytes(&key).as_ref().to_vec();
            let value_bytes = V::as_bytes(&value).as_ref().to_vec();

            chunk.push((key_bytes, value_bytes));

            // When chunk is full, sort and insert
            if chunk.len() >= BULK_INSERT_CHUNK_SIZE {
                total_count += self.insert_sorted_chunk(&mut chunk)?;
                chunk.clear();
            }
        }

        // Insert remaining items
        if !chunk.is_empty() {
            total_count += self.insert_sorted_chunk(&mut chunk)?;
        }

        Ok(total_count)
    }

    /// Helper to sort and insert a chunk of items
    fn insert_sorted_chunk(&mut self, chunk: &mut [(Vec<u8>, Vec<u8>)]) -> Result<usize> {
        // Sort chunk by key bytes
        chunk.sort_by(|a, b| K::compare(&a.0, &b.0));

        // Insert sorted items
        let count = chunk.len();
        for (key_bytes, value_bytes) in chunk.iter() {
            let key = K::from_bytes(key_bytes);
            let value = V::from_bytes(value_bytes);
            self.insert(&key, &value)?;
        }

        Ok(count)
    }

    /// Bulk remove optimized for deleting multiple keys
    ///
    /// This method provides performance improvements over individual
    /// `remove()` calls when deleting many keys at once.
    ///
    /// # Arguments
    ///
    /// * `keys` - Iterator of keys to remove
    ///
    /// # Returns
    ///
    /// Returns the number of keys that were actually removed (keys that existed in the table)
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// # use manifold::{Database, TableDefinition, Error};
    /// # const TABLE: TableDefinition<u64, &str> = TableDefinition::new("data");
    /// # fn example() -> Result<(), Error> {
    /// # let db = Database::create("example.db")?;
    /// # let txn = db.begin_write()?;
    /// # let mut table = txn.open_table(TABLE)?;
    /// let keys_to_delete = vec![1u64, 2u64, 3u64, 4u64, 5u64];
    /// let removed_count = table.remove_bulk(keys_to_delete.into_iter())?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn remove_bulk<'i, I>(&mut self, keys: I) -> Result<usize>
    where
        I: IntoIterator<Item = K::SelfType<'i>>,
    {
        let mut total_removed = 0usize;
        let mut chunk = Vec::with_capacity(BULK_INSERT_CHUNK_SIZE);

        for key in keys {
            // Serialize key to owned bytes for sorting
            let key_bytes = K::as_bytes(&key).as_ref().to_vec();
            chunk.push(key_bytes);

            // When chunk is full, sort and remove
            if chunk.len() >= BULK_INSERT_CHUNK_SIZE {
                total_removed += self.remove_sorted_chunk(&mut chunk)?;
                chunk.clear();
            }
        }

        // Remove remaining items
        if !chunk.is_empty() {
            total_removed += self.remove_sorted_chunk(&mut chunk)?;
        }

        Ok(total_removed)
    }

    /// Helper to sort and remove a chunk of keys
    fn remove_sorted_chunk(&mut self, chunk: &mut [Vec<u8>]) -> Result<usize> {
        // Sort chunk by key bytes
        chunk.sort_by(|a, b| K::compare(a, b));

        // Remove sorted keys
        let mut removed = 0usize;
        for key_bytes in chunk.iter() {
            let key = K::from_bytes(key_bytes);
            if self.remove(&key)?.is_some() {
                removed += 1;
            }
        }

        Ok(removed)
    }
}

impl<K: Key + 'static, V: MutInPlaceValue + 'static> Table<'_, K, V> {
    /// Reserve space to insert a key-value pair
    ///
    /// If the key is already present, its value is replaced
    ///
    /// The returned reference will have length equal to `value_length`
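    ///
    /// # Examples
    ///
    /// A rough sketch (marked `ignore` because it is untested): it assumes `&[u8]` implements
    /// [`MutInPlaceValue`] and that [`AccessGuardMutInPlace`] exposes the reserved bytes through
    /// an `as_mut()`-style accessor.
    ///
    /// ```rust,ignore
    /// # use manifold::{Database, TableDefinition, Error};
    /// # const TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("data");
    /// # fn example() -> Result<(), Error> {
    /// # let db = Database::create("example.db")?;
    /// # let txn = db.begin_write()?;
    /// # let mut table = txn.open_table(TABLE)?;
    /// let mut guard = table.insert_reserve("hello", 5)?;
    /// // `as_mut()` is an assumption; check `AccessGuardMutInPlace` for the actual accessor
    /// guard.as_mut().copy_from_slice(b"world");
    /// # Ok(())
    /// # }
    /// ```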
    pub fn insert_reserve<'a>(
        &mut self,
        key: impl Borrow<K::SelfType<'a>>,
        value_length: usize,
    ) -> Result<AccessGuardMutInPlace<'_, V>> {
        if value_length > MAX_VALUE_LENGTH {
            return Err(StorageError::ValueTooLarge(value_length));
        }
        let key_len = K::as_bytes(key.borrow()).as_ref().len();
        if key_len > MAX_VALUE_LENGTH {
            return Err(StorageError::ValueTooLarge(key_len));
        }
        if value_length + key_len > MAX_PAIR_LENGTH {
            return Err(StorageError::ValueTooLarge(value_length + key_len));
        }
        self.tree.insert_reserve(key.borrow(), value_length)
    }
}

impl<K: Key + 'static, V: Value + 'static> ReadableTableMetadata for Table<'_, K, V> {
    fn stats(&self) -> Result<TableStats> {
        let tree_stats = self.tree.stats()?;

        Ok(TableStats {
            tree_height: tree_stats.tree_height,
            leaf_pages: tree_stats.leaf_pages,
            branch_pages: tree_stats.branch_pages,
            stored_leaf_bytes: tree_stats.stored_leaf_bytes,
            metadata_bytes: tree_stats.metadata_bytes,
            fragmented_bytes: tree_stats.fragmented_bytes,
        })
    }

    fn len(&self) -> Result<u64> {
        self.tree.len()
    }
}

impl<K: Key + 'static, V: Value + 'static> ReadableTable<K, V> for Table<'_, K, V> {
    fn get<'a>(&self, key: impl Borrow<K::SelfType<'a>>) -> Result<Option<AccessGuard<'_, V>>> {
        self.tree.get(key.borrow())
    }

    fn range<'a, KR>(&self, range: impl RangeBounds<KR> + 'a) -> Result<Range<'_, K, V>>
    where
        KR: Borrow<K::SelfType<'a>> + 'a,
    {
        self.tree
            .range(&range)
            .map(|x| Range::new(x, self.transaction.transaction_guard()))
    }

    fn first(&self) -> Result<Option<(AccessGuard<'_, K>, AccessGuard<'_, V>)>> {
        self.tree.first()
    }

    fn last(&self) -> Result<Option<(AccessGuard<'_, K>, AccessGuard<'_, V>)>> {
        self.tree.last()
    }
}

impl<K: Key, V: Value> Sealed for Table<'_, K, V> {}

impl<K: Key + 'static, V: Value + 'static> Drop for Table<'_, K, V> {
    fn drop(&mut self) {
        self.transaction.close_table(
            &self.name,
            &self.tree,
            self.tree.get_root().map(|x| x.length).unwrap_or_default(),
        );
    }
}

fn debug_helper<K: Key + 'static, V: Value + 'static>(
    f: &mut Formatter<'_>,
    name: &str,
    len: Result<u64>,
    first: Result<Option<(AccessGuard<K>, AccessGuard<V>)>>,
    last: Result<Option<(AccessGuard<K>, AccessGuard<V>)>>,
) -> std::fmt::Result {
    write!(f, "Table [ name: \"{name}\", ")?;
    if let Ok(len) = len {
        if len == 0 {
            write!(f, "No entries")?;
        } else if len == 1 {
            if let Ok(first) = first {
                let (key, value) = first.as_ref().unwrap();
                write!(f, "One key-value: {:?} = {:?}", key.value(), value.value())?;
            } else {
                write!(f, "I/O Error accessing table!")?;
            }
        } else {
            if let Ok(first) = first {
                let (key, value) = first.as_ref().unwrap();
                write!(f, "first: {:?} = {:?}, ", key.value(), value.value())?;
            } else {
                write!(f, "I/O Error accessing table!")?;
            }
            if len > 2 {
                write!(f, "...{} more entries..., ", len - 2)?;
            }
            if let Ok(last) = last {
                let (key, value) = last.as_ref().unwrap();
                write!(f, "last: {:?} = {:?}", key.value(), value.value())?;
            } else {
                write!(f, "I/O Error accessing table!")?;
            }
        }
    } else {
        write!(f, "I/O Error accessing table!")?;
    }
    write!(f, " ]")?;

    Ok(())
}

impl<K: Key + 'static, V: Value + 'static> Debug for Table<'_, K, V> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        debug_helper(f, &self.name, self.len(), self.first(), self.last())
    }
}

pub trait ReadableTableMetadata {
    /// Retrieves information about storage usage for the table
    fn stats(&self) -> Result<TableStats>;

    /// Returns the number of entries in the table
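    ///
    /// # Examples
    ///
    /// A minimal sketch, reusing the setup from the other examples in this module (and assuming
    /// this trait is re-exported at the crate root, as the glob import in the `range` example suggests):
    ///
    /// ```rust,no_run
    /// # use manifold::*;
    /// # const TABLE: TableDefinition<u64, &str> = TableDefinition::new("data");
    /// # fn example() -> Result<(), Error> {
    /// # let db = Database::create("example.db")?;
    /// # let txn = db.begin_write()?;
    /// # let mut table = txn.open_table(TABLE)?;
    /// assert!(table.is_empty()?);
    /// table.insert(&1, "one")?;
    /// assert_eq!(table.len()?, 1);
    /// # Ok(())
    /// # }
    /// ```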
    fn len(&self) -> Result<u64>;

    /// Returns `true` if the table is empty
    fn is_empty(&self) -> Result<bool> {
        Ok(self.len()? == 0)
    }
}

pub trait ReadableTable<K: Key + 'static, V: Value + 'static>: ReadableTableMetadata {
    /// Returns the value corresponding to the given key
    fn get<'a>(&self, key: impl Borrow<K::SelfType<'a>>) -> Result<Option<AccessGuard<'_, V>>>;

    /// Retrieves multiple values in a single batch operation.
    ///
    /// This method provides better performance than calling `get()` multiple times by:
    /// - Optimizing B-tree traversal order (sorts keys internally)
    /// - Reducing repeated lookups for nearby keys
    /// - Improving cache locality
    ///
    /// # Arguments
    ///
    /// * `keys` - Iterator of keys to retrieve
    ///
    /// # Returns
    ///
    /// A vector of `Option<AccessGuard>` in the same order as the input keys.
    /// Missing keys will be `None`.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// # use manifold::{Database, TableDefinition, Error, ReadableTable};
    /// # const TABLE: TableDefinition<u64, &str> = TableDefinition::new("data");
    /// # fn example() -> Result<(), Error> {
    /// # let db = Database::create("example.db")?;
    /// # let txn = db.begin_read()?;
    /// # let table = txn.open_table(TABLE)?;
    /// let keys = vec![1u64, 2u64, 3u64];
    /// let values = table.get_bulk(keys.iter().copied())?;
    /// for (i, value) in values.iter().enumerate() {
    ///     if let Some(guard) = value {
    ///         println!("Key {}: {:?}", keys[i], guard.value());
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```
    fn get_bulk<'i, I>(&self, keys: I) -> Result<Vec<Option<AccessGuard<'_, V>>>>
    where
        I: IntoIterator<Item = K::SelfType<'i>>,
    {
        // Default implementation: collect keys with indices, sort, retrieve, restore order
        let mut indexed_keys: Vec<(usize, Vec<u8>)> = keys
            .into_iter()
            .enumerate()
            .map(|(idx, key)| (idx, K::as_bytes(&key).as_ref().to_vec()))
            .collect();

        // Sort by key bytes for sequential B-tree access
        indexed_keys.sort_by(|a, b| a.1.cmp(&b.1));

        // Retrieve values in sorted order
        let mut sorted_results: Vec<(usize, Option<AccessGuard<'_, V>>)> =
            Vec::with_capacity(indexed_keys.len());

        for (original_idx, key_bytes) in indexed_keys {
            let key = K::from_bytes(&key_bytes);
            let value = self.get(&key)?;
            sorted_results.push((original_idx, value));
        }

        // Restore original order
        sorted_results.sort_by_key(|(idx, _)| *idx);

        // Extract just the values
        Ok(sorted_results.into_iter().map(|(_, v)| v).collect())
    }

    /// Returns a double-ended iterator over a range of elements in the table
    ///
    /// # Examples
    ///
    /// Usage:
    /// ```rust
    /// use manifold::*;
    /// # use tempfile::NamedTempFile;
    /// const TABLE: TableDefinition<&str, u64> = TableDefinition::new("my_data");
    ///
    /// # fn main() -> Result<(), Error> {
    /// # #[cfg(not(target_os = "wasi"))]
    /// # let tmpfile = NamedTempFile::new().unwrap();
    /// # #[cfg(target_os = "wasi")]
    /// # let tmpfile = NamedTempFile::new_in("/tmp").unwrap();
    /// # let filename = tmpfile.path();
    /// let db = Database::create(filename)?;
    /// let write_txn = db.begin_write()?;
    /// {
    ///     let mut table = write_txn.open_table(TABLE)?;
    ///     table.insert("a", &0)?;
    ///     table.insert("b", &1)?;
    ///     table.insert("c", &2)?;
    /// }
    /// write_txn.commit()?;
    ///
    /// let read_txn = db.begin_read()?;
    /// let table = read_txn.open_table(TABLE)?;
    /// let mut iter = table.range("a".."c")?;
    /// let (key, value) = iter.next().unwrap()?;
    /// assert_eq!("a", key.value());
    /// assert_eq!(0, value.value());
    /// # Ok(())
    /// # }
    /// ```
    fn range<'a, KR>(&self, range: impl RangeBounds<KR> + 'a) -> Result<Range<'_, K, V>>
    where
        KR: Borrow<K::SelfType<'a>> + 'a;

    /// Returns the first key-value pair in the table, if it exists
    fn first(&self) -> Result<Option<(AccessGuard<'_, K>, AccessGuard<'_, V>)>>;

    /// Returns the last key-value pair in the table, if it exists
    fn last(&self) -> Result<Option<(AccessGuard<'_, K>, AccessGuard<'_, V>)>>;

    /// Returns a double-ended iterator over all elements in the table
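    ///
    /// # Examples
    ///
    /// A minimal sketch in the spirit of the `range` example above (relying on the same
    /// crate-root re-exports):
    ///
    /// ```rust,no_run
    /// # use manifold::*;
    /// # const TABLE: TableDefinition<&str, u64> = TableDefinition::new("my_data");
    /// # fn example() -> Result<(), Error> {
    /// # let db = Database::create("example.db")?;
    /// # let read_txn = db.begin_read()?;
    /// # let table = read_txn.open_table(TABLE)?;
    /// for entry in table.iter()? {
    ///     let (key, value) = entry?;
    ///     println!("{} = {}", key.value(), value.value());
    /// }
    /// // The iterator is double-ended, so the table can also be walked from the back
    /// if let Some(last) = table.iter()?.next_back() {
    ///     let (key, value) = last?;
    ///     println!("last: {} = {}", key.value(), value.value());
    /// }
    /// # Ok(())
    /// # }
    /// ```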
    fn iter(&self) -> Result<Range<'_, K, V>> {
        self.range::<K::SelfType<'_>>(..)
    }
}

/// A read-only untyped table
pub struct ReadOnlyUntypedTable {
    tree: RawBtree,
}

impl Sealed for ReadOnlyUntypedTable {}

impl ReadableTableMetadata for ReadOnlyUntypedTable {
    /// Retrieves information about storage usage for the table
    fn stats(&self) -> Result<TableStats> {
        let tree_stats = self.tree.stats()?;

        Ok(TableStats {
            tree_height: tree_stats.tree_height,
            leaf_pages: tree_stats.leaf_pages,
            branch_pages: tree_stats.branch_pages,
            stored_leaf_bytes: tree_stats.stored_leaf_bytes,
            metadata_bytes: tree_stats.metadata_bytes,
            fragmented_bytes: tree_stats.fragmented_bytes,
        })
    }

    fn len(&self) -> Result<u64> {
        self.tree.len()
    }
}

impl ReadOnlyUntypedTable {
    pub(crate) fn new(
        root_page: Option<BtreeHeader>,
        fixed_key_size: Option<usize>,
        fixed_value_size: Option<usize>,
        mem: Arc<TransactionalMemory>,
    ) -> Self {
        Self {
            tree: RawBtree::new(root_page, fixed_key_size, fixed_value_size, mem),
        }
    }
}

/// A read-only table
pub struct ReadOnlyTable<K: Key + 'static, V: Value + 'static> {
    name: String,
    tree: Btree<K, V>,
    transaction_guard: Arc<TransactionGuard>,
}

impl<K: Key + 'static, V: Value + 'static> TableHandle for ReadOnlyTable<K, V> {
    fn name(&self) -> &str {
        &self.name
    }
}

impl<K: Key + 'static, V: Value + 'static> ReadOnlyTable<K, V> {
    pub(crate) fn new(
        name: String,
        root_page: Option<BtreeHeader>,
        hint: PageHint,
        guard: Arc<TransactionGuard>,
        mem: Arc<TransactionalMemory>,
    ) -> Result<ReadOnlyTable<K, V>> {
        Ok(ReadOnlyTable {
            name,
            tree: Btree::new(root_page, hint, guard.clone(), mem)?,
            transaction_guard: guard,
        })
    }

    /// This method is like [`ReadableTable::get()`], but the [`AccessGuard`] is reference counted
    /// and keeps the transaction alive until it is dropped.
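    ///
    /// # Examples
    ///
    /// A minimal sketch; because the guard is `'static`, it does not borrow the table:
    ///
    /// ```rust,no_run
    /// # use manifold::{Database, TableDefinition, Error};
    /// # const TABLE: TableDefinition<u64, &str> = TableDefinition::new("data");
    /// # fn example() -> Result<(), Error> {
    /// # let db = Database::create("example.db")?;
    /// # let txn = db.begin_read()?;
    /// # let table = txn.open_table(TABLE)?;
    /// let guard = table.get(&1)?;
    /// drop(table);
    /// // The guard remains usable and keeps the read transaction alive until it is dropped
    /// if let Some(guard) = guard {
    ///     println!("{}", guard.value());
    /// }
    /// # Ok(())
    /// # }
    /// ```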
    pub fn get<'a>(
        &self,
        key: impl Borrow<K::SelfType<'a>>,
    ) -> Result<Option<AccessGuard<'static, V>>> {
        self.tree.get(key.borrow())
    }

    /// This method is like [`ReadableTable::range()`], but the iterator is reference counted and keeps the transaction
    /// alive until it is dropped.
    pub fn range<'a, KR>(&self, range: impl RangeBounds<KR>) -> Result<Range<'static, K, V>>
    where
        KR: Borrow<K::SelfType<'a>>,
    {
        self.tree
            .range(&range)
            .map(|x| Range::new(x, self.transaction_guard.clone()))
    }
}

impl<K: Key + 'static, V: Value + 'static> ReadableTableMetadata for ReadOnlyTable<K, V> {
    fn stats(&self) -> Result<TableStats> {
        let tree_stats = self.tree.stats()?;

        Ok(TableStats {
            tree_height: tree_stats.tree_height,
            leaf_pages: tree_stats.leaf_pages,
            branch_pages: tree_stats.branch_pages,
            stored_leaf_bytes: tree_stats.stored_leaf_bytes,
            metadata_bytes: tree_stats.metadata_bytes,
            fragmented_bytes: tree_stats.fragmented_bytes,
        })
    }

    fn len(&self) -> Result<u64> {
        self.tree.len()
    }
}

impl<K: Key + 'static, V: Value + 'static> ReadableTable<K, V> for ReadOnlyTable<K, V> {
    fn get<'a>(&self, key: impl Borrow<K::SelfType<'a>>) -> Result<Option<AccessGuard<'_, V>>> {
        self.tree.get(key.borrow())
    }

    fn range<'a, KR>(&self, range: impl RangeBounds<KR> + 'a) -> Result<Range<'_, K, V>>
    where
        KR: Borrow<K::SelfType<'a>> + 'a,
    {
        self.tree
            .range(&range)
            .map(|x| Range::new(x, self.transaction_guard.clone()))
    }

    fn first(&self) -> Result<Option<(AccessGuard<'_, K>, AccessGuard<'_, V>)>> {
        self.tree.first()
    }

    fn last(&self) -> Result<Option<(AccessGuard<'_, K>, AccessGuard<'_, V>)>> {
        self.tree.last()
    }
}

impl<K: Key, V: Value> Sealed for ReadOnlyTable<K, V> {}

impl<K: Key + 'static, V: Value + 'static> Debug for ReadOnlyTable<K, V> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        debug_helper(f, &self.name, self.len(), self.first(), self.last())
    }
}

pub struct ExtractIf<
    'a,
    K: Key + 'static,
    V: Value + 'static,
    F: for<'f> FnMut(K::SelfType<'f>, V::SelfType<'f>) -> bool,
> {
    inner: BtreeExtractIf<'a, K, V, F>,
}

impl<
    'a,
    K: Key + 'static,
    V: Value + 'static,
    F: for<'f> FnMut(K::SelfType<'f>, V::SelfType<'f>) -> bool,
> ExtractIf<'a, K, V, F>
{
    pub(crate) fn new(inner: BtreeExtractIf<'a, K, V, F>) -> Self {
        Self { inner }
    }
}

impl<
    'a,
    K: Key + 'static,
    V: Value + 'static,
    F: for<'f> FnMut(K::SelfType<'f>, V::SelfType<'f>) -> bool,
> Iterator for ExtractIf<'a, K, V, F>
{
    type Item = Result<(AccessGuard<'a, K>, AccessGuard<'a, V>)>;

    fn next(&mut self) -> Option<Self::Item> {
        let entry = self.inner.next()?;
        Some(entry.map(|entry| {
            let (page, key_range, value_range) = entry.into_raw();
            let key = AccessGuard::with_page(page.clone(), key_range);
            let value = AccessGuard::with_page(page, value_range);
            (key, value)
        }))
    }
}

impl<
    K: Key + 'static,
    V: Value + 'static,
    F: for<'f> FnMut(K::SelfType<'f>, V::SelfType<'f>) -> bool,
> DoubleEndedIterator for ExtractIf<'_, K, V, F>
{
    fn next_back(&mut self) -> Option<Self::Item> {
        let entry = self.inner.next_back()?;
        Some(entry.map(|entry| {
            let (page, key_range, value_range) = entry.into_raw();
            let key = AccessGuard::with_page(page.clone(), key_range);
            let value = AccessGuard::with_page(page, value_range);
            (key, value)
        }))
    }
}

#[derive(Clone)]
pub struct Range<'a, K: Key + 'static, V: Value + 'static> {
    inner: BtreeRangeIter<K, V>,
    _transaction_guard: Arc<TransactionGuard>,
    // This lifetime lets a `&` borrow of the `Table` be held, preventing concurrent mutation
    _lifetime: PhantomData<&'a ()>,
}

impl<K: Key + 'static, V: Value + 'static> Range<'_, K, V> {
    pub(super) fn new(inner: BtreeRangeIter<K, V>, guard: Arc<TransactionGuard>) -> Self {
        Self {
            inner,
            _transaction_guard: guard,
            _lifetime: Default::default(),
        }
    }
}

impl<'a, K: Key + 'static, V: Value + 'static> Iterator for Range<'a, K, V> {
    type Item = Result<(AccessGuard<'a, K>, AccessGuard<'a, V>)>;

    fn next(&mut self) -> Option<Self::Item> {
        self.inner.next().map(|x| {
            x.map(|entry| {
                let (page, key_range, value_range) = entry.into_raw();
                let key = AccessGuard::with_page(page.clone(), key_range);
                let value = AccessGuard::with_page(page, value_range);
                (key, value)
            })
        })
    }
}

impl<K: Key + 'static, V: Value + 'static> DoubleEndedIterator for Range<'_, K, V> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.inner.next_back().map(|x| {
            x.map(|entry| {
                let (page, key_range, value_range) = entry.into_raw();
                let key = AccessGuard::with_page(page.clone(), key_range);
                let value = AccessGuard::with_page(page, value_range);
                (key, value)
            })
        })
    }
}