//! btree_ondisk/bmap.rs

1use std::fmt;
2#[cfg(feature = "rc")]
3use std::rc::Rc;
4#[cfg(feature = "rc")]
5use std::cell::RefCell;
6#[cfg(feature = "arc")]
7use std::sync::Arc;
8#[cfg(feature = "arc")]
9use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
10#[cfg(feature = "arc")]
11use atomic_refcell::AtomicRefCell;
12use std::collections::{HashMap, VecDeque};
13use std::marker::PhantomData;
14use std::io::{Result, ErrorKind};
15use std::any::Any;
16use crate::VMap;
17use crate::{NodeValue, BlockLoader, NodeCache};
18use crate::direct::DirectMap;
19use crate::btree::BtreeMap;
20use crate::node::{
21    BtreeNode, DirectNode,
22    BTREE_NODE_FLAG_LEAF, BTREE_NODE_FLAG_LARGE, BTREE_NODE_LEVEL_MIN
23};
24use crate::btree::{BtreeNodeRef, BtreeNodeDirty};
25use crate::cache::NodeTieredCacheStats;
26
/// Statistics snapshot returned by [`BMap::get_stat`].
///
/// For a map whose root is still a direct node, [`BMap::get_stat`] returns
/// `BMapStat::default()` (all zero / false); the fields below are only
/// populated by the btree backend.
#[derive(Default, Debug)]
pub struct BMapStat {
    // true when the map is backed by a btree, false for a direct root
    pub btree: bool,
    // btree level — filled in by BtreeMap::get_stat (presumably root height); see btree impl
    pub level: usize,
    // root dirty flag
    pub dirty: bool,
    // size in bytes of the meta blocks backing non-root nodes
    pub meta_block_size: usize,
    // total node count tracked by the btree — semantics defined in BtreeMap::get_stat
    pub nodes_total: usize,
    // level-1 node count tracked by the btree — semantics defined in BtreeMap::get_stat
    pub nodes_l1: usize,
    // counters from the tiered node cache
    pub cache_stats: NodeTieredCacheStats,
}
37
/// Runtime representation of the map root.
///
/// A map starts out as a flat [`DirectMap`] and is converted to a
/// [`BtreeMap`] when an inserted key exceeds the direct node capacity
/// (see `convert_and_insert`); deletions may convert it back
/// (see `convert_to_direct`).
pub enum NodeType<'a, K, V, P, L: BlockLoader<P>, C: NodeCache<P>> {
    Direct(DirectMap<'a, K, V, P>),
    Btree(BtreeMap<'a, K, V, P, L, C>),
}
42
43impl<'a, K, V, P, L, C> fmt::Display for NodeType<'a, K, V, P, L, C>
44    where
45        K: Copy + fmt::Display + std::cmp::PartialOrd,
46        V: Copy + fmt::Display + NodeValue,
47        P: Copy + fmt::Display + NodeValue,
48        L: BlockLoader<P>,
49        C: NodeCache<P>,
50{
51    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
52        match self {
53            NodeType::Direct(direct) => {
54                write!(f, "{}", direct)
55            },
56            NodeType::Btree(btree) => {
57                write!(f, "{}", btree)
58            }
59        }
60    }
61}
62
/// On-disk block map with a root that adapts between a flat direct node
/// and a btree (see [`NodeType`]).
pub struct BMap<'a, K, V, P, L: BlockLoader<P>, C: NodeCache<P>> {
    // current root backend
    inner: NodeType<'a, K, V, P, L, C>,
    // size in bytes of meta blocks; root node must fit in half of it (see `new`)
    meta_block_size: usize,
    // held here only while `inner` is Direct; moved into the BtreeMap on
    // conversion (see `convert_and_insert` / `new_btree`)
    block_loader: Option<L>,
    // same ownership dance as `block_loader`
    node_tiered_cache: Option<C>,
}
69
70impl<'a, K, V, P, L, C> fmt::Display for BMap<'a, K, V, P, L, C>
71    where
72        K: Copy + fmt::Display + std::cmp::PartialOrd,
73        V: Copy + fmt::Display + NodeValue,
74        P: Copy + fmt::Display + NodeValue,
75        L: BlockLoader<P>,
76        C: NodeCache<P>,
77{
78    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
79        write!(f, "{}", self.inner)
80    }
81}
82
impl<'a, K, V, P, L, C> BMap<'a, K, V, P, L, C>
    where
        K: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64>,
        V: Copy + Default + std::fmt::Display + NodeValue,
        K: Into<u64> + From<u64>,
        P: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64>,
        P: NodeValue + Into<u64> + From<u64>,
        L: BlockLoader<P>,
        C: NodeCache<P>,
{
    /// Convert the direct root into a btree, then insert `(key, val)`.
    ///
    /// Called when an insert key exceeds the direct node's reach (see
    /// `do_try_insert` / `do_insert_or_update`). The gathered old entries
    /// plus the new pair either all fit in the new btree root (height 1),
    /// or are pushed into a fresh level-1 child under a height-2 root.
    ///
    /// NOTE: takes `self.block_loader` and `self.node_tiered_cache` out of
    /// their Options and `unwrap()`s them — both must be `Some`, which holds
    /// while `inner` is Direct (set by the direct-map constructors).
    #[maybe_async::maybe_async]
    async fn convert_and_insert(&mut self, data: Vec<u8>, meta_block_size: usize, last_seq: P, limit: usize, key: K, val: V) -> Result<()> {
        // collect all valid value from old direct root
        let mut old_kv = Vec::new();
        let direct = DirectNode::<V>::from_slice(&data);
        for i in 0..direct.get_capacity() {
            let val = direct.get_val(i);
            if !val.is_invalid() {
                // direct maps use the slot index as the key
                old_kv.push((From::<u64>::from(i as u64), val));
            }
        }
        let old_ud = direct.get_userdata();

        // create new btree map
        let mut v = Vec::with_capacity(data.len());
        // root node to all zero
        v.resize(data.len(), 0);
        // init flags with leaf and large
        v[0] = BTREE_NODE_FLAG_LEAF | BTREE_NODE_FLAG_LARGE;
        let mut btree = BtreeMap {
            #[cfg(feature = "rc")]
            root: Rc::new(Box::new(BtreeNode::<K, V, P>::from_slice(&v))),
            #[cfg(feature = "arc")]
            root: Arc::new(Box::new(BtreeNode::<K, V, P>::from_slice(&v))),
            data: v,
            #[cfg(feature = "rc")]
            nodes: RefCell::new(HashMap::new()),
            #[cfg(feature = "rc")]
            last_seq: RefCell::new(last_seq),
            #[cfg(feature = "rc")]
            dirty: RefCell::new(true),
            #[cfg(feature = "arc")]
            nodes: AtomicRefCell::new(HashMap::new()),
            // moves the cache out of self — see fn-level NOTE
            node_tiered_cache: self.node_tiered_cache.take().unwrap(),
            #[cfg(feature = "arc")]
            last_seq: Arc::new(AtomicU64::new(Into::<u64>::into(last_seq))),
            #[cfg(feature = "arc")]
            dirty: Arc::new(AtomicBool::new(true)),
            meta_block_size: meta_block_size,
            #[cfg(feature = "rc")]
            cache_limit: RefCell::new(limit),
            #[cfg(feature = "arc")]
            cache_limit: Arc::new(AtomicUsize::new(limit)),
            // moves the loader out of self — see fn-level NOTE
            block_loader: self.block_loader.take().unwrap(),
        };

        // all values in old root node plus one for new k,v to be insert
        if old_kv.len() + 1 <= btree.root.get_capacity() {
            // create root node @level 1
            btree.root.set_nchild(0);
            btree.root.init_root(1, true);
            btree.root.set_userdata(old_ud);
            let mut index = 0;
            for (k, v) in old_kv.into_iter() {
                btree.root.insert::<V>(index, &k, &v);
                index += 1;
            }
            btree.root.insert::<V>(index, &key, &val);

            // modify inner
            self.inner = NodeType::Btree(btree);
            return Ok(());
        }

        let first_root_key;
        // create child node @level 1
        {

        let node = btree.get_new_node(&last_seq, 1)?;
        node.init(1, 0);

        // if value is too large for root node
        // (old_kv empty here implies capacity == 0, because we only reach
        // this path when old_kv.len() + 1 > capacity — so the indexing in
        // the else arm is safe)
        if old_kv.len() == 0 && btree.root.get_capacity() == 0 {
            first_root_key = key;
        } else {
            // save first key
            first_root_key = old_kv[0].0;
        }
        let mut index = 0;
        for (k, v) in old_kv.into_iter() {
            node.insert::<V>(index, &k, &v);
            index += 1;
        }
        node.insert::<V>(index, &key, &val);
        node.mark_dirty();
        btree.nodes.borrow_mut().insert(last_seq, node);

        }

        // create root node @level 2
        {

        // btree root node was V, now init it to P
        #[cfg(feature = "rc")]
        let Some(root) = std::rc::Rc::get_mut(&mut btree.root) else {
            panic!("failed to get rc of btree root");
        };
        #[cfg(feature = "arc")]
        let Some(root) = std::sync::Arc::get_mut(&mut btree.root) else {
            panic!("failed to get arc of btree root");
        };
        root.do_reinit::<P>();
        btree.root.set_nchild(0);
        btree.root.init_root(2, true);
        btree.root.set_userdata(old_ud);
        // root no more leaf
        btree.root.clear_leaf();
        btree.root.insert::<P>(0, &first_root_key, &last_seq);

        }

        // modify inner
        self.inner = NodeType::Btree(btree);
        Ok(())
    }

    /// Rebuild the root as a direct node from the gathered `(key, value)`
    /// pairs and stash `block_loader` / `node_tiered_cache` back into `self`.
    ///
    /// Counterpart of `convert_and_insert`, used by `do_delete` once the
    /// btree has shrunk enough that everything fits in a direct root.
    /// `_key` is unused here; keys become slot indices in the direct node.
    #[maybe_async::maybe_async]
    async fn convert_to_direct(&mut self, _key: &K, input: &Vec<(K, V)>,
            root_node_size: usize, user_data: u32, last_seq: P, limit: usize, block_loader: L, node_tiered_cache: C) -> Result<()> {
        let mut v = Vec::with_capacity(root_node_size);
        v.resize(root_node_size, 0);
        let direct = DirectMap {
            #[cfg(feature = "rc")]
            root: Rc::new(Box::new(DirectNode::<V>::from_slice(&v))),
            #[cfg(feature = "arc")]
            root: Arc::new(Box::new(DirectNode::<V>::from_slice(&v))),
            data: v,
            #[cfg(feature = "rc")]
            last_seq: RefCell::new(last_seq),
            #[cfg(feature = "rc")]
            dirty: RefCell::new(true),
            #[cfg(feature = "arc")]
            last_seq: Arc::new(AtomicU64::new(Into::<u64>::into(last_seq))),
            #[cfg(feature = "arc")]
            dirty: Arc::new(AtomicBool::new(true)),
            #[cfg(feature = "rc")]
            cache_limit: RefCell::new(limit),
            #[cfg(feature = "arc")]
            cache_limit: Arc::new(AtomicUsize::new(limit)),
            marker: PhantomData,
            #[cfg(feature = "arc")]
            marker_p: PhantomData,
        };

        direct.root.set_userdata(user_data);
        for (k, v) in input {
            // convert key to u64 then to usize,
            // use key as index of direct node
            let i = Into::<u64>::into(*k) as usize;
            direct.root.set_val(i, v);
        }

        self.block_loader = Some(block_loader);
        self.node_tiered_cache = Some(node_tiered_cache);
        self.inner = NodeType::Direct(direct);
        Ok(())
    }
}
251
impl<'a, K, V, P, L, C> BMap<'a, K, V, P, L, C>
    where
        K: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64>,
        V: Copy + Default + std::fmt::Display + NodeValue + Any,
        K: Into<u64> + From<u64>,
        P: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64>,
        P: NodeValue + From<u64> + Into<u64> + Any,
        L: BlockLoader<P> + Clone,
        C: NodeCache<P> + Clone,
{
    /// Constructs a map starting from an empty direct node.
    ///
    /// # Panics
    ///
    /// Panics if `root_node_size` exceeds half of `meta_block_size`.
    // start from a direct node at level 1 with no entries
    pub fn new(root_node_size: usize, meta_block_size: usize, block_loader: L, node_tiered_cache: C) -> Self {
        if root_node_size > (meta_block_size / 2) {
            panic!("root node size {} is too large, reduce to {} at least, which is half of meta block size",
                root_node_size, meta_block_size / 2);
        }
        // allocate temp space to init a root node as direct
        let mut data = Vec::with_capacity(root_node_size);
        data.resize(root_node_size, 0);
        // init direct root node at level 1
        let root = DirectNode::<V>::from_slice(&data);
        // flags = 0, level = 1, nchild = 0;
        root.init(0, 1, 0);

        Self {
            inner: NodeType::Direct(DirectMap::<K, V, P>::new(&data)),
            meta_block_size: meta_block_size,
            block_loader: Some(block_loader),
            node_tiered_cache: Some(node_tiered_cache),
        }
    }

    /// Constructs a new direct map from data slice.
    ///
    /// Data will be copied into internal buffer.
    pub fn new_direct(data: &[u8], meta_block_size: usize, block_loader: L, node_tiered_cache: C) -> Self {
        Self {
            inner: NodeType::Direct(DirectMap::<K, V, P>::new(data)),
            meta_block_size: meta_block_size,
            block_loader: Some(block_loader),
            node_tiered_cache: Some(node_tiered_cache),
        }
    }

    /// Constructs a new btree map from data slice.
    ///
    /// Data will be copied into internal buffer.
    pub fn new_btree(data: &[u8], meta_block_size: usize, block_loader: L, node_tiered_cache: C) -> Self {
        Self {
            inner: NodeType::Btree(BtreeMap::<K, V, P, L, C>::new(data, meta_block_size, block_loader, node_tiered_cache)),
            // loader/cache are owned by the BtreeMap while in btree mode,
            // hence None here (they are restored on convert_to_direct)
            meta_block_size: meta_block_size,
            block_loader: None,
            node_tiered_cache: None,
        }
    }

    /// Expose root node data buffer as a slice.
    pub fn as_slice(&self) -> &[u8] {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.as_slice();
            },
            NodeType::Btree(btree) => {
                return btree.as_slice();
            },
        }
    }

    /// Test map dirty flag.
    pub fn dirty(&self) -> bool {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.dirty();
            },
            NodeType::Btree(btree) => {
                return btree.dirty();
            }
        }
    }

    /// Clear map dirty flag. (don't affect nodes dirty state)
    pub fn clear_dirty(&mut self) {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.clear_dirty();
            },
            NodeType::Btree(btree) => {
                return btree.clear_dirty();
            }
        }
    }

    /// Insert helper: converts the direct root to a btree first when the
    /// key is out of the direct node's reach, then delegates the insert.
    #[maybe_async::maybe_async]
    async fn do_try_insert(&mut self, key: K, val: V) -> Result<()> {
        match &self.inner {
            NodeType::Direct(direct) => {
                if direct.is_key_exceed(&key) {
                    // convert and insert
                    let data = direct.data.clone();
                    #[cfg(feature = "rc")]
                    let last_seq = direct.last_seq.take();
                    #[cfg(feature = "arc")]
                    let last_seq = direct.last_seq.load(Ordering::SeqCst).into();
                    let limit = direct.get_cache_limit();
                    return self.convert_and_insert(data, self.meta_block_size, last_seq, limit, key, val).await;
                }
                return direct.insert(key, val).await;
            },
            NodeType::Btree(btree) => {
                return btree.insert(key, val).await;
            },
        }
    }

    /// Insert a key/value pair.
    ///
    /// # Errors
    ///
    /// * NotFound - key not found, if key inserted out of node's capacity. **direct node ONLY**
    /// * AlreadyExists - key already exists, new value will not be updated into map.
    /// * OutOfMemory - insufficient memory.
    #[maybe_async::maybe_async]
    pub async fn try_insert(&mut self, key: K, val: V) -> Result<()> {
        self.do_try_insert(key, val).await
    }

    /// Insert-or-update helper; same conversion logic as `do_try_insert`.
    /// Returns the previous value if the key already existed (always `None`
    /// on the conversion path, since conversion implies a fresh key).
    #[maybe_async::maybe_async]
    async fn do_insert_or_update(&mut self, key: K, val: V) -> Result<Option<V>> {
        match &self.inner {
            NodeType::Direct(direct) => {
                if direct.is_key_exceed(&key) {
                    // convert and insert
                    let data = direct.data.clone();
                    #[cfg(feature = "rc")]
                    let last_seq = direct.last_seq.take();
                    #[cfg(feature = "arc")]
                    let last_seq = direct.last_seq.load(Ordering::SeqCst).into();
                    let limit = direct.get_cache_limit();
                    let _ = self.convert_and_insert(data, self.meta_block_size, last_seq, limit, key, val).await?;
                    return Ok(None);
                }
                return direct.insert_or_update(key, val).await;
            },
            NodeType::Btree(btree) => {
                return btree.insert_or_update(key, val).await;
            },
        }
    }

    /// Insert or Update a key/value pair,
    /// Return old value in Option if key is already exists.
    ///
    /// # Errors
    ///
    /// * NotFound - key not found, if key inserted out of node's capacity. **direct node ONLY**
    /// * OutOfMemory - insufficient memory.
    #[maybe_async::maybe_async]
    pub async fn insert(&mut self, key: K, val: V) -> Result<Option<V>> {
        self.do_insert_or_update(key, val).await
    }

    /// Delete helper: for a btree root, first checks (via
    /// `delete_check_and_gather`) whether the remaining entries would fit a
    /// direct node; if so, performs the delete and converts back to direct.
    #[maybe_async::maybe_async]
    async fn do_delete(&mut self, key: &K) -> Result<()> {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.delete(key).await;
            },
            NodeType::Btree(btree) => {
                let mut v = Vec::<(K, V)>::new();
                if btree.delete_check_and_gather(key, &mut v).await? {
                    let _ = btree.delete(key).await?;
                    // re-visit vec we got, remove above last key we need to delete
                    v.retain(|(_k, _)| _k != key);
                    #[cfg(feature = "rc")]
                    let _ = self.convert_to_direct(key, &v,
                        btree.data.len(), btree.root.get_userdata(), btree.last_seq.take(), btree.get_cache_limit(),
                            btree.block_loader.clone(), btree.node_tiered_cache.clone()).await?;
                    #[cfg(feature = "arc")]
                    let _ = self.convert_to_direct(key, &v,
                        btree.data.len(), btree.root.get_userdata(), btree.last_seq.load(Ordering::SeqCst).into(), btree.get_cache_limit(),
                            btree.block_loader.clone(), btree.node_tiered_cache.clone()).await?;
                    return Ok(());
                }
                return btree.delete(key).await;
            },
        }
    }

    /// Delete a key/value pair from map.
    ///
    /// When btree shrink to height 2 or 3, start to check if btree can be converted to direct.
    ///
    /// # Errors
    ///
    /// * NotFound - key not found.
    /// * OutOfMemory - insufficient memory.
    #[maybe_async::maybe_async]
    pub async fn delete(&mut self, key: &K) -> Result<()> {
        self.do_delete(key).await
    }

    /// Lookup key at specific level, return its value.
    ///
    /// # Errors
    ///
    /// * NotFound - key not found.
    /// * OutOfMemory - insufficient memory.
    #[maybe_async::maybe_async]
    pub async fn lookup_at_level(&self, key: &K, level: usize) -> Result<V> {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.lookup(key, level).await;
            },
            NodeType::Btree(btree) => {
                return btree.lookup(key, level).await;
            },
        }
    }

    /// Lookup key at level 1, return its value.
    ///
    /// # Errors
    ///
    /// * NotFound - key not found.
    /// * OutOfMemory - insufficient memory.
    #[maybe_async::maybe_async]
    pub async fn lookup(&self, key: &K) -> Result<V> {
        self.lookup_at_level(key, 1).await
    }

    /// Lookup max contiguous key space.
    ///   - key: start key
    ///   - maxblocks: max expected contiguous blocks
    ///
    /// # Return
    ///
    /// value, max contiguous count we actually found
    ///
    /// # Errors
    ///
    /// * NotFound - key not found.
    /// * OutOfMemory - insufficient memory.
    #[maybe_async::maybe_async]
    pub async fn lookup_contig(&self, key: &K, maxblocks: usize) -> Result<(V, usize)> {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.lookup_contig(key, maxblocks).await;
            },
            NodeType::Btree(btree) => {
                return btree.lookup_contig(key, maxblocks).await;
            },
        }
    }

    /// Collect all dirty nodes into a Vec.
    ///
    /// Always empty for a direct root (the direct node has no child nodes).
    pub fn lookup_dirty(&self) -> Vec<BtreeNodeDirty<'a, K, V, P>> {
        match &self.inner {
            NodeType::Direct(_) => {
                return Vec::new();
            },
            NodeType::Btree(btree) => {
                return btree.lookup_dirty().into_iter().map(|n| BtreeNodeDirty(n)).collect();
            },
        }
    }

    /// Seek a valid entry and return its key starting from start key.
    ///
    /// # Errors
    ///
    /// * NotFound - no valid entry found.
    /// * OutOfMemory - insufficient memory.
    #[maybe_async::maybe_async]
    pub async fn seek_key(&self, start: &K) -> Result<K> {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.seek_key(start).await;
            },
            NodeType::Btree(btree) => {
                return btree.seek_key(start).await;
            },
        }
    }

    /// Get last key.
    ///
    /// # Errors
    ///
    /// * NotFound - key not found.
    /// * OutOfMemory - insufficient memory.
    #[maybe_async::maybe_async]
    pub async fn last_key(&self) -> Result<K> {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.last_key().await;
            },
            NodeType::Btree(btree) => {
                return btree.last_key().await;
            },
        }
    }

    /// Assign a new value by key.
    ///
    /// **data node:** use key to find value entry.
    ///
    /// **meta node:** use node to find value entry, key is unused.
    ///
    /// use [`BMap::assign_meta_node`] and [`BMap::assign_data_node`] for more explicit semantics
    ///
    /// # Errors
    ///
    /// * NotFound - key not found.
    /// * OutOfMemory - insufficient memory.
    ///
    /// # Panics
    ///
    /// Panics when the direct path is used and `P` is not the same type as `V`.
    #[maybe_async::maybe_async]
    pub async fn assign(&self, key: &K, newid: P, node: Option<BtreeNodeDirty<'_, K, V, P>>) -> Result<()> {
        #[cfg(feature = "value-check")]
        if !newid.is_valid_extern_assign() {
            // potential conflict with seq value internal used
            panic!("assign value is not in a valid format");
        }
        match &self.inner {
            NodeType::Direct(direct) => {
                // meta-node assigns are a no-op on a direct root
                if node.is_some() {
                    return Ok(());
                }
                // assign data node only works when P and V is same type
                let any = &newid as &dyn Any;
                match any.downcast_ref::<V>() {
                    Some(newval) => {
                        return direct.assign(key, *newval).await;
                    },
                    None => {
                        panic!("V type {} and P type {} not the same, you can not use fn assign_data_node",
                            std::any::type_name::<V>(), std::any::type_name::<P>());
                    },
                }
            },
            NodeType::Btree(btree) => {
                return btree.assign(key, newid, node.map(|n| n.clone_node_ref())).await;
            },
        }
    }

    /// Assign external value to meta node.
    ///
    /// # Errors
    ///
    /// * NotFound - key not found.
    /// * OutOfMemory - insufficient memory.
    #[maybe_async::maybe_async]
    pub async fn assign_meta_node(&self, newid: P, node: BtreeNodeDirty<'_, K, V, P>) -> Result<()> {
        #[cfg(feature = "value-check")]
        if !newid.is_valid_extern_assign() {
            // potential conflict with seq value internal used
            panic!("assign value is not in a valid format");
        }
        match &self.inner {
            NodeType::Direct(_direct) => {
                // direct roots have no meta nodes; nothing to do
                return Ok(());
            },
            NodeType::Btree(btree) => {
                // key is unused, so use 0
                let zero_key = 0.into();
                return btree.assign(&zero_key, newid, Some(node.clone_node_ref())).await;
            },
        }
    }

    /// Assign external value to data node.
    ///
    /// # Errors
    ///
    /// * NotFound - key not found.
    /// * OutOfMemory - insufficient memory.
    ///
    /// # Panics
    ///
    /// Panics when the direct path is used and `P` is not the same type as `V`.
    #[maybe_async::maybe_async]
    pub async fn assign_data_node(&self, key: &K, newid: P) -> Result<()> {
        #[cfg(feature = "value-check")]
        if !newid.is_valid_extern_assign() {
            // potential conflict with seq value internal used
            panic!("assign value is not in a valid format");
        }
        match &self.inner {
            NodeType::Direct(direct) => {
                // assign data node only works when P and V is same type
                let any = &newid as &dyn Any;
                match any.downcast_ref::<V>() {
                    Some(newval) => {
                        return direct.assign(key, *newval).await;
                    },
                    None => {
                        panic!("V type {} and P type {} not the same, you can not use fn assign_data_node",
                            std::any::type_name::<V>(), std::any::type_name::<P>());
                    },
                }
            },
            NodeType::Btree(btree) => {
                return btree.assign(key, newid, None).await;
            },
        }
    }

    /// Propagate changes of key/node from specific level up to root, marks all nodes dirty in the path.
    ///
    /// **data node:** use key to find target.
    ///
    /// **meta node:** use node to find target, key is unused.
    ///
    /// explicit invoke of this function is not required,
    /// propagation will be implicit invoked by [`BMap::insert`] and [`BMap::delete`].
    ///
    /// # Errors
    ///
    /// * NotFound - key not found.
    /// * OutOfMemory - insufficient memory.
    #[maybe_async::maybe_async]
    pub async fn propagate(&self, key: &K, node: Option<BtreeNodeRef<'_, K, V, P>>) -> Result<()> {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.propagate(key).await;
            },
            NodeType::Btree(btree) => {
                return btree.propagate(key, node).await;
            },
        }
    }

    /// Mark the block specified by key at level as dirty.
    ///
    /// No-op for a direct root.
    ///
    /// # Errors
    ///
    /// * NotFound - key not found.
    /// * OutOfMemory - insufficient memory.
    #[maybe_async::maybe_async]
    pub async fn mark(&self, key: &K, level: usize) -> Result<()> {
        match &self.inner {
            NodeType::Direct(_) => {
                return Ok(());
            },
            NodeType::Btree(btree) => {
                return btree.mark(key, level).await;
            },
        }
    }

    /// Truncate helper: repeatedly deletes the current last key while it is
    /// >= `key`. A NotFound from `last_key` means the map drained empty,
    /// which is treated as success.
    #[maybe_async::maybe_async]
    async fn do_truncate(&mut self, key: &K) -> Result<()> {
        let mut last_key = self.last_key().await?;
        if key > &last_key {
            // nothing at or beyond the truncation point
            return Ok(());
        }

        while key <= &last_key {
            let _ = self.do_delete(&last_key).await?;
            match self.last_key().await {
                Ok(key) => {
                    last_key = key;
                },
                Err(e) => {
                    if e.kind() == ErrorKind::NotFound {
                        return Ok(());
                    }
                    return Err(e);
                }
            }
        }
        return Ok(());
    }

    /// Truncate index to key value (including key value supplied).
    ///
    /// # Errors
    ///
    /// * NotFound - target key not found in bmap.
    /// * OutOfMemory - insufficient memory.
    #[maybe_async::maybe_async]
    pub async fn truncate(&mut self, key: &K) -> Result<()> {
        self.do_truncate(key).await
    }

    /// Read in root node from external buffer.
    ///
    /// The root's LARGE flag decides whether the buffer is interpreted as a
    /// btree root or a direct node.
    pub fn read(buf: &[u8], meta_block_size: usize, block_loader: L, node_tiered_cache: C) -> Self {
        let root = BtreeNode::<K, V, P>::from_slice(buf);
        if root.is_large() {
            return Self::new_btree(buf, meta_block_size, block_loader, node_tiered_cache);
        }
        return Self::new_direct(buf, meta_block_size, block_loader, node_tiered_cache);
    }

    /// Write out root node to external buffer.
    ///
    /// # Panics
    ///
    /// `copy_from_slice` panics if `buf` length differs from the root slice.
    pub fn write(&self, buf: &mut [u8]) {
        buf.copy_from_slice(self.as_slice())
    }

    /// Get statistics from map internal.
    ///
    /// For root node in direct, return all zero.
    pub fn get_stat(&self) -> BMapStat {
        match &self.inner {
            NodeType::Direct(_) => {
                return BMapStat::default();
            },
            NodeType::Btree(btree) => {
                return btree.get_stat();
            },
        }
    }

    /// Get user data from root node
    pub fn get_userdata(&self) -> u32 {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.get_userdata();
            },
            NodeType::Btree(btree) => {
                return btree.get_userdata();
            },
        }
    }

    /// Set user data into root node
    pub fn set_userdata(&self, data: u32) {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.set_userdata(data);
            },
            NodeType::Btree(btree) => {
                return btree.set_userdata(data);
            },
        }
    }

    /// Get limit of max nodes count cached in memory for btree
    pub fn get_cache_limit(&self) -> usize {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.get_cache_limit();
            },
            NodeType::Btree(btree) => {
                return btree.get_cache_limit();
            },
        }
    }

    /// Set limit of max nodes count cached in memory for btree
    pub fn set_cache_limit(&self, limit: usize) {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.set_cache_limit(limit);
            },
            NodeType::Btree(btree) => {
                return btree.set_cache_limit(limit);
            },
        }
    }

    /// Return iterator for all non-leaf node which including root node
    pub fn nonleafnode_iter<'b>(&'b self) -> NonLeafNodeIter<'a, 'b, K, V, P, L, C> {
        NonLeafNodeIter::new(self)
    }

    /// Get back block loader
    ///
    /// # Panics
    ///
    /// Asserts the loader Option is populated on the direct path (it is,
    /// while `inner` is Direct — see the field comments on `BMap`).
    pub fn get_block_loader(&self) -> L {
        match &self.inner {
            NodeType::Direct(_) => {
                assert!(self.block_loader.is_some());
                return self.block_loader.as_ref().unwrap().clone();
            },
            NodeType::Btree(btree) => {
                return btree.block_loader.clone();
            },
        }
    }

    /// Get back node cache
    ///
    /// # Panics
    ///
    /// Asserts the cache Option is populated on the direct path (it is,
    /// while `inner` is Direct — see the field comments on `BMap`).
    pub fn get_node_cache(&self) -> C {
        match &self.inner {
            NodeType::Direct(_) => {
                assert!(self.node_tiered_cache.is_some());
                return self.node_tiered_cache.as_ref().unwrap().clone();
            },
            NodeType::Btree(btree) => {
                return btree.node_tiered_cache.clone();
            },
        }
    }
}
840
/// Iterator state for walking all non-leaf node values of a [`BMap`],
/// created by [`BMap::nonleafnode_iter`].
pub struct NonLeafNodeIter<'a, 'b, K, V, P, L: BlockLoader<P>, C: NodeCache<P>> {
    // the map being walked
    bmap: &'b BMap<'a, K, V, P, L, C>,
    // cursor into the root node's entries
    last_root_node_index: usize,
    // nchild for a btree root, capacity for a direct root (set in `new`)
    root_node_cap_or_nchild: usize,
    // cursor into the current non-root btree node
    last_btree_node_index: usize,
    // node currently being iterated, if any
    last_btree_node: Option<BtreeNodeRef<'a, K, V, P>>,
    // queue of discovered nodes still to be visited
    btree_node_backlog: VecDeque<BtreeNodeRef<'a, K, V, P>>,
}
849
850impl<'a, 'b, K, V, P, L, C> NonLeafNodeIter<'a, 'b, K, V, P, L, C>
851    where
852        K: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64>,
853        V: Copy + Default + std::fmt::Display + NodeValue + Any,
854        K: Into<u64> + From<u64>,
855        P: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64> + NodeValue,
856        P: From<u64> + Into<u64> + Any,
857        L: BlockLoader<P> + Clone,
858        C: NodeCache<P> + Clone,
859{
860    fn new(bmap: &'b BMap<'a, K, V, P, L, C>) -> Self {
861        let root = BtreeNode::<K, V, P>::from_slice(bmap.as_slice());
862        let root_node_cap_or_nchild = if root.is_large() {
863            root.get_nchild()
864        } else {
865            let root = DirectNode::<V>::from_slice(bmap.as_slice());
866            root.get_capacity()
867        };
868
869        Self {
870            bmap: bmap,
871            last_root_node_index: 0,
872            root_node_cap_or_nchild: root_node_cap_or_nchild,
873            last_btree_node_index: 0,
874            last_btree_node: None,
875            btree_node_backlog: VecDeque::new(),
876        }
877    }
878}
879
/// Iterate all values of intermediate nodes from highest level to level 1
impl<'a, 'b, K, V, P, L, C> Iterator for NonLeafNodeIter<'a, 'b, K, V, P, L, C>
    where
        K: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64>,
        V: Copy + Default + std::fmt::Display + NodeValue + Any,
        K: Into<u64> + From<u64>,
        P: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64> + NodeValue,
        P: From<u64> + Into<u64> + Any,
        L: BlockLoader<P> + Clone,
        C: NodeCache<P> + Clone,
{
    type Item = P;

    // Two-phase state machine:
    //   phase 1 - walk the embedded root node slot by slot;
    //   phase 2 - walk fetched intermediate nodes, queued FIFO in
    //             `btree_node_backlog` (push_back / pop_front), which
    //             yields breadth-first order: highest level down to level 1.
    // Each call yields one child pointer value and, for nodes above
    // level 1, enqueues the pointed-to node for later traversal.
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        match &self.bmap.inner {
            NodeType::Direct(_) => {
                // direct node is always leaf node
                return None;
            },
            NodeType::Btree(btree) => {
                // Phase 1: root node. The loop body always returns on its
                // first iteration, so this is effectively an
                // `if index < cap` with the cursor advanced in
                // `last_root_node_index` across calls.
                for idx in self.last_root_node_index..self.root_node_cap_or_nchild {
                    let node = BtreeNode::<K, V, P>::from_slice(self.bmap.as_slice());
                    let id = *node.get_val::<P>(idx);
                    assert!(!id.is_invalid());
                    if node.get_level() > BTREE_NODE_LEVEL_MIN {
                        // for L1 node, we don't need to fetch actual data block
                        // so we limit condition for fetch next level node here to >= 2
                        // NOTE(review): in the async builds this blocks the
                        // current thread on the fetch, since Iterator::next
                        // is synchronous.
                        #[cfg(all(not(feature = "sync-api"), feature = "futures-runtime"))]
                        let node_ref = futures::executor::block_on(async {
                            btree.get_from_nodes(&id).await.unwrap_or_else(|_| panic!("failed to fetch node {id}"))
                        });
                        #[cfg(all(not(feature = "sync-api"), feature = "tokio-runtime"))]
                        let node_ref = tokio::runtime::Handle::current().block_on(async {
                            btree.get_from_nodes(&id).await.unwrap_or_else(|_| panic!("failed to fetch node {id}"))
                        });
                        #[cfg(feature = "sync-api")]
                        let node_ref = btree.get_from_nodes(&id).unwrap_or_else(|_| panic!("failed to fetch node {id}"));
                        self.btree_node_backlog.push_back(node_ref);
                    }
                    self.last_root_node_index += 1;
                    return Some(id);
                }

                // Phase 2: pick the node currently being walked, or pull
                // the next one from the FIFO backlog.
                // try find the node we currently working on
                let node = if let Some(node) = &self.last_btree_node {
                    node
                } else {
                    let Some(node) = self.btree_node_backlog.pop_front() else {
                        // if no node currently working and no more nodes in backlog
                        // we reached the end
                        return None;
                    };
                    self.last_btree_node_index = 0;
                    self.last_btree_node = Some(node);
                    self.last_btree_node.as_ref().expect("unable to get last btree node")
                };

                // try working on one intermediate node
                let id = *node.get_val::<P>(self.last_btree_node_index);
                assert!(!id.is_invalid());
                if node.get_level() > BTREE_NODE_LEVEL_MIN {
                    // for L1 node, we don't need to fetch actual data block
                    // so we limit condition for fetch next level node here to >= 2
                    #[cfg(all(not(feature = "sync-api"), feature = "futures-runtime"))]
                    let node_ref = futures::executor::block_on(async {
                        btree.get_from_nodes(&id).await.unwrap_or_else(|_| panic!("failed to fetch node {id}"))
                    });
                    #[cfg(all(not(feature = "sync-api"), feature = "tokio-runtime"))]
                    let node_ref = tokio::runtime::Handle::current().block_on(async {
                        btree.get_from_nodes(&id).await.unwrap_or_else(|_| panic!("failed to fetch node {id}"))
                    });
                    #[cfg(feature = "sync-api")]
                    let node_ref = btree.get_from_nodes(&id).unwrap_or_else(|_| panic!("failed to fetch node {id}"));
                    self.btree_node_backlog.push_back(node_ref);
                }
                self.last_btree_node_index += 1;
                if self.last_btree_node_index >= node.get_nchild() {
                    // reach the end of this node,
                    // reset last btree node,
                    // will fetch next available node from backlog next time
                    self.last_btree_node = None;
                }
                return Some(id);
            },
        }
    }
}