1use std::fmt;
2#[cfg(feature = "rc")]
3use std::rc::Rc;
4#[cfg(feature = "rc")]
5use std::cell::RefCell;
6#[cfg(feature = "arc")]
7use std::sync::Arc;
8#[cfg(feature = "arc")]
9use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
10#[cfg(feature = "arc")]
11use atomic_refcell::AtomicRefCell;
12use std::collections::{HashMap, VecDeque};
13use std::marker::PhantomData;
14use std::io::{Result, ErrorKind};
15use std::any::Any;
16use crate::VMap;
17use crate::{NodeValue, BlockLoader, NodeCache};
18use crate::direct::DirectMap;
19use crate::btree::BtreeMap;
20use crate::node::{
21 BtreeNode, DirectNode,
22 BTREE_NODE_FLAG_LEAF, BTREE_NODE_FLAG_LARGE, BTREE_NODE_LEVEL_MIN
23};
24use crate::btree::{BtreeNodeRef, BtreeNodeDirty};
25use crate::cache::NodeTieredCacheStats;
26
/// Point-in-time statistics snapshot for a [`BMap`], produced by
/// `BMap::get_stat`. A map in direct form reports `BMapStat::default()`.
#[derive(Default, Debug)]
pub struct BMapStat {
    /// Whether the map is currently in btree form.
    pub btree: bool,
    /// Tree level/height — NOTE(review): exact semantics are defined by
    /// `BtreeMap::get_stat`; confirm there.
    pub level: usize,
    /// Whether the map holds unflushed modifications.
    pub dirty: bool,
    /// Block size used for meta (btree node) blocks, in bytes.
    pub meta_block_size: usize,
    /// Total number of cached btree nodes — filled in by `BtreeMap::get_stat`.
    pub nodes_total: usize,
    /// Number of level-1 nodes — filled in by `BtreeMap::get_stat`.
    pub nodes_l1: usize,
    /// Counters from the tiered node cache.
    pub cache_stats: NodeTieredCacheStats,
}
37
/// Internal representation of a [`BMap`]: either a flat, index-addressed
/// direct root node, or a full btree with external meta nodes.
pub enum NodeType<'a, K, V, P, L: BlockLoader<P>, C: NodeCache<P>> {
    /// Compact form: all values live in the root node itself.
    Direct(DirectMap<'a, K, V, P>),
    /// Expanded form: values live in btree leaf nodes loaded on demand.
    Btree(BtreeMap<'a, K, V, P, L, C>),
}
42
43impl<'a, K, V, P, L, C> fmt::Display for NodeType<'a, K, V, P, L, C>
44 where
45 K: Copy + fmt::Display + std::cmp::PartialOrd,
46 V: Copy + fmt::Display + NodeValue,
47 P: Copy + fmt::Display + NodeValue,
48 L: BlockLoader<P>,
49 C: NodeCache<P>,
50{
51 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
52 match self {
53 NodeType::Direct(direct) => {
54 write!(f, "{}", direct)
55 },
56 NodeType::Btree(btree) => {
57 write!(f, "{}", btree)
58 }
59 }
60 }
61}
62
/// Adaptive block map: starts life as a compact [`DirectMap`] and is
/// transparently converted to a [`BtreeMap`] once a key exceeds the direct
/// node's capacity; it can be converted back when a delete shrinks it enough
/// (see `do_delete`).
pub struct BMap<'a, K, V, P, L: BlockLoader<P>, C: NodeCache<P>> {
    /// Active representation (direct or btree).
    inner: NodeType<'a, K, V, P, L, C>,
    /// Block size used for btree meta nodes.
    meta_block_size: usize,
    /// Some(..) only while in direct form; moved into the `BtreeMap` on
    /// conversion (see `convert_and_insert`).
    block_loader: Option<L>,
    /// Some(..) only while in direct form; moved into the `BtreeMap` on
    /// conversion (see `convert_and_insert`).
    node_tiered_cache: Option<C>,
}
69
70impl<'a, K, V, P, L, C> fmt::Display for BMap<'a, K, V, P, L, C>
71 where
72 K: Copy + fmt::Display + std::cmp::PartialOrd,
73 V: Copy + fmt::Display + NodeValue,
74 P: Copy + fmt::Display + NodeValue,
75 L: BlockLoader<P>,
76 C: NodeCache<P>,
77{
78 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
79 write!(f, "{}", self.inner)
80 }
81}
82
impl<'a, K, V, P, L, C> BMap<'a, K, V, P, L, C>
    where
        K: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64>,
        V: Copy + Default + std::fmt::Display + NodeValue,
        K: Into<u64> + From<u64>,
        P: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64>,
        P: NodeValue + Into<u64> + From<u64>,
        L: BlockLoader<P>,
        C: NodeCache<P>,
{
    /// Convert this map from direct form to btree form and insert
    /// (`key`, `val`) as part of the same operation.
    ///
    /// `data` is the raw buffer of the old direct root; `last_seq` and
    /// `limit` are state carried over from the direct map. `self.block_loader`
    /// and `self.node_tiered_cache` are consumed (`take().unwrap()`) and
    /// moved into the new `BtreeMap`, so they must both be `Some` on entry.
    #[maybe_async::maybe_async]
    async fn convert_and_insert(&mut self, data: Vec<u8>, meta_block_size: usize, last_seq: P, limit: usize, key: K, val: V) -> Result<()> {
        // Collect every valid slot of the old direct node as a (key, value)
        // pair; the slot index becomes the btree key.
        let mut old_kv = Vec::new();
        let direct = DirectNode::<V>::from_slice(&data);
        for i in 0..direct.get_capacity() {
            let val = direct.get_val(i);
            if !val.is_invalid() {
                old_kv.push((From::<u64>::from(i as u64), val));
            }
        }
        let old_ud = direct.get_userdata();

        // Zeroed buffer for the new btree root, same size as the old root.
        // Byte 0 carries the node flags: start out as a "large" leaf.
        let mut v = Vec::with_capacity(data.len());
        v.resize(data.len(), 0);
        v[0] = BTREE_NODE_FLAG_LEAF | BTREE_NODE_FLAG_LARGE;
        let mut btree = BtreeMap {
            #[cfg(feature = "rc")]
            root: Rc::new(Box::new(BtreeNode::<K, V, P>::from_slice(&v))),
            #[cfg(feature = "arc")]
            root: Arc::new(Box::new(BtreeNode::<K, V, P>::from_slice(&v))),
            data: v,
            #[cfg(feature = "rc")]
            nodes: RefCell::new(HashMap::new()),
            #[cfg(feature = "rc")]
            last_seq: RefCell::new(last_seq),
            #[cfg(feature = "rc")]
            dirty: RefCell::new(true),
            #[cfg(feature = "arc")]
            nodes: AtomicRefCell::new(HashMap::new()),
            // Ownership of the cache/loader moves from the BMap to the btree.
            node_tiered_cache: self.node_tiered_cache.take().unwrap(),
            #[cfg(feature = "arc")]
            last_seq: Arc::new(AtomicU64::new(Into::<u64>::into(last_seq))),
            #[cfg(feature = "arc")]
            dirty: Arc::new(AtomicBool::new(true)),
            meta_block_size: meta_block_size,
            #[cfg(feature = "rc")]
            cache_limit: RefCell::new(limit),
            #[cfg(feature = "arc")]
            cache_limit: Arc::new(AtomicUsize::new(limit)),
            block_loader: self.block_loader.take().unwrap(),
        };

        // Fast path: all old entries plus the new one fit into the btree
        // root itself, so the root stays a (level-1) leaf.
        if old_kv.len() + 1 <= btree.root.get_capacity() {
            btree.root.set_nchild(0);
            btree.root.init_root(1, true);
            btree.root.set_userdata(old_ud);
            let mut index = 0;
            for (k, v) in old_kv.into_iter() {
                btree.root.insert::<V>(index, &k, &v);
                index += 1;
            }
            // The old keys are ascending direct-node indices and `key` is
            // presumably beyond them (caller converts on key-exceed), so a
            // plain append keeps the order.
            btree.root.insert::<V>(index, &key, &val);

            self.inner = NodeType::Btree(btree);
            return Ok(());
        }

        // Slow path: create one level-1 leaf child holding every entry, then
        // re-initialize the root as a level-2 internal node pointing at it.
        let first_root_key;
        {
            let node = btree.get_new_node(&last_seq, 1)?;
            node.init(1, 0);

            if old_kv.len() == 0 && btree.root.get_capacity() == 0 {
                first_root_key = key;
            } else {
                // NOTE(review): indexing old_kv[0] would panic if old_kv were
                // empty here; an empty old_kv with non-zero root capacity is
                // handled by the fast path above, so this branch should only
                // see a non-empty old_kv — confirm.
                first_root_key = old_kv[0].0;
            }
            let mut index = 0;
            for (k, v) in old_kv.into_iter() {
                node.insert::<V>(index, &k, &v);
                index += 1;
            }
            node.insert::<V>(index, &key, &val);
            node.mark_dirty();
            // Register the new leaf in the btree's node table under its seq.
            btree.nodes.borrow_mut().insert(last_seq, node);

        }

        {
            // Reinitialize the root in place. This needs exclusive access to
            // the refcounted root pointer; at this point no clones should
            // exist, otherwise we panic.
            #[cfg(feature = "rc")]
            let Some(root) = std::rc::Rc::get_mut(&mut btree.root) else {
                panic!("failed to get rc of btree root");
            };
            #[cfg(feature = "arc")]
            let Some(root) = std::sync::Arc::get_mut(&mut btree.root) else {
                panic!("failed to get arc of btree root");
            };
            root.do_reinit::<P>();
            btree.root.set_nchild(0);
            btree.root.init_root(2, true);
            btree.root.set_userdata(old_ud);
            btree.root.clear_leaf();
            // The root now carries a single pointer to the new leaf child.
            btree.root.insert::<P>(0, &first_root_key, &last_seq);

        }

        self.inner = NodeType::Btree(btree);
        Ok(())
    }

    /// Rebuild the map as a direct map from the gathered `input` entries.
    /// Used by `do_delete` when a btree has shrunk enough to fit back into a
    /// direct root node.
    ///
    /// `root_node_size`, `user_data`, `last_seq` and `limit` restore the root
    /// geometry and carried-over state; `block_loader` and
    /// `node_tiered_cache` are handed back to `self`, since a direct map does
    /// not own them.
    #[maybe_async::maybe_async]
    async fn convert_to_direct(&mut self, _key: &K, input: &Vec<(K, V)>,
            root_node_size: usize, user_data: u32, last_seq: P, limit: usize, block_loader: L, node_tiered_cache: C) -> Result<()> {
        // Fresh zeroed buffer for the direct root node.
        let mut v = Vec::with_capacity(root_node_size);
        v.resize(root_node_size, 0);
        let direct = DirectMap {
            #[cfg(feature = "rc")]
            root: Rc::new(Box::new(DirectNode::<V>::from_slice(&v))),
            #[cfg(feature = "arc")]
            root: Arc::new(Box::new(DirectNode::<V>::from_slice(&v))),
            data: v,
            #[cfg(feature = "rc")]
            last_seq: RefCell::new(last_seq),
            #[cfg(feature = "rc")]
            dirty: RefCell::new(true),
            #[cfg(feature = "arc")]
            last_seq: Arc::new(AtomicU64::new(Into::<u64>::into(last_seq))),
            #[cfg(feature = "arc")]
            dirty: Arc::new(AtomicBool::new(true)),
            #[cfg(feature = "rc")]
            cache_limit: RefCell::new(limit),
            #[cfg(feature = "arc")]
            cache_limit: Arc::new(AtomicUsize::new(limit)),
            marker: PhantomData,
            #[cfg(feature = "arc")]
            marker_p: PhantomData,
        };

        direct.root.set_userdata(user_data);
        // Each btree key converts back to a direct-node slot index.
        for (k, v) in input {
            let i = Into::<u64>::into(*k) as usize;
            direct.root.set_val(i, v);
        }

        // The direct form keeps the loader/cache on the BMap itself.
        self.block_loader = Some(block_loader);
        self.node_tiered_cache = Some(node_tiered_cache);
        self.inner = NodeType::Direct(direct);
        Ok(())
    }
}
251
impl<'a, K, V, P, L, C> BMap<'a, K, V, P, L, C>
    where
        K: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64>,
        V: Copy + Default + std::fmt::Display + NodeValue + Any,
        K: Into<u64> + From<u64>,
        P: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64>,
        P: NodeValue + From<u64> + Into<u64> + Any,
        L: BlockLoader<P> + Clone,
        C: NodeCache<P> + Clone,
{
    /// Create a new, empty map in direct form.
    ///
    /// # Panics
    /// Panics if `root_node_size` is larger than half of `meta_block_size`.
    pub fn new(root_node_size: usize, meta_block_size: usize, block_loader: L, node_tiered_cache: C) -> Self {
        if root_node_size > (meta_block_size / 2) {
            panic!("root node size {} is too large, reduce to {} at least, which is half of meta block size",
                root_node_size, meta_block_size / 2);
        }
        let mut data = Vec::with_capacity(root_node_size);
        data.resize(root_node_size, 0);
        let root = DirectNode::<V>::from_slice(&data);
        // init(0, 1, 0) — NOTE(review): argument meaning assumed from call
        // sites; confirm against DirectNode::init.
        root.init(0, 1, 0);

        Self {
            inner: NodeType::Direct(DirectMap::<K, V, P>::new(&data)),
            meta_block_size: meta_block_size,
            block_loader: Some(block_loader),
            node_tiered_cache: Some(node_tiered_cache),
        }
    }

    /// Build a map in direct form from an existing serialized root node.
    pub fn new_direct(data: &[u8], meta_block_size: usize, block_loader: L, node_tiered_cache: C) -> Self {
        Self {
            inner: NodeType::Direct(DirectMap::<K, V, P>::new(data)),
            meta_block_size: meta_block_size,
            block_loader: Some(block_loader),
            node_tiered_cache: Some(node_tiered_cache),
        }
    }

    /// Build a map in btree form from an existing serialized root node.
    /// The loader and cache are owned by the inner `BtreeMap`, so the
    /// `BMap`-level options stay `None`.
    pub fn new_btree(data: &[u8], meta_block_size: usize, block_loader: L, node_tiered_cache: C) -> Self {
        Self {
            inner: NodeType::Btree(BtreeMap::<K, V, P, L, C>::new(data, meta_block_size, block_loader, node_tiered_cache)),
            meta_block_size: meta_block_size,
            block_loader: None,
            node_tiered_cache: None,
        }
    }

    /// Raw bytes of the (direct or btree) root node.
    pub fn as_slice(&self) -> &[u8] {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.as_slice();
            },
            NodeType::Btree(btree) => {
                return btree.as_slice();
            },
        }
    }

    /// Whether the map holds unflushed modifications.
    pub fn dirty(&self) -> bool {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.dirty();
            },
            NodeType::Btree(btree) => {
                return btree.dirty();
            }
        }
    }

    /// Clear the dirty flag (e.g. after the caller has persisted the map).
    pub fn clear_dirty(&mut self) {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.clear_dirty();
            },
            NodeType::Btree(btree) => {
                return btree.clear_dirty();
            }
        }
    }

    /// Insert helper: converts direct -> btree first if `key` does not fit
    /// the direct node.
    #[maybe_async::maybe_async]
    async fn do_try_insert(&mut self, key: K, val: V) -> Result<()> {
        match &self.inner {
            NodeType::Direct(direct) => {
                if direct.is_key_exceed(&key) {
                    // Key is beyond the direct node's range: convert the map
                    // to btree form, carrying over seq/limit state.
                    let data = direct.data.clone();
                    #[cfg(feature = "rc")]
                    let last_seq = direct.last_seq.take();
                    #[cfg(feature = "arc")]
                    let last_seq = direct.last_seq.load(Ordering::SeqCst).into();
                    let limit = direct.get_cache_limit();
                    return self.convert_and_insert(data, self.meta_block_size, last_seq, limit, key, val).await;
                }
                return direct.insert(key, val).await;
            },
            NodeType::Btree(btree) => {
                return btree.insert(key, val).await;
            },
        }
    }

    /// Insert (`key`, `val`); error behavior on an existing key is that of
    /// the inner map's `insert`.
    #[maybe_async::maybe_async]
    pub async fn try_insert(&mut self, key: K, val: V) -> Result<()> {
        self.do_try_insert(key, val).await
    }

    /// Insert-or-update helper; returns the previous value if one was
    /// replaced. A direct -> btree conversion always returns `None` (the key
    /// was new by definition).
    #[maybe_async::maybe_async]
    async fn do_insert_or_update(&mut self, key: K, val: V) -> Result<Option<V>> {
        match &self.inner {
            NodeType::Direct(direct) => {
                if direct.is_key_exceed(&key) {
                    let data = direct.data.clone();
                    #[cfg(feature = "rc")]
                    let last_seq = direct.last_seq.take();
                    #[cfg(feature = "arc")]
                    let last_seq = direct.last_seq.load(Ordering::SeqCst).into();
                    let limit = direct.get_cache_limit();
                    let _ = self.convert_and_insert(data, self.meta_block_size, last_seq, limit, key, val).await?;
                    return Ok(None);
                }
                return direct.insert_or_update(key, val).await;
            },
            NodeType::Btree(btree) => {
                return btree.insert_or_update(key, val).await;
            },
        }
    }

    /// Insert or update (`key`, `val`), returning the replaced value if any.
    #[maybe_async::maybe_async]
    pub async fn insert(&mut self, key: K, val: V) -> Result<Option<V>> {
        self.do_insert_or_update(key, val).await
    }

    /// Delete helper. In btree form, `delete_check_and_gather` reports
    /// whether the surviving entries would fit a direct node (gathering them
    /// into `v`); if so, the key is deleted and the map is converted back to
    /// direct form.
    #[maybe_async::maybe_async]
    async fn do_delete(&mut self, key: &K) -> Result<()> {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.delete(key).await;
            },
            NodeType::Btree(btree) => {
                let mut v = Vec::<(K, V)>::new();
                if btree.delete_check_and_gather(key, &mut v).await? {
                    let _ = btree.delete(key).await?;
                    // The gathered snapshot may still contain the deleted key.
                    v.retain(|(_k, _)| _k != key);
                    #[cfg(feature = "rc")]
                    let _ = self.convert_to_direct(key, &v,
                        btree.data.len(), btree.root.get_userdata(), btree.last_seq.take(), btree.get_cache_limit(),
                        btree.block_loader.clone(), btree.node_tiered_cache.clone()).await?;
                    #[cfg(feature = "arc")]
                    let _ = self.convert_to_direct(key, &v,
                        btree.data.len(), btree.root.get_userdata(), btree.last_seq.load(Ordering::SeqCst).into(), btree.get_cache_limit(),
                        btree.block_loader.clone(), btree.node_tiered_cache.clone()).await?;
                    return Ok(());
                }
                return btree.delete(key).await;
            },
        }
    }

    /// Delete `key`, possibly shrinking a btree back into direct form.
    #[maybe_async::maybe_async]
    pub async fn delete(&mut self, key: &K) -> Result<()> {
        self.do_delete(key).await
    }

    /// Look up the value for `key` at a given tree `level`.
    #[maybe_async::maybe_async]
    pub async fn lookup_at_level(&self, key: &K, level: usize) -> Result<V> {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.lookup(key, level).await;
            },
            NodeType::Btree(btree) => {
                return btree.lookup(key, level).await;
            },
        }
    }

    /// Look up the value for `key` (level 1, i.e. the data level).
    #[maybe_async::maybe_async]
    pub async fn lookup(&self, key: &K) -> Result<V> {
        self.lookup_at_level(key, 1).await
    }

    /// Look up `key` and report how many contiguous blocks follow it, up to
    /// `maxblocks` — semantics per the inner map's `lookup_contig`.
    #[maybe_async::maybe_async]
    pub async fn lookup_contig(&self, key: &K, maxblocks: usize) -> Result<(V, usize)> {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.lookup_contig(key, maxblocks).await;
            },
            NodeType::Btree(btree) => {
                return btree.lookup_contig(key, maxblocks).await;
            },
        }
    }

    /// Collect all dirty btree nodes; a direct map has no separate nodes and
    /// returns an empty vector.
    pub fn lookup_dirty(&self) -> Vec<BtreeNodeDirty<'a, K, V, P>> {
        match &self.inner {
            NodeType::Direct(_) => {
                return Vec::new();
            },
            NodeType::Btree(btree) => {
                return btree.lookup_dirty().into_iter().map(|n| BtreeNodeDirty(n)).collect();
            },
        }
    }

    /// Find the first existing key at or after `start`.
    #[maybe_async::maybe_async]
    pub async fn seek_key(&self, start: &K) -> Result<K> {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.seek_key(start).await;
            },
            NodeType::Btree(btree) => {
                return btree.seek_key(start).await;
            },
        }
    }

    /// The largest key currently present in the map.
    #[maybe_async::maybe_async]
    pub async fn last_key(&self) -> Result<K> {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.last_key().await;
            },
            NodeType::Btree(btree) => {
                return btree.last_key().await;
            },
        }
    }

    /// Assign the on-disk id `newid` for `key`. With `node` = Some the
    /// target is a meta (btree) node, with None a data block. A direct map
    /// has no meta nodes, so a Some(`node`) is accepted and ignored; for a
    /// data block the id is downcast from `P` to `V` (which requires the two
    /// types to be identical).
    ///
    /// # Panics
    /// Panics on a direct map if `P` and `V` are different types, or (with
    /// the `value-check` feature) if `newid` is not a valid external value.
    #[maybe_async::maybe_async]
    pub async fn assign(&self, key: &K, newid: P, node: Option<BtreeNodeDirty<'_, K, V, P>>) -> Result<()> {
        #[cfg(feature = "value-check")]
        if !newid.is_valid_extern_assign() {
            panic!("assign value is not in a valid format");
        }
        match &self.inner {
            NodeType::Direct(direct) => {
                if node.is_some() {
                    // Meta nodes don't exist in direct form: nothing to do.
                    return Ok(());
                }
                // Runtime check that P and V are the same concrete type.
                let any = &newid as &dyn Any;
                match any.downcast_ref::<V>() {
                    Some(newval) => {
                        return direct.assign(key, *newval).await;
                    },
                    None => {
                        panic!("V type {} and P type {} not the same, you can not use fn assign_data_node",
                            std::any::type_name::<V>(), std::any::type_name::<P>());
                    },
                }
            },
            NodeType::Btree(btree) => {
                return btree.assign(key, newid, node.map(|n| n.clone_node_ref())).await;
            },
        }
    }

    /// Assign the on-disk id `newid` for a meta (btree) node. No-op on a
    /// direct map; the key is irrelevant for meta nodes, so zero is used.
    #[maybe_async::maybe_async]
    pub async fn assign_meta_node(&self, newid: P, node: BtreeNodeDirty<'_, K, V, P>) -> Result<()> {
        #[cfg(feature = "value-check")]
        if !newid.is_valid_extern_assign() {
            panic!("assign value is not in a valid format");
        }
        match &self.inner {
            NodeType::Direct(_direct) => {
                return Ok(());
            },
            NodeType::Btree(btree) => {
                let zero_key = 0.into();
                return btree.assign(&zero_key, newid, Some(node.clone_node_ref())).await;
            },
        }
    }

    /// Assign the on-disk id `newid` for the data block at `key`.
    ///
    /// # Panics
    /// Same panic conditions as [`Self::assign`] on a direct map.
    #[maybe_async::maybe_async]
    pub async fn assign_data_node(&self, key: &K, newid: P) -> Result<()> {
        #[cfg(feature = "value-check")]
        if !newid.is_valid_extern_assign() {
            panic!("assign value is not in a valid format");
        }
        match &self.inner {
            NodeType::Direct(direct) => {
                // Runtime check that P and V are the same concrete type.
                let any = &newid as &dyn Any;
                match any.downcast_ref::<V>() {
                    Some(newval) => {
                        return direct.assign(key, *newval).await;
                    },
                    None => {
                        panic!("V type {} and P type {} not the same, you can not use fn assign_data_node",
                            std::any::type_name::<V>(), std::any::type_name::<P>());
                    },
                }
            },
            NodeType::Btree(btree) => {
                return btree.assign(key, newid, None).await;
            },
        }
    }

    /// Propagate a change at `key` upward — semantics per the inner map's
    /// `propagate`. The `node` hint is only meaningful in btree form.
    #[maybe_async::maybe_async]
    pub async fn propagate(&self, key: &K, node: Option<BtreeNodeRef<'_, K, V, P>>) -> Result<()> {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.propagate(key).await;
            },
            NodeType::Btree(btree) => {
                return btree.propagate(key, node).await;
            },
        }
    }

    /// Mark the node containing `key` at `level` — semantics per
    /// `BtreeMap::mark`. No-op on a direct map.
    #[maybe_async::maybe_async]
    pub async fn mark(&self, key: &K, level: usize) -> Result<()> {
        match &self.inner {
            NodeType::Direct(_) => {
                return Ok(());
            },
            NodeType::Btree(btree) => {
                return btree.mark(key, level).await;
            },
        }
    }

    /// Truncate helper: removes every entry with a key >= `key` by deleting
    /// from the current last key backwards until the last key drops below
    /// `key` (or the map reports NotFound, i.e. it is empty).
    #[maybe_async::maybe_async]
    async fn do_truncate(&mut self, key: &K) -> Result<()> {
        let mut last_key = self.last_key().await?;
        if key > &last_key {
            // Nothing at or after `key`.
            return Ok(());
        }

        while key <= &last_key {
            let _ = self.do_delete(&last_key).await?;
            match self.last_key().await {
                Ok(key) => {
                    last_key = key;
                },
                Err(e) => {
                    if e.kind() == ErrorKind::NotFound {
                        // Map is now empty: truncation is complete.
                        return Ok(());
                    }
                    return Err(e);
                }
            }
        }
        return Ok(());
    }

    /// Remove every entry with a key >= `key`.
    #[maybe_async::maybe_async]
    pub async fn truncate(&mut self, key: &K) -> Result<()> {
        self.do_truncate(key).await
    }

    /// Reconstruct a map from a serialized root node, sniffing the "large"
    /// flag to decide between btree and direct form.
    pub fn read(buf: &[u8], meta_block_size: usize, block_loader: L, node_tiered_cache: C) -> Self {
        let root = BtreeNode::<K, V, P>::from_slice(buf);
        if root.is_large() {
            return Self::new_btree(buf, meta_block_size, block_loader, node_tiered_cache);
        }
        return Self::new_direct(buf, meta_block_size, block_loader, node_tiered_cache);
    }

    /// Serialize the root node into `buf`.
    ///
    /// # Panics
    /// `copy_from_slice` panics if `buf`'s length differs from the root size.
    pub fn write(&self, buf: &mut [u8]) {
        buf.copy_from_slice(self.as_slice())
    }

    /// Statistics snapshot; a direct map reports `BMapStat::default()`.
    pub fn get_stat(&self) -> BMapStat {
        match &self.inner {
            NodeType::Direct(_) => {
                return BMapStat::default();
            },
            NodeType::Btree(btree) => {
                return btree.get_stat();
            },
        }
    }

    /// User data word stored in the root node.
    pub fn get_userdata(&self) -> u32 {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.get_userdata();
            },
            NodeType::Btree(btree) => {
                return btree.get_userdata();
            },
        }
    }

    /// Store a user data word in the root node.
    pub fn set_userdata(&self, data: u32) {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.set_userdata(data);
            },
            NodeType::Btree(btree) => {
                return btree.set_userdata(data);
            },
        }
    }

    /// Current node-cache limit.
    pub fn get_cache_limit(&self) -> usize {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.get_cache_limit();
            },
            NodeType::Btree(btree) => {
                return btree.get_cache_limit();
            },
        }
    }

    /// Update the node-cache limit.
    pub fn set_cache_limit(&self, limit: usize) {
        match &self.inner {
            NodeType::Direct(direct) => {
                return direct.set_cache_limit(limit);
            },
            NodeType::Btree(btree) => {
                return btree.set_cache_limit(limit);
            },
        }
    }

    /// Iterator over the ids stored in non-leaf btree nodes; yields nothing
    /// for a map in direct form.
    pub fn nonleafnode_iter<'b>(&'b self) -> NonLeafNodeIter<'a, 'b, K, V, P, L, C> {
        NonLeafNodeIter::new(self)
    }

    /// Clone of the block loader. In direct form it lives on the BMap; in
    /// btree form the inner BtreeMap owns it.
    pub fn get_block_loader(&self) -> L {
        match &self.inner {
            NodeType::Direct(_) => {
                assert!(self.block_loader.is_some());
                return self.block_loader.as_ref().unwrap().clone();
            },
            NodeType::Btree(btree) => {
                return btree.block_loader.clone();
            },
        }
    }

    /// Clone of the tiered node cache. In direct form it lives on the BMap;
    /// in btree form the inner BtreeMap owns it.
    pub fn get_node_cache(&self) -> C {
        match &self.inner {
            NodeType::Direct(_) => {
                assert!(self.node_tiered_cache.is_some());
                return self.node_tiered_cache.as_ref().unwrap().clone();
            },
            NodeType::Btree(btree) => {
                return btree.node_tiered_cache.clone();
            },
        }
    }
}
840
/// Breadth-first iterator over the child pointers (`P` ids) stored in a
/// [`BMap`]'s non-leaf btree nodes: first the root's slots, then the slots of
/// every fetched node whose level is above the minimum. Yields nothing for a
/// map in direct form.
pub struct NonLeafNodeIter<'a, 'b, K, V, P, L: BlockLoader<P>, C: NodeCache<P>> {
    /// Map being walked.
    bmap: &'b BMap<'a, K, V, P, L, C>,
    /// Next root slot to visit.
    last_root_node_index: usize,
    /// Number of root slots to scan: nchild for a btree root, capacity for a
    /// direct root (the latter is never actually scanned).
    root_node_cap_or_nchild: usize,
    /// Next child slot within `last_btree_node`.
    last_btree_node_index: usize,
    /// Node currently being walked, if any.
    last_btree_node: Option<BtreeNodeRef<'a, K, V, P>>,
    /// FIFO of fetched non-leaf nodes whose children still need visiting.
    btree_node_backlog: VecDeque<BtreeNodeRef<'a, K, V, P>>,
}
849
850impl<'a, 'b, K, V, P, L, C> NonLeafNodeIter<'a, 'b, K, V, P, L, C>
851 where
852 K: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64>,
853 V: Copy + Default + std::fmt::Display + NodeValue + Any,
854 K: Into<u64> + From<u64>,
855 P: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64> + NodeValue,
856 P: From<u64> + Into<u64> + Any,
857 L: BlockLoader<P> + Clone,
858 C: NodeCache<P> + Clone,
859{
860 fn new(bmap: &'b BMap<'a, K, V, P, L, C>) -> Self {
861 let root = BtreeNode::<K, V, P>::from_slice(bmap.as_slice());
862 let root_node_cap_or_nchild = if root.is_large() {
863 root.get_nchild()
864 } else {
865 let root = DirectNode::<V>::from_slice(bmap.as_slice());
866 root.get_capacity()
867 };
868
869 Self {
870 bmap: bmap,
871 last_root_node_index: 0,
872 root_node_cap_or_nchild: root_node_cap_or_nchild,
873 last_btree_node_index: 0,
874 last_btree_node: None,
875 btree_node_backlog: VecDeque::new(),
876 }
877 }
878}
879
impl<'a, 'b, K, V, P, L, C> Iterator for NonLeafNodeIter<'a, 'b, K, V, P, L, C>
    where
        K: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64>,
        V: Copy + Default + std::fmt::Display + NodeValue + Any,
        K: Into<u64> + From<u64>,
        P: Copy + Default + std::fmt::Display + PartialOrd + Eq + std::hash::Hash + std::ops::AddAssign<u64> + NodeValue,
        P: From<u64> + Into<u64> + Any,
        L: BlockLoader<P> + Clone,
        C: NodeCache<P> + Clone,
{
    type Item = P;

    /// Yield the next stored child pointer, breadth-first: first every root
    /// slot, then each child of every queued above-minimum-level node. A map
    /// in direct form yields nothing. Node fetches happen synchronously, so
    /// on async builds this blocks on the configured runtime.
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        match &self.bmap.inner {
            NodeType::Direct(_) => {
                // Direct maps have no separate btree nodes at all.
                return None;
            },
            NodeType::Btree(btree) => {
                // Phase 1: walk the in-line root node's slots. The loop body
                // always returns, so exactly one slot is consumed per call.
                for idx in self.last_root_node_index..self.root_node_cap_or_nchild {
                    let node = BtreeNode::<K, V, P>::from_slice(self.bmap.as_slice());
                    let id = *node.get_val::<P>(idx);
                    assert!(!id.is_invalid());
                    if node.get_level() > BTREE_NODE_LEVEL_MIN {
                        // The child is itself a non-leaf node: fetch it and
                        // queue it so its own children get visited later.
                        #[cfg(all(not(feature = "sync-api"), feature = "futures-runtime"))]
                        let node_ref = futures::executor::block_on(async {
                            btree.get_from_nodes(&id).await.unwrap_or_else(|_| panic!("failed to fetch node {id}"))
                        });
                        #[cfg(all(not(feature = "sync-api"), feature = "tokio-runtime"))]
                        let node_ref = tokio::runtime::Handle::current().block_on(async {
                            btree.get_from_nodes(&id).await.unwrap_or_else(|_| panic!("failed to fetch node {id}"))
                        });
                        #[cfg(feature = "sync-api")]
                        let node_ref = btree.get_from_nodes(&id).unwrap_or_else(|_| panic!("failed to fetch node {id}"));
                        self.btree_node_backlog.push_back(node_ref);
                    }
                    self.last_root_node_index += 1;
                    return Some(id);
                }

                // Phase 2: continue inside the node we were walking, or pop
                // the next fetched node off the backlog.
                let node = if let Some(node) = &self.last_btree_node {
                    node
                } else {
                    let Some(node) = self.btree_node_backlog.pop_front() else {
                        // Backlog drained: iteration is complete.
                        return None;
                    };
                    self.last_btree_node_index = 0;
                    self.last_btree_node = Some(node);
                    self.last_btree_node.as_ref().expect("unable to get last btree node")
                };

                let id = *node.get_val::<P>(self.last_btree_node_index);
                assert!(!id.is_invalid());
                if node.get_level() > BTREE_NODE_LEVEL_MIN {
                    // Same fetch-and-queue as the root phase above.
                    #[cfg(all(not(feature = "sync-api"), feature = "futures-runtime"))]
                    let node_ref = futures::executor::block_on(async {
                        btree.get_from_nodes(&id).await.unwrap_or_else(|_| panic!("failed to fetch node {id}"))
                    });
                    #[cfg(all(not(feature = "sync-api"), feature = "tokio-runtime"))]
                    let node_ref = tokio::runtime::Handle::current().block_on(async {
                        btree.get_from_nodes(&id).await.unwrap_or_else(|_| panic!("failed to fetch node {id}"))
                    });
                    #[cfg(feature = "sync-api")]
                    let node_ref = btree.get_from_nodes(&id).unwrap_or_else(|_| panic!("failed to fetch node {id}"));
                    self.btree_node_backlog.push_back(node_ref);
                }
                self.last_btree_node_index += 1;
                if self.last_btree_node_index >= node.get_nchild() {
                    // Finished this node; the next call pops from the backlog.
                    self.last_btree_node = None;
                }
                return Some(id);
            },
        }
    }
}