1use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
3#[cfg(not(target_arch = "wasm32"))]
4use memmap::{Mmap, MmapMut};
5use serde::{self, Deserialize, Serialize};
6
7use std::{
8 fs::OpenOptions,
9 io::{Error, ErrorKind, Read, Write},
10 num::ParseIntError,
11 path::PathBuf,
12};
13
14use crate::{
15 cliargs::memsize::MemSizeArgs, rw::ReadWrite, visitors::*, Entry, Id, IdVal, Process, RawEntries,
16 Val,
17};
18
/// Magic bytes identifying the file format ("BSTreeFile").
const FILE_TYPE: &[u8; 10] = b"BSTreeFile";
/// Crate version string, baked in at compile time from Cargo metadata.
const VERSION: &str = env!("CARGO_PKG_VERSION");
21
/// Types that know the exact number of bytes their on-disk representation occupies.
pub trait HasByteSize {
    /// Returns the total byte size of this (sub-)tree on disk, given the
    /// byte size of a single entry (id bytes + value bytes).
    fn byte_size(&self, entry_byte_size: usize) -> usize;
}
28
/// Write-side counterpart of [`SubTreeR`]: serializes entries into a pre-sized buffer.
trait SubTreeW: HasByteSize {
    /// Consumes entries (in ascending order) from `entries_iterator` to fill
    /// `dest`, whose length must equal `self.byte_size(..)`.
    ///
    /// Returns the iterator so the caller can keep writing subsequent parts
    /// of the tree from where this node stopped.
    fn write<I, V, IRW, VRW, T>(
        &self,
        entries_iterator: T,
        id_rw: &IRW,
        val_rw: &VRW,
        dest: &mut [u8],
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Iterator<Item = Entry<I, V>>;
}
47
/// Read-side operations on a serialized (sub-)tree stored in a byte slice.
pub trait SubTreeR: HasByteSize {
    /// Looks for an entry matching `value` in `raw_entries`; returns
    /// `Ok(None)` when no entry matches.
    fn get<I, V, IRW, VRW>(
        &self,
        value: V,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<Option<Entry<I, V>>, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>;

    /// Feeds the visitor all entries of this sub-tree in descending order
    /// (entries reported via `visit_le_center`), stopping early when the
    /// visitor's `visit_desc()` turns false.
    fn visit_desc<I, V, IRW, VRW, T>(
        &self,
        visitor: T,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Visitor<I = I, V = V>;

    /// Locates the visitor's `center()` value, then expands outward in both
    /// directions while the visitor keeps asking for more
    /// (`visit_desc()` / `visit_asc()`).
    fn visit<I, V, IRW, VRW, T>(
        &self,
        visitor: T,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Visitor<I = I, V = V>;

    /// Feeds the visitor all entries of this sub-tree in ascending order
    /// (entries reported via `visit_he_center`), stopping early when the
    /// visitor's `visit_asc()` turns false.
    fn visit_asc<I, V, IRW, VRW, T>(
        &self,
        visitor: T,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Visitor<I = I, V = V>;
}
107
108#[derive(Debug)]
109pub enum Root {
110 L1Leaf(L1Leaf), L1Node(L1Node), LDNode(LDNode), RootL1Node(RootL1Node), RootLDNode(RootLDNode), }
120
121impl HasByteSize for Root {
122 fn byte_size(&self, entry_byte_size: usize) -> usize {
123 match &self {
124 Root::L1Leaf(leaf) => leaf.byte_size(entry_byte_size),
125 Root::L1Node(node) => node.byte_size(entry_byte_size),
126 Root::LDNode(node) => node.byte_size(entry_byte_size),
127 Root::RootL1Node(node) => node.byte_size(entry_byte_size),
128 Root::RootLDNode(node) => node.byte_size(entry_byte_size),
129 }
130 }
131}
132
133impl SubTreeW for Root {
134 fn write<I, V, IRW, VRW, T>(
135 &self,
136 entries_iterator: T,
137 id_rw: &IRW,
138 val_rw: &VRW,
139 dest: &mut [u8],
140 ) -> Result<T, Error>
141 where
142 I: Id,
143 V: Val,
144 IRW: ReadWrite<Type = I>,
145 VRW: ReadWrite<Type = V>,
146 T: Iterator<Item = Entry<I, V>>,
147 {
148 match &self {
150 Root::L1Leaf(leaf) => leaf.write(entries_iterator, id_rw, val_rw, dest),
151 Root::L1Node(node) => node.write(entries_iterator, id_rw, val_rw, dest),
152 Root::LDNode(node) => node.write(entries_iterator, id_rw, val_rw, dest),
153 Root::RootL1Node(node) => node.write(entries_iterator, id_rw, val_rw, dest),
154 Root::RootLDNode(node) => node.write(entries_iterator, id_rw, val_rw, dest),
155 }
156 }
157}
158
159impl SubTreeR for Root {
160 fn get<I, V, IRW, VRW>(
161 &self,
162 value: V,
163 raw_entries: &[u8],
164 id_rw: &IRW,
165 val_rw: &VRW,
166 ) -> Result<Option<Entry<I, V>>, Error>
167 where
168 I: Id,
169 V: Val,
170 IRW: ReadWrite<Type = I>,
171 VRW: ReadWrite<Type = V>,
172 {
173 match &self {
175 Root::L1Leaf(leaf) => leaf.get(value, raw_entries, id_rw, val_rw),
176 Root::L1Node(node) => node.get(value, raw_entries, id_rw, val_rw),
177 Root::LDNode(node) => node.get(value, raw_entries, id_rw, val_rw),
178 Root::RootL1Node(node) => node.get(value, raw_entries, id_rw, val_rw),
179 Root::RootLDNode(node) => node.get(value, raw_entries, id_rw, val_rw),
180 }
181 }
182
183 fn visit_desc<I, V, IRW, VRW, T>(
184 &self,
185 visitor: T,
186 raw_entries: &[u8],
187 id_rw: &IRW,
188 val_rw: &VRW,
189 ) -> Result<T, Error>
190 where
191 I: Id,
192 V: Val,
193 IRW: ReadWrite<Type = I>,
194 VRW: ReadWrite<Type = V>,
195 T: Visitor<I = I, V = V>,
196 {
197 match &self {
199 Root::L1Leaf(leaf) => leaf.visit_desc(visitor, raw_entries, id_rw, val_rw),
200 Root::L1Node(node) => node.visit_desc(visitor, raw_entries, id_rw, val_rw),
201 Root::LDNode(node) => node.visit_desc(visitor, raw_entries, id_rw, val_rw),
202 Root::RootL1Node(node) => node.visit_desc(visitor, raw_entries, id_rw, val_rw),
203 Root::RootLDNode(node) => node.visit_desc(visitor, raw_entries, id_rw, val_rw),
204 }
205 }
206
207 fn visit<I, V, IRW, VRW, T>(
208 &self,
209 visitor: T,
210 raw_entries: &[u8],
211 id_rw: &IRW,
212 val_rw: &VRW,
213 ) -> Result<T, Error>
214 where
215 I: Id,
216 V: Val,
217 IRW: ReadWrite<Type = I>,
218 VRW: ReadWrite<Type = V>,
219 T: Visitor<I = I, V = V>,
220 {
221 match &self {
223 Root::L1Leaf(leaf) => leaf.visit(visitor, raw_entries, id_rw, val_rw),
224 Root::L1Node(node) => node.visit(visitor, raw_entries, id_rw, val_rw),
225 Root::LDNode(node) => node.visit(visitor, raw_entries, id_rw, val_rw),
226 Root::RootL1Node(node) => node.visit(visitor, raw_entries, id_rw, val_rw),
227 Root::RootLDNode(node) => node.visit(visitor, raw_entries, id_rw, val_rw),
228 }
229 }
230
231 fn visit_asc<I, V, IRW, VRW, T>(
232 &self,
233 visitor: T,
234 raw_entries: &[u8],
235 id_rw: &IRW,
236 val_rw: &VRW,
237 ) -> Result<T, Error>
238 where
239 I: Id,
240 V: Val,
241 IRW: ReadWrite<Type = I>,
242 VRW: ReadWrite<Type = V>,
243 T: Visitor<I = I, V = V>,
244 {
245 match &self {
247 Root::L1Leaf(leaf) => leaf.visit_asc(visitor, raw_entries, id_rw, val_rw),
248 Root::L1Node(node) => node.visit_asc(visitor, raw_entries, id_rw, val_rw),
249 Root::LDNode(node) => node.visit_asc(visitor, raw_entries, id_rw, val_rw),
250 Root::RootL1Node(node) => node.visit_asc(visitor, raw_entries, id_rw, val_rw),
251 Root::RootLDNode(node) => node.visit_asc(visitor, raw_entries, id_rw, val_rw),
252 }
253 }
254}
255
256#[derive(Debug)]
257pub enum SubTree {
258 L1Leaf(L1Leaf),
259 L1Node(L1Node), LDNode(LDNode),
261}
262
263impl HasByteSize for SubTree {
264 fn byte_size(&self, entry_byte_size: usize) -> usize {
265 match &self {
266 SubTree::L1Leaf(leaf) => leaf.byte_size(entry_byte_size),
267 SubTree::L1Node(node) => node.byte_size(entry_byte_size),
268 SubTree::LDNode(node) => node.byte_size(entry_byte_size),
269 }
270 }
271}
272
273impl SubTreeW for SubTree {
274 fn write<I, V, IRW, VRW, T>(
275 &self,
276 entries_iterator: T,
277 id_rw: &IRW,
278 val_rw: &VRW,
279 dest: &mut [u8],
280 ) -> Result<T, Error>
281 where
282 I: Id,
283 V: Val,
284 IRW: ReadWrite<Type = I>,
285 VRW: ReadWrite<Type = V>,
286 T: Iterator<Item = Entry<I, V>>,
287 {
288 match &self {
290 SubTree::L1Leaf(leaf) => leaf.write(entries_iterator, id_rw, val_rw, dest),
291 SubTree::L1Node(node) => node.write(entries_iterator, id_rw, val_rw, dest),
292 SubTree::LDNode(node) => node.write(entries_iterator, id_rw, val_rw, dest),
293 }
294 }
295}
296
297impl SubTreeR for SubTree {
298 fn get<I, V, IRW, VRW>(
299 &self,
300 value: V,
301 raw_entries: &[u8],
302 id_rw: &IRW,
303 val_rw: &VRW,
304 ) -> Result<Option<Entry<I, V>>, Error>
305 where
306 I: Id,
307 V: Val,
308 IRW: ReadWrite<Type = I>,
309 VRW: ReadWrite<Type = V>,
310 {
311 match &self {
313 SubTree::L1Leaf(leaf) => leaf.get(value, raw_entries, id_rw, val_rw),
314 SubTree::L1Node(node) => node.get(value, raw_entries, id_rw, val_rw),
315 SubTree::LDNode(node) => node.get(value, raw_entries, id_rw, val_rw),
316 }
317 }
318
319 fn visit_desc<I, V, IRW, VRW, T>(
320 &self,
321 visitor: T,
322 raw_entries: &[u8],
323 id_rw: &IRW,
324 val_rw: &VRW,
325 ) -> Result<T, Error>
326 where
327 I: Id,
328 V: Val,
329 IRW: ReadWrite<Type = I>,
330 VRW: ReadWrite<Type = V>,
331 T: Visitor<I = I, V = V>,
332 {
333 match &self {
335 SubTree::L1Leaf(leaf) => leaf.visit_desc(visitor, raw_entries, id_rw, val_rw),
336 SubTree::L1Node(node) => node.visit_desc(visitor, raw_entries, id_rw, val_rw),
337 SubTree::LDNode(node) => node.visit_desc(visitor, raw_entries, id_rw, val_rw),
338 }
339 }
340
341 fn visit<I, V, IRW, VRW, T>(
342 &self,
343 visitor: T,
344 raw_entries: &[u8],
345 id_rw: &IRW,
346 val_rw: &VRW,
347 ) -> Result<T, Error>
348 where
349 I: Id,
350 V: Val,
351 IRW: ReadWrite<Type = I>,
352 VRW: ReadWrite<Type = V>,
353 T: Visitor<I = I, V = V>,
354 {
355 match &self {
357 SubTree::L1Leaf(leaf) => leaf.visit(visitor, raw_entries, id_rw, val_rw),
358 SubTree::L1Node(node) => node.visit(visitor, raw_entries, id_rw, val_rw),
359 SubTree::LDNode(node) => node.visit(visitor, raw_entries, id_rw, val_rw),
360 }
361 }
362
363 fn visit_asc<I, V, IRW, VRW, T>(
364 &self,
365 visitor: T,
366 raw_entries: &[u8],
367 id_rw: &IRW,
368 val_rw: &VRW,
369 ) -> Result<T, Error>
370 where
371 I: Id,
372 V: Val,
373 IRW: ReadWrite<Type = I>,
374 VRW: ReadWrite<Type = V>,
375 T: Visitor<I = I, V = V>,
376 {
377 match &self {
379 SubTree::L1Leaf(leaf) => leaf.visit_asc(visitor, raw_entries, id_rw, val_rw),
380 SubTree::L1Node(node) => node.visit_asc(visitor, raw_entries, id_rw, val_rw),
381 SubTree::LDNode(node) => node.visit_asc(visitor, raw_entries, id_rw, val_rw),
382 }
383 }
384}
385
386#[derive(Debug)]
387pub enum LDSubTree {
388 L1Node(L1Node), LDNode(LDNode),
390}
391
392impl HasByteSize for LDSubTree {
393 fn byte_size(&self, entry_byte_size: usize) -> usize {
394 match &self {
395 LDSubTree::L1Node(node) => node.byte_size(entry_byte_size),
396 LDSubTree::LDNode(node) => node.byte_size(entry_byte_size),
397 }
398 }
399}
400
401impl SubTreeW for LDSubTree {
402 fn write<I, V, IRW, VRW, T>(
403 &self,
404 entries_iterator: T,
405 id_rw: &IRW,
406 val_rw: &VRW,
407 dest: &mut [u8],
408 ) -> Result<T, Error>
409 where
410 I: Id,
411 V: Val,
412 IRW: ReadWrite<Type = I>,
413 VRW: ReadWrite<Type = V>,
414 T: Iterator<Item = Entry<I, V>>,
415 {
416 match &self {
417 LDSubTree::L1Node(node) => node.write(entries_iterator, id_rw, val_rw, dest),
418 LDSubTree::LDNode(node) => node.write(entries_iterator, id_rw, val_rw, dest),
419 }
420 }
421}
422
423impl SubTreeR for LDSubTree {
424 fn get<I, V, IRW, VRW>(
425 &self,
426 value: V,
427 raw_entries: &[u8],
428 id_rw: &IRW,
429 val_rw: &VRW,
430 ) -> Result<Option<Entry<I, V>>, Error>
431 where
432 I: Id,
433 V: Val,
434 IRW: ReadWrite<Type = I>,
435 VRW: ReadWrite<Type = V>,
436 {
437 match &self {
438 LDSubTree::L1Node(node) => node.get(value, raw_entries, id_rw, val_rw),
439 LDSubTree::LDNode(node) => node.get(value, raw_entries, id_rw, val_rw),
440 }
441 }
442
443 fn visit_desc<I, V, IRW, VRW, T>(
444 &self,
445 visitor: T,
446 raw_entries: &[u8],
447 id_rw: &IRW,
448 val_rw: &VRW,
449 ) -> Result<T, Error>
450 where
451 I: Id,
452 V: Val,
453 IRW: ReadWrite<Type = I>,
454 VRW: ReadWrite<Type = V>,
455 T: Visitor<I = I, V = V>,
456 {
457 match &self {
459 LDSubTree::L1Node(node) => node.visit_desc(visitor, raw_entries, id_rw, val_rw),
460 LDSubTree::LDNode(node) => node.visit_desc(visitor, raw_entries, id_rw, val_rw),
461 }
462 }
463 fn visit<I, V, IRW, VRW, T>(
464 &self,
465 visitor: T,
466 raw_entries: &[u8],
467 id_rw: &IRW,
468 val_rw: &VRW,
469 ) -> Result<T, Error>
470 where
471 I: Id,
472 V: Val,
473 IRW: ReadWrite<Type = I>,
474 VRW: ReadWrite<Type = V>,
475 T: Visitor<I = I, V = V>,
476 {
477 match &self {
479 LDSubTree::L1Node(node) => node.visit(visitor, raw_entries, id_rw, val_rw),
480 LDSubTree::LDNode(node) => node.visit(visitor, raw_entries, id_rw, val_rw),
481 }
482 }
483
484 fn visit_asc<I, V, IRW, VRW, T>(
485 &self,
486 visitor: T,
487 raw_entries: &[u8],
488 id_rw: &IRW,
489 val_rw: &VRW,
490 ) -> Result<T, Error>
491 where
492 I: Id,
493 V: Val,
494 IRW: ReadWrite<Type = I>,
495 VRW: ReadWrite<Type = V>,
496 T: Visitor<I = I, V = V>,
497 {
498 match &self {
500 LDSubTree::L1Node(node) => node.visit_asc(visitor, raw_entries, id_rw, val_rw),
501 LDSubTree::LDNode(node) => node.visit_asc(visitor, raw_entries, id_rw, val_rw),
502 }
503 }
504}
505
/// Root node layout: `n_elems` entries, each preceded by one sub-tree,
/// followed by a final "rightmost" sub-tree for values past the last entry.
#[derive(Debug)]
pub struct RootL1Node {
    /// Number of entries stored directly in this node.
    n_elems: usize,
    /// Layout shared by the `n_elems` sub-trees sitting left of each entry.
    sub_tree: SubTree,
    /// Sub-tree holding everything greater than the last entry.
    rightmost_subtree: Box<Root>,
}
513
514impl RootL1Node {
515 fn new(n_elems: usize, sub_tree: SubTree, rightmost_subtree: Root) -> RootL1Node {
516 RootL1Node {
517 n_elems,
518 sub_tree,
519 rightmost_subtree: Box::new(rightmost_subtree),
520 }
521 }
522}
523
impl HasByteSize for RootL1Node {
    /// `n_elems` entries + `n_elems` left sub-trees + the rightmost sub-tree.
    fn byte_size(&self, entry_byte_size: usize) -> usize {
        self.n_elems * entry_byte_size
            + self.n_elems * self.sub_tree.byte_size(entry_byte_size)
            + self.rightmost_subtree.byte_size(entry_byte_size)
    }
}
531
impl SubTreeW for RootL1Node {
    /// Serializes this node into `dest`, laid out as three regions:
    /// `[n_elems entries][n_elems sub-trees][rightmost sub-tree]`.
    ///
    /// Entries are pulled from `it` in order: each left sub-tree is written
    /// before the entry that follows it, so the iterator is consumed in
    /// logical (sorted) order even though the regions are grouped by kind.
    fn write<I, V, IRW, VRW, T>(
        &self,
        mut it: T,
        id_rw: &IRW,
        val_rw: &VRW,
        dest: &mut [u8],
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Iterator<Item = Entry<I, V>>,
    {
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        assert_eq!(
            self.byte_size(entry_byte_size),
            dest.len(),
            "Wrong byte size: {} != {}",
            self.byte_size(entry_byte_size),
            dest.len()
        );
        let subtree_byte_size = self.sub_tree.byte_size(entry_byte_size);
        // Carve `dest` into the entry region, the sub-tree region, and the
        // remainder (rightmost sub-tree).
        let (mut l1_buff, r_buff) = dest.split_at_mut(self.n_elems * entry_byte_size);
        let (mut st_buff, r_buff) = r_buff.split_at_mut(self.n_elems * subtree_byte_size);
        for _ in 0..self.n_elems {
            let (curr_buff, subtree_buff) = st_buff.split_at_mut(subtree_byte_size);
            it = self.sub_tree.write(it, id_rw, val_rw, curr_buff)?;
            st_buff = subtree_buff;
            // NOTE(review): presumably `Entry::write` advances `l1_buff`
            // (`impl Write for &mut [u8]`) — its signature is not visible here.
            it.next()
                .ok_or_else(|| Error::new(ErrorKind::Other, "Iterator depleted!"))?
                .write(&mut l1_buff, id_rw, val_rw)?;
        }
        it = self.rightmost_subtree.write(it, id_rw, val_rw, r_buff)?;
        // All sub-tree space must have been consumed exactly.
        assert_eq!(st_buff.len(), 0);
        Ok(it)
    }
}
574
575impl SubTreeR for RootL1Node {
576 fn get<I, V, IRW, VRW>(
577 &self,
578 value: V,
579 raw_entries: &[u8],
580 id_rw: &IRW,
581 val_rw: &VRW,
582 ) -> Result<Option<Entry<I, V>>, Error>
583 where
584 I: Id,
585 V: Val,
586 IRW: ReadWrite<Type = I>,
587 VRW: ReadWrite<Type = V>,
588 {
589 let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
590 assert_eq!(
591 self.byte_size(entry_byte_size),
592 raw_entries.len(),
593 "Wrong byte size: {} != {}",
594 self.byte_size(entry_byte_size),
595 raw_entries.len()
596 );
597 let subtree_byte_size = self.sub_tree.byte_size(entry_byte_size);
599 let (l1_buff, r_buff) = raw_entries.split_at(self.n_elems * entry_byte_size);
600 let mut l1_entries = RawEntries::new(l1_buff, id_rw, val_rw);
601 match l1_entries.binary_search(&value)? {
602 Ok(i) => Ok(Some(l1_entries.get_entry(i)?)),
603 Err(i) => {
604 if i == self.n_elems {
605 self
606 .rightmost_subtree
607 .get(value, &r_buff[i * subtree_byte_size..], id_rw, val_rw)
608 } else {
609 let from = i * subtree_byte_size;
610 let to = from + subtree_byte_size;
611 self.sub_tree.get(value, &r_buff[from..to], id_rw, val_rw)
612 }
613 }
614 }
615 }
616
617 fn visit_desc<I, V, IRW, VRW, T>(
618 &self,
619 mut _visitor: T,
620 _raw_entries: &[u8],
621 _id_rw: &IRW,
622 _val_rw: &VRW,
623 ) -> Result<T, Error>
624 where
625 I: Id,
626 V: Val,
627 IRW: ReadWrite<Type = I>,
628 VRW: ReadWrite<Type = V>,
629 T: Visitor<I = I, V = V>,
630 {
631 unreachable!() }
633
634 fn visit<I, V, IRW, VRW, T>(
635 &self,
636 mut visitor: T,
637 raw_entries: &[u8],
638 id_rw: &IRW,
639 val_rw: &VRW,
640 ) -> Result<T, Error>
641 where
642 I: Id,
643 V: Val,
644 IRW: ReadWrite<Type = I>,
645 VRW: ReadWrite<Type = V>,
646 T: Visitor<I = I, V = V>,
647 {
648 debug_assert!(!raw_entries.is_empty());
649 let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
650 assert_eq!(
651 self.byte_size(entry_byte_size),
652 raw_entries.len(),
653 "Wrong byte size: {} != {}",
654 self.byte_size(entry_byte_size),
655 raw_entries.len()
656 );
657 let subtree_byte_size = self.sub_tree.byte_size(entry_byte_size);
659 let (l1_buff, r_buff) = raw_entries.split_at(self.n_elems * entry_byte_size);
660 let mut l1_entries = RawEntries::new(l1_buff, id_rw, val_rw);
661 let (mut l, mut r) = match l1_entries.binary_search(visitor.center())? {
662 Ok(i) => {
663 visitor.visit_center(l1_entries.get_entry(i)?);
664 if visitor.visit_desc() {
665 let from = i * subtree_byte_size;
666 let to = from + subtree_byte_size;
667 visitor = self
668 .sub_tree
669 .visit_desc(visitor, &r_buff[from..to], id_rw, val_rw)?;
670 }
671 if visitor.visit_asc() {
672 if i < self.n_elems {
673 let from = (i + 1) * subtree_byte_size;
674 let to = from + subtree_byte_size;
675 visitor = self
676 .sub_tree
677 .visit_asc(visitor, &r_buff[from..to], id_rw, val_rw)?;
678 } else {
679 visitor = self.rightmost_subtree.visit_asc(
680 visitor,
681 &r_buff[i * subtree_byte_size..],
682 id_rw,
683 val_rw,
684 )?;
685 }
686 }
687 (i as i32 - 1, i + 1)
688 }
689 Err(i) => {
690 if i < self.n_elems {
691 let from = i * subtree_byte_size;
692 let to = from + subtree_byte_size;
693 visitor = self
694 .sub_tree
695 .visit(visitor, &r_buff[from..to], id_rw, val_rw)?;
696 } else {
697 debug_assert_eq!(i, self.n_elems);
698 visitor = self.rightmost_subtree.visit(
699 visitor,
700 &r_buff[i * subtree_byte_size..],
701 id_rw,
702 val_rw,
703 )?;
704 }
705 (i as i32 - 1, i)
706 }
707 };
708 while l >= 0 {
709 if !visitor.visit_desc() {
710 break;
711 }
712 visitor.visit_le_center(l1_entries.get_entry(l as usize)?);
713 if !visitor.visit_desc() {
714 break;
715 }
716 let from = l as usize * subtree_byte_size;
717 let to = from + subtree_byte_size;
718 visitor = self
719 .sub_tree
720 .visit_desc(visitor, &r_buff[from..to], id_rw, val_rw)?;
721 l -= 1;
722 }
723 while r < self.n_elems {
724 if !visitor.visit_asc() {
725 break;
726 }
727 visitor.visit_he_center(l1_entries.get_entry(r)?);
728 if !visitor.visit_asc() {
729 break;
730 }
731 r += 1;
732 if r < self.n_elems {
733 let from = (r + 1) * subtree_byte_size;
734 let to = from + subtree_byte_size;
735 visitor = self
736 .sub_tree
737 .visit_asc(visitor, &r_buff[from..to], id_rw, val_rw)?;
738 } else {
739 visitor = self.rightmost_subtree.visit_asc(
740 visitor,
741 &r_buff[r * subtree_byte_size..],
742 id_rw,
743 val_rw,
744 )?;
745 }
746 }
747 Ok(visitor)
748 }
749
750 fn visit_asc<I, V, IRW, VRW, T>(
751 &self,
752 mut _visitor: T,
753 _raw_entries: &[u8],
754 _id_rw: &IRW,
755 _val_rw: &VRW,
756 ) -> Result<T, Error>
757 where
758 I: Id,
759 V: Val,
760 IRW: ReadWrite<Type = I>,
761 VRW: ReadWrite<Type = V>,
762 T: Visitor<I = I, V = V>,
763 {
764 unreachable!() }
766}
767
/// Root node layout of depth > 1: each of the `n_elems` entries owns an L1
/// page of `n_l1page_elems` entries plus `n_l1page_elems + 1` sub-trees, and
/// a final "rightmost" sub-tree covers values past the last entry.
#[derive(Debug)]
pub struct RootLDNode {
    /// Number of entries stored directly in this node.
    n_elems: usize,
    /// Number of entries per attached L1 page.
    n_l1page_elems: usize,
    /// Layout shared by all sub-trees hanging off the L1 pages.
    sub_tree: LDSubTree,
    /// Sub-tree holding everything greater than the last entry.
    rightmost_subtree: Box<Root>,
}
775
776impl RootLDNode {
777 fn new(
778 n_elems: usize,
779 n_l1page_elems: usize,
780 sub_tree: LDSubTree,
781 rightmost_subtree: Root,
782 ) -> RootLDNode {
783 RootLDNode {
784 n_elems,
785 n_l1page_elems,
786 sub_tree,
787 rightmost_subtree: Box::new(rightmost_subtree),
788 }
789 }
790}
791
impl HasByteSize for RootLDNode {
    /// `n_elems` node entries + `n_elems` L1 pages of `n_l1page_elems`
    /// entries, plus `n_l1page_elems + 1` sub-trees per page, plus the
    /// rightmost sub-tree.
    fn byte_size(&self, entry_byte_size: usize) -> usize {
        (self.n_elems + self.n_elems * self.n_l1page_elems) * entry_byte_size
            + (self.n_elems * (self.n_l1page_elems + 1)) * self.sub_tree.byte_size(entry_byte_size)
            + self.rightmost_subtree.byte_size(entry_byte_size)
    }
}
799
impl SubTreeW for RootLDNode {
    /// Serializes this node into `dest`, laid out as four regions:
    /// `[n_elems entries][n_elems L1 pages][n_elems sub-tree groups][rightmost]`.
    ///
    /// Each L1 page and its sub-tree group are written (via `write_l1page`)
    /// before the node entry that follows them, so `it` is consumed in
    /// logical (sorted) order.
    fn write<I, V, IRW, VRW, T>(
        &self,
        mut it: T,
        id_rw: &IRW,
        val_rw: &VRW,
        dest: &mut [u8],
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Iterator<Item = Entry<I, V>>,
    {
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        assert_eq!(
            self.byte_size(entry_byte_size),
            dest.len(),
            "Wrong byte size: {} != {}",
            self.byte_size(entry_byte_size),
            dest.len()
        );
        let l1page_byte_size = self.n_l1page_elems * entry_byte_size;
        // One group = the n_l1page_elems + 1 sub-trees attached to one L1 page.
        let subtree_group_byte_size =
            (self.n_l1page_elems + 1) * self.sub_tree.byte_size(entry_byte_size);
        let (mut ld_buff, r_buff) = dest.split_at_mut(self.n_elems * entry_byte_size);
        let (mut l1_buff, r_buff) = r_buff.split_at_mut(self.n_elems * l1page_byte_size);
        let (mut st_buff, r_buff) = r_buff.split_at_mut(self.n_elems * subtree_group_byte_size);
        assert_eq!(
            r_buff.len(),
            self.rightmost_subtree.byte_size(entry_byte_size)
        );
        for _ in 0..self.n_elems {
            let (cl1_buff, tl1_buff) = l1_buff.split_at_mut(l1page_byte_size);
            let (cst_buff, tst_buff) = st_buff.split_at_mut(subtree_group_byte_size);
            it = write_l1page(it, id_rw, val_rw, cl1_buff, &self.sub_tree, cst_buff)?;
            l1_buff = tl1_buff;
            st_buff = tst_buff;
            // NOTE(review): presumably `Entry::write` advances `ld_buff`
            // (`impl Write for &mut [u8]`) — its signature is not visible here.
            it.next()
                .ok_or_else(|| Error::new(ErrorKind::Other, "Iterator depleted!"))?
                .write(&mut ld_buff, id_rw, val_rw)?;
        }
        it = self.rightmost_subtree.write(it, id_rw, val_rw, r_buff)?;
        assert_eq!(l1_buff.len(), 0, "Wrong L1 buff size: {}", l1_buff.len());
        assert_eq!(st_buff.len(), 0, "Wrong ST buff size: {}", st_buff.len());
        Ok(it)
    }
}
854
855impl SubTreeR for RootLDNode {
856 fn get<I, V, IRW, VRW>(
857 &self,
858 value: V,
859 raw_entries: &[u8],
860 id_rw: &IRW,
861 val_rw: &VRW,
862 ) -> Result<Option<Entry<I, V>>, Error>
863 where
864 I: Id,
865 V: Val,
866 IRW: ReadWrite<Type = I>,
867 VRW: ReadWrite<Type = V>,
868 {
869 let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
870 assert_eq!(self.byte_size(entry_byte_size), raw_entries.len());
871 let l1page_byte_size = self.n_l1page_elems * entry_byte_size;
873 let subtree_group_byte_size =
874 (self.n_l1page_elems + 1) * self.sub_tree.byte_size(entry_byte_size);
875 let (ld_buff, r_buff) = raw_entries.split_at(self.n_elems * entry_byte_size);
877 let mut entries = RawEntries::new(ld_buff, id_rw, val_rw);
878 match entries.binary_search(&value)? {
879 Ok(i) => Ok(Some(entries.get_entry(i)?)),
880 Err(i) => {
881 if i == self.n_elems {
882 let limit = self.n_elems * (l1page_byte_size + subtree_group_byte_size);
883 let (_, r_buff) = r_buff.split_at(limit);
884 assert_eq!(
885 r_buff.len(),
886 self.rightmost_subtree.byte_size(entry_byte_size)
887 );
888 self.rightmost_subtree.get(value, r_buff, id_rw, val_rw)
889 } else {
890 let (l1_buff, st_buff) = r_buff.split_at(self.n_elems * l1page_byte_size);
891 let from_l1 = i * l1page_byte_size;
892 let to_l1 = from_l1 + l1page_byte_size;
893 let from_st = i * subtree_group_byte_size;
894 let to_st = from_st + subtree_group_byte_size;
895 get_l1page(
896 value,
897 id_rw,
898 val_rw,
899 &l1_buff[from_l1..to_l1],
900 &self.sub_tree,
901 &st_buff[from_st..to_st],
902 )
903 }
904 }
905 }
906 }
907
908 fn visit_desc<I, V, IRW, VRW, T>(
909 &self,
910 _visitor: T,
911 _raw_entries: &[u8],
912 _id_rw: &IRW,
913 _val_rw: &VRW,
914 ) -> Result<T, Error>
915 where
916 I: Id,
917 V: Val,
918 IRW: ReadWrite<Type = I>,
919 VRW: ReadWrite<Type = V>,
920 T: Visitor<I = I, V = V>,
921 {
922 unreachable!() }
924
925 fn visit<I, V, IRW, VRW, T>(
926 &self,
927 mut visitor: T,
928 raw_entries: &[u8],
929 id_rw: &IRW,
930 val_rw: &VRW,
931 ) -> Result<T, Error>
932 where
933 I: Id,
934 V: Val,
935 IRW: ReadWrite<Type = I>,
936 VRW: ReadWrite<Type = V>,
937 T: Visitor<I = I, V = V>,
938 {
939 let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
940 assert_eq!(self.byte_size(entry_byte_size), raw_entries.len());
941 let l1page_byte_size = self.n_l1page_elems * entry_byte_size;
943 let subtree_group_byte_size =
944 (self.n_l1page_elems + 1) * self.sub_tree.byte_size(entry_byte_size);
945 let (ld_buff, r_buff) = raw_entries.split_at(self.n_elems * entry_byte_size);
947 let (l1_buff, r_buff) = r_buff.split_at(self.n_elems * l1page_byte_size);
948 let (st_buff, r_buff) = r_buff.split_at(self.n_elems * subtree_group_byte_size);
949 let mut entries = RawEntries::new(ld_buff, id_rw, val_rw);
950 let (mut l, mut r) = match entries.binary_search(visitor.center())? {
951 Ok(i) => {
952 visitor.visit_center(entries.get_entry(i)?);
953 if visitor.visit_desc() {
954 let from_l1 = i * l1page_byte_size;
955 let to_l1 = from_l1 + l1page_byte_size;
956 let from_st = i * subtree_group_byte_size;
957 let to_st = from_st + subtree_group_byte_size;
958 visitor = visit_desc_l1page(
959 visitor,
960 id_rw,
961 val_rw,
962 &l1_buff[from_l1..to_l1],
963 &self.sub_tree,
964 &st_buff[from_st..to_st],
965 )?;
966 }
967 if visitor.visit_asc() {
968 if i < self.n_elems {
969 let from_l1 = (i + 1) * l1page_byte_size;
970 let to_l1 = from_l1 + l1page_byte_size;
971 let from_st = (i + 1) * subtree_group_byte_size;
972 let to_st = from_st + subtree_group_byte_size;
973 visitor = visit_asc_l1page(
974 visitor,
975 id_rw,
976 val_rw,
977 &l1_buff[from_l1..to_l1],
978 &self.sub_tree,
979 &st_buff[from_st..to_st],
980 )?;
981 } else {
982 visitor = self
983 .rightmost_subtree
984 .visit_asc(visitor, r_buff, id_rw, val_rw)?;
985 }
986 }
987 (i as i32 - 1, i + 1)
988 }
989 Err(i) => {
990 if i < self.n_elems {
991 let from_l1 = i * l1page_byte_size;
992 let to_l1 = from_l1 + l1page_byte_size;
993 let from_st = i * subtree_group_byte_size;
994 let to_st = from_st + subtree_group_byte_size;
995 visitor = visit_l1page(
996 visitor,
997 id_rw,
998 val_rw,
999 &l1_buff[from_l1..to_l1],
1000 &self.sub_tree,
1001 &st_buff[from_st..to_st],
1002 )?;
1003 } else {
1004 visitor = self
1005 .rightmost_subtree
1006 .visit(visitor, r_buff, id_rw, val_rw)?;
1007 }
1008 (i as i32 - 1, i)
1009 }
1010 };
1011 while l >= 0 {
1012 if !visitor.visit_desc() {
1013 break;
1014 }
1015 visitor.visit_le_center(entries.get_entry(l as usize)?);
1016 if !visitor.visit_desc() {
1017 break;
1018 }
1019 let from_l1 = l as usize * l1page_byte_size;
1020 let to_l1 = from_l1 + l1page_byte_size;
1021 let from_st = l as usize * subtree_group_byte_size;
1022 let to_st = from_st + subtree_group_byte_size;
1023 visitor = visit_desc_l1page(
1024 visitor,
1025 id_rw,
1026 val_rw,
1027 &l1_buff[from_l1..to_l1],
1028 &self.sub_tree,
1029 &st_buff[from_st..to_st],
1030 )?;
1031 l -= 1;
1032 }
1033 while r < self.n_elems {
1034 if !visitor.visit_asc() {
1035 break;
1036 }
1037 visitor.visit_he_center(entries.get_entry(r)?);
1038 if !visitor.visit_asc() {
1039 break;
1040 }
1041 r += 1;
1042 if r < self.n_elems {
1043 let from_l1 = r * l1page_byte_size;
1044 let to_l1 = from_l1 + l1page_byte_size;
1045 let from_st = r * subtree_group_byte_size;
1046 let to_st = from_st + subtree_group_byte_size;
1047 visitor = visit_asc_l1page(
1048 visitor,
1049 id_rw,
1050 val_rw,
1051 &l1_buff[from_l1..to_l1],
1052 &self.sub_tree,
1053 &st_buff[from_st..to_st],
1054 )?;
1055 } else {
1056 visitor = self
1057 .rightmost_subtree
1058 .visit_asc(visitor, r_buff, id_rw, val_rw)?;
1059 }
1060 }
1061 Ok(visitor)
1062 }
1063
1064 fn visit_asc<I, V, IRW, VRW, T>(
1065 &self,
1066 _visitor: T,
1067 _raw_entries: &[u8],
1068 _id_rw: &IRW,
1069 _val_rw: &VRW,
1070 ) -> Result<T, Error>
1071 where
1072 I: Id,
1073 V: Val,
1074 IRW: ReadWrite<Type = I>,
1075 VRW: ReadWrite<Type = V>,
1076 T: Visitor<I = I, V = V>,
1077 {
1078 unreachable!() }
1080}
1081
/// Leaf page: a flat, sorted run of `n_elems` entries with no children.
#[derive(Debug)]
pub struct L1Leaf {
    /// Number of entries stored in this leaf.
    n_elems: usize,
}
1086
1087impl L1Leaf {
1088 fn new(n_elems: usize) -> L1Leaf {
1089 L1Leaf { n_elems }
1090 }
1091}
1092
impl HasByteSize for L1Leaf {
    /// A leaf is just its entries, back to back.
    fn byte_size(&self, entry_byte_size: usize) -> usize {
        self.n_elems * entry_byte_size
    }
}
1098
impl SubTreeW for L1Leaf {
    /// Writes `n_elems` consecutive entries pulled from `it` into `dest`,
    /// failing with an I/O error if the iterator runs out early.
    fn write<I, V, IRW, VRW, T>(
        &self,
        mut it: T,
        id_rw: &IRW,
        val_rw: &VRW,
        mut dest: &mut [u8],
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Iterator<Item = Entry<I, V>>,
    {
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        assert_eq!(
            self.byte_size(entry_byte_size),
            dest.len(),
            "Wrong byte size: {} != {}",
            self.byte_size(entry_byte_size),
            dest.len()
        );
        for _ in 0..self.n_elems {
            // NOTE(review): presumably `Entry::write` advances `dest`
            // (`impl Write for &mut [u8]`) — signature not visible here.
            it.next()
                .ok_or_else(|| Error::new(ErrorKind::Other, "Iterator depleted!"))?
                .write(&mut dest, id_rw, val_rw)?;
        }
        // The buffer must have been consumed exactly.
        assert_eq!(dest.len(), 0);
        Ok(it)
    }
}
1131
impl SubTreeR for L1Leaf {
    /// Binary-searches the flat entry array; `Ok(None)` on a miss.
    fn get<I, V, IRW, VRW>(
        &self,
        val: V,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<Option<Entry<I, V>>, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
    {
        debug_assert_eq!(
            self.byte_size(id_rw.n_bytes() + val_rw.n_bytes()),
            raw_entries.len()
        );
        let mut entries = RawEntries::new(raw_entries, id_rw, val_rw);
        // `Ok(i)` (hit) -> fetch entry i; `Err(_)` (miss) -> None.
        entries
            .binary_search(&val)?
            .ok()
            .map(|i| entries.get_entry(i))
            .transpose()
    }

    /// Feeds all entries to the visitor in descending (reverse) order,
    /// stopping as soon as the visitor declines further descent.
    fn visit_desc<I, V, IRW, VRW, T>(
        &self,
        mut visitor: T,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Visitor<I = I, V = V>,
    {
        debug_assert_eq!(
            self.byte_size(id_rw.n_bytes() + val_rw.n_bytes()),
            raw_entries.len()
        );
        debug_assert!(visitor.visit_desc());
        let mut entries = RawEntries::new(raw_entries, id_rw, val_rw);
        for i in (0..self.n_elems).rev() {
            visitor.visit_le_center(entries.get_entry(i)?);
            if !visitor.visit_desc() {
                return Ok(visitor);
            }
        }
        Ok(visitor)
    }

    /// Locates the visitor's center entry, then expands left and right while
    /// the visitor keeps asking for more.
    fn visit<I, V, IRW, VRW, T>(
        &self,
        mut visitor: T,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Visitor<I = I, V = V>,
    {
        debug_assert_eq!(
            self.byte_size(id_rw.n_bytes() + val_rw.n_bytes()),
            raw_entries.len()
        );
        let mut entries = RawEntries::new(raw_entries, id_rw, val_rw);
        // `l` may go negative, hence the i32; `r` stays a valid index.
        let (mut l, mut r) = match entries.binary_search(visitor.center())? {
            Ok(i) => {
                visitor.visit_center(entries.get_entry(i)?);
                (i as i32 - 1, i + 1)
            }
            Err(i) => (i as i32 - 1, i),
        };
        while l >= 0 && visitor.visit_desc() {
            visitor.visit_le_center(entries.get_entry(l as usize)?);
            l -= 1;
        }
        while r < self.n_elems && visitor.visit_asc() {
            visitor.visit_he_center(entries.get_entry(r)?);
            r += 1;
        }
        Ok(visitor)
    }

    /// Feeds all entries to the visitor in ascending order, stopping as soon
    /// as the visitor declines further ascent.
    fn visit_asc<I, V, IRW, VRW, T>(
        &self,
        mut visitor: T,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Visitor<I = I, V = V>,
    {
        debug_assert_eq!(
            self.byte_size(id_rw.n_bytes() + val_rw.n_bytes()),
            raw_entries.len()
        );
        debug_assert!(visitor.visit_asc());
        let mut entries = RawEntries::new(raw_entries, id_rw, val_rw);
        for i in 0..self.n_elems {
            visitor.visit_he_center(entries.get_entry(i)?);
            if !visitor.visit_asc() {
                return Ok(visitor);
            }
        }
        Ok(visitor)
    }
}
1255
1256#[derive(Debug)]
1257pub struct L1Node {
1258 n_elems: usize,
1260 sub_tree: Box<SubTree>, }
1262
1263impl L1Node {
1264 fn new(n_elems: usize, sub_tree: SubTree) -> L1Node {
1265 L1Node {
1266 n_elems,
1267 sub_tree: Box::new(sub_tree),
1268 }
1269 }
1270}
1271
impl HasByteSize for L1Node {
    /// `n_elems` entries plus `n_elems + 1` child sub-trees.
    fn byte_size(&self, entry_byte_size: usize) -> usize {
        self.n_elems * entry_byte_size + (self.n_elems + 1) * self.sub_tree.byte_size(entry_byte_size)
    }
}
1277
impl SubTreeW for L1Node {
    /// Splits `dest` into the entry region and the sub-tree region, then
    /// delegates the interleaved serialization to `write_l1page`.
    fn write<I, V, IRW, VRW, T>(
        &self,
        mut it: T,
        id_rw: &IRW,
        val_rw: &VRW,
        dest: &mut [u8],
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Iterator<Item = Entry<I, V>>,
    {
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        assert_eq!(
            self.byte_size(entry_byte_size),
            dest.len(),
            "Wrong buffer size"
        );
        let (l1_buff, st_buff) = dest.split_at_mut(self.n_elems * entry_byte_size);
        it = write_l1page(it, id_rw, val_rw, l1_buff, self.sub_tree.as_ref(), st_buff)?;
        Ok(it)
    }
}
1304
impl SubTreeR for L1Node {
    /// Exact lookup of `val`: splits the buffer into this node's own entries
    /// followed by the serialized children, then delegates to `get_l1page`.
    fn get<I, V, IRW, VRW>(
        &self,
        val: V,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<Option<Entry<I, V>>, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
    {
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        debug_assert_eq!(self.byte_size(entry_byte_size), raw_entries.len());
        // Own entries first, then the (n_elems + 1) serialized sub-trees.
        let (l1_buff, st_buff) = raw_entries.split_at(self.n_elems * entry_byte_size);
        get_l1page(val, id_rw, val_rw, l1_buff, self.sub_tree.as_ref(), st_buff)
    }

    /// Full descending traversal of this node, delegated to
    /// `visit_desc_l1page`; requires the visitor to already be descending.
    fn visit_desc<I, V, IRW, VRW, T>(
        &self,
        visitor: T,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Visitor<I = I, V = V>,
    {
        debug_assert!(visitor.visit_desc());
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        debug_assert_eq!(self.byte_size(entry_byte_size), raw_entries.len());
        let (l1_buff, st_buff) = raw_entries.split_at(self.n_elems * entry_byte_size);
        visit_desc_l1page(
            visitor,
            id_rw,
            val_rw,
            l1_buff,
            self.sub_tree.as_ref(),
            st_buff,
        )
    }

    /// Traversal centered on `visitor.center()`, delegated to `visit_l1page`
    /// (binary search for the center, then outward sweeps in both directions).
    fn visit<I, V, IRW, VRW, T>(
        &self,
        visitor: T,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Visitor<I = I, V = V>,
    {
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        debug_assert_eq!(self.byte_size(entry_byte_size), raw_entries.len());
        let (l1_buff, st_buff) = raw_entries.split_at(self.n_elems * entry_byte_size);
        visit_l1page(
            visitor,
            id_rw,
            val_rw,
            l1_buff,
            self.sub_tree.as_ref(),
            st_buff,
        )
    }

    /// Full ascending traversal of this node, delegated to
    /// `visit_asc_l1page`; requires the visitor to already be ascending.
    fn visit_asc<I, V, IRW, VRW, T>(
        &self,
        visitor: T,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Visitor<I = I, V = V>,
    {
        debug_assert!(visitor.visit_asc());
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        debug_assert_eq!(self.byte_size(entry_byte_size), raw_entries.len());
        let (l1_buff, st_buff) = raw_entries.split_at(self.n_elems * entry_byte_size);
        visit_asc_l1page(
            visitor,
            id_rw,
            val_rw,
            l1_buff,
            self.sub_tree.as_ref(),
            st_buff,
        )
    }
}
1408
/// Disk-page node: holds `n_elems` entries, `n_elems + 1` child L1 pages of
/// `n_l1page_elems` entries each, and `(n_elems + 1) * (n_l1page_elems + 1)`
/// sub-trees below those pages (see `byte_size`).
#[derive(Debug)]
pub struct LDNode {
    /// Number of entries stored directly in this node.
    n_elems: usize,
    /// Number of entries of each child L1 page.
    n_l1page_elems: usize,
    /// Layout descriptor shared by every sub-tree below the child L1 pages.
    sub_tree: Box<LDSubTree>,
}
1415
1416impl LDNode {
1417 fn new(n_elems: usize, n_l1page_elems: usize, sub_tree: LDSubTree) -> LDNode {
1418 LDNode {
1419 n_elems,
1420 n_l1page_elems,
1421 sub_tree: Box::new(sub_tree),
1422 }
1423 }
1424}
1425
1426impl HasByteSize for LDNode {
1427 fn byte_size(&self, entry_byte_size: usize) -> usize {
1428 self.n_elems * entry_byte_size
1429 + (self.n_elems + 1) * self.n_l1page_elems * entry_byte_size
1430 + (self.n_elems + 1) * (self.n_l1page_elems + 1) * self.sub_tree.byte_size(entry_byte_size)
1431 }
1432}
1433
impl SubTreeW for LDNode {
    /// Serializes this disk-page node, consuming entries from `it` in
    /// ascending order. Layout of `dest`: the node's own entries, then all
    /// child L1 pages, then all sub-tree groups. Between two child pages,
    /// one entry of this node is written (it separates the two children).
    fn write<I, V, IRW, VRW, T>(
        &self,
        mut it: T,
        id_rw: &IRW,
        val_rw: &VRW,
        dest: &mut [u8],
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Iterator<Item = Entry<I, V>>,
    {
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        assert_eq!(
            self.byte_size(entry_byte_size),
            dest.len(),
            "Wrong byte size: {} != {}",
            self.byte_size(entry_byte_size),
            dest.len()
        );
        // Size of one child L1 page, and of the group of sub-trees below it.
        let l1page_byte_size = self.n_l1page_elems * entry_byte_size;
        let subtree_group_byte_size =
            (self.n_l1page_elems + 1) * self.sub_tree.byte_size(entry_byte_size);
        // ld_buff: this node's entries; l1_buff: the child pages; st_buff: the sub-trees.
        let (mut ld_buff, st_buff) = dest.split_at_mut(self.n_elems * entry_byte_size);
        let (mut l1_buff, mut st_buff) = st_buff.split_at_mut((self.n_elems + 1) * l1page_byte_size);
        assert_eq!(st_buff.len(), (self.n_elems + 1) * subtree_group_byte_size);
        for _ in 0..self.n_elems {
            // Carve out the current child page and its sub-tree group.
            let (cl1_buff, tl1_buff) = l1_buff.split_at_mut(l1page_byte_size);
            let (cst_buff, tst_buff) = st_buff.split_at_mut(subtree_group_byte_size);
            it = write_l1page(
                it,
                id_rw,
                val_rw,
                cl1_buff,
                self.sub_tree.as_ref(),
                cst_buff,
            )?;
            l1_buff = tl1_buff;
            st_buff = tst_buff;
            // One separator entry of this node between two child pages.
            it.next()
                .ok_or_else(|| Error::new(ErrorKind::Other, "Iterator depleted!"))?
                .write(&mut ld_buff, id_rw, val_rw)?;
        }
        // Last (right-most) child page and its sub-trees.
        it = write_l1page(it, id_rw, val_rw, l1_buff, self.sub_tree.as_ref(), st_buff)?;
        assert_eq!(ld_buff.len(), 0);
        Ok(it)
    }
}
1489
impl SubTreeR for LDNode {
    /// Exact lookup of `val`: binary search among this node's own entries;
    /// on a miss, recurse into the child L1 page (and its sub-tree group)
    /// designated by the insertion index.
    fn get<I, V, IRW, VRW>(
        &self,
        val: V,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<Option<Entry<I, V>>, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
    {
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        assert_eq!(self.byte_size(entry_byte_size), raw_entries.len());
        // Same buffer layout as SubTreeW::write: own entries, child pages, sub-trees.
        let l1page_byte_size = self.n_l1page_elems * entry_byte_size;
        let subtree_group_byte_size =
            (self.n_l1page_elems + 1) * self.sub_tree.byte_size(entry_byte_size);
        let (ld_buff, st_buff) = raw_entries.split_at(self.n_elems * entry_byte_size);
        let mut entries = RawEntries::new(ld_buff, id_rw, val_rw);
        match entries.binary_search(&val)? {
            Ok(i) => Ok(Some(entries.get_entry(i)?)),
            Err(i) => {
                // Child i sits between entries i-1 and i of this node.
                let (l1_buff, st_buff) = st_buff.split_at((self.n_elems + 1) * l1page_byte_size);
                let from_l1 = i * l1page_byte_size;
                let to_l1 = from_l1 + l1page_byte_size;
                let from_st = i * subtree_group_byte_size;
                let to_st = from_st + subtree_group_byte_size;
                get_l1page(
                    val,
                    id_rw,
                    val_rw,
                    &l1_buff[from_l1..to_l1],
                    self.sub_tree.as_ref(),
                    &st_buff[from_st..to_st],
                )
            }
        }
    }

    /// Full descending traversal: visits the right-most child first, then
    /// alternates this node's entries and children from right to left.
    fn visit_desc<I, V, IRW, VRW, T>(
        &self,
        mut visitor: T,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Visitor<I = I, V = V>,
    {
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        assert_eq!(self.byte_size(entry_byte_size), raw_entries.len());
        let l1page_byte_size = self.n_l1page_elems * entry_byte_size;
        let subtree_group_byte_size =
            (self.n_l1page_elems + 1) * self.sub_tree.byte_size(entry_byte_size);
        let (_ld_buff, st_buff) = raw_entries.split_at(self.n_elems * entry_byte_size);
        let (l1_buff, st_buff) = st_buff.split_at((self.n_elems + 1) * l1page_byte_size);
        // Start from the right-most (index n_elems) child page.
        let from_l1 = self.n_elems * l1page_byte_size;
        let to_l1 = from_l1 + l1page_byte_size;
        let from_st = self.n_elems * subtree_group_byte_size;
        let to_st = from_st + subtree_group_byte_size;
        visitor = visit_desc_l1page(
            visitor,
            id_rw,
            val_rw,
            &l1_buff[from_l1..to_l1],
            self.sub_tree.as_ref(),
            &st_buff[from_st..to_st],
        )?;
        // Then children n_elems-1 .. 0, each preceded (in value order) by
        // the corresponding separator entry of this node.
        // NOTE(review): unlike visit_desc_l1page, this loop does not emit the
        // node's own entries via visit_le_center — confirm intended.
        for i in (0..self.n_elems).rev() {
            let from_l1 = i * l1page_byte_size;
            let to_l1 = from_l1 + l1page_byte_size;
            let from_st = i * subtree_group_byte_size;
            let to_st = from_st + subtree_group_byte_size;
            visitor = visit_desc_l1page(
                visitor,
                id_rw,
                val_rw,
                &l1_buff[from_l1..to_l1],
                self.sub_tree.as_ref(),
                &st_buff[from_st..to_st],
            )?;
        }
        Ok(visitor)
    }
    /// Traversal centered on `visitor.center()`: binary search among this
    /// node's entries, visit the child(ren) around the center, then sweep
    /// outward left (descending) and right (ascending) while the visitor
    /// keeps asking for more.
    fn visit<I, V, IRW, VRW, T>(
        &self,
        mut visitor: T,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Visitor<I = I, V = V>,
    {
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        assert_eq!(self.byte_size(entry_byte_size), raw_entries.len());
        let l1page_byte_size = self.n_l1page_elems * entry_byte_size;
        let subtree_group_byte_size =
            (self.n_l1page_elems + 1) * self.sub_tree.byte_size(entry_byte_size);
        let (ld_buff, st_buff) = raw_entries.split_at(self.n_elems * entry_byte_size);
        let (l1_buff, st_buff) = st_buff.split_at((self.n_elems + 1) * l1page_byte_size);
        let mut entries = RawEntries::new(ld_buff, id_rw, val_rw);
        // l: next own entry to visit on the left (i32 so it can go to -1);
        // r: next own entry to visit on the right.
        let (mut l, mut r) = match entries.binary_search(visitor.center())? {
            Ok(i) => {
                // Exact hit on one of this node's entries: visit it, then
                // its two surrounding children (i going down, i+1 going up).
                visitor.visit_center(entries.get_entry(i)?);
                if visitor.visit_desc() {
                    let from_l1 = i * l1page_byte_size;
                    let to_l1 = from_l1 + l1page_byte_size;
                    let from_st = i * subtree_group_byte_size;
                    let to_st = from_st + subtree_group_byte_size;
                    visitor = visit_desc_l1page(
                        visitor,
                        id_rw,
                        val_rw,
                        &l1_buff[from_l1..to_l1],
                        self.sub_tree.as_ref(),
                        &st_buff[from_st..to_st],
                    )?;
                }
                if visitor.visit_asc() {
                    let from_l1 = (i + 1) * l1page_byte_size;
                    let to_l1 = from_l1 + l1page_byte_size;
                    let from_st = (i + 1) * subtree_group_byte_size;
                    let to_st = from_st + subtree_group_byte_size;
                    visitor = visit_asc_l1page(
                        visitor,
                        id_rw,
                        val_rw,
                        &l1_buff[from_l1..to_l1],
                        self.sub_tree.as_ref(),
                        &st_buff[from_st..to_st],
                    )?;
                }
                (i as i32 - 1, i + 1)
            }
            Err(i) => {
                // Center falls inside child i: recurse with the centered visit.
                let from_l1 = i * l1page_byte_size;
                let to_l1 = from_l1 + l1page_byte_size;
                let from_st = i * subtree_group_byte_size;
                let to_st = from_st + subtree_group_byte_size;
                visitor = visit_l1page(
                    visitor,
                    id_rw,
                    val_rw,
                    &l1_buff[from_l1..to_l1],
                    self.sub_tree.as_ref(),
                    &st_buff[from_st..to_st],
                )?;
                (i as i32 - 1, i)
            }
        };
        // Sweep left: entry l, then child l, re-checking the visitor's
        // willingness to descend before each step.
        while l >= 0 {
            if !visitor.visit_desc() {
                break;
            }
            visitor.visit_le_center(entries.get_entry(l as usize)?);
            if !visitor.visit_desc() {
                break;
            }
            let from_l1 = l as usize * l1page_byte_size;
            let to_l1 = from_l1 + l1page_byte_size;
            let from_st = l as usize * subtree_group_byte_size;
            let to_st = from_st + subtree_group_byte_size;
            visitor = visit_desc_l1page(
                visitor,
                id_rw,
                val_rw,
                &l1_buff[from_l1..to_l1],
                self.sub_tree.as_ref(),
                &st_buff[from_st..to_st],
            )?;
            l -= 1;
        }
        // Sweep right: entry r, then child r + 1 (the child to its right).
        while r < self.n_elems {
            if !visitor.visit_asc() {
                break;
            }
            visitor.visit_he_center(entries.get_entry(r)?);
            if !visitor.visit_asc() {
                break;
            }
            let from_l1 = (r + 1) * l1page_byte_size;
            let to_l1 = from_l1 + l1page_byte_size;
            let from_st = (r + 1) * subtree_group_byte_size;
            let to_st = from_st + subtree_group_byte_size;
            visitor = visit_asc_l1page(
                visitor,
                id_rw,
                val_rw,
                &l1_buff[from_l1..to_l1],
                self.sub_tree.as_ref(),
                &st_buff[from_st..to_st],
            )?;
            r += 1;
        }
        Ok(visitor)
    }
    /// Full ascending traversal: child 0 first, then children 1..=n_elems.
    /// NOTE(review): as in visit_desc, the node's own separator entries are
    /// not emitted here via visit_he_center — confirm intended.
    fn visit_asc<I, V, IRW, VRW, T>(
        &self,
        mut visitor: T,
        raw_entries: &[u8],
        id_rw: &IRW,
        val_rw: &VRW,
    ) -> Result<T, Error>
    where
        I: Id,
        V: Val,
        IRW: ReadWrite<Type = I>,
        VRW: ReadWrite<Type = V>,
        T: Visitor<I = I, V = V>,
    {
        let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
        assert_eq!(self.byte_size(entry_byte_size), raw_entries.len());
        let l1page_byte_size = self.n_l1page_elems * entry_byte_size;
        let subtree_group_byte_size =
            (self.n_l1page_elems + 1) * self.sub_tree.byte_size(entry_byte_size);
        let (_ld_buff, st_buff) = raw_entries.split_at(self.n_elems * entry_byte_size);
        let (l1_buff, st_buff) = st_buff.split_at((self.n_elems + 1) * l1page_byte_size);
        // Left-most child page first.
        visitor = visit_asc_l1page(
            visitor,
            id_rw,
            val_rw,
            &l1_buff[0..l1page_byte_size],
            self.sub_tree.as_ref(),
            &st_buff[0..subtree_group_byte_size],
        )?;
        for i in 1..=self.n_elems {
            let from_l1 = i * l1page_byte_size;
            let to_l1 = from_l1 + l1page_byte_size;
            let from_st = i * subtree_group_byte_size;
            let to_st = from_st + subtree_group_byte_size;
            visitor = visit_asc_l1page(
                visitor,
                id_rw,
                val_rw,
                &l1_buff[from_l1..to_l1],
                self.sub_tree.as_ref(),
                &st_buff[from_st..to_st],
            )?;
        }
        Ok(visitor)
    }
}
1751
/// Serializes one L1 page plus its `n_l1 + 1` sub-trees, consuming entries
/// from `it` in ascending order.
///
/// Layout: `l1_buff` receives the page's own entries, `subtree_buff` the
/// serialized sub-trees; sub-tree k precedes (in value order) page entry k,
/// and the last sub-tree comes after the last entry.
///
/// Errors with `ErrorKind::Other` if `it` runs out of entries.
fn write_l1page<I, V, IRW, VRW, S, T>(
    mut it: T,
    id_rw: &IRW,
    val_rw: &VRW,
    mut l1_buff: &mut [u8],
    sub_tree: &S,
    mut subtree_buff: &mut [u8],
) -> Result<T, Error>
where
    I: Id,
    V: Val,
    IRW: ReadWrite<Type = I>,
    VRW: ReadWrite<Type = V>,
    S: SubTreeW,
    T: Iterator<Item = Entry<I, V>>,
{
    assert!(!l1_buff.is_empty());
    let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
    let subtree_byte_size = sub_tree.byte_size(entry_byte_size);
    // Number of entries this page holds, inferred from the buffer size.
    let n_l1 = l1_buff.len() / entry_byte_size;
    assert_eq!(
        l1_buff.len(),
        n_l1 * entry_byte_size,
        "Wrong L1 buff size: {} != {}",
        l1_buff.len(),
        n_l1 * entry_byte_size
    );
    assert_eq!(
        subtree_buff.len(),
        (n_l1 + 1) * subtree_byte_size,
        "Wrong SubTree buff size: {} != {}",
        subtree_buff.len(),
        (n_l1 + 1) * subtree_byte_size
    );
    for _ in 0..n_l1 {
        // Write sub-tree k, then page entry k (which separates sub-trees k and k+1).
        let (curr_buff, st_buff) = subtree_buff.split_at_mut(subtree_byte_size);
        it = sub_tree.write(it, id_rw, val_rw, curr_buff)?;
        subtree_buff = st_buff;
        it.next()
            .ok_or_else(|| Error::new(ErrorKind::Other, "Iterator depleted!"))?
            .write(&mut l1_buff, id_rw, val_rw)?;
    }
    // The last (right-most) sub-tree.
    it = sub_tree.write(it, id_rw, val_rw, subtree_buff)?;
    assert!(l1_buff.is_empty());
    Ok(it)
}
1805
1806fn get_l1page<I, V, IRW, VRW, S>(
1807 val: V,
1808 id_rw: &IRW,
1809 val_rw: &VRW,
1810 l1_buff: &[u8],
1811 sub_tree: &S,
1812 subtree_buff: &[u8],
1813) -> Result<Option<Entry<I, V>>, Error>
1814where
1815 I: Id,
1816 V: Val,
1817 IRW: ReadWrite<Type = I>,
1818 VRW: ReadWrite<Type = V>,
1819 S: SubTreeR,
1820{
1821 assert!(!l1_buff.is_empty());
1822 let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
1823 let subtree_byte_size = sub_tree.byte_size(entry_byte_size);
1824 let n_l1 = l1_buff.len() / entry_byte_size;
1825 assert_eq!(l1_buff.len(), n_l1 * entry_byte_size);
1826 assert_eq!(subtree_buff.len(), (n_l1 + 1) * subtree_byte_size);
1827 let mut l1_entries = RawEntries::new(l1_buff, id_rw, val_rw);
1828 match l1_entries.binary_search(&val)? {
1829 Ok(i) => Ok(Some(l1_entries.get_entry(i)?)),
1830 Err(i) => {
1831 let from = i * subtree_byte_size;
1832 let to = from + subtree_byte_size;
1833 sub_tree.get(val, &subtree_buff[from..to], id_rw, val_rw)
1834 }
1835 }
1836}
1837
/// Traversal of one L1 page centered on `visitor.center()`.
///
/// Binary-searches the center among the page's entries, visits the
/// sub-tree(s) around it, then sweeps outward: left (descending) and right
/// (ascending), as long as the visitor keeps asking for more entries.
fn visit_l1page<I, V, IRW, VRW, S, T>(
    mut visitor: T,
    id_rw: &IRW,
    val_rw: &VRW,
    l1_buff: &[u8],
    sub_tree: &S,
    subtree_buff: &[u8],
) -> Result<T, Error>
where
    I: Id,
    V: Val,
    IRW: ReadWrite<Type = I>,
    VRW: ReadWrite<Type = V>,
    S: SubTreeR,
    T: Visitor<I = I, V = V>,
{
    assert!(!l1_buff.is_empty());
    let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
    let subtree_byte_size = sub_tree.byte_size(entry_byte_size);
    let n_l1 = l1_buff.len() / entry_byte_size;
    assert_eq!(l1_buff.len(), n_l1 * entry_byte_size);
    assert_eq!(subtree_buff.len(), (n_l1 + 1) * subtree_byte_size);
    let mut l1_entries = RawEntries::new(l1_buff, id_rw, val_rw);
    // l: next page entry to visit on the left (i32 so it can reach -1);
    // r: next page entry to visit on the right.
    let (mut l, mut r) = match l1_entries.binary_search(visitor.center())? {
        Ok(i) => {
            // Exact hit: visit the entry, then the sub-trees on both sides.
            visitor.visit_center(l1_entries.get_entry(i)?);
            if visitor.visit_desc() {
                let from = i * subtree_byte_size;
                let to = from + subtree_byte_size;
                visitor = sub_tree.visit_desc(visitor, &subtree_buff[from..to], id_rw, val_rw)?;
            }
            if visitor.visit_asc() {
                let from = (i + 1) * subtree_byte_size;
                let to = from + subtree_byte_size;
                visitor = sub_tree.visit_asc(visitor, &subtree_buff[from..to], id_rw, val_rw)?;
            }
            (i as i32 - 1, i + 1)
        }
        Err(i) => {
            // Center falls inside sub-tree i: recurse with the centered visit.
            let from = i * subtree_byte_size;
            let to = from + subtree_byte_size;
            visitor = sub_tree.visit(visitor, &subtree_buff[from..to], id_rw, val_rw)?;
            (i as i32 - 1, i)
        }
    };
    // Sweep left: page entry l, then sub-tree l, re-checking the visitor
    // before each step.
    while l >= 0 {
        if !visitor.visit_desc() {
            break;
        }
        visitor.visit_le_center(l1_entries.get_entry(l as usize)?);
        if !visitor.visit_desc() {
            break;
        }
        let from = l as usize * subtree_byte_size;
        let to = from + subtree_byte_size;
        visitor = sub_tree.visit_desc(visitor, &subtree_buff[from..to], id_rw, val_rw)?;
        l -= 1;
    }
    // Sweep right: page entry r, then sub-tree r + 1 (to its right).
    while r < n_l1 {
        if !visitor.visit_asc() {
            break;
        }
        visitor.visit_he_center(l1_entries.get_entry(r)?);
        if !visitor.visit_asc() {
            break;
        }
        let from = (r + 1) * subtree_byte_size;
        let to = from + subtree_byte_size;
        visitor = sub_tree.visit_asc(visitor, &subtree_buff[from..to], id_rw, val_rw)?;
        r += 1;
    }
    Ok(visitor)
}
1911
/// Full descending traversal of one L1 page: the right-most sub-tree first,
/// then, from right to left, each page entry followed by the sub-tree on its
/// left — stopping as soon as the visitor reports it is done descending.
///
/// NOTE(review): page entries are consumed left-to-right (`get_entry(0)`
/// upward) by this loop while the sub-trees are taken right-to-left —
/// confirm against `RawEntries` semantics that this yields descending order.
fn visit_desc_l1page<I, V, IRW, VRW, S, T>(
    mut visitor: T,
    id_rw: &IRW,
    val_rw: &VRW,
    l1_buff: &[u8],
    sub_tree: &S,
    subtree_buff: &[u8],
) -> Result<T, Error>
where
    I: Id,
    V: Val,
    IRW: ReadWrite<Type = I>,
    VRW: ReadWrite<Type = V>,
    S: SubTreeR,
    T: Visitor<I = I, V = V>,
{
    assert!(!l1_buff.is_empty());
    let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
    let subtree_byte_size = sub_tree.byte_size(entry_byte_size);
    let n_l1 = l1_buff.len() / entry_byte_size;
    assert_eq!(l1_buff.len(), n_l1 * entry_byte_size);
    assert_eq!(subtree_buff.len(), (n_l1 + 1) * subtree_byte_size);
    let mut l1_entries = RawEntries::new(l1_buff, id_rw, val_rw);
    // Right-most sub-tree (index n_l1) first.
    let from = n_l1 * subtree_byte_size;
    let to = from + subtree_byte_size;
    visitor = sub_tree.visit_desc(visitor, &subtree_buff[from..to], id_rw, val_rw)?;
    let mut i = 0;
    while i < n_l1 && visitor.visit_desc() {
        visitor.visit_le_center(l1_entries.get_entry(i)?);
        if !visitor.visit_desc() {
            break;
        }
        let from = i * subtree_byte_size;
        let to = from + subtree_byte_size;
        visitor = sub_tree.visit_desc(visitor, &subtree_buff[from..to], id_rw, val_rw)?;
        i += 1;
    }
    Ok(visitor)
}
1951
/// Full ascending traversal of one L1 page: alternates sub-tree k and page
/// entry k from left to right, finishing with the right-most sub-tree —
/// stopping as soon as the visitor reports it is done ascending.
fn visit_asc_l1page<I, V, IRW, VRW, S, T>(
    mut visitor: T,
    id_rw: &IRW,
    val_rw: &VRW,
    l1_buff: &[u8],
    sub_tree: &S,
    subtree_buff: &[u8],
) -> Result<T, Error>
where
    I: Id,
    V: Val,
    IRW: ReadWrite<Type = I>,
    VRW: ReadWrite<Type = V>,
    S: SubTreeR,
    T: Visitor<I = I, V = V>,
{
    assert!(!l1_buff.is_empty());
    let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
    let subtree_byte_size = sub_tree.byte_size(entry_byte_size);
    let n_l1 = l1_buff.len() / entry_byte_size;
    assert_eq!(l1_buff.len(), n_l1 * entry_byte_size);
    assert_eq!(subtree_buff.len(), (n_l1 + 1) * subtree_byte_size);
    let mut l1_entries = RawEntries::new(l1_buff, id_rw, val_rw);
    let mut i = 0;
    while i < n_l1 {
        // Sub-tree i, then the page entry separating it from sub-tree i+1.
        let from = i * subtree_byte_size;
        let to = from + subtree_byte_size;
        visitor = sub_tree.visit_asc(visitor, &subtree_buff[from..to], id_rw, val_rw)?;
        if !visitor.visit_asc() {
            break;
        }
        visitor.visit_he_center(l1_entries.get_entry(i)?);
        if !visitor.visit_asc() {
            break;
        }
        i += 1;
    }
    // Only reached without early break: visit the right-most sub-tree.
    if i == n_l1 {
        let from = i * subtree_byte_size;
        let to = from + subtree_byte_size;
        visitor = sub_tree.visit_asc(visitor, &subtree_buff[from..to], id_rw, val_rw)?;
    }
    Ok(visitor)
}
1996
/// Metadata stored in the file preamble: entry types, tree constants and
/// the recursive tree layout.
#[derive(Debug, Serialize, Deserialize)]
pub struct BSTreeMeta {
    /// Types of the entries' identifier and value.
    pub types: IdVal,
    /// Page/entry size constants the layout was derived from.
    constants: BSTreeConstants,
    /// Recursive description of the tree shape.
    pub layout: BSTreeLayout,
}
2003
2004impl BSTreeMeta {
2005 fn from(
2006 types: IdVal,
2007 n_entries: usize,
2008 entry_byte_size: usize,
2009 l1_byte_size: usize,
2010 ld_byte_size: usize,
2011 ) -> BSTreeMeta {
2012 let constants = BSTreeConstants::new(n_entries, entry_byte_size, l1_byte_size, ld_byte_size);
2013 let layout = BSTreeLayout::new(&constants);
2014 BSTreeMeta {
2015 types,
2016 constants,
2017 layout,
2018 }
2019 }
2020
2021 pub fn get_root(&self) -> Root {
2022 self.layout.get_root(&self.constants)
2023 }
2024
2025 }
2029
/// Fixed tree parameters, persisted in the file preamble.
#[derive(Debug, Serialize, Deserialize)]
struct BSTreeConstants {
    /// Total number of entries stored in the tree.
    n_entries: u64,
    /// Byte size of a single (id, value) entry.
    entry_byte_size: u8,
    /// Number of entries fitting in one L1 page (l1_byte_size / entry_byte_size).
    n_entries_per_l1page: u16,
    /// Number of L1 pages fitting in one LD (disk) page, separators included.
    n_l1page_per_ldpage: u16,
}
2047
2048impl BSTreeConstants {
2049 fn new(
2053 n_entries: usize,
2054 entry_byte_size: usize,
2055 l1_byte_size: usize,
2056 ld_byte_size: usize,
2057 ) -> BSTreeConstants {
2058 let n_entries_per_l1page = l1_byte_size / entry_byte_size;
2059 let n_entries_per_ldpage_max = ld_byte_size / entry_byte_size;
2060 let n_l1page_per_ldpage = (n_entries_per_ldpage_max + 1) / (n_entries_per_l1page + 1);
2066 BSTreeConstants {
2067 n_entries: n_entries as u64,
2068 entry_byte_size: entry_byte_size as u8,
2069 n_entries_per_l1page: n_entries_per_l1page as u16, n_l1page_per_ldpage: n_l1page_per_ldpage as u16, }
2072 }
2073
2074 }
2087
/// Recursive description of the tree shape, persisted in the file preamble.
#[derive(Debug, Serialize, Deserialize)]
pub struct BSTreeLayout {
    /// Depth of the main part of the tree (0 = a single leaf page).
    depth: u8,
    /// Number of entries stored in the root node.
    n_entries_root: u16,
    /// Number of entries stored in the main part of the tree.
    n_entries_main: u64,
    /// Layout of the extra right-most sub-tree holding entries that do not
    /// fit in the main part (`None` when there is no remainder).
    /// NOTE(review): field name has a typo ("rigthmost"); kept as-is since
    /// it is referenced elsewhere and is part of the serialized layout.
    rigthmost_subtree: Option<Box<BSTreeLayout>>,
}
2134
2135impl BSTreeLayout {
2136 fn new(cte: &BSTreeConstants) -> BSTreeLayout {
2137 BSTreeLayout::from(cte.n_entries, cte)
2138 }
2139
2140 fn from(n_entries: u64, cte: &BSTreeConstants) -> BSTreeLayout {
2141 let n_l1 = cte.n_entries_per_l1page as u64;
2142 let n_ld_elem = cte.n_l1page_per_ldpage as u64 - 1;
2143 let n_ld = n_ld_elem + (n_ld_elem + 1) * n_l1;
2144 if n_entries <= n_l1 {
2146 return BSTreeLayout {
2147 depth: 0,
2148 n_entries_root: n_entries as u16,
2149 n_entries_main: n_entries,
2150 rigthmost_subtree: None,
2151 };
2152 }
2153 let mut n_sub = n_l1;
2156 if n_entries <= n_l1 + (n_l1 + 1) * n_sub {
2157 return BSTreeLayout::from_known_depth(1, n_entries, n_sub, cte);
2158 }
2159 n_sub = n_ld;
2160 for depth in (2..=8).step_by(2) {
2162 if n_entries <= n_l1 + (n_l1 + 1) * n_sub {
2164 return BSTreeLayout::from_known_depth(depth, n_entries, n_sub, cte);
2165 }
2166 n_sub = n_l1 + (n_l1 + 1) * n_sub;
2167 if n_entries <= n_l1 + (n_l1 + 1) * n_sub {
2169 return BSTreeLayout::from_known_depth(depth + 1, n_entries, n_sub, cte);
2170 }
2171 n_sub = n_ld_elem + (n_ld_elem + 1) * n_sub;
2172 }
2173 panic!("Too deep tree. Check your inputs (entry size in bytes, ...).");
2175 }
2176
2177 fn from_known_depth(
2179 depth: u8,
2180 n_entries: u64,
2181 n_subtree: u64,
2182 cte: &BSTreeConstants,
2183 ) -> BSTreeLayout {
2184 let n_root = (n_entries - n_subtree) / (1 + n_subtree);
2188 let n_rem = n_entries - (n_root + (n_root + 1) * n_subtree);
2189 assert!(n_root as u16 <= cte.n_entries_per_l1page);
2190 assert!(n_root as u16 <= cte.n_entries_per_l1page);
2191 if n_rem == 0 {
2192 BSTreeLayout {
2194 depth,
2195 n_entries_root: n_root as u16,
2196 n_entries_main: n_entries,
2197 rigthmost_subtree: None,
2198 }
2199 } else {
2200 let n_entries_main = (n_root + 1) + (n_root + 1) * n_subtree;
2201 let n_entries_sub = n_entries - n_entries_main;
2202 BSTreeLayout {
2203 depth,
2204 n_entries_root: n_root as u16 + 1,
2205 n_entries_main,
2206 rigthmost_subtree: Some(Box::new(BSTreeLayout::from(n_entries_sub, cte))),
2207 }
2208 }
2209 }
2210
2211 fn get_root(&self, cte: &BSTreeConstants) -> Root {
2217 match (self.depth, self.depth & 1, self.rigthmost_subtree.as_ref()) {
2218 (0, _, _) => Root::L1Leaf(L1Leaf::new(self.n_entries_root as usize)),
2220 (1, _, None) => Root::L1Node(
2222 L1Node::new(self.n_entries_root as usize, self.get_subtree(1, cte)),
2224 ),
2225 (1, _, Some(sub_layout)) => Root::RootL1Node(
2226 RootL1Node::new(
2228 self.n_entries_root as usize,
2229 self.get_subtree(1, cte),
2230 sub_layout.get_root(cte),
2231 ),
2232 ),
2233 (_, 0, None) => Root::L1Node(L1Node::new(
2236 self.n_entries_root as usize,
2237 self.get_subtree(1, cte),
2238 )),
2239 (_, 1, None) => Root::LDNode(LDNode::new(
2240 self.n_entries_root as usize,
2241 cte.n_entries_per_l1page as usize,
2242 self.get_ld_subtree(2, cte),
2243 )),
2244 (_, 0, Some(sub_layout)) => Root::RootL1Node(RootL1Node::new(
2246 self.n_entries_root as usize,
2247 self.get_subtree(1, cte),
2248 sub_layout.get_root(cte),
2249 )),
2250 (_, 1, Some(sub_layout)) => Root::RootLDNode(RootLDNode::new(
2251 self.n_entries_root as usize,
2252 cte.n_entries_per_l1page as usize,
2253 self.get_ld_subtree(2, cte),
2254 sub_layout.get_root(cte),
2255 )),
2256 (_, _, _) => unreachable!(),
2257 }
2258 }
2259
2260 fn get_subtree(&self, d: u8, cte: &BSTreeConstants) -> SubTree {
2261 if d == self.depth {
2262 SubTree::L1Leaf(L1Leaf::new(cte.n_entries_per_l1page as usize))
2263 } else if d == (self.depth - 1) {
2264 SubTree::L1Node(L1Node::new(
2265 cte.n_l1page_per_ldpage as usize - 1,
2267 self.get_subtree(d + 1, cte),
2268 ))
2269 } else {
2270 SubTree::LDNode(LDNode::new(
2271 cte.n_l1page_per_ldpage as usize - 1,
2272 cte.n_entries_per_l1page as usize,
2273 self.get_ld_subtree(d + 2, cte),
2274 ))
2275 }
2276 }
2277
2278 fn get_ld_subtree(&self, d: u8, cte: &BSTreeConstants) -> LDSubTree {
2279 assert!(d < self.depth);
2280 if d == (self.depth - 1) {
2281 LDSubTree::L1Node(L1Node::new(
2282 cte.n_l1page_per_ldpage as usize - 1,
2284 self.get_subtree(d + 1, cte),
2285 ))
2286 } else {
2287 LDSubTree::LDNode(LDNode::new(
2288 cte.n_l1page_per_ldpage as usize - 1,
2289 cte.n_entries_per_l1page as usize,
2290 self.get_ld_subtree(d + 2, cte),
2291 ))
2292 }
2293 }
2294}
2295
2296#[cfg(not(target_arch = "wasm32"))]
2308pub fn build<I, V, IRW, VRW, T>(
2309 output_file: PathBuf,
2310 mem_args: &MemSizeArgs,
2311 n_entries: usize,
2312 entries_iterator: T,
2313 types: &IdVal,
2314 id_rw: &IRW,
2315 val_rw: &VRW,
2316) -> Result<(), Error>
2317where
2318 I: Id,
2319 V: Val,
2320 IRW: ReadWrite<Type = I>,
2321 VRW: ReadWrite<Type = V>,
2322 T: Iterator<Item = Entry<I, V>>,
2323{
2324 let entry_byte_size = id_rw.n_bytes() + val_rw.n_bytes();
2328 let meta = dbg!(BSTreeMeta::from(
2329 types.clone(),
2330 n_entries,
2331 entry_byte_size,
2332 mem_args.l1_byte_size(),
2333 mem_args.disk_byte_size()
2334 ));
2335 let encoded_meta: Vec<u8> = bincode::serialize(&meta).unwrap();
2336 let file = OpenOptions::new()
2338 .read(true)
2339 .write(true)
2340 .create(true)
2341 .open(output_file)?;
2342 let before_meta_len = FILE_TYPE.len() + 3 + 2;
2344 let data_starting_byte = before_meta_len + encoded_meta.len();
2345 let file_byte_size = data_starting_byte + n_entries * entry_byte_size;
2346 file.set_len(file_byte_size as u64)?;
2348 let mut mmap = unsafe { MmapMut::map_mut(&file)? };
2350 write_meta(&mut mmap[0..data_starting_byte], encoded_meta)?;
2352 mmap.flush_range(0, data_starting_byte)?;
2353 let root = meta.get_root();
2355 root.write(
2356 entries_iterator,
2357 id_rw,
2358 val_rw,
2359 &mut mmap[data_starting_byte..file_byte_size],
2360 )?;
2361 mmap.flush()?;
2362 file.sync_all()
2363}
2364
2365fn write_meta(mut buff: &mut [u8], encoded_meta: Vec<u8>) -> Result<(), Error> {
2366 let v_nums = parse_version().unwrap();
2367 buff.write_all(FILE_TYPE)?;
2368 buff.write_all(&v_nums)?;
2369 buff.write_u16::<LittleEndian>(encoded_meta.len() as u16)?;
2370 assert_eq!(buff.len(), encoded_meta.len());
2371 buff.copy_from_slice(&encoded_meta[..]);
2372 Ok(())
2373}
2374
#[cfg(not(target_arch = "wasm32"))]
/// `Process` looking up an entry by value (via `Root::get`) in a
/// memory-mapped tree file.
struct GetProcess<'a> {
    /// Value to look for, still in textual form (parsed in `exec`).
    value: String,
    /// Metadata decoded from the file preamble.
    meta: &'a BSTreeMeta,
    /// Memory-mapped tree file.
    mmap: &'a Mmap,
    /// Byte offset in `mmap` where the tree data starts.
    data_starting_byte: usize,
}
2397
2398#[cfg(not(target_arch = "wasm32"))]
2399impl<'a> Process for GetProcess<'a> {
2400 type Output = Option<(String, String)>;
2401
2402 fn exec<I, V, D, IRW, VRW>(
2403 self,
2404 _types: IdVal,
2405 id_rw: IRW,
2406 val_rw: VRW,
2407 _dist: D,
2408 ) -> Result<Self::Output, std::io::Error>
2409 where
2410 I: Id,
2411 V: Val,
2412 D: Fn(&V, &V) -> V,
2413 IRW: ReadWrite<Type = I>,
2414 VRW: ReadWrite<Type = V>,
2415 {
2416 let v = self
2417 .value
2418 .parse::<V>()
2419 .map_err(|_e| Error::new(ErrorKind::Other, ""))?; let root = self.meta.get_root();
2421 let opt_entry = root.get(v, &self.mmap[self.data_starting_byte..], &id_rw, &val_rw)?;
2422 Ok(opt_entry.map(|Entry { id, val }| (format!("{:?}", id), format!("{:?}", val))))
2423 }
2424}
2425
#[cfg(not(target_arch = "wasm32"))]
/// `Process` looking up an entry by value through a `VisitorExact`
/// traversal of a memory-mapped tree file.
struct GetExactProcess<'a> {
    /// Value to look for, still in textual form (parsed in `exec`).
    value: String,
    /// Metadata decoded from the file preamble.
    meta: &'a BSTreeMeta,
    /// Memory-mapped tree file.
    mmap: &'a Mmap,
    /// Byte offset in `mmap` where the tree data starts.
    data_starting_byte: usize,
}
2433
2434#[cfg(not(target_arch = "wasm32"))]
2435impl<'a> Process for GetExactProcess<'a> {
2436 type Output = Option<(String, String)>;
2437
2438 fn exec<I, V, D, IRW, VRW>(
2439 self,
2440 _types: IdVal,
2441 id_rw: IRW,
2442 val_rw: VRW,
2443 _dist: D,
2444 ) -> Result<Self::Output, std::io::Error>
2445 where
2446 I: Id,
2447 V: Val,
2448 D: Fn(&V, &V) -> V,
2449 IRW: ReadWrite<Type = I>,
2450 VRW: ReadWrite<Type = V>,
2451 {
2452 let v = self
2453 .value
2454 .parse::<V>()
2455 .map_err(|_e| Error::new(ErrorKind::Other, ""))?; let visitor = VisitorExact::new(v);
2457
2458 let root = self.meta.get_root();
2459 let visitor = root.visit(
2460 visitor,
2461 &self.mmap[self.data_starting_byte..],
2462 &id_rw,
2463 &val_rw,
2464 )?;
2465 Ok(
2466 visitor
2467 .entry
2468 .map(|Entry { id, val }| (format!("{:?}", id), format!("{:?}", val))),
2469 )
2470 }
2471}
2472
2473pub fn read_meta(mut buff: &[u8]) -> Result<([u8; 3], usize, BSTreeMeta), Error> {
2540 let mut file_type = *FILE_TYPE;
2541 buff.read_exact(&mut file_type)?;
2542 assert_eq!((*FILE_TYPE), file_type);
2543 let mut v_nums: [u8; 3] = Default::default();
2544 buff.read_exact(&mut v_nums)?;
2545 let meta_byte_size = buff.read_u16::<LittleEndian>()? as usize;
2547 let meta: BSTreeMeta = bincode::deserialize_from(&buff[..meta_byte_size])
2548 .map_err(|_e| Error::new(ErrorKind::Other, String::from("Unable to dezerialize meta")))?;
2549 Ok((v_nums, file_type.len() + 3 + 2 + meta_byte_size, meta))
2550}
2551
/// Parses the crate version string (`major.minor.patch`) into 3 bytes.
///
/// NOTE: because of `rsplit`, the components are returned in *reverse*
/// order — `[patch, minor, major]`. `write_meta`/`read_meta` round-trip
/// these bytes as-is.
///
/// # Panics
/// Panics if the version string does not have exactly 3 components.
pub fn parse_version() -> Result<[u8; 3], ParseIntError> {
    // rsplit iterates from the right: patch, then minor, then major.
    let rv: Result<Vec<u8>, ParseIntError> = VERSION.rsplit('.').map(|i| i.parse::<u8>()).collect();
    let v = rv?;
    assert_eq!(v.len(), 3);
    Ok([v[0], v[1], v[2]])
}
2583
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{rw::U64RW, IdType, ValType};

    /// Checks that `parse_version` returns the version bytes in reverse
    /// (patch, minor, major) order, whatever the current crate version is.
    #[test]
    fn testok_num_nside() {
        // Derive the expected bytes from VERSION instead of hard-coding the
        // current version string, so a version bump does not break the test.
        let v: Vec<u8> = VERSION
            .split('.')
            .map(|i| i.parse::<u8>().unwrap())
            .collect();
        assert_eq!(v.len(), 3);
        assert_eq!(parse_version().unwrap(), [v[2], v[1], v[0]]);
    }

    /// Builds a 3M-entry u64/u64 tree file and checks the build succeeds.
    #[test]
    fn testok_build() {
        use std::path::PathBuf;
        let path = PathBuf::from("./test_u64u64_x3.bstree");
        {
            let mem_args = MemSizeArgs {
                l1: 32,
                disk: 8192,
                fill_factor: 1.0,
            };
            let n = 3_000_000_u64;
            let mut entries = Vec::with_capacity(n as usize);
            for i in 0..n {
                entries.push(Entry { id: i, val: i });
            }
            let res = build(
                path.clone(),
                &mem_args,
                entries.len(),
                entries.into_iter(),
                &IdVal(IdType::U64, ValType::U64),
                &U64RW,
                &U64RW,
            );
            res.unwrap();
        }
        // Clean up the generated file; best effort, the assertion of
        // interest has already passed.
        let _ = std::fs::remove_file(&path);
    }
}