rax/lib.rs
1#![cfg_attr(test, feature(test))]
2
3/// Representation of a radix tree as implemented in this file, that contains
4/// the strings "foo", "foobar" and "footer" after the insertion of each
5/// word. When the node represents a key inside the radix tree, we write it
6/// between [], otherwise it is written between ().
7///
8/// This is the vanilla representation:
9///
10/// ```text
///              (f) ""
///                \
///                (o) "f"
///                  \
///                  (o) "fo"
///                    \
///                  [t   b] "foo"
///                  /     \
///         "foot"  (e)     (a) "foob"
///                 /         \
///       "foote"  (r)         (r) "fooba"
///               /             \
///     "footer" []             [] "foobar"
24/// ```
25///
26/// However, this implementation implements a very common optimization where
27/// successive nodes having a single child are "compressed" into the node
28/// itself as a string of characters, each representing a next-level child,
29/// and only the link to the node representing the last character node is
30/// provided inside the representation. So the above representation is turned
31/// into:
32///
33/// ```text
///                  ["foo"] ""
///                     |
///                  [t   b] "foo"
///                  /     \
///        "foot"  ("er")   ("ar") "foob"
///                  /         \
///        "footer" []         [] "foobar"
41/// ```
42///
43/// However this optimization makes the implementation a bit more complex.
44/// For instance if a key "first" is added in the above radix tree, a
45/// "node splitting" operation is needed, since the "foo" prefix is no longer
46/// composed of nodes having a single child one after the other. This is the
47/// above tree and the resulting node splitting after this event happens:
48///
49///
50/// ```text
///                    (f) ""
///                    /
///                 (i o) "f"
///                 /   \
///    "firs"  ("rst")   (o) "fo"
///              /        \
///    "first" []       [t   b] "foo"
///                     /     \
///            "foot" ("er")   ("ar") "foob"
///                    /          \
///          "footer" []          [] "foobar"
62/// ```
63///
64/// Similarly after deletion, if a new chain of nodes having a single child
65/// is created (the chain must also not include nodes that represent keys),
66/// it must be compressed back into a single node.
67extern crate libc;
68extern crate nix;
69
70use std::{
71 error, fmt,
72 mem::{size_of, MaybeUninit},
73 ptr,
74};
75
76use nix::errno::Errno;
77
78pub const GREATER: &str = ">";
79pub const GREATER_EQUAL: &str = ">=";
80pub const LESSER: &str = "<";
81pub const LESSER_EQUAL: &str = "<=";
82pub const EQUAL: &str = "=";
83pub const BEGIN: &str = "^";
84pub const END: &str = "$";
85
86pub const RAX_NODE_MAX_SIZE: libc::c_int = (1 << 29) - 1;
87pub const RAX_STACK_STATIC_ITEMS: libc::c_int = 32;
88pub const RAX_ITER_STATIC_LEN: libc::c_int = 128;
89pub const RAX_ITER_JUST_SEEKED: libc::c_int = 1 << 0;
90pub const RAX_ITER_EOF: libc::c_int = 1 << 1;
91pub const RAX_ITER_SAFE: libc::c_int = 1 << 2;
92
/// Return the existing Rax allocator.
///
/// Yields the `(malloc, realloc, free)` hooks currently installed for the
/// C rax implementation's heap needs.
///
/// # Safety
///
/// Must only be called when no other thread is modifying the allocator.
pub unsafe fn allocator() -> (
    extern "C" fn(size: libc::size_t) -> *mut u8,
    extern "C" fn(ptr: *mut libc::c_void, size: libc::size_t) -> *mut u8,
    extern "C" fn(ptr: *mut libc::c_void),
) {
    // Reads the mutable statics that back the C-side allocator hooks.
    (rax_malloc, rax_realloc, rax_free)
}
105
/// Rax internally makes calls to "malloc", "realloc" and "free" for all of its
/// heap memory needs. These calls can be patched with the supplied hooks.
/// Do not call this method after Rax has been used at all. This must
/// be called before using or calling any other Rax API function.
///
/// # Safety
///
/// Must be called before any Rax allocation occurs. Not thread-safe:
/// writes three mutable statics without synchronization.
pub unsafe fn set_allocator(
    malloc: extern "C" fn(size: libc::size_t) -> *mut u8,
    realloc: extern "C" fn(ptr: *mut libc::c_void, size: libc::size_t) -> *mut u8,
    free: extern "C" fn(ptr: *mut libc::c_void),
) {
    // Replace all three hooks together; mixing allocators across calls
    // would hand memory from one malloc to another free.
    rax_malloc = malloc;
    rax_realloc = realloc;
    rax_free = free;
}
123
/// Errors surfaced by the Rax wrappers.
#[derive(Debug)]
pub enum RaxError {
    /// Catch-all error carrying a message.
    Generic(GenericError),
    /// The underlying C allocator reported ENOMEM.
    OutOfMemory(),
}
129
130impl RaxError {
131 pub fn generic(message: &str) -> RaxError {
132 RaxError::Generic(GenericError::new(message))
133 }
134}
135
136/// Redis has a beautiful Radix Tree implementation in ANSI C.
137/// This brings it to Rust and creates a safe Map like wrapper
138/// for it. This is very similar in utility to a BTreeMap, but
139/// RAX is likely much faster and more efficient. Naive testing
140/// showed a 2x-4x improvement for all common operations. The only
141/// disadvantage to BTreeMap is that BTree's allow much more flexibility
/// in regard to comparing keys: radix trees order keys lexicographically only.
143/// Composite keys where the non-last member is variable length could
144/// be something BTrees could handle much easier.
145///
146/// Internal RAX Node Layout
147///
148/// uint32_t iskey:1; /* Does this node contain a key? */
149/// uint32_t isnull:1; /* Associated value is NULL (don't store it). */
150/// uint32_t iscompr:1; /* Node is compressed. */
151/// uint32_t size:29; /* Number of children, or compressed string len. */
152///
153/// +----+---+--------+--------+--------+--------+
154/// |HDR |xyz| x-ptr | y-ptr | z-ptr |dataptr |
155/// +----+---+--------+--------+--------+--------+
156///
157/// As is evident above, there is no storage penalty for NULL values.
158///
159/// Keys are represented in compressed form therefore, there is no
160/// need to pump in Boxed keys or any sort of heap allocated chunk of
161/// memory. Stack or heap keys may be used from rust. Values can either
162/// be a sizeof<usize> size integer or it's a data pointer to a heap
163/// allocated / Boxed value.
164///
165/// Iterators were designed to be fast and attempt to only use stack
166/// allocated memory. RaxMap provides a model to take full advantage
167/// of stack allocated iterators through wrapping in a closure.
168///
/// # Examples
170///
171/// ```
172/// use rax::RaxMap;
173/// let mut r = RaxMap::new();
174/// r.insert(1, Box::new("my heap allocation".to_string()));
175/// r.insert(2, Box::new("my other heap allocation".to_string()));
176///
177/// r.iter(|r, iter| {
178/// // Place iterator at the first entry.
179/// if !iter.seek_min() {
180/// // EOF
181/// return;
182/// }
183///
184/// // Can test EOF at any time.
185/// if iter.eof() {
186/// // EOF
187/// return;
188/// }
189///
190/// while iter.forward() {
191/// iter.key();
192/// iter.value();
193/// }
194/// // In reverse
195/// // Place iterator at the end.
196/// if !iter.seek_max() {
197/// // EOF
198/// return;
199/// }
200/// while iter.back() {
201/// iter.key();
202/// iter.value();
203/// }
204///
205/// // Seek
206/// if !iter.seek(">=", 2) {
207/// // EOF
208/// }
209/// while iter.forward() {
210/// iter.key();
211/// iter.value();
212/// }
213/// });
214/// ```
pub struct RaxMap<K: RaxKey, V> {
    // Raw pointer to the C rax tree; owned by this map and freed on Drop.
    pub rax: *mut rax,
    // Ties the untyped C tree to its logical key/value types.
    phantom: std::marker::PhantomData<(K, V)>,
}
219
impl<K: RaxKey, V> Drop for RaxMap<K, V> {
    fn drop(&mut self) {
        unsafe {
            // Free the whole tree. NOTE(review): RaxFreeWithCallbackWrapper
            // is assumed to re-box and drop each stored `V` — confirm its
            // definition; otherwise values leak on drop.
            raxFreeWithCallback(self.rax, RaxFreeWithCallbackWrapper::<V>);
        }
    }
}
228
229impl<K: RaxKey, V> Default for RaxMap<K, V> {
230 fn default() -> Self {
231 Self::new()
232 }
233}
234
235/// Implementation of RaxMap
236impl<K: RaxKey, V> RaxMap<K, V> {
237 pub fn new() -> RaxMap<K, V> {
238 unsafe {
239 RaxMap {
240 rax: raxNew(),
241 phantom: std::marker::PhantomData,
242 }
243 }
244 }
245
246 /// Fallible constructor. Returns `Err(OutOfMemory)` when
247 /// `raxNew()` returns NULL.
248 pub fn try_new() -> Result<RaxMap<K, V>, RaxError> {
249 let ptr = unsafe { raxNew() };
250 if ptr.is_null() {
251 Err(RaxError::OutOfMemory())
252 } else {
253 Ok(RaxMap {
254 rax: ptr,
255 phantom: std::marker::PhantomData,
256 })
257 }
258 }
259
    /// The number of entries in the RAX.
    pub fn len(&self) -> u64 {
        // Delegates to the C-side entry counter.
        unsafe { raxSize(self.rax) }
    }
264
265 /// The number of entries in the RAX
266 pub fn size(&self) -> u64 {
267 unsafe { raxSize(self.rax) }
268 }
269
270 /// Returns true if the RAX is empty.
271 pub fn is_empty(&self) -> bool {
272 self.len() == 0
273 }
274
    /// Prints the Rax as ASCII art to stdout (debugging aid).
    pub fn show(&self) {
        unsafe { raxShow(self.rax) }
    }
279
    /// Insert or replace existing key with a NULL value.
    ///
    /// Returns the previous boxed value when the key already had a non-NULL
    /// value, `Ok(None)` otherwise, and `Err(OutOfMemory)` when `raxInsert`
    /// fails with ENOMEM.
    pub fn insert_null(&mut self, key: K) -> Result<Option<Box<V>>, RaxError> {
        unsafe {
            // Allocate a pointer to catch the old value.
            let old: &mut *mut u8 = &mut ptr::null_mut();

            // Integer keys are encoded big-endian so the rax can compress
            // shared prefixes, especially for 64/128-bit numbers.
            let k = key.encode();
            let (ptr, len) = k.to_buf();

            let r = raxInsert(
                self.rax,
                // The rax keeps its own (compressed) copy of the key, so a
                // borrowed pointer — likely into the stack — is enough here.
                ptr,
                len,
                std::ptr::null_mut(),
                old,
            );

            // raxInsert returns 0 both on overwrite and on error; ENOMEM in
            // errno disambiguates. NOTE(review): a stale ENOMEM from an
            // earlier syscall could misclassify an overwrite — confirm.
            if r == 0 && Errno::last() == Errno::ENOMEM {
                Err(RaxError::OutOfMemory())
            } else if old.is_null() {
                Ok(None)
            } else {
                // Box the previous value since Rax is done with it and it's our
                // responsibility now to drop it. Once this Box goes out of scope
                // the value is dropped and memory reclaimed.
                Ok(Some(Box::from_raw(*old as *mut V)))
            }
        }
    }
316
317 /// Insert a new entry into the RAX if an existing one does not exist.
318 pub fn try_insert(&mut self, key: K, data: Box<V>) -> Result<Option<Box<V>>, RaxError> {
319 unsafe {
320 // Allocate a pointer to catch the old value.
321 let old: &mut *mut u8 = &mut ptr::null_mut();
322
323 // Leak the boxed value as we hand it over to Rax to keep track of.
324 // These must be heap allocated unless we want to store sizeof(usize) or
325 // less bytes, then the value can be the pointer.
326 let value: &mut V = Box::leak(data);
327
328 // Integer values require Big Endian to allow the Rax to fully optimize
329 // storing them since it will be able to compress the prefixes especially
330 // for 64/128bit numbers.
331 let k = key.encode();
332 let (ptr, len) = k.to_buf();
333
334 let r = raxTryInsert(
335 self.rax,
336 // Grab a raw pointer to the key. Keys are most likely allocated
337 // on the stack. The rax will keep it's own copy of the key so we
338 // don't want to keep in in the heap twice and it exists in the
339 // rax in it's compressed form.
340 ptr,
341 len,
342 value as *mut V as *mut u8,
343 old,
344 );
345
346 if r == 0 {
347 if Errno::last() == Errno::ENOMEM {
348 Err(RaxError::OutOfMemory())
349 } else {
350 Ok(Some(Box::from_raw(value as *mut V)))
351 }
352 } else if old.is_null() {
353 Ok(None)
354 } else {
355 // This shouldn't happen, but if it does let's be safe and
356 // not leak memory.
357 Ok(Some(Box::from_raw(*old as *mut V)))
358 }
359 }
360 }
361
    /// Try to insert a raw pointer value into the RAX.
    ///
    /// Returns `Ok(None)` on insert, `Ok(Some(value))` (the caller's own
    /// pointer, unstored) when the key already existed, and
    /// `Err(OutOfMemory)` on allocation failure.
    ///
    /// # Safety
    ///
    /// `value` must be a valid pointer or null.
    pub unsafe fn try_insert_ptr(
        &mut self,
        key: K,
        value: *mut u8,
    ) -> Result<Option<*mut u8>, RaxError> {
        // Allocate a pointer to catch the old value.
        let old: &mut *mut u8 = &mut ptr::null_mut();

        // Integer keys are encoded big-endian so the rax can compress
        // shared prefixes, especially for 64/128-bit numbers.
        let k = key.encode();
        let (ptr, len) = k.to_buf();

        let r = raxTryInsert(
            self.rax,
            // The rax keeps its own (compressed) copy of the key, so a
            // borrowed pointer is enough here.
            ptr, len, value, old,
        );

        if r == 0 {
            if Errno::last() == Errno::ENOMEM {
                Err(RaxError::OutOfMemory())
            } else {
                // Key already present; the caller's pointer was not stored.
                Ok(Some(value))
            }
        } else if old.is_null() {
            Ok(None)
        } else {
            // This shouldn't happen (try-insert never overwrites), but if
            // it does let's be safe and not leak memory.
            Ok(Some(*old))
        }
    }
404
405 /// Insert a new entry into the RAX replacing and returning the existing
406 /// entry for the supplied key.
407 pub fn insert(&mut self, key: K, data: Box<V>) -> Result<Option<Box<V>>, RaxError> {
408 unsafe {
409 // Allocate a pointer to catch the old value.
410 let old: &mut *mut u8 = &mut ptr::null_mut();
411
412 // Leak the boxed value as we hand it over to Rax to keep track of.
413 // These must be heap allocated unless we want to store sizeof(usize) or
414 // less bytes, then the value can be the pointer.
415 let value: &mut V = Box::leak(data);
416
417 // Integer values require Big Endian to allow the Rax to fully optimize
418 // storing them since it will be able to compress the prefixes especially
419 // for 64/128bit numbers.
420 let k = key.encode();
421 let (ptr, len) = k.to_buf();
422
423 let r = raxInsert(
424 self.rax,
425 // Grab a raw pointer to the key. Keys are most likely allocated
426 // on the stack. The rax will keep it's own copy of the key so we
427 // don't want to keep in in the heap twice and it exists in the
428 // rax in it's compressed form.
429 ptr,
430 len,
431 value as *mut V as *mut u8,
432 old,
433 );
434
435 if r == 0 && Errno::last() == Errno::ENOMEM {
436 Err(RaxError::OutOfMemory())
437 } else if old.is_null() {
438 Ok(None)
439 } else {
440 // Box the previous value since Rax is done with it and it's our
441 // responsibility now to drop it. Once this Box goes out of scope
442 // the value is dropped and memory reclaimed.
443 Ok(Some(Box::from_raw(*old as *mut V)))
444 }
445 }
446 }
447
    /// Insert a raw pointer value into the RAX.
    ///
    /// Returns `Ok(Some(prev))` with the previously stored pointer on
    /// overwrite, `Ok(None)` on fresh insert, and `Err(OutOfMemory)` on
    /// allocation failure. The caller retains ownership of `value`'s
    /// allocation semantics.
    ///
    /// # Safety
    ///
    /// `value` must be a valid pointer or null.
    pub unsafe fn insert_ptr(
        &mut self,
        key: K,
        value: *mut u8,
    ) -> Result<Option<*mut u8>, RaxError> {
        // Allocate a pointer to catch the old value.
        let old: &mut *mut u8 = &mut ptr::null_mut();

        // Integer keys are encoded big-endian so the rax can compress
        // shared prefixes, especially for 64/128-bit numbers.
        let k = key.encode();
        let (ptr, len) = k.to_buf();

        let r = raxInsert(
            self.rax,
            // The rax keeps its own (compressed) copy of the key, so a
            // borrowed pointer is enough here.
            ptr, len, value, old,
        );

        // raxInsert returns 0 on overwrite as well as on error; ENOMEM
        // disambiguates. NOTE(review): a stale ENOMEM could misclassify an
        // overwrite as failure — confirm against raxInsert's errno contract.
        if r == 0 && Errno::last() == Errno::ENOMEM {
            Err(RaxError::OutOfMemory())
        } else if old.is_null() {
            Ok(None)
        } else {
            // Return the replaced pointer so the caller can reclaim it.
            Ok(Some(*old))
        }
    }
487
    /// Remove an entry from the RAX and return the associated value.
    ///
    /// The boolean is true when a key was actually removed; the option
    /// carries the re-boxed value if the removed key stored one (keys
    /// inserted via `insert_null` yield `None`).
    pub fn remove(&mut self, key: K) -> (bool, Option<Box<V>>) {
        unsafe {
            // Out-param for the value pointer of the removed key.
            let old: &mut *mut u8 = &mut ptr::null_mut();
            let k = key.encode();
            let (ptr, len) = k.to_buf();

            // raxRemove returns 1 when the key existed and was removed.
            let r = raxRemove(self.rax, ptr, len, old);

            if old.is_null() {
                (r == 1, None)
            } else {
                // Re-box the stored value; ownership returns to the caller.
                (r == 1, Some(Box::from_raw(*old as *mut V)))
            }
        }
    }
504
    /// Find a key and return whether it exists along with its value.
    ///
    /// Distinguishes "present with a NULL value" from "absent": raxFind
    /// returns NULL for a key stored via `insert_null` (-> `(true, None)`)
    /// and the sentinel `raxNotFound` for a missing key (-> `(false, None)`).
    pub fn find_exists(&self, key: K) -> (bool, Option<&V>) {
        unsafe {
            let k = key.encode();
            let (ptr, len) = k.to_buf();

            let value = raxFind(self.rax, ptr, len);

            if value.is_null() {
                // Key exists but was stored with no value.
                (true, None)
            } else if value == raxNotFound {
                // Sentinel pointer: the key is not in the tree.
                (false, None)
            } else {
                // Borrow the stored value without taking ownership; while the
                // key is in the RAX the tree owns the allocation, so we must
                // not drop it here.
                (true, Some(&*(value as *const V)))
            }
        }
    }
525
526 /// Same as get but added for semantics parity.
527 pub fn find(&self, key: K) -> Option<&V> {
528 unsafe {
529 let k = key.encode();
530 let (ptr, len) = k.to_buf();
531
532 let value = raxFind(self.rax, ptr, len);
533
534 if value.is_null() || value == raxNotFound {
535 None
536 } else {
537 // transmute to the value so we don't drop the actual value accidentally.
538 // While the key associated to the value is in the RAX then we cannot
539 // drop it.
540 Some(&*(value as *const V))
541 }
542 }
543 }
544
545 /// Get the value associated with the key.
546 pub fn get(&self, key: K) -> Option<&V> {
547 unsafe {
548 let k = key.encode();
549 let (ptr, len) = k.to_buf();
550
551 let value = raxFind(self.rax, ptr, len);
552
553 if value.is_null() || value == raxNotFound {
554 None
555 } else {
556 // transmute to the value so we don't drop the actual value accidentally.
557 // While the key associated to the value is in the RAX then we cannot
558 // drop it.
559 Some(&*(value as *const V))
560 }
561 }
562 }
563
564 /// Determines if the supplied key exists in the Rax.
565 pub fn exists(&self, key: K) -> bool {
566 unsafe {
567 let k = key.encode();
568 let (ptr, len) = k.to_buf();
569
570 let value = raxFind(self.rax, ptr, len);
571
572 !(value.is_null() || value == raxNotFound)
573 }
574 }
575
    /// Seek to the minimum key and execute the closure.
    #[inline]
    pub fn seek_min<F>(&mut self, f: F)
    where
        F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>),
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is a valid value — confirm
            // against its definition.
            let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
            // raxStart must run exactly once before any seek/next/prev call;
            // without a subsequent seek, next/prev just report EOF.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            iter.seek_min();
            // The iterator never escapes this frame; the closure borrows it.
            f(self, &mut iter)
        }
    }
594
    /// Seek to the minimum key and execute the closure, returning a result.
    #[inline]
    pub fn seek_min_result<R, F>(&mut self, f: F) -> Result<R, RaxError>
    where
        F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>) -> Result<R, RaxError>,
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
            // raxStart must run once before any seek/next/prev call.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            iter.seek_min();
            // The closure's Result is propagated to the caller unchanged.
            f(self, &mut iter)
        }
    }
613
    /// Seek to the maximum key and execute the closure.
    #[inline]
    pub fn seek_max<F>(&mut self, f: F)
    where
        F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>),
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
            // raxStart must run once before any seek/next/prev call.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            iter.seek_max();
            // The iterator never escapes this frame; the closure borrows it.
            f(self, &mut iter)
        }
    }
632
    /// Seek to the maximum key and execute the closure, returning a result.
    #[inline]
    pub fn seek_max_result<R, F>(&mut self, f: F) -> Result<R, RaxError>
    where
        F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>) -> Result<R, RaxError>,
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
            // raxStart must run once before any seek/next/prev call.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            iter.seek_max();
            // The closure's Result is propagated to the caller unchanged.
            f(self, &mut iter)
        }
    }
651
    /// Seek to the given key using the specified operator (one of
    /// `>`, `>=`, `<`, `<=`, `=`, `^`, `$`) and execute the closure.
    #[inline]
    pub fn seek<F>(&mut self, op: &str, key: K, f: F)
    where
        F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>),
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
            // raxStart must run once before any seek/next/prev call.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            iter.seek(op, key);
            // The iterator never escapes this frame; the closure borrows it.
            f(self, &mut iter)
        }
    }
670
    /// Seek to the given key using the specified operator and execute the
    /// closure, returning a result.
    #[inline]
    pub fn seek_result<R, F>(&mut self, op: &str, key: K, f: F) -> Result<R, RaxError>
    where
        F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>) -> Result<R, RaxError>,
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
            // raxStart must run once before any seek/next/prev call.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            iter.seek(op, key);
            // The closure's Result is propagated to the caller unchanged.
            f(self, &mut iter)
        }
    }
689
    /// Create an unseeked iterator and execute the closure. The closure
    /// must call one of the seek methods before next/prev return anything.
    #[inline]
    pub fn iter<F>(&mut self, f: F)
    where
        F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>),
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
            // raxStart must run once; without a subsequent raxSeek the
            // raxPrev()/raxNext() functions just return EOF.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            // The iterator never escapes this frame; the closure borrows it.
            f(self, &mut iter)
        }
    }
707
    /// Create an unseeked iterator and execute the closure, returning a
    /// result. The closure must seek before next/prev return anything.
    #[inline]
    pub fn iter_result<F, R>(&mut self, f: F) -> Result<R, RaxError>
    where
        F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>) -> Result<R, RaxError>,
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
            // raxStart must run once; without a subsequent raxSeek the
            // raxPrev()/raxNext() functions just return EOF.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            // The closure's Result is propagated to the caller unchanged.
            f(self, &mut iter)
        }
    }
725}
726
727/// RaxMap but without the values. The "isnull" bit will be set for
728/// all entries.
/// # Examples
730///
731/// ```
732/// use rax::RaxSet;
733/// let mut r = RaxSet::new();
734/// r.insert(1);
735/// r.insert(2);
736///
737/// r.iter(|r, iter| {
738/// // Place iterator at the first entry.
739/// if !iter.seek_min() {
740/// // EOF
741/// return;
742/// }
743///
744/// // Can test EOF at any time.
745/// if iter.eof() {
746/// // EOF
747/// return;
748/// }
749///
750/// while iter.forward() {
751/// iter.key();
752/// }
753/// // In reverse
754/// // Place iterator at the end.
755/// if !iter.seek_max() {
756/// // EOF
757/// return;
758/// }
759/// while iter.back() {
760/// iter.key();
761/// }
762///
763/// // Seek
764/// if !iter.seek(">=", 2) {
765/// // EOF
766/// }
767/// while iter.forward() {
768/// iter.key();
769/// }
770/// });
771/// ```
pub struct RaxSet<K: RaxKey> {
    // Raw pointer to the C rax tree; owned by this set and freed on Drop.
    rax: *mut rax,
    // Ties the untyped C tree to its logical key type.
    _marker: std::marker::PhantomData<K>,
}
776
impl<K: RaxKey> Drop for RaxSet<K> {
    fn drop(&mut self) {
        unsafe {
            // Plain raxFree suffices: set entries store NULL values, so
            // there are no per-entry heap values to reclaim.
            raxFree(self.rax)
        }
    }
}
785
786impl<K: RaxKey> Default for RaxSet<K> {
787 fn default() -> Self {
788 Self::new()
789 }
790}
791
792/// Implementation of RaxSet.
793impl<K: RaxKey> RaxSet<K> {
794 pub fn new() -> RaxSet<K> {
795 RaxSet {
796 rax: unsafe { raxNew() },
797 _marker: std::marker::PhantomData,
798 }
799 }
800
801 /// Fallible constructor. Returns `Err(OutOfMemory)` when
802 /// `raxNew()` returns NULL.
803 pub fn try_new() -> Result<RaxSet<K>, RaxError> {
804 let ptr = unsafe { raxNew() };
805 if ptr.is_null() {
806 Err(RaxError::OutOfMemory())
807 } else {
808 Ok(RaxSet {
809 rax: ptr,
810 _marker: std::marker::PhantomData,
811 })
812 }
813 }
814
    /// The number of entries in the RAX.
    #[inline]
    pub fn len(&self) -> u64 {
        // Delegates to the C-side entry counter.
        unsafe { raxSize(self.rax) }
    }
820
821 /// The number of entries in the RAX
822 #[inline]
823 pub fn size(&self) -> u64 {
824 unsafe { raxSize(self.rax) }
825 }
826
    /// Prints the Rax as ASCII art to stdout (debugging aid).
    #[inline]
    pub fn show(&self) {
        unsafe { raxShow(self.rax) }
    }
832
833 /// Returns true if the RAX is empty.
834 pub fn is_empty(&self) -> bool {
835 self.len() == 0
836 }
837
    /// Insert a key into the set.
    ///
    /// Returns `Ok(true)` when the key was newly added, `Ok(false)` when it
    /// already existed (no replacement happens — raxTryInsert never
    /// overwrites), and `Err(OutOfMemory)` on allocation failure.
    pub fn insert(&mut self, key: K) -> Result<bool, RaxError> {
        unsafe {
            // Integer keys are encoded big-endian so the rax can compress
            // shared prefixes, especially for 64/128-bit numbers.
            let k = key.encode();
            let (ptr, len) = k.to_buf();

            let r = raxTryInsert(
                self.rax,
                // The rax keeps its own (compressed) copy of the key, so a
                // borrowed pointer is enough here.
                ptr,
                len,
                // Sets store no value (isnull bit) and ignore the old-value
                // out-param.
                std::ptr::null_mut(),
                std::ptr::null_mut(),
            );

            if r == 0 {
                if Errno::last() == Errno::ENOMEM {
                    Err(RaxError::OutOfMemory())
                } else {
                    // Key already present.
                    Ok(false)
                }
            } else {
                Ok(true)
            }
        }
    }
871
    /// Remove a key from the set; returns true when it was present.
    pub fn remove(&mut self, key: K) -> bool {
        unsafe {
            let k = key.encode();
            let (ptr, len) = k.to_buf();

            // The old-value out-param is discarded: set entries store NULL.
            let r = raxRemove(self.rax, ptr, len, &mut std::ptr::null_mut());

            // raxRemove returns 1 when the key existed and was removed.
            r == 1
        }
    }
882
    /// Determines if the supplied key exists in the Rax.
    pub fn exists(&self, key: K) -> bool {
        unsafe {
            let k = key.encode();
            let (ptr, len) = k.to_buf();

            let value = raxFind(self.rax, ptr, len);

            // Set entries store NULL values, so raxFind returns NULL for a
            // present key; only the raxNotFound sentinel means "missing".
            value != raxNotFound
        }
    }
894
    /// Seek to the minimum key and execute the closure.
    #[inline]
    pub fn seek_min<F>(&mut self, f: F)
    where
        F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>),
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
            // raxStart must run once before any seek/next/prev call.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            iter.seek_min();
            // The iterator never escapes this frame; the closure borrows it.
            f(self, &mut iter)
        }
    }
913
    /// Seek to the minimum key and execute the closure, returning a result.
    #[inline]
    pub fn seek_min_result<R, F>(&mut self, f: F) -> Result<R, RaxError>
    where
        F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>) -> Result<R, RaxError>,
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
            // raxStart must run once before any seek/next/prev call.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            iter.seek_min();
            // The closure's Result is propagated to the caller unchanged.
            f(self, &mut iter)
        }
    }
932
    /// Seek to the maximum key and execute the closure.
    #[inline]
    pub fn seek_max<F>(&mut self, f: F)
    where
        F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>),
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
            // raxStart must run once before any seek/next/prev call.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            iter.seek_max();
            // The iterator never escapes this frame; the closure borrows it.
            f(self, &mut iter)
        }
    }
951
    /// Seek to the maximum key and execute the closure, returning a result.
    #[inline]
    pub fn seek_max_result<R, F>(&mut self, f: F) -> Result<R, RaxError>
    where
        F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>) -> Result<R, RaxError>,
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
            // raxStart must run once before any seek/next/prev call.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            iter.seek_max();
            // The closure's Result is propagated to the caller unchanged.
            f(self, &mut iter)
        }
    }
970
    /// Seek to the given key using the specified operator (one of
    /// `>`, `>=`, `<`, `<=`, `=`, `^`, `$`) and execute the closure.
    #[inline]
    pub fn seek<F>(&mut self, op: &str, key: K, f: F)
    where
        F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>),
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
            // raxStart must run once before any seek/next/prev call.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            iter.seek(op, key);
            // The iterator never escapes this frame; the closure borrows it.
            f(self, &mut iter)
        }
    }
989
    /// Seek to the given key using the specified operator and execute the
    /// closure, returning a result.
    #[inline]
    pub fn seek_result<R, F>(&mut self, op: &str, key: K, f: F) -> Result<R, RaxError>
    where
        F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>) -> Result<R, RaxError>,
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
            // raxStart must run once before any seek/next/prev call.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            iter.seek(op, key);
            // The closure's Result is propagated to the caller unchanged.
            f(self, &mut iter)
        }
    }
1008
    /// Create an unseeked iterator and execute the closure. The closure
    /// must call one of the seek methods before next/prev return anything.
    #[inline]
    pub fn iter<F>(&mut self, f: F)
    where
        F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>),
    {
        unsafe {
            // Stack-allocate the iterator. NOTE(review): zeroed+assume_init
            // presumes an all-zero RaxIterator is valid — confirm.
            let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
            // raxStart must run once; without a subsequent raxSeek the
            // raxPrev()/raxNext() functions just return EOF.
            raxStart(&mut iter as *mut _ as *mut _, self.rax);
            // The iterator never escapes this frame; the closure borrows it.
            f(self, &mut iter)
        }
    }
1026
1027 /// Create an iterator and execute the closure, returning a result.
1028 #[inline]
1029 pub fn iter_result<F, R>(&mut self, f: F) -> Result<R, RaxError>
1030 where
1031 F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>) -> Result<R, RaxError>,
1032 {
1033 unsafe {
1034 // Allocate stack memory.
1035 // Initialize a Rax iterator. This call should be performed a single time
1036 // to initialize the iterator, and must be followed by a raxSeek() call,
1037 // otherwise the raxPrev()/raxNext() functions will just return EOF.
1038 let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
1039 raxStart(&mut iter as *mut _ as *mut _, self.rax);
1040 // Borrow stack iterator and execute the closure.
1041 f(self, &mut iter)
1042 }
1043 }
1044}
1045
1046// Same as RaxMap except values are not pointers to heap allocations.
1047// Instead the "data pointer" in the RAX is the value. This means we
1048// have sizeof<usize> worth of bytes to play with. Perhaps, in the future
1049// we could create data values of any size, but for now we have the size
1050// of pointers to work with or null which has no added size to a rax node.
1051//pub struct RaxIntMap<K: RaxKey> {
1052// rax: *mut rax,
1053// _marker: std::marker::PhantomData<K>,
1054//}
1055//
1056//impl<K: RaxKey> RaxIntMap<K> {
1057// pub fn new() -> RaxIntMap<K> {
1058// RaxIntMap {
1059// rax: unsafe { raxNew() },
1060// _marker: std::marker::PhantomData,
1061// }
1062// }
1063//
1064// /// Insert a new entry into the RAX replacing and returning the existing
1065// /// entry for the supplied key.
1066// pub fn insert(&mut self, key: K, value: usize) -> Result<Option<usize>, RaxError> {
1067// unsafe {
1068// // Allocate a pointer to catch the old value.
1069// let old: &mut *mut u8 = &mut ptr::null_mut();
1070//
1071// // Integer values require Big Endian to allow the Rax to fully optimize
1072// // storing them since it will be able to compress the prefixes especially
1073// // for 64/128bit numbers.
1074// let k = key.encode();
1075// let (ptr, len) = k.to_buf();
1076//
1077// let r = raxInsert(
1078// self.rax,
//            // Grab a raw pointer to the key. Keys are most likely allocated
//            // on the stack. The rax will keep its own copy of the key, so we
//            // don't want to keep it in the heap twice; it exists in the
//            // rax in its compressed form.
1083// ptr,
1084// len,
1085// &value as *const _ as *mut u8,
1086// old,
1087// );
1088//
1089// if r == 0 && Errno::last() == Errno::ENOMEM {
1090// Err(RaxError::OutOfMemory())
1091// } else if old.is_null() {
1092// Ok(None)
1093// } else {
1094// Ok(Some(std::mem::transmute(*old)))
1095// }
1096// }
1097// }
1098//
1099// /// Insert a new entry into the RAX if an existing one does not exist.
1100// pub fn try_insert(&mut self, key: K, data: usize) -> Result<Option<usize>, RaxError> {
1101// unsafe {
1102// // Allocate a pointer to catch the old value.
1103// let old: &mut *mut u8 = &mut ptr::null_mut();
1104//
1105// // Integer values require Big Endian to allow the Rax to fully optimize
1106// // storing them since it will be able to compress the prefixes especially
1107// // for 64/128bit numbers.
1108// let k = key.encode();
1109// let (ptr, len) = k.to_buf();
1110//
1111// let r = raxTryInsert(
1112// self.rax,
//            // Grab a raw pointer to the key. Keys are most likely allocated
//            // on the stack. The rax will keep its own copy of the key, so we
//            // don't want to keep it in the heap twice; it exists in the
//            // rax in its compressed form.
1117// ptr,
1118// len,
1119// &data as *const _ as *mut u8,
1120// old,
1121// );
1122//
1123// if r == 0 {
1124// if Errno::last() == Errno::ENOMEM {
1125// Err(RaxError::OutOfMemory())
1126// } else if old.is_null() {
1127// Ok(None)
1128// } else {
1129// Ok(Some(transmute(*old)))
1130// }
1131// } else if old.is_null() {
1132// Ok(None)
1133// } else {
1134// Ok(Some(std::mem::transmute(*old)))
1135// }
1136// }
1137// }
1138//}
1139
/// A type usable as a rax key: encoded to a byte buffer on insert/seek and
/// reconstructed from one when keys are read back out of the tree.
pub trait RaxKey<RHS = Self>: Clone + Default + std::fmt::Debug {
    /// The wire representation of the key (e.g. a big-endian integer for
    /// numeric keys) whose bytes are what actually gets stored.
    type Output: RaxKey;

    /// Convert into the storable representation (consumes `self`).
    fn encode(self) -> Self::Output;

    /// Pointer/length view of this (already encoded) key's bytes.
    fn to_buf(&self) -> (*const u8, usize);

    /// # Safety
    ///
    /// `ptr` must be valid for reads of `len` bytes.
    unsafe fn from_buf(ptr: *const u8, len: usize) -> RHS;
}
1152
1153impl RaxKey for f32 {
1154 type Output = u32;
1155
1156 #[inline]
1157 fn encode(self) -> Self::Output {
1158 // Encode as u32 Big Endian
1159 self.to_bits().to_be()
1160 }
1161
1162 #[inline]
1163 fn to_buf(&self) -> (*const u8, usize) {
1164 // This should never get called since we represent as a u32
1165 (
1166 self as *const _ as *const u8,
1167 std::mem::size_of::<Self::Output>(),
1168 )
1169 }
1170
1171 #[inline]
1172 unsafe fn from_buf(ptr: *const u8, len: usize) -> f32 {
1173 if len != size_of::<Self::Output>() {
1174 return Self::default();
1175 }
1176 unsafe {
1177 // We used a BigEndian u32 to encode so let's reverse it
1178 f32::from_bits(u32::from_be(
1179 *(ptr as *mut [u8; std::mem::size_of::<Self::Output>()] as *mut u32),
1180 ))
1181 }
1182 }
1183}
1184
1185impl RaxKey for f64 {
1186 type Output = u64;
1187
1188 #[inline]
1189 fn encode(self) -> Self::Output {
1190 // Encode as u64 Big Endian
1191 self.to_bits().to_be()
1192 }
1193
1194 #[inline]
1195 fn to_buf(&self) -> (*const u8, usize) {
1196 // This should never get called since we represent as a u64
1197 (self as *const _ as *const u8, size_of::<Self::Output>())
1198 }
1199
1200 #[inline]
1201 unsafe fn from_buf(ptr: *const u8, len: usize) -> f64 {
1202 if len != size_of::<Self::Output>() {
1203 return Self::default();
1204 }
1205 unsafe {
1206 // We used a BigEndian u64 to encode so let's reverse it
1207 f64::from_bits(u64::from_be(
1208 *(ptr as *mut [u8; size_of::<Self::Output>()] as *mut u64),
1209 ))
1210 }
1211 }
1212}
1213
1214impl RaxKey for isize {
1215 type Output = isize;
1216
1217 #[inline]
1218 fn encode(self) -> Self::Output {
1219 self.to_be()
1220 }
1221
1222 #[inline]
1223 fn to_buf(&self) -> (*const u8, usize) {
1224 (self as *const _ as *const u8, size_of::<Self::Output>())
1225 }
1226
1227 #[inline]
1228 unsafe fn from_buf(ptr: *const u8, len: usize) -> isize {
1229 if len != size_of::<Self::Output>() {
1230 return Self::default();
1231 }
1232 unsafe { isize::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut isize)) }
1233 }
1234}
1235
1236impl RaxKey for usize {
1237 type Output = usize;
1238
1239 #[inline]
1240 fn encode(self) -> Self::Output {
1241 self.to_be()
1242 }
1243
1244 #[inline]
1245 fn to_buf(&self) -> (*const u8, usize) {
1246 (
1247 self as *const _ as *const u8,
1248 std::mem::size_of::<Self::Output>(),
1249 )
1250 }
1251
1252 #[inline]
1253 unsafe fn from_buf(ptr: *const u8, len: usize) -> usize {
1254 if len != size_of::<Self::Output>() {
1255 return Self::default();
1256 }
1257 unsafe {
1258 usize::from_be(*(ptr as *mut [u8; std::mem::size_of::<Self::Output>()] as *mut usize))
1259 }
1260 }
1261}
1262
1263impl RaxKey for i16 {
1264 type Output = i16;
1265
1266 #[inline]
1267 fn encode(self) -> Self::Output {
1268 self.to_be()
1269 }
1270
1271 #[inline]
1272 fn to_buf(&self) -> (*const u8, usize) {
1273 (self as *const _ as *const u8, size_of::<Self::Output>())
1274 }
1275
1276 #[inline]
1277 unsafe fn from_buf(ptr: *const u8, len: usize) -> Self {
1278 if len != size_of::<Self::Output>() {
1279 return Self::default();
1280 }
1281 unsafe { i16::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut i16)) }
1282 }
1283}
1284
1285impl RaxKey for u16 {
1286 type Output = u16;
1287
1288 #[inline]
1289 fn encode(self) -> Self::Output {
1290 self.to_be()
1291 }
1292
1293 #[inline]
1294 fn to_buf(&self) -> (*const u8, usize) {
1295 (self as *const _ as *const u8, size_of::<Self::Output>())
1296 }
1297
1298 #[inline]
1299 unsafe fn from_buf(ptr: *const u8, len: usize) -> u16 {
1300 if len != size_of::<Self::Output>() {
1301 return Self::default();
1302 }
1303 unsafe { u16::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut u16)) }
1304 }
1305}
1306
1307impl RaxKey for i32 {
1308 type Output = i32;
1309
1310 #[inline]
1311 fn encode(self) -> Self::Output {
1312 self.to_be()
1313 }
1314
1315 #[inline]
1316 fn to_buf(&self) -> (*const u8, usize) {
1317 (self as *const _ as *const u8, size_of::<Self::Output>())
1318 }
1319
1320 #[inline]
1321 unsafe fn from_buf(ptr: *const u8, len: usize) -> i32 {
1322 if len != size_of::<Self::Output>() {
1323 return Self::default();
1324 }
1325 unsafe { i32::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut i32)) }
1326 }
1327}
1328
1329impl RaxKey for u32 {
1330 type Output = u32;
1331
1332 #[inline]
1333 fn encode(self) -> Self::Output {
1334 self.to_be()
1335 }
1336
1337 #[inline]
1338 fn to_buf(&self) -> (*const u8, usize) {
1339 (self as *const _ as *const u8, size_of::<Self::Output>())
1340 }
1341
1342 #[inline]
1343 unsafe fn from_buf(ptr: *const u8, len: usize) -> u32 {
1344 if len != size_of::<Self::Output>() {
1345 return Self::default();
1346 }
1347 unsafe { u32::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut u32)) }
1348 }
1349}
1350
1351impl RaxKey for i64 {
1352 type Output = i64;
1353
1354 #[inline]
1355 fn encode(self) -> Self::Output {
1356 self.to_be()
1357 }
1358
1359 #[inline]
1360 fn to_buf(&self) -> (*const u8, usize) {
1361 (self as *const _ as *const u8, size_of::<Self::Output>())
1362 }
1363
1364 #[inline]
1365 unsafe fn from_buf(ptr: *const u8, len: usize) -> i64 {
1366 if len != size_of::<Self::Output>() {
1367 return Self::default();
1368 }
1369 unsafe { i64::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut i64)) }
1370 }
1371}
1372
1373impl RaxKey for u64 {
1374 type Output = u64;
1375
1376 #[inline]
1377 fn encode(self) -> Self::Output {
1378 self.to_be()
1379 }
1380
1381 #[inline]
1382 fn to_buf(&self) -> (*const u8, usize) {
1383 (self as *const _ as *const u8, size_of::<Self::Output>())
1384 }
1385
1386 #[inline]
1387 unsafe fn from_buf(ptr: *const u8, len: usize) -> u64 {
1388 if len != size_of::<Self::Output>() {
1389 return Self::default();
1390 }
1391 unsafe { u64::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut u64)) }
1392 }
1393}
1394
1395impl RaxKey for i128 {
1396 type Output = i128;
1397
1398 #[inline]
1399 fn encode(self) -> Self::Output {
1400 self.to_be()
1401 }
1402
1403 #[inline]
1404 fn to_buf(&self) -> (*const u8, usize) {
1405 (self as *const _ as *const u8, size_of::<Self::Output>())
1406 }
1407
1408 #[inline]
1409 unsafe fn from_buf(ptr: *const u8, len: usize) -> i128 {
1410 if len != size_of::<Self::Output>() {
1411 return Self::default();
1412 }
1413 unsafe { i128::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut i128)) }
1414 }
1415}
1416
1417impl RaxKey for u128 {
1418 type Output = u128;
1419
1420 #[inline]
1421 fn encode(self) -> Self::Output {
1422 self.to_be()
1423 }
1424
1425 #[inline]
1426 fn to_buf(&self) -> (*const u8, usize) {
1427 (self as *const _ as *const u8, size_of::<Self::Output>())
1428 }
1429
1430 #[inline]
1431 unsafe fn from_buf(ptr: *const u8, len: usize) -> u128 {
1432 if len != size_of::<Self::Output>() {
1433 return Self::default();
1434 }
1435 unsafe { u128::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut u128)) }
1436 }
1437}
1438
1439impl RaxKey for Vec<u8> {
1440 type Output = Vec<u8>;
1441
1442 #[inline]
1443 fn encode(self) -> Vec<u8> {
1444 self
1445 }
1446
1447 #[inline]
1448 fn to_buf(&self) -> (*const u8, usize) {
1449 (self.as_ptr(), self.len())
1450 }
1451
1452 #[inline]
1453 unsafe fn from_buf(ptr: *const u8, len: usize) -> Vec<u8> {
1454 unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) }
1455 }
1456}
1457
impl<'a> RaxKey for &'a [u8] {
    type Output = &'a [u8];

    /// Byte-slice keys are stored verbatim.
    #[inline]
    fn encode(self) -> &'a [u8] {
        self
    }

    #[inline]
    fn to_buf(&self) -> (*const u8, usize) {
        (self.as_ptr(), self.len())
    }

    /// Borrow the key bytes directly out of the rax.
    ///
    /// NOTE(review): the returned slice's lifetime `'a` is not actually tied
    /// to the rax that owns the bytes, so the slice must not be held across
    /// any mutation or drop of the tree — confirm callers copy promptly.
    #[inline]
    unsafe fn from_buf(ptr: *const u8, len: usize) -> &'a [u8] {
        unsafe { std::slice::from_raw_parts(ptr, len) }
    }
}
1476
1477//impl RaxKey for SDS {
1478// type Output = SDS;
1479//
1480// #[inline]
1481// fn encode(self) -> Self::Output {
1482// self
1483// }
1484//
1485// #[inline]
1486// fn to_buf(&self) -> (*const u8, usize) {
1487// (self.as_ptr(), self.len())
1488// }
1489//
1490// #[inline]
1491// unsafe fn from_buf(ptr: *const u8, len: usize) -> SDS {
1492// SDS::from_ptr(ptr, len)
1493// }
1494//}
1495
1496impl<'a> RaxKey for &'a str {
1497 type Output = &'a str;
1498
1499 #[inline]
1500 fn encode(self) -> Self::Output {
1501 self
1502 }
1503
1504 #[inline]
1505 fn to_buf(&self) -> (*const u8, usize) {
1506 ((*self).as_ptr(), self.len())
1507 }
1508
1509 #[inline]
1510 unsafe fn from_buf(ptr: *const u8, len: usize) -> &'a str {
1511 unsafe { std::str::from_utf8(std::slice::from_raw_parts(ptr, len)).unwrap_or_default() }
1512 }
1513}
1514
#[repr(C)]
pub struct RaxIterator<K: RaxKey, V> {
    // Mirror of the C `raxIterator` struct: this value is cast to
    // `*const raxIterator` and handed to raxStart/raxSeek/raxNext/etc.,
    // so the field order, field types, and #[repr(C)] must stay in sync
    // with rax.h — do not reorder or resize fields.
    pub flags: libc::c_int,
    pub rt: *mut rax,
    // Current key bytes. Points at `key_static_string` for short keys and
    // at a heap buffer once the key outgrows it (see the Drop impl below).
    pub key: *mut u8,
    pub data: *mut libc::c_void,
    pub key_len: libc::size_t,
    pub key_max: libc::size_t,
    pub key_static_string: [u8; 128],
    pub node: *mut raxNode,
    pub stack: raxStack,
    pub node_cb: Option<raxNodeCallback>,
    // Zero-sized: ties the key/value type parameters to the iterator.
    _marker: std::marker::PhantomData<(K, V)>,
}
1529
/// Free up memory
impl<K: RaxKey, V> Drop for RaxIterator<K, V> {
    fn drop(&mut self) {
        unsafe {
            if self.key_max == RAX_ITER_STATIC_LEN as usize {
                // Key wasn't heap allocated.
                // Force it back to the current address.
                // (The struct may have been moved since raxStart pointed
                // `key` at the inline buffer; re-point it so raxStop does
                // not mistake the stale address for a heap allocation.)
                self.key = self.key_static_string.as_mut_ptr();
            }
            // raxStop releases any iterator-owned memory (heap key buffer,
            // auxiliary stack) on the C side.
            raxStop(self as *const _ as *const raxIterator);
        }
    }
}
1543
1544/// Implement std::Iterator
1545impl<K: RaxKey, V: 'static> Iterator for RaxIterator<K, V> {
1546 type Item = (K, Option<&'static V>);
1547
1548 fn next(&mut self) -> Option<<Self as Iterator>::Item> {
1549 unsafe {
1550 if raxNext(self as *const _ as *const raxIterator) == 1 {
1551 let data: *mut libc::c_void = self.data;
1552 if data.is_null() {
1553 None
1554 } else {
1555 let val = data as *const V;
1556 if val.is_null() {
1557 Some((self.key(), None))
1558 } else {
1559 Some((self.key(), Some(&*val)))
1560 }
1561 }
1562 } else {
1563 None
1564 }
1565 }
1566 }
1567}
1568
1569/// Implement std::DoubleEndedIterator
1570impl<K: RaxKey, V: 'static> DoubleEndedIterator for RaxIterator<K, V> {
1571 fn next_back(&mut self) -> Option<<Self as Iterator>::Item> {
1572 unsafe {
1573 if raxPrev(self as *const _ as *const raxIterator) == 1 {
1574 let data: *mut libc::c_void = self.data;
1575 if data.is_null() {
1576 None
1577 } else {
1578 let val = data as *const V;
1579 if val.is_null() {
1580 Some((self.key(), None))
1581 } else {
1582 Some((self.key(), Some(&*val)))
1583 }
1584 }
1585 } else {
1586 None
1587 }
1588 }
1589 }
1590}
1591
1592/// Core iterator implementation
1593impl<K: RaxKey, V> RaxIterator<K, V> {
1594 pub fn new(r: RaxMap<K, V>) -> RaxIterator<K, V> {
1595 unsafe {
1596 // Allocate stack memory.
1597 // Initialize a Rax iterator. This call should be performed a single time
1598 // to initialize the iterator, and must be followed by a raxSeek() call,
1599 // otherwise the raxPrev()/raxNext() functions will just return EOF.
1600 let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
1601 raxStart(&mut iter as *mut _ as *mut _, r.rax);
1602 iter
1603 }
1604 }
1605
1606 pub fn print_ptr(&self) {
1607 println!("ptr = {:p}", self);
1608 println!("ptr = {:p}", self as *const _ as *const raxIterator);
1609 }
1610
1611 #[inline]
1612 pub fn seek_min(&self) -> bool {
1613 unsafe {
1614 if raxSeek(
1615 self as *const _ as *const raxIterator,
1616 BEGIN.as_ptr(),
1617 std::ptr::null(),
1618 0,
1619 ) == 1
1620 {
1621 self.forward()
1622 } else {
1623 false
1624 }
1625 }
1626 }
1627
1628 #[inline]
1629 pub fn seek_max(&self) -> bool {
1630 unsafe {
1631 if raxSeek(
1632 self as *const _ as *const raxIterator,
1633 END.as_ptr(),
1634 std::ptr::null(),
1635 0,
1636 ) == 1
1637 {
1638 self.back()
1639 } else {
1640 false
1641 }
1642 }
1643 }
1644
1645 #[inline]
1646 pub fn back(&self) -> bool {
1647 unsafe { raxPrev(self as *const _ as *const raxIterator) == 1 }
1648 }
1649
1650 #[inline]
1651 pub fn forward(&self) -> bool {
1652 unsafe { raxNext(self as *const _ as *const raxIterator) == 1 }
1653 }
1654
1655 /// Key at current position
1656 #[inline]
1657 pub fn key(&self) -> K {
1658 unsafe { K::from_buf(self.key, self.key_len) }
1659 }
1660
1661 /// Data at current position.
1662 #[inline]
1663 pub fn value(&self) -> Option<&V> {
1664 unsafe {
1665 let data: *mut libc::c_void = self.data;
1666 if data.is_null() {
1667 None
1668 } else {
1669 Some(&*(data as *const V))
1670 }
1671 }
1672 }
1673
1674 #[inline]
1675 pub fn lesser(&self, key: K) -> bool {
1676 self.seek(LESSER, key)
1677 }
1678
1679 #[inline]
1680 pub fn lesser_equal(&self, key: K) -> bool {
1681 self.seek(LESSER_EQUAL, key)
1682 }
1683
1684 #[inline]
1685 pub fn greater(&self, key: K) -> bool {
1686 self.seek(GREATER, key)
1687 }
1688
1689 #[inline]
1690 pub fn greater_equal(&self, key: K) -> bool {
1691 self.seek(GREATER_EQUAL, key)
1692 }
1693
1694 #[inline]
1695 pub fn seek(&self, op: &str, key: K) -> bool {
1696 unsafe {
1697 let k = key.encode();
1698 let (p, len) = k.to_buf();
1699 raxSeek(self as *const _ as *const raxIterator, op.as_ptr(), p, len) == 1
1700 && self.flags & RAX_ITER_EOF != 0
1701 }
1702 }
1703
1704 #[inline]
1705 pub fn seek_raw(&self, op: &str, key: K) -> i32 {
1706 unsafe {
1707 let k = key.encode();
1708 let (p, len) = k.to_buf();
1709 raxSeek(self as *const _ as *const raxIterator, op.as_ptr(), p, len)
1710 }
1711 }
1712
1713 #[inline]
1714 pub fn seek_bytes(&self, op: &str, ele: &[u8]) -> bool {
1715 unsafe {
1716 raxSeek(
1717 self as *const _ as *const raxIterator,
1718 op.as_ptr(),
1719 ele.as_ptr(),
1720 ele.len() as libc::size_t,
1721 ) == 1
1722 }
1723 }
1724
1725 /// Return if the iterator is in an EOF state. This happens when raxSeek()
1726 /// failed to seek an appropriate element, so that raxNext() or raxPrev()
1727 /// will return zero, or when an EOF condition was reached while iterating
1728 /// with next() and prev().
1729 #[inline]
1730 pub fn eof(&self) -> bool {
1731 self.flags & RAX_ITER_EOF != 0
1732 }
1733}
1734
1735impl fmt::Display for RaxError {
1736 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1737 match *self {
1738 // Both underlying errors already impl `Display`, so we defer to
1739 // their implementations.
1740 RaxError::Generic(ref err) => write!(f, "{}", err),
1741 RaxError::OutOfMemory() => write!(f, "out of memory"),
1742 }
1743 }
1744}
1745
1746impl error::Error for RaxError {
1747 fn source(&self) -> Option<&(dyn error::Error + 'static)> {
1748 match *self {
1749 // N.B. Both of these implicitly cast `err` from their concrete
1750 // types (either `&io::Error` or `&num::ParseIntError`)
1751 // to a trait object `&Error`. This works because both error types
1752 // implement `Error`.
1753 RaxError::Generic(ref err) => Some(err),
1754 RaxError::OutOfMemory() => Some(self),
1755 }
1756 }
1757}
1758
/// Catch-all error carrying a human-readable message.
#[derive(Debug)]
pub struct GenericError {
    // The message rendered by Display ("Store error: {message}").
    message: String,
}
1763
1764impl GenericError {
1765 pub fn new(message: &str) -> GenericError {
1766 GenericError {
1767 message: String::from(message),
1768 }
1769 }
1770}
1771
1772impl fmt::Display for GenericError {
1773 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1774 write!(f, "Store error: {}", self.message)
1775 }
1776}
1777
1778impl error::Error for GenericError {}
1779
// Opaque handle to the C `rax` tree; only ever used behind raw pointers.
// NOTE(review): a zero-sized #[repr(C)] struct works in practice, but the
// recommended opaque-type pattern is a non-constructible struct with a
// private field (see the Rustonomicon FFI chapter) — confirm before use
// in new code.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct rax;
1783
// Opaque handle to a C `raxNode`; only ever used behind raw pointers.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct raxNode;
1787
// Mirror of the C `raxStack` (a pointer stack used during iteration).
// Field order and types must stay in sync with rax.h.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct raxStack {
    stack: *mut *mut libc::c_void,
    items: libc::size_t,
    maxitems: libc::size_t,
    // Inline storage used before the stack spills to the heap.
    static_items: [*mut libc::c_void; 32],
    oom: libc::c_int,
}
1797
// Opaque stand-in for the C iterator type used in the FFI signatures; the
// typed Rust mirror is `RaxIterator`, which is cast to this for calls.
#[repr(C)]
pub struct raxIterator;
1800
1801#[allow(non_snake_case)]
1802#[allow(non_camel_case_types)]
1803extern "C" fn RaxFreeWithCallbackWrapper<V>(v: *mut libc::c_void) {
1804 unsafe {
1805 // Re-box it so it can drop it immediately after it leaves this scope.
1806 drop(Box::from_raw(v as *mut V));
1807 }
1808}
1809
// Callback signature invoked by the C library per stored data pointer.
#[allow(non_camel_case_types)]
type raxNodeCallback = extern "C" fn(v: *mut libc::c_void);

// Callback used by raxFreeWithCallback to release each stored value.
type RaxFreeCallback = extern "C" fn(v: *mut libc::c_void);
1814
#[allow(improper_ctypes)]
#[allow(non_snake_case)]
#[allow(non_camel_case_types)]
#[link(name = "rax", kind = "static")]
extern "C" {
    // Sentinel pointer returned by raxFind when the key is not present.
    pub static raxNotFound: *mut u8;

    // Allocator hooks used by the C library (swapped from Rust via the
    // rax_*_hook functions exercised in the test module).
    pub static mut rax_malloc: extern "C" fn(size: libc::size_t) -> *mut u8;
    pub static mut rax_realloc:
        extern "C" fn(ptr: *mut libc::c_void, size: libc::size_t) -> *mut u8;
    pub static mut rax_free: extern "C" fn(ptr: *mut libc::c_void);

    // sizeof(raxIterator) on the C side — useful to sanity-check that the
    // Rust mirror struct has a matching size.
    pub fn raxIteratorSize() -> libc::c_int;

    // Allocate a new, empty radix tree.
    pub fn raxNew() -> *mut rax;

    // Free the tree (raxFreeWithCallback also invokes `callback` on every
    // stored data pointer first).
    pub fn raxFree(rax: *mut rax);

    pub fn raxFreeWithCallback(rax: *mut rax, callback: RaxFreeCallback);

    // Insert key `s[0..len]` with `data`; a replaced value, if any, is
    // written through `old`.
    pub fn raxInsert(
        rax: *mut rax,
        s: *const u8,
        len: libc::size_t,
        data: *const u8,
        old: &mut *mut u8,
    ) -> libc::c_int;

    // Like raxInsert, but does not overwrite an existing key.
    pub fn raxTryInsert(
        rax: *mut rax,
        s: *const u8,
        len: libc::size_t,
        data: *const u8,
        old: *mut *mut u8,
    ) -> libc::c_int;

    // Remove a key; the removed value is written through `old`.
    pub fn raxRemove(
        rax: *mut rax,
        s: *const u8,
        len: libc::size_t,
        old: &mut *mut u8,
    ) -> libc::c_int;

    // Look up a key (returns the raxNotFound sentinel when absent).
    pub fn raxFind(rax: *mut rax, s: *const u8, len: libc::size_t) -> *mut u8;

    pub fn raxIteratorNew(rt: *mut rax) -> *mut raxIterator;

    // Iterator lifecycle: raxStart binds to a tree, raxSeek positions,
    // raxNext/raxPrev advance, raxStop releases iterator-owned memory.
    pub fn raxStart(it: *const raxIterator, rt: *mut rax);

    pub fn raxSeek(
        it: *const raxIterator,
        op: *const u8,
        ele: *const u8,
        len: libc::size_t,
    ) -> libc::c_int;

    pub fn raxNext(it: *const raxIterator) -> libc::c_int;

    pub fn raxPrev(it: *const raxIterator) -> libc::c_int;

    // Move the iterator a number of random steps (fuzzing/sampling aid).
    pub fn raxRandomWalk(it: *const raxIterator, steps: libc::size_t) -> libc::c_int;

    // Compare the iterator's current key against `key` using operator `op`.
    pub fn raxCompare(
        it: *const raxIterator,
        op: *const u8,
        key: *mut u8,
        key_len: libc::size_t,
    ) -> libc::c_int;

    pub fn raxStop(it: *const raxIterator);

    pub fn raxEOF(it: *const raxIterator) -> libc::c_int;

    // Debug aid: print the tree structure.
    pub fn raxShow(rax: *mut rax);

    // Number of elements stored in the tree.
    pub fn raxSize(rax: *mut rax) -> u64;
}
1892
1893#[cfg(test)]
1894mod tests {
1895 extern crate test;
1896 use std::{
1897 self,
1898 default::Default,
1899 fmt,
1900 time::{Duration, Instant},
1901 };
1902
1903 use super::*;
1904
1905 extern "C" fn rax_malloc_hook(size: libc::size_t) -> *mut u8 {
1906 unsafe {
1907 println!("malloc");
1908 libc::malloc(size) as *mut u8
1909 }
1910 }
1911
1912 extern "C" fn rax_realloc_hook(ptr: *mut libc::c_void, size: libc::size_t) -> *mut u8 {
1913 unsafe {
1914 println!("realloc");
1915 libc::realloc(ptr, size) as *mut u8
1916 }
1917 }
1918
1919 extern "C" fn rax_free_hook(ptr: *mut libc::c_void) {
1920 unsafe {
1921 println!("free");
1922 libc::free(ptr)
1923 }
1924 }
1925
1926 pub struct MyMsg<'a>(&'a str);
1927
    impl<'a> Drop for MyMsg<'a> {
        // Log when the value is destroyed, so tests can see whether the
        // rax wrappers free stored values at the expected times.
        fn drop(&mut self) {
            println!("dropped -> {}", self.0);
        }
    }
1933
    /// Simple wall-clock stopwatch used by the benchmark tests.
    #[derive(Clone, Copy)]
    pub struct Stopwatch {
        // Some(..) while running; None while stopped.
        start_time: Option<Instant>,
        // Time accumulated across previous start/stop segments.
        elapsed: Duration,
    }
1939
1940 impl Default for Stopwatch {
1941 fn default() -> Stopwatch {
1942 Stopwatch {
1943 start_time: None,
1944 elapsed: Duration::from_secs(0),
1945 }
1946 }
1947 }
1948
1949 impl fmt::Display for Stopwatch {
1950 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1951 return write!(f, "{}ms", self.elapsed_ms());
1952 }
1953 }
1954
1955 #[expect(unused)]
1956 impl Stopwatch {
1957 pub fn new() -> Stopwatch {
1958 let sw: Stopwatch = Default::default();
1959 return sw;
1960 }
1961 pub fn start_new() -> Stopwatch {
1962 let mut sw = Stopwatch::new();
1963 sw.start();
1964 return sw;
1965 }
1966
1967 pub fn start(&mut self) {
1968 self.start_time = Some(Instant::now());
1969 }
1970 pub fn stop(&mut self) {
1971 self.elapsed = self.elapsed();
1972 self.start_time = None;
1973 }
1974 pub fn reset(&mut self) {
1975 self.elapsed = Duration::from_secs(0);
1976 self.start_time = None;
1977 }
1978 pub fn restart(&mut self) {
1979 self.reset();
1980 self.start();
1981 }
1982
1983 pub fn is_running(&self) -> bool {
1984 return self.start_time.is_some();
1985 }
1986
1987 pub fn elapsed(&self) -> Duration {
1988 match self.start_time {
1989 Some(t1) => {
1990 return t1.elapsed() + self.elapsed;
1991 }
1992 None => {
1993 return self.elapsed;
1994 }
1995 }
1996 }
1997 pub fn elapsed_ms(&self) -> i64 {
1998 let dur = self.elapsed();
1999 return (dur.as_secs() * 1000 + (dur.subsec_nanos() / 1000000) as u64) as i64;
2000 }
2001 }
2002
    /// Ad-hoc comparative benchmark: times 1M lookups and inserts against
    /// RaxSet/RaxMap and the std HashSet/HashMap/BTreeSet/BTreeMap.
    /// Results go to stdout; run with `--nocapture`. Two outer rounds let
    /// caches/allocator warm up.
    #[test]
    fn bench() {
        let ops = 1000000;
        println!("{} operations per function", ops);

        for _ in 0..2 {
            println!();
            println!("Gets...");
            // Warm each container with 2000 keys, then time `ops` lookups
            // of a single present key (1601 / 300).
            {
                let r = &mut RaxSet::<u64>::new();
                for x in 0..2000 {
                    r.insert(x).expect("whoops!");
                }

                let sw = Stopwatch::start_new();
                for _po in 0..ops {
                    r.exists(1601);
                }

                println!("RaxSet::get {}ms", sw.elapsed_ms());
            }
            {
                let r = &mut RaxMap::<u64, &str>::new();
                for x in 0..2000 {
                    r.insert_null(x).expect("whoops!");
                }

                match r.find(1601) {
                    Some(v) => println!("{}", v),
                    None => {}
                }

                let sw = Stopwatch::start_new();

                for _po in 0..ops {
                    r.find(1601);
                }

                println!("RaxMap::get {}ms", sw.elapsed_ms());
            }

            // Same lookup but going through the iterator seek path, which
            // includes per-call iterator setup/teardown.
            {
                let r = &mut RaxMap::<u64, &str>::new();
                for x in 0..2000 {
                    r.insert_null(x).expect("whoops!");
                }
                let sw = Stopwatch::start_new();

                for _po in 0..ops {
                    r.iter(|_, iter| {
                        iter.seek(EQUAL, 1601);
                    });
                }

                println!("RaxCursor:seek {}ms", sw.elapsed_ms());
            }
            {
                let r = &mut std::collections::HashSet::<u64>::new();
                for x in 0..2000 {
                    r.insert(x);
                }

                let sw = Stopwatch::start_new();

                let xx = 300;
                for _po in 0..ops {
                    r.get(&xx);
                }

                println!("HashSet::get {}ms", sw.elapsed_ms());
            }
            {
                let r = &mut std::collections::HashMap::<u64, &str>::new();
                for x in 0..2000 {
                    r.insert(x, "");
                }

                let sw = Stopwatch::start_new();

                let xx = 300;
                for _po in 0..ops {
                    r.get(&xx);
                }

                println!("HashMap::get {}ms", sw.elapsed_ms());
            }
            {
                let r = &mut std::collections::BTreeSet::<u64>::new();
                for x in 0..2000 {
                    r.insert(x);
                }

                let sw = Stopwatch::start_new();

                let xx = 300;
                for _po in 0..ops {
                    r.get(&xx);
                }

                println!("BTreeSet::get {}ms", sw.elapsed_ms());
            }
            {
                let r = &mut std::collections::BTreeMap::<u64, &str>::new();
                for x in 0..2000 {
                    r.insert(x, "");
                }

                let sw = Stopwatch::start_new();

                let xx = 300;
                for _po in 0..ops {
                    r.get(&xx);
                }

                println!("BTreeMap::get {}ms", sw.elapsed_ms());
            }

            println!();
            println!("Inserts...");
            // Time `ops` fresh inserts into each container.
            {
                let r = &mut RaxMap::<u64, &str>::new();
                let sw = Stopwatch::start_new();

                for x in 0..ops {
                    r.insert(x, Box::new("")).expect("whoops!");
                }

                println!("RaxMap::insert {}ms", sw.elapsed_ms());
            }

            {
                let r = &mut RaxSet::<u64>::new();
                let sw = Stopwatch::start_new();

                for x in 0..ops {
                    r.insert(x).expect("whoops!");
                }

                println!("RaxSet::insert {}ms", sw.elapsed_ms());
            }

            {
                let r = &mut std::collections::BTreeSet::<u64>::new();
                let sw = Stopwatch::start_new();

                for x in 0..ops {
                    r.insert(x);
                }

                println!("BTreeSet::insert {}ms", sw.elapsed_ms());
            }
            {
                let r = &mut std::collections::BTreeMap::<u64, &str>::new();
                let sw = Stopwatch::start_new();

                for x in 0..ops {
                    r.insert(x, "");
                }

                println!("BTreeMap::insert {}ms", sw.elapsed_ms());
            }

            {
                let r = &mut std::collections::HashMap::<u64, &str>::new();
                let sw = Stopwatch::start_new();

                for x in 0..ops {
                    r.insert(x, "");
                }

                println!("HashMap::insert {}ms", sw.elapsed_ms());
            }
        }
    }
2177
2178 #[test]
2179 fn bench_rax_find() {
2180 for _ in 0..10 {
2181 let r = &mut RaxMap::<u64, &str>::new();
2182 for x in 0..2000 {
2183 r.insert_null(x).expect("whoops!");
2184 }
2185
2186 match r.find(1601) {
2187 Some(v) => println!("{}", v),
2188 None => {}
2189 }
2190
2191 let sw = Stopwatch::start_new();
2192
2193 for _po in 0..1000000 {
2194 r.find(1601);
2195 }
2196
2197 println!("Thing took {}ms", sw.elapsed_ms());
2198 }
2199 }
2200
2201 #[test]
2202 fn bench_rax_cur_find() {
2203 for _ in 0..10 {
2204 let r = &mut RaxMap::<u64, &str>::new();
2205 for x in 0..2000 {
2206 r.insert_null(x).expect("whoops!");
2207 }
2208
2209 match r.find(1601) {
2210 Some(v) => println!("{}", v),
2211 None => {}
2212 }
2213
2214 let sw = Stopwatch::start_new();
2215
2216 for _po in 0..1000000 {
2217 r.iter(|_, iter| {
2218 iter.seek(EQUAL, 1601);
2219 });
2220 }
2221
2222 println!("RaxMap::cursor_find {}ms", sw.elapsed_ms());
2223 }
2224 }
2225
2226 #[test]
2227 fn bench_rax_insert() {
2228 for _ in 0..10 {
2229 let r = &mut RaxMap::<u64, &str>::new();
2230 //
2231 let sw = Stopwatch::start_new();
2232
2233 for x in 0..1000000 {
2234 r.insert(x, Box::new("")).expect("whoops!");
2235 }
2236
2237 println!("RaxMap::insert {}ms", sw.elapsed_ms());
2238 println!("Size {}", r.size());
2239 }
2240 }
2241
2242 #[test]
2243 fn bench_rax_insert_show() {
2244 let r = &mut RaxMap::<u64, &str>::new();
2245 //
2246 let sw = Stopwatch::start_new();
2247
2248 for x in 0..100 {
2249 r.insert(x, Box::new("")).expect("whoops!");
2250 }
2251
2252 r.show();
2253 println!("RaxMap::insert {}ms", sw.elapsed_ms());
2254 assert_eq!(r.size(), 100);
2255 }
2256
2257 #[test]
2258 fn bench_rax_replace() {
2259 let ops = 1000000;
2260 for _ in 0..2 {
2261 let r = &mut RaxMap::<u64, &str>::new();
2262 // Insert values
2263 for x in 0..ops {
2264 r.insert(x, Box::new("")).expect("whoops!");
2265 }
2266
2267 let sw = Stopwatch::start_new();
2268
2269 for x in 0..ops {
2270 // Replace existing key
2271 r.insert(x, Box::new("")).expect("whoops!");
2272 }
2273
2274 println!("RaxMap::replace {}ms", sw.elapsed_ms());
2275 assert_eq!(r.size(), ops);
2276 }
2277 }
2278
    /// Exercise &str keys: install logging allocator hooks, insert keys with
    /// a shared "hello" prefix, look one up, dump the tree, then walk it
    /// forward from the minimum and backward from the maximum.
    #[test]
    fn key_str() {
        unsafe {
            // Route the C library's allocations through the logging hooks.
            set_allocator(rax_malloc_hook, rax_realloc_hook, rax_free_hook);
        }

        let mut r = RaxMap::<&str, MyMsg>::new();

        let key = "hello-way";

        r.insert(key, Box::new(MyMsg("world 80"))).expect("whoops!");
        r.insert("hello-war", Box::new(MyMsg("world 80")))
            .expect("whoops!");

        r.insert("hello-wares", Box::new(MyMsg("world 80")))
            .expect("whoops!");
        r.insert("hello", Box::new(MyMsg("world 100")))
            .expect("whoops!");

        {
            match r.find("hello") {
                Some(v) => println!("Found {}", v.0),
                None => println!("Not Found"),
            }
        }

        r.show();

        r.iter(|_, iter| {
            // Forward pass: smallest key to EOF.
            if !iter.seek_min() {
                return;
            }
            while iter.forward() {
                println!("{}", iter.key());
            }
            // Backward pass: largest key to the beginning.
            if !iter.seek_max() {
                return;
            }
            while iter.back() {
                println!("{}", iter.key());
            }
        });
    }
2322
    /// Exercise f64 keys: insert a few floats, dump the tree, then walk it
    /// forward from the minimum and backward from the maximum.
    #[test]
    fn key_f64() {
        println!("sizeof(Rax) {}", std::mem::size_of::<RaxMap<f64, MyMsg>>());

        let mut r = RaxMap::<f64, MyMsg>::new();

        r.insert(100.01, Box::new(MyMsg("world 100")))
            .expect("whoops!");
        r.insert(80.20, Box::new(MyMsg("world 80")))
            .expect("whoops!");
        r.insert(100.00, Box::new(MyMsg("world 200")))
            .expect("whoops!");
        r.insert(99.10, Box::new(MyMsg("world 1")))
            .expect("whoops!");

        r.show();

        r.iter(|_, iter| {
            // for (k, v) in iter {
            //
            // }
            // Forward pass, then backward pass over all keys.
            iter.seek_min();
            while iter.forward() {
                println!("{}", iter.key());
            }
            iter.seek_max();
            while iter.back() {
                println!("{}", iter.key());
            }
        });
    }
2354
2355 #[test]
2356 fn key_u64() {
2357 println!("sizeof(Rax) {}", std::mem::size_of::<RaxMap<u64, MyMsg>>());
2358
2359 let mut r = RaxMap::<u64, MyMsg>::new();
2360
2361 r.insert(100, Box::new(MyMsg("world 100")))
2362 .expect("whoops!");
2363 r.insert(80, Box::new(MyMsg("world 80"))).expect("whoops!");
2364 r.insert(200, Box::new(MyMsg("world 200")))
2365 .expect("whoops!");
2366 r.insert(1, Box::new(MyMsg("world 1"))).expect("whoops!");
2367
2368 r.show();
2369
2370 // let result = r.iter_result(move |it| {
2371 //
2372 // if !it.seek(GREATER_EQUAL, 800) {
2373 // println!("Not Found");
2374 // return Ok("");
2375 // }
2376 //
2377 // if it.eof() {
2378 // println!("Not Found");
2379 // return Ok("");
2380 // }
2381 //
2382 // while it.forward() {
2383 // println!("Key Len = {}", it.key());
2384 // println!("Data = {}", it.data().unwrap().0);
2385 // }
2386 //
2387 // Ok("")
2388 // });
2389
2390 // r.seek(GREATER_EQUAL, 80, |_, iter| {
2391 // for (key, value) in iter {
2392 // println!("Key Len = {}", key);
2393 // println!("Data = {}", value.unwrap().0);
2394 // }
2395 // });
2396
2397 // r.seek_result(GREATER_EQUAL, 80, |_, iter| {
2398 // for (key, value) in iter {
2399 // println!("Key Len = {}", key);
2400 // println!("Data = {}", value.unwrap().0);
2401 // }
2402 // Ok(())
2403 // });
2404
2405 r.seek_min(|_, it| {
2406 for (key, value) in it.rev() {
2407 println!("Key Len = {}", key);
2408 println!("Data = {}", value.unwrap().0);
2409 }
2410 });
2411
2412 // r.iter(move |it| {
2413 // if !it.seek(GREATER_EQUAL, 800) {
2414 // println!("Not Found");
2415 // return;
2416 // }
2417 //
2418 //
2419 //
2420 // while it.forward() {
2421 // println!("Key Len = {}", it.key());
2422 // println!("Data = {}", it.data().unwrap().0);
2423 // }
2424 // });
2425
2426 // let result = r.iter_apply(move |r, it| {
2427 // if !it.seek(GREATER_EQUAL, 800) {
2428 // println!("Out of Memory");
2429 // return Ok("");
2430 // }
2431 //
2432 // r.insert(800, Box::new(MyMsg("moved")));
2433 // it.seek(GREATER_EQUAL, 800);
2434 //
2435 // if it.eof() {
2436 // println!("Not Found");
2437 // return Ok("");
2438 // }
2439 //
2440 // while it.back() {
2441 // println!("Key Len = {}", it.key());
2442 // println!("Data = {}", it.data().unwrap().0);
2443 // }
2444 //
2445 // Ok("")
2446 // });
2447 }
2448}