rax/lib.rs
1#![cfg_attr(test, feature(test))]
2
3/// Representation of a radix tree as implemented in this file, that contains
4/// the strings "foo", "foobar" and "footer" after the insertion of each
5/// word. When the node represents a key inside the radix tree, we write it
6/// between [], otherwise it is written between ().
7///
8/// This is the vanilla representation:
9///
10/// ```text
11/// (f) ""
12/// \
13/// (o) "f"
14/// \
15/// (o) "fo"
16/// \
17/// [t b] "foo"
18/// / \
19/// "foot" (e) (a) "foob"
20/// / \
21/// "foote" (r) (r) "fooba"
22/// / \
23/// "footer" [] [] "foobar"
24/// ```
25///
26/// However, this implementation implements a very common optimization where
27/// successive nodes having a single child are "compressed" into the node
28/// itself as a string of characters, each representing a next-level child,
29/// and only the link to the node representing the last character node is
30/// provided inside the representation. So the above representation is turned
31/// into:
32///
33/// ```text
34/// ["foo"] ""
35/// |
36/// [t b] "foo"
37/// / \
38/// "foot" ("er") ("ar") "foob"
39/// / \
40/// "footer" [] [] "foobar"
41/// ```
42///
43/// However this optimization makes the implementation a bit more complex.
44/// For instance if a key "first" is added in the above radix tree, a
45/// "node splitting" operation is needed, since the "foo" prefix is no longer
46/// composed of nodes having a single child one after the other. This is the
47/// above tree and the resulting node splitting after this event happens:
48///
49///
50/// ```text
51/// (f) ""
52/// /
53/// (i o) "f"
54/// / \
55/// "firs" ("rst") (o) "fo"
56/// / \
57/// "first" [] [t b] "foo"
58/// / \
59/// "foot" ("er") ("ar") "foob"
60/// / \
61/// "footer" [] [] "foobar"
62/// ```
63///
64/// Similarly after deletion, if a new chain of nodes having a single child
65/// is created (the chain must also not include nodes that represent keys),
66/// it must be compressed back into a single node.
67extern crate libc;
68extern crate nix;
69
70use std::{
71 error, fmt,
72 mem::{size_of, MaybeUninit},
73 ptr,
74};
75
76use nix::errno::Errno;
77
/// Seek operator: position strictly after the given key.
pub const GREATER: &str = ">";
/// Seek operator: position at or after the given key.
pub const GREATER_EQUAL: &str = ">=";
/// Seek operator: position strictly before the given key.
pub const LESSER: &str = "<";
/// Seek operator: position at or before the given key.
pub const LESSER_EQUAL: &str = "<=";
/// Seek operator: position exactly at the given key.
pub const EQUAL: &str = "=";
/// Seek operator: position at the first element.
pub const BEGIN: &str = "^";
/// Seek operator: position at the last element.
pub const END: &str = "$";

/// Maximum children / compressed length of a rax node (29-bit size field).
pub const RAX_NODE_MAX_SIZE: libc::c_int = (1 << 29) - 1;
/// Stack items kept inline in the C rax stack before spilling to the heap.
pub const RAX_STACK_STATIC_ITEMS: libc::c_int = 32;
/// Key bytes kept inline in a C rax iterator before spilling to the heap.
pub const RAX_ITER_STATIC_LEN: libc::c_int = 128;
/// Iterator flag: iterator was just seeked; the next step yields the current element.
pub const RAX_ITER_JUST_SEEKED: libc::c_int = 1 << 0;
/// Iterator flag: iterator reached end of data.
pub const RAX_ITER_EOF: libc::c_int = 1 << 1;
/// Iterator flag: "safe" iterator that tolerates modifications while iterating.
pub const RAX_ITER_SAFE: libc::c_int = 1 << 2;
92
93/// Return the existing Rax allocator.
94///
95/// # Safety
96///
97/// Must only be called when no other thread is modifying the allocator.
98pub unsafe fn allocator() -> (
99 extern "C" fn(size: libc::size_t) -> *mut u8,
100 extern "C" fn(ptr: *mut libc::c_void, size: libc::size_t) -> *mut u8,
101 extern "C" fn(ptr: *mut libc::c_void),
102) {
103 (rax_malloc, rax_realloc, rax_free)
104}
105
106/// Rax internally makes calls to "malloc", "realloc" and "free" for all of it's
107/// heap memory needs. These calls can be patched with the supplied hooks.
108/// Do not call this method after Rax has been used at all. This must
109/// be called before using or calling any other Rax API function.
110///
111/// # Safety
112///
113/// Must be called before any Rax allocation occurs. Not thread-safe.
114pub unsafe fn set_allocator(
115 malloc: extern "C" fn(size: libc::size_t) -> *mut u8,
116 realloc: extern "C" fn(ptr: *mut libc::c_void, size: libc::size_t) -> *mut u8,
117 free: extern "C" fn(ptr: *mut libc::c_void),
118) {
119 rax_malloc = malloc;
120 rax_realloc = realloc;
121 rax_free = free;
122}
123
/// Errors surfaced by the safe Rax wrappers.
#[derive(Debug)]
pub enum RaxError {
    /// Catch-all error carrying a human-readable message.
    Generic(GenericError),
    /// The underlying C rax allocator reported ENOMEM.
    OutOfMemory(),
}
129
130impl RaxError {
131 pub fn generic(message: &str) -> RaxError {
132 RaxError::Generic(GenericError::new(message))
133 }
134}
135
136/// Redis has a beautiful Radix Tree implementation in ANSI C.
137/// This brings it to Rust and creates a safe Map like wrapper
138/// for it. This is very similar in utility to a BTreeMap, but
139/// RAX is likely much faster and more efficient. Naive testing
140/// showed a 2x-4x improvement for all common operations. The only
141/// disadvantage to BTreeMap is that BTree's allow much more flexibility
142/// in regards to comparing keys. Radix trees are lexicographically only.
143/// Composite keys where the non-last member is variable length could
144/// be something BTrees could handle much easier.
145///
146/// Internal RAX Node Layout
147///
148/// uint32_t iskey:1; /* Does this node contain a key? */
149/// uint32_t isnull:1; /* Associated value is NULL (don't store it). */
150/// uint32_t iscompr:1; /* Node is compressed. */
151/// uint32_t size:29; /* Number of children, or compressed string len. */
152///
153/// +----+---+--------+--------+--------+--------+
154/// |HDR |xyz| x-ptr | y-ptr | z-ptr |dataptr |
155/// +----+---+--------+--------+--------+--------+
156///
157/// As is evident above, there is no storage penalty for NULL values.
158///
159/// Keys are represented in compressed form therefore, there is no
160/// need to pump in Boxed keys or any sort of heap allocated chunk of
161/// memory. Stack or heap keys may be used from rust. Values can either
162/// be a sizeof<usize> size integer or it's a data pointer to a heap
163/// allocated / Boxed value.
164///
165/// Iterators were designed to be fast and attempt to only use stack
166/// allocated memory. RaxMap provides a model to take full advantage
167/// of stack allocated iterators through wrapping in a closure.
168///
169/// #Examples
170///
171/// ```
172/// use rax::RaxMap;
173/// let mut r = RaxMap::new();
174/// r.insert(1, Box::new("my heap allocation".to_string()));
175/// r.insert(2, Box::new("my other heap allocation".to_string()));
176///
177/// r.iter(|r, iter| {
178/// // Place iterator at the first entry.
179/// if !iter.seek_min() {
180/// // EOF
181/// return;
182/// }
183///
184/// // Can test EOF at any time.
185/// if iter.eof() {
186/// // EOF
187/// return;
188/// }
189///
190/// while iter.forward() {
191/// iter.key();
192/// iter.value();
193/// }
194/// // In reverse
195/// // Place iterator at the end.
196/// if !iter.seek_max() {
197/// // EOF
198/// return;
199/// }
200/// while iter.back() {
201/// iter.key();
202/// iter.value();
203/// }
204///
205/// // Seek
206/// if !iter.seek(">=", 2) {
207/// // EOF
208/// }
209/// while iter.forward() {
210/// iter.key();
211/// iter.value();
212/// }
213/// });
214/// ```
pub struct RaxMap<K: RaxKey, V> {
    /// Raw pointer to the underlying C rax structure. Owned by this map and
    /// released in `Drop`.
    pub rax: *mut rax,
    // Ties the key/value type parameters to this map without storing either;
    // values live behind raw pointers inside the C rax.
    phantom: std::marker::PhantomData<(K, V)>,
}
219
impl<K: RaxKey, V> Drop for RaxMap<K, V> {
    fn drop(&mut self) {
        unsafe {
            // Free the whole C rax. The callback is invoked for every stored
            // value pointer so the boxed values can be reclaimed on the Rust
            // side instead of leaked (wrapper defined elsewhere in this file).
            raxFreeWithCallback(self.rax, RaxFreeWithCallbackWrapper::<V>);
        }
    }
}
228
229impl<K: RaxKey, V> Default for RaxMap<K, V> {
230 fn default() -> Self {
231 Self::new()
232 }
233}
234
235/// Implementation of RaxMap
236impl<K: RaxKey, V> RaxMap<K, V> {
237 pub fn new() -> RaxMap<K, V> {
238 unsafe {
239 RaxMap {
240 rax: raxNew(),
241 phantom: std::marker::PhantomData,
242 }
243 }
244 }
245
246 /// The number of entries in the RAX
247 pub fn len(&self) -> u64 {
248 unsafe { raxSize(self.rax) }
249 }
250
251 /// The number of entries in the RAX
252 pub fn size(&self) -> u64 {
253 unsafe { raxSize(self.rax) }
254 }
255
256 /// Returns true if the RAX is empty.
257 pub fn is_empty(&self) -> bool {
258 self.len() == 0
259 }
260
261 /// Prints the Rax as ASCII art to stdout.
262 pub fn show(&self) {
263 unsafe { raxShow(self.rax) }
264 }
265
266 /// Insert or replace existing key with a NULL value.
267 pub fn insert_null(&mut self, key: K) -> Result<Option<Box<V>>, RaxError> {
268 unsafe {
269 // Allocate a pointer to catch the old value.
270 let old: &mut *mut u8 = &mut ptr::null_mut();
271
272 // Integer values require Big Endian to allow the Rax to fully optimize
273 // storing them since it will be able to compress the prefixes especially
274 // for 64/128bit numbers.
275 let k = key.encode();
276 let (ptr, len) = k.to_buf();
277
278 let r = raxInsert(
279 self.rax,
280 // Grab a raw pointer to the key. Keys are most likely allocated
281 // on the stack. The rax will keep it's own copy of the key so we
282 // don't want to keep in in the heap twice and it exists in the
283 // rax in it's compressed form.
284 ptr,
285 len,
286 std::ptr::null_mut(),
287 old,
288 );
289
290 if r == 0 && Errno::last() == Errno::ENOMEM {
291 Err(RaxError::OutOfMemory())
292 } else if old.is_null() {
293 Ok(None)
294 } else {
295 // Box the previous value since Rax is done with it and it's our
296 // responsibility now to drop it. Once this Box goes out of scope
297 // the value is dropped and memory reclaimed.
298 Ok(Some(Box::from_raw(*old as *mut V)))
299 }
300 }
301 }
302
303 /// Insert a new entry into the RAX if an existing one does not exist.
304 pub fn try_insert(&mut self, key: K, data: Box<V>) -> Result<Option<Box<V>>, RaxError> {
305 unsafe {
306 // Allocate a pointer to catch the old value.
307 let old: &mut *mut u8 = &mut ptr::null_mut();
308
309 // Leak the boxed value as we hand it over to Rax to keep track of.
310 // These must be heap allocated unless we want to store sizeof(usize) or
311 // less bytes, then the value can be the pointer.
312 let value: &mut V = Box::leak(data);
313
314 // Integer values require Big Endian to allow the Rax to fully optimize
315 // storing them since it will be able to compress the prefixes especially
316 // for 64/128bit numbers.
317 let k = key.encode();
318 let (ptr, len) = k.to_buf();
319
320 let r = raxTryInsert(
321 self.rax,
322 // Grab a raw pointer to the key. Keys are most likely allocated
323 // on the stack. The rax will keep it's own copy of the key so we
324 // don't want to keep in in the heap twice and it exists in the
325 // rax in it's compressed form.
326 ptr,
327 len,
328 value as *mut V as *mut u8,
329 old,
330 );
331
332 if r == 0 {
333 if Errno::last() == Errno::ENOMEM {
334 Err(RaxError::OutOfMemory())
335 } else {
336 Ok(Some(Box::from_raw(value as *mut V)))
337 }
338 } else if old.is_null() {
339 Ok(None)
340 } else {
341 // This shouldn't happen, but if it does let's be safe and
342 // not leak memory.
343 Ok(Some(Box::from_raw(*old as *mut V)))
344 }
345 }
346 }
347
348 /// Try to insert a raw pointer value into the RAX.
349 ///
350 /// # Safety
351 ///
352 /// `value` must be a valid pointer or null.
353 pub unsafe fn try_insert_ptr(
354 &mut self,
355 key: K,
356 value: *mut u8,
357 ) -> Result<Option<*mut u8>, RaxError> {
358 // Allocate a pointer to catch the old value.
359 let old: &mut *mut u8 = &mut ptr::null_mut();
360
361 // Integer values require Big Endian to allow the Rax to fully optimize
362 // storing them since it will be able to compress the prefixes especially
363 // for 64/128bit numbers.
364 let k = key.encode();
365 let (ptr, len) = k.to_buf();
366
367 let r = raxTryInsert(
368 self.rax,
369 // Grab a raw pointer to the key. Keys are most likely allocated
370 // on the stack. The rax will keep it's own copy of the key so we
371 // don't want to keep in in the heap twice and it exists in the
372 // rax in it's compressed form.
373 ptr, len, value, old,
374 );
375
376 if r == 0 {
377 if Errno::last() == Errno::ENOMEM {
378 Err(RaxError::OutOfMemory())
379 } else {
380 Ok(Some(value))
381 }
382 } else if old.is_null() {
383 Ok(None)
384 } else {
385 // This shouldn't happen, but if it does let's be safe and
386 // not leak memory.
387 Ok(Some(*old))
388 }
389 }
390
391 /// Insert a new entry into the RAX replacing and returning the existing
392 /// entry for the supplied key.
393 pub fn insert(&mut self, key: K, data: Box<V>) -> Result<Option<Box<V>>, RaxError> {
394 unsafe {
395 // Allocate a pointer to catch the old value.
396 let old: &mut *mut u8 = &mut ptr::null_mut();
397
398 // Leak the boxed value as we hand it over to Rax to keep track of.
399 // These must be heap allocated unless we want to store sizeof(usize) or
400 // less bytes, then the value can be the pointer.
401 let value: &mut V = Box::leak(data);
402
403 // Integer values require Big Endian to allow the Rax to fully optimize
404 // storing them since it will be able to compress the prefixes especially
405 // for 64/128bit numbers.
406 let k = key.encode();
407 let (ptr, len) = k.to_buf();
408
409 let r = raxInsert(
410 self.rax,
411 // Grab a raw pointer to the key. Keys are most likely allocated
412 // on the stack. The rax will keep it's own copy of the key so we
413 // don't want to keep in in the heap twice and it exists in the
414 // rax in it's compressed form.
415 ptr,
416 len,
417 value as *mut V as *mut u8,
418 old,
419 );
420
421 if r == 0 && Errno::last() == Errno::ENOMEM {
422 Err(RaxError::OutOfMemory())
423 } else if old.is_null() {
424 Ok(None)
425 } else {
426 // Box the previous value since Rax is done with it and it's our
427 // responsibility now to drop it. Once this Box goes out of scope
428 // the value is dropped and memory reclaimed.
429 Ok(Some(Box::from_raw(*old as *mut V)))
430 }
431 }
432 }
433
434 /// Insert a raw pointer value into the RAX.
435 ///
436 /// # Safety
437 ///
438 /// `value` must be a valid pointer or null.
439 pub unsafe fn insert_ptr(
440 &mut self,
441 key: K,
442 value: *mut u8,
443 ) -> Result<Option<*mut u8>, RaxError> {
444 // Allocate a pointer to catch the old value.
445 let old: &mut *mut u8 = &mut ptr::null_mut();
446
447 // Integer values require Big Endian to allow the Rax to fully optimize
448 // storing them since it will be able to compress the prefixes especially
449 // for 64/128bit numbers.
450 let k = key.encode();
451 let (ptr, len) = k.to_buf();
452
453 let r = raxInsert(
454 self.rax,
455 // Grab a raw pointer to the key. Keys are most likely allocated
456 // on the stack. The rax will keep it's own copy of the key so we
457 // don't want to keep in in the heap twice and it exists in the
458 // rax in it's compressed form.
459 ptr, len, value, old,
460 );
461
462 if r == 0 && Errno::last() == Errno::ENOMEM {
463 Err(RaxError::OutOfMemory())
464 } else if old.is_null() {
465 Ok(None)
466 } else {
467 // Box the previous value since Rax is done with it and it's our
468 // responsibility now to drop it. Once this Box goes out of scope
469 // the value is dropped and memory reclaimed.
470 Ok(Some(*old))
471 }
472 }
473
474 /// Remove an entry from the RAX and return the associated value.
475 pub fn remove(&mut self, key: K) -> (bool, Option<Box<V>>) {
476 unsafe {
477 let old: &mut *mut u8 = &mut ptr::null_mut();
478 let k = key.encode();
479 let (ptr, len) = k.to_buf();
480
481 let r = raxRemove(self.rax, ptr, len, old);
482
483 if old.is_null() {
484 (r == 1, None)
485 } else {
486 (r == 1, Some(Box::from_raw(*old as *mut V)))
487 }
488 }
489 }
490
491 /// Find a key and return whether it exists along with its value.
492 pub fn find_exists(&self, key: K) -> (bool, Option<&V>) {
493 unsafe {
494 let k = key.encode();
495 let (ptr, len) = k.to_buf();
496
497 let value = raxFind(self.rax, ptr, len);
498
499 if value.is_null() {
500 (true, None)
501 } else if value == raxNotFound {
502 (false, None)
503 } else {
504 // transmute to the value so we don't drop the actual value accidentally.
505 // While the key associated to the value is in the RAX then we cannot
506 // drop it.
507 (true, Some(&*(value as *const V)))
508 }
509 }
510 }
511
512 /// Same as get but added for semantics parity.
513 pub fn find(&self, key: K) -> Option<&V> {
514 unsafe {
515 let k = key.encode();
516 let (ptr, len) = k.to_buf();
517
518 let value = raxFind(self.rax, ptr, len);
519
520 if value.is_null() || value == raxNotFound {
521 None
522 } else {
523 // transmute to the value so we don't drop the actual value accidentally.
524 // While the key associated to the value is in the RAX then we cannot
525 // drop it.
526 Some(&*(value as *const V))
527 }
528 }
529 }
530
531 /// Get the value associated with the key.
532 pub fn get(&self, key: K) -> Option<&V> {
533 unsafe {
534 let k = key.encode();
535 let (ptr, len) = k.to_buf();
536
537 let value = raxFind(self.rax, ptr, len);
538
539 if value.is_null() || value == raxNotFound {
540 None
541 } else {
542 // transmute to the value so we don't drop the actual value accidentally.
543 // While the key associated to the value is in the RAX then we cannot
544 // drop it.
545 Some(&*(value as *const V))
546 }
547 }
548 }
549
550 /// Determines if the supplied key exists in the Rax.
551 pub fn exists(&self, key: K) -> bool {
552 unsafe {
553 let k = key.encode();
554 let (ptr, len) = k.to_buf();
555
556 let value = raxFind(self.rax, ptr, len);
557
558 !(value.is_null() || value == raxNotFound)
559 }
560 }
561
562 /// Seek to the minimum key and execute the closure.
563 #[inline]
564 pub fn seek_min<F>(&mut self, f: F)
565 where
566 F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>),
567 {
568 unsafe {
569 // Allocate stack memory.
570 // Initialize a Rax iterator. This call should be performed a single time
571 // to initialize the iterator, and must be followed by a raxSeek() call,
572 // otherwise the raxPrev()/raxNext() functions will just return EOF.
573 let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
574 raxStart(&mut iter as *mut _ as *mut _, self.rax);
575 iter.seek_min();
576 // Borrow stack iterator and execute the closure.
577 f(self, &mut iter)
578 }
579 }
580
581 /// Seek to the minimum key and execute the closure, returning a result.
582 #[inline]
583 pub fn seek_min_result<R, F>(&mut self, f: F) -> Result<R, RaxError>
584 where
585 F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>) -> Result<R, RaxError>,
586 {
587 unsafe {
588 // Allocate stack memory.
589 // Initialize a Rax iterator. This call should be performed a single time
590 // to initialize the iterator, and must be followed by a raxSeek() call,
591 // otherwise the raxPrev()/raxNext() functions will just return EOF.
592 let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
593 raxStart(&mut iter as *mut _ as *mut _, self.rax);
594 iter.seek_min();
595 // Borrow stack iterator and execute the closure.
596 f(self, &mut iter)
597 }
598 }
599
600 /// Seek to the maximum key and execute the closure.
601 #[inline]
602 pub fn seek_max<F>(&mut self, f: F)
603 where
604 F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>),
605 {
606 unsafe {
607 // Allocate stack memory.
608 // Initialize a Rax iterator. This call should be performed a single time
609 // to initialize the iterator, and must be followed by a raxSeek() call,
610 // otherwise the raxPrev()/raxNext() functions will just return EOF.
611 let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
612 raxStart(&mut iter as *mut _ as *mut _, self.rax);
613 iter.seek_max();
614 // Borrow stack iterator and execute the closure.
615 f(self, &mut iter)
616 }
617 }
618
619 /// Seek to the maximum key and execute the closure, returning a result.
620 #[inline]
621 pub fn seek_max_result<R, F>(&mut self, f: F) -> Result<R, RaxError>
622 where
623 F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>) -> Result<R, RaxError>,
624 {
625 unsafe {
626 // Allocate stack memory.
627 // Initialize a Rax iterator. This call should be performed a single time
628 // to initialize the iterator, and must be followed by a raxSeek() call,
629 // otherwise the raxPrev()/raxNext() functions will just return EOF.
630 let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
631 raxStart(&mut iter as *mut _ as *mut _, self.rax);
632 iter.seek_max();
633 // Borrow stack iterator and execute the closure.
634 f(self, &mut iter)
635 }
636 }
637
638 /// Seek to the given key using the specified operator and execute the closure.
639 #[inline]
640 pub fn seek<F>(&mut self, op: &str, key: K, f: F)
641 where
642 F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>),
643 {
644 unsafe {
645 // Allocate stack memory.
646 // Initialize a Rax iterator. This call should be performed a single time
647 // to initialize the iterator, and must be followed by a raxSeek() call,
648 // otherwise the raxPrev()/raxNext() functions will just return EOF.
649 let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
650 raxStart(&mut iter as *mut _ as *mut _, self.rax);
651 iter.seek(op, key);
652 // Borrow stack iterator and execute the closure.
653 f(self, &mut iter)
654 }
655 }
656
657 /// Seek to the given key using the specified operator and execute the closure, returning a result.
658 #[inline]
659 pub fn seek_result<R, F>(&mut self, op: &str, key: K, f: F) -> Result<R, RaxError>
660 where
661 F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>) -> Result<R, RaxError>,
662 {
663 unsafe {
664 // Allocate stack memory.
665 // Initialize a Rax iterator. This call should be performed a single time
666 // to initialize the iterator, and must be followed by a raxSeek() call,
667 // otherwise the raxPrev()/raxNext() functions will just return EOF.
668 let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
669 raxStart(&mut iter as *mut _ as *mut _, self.rax);
670 iter.seek(op, key);
671 // Borrow stack iterator and execute the closure.
672 f(self, &mut iter)
673 }
674 }
675
676 /// Create an iterator and execute the closure.
677 #[inline]
678 pub fn iter<F>(&mut self, f: F)
679 where
680 F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>),
681 {
682 unsafe {
683 // Allocate stack memory.
684 // Initialize a Rax iterator. This call should be performed a single time
685 // to initialize the iterator, and must be followed by a raxSeek() call,
686 // otherwise the raxPrev()/raxNext() functions will just return EOF.
687 let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
688 raxStart(&mut iter as *mut _ as *mut _, self.rax);
689 // Borrow stack iterator and execute the closure.
690 f(self, &mut iter)
691 }
692 }
693
694 /// Create an iterator and execute the closure, returning a result.
695 #[inline]
696 pub fn iter_result<F, R>(&mut self, f: F) -> Result<R, RaxError>
697 where
698 F: Fn(&mut RaxMap<K, V>, &mut RaxIterator<K, V>) -> Result<R, RaxError>,
699 {
700 unsafe {
701 // Allocate stack memory.
702 // Initialize a Rax iterator. This call should be performed a single time
703 // to initialize the iterator, and must be followed by a raxSeek() call,
704 // otherwise the raxPrev()/raxNext() functions will just return EOF.
705 let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
706 raxStart(&mut iter as *mut _ as *mut _, self.rax);
707 // Borrow stack iterator and execute the closure.
708 f(self, &mut iter)
709 }
710 }
711}
712
713/// RaxMap but without the values. The "isnull" bit will be set for
714/// all entries.
715/// #Examples
716///
717/// ```
718/// use rax::RaxSet;
719/// let mut r = RaxSet::new();
720/// r.insert(1);
721/// r.insert(2);
722///
723/// r.iter(|r, iter| {
724/// // Place iterator at the first entry.
725/// if !iter.seek_min() {
726/// // EOF
727/// return;
728/// }
729///
730/// // Can test EOF at any time.
731/// if iter.eof() {
732/// // EOF
733/// return;
734/// }
735///
736/// while iter.forward() {
737/// iter.key();
738/// }
739/// // In reverse
740/// // Place iterator at the end.
741/// if !iter.seek_max() {
742/// // EOF
743/// return;
744/// }
745/// while iter.back() {
746/// iter.key();
747/// }
748///
749/// // Seek
750/// if !iter.seek(">=", 2) {
751/// // EOF
752/// }
753/// while iter.forward() {
754/// iter.key();
755/// }
756/// });
757/// ```
pub struct RaxSet<K: RaxKey> {
    // Raw pointer to the underlying C rax structure. Owned by this set and
    // released in `Drop`. All stored values are NULL ("isnull" bit set).
    rax: *mut rax,
    // Ties the key type parameter to this set without storing a K.
    _marker: std::marker::PhantomData<K>,
}
762
impl<K: RaxKey> Drop for RaxSet<K> {
    fn drop(&mut self) {
        unsafe {
            // Free the whole C rax. Unlike RaxMap, no value callback is
            // needed because a set only ever stores NULL values.
            raxFree(self.rax)
        }
    }
}
771
772impl<K: RaxKey> Default for RaxSet<K> {
773 fn default() -> Self {
774 Self::new()
775 }
776}
777
778/// Implementation of RaxSet.
779impl<K: RaxKey> RaxSet<K> {
780 pub fn new() -> RaxSet<K> {
781 RaxSet {
782 rax: unsafe { raxNew() },
783 _marker: std::marker::PhantomData,
784 }
785 }
786
787 /// The number of entries in the RAX
788 #[inline]
789 pub fn len(&self) -> u64 {
790 unsafe { raxSize(self.rax) }
791 }
792
793 /// The number of entries in the RAX
794 #[inline]
795 pub fn size(&self) -> u64 {
796 unsafe { raxSize(self.rax) }
797 }
798
799 /// Prints the Rax as ASCII art to stdout.
800 #[inline]
801 pub fn show(&self) {
802 unsafe { raxShow(self.rax) }
803 }
804
805 /// Returns true if the RAX is empty.
806 pub fn is_empty(&self) -> bool {
807 self.len() == 0
808 }
809
810 /// Insert a new entry into the RAX replacing and returning the existing
811 /// entry for the supplied key.
812 pub fn insert(&mut self, key: K) -> Result<bool, RaxError> {
813 unsafe {
814 // Integer values require Big Endian to allow the Rax to fully optimize
815 // storing them since it will be able to compress the prefixes especially
816 // for 64/128bit numbers.
817 let k = key.encode();
818 let (ptr, len) = k.to_buf();
819
820 let r = raxTryInsert(
821 self.rax,
822 // Grab a raw pointer to the key. Keys are most likely allocated
823 // on the stack. The rax will keep it's own copy of the key so we
824 // don't want to keep in in the heap twice and it exists in the
825 // rax in it's compressed form.
826 ptr,
827 len,
828 std::ptr::null_mut(),
829 std::ptr::null_mut(),
830 );
831
832 if r == 0 {
833 if Errno::last() == Errno::ENOMEM {
834 Err(RaxError::OutOfMemory())
835 } else {
836 Ok(false)
837 }
838 } else {
839 Ok(true)
840 }
841 }
842 }
843
844 pub fn remove(&mut self, key: K) -> bool {
845 unsafe {
846 let k = key.encode();
847 let (ptr, len) = k.to_buf();
848
849 let r = raxRemove(self.rax, ptr, len, &mut std::ptr::null_mut());
850
851 r == 1
852 }
853 }
854
855 /// Determines if the supplied key exists in the Rax.
856 pub fn exists(&self, key: K) -> bool {
857 unsafe {
858 let k = key.encode();
859 let (ptr, len) = k.to_buf();
860
861 let value = raxFind(self.rax, ptr, len);
862
863 value != raxNotFound
864 }
865 }
866
867 /// Seek to the minimum key and execute the closure.
868 #[inline]
869 pub fn seek_min<F>(&mut self, f: F)
870 where
871 F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>),
872 {
873 unsafe {
874 // Allocate stack memory.
875 // Initialize a Rax iterator. This call should be performed a single time
876 // to initialize the iterator, and must be followed by a raxSeek() call,
877 // otherwise the raxPrev()/raxNext() functions will just return EOF.
878 let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
879 raxStart(&mut iter as *mut _ as *mut _, self.rax);
880 iter.seek_min();
881 // Borrow stack iterator and execute the closure.
882 f(self, &mut iter)
883 }
884 }
885
886 /// Seek to the minimum key and execute the closure, returning a result.
887 #[inline]
888 pub fn seek_min_result<R, F>(&mut self, f: F) -> Result<R, RaxError>
889 where
890 F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>) -> Result<R, RaxError>,
891 {
892 unsafe {
893 // Allocate stack memory.
894 // Initialize a Rax iterator. This call should be performed a single time
895 // to initialize the iterator, and must be followed by a raxSeek() call,
896 // otherwise the raxPrev()/raxNext() functions will just return EOF.
897 let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
898 raxStart(&mut iter as *mut _ as *mut _, self.rax);
899 iter.seek_min();
900 // Borrow stack iterator and execute the closure.
901 f(self, &mut iter)
902 }
903 }
904
905 /// Seek to the maximum key and execute the closure.
906 #[inline]
907 pub fn seek_max<F>(&mut self, f: F)
908 where
909 F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>),
910 {
911 unsafe {
912 // Allocate stack memory.
913 // Initialize a Rax iterator. This call should be performed a single time
914 // to initialize the iterator, and must be followed by a raxSeek() call,
915 // otherwise the raxPrev()/raxNext() functions will just return EOF.
916 let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
917 raxStart(&mut iter as *mut _ as *mut _, self.rax);
918 iter.seek_max();
919 // Borrow stack iterator and execute the closure.
920 f(self, &mut iter)
921 }
922 }
923
924 /// Seek to the maximum key and execute the closure, returning a result.
925 #[inline]
926 pub fn seek_max_result<R, F>(&mut self, f: F) -> Result<R, RaxError>
927 where
928 F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>) -> Result<R, RaxError>,
929 {
930 unsafe {
931 // Allocate stack memory.
932 // Initialize a Rax iterator. This call should be performed a single time
933 // to initialize the iterator, and must be followed by a raxSeek() call,
934 // otherwise the raxPrev()/raxNext() functions will just return EOF.
935 let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
936 raxStart(&mut iter as *mut _ as *mut _, self.rax);
937 iter.seek_max();
938 // Borrow stack iterator and execute the closure.
939 f(self, &mut iter)
940 }
941 }
942
943 /// Seek to the given key using the specified operator and execute the closure.
944 #[inline]
945 pub fn seek<F>(&mut self, op: &str, key: K, f: F)
946 where
947 F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>),
948 {
949 unsafe {
950 // Allocate stack memory.
951 // Initialize a Rax iterator. This call should be performed a single time
952 // to initialize the iterator, and must be followed by a raxSeek() call,
953 // otherwise the raxPrev()/raxNext() functions will just return EOF.
954 let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
955 raxStart(&mut iter as *mut _ as *mut _, self.rax);
956 iter.seek(op, key);
957 // Borrow stack iterator and execute the closure.
958 f(self, &mut iter)
959 }
960 }
961
962 /// Seek to the given key using the specified operator and execute the closure, returning a result.
963 #[inline]
964 pub fn seek_result<R, F>(&mut self, op: &str, key: K, f: F) -> Result<R, RaxError>
965 where
966 F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>) -> Result<R, RaxError>,
967 {
968 unsafe {
969 // Allocate stack memory.
970 // Initialize a Rax iterator. This call should be performed a single time
971 // to initialize the iterator, and must be followed by a raxSeek() call,
972 // otherwise the raxPrev()/raxNext() functions will just return EOF.
973 let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
974 raxStart(&mut iter as *mut _ as *mut _, self.rax);
975 iter.seek(op, key);
976 // Borrow stack iterator and execute the closure.
977 f(self, &mut iter)
978 }
979 }
980
981 /// Create an iterator and execute the closure.
982 #[inline]
983 pub fn iter<F>(&mut self, f: F)
984 where
985 F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>),
986 {
987 unsafe {
988 // Allocate stack memory.
989 // Initialize a Rax iterator. This call should be performed a single time
990 // to initialize the iterator, and must be followed by a raxSeek() call,
991 // otherwise the raxPrev()/raxNext() functions will just return EOF.
992 let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
993 raxStart(&mut iter as *mut _ as *mut _, self.rax);
994 // Borrow stack iterator and execute the closure.
995 f(self, &mut iter)
996 }
997 }
998
999 /// Create an iterator and execute the closure, returning a result.
1000 #[inline]
1001 pub fn iter_result<F, R>(&mut self, f: F) -> Result<R, RaxError>
1002 where
1003 F: Fn(&mut RaxSet<K>, &mut RaxIterator<K, usize>) -> Result<R, RaxError>,
1004 {
1005 unsafe {
1006 // Allocate stack memory.
1007 // Initialize a Rax iterator. This call should be performed a single time
1008 // to initialize the iterator, and must be followed by a raxSeek() call,
1009 // otherwise the raxPrev()/raxNext() functions will just return EOF.
1010 let mut iter = MaybeUninit::<RaxIterator<K, usize>>::zeroed().assume_init();
1011 raxStart(&mut iter as *mut _ as *mut _, self.rax);
1012 // Borrow stack iterator and execute the closure.
1013 f(self, &mut iter)
1014 }
1015 }
1016}
1017
1018// Same as RaxMap except values are not pointers to heap allocations.
1019// Instead the "data pointer" in the RAX is the value. This means we
1020// have sizeof<usize> worth of bytes to play with. Perhaps, in the future
1021// we could create data values of any size, but for now we have the size
1022// of pointers to work with or null which has no added size to a rax node.
1023//pub struct RaxIntMap<K: RaxKey> {
1024// rax: *mut rax,
1025// _marker: std::marker::PhantomData<K>,
1026//}
1027//
1028//impl<K: RaxKey> RaxIntMap<K> {
1029// pub fn new() -> RaxIntMap<K> {
1030// RaxIntMap {
1031// rax: unsafe { raxNew() },
1032// _marker: std::marker::PhantomData,
1033// }
1034// }
1035//
1036// /// Insert a new entry into the RAX replacing and returning the existing
1037// /// entry for the supplied key.
1038// pub fn insert(&mut self, key: K, value: usize) -> Result<Option<usize>, RaxError> {
1039// unsafe {
1040// // Allocate a pointer to catch the old value.
1041// let old: &mut *mut u8 = &mut ptr::null_mut();
1042//
1043// // Integer values require Big Endian to allow the Rax to fully optimize
1044// // storing them since it will be able to compress the prefixes especially
1045// // for 64/128bit numbers.
1046// let k = key.encode();
1047// let (ptr, len) = k.to_buf();
1048//
1049// let r = raxInsert(
1050// self.rax,
1051// // Grab a raw pointer to the key. Keys are most likely allocated
// // on the stack. The rax will keep its own copy of the key, so we
// // don't want to keep it in the heap twice; it exists in the
// // rax in its compressed form.
1055// ptr,
1056// len,
1057// &value as *const _ as *mut u8,
1058// old,
1059// );
1060//
1061// if r == 0 && Errno::last() == Errno::ENOMEM {
1062// Err(RaxError::OutOfMemory())
1063// } else if old.is_null() {
1064// Ok(None)
1065// } else {
1066// Ok(Some(std::mem::transmute(*old)))
1067// }
1068// }
1069// }
1070//
1071// /// Insert a new entry into the RAX if an existing one does not exist.
1072// pub fn try_insert(&mut self, key: K, data: usize) -> Result<Option<usize>, RaxError> {
1073// unsafe {
1074// // Allocate a pointer to catch the old value.
1075// let old: &mut *mut u8 = &mut ptr::null_mut();
1076//
1077// // Integer values require Big Endian to allow the Rax to fully optimize
1078// // storing them since it will be able to compress the prefixes especially
1079// // for 64/128bit numbers.
1080// let k = key.encode();
1081// let (ptr, len) = k.to_buf();
1082//
1083// let r = raxTryInsert(
1084// self.rax,
1085// // Grab a raw pointer to the key. Keys are most likely allocated
// // on the stack. The rax will keep its own copy of the key, so we
// // don't want to keep it in the heap twice; it exists in the
// // rax in its compressed form.
1089// ptr,
1090// len,
1091// &data as *const _ as *mut u8,
1092// old,
1093// );
1094//
1095// if r == 0 {
1096// if Errno::last() == Errno::ENOMEM {
1097// Err(RaxError::OutOfMemory())
1098// } else if old.is_null() {
1099// Ok(None)
1100// } else {
1101// Ok(Some(transmute(*old)))
1102// }
1103// } else if old.is_null() {
1104// Ok(None)
1105// } else {
1106// Ok(Some(std::mem::transmute(*old)))
1107// }
1108// }
1109// }
1110//}
1111
/// A type that can be used as a key in a rax tree.
///
/// Keys are converted to a contiguous byte buffer before being handed to the
/// C rax functions. Numeric keys are encoded big-endian (see the integer
/// impls below) so the rax can prefix-compress them and iterate in numeric
/// order.
pub trait RaxKey<RHS = Self>: Clone + Default + std::fmt::Debug {
    /// The encoded wire representation (e.g. `u32` for `f32`).
    type Output: RaxKey;

    /// Convert this key into its encoded form.
    fn encode(self) -> Self::Output;

    /// Return a pointer/length pair over the encoded key bytes.
    fn to_buf(&self) -> (*const u8, usize);

    /// Reconstruct a key from raw bytes owned by the rax.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for reads of `len` bytes.
    unsafe fn from_buf(ptr: *const u8, len: usize) -> RHS;
}
1124
1125impl RaxKey for f32 {
1126 type Output = u32;
1127
1128 #[inline]
1129 fn encode(self) -> Self::Output {
1130 // Encode as u32 Big Endian
1131 self.to_bits().to_be()
1132 }
1133
1134 #[inline]
1135 fn to_buf(&self) -> (*const u8, usize) {
1136 // This should never get called since we represent as a u32
1137 (
1138 self as *const _ as *const u8,
1139 std::mem::size_of::<Self::Output>(),
1140 )
1141 }
1142
1143 #[inline]
1144 unsafe fn from_buf(ptr: *const u8, len: usize) -> f32 {
1145 if len != size_of::<Self::Output>() {
1146 return Self::default();
1147 }
1148 unsafe {
1149 // We used a BigEndian u32 to encode so let's reverse it
1150 f32::from_bits(u32::from_be(
1151 *(ptr as *mut [u8; std::mem::size_of::<Self::Output>()] as *mut u32),
1152 ))
1153 }
1154 }
1155}
1156
1157impl RaxKey for f64 {
1158 type Output = u64;
1159
1160 #[inline]
1161 fn encode(self) -> Self::Output {
1162 // Encode as u64 Big Endian
1163 self.to_bits().to_be()
1164 }
1165
1166 #[inline]
1167 fn to_buf(&self) -> (*const u8, usize) {
1168 // This should never get called since we represent as a u64
1169 (self as *const _ as *const u8, size_of::<Self::Output>())
1170 }
1171
1172 #[inline]
1173 unsafe fn from_buf(ptr: *const u8, len: usize) -> f64 {
1174 if len != size_of::<Self::Output>() {
1175 return Self::default();
1176 }
1177 unsafe {
1178 // We used a BigEndian u64 to encode so let's reverse it
1179 f64::from_bits(u64::from_be(
1180 *(ptr as *mut [u8; size_of::<Self::Output>()] as *mut u64),
1181 ))
1182 }
1183 }
1184}
1185
1186impl RaxKey for isize {
1187 type Output = isize;
1188
1189 #[inline]
1190 fn encode(self) -> Self::Output {
1191 self.to_be()
1192 }
1193
1194 #[inline]
1195 fn to_buf(&self) -> (*const u8, usize) {
1196 (self as *const _ as *const u8, size_of::<Self::Output>())
1197 }
1198
1199 #[inline]
1200 unsafe fn from_buf(ptr: *const u8, len: usize) -> isize {
1201 if len != size_of::<Self::Output>() {
1202 return Self::default();
1203 }
1204 unsafe { isize::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut isize)) }
1205 }
1206}
1207
1208impl RaxKey for usize {
1209 type Output = usize;
1210
1211 #[inline]
1212 fn encode(self) -> Self::Output {
1213 self.to_be()
1214 }
1215
1216 #[inline]
1217 fn to_buf(&self) -> (*const u8, usize) {
1218 (
1219 self as *const _ as *const u8,
1220 std::mem::size_of::<Self::Output>(),
1221 )
1222 }
1223
1224 #[inline]
1225 unsafe fn from_buf(ptr: *const u8, len: usize) -> usize {
1226 if len != size_of::<Self::Output>() {
1227 return Self::default();
1228 }
1229 unsafe {
1230 usize::from_be(*(ptr as *mut [u8; std::mem::size_of::<Self::Output>()] as *mut usize))
1231 }
1232 }
1233}
1234
1235impl RaxKey for i16 {
1236 type Output = i16;
1237
1238 #[inline]
1239 fn encode(self) -> Self::Output {
1240 self.to_be()
1241 }
1242
1243 #[inline]
1244 fn to_buf(&self) -> (*const u8, usize) {
1245 (self as *const _ as *const u8, size_of::<Self::Output>())
1246 }
1247
1248 #[inline]
1249 unsafe fn from_buf(ptr: *const u8, len: usize) -> Self {
1250 if len != size_of::<Self::Output>() {
1251 return Self::default();
1252 }
1253 unsafe { i16::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut i16)) }
1254 }
1255}
1256
1257impl RaxKey for u16 {
1258 type Output = u16;
1259
1260 #[inline]
1261 fn encode(self) -> Self::Output {
1262 self.to_be()
1263 }
1264
1265 #[inline]
1266 fn to_buf(&self) -> (*const u8, usize) {
1267 (self as *const _ as *const u8, size_of::<Self::Output>())
1268 }
1269
1270 #[inline]
1271 unsafe fn from_buf(ptr: *const u8, len: usize) -> u16 {
1272 if len != size_of::<Self::Output>() {
1273 return Self::default();
1274 }
1275 unsafe { u16::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut u16)) }
1276 }
1277}
1278
1279impl RaxKey for i32 {
1280 type Output = i32;
1281
1282 #[inline]
1283 fn encode(self) -> Self::Output {
1284 self.to_be()
1285 }
1286
1287 #[inline]
1288 fn to_buf(&self) -> (*const u8, usize) {
1289 (self as *const _ as *const u8, size_of::<Self::Output>())
1290 }
1291
1292 #[inline]
1293 unsafe fn from_buf(ptr: *const u8, len: usize) -> i32 {
1294 if len != size_of::<Self::Output>() {
1295 return Self::default();
1296 }
1297 unsafe { i32::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut i32)) }
1298 }
1299}
1300
1301impl RaxKey for u32 {
1302 type Output = u32;
1303
1304 #[inline]
1305 fn encode(self) -> Self::Output {
1306 self.to_be()
1307 }
1308
1309 #[inline]
1310 fn to_buf(&self) -> (*const u8, usize) {
1311 (self as *const _ as *const u8, size_of::<Self::Output>())
1312 }
1313
1314 #[inline]
1315 unsafe fn from_buf(ptr: *const u8, len: usize) -> u32 {
1316 if len != size_of::<Self::Output>() {
1317 return Self::default();
1318 }
1319 unsafe { u32::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut u32)) }
1320 }
1321}
1322
1323impl RaxKey for i64 {
1324 type Output = i64;
1325
1326 #[inline]
1327 fn encode(self) -> Self::Output {
1328 self.to_be()
1329 }
1330
1331 #[inline]
1332 fn to_buf(&self) -> (*const u8, usize) {
1333 (self as *const _ as *const u8, size_of::<Self::Output>())
1334 }
1335
1336 #[inline]
1337 unsafe fn from_buf(ptr: *const u8, len: usize) -> i64 {
1338 if len != size_of::<Self::Output>() {
1339 return Self::default();
1340 }
1341 unsafe { i64::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut i64)) }
1342 }
1343}
1344
1345impl RaxKey for u64 {
1346 type Output = u64;
1347
1348 #[inline]
1349 fn encode(self) -> Self::Output {
1350 self.to_be()
1351 }
1352
1353 #[inline]
1354 fn to_buf(&self) -> (*const u8, usize) {
1355 (self as *const _ as *const u8, size_of::<Self::Output>())
1356 }
1357
1358 #[inline]
1359 unsafe fn from_buf(ptr: *const u8, len: usize) -> u64 {
1360 if len != size_of::<Self::Output>() {
1361 return Self::default();
1362 }
1363 unsafe { u64::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut u64)) }
1364 }
1365}
1366
1367impl RaxKey for i128 {
1368 type Output = i128;
1369
1370 #[inline]
1371 fn encode(self) -> Self::Output {
1372 self.to_be()
1373 }
1374
1375 #[inline]
1376 fn to_buf(&self) -> (*const u8, usize) {
1377 (self as *const _ as *const u8, size_of::<Self::Output>())
1378 }
1379
1380 #[inline]
1381 unsafe fn from_buf(ptr: *const u8, len: usize) -> i128 {
1382 if len != size_of::<Self::Output>() {
1383 return Self::default();
1384 }
1385 unsafe { i128::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut i128)) }
1386 }
1387}
1388
1389impl RaxKey for u128 {
1390 type Output = u128;
1391
1392 #[inline]
1393 fn encode(self) -> Self::Output {
1394 self.to_be()
1395 }
1396
1397 #[inline]
1398 fn to_buf(&self) -> (*const u8, usize) {
1399 (self as *const _ as *const u8, size_of::<Self::Output>())
1400 }
1401
1402 #[inline]
1403 unsafe fn from_buf(ptr: *const u8, len: usize) -> u128 {
1404 if len != size_of::<Self::Output>() {
1405 return Self::default();
1406 }
1407 unsafe { u128::from_be(*(ptr as *mut [u8; size_of::<Self::Output>()] as *mut u128)) }
1408 }
1409}
1410
1411impl RaxKey for Vec<u8> {
1412 type Output = Vec<u8>;
1413
1414 #[inline]
1415 fn encode(self) -> Vec<u8> {
1416 self
1417 }
1418
1419 #[inline]
1420 fn to_buf(&self) -> (*const u8, usize) {
1421 (self.as_ptr(), self.len())
1422 }
1423
1424 #[inline]
1425 unsafe fn from_buf(ptr: *const u8, len: usize) -> Vec<u8> {
1426 unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) }
1427 }
1428}
1429
impl<'a> RaxKey for &'a [u8] {
    type Output = &'a [u8];

    // Byte-slice keys are used verbatim; no endian encoding is needed.
    #[inline]
    fn encode(self) -> &'a [u8] {
        self
    }

    #[inline]
    fn to_buf(&self) -> (*const u8, usize) {
        (self.as_ptr(), self.len())
    }

    // NOTE(review): the returned slice borrows rax-owned key storage yet is
    // handed the caller-chosen lifetime `'a` — confirm callers do not keep it
    // past mutation or drop of the underlying rax.
    #[inline]
    unsafe fn from_buf(ptr: *const u8, len: usize) -> &'a [u8] {
        unsafe { std::slice::from_raw_parts(ptr, len) }
    }
}
1448
1449//impl RaxKey for SDS {
1450// type Output = SDS;
1451//
1452// #[inline]
1453// fn encode(self) -> Self::Output {
1454// self
1455// }
1456//
1457// #[inline]
1458// fn to_buf(&self) -> (*const u8, usize) {
1459// (self.as_ptr(), self.len())
1460// }
1461//
1462// #[inline]
1463// unsafe fn from_buf(ptr: *const u8, len: usize) -> SDS {
1464// SDS::from_ptr(ptr, len)
1465// }
1466//}
1467
impl<'a> RaxKey for &'a str {
    type Output = &'a str;

    // String keys are used verbatim; no endian encoding is needed.
    #[inline]
    fn encode(self) -> Self::Output {
        self
    }

    #[inline]
    fn to_buf(&self) -> (*const u8, usize) {
        ((*self).as_ptr(), self.len())
    }

    // Returns "" when the stored bytes are not valid UTF-8.
    //
    // NOTE(review): the returned str borrows rax-owned key storage yet is
    // handed the caller-chosen lifetime `'a` — confirm callers do not keep it
    // past mutation or drop of the underlying rax.
    #[inline]
    unsafe fn from_buf(ptr: *const u8, len: usize) -> &'a str {
        unsafe { std::str::from_utf8(std::slice::from_raw_parts(ptr, len)).unwrap_or_default() }
    }
}
1486
/// Rust-side mirror of the C `raxIterator` struct (`#[repr(C)]` so it can be
/// passed straight to the rax FFI functions), plus a `PhantomData` recording
/// the key/value types. Field order must match the C definition exactly.
#[repr(C)]
pub struct RaxIterator<K: RaxKey, V> {
    /// Iterator state flags (e.g. `RAX_ITER_EOF`, checked by `eof()`).
    pub flags: libc::c_int,
    /// The rax this iterator walks.
    pub rt: *mut rax,
    /// Current key bytes (may point at `key_static_string`).
    pub key: *mut u8,
    /// Data (value) pointer associated with the current key.
    pub data: *mut libc::c_void,
    /// Length in bytes of the current key.
    pub key_len: libc::size_t,
    /// Capacity of the key buffer; equals `RAX_ITER_STATIC_LEN` while the
    /// inline buffer below is in use (see the `Drop` impl).
    pub key_max: libc::size_t,
    /// Inline storage for short keys, avoiding a heap allocation.
    pub key_static_string: [u8; 128],
    /// Current node.
    pub node: *mut raxNode,
    /// Stack of parent nodes used while walking.
    pub stack: raxStack,
    /// Optional per-node callback used by the C iterator.
    pub node_cb: Option<raxNodeCallback>,
    _marker: std::marker::PhantomData<(K, V)>,
}
1501
/// Free up memory held by the C-side iterator state.
impl<K: RaxKey, V> Drop for RaxIterator<K, V> {
    fn drop(&mut self) {
        unsafe {
            if self.key_max == RAX_ITER_STATIC_LEN as usize {
                // Key wasn't heap allocated.
                // Force `key` back to the current address of the inline
                // buffer — presumably because the struct may have been moved
                // since raxStart() left `key` pointing at the old location.
                // TODO(review): confirm against the C raxStop() behavior.
                self.key = self.key_static_string.as_mut_ptr();
            }
            // raxStop() releases any heap-allocated key buffer.
            raxStop(self as *const _ as *const raxIterator);
        }
    }
}
1515
1516/// Implement std::Iterator
1517impl<K: RaxKey, V: 'static> Iterator for RaxIterator<K, V> {
1518 type Item = (K, Option<&'static V>);
1519
1520 fn next(&mut self) -> Option<<Self as Iterator>::Item> {
1521 unsafe {
1522 if raxNext(self as *const _ as *const raxIterator) == 1 {
1523 let data: *mut libc::c_void = self.data;
1524 if data.is_null() {
1525 None
1526 } else {
1527 let val = data as *const V;
1528 if val.is_null() {
1529 Some((self.key(), None))
1530 } else {
1531 Some((self.key(), Some(&*val)))
1532 }
1533 }
1534 } else {
1535 None
1536 }
1537 }
1538 }
1539}
1540
1541/// Implement std::DoubleEndedIterator
1542impl<K: RaxKey, V: 'static> DoubleEndedIterator for RaxIterator<K, V> {
1543 fn next_back(&mut self) -> Option<<Self as Iterator>::Item> {
1544 unsafe {
1545 if raxPrev(self as *const _ as *const raxIterator) == 1 {
1546 let data: *mut libc::c_void = self.data;
1547 if data.is_null() {
1548 None
1549 } else {
1550 let val = data as *const V;
1551 if val.is_null() {
1552 Some((self.key(), None))
1553 } else {
1554 Some((self.key(), Some(&*val)))
1555 }
1556 }
1557 } else {
1558 None
1559 }
1560 }
1561 }
1562}
1563
1564/// Core iterator implementation
1565impl<K: RaxKey, V> RaxIterator<K, V> {
1566 pub fn new(r: RaxMap<K, V>) -> RaxIterator<K, V> {
1567 unsafe {
1568 // Allocate stack memory.
1569 // Initialize a Rax iterator. This call should be performed a single time
1570 // to initialize the iterator, and must be followed by a raxSeek() call,
1571 // otherwise the raxPrev()/raxNext() functions will just return EOF.
1572 let mut iter = MaybeUninit::<RaxIterator<K, V>>::zeroed().assume_init();
1573 raxStart(&mut iter as *mut _ as *mut _, r.rax);
1574 iter
1575 }
1576 }
1577
1578 pub fn print_ptr(&self) {
1579 println!("ptr = {:p}", self);
1580 println!("ptr = {:p}", self as *const _ as *const raxIterator);
1581 }
1582
1583 #[inline]
1584 pub fn seek_min(&self) -> bool {
1585 unsafe {
1586 if raxSeek(
1587 self as *const _ as *const raxIterator,
1588 BEGIN.as_ptr(),
1589 std::ptr::null(),
1590 0,
1591 ) == 1
1592 {
1593 self.forward()
1594 } else {
1595 false
1596 }
1597 }
1598 }
1599
1600 #[inline]
1601 pub fn seek_max(&self) -> bool {
1602 unsafe {
1603 if raxSeek(
1604 self as *const _ as *const raxIterator,
1605 END.as_ptr(),
1606 std::ptr::null(),
1607 0,
1608 ) == 1
1609 {
1610 self.back()
1611 } else {
1612 false
1613 }
1614 }
1615 }
1616
1617 #[inline]
1618 pub fn back(&self) -> bool {
1619 unsafe { raxPrev(self as *const _ as *const raxIterator) == 1 }
1620 }
1621
1622 #[inline]
1623 pub fn forward(&self) -> bool {
1624 unsafe { raxNext(self as *const _ as *const raxIterator) == 1 }
1625 }
1626
1627 /// Key at current position
1628 #[inline]
1629 pub fn key(&self) -> K {
1630 unsafe { K::from_buf(self.key, self.key_len) }
1631 }
1632
1633 /// Data at current position.
1634 #[inline]
1635 pub fn value(&self) -> Option<&V> {
1636 unsafe {
1637 let data: *mut libc::c_void = self.data;
1638 if data.is_null() {
1639 None
1640 } else {
1641 Some(&*(data as *const V))
1642 }
1643 }
1644 }
1645
1646 #[inline]
1647 pub fn lesser(&self, key: K) -> bool {
1648 self.seek(LESSER, key)
1649 }
1650
1651 #[inline]
1652 pub fn lesser_equal(&self, key: K) -> bool {
1653 self.seek(LESSER_EQUAL, key)
1654 }
1655
1656 #[inline]
1657 pub fn greater(&self, key: K) -> bool {
1658 self.seek(GREATER, key)
1659 }
1660
1661 #[inline]
1662 pub fn greater_equal(&self, key: K) -> bool {
1663 self.seek(GREATER_EQUAL, key)
1664 }
1665
1666 #[inline]
1667 pub fn seek(&self, op: &str, key: K) -> bool {
1668 unsafe {
1669 let k = key.encode();
1670 let (p, len) = k.to_buf();
1671 raxSeek(self as *const _ as *const raxIterator, op.as_ptr(), p, len) == 1
1672 && self.flags & RAX_ITER_EOF != 0
1673 }
1674 }
1675
1676 #[inline]
1677 pub fn seek_raw(&self, op: &str, key: K) -> i32 {
1678 unsafe {
1679 let k = key.encode();
1680 let (p, len) = k.to_buf();
1681 raxSeek(self as *const _ as *const raxIterator, op.as_ptr(), p, len)
1682 }
1683 }
1684
1685 #[inline]
1686 pub fn seek_bytes(&self, op: &str, ele: &[u8]) -> bool {
1687 unsafe {
1688 raxSeek(
1689 self as *const _ as *const raxIterator,
1690 op.as_ptr(),
1691 ele.as_ptr(),
1692 ele.len() as libc::size_t,
1693 ) == 1
1694 }
1695 }
1696
1697 /// Return if the iterator is in an EOF state. This happens when raxSeek()
1698 /// failed to seek an appropriate element, so that raxNext() or raxPrev()
1699 /// will return zero, or when an EOF condition was reached while iterating
1700 /// with next() and prev().
1701 #[inline]
1702 pub fn eof(&self) -> bool {
1703 self.flags & RAX_ITER_EOF != 0
1704 }
1705}
1706
1707impl fmt::Display for RaxError {
1708 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1709 match *self {
1710 // Both underlying errors already impl `Display`, so we defer to
1711 // their implementations.
1712 RaxError::Generic(ref err) => write!(f, "{}", err),
1713 RaxError::OutOfMemory() => write!(f, "out of memory"),
1714 }
1715 }
1716}
1717
1718impl error::Error for RaxError {
1719 fn source(&self) -> Option<&(dyn error::Error + 'static)> {
1720 match *self {
1721 // N.B. Both of these implicitly cast `err` from their concrete
1722 // types (either `&io::Error` or `&num::ParseIntError`)
1723 // to a trait object `&Error`. This works because both error types
1724 // implement `Error`.
1725 RaxError::Generic(ref err) => Some(err),
1726 RaxError::OutOfMemory() => Some(self),
1727 }
1728 }
1729}
1730
/// A simple string-message error type.
#[derive(Debug)]
pub struct GenericError {
    // Human-readable description of what went wrong.
    message: String,
}
1735
1736impl GenericError {
1737 pub fn new(message: &str) -> GenericError {
1738 GenericError {
1739 message: String::from(message),
1740 }
1741 }
1742}
1743
1744impl fmt::Display for GenericError {
1745 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1746 write!(f, "Store error: {}", self.message)
1747 }
1748}
1749
// GenericError has no underlying cause; the default trait methods suffice.
impl error::Error for GenericError {}
1751
/// Opaque handle for the C `rax` struct; only ever used behind raw pointers.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct rax;
1755
/// Opaque handle for the C `raxNode` struct; only ever used behind raw pointers.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct raxNode;
1759
/// Mirror of the C `raxStack`: a stack of node pointers with inline storage
/// for the first 32 items. Layout must match the C definition exactly.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct raxStack {
    // Pointer to the item array (may point at `static_items`).
    stack: *mut *mut libc::c_void,
    // Number of items currently on the stack.
    items: libc::size_t,
    // Capacity of the current item array.
    maxitems: libc::size_t,
    // Inline storage used before spilling to the heap.
    static_items: [*mut libc::c_void; 32],
    // Non-zero when a heap allocation failed (out of memory).
    oom: libc::c_int,
}
1769
/// Opaque stand-in for the C `raxIterator`; the real layout lives in
/// `RaxIterator`, which is cast to this type at every FFI call site.
#[repr(C)]
pub struct raxIterator;
1772
#[allow(non_snake_case)]
#[allow(non_camel_case_types)]
/// C-callable trampoline that drops a boxed `V` when the rax frees a node.
extern "C" fn RaxFreeWithCallbackWrapper<V>(v: *mut libc::c_void) {
    unsafe {
        // SAFETY (caller contract): `v` must have been produced by
        // `Box::into_raw::<V>`. Re-box it so it drops — freeing the heap
        // allocation — immediately after it leaves this scope.
        drop(Box::from_raw(v as *mut V));
    }
}
1781
#[allow(non_camel_case_types)]
/// Signature of the per-node callback used by the C iterator.
type raxNodeCallback = extern "C" fn(v: *mut libc::c_void);

/// Signature of the value-free callback passed to `raxFreeWithCallback`.
type RaxFreeCallback = extern "C" fn(v: *mut libc::c_void);
1786
// Raw FFI surface of the statically linked C rax library.
#[allow(improper_ctypes)]
#[allow(non_snake_case)]
#[allow(non_camel_case_types)]
#[link(name = "rax", kind = "static")]
extern "C" {
    /// Sentinel pointer returned when a key is not present.
    pub static raxNotFound: *mut u8;

    // Allocator hooks used by the C rax implementation; may be replaced to
    // intercept allocations (see the test allocator hooks).
    pub static mut rax_malloc: extern "C" fn(size: libc::size_t) -> *mut u8;
    pub static mut rax_realloc:
        extern "C" fn(ptr: *mut libc::c_void, size: libc::size_t) -> *mut u8;
    pub static mut rax_free: extern "C" fn(ptr: *mut libc::c_void);

    /// Size in bytes of the C-side iterator struct.
    pub fn raxIteratorSize() -> libc::c_int;

    /// Allocate a new, empty rax.
    pub fn raxNew() -> *mut rax;

    /// Free the rax structure.
    pub fn raxFree(rax: *mut rax);

    /// Free the rax, invoking `callback` on each stored data pointer.
    pub fn raxFreeWithCallback(rax: *mut rax, callback: RaxFreeCallback);

    /// Insert `s[0..len]` with `data`; any replaced data pointer is written
    /// to `old`. Returns 0 with ENOMEM on allocation failure.
    pub fn raxInsert(
        rax: *mut rax,
        s: *const u8,
        len: libc::size_t,
        data: *const u8,
        old: &mut *mut u8,
    ) -> libc::c_int;

    /// Insert only if the key is not already present.
    pub fn raxTryInsert(
        rax: *mut rax,
        s: *const u8,
        len: libc::size_t,
        data: *const u8,
        old: *mut *mut u8,
    ) -> libc::c_int;

    /// Remove a key; the removed data pointer is written to `old`.
    pub fn raxRemove(
        rax: *mut rax,
        s: *const u8,
        len: libc::size_t,
        old: &mut *mut u8,
    ) -> libc::c_int;

    /// Look up a key; returns `raxNotFound` when absent.
    pub fn raxFind(rax: *mut rax, s: *const u8, len: libc::size_t) -> *mut u8;

    /// Heap-allocate a new C-side iterator for `rt`.
    pub fn raxIteratorNew(rt: *mut rax) -> *mut raxIterator;

    /// Initialize an iterator; must be followed by raxSeek().
    pub fn raxStart(it: *const raxIterator, rt: *mut rax);

    /// Seek using operator `op` ("<", "<=", "=", ">=", ">", "^", "$").
    pub fn raxSeek(
        it: *const raxIterator,
        op: *const u8,
        ele: *const u8,
        len: libc::size_t,
    ) -> libc::c_int;

    /// Advance; returns 1 while an element is available.
    pub fn raxNext(it: *const raxIterator) -> libc::c_int;

    /// Step back; returns 1 while an element is available.
    pub fn raxPrev(it: *const raxIterator) -> libc::c_int;

    /// Move the iterator a random number of steps (up to `steps`).
    pub fn raxRandomWalk(it: *const raxIterator, steps: libc::size_t) -> libc::c_int;

    /// Compare the iterator's current key against `key` using `op`.
    pub fn raxCompare(
        it: *const raxIterator,
        op: *const u8,
        key: *mut u8,
        key_len: libc::size_t,
    ) -> libc::c_int;

    /// Release iterator resources (heap key buffer, stack spill).
    pub fn raxStop(it: *const raxIterator);

    /// Returns non-zero when the iterator is at EOF.
    pub fn raxEOF(it: *const raxIterator) -> libc::c_int;

    /// Print an ASCII representation of the tree (debugging).
    pub fn raxShow(rax: *mut rax);

    /// Number of keys stored in the rax.
    pub fn raxSize(rax: *mut rax) -> u64;
}
1864
1865#[cfg(test)]
1866mod tests {
1867 extern crate test;
1868 use std::{
1869 self,
1870 default::Default,
1871 fmt,
1872 time::{Duration, Instant},
1873 };
1874
1875 use super::*;
1876
    /// Allocation hook that logs each call before delegating to libc malloc.
    extern "C" fn rax_malloc_hook(size: libc::size_t) -> *mut u8 {
        unsafe {
            println!("malloc");
            libc::malloc(size) as *mut u8
        }
    }
1883
    /// Reallocation hook that logs each call before delegating to libc realloc.
    extern "C" fn rax_realloc_hook(ptr: *mut libc::c_void, size: libc::size_t) -> *mut u8 {
        unsafe {
            println!("realloc");
            libc::realloc(ptr, size) as *mut u8
        }
    }
1890
    /// Free hook that logs each call before delegating to libc free.
    extern "C" fn rax_free_hook(ptr: *mut libc::c_void) {
        unsafe {
            println!("free");
            libc::free(ptr)
        }
    }
1897
    /// Test value type whose Drop logs its message, making frees observable.
    pub struct MyMsg<'a>(&'a str);
1899
    // Log the wrapped message when the value is dropped.
    impl<'a> Drop for MyMsg<'a> {
        fn drop(&mut self) {
            println!("dropped -> {}", self.0);
        }
    }
1905
    /// Minimal wall-clock stopwatch used by the benchmark tests.
    #[derive(Clone, Copy)]
    pub struct Stopwatch {
        // When running, the instant the current segment started; None when stopped.
        start_time: Option<Instant>,
        // Time accumulated from previously stopped segments.
        elapsed: Duration,
    }
1911
1912 impl Default for Stopwatch {
1913 fn default() -> Stopwatch {
1914 Stopwatch {
1915 start_time: None,
1916 elapsed: Duration::from_secs(0),
1917 }
1918 }
1919 }
1920
1921 impl fmt::Display for Stopwatch {
1922 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1923 return write!(f, "{}ms", self.elapsed_ms());
1924 }
1925 }
1926
1927 #[expect(unused)]
1928 impl Stopwatch {
1929 pub fn new() -> Stopwatch {
1930 let sw: Stopwatch = Default::default();
1931 return sw;
1932 }
1933 pub fn start_new() -> Stopwatch {
1934 let mut sw = Stopwatch::new();
1935 sw.start();
1936 return sw;
1937 }
1938
1939 pub fn start(&mut self) {
1940 self.start_time = Some(Instant::now());
1941 }
1942 pub fn stop(&mut self) {
1943 self.elapsed = self.elapsed();
1944 self.start_time = None;
1945 }
1946 pub fn reset(&mut self) {
1947 self.elapsed = Duration::from_secs(0);
1948 self.start_time = None;
1949 }
1950 pub fn restart(&mut self) {
1951 self.reset();
1952 self.start();
1953 }
1954
1955 pub fn is_running(&self) -> bool {
1956 return self.start_time.is_some();
1957 }
1958
1959 pub fn elapsed(&self) -> Duration {
1960 match self.start_time {
1961 Some(t1) => {
1962 return t1.elapsed() + self.elapsed;
1963 }
1964 None => {
1965 return self.elapsed;
1966 }
1967 }
1968 }
1969 pub fn elapsed_ms(&self) -> i64 {
1970 let dur = self.elapsed();
1971 return (dur.as_secs() * 1000 + (dur.subsec_nanos() / 1000000) as u64) as i64;
1972 }
1973 }
1974
    /// Rough wall-clock comparison of rax lookups/inserts against the std
    /// collections. Printed times only; no assertions.
    #[test]
    fn bench() {
        let ops = 1000000;
        println!("{} operations per function", ops);

        // Run twice so the second pass is warmed up.
        for _ in 0..2 {
            println!();
            println!("Gets...");
            // Each block below warms the structure with 2000 keys, then
            // times `ops` lookups of a single key.
            {
                let r = &mut RaxSet::<u64>::new();
                for x in 0..2000 {
                    r.insert(x).expect("whoops!");
                }

                let sw = Stopwatch::start_new();
                for _po in 0..ops {
                    r.exists(1601);
                }

                println!("RaxSet::get {}ms", sw.elapsed_ms());
            }
            {
                let r = &mut RaxMap::<u64, &str>::new();
                for x in 0..2000 {
                    r.insert_null(x).expect("whoops!");
                }

                match r.find(1601) {
                    Some(v) => println!("{}", v),
                    None => {}
                }

                let sw = Stopwatch::start_new();

                for _po in 0..ops {
                    r.find(1601);
                }

                println!("RaxMap::get {}ms", sw.elapsed_ms());
            }

            {
                let r = &mut RaxMap::<u64, &str>::new();
                for x in 0..2000 {
                    r.insert_null(x).expect("whoops!");
                }
                let sw = Stopwatch::start_new();

                // Lookup through the cursor API instead of find().
                for _po in 0..ops {
                    r.iter(|_, iter| {
                        iter.seek(EQUAL, 1601);
                    });
                }

                println!("RaxCursor:seek {}ms", sw.elapsed_ms());
            }
            {
                let r = &mut std::collections::HashSet::<u64>::new();
                for x in 0..2000 {
                    r.insert(x);
                }

                let sw = Stopwatch::start_new();

                let xx = 300;
                for _po in 0..ops {
                    r.get(&xx);
                }

                println!("HashSet::get {}ms", sw.elapsed_ms());
            }
            {
                let r = &mut std::collections::HashMap::<u64, &str>::new();
                for x in 0..2000 {
                    r.insert(x, "");
                }

                let sw = Stopwatch::start_new();

                let xx = 300;
                for _po in 0..ops {
                    r.get(&xx);
                }

                println!("HashMap::get {}ms", sw.elapsed_ms());
            }
            {
                let r = &mut std::collections::BTreeSet::<u64>::new();
                for x in 0..2000 {
                    r.insert(x);
                }

                let sw = Stopwatch::start_new();

                let xx = 300;
                for _po in 0..ops {
                    r.get(&xx);
                }

                println!("BTreeSet::get {}ms", sw.elapsed_ms());
            }
            {
                let r = &mut std::collections::BTreeMap::<u64, &str>::new();
                for x in 0..2000 {
                    r.insert(x, "");
                }

                let sw = Stopwatch::start_new();

                let xx = 300;
                for _po in 0..ops {
                    r.get(&xx);
                }

                println!("BTreeMap::get {}ms", sw.elapsed_ms());
            }

            println!();
            println!("Inserts...");
            // Each block below times `ops` sequential inserts into an
            // initially empty structure.
            {
                let r = &mut RaxMap::<u64, &str>::new();
                let sw = Stopwatch::start_new();

                for x in 0..ops {
                    r.insert(x, Box::new("")).expect("whoops!");
                }

                println!("RaxMap::insert {}ms", sw.elapsed_ms());
            }

            {
                let r = &mut RaxSet::<u64>::new();
                let sw = Stopwatch::start_new();

                for x in 0..ops {
                    r.insert(x).expect("whoops!");
                }

                println!("RaxSet::insert {}ms", sw.elapsed_ms());
            }

            {
                let r = &mut std::collections::BTreeSet::<u64>::new();
                let sw = Stopwatch::start_new();

                for x in 0..ops {
                    r.insert(x);
                }

                println!("BTreeSet::insert {}ms", sw.elapsed_ms());
            }
            {
                let r = &mut std::collections::BTreeMap::<u64, &str>::new();
                let sw = Stopwatch::start_new();

                for x in 0..ops {
                    r.insert(x, "");
                }

                println!("BTreeMap::insert {}ms", sw.elapsed_ms());
            }

            {
                let r = &mut std::collections::HashMap::<u64, &str>::new();
                let sw = Stopwatch::start_new();

                for x in 0..ops {
                    r.insert(x, "");
                }

                println!("HashMap::insert {}ms", sw.elapsed_ms());
            }
        }
    }
2149
2150 #[test]
2151 fn bench_rax_find() {
2152 for _ in 0..10 {
2153 let r = &mut RaxMap::<u64, &str>::new();
2154 for x in 0..2000 {
2155 r.insert_null(x).expect("whoops!");
2156 }
2157
2158 match r.find(1601) {
2159 Some(v) => println!("{}", v),
2160 None => {}
2161 }
2162
2163 let sw = Stopwatch::start_new();
2164
2165 for _po in 0..1000000 {
2166 r.find(1601);
2167 }
2168
2169 println!("Thing took {}ms", sw.elapsed_ms());
2170 }
2171 }
2172
2173 #[test]
2174 fn bench_rax_cur_find() {
2175 for _ in 0..10 {
2176 let r = &mut RaxMap::<u64, &str>::new();
2177 for x in 0..2000 {
2178 r.insert_null(x).expect("whoops!");
2179 }
2180
2181 match r.find(1601) {
2182 Some(v) => println!("{}", v),
2183 None => {}
2184 }
2185
2186 let sw = Stopwatch::start_new();
2187
2188 for _po in 0..1000000 {
2189 r.iter(|_, iter| {
2190 iter.seek(EQUAL, 1601);
2191 });
2192 }
2193
2194 println!("RaxMap::cursor_find {}ms", sw.elapsed_ms());
2195 }
2196 }
2197
    /// Time one million inserts into an empty map, ten times.
    #[test]
    fn bench_rax_insert() {
        for _ in 0..10 {
            let r = &mut RaxMap::<u64, &str>::new();
            //
            let sw = Stopwatch::start_new();

            for x in 0..1000000 {
                r.insert(x, Box::new("")).expect("whoops!");
            }

            println!("RaxMap::insert {}ms", sw.elapsed_ms());
            println!("Size {}", r.size());
        }
    }
2213
    /// Insert 100 keys, dump the tree via raxShow(), and verify the size.
    #[test]
    fn bench_rax_insert_show() {
        let r = &mut RaxMap::<u64, &str>::new();
        //
        let sw = Stopwatch::start_new();

        for x in 0..100 {
            r.insert(x, Box::new("")).expect("whoops!");
        }

        r.show();
        println!("RaxMap::insert {}ms", sw.elapsed_ms());
        assert_eq!(r.size(), 100);
    }
2228
    /// Time replacing every existing key and verify the size is unchanged.
    #[test]
    fn bench_rax_replace() {
        let ops = 1000000;
        for _ in 0..2 {
            let r = &mut RaxMap::<u64, &str>::new();
            // Insert values
            for x in 0..ops {
                r.insert(x, Box::new("")).expect("whoops!");
            }

            let sw = Stopwatch::start_new();

            for x in 0..ops {
                // Replace existing key
                r.insert(x, Box::new("")).expect("whoops!");
            }

            println!("RaxMap::replace {}ms", sw.elapsed_ms());
            // Replacing must not change the number of keys.
            assert_eq!(r.size(), ops);
        }
    }
2250
    /// Exercise &str keys: insert overlapping prefixes, look one up, then
    /// walk the tree forward and backward while the logging allocator runs.
    #[test]
    fn key_str() {
        unsafe {
            // Route rax allocations through the logging hooks above.
            set_allocator(rax_malloc_hook, rax_realloc_hook, rax_free_hook);
        }

        let mut r = RaxMap::<&str, MyMsg>::new();

        let key = "hello-way";

        // Keys deliberately share prefixes to exercise node compression.
        r.insert(key, Box::new(MyMsg("world 80"))).expect("whoops!");
        r.insert("hello-war", Box::new(MyMsg("world 80")))
            .expect("whoops!");

        r.insert("hello-wares", Box::new(MyMsg("world 80")))
            .expect("whoops!");
        r.insert("hello", Box::new(MyMsg("world 100")))
            .expect("whoops!");

        {
            match r.find("hello") {
                Some(v) => println!("Found {}", v.0),
                None => println!("Not Found"),
            }
        }

        r.show();

        r.iter(|_, iter| {
            // Forward walk from the smallest key...
            if !iter.seek_min() {
                return;
            }
            while iter.forward() {
                println!("{}", iter.key());
            }
            // ...then a backward walk from the largest key.
            if !iter.seek_max() {
                return;
            }
            while iter.back() {
                println!("{}", iter.key());
            }
        });
    }
2294
    /// Exercise f64 keys: insert a few values and walk the tree both ways.
    #[test]
    fn key_f64() {
        println!("sizeof(Rax) {}", std::mem::size_of::<RaxMap<f64, MyMsg>>());

        let mut r = RaxMap::<f64, MyMsg>::new();

        r.insert(100.01, Box::new(MyMsg("world 100")))
            .expect("whoops!");
        r.insert(80.20, Box::new(MyMsg("world 80")))
            .expect("whoops!");
        r.insert(100.00, Box::new(MyMsg("world 200")))
            .expect("whoops!");
        r.insert(99.10, Box::new(MyMsg("world 1")))
            .expect("whoops!");

        r.show();

        r.iter(|_, iter| {
            // for (k, v) in iter {
            //
            // }
            // Forward walk from the smallest key...
            iter.seek_min();
            while iter.forward() {
                println!("{}", iter.key());
            }
            // ...then a backward walk from the largest key.
            iter.seek_max();
            while iter.back() {
                println!("{}", iter.key());
            }
        });
    }
2326
2327 #[test]
2328 fn key_u64() {
2329 println!("sizeof(Rax) {}", std::mem::size_of::<RaxMap<u64, MyMsg>>());
2330
2331 let mut r = RaxMap::<u64, MyMsg>::new();
2332
2333 r.insert(100, Box::new(MyMsg("world 100")))
2334 .expect("whoops!");
2335 r.insert(80, Box::new(MyMsg("world 80"))).expect("whoops!");
2336 r.insert(200, Box::new(MyMsg("world 200")))
2337 .expect("whoops!");
2338 r.insert(1, Box::new(MyMsg("world 1"))).expect("whoops!");
2339
2340 r.show();
2341
2342 // let result = r.iter_result(move |it| {
2343 //
2344 // if !it.seek(GREATER_EQUAL, 800) {
2345 // println!("Not Found");
2346 // return Ok("");
2347 // }
2348 //
2349 // if it.eof() {
2350 // println!("Not Found");
2351 // return Ok("");
2352 // }
2353 //
2354 // while it.forward() {
2355 // println!("Key Len = {}", it.key());
2356 // println!("Data = {}", it.data().unwrap().0);
2357 // }
2358 //
2359 // Ok("")
2360 // });
2361
2362 // r.seek(GREATER_EQUAL, 80, |_, iter| {
2363 // for (key, value) in iter {
2364 // println!("Key Len = {}", key);
2365 // println!("Data = {}", value.unwrap().0);
2366 // }
2367 // });
2368
2369 // r.seek_result(GREATER_EQUAL, 80, |_, iter| {
2370 // for (key, value) in iter {
2371 // println!("Key Len = {}", key);
2372 // println!("Data = {}", value.unwrap().0);
2373 // }
2374 // Ok(())
2375 // });
2376
2377 r.seek_min(|_, it| {
2378 for (key, value) in it.rev() {
2379 println!("Key Len = {}", key);
2380 println!("Data = {}", value.unwrap().0);
2381 }
2382 });
2383
2384 // r.iter(move |it| {
2385 // if !it.seek(GREATER_EQUAL, 800) {
2386 // println!("Not Found");
2387 // return;
2388 // }
2389 //
2390 //
2391 //
2392 // while it.forward() {
2393 // println!("Key Len = {}", it.key());
2394 // println!("Data = {}", it.data().unwrap().0);
2395 // }
2396 // });
2397
2398 // let result = r.iter_apply(move |r, it| {
2399 // if !it.seek(GREATER_EQUAL, 800) {
2400 // println!("Out of Memory");
2401 // return Ok("");
2402 // }
2403 //
2404 // r.insert(800, Box::new(MyMsg("moved")));
2405 // it.seek(GREATER_EQUAL, 800);
2406 //
2407 // if it.eof() {
2408 // println!("Not Found");
2409 // return Ok("");
2410 // }
2411 //
2412 // while it.back() {
2413 // println!("Key Len = {}", it.key());
2414 // println!("Data = {}", it.data().unwrap().0);
2415 // }
2416 //
2417 // Ok("")
2418 // });
2419 }
2420}