1use core::{borrow::Borrow, mem::MaybeUninit, ops::Deref, ptr::NonNull, sync::atomic::AtomicPtr};
2
3use crate::alloc::{sync::Arc, DefaultAllocator, IAlloc};
4
/// A lock-free, atomically swappable B-Tree set.
///
/// The root node is stored behind an [`AtomicPtr`], letting concurrent
/// readers take snapshots and editors publish new trees via compare-exchange
/// (see `edit`) without locks. A null pointer encodes the empty set.
pub struct AtomicArcBTreeSet<T: Ord, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize>(
    AtomicPtr<ArcBTreeSetNodeInner<T, DefaultAllocator, REPLACE_ON_INSERT, SPLIT_LIMIT>>,
)
where
    DefaultAllocator: core::default::Default + IAlloc;
11impl<T: Ord + Clone, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize> Default
12 for AtomicArcBTreeSet<T, REPLACE_ON_INSERT, SPLIT_LIMIT>
13where
14 DefaultAllocator: core::default::Default + IAlloc,
15{
16 fn default() -> Self {
17 Self::new()
18 }
19}
impl<T: Ord + Clone, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize>
    AtomicArcBTreeSet<T, REPLACE_ON_INSERT, SPLIT_LIMIT>
where
    DefaultAllocator: core::default::Default + IAlloc,
{
    /// Constructs an empty set (null root pointer).
    pub const fn new() -> Self {
        Self(AtomicPtr::new(core::ptr::null_mut()))
    }
    /// Atomically edits the set: applies `f` to a snapshot of the current
    /// contents and publishes the tree `f` returns.
    ///
    /// Runs a CAS loop, so `f` may be called several times if other threads
    /// publish concurrently — it should be idempotent. If `f` returns a set
    /// sharing the same root pointer, nothing is published.
    ///
    /// NOTE(review): `copy_from_ptr` dereferences `current` without a
    /// hazard/epoch guard; if another thread can swap and drop the old root
    /// concurrently, this could read freed memory — confirm what external
    /// synchronization callers are expected to provide.
    pub fn edit(
        &self,
        mut f: impl FnMut(
            ArcBTreeSet<T, DefaultAllocator, REPLACE_ON_INSERT, SPLIT_LIMIT>,
        ) -> ArcBTreeSet<T, DefaultAllocator, REPLACE_ON_INSERT, SPLIT_LIMIT>,
    ) {
        let mut current = self.0.load(core::sync::atomic::Ordering::Acquire);
        loop {
            // `copy_from_ptr` bumps the refcount: `f` receives an owned snapshot.
            let new = f(ArcBTreeSet::copy_from_ptr(current));
            let new_ptr = new.as_ptr();
            if core::ptr::eq(current, new_ptr) {
                // `f` returned the same tree: nothing to publish.
                return;
            }
            match self.0.compare_exchange(
                current,
                new_ptr,
                core::sync::atomic::Ordering::Release,
                core::sync::atomic::Ordering::Acquire,
            ) {
                Ok(old) => unsafe {
                    // Published: forget `new` so its reference now belongs to
                    // `self`, then release our reference to the old tree.
                    core::mem::forget(new);
                    ArcBTreeSet::take_ownership_from_ptr(old);
                    return;
                },
                Err(new_old) => {
                    // Lost the race: retry against the freshly observed root.
                    current = new_old;
                }
            }
        }
    }
    /// Calls `f` with the result of looking up `value` in a snapshot of the
    /// set. The callback style keeps the snapshot (and the borrowed `&T`)
    /// alive for the duration of `f`.
    pub fn get<K>(&self, value: &K, f: impl FnOnce(Option<&T>))
    where
        T: PartialOrd<K>,
    {
        let set = ArcBTreeSet::copy_from_ptr(self.0.load(core::sync::atomic::Ordering::Relaxed));
        f(set.get(value))
    }
}
72
/// A key/value pair that compares by `key` only, letting [`ArcBTreeMap`]
/// piggy-back on [`ArcBTreeSet`].
#[crate::stabby]
#[derive(Debug, Clone)]
pub struct Entry<K, V> {
    key: K,
    value: V,
}
80
81impl<K: Ord, V> PartialEq for Entry<K, V> {
82 fn eq(&self, other: &Self) -> bool {
83 self.key.eq(&other.key)
84 }
85}
86impl<K: Ord, V> PartialOrd for Entry<K, V> {
87 fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
88 Some(self.cmp(other))
89 }
90}
91impl<K: Ord, V> PartialEq<K> for Entry<K, V> {
92 fn eq(&self, other: &K) -> bool {
93 self.key.eq(other)
94 }
95}
96impl<K: Ord, V> PartialOrd<K> for Entry<K, V> {
97 fn partial_cmp(&self, other: &K) -> Option<core::cmp::Ordering> {
98 Some(self.key.cmp(other))
99 }
100}
101impl<K: Ord, V> Eq for Entry<K, V> {}
102impl<K: Ord, V> Ord for Entry<K, V> {
103 fn cmp(&self, other: &Self) -> core::cmp::Ordering {
104 self.key.cmp(&other.key)
105 }
106}
/// An ordered map implemented as an [`ArcBTreeSet`] of key/value [`Entry`]s
/// with `REPLACE_ON_INSERT = true`, so inserting an existing key replaces
/// (and returns) the previous value.
#[crate::stabby]
#[derive(Clone)]
pub struct ArcBTreeMap<K, V, Alloc: IAlloc = DefaultAllocator, const SPLIT_LIMIT: usize = { 5 }>(
    ArcBTreeSet<Entry<K, V>, Alloc, true, SPLIT_LIMIT>,
);
impl<K: Ord, V, Alloc: IAlloc, const SPLIT_LIMIT: usize> ArcBTreeMap<K, V, Alloc, SPLIT_LIMIT> {
    /// Constructs an empty map using `alloc` as its allocator.
    ///
    /// NOTE(review): the return type spells `ArcBTreeMap<K, V, Alloc>` (the
    /// *default* `SPLIT_LIMIT`) rather than `Self` — confirm intentional.
    pub const fn new_in(alloc: Alloc) -> ArcBTreeMap<K, V, Alloc> {
        ArcBTreeMap::from_alloc(alloc)
    }
    /// Constructs an empty map, storing `alloc` for the eventual root node.
    pub const fn from_alloc(alloc: Alloc) -> Self {
        Self(ArcBTreeSet::from_alloc(alloc))
    }
    /// Returns a reference to the value associated with `key`, if present.
    pub fn get<Q: Borrow<K>>(&self, key: &Q) -> Option<&V> {
        self.0.get(key.borrow()).map(|entry| &entry.value)
    }
    /// Inserts `key = value`, returning the previously associated value if
    /// the key was already present (entries compare by key alone).
    pub fn insert(&mut self, key: K, value: V) -> Option<V>
    where
        K: Clone,
        V: Clone,
        Alloc: Clone,
    {
        self.0.insert(Entry { key, value }).map(|entry| entry.value)
    }
}
148
/// A copy-on-write ordered set backed by reference-counted B-Tree nodes.
///
/// Nodes split once they reach `SPLIT_LIMIT` elements (`SPLIT_LIMIT` must be
/// odd — enforced by the `CHECK` const). When `REPLACE_ON_INSERT` is true,
/// inserting an equal element replaces it and returns the old one; otherwise
/// the new element is handed back untouched.
#[crate::stabby]
pub struct ArcBTreeSet<
    T,
    Alloc: IAlloc = DefaultAllocator,
    const REPLACE_ON_INSERT: bool = { false },
    const SPLIT_LIMIT: usize = { 5 },
> {
    root: Option<ArcBTreeSetNode<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>>,
    // Invariant: `alloc` is initialized if and only if `root` is `None`;
    // once a root exists, the allocator lives inside the root's `Arc`.
    alloc: core::mem::MaybeUninit<Alloc>,
}
impl<T, Alloc: IAlloc, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize> Clone
    for ArcBTreeSet<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
where
    T: Clone,
    Alloc: Clone,
{
    fn clone(&self) -> Self {
        match self.root.clone() {
            // Non-empty: the allocator is owned by the root's Arc, so the
            // clone's `alloc` slot must stay uninitialized (see invariant).
            Some(root) => Self {
                root: Some(root),
                alloc: core::mem::MaybeUninit::uninit(),
            },
            // Empty: by the struct invariant `alloc` is initialized, so it
            // is sound to read and clone it here.
            None => unsafe {
                Self {
                    root: None,
                    alloc: core::mem::MaybeUninit::new(self.alloc.assume_init_ref().clone()),
                }
            },
        }
    }
}
186impl<T: Ord, Alloc: IAlloc + Default> Default for ArcBTreeSet<T, Alloc> {
187 fn default() -> Self {
188 Self::new_in(Alloc::default())
189 }
190}
impl<T: Ord> ArcBTreeSet<T>
where
    DefaultAllocator: IAlloc,
{
    /// Constructs an empty set using the default allocator.
    pub const fn new() -> Self {
        Self::new_in(DefaultAllocator::new())
    }
}
200impl<
201 T: Ord + core::fmt::Debug,
202 Alloc: IAlloc,
203 const REPLACE_ON_INSERT: bool,
204 const SPLIT_LIMIT: usize,
205 > core::fmt::Debug for ArcBTreeSet<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
206{
207 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
208 f.write_str("ArcBTreeSet")?;
209 match self.root.as_ref() {
210 None => f.write_str("{}"),
211 Some(set) => set.fmt(f),
212 }
213 }
214}
impl<
        T: Ord + core::fmt::Debug,
        Alloc: IAlloc,
        const REPLACE_ON_INSERT: bool,
        const SPLIT_LIMIT: usize,
    > core::fmt::Debug for ArcBTreeSetNode<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
{
    /// Formats the node as a set, walking children in-order so the output
    /// is sorted: each entry's `smaller` subtree prints before its value,
    /// and the `greater` subtree prints last.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let mut f = f.debug_set();
        for entry in self.0.entries() {
            if let Some(lesser) = &entry.smaller {
                f.entry(&lesser);
            }
            f.entry(&entry.value);
        }
        if let Some(greater) = &self.0.greater {
            f.entry(greater);
        }
        f.finish()
    }
}
impl<T, Alloc: IAlloc, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize> Drop
    for ArcBTreeSet<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
{
    fn drop(&mut self) {
        // Per the struct invariant, `alloc` is only initialized while the
        // set is empty; a non-empty set's allocator is dropped along with
        // its root `Arc` instead.
        if self.root.is_none() {
            unsafe { self.alloc.assume_init_drop() }
        }
    }
}
impl<T: Ord + Clone, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize>
    ArcBTreeSet<T, DefaultAllocator, REPLACE_ON_INSERT, SPLIT_LIMIT>
where
    DefaultAllocator: core::default::Default,
{
    /// Returns the raw root pointer without affecting the reference count
    /// (null for an empty set).
    ///
    /// Relies on `Option<ArcBTreeSetNode<…>>` having the same layout as a
    /// raw pointer, with `None` mapping to null (the `Arc` niche).
    const fn as_ptr(
        &self,
    ) -> *mut ArcBTreeSetNodeInner<T, DefaultAllocator, REPLACE_ON_INSERT, SPLIT_LIMIT> {
        unsafe {
            core::mem::transmute::<
                Option<ArcBTreeSetNode<T, DefaultAllocator, REPLACE_ON_INSERT, SPLIT_LIMIT>>,
                *mut ArcBTreeSetNodeInner<T, DefaultAllocator, REPLACE_ON_INSERT, SPLIT_LIMIT>,
            >(core::ptr::read(&self.root))
        }
    }
    /// Builds an owned set from a raw root pointer by cloning the pointee's
    /// handle (incrementing its reference count); the reference carried by
    /// `ptr` itself is left untouched.
    fn copy_from_ptr(
        ptr: *const ArcBTreeSetNodeInner<T, DefaultAllocator, REPLACE_ON_INSERT, SPLIT_LIMIT>,
    ) -> Self {
        match NonNull::new(ptr.cast_mut()) {
            // Null means "empty": materialize a fresh allocator to uphold the
            // "alloc initialized iff root is None" invariant.
            None => Self {
                root: None,
                alloc: core::mem::MaybeUninit::new(Default::default()),
            },
            Some(ptr) => {
                // Reinterpret the pointer as a *borrowed* root handle;
                // `ManuallyDrop` keeps this view from releasing a reference
                // it does not own.
                let owner: core::mem::ManuallyDrop<_> = unsafe {
                    core::mem::transmute::<
                        NonNull<
                            ArcBTreeSetNodeInner<
                                T,
                                DefaultAllocator,
                                REPLACE_ON_INSERT,
                                SPLIT_LIMIT,
                            >,
                        >,
                        core::mem::ManuallyDrop<
                            Option<
                                ArcBTreeSetNode<
                                    T,
                                    DefaultAllocator,
                                    REPLACE_ON_INSERT,
                                    SPLIT_LIMIT,
                                >,
                            >,
                        >,
                    >(ptr)
                };
                // Clone the handle to acquire a reference of our own.
                let root = owner.deref().clone();
                Self {
                    root,
                    alloc: core::mem::MaybeUninit::uninit(),
                }
            }
        }
    }
    /// Builds an owned set that *takes over* the strong reference carried by
    /// `ptr` (no refcount change).
    ///
    /// # Safety
    /// `ptr` must be null or carry ownership of one strong reference that no
    /// other holder will release.
    unsafe fn take_ownership_from_ptr(
        ptr: *mut ArcBTreeSetNodeInner<T, DefaultAllocator, REPLACE_ON_INSERT, SPLIT_LIMIT>,
    ) -> Self {
        match NonNull::new(ptr) {
            None => Self {
                root: None,
                alloc: core::mem::MaybeUninit::new(Default::default()),
            },
            Some(ptr) => {
                // Reinterpret the pointer directly as an owned root handle.
                let root = unsafe {
                    core::mem::transmute::<
                        NonNull<
                            ArcBTreeSetNodeInner<
                                T,
                                DefaultAllocator,
                                REPLACE_ON_INSERT,
                                SPLIT_LIMIT,
                            >,
                        >,
                        Option<
                            ArcBTreeSetNode<T, DefaultAllocator, REPLACE_ON_INSERT, SPLIT_LIMIT>,
                        >,
                    >(ptr)
                };
                Self {
                    root,
                    alloc: core::mem::MaybeUninit::uninit(),
                }
            }
        }
    }
}
impl<T: Ord, Alloc: IAlloc> ArcBTreeSet<T, Alloc> {
    /// Constructs an empty set that will allocate its nodes with `alloc`.
    #[allow(clippy::let_unit_value)]
    pub const fn new_in(alloc: Alloc) -> ArcBTreeSet<T, Alloc> {
        ArcBTreeSet::from_alloc(alloc)
    }
}
impl<T: Ord, Alloc: IAlloc, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize>
    ArcBTreeSet<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
{
    // Compile-time guard (evaluated via `from_alloc`) rejecting even split
    // limits, since splitting pops the middle element of a full node.
    const CHECK: () = if SPLIT_LIMIT % 2 == 0 {
        panic!("SPLIT_LIMIT on BTreeSet/BTreeMap must be odd (it is the number of elements at which a node will split)");
    };
    /// Constructs an empty set that will allocate its nodes with `alloc`.
    #[allow(clippy::let_unit_value)]
    pub const fn from_alloc(alloc: Alloc) -> Self {
        // Forces `CHECK` to be evaluated for this instantiation.
        _ = Self::CHECK;
        Self {
            root: None,
            alloc: core::mem::MaybeUninit::new(alloc),
        }
    }
    /// Returns a reference to the stored element equal to `key`, if any.
    pub fn get<K>(&self, key: &K) -> Option<&T>
    where
        T: PartialOrd<K>,
    {
        self.root.as_ref().and_then(|set| set.get(key))
    }
    /// Inserts `value`.
    ///
    /// If an equal element was already present, the result depends on
    /// `REPLACE_ON_INSERT`: when true the old element is replaced and
    /// returned; when false the set is untouched and `value` itself is
    /// returned. Returns `None` when the element was newly inserted.
    pub fn insert(&mut self, value: T) -> Option<T>
    where
        T: Clone,
        Alloc: Clone,
    {
        match &mut self.root {
            Some(inner) => inner.insert(value),
            None => {
                // The set was empty, so `alloc` is initialized; move it out
                // into the new root's `Arc` (the slot becomes logically
                // uninitialized, matching the now-`Some` root — see invariant).
                let alloc = unsafe { self.alloc.assume_init_read() };
                self.root = Some(ArcBTreeSetNode(Arc::new_in(
                    ArcBTreeSetNodeInner::new(
                        Some(ArcBTreeSetEntry {
                            value,
                            smaller: None,
                        }),
                        None,
                    ),
                    alloc,
                )));
                None
            }
        }
    }
    /// Calls `f` on every element in ascending order (test helper).
    #[cfg(test)]
    pub(crate) fn for_each(&self, mut f: impl FnMut(&T)) {
        if let Some(this) = &self.root {
            this.for_each(&mut f)
        }
    }
    /// Returns the number of elements; O(n) — it walks the whole tree.
    pub fn len(&self) -> usize {
        match &self.root {
            None => 0,
            Some(node) => node.len(),
        }
    }
    /// Returns `true` if the set contains no elements.
    pub const fn is_empty(&self) -> bool {
        self.root.is_none()
    }
}
420use seal::*;
421mod seal {
422 use super::*;
    /// A reference-counted handle to a B-Tree node; cloning is O(1) and
    /// shares the node (copy-on-write happens on mutation via `make_mut`).
    #[crate::stabby]
    pub struct ArcBTreeSetNode<
        T,
        Alloc: IAlloc,
        const REPLACE_ON_INSERT: bool,
        const SPLIT_LIMIT: usize,
    >(pub Arc<ArcBTreeSetNodeInner<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>, Alloc>);
431
    /// The payload of a B-Tree node.
    #[crate::stabby]
    pub struct ArcBTreeSetNodeInner<
        T,
        Alloc: IAlloc,
        const REPLACE_ON_INSERT: bool,
        const SPLIT_LIMIT: usize,
    > {
        // Invariant: `entries[..len]` is initialized, `entries[len..]` is not.
        pub entries:
            [MaybeUninit<ArcBTreeSetEntry<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>>; SPLIT_LIMIT],
        pub len: usize,
        // Subtree holding elements greater than every entry in this node.
        pub greater: Option<ArcBTreeSetNode<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>>,
    }
444 impl<T, Alloc: IAlloc, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize>
445 ArcBTreeSetNodeInner<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
446 {
447 pub fn new(
448 entries: impl IntoIterator<
449 Item = ArcBTreeSetEntry<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>,
450 >,
451 greater: Option<ArcBTreeSetNode<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>>,
452 ) -> Self {
453 let mut this = ArcBTreeSetNodeInner {
454 entries: [(); SPLIT_LIMIT].map(|_| MaybeUninit::uninit()),
455 len: 0,
456 greater,
457 };
458 for entry in entries {
459 if this.len >= SPLIT_LIMIT - 1 {
460 panic!("Attempted to construct an node with too many entries");
461 }
462 this.entries[this.len].write(entry);
463 this.len += 1;
464 }
465 this
466 }
467 }
468 impl<T, Alloc: IAlloc, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize> Clone
469 for ArcBTreeSetNode<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
470 {
471 fn clone(&self) -> Self {
472 Self(self.0.clone())
473 }
474 }
    impl<T, Alloc: IAlloc, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize> Drop
        for ArcBTreeSetNodeInner<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
    {
        fn drop(&mut self) {
            // Only `entries[..len]` is initialized, so drop exactly that
            // prefix; `greater` is dropped automatically as a plain field.
            unsafe { core::ptr::drop_in_place(self.entries_mut()) }
        }
    }
    impl<T: Clone, Alloc: IAlloc, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize> Clone
        for ArcBTreeSetNodeInner<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
    {
        /// Clones the initialized prefix of `entries` (child handles are
        /// shared `Arc`s), leaving the tail uninitialized.
        fn clone(&self) -> Self {
            let mut entries: [MaybeUninit<
                ArcBTreeSetEntry<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>,
            >; SPLIT_LIMIT] = [(); SPLIT_LIMIT].map(|_| core::mem::MaybeUninit::uninit());
            for (i, entry) in self.entries().iter().enumerate() {
                // SAFETY: `entries()` yields at most SPLIT_LIMIT items, so
                // `i` is in bounds of the destination array.
                unsafe { *entries.get_unchecked_mut(i) = MaybeUninit::new(entry.clone()) }
            }
            Self {
                entries,
                len: self.len,
                greater: self.greater.clone(),
            }
        }
    }
501
    /// One element of a node: the stored `value` plus the subtree of
    /// elements strictly smaller than it.
    #[crate::stabby]
    pub struct ArcBTreeSetEntry<
        T,
        Alloc: IAlloc,
        const REPLACE_ON_INSERT: bool,
        const SPLIT_LIMIT: usize,
    > {
        pub smaller: Option<ArcBTreeSetNode<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>>,
        pub value: T,
    }
513 impl<T: Clone, Alloc: IAlloc, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize> Clone
514 for ArcBTreeSetEntry<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
515 {
516 fn clone(&self) -> Self {
517 Self {
518 value: self.value.clone(),
519 smaller: self.smaller.clone(),
520 }
521 }
522 }
523
    impl<T: Ord, Alloc: IAlloc, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize>
        ArcBTreeSetNode<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
    {
        /// Counts the elements in this subtree; O(n) recursive walk.
        pub fn len(&self) -> usize {
            self.0.entries().iter().fold(0, |acc, it| {
                acc + 1 + it.smaller.as_ref().map_or(0, |n| n.len())
            }) + self.0.greater.as_ref().map_or(0, |n| n.len())
        }
        /// Standard B-Tree search for `key` in this subtree.
        pub fn get<K>(&self, key: &K) -> Option<&T>
        where
            T: PartialOrd<K>,
        {
            use core::cmp::Ordering;
            // Entries are sorted: stop at the first entry >= key.
            for entry in self.0.entries() {
                match entry.value.partial_cmp(key)? {
                    Ordering::Equal => return Some(&entry.value),
                    Ordering::Greater => return entry.smaller.as_ref()?.get(key),
                    _ => {}
                }
            }
            // `key` exceeds every entry: descend into the right-most child.
            self.0.greater.as_ref()?.get(key)
        }
        /// Inserts `value` into this subtree, growing a new root level if
        /// this (root) node splits. Return value follows the
        /// `REPLACE_ON_INSERT` contract described on `ArcBTreeSet::insert`.
        pub fn insert(&mut self, value: T) -> Option<T>
        where
            T: Clone,
            Alloc: Clone,
        {
            if !REPLACE_ON_INSERT && self.get(&value).is_some() {
                // Already present and replacement disabled: hand the value
                // back without touching (or copying) the tree.
                return Some(value);
            }
            match self.insert_inner(value) {
                Err(done) => done,
                // The root split: build a new single-entry root whose
                // children are the old root (smaller) and the split-off node.
                Ok((right, pivot)) => {
                    let entry = ArcBTreeSetEntry {
                        value: pivot,
                        smaller: Some(self.clone()),
                    };
                    let mut inner = ArcBTreeSetNodeInner {
                        entries: [(); SPLIT_LIMIT].map(|_| MaybeUninit::uninit()),
                        len: 1,
                        greater: Some(right),
                    };
                    inner.entries[0].write(entry);
                    self.0 = Arc::new_in(inner, Arc::allocator(&self.0).clone());
                    None
                }
            }
        }
        /// Recursive insertion step.
        ///
        /// `Err(prev)` means insertion finished without splitting this node
        /// (`prev` is a replaced element, if any); `Ok((right, pivot))` means
        /// this node split and the caller must stitch `pivot` and `right`
        /// into itself.
        fn insert_inner(&mut self, value: T) -> Result<(Self, T), Option<T>>
        where
            T: Clone,
            Alloc: Clone,
        {
            use core::cmp::Ordering;
            // Copy-on-write: obtain a uniquely-owned node before mutating.
            let (inner, alloc) = Arc::make_mut_and_get_alloc(&mut self.0);
            let entries = inner.entries_mut();
            for (i, entry) in entries.iter_mut().enumerate() {
                match entry.value.cmp(&value) {
                    Ordering::Equal => {
                        return Err(Some(core::mem::replace(&mut entry.value, value)))
                    }
                    Ordering::Greater => match entry.smaller.as_mut() {
                        Some(smaller) => {
                            // Recurse left; if the child splits, absorb its
                            // pivot and right half at position `i`.
                            let (right, pivot) = smaller.insert_inner(value)?;
                            return match inner.insert(i, pivot, Some(right), alloc) {
                                None => Err(None),
                                Some(splits) => Ok(splits),
                            };
                        }
                        // Leaf position found: insert in place.
                        None => {
                            return match inner.insert(i, value, None, alloc) {
                                None => Err(None),
                                Some(splits) => Ok(splits),
                            }
                        }
                    },
                    _ => {}
                }
            }
            // `value` is greater than every entry in this node.
            match inner.greater.as_mut() {
                Some(greater) => {
                    let (right, pivot) = greater.insert_inner(value)?;
                    if let Some(splits) = inner.push(pivot, Some(right), alloc) {
                        return Ok(splits);
                    }
                }
                None => {
                    if let Some(splits) = inner.push(value, None, alloc) {
                        return Ok(splits);
                    }
                }
            }
            Err(None)
        }
        /// In-order traversal (test helper).
        #[cfg(test)]
        pub fn for_each(&self, f: &mut impl FnMut(&T)) {
            for ArcBTreeSetEntry { value, smaller } in self.0.entries() {
                if let Some(smaller) = smaller {
                    smaller.for_each(f);
                }
                f(value)
            }
            if let Some(greater) = self.0.greater.as_ref() {
                greater.for_each(f)
            }
        }
    }
    impl<T: Ord, Alloc: IAlloc, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize>
        ArcBTreeSetNodeInner<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
    {
        /// Inserts `value` at entry index `i`, shifting later entries right.
        ///
        /// `greater` becomes the subtree sitting between `value` and the
        /// entry previously at `i`. Returns `Some((right, pivot))` if the
        /// node reached `SPLIT_LIMIT` entries and split.
        fn insert(
            &mut self,
            i: usize,
            value: T,
            greater: Option<ArcBTreeSetNode<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>>,
            alloc: &Alloc,
        ) -> Option<(ArcBTreeSetNode<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>, T)>
        where
            Alloc: Clone,
        {
            debug_assert!(i < self.len);
            // Guarantees the shift below stays within the array.
            assert!(self.len < self.entries.len());
            // Shift entries [i, len) one slot to the right.
            for i in (i..self.len).rev() {
                self.entries[i + 1] =
                    MaybeUninit::new(unsafe { self.entries[i].assume_init_read() });
            }
            unsafe {
                // SAFETY: slot `i + 1` was just initialized by the shift.
                // The displaced entry donates its `smaller` child to the new
                // entry and receives `greater` in exchange.
                *self.entries.get_unchecked_mut(i) = MaybeUninit::new(ArcBTreeSetEntry {
                    value,
                    smaller: core::mem::replace(
                        &mut self
                            .entries
                            .get_unchecked_mut(i + 1)
                            .assume_init_mut()
                            .smaller,
                        greater,
                    ),
                });
            }

            self.len += 1;
            self.split(alloc)
        }
        /// Appends `value` after all current entries; `greater` becomes the
        /// new right-most subtree and the old one becomes `value`'s
        /// `smaller`. Returns `Some((right, pivot))` if the node split.
        fn push(
            &mut self,
            value: T,
            greater: Option<ArcBTreeSetNode<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>>,
            alloc: &Alloc,
        ) -> Option<(ArcBTreeSetNode<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>, T)>
        where
            Alloc: Clone,
        {
            unsafe {
                // SAFETY: nodes are split as soon as they reach SPLIT_LIMIT,
                // so `len < SPLIT_LIMIT` here and slot `len` exists,
                // uninitialized.
                self.entries
                    .get_unchecked_mut(self.len)
                    .write(ArcBTreeSetEntry {
                        value,
                        smaller: core::mem::replace(&mut self.greater, greater),
                    });
            }
            self.len += 1;
            self.split(alloc)
        }
        /// If the node is full (`len == SPLIT_LIMIT`), splits it: the lower
        /// half stays in `self`, the upper half moves into a new `right`
        /// node, and `(right, pivot)` is returned so the caller can migrate
        /// the middle element into the parent.
        fn split(
            &mut self,
            alloc: &Alloc,
        ) -> Option<(ArcBTreeSetNode<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>, T)>
        where
            Alloc: Clone,
        {
            if self.len == SPLIT_LIMIT {
                let pivot_index: usize = SPLIT_LIMIT / 2;
                // SAFETY: `pivot_index < len`, so the slot is initialized.
                let pivot = unsafe { self.entries.get_unchecked(pivot_index).assume_init_read() };
                let ArcBTreeSetEntry {
                    value: pivot,
                    smaller,
                } = pivot;
                let mut right = Self {
                    entries: [(); SPLIT_LIMIT].map(|_| core::mem::MaybeUninit::uninit()),
                    len: self.len - (pivot_index + 1),
                    greater: self.greater.take(),
                };
                // Move the entries above the pivot into the new right node.
                for i in (pivot_index + 1)..self.len {
                    right.entries[i - (pivot_index + 1)] = MaybeUninit::new(unsafe {
                        self.entries.get_unchecked(i).assume_init_read()
                    });
                }
                // The pivot's old `smaller` subtree becomes our right-most
                // child; truncating `len` discards the moved-out slots.
                self.greater = smaller;
                self.len = pivot_index;
                let right = ArcBTreeSetNode(Arc::new_in(right, alloc.clone()));
                Some((right, pivot))
            } else {
                None
            }
        }
    }
727
    impl<T, Alloc: IAlloc, const REPLACE_ON_INSERT: bool, const SPLIT_LIMIT: usize>
        ArcBTreeSetNodeInner<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>
    {
        /// Returns the initialized entries as a slice.
        pub fn entries(&self) -> &[ArcBTreeSetEntry<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>] {
            // SAFETY: `len <= SPLIT_LIMIT` and `entries[..len]` is
            // initialized; `MaybeUninit<T>` has the same layout as `T`.
            unsafe { core::mem::transmute(self.entries.get_unchecked(..self.len)) }
        }
        /// Returns the initialized entries as a mutable slice.
        pub fn entries_mut(
            &mut self,
        ) -> &mut [ArcBTreeSetEntry<T, Alloc, REPLACE_ON_INSERT, SPLIT_LIMIT>] {
            // SAFETY: same invariant as `entries`.
            unsafe { core::mem::transmute(self.entries.get_unchecked_mut(..self.len)) }
        }
    }
742}
#[test]
#[cfg(feature = "libc")]
fn btree_insert_libc() {
    // Randomized differential test: mirror every insertion in a sorted Vec
    // and check the BTree agrees on duplicates, length, and iteration order.
    use rand::Rng;
    let mut rng = rand::thread_rng();
    for i in 0..if cfg!(miri) { 5 } else { 1000 } {
        dbg!(i);
        let mut vec =
            crate::alloc::vec::Vec::new_in(crate::alloc::allocators::LibcAlloc::default());
        let mut btree = ArcBTreeSet::new_in(crate::alloc::allocators::LibcAlloc::default());
        for _ in 0..rng.gen_range(0..800) {
            let val = rng.gen_range(0..100u8);
            if vec.binary_search(&val).is_ok() {
                // Duplicate: REPLACE_ON_INSERT is false, so the value bounces back.
                assert_eq!(btree.insert(val), Some(val));
            } else {
                vec.push(val);
                vec.sort();
                assert_eq!(
                    btree.insert(val),
                    None,
                    "The BTree contained an unexpected value: {btree:?}, {vec:?}"
                );
            }
        }
        vec.sort();
        assert_eq!(vec.len(), btree.len());
        // In-order traversal must match the sorted reference exactly.
        let mut iter = vec.into_iter();
        btree.for_each(|i| assert_eq!(Some(*i), iter.next()));
        assert_eq!(iter.next(), None);
    }
}
774
#[test]
#[cfg(feature = "alloc-rs")]
fn btree_insert_rs() {
    // Same differential test as `btree_insert_libc`, using the Rust
    // standard allocator backend instead of libc's.
    use rand::Rng;
    let mut rng = rand::thread_rng();
    for i in 0..if cfg!(miri) { 5 } else { 1000 } {
        dbg!(i);
        let mut vec =
            crate::alloc::vec::Vec::new_in(crate::alloc::allocators::RustAlloc::default());
        let mut btree = ArcBTreeSet::new_in(crate::alloc::allocators::RustAlloc::default());
        for _ in 0..rng.gen_range(0..800) {
            let val = rng.gen_range(0..100);
            if vec.binary_search(&val).is_ok() {
                // Duplicate: REPLACE_ON_INSERT is false, so the value bounces back.
                assert_eq!(
                    btree.insert(val),
                    Some(val),
                    "btree: {btree:?}, vec: {vec:?}, val: {val}"
                );
            } else {
                vec.push(val);
                vec.sort();
                assert_eq!(
                    btree.insert(val),
                    None,
                    "The BTree contained an unexpected value: {btree:?}, {vec:?}"
                );
            }
        }
        vec.sort();
        assert_eq!(vec.len(), btree.len());
        // In-order traversal must match the sorted reference exactly.
        let mut iter = vec.into_iter();
        btree.for_each(|i| assert_eq!(Some(*i), iter.next()));
        assert_eq!(iter.next(), None);
    }
}
810
811