lurk_elsa/sync/
mod.rs

1//! **This module is experimental**
2//!
3//! This module provides threadsafe versions of FrozenMap and FrozenVec,
4//! ideal for use as a cache.
5//!
//! These lock internally; however, each lock is held only for the duration of a single method call.
7//!
8
9use stable_deref_trait::StableDeref;
10use std::alloc::Layout;
11use std::borrow::Borrow;
12use std::cmp::Eq;
13use std::collections::BTreeMap;
14use std::collections::HashMap;
15use std::fmt;
16use std::hash::Hash;
17use std::iter::{FromIterator, IntoIterator};
18use std::ops::Index;
19
20use std::ptr::NonNull;
21use std::sync::atomic::AtomicBool;
22use std::sync::atomic::AtomicPtr;
23use std::sync::atomic::AtomicUsize;
24use std::sync::atomic::Ordering;
25use std::sync::RwLock;
26use std::sync::TryLockError;
27
28#[cfg(feature = "indexmap")]
29pub mod index_map;
30#[cfg(feature = "indexmap")]
31pub mod index_set;
32
/// Append-only threadsafe version of `std::collections::HashMap` where
/// insertion does not require mutable access
pub struct FrozenMap<K, V> {
    // Interior lock: writers (`insert*`) take a short-lived write lock,
    // readers (`get`, `map_get`) a read lock. Entries are never removed or
    // overwritten, which is what makes handing out `&V::Target` sound.
    map: RwLock<HashMap<K, V>>,
}
38
39impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for FrozenMap<K, V> {
40    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
41        match self.map.try_read() {
42            Ok(guard) => guard.fmt(f),
43            Err(TryLockError::Poisoned(err)) => {
44                f.debug_tuple("FrozenMap").field(&&**err.get_ref()).finish()
45            }
46            Err(TryLockError::WouldBlock) => {
47                struct LockedPlaceholder;
48                impl fmt::Debug for LockedPlaceholder {
49                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
50                        f.write_str("<locked>")
51                    }
52                }
53                f.debug_tuple("FrozenMap")
54                    .field(&LockedPlaceholder)
55                    .finish()
56            }
57        }
58    }
59}
60
61impl<K, V> Default for FrozenMap<K, V> {
62    fn default() -> Self {
63        Self {
64            map: Default::default(),
65        }
66    }
67}
68
impl<K, V> FrozenMap<K, V> {
    /// Creates an empty `FrozenMap`.
    pub fn new() -> Self {
        Self::default()
    }
}
74
75impl<T> From<Vec<T>> for FrozenVec<T> {
76    fn from(vec: Vec<T>) -> Self {
77        Self {
78            vec: RwLock::new(vec),
79        }
80    }
81}
82
impl<K: Eq + Hash, V: StableDeref> FrozenMap<K, V> {
    // these should never return &K or &V
    // these should never delete any entries

    /// If the key exists in the map, returns a reference
    /// to the corresponding value, otherwise inserts a
    /// new entry in the map for that key and returns a
    /// reference to the given value.
    ///
    /// Existing values are never overwritten.
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use lurk_elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// assert_eq!(map.insert(1, Box::new("a")), &"a");
    /// assert_eq!(map.insert(1, Box::new("b")), &"a");
    /// ```
    pub fn insert(&self, k: K, v: V) -> &V::Target {
        let mut map = self.map.write().unwrap();
        // SAFETY: the borrow of the pointee is extended past the lock guard.
        // This is sound because `V: StableDeref` keeps the pointee at a
        // stable address even when the `HashMap` reallocates, and this API
        // never removes or overwrites entries, so the pointee lives at least
        // as long as the `&self` borrow.
        let ret = unsafe {
            let inserted = &**map.entry(k).or_insert(v);
            &*(inserted as *const _)
        };
        ret
    }

    /// If the key exists in the map, returns a reference to the corresponding
    /// value, otherwise inserts a new entry in the map for that key and the
    /// value returned by the creation function, and returns a reference to the
    /// generated value.
    ///
    /// Existing values are never overwritten.
    ///
    /// The key may be any borrowed form of the map's key type, but [`Hash`] and
    /// [`Eq`] on the borrowed form *must* match those for the key type.
    ///
    /// **Note** that the write lock is held for the duration of this function’s
    /// execution, even while the value creation function is executing (if
    /// needed). This will block any concurrent `get` or `insert` calls.
    ///
    /// # Examples
    ///
    /// ```
    /// use lurk_elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// assert_eq!(map.insert_with(1, || Box::new("a")), &"a");
    /// assert_eq!(map.insert_with(1, || unreachable!()), &"a");
    /// ```
    pub fn insert_with(&self, k: K, f: impl FnOnce() -> V) -> &V::Target {
        let mut map = self.map.write().unwrap();
        // SAFETY: same reasoning as in `insert` — `StableDeref` plus the
        // append-only, never-overwrite invariant keep the pointee valid for
        // the lifetime of `&self`.
        let ret = unsafe {
            let inserted = &**map.entry(k).or_insert_with(f);
            &*(inserted as *const _)
        };
        ret
    }

    /// If the key exists in the map, returns a reference to the corresponding
    /// value, otherwise inserts a new entry in the map for that key and the
    /// value returned by the creation function, and returns a reference to the
    /// generated value.
    ///
    /// Existing values are never overwritten.
    ///
    /// The key may be any borrowed form of the map's key type, but [`Hash`] and
    /// [`Eq`] on the borrowed form *must* match those for the key type.
    ///
    /// **Note** that the write lock is held for the duration of this function’s
    /// execution, even while the value creation function is executing (if
    /// needed). This will block any concurrent `get` or `insert` calls.
    ///
    /// # Examples
    ///
    /// ```
    /// use lurk_elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// assert_eq!(map.insert_with_key(1, |_| Box::new("a")), &"a");
    /// assert_eq!(map.insert_with_key(1, |_| unreachable!()), &"a");
    /// ```
    pub fn insert_with_key(&self, k: K, f: impl FnOnce(&K) -> V) -> &V::Target {
        let mut map = self.map.write().unwrap();
        // SAFETY: same reasoning as in `insert`.
        let ret = unsafe {
            let inserted = &**map.entry(k).or_insert_with_key(f);
            &*(inserted as *const _)
        };
        ret
    }

    /// Returns a reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use lurk_elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.get(&1), Some(&"a"));
    /// assert_eq!(map.get(&2), None);
    /// ```
    pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V::Target>
    where
        K: Borrow<Q>,
        Q: Hash + Eq,
    {
        let map = self.map.read().unwrap();
        // SAFETY: same reasoning as in `insert` — the returned reference
        // points at the stable pointee, not into the `HashMap`'s storage.
        let ret = unsafe { map.get(k).map(|x| &*(&**x as *const V::Target)) };
        ret
    }

    /// Applies a function to the owner of the value corresponding to the key (if any).
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// **Note**: `f` runs while the read lock is held.
    ///
    /// # Examples
    ///
    /// ```
    /// use lurk_elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.map_get(&1, Clone::clone), Some(Box::new("a")));
    /// assert_eq!(map.map_get(&2, Clone::clone), None);
    /// ```
    pub fn map_get<Q: ?Sized, T, F>(&self, k: &Q, f: F) -> Option<T>
    where
        K: Borrow<Q>,
        Q: Hash + Eq,
        F: FnOnce(&V) -> T,
    {
        let map = self.map.read().unwrap();
        let ret = map.get(k).map(f);
        ret
    }
}
233
234impl<K, V> FrozenMap<K, V> {
235    /// Collects the contents of this map into a vector of tuples.
236    ///
237    /// The order of the entries is as if iterating a [`HashMap`] (stochastic).
238    ///
239    /// # Examples
240    ///
241    /// ```
242    /// use lurk_elsa::sync::FrozenMap;
243    ///
244    /// let map = FrozenMap::new();
245    /// map.insert(1, Box::new("a"));
246    /// map.insert(2, Box::new("b"));
247    /// let mut tuple_vec = map.into_tuple_vec();
248    /// tuple_vec.sort();
249    ///
250    /// assert_eq!(tuple_vec, vec![(1, Box::new("a")), (2, Box::new("b"))]);
251    /// ```
252    pub fn into_tuple_vec(self) -> Vec<(K, V)> {
253        self.map
254            .into_inner()
255            .unwrap()
256            .into_iter()
257            .collect::<Vec<_>>()
258    }
259
260    /// # Examples
261    ///
262    /// ```
263    /// use lurk_elsa::sync::FrozenMap;
264    ///
265    /// let map = FrozenMap::new();
266    /// assert_eq!(map.len(), 0);
267    /// map.insert(1, Box::new("a"));
268    /// assert_eq!(map.len(), 1);
269    /// ```
270    pub fn len(&self) -> usize {
271        let map = self.map.read().unwrap();
272        map.len()
273    }
274
275    /// # Examples
276    ///
277    /// ```
278    /// use lurk_elsa::sync::FrozenMap;
279    ///
280    /// let map = FrozenMap::new();
281    /// assert_eq!(map.is_empty(), true);
282    /// map.insert(1, Box::new("a"));
283    /// assert_eq!(map.is_empty(), false);
284    /// ```
285    pub fn is_empty(&self) -> bool {
286        let map = self.map.read().unwrap();
287        map.is_empty()
288    }
289
290    // TODO add more
291}
292
293impl<K: Clone, V> FrozenMap<K, V> {
294    pub fn keys_cloned(&self) -> Vec<K> {
295        self.map.read().unwrap().keys().cloned().collect()
296    }
297}
298
299impl<K: Eq + Hash, V: Copy> FrozenMap<K, V> {
300    /// Returns a copy of the value corresponding to the key.
301    ///
302    /// The key may be any borrowed form of the map's key type, but
303    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
304    /// the key type.
305    ///
306    /// # Examples
307    ///
308    /// ```
309    /// use lurk_elsa::sync::FrozenMap;
310    ///
311    /// let map = FrozenMap::new();
312    /// map.get_copy_or_insert(1, 6);
313    /// assert_eq!(map.get_copy(&1), Some(6));
314    /// assert_eq!(map.get_copy(&2), None);
315    /// ```
316    pub fn get_copy<Q: ?Sized>(&self, k: &Q) -> Option<V>
317    where
318        K: Borrow<Q>,
319        Q: Hash + Eq,
320    {
321        let map = self.map.read().unwrap();
322        map.get(k).cloned()
323    }
324
325    /// If the key exists in the map, returns a reference
326    /// to the corresponding value, otherwise inserts a
327    /// new entry in the map for that key and returns a
328    /// reference to the given value.
329    ///
330    /// Existing values are never overwritten.
331    ///
332    /// The key may be any borrowed form of the map's key type, but
333    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
334    /// the key type.
335    ///
336    /// # Examples
337    ///
338    /// ```
339    /// use lurk_elsa::sync::FrozenMap;
340    ///
341    /// let map = FrozenMap::new();
342    /// assert_eq!(map.get_copy_or_insert(1, 6), 6);
343    /// assert_eq!(map.get_copy_or_insert(1, 12), 6);
344    /// ```
345    pub fn get_copy_or_insert(&self, k: K, v: V) -> V {
346        let mut map = self.map.write().unwrap();
347        // This is safe because `or_insert` does not overwrite existing values
348        let inserted = map.entry(k).or_insert(v);
349        *inserted
350    }
351
352    /// If the key exists in the map, returns a reference to the corresponding
353    /// value, otherwise inserts a new entry in the map for that key and the
354    /// value returned by the creation function, and returns a reference to the
355    /// generated value.
356    ///
357    /// Existing values are never overwritten.
358    ///
359    /// The key may be any borrowed form of the map's key type, but [`Hash`] and
360    /// [`Eq`] on the borrowed form *must* match those for the key type.
361    ///
362    /// **Note** that the write lock is held for the duration of this function’s
363    /// execution, even while the value creation function is executing (if
364    /// needed). This will block any concurrent `get` or `insert` calls.
365    ///
366    /// # Examples
367    ///
368    /// ```
369    /// use lurk_elsa::sync::FrozenMap;
370    ///
371    /// let map = FrozenMap::new();
372    /// assert_eq!(map.get_copy_or_insert_with(1, || 6), 6);
373    /// assert_eq!(map.get_copy_or_insert_with(1, || unreachable!()), 6);
374    /// ```
375    pub fn get_copy_or_insert_with(&self, k: K, f: impl FnOnce() -> V) -> V {
376        let mut map = self.map.write().unwrap();
377        // This is safe because `or_insert_with` does not overwrite existing values
378        let inserted = map.entry(k).or_insert_with(f);
379        *inserted
380    }
381
382    /// If the key exists in the map, returns a reference to the corresponding
383    /// value, otherwise inserts a new entry in the map for that key and the
384    /// value returned by the creation function, and returns a reference to the
385    /// generated value.
386    ///
387    /// Existing values are never overwritten.
388    ///
389    /// The key may be any borrowed form of the map's key type, but [`Hash`] and
390    /// [`Eq`] on the borrowed form *must* match those for the key type.
391    ///
392    /// **Note** that the write lock is held for the duration of this function’s
393    /// execution, even while the value creation function is executing (if
394    /// needed). This will block any concurrent `get` or `insert` calls.
395    ///
396    /// # Examples
397    ///
398    /// ```
399    /// use lurk_elsa::sync::FrozenMap;
400    ///
401    /// let map = FrozenMap::new();
402    /// assert_eq!(map.get_copy_or_insert_with_key(1, |_| 6), 6);
403    /// assert_eq!(map.get_copy_or_insert_with_key(1, |_| unreachable!()), 6);
404    /// ```
405    pub fn get_copy_or_insert_with_key(&self, k: K, f: impl FnOnce(&K) -> V) -> V {
406        let mut map = self.map.write().unwrap();
407        // This is safe because `or_insert_with_key` does not overwrite existing values
408        let inserted = map.entry(k).or_insert_with_key(f);
409        *inserted
410    }
411}
412
impl<K, V> std::convert::AsMut<HashMap<K, V>> for FrozenMap<K, V> {
    /// Get mutable access to the underlying [`HashMap`].
    ///
    /// This is safe, as it requires a `&mut self`, ensuring nothing is using
    /// the 'frozen' contents.
    fn as_mut(&mut self) -> &mut HashMap<K, V> {
        // Panics if the lock was poisoned by a panicking writer.
        self.map.get_mut().unwrap()
    }
}
422
423impl<K: Clone, V: Clone> Clone for FrozenMap<K, V> {
424    fn clone(&self) -> Self {
425        Self {
426            map: self.map.read().unwrap().clone().into(),
427        }
428    }
429}
430
431impl<K: Eq + Hash, V: PartialEq> PartialEq for FrozenMap<K, V> {
432    fn eq(&self, other: &Self) -> bool {
433        let self_ref: &HashMap<K, V> = &self.map.read().unwrap();
434        let other_ref: &HashMap<K, V> = &other.map.read().unwrap();
435        self_ref == other_ref
436    }
437}
438
/// Append-only threadsafe version of `std::vec::Vec` where
/// insertion does not require mutable access
pub struct FrozenVec<T> {
    // Interior lock: `push` takes a short-lived write lock, `get`/`iter`
    // a read lock. Elements are never removed, which is what makes handing
    // out `&T::Target` sound.
    vec: RwLock<Vec<T>>,
}
444
445impl<T: fmt::Debug> fmt::Debug for FrozenVec<T> {
446    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
447        match self.vec.try_read() {
448            Ok(guard) => guard.fmt(f),
449            Err(TryLockError::Poisoned(err)) => {
450                f.debug_tuple("FrozenMap").field(&&**err.get_ref()).finish()
451            }
452            Err(TryLockError::WouldBlock) => {
453                struct LockedPlaceholder;
454                impl fmt::Debug for LockedPlaceholder {
455                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
456                        f.write_str("<locked>")
457                    }
458                }
459                f.debug_tuple("FrozenMap")
460                    .field(&LockedPlaceholder)
461                    .finish()
462            }
463        }
464    }
465}
466
467impl<T> FrozenVec<T> {
468    /// Returns the number of elements in the vector.
469    pub fn len(&self) -> usize {
470        let vec = self.vec.read().unwrap();
471        vec.len()
472    }
473
474    /// Returns `true` if the vector contains no elements.
475    pub fn is_empty(&self) -> bool {
476        self.len() == 0
477    }
478}
479
impl<T: StableDeref> FrozenVec<T> {
    /// Creates an empty `FrozenVec`.
    pub fn new() -> Self {
        Default::default()
    }

    // these should never return &T
    // these should never delete any entries

    /// Appends an element to the back of the vector.
    pub fn push(&self, val: T) {
        let mut vec = self.vec.write().unwrap();
        vec.push(val);
    }

    /// Push, immediately getting a reference to the element
    pub fn push_get(&self, val: T) -> &T::Target {
        let mut vec = self.vec.write().unwrap();
        vec.push(val);
        // SAFETY: the element was just pushed, so `vec.len() - 1` is in
        // bounds. Extending the borrow past the lock guard is sound because
        // `T: StableDeref` keeps the pointee at a stable address even when
        // the `Vec`'s storage reallocates, and elements are never removed,
        // so the pointee lives at least as long as `&self`.
        unsafe { &*(&**vec.get_unchecked(vec.len() - 1) as *const T::Target) }
    }

    /// Push, immediately getting an index of the element
    ///
    /// Index can then be used with the `get` method
    ///
    /// # Examples
    ///
    /// ```
    /// use lurk_elsa::sync::FrozenVec;
    ///
    /// let map = FrozenVec::new();
    /// let idx = map.push_get_index(String::from("a"));
    /// assert_eq!(map.get(idx), Some("a"));
    /// assert_eq!(idx, 0);
    /// assert_eq!(map.push_get_index(String::from("b")), 1);
    /// ```
    pub fn push_get_index(&self, val: T) -> usize {
        let mut vec = self.vec.write().unwrap();
        let index = vec.len();
        vec.push(val);
        return index;
    }

    /// Returns a reference to the element at `index`, if any.
    pub fn get(&self, index: usize) -> Option<&T::Target> {
        let vec = self.vec.read().unwrap();
        // SAFETY: same reasoning as `push_get` — the reference points at the
        // stable pointee, not into the `Vec`'s (reallocatable) storage.
        unsafe { vec.get(index).map(|x| &*(&**x as *const T::Target)) }
    }

    /// Returns an iterator over the vector.
    pub fn iter(&self) -> Iter<'_, T> {
        self.into_iter()
    }
}
532
/// Iterator over FrozenVec, obtained via `.iter()`
///
/// It is safe to push to the vector during iteration
#[derive(Debug)]
pub struct Iter<'a, T> {
    // The vector being iterated; each `next` call re-locks it briefly.
    vec: &'a FrozenVec<T>,
    // Index of the next element to yield.
    idx: usize,
}
541
542impl<'a, T: StableDeref> Iterator for Iter<'a, T> {
543    type Item = &'a T::Target;
544    fn next(&mut self) -> Option<&'a T::Target> {
545        if let Some(ret) = self.vec.get(self.idx) {
546            self.idx += 1;
547            Some(ret)
548        } else {
549            None
550        }
551    }
552}
553
impl<'a, T: StableDeref> IntoIterator for &'a FrozenVec<T> {
    type Item = &'a T::Target;
    type IntoIter = Iter<'a, T>;
    /// Iterates over the dereferenced elements, starting at index 0.
    fn into_iter(self) -> Iter<'a, T> {
        Iter { vec: self, idx: 0 }
    }
}
561
#[test]
fn test_iteration() {
    let vec = vec!["a", "b", "c", "d"];
    let frozen: FrozenVec<_> = vec.clone().into();

    // Iteration yields the same elements, in the same order, as the source.
    assert_eq!(vec, frozen.iter().collect::<Vec<_>>());
    for (e1, e2) in vec.iter().zip(frozen.iter()) {
        assert_eq!(*e1, e2);
    }

    // The iterator is exhausted after exactly `len` elements.
    assert_eq!(vec.len(), frozen.iter().count())
}
574
impl<T> FrozenVec<T> {
    /// Returns the internal vector backing this structure
    ///
    /// # Examples
    ///
    /// ```
    /// use lurk_elsa::sync::FrozenVec;
    ///
    /// let map = FrozenVec::new();
    /// map.push("a");
    /// map.push("b");
    /// let tuple_vec = map.into_vec();
    ///
    /// assert_eq!(tuple_vec, vec!["a", "b"]);
    /// ```
    pub fn into_vec(self) -> Vec<T> {
        // Consumes `self`, so no other references can exist; panics only if
        // the lock was poisoned.
        self.vec.into_inner().unwrap()
    }

    // TODO add more
}
596
impl<T> std::convert::AsMut<Vec<T>> for FrozenVec<T> {
    /// Get mutable access to the underlying vector.
    ///
    /// This is safe, as it requires a `&mut self`, ensuring nothing is using
    /// the 'frozen' contents.
    fn as_mut(&mut self) -> &mut Vec<T> {
        // Panics if the lock was poisoned by a panicking writer.
        self.vec.get_mut().unwrap()
    }
}
606
607impl<T> Default for FrozenVec<T> {
608    fn default() -> Self {
609        Self {
610            vec: Default::default(),
611        }
612    }
613}
614
615impl<T: Clone> Clone for FrozenVec<T> {
616    fn clone(&self) -> Self {
617        Self {
618            vec: self.vec.read().unwrap().clone().into(),
619        }
620    }
621}
622
623impl<T: PartialEq> PartialEq for FrozenVec<T> {
624    fn eq(&self, other: &Self) -> bool {
625        let self_ref: &Vec<T> = &self.vec.read().unwrap();
626        let other_ref: &Vec<T> = &other.vec.read().unwrap();
627        self_ref == other_ref
628    }
629}
630
631// The context for these functions is that we want to have a
632// series of exponentially increasing buffer sizes. We want
633// to maximize the total size of the buffers (since this
634// determines the maximum size of the container) whilst
635// minimizing the number of buffers (since we pay an up-front
636// cost in space proportional to the number of buffers)
637// without increasing the buffer size too much each time as
638// this determines how much space will be wasted on average
639// in allocated buffers. Finally, we also want a sequence
640// which will generate nice round numbers and is easy to
641// work with.
642
/// we multiply the buffer size by 4 each time whilst sizing
/// the first buffer to 3, so the buffer sizes generated by
/// the function will be 3, 12, 48, 192, etc.
const fn buffer_size(idx: usize) -> usize {
    // 3 * 4^idx, written as a shift: 4^idx == 1 << (2 * idx).
    3 << (2 * idx)
}
649
/// This computes the sum of the sizes of buffers prior to a
/// particular buffer index, aka `4^idx - 1`. The choice of
/// sequence means that the total buffer size will always be
/// a sequence of `1`s in binary, since it's a power of 2 minus one.
const fn prior_total_buffer_size(idx: usize) -> usize {
    // 4^idx - 1: a run of `1` bits — 0, 3, 15, 63, ...
    (1 << (2 * idx)) - 1
}
657
/// This determines which buffer contains the nth item
/// (assuming the items are arranged sequentially in the buffers).
/// Since the total buffer sizes are always sequences of 1s in binary,
/// we can count the significant binary digits of `(i + 1)` and divide
/// by `2`, rounding up — each buffer covers two bits of the index,
/// because buffer sizes grow by a factor of `4`. The `+ 1` in the bit
/// count is what makes the halving round up instead of down, and the
/// final `- 1` converts to zero-based buffer indices.
const fn buffer_index(i: usize) -> usize {
    let significant_bits = usize::BITS + 1 - (i + 1).leading_zeros();
    (significant_bits >> 1) as usize - 1
}
672
/// Number of exponentially-growing buffers kept: `usize::BITS / 4`
/// (16 on 64-bit targets), since each buffer consumes two bits of the
/// element index.
const NUM_BUFFERS: usize = (usize::BITS >> 2) as usize;
674
/// Append-only threadsafe version of `std::vec::Vec` where
/// insertion does not require mutable access.
/// Does not lock for reading, only allows `Copy` types and
/// will spinlock on pushes without affecting reads.
/// Note that this data structure is `64` pointers large on
/// 64 bit systems,
/// in contrast to `Vec` which is `3` pointers large.
pub struct LockFreeFrozenVec<T: Copy> {
    // Exponentially-sized buffers (3, 12, 48, ... elements). A null pointer
    // means "not allocated yet"; buffers are allocated and filled in order.
    data: [AtomicPtr<T>; NUM_BUFFERS],
    // Number of elements pushed so far; only ever grows.
    len: AtomicUsize,
    // Spinlock flag serializing writers (`push`); readers never take it.
    locked: AtomicBool,
}
687
impl<T: Copy> Drop for LockFreeFrozenVec<T> {
    fn drop(&mut self) {
        // Free every allocated buffer. `T: Copy` implies `T` has no `Drop`
        // impl, so deallocating the raw storage is all that is needed.
        for i in 0..NUM_BUFFERS {
            let layout = Self::layout(buffer_size(i));
            unsafe {
                let ptr = *self.data[i].get_mut();
                if ptr.is_null() {
                    // Buffers are allocated in order, so after the first
                    // null pointer there will only be more null pointers.
                    break;
                } else {
                    // SAFETY: non-null entries were allocated by `push` (or
                    // `clone`) with exactly this layout.
                    std::alloc::dealloc(ptr.cast(), layout);
                }
            }
        }
    }
}
706
impl<T: Copy> Default for LockFreeFrozenVec<T> {
    /// Creates an empty `LockFreeFrozenVec` that does not allocate
    /// any heap allocations until the first time data is pushed to it.
    fn default() -> Self {
        Self {
            // All buffer slots start as null ("not allocated yet").
            data: [Self::NULL; NUM_BUFFERS],
            len: AtomicUsize::new(0),
            locked: AtomicBool::new(false),
        }
    }
}
718
impl<T: Copy> LockFreeFrozenVec<T> {
    // Null `AtomicPtr` constant used to initialize the `data` array.
    const NULL: AtomicPtr<T> = AtomicPtr::new(std::ptr::null_mut());
    /// Creates an empty vector; no heap allocation happens until the first `push`.
    pub fn new() -> Self {
        Default::default()
    }

    /// Obtains a write lock that ensures other writing threads
    /// wait for us to finish. Reading threads are unaffected and
    /// can concurrently read while we push a new element.
    fn lock<U>(&self, f: impl FnOnce() -> U) -> U {
        // Acquire on the successful swap pairs with the Release store below,
        // making the previous lock holder's writes visible to us.
        while self.locked.swap(true, Ordering::Acquire) {
            // Wheeeee spinlock
            std::hint::spin_loop();
        }

        let ret = f();
        self.locked.store(false, Ordering::Release);
        ret
    }

    // Layout for a buffer of `cap` elements of `T`.
    fn layout(cap: usize) -> Layout {
        Layout::array::<T>(cap).unwrap()
    }

    // these should never return &T
    // these should never delete any entries

    // Compile-time rejection of zero-sized types, for which the allocation
    // and pointer arithmetic below would be meaningless.
    const NOT_ZST: () = if std::mem::size_of::<T>() == 0 {
        panic!("`LockFreeFrozenVec` cannot be used with ZSTs");
    };

    /// Pushes an element to the vector, potentially allocating new memory.
    /// Returns the index at which the element was inserted.
    pub fn push(&self, val: T) -> usize {
        // This statement actually does something: it evaluates a constant.
        #[allow(path_statements)]
        {
            Self::NOT_ZST
        }
        self.lock(|| {
            // These values must be consistent with the pointer we got.
            let len = self.len.load(Ordering::Acquire);
            let buffer_idx = buffer_index(len);
            let mut ptr = self.data[buffer_idx].load(Ordering::Acquire);
            if ptr.is_null() {
                // The target buffer has not been allocated yet; allocate it.
                let layout = Self::layout(buffer_size(buffer_idx));
                // SAFETY: `LockFreeFrozenVec` statically rejects zsts and the input `ptr` has always been
                // allocated at the size stated in `cap`.
                unsafe {
                    ptr = std::alloc::alloc(layout).cast::<T>();
                }

                assert!(!ptr.is_null());

                self.data[buffer_idx].store(ptr, Ordering::Release);
            }
            let local_index = len - prior_total_buffer_size(buffer_idx);
            // SAFETY: `local_index` lies inside the buffer just loaded or
            // allocated (buffer sizes and `buffer_index` are consistent by
            // construction), and we hold the writer spinlock, so no other
            // thread writes this slot concurrently.
            unsafe {
                ptr.add(local_index).write(val);
            }
            // This is written before updating the data pointer. Other `push` calls cannot observe this,
            // because they are blocked on acquiring the data pointer before they ever read the `len`.
            // `get` may read the length without synchronization, but that is fine,
            // as there will be actually the right number of elements stored, or less elements,
            // in which case you get a spurious `None`.
            self.len.store(len + 1, Ordering::Release);
            len
        })
    }

    /// Load an element (if it exists). This operation is lock-free and
    /// performs minimal synchronization.
    pub fn get(&self, index: usize) -> Option<T> {
        // The length can only grow, so just doing the length check
        // independently of the read is fine. Worst case we
        // read an old length value and end up returning `None` even if
        // another thread already inserted the value.
        let len = self.len.load(Ordering::Acquire);
        if index >= len {
            return None;
        }
        let buffer_idx = buffer_index(index);
        let buffer_ptr = self.data[buffer_idx].load(Ordering::Acquire);
        let local_index = index - prior_total_buffer_size(buffer_idx);
        // SAFETY: `index < len`, and `len` is published with Release only
        // after both the element and its buffer pointer were written, so the
        // Acquire loads above see an initialized element.
        Some(unsafe { *buffer_ptr.add(local_index) })
    }

    /// Returns `true` if no element has been pushed yet.
    pub fn is_empty(&self) -> bool {
        self.len.load(Ordering::Relaxed) == 0
    }

    /// Load an element (if it exists). This operation is lock-free and
    /// performs no synchronized atomic operations. This is a useful primitive to
    /// implement your own data structure with newtypes around the index.
    ///
    /// # Safety
    ///
    /// `index` must be in bounds, i.e. it must have been returned by a
    /// previous call to `push` on this same vector.
    pub unsafe fn get_unchecked(&self, index: usize) -> T {
        let buffer_idx = buffer_index(index);
        let buffer_ptr = self.data[buffer_idx].load(Ordering::Relaxed);
        let local_index = index - prior_total_buffer_size(buffer_idx);
        // SAFETY: upheld by the caller (`index` is in bounds, see above).
        unsafe { *buffer_ptr.add(local_index) }
    }

    /// Run a function on each buffer in the vector.
    ///
    /// ## Arguments
    /// - `func`: a function that takes a slice to the buffer and the buffer index
    ///
    // NOTE(review): the slice covers the entire buffer, including slots past
    // `len` in the last buffer that may not have been written yet — confirm
    // callers only rely on the initialized prefix.
    fn for_each_buffer(&self, mut func: impl FnMut(&[T], usize)) {
        // for each buffer, run the function
        for buffer_index in 0..NUM_BUFFERS {
            // get the buffer pointer
            if let Some(buffer_ptr) = NonNull::new(self.data[buffer_index].load(Ordering::Acquire))
            {
                // get the buffer size and index
                let buffer_size = buffer_size(buffer_index);

                // create a slice from the buffer pointer and size
                let buffer_slice =
                    unsafe { std::slice::from_raw_parts(buffer_ptr.as_ptr(), buffer_size) };

                // run the function
                func(buffer_slice, buffer_index);
            } else {
                // no data in this buffer, so we're done
                break;
            }
        }
    }
}
848
849impl<T: Copy + PartialEq> PartialEq for LockFreeFrozenVec<T> {
850    fn eq(&self, other: &Self) -> bool {
851        // first check the length
852        let self_len = self.len.load(Ordering::Acquire);
853        let other_len = other.len.load(Ordering::Acquire);
854        if self_len != other_len {
855            return false;
856        }
857
858        // Since the lengths are the same, just check the elements in order
859        for index in 0..self_len {
860            // This is safe because the indices are in bounds (for `LockFreeFrozenVec` the bounds can only grow).
861            if self.get(index) != other.get(index) {
862                return false;
863            }
864        }
865        return true;
866    }
867}
868
#[test]
fn test_non_lockfree_unchecked() {
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    struct Moo(i32);

    let vec = LockFreeFrozenVec::new();

    // Indices returned by concurrent `push` calls; the reader below only
    // calls `get_unchecked` on indices that a `push` already returned, so
    // the unsafe precondition (index in bounds) holds.
    let idx_set = std::sync::Mutex::new(std::collections::HashSet::new());

    std::thread::scope(|s| {
        s.spawn(|| {
            for i in 0..1000 {
                idx_set.lock().unwrap().insert(vec.push(Moo(i)));
            }
        });
        s.spawn(|| {
            for i in 0..1000 {
                idx_set.lock().unwrap().insert(vec.push(Moo(i)));
            }
        });
        for _ in 0..2000 {
            let idxes = std::mem::take(&mut *idx_set.lock().unwrap());
            for idx in idxes {
                unsafe {
                    vec.get_unchecked(idx);
                }
            }
        }
    });

    // Test dropping empty vecs
    LockFreeFrozenVec::<()>::new();
}
902
903impl<T: Copy + Clone> Clone for LockFreeFrozenVec<T> {
904    fn clone(&self) -> Self {
905        let mut coppied_data = [Self::NULL; NUM_BUFFERS];
906        // for each buffer, copy the data
907        self.for_each_buffer(|buffer_slice, buffer_index| {
908            // allocate a new buffer
909            let layout = Self::layout(buffer_slice.len());
910            let new_buffer_ptr = unsafe { std::alloc::alloc(layout).cast::<T>() };
911            assert!(!new_buffer_ptr.is_null());
912            // copy the data to the new buffer
913            unsafe {
914                std::ptr::copy_nonoverlapping(
915                    buffer_slice.as_ptr(),
916                    new_buffer_ptr,
917                    buffer_slice.len(),
918                );
919            };
920            // store the new buffer pointer
921            *coppied_data[buffer_index].get_mut() = new_buffer_ptr;
922        });
923
924        return Self {
925            data: coppied_data,
926            len: AtomicUsize::new(self.len.load(Ordering::Relaxed)),
927            locked: AtomicBool::new(false),
928        };
929    }
930}
931
#[test]
fn test_non_lockfree() {
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    struct Moo(i32);

    let vec = LockFreeFrozenVec::new();

    // Out-of-bounds reads return None instead of panicking.
    assert_eq!(vec.get(1), None);

    vec.push(Moo(1));
    let second = vec.push(Moo(2));
    vec.push(Moo(3));

    // `push` returns the index the value landed at.
    assert_eq!(vec.get(second), Some(Moo(2)));

    // Two writer threads race while the main thread polls until all 2000
    // newly pushed elements become visible.
    std::thread::scope(|s| {
        for _ in 0..2 {
            s.spawn(|| {
                for value in 0..1000 {
                    vec.push(Moo(value));
                }
            });
        }
        for idx in 0..2000 {
            while vec.get(idx).is_none() {}
        }
    });

    // Cloning preserves the leading elements...
    {
        let cloned = vec.clone();
        assert_eq!(cloned.get(0), Some(Moo(1)));
        assert_eq!(cloned.get(1), Some(Moo(2)));
        assert_eq!(cloned.get(2), Some(Moo(3)));
    }
    // ...works across buffer boundaries on a large vector...
    {
        let big = LockFreeFrozenVec::new();
        for value in 0..1000 {
            big.push(Moo(value));
        }
        let big_clone = big.clone();
        for idx in 0..1000 {
            assert_eq!(big_clone.get(idx), Some(Moo(idx as i32)));
        }
    }
    // ...and handles the empty case.
    {
        let empty = LockFreeFrozenVec::<()>::new();
        let empty_clone = empty.clone();
        assert_eq!(empty_clone.get(0), None);
    }

    // Test dropping empty vecs
    LockFreeFrozenVec::<()>::new();
}
991
992// TODO: Implement IntoIterator for LockFreeFrozenVec
993
/// Append-only threadsafe version of `std::collections::BTreeMap` where
/// insertion does not require mutable access
///
/// The inner `RwLock` is held only for the duration of each method call;
/// references handed out by `get`/`insert` rely on `V: StableDeref` and on
/// the map never removing entries.
#[derive(Debug)]
pub struct FrozenBTreeMap<K, V>(RwLock<BTreeMap<K, V>>);
998
impl<K: Clone + Ord, V: StableDeref> FrozenBTreeMap<K, V> {
    /// Creates an empty `FrozenBTreeMap`.
    pub fn new() -> Self {
        Self(RwLock::new(BTreeMap::new()))
    }

    // these should never return &K or &V
    // these should never delete any entries

    /// Returns a reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Ord`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use lurk_elsa::sync::FrozenBTreeMap;
    ///
    /// let map = FrozenBTreeMap::new();
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.get(&1), Some(&"a"));
    /// assert_eq!(map.get(&2), None);
    /// ```
    pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V::Target>
    where
        K: Borrow<Q>,
        Q: Ord,
    {
        let map = self.0.read().unwrap();
        // SAFETY: the pointer cast extends the borrow from the read guard's
        // lifetime to `&self`'s lifetime. This is sound because
        // `V: StableDeref` guarantees the deref target has a stable address
        // (unaffected by the `V` moving inside the `BTreeMap`), and this map
        // never deletes or overwrites entries, so the target lives as long
        // as `self`.
        let ret = unsafe { map.get(k).map(|x| &*(&**x as *const V::Target)) };
        ret
    }

    /// Insert a new value into the map. Does nothing if the key is already occupied.
    ///
    /// Returns a reference to the value stored under `k` — the existing
    /// value when the key was already occupied, otherwise the newly
    /// inserted `v`.
    ///
    /// # Examples
    ///
    /// ```
    /// use lurk_elsa::sync::FrozenBTreeMap;
    ///
    /// let map = FrozenBTreeMap::new();
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.get(&1), Some(&"a"));
    /// ```
    pub fn insert(&self, k: K, v: V) -> &V::Target {
        let mut map = self.0.write().unwrap();
        // SAFETY: as in `get`, extending the borrow past the write guard is
        // sound because `StableDeref` pins the target's address and entries
        // are never removed or replaced (`or_insert` keeps an existing
        // value).
        let ret = unsafe {
            let inserted = &**map.entry(k).or_insert(v);
            &*(inserted as *const _)
        };
        ret
    }

    /// Applies a function to the owner of the value corresponding to the key (if any).
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Ord`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use lurk_elsa::sync::FrozenBTreeMap;
    ///
    /// let map = FrozenBTreeMap::new();
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.map_get(&1, Clone::clone), Some(Box::new("a")));
    /// assert_eq!(map.map_get(&2, Clone::clone), None);
    /// ```
    pub fn map_get<Q: ?Sized, T, F>(&self, k: &Q, f: F) -> Option<T>
    where
        K: Borrow<Q>,
        Q: Ord,
        F: FnOnce(&V) -> T,
    {
        // NOTE: `f` runs while the read lock is held, so it should not block
        // or re-enter this map's write path.
        let map = self.0.read().unwrap();
        let ret = map.get(k).map(f);
        ret
    }
}
1080
1081impl<K, V> FrozenBTreeMap<K, V> {
1082    /// Collects the contents of this map into a vector of tuples.
1083    ///
1084    /// The order of the entries is as if iterating a [`BTreeMap`] (ordered by key).
1085    ///
1086    /// # Examples
1087    ///
1088    /// ```
1089    /// use lurk_elsa::sync::FrozenBTreeMap;
1090    ///
1091    /// let map = FrozenBTreeMap::new();
1092    /// map.insert(1, Box::new("a"));
1093    /// map.insert(2, Box::new("b"));
1094    /// let tuple_vec = map.into_tuple_vec();
1095    ///
1096    /// assert_eq!(tuple_vec, vec![(1, Box::new("a")), (2, Box::new("b"))]);
1097    /// ```
1098    pub fn into_tuple_vec(self) -> Vec<(K, V)> {
1099        self.0.into_inner().unwrap().into_iter().collect::<Vec<_>>()
1100    }
1101
1102    /// # Examples
1103    ///
1104    /// ```
1105    /// use lurk_elsa::sync::FrozenBTreeMap;
1106    ///
1107    /// let map = FrozenBTreeMap::new();
1108    /// assert_eq!(map.len(), 0);
1109    /// map.insert(1, Box::new("a"));
1110    /// assert_eq!(map.len(), 1);
1111    /// ```
1112    pub fn len(&self) -> usize {
1113        let map = self.0.read().unwrap();
1114        map.len()
1115    }
1116
1117    /// # Examples
1118    ///
1119    /// ```
1120    /// use lurk_elsa::sync::FrozenBTreeMap;
1121    ///
1122    /// let map = FrozenBTreeMap::new();
1123    /// assert_eq!(map.is_empty(), true);
1124    /// map.insert(1, Box::new("a"));
1125    /// assert_eq!(map.is_empty(), false);
1126    /// ```
1127    pub fn is_empty(&self) -> bool {
1128        let map = self.0.read().unwrap();
1129        map.is_empty()
1130    }
1131}
1132
1133impl<K: Clone + Ord, V: StableDeref> From<BTreeMap<K, V>> for FrozenBTreeMap<K, V> {
1134    fn from(map: BTreeMap<K, V>) -> Self {
1135        Self(RwLock::new(map))
1136    }
1137}
1138
1139impl<Q: ?Sized, K, V> Index<&Q> for FrozenBTreeMap<K, V>
1140where
1141    Q: Ord,
1142    K: Clone + Ord + Borrow<Q>,
1143    V: StableDeref,
1144{
1145    type Output = V::Target;
1146
1147    /// # Examples
1148    ///
1149    /// ```
1150    /// use lurk_elsa::sync::FrozenBTreeMap;
1151    ///
1152    /// let map = FrozenBTreeMap::new();
1153    /// map.insert(1, Box::new("a"));
1154    /// assert_eq!(map[&1], "a");
1155    /// ```
1156    fn index(&self, idx: &Q) -> &V::Target {
1157        self.get(idx)
1158            .expect("attempted to index FrozenBTreeMap with unknown key")
1159    }
1160}
1161
1162impl<K: Clone + Ord, V: StableDeref> FromIterator<(K, V)> for FrozenBTreeMap<K, V> {
1163    fn from_iter<T>(iter: T) -> Self
1164    where
1165        T: IntoIterator<Item = (K, V)>,
1166    {
1167        let map: BTreeMap<_, _> = iter.into_iter().collect();
1168        map.into()
1169    }
1170}
1171
1172impl<K: Clone + Ord, V: StableDeref> Default for FrozenBTreeMap<K, V> {
1173    fn default() -> Self {
1174        Self::new()
1175    }
1176}
1177
1178impl<K: Clone, V: Clone> Clone for FrozenBTreeMap<K, V> {
1179    fn clone(&self) -> Self {
1180        Self(self.0.read().unwrap().clone().into())
1181    }
1182}
1183
1184impl<K: PartialEq, V: PartialEq> PartialEq for FrozenBTreeMap<K, V> {
1185    fn eq(&self, other: &Self) -> bool {
1186        let self_ref: &BTreeMap<K, V> = &self.0.read().unwrap();
1187        let other_ref: &BTreeMap<K, V> = &other.0.read().unwrap();
1188        self_ref == other_ref
1189    }
1190}