// rafx_base/atomic_once_cell_stack.rs

1use crate::atomic_once_cell_array::AtomicOnceCellArray;
2use std::ops::Range;
3use std::sync::atomic::{AtomicUsize, Ordering};
4
/// A fixed-size stack with `capacity` determined at run-time. The elements of the stack are uninitialized.
/// Elements may be initialized with `push` and then retrieved as a reference with `get`. `push` returns the
/// index of the element in the stack. The caller may reserve space for multiple elements with `reserve_uninit`.
/// Reserved elements must be initialized with `set`. Calling `push`, `reserve_uninit`, or `set` is thread-safe.
/// The stack will panic if the `capacity` is exceeded, or the `index` is out of range, or the `set` function is
/// called more than once on an index when `T` is not zero-sized, or if the `set` function is called on an index
/// that was not reserved by a prior call to `reserve_uninit`. The stack will only drop initialized elements.
///
/// # Guarantees
///
/// - The stack will not allocate if `capacity` is 0 or if `T` is zero-sized.
/// - The allocated memory will not be `default` initialized.
/// - Elements initialized by `push` or `set` are immutable.
/// - The synchronization is `lock-free`.
///
/// # Zero-sized Types
///
/// When `T` is zero-sized, the stack does not track individual indices and instead only maintains a count of
/// how many instances of `T` have been `set` in the stack. On `drop`, the stack will `drop` that many instances
/// of `T`. The stack will panic if the number of calls to `set` exceeds the capacity.
pub struct AtomicOnceCellStack<T> {
    // Fixed-capacity write-once storage; enforces out-of-bounds and
    // double-initialization panics described above.
    data: AtomicOnceCellArray<T>,
    // Number of slots handed out so far by `push` / `reserve_uninit`.
    // Despite the name, this is a length (one past the last claimed index).
    last_index: AtomicUsize,
}
29
30impl<T> AtomicOnceCellStack<T> {
31    pub fn with_capacity(capacity: usize) -> Self {
32        // SAFETY: `data` will catch any issues with <T> or capacity.
33        Self {
34            data: AtomicOnceCellArray::with_capacity(capacity),
35            last_index: AtomicUsize::new(0),
36        }
37    }
38
39    pub fn push(
40        &self,
41        val: T,
42    ) -> usize {
43        // SAFETY: `data` will catch index out of bounds or multiple attempted writes.
44        let last_len = self.last_index.fetch_add(1, Ordering::Relaxed);
45        self.data.set(last_len, val);
46        last_len
47    }
48
49    pub fn reserve_uninit(
50        &self,
51        num_to_reserve: usize,
52    ) -> usize {
53        let last_len = self.last_index.fetch_add(num_to_reserve, Ordering::Relaxed);
54
55        if last_len + num_to_reserve > self.capacity() {
56            // SAFETY: `push` and `set` will catch any incorrect indexing,
57            // but there's no sense in waiting to blow up some indeterminate time
58            // in the future if we know that this is a problem right now.
59            panic!(
60                "len {} + num_to_reserve {} must be <= capacity {}",
61                last_len,
62                num_to_reserve,
63                self.capacity()
64            );
65        }
66
67        last_len
68    }
69
70    pub fn set(
71        &self,
72        index: usize,
73        val: T,
74    ) {
75        if index < self.len() {
76            // SAFETY: `data` will catch multiple attempted writes.
77            self.data.set(index, val);
78        } else {
79            // SAFETY:
80            panic!(
81                "index {} must be < len {} (did you forget to `reserve_uninit` first?)",
82                index,
83                self.capacity()
84            );
85        }
86    }
87
88    pub fn get(
89        &self,
90        index: usize,
91    ) -> &T {
92        // SAFETY: `data` will catch index out of bounds or attempts to read uninitialized memory.
93        self.data.get(index)
94    }
95
96    pub unsafe fn get_range_unchecked(
97        &self,
98        range: Range<usize>,
99    ) -> &[T] {
100        let len = self.len();
101        if range.end > len {
102            panic!("end of range {} must be < len {}", range.end, len);
103        }
104
105        &self.data.get_all_unchecked()[range]
106    }
107
108    pub unsafe fn get_all_unchecked(&self) -> &[T] {
109        let len = self.len();
110        let capacity = self.capacity();
111        if len < capacity {
112            panic!("length {} must be == capacity {}", len, capacity);
113        }
114
115        self.data.get_all_unchecked()
116    }
117
118    pub fn capacity(&self) -> usize {
119        self.data.capacity()
120    }
121
122    pub fn len(&self) -> usize {
123        self.last_index.load(Ordering::Acquire)
124    }
125
126    pub fn iter(&self) -> Iter<T> {
127        self.into_iter()
128    }
129}
130
131impl<'a, T> IntoIterator for &'a AtomicOnceCellStack<T> {
132    type Item = &'a T;
133    type IntoIter = Iter<'a, T>;
134
135    fn into_iter(self) -> Self::IntoIter {
136        Iter::new(self)
137    }
138}
139
// Borrowing iterator over an `AtomicOnceCellStack`; yields `&T` for each
// index below the stack's current `len()`.
pub struct Iter<'a, T> {
    // Stack being iterated; `len()` is re-read on each `next` call.
    source: &'a AtomicOnceCellStack<T>,
    // Index of the element the next `next()` call will return.
    next_index: usize,
}
144
145impl<'a, T> Iter<'a, T> {
146    #[inline]
147    pub fn new(source: &'a AtomicOnceCellStack<T>) -> Self {
148        Self {
149            source,
150            next_index: 0,
151        }
152    }
153}
154
155impl<'a, T> Iterator for Iter<'a, T> {
156    type Item = &'a T;
157
158    fn next(&mut self) -> Option<Self::Item> {
159        if self.next_index < self.source.len() {
160            let index = self.next_index;
161            self.next_index += 1;
162            Some(self.source.get(index))
163        } else {
164            None
165        }
166    }
167}