// edict/entity/allocator.rs

1use alloc::boxed::Box;
2use core::num::NonZeroU64;
3
/// Range of raw entity IDs.
/// `start` is inclusive, `end` is exclusive.
///
/// `IdRangeAllocator` provides ranges of IDs.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct IdRange {
    /// Start of the range. Inclusive.
    pub start: NonZeroU64,

    /// End of the range. Exclusive.
    pub end: NonZeroU64,
}

/// Start of the valid ID range.
pub const START: NonZeroU64 = NonZeroU64::MIN;

/// End of the valid ID range.
/// This value is never allocated as valid ID.
pub const END: NonZeroU64 = NonZeroU64::MAX;

impl IdRange {
    /// Returns proper range with `start` less than or equal to `end`.
    pub fn proper(&self) -> Self {
        IdRange {
            start: self.start,
            // Clamp `end` up to `start` so the result is at worst empty.
            end: self.end.max(self.start),
        }
    }

    /// Returns number of IDs in the range.
    pub fn count(&self) -> u64 {
        debug_assert!(self.start <= self.end);
        self.end.get() - self.start.get()
    }

    /// Returns true if the range is empty.
    pub fn is_empty(&self) -> bool {
        debug_assert!(self.start <= self.end);
        self.start == self.end
    }

    /// Returns ID at the given index.
    /// Returns `None` if `idx` is out of bounds.
    pub fn get(&self, idx: u64) -> Option<NonZeroU64> {
        if idx >= self.count() {
            return None;
        }

        // `start + idx` can't overflow since `idx < count()`
        // implies `start + idx < end <= u64::MAX`.
        Some(self.start.checked_add(idx).expect("ID index overflows"))
    }

    /// Advances range by at most `count` IDs.
    /// Calls provided closure with each ID.
    /// Returns number of IDs advanced.
    pub fn advance(&mut self, count: u64, mut f: impl FnMut(NonZeroU64)) -> u64 {
        // Never advance past the end of the range.
        let count = count.min(self.count());

        let first = self.start.get();

        // `start + count` never overflows since `count <= count()`
        // implies `start + count <= end <= u64::MAX`.
        self.start = self.start.checked_add(count).expect("ID range overflows");

        for raw in first..self.start.get() {
            // `raw` starts at a `NonZeroU64` value and only grows,
            // so it is always non-zero.
            f(NonZeroU64::new(raw).expect("raw ID is non-zero"));
        }

        count
    }

    /// Take first ID from the range.
    pub fn take(&mut self) -> Option<NonZeroU64> {
        if self.is_empty() {
            return None;
        }

        let id = self.start;

        // `id + 1` can't overflow since `id < end <= u64::MAX`.
        self.start = id.checked_add(1).expect("ID overflows");

        Some(id)
    }
}
92
/// Allocator of entity IDs used by the world.
///
/// Holds two ID ranges: IDs are taken from `current`, and `next` is a
/// pre-fetched spare so reservations can spill past `current` before a
/// new range has to be requested from `range_alloc`.
pub(super) struct IdAllocator {
    // Range IDs are currently taken from.
    current: IdRange,
    // Pre-fetched range used once `current` is exhausted.
    next: IdRange,
    // Source of fresh ID ranges.
    range_alloc: Box<dyn IdRangeAllocator>,
}
98
impl IdAllocator {
    /// Id allocator that allocates IDs from range `1..u64::MAX`
    /// (`u64::MAX` itself is never allocated)
    /// without external ID ranges.
    pub fn new() -> Self {
        IdAllocator {
            // The whole valid ID space is available up-front.
            current: IdRange {
                start: START,
                end: END,
            },
            // Empty range: `DummyAllocator` never provides more IDs.
            next: IdRange {
                start: END,
                end: END,
            },
            range_alloc: Box::new(DummyAllocator),
        }
    }

    /// Id allocator that allocates IDs from ranges.
    /// And allocate ranges from the given id range allocator.
    pub fn with_range_allocator(mut range_alloc: Box<dyn IdRangeAllocator>) -> Self {
        // Pre-fetch two ranges so reservations can spill from
        // `current` into `next` without asking the allocator again.
        let current = range_alloc.allocate_range().proper();
        let next = range_alloc.allocate_range().proper();

        IdAllocator {
            current,
            next,
            range_alloc,
        }
    }

    /// Returns next ID from the range.
    /// If the range is exhausted, allocates new range from the allocator.
    /// If allocator is exhausted, returns `None`.
    pub fn next(&mut self) -> Option<NonZeroU64> {
        if self.current.is_empty() {
            // Promote the pre-fetched range and fetch a fresh spare.
            self.current = self.next;
            self.next = self.range_alloc.allocate_range().proper();
        }

        // Yields `None` only if the promoted range is empty too,
        // i.e. the range allocator is exhausted.
        self.current.take()
    }

    /// Reserves new ID.
    /// Call should use unique `idx` for each call
    /// between calls to `flush_reserved`.
    ///
    /// Caller SHOULD use `idx` values in order from 0 to not
    /// waste IDs.
    ///
    /// Returns `None` when `idx` does not fit into the IDs remaining
    /// in `current` and `next` combined.
    pub fn reserve(&self, idx: u64) -> Option<NonZeroU64> {
        if let Some(id) = self.current.get(idx) {
            return Some(id);
        }

        // `get` returned `None`, so `idx >= self.current.count()`
        // and this subtraction can't underflow.
        let idx2 = idx - self.current.count();
        self.next.get(idx2)
    }

    /// Returns reserve index of the ID.
    /// Returns `None` if ID is not reserved.
    pub fn reserved(&self, id: NonZeroU64) -> Option<u64> {
        let id = id.get();
        // Indices cover `current` first...
        if id >= self.current.start.get() && id < self.current.end.get() {
            return Some(id - self.current.start.get());
        }
        // ...then continue into `next`, offset by `current`'s size,
        // mirroring the lookup order of `reserve`.
        if id >= self.next.start.get() && id < self.next.end.get() {
            return Some(id - self.next.start.get() + self.current.count());
        }
        None
    }

    /// Calls provided closure with reserved IDs.
    /// `count` must be larger than all `idx` values passed to `reserve` that
    /// returned `Some`
    ///
    /// # Safety
    ///
    /// `count` must not exceed the total number of IDs remaining in the
    /// current and pre-fetched ranges combined. Otherwise fewer than
    /// `count` IDs are flushed; this is only caught by the
    /// `debug_assert_eq!` below in debug builds.
    #[inline(always)]
    pub unsafe fn flush_reserved(&mut self, count: u64, mut f: impl FnMut(NonZeroU64)) {
        let mut advanced = self.current.advance(count, &mut f);
        if advanced < count {
            // `current` is exhausted: drain the remainder from `next`,
            // promote it, and pre-fetch a fresh spare range.
            advanced += self.next.advance(count - advanced, &mut f);
            self.current = self.next;
            self.next = self.range_alloc.allocate_range().proper();
        }
        debug_assert_eq!(advanced, count);
    }
}
183
/// Allocator for entity IDs.
///
/// User may provide custom `IdRangeAllocator` implementation
/// to allocate ID ranges that `World` will be using.
///
/// This allows user to control IDs and ensure uniqueness across multiple worlds
/// when needed.
///
/// Allocator should return large range of IDs for two reasons.
/// First, it's faster to allocate IDs from pre-allocated range.
/// Second, entity reservation may not be able to allocate new range.
/// If current and pre-allocated ranges are exhausted, entity reservation will panic.
///
/// The actual size of range required to reserve entities between two flushes
/// is application specific, but `u32::MAX` is a safe upper bound
/// because edict does not support more than `u32::MAX` entities alive in the world.
///
/// # Safety
///
/// Implementations must return ranges that never overlap with any range
/// previously returned by the same allocator instance.
pub unsafe trait IdRangeAllocator: Send + Sync + 'static {
    /// Allocate range of unique entity IDs.
    /// IDs generated must be unique for the given allocator.
    /// Special allocator types may enforce uniqueness
    /// across multiple allocator instances.
    ///
    /// If allocator is exhausted, returns empty range.
    fn allocate_range(&mut self) -> IdRange;
}
209
210struct DummyAllocator;
211
212unsafe impl IdRangeAllocator for DummyAllocator {
213    fn allocate_range(&mut self) -> IdRange {
214        IdRange {
215            start: END,
216            end: END,
217        }
218    }
219}
220
/// `IdRangeAllocator` implementation that allocates single ID range
/// provided in constructor.
pub struct OneRangeAllocator {
    // Range handed out by the first `allocate_range` call;
    // replaced with the empty range `END..END` afterwards.
    range: IdRange,
}
226
227const fn client_range() -> IdRange {
228    IdRange {
229        start: unsafe { NonZeroU64::new_unchecked(1) },
230        end: unsafe { NonZeroU64::new_unchecked(1 << 48) },
231    }
232}
233
234const fn server_range() -> IdRange {
235    IdRange {
236        start: unsafe { NonZeroU64::new_unchecked(1 << 48) },
237        end: unsafe { NonZeroU64::new_unchecked(u64::MAX) },
238    }
239}
240
241impl OneRangeAllocator {
242    /// Creates new `OneRangeAllocator` that will allocate given range once.
243    /// And then return empty range on subsequent allocations.
244    pub const fn new(range: IdRange) -> Self {
245        OneRangeAllocator { range }
246    }
247
248    /// Creates new `OneRangeAllocator` that will allocate
249    /// client's entity ID range once.
250    /// The client's ID range is pre-defined range `1..2^48`.
251    ///
252    /// The range is chosen to be large enough to not cause
253    /// overflow in years of continuous client activity.
254    pub const fn client() -> Self {
255        OneRangeAllocator {
256            range: client_range(),
257        }
258    }
259
260    /// Creates new `OneRangeAllocator` that will allocate
261    /// server's entity ID range once.
262    /// The server's ID range is pre-defined range `2^48..2^64-1`.
263    /// The range is chosen to be large enough to not cause
264    /// overflow in thousands of years of continuous server activity.
265    ///
266    /// This allocator should only be used for isolated server setup.
267    /// If servers are interconnected and share entities,
268    /// construct custom allocator that will distribute ID ranges
269    /// from common pool.
270    pub const fn server() -> Self {
271        OneRangeAllocator {
272            range: server_range(),
273        }
274    }
275}
276
277unsafe impl IdRangeAllocator for OneRangeAllocator {
278    fn allocate_range(&mut self) -> IdRange {
279        let range = self.range;
280        self.range.start = END;
281        self.range.end = END;
282        range
283    }
284}