// goggles/entity.rs

1use std::{
2    iter,
3    num::NonZeroI32,
4    sync::atomic::{AtomicU32, Ordering},
5    u32,
6};
7
8use hibitset::{AtomicBitSet, BitSet, BitSetLike, BitSetOr};
9use thiserror::Error;
10
11use crate::join::{Index, Join};
12
/// Error returned when an operation is attempted with an `Entity` that is not the current live
/// generation for its index in the `Allocator` (it was killed, or it came from a different
/// allocator).
#[derive(Debug, Error)]
#[error("Entity is no longer alive or has a mismatched generation")]
pub struct WrongGeneration;
16
/// Entities are unique "generational indexes" with low-valued `index` values that are appropriate
/// as indexes into contiguous arrays.
///
/// In order to make sure every `Entity` is unique, allocating an `Entity` with the same index will
/// result in an incremented `generation` field.
///
/// No two entities will share the same `index` and `generation`, so every created `Entity` is unique.
#[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct Entity {
    // Low-valued index suitable for indexing contiguous arrays.
    index: Index,
    // The generation this index was at when the entity was created; guaranteed alive (nonzero).
    generation: AliveGeneration,
}
29
30impl Entity {
31    /// The low-valued `index` of the Entity.
32    #[inline]
33    pub fn index(self) -> Index {
34        self.index
35    }
36
37    /// The entity's generation.
38    ///
39    /// This will never be zero.
40    #[inline]
41    pub fn generation(self) -> u32 {
42        self.generation.id() as u32
43    }
44
45    fn new(index: Index, generation: AliveGeneration) -> Entity {
46        Entity { index, generation }
47    }
48}
49
50pub type LiveBitSet<'a> = BitSetOr<&'a BitSet, &'a AtomicBitSet>;
51
/// Allocator of unique `Entity` IDs, supporting both exclusive (`&mut self`) and shared atomic
/// (`&self`) allocation and killing.
#[derive(Debug, Default)]
pub struct Allocator {
    // Committed generation for each index.  May lag behind `index_len` while atomic operations
    // are outstanding.
    generations: Vec<Generation>,
    // Indexes that are live non-atomically (including merged atomic allocations).
    alive: BitSet,
    // Indexes raised via `allocate_atomic` that have not yet been merged into `alive`.
    raised_atomic: AtomicBitSet,
    // Indexes marked by `kill_atomic` to be killed on the next `merge_atomic`.
    killed_atomic: AtomicBitSet,
    // Recycled indexes available for reuse by subsequent allocations.
    cache: EntityCache,
    // The maximum ever allocated index + 1.  If there are no outstanding atomic operations, the
    // `generations` vector should be equal to this length.
    index_len: AtomicIndex,
}
63
impl Allocator {
    /// Create an empty `Allocator`.
    pub fn new() -> Allocator {
        Allocator::default()
    }

    /// Kill the given entity.
    ///
    /// Will return `Err(WrongGeneration)` if the given entity is not the current generation in this
    /// allocator.
    #[inline]
    pub fn kill(&mut self, entity: Entity) -> Result<(), WrongGeneration> {
        if !self.is_alive(entity) {
            return Err(WrongGeneration);
        }

        self.alive.remove(entity.index);
        // Any pending atomic kill for this index is now redundant; clear it so `merge_atomic`
        // does not process this index a second time.
        self.killed_atomic.remove(entity.index);

        if self.raised_atomic.remove(entity.index) {
            // If this entity is alive atomically and we're killing it non-atomically, we must commit
            // the entity as having been added then killed so it can properly go into the cache.
            self.update_generation_length();
            let generation = &mut self.generations[entity.index as usize];
            debug_assert!(!generation.is_alive());
            *generation = generation.raised().generation().killed();
        } else {
            let generation = &mut self.generations[entity.index as usize];
            debug_assert!(generation.is_alive());
            *generation = generation.killed();
        }

        // Recycle the index for future allocations.
        self.cache.push(entity.index);

        Ok(())
    }

    /// Mark an entity to be killed on the next call to `Allocator::merge_atomic`.
    ///
    /// The entity's state is not changed at all until the next call to `Allocator::merge_atomic`,
    /// it is still considered live and may even have `Allocator::kill_atomic` called on it multiple
    /// times.
    ///
    /// If the entity is not current at the time of this call, however, then this will return
    /// `Err(WrongGeneration)`.
    #[inline]
    pub fn kill_atomic(&self, e: Entity) -> Result<(), WrongGeneration> {
        if !self.is_alive(e) {
            return Err(WrongGeneration);
        }

        self.killed_atomic.add_atomic(e.index());
        Ok(())
    }

    /// Returns whether the given entity has not been killed, and is thus the current generation for
    /// this allocator.
    ///
    /// More specifically, it checks whether the generation for the given entity is the current
    /// alive generation for that index.  This generally means that the generation is too old
    /// because the `Allocator` allocated it and then later killed it, but it may also happen if
    /// `Entity`s are improperly mixed between `Allocator` instances and this entity has a newer
    /// generation than the current live one for that index.
    #[inline]
    pub fn is_alive(&self, e: Entity) -> bool {
        self.entity(e.index()) == Some(e)
    }

    /// *If* the given index has a live entity associated with it, returns that live `Entity`.
    #[inline]
    pub fn entity(&self, index: Index) -> Option<Entity> {
        let generation = self.generation(index);
        if let Some(alive) = generation.to_alive() {
            Some(Entity::new(index, alive))
        } else if self.raised_atomic.contains(index) {
            // The index was raised atomically but not yet merged: its committed generation is
            // still the dead one, so compute the live generation it will have once merged.
            Some(Entity::new(index, generation.raised()))
        } else {
            None
        }
    }

    /// Allocate a new unique Entity.
    #[inline]
    pub fn allocate(&mut self) -> Entity {
        let index = self.cache.pop().unwrap_or_else(|| {
            // No recycled index available: grow the index space by one and commit the new
            // length to the `generations` vector.
            let index = *self.index_len.get_mut();
            let index_len = index.checked_add(1).expect("no entity left to allocate");
            *self.index_len.get_mut() = index_len;
            self.update_generation_length();
            index
        });

        self.alive.add(index);

        let generation = &mut self.generations[index as usize];
        let raised = generation.raised();
        *generation = raised.generation();
        Entity::new(index, raised)
    }

    /// Allocate an entity atomically.
    ///
    /// Atomically allocated entities are immediately valid, live entities indistinguishable from
    /// non-atomically allocated entities.
    ///
    /// The only observable difference is that the query performance of atomically allocated
    /// entities may be slightly worse until `merge_atomic` is called, at which point they will be
    /// merged into the same data structure that keeps track of regular live entities.
    #[inline]
    pub fn allocate_atomic(&self) -> Entity {
        let index = self.cache.pop_atomic().unwrap_or_else(|| {
            atomic_increment(&self.index_len).expect("no entity left to allocate")
        });

        // The committed generation is not touched here; the raised state lives in the bitset
        // until `merge_atomic` commits it.
        self.raised_atomic.add_atomic(index);
        Entity::new(index, self.generation(index).raised())
    }

    /// Returns a `BitSetLike` for all live entities.
    ///
    /// This is a `BitSetOr` of the non-atomically live entities and the atomically live entities.
    #[inline]
    pub fn live_bitset(&self) -> LiveBitSet {
        BitSetOr(&self.alive, &self.raised_atomic)
    }

    /// Merge all atomic operations done since the last call to `Allocator::merge_atomic`.
    ///
    /// Atomically allocated entities become merged into the faster non-atomic BitSet, and entities
    /// marked for deletion with `Allocator::kill_atomic` actually become killed.
    ///
    /// Takes a `&mut Vec<Entity>` parameter which will be cleared and filled with newly killed
    /// entities.
    pub fn merge_atomic(&mut self, killed: &mut Vec<Entity>) {
        killed.clear();

        self.update_generation_length();

        // Commit atomic raises first: an entity that was atomically allocated and then atomically
        // killed in the same period is raised here and killed in the loop below.
        for index in (&self.raised_atomic).iter() {
            let generation = &mut self.generations[index as usize];
            *generation = generation.raised().generation();
            self.alive.add(index);
        }
        self.raised_atomic.clear();

        for index in (&self.killed_atomic).iter() {
            self.alive.remove(index);
            let generation = &mut self.generations[index as usize];
            killed.push(Entity::new(index, generation.to_alive().unwrap()));
            *generation = generation.killed();
        }
        self.killed_atomic.clear();

        // Recycle the indexes of every newly killed entity.
        self.cache.extend(killed.iter().map(|e| e.index));
    }

    // Returns the committed generation for the given index.  Indexes beyond the committed
    // `generations` vector are implicitly at the dead zero generation.
    fn generation(&self, index: Index) -> Generation {
        self.generations
            .get(index as usize)
            .copied()
            .unwrap_or(Generation::zero())
    }

    // Commit the changes to the length of the generation vector from the atomically adjusted
    // index length.
    fn update_generation_length(&mut self) {
        let index_len = *self.index_len.get_mut() as usize;
        if self.generations.len() < index_len {
            self.generations.resize_with(index_len, Default::default);
        }
    }
}
235
// Joining over an `&Allocator` yields every live `Entity`, using the union of the non-atomic and
// atomic live bitsets as the mask.
impl<'a> Join for &'a Allocator {
    type Item = Entity;
    type Access = &'a Allocator;
    type Mask = LiveBitSet<'a>;

    fn open(self) -> (Self::Mask, Self::Access) {
        (self.live_bitset(), self)
    }

    // Expects `index` to have come from the mask, i.e. to be live.  For a live committed
    // generation `raised()` returns it unchanged; for an atomically raised but unmerged index it
    // computes the pending live generation, matching `Allocator::entity`.
    unsafe fn get(access: &Self::Access, index: Index) -> Self::Item {
        Entity::new(index, access.generation(index).raised())
    }
}
249
// Cache of freed entity indexes available for reuse, supporting non-atomic push/pop and an
// atomic pop.
#[derive(Default, Debug)]
struct EntityCache {
    // Stack of free indexes.  May be longer than `len` while atomic pops are outstanding.
    cache: Vec<Index>,
    // The logical length of the stack, adjustable atomically from `&self` methods.
    len: AtomicIndex,
}
255
impl EntityCache {
    // Push a single freed index onto the cache.
    fn push(&mut self, index: Index) {
        self.extend(iter::once(index));
    }

    // Pop a free index non-atomically, keeping the atomic length in sync with the vector.
    fn pop(&mut self) -> Option<Index> {
        self.maintain();
        let x = self.cache.pop();
        *self.len.get_mut() = self.cache.len() as Index;
        x
    }

    // Pop a free index atomically by decrementing the shared length counter.  The vector itself
    // is not modified; the slot above the new length is reclaimed later by `maintain`.
    fn pop_atomic(&self) -> Option<Index> {
        // `atomic_decrement` returns the pre-decrement length, so `x - 1` is the popped slot.
        atomic_decrement(&self.len).map(|x| self.cache[(x - 1) as usize])
    }

    // Commit outstanding atomic pops by truncating the vector to the atomically adjusted length.
    fn maintain(&mut self) {
        self.cache.truncate(*self.len.get_mut() as usize);
    }
}
276
277impl Extend<Index> for EntityCache {
278    fn extend<T: IntoIterator<Item = Index>>(&mut self, iter: T) {
279        self.maintain();
280        self.cache.extend(iter);
281        *self.len.get_mut() = self.cache.len() as Index;
282    }
283}
284
// The largest representable index; `atomic_increment` refuses to advance past it, which makes the
// `allocate*` methods panic once the index space is exhausted.
const MAX_INDEX: Index = u32::MAX;
// Atomic counterpart of `Index`, used for lengths that are adjusted from `&self` methods.
type AtomicIndex = AtomicU32;
287
type GenId = i32;
type NZGenId = NonZeroI32;

// A (possibly dead) generation for an entity index.
//
// The sign of the ID encodes liveness: an ID > 0 is a live generation, an ID <= 0 is a dead one.
// Killing a live generation negates its ID; raising a dead generation negates its ID and adds 1.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
struct Generation(GenId);

impl Generation {
    // Generations start at the dead generation of zero.
    fn zero() -> Generation {
        Generation(0)
    }

    fn id(self) -> GenId {
        self.0
    }

    // A generation is alive if its ID is > 0
    fn is_alive(self) -> bool {
        self.0 > 0
    }

    // If this generation is alive, returns it as an `AliveGeneration`, otherwise returns `None`.
    fn to_alive(self) -> Option<AliveGeneration> {
        if self.0 > 0 {
            // `self.0 > 0` guarantees the ID is nonzero, so the checked constructor always
            // succeeds here; using it avoids an `unsafe` block at no runtime cost.
            NZGenId::new(self.0).map(AliveGeneration)
        } else {
            None
        }
    }

    // If this generation is alive, returns the 'killed' version of this generation, otherwise just
    // returns the current dead generation.
    //
    // The 'killed' version of a generation has an ID which is the negation of its current live ID.
    fn killed(self) -> Generation {
        if self.is_alive() {
            Generation(-self.id())
        } else {
            self
        }
    }

    // If this generation is dead, returns the 'raised' version of this generation, otherwise just
    // returns the current live generation.
    //
    // The 'raised' version of a generation has an ID which is the negation of its current dead ID
    // (so the positive version of its dead ID) + 1.
    fn raised(self) -> AliveGeneration {
        let id = if self.0 > 0 {
            self.0
        } else {
            // Dead IDs are <= 0, so `1 - id` is >= 1 unless it overflows `GenId`.
            (1 as GenId)
                .checked_sub(self.id())
                .expect("generation overflow")
        };
        // Both branches produce an ID >= 1, so the checked constructor always succeeds.
        AliveGeneration(NZGenId::new(id).expect("raised generation id is always positive"))
    }
}

// A generation that is guaranteed to be alive.
//
// Since the generation id cannot be 0, this can use `NZGenId` and enable layout optimizations.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
struct AliveGeneration(NZGenId);

impl AliveGeneration {
    fn id(self) -> GenId {
        self.0.get()
    }

    fn generation(self) -> Generation {
        Generation(self.0.get())
    }
}
361
362// Increments `i` atomically without wrapping on overflow.  Resembles a `fetch_add(1,
363// Ordering::Relaxed)` with checked overflow, returning `None` instead.
364fn atomic_increment(i: &AtomicIndex) -> Option<Index> {
365    let mut prev = i.load(Ordering::Relaxed);
366    while prev != MAX_INDEX {
367        match i.compare_exchange_weak(prev, prev + 1, Ordering::Relaxed, Ordering::Relaxed) {
368            Ok(x) => return Some(x),
369            Err(next_prev) => prev = next_prev,
370        }
371    }
372    None
373}
374
375// Increments `i` atomically without wrapping on overflow.  Resembles a `fetch_sub(1,
376// Ordering::Relaxed)` with checked underflow, returning `None` instead.
377fn atomic_decrement(i: &AtomicIndex) -> Option<Index> {
378    let mut prev = i.load(Ordering::Relaxed);
379    while prev != 0 {
380        match i.compare_exchange_weak(prev, prev - 1, Ordering::Relaxed, Ordering::Relaxed) {
381            Ok(x) => return Some(x),
382            Err(next_prev) => prev = next_prev,
383        }
384    }
385    None
386}