1use std::{
2 iter,
3 num::NonZeroI32,
4 sync::atomic::{AtomicU32, Ordering},
5 u32,
6};
7
8use hibitset::{AtomicBitSet, BitSet, BitSetLike, BitSetOr};
9use thiserror::Error;
10
11use crate::join::{Index, Join};
12
/// Error returned when an operation is given an `Entity` whose generation no
/// longer matches the allocator's current state (the handle is stale: its slot
/// was killed, and possibly reused, since the handle was created).
#[derive(Debug, Error)]
#[error("Entity is no longer alive or has a mismatched generation")]
pub struct WrongGeneration;
16
/// A handle to an entity: an index slot plus the (live) generation at which
/// the handle was created.
///
/// A handle only stays valid while the allocator holds the same generation for
/// its index; once the entity is killed, operations taking this handle report
/// `WrongGeneration`.
#[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct Entity {
    // Slot index into the allocator's tables.
    index: Index,
    // Generation observed at creation time; by construction always alive.
    generation: AliveGeneration,
}
29
30impl Entity {
31 #[inline]
33 pub fn index(self) -> Index {
34 self.index
35 }
36
37 #[inline]
41 pub fn generation(self) -> u32 {
42 self.generation.id() as u32
43 }
44
45 fn new(index: Index, generation: AliveGeneration) -> Entity {
46 Entity { index, generation }
47 }
48}
49
/// Bitset of every live index: indices alive through normal allocation plus
/// those allocated atomically but not yet merged.
pub type LiveBitSet<'a> = BitSetOr<&'a BitSet, &'a AtomicBitSet>;
51
/// Allocator of `Entity` handles.
///
/// Supports exclusive allocation/killing through `&mut self` as well as
/// deferred, atomic allocation/killing through `&self`; pending atomic
/// operations are folded into the main state by `merge_atomic`.
#[derive(Debug, Default)]
pub struct Allocator {
    // Per-index generation counters. May be shorter than the number of
    // allocated indices; missing entries are read as `Generation::zero()`.
    generations: Vec<Generation>,
    // Indices currently alive through non-atomic bookkeeping.
    alive: BitSet,
    // Indices allocated atomically whose generation raise is still pending.
    raised_atomic: AtomicBitSet,
    // Indices killed atomically, to be processed by `merge_atomic`.
    killed_atomic: AtomicBitSet,
    // Recycled indices available for reuse.
    cache: EntityCache,
    // High-water mark: number of index slots ever handed out.
    index_len: AtomicIndex,
}
63
impl Allocator {
    /// Creates an empty allocator (same as `Allocator::default()`).
    pub fn new() -> Allocator {
        Allocator::default()
    }

    /// Immediately kills `entity`, freeing its index for reuse.
    ///
    /// Handles entities allocated either normally or atomically, even when an
    /// atomic allocation has not yet been merged via `merge_atomic`.
    ///
    /// # Errors
    ///
    /// Returns `WrongGeneration` if `entity` is not currently alive.
    #[inline]
    pub fn kill(&mut self, entity: Entity) -> Result<(), WrongGeneration> {
        if !self.is_alive(entity) {
            return Err(WrongGeneration);
        }

        self.alive.remove(entity.index);
        // Drop any pending atomic kill for this index so a later
        // `merge_atomic` does not process the (now already dead) slot again.
        self.killed_atomic.remove(entity.index);

        if self.raised_atomic.remove(entity.index) {
            // Atomically allocated but never merged: its generation raise was
            // deferred, so apply the raise and the kill in one step. The
            // generations vec may not cover this index yet, so grow it first.
            self.update_generation_length();
            let generation = &mut self.generations[entity.index as usize];
            debug_assert!(!generation.is_alive());
            *generation = generation.raised().generation().killed();
        } else {
            // Normally-allocated entity: its stored generation is alive.
            let generation = &mut self.generations[entity.index as usize];
            debug_assert!(generation.is_alive());
            *generation = generation.killed();
        }

        self.cache.push(entity.index);

        Ok(())
    }

    /// Marks `e` to be killed at the next `merge_atomic`, through a shared
    /// reference.
    ///
    /// # Errors
    ///
    /// Returns `WrongGeneration` if `e` is not currently alive.
    #[inline]
    pub fn kill_atomic(&self, e: Entity) -> Result<(), WrongGeneration> {
        if !self.is_alive(e) {
            return Err(WrongGeneration);
        }

        self.killed_atomic.add_atomic(e.index());
        Ok(())
    }

    /// Whether `e` is still the current, live entity for its index
    /// (index and generation both match).
    #[inline]
    pub fn is_alive(&self, e: Entity) -> bool {
        self.entity(e.index()) == Some(e)
    }

    /// Returns the currently live entity at `index`, if any.
    ///
    /// Covers both merged (stored generation alive) and pending atomic
    /// allocations (index present in `raised_atomic`).
    #[inline]
    pub fn entity(&self, index: Index) -> Option<Entity> {
        let generation = self.generation(index);
        if let Some(alive) = generation.to_alive() {
            Some(Entity::new(index, alive))
        } else if self.raised_atomic.contains(index) {
            // Allocation pending: report the generation it will have once
            // merged.
            Some(Entity::new(index, generation.raised()))
        } else {
            None
        }
    }

    /// Allocates a new entity with exclusive access, reusing a cached index
    /// when one is available.
    ///
    /// # Panics
    ///
    /// Panics if the index space is exhausted.
    #[inline]
    pub fn allocate(&mut self) -> Entity {
        let index = self.cache.pop().unwrap_or_else(|| {
            // No recycled index: take a fresh one from the high-water mark.
            let index = *self.index_len.get_mut();
            let index_len = index.checked_add(1).expect("no entity left to allocate");
            *self.index_len.get_mut() = index_len;
            self.update_generation_length();
            index
        });

        self.alive.add(index);

        // Raise the slot's generation and store it back as alive.
        let generation = &mut self.generations[index as usize];
        let raised = generation.raised();
        *generation = raised.generation();
        Entity::new(index, raised)
    }

    /// Allocates a new entity through a shared reference.
    ///
    /// The generation raise is deferred: the index is recorded in
    /// `raised_atomic` and the stored generation is updated later by
    /// `merge_atomic` (or by `kill`).
    ///
    /// # Panics
    ///
    /// Panics if the index space is exhausted.
    #[inline]
    pub fn allocate_atomic(&self) -> Entity {
        let index = self.cache.pop_atomic().unwrap_or_else(|| {
            atomic_increment(&self.index_len).expect("no entity left to allocate")
        });

        self.raised_atomic.add_atomic(index);
        Entity::new(index, self.generation(index).raised())
    }

    /// Bitset of all live indices (merged and pending-atomic alike).
    #[inline]
    pub fn live_bitset(&self) -> LiveBitSet {
        BitSetOr(&self.alive, &self.raised_atomic)
    }

    /// Folds all pending atomic allocations and kills into the main state.
    ///
    /// `killed` is cleared, then filled with the entities that were killed
    /// atomically; their indices are returned to the cache for reuse.
    pub fn merge_atomic(&mut self, killed: &mut Vec<Entity>) {
        killed.clear();

        self.update_generation_length();

        // Apply deferred generation raises from atomic allocation.
        for index in (&self.raised_atomic).iter() {
            let generation = &mut self.generations[index as usize];
            *generation = generation.raised().generation();
            self.alive.add(index);
        }
        self.raised_atomic.clear();

        // Apply atomic kills. An index that was both raised and killed
        // atomically was made alive by the loop above, so `to_alive` holds.
        for index in (&self.killed_atomic).iter() {
            self.alive.remove(index);
            let generation = &mut self.generations[index as usize];
            killed.push(Entity::new(index, generation.to_alive().unwrap()));
            *generation = generation.killed();
        }
        self.killed_atomic.clear();

        self.cache.extend(killed.iter().map(|e| e.index));
    }

    /// Stored generation for `index`, defaulting to `Generation::zero()` for
    /// slots the generations vec does not cover yet.
    fn generation(&self, index: Index) -> Generation {
        self.generations
            .get(index as usize)
            .copied()
            .unwrap_or(Generation::zero())
    }

    /// Grows the generations vec (with default, dead generations) so it covers
    /// every index below the current high-water mark.
    fn update_generation_length(&mut self) {
        let index_len = *self.index_len.get_mut() as usize;
        if self.generations.len() < index_len {
            self.generations.resize_with(index_len, Default::default);
        }
    }
}
235
// Joining over the allocator iterates every live entity (merged and
// pending-atomic alike).
impl<'a> Join for &'a Allocator {
    type Item = Entity;
    type Access = &'a Allocator;
    type Mask = LiveBitSet<'a>;

    fn open(self) -> (Self::Mask, Self::Access) {
        (self.live_bitset(), self)
    }

    // NOTE(review): presumably the `Join` contract requires `index` to be set
    // in the mask returned by `open` — confirm against `crate::join`. For such
    // an index `raised()` reconstructs the live generation: the stored one if
    // already alive, the pending raised one otherwise.
    unsafe fn get(access: &Self::Access, index: Index) -> Self::Item {
        Entity::new(index, access.generation(index).raised())
    }
}
249
// Stack of recycled entity indices.
//
// `len` mirrors `cache.len()` but can additionally be decremented atomically
// by `pop_atomic` through a shared reference; `maintain` later re-truncates
// `cache` to match. Thus `len <= cache.len()` always holds, and entries past
// `len` are stale.
#[derive(Default, Debug)]
struct EntityCache {
    cache: Vec<Index>,
    len: AtomicIndex,
}
255
impl EntityCache {
    /// Pushes a single recycled index (exclusive access).
    fn push(&mut self, index: Index) {
        self.extend(iter::once(index));
    }

    /// Pops an index with exclusive access, first syncing the vec with the
    /// atomic length, then publishing the new length.
    fn pop(&mut self) -> Option<Index> {
        self.maintain();
        let x = self.cache.pop();
        *self.len.get_mut() = self.cache.len() as Index;
        x
    }

    /// Pops an index through a shared reference.
    ///
    /// Only `len` is updated (atomically); the vec itself is untouched and the
    /// consumed tail entry becomes stale until the next `maintain`. The
    /// successful decrement from `x` to `x - 1` claims slot `x - 1` for this
    /// call exclusively, so the unsynchronized read is race-free.
    fn pop_atomic(&self) -> Option<Index> {
        atomic_decrement(&self.len).map(|x| self.cache[(x - 1) as usize])
    }

    /// Truncates the vec down to the atomically tracked length, discarding
    /// entries already consumed by `pop_atomic`.
    fn maintain(&mut self) {
        self.cache.truncate(*self.len.get_mut() as usize);
    }
}
276
277impl Extend<Index> for EntityCache {
278 fn extend<T: IntoIterator<Item = Index>>(&mut self, iter: T) {
279 self.maintain();
280 self.cache.extend(iter);
281 *self.len.get_mut() = self.cache.len() as Index;
282 }
283}
284
/// Largest representable index; allocation fails once the counter reaches it.
const MAX_INDEX: Index = u32::MAX;
/// Atomic counter type matching `Index`.
type AtomicIndex = AtomicU32;
287
/// Raw signed generation id.
type GenId = i32;
/// Nonzero generation id, used by `AliveGeneration` so that
/// `Option<AliveGeneration>`-style layouts stay free.
type NZGenId = NonZeroI32;

/// Generation counter for one entity index.
///
/// The sign encodes liveness: a positive id means the slot is alive at that
/// generation; zero or negative means it is dead, with the magnitude
/// remembering the last generation so the next raise bumps past it.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
struct Generation(GenId);

impl Generation {
    /// The initial (dead, never-allocated) generation.
    fn zero() -> Generation {
        Generation(0)
    }

    /// Raw signed id of this generation.
    fn id(self) -> GenId {
        self.0
    }

    /// Whether this generation marks a live slot.
    fn is_alive(self) -> bool {
        self.0 > 0
    }

    /// Converts to an `AliveGeneration` if this generation is alive.
    fn to_alive(self) -> Option<AliveGeneration> {
        if self.0 > 0 {
            // A positive id is necessarily nonzero, so this cannot fail.
            Some(AliveGeneration(
                NZGenId::new(self.0).expect("positive generation id is nonzero"),
            ))
        } else {
            None
        }
    }

    /// Returns the dead counterpart of this generation (the live id negated);
    /// an already-dead generation is returned unchanged.
    fn killed(self) -> Generation {
        if self.is_alive() {
            Generation(-self.id())
        } else {
            self
        }
    }

    /// Returns the live generation for this slot: a live generation is
    /// returned as-is, a dead one is bumped to the next live generation
    /// (`1 - id`, which is one past the magnitude of the last live id).
    ///
    /// # Panics
    ///
    /// Panics if the generation counter overflows `GenId`.
    fn raised(self) -> AliveGeneration {
        if self.0 > 0 {
            // Already alive; positive implies nonzero.
            AliveGeneration(NZGenId::new(self.0).expect("positive generation id is nonzero"))
        } else {
            // Dead ids are <= 0, so `1 - id >= 1` unless it overflows.
            let id = (1 as GenId)
                .checked_sub(self.id())
                .expect("generation overflow");
            AliveGeneration(NZGenId::new(id).expect("raised generation id is nonzero"))
        }
    }
}

/// A generation that is statically known to be alive (positive, nonzero id).
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
struct AliveGeneration(NZGenId);

impl AliveGeneration {
    /// Raw (positive) id of this generation.
    fn id(self) -> GenId {
        self.0.get()
    }

    /// Widens back to a plain `Generation`.
    fn generation(self) -> Generation {
        Generation(self.0.get())
    }
}
361
362fn atomic_increment(i: &AtomicIndex) -> Option<Index> {
365 let mut prev = i.load(Ordering::Relaxed);
366 while prev != MAX_INDEX {
367 match i.compare_exchange_weak(prev, prev + 1, Ordering::Relaxed, Ordering::Relaxed) {
368 Ok(x) => return Some(x),
369 Err(next_prev) => prev = next_prev,
370 }
371 }
372 None
373}
374
375fn atomic_decrement(i: &AtomicIndex) -> Option<Index> {
378 let mut prev = i.load(Ordering::Relaxed);
379 while prev != 0 {
380 match i.compare_exchange_weak(prev, prev - 1, Ordering::Relaxed, Ordering::Relaxed) {
381 Ok(x) => return Some(x),
382 Err(next_prev) => prev = next_prev,
383 }
384 }
385 None
386}