async_ecs/entity/entities.rs

use std::iter::Iterator;
use std::sync::atomic::{AtomicU32, Ordering};

use hibitset::{AtomicBitSet, BitSet, BitSetLike};
use thiserror::Error;

use crate::{
    access::WriteStorage,
    component::Component,
    join::{Join, ParJoin},
};

use super::{Entity, Index};

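/// Allocator and registry for entities.
///
/// `alive` holds the entities that are visible to joins, `raised` the ones
/// created atomically since the last `maintain`, and `killed` the ones
/// deleted atomically since the last `maintain`. `cache` stores recycled
/// indices, `generations` the current generation of every index, and
/// `max_index` the next index that has never been handed out.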
#[derive(Default)]
pub struct Entities {
    alive: BitSet,
    raised: AtomicBitSet,
    killed: AtomicBitSet,

    cache: IndexCache,
    generations: Vec<u32>,
    max_index: AtomicU32,
}

impl Entities {
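    /// Allocates a new entity synchronously. The entity is alive as soon as
    /// this call returns, without waiting for the next `maintain`.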
    pub fn allocate(&mut self) -> Entity {
        let index = self.cache.pop().unwrap_or_else(|| {
            // `max_index` is the next never-used index: hand it out and bump
            // the counter, mirroring `atomic_increment` in `create`.
            let max_index = self.max_index.get_mut();
            let index = *max_index;
            *max_index = index.checked_add(1).expect("No entity left to allocate");

            index
        });

        self.update_generations(index as usize);

        self.alive.add(index);

        let generation = &mut self.generations[index as usize];
        *generation = generation.wrapping_add(1);

        Entity::from_parts(index, *generation)
    }

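    /// Creates a new entity atomically, i.e. with only a shared reference.
    /// The entity is not visible to joins until the next `maintain`.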
    pub fn create(&self) -> Entity {
        let index = self.cache.pop_atomic().unwrap_or_else(|| {
            atomic_increment(&self.max_index).expect("No entity left to allocate") as Index
        });

        self.raised.add_atomic(index);

        // A recycled index already has a generation entry to raise; a brand
        // new index starts at generation 1, which is also the value that
        // `maintain` will store for it.
        let generation = self
            .generations
            .get(index as usize)
            .map(|g| g.wrapping_add(1))
            .unwrap_or(1);

        Entity::from_parts(index, generation)
    }

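    /// Returns an endless iterator that atomically creates a new entity on
    /// every call to `next`.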
    pub fn create_iter(&self) -> CreateIterAtomic {
        CreateIterAtomic(self)
    }

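    /// Atomically creates an entity and returns a builder for attaching
    /// components to it. Dropping the builder without calling `build`
    /// deletes the entity again.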
    pub fn build_entity(&self) -> AtomicBuilder {
        let entity = self.create();

        AtomicBuilder {
            entities: self,
            entity,
            built: false,
        }
    }

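    /// Kills the given entities immediately and recycles their indices.
    /// Fails with `Error::EntityIsDead` if one of them is not alive.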
    pub fn kill(&mut self, delete: &[Entity]) -> Result<(), Error> {
        for &entity in delete {
            if !self.is_alive(entity) {
                return Err(Error::EntityIsDead {
                    id: entity.id(),
                    op: "kill",
                });
            }

            let index = entity.index();

            self.alive.remove(index);
            self.killed.remove(index);

            self.update_generations(index as usize);

            // The entity was created atomically and never maintained: raise
            // its stored generation now so it matches the entity we just
            // checked with `is_alive`.
            if self.raised.remove(index) {
                let gen = &mut self.generations[index as usize];
                *gen = gen.wrapping_add(1);
            }
        }

        self.cache.extend(delete.iter().map(Entity::index));

        Ok(())
    }

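    /// Marks an entity for deletion. The entity stays alive until the
    /// deletion is applied by the next `maintain`. Fails if the entity is
    /// not alive.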
    pub fn delete(&self, entity: Entity) -> Result<(), Error> {
        if !self.is_alive(entity) {
            return Err(Error::EntityIsDead {
                id: entity.id(),
                op: "delete",
            });
        }

        let index = entity.index();

        self.killed.add_atomic(index);

        Ok(())
    }

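    /// Returns `true` if the entity's index is in use and its generation
    /// matches the stored one.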
    #[inline]
    pub fn is_alive(&self, entity: Entity) -> bool {
        let idx = entity.index();
        let gen = entity.generation();

        match self.generations.get(idx as usize) {
            // Created atomically since the last `maintain`: the stored
            // generation will be raised when `maintain` runs.
            Some(g) if self.raised.contains(idx) => gen == g.wrapping_add(1),
            Some(g) => self.alive.contains(idx) && gen == *g,
            // Brand new atomic index with no generation entry yet; such
            // entities start at generation 1 (see `create`).
            None if self.raised.contains(idx) => gen == 1,
            None => false,
        }
    }

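    /// Applies all atomic operations recorded since the last call: entities
    /// from `create` become fully alive, entities marked by `delete` are
    /// removed and their indices recycled. Returns the deleted entities.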
    pub fn maintain(&mut self) -> Vec<Entity> {
        let mut deleted = vec![];

        let max_index = *self.max_index.get_mut();
        self.update_generations(max_index as usize + 1);

        // Promote atomically created entities to fully alive.
        for index in (&self.raised).iter() {
            let generation = &mut self.generations[index as usize];
            *generation = generation.wrapping_add(1);

            self.alive.add(index);
        }
        self.raised.clear();

        // Apply the deletions that were requested atomically.
        for index in (&self.killed).iter() {
            self.alive.remove(index);
            deleted.push(Entity::from_parts(index, self.generations[index as usize]));
        }
        // Clear `killed`, otherwise the same indices would be deleted and
        // recycled again on the next call.
        self.killed.clear();

        self.cache.extend(deleted.iter().map(Entity::index));

        deleted
    }

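    /// Grows the `generations` vec so that `index` is a valid slot.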
    fn update_generations(&mut self, index: usize) {
        if self.generations.len() <= index {
            self.generations.resize(index + 1, 0);
        }
    }
}

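/// Joining over `&Entities` yields every alive entity.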
impl<'a> Join for &'a Entities {
    type Mask = &'a BitSet;
    type Type = Entity;
    type Value = Self;

    unsafe fn open(self) -> (Self::Mask, Self) {
        (&self.alive, self)
    }

    unsafe fn get(v: &mut &'a Entities, index: Index) -> Entity {
        let generation = v
            .generations
            .get(index as usize)
            .copied()
            .unwrap_or_default();

        Entity::from_parts(index, generation)
    }
}

impl<'a> ParJoin for &'a Entities {}

#[derive(Debug, Error)]
pub enum Error {
    #[error("Entity is not alive (id = {id}, operation = {op})!")]
    EntityIsDead { id: u64, op: &'static str },
}

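/// Endless iterator returned by `Entities::create_iter`; each `next`
/// atomically creates a fresh entity.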
pub struct CreateIterAtomic<'a>(&'a Entities);

impl<'a> Iterator for CreateIterAtomic<'a> {
    type Item = Entity;

    fn next(&mut self) -> Option<Entity> {
        Some(self.0.create())
    }
}

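/// Builder returned by `Entities::build_entity`. The entity is created up
/// front; if the builder is dropped before `build` is called, the entity is
/// deleted again.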
pub struct AtomicBuilder<'a> {
    entities: &'a Entities,
    entity: Entity,
    built: bool,
}

impl<'a> AtomicBuilder<'a> {
    pub fn with<T: Component>(self, storage: &mut WriteStorage<T>, component: T) -> Self {
        storage.insert(self.entity, component).unwrap();

        self
    }

    pub fn build(mut self) -> Entity {
        self.built = true;

        self.entity
    }
}

impl<'a> Drop for AtomicBuilder<'a> {
    fn drop(&mut self) {
        if !self.built {
            self.entities.delete(self.entity).unwrap();
        }
    }
}

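/// Stack of recycled indices. `len` mirrors the number of valid entries so
/// that `pop_atomic` can hand out slots with only a shared reference.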
#[derive(Default, Debug)]
struct IndexCache {
    cache: Vec<Index>,
    len: AtomicU32,
}

impl IndexCache {
    fn push(&mut self, index: Index) {
        self.maintain();

        self.cache.push(index);

        *self.len.get_mut() = self.cache.len() as u32;
    }

    fn pop_atomic(&self) -> Option<Index> {
        // `atomic_decrement` returns the length before the decrement, so the
        // popped element lives at `x - 1`. The `Vec` itself is only mutated
        // through `&mut self`, so concurrent atomic pops read distinct slots.
        atomic_decrement(&self.len).map(|x| self.cache[x as usize - 1])
    }

    fn pop(&mut self) -> Option<Index> {
        self.maintain();

        let x = self.cache.pop();

        *self.len.get_mut() = self.cache.len() as u32;

        x
    }

    // Truncate the backing `Vec` to drop entries consumed by `pop_atomic`.
    fn maintain(&mut self) {
        self.cache.truncate(*self.len.get_mut() as usize);
    }
}

impl Extend<Index> for IndexCache {
    fn extend<T: IntoIterator<Item = Index>>(&mut self, iter: T) {
        self.maintain();

        self.cache.extend(iter);

        *self.len.get_mut() = self.cache.len() as u32;
    }
}

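/// Atomically increments `i` without wrapping, returning the previous value,
/// or `None` if `i` already holds `u32::MAX`.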
fn atomic_increment(i: &AtomicU32) -> Option<u32> {
    let mut prev = i.load(Ordering::Relaxed);

    while prev != u32::MAX {
        match i.compare_exchange_weak(prev, prev + 1, Ordering::Relaxed, Ordering::Relaxed) {
            // On success `compare_exchange_weak` yields the previous value.
            Ok(x) => return Some(x),
            Err(next_prev) => prev = next_prev,
        }
    }

    None
}

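/// Atomically decrements `i` without wrapping, returning the previous value,
/// or `None` if `i` is already `0`.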
fn atomic_decrement(i: &AtomicU32) -> Option<u32> {
    let mut prev = i.load(Ordering::Relaxed);

    while prev != 0 {
        match i.compare_exchange_weak(prev, prev - 1, Ordering::Relaxed, Ordering::Relaxed) {
            Ok(x) => return Some(x),
            Err(next_prev) => prev = next_prev,
        }
    }

    None
}