// hecs/entities.rs

1use alloc::vec::Vec;
2use core::cmp;
3use core::convert::TryFrom;
4use core::iter::ExactSizeIterator;
5use core::num::{NonZeroU32, NonZeroU64};
6use core::ops::Range;
7use core::sync::atomic::{AtomicIsize, Ordering};
8use core::{fmt, mem};
9#[cfg(feature = "std")]
10use std::error::Error;
11
/// Lightweight unique ID, or handle, of an entity
///
/// Obtained from `World::spawn`. Can be stored to refer to an entity in the future.
///
/// Enable the `serde` feature on the crate to make this `Serialize`able. Some applications may be
/// able to save space by only serializing the output of `Entity::id`.
#[derive(Clone, Copy, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct Entity {
    // Index into `Entities::meta`; unique among live entities, reused after free.
    pub(crate) id: u32,
    // Distinguishes successive entities occupying the same `id` slot.
    pub(crate) generation: NonZeroU32,
}

impl Entity {
    /// An [`Entity`] that does not necessarily correspond to data in any `World`
    ///
    /// Useful as a dummy value. It is possible (albeit unlikely) for a `World` to contain this
    /// entity.
    pub const DANGLING: Entity = Entity {
        generation: match NonZeroU32::new(u32::MAX) {
            Some(x) => x,
            None => unreachable!(),
        },
        id: u32::MAX,
    };

    /// Convert to a form convenient for passing outside of rust
    ///
    /// No particular structure is guaranteed for the returned bits.
    ///
    /// Useful for storing entity IDs externally, or in conjunction with `Entity::from_bits` and
    /// `World::spawn_at` for easy serialization. Alternatively, consider `id` for more compact
    /// representation.
    pub const fn to_bits(self) -> NonZeroU64 {
        // The generation occupies the high 32 bits and is nonzero, so the result is
        // always nonzero; construct it safely instead of using `new_unchecked`.
        match NonZeroU64::new(((self.generation.get() as u64) << 32) | (self.id as u64)) {
            Some(bits) => bits,
            None => unreachable!(),
        }
    }

    /// Reconstruct an `Entity` previously destructured with `to_bits` if the bitpattern is valid,
    /// else `None`
    ///
    /// Useful for storing entity IDs externally, or in conjunction with `Entity::to_bits` and
    /// `World::spawn_at` for easy serialization.
    pub const fn from_bits(bits: u64) -> Option<Self> {
        Some(Self {
            // `?` is not yet supported in const fns
            generation: match NonZeroU32::new((bits >> 32) as u32) {
                Some(g) => g,
                None => return None,
            },
            id: bits as u32,
        })
    }

    /// Extract a transiently unique identifier
    ///
    /// No two simultaneously-live entities share the same ID, but dead entities' IDs may collide
    /// with both live and dead entities. Useful for compactly representing entities within a
    /// specific snapshot of the world, such as when serializing.
    ///
    /// See also `World::find_entity_from_id`.
    pub const fn id(self) -> u32 {
        self.id
    }
}
77
78impl fmt::Debug for Entity {
79    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
80        write!(f, "{}v{}", self.id, self.generation)
81    }
82}
83
#[cfg(feature = "serde")]
impl serde::Serialize for Entity {
    /// Serializes as the single `u64` produced by [`Entity::to_bits`].
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.to_bits().serialize(serializer)
    }
}
93
#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for Entity {
    /// Deserializes the `u64` written by `Serialize`, rejecting bitpatterns that
    /// `Entity::from_bits` cannot represent (i.e. those with a zero generation).
    fn deserialize<D>(deserializer: D) -> Result<Entity, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let bits = u64::deserialize(deserializer)?;

        match Entity::from_bits(bits) {
            Some(ent) => Ok(ent),
            None => Err(serde::de::Error::invalid_value(
                serde::de::Unexpected::Unsigned(bits),
                // Fixed: the expectation message had a stray leading backtick.
                &"a valid `Entity` bitpattern",
            )),
        }
    }
}
111
/// An iterator returning a sequence of Entity values from `Entities::reserve_entities`.
///
/// Yields freelist IDs (with their current generation) first, then brand-new IDs
/// with the first generation.
pub struct ReserveEntitiesIterator<'a> {
    // Metas, so we can recover the current generation for anything in the freelist.
    meta: &'a [EntityMeta],

    // Reserved IDs formerly in the freelist to hand out.
    id_iter: core::slice::Iter<'a, u32>,

    // New Entity IDs to hand out, outside the range of meta.len().
    id_range: core::ops::Range<u32>,
}
123
124impl Iterator for ReserveEntitiesIterator<'_> {
125    type Item = Entity;
126
127    fn next(&mut self) -> Option<Self::Item> {
128        self.id_iter
129            .next()
130            .map(|&id| Entity {
131                generation: self.meta[id as usize].generation,
132                id,
133            })
134            .or_else(|| {
135                self.id_range.next().map(|id| Entity {
136                    generation: NonZeroU32::new(1).unwrap(),
137                    id,
138                })
139            })
140    }
141
142    fn size_hint(&self) -> (usize, Option<usize>) {
143        let len = self.id_iter.len() + self.id_range.len();
144        (len, Some(len))
145    }
146}
147
148impl ExactSizeIterator for ReserveEntitiesIterator<'_> {}
149
#[derive(Default)]
pub(crate) struct Entities {
    // Per-ID generation and location, indexed by entity ID.
    pub meta: Vec<EntityMeta>,

    // The `pending` and `free_cursor` fields describe three sets of Entity IDs
    // that have been freed or are in the process of being allocated:
    //
    // - The `freelist` IDs, previously freed by `free()`. These IDs are available to any
    //   of `alloc()`, `reserve_entity()` or `reserve_entities()`. Allocation will
    //   always prefer these over brand new IDs.
    //
    // - The `reserved` list of IDs that were once in the freelist, but got
    //   reserved by `reserve_entities` or `reserve_entity()`. They are now waiting
    //   for `flush()` to make them fully allocated.
    //
    // - The count of new IDs that do not yet exist in `self.meta()`, but which
    //   we have handed out and reserved. `flush()` will allocate room for them in `self.meta()`.
    //
    // The contents of `pending` look like this:
    //
    // ```
    // ----------------------------
    // |  freelist  |  reserved   |
    // ----------------------------
    //              ^             ^
    //          free_cursor   pending.len()
    // ```
    //
    // As IDs are allocated, `free_cursor` is atomically decremented, moving
    // items from the freelist into the reserved list by sliding over the boundary.
    //
    // Once the freelist runs out, `free_cursor` starts going negative.
    // The more negative it is, the more IDs have been reserved starting exactly at
    // the end of `meta.len()`.
    //
    // This formulation allows us to reserve any number of IDs first from the freelist
    // and then from the new IDs, using only a single atomic subtract.
    //
    // Once `flush()` is done, `free_cursor` will equal `pending.len()`.
    pending: Vec<u32>,
    free_cursor: AtomicIsize,
    // Count of allocated entities; maintained by alloc/free/flush (reservations
    // are not counted until flushed).
    len: u32,
}
193
impl Entities {
    /// Reserve entity IDs concurrently
    ///
    /// Storage for entity generation and location is lazily allocated by calling `flush`.
    pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator<'_> {
        // Use one atomic subtract to grab a range of new IDs. The range might be
        // entirely nonnegative, meaning all IDs come from the freelist, or entirely
        // negative, meaning they are all new IDs to allocate, or a mix of both.
        let range_end = self
            .free_cursor
            .fetch_sub(count as isize, Ordering::Relaxed);
        let range_start = range_end - count as isize;

        // The nonnegative portion of `range_start..range_end` indexes `pending`.
        let freelist_range = range_start.max(0) as usize..range_end.max(0) as usize;

        let (new_id_start, new_id_end) = if range_start >= 0 {
            // We satisfied all requests from the freelist.
            (0, 0)
        } else {
            // We need to allocate some new Entity IDs outside of the range of self.meta.
            //
            // `range_start` covers some negative territory, e.g. `-3..6`.
            // Since the nonnegative values `0..6` are handled by the freelist, that
            // means we need to handle the negative range here.
            //
            // In this example, we truncate the end to 0, leaving us with `-3..0`.
            // Then we negate these values to indicate how far beyond the end of `meta.end()`
            // to go, yielding `meta.len()+0 .. meta.len()+3`.
            let base = self.meta.len() as isize;

            let new_id_end = u32::try_from(base - range_start).expect("too many entities");

            // `new_id_end` is in range, so no need to check `start`.
            let new_id_start = (base - range_end.min(0)) as u32;

            (new_id_start, new_id_end)
        };

        ReserveEntitiesIterator {
            meta: &self.meta[..],
            id_iter: self.pending[freelist_range].iter(),
            id_range: new_id_start..new_id_end,
        }
    }

    /// Reserve one entity ID concurrently
    ///
    /// Equivalent to `self.reserve_entities(1).next().unwrap()`, but more efficient.
    pub fn reserve_entity(&self) -> Entity {
        let n = self.free_cursor.fetch_sub(1, Ordering::Relaxed);
        if n > 0 {
            // Allocate from the freelist.
            let id = self.pending[(n - 1) as usize];
            Entity {
                generation: self.meta[id as usize].generation,
                id,
            }
        } else {
            // Grab a new ID, outside the range of `meta.len()`. `flush()` must
            // eventually be called to make it valid.
            //
            // As `self.free_cursor` goes more and more negative, we return IDs farther
            // and farther beyond `meta.len()`.
            Entity {
                generation: NonZeroU32::new(1).unwrap(),
                id: u32::try_from(self.meta.len() as isize - n).expect("too many entities"),
            }
        }
    }

    /// Check that we do not have pending work requiring `flush()` to be called.
    fn verify_flushed(&mut self) {
        debug_assert!(
            !self.needs_flush(),
            "flush() needs to be called before this operation is legal"
        );
    }

    /// Allocate an entity ID directly
    ///
    /// Location should be written immediately.
    pub fn alloc(&mut self) -> Entity {
        self.verify_flushed();

        self.len += 1;
        if let Some(id) = self.pending.pop() {
            // Reuse a freed ID, keeping the generation recorded in `meta`.
            let new_free_cursor = self.pending.len() as isize;
            *self.free_cursor.get_mut() = new_free_cursor;
            Entity {
                generation: self.meta[id as usize].generation,
                id,
            }
        } else {
            // No freed IDs available; grow `meta` by one slot.
            let id = u32::try_from(self.meta.len()).expect("too many entities");
            self.meta.push(EntityMeta::EMPTY);
            Entity {
                generation: NonZeroU32::new(1).unwrap(),
                id,
            }
        }
    }

    /// Allocate and set locations for many entity IDs laid out contiguously in an archetype
    ///
    /// `self.finish_alloc_many()` must be called after!
    pub fn alloc_many(&mut self, n: u32, archetype: u32, mut first_index: u32) -> AllocManyState {
        self.verify_flushed();

        // Number of brand-new IDs needed once the freelist is exhausted.
        let fresh = (n as usize).saturating_sub(self.pending.len()) as u32;
        assert!(
            (self.meta.len() + fresh as usize) < u32::MAX as usize,
            "too many entities"
        );
        // Freelist IDs are consumed from the tail; `finish_alloc_many` later
        // truncates `pending` to `pending_end`.
        let pending_end = self.pending.len().saturating_sub(n as usize);
        for &id in &self.pending[pending_end..] {
            self.meta[id as usize].location = Location {
                archetype,
                index: first_index,
            };
            first_index += 1;
        }

        // Fresh IDs continue where `meta` currently ends, with consecutive rows.
        let fresh_start = self.meta.len() as u32;
        self.meta.extend(
            (first_index..(first_index + fresh)).map(|index| EntityMeta {
                generation: NonZeroU32::new(1).unwrap(),
                location: Location { archetype, index },
            }),
        );

        self.len += n;

        AllocManyState {
            fresh: fresh_start..(fresh_start + fresh),
            pending_end,
        }
    }

    /// Remove entities used by `alloc_many` from the freelist
    ///
    /// This is an awkward separate function to avoid borrowck issues in `SpawnColumnBatchIter`.
    pub fn finish_alloc_many(&mut self, pending_end: usize) {
        self.pending.truncate(pending_end);
    }

    /// Allocate a specific entity ID, overwriting its generation
    ///
    /// Returns the location of the entity currently using the given ID, if any. Location should be written immediately.
    pub fn alloc_at(&mut self, entity: Entity) -> Option<Location> {
        self.verify_flushed();

        let loc = if entity.id as usize >= self.meta.len() {
            // ID has never been used in this world before; any IDs skipped over
            // become part of the freelist.
            self.pending.extend((self.meta.len() as u32)..entity.id);
            let new_free_cursor = self.pending.len() as isize;
            *self.free_cursor.get_mut() = new_free_cursor;
            self.meta.resize(entity.id as usize + 1, EntityMeta::EMPTY);
            self.len += 1;
            None
        } else if let Some(index) = self.pending.iter().position(|item| *item == entity.id) {
            // ID was previously in use, but is now free
            self.pending.swap_remove(index);
            let new_free_cursor = self.pending.len() as isize;
            *self.free_cursor.get_mut() = new_free_cursor;
            self.len += 1;
            None
        } else {
            // ID is currently in use by a live entity
            Some(mem::replace(
                &mut self.meta[entity.id as usize].location,
                EntityMeta::EMPTY.location,
            ))
        };

        self.meta[entity.id as usize].generation = entity.generation;

        loc
    }

    /// Destroy an entity, allowing it to be reused
    ///
    /// Must not be called while reserved entities are awaiting `flush()`.
    pub fn free(&mut self, entity: Entity) -> Result<Location, NoSuchEntity> {
        self.verify_flushed();

        let meta = self.meta.get_mut(entity.id as usize).ok_or(NoSuchEntity)?;
        // `location.index == u32::MAX` marks a slot with no live entity.
        if meta.generation != entity.generation || meta.location.index == u32::MAX {
            return Err(NoSuchEntity);
        }

        // Bump the generation so stale handles are invalidated, wrapping past
        // u32::MAX back to 1 (generation can never be 0).
        meta.generation = NonZeroU32::new(u32::from(meta.generation).wrapping_add(1))
            .unwrap_or_else(|| NonZeroU32::new(1).unwrap());

        let loc = mem::replace(&mut meta.location, EntityMeta::EMPTY.location);

        self.pending.push(entity.id);

        let new_free_cursor = self.pending.len() as isize;
        *self.free_cursor.get_mut() = new_free_cursor;
        self.len -= 1;

        Ok(loc)
    }

    /// Ensure at least `n` allocations can succeed without reallocating
    pub fn reserve(&mut self, additional: u32) {
        self.verify_flushed();

        // Only the portion not covered by the freelist requires new `meta` slots.
        let freelist_size = *self.free_cursor.get_mut();
        let shortfall = additional as isize - freelist_size;
        if shortfall > 0 {
            self.meta.reserve(shortfall as usize);
        }
    }

    /// Whether `entity` is live, or reserved and awaiting `flush()`
    pub fn contains(&self, entity: Entity) -> bool {
        match self.meta.get(entity.id as usize) {
            Some(meta) => {
                meta.generation == entity.generation
                    && (meta.location.index != u32::MAX
                        // Not live, but it may be a freelist ID reserved but not
                        // yet flushed (the `reserved` tail of `pending`).
                        || self.pending[self.free_cursor.load(Ordering::Relaxed).max(0) as usize..]
                            .contains(&entity.id))
            }
            None => {
                // Check if this could have been obtained from `reserve_entity`
                let free = self.free_cursor.load(Ordering::Relaxed);
                entity.generation.get() == 1
                    && free < 0
                    && (entity.id as isize) < (free.abs() + self.meta.len() as isize)
            }
        }
    }

    /// Destroy all entities and reset allocation state
    pub fn clear(&mut self) {
        self.meta.clear();
        self.pending.clear();
        *self.free_cursor.get_mut() = 0;
        self.len = 0;
    }

    /// Access the location storage of an entity
    ///
    /// Must not be called on pending entities.
    pub fn get_mut(&mut self, entity: Entity) -> Result<&mut Location, NoSuchEntity> {
        let meta = self.meta.get_mut(entity.id as usize).ok_or(NoSuchEntity)?;
        if meta.generation == entity.generation && meta.location.index != u32::MAX {
            Ok(&mut meta.location)
        } else {
            Err(NoSuchEntity)
        }
    }

    /// Returns `Ok(Location { archetype: 0, index: undefined })` for pending entities
    pub fn get(&self, entity: Entity) -> Result<Location, NoSuchEntity> {
        if self.meta.len() <= entity.id as usize {
            // Check if this could have been obtained from `reserve_entity`
            let free = self.free_cursor.load(Ordering::Relaxed);
            if entity.generation.get() == 1
                && free < 0
                && (entity.id as isize) < (free.abs() + self.meta.len() as isize)
            {
                return Ok(Location {
                    archetype: 0,
                    index: u32::MAX,
                });
            } else {
                return Err(NoSuchEntity);
            }
        }
        let meta = &self.meta[entity.id as usize];
        if meta.generation != entity.generation || meta.location.index == u32::MAX {
            return Err(NoSuchEntity);
        }
        Ok(meta.location)
    }

    /// Panics if the given id would represent an index outside of `meta`.
    ///
    /// # Safety
    /// Must only be called for currently allocated `id`s.
    pub unsafe fn resolve_unknown_gen(&self, id: u32) -> Entity {
        let meta_len = self.meta.len();

        if meta_len > id as usize {
            let meta = &self.meta[id as usize];
            Entity {
                generation: meta.generation,
                id,
            }
        } else {
            // See if it's pending, but not yet flushed.
            let free_cursor = self.free_cursor.load(Ordering::Relaxed);
            let num_pending = cmp::max(-free_cursor, 0) as usize;

            if meta_len + num_pending > id as usize {
                // Pending entities will have the first generation.
                Entity {
                    generation: NonZeroU32::new(1).unwrap(),
                    id,
                }
            } else {
                panic!("entity id is out of range");
            }
        }
    }

    /// Whether reservations have been made since the last `flush()`
    fn needs_flush(&mut self) -> bool {
        // After a flush, `free_cursor` always equals `pending.len()`.
        *self.free_cursor.get_mut() != self.pending.len() as isize
    }

    /// Allocates space for entities previously reserved with `reserve_entity` or
    /// `reserve_entities`, then initializes each one using the supplied function.
    pub fn flush(&mut self, mut init: impl FnMut(u32, &mut Location)) {
        let free_cursor = *self.free_cursor.get_mut();

        let new_free_cursor = if free_cursor >= 0 {
            free_cursor as usize
        } else {
            // Negative cursor: that many brand-new IDs beyond `meta.len()` were
            // reserved; grow `meta` and initialize them.
            let old_meta_len = self.meta.len();
            let new_meta_len = old_meta_len + -free_cursor as usize;
            self.meta.resize(new_meta_len, EntityMeta::EMPTY);

            self.len += -free_cursor as u32;
            for (id, meta) in self.meta.iter_mut().enumerate().skip(old_meta_len) {
                init(id as u32, &mut meta.location);
            }

            *self.free_cursor.get_mut() = 0;
            0
        };

        // IDs past the cursor were reserved out of the freelist; make them live.
        self.len += (self.pending.len() - new_free_cursor) as u32;
        for id in self.pending.drain(new_free_cursor..) {
            init(id, &mut self.meta[id as usize].location);
        }
    }

    /// Count of allocated entities (excluding unflushed reservations)
    #[inline]
    pub fn len(&self) -> u32 {
        self.len
    }

    /// Iterate over freed IDs currently available for reuse, with their live generations
    pub fn freelist(&self) -> impl ExactSizeIterator<Item = Entity> + '_ {
        let free = self.free_cursor.load(Ordering::Relaxed);
        let ids = match usize::try_from(free) {
            // Negative cursor: the freelist is empty (all reserved or consumed).
            Err(_) => &[],
            Ok(free) => &self.pending[0..free],
        };
        ids.iter().map(|&id| Entity {
            id,
            generation: self.meta[id as usize].generation,
        })
    }

    /// Replace the freelist wholesale, growing `meta` as needed
    ///
    /// NOTE(review): presumably used when restoring a serialized world — confirm at call sites.
    pub fn set_freelist(&mut self, freelist: &[Entity]) {
        #[cfg(debug_assertions)]
        {
            for entity in freelist {
                let Some(meta) = self.meta.get(entity.id as usize) else {
                    continue;
                };
                assert_eq!(
                    meta.location.index,
                    u32::MAX,
                    "freelist addresses live entities"
                );
            }
        }
        if let Some(max) = freelist.iter().map(|e: &Entity| e.id()).max() {
            if max as usize >= self.meta.len() {
                self.meta.resize(max as usize + 1, EntityMeta::EMPTY);
            }
        }
        self.pending.clear();
        for entity in freelist {
            self.pending.push(entity.id);
            self.meta[entity.id as usize].generation = entity.generation;
        }
        self.free_cursor = AtomicIsize::new(freelist.len() as isize);
    }
}
575
/// ID-indexed entity properties
#[derive(Copy, Clone)]
pub struct EntityMeta {
    // Bumped on free; an `Entity` handle is valid only if its generation matches.
    pub(crate) generation: NonZeroU32,
    // `location.index == u32::MAX` marks a slot with no live entity (see `EMPTY`).
    pub(crate) location: Location,
}
582
impl EntityMeta {
    /// State of a slot holding no live entity: first generation, sentinel location.
    const EMPTY: EntityMeta = EntityMeta {
        generation: match NonZeroU32::new(1) {
            Some(x) => x,
            None => unreachable!(),
        },
        location: Location {
            archetype: 0,
            index: u32::MAX, // dummy value, to be filled in
        },
    };
}
595
/// Where an entity's data lives: an archetype and a row within it
#[derive(Copy, Clone)]
pub(crate) struct Location {
    // Index of the archetype containing the entity
    pub archetype: u32,
    // Row within that archetype; `u32::MAX` doubles as a "no location" sentinel
    pub index: u32,
}
601
/// Error indicating that no entity with a particular ID exists
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct NoSuchEntity;

impl fmt::Display for NoSuchEntity {
    /// Writes the fixed message, honoring width/alignment flags.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to `str`'s `Display`, which pads exactly like `Formatter::pad`.
        fmt::Display::fmt("no such entity", f)
    }
}
611
// `std::error::Error` has no required methods; the default impl suffices.
#[cfg(feature = "std")]
impl Error for NoSuchEntity {}
614
/// Resumable iteration state for `Entities::alloc_many`
#[derive(Clone)]
pub(crate) struct AllocManyState {
    // Index into `Entities::pending` of the next freelist ID to hand out;
    // `Entities::finish_alloc_many` truncates `pending` to this point.
    pub pending_end: usize,
    // Brand-new IDs (at/above the old `meta.len()`) still to hand out.
    fresh: Range<u32>,
}
620
621impl AllocManyState {
622    pub fn next(&mut self, entities: &Entities) -> Option<u32> {
623        if self.pending_end < entities.pending.len() {
624            let id = entities.pending[self.pending_end];
625            self.pending_end += 1;
626            Some(id)
627        } else {
628            self.fresh.next()
629        }
630    }
631
632    pub fn len(&self, entities: &Entities) -> usize {
633        self.fresh.len() + (entities.pending.len() - self.pending_end)
634    }
635}
636
#[cfg(test)]
mod tests {
    use super::*;
    use hashbrown::{HashMap, HashSet};
    use rand::{rngs::StdRng, Rng, SeedableRng};

    #[test]
    fn entity_bits_roundtrip() {
        let e = Entity {
            generation: NonZeroU32::new(0xDEADBEEF).unwrap(),
            id: 0xBAADF00D,
        };
        assert_eq!(Entity::from_bits(e.to_bits().into()).unwrap(), e);
    }

    #[test]
    fn alloc_and_free() {
        let mut rng = StdRng::seed_from_u64(0xFEEDFACEDEADF00D);

        // Randomized interleaving of alloc/free, mirrored against naive
        // bookkeeping (`id_to_gen`, `free_set`, `len`).
        let mut e = Entities::default();
        let mut first_unused = 0u32;
        let mut id_to_gen: HashMap<u32, u32> = Default::default();
        let mut free_set: HashSet<u32> = Default::default();
        let mut len = 0;

        for _ in 0..100 {
            let alloc = rng.random_bool(0.7);
            if alloc || first_unused == 0 {
                let entity = e.alloc();
                e.meta[entity.id as usize].location.index = 0;
                len += 1;

                let id = entity.id;
                if !free_set.is_empty() {
                    // This should have come from the freelist.
                    assert!(free_set.remove(&id));
                } else if id >= first_unused {
                    first_unused = id + 1;
                }

                e.get_mut(entity).unwrap().index = 37;

                assert!(id_to_gen.insert(id, entity.generation.get()).is_none());
            } else {
                // Free a random ID, whether or not it's in use, and check for errors.
                let id = rng.random_range(0..first_unused);

                let generation = id_to_gen.remove(&id);
                let entity = Entity {
                    id,
                    generation: NonZeroU32::new(
                        generation.unwrap_or_else(|| NonZeroU32::new(1).unwrap().get()),
                    )
                    .unwrap(),
                };

                assert_eq!(e.free(entity).is_ok(), generation.is_some());
                if generation.is_some() {
                    len -= 1;
                }

                free_set.insert(id);
            }
            assert_eq!(e.len(), len);
        }
    }

    #[test]
    fn alloc_at() {
        let mut e = Entities::default();

        let mut old = Vec::new();

        // Alloc/free twice so the same ID cycles through two generations.
        for _ in 0..2 {
            let entity = e.alloc();
            e.meta[entity.id as usize].location.index = 0;
            old.push(entity);
            e.free(entity).unwrap();
        }

        assert_eq!(e.len(), 0);

        let id = old.first().unwrap().id();
        assert!(old.iter().all(|entity| entity.id() == id));

        let entity = *old.last().unwrap();
        // The old ID shouldn't exist at this point, and should exist
        // in the pending list.
        assert!(!e.contains(entity));
        assert!(e.pending.contains(&entity.id()));
        // Allocating an entity at an unused location should not cause a location to be returned.
        assert!(e.alloc_at(entity).is_none());
        e.meta[entity.id as usize].location.index = 0;
        assert!(e.contains(entity));
        // The entity in question should not exist in the free-list once allocated.
        assert!(!e.pending.contains(&entity.id()));
        assert_eq!(e.len(), 1);
        // Allocating at the same id again should cause a location to be returned
        // this time around.
        assert!(e.alloc_at(entity).is_some());
        e.meta[entity.id as usize].location.index = 0;
        assert!(e.contains(entity));
        assert_eq!(e.len(), 1);

        // Allocating an Entity should cause the new empty locations
        // to be located in the free list.
        assert_eq!(e.meta.len(), 1);
        assert!(e
            .alloc_at(Entity {
                id: 3,
                generation: NonZeroU32::new(2).unwrap(),
            })
            .is_none());
        e.meta[entity.id as usize].location.index = 0;
        assert_eq!(e.pending.len(), 2);
        assert_eq!(&e.pending, &[1, 2]);
        assert_eq!(e.meta.len(), 4);
    }

    #[test]
    fn contains() {
        let mut e = Entities::default();

        for _ in 0..2 {
            let entity = e.alloc();
            e.meta[entity.id as usize].location.index = 0;
            assert!(e.contains(entity));

            e.free(entity).unwrap();
            assert!(!e.contains(entity));
        }

        // Reserved but not flushed are still "contained".
        for _ in 0..3 {
            let entity = e.reserve_entity();
            assert!(e.contains(entity));
            // Wrong generation for the same ID must not match.
            assert!(!e.contains(Entity {
                id: entity.id,
                generation: NonZeroU32::new(2).unwrap(),
            }));
            // An ID beyond the reservation range must not match.
            assert!(!e.contains(Entity {
                id: entity.id + 1,
                generation: NonZeroU32::new(1).unwrap(),
            }));
        }
    }

    // Shared test code parameterized by how we want to allocate an Entity block.
    fn reserve_test_helper(reserve_n: impl FnOnce(&mut Entities, u32) -> Vec<Entity>) {
        let mut e = Entities::default();

        // Allocate 10 items.
        let mut v1: Vec<Entity> = (0..10).map(|_| e.alloc()).collect();
        for &entity in &v1 {
            e.meta[entity.id as usize].location.index = 0;
        }
        assert_eq!(v1.iter().map(|e| e.id).max(), Some(9));
        for &entity in v1.iter() {
            assert!(e.contains(entity));
            e.get_mut(entity).unwrap().index = 37;
        }

        // Put the last 4 on the freelist.
        for entity in v1.drain(6..) {
            e.free(entity).unwrap();
        }
        assert_eq!(*e.free_cursor.get_mut(), 4);

        // Reserve 10 entities, so 4 will come from the freelist.
        // This means we will have allocated 10 + 10 - 4 total items, so max id is 15.
        let v2 = reserve_n(&mut e, 10);
        assert_eq!(v2.iter().map(|e| e.id).max(), Some(15));

        // Reserved IDs still count as "contained".
        assert!(v2.iter().all(|&entity| e.contains(entity)));

        // We should have exactly IDs 0..16
        let mut v3: Vec<Entity> = v1.iter().chain(v2.iter()).copied().collect();
        assert_eq!(v3.len(), 16);
        v3.sort_by_key(|entity| entity.id);
        for (i, entity) in v3.into_iter().enumerate() {
            assert_eq!(entity.id, i as u32);
        }

        // 6 will come from pending.
        assert_eq!(*e.free_cursor.get_mut(), -6);

        let mut flushed = Vec::new();
        e.flush(|id, loc| {
            loc.index = 0;
            flushed.push(id);
        });
        flushed.sort_unstable();

        assert_eq!(flushed, (6..16).collect::<Vec<_>>());
    }

    #[test]
    fn reserve_entity() {
        reserve_test_helper(|e, n| (0..n).map(|_| e.reserve_entity()).collect())
    }

    #[test]
    fn reserve_entities() {
        reserve_test_helper(|e, n| e.reserve_entities(n).collect())
    }

    #[test]
    fn reserve_grows() {
        let mut e = Entities::default();
        let _ = e.reserve_entity();
        e.flush(|_, l| {
            l.index = 0;
        });
        assert_eq!(e.len(), 1);
    }

    #[test]
    fn reserve_grows_mixed() {
        let mut e = Entities::default();
        let a = e.alloc();
        e.meta[a.id as usize].location.index = 0;
        let b = e.alloc();
        e.meta[b.id as usize].location.index = 0;
        e.free(a).unwrap();
        // Reservation draws 1 from the freelist and 2 fresh IDs.
        let _ = e.reserve_entities(3);
        e.flush(|_, l| {
            l.index = 0;
        });
        assert_eq!(e.len(), 4);
    }

    #[test]
    fn alloc_at_regression() {
        let mut e = Entities::default();
        // Allocating at id 1 puts id 0 on the freelist; id 0 must not be "contained".
        assert!(e
            .alloc_at(Entity {
                generation: NonZeroU32::new(1).unwrap(),
                id: 1
            })
            .is_none());
        assert!(!e.contains(Entity {
            generation: NonZeroU32::new(1).unwrap(),
            id: 0
        }));
    }
}
883}