hecs/
entities.rs

1use alloc::vec::Vec;
2use core::cmp;
3use core::convert::TryFrom;
4use core::iter::ExactSizeIterator;
5use core::num::{NonZeroU32, NonZeroU64};
6use core::ops::Range;
7use core::sync::atomic::{AtomicIsize, Ordering};
8use core::{fmt, mem};
9#[cfg(feature = "std")]
10use std::error::Error;
11
/// Lightweight unique ID, or handle, of an entity
///
/// Obtained from `World::spawn`. Can be stored to refer to an entity in the future.
///
/// Enable the `serde` feature on the crate to make this `Serialize`able. Some applications may be
/// able to save space by only serializing the output of `Entity::id`.
#[derive(Clone, Copy, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct Entity {
    // Index into `Entities::meta`; may be reused after this entity is freed.
    pub(crate) id: u32,
    // Bumped each time the `id` is freed, so stale handles can be detected.
    pub(crate) generation: NonZeroU32,
}

impl Entity {
    /// An [`Entity`] that does not necessarily correspond to data in any `World`
    ///
    /// Useful as a dummy value. It is possible (albeit unlikely) for a `World` to contain this
    /// entity.
    pub const DANGLING: Entity = Entity {
        generation: match NonZeroU32::new(u32::MAX) {
            Some(x) => x,
            None => unreachable!(),
        },
        id: u32::MAX,
    };

    /// Convert to a form convenient for passing outside of rust
    ///
    /// No particular structure is guaranteed for the returned bits.
    ///
    /// Useful for storing entity IDs externally, or in conjunction with `Entity::from_bits` and
    /// `World::spawn_at` for easy serialization. Alternatively, consider `id` for more compact
    /// representation.
    pub const fn to_bits(self) -> NonZeroU64 {
        let bits = (self.generation.get() as u64) << 32 | (self.id as u64);
        match NonZeroU64::new(bits) {
            Some(bits) => bits,
            // `generation` is non-zero, so the upper 32 bits are non-zero and the
            // packed value can never be zero.
            None => unreachable!(),
        }
    }

    /// Reconstruct an `Entity` previously destructured with `to_bits` if the bitpattern is valid,
    /// else `None`
    ///
    /// Useful for storing entity IDs externally, or in conjunction with `Entity::to_bits` and
    /// `World::spawn_at` for easy serialization.
    pub const fn from_bits(bits: u64) -> Option<Self> {
        Some(Self {
            // `?` is not yet supported in const fns
            generation: match NonZeroU32::new((bits >> 32) as u32) {
                Some(g) => g,
                None => return None,
            },
            id: bits as u32,
        })
    }

    /// Extract a transiently unique identifier
    ///
    /// No two simultaneously-live entities share the same ID, but dead entities' IDs may collide
    /// with both live and dead entities. Useful for compactly representing entities within a
    /// specific snapshot of the world, such as when serializing.
    ///
    /// See also `World::find_entity_from_id`.
    pub const fn id(self) -> u32 {
        self.id
    }
}
77
78impl fmt::Debug for Entity {
79    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
80        write!(f, "{}v{}", self.id, self.generation)
81    }
82}
83
#[cfg(feature = "serde")]
impl serde::Serialize for Entity {
    /// Serialize as the packed `u64` produced by `Entity::to_bits`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let bits = self.to_bits();
        bits.serialize(serializer)
    }
}
93
#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for Entity {
    /// Deserialize from the packed `u64` written by the `Serialize` impl,
    /// rejecting bitpatterns with a zero generation.
    fn deserialize<D>(deserializer: D) -> Result<Entity, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let bits = u64::deserialize(deserializer)?;

        // `from_bits` returns `None` only when the generation half is zero.
        Entity::from_bits(bits).ok_or_else(|| {
            serde::de::Error::invalid_value(
                serde::de::Unexpected::Unsigned(bits),
                &"a valid `Entity` bitpattern",
            )
        })
    }
}
111
/// An iterator returning a sequence of Entity values from `Entities::reserve_entities`.
///
/// Recycled freelist IDs are yielded first, carrying the generation currently stored in
/// `meta`; once those run out, brand-new IDs from `id_range` are yielded with generation 1.
pub struct ReserveEntitiesIterator<'a> {
    // Metas, so we can recover the current generation for anything in the freelist.
    meta: &'a [EntityMeta],

    // Reserved IDs formerly in the freelist to hand out.
    id_iter: core::slice::Iter<'a, u32>,

    // New Entity IDs to hand out, outside the range of meta.len().
    id_range: core::ops::Range<u32>,
}
123
124impl<'a> Iterator for ReserveEntitiesIterator<'a> {
125    type Item = Entity;
126
127    fn next(&mut self) -> Option<Self::Item> {
128        self.id_iter
129            .next()
130            .map(|&id| Entity {
131                generation: self.meta[id as usize].generation,
132                id,
133            })
134            .or_else(|| {
135                self.id_range.next().map(|id| Entity {
136                    generation: NonZeroU32::new(1).unwrap(),
137                    id,
138                })
139            })
140    }
141
142    fn size_hint(&self) -> (usize, Option<usize>) {
143        let len = self.id_iter.len() + self.id_range.len();
144        (len, Some(len))
145    }
146}
147
148impl<'a> ExactSizeIterator for ReserveEntitiesIterator<'a> {}
149
#[derive(Default)]
pub(crate) struct Entities {
    /// Generation and location for every ID ever allocated; indexed by `Entity::id`.
    pub meta: Vec<EntityMeta>,

    // The `pending` and `free_cursor` fields describe three sets of Entity IDs
    // that have been freed or are in the process of being allocated:
    //
    // - The `freelist` IDs, previously freed by `free()`. These IDs are available to any
    //   of `alloc()`, `reserve_entity()` or `reserve_entities()`. Allocation will
    //   always prefer these over brand new IDs.
    //
    // - The `reserved` list of IDs that were once in the freelist, but got
    //   reserved by `reserve_entities` or `reserve_entity()`. They are now waiting
    //   for `flush()` to make them fully allocated.
    //
    // - The count of new IDs that do not yet exist in `self.meta()`, but which
    //   we have handed out and reserved. `flush()` will allocate room for them in `self.meta()`.
    //
    // The contents of `pending` look like this:
    //
    // ```
    // ----------------------------
    // |  freelist  |  reserved   |
    // ----------------------------
    //              ^             ^
    //          free_cursor   pending.len()
    // ```
    //
    // As IDs are allocated, `free_cursor` is atomically decremented, moving
    // items from the freelist into the reserved list by sliding over the boundary.
    //
    // Once the freelist runs out, `free_cursor` starts going negative.
    // The more negative it is, the more IDs have been reserved starting exactly at
    // the end of `meta.len()`.
    //
    // This formulation allows us to reserve any number of IDs first from the freelist
    // and then from the new IDs, using only a single atomic subtract.
    //
    // Once `flush()` is done, `free_cursor` will equal `pending.len()`.
    pending: Vec<u32>,
    free_cursor: AtomicIsize,
    /// Count of live entities; maintained by `alloc`, `free`, `flush`, etc.
    len: u32,
}
193
impl Entities {
    /// Reserve entity IDs concurrently
    ///
    /// Storage for entity generation and location is lazily allocated by calling `flush`.
    pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator {
        // Use one atomic subtract to grab a range of new IDs. The range might be
        // entirely nonnegative, meaning all IDs come from the freelist, or entirely
        // negative, meaning they are all new IDs to allocate, or a mix of both.
        let range_end = self
            .free_cursor
            .fetch_sub(count as isize, Ordering::Relaxed);
        let range_start = range_end - count as isize;

        // Clamp to the nonnegative portion: these index freelist entries in `pending`.
        let freelist_range = range_start.max(0) as usize..range_end.max(0) as usize;

        let (new_id_start, new_id_end) = if range_start >= 0 {
            // We satisfied all requests from the freelist.
            (0, 0)
        } else {
            // We need to allocate some new Entity IDs outside of the range of self.meta.
            //
            // `range_start` covers some negative territory, e.g. `-3..6`.
            // Since the nonnegative values `0..6` are handled by the freelist, that
            // means we need to handle the negative range here.
            //
            // In this example, we truncate the end to 0, leaving us with `-3..0`.
            // Then we negate these values to indicate how far beyond the end of `meta.end()`
            // to go, yielding `meta.len()+0 .. meta.len()+3`.
            let base = self.meta.len() as isize;

            let new_id_end = u32::try_from(base - range_start).expect("too many entities");

            // `new_id_end` is in range, so no need to check `start`.
            let new_id_start = (base - range_end.min(0)) as u32;

            (new_id_start, new_id_end)
        };

        ReserveEntitiesIterator {
            meta: &self.meta[..],
            id_iter: self.pending[freelist_range].iter(),
            id_range: new_id_start..new_id_end,
        }
    }

    /// Reserve one entity ID concurrently
    ///
    /// Equivalent to `self.reserve_entities(1).next().unwrap()`, but more efficient.
    pub fn reserve_entity(&self) -> Entity {
        let n = self.free_cursor.fetch_sub(1, Ordering::Relaxed);
        if n > 0 {
            // Allocate from the freelist.
            let id = self.pending[(n - 1) as usize];
            Entity {
                generation: self.meta[id as usize].generation,
                id,
            }
        } else {
            // Grab a new ID, outside the range of `meta.len()`. `flush()` must
            // eventually be called to make it valid.
            //
            // As `self.free_cursor` goes more and more negative, we return IDs farther
            // and farther beyond `meta.len()`.
            Entity {
                generation: NonZeroU32::new(1).unwrap(),
                id: u32::try_from(self.meta.len() as isize - n).expect("too many entities"),
            }
        }
    }

    /// Check that we do not have pending work requiring `flush()` to be called.
    fn verify_flushed(&mut self) {
        debug_assert!(
            !self.needs_flush(),
            "flush() needs to be called before this operation is legal"
        );
    }

    /// Allocate an entity ID directly
    ///
    /// Location should be written immediately.
    pub fn alloc(&mut self) -> Entity {
        self.verify_flushed();

        self.len += 1;
        if let Some(id) = self.pending.pop() {
            // Recycle a freed ID; its meta slot already holds the bumped generation.
            let new_free_cursor = self.pending.len() as isize;
            *self.free_cursor.get_mut() = new_free_cursor;
            Entity {
                generation: self.meta[id as usize].generation,
                id,
            }
        } else {
            // Freelist empty: mint a brand-new ID one past the end of `meta`.
            let id = u32::try_from(self.meta.len()).expect("too many entities");
            self.meta.push(EntityMeta::EMPTY);
            Entity {
                generation: NonZeroU32::new(1).unwrap(),
                id,
            }
        }
    }

    /// Allocate and set locations for many entity IDs laid out contiguously in an archetype
    ///
    /// `self.finish_alloc_many()` must be called after!
    pub fn alloc_many(&mut self, n: u32, archetype: u32, mut first_index: u32) -> AllocManyState {
        self.verify_flushed();

        // Number of IDs we must mint beyond what the freelist can supply.
        let fresh = (n as usize).saturating_sub(self.pending.len()) as u32;
        assert!(
            (self.meta.len() + fresh as usize) < u32::MAX as usize,
            "too many entities"
        );
        // Recycled IDs come from the tail of `pending`; point their meta slots at
        // consecutive rows of the target archetype.
        let pending_end = self.pending.len().saturating_sub(n as usize);
        for &id in &self.pending[pending_end..] {
            self.meta[id as usize].location = Location {
                archetype,
                index: first_index,
            };
            first_index += 1;
        }

        // Mint the remaining `fresh` IDs with generation 1, continuing the same rows.
        let fresh_start = self.meta.len() as u32;
        self.meta.extend(
            (first_index..(first_index + fresh)).map(|index| EntityMeta {
                generation: NonZeroU32::new(1).unwrap(),
                location: Location { archetype, index },
            }),
        );

        self.len += n;

        AllocManyState {
            fresh: fresh_start..(fresh_start + fresh),
            pending_end,
        }
    }

    /// Remove entities used by `alloc_many` from the freelist
    ///
    /// This is an awkward separate function to avoid borrowck issues in `SpawnColumnBatchIter`.
    pub fn finish_alloc_many(&mut self, pending_end: usize) {
        self.pending.truncate(pending_end);
    }

    /// Allocate a specific entity ID, overwriting its generation
    ///
    /// Returns the location of the entity currently using the given ID, if any. Location should be written immediately.
    pub fn alloc_at(&mut self, entity: Entity) -> Option<Location> {
        self.verify_flushed();

        let loc = if entity.id as usize >= self.meta.len() {
            // ID beyond the end of `meta`: grow `meta`, placing every newly-created
            // intermediate ID on the freelist.
            self.pending.extend((self.meta.len() as u32)..entity.id);
            let new_free_cursor = self.pending.len() as isize;
            *self.free_cursor.get_mut() = new_free_cursor;
            self.meta.resize(entity.id as usize + 1, EntityMeta::EMPTY);
            self.len += 1;
            None
        } else if let Some(index) = self.pending.iter().position(|item| *item == entity.id) {
            // ID is currently free: take it off the freelist.
            self.pending.swap_remove(index);
            let new_free_cursor = self.pending.len() as isize;
            *self.free_cursor.get_mut() = new_free_cursor;
            self.len += 1;
            None
        } else {
            // ID is currently in use: evict the existing entity and report its location.
            Some(mem::replace(
                &mut self.meta[entity.id as usize].location,
                EntityMeta::EMPTY.location,
            ))
        };

        self.meta[entity.id as usize].generation = entity.generation;

        loc
    }

    /// Destroy an entity, allowing it to be reused
    ///
    /// Must not be called while reserved entities are awaiting `flush()`.
    pub fn free(&mut self, entity: Entity) -> Result<Location, NoSuchEntity> {
        self.verify_flushed();

        let meta = self.meta.get_mut(entity.id as usize).ok_or(NoSuchEntity)?;
        if meta.generation != entity.generation || meta.location.index == u32::MAX {
            return Err(NoSuchEntity);
        }

        // Bump the generation so stale handles are invalidated; skip zero on wraparound.
        meta.generation = NonZeroU32::new(u32::from(meta.generation).wrapping_add(1))
            .unwrap_or_else(|| NonZeroU32::new(1).unwrap());

        let loc = mem::replace(&mut meta.location, EntityMeta::EMPTY.location);

        self.pending.push(entity.id);

        let new_free_cursor = self.pending.len() as isize;
        *self.free_cursor.get_mut() = new_free_cursor;
        self.len -= 1;

        Ok(loc)
    }

    /// Ensure at least `n` allocations can succeed without reallocating
    pub fn reserve(&mut self, additional: u32) {
        self.verify_flushed();

        // After `verify_flushed`, `free_cursor == pending.len()`, i.e. the freelist size.
        let freelist_size = *self.free_cursor.get_mut();
        let shortfall = additional as isize - freelist_size;
        if shortfall > 0 {
            self.meta.reserve(shortfall as usize);
        }
    }

    /// Whether `entity` refers to a live entity or one reserved but not yet flushed
    pub fn contains(&self, entity: Entity) -> bool {
        match self.meta.get(entity.id as usize) {
            Some(meta) => {
                // Live if the generation matches and the slot has data, or if the ID
                // sits in the reserved tail of `pending` awaiting `flush()`.
                meta.generation == entity.generation
                    && (meta.location.index != u32::MAX
                        || self.pending[self.free_cursor.load(Ordering::Relaxed).max(0) as usize..]
                            .contains(&entity.id))
            }
            None => {
                // Check if this could have been obtained from `reserve_entity`
                let free = self.free_cursor.load(Ordering::Relaxed);
                entity.generation.get() == 1
                    && free < 0
                    && (entity.id as isize) < (free.abs() + self.meta.len() as isize)
            }
        }
    }

    /// Destroy all entities and reset the freelist and reservation cursor
    pub fn clear(&mut self) {
        self.meta.clear();
        self.pending.clear();
        *self.free_cursor.get_mut() = 0;
        self.len = 0;
    }

    /// Access the location storage of an entity
    ///
    /// Must not be called on pending entities.
    pub fn get_mut(&mut self, entity: Entity) -> Result<&mut Location, NoSuchEntity> {
        let meta = self.meta.get_mut(entity.id as usize).ok_or(NoSuchEntity)?;
        if meta.generation == entity.generation && meta.location.index != u32::MAX {
            Ok(&mut meta.location)
        } else {
            Err(NoSuchEntity)
        }
    }

    /// Returns `Ok(Location { archetype: 0, index: undefined })` for pending entities
    pub fn get(&self, entity: Entity) -> Result<Location, NoSuchEntity> {
        if self.meta.len() <= entity.id as usize {
            // Check if this could have been obtained from `reserve_entity`
            let free = self.free_cursor.load(Ordering::Relaxed);
            if entity.generation.get() == 1
                && free < 0
                && (entity.id as isize) < (free.abs() + self.meta.len() as isize)
            {
                return Ok(Location {
                    archetype: 0,
                    index: u32::max_value(),
                });
            } else {
                return Err(NoSuchEntity);
            }
        }
        let meta = &self.meta[entity.id as usize];
        if meta.generation != entity.generation || meta.location.index == u32::MAX {
            return Err(NoSuchEntity);
        }
        Ok(meta.location)
    }

    /// Panics if the given id would represent an index outside of `meta`.
    ///
    /// # Safety
    /// Must only be called for currently allocated `id`s.
    pub unsafe fn resolve_unknown_gen(&self, id: u32) -> Entity {
        let meta_len = self.meta.len();

        if meta_len > id as usize {
            let meta = &self.meta[id as usize];
            Entity {
                generation: meta.generation,
                id,
            }
        } else {
            // See if it's pending, but not yet flushed.
            let free_cursor = self.free_cursor.load(Ordering::Relaxed);
            let num_pending = cmp::max(-free_cursor, 0) as usize;

            if meta_len + num_pending > id as usize {
                // Pending entities will have the first generation.
                Entity {
                    generation: NonZeroU32::new(1).unwrap(),
                    id,
                }
            } else {
                panic!("entity id is out of range");
            }
        }
    }

    /// True whenever `free_cursor` has drifted from `pending.len()`, i.e. reservations exist.
    fn needs_flush(&mut self) -> bool {
        *self.free_cursor.get_mut() != self.pending.len() as isize
    }

    /// Allocates space for entities previously reserved with `reserve_entity` or
    /// `reserve_entities`, then initializes each one using the supplied function.
    pub fn flush(&mut self, mut init: impl FnMut(u32, &mut Location)) {
        let free_cursor = *self.free_cursor.get_mut();

        let new_free_cursor = if free_cursor >= 0 {
            free_cursor as usize
        } else {
            // Negative cursor: that many brand-new IDs were reserved past `meta.len()`.
            // Grow `meta` and initialize each new slot.
            let old_meta_len = self.meta.len();
            let new_meta_len = old_meta_len + -free_cursor as usize;
            self.meta.resize(new_meta_len, EntityMeta::EMPTY);

            self.len += -free_cursor as u32;
            for (id, meta) in self.meta.iter_mut().enumerate().skip(old_meta_len) {
                init(id as u32, &mut meta.location);
            }

            *self.free_cursor.get_mut() = 0;
            0
        };

        // IDs between the cursor and the end of `pending` were reserved from the
        // freelist; initialize them and make them fully allocated.
        self.len += (self.pending.len() - new_free_cursor) as u32;
        for id in self.pending.drain(new_free_cursor..) {
            init(id, &mut self.meta[id as usize].location);
        }
    }

    /// Number of live entities
    #[inline]
    pub fn len(&self) -> u32 {
        self.len
    }
}
533
/// Per-ID bookkeeping: the ID's current generation and where its data lives.
#[derive(Copy, Clone)]
pub(crate) struct EntityMeta {
    pub generation: NonZeroU32,
    pub location: Location,
}

impl EntityMeta {
    /// Initial state for a slot with no data: generation 1 and a placeholder
    /// location whose `index` of `u32::MAX` marks "not present".
    const EMPTY: EntityMeta = EntityMeta {
        generation: match NonZeroU32::new(1) {
            Some(x) => x,
            None => unreachable!(),
        },
        location: Location {
            archetype: 0,
            index: u32::max_value(), // dummy value, to be filled in
        },
    };
}
552
/// Where an entity's data is stored: an archetype and an index within it.
#[derive(Copy, Clone)]
pub(crate) struct Location {
    pub archetype: u32,
    pub index: u32,
}
558
/// Error indicating that no entity with a particular ID exists
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct NoSuchEntity;

impl fmt::Display for NoSuchEntity {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegating to `str`'s `Display` pads via `Formatter::pad`, so
        // fill/alignment/width flags are honored.
        fmt::Display::fmt("no such entity", f)
    }
}

#[cfg(feature = "std")]
impl Error for NoSuchEntity {}
571
/// Iteration state handed back by `Entities::alloc_many`.
#[derive(Clone)]
pub(crate) struct AllocManyState {
    // Index into `Entities::pending` of the next recycled ID to hand out;
    // eventually passed to `finish_alloc_many` to truncate the freelist.
    pub pending_end: usize,
    // Brand-new IDs (at or beyond the pre-call `meta` length) still to hand out.
    fresh: Range<u32>,
}
577
578impl AllocManyState {
579    pub fn next(&mut self, entities: &Entities) -> Option<u32> {
580        if self.pending_end < entities.pending.len() {
581            let id = entities.pending[self.pending_end];
582            self.pending_end += 1;
583            Some(id)
584        } else {
585            self.fresh.next()
586        }
587    }
588
589    pub fn len(&self, entities: &Entities) -> usize {
590        self.fresh.len() + (entities.pending.len() - self.pending_end)
591    }
592}
593
#[cfg(test)]
mod tests {
    use super::*;
    use hashbrown::{HashMap, HashSet};
    use rand::{rngs::StdRng, Rng, SeedableRng};

    // `to_bits`/`from_bits` must be mutually inverse for a valid handle.
    #[test]
    fn entity_bits_roundtrip() {
        let e = Entity {
            generation: NonZeroU32::new(0xDEADBEEF).unwrap(),
            id: 0xBAADF00D,
        };
        assert_eq!(Entity::from_bits(e.to_bits().into()).unwrap(), e);
    }

    // Randomized alloc/free interleaving: freed IDs must be recycled before new
    // ones are minted, double-frees must fail, and `len()` must stay exact.
    #[test]
    fn alloc_and_free() {
        let mut rng = StdRng::seed_from_u64(0xFEEDFACEDEADF00D);

        let mut e = Entities::default();
        let mut first_unused = 0u32;
        let mut id_to_gen: HashMap<u32, u32> = Default::default();
        let mut free_set: HashSet<u32> = Default::default();
        let mut len = 0;

        for _ in 0..100 {
            let alloc = rng.gen_bool(0.7);
            if alloc || first_unused == 0 {
                let entity = e.alloc();
                e.meta[entity.id as usize].location.index = 0;
                len += 1;

                let id = entity.id;
                if !free_set.is_empty() {
                    // This should have come from the freelist.
                    assert!(free_set.remove(&id));
                } else if id >= first_unused {
                    first_unused = id + 1;
                }

                e.get_mut(entity).unwrap().index = 37;

                assert!(id_to_gen.insert(id, entity.generation.get()).is_none());
            } else {
                // Free a random ID, whether or not it's in use, and check for errors.
                let id = rng.gen_range(0..first_unused);

                let generation = id_to_gen.remove(&id);
                let entity = Entity {
                    id,
                    generation: NonZeroU32::new(
                        generation.unwrap_or_else(|| NonZeroU32::new(1).unwrap().get()),
                    )
                    .unwrap(),
                };

                assert_eq!(e.free(entity).is_ok(), generation.is_some());
                if generation.is_some() {
                    len -= 1;
                }

                free_set.insert(id);
            }
            assert_eq!(e.len(), len);
        }
    }

    // `alloc_at` must reuse freelist slots, evict live entities (returning their
    // old location), and backfill the freelist when skipping ahead.
    #[test]
    fn alloc_at() {
        let mut e = Entities::default();

        let mut old = Vec::new();

        for _ in 0..2 {
            let entity = e.alloc();
            e.meta[entity.id as usize].location.index = 0;
            old.push(entity);
            e.free(entity).unwrap();
        }

        assert_eq!(e.len(), 0);

        let id = old.first().unwrap().id();
        assert!(old.iter().all(|entity| entity.id() == id));

        let entity = *old.last().unwrap();
        // The old ID shouldn't exist at this point, and should exist
        // in the pending list.
        assert!(!e.contains(entity));
        assert!(e.pending.contains(&entity.id()));
        // Allocating an entity at an unused location should not cause a location to be returned.
        assert!(e.alloc_at(entity).is_none());
        e.meta[entity.id as usize].location.index = 0;
        assert!(e.contains(entity));
        // The entity in question should not exist in the free-list once allocated.
        assert!(!e.pending.contains(&entity.id()));
        assert_eq!(e.len(), 1);
        // Allocating at the same id again should cause a location to be returned
        // this time around.
        assert!(e.alloc_at(entity).is_some());
        e.meta[entity.id as usize].location.index = 0;
        assert!(e.contains(entity));
        assert_eq!(e.len(), 1);

        // Allocating an Entity should cause the new empty locations
        // to be located in the free list.
        assert_eq!(e.meta.len(), 1);
        assert!(e
            .alloc_at(Entity {
                id: 3,
                generation: NonZeroU32::new(2).unwrap(),
            })
            .is_none());
        e.meta[entity.id as usize].location.index = 0;
        assert_eq!(e.pending.len(), 2);
        assert_eq!(&e.pending, &[1, 2]);
        assert_eq!(e.meta.len(), 4);
    }

    // `contains` must track liveness across free, and must cover reserved
    // (unflushed) entities while rejecting wrong generations and unknown IDs.
    #[test]
    fn contains() {
        let mut e = Entities::default();

        for _ in 0..2 {
            let entity = e.alloc();
            e.meta[entity.id as usize].location.index = 0;
            assert!(e.contains(entity));

            e.free(entity).unwrap();
            assert!(!e.contains(entity));
        }

        // Reserved but not flushed are still "contained".
        for _ in 0..3 {
            let entity = e.reserve_entity();
            assert!(e.contains(entity));
            assert!(!e.contains(Entity {
                id: entity.id,
                generation: NonZeroU32::new(2).unwrap(),
            }));
            assert!(!e.contains(Entity {
                id: entity.id + 1,
                generation: NonZeroU32::new(1).unwrap(),
            }));
        }
    }

    // Shared test code parameterized by how we want to allocate an Entity block.
    fn reserve_test_helper(reserve_n: impl FnOnce(&mut Entities, u32) -> Vec<Entity>) {
        let mut e = Entities::default();

        // Allocate 10 items.
        let mut v1: Vec<Entity> = (0..10).map(|_| e.alloc()).collect();
        for &entity in &v1 {
            e.meta[entity.id as usize].location.index = 0;
        }
        assert_eq!(v1.iter().map(|e| e.id).max(), Some(9));
        for &entity in v1.iter() {
            assert!(e.contains(entity));
            e.get_mut(entity).unwrap().index = 37;
        }

        // Put the last 4 on the freelist.
        for entity in v1.drain(6..) {
            e.free(entity).unwrap();
        }
        assert_eq!(*e.free_cursor.get_mut(), 4);

        // Reserve 10 entities, so 4 will come from the freelist.
        // This means we will have allocated 10 + 10 - 4 total items, so max id is 15.
        let v2 = reserve_n(&mut e, 10);
        assert_eq!(v2.iter().map(|e| e.id).max(), Some(15));

        // Reserved IDs still count as "contained".
        assert!(v2.iter().all(|&entity| e.contains(entity)));

        // We should have exactly IDs 0..16
        let mut v3: Vec<Entity> = v1.iter().chain(v2.iter()).copied().collect();
        assert_eq!(v3.len(), 16);
        v3.sort_by_key(|entity| entity.id);
        for (i, entity) in v3.into_iter().enumerate() {
            assert_eq!(entity.id, i as u32);
        }

        // 6 will come from pending.
        assert_eq!(*e.free_cursor.get_mut(), -6);

        let mut flushed = Vec::new();
        e.flush(|id, loc| {
            loc.index = 0;
            flushed.push(id);
        });
        flushed.sort_unstable();

        assert_eq!(flushed, (6..16).collect::<Vec<_>>());
    }

    // Reservation one at a time must match batched reservation behavior.
    #[test]
    fn reserve_entity() {
        reserve_test_helper(|e, n| (0..n).map(|_| e.reserve_entity()).collect())
    }

    #[test]
    fn reserve_entities() {
        reserve_test_helper(|e, n| e.reserve_entities(n).collect())
    }

    // `flush` must materialize a reserved entity and count it as live.
    #[test]
    fn reserve_grows() {
        let mut e = Entities::default();
        let _ = e.reserve_entity();
        e.flush(|_, l| {
            l.index = 0;
        });
        assert_eq!(e.len(), 1);
    }

    // Reservations mixing freelist and fresh IDs must all be counted by `flush`.
    #[test]
    fn reserve_grows_mixed() {
        let mut e = Entities::default();
        let a = e.alloc();
        e.meta[a.id as usize].location.index = 0;
        let b = e.alloc();
        e.meta[b.id as usize].location.index = 0;
        e.free(a).unwrap();
        let _ = e.reserve_entities(3);
        e.flush(|_, l| {
            l.index = 0;
        });
        assert_eq!(e.len(), 4);
    }

    // Regression: `alloc_at` past the end of `meta` must not make the skipped
    // intermediate IDs appear live.
    #[test]
    fn alloc_at_regression() {
        let mut e = Entities::default();
        assert!(e
            .alloc_at(Entity {
                generation: NonZeroU32::new(1).unwrap(),
                id: 1
            })
            .is_none());
        assert!(!e.contains(Entity {
            generation: NonZeroU32::new(1).unwrap(),
            id: 0
        }));
    }
}