hv_ecs/
entities.rs

use alloc::vec::Vec;
use core::cmp;
use core::convert::TryFrom;
use core::iter::ExactSizeIterator;
use core::num::{NonZeroU32, NonZeroU64};
use core::ops::Range;
use core::sync::atomic::{AtomicI64, Ordering};
use core::{fmt, mem};
#[cfg(feature = "std")]
use std::error::Error;

/// Lightweight unique ID, or handle, of an entity
///
/// Obtained from `World::spawn`. Can be stored to refer to an entity in the future.
///
/// Enable the `serde` feature on the crate to make this `Serialize`able. Some applications may be
/// able to save space by only serializing the output of `Entity::id`.
#[derive(Clone, Copy, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct Entity {
    pub(crate) generation: NonZeroU32,
    pub(crate) id: u32,
}

impl Entity {
    /// Convert to a form convenient for passing outside of Rust
    ///
    /// No particular structure is guaranteed for the returned bits.
    ///
    /// Useful for storing entity IDs externally, or in conjunction with `Entity::from_bits` and
    /// `World::spawn_at` for easy serialization. Alternatively, consider `id` for a more compact
    /// representation.
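    ///
    /// A hedged round-trip sketch (`entity` here stands in for a handle obtained
    /// from `World::spawn`; the concrete bit values are unspecified):
    ///
    /// ```ignore
    /// let bits = entity.to_bits().get();
    /// assert_eq!(Entity::from_bits(bits), Some(entity));
    /// ```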
    pub fn to_bits(self) -> NonZeroU64 {
        unsafe {
            NonZeroU64::new_unchecked(u64::from(self.generation.get()) << 32 | u64::from(self.id))
        }
    }

    /// Reconstruct an `Entity` previously destructured with `to_bits` if the bitpattern is valid,
    /// else `None`
    ///
    /// Useful for storing entity IDs externally, or in conjunction with `Entity::to_bits` and
    /// `World::spawn_at` for easy serialization.
    pub fn from_bits(bits: u64) -> Option<Self> {
        Some(Self {
            generation: NonZeroU32::new((bits >> 32) as u32)?,
            id: bits as u32,
        })
    }

    /// Extract a transiently unique identifier
    ///
    /// No two simultaneously-live entities share the same ID, but dead entities' IDs may collide
    /// with both live and dead entities. Useful for compactly representing entities within a
    /// specific snapshot of the world, such as when serializing.
    ///
    /// See also `World::find_entity_from_id`.
    pub fn id(self) -> u32 {
        self.id
    }
}

impl fmt::Debug for Entity {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}v{}", self.id, self.generation)
    }
}

#[cfg(feature = "serde")]
impl serde::Serialize for Entity {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.to_bits().serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for Entity {
    fn deserialize<D>(deserializer: D) -> Result<Entity, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let bits = u64::deserialize(deserializer)?;

        match Entity::from_bits(bits) {
            Some(ent) => Ok(ent),
            None => Err(serde::de::Error::invalid_value(
                serde::de::Unexpected::Unsigned(bits),
                &"a valid `Entity` bitpattern",
            )),
        }
    }
}

/// An iterator returning a sequence of Entity values from `Entities::reserve_entities`.
pub struct ReserveEntitiesIterator<'a> {
    // Metas, so we can recover the current generation for anything in the freelist.
    meta: &'a [EntityMeta],

    // Reserved IDs formerly in the freelist to hand out.
    id_iter: core::slice::Iter<'a, u32>,

    // New Entity IDs to hand out, outside the range of meta.len().
    id_range: core::ops::Range<u32>,
}

impl<'a> Iterator for ReserveEntitiesIterator<'a> {
    type Item = Entity;

    fn next(&mut self) -> Option<Self::Item> {
        self.id_iter
            .next()
            .map(|&id| Entity {
                generation: self.meta[id as usize].generation,
                id,
            })
            .or_else(|| {
                self.id_range.next().map(|id| Entity {
                    generation: NonZeroU32::new(1).unwrap(),
                    id,
                })
            })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.id_iter.len() + self.id_range.len();
        (len, Some(len))
    }
}

impl<'a> ExactSizeIterator for ReserveEntitiesIterator<'a> {}

#[derive(Default)]
pub(crate) struct Entities {
    pub meta: Vec<EntityMeta>,

    // The `pending` and `free_cursor` fields describe three sets of Entity IDs
    // that have been freed or are in the process of being allocated:
    //
    // - The `freelist` IDs, previously freed by `free()`. These IDs are available to any
    //   of `alloc()`, `reserve_entity()` or `reserve_entities()`. Allocation will
    //   always prefer these over brand new IDs.
    //
    // - The `reserved` list of IDs that were once in the freelist, but got
    //   reserved by `reserve_entities` or `reserve_entity()`. They are now waiting
    //   for `flush()` to make them fully allocated.
    //
    // - The count of new IDs that do not yet exist in `self.meta`, but which
    //   we have handed out and reserved. `flush()` will allocate room for them in `self.meta`.
    //
    // The contents of `pending` look like this:
    //
    // ```
    // ----------------------------
    // |  freelist  |  reserved   |
    // ----------------------------
    //              ^             ^
    //          free_cursor   pending.len()
    // ```
    //
    // As IDs are allocated, `free_cursor` is atomically decremented, moving
    // items from the freelist into the reserved list by sliding over the boundary.
    //
    // Once the freelist runs out, `free_cursor` starts going negative.
    // The more negative it is, the more IDs have been reserved starting exactly at
    // the end of `meta.len()`.
    //
    // This formulation allows us to reserve any number of IDs first from the freelist
    // and then from the new IDs, using only a single atomic subtract.
    //
    // Once `flush()` is done, `free_cursor` will equal `pending.len()`.
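    //
    // A worked example with illustrative numbers (not drawn from a real run):
    // suppose `meta.len() == 10` and `pending == [3, 7]`, so `free_cursor == 2`.
    // Reserving five IDs subtracts 5 from the cursor, leaving it at -3: IDs 3
    // and 7 come from the freelist, and IDs 10, 11, and 12 are brand new, to be
    // created by `flush()`.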
    pending: Vec<u32>,
    free_cursor: AtomicI64,
    len: u32,
}

impl Entities {
    /// Reserve entity IDs concurrently
    ///
    /// Storage for entity generation and location is lazily allocated by calling `flush`.
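    ///
    /// A hedged usage sketch (crate-internal API; the closure body is up to the
    /// caller, which must store a real `Location` for each new ID):
    ///
    /// ```ignore
    /// let ids: Vec<Entity> = entities.reserve_entities(3).collect();
    /// entities.flush(|id, location| { /* initialize `location` for `id` */ });
    /// ```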
    pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator {
        // Use one atomic subtract to grab a range of new IDs. The range might be
        // entirely nonnegative, meaning all IDs come from the freelist, or entirely
        // negative, meaning they are all new IDs to allocate, or a mix of both.
        let range_end = self.free_cursor.fetch_sub(count as i64, Ordering::Relaxed);
        let range_start = range_end - count as i64;

        let freelist_range = range_start.max(0) as usize..range_end.max(0) as usize;

        let (new_id_start, new_id_end) = if range_start >= 0 {
            // We satisfied all requests from the freelist.
            (0, 0)
        } else {
            // We need to allocate some new Entity IDs outside of the range of self.meta.
            //
            // `range_start` covers some negative territory, e.g. `-3..6`.
            // Since the nonnegative values `0..6` are handled by the freelist, that
            // means we need to handle the negative range here.
            //
            // In this example, we truncate the end to 0, leaving us with `-3..0`.
            // Then we negate these values to indicate how far beyond the end of `meta.len()`
            // to go, yielding `meta.len()+0 .. meta.len()+3`.
            let base = self.meta.len() as i64;

            let new_id_end = u32::try_from(base - range_start).expect("too many entities");

            // `new_id_end` is in range, so no need to check `start`.
            let new_id_start = (base - range_end.min(0)) as u32;

            (new_id_start, new_id_end)
        };

        ReserveEntitiesIterator {
            meta: &self.meta[..],
            id_iter: self.pending[freelist_range].iter(),
            id_range: new_id_start..new_id_end,
        }
    }

    /// Reserve one entity ID concurrently
    ///
    /// Equivalent to `self.reserve_entities(1).next().unwrap()`, but more efficient.
    pub fn reserve_entity(&self) -> Entity {
        let n = self.free_cursor.fetch_sub(1, Ordering::Relaxed);
        if n > 0 {
            // Allocate from the freelist.
            let id = self.pending[(n - 1) as usize];
            Entity {
                generation: self.meta[id as usize].generation,
                id,
            }
        } else {
            // Grab a new ID, outside the range of `meta.len()`. `flush()` must
            // eventually be called to make it valid.
            //
            // As `self.free_cursor` goes more and more negative, we return IDs farther
            // and farther beyond `meta.len()`.
            Entity {
                generation: NonZeroU32::new(1).unwrap(),
                id: u32::try_from(self.meta.len() as i64 - n).expect("too many entities"),
            }
        }
    }

    /// Check that we do not have pending work requiring `flush()` to be called.
    fn verify_flushed(&mut self) {
        debug_assert!(
            !self.needs_flush(),
            "flush() needs to be called before this operation is legal"
        );
    }

    /// Allocate an entity ID directly
    ///
    /// Location should be written immediately.
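    ///
    /// A minimal internal-usage sketch, mirroring the tests below (the index
    /// value is illustrative):
    ///
    /// ```ignore
    /// let entity = entities.alloc();
    /// entities.get_mut(entity).unwrap().index = 0;
    /// ```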
    pub fn alloc(&mut self) -> Entity {
        self.verify_flushed();

        self.len += 1;
        if let Some(id) = self.pending.pop() {
            let new_free_cursor = self.pending.len() as i64;
            self.free_cursor.store(new_free_cursor, Ordering::Relaxed); // Not racey due to &mut self
            Entity {
                generation: self.meta[id as usize].generation,
                id,
            }
        } else {
            let id = u32::try_from(self.meta.len()).expect("too many entities");
            self.meta.push(EntityMeta::EMPTY);
            Entity {
                generation: NonZeroU32::new(1).unwrap(),
                id,
            }
        }
    }

    /// Allocate and set locations for many entity IDs laid out contiguously in an archetype
    ///
    /// `self.finish_alloc_many()` must be called after!
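    ///
    /// A hedged sketch of the intended call pattern, based on `AllocManyState`
    /// below (argument values are illustrative):
    ///
    /// ```ignore
    /// let mut state = entities.alloc_many(n, archetype, first_index);
    /// while let Some(id) = state.next(&entities) {
    ///     // ... move the corresponding components into place ...
    /// }
    /// entities.finish_alloc_many(state.pending_end);
    /// ```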
    pub fn alloc_many(&mut self, n: u32, archetype: u32, mut first_index: u32) -> AllocManyState {
        self.verify_flushed();

        let fresh = (n as usize).saturating_sub(self.pending.len()) as u32;
        assert!(
            (self.meta.len() + fresh as usize) < u32::MAX as usize,
            "too many entities"
        );
        let pending_end = self.pending.len().saturating_sub(n as usize);
        for &id in &self.pending[pending_end..] {
            self.meta[id as usize].location = Location {
                archetype,
                index: first_index,
            };
            first_index += 1;
        }

        let fresh_start = self.meta.len() as u32;
        self.meta.extend(
            (first_index..(first_index + fresh)).map(|index| EntityMeta {
                generation: NonZeroU32::new(1).unwrap(),
                location: Location { archetype, index },
            }),
        );

        self.len += n;

        AllocManyState {
            fresh: fresh_start..(fresh_start + fresh),
            pending_end,
        }
    }

    /// Remove entities used by `alloc_many` from the freelist
    ///
    /// This is an awkward separate function to avoid borrowck issues in `SpawnColumnBatchIter`.
    pub fn finish_alloc_many(&mut self, pending_end: usize) {
        self.pending.truncate(pending_end);
    }

    /// Allocate a specific entity ID, overwriting its generation
    ///
    /// Returns the location of the entity currently using the given ID, if any. Location should
    /// be written immediately.
    pub fn alloc_at(&mut self, entity: Entity) -> Option<Location> {
        self.verify_flushed();

        let loc = if entity.id as usize >= self.meta.len() {
            self.pending.extend((self.meta.len() as u32)..entity.id);
            let new_free_cursor = self.pending.len() as i64;
            self.free_cursor.store(new_free_cursor, Ordering::Relaxed); // Not racey due to &mut self
            self.meta.resize(entity.id as usize + 1, EntityMeta::EMPTY);
            self.len += 1;
            None
        } else if let Some(index) = self.pending.iter().position(|item| *item == entity.id) {
            self.pending.swap_remove(index);
            let new_free_cursor = self.pending.len() as i64;
            self.free_cursor.store(new_free_cursor, Ordering::Relaxed); // Not racey due to &mut self
            self.len += 1;
            None
        } else {
            Some(mem::replace(
                &mut self.meta[entity.id as usize].location,
                EntityMeta::EMPTY.location,
            ))
        };

        self.meta[entity.id as usize].generation = entity.generation;

        loc
    }

    /// Destroy an entity, allowing it to be reused
    ///
    /// Must not be called while reserved entities are awaiting `flush()`.
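    ///
    /// A hedged sketch: after a successful free, the old handle's generation is
    /// stale, so lookups with it fail.
    ///
    /// ```ignore
    /// let location = entities.free(entity)?;
    /// assert!(!entities.contains(entity));
    /// ```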
    pub fn free(&mut self, entity: Entity) -> Result<Location, NoSuchEntity> {
        self.verify_flushed();

        let meta = self.meta.get_mut(entity.id as usize).ok_or(NoSuchEntity)?;
        if meta.generation != entity.generation {
            return Err(NoSuchEntity);
        }

        meta.generation = NonZeroU32::new(u32::from(meta.generation).wrapping_add(1))
            .unwrap_or_else(|| NonZeroU32::new(1).unwrap());

        let loc = mem::replace(&mut meta.location, EntityMeta::EMPTY.location);

        self.pending.push(entity.id);

        let new_free_cursor = self.pending.len() as i64;
        self.free_cursor.store(new_free_cursor, Ordering::Relaxed); // Not racey due to &mut self
        self.len -= 1;

        Ok(loc)
    }

    /// Ensure at least `additional` allocations can succeed without reallocating
    pub fn reserve(&mut self, additional: u32) {
        self.verify_flushed();

        let freelist_size = self.free_cursor.load(Ordering::Relaxed);
        let shortfall = additional as i64 - freelist_size;
        if shortfall > 0 {
            self.meta.reserve(shortfall as usize);
        }
    }

    pub fn contains(&self, entity: Entity) -> bool {
        // Note that out-of-range IDs are considered to be "contained" because
        // they must be reserved IDs that we haven't flushed yet.
        self.meta
            .get(entity.id as usize)
            .map_or(true, |meta| meta.generation == entity.generation)
    }

    pub fn clear(&mut self) {
        self.meta.clear();
        self.pending.clear();
        self.free_cursor.store(0, Ordering::Relaxed); // Not racey due to &mut self
    }

    /// Access the location storage of an entity
    ///
    /// Must not be called on pending entities.
    pub fn get_mut(&mut self, entity: Entity) -> Result<&mut Location, NoSuchEntity> {
        let meta = self.meta.get_mut(entity.id as usize).ok_or(NoSuchEntity)?;
        if meta.generation == entity.generation {
            Ok(&mut meta.location)
        } else {
            Err(NoSuchEntity)
        }
    }

    /// Returns `Ok(Location { archetype: 0, index: undefined })` for pending entities
    pub fn get(&self, entity: Entity) -> Result<Location, NoSuchEntity> {
        if self.meta.len() <= entity.id as usize {
            return Ok(Location {
                archetype: 0,
                index: u32::max_value(),
            });
        }
        let meta = &self.meta[entity.id as usize];
        if meta.generation != entity.generation {
            return Err(NoSuchEntity);
        }
        Ok(meta.location)
    }

    /// Panics if the given `id` is outside the range of allocated and pending entities.
    ///
    /// # Safety
    /// Must only be called for currently allocated `id`s.
    pub unsafe fn resolve_unknown_gen(&self, id: u32) -> Entity {
        let meta_len = self.meta.len();

        if meta_len > id as usize {
            let meta = &self.meta[id as usize];
            Entity {
                generation: meta.generation,
                id,
            }
        } else {
            // See if it's pending, but not yet flushed.
            let free_cursor = self.free_cursor.load(Ordering::Relaxed);
            let num_pending = cmp::max(-free_cursor, 0) as usize;

            if meta_len + num_pending > id as usize {
                // Pending entities start at generation 1.
                Entity {
                    generation: NonZeroU32::new(1).unwrap(),
                    id,
                }
            } else {
                panic!("entity id is out of range");
            }
        }
    }

    fn needs_flush(&mut self) -> bool {
        // Not racey due to &mut self
        self.free_cursor.load(Ordering::Relaxed) != self.pending.len() as i64
    }

    /// Allocates space for entities previously reserved with `reserve_entity` or
    /// `reserve_entities`, then initializes each one using the supplied function.
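    ///
    /// A hedged sketch of the reserve-then-flush pattern (the closure body is
    /// illustrative; callers must write a real `Location`):
    ///
    /// ```ignore
    /// let reserved = entities.reserve_entity();
    /// entities.flush(|id, location| {
    ///     location.archetype = 0;
    ///     location.index = id; // placeholder mapping
    /// });
    /// assert!(entities.get(reserved).is_ok());
    /// ```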
    pub fn flush(&mut self, mut init: impl FnMut(u32, &mut Location)) {
        // Not racey because of &mut self.
        let free_cursor = self.free_cursor.load(Ordering::Relaxed);

        let new_free_cursor = if free_cursor >= 0 {
            free_cursor as usize
        } else {
            let old_meta_len = self.meta.len();
            let new_meta_len = old_meta_len + -free_cursor as usize;
            self.meta.resize(new_meta_len, EntityMeta::EMPTY);

            self.len += -free_cursor as u32;
            for (id, meta) in self.meta.iter_mut().enumerate().skip(old_meta_len) {
                init(id as u32, &mut meta.location);
            }

            self.free_cursor.store(0, Ordering::Relaxed);
            0
        };

        self.len += (self.pending.len() - new_free_cursor) as u32;
        for id in self.pending.drain(new_free_cursor..) {
            init(id, &mut self.meta[id as usize].location);
        }
    }

    #[inline]
    pub fn len(&self) -> u32 {
        self.len
    }
}

#[derive(Copy, Clone)]
pub(crate) struct EntityMeta {
    pub generation: NonZeroU32,
    pub location: Location,
}

impl EntityMeta {
    const EMPTY: EntityMeta = EntityMeta {
        generation: unsafe { NonZeroU32::new_unchecked(1) },
        location: Location {
            archetype: 0,
            index: u32::max_value(), // dummy value, to be filled in
        },
    };
}

#[derive(Copy, Clone)]
pub(crate) struct Location {
    pub archetype: u32,
    pub index: u32,
}

/// Error indicating that no entity with a particular ID exists
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct NoSuchEntity;

impl fmt::Display for NoSuchEntity {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("no such entity")
    }
}

#[cfg(feature = "std")]
impl Error for NoSuchEntity {}

#[derive(Clone)]
pub(crate) struct AllocManyState {
    pub pending_end: usize,
    fresh: Range<u32>,
}

impl AllocManyState {
    pub fn next(&mut self, entities: &Entities) -> Option<u32> {
        if self.pending_end < entities.pending.len() {
            let id = entities.pending[self.pending_end];
            self.pending_end += 1;
            Some(id)
        } else {
            self.fresh.next()
        }
    }

    pub fn len(&self, entities: &Entities) -> usize {
        self.fresh.len() + (entities.pending.len() - self.pending_end)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use hashbrown::{HashMap, HashSet};
    use rand::{rngs::StdRng, Rng, SeedableRng};

    #[test]
    fn entity_bits_roundtrip() {
        let e = Entity {
            generation: NonZeroU32::new(0xDEADBEEF).unwrap(),
            id: 0xBAADF00D,
        };
        assert_eq!(Entity::from_bits(e.to_bits().into()).unwrap(), e);
    }
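
    // A hedged companion to the roundtrip test above: a zero generation is an
    // invalid bitpattern, and the low 32 bits carry the ID.
    #[test]
    fn entity_bits_edge_cases() {
        assert!(Entity::from_bits(0).is_none());
        assert!(Entity::from_bits(42).is_none()); // generation bits are zero
        let e = Entity {
            generation: NonZeroU32::new(1).unwrap(),
            id: 42,
        };
        assert_eq!(e.to_bits().get() & 0xFFFF_FFFF, 42);
    }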

    #[test]
    fn alloc_and_free() {
        let mut rng = StdRng::seed_from_u64(0xFEEDFACEDEADF00D);

        let mut e = Entities::default();
        let mut first_unused = 0u32;
        let mut id_to_gen: HashMap<u32, u32> = Default::default();
        let mut free_set: HashSet<u32> = Default::default();
        let mut len = 0;

        for _ in 0..100 {
            let alloc = rng.gen_bool(0.7);
            if alloc || first_unused == 0 {
                let entity = e.alloc();
                len += 1;

                let id = entity.id;
                if !free_set.is_empty() {
                    // This should have come from the freelist.
                    assert!(free_set.remove(&id));
                } else if id >= first_unused {
                    first_unused = id + 1;
                }

                e.get_mut(entity).unwrap().index = 37;

                assert!(id_to_gen.insert(id, entity.generation.get()).is_none());
            } else {
                // Free a random ID, whether or not it's in use, and check for errors.
                let id = rng.gen_range(0..first_unused);

                let generation = id_to_gen.remove(&id);
                let entity = Entity {
                    id,
                    generation: NonZeroU32::new(
                        generation.unwrap_or_else(|| NonZeroU32::new(1).unwrap().get()),
                    )
                    .unwrap(),
                };

                assert_eq!(e.free(entity).is_ok(), generation.is_some());
                if generation.is_some() {
                    len -= 1;
                }

                free_set.insert(id);
            }
            assert_eq!(e.len(), len);
        }
    }

    #[test]
    fn alloc_at() {
        let mut e = Entities::default();

        let mut old = Vec::new();

        for _ in 0..2 {
            let entity = e.alloc();
            old.push(entity);
            e.free(entity).unwrap();
        }

        assert_eq!(e.len(), 0);

        let id = old.first().unwrap().id();
        assert!(old.iter().all(|entity| entity.id() == id));

        let entity = *old.last().unwrap();
        // The old ID shouldn't exist at this point, and should exist
        // in the pending list.
        assert!(!e.contains(entity));
        assert!(e.pending.contains(&entity.id()));
        // Allocating an entity at an unused location should not cause a location to be returned.
        assert!(e.alloc_at(entity).is_none());
        assert!(e.contains(entity));
        // The entity in question should not exist in the freelist once allocated.
        assert!(!e.pending.contains(&entity.id()));
        assert_eq!(e.len(), 1);
        // Allocating at the same ID again should cause a location to be returned
        // this time around.
        assert!(e.alloc_at(entity).is_some());
        assert!(e.contains(entity));
        assert_eq!(e.len(), 1);

        // Allocating at an ID beyond the current range should cause the
        // skipped-over IDs to be added to the freelist.
        assert_eq!(e.meta.len(), 1);
        assert!(e
            .alloc_at(Entity {
                id: 3,
                generation: NonZeroU32::new(2).unwrap(),
            })
            .is_none());
        assert_eq!(e.pending.len(), 2);
        assert_eq!(&e.pending, &[1, 2]);
        assert_eq!(e.meta.len(), 4);
    }

    #[test]
    fn contains() {
        let mut e = Entities::default();

        for _ in 0..2 {
            let entity = e.alloc();
            assert!(e.contains(entity));

            e.free(entity).unwrap();
            assert!(!e.contains(entity));
        }

        // Reserved but not flushed are still "contained".
        for _ in 0..3 {
            let entity = e.reserve_entity();
            assert!(e.contains(entity));
        }
    }
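
    // A hedged sketch grounded in `free` and `alloc` above: reuse keeps the ID
    // but bumps the generation, so the stale handle no longer matches.
    #[test]
    fn generation_increments_on_reuse() {
        let mut e = Entities::default();
        let first = e.alloc();
        e.free(first).unwrap();
        let second = e.alloc();
        assert_eq!(first.id(), second.id());
        assert_ne!(first.generation, second.generation);
        assert!(!e.contains(first));
        assert!(e.contains(second));
    }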

    // Shared test code parameterized by how we want to allocate an Entity block.
    fn reserve_test_helper(reserve_n: impl FnOnce(&mut Entities, u32) -> Vec<Entity>) {
        let mut e = Entities::default();

        // Allocate 10 items.
        let mut v1: Vec<Entity> = (0..10).map(|_| e.alloc()).collect();
        assert_eq!(v1.iter().map(|e| e.id).max(), Some(9));
        for &entity in v1.iter() {
            assert!(e.contains(entity));
            e.get_mut(entity).unwrap().index = 37;
        }

        // Put the last 4 on the freelist.
        for entity in v1.drain(6..) {
            e.free(entity).unwrap();
        }
        assert_eq!(e.free_cursor.load(Ordering::Relaxed), 4);

        // Reserve 10 entities, so 4 will come from the freelist.
        // This means we will have allocated 10 + 10 - 4 total items, so the max ID is 15.
        let v2 = reserve_n(&mut e, 10);
        assert_eq!(v2.iter().map(|e| e.id).max(), Some(15));

        // Reserved IDs still count as "contained".
        assert!(v2.iter().all(|&entity| e.contains(entity)));

        // We should have exactly IDs 0..16.
        let mut v3: Vec<Entity> = v1.iter().chain(v2.iter()).copied().collect();
        assert_eq!(v3.len(), 16);
        v3.sort_by_key(|entity| entity.id);
        for (i, entity) in v3.into_iter().enumerate() {
            assert_eq!(entity.id, i as u32);
        }

        // 6 are brand-new IDs awaiting flush, reflected by the negative cursor.
        assert_eq!(e.free_cursor.load(Ordering::Relaxed), -6);

        let mut flushed = Vec::new();
        e.flush(|id, _| flushed.push(id));
        flushed.sort_unstable();

        assert_eq!(flushed, (6..16).collect::<Vec<_>>());
    }

    #[test]
    fn reserve_entity() {
        reserve_test_helper(|e, n| (0..n).map(|_| e.reserve_entity()).collect())
    }

    #[test]
    fn reserve_entities() {
        reserve_test_helper(|e, n| e.reserve_entities(n).collect())
    }

    #[test]
    fn reserve_grows() {
        let mut e = Entities::default();
        let _ = e.reserve_entity();
        e.flush(|_, _| {});
        assert_eq!(e.len(), 1);
    }

    #[test]
    fn reserve_grows_mixed() {
        let mut e = Entities::default();
        let a = e.alloc();
        e.alloc();
        e.free(a).unwrap();
        let _ = e.reserve_entities(3);
        e.flush(|_, _| {});
        assert_eq!(e.len(), 4);
    }
}