// mtb_entity_slab/alloc.rs
//! Entity slab allocator implementation.

use crate::{
    IndexedID, PtrID, SlicePtrErr, SlicePtrRes,
    bitalloc::IBitAlloc,
    chunk::{Chunk, Unit},
    gen_index::GenIndex,
    iter::{EntityAllocConsumeIter, EntityAllocEditIter, EntityAllocReadIter},
    policy::{AllocPolicy256, IAllocPolicy},
};
use std::{
    cell::{BorrowMutError, Cell, RefCell},
    ptr::NonNull,
};

/// Possible errors when accessing allocated entities.
///
/// Returned by accessors such as `unit_ptr_of_indexed` when a generation
/// check fails or a null index is dereferenced.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EntityAccessErr {
    /// The entity has been freed and cannot be accessed
    /// (the stored generation no longer matches the index).
    UseAfterFree,

    /// The pointer is null and does not reference any entity.
    NullReference,
}
33impl std::fmt::Display for EntityAccessErr {
34    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
35        let msg = match self {
36            EntityAccessErr::UseAfterFree => "Entity has been freed (use-after-free).",
37            EntityAccessErr::NullReference => "Pointer is null (no entity referenced).",
38        };
39        f.write_str(msg)
40    }
41}
42impl std::error::Error for EntityAccessErr {}
43
/// Result type for entity access operations.
///
/// Defaults the success payload to `()` for operations with no return value.
pub type EntityAccessRes<T = ()> = Result<T, EntityAccessErr>;

/// Entity slab allocator for entities of type `E` using allocation policy `P`.
///
/// This allocator maintains a growable vector of fixed-size `Chunk<E, P>` slabs.
/// Each `Chunk` stores up to `P::CHUNK_SIZE` units and keeps per-unit generation
/// indices plus a bitset of allocated slots. `EntityAlloc` provides fast
/// allocation/deallocation inside chunks, keeps a free-chain of chunks that have
/// available slots (`free_head`), and tracks the total number of allocated
/// entities (`num_allocated`).
///
/// Primary responsibilities and guarantees:
/// - Allocate and deallocate entities with O(1) operations inside a chunk;
///   the allocator grows by creating new `Chunk`s when needed.
/// - Maintain a `free_head` pointing to a chunk with available slots to avoid
///   scanning all chunks on each allocation.
/// - Expose helper methods that return either raw unit pointers or generation-
///   aware indexes (`GenIndex`). Callers must respect generation checks to
///   avoid use-after-free; the allocator provides checks in `unit_ptr_of_indexed`
///   and related helpers.
///
/// Concurrency and safety notes:
/// - `EntityAlloc` uses interior mutability (`RefCell` and `Cell`) and is not
///   `Sync`; external synchronization is required for concurrent access.
/// - Several `pub(crate)` functions operate on raw pointers and rely on
///   invariants (generation matching, non-null indexed values). Use the public
///   crate APIs where possible; internal functions are optimized for
///   performance and assume correct usage.
///
/// Field overview:
/// - `chunks`: `RefCell<Vec<Chunk<E, P>>>` — growable storage of slab chunks.
/// - `free_head`: `Cell<u32>` — index of the first chunk with free slots (or a
///   sentinel when none available).
/// - `num_allocated`: `Cell<usize>` — current count of allocated entities.
///
/// Usage tips:
/// - Use `remake_free_chain` to rebuild the free-chain after bulk mutations.
/// - Prefer `free_if` / `fully_free_if` for conditional freeing, they maintain
///   allocator invariants and update `num_allocated` correctly.
/// - Be careful when converting between raw unit pointers and `GenIndex` —
///   always handle `EntityAccessErr` and `SlicePtrErr` reported by the helpers.
///
/// # Examples
/// ```ignore
/// // See the crate-level documentation; entities are normally allocated,
/// // accessed and freed through the crate's safe public APIs.
/// ```
pub struct EntityAlloc<E, P: IAllocPolicy = AllocPolicy256> {
    // Growable storage of slab chunks.
    pub(crate) chunks: RefCell<Vec<Chunk<E, P>>>,
    // Index of the first chunk with free slots; a value past the end of
    // `chunks` means "grow a new chunk on the next allocation".
    pub(crate) free_head: Cell<u32>,
    // Current count of live (allocated) entities across all chunks.
    pub(crate) num_allocated: Cell<usize>,
}
107impl<E, P: IAllocPolicy> Default for EntityAlloc<E, P> {
108    fn default() -> Self {
109        Self {
110            chunks: RefCell::new(Vec::new()),
111            free_head: Cell::new(0),
112            num_allocated: Cell::new(0),
113        }
114    }
115}
116impl<E, P: IAllocPolicy> EntityAlloc<E, P> {
117    /// Create a new `EntityAlloc` with no preallocated capacity.
118    pub fn new() -> Self {
119        Self::default()
120    }
121
122    /// Create a new `EntityAlloc` with preallocated capacity for `cap` entities.
123    pub fn with_capacity(cap: usize) -> Self {
124        let chunks = {
125            let nchunks = cap.next_multiple_of(P::CHUNK_SIZE) / P::CHUNK_SIZE;
126            let mut chunks = Vec::with_capacity(nchunks);
127            for chunk_id in 0..(nchunks as u32) {
128                let chunk = Chunk::new(chunk_id, chunk_id + 1);
129                chunks.push(chunk);
130            }
131            RefCell::new(chunks)
132        };
133        Self {
134            chunks,
135            free_head: Cell::new(0),
136            num_allocated: Cell::new(0),
137        }
138    }
139
140    pub(crate) fn try_allocate_unit(&self, val: E) -> Result<*const Unit<E>, (BorrowMutError, E)> {
141        let mut chunks = match self.chunks.try_borrow_mut() {
142            Ok(c) => c,
143            Err(e) => return Err((e, val)),
144        };
145        let chunk = Self::find_chunk(self.free_head.get(), &mut chunks);
146        let Ok(ptr) = chunk.allocate(val) else {
147            unreachable!("Internal error: Cannot allocate inside a non-full chunk");
148        };
149        self.num_allocated.set(self.num_allocated.get() + 1);
150        if chunk.is_full() {
151            self.free_head.set(chunk.free_next);
152        }
153        Ok(ptr)
154    }
155    fn find_chunk(free: u32, chunks: &mut Vec<Chunk<E, P>>) -> &mut Chunk<E, P> {
156        assert_ne!(free, u32::MAX, "OOM detected");
157        let free = free as usize;
158        while chunks.len() <= free {
159            let id = chunks.len() as u32;
160            chunks.push(Chunk::new(id, id + 1));
161        }
162        &mut chunks[free]
163    }
164    pub(crate) fn next_index(&self) -> GenIndex {
165        let chunks = self.chunks.borrow();
166        let free = self.free_head.get() as usize;
167        let Some(chunk) = chunks.get(free) else {
168            return GenIndex::compose(free << P::CHUNK_SIZE_LOG2, 0);
169        };
170        let unit_id = chunk.next_available().unwrap();
171        chunk.indexed_id_of_unit(unit_id)
172    }
173
174    pub(crate) fn free_gen_index(&mut self, index: GenIndex) -> Option<E> {
175        let (chunk_id, unit_id, gene) = P::tear(index);
176        let chunks = self.chunks.get_mut();
177        let chunk = chunks.get_mut(chunk_id as usize)?;
178        let chunk_full = chunk.is_full();
179        let val = chunk.deallocate_checked(unit_id, gene)?;
180        self.num_allocated.set(self.num_allocated.get() - 1);
181        if chunk_full {
182            chunk.free_next = self.free_head.get();
183            self.free_head.set(chunk_id);
184        }
185        Some(val)
186    }
187    pub(crate) fn free_unit_ptr(&mut self, ptr: *const Unit<E>) -> Option<E> {
188        let indexed = self.index_of_unit_ptr(ptr).ok()?;
189        self.free_gen_index(indexed)
190    }
191    pub(crate) fn check_unit_validity(&self, ptr: *const Unit<E>) -> SlicePtrRes<GenIndex> {
192        let chunks = self.chunks.borrow();
193        for chunk in chunks.iter() {
194            let unit_id = match chunk.unit_of_ptr(ptr) {
195                Ok(id) => id,
196                Err(SlicePtrErr::OutOfRange) => continue,
197                Err(SlicePtrErr::NotAlignedWith) => return Err(SlicePtrErr::NotAlignedWith),
198            };
199            let unit = chunk.unit(unit_id);
200            return if unit.indexed.is_null() {
201                Err(SlicePtrErr::OutOfRange)
202            } else {
203                Ok(unit.indexed)
204            };
205        }
206        Err(SlicePtrErr::OutOfRange)
207    }
208    pub(crate) fn index_of_unit_ptr(&self, ptr: *const Unit<E>) -> SlicePtrRes<GenIndex> {
209        if cfg!(debug_assertions) {
210            self.check_unit_validity(ptr)
211        } else {
212            let indexed = unsafe { ptr.as_ref().ok_or(SlicePtrErr::OutOfRange)?.indexed };
213            if indexed.is_null() {
214                Err(SlicePtrErr::OutOfRange)
215            } else {
216                Ok(indexed)
217            }
218        }
219    }
220    pub(crate) fn unit_ptr_of_indexed(
221        &self,
222        indexed: GenIndex,
223    ) -> EntityAccessRes<NonNull<Unit<E>>> {
224        if indexed.is_null() {
225            return Err(EntityAccessErr::NullReference);
226        }
227        let (chunk_id, unit_id, _) = P::tear(indexed);
228        let chunks = self.chunks.borrow();
229        let Some(chunk) = chunks.get(chunk_id as usize) else {
230            return Err(EntityAccessErr::UseAfterFree);
231        };
232        let unit = chunk.unit(unit_id);
233        if unit.indexed != indexed {
234            return Err(EntityAccessErr::UseAfterFree);
235        }
236        Ok(NonNull::new(unit as *const _ as *mut Unit<E>).unwrap())
237    }
238
239    /// Rebuild the free-chain of chunks with available slots.
240    pub fn remake_free_chain(&mut self) {
241        let chunks = self.chunks.get_mut();
242        let free_chunks = {
243            let mut free_chunks = Vec::with_capacity(chunks.len());
244            for chunk in chunks.iter_mut() {
245                if !chunk.is_full() {
246                    free_chunks.push(chunk.chunk_id);
247                }
248            }
249            free_chunks
250        };
251        for i in 1..free_chunks.len() {
252            let chunk_id = free_chunks[i - 1];
253            let next_chunk_id = free_chunks[i];
254            let chunk = &mut chunks[chunk_id as usize];
255            chunk.free_next = next_chunk_id;
256        }
257        if let Some(&chunk_id) = free_chunks.first() {
258            self.free_head.set(chunk_id);
259        } else {
260            self.free_head.set(chunks.len() as u32);
261        }
262    }
263}
264
265impl<E, P: IAllocPolicy> EntityAlloc<E, P> {
266    /// Free entities that satisfy the given predicate.
267    pub fn free_if(&mut self, pred: impl FnMut(&E, PtrID<E, P>, IndexedID<E, P>) -> bool) {
268        self.fully_free_if(pred, drop);
269    }
270
271    /// Free entities that satisfy the given predicate, consuming them with the provided closure.
272    ///
273    /// This method automatically chooses between dense and sparse freeing strategies
274    /// based on the chunk's allocation density for efficiency.
275    pub fn fully_free_if(
276        &mut self,
277        mut should_free: impl FnMut(&E, PtrID<E, P>, IndexedID<E, P>) -> bool,
278        mut consume: impl FnMut(E),
279    ) {
280        let chunks = self.chunks.get_mut();
281        let mut num_last = self.num_allocated.get();
282        for chunk in chunks.iter_mut() {
283            if chunk.is_empty() {
284                continue;
285            }
286            if num_last == 0 {
287                break;
288            }
289            let chunk_full_previously = chunk.is_full();
290            let chunk_num_allocated = chunk.num_allocated as usize;
291            let num_freed = if chunk_num_allocated * 3 >= P::CHUNK_SIZE {
292                Self::densely_free_chunk(&mut should_free, &mut consume, chunk)
293            } else {
294                Self::sparsely_free_chunk(&mut should_free, &mut consume, chunk)
295            };
296
297            num_last -= chunk_num_allocated;
298            // 维护全局已分配计数
299            if num_freed > 0 {
300                self.num_allocated
301                    .set(self.num_allocated.get().saturating_sub(num_freed));
302            }
303            // 如果之前是满的,现在有空位了,加入空闲链表
304            if chunk_full_previously && !chunk.is_full() {
305                chunk.free_next = self.free_head.get();
306                self.free_head.set(chunk.chunk_id);
307            }
308        }
309    }
310
311    fn densely_free_chunk(
312        should_free: &mut impl FnMut(&E, PtrID<E, P>, IndexedID<E, P>) -> bool,
313        consume: &mut impl FnMut(E),
314        chunk: &mut Chunk<E, P>,
315    ) -> usize {
316        let mut num_freed = 0;
317        let mut count_down = chunk.num_allocated;
318        for unit_id in 0..(P::CHUNK_SIZE as u16) {
319            if count_down == 0 {
320                break;
321            }
322            let index = chunk.indexed_id_of_unit(unit_id);
323            if !chunk.allocated.is_allocated(unit_id) {
324                continue;
325            }
326            count_down -= 1;
327
328            let unit = chunk.unit(unit_id);
329            let ptr = PtrID::from(unit);
330            let elem = unit.as_init_ref().unwrap();
331            if !should_free(elem, ptr, IndexedID::from(index)) {
332                continue;
333            }
334            let Some(e) = chunk.deallocate_unchecked(unit_id) else {
335                unreachable!("Deallocation failed unexpectedly");
336            };
337            consume(e);
338            num_freed += 1;
339        }
340        num_freed
341    }
342    fn sparsely_free_chunk(
343        should_free: &mut impl FnMut(&E, PtrID<E, P>, IndexedID<E, P>) -> bool,
344        consume: &mut impl FnMut(E),
345        chunk: &mut Chunk<E, P>,
346    ) -> usize {
347        // 逐个枚举当前已分配位,避免全表扫描
348        let mut num_freed = 0usize;
349        let mut next = chunk.allocated.next_allocated(0);
350        while let Some(unit_id) = next {
351            let index = chunk.indexed_id_of_unit(unit_id);
352            let unit = chunk.unit(unit_id);
353            let ptr = PtrID::from(unit);
354            let elem = unit.as_init_ref().unwrap();
355            if should_free(elem, ptr, IndexedID::from(index)) {
356                if let Some(e) = chunk.deallocate_unchecked(unit_id) {
357                    consume(e);
358                    num_freed += 1;
359                } else {
360                    unreachable!("Deallocation failed unexpectedly");
361                }
362                // 删除了当前位后,从 unit_id+1 继续
363                let next_start = unit_id.saturating_add(1);
364                next = chunk.allocated.next_allocated(next_start);
365            } else {
366                // 未删除则从下一个位置开始
367                let next_start = unit_id.saturating_add(1);
368                next = chunk.allocated.next_allocated(next_start);
369            }
370        }
371        num_freed
372    }
373
374    /// Get the number of currently allocated entities.
375    ///
376    /// Note that this is not the length of the internal chunk vector or the
377    /// "allocated" sections in chunks, but the actual count of live entities.
378    #[inline]
379    pub fn len(&self) -> usize {
380        self.num_allocated.get()
381    }
382
383    /// Get the current capacity in terms of number of entities.
384    #[inline]
385    pub fn capacity(&self) -> usize {
386        self.chunks.borrow().len() * P::CHUNK_SIZE
387    }
388
389    /// Check if there are no allocated entities.
390    #[inline]
391    pub fn is_empty(&self) -> bool {
392        self.len() == 0
393    }
394
395    /// Clear all allocated entities, resetting the allocator to an empty state.
396    pub fn clear(&mut self) {
397        self.chunks.get_mut().clear();
398        self.free_head.set(0);
399        self.num_allocated.set(0);
400    }
401
402    /// Get an iterator over allocated entities for read-only access.
403    ///
404    /// The iterator yields tuples of `(IndexedID, PtrID, &E)`.
405    #[inline]
406    pub fn iter(&self) -> EntityAllocReadIter<'_, E, P> {
407        EntityAllocReadIter::new(self)
408    }
409    /// Get an iterator over allocated entities for mutable access.
410    ///
411    /// The iterator yields tuples of `(IndexedID, PtrID, &mut E)`.
412    #[inline]
413    pub fn iter_mut(&mut self) -> EntityAllocEditIter<'_, E, P> {
414        EntityAllocEditIter::new(self)
415    }
416}
417
/// Shared iteration: yields `(IndexedID, PtrID, &E)` for each live entity.
impl<'alloc, E, P: IAllocPolicy> IntoIterator for &'alloc EntityAlloc<E, P> {
    type Item = (IndexedID<E, P>, PtrID<E, P>, &'alloc E);
    type IntoIter = EntityAllocReadIter<'alloc, E, P>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
/// Exclusive iteration: yields `(IndexedID, PtrID, &mut E)` for each live entity.
impl<'alloc, E, P: IAllocPolicy> IntoIterator for &'alloc mut EntityAlloc<E, P> {
    type Item = (IndexedID<E, P>, PtrID<E, P>, &'alloc mut E);
    type IntoIter = EntityAllocEditIter<'alloc, E, P>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}
/// Consuming iteration: drains the allocator, yielding owned `(GenIndex, E)` pairs.
impl<E, P: IAllocPolicy> IntoIterator for EntityAlloc<E, P> {
    type Item = (GenIndex, E);
    type IntoIter = EntityAllocConsumeIter<E, P>;

    fn into_iter(self) -> Self::IntoIter {
        EntityAllocConsumeIter::new(self)
    }
}