rustc_arena_modified/typed_arena.rs
use std::cell::{Cell, RefCell};
use std::cmp::max;
use std::fmt::{Debug, Formatter};
use std::marker::PhantomData;
use std::mem::{forget, size_of, transmute};
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
use std::ptr::{drop_in_place, null_mut, write, NonNull};
use std::slice::{from_raw_parts, from_raw_parts_mut};

use smallvec::SmallVec;

use crate::arena_chunk::ArenaChunk;
use crate::{PtrUnstables, HUGE_PAGE, PAGE};

#[cfg(test)]
mod tests;

/// An arena that can hold objects of only one type. Allocations return shared references, and the
/// arena can be iterated behind a shared reference.
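///
/// # Example
///
/// A minimal sketch (not from the original docs), assuming the crate re-exports `TypedArena` at
/// its root:
///
/// ```
/// use rustc_arena_modified::TypedArena;
///
/// let arena: TypedArena<u32> = TypedArena::new();
/// let one: &u32 = arena.alloc(1);
/// let two: &u32 = arena.alloc(2);
/// // Iteration works while `one` and `two` are still borrowed.
/// assert_eq!(arena.iter().copied().collect::<Vec<_>>(), vec![1, 2]);
/// assert_eq!(*one + *two, 3);
/// ```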
pub type TypedArena<T> = TypedArenaGen<T, Shared>;

/// An arena that can hold objects of only one type. Allocations return mutable references, and the
/// arena can only be iterated behind a mutable reference.
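///
/// # Example
///
/// A minimal sketch (not from the original docs), assuming the crate re-exports `TypedArenaMut`
/// at its root:
///
/// ```
/// use rustc_arena_modified::TypedArenaMut;
///
/// let mut arena: TypedArenaMut<String> = TypedArenaMut::new();
/// let s: &mut String = arena.alloc(String::from("hello"));
/// s.push_str(", world");
/// // Iterating this variant requires a mutable reference.
/// assert_eq!(arena.iter_mut().next().map(|s| s.as_str()), Some("hello, world"));
/// ```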
pub type TypedArenaMut<T> = TypedArenaGen<T, Mutable>;

/// An arena that can hold objects of only one type. `RM` determines whether the references
/// returned from allocation are mutable and whether iteration requires a mutable reference; either
/// mutating returned references or iterating via immutable reference is possible, but not both.
pub struct TypedArenaGen<T, RM: RefMutability> {
    /// The number of inserted entries
    len: Cell<usize>,
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,
    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,
    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,
    /// The # of chunks actually used by the arena. The rest were allocated but are now empty,
    /// and we will try to re-use them before allocating a new chunk.
    used_chunks: Cell<usize>,
    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
    /// `RM` is a phantom type parameter so we need this
    _rm: PhantomData<RM>,
}

/// Iterates all elements in an arena, and can handle new elements being allocated during iteration.
pub type Iter<'a, T> = IterGen<'a, T, Shared>;

/// Iterates all elements in an arena.
pub type IterMut<'a, T> = IterGen<'a, T, Mutable>;

/// Iterates all elements in an arena, and if [Shared], can handle new elements being allocated
/// during iteration.
pub struct IterGen<'a, T, RM: RefMutability>(PtrIter<'a, T>, PhantomData<RM>);

/// Iterates pointers to all elements in the arena, and (if shared) can handle new elements being
/// allocated during iteration.
pub struct PtrIter<'a, T> {
    /// The arena being iterated. The actual `RM` is irrelevant; we use [Shared] because we only
    /// access it through shared references here and there may be other active references, but it
    /// may have been `transmute`d from a different `RM`.
    arena: &'a TypedArena<T>,
    /// Index of the current chunk being iterated
    chunk_index: usize,
    /// Pointer to the next entry in the current chunk being iterated
    chunk_data: NonNull<T>,
    /// Entries remaining in the current chunk being iterated, **except** like [ArenaChunk], if we
    /// are iterating the last chunk, this will be 0 (unset) even though we have more entries
    chunk_remaining_entries: usize,
    /// Index in the arena of the current element being iterated
    element_index: usize,
}

/// An iterable which can be allocated into the arena faster than with the default [IntoIterator]
/// implementation, using [TypedArenaGen::alloc_raw_slice].
///
/// `rustc_arena` uses default implementations, but those are unstable, so instead you need to call
/// [TypedArenaGen::alloc_from_iter_fast] manually. Unlike `rustc_arena`, you can implement this for
/// your own collections, although they will probably just delegate to one of the builtin
/// implementations; and often, simply using [TypedArenaGen::alloc_from_iter_reg] will be as fast or
/// fast enough, and you won't need a custom implementation at all.
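///
/// # Example
///
/// A hypothetical delegating implementation (not from the original docs); it assumes
/// `IterWithFastAlloc`, `RefMutability`, `TypedArena`, and `TypedArenaGen` are re-exported at the
/// crate root:
///
/// ```
/// use rustc_arena_modified::{IterWithFastAlloc, RefMutability, TypedArena, TypedArenaGen};
///
/// // A hypothetical wrapper that just forwards to the builtin `Vec` implementation.
/// struct MyBuffer(Vec<u32>);
///
/// impl IterWithFastAlloc<u32> for MyBuffer {
///     fn alloc_into<RM: RefMutability>(self, arena: &TypedArenaGen<u32, RM>) -> RM::SliceRef<'_, u32> {
///         self.0.alloc_into(arena)
///     }
/// }
///
/// let arena: TypedArena<u32> = TypedArena::new();
/// let slice = arena.alloc_from_iter_fast(MyBuffer(vec![1, 2, 3]));
/// assert_eq!(slice, &[1, 2, 3][..]);
/// ```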
pub trait IterWithFastAlloc<T> {
    fn alloc_into<RM: RefMutability>(self, arena: &TypedArenaGen<T, RM>) -> RM::SliceRef<'_, T>;
}

/// Whether references to allocated objects are mutable, and whether iteration requires a mutable
/// reference.
pub trait RefMutability: private::Sealed + 'static {
    /// `&T` or `&mut T`
    type Ref<'a, T>
    where
        T: 'a;
    /// `&[T]` or `&mut [T]`
    type SliceRef<'a, T>
    where
        T: 'a;

    /// Reference to the empty slice
    fn empty<'a, T>() -> Self::SliceRef<'a, T>;
    /// Dereference a pointer
    ///
    /// # Safety
    /// The pointer must be valid: see [pointer::as_ref] and [pointer::as_mut]'s requirements (the
    /// latter's are required if `Self` is [Mutable])
    ///
    /// [pointer::as_ref]: https://doc.rust-lang.org/std/primitive.pointer.html#method.as_ref
    /// [pointer::as_mut]: https://doc.rust-lang.org/std/primitive.pointer.html#method.as_mut
    unsafe fn from_ptr<'a, T>(t: *mut T) -> Self::Ref<'a, T>;
    /// Dereference a pointer and add length metadata to make it a slice reference
    ///
    /// # Safety
    /// The pointer must be valid: see [pointer::as_ref] and [pointer::as_mut]'s requirements (the
    /// latter's are required if `Self` is [Mutable]). Additionally, you must follow the
    /// requirements of [from_raw_parts] or [from_raw_parts_mut]
    ///
    /// [pointer::as_ref]: https://doc.rust-lang.org/std/primitive.pointer.html#method.as_ref
    /// [pointer::as_mut]: https://doc.rust-lang.org/std/primitive.pointer.html#method.as_mut
    unsafe fn from_raw_parts<'a, T>(t: *mut T, len: usize) -> Self::SliceRef<'a, T>;
    /// Convert a reference of this type into a shared reference
    #[allow(clippy::wrong_self_convention)]
    fn as_ref<T>(t: Self::Ref<'_, T>) -> &T;
    /// Convert a reference of this slice type into a shared reference
    #[allow(clippy::wrong_self_convention)]
    fn as_slice_ref<T>(t: Self::SliceRef<'_, T>) -> &[T];

    /// Dereference a non-null pointer
    ///
    /// # Safety
    /// The pointer must be valid: see [NonNull::as_ref] and [NonNull::as_mut]'s requirements (the
    /// latter's are required if `Self` is [Mutable])
    #[inline]
    unsafe fn from_non_null<'a, T>(t: NonNull<T>) -> Self::Ref<'a, T> {
        Self::from_ptr(t.as_ptr())
    }
}

pub enum Shared {}
pub enum Mutable {}

impl<T, RM: RefMutability> Default for TypedArenaGen<T, RM> {
    /// Creates a new, empty arena
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl<T, RM: RefMutability> TypedArenaGen<T, RM> {
    /// Creates a new, empty arena
    #[inline]
    pub fn new() -> Self {
        Self {
            len: Cell::new(0),
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(null_mut()),
            end: Cell::new(null_mut()),
            chunks: Default::default(),
            used_chunks: Cell::new(0),
            _own: PhantomData,
            _rm: PhantomData,
        }
    }

    /// Allocates an object in the `TypedArena`, returning a reference to it.
    ///
    /// If the type parameter `RM` is [Shared] we return a shared reference. If `RM` is [Mutable] we
    /// return a mutable reference.
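    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original docs), assuming a root re-export of `TypedArena`:
    ///
    /// ```
    /// use rustc_arena_modified::TypedArena;
    ///
    /// let arena: TypedArena<String> = TypedArena::new();
    /// let s = arena.alloc(String::from("hi"));
    /// assert_eq!(s, "hi");
    /// ```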
    #[inline]
    pub fn alloc(&self, object: T) -> RM::Ref<'_, T> {
        self.len.set(self.len.get() + 1);
        if size_of::<T>() == 0 {
            // We don't actually allocate ZSTs, just prevent them from being dropped and return a
            // reference to random data (this is a valid ZST reference).
            unsafe {
                let ptr = NonNull::<T>::dangling().as_ptr();
                // This `write` is equivalent to `forget`
                write(ptr, object);
                return RM::from_ptr(ptr);
            }
        }

        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            let ptr = self.ptr.get();
            // Advance the pointer.
            self.ptr.set(self.ptr.get().add(1));
            // Write into uninitialized memory.
            write(ptr, object);
            RM::from_ptr(ptr)
        }
    }

    /// Allocates multiple objects in a contiguous slice, returning a reference to the slice.
    ///
    /// If the type parameter `RM` is [Shared] we return a shared reference. If `RM` is [Mutable] we
    /// return a mutable reference.
    ///
    /// This collects into a `SmallVec` and then allocates by copying from it. Use
    /// [Self::alloc_from_iter_fast] if possible because it's more efficient, copying directly
    /// without the intermediate collecting step. This default could be made more efficient, like
    /// [crate::DroplessArena::alloc_from_iter], but it's not hot enough to bother.
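    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original docs), assuming a root re-export of `TypedArena`:
    ///
    /// ```
    /// use rustc_arena_modified::TypedArena;
    ///
    /// let arena: TypedArena<u32> = TypedArena::new();
    /// // Works with any iterator, including ones without a fast-alloc implementation.
    /// let evens = arena.alloc_from_iter_reg((0..10).filter(|n| n % 2 == 0));
    /// assert_eq!(evens, &[0, 2, 4, 6, 8][..]);
    /// ```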
    #[inline]
    pub fn alloc_from_iter_reg(&self, iter: impl IntoIterator<Item = T>) -> RM::SliceRef<'_, T> {
        self.alloc_from_iter_fast(iter.into_iter().collect::<SmallVec<[_; 8]>>())
    }

    /// Allocates multiple objects in a contiguous slice, returning a reference to the slice.
    ///
    /// If the type parameter `RM` is [Shared] we return a shared reference. If `RM` is [Mutable] we
    /// return a mutable reference.
    ///
    /// This has the same semantics as [Self::alloc_from_iter_reg] but is faster; in exchange,
    /// [Self::alloc_from_iter_reg] permits more types.
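    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original docs), assuming a root re-export of `TypedArena`:
    ///
    /// ```
    /// use rustc_arena_modified::TypedArena;
    ///
    /// let arena: TypedArena<String> = TypedArena::new();
    /// // `Vec` has a fast-alloc implementation that copies its contents directly into the arena.
    /// let names = arena.alloc_from_iter_fast(vec![String::from("a"), String::from("b")]);
    /// assert_eq!(names.len(), 2);
    /// assert_eq!(names[1], "b");
    /// ```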
    #[inline]
    pub fn alloc_from_iter_fast(&self, iter: impl IterWithFastAlloc<T>) -> RM::SliceRef<'_, T> {
        iter.alloc_into(self)
    }

    /// Number of allocated elements in the arena.
    #[inline]
    pub fn len(&self) -> usize {
        self.len.get()
    }

    /// Does the arena have any allocated elements?
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Iterates all allocated elements in the arena behind a mutable reference.
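    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original docs), assuming a root re-export of `TypedArenaMut`:
    ///
    /// ```
    /// use rustc_arena_modified::TypedArenaMut;
    ///
    /// let mut arena: TypedArenaMut<u32> = TypedArenaMut::new();
    /// arena.alloc(1);
    /// arena.alloc(2);
    /// for n in arena.iter_mut() {
    ///     *n *= 10;
    /// }
    /// assert_eq!(arena.iter_mut().map(|n| *n).collect::<Vec<_>>(), vec![10, 20]);
    /// ```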
    #[inline]
    pub fn iter_mut(&mut self) -> IterMut<'_, T> {
        // SAFETY: Same repr, this transmuted version won't be publicly exposed or used incorrectly
        // (we can't use while [IterMut] is alive), and we're not actually violating any borrow
        // rules with a mutable phantom type like we would if this was a mutable reference.
        IterMut::new(unsafe { transmute(self) })
    }

    /// Iterates pointers to all allocated elements in the arena behind a shared reference. This is
    /// allowed since we can have pointers even to mutably borrowed elements.
    #[inline]
    pub fn ptr_iter(&self) -> PtrIter<'_, T> {
        PtrIter::new(self)
    }

    /// Clears the arena, dropping all elements, but doesn't free up its memory.
    ///
    /// This means we can insert new elements without having to reallocate, until we reach the old
    /// capacity or allocate a slice too large to fit in an existing region.
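    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original docs), assuming a root re-export of `TypedArena`:
    ///
    /// ```
    /// use rustc_arena_modified::TypedArena;
    ///
    /// let mut arena: TypedArena<u32> = TypedArena::new();
    /// arena.alloc_from_iter_reg(0..100);
    /// assert_eq!(arena.len(), 100);
    /// arena.clear();
    /// assert!(arena.is_empty());
    /// // The chunks are kept, so this allocation reuses the existing memory.
    /// arena.alloc(42);
    /// assert_eq!(arena.len(), 1);
    /// ```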
    #[inline]
    pub fn clear(&mut self) {
        // Ensure that, even on panic, we resize len (we leak elements we didn't drop yet instead of
        // double-freeing elements we did)
        let panic_result = catch_unwind(AssertUnwindSafe(|| {
            for elem in self.ptr_iter() {
                // SAFETY: we're shrinking the arena, so we A) won't drop later if we drop the arena
                // before growing it again, and B) if we do grow it again, we'll overwrite this data
                // before setting it to "initialized" (we might also grow past this data but it will
                // still be uninitialized and therefore not dropped).
                //
                // Also, elem.as_ptr() is alive, and we have the only reference since we have a mutable
                // reference to the entire arena.
                unsafe {
                    drop_in_place(elem.as_ptr());
                }
            }
        }));

        // This code will run even if we panic
        // Update len, num used chunks, used chunk entries, ptr, and end
        self.len.set(0);
        if size_of::<T>() != 0 {
            for chunk in self
                .chunks
                .borrow_mut()
                .iter_mut()
                .take(self.used_chunks.get())
            {
                chunk.entries = 0;
            }
            self.used_chunks.set(0);
            // ptr and end can be null and we'll still reuse instead of allocating new chunks
            self.ptr.set(null_mut());
            self.end.set(null_mut());
        }

        // Still unwind if we panicked
        if let Err(caught_panic) = panic_result {
            resume_unwind(caught_panic)
        }
    }

    /// Removes some elements from this arena, and coalesces the rest so that we don't have gaps.
    ///
    /// Pointers into the arena's memory may be invalidated as elements get rearranged. This
    /// function takes a mutable reference, which ensures that there are no references to the
    /// rearranged elements, but any raw pointers into the arena can no longer be dereferenced
    /// without UB.
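    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original docs), assuming a root re-export of `TypedArena`:
    ///
    /// ```
    /// use rustc_arena_modified::TypedArena;
    ///
    /// let mut arena: TypedArena<u32> = TypedArena::new();
    /// arena.alloc_from_iter_reg(0..10);
    /// arena.retain(|n| *n % 2 == 0);
    /// assert_eq!(arena.iter().copied().collect::<Vec<_>>(), vec![0, 2, 4, 6, 8]);
    /// ```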
    #[inline]
    pub fn retain(&mut self, mut predicate: impl FnMut(&mut T) -> bool) {
        // Ensure that, even on panic, we resize len (we leak elements we didn't drop yet instead of
        // double-freeing elements we did). Furthermore, kept elements are still in the arena,
        // although this doesn't really matter and is subject to change between versions.
        let mut num_kept = 0;
        let panic_result = catch_unwind(AssertUnwindSafe(|| {
            let mut write_iter = self.ptr_iter();
            let mut is_write_iter_at_read_iter = true;
            for mut elem in self.ptr_iter() {
                let elem_ptr = elem.as_ptr();
                // SAFETY: elem is alive (Self::iter and Self::ptr_iter only iterate initialized data)
                // and we have a mutable reference to the arena, so there are no other references to
                // elem. Therefore, we can dereference and drop elem_ptr.
                //
                // write_ptr is allocated (inside this struct) and aligned (came from Self::ptr_iter).
                // It has previously pointed to a live object since it has been elem_ptr, but we may
                // have dropped that elem_ptr so it's no longer alive. However, we can still write to
                // it.
                //
                // Lastly, we can read from elem_ptr when we write to write_ptr (effectively copying the
                // value) because we will either overwrite the value when write_ptr advances to it, or
                // (if elem_ptr advances to the end first) we will shrink the arena to be before it, so
                // that it is effectively forgotten; and then it will either be re-allocated if we grow
                // the arena again, or released without drop if we drop the arena.
                unsafe {
                    if !predicate(elem.as_mut()) {
                        // Drop the element, keep write_iter at the same position
                        is_write_iter_at_read_iter = false;
                        drop_in_place(elem_ptr);
                    } else {
                        // Keep the element, but move it to write_iter if unaligned. Advance write_iter
                        num_kept += 1;

                        // If write_chunk can hold more elements (length < capacity), we should
                        // desync write_iter from read_iter and do so (length = capacity)
                        let mut next_is_write_iter_at_read_iter = is_write_iter_at_read_iter;
                        if write_iter.chunk_remaining_entries == 1 {
                            let mut chunks = self.chunks.borrow_mut();
                            let write_chunk = chunks
                                .get_mut(write_iter.chunk_index)
                                .expect("write_iter chunk index out of bounds");
                            let difference = write_chunk.capacity - write_chunk.entries;
                            if difference > 0 {
                                // If write_iter and read_iter are still synced, they'll be synced
                                // for this element and then desynced after.
                                //
                                // Also, we can't just put write_iter.next() before this block
                                // because we need to catch chunk_remaining_entries == 1 first
                                next_is_write_iter_at_read_iter = false;
                                // This may not be the last chunk, so we need to update the count.
                                // We also need to update write_iter's count so that it won't reach
                                // the chunk end until it reaches write_chunk's capacity.
                                write_chunk.entries += difference;
                                write_iter.chunk_remaining_entries += difference;
                                // Even if elem_iter (the implicit iterator returning elem) is
                                // synced, we still want it to move on, not read the chunk's
                                // remaining memory because it's uninitialized
                            }
                        }

                        let write_ptr = write_iter.next()
                            .expect("read_iter not done but write_iter is, write_iter should always be behind")
                            .as_ptr();
                        if size_of::<T>() != 0 && !is_write_iter_at_read_iter {
                            write_ptr.write(elem_ptr.read());
                        }

                        is_write_iter_at_read_iter = next_is_write_iter_at_read_iter;
                    }
                }
            }
        }));

        // This code will run even if we panic
        // Update len, num used chunks, used chunk entries, ptr, and end
        let old_len = self.len.get();
        self.len.set(num_kept);
        if size_of::<T>() != 0 {
            let mut chunks = self.chunks.borrow_mut();
            let mut num_entries = 0;
            let used_chunks = chunks
                .iter()
                .take_while(|chunk| {
                    if num_entries < num_kept {
                        num_entries += chunk.entries;
                        true
                    } else {
                        false
                    }
                })
                .count();
            if num_entries < num_kept {
                debug_assert_eq!(used_chunks, self.used_chunks.get());
                num_entries = old_len;
            } else {
                self.used_chunks.set(used_chunks);
            }
            if used_chunks == 0 {
                // These assertions are pretty obvious
                debug_assert_eq!((num_entries, num_kept), (0, 0));
                self.ptr.set(null_mut());
                self.end.set(null_mut());
            } else {
                let num_in_last = num_entries - num_kept;
                let last_chunk = &mut chunks[used_chunks - 1];
                // This is the last chunk, so unset (0) its entries, even though there actually are some
                last_chunk.entries = 0;
                // Set ptr and end to this chunk, and make sure ptr is offset past the existing entries
                self.ptr.set(unsafe { last_chunk.storage.add(num_in_last) });
                self.end.set(last_chunk.end());
            }
        }

        // Still unwind if we panicked
        if let Err(caught_panic) = panic_result {
            resume_unwind(caught_panic)
        }
    }

    /// Return `self` but with a different [RefMutability].
    ///
    /// Because this takes `self` by value, there are no live allocated references, so we can freely
    /// convert between the mutable and immutable variants.
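    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original docs), assuming root re-exports of `TypedArena` and
    /// `TypedArenaMut`:
    ///
    /// ```
    /// use rustc_arena_modified::{TypedArena, TypedArenaMut};
    ///
    /// let arena: TypedArenaMut<u32> = TypedArenaMut::new();
    /// *arena.alloc(1) += 1;
    /// // Freeze the arena into the shared variant so it can be iterated behind `&`.
    /// let arena: TypedArena<u32> = arena.convert();
    /// assert_eq!(arena.iter().next(), Some(&2));
    /// ```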
    #[inline]
    pub fn convert<RM2: RefMutability>(self) -> TypedArenaGen<T, RM2> {
        // SAFETY: Same repr
        unsafe { transmute(self) }
    }

    /// Destroys this arena and collects all elements into a vector.
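    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original docs), assuming a root re-export of `TypedArena`:
    ///
    /// ```
    /// use rustc_arena_modified::TypedArena;
    ///
    /// let arena: TypedArena<String> = TypedArena::new();
    /// arena.alloc(String::from("a"));
    /// arena.alloc(String::from("b"));
    /// assert_eq!(arena.into_vec(), vec![String::from("a"), String::from("b")]);
    /// ```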
    #[inline]
    pub fn into_vec(self) -> Vec<T> {
        let mut elements = Vec::with_capacity(self.len());
        if size_of::<T>() == 0 {
            // Create `len` ZSTs which will be dropped when the vector is.
            // Remember: a random non-null pointer is a valid reference to a ZST, and dereferencing
            // is probably a no-op
            elements.extend(
                (0..self.len()).map(|_| unsafe { NonNull::<T>::dangling().as_ptr().read() }),
            );
            return elements;
        }

        let mut remaining = self.len();
        let mut chunks_borrow = self.chunks.borrow_mut();
        let mut prev_chunk = None;
        for chunk in chunks_borrow.iter_mut().take(self.used_chunks.get()) {
            if let Some(prev_chunk) = prev_chunk.replace(chunk) {
                // SAFETY: This chunk has all entries filled because we've moved on to the next one
                // (and we resize the chunk's entries when we move on, even though it has more capacity).
                let mut prev_entries = unsafe { prev_chunk.destroy_and_return(prev_chunk.entries) };
                elements.append(&mut prev_entries);
                remaining -= prev_chunk.entries;
            }
        }
        if let Some(last_chunk) = prev_chunk {
            // SAFETY: This chunk only has `remaining` entries filled
            let mut last_entries = unsafe { last_chunk.destroy_and_return(remaining) };
            elements.append(&mut last_entries);
        }
        // Ensure we don't destroy these chunks' contents in `Drop`, only free their memory
        self.used_chunks.set(0);
        elements
    }

    /// Checks if `additional` elements can be inserted into the arena without creating a new chunk
    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        debug_assert_ne!(size_of::<T>(), 0);
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr_() - self.ptr.get().addr_();
        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    /// Ensures there's enough space in the current chunk to fit `additional` objects. If not, it
    /// will create a new chunk.
    #[inline]
    fn ensure_capacity(&self, additional: usize) {
        if !self.can_allocate(additional) {
            self.grow(additional);
            debug_assert!(self.can_allocate(additional));
        }
    }

    /// Allocate a contiguous slice of data and return a pointer to the start of the slice. The
    /// slice is uninitialized (which is why we return a pointer), and you must initialize it before
    /// calling other arena methods or dropping the arena, or you will cause UB.
    ///
    /// # Safety
    ///
    /// You must initialize the slice before calling other arena methods or dropping the arena. If
    /// iterating, you must initialize the slice before advancing the iterator.
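    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original docs), assuming a root re-export of `TypedArena`:
    ///
    /// ```
    /// use rustc_arena_modified::TypedArena;
    ///
    /// let arena: TypedArena<u32> = TypedArena::new();
    /// unsafe {
    ///     let start = arena.alloc_raw_slice(3);
    ///     for i in 0..3 {
    ///         // Initialize every element before touching the arena again.
    ///         start.add(i).write(i as u32);
    ///     }
    /// }
    /// assert_eq!(arena.iter().copied().collect::<Vec<_>>(), vec![0, 1, 2]);
    /// ```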
    #[inline]
    pub unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert_ne!(len, 0);

        self.len.set(self.len.get() + len);

        if size_of::<T>() == 0 {
            // ZSTs have no memory, so we won't allocate.
            // Remember: a random non-null pointer is a valid reference to a ZST
            return NonNull::<T>::dangling().as_ptr();
        }
        self.ensure_capacity(len);

        let start_ptr = self.ptr.get();
        self.ptr.set(start_ptr.add(len));
        start_ptr
    }

    /// Grows the arena: creates a new chunk which will hold at least `additional` elements,
    /// or reuses a chunk if we have extras.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        debug_assert_ne!(size_of::<T>(), 0);
        let used_chunks = self.used_chunks.get();
        let mut chunks = self.chunks.borrow_mut();
        let mut reused_a_chunk = false;
        for potential_reuse_idx in used_chunks..chunks.len() {
            let potential_reuse_chunk = &mut chunks[potential_reuse_idx];
            if potential_reuse_chunk.capacity >= additional {
                // We found a chunk that can hold the additional elements, so we'll use it.
                // Make sure to update the # entries; since this is the last chunk, we unset (0) it
                // even though there are actually additional (see `ArenaChunk.entries` doc)
                potential_reuse_chunk.entries = 0;
                // Set ptr and end to the reused chunk
                self.ptr.set(potential_reuse_chunk.storage);
                self.end.set(potential_reuse_chunk.end());
                if used_chunks != potential_reuse_idx {
                    // We have to ensure the reused chunk is the next one
                    chunks.swap(used_chunks, potential_reuse_idx);
                }
                reused_a_chunk = true;
                break;
            }
        }

        if !reused_a_chunk {
            // Actually grow: insert a chunk at `used_chunks` with the required capacity
            unsafe {
                // We need the element size to convert chunk sizes (ranging from
                // PAGE to HUGE_PAGE bytes) to element counts.
                let elem_size = max(1, size_of::<T>());
                let mut new_cap;
                if let Some(last_chunk) = used_chunks.checked_sub(1).map(|i| &mut chunks[i]) {
                    // If a type is `!needs_drop`, we don't need to keep track of how many elements
                    // the chunk stores - the field will be ignored anyway.
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr_() - last_chunk.storage.addr_();
                    // Set # entries since this is no longer the last chunk
                    last_chunk.entries = used_bytes / size_of::<T>();

                    // If the previous chunk's capacity is less than HUGE_PAGE
                    // bytes, then this chunk will be at least double the previous
                    // chunk's size.
                    new_cap = last_chunk.capacity.min(HUGE_PAGE / elem_size / 2);
                    new_cap *= 2;
                } else {
                    new_cap = PAGE / elem_size;
                }
                // Also ensure that this chunk can fit `additional`.
                new_cap = max(additional, new_cap);

                let mut chunk = ArenaChunk::<T>::new(new_cap);
                // Set ptr and end to the new chunk
                self.ptr.set(chunk.storage);
                self.end.set(chunk.end());

                // Add the chunk at index `used_chunks` (`used_chunks` is incremented at the end of
                // this function)
                let last_index = chunks.len();
                chunks.push(chunk);
                if used_chunks < last_index {
                    chunks.swap(used_chunks, last_index);
                }
            }
        }

        self.used_chunks.set(used_chunks + 1);
    }

    /// Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    /// chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        debug_assert_ne!(size_of::<T>(), 0);
        // Determine how much was filled.
        let start = last_chunk.storage.addr_();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr_();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let diff = (end - start) / size_of::<T>();
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.storage);
    }
}

impl<T> TypedArenaGen<T, Shared> {
    /// Iterates all allocated elements in the arena behind a shared reference.
    ///
    /// The iterator can handle new objects being allocated. If you allocate new objects they will
    /// be added to the end. If the iterator has already ended and you allocate new objects, it will
    /// suddenly have more elements; if you don't want that behavior use `fuse`.
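    ///
    /// # Example
    ///
    /// A minimal sketch (not from the original docs), assuming a root re-export of `TypedArena`:
    ///
    /// ```
    /// use rustc_arena_modified::TypedArena;
    ///
    /// let arena: TypedArena<u32> = TypedArena::new();
    /// arena.alloc(1);
    /// let mut iter = arena.iter();
    /// assert_eq!(iter.next(), Some(&1));
    /// // Elements allocated mid-iteration show up at the end.
    /// arena.alloc(2);
    /// assert_eq!(iter.next(), Some(&2));
    /// assert_eq!(iter.next(), None);
    /// ```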
    #[inline]
    pub fn iter(&self) -> Iter<'_, T> {
        Iter::new(self)
    }
}

impl<T: Debug> Debug for TypedArena<T> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "TypedArena")?;
        f.debug_list().entries(self.iter()).finish()
    }
}

impl<T, RM: RefMutability> Drop for TypedArenaGen<T, RM> {
    fn drop(&mut self) {
        if size_of::<T>() == 0 {
            // These invariants always hold, we only assert them here
            debug_assert!(self.ptr.get().is_null());
            debug_assert!(self.end.get().is_null());
            debug_assert_eq!(self.chunks.borrow().len(), 0);
            debug_assert_eq!(self.used_chunks.get(), 0);

            // Drop `len` ZSTs.
            // Remember: a dangling pointer is a valid ZST reference, `drop_in_place` will only run
            // the ZST's drop code (which probably shouldn't rely on the address, since it was
            // allocated into an arena and therefore already in an effectively undefined location,
            // without any adjacent structures)
            for _ in 0..self.len() {
                unsafe {
                    drop_in_place(NonNull::<T>::dangling().as_ptr());
                }
            }
        } else {
            // `ArenaChunk`'s drop ensures the memory itself is freed, but we have to drop the
            // contents here, because the chunks can't: they don't always know how many of their
            // entries are initialized.
            unsafe {
                // Determine how much was filled.
                let mut chunks_borrow = self.chunks.borrow_mut();
                // Remove unused chunks (we don't need to destroy because we've already dropped or moved
                // their contents)
                for _ in 0..(chunks_borrow.len() - self.used_chunks.get()) {
                    chunks_borrow.pop();
                }
                // Drop elements in the used chunks
                if let Some(mut last_chunk) = chunks_borrow.pop() {
                    // Drop the contents of the last chunk.
                    self.clear_last_chunk(&mut last_chunk);
                    // The last chunk will be dropped. Destroy all other chunks.
                    for chunk in chunks_borrow.iter_mut() {
                        chunk.destroy(chunk.entries);
                    }
                }
            }
        }
    }
}

impl<'a, T> IntoIterator for &'a TypedArenaGen<T, Shared> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl<'a, T, RM: RefMutability> IntoIterator for &'a mut TypedArenaGen<T, RM> {
    type Item = &'a mut T;
    type IntoIter = IterMut<'a, T>;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}

unsafe impl<T: Send, RM: RefMutability> Send for TypedArenaGen<T, RM> {}

impl<'a, T> PtrIter<'a, T> {
    /// Create a new iterator for the arena
    #[inline]
    fn new<RM: RefMutability>(arena: &'a TypedArenaGen<T, RM>) -> Self {
        // SAFETY: Same repr, this transmuted version won't be publicly exposed or used incorrectly
        // (see [PtrIter] type doc), and we're not actually violating any borrow rules.
        let arena = unsafe { transmute::<&'a TypedArenaGen<T, RM>, &'a TypedArena<T>>(arena) };
        let chunks = arena.chunks.borrow();
        let chunk = chunks.first();
        Self {
            arena,
            chunk_index: 0,
            chunk_data: chunk.map_or(NonNull::dangling(), |c| NonNull::new(c.storage).unwrap()),
            chunk_remaining_entries: chunk.map_or(0, |c| c.entries),
            element_index: 0,
        }
    }

    /// Get the number of remaining elements, assuming there are no new ones
    #[inline]
    pub fn remaining(&self) -> usize {
        self.arena.len() - self.element_index
    }

    /// Whether we have a next element
    #[inline]
    pub fn has_next(&self) -> bool {
        self.remaining() > 0
    }
}

impl<'a, T> Iterator for PtrIter<'a, T> {
    type Item = NonNull<T>;

    /// Gets the next element as a pointer
    fn next(&mut self) -> Option<Self::Item> {
        if !self.has_next() {
            return None;
        }

        let element = self.chunk_data;
        self.element_index += 1;

        // If this is a ZST we only need to count the # of items to iterate, and `chunk_data` is
        // already a dangling pointer from `Self::new` since there are no chunks.
        if size_of::<T>() != 0 {
            // If chunk_remaining_entries is 0, we actually still have entries but are on the last
            // chunk. We'll run out when `has_next` returns false.
            if self.chunk_remaining_entries == 1 {
                // We've exhausted the current chunk, so move to the next one
                self.chunk_index += 1;
                let chunks = self.arena.chunks.borrow();
                let chunk = chunks.get(self.chunk_index).expect(
                    "ArenaIter::next invariant error: arena has more elements but no more chunks",
                );
                self.chunk_data = NonNull::new(chunk.storage).unwrap();
                self.chunk_remaining_entries = chunk.entries;
            } else {
                // SAFETY: We're still in the chunk, so we have a valid pointer and add is valid
                self.chunk_data =
                    unsafe { NonNull::new_unchecked(self.chunk_data.as_ptr().add(1)) };
                self.chunk_remaining_entries = self.chunk_remaining_entries.saturating_sub(1);
            }
        }
        Some(element)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // We will return at least `remaining()` more elements, but we can't return an upper bound
        // in case some get added
        (self.remaining(), None)
    }
}

impl<'a, T, RM: RefMutability> IterGen<'a, T, RM> {
    /// Create a new iterator for the arena
    #[inline]
    fn new(arena: &'a TypedArenaGen<T, RM>) -> Self {
        Self(PtrIter::new(arena), PhantomData)
    }

    /// Gets the next element as a pointer
    pub fn next_ptr(&mut self) -> Option<NonNull<T>> {
        self.0.next()
    }

    /// Get the number of remaining elements, assuming there are no new ones
    #[inline]
    pub fn remaining(&self) -> usize {
        self.0.remaining()
    }

    /// Whether we have a next element
    #[inline]
    pub fn has_next(&self) -> bool {
        self.0.has_next()
    }
}

impl<'a, T> PartialEq for PtrIter<'a, T> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        std::ptr::eq(self.arena, other.arena) && self.element_index == other.element_index
    }
}

impl<'a, T, RM: RefMutability> PartialEq for IterGen<'a, T, RM> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

impl<'a, T, RM: RefMutability> Iterator for IterGen<'a, T, RM> {
    type Item = RM::Ref<'a, T>;

    /// Gets the next element as a reference. Mutable or shared depends on the `RM` type parameter.
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // SAFETY: The value is initialized, because the chunk has more entries and (important for
        // the last chunk) the arena has more elements
        self.next_ptr().map(|e| unsafe { RM::from_non_null(e) })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.0.size_hint()
    }
}

impl<T, const N: usize> IterWithFastAlloc<T> for std::array::IntoIter<T, N> {
    #[inline]
    fn alloc_into<RM: RefMutability>(self, arena: &TypedArenaGen<T, RM>) -> RM::SliceRef<'_, T> {
        let len = self.len();
        if len == 0 {
            return RM::empty();
        }
        // Move the content to the arena by copying and then forgetting it.
        unsafe {
            let start_ptr = arena.alloc_raw_slice(len);
            self.as_slice()
                .as_ptr()
                .copy_to_nonoverlapping(start_ptr, len);
            forget(self);
            RM::from_raw_parts(start_ptr, len)
        }
    }
}

impl<T> IterWithFastAlloc<T> for Vec<T> {
    #[inline]
    fn alloc_into<RM: RefMutability>(
        mut self,
        arena: &TypedArenaGen<T, RM>,
    ) -> RM::SliceRef<'_, T> {
        let len = self.len();
        if len == 0 {
            return RM::empty();
        }
        // Move the content to the arena by copying and then forgetting it.
        unsafe {
            let start_ptr = arena.alloc_raw_slice(len);
            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            self.set_len(0);
            RM::from_raw_parts(start_ptr, len)
        }
    }
}

impl<A: smallvec::Array> IterWithFastAlloc<A::Item> for SmallVec<A> {
    #[inline]
    fn alloc_into<RM: RefMutability>(
        mut self,
        arena: &TypedArenaGen<A::Item, RM>,
    ) -> RM::SliceRef<'_, A::Item> {
        let len = self.len();
        if len == 0 {
            return RM::empty();
        }
        // Move the content to the arena by copying and then forgetting it.
        unsafe {
            let start_ptr = arena.alloc_raw_slice(len);
            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            self.set_len(0);
            RM::from_raw_parts(start_ptr, len)
        }
    }
}

impl RefMutability for Shared {
    type Ref<'a, T> = &'a T where T: 'a;
    type SliceRef<'a, T> = &'a [T] where T: 'a;

    fn empty<'a, T>() -> Self::SliceRef<'a, T> {
        &[]
    }

    unsafe fn from_ptr<'a, T>(t: *mut T) -> Self::Ref<'a, T> {
        &*t
    }

    unsafe fn from_raw_parts<'a, T>(t: *mut T, len: usize) -> Self::SliceRef<'a, T> {
        from_raw_parts(t, len)
    }

    fn as_ref<T>(t: Self::Ref<'_, T>) -> &T {
        t
    }

    fn as_slice_ref<T>(t: Self::SliceRef<'_, T>) -> &[T] {
        t
    }
}

impl RefMutability for Mutable {
    type Ref<'a, T> = &'a mut T where T: 'a;
    type SliceRef<'a, T> = &'a mut [T] where T: 'a;

    fn empty<'a, T>() -> Self::SliceRef<'a, T> {
        &mut []
    }

    unsafe fn from_ptr<'a, T>(t: *mut T) -> Self::Ref<'a, T> {
        &mut *t
    }

    unsafe fn from_raw_parts<'a, T>(t: *mut T, len: usize) -> Self::SliceRef<'a, T> {
        from_raw_parts_mut(t, len)
    }

    fn as_ref<T>(t: Self::Ref<'_, T>) -> &T {
        t
    }

    fn as_slice_ref<T>(t: Self::SliceRef<'_, T>) -> &[T] {
        t
    }
}

mod private {
    use super::{Mutable, Shared};

    pub trait Sealed {}

    impl Sealed for Shared {}
    impl Sealed for Mutable {}
}