sync_arena/lib.rs
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
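//!
//! # Example
//!
//! A minimal usage sketch (illustrative; it assumes the crate is imported as
//! `sync_arena` and uses `TypedArena` as defined below):
//!
//! ```ignore
//! use sync_arena::TypedArena;
//!
//! // Values allocated here are dropped together when the arena is dropped.
//! let arena: TypedArena<String> = TypedArena::default();
//! let s: &mut String = arena.alloc(String::from("hello"));
//! s.push_str(", world");
//! ```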

use remutex::ReentrantMutex;
use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::{cmp, slice};

use smallvec::SmallVec;

/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

impl<T> Drop for ArenaChunk<T> {
    fn drop(&mut self) {
        unsafe { drop(Box::from_raw(self.storage.as_mut())) }
    }
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                // slice[..len].assume_init_drop();

                /// See [`MaybeUninit::slice_assume_init_mut`].
                pub const unsafe fn slice_assume_init_mut<T>(
                    slice: &mut [MaybeUninit<T>],
                ) -> &mut [T] {
                    unsafe { &mut *(slice as *mut [MaybeUninit<T>] as *mut [T]) }
                }
                ptr::drop_in_place(slice_assume_init_mut(&mut slice[..len]));
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::without_provenance_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
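//
// For example, with 8-byte elements a chunk sequence grows roughly as 512,
// 1024, 2048, ... elements, until a chunk reaches HUGE_PAGE / 8 = 262144
// elements (2 MiB); after that, new chunks stop doubling. (Illustrative
// arithmetic, not part of the original comment.)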
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,

    /// Reentrant lock serializing access to the arena across threads. It is
    /// reentrant so that an allocation may itself allocate into the same arena
    /// (e.g. from an iterator passed to `alloc_from_iter`).
    lock: ReentrantMutex<()>,
}

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
            lock: ReentrantMutex::new(()),
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
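    ///
    /// A minimal usage sketch (illustrative, not from the original docs):
    ///
    /// ```ignore
    /// let arena: TypedArena<Vec<u8>> = TypedArena::default();
    /// let value: &mut Vec<u8> = arena.alloc(vec![1, 2, 3]);
    /// value.push(4);
    /// ```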
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        let _unused = self.lock.lock().unwrap();
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if size_of::<T>() == 0 {
                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().add(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    #[inline]
    fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let start_ptr = self.ptr.get();
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
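    ///
    /// A minimal usage sketch (illustrative, not from the original docs):
    ///
    /// ```ignore
    /// let arena: TypedArena<String> = TypedArena::default();
    /// let names: &mut [String] =
    ///     arena.alloc_from_iter((0..3).map(|i| format!("name{i}")));
    /// assert_eq!(names.len(), 3);
    /// ```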
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        let _unused = self.lock.lock().unwrap();
        // Despite the similarity with `DroplessArena`, we cannot reuse their fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track that some uninitialized elements are followed by some initialized elements,
        // else we might accidentally drop uninitialized memory if something panics or if the
        // iterator doesn't fill all the length we expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
        assert!(size_of::<T>() != 0);

        let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
        if vec.is_empty() {
            return &mut [];
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        let start_ptr = self.alloc_raw_slice(len);
        unsafe {
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

impl<T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}
unsafe impl<T: Send + Sync> Sync for TypedArena<T> {}

#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}
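
// Worked examples for the helpers above (illustrative arithmetic, not from the
// original source): align_up(5, 8) == 8 and align_down(13, 8) == 8, while
// already-aligned values are returned unchanged, e.g. align_up(16, 8) == 16.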

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,

    /// Reentrant lock serializing access to the arena across threads. It is
    /// reentrant so that an allocation may itself allocate into the same arena
    /// (e.g. from an iterator passed to `alloc_from_iter`).
    lock: ReentrantMutex<()>,
}

unsafe impl Send for DroplessArena {}
unsafe impl Sync for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            // We set both `start` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            lock: ReentrantMutex::new(()),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
            self.start.set(chunk.start());

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the allocation
            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
            debug_assert!(chunk.start().addr() <= end);

            self.end.set(chunk.end().with_addr(end));

            chunks.push(chunk);
        }
    }

    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        let _unused = self.lock.lock().unwrap();
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
        loop {
            let start = self.start.get().addr();
            let old_end = self.end.get();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
                    // preserves alignment as both `end` and `bytes` are already
                    // aligned to DROPLESS_ALIGNMENT.
                    self.end.set(new_end);
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        let _unused = self.lock.lock().unwrap();
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    ///  - Zero-sized types
    ///  - Zero-length slices
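    ///
    /// A minimal usage sketch (illustrative, not from the original docs):
    ///
    /// ```ignore
    /// let arena = DroplessArena::default();
    /// let copied: &mut [u32] = arena.alloc_slice(&[1, 2, 3]);
    /// copied[0] = 10;
    /// ```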
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        let _unused = self.lock.lock().unwrap();
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Used by `Lift` to check whether this slice is allocated
    /// in this arena.
    #[inline]
    pub fn contains_slice<T>(&self, slice: &[T]) -> bool {
        let _unused = self.lock.lock().unwrap();
        for chunk in self.chunks.borrow_mut().iter_mut() {
            let ptr = slice.as_ptr().cast::<u8>().cast_mut();
            if chunk.start() <= ptr && chunk.end() >= ptr {
                return true;
            }
        }
        false
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it. Will panic if passed an empty string.
    ///
    /// Panics:
    ///
    ///  - Zero-length string
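    ///
    /// A minimal usage sketch (illustrative, not from the original docs):
    ///
    /// ```ignore
    /// let arena = DroplessArena::default();
    /// let s: &str = arena.alloc_str("interned");
    /// assert_eq!(s, "interned");
    /// ```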
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let _unused = self.lock.lock().unwrap();
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len`, and that
    /// that memory stays allocated and not shared for the lifetime of `self`. This must hold even
    /// if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us, even
                        // though it was supposed to give us `len`
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }

    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        let _unused = self.lock.lock().unwrap();
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => {
                outline(move || -> &mut [T] {
                    // Takes care of reentrancy.
                    let mut vec: SmallVec<[_; 8]> = iter.collect();
                    if vec.is_empty() {
                        return &mut [];
                    }
                    // Move the content to the arena by copying it and then forgetting
                    // the content of the SmallVec
                    unsafe {
                        let len = vec.len();
                        let start_ptr =
                            self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
                        vec.set_len(0);
                        slice::from_raw_parts_mut(start_ptr, len)
                    }
                })
            }
        }
    }
}

/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
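/// # Example
///
/// A minimal invocation sketch (illustrative; the `[]` tag, field name, and
/// element type are made up for demonstration and are not part of this crate):
///
/// ```ignore
/// sync_arena::declare_arena!([
///     [] droppable_vecs: Vec<String>,
/// ]);
///
/// let arena = Arena::default();
/// // `Vec<String>` needs drop, so it is routed to its `TypedArena`.
/// let v: &mut Vec<String> = arena.alloc(vec![String::from("x")]);
/// ```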
#[macro_export]
macro_rules! declare_arena {
    ([$($a:tt $name:ident: $ty:ty,)*]) => {
        #[derive(Default)]
        pub struct Arena<'tcx> {
            pub dropless: $crate::DroplessArena,
            $($name: $crate::TypedArena<$ty>,)*
        }

        pub trait ArenaAllocatable<'tcx, C = $crate::IsNotCopy>: Sized {
            #[allow(clippy::mut_from_ref)]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self;
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self];
        }

        // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
        impl<'tcx, T: Copy> ArenaAllocatable<'tcx, $crate::IsCopy> for T {
            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                arena.dropless.alloc(self)
            }
            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self] {
                arena.dropless.alloc_from_iter(iter)
            }
        }
        $(
            impl<'tcx> ArenaAllocatable<'tcx, $crate::IsNotCopy> for $ty {
                #[inline]
                fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                    if !::std::mem::needs_drop::<Self>() {
                        arena.dropless.alloc(self)
                    } else {
                        arena.$name.alloc(self)
                    }
                }

                #[inline]
                #[allow(clippy::mut_from_ref)]
                fn allocate_from_iter(
                    arena: &'tcx Arena<'tcx>,
                    iter: impl ::std::iter::IntoIterator<Item = Self>,
                ) -> &'tcx mut [Self] {
                    if !::std::mem::needs_drop::<Self>() {
                        arena.dropless.alloc_from_iter(iter)
                    } else {
                        arena.$name.alloc_from_iter(iter)
                    }
                }
            }
        )*

        impl<'tcx> Arena<'tcx> {
            #[inline]
            #[allow(clippy::mut_from_ref)]
            pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&'tcx self, value: T) -> &mut T {
                value.allocate_on(self)
            }

            // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
            #[inline]
            #[allow(clippy::mut_from_ref)]
            pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
                if value.is_empty() {
                    return &mut [];
                }
                self.dropless.alloc_slice(value)
            }

            #[inline]
            pub fn alloc_str(&self, string: &str) -> &str {
                if string.is_empty() {
                    return "";
                }
                self.dropless.alloc_str(string)
            }

            #[allow(clippy::mut_from_ref)]
            pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
                &'tcx self,
                iter: impl ::std::iter::IntoIterator<Item = T>,
            ) -> &mut [T] {
                T::allocate_from_iter(self, iter)
            }
        }
    };
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;

#[cfg(test)]
mod tests;