// sync_arena/lib.rs

1#![doc = include_str!("../README.md")]
2#![cfg_attr(feature = "std-reentrant-lock", feature(reentrant_lock))]
3#![cfg_attr(feature = "may_dangle", feature(dropck_eyepatch))]
4#![cfg_attr(feature = "bench", feature(test))]
5#![allow(clippy::mut_from_ref)]
6
7use std::alloc::Layout;
8use std::cell::{Cell, RefCell};
9use std::marker::PhantomData;
10use std::mem::{self, MaybeUninit};
11use std::ptr::{self, NonNull};
12use std::{cmp, slice};
13
14use smallvec::SmallVec;
15
16use impls::Mutex;
17use impls::Reentrant;
18
19mod impls;
20
/// Runs `f` in a dedicated, never-inlined stack frame.
///
/// Used to keep cold fallback paths (e.g. the slow path of
/// `DroplessArena::alloc_from_iter`) out of the hot caller's code.
#[inline(never)]
#[cold]
fn outline<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    f()
}
27
/// One chunk of arena storage: a leaked boxed slice of uninitialized
/// elements, plus a count of how many leading elements are initialized.
struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk (created by `Box::leak`,
    /// reclaimed in `Drop`).
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    ///
    /// Only maintained by `TypedArena::grow` when `T` needs drop; it is
    /// ignored by `DroplessArena` and for `!needs_drop` types.
    entries: usize,
}
34
cfg_if::cfg_if! {
    if #[cfg(feature = "may_dangle")] {
        // With the nightly `dropck_eyepatch` feature we promise the drop
        // checker that dropping a chunk never inspects values of type `T`
        // (it only frees raw storage), so `T` may contain references that
        // have already expired by the time the chunk is dropped.
        unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
            fn drop(&mut self) {
                // SAFETY: `storage` came from `Box::leak` in `ArenaChunk::new`,
                // so rebuilding the `Box` deallocates it exactly once. The
                // elements are `MaybeUninit<T>` and are therefore not dropped
                // here; `destroy` is responsible for dropping initialized ones.
                unsafe { drop(Box::from_raw(self.storage.as_mut())) }
            }
        }
    } else {
        impl<T> Drop for ArenaChunk<T> {
            fn drop(&mut self) {
                // SAFETY: `storage` came from `Box::leak` in `ArenaChunk::new`,
                // so rebuilding the `Box` deallocates it exactly once. The
                // elements are `MaybeUninit<T>` and are therefore not dropped
                // here; `destroy` is responsible for dropping initialized ones.
                unsafe { drop(Box::from_raw(self.storage.as_mut())) }
            }
        }
    }
}
50
impl<T> ArenaChunk<T> {
    /// Allocates a chunk with room for `capacity` uninitialized elements.
    ///
    /// # Safety
    ///
    /// NOTE(review): nothing in this body is itself unsafe; the `unsafe`
    /// marker presumably exists to signal that the caller takes on the
    /// chunk's init-tracking obligations (see `destroy`) — TODO confirm.
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// Drops the first `len` elements in place; the storage itself is freed
    /// later by `Drop`.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                // slice[..len].assume_init_drop();

                /// See [`MaybeUninit::slice_assume_init_mut`].
                pub const unsafe fn slice_assume_init_mut<T>(
                    slice: &mut [MaybeUninit<T>],
                ) -> &mut [T] {
                    // SAFETY: `MaybeUninit<T>` has the same layout as `T`;
                    // the caller guarantees the elements are initialized.
                    unsafe { &mut *(slice as *mut [MaybeUninit<T>] as *mut [T]) }
                }
                ptr::drop_in_place(slice_assume_init_mut(&mut slice[..len]));
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements,
                // so the `ptr == end` check in `TypedArena::alloc` never
                // triggers a grow for ZSTs after the first chunk.
                ptr::without_provenance_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}
106
// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
/// Initial chunk size in bytes (4 KiB — a typical Linux page).
const PAGE: usize = 4096;
/// Chunk-size growth cap in bytes (2 MiB — a typical Linux huge page).
const HUGE_PAGE: usize = 2 * 1024 * 1024;
114
/// An arena that can hold objects of only one type.
///
/// All public methods take `lock` before touching `ptr`/`end`/`chunks`,
/// which is what allows the `Send`/`Sync` impls further down.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,

    /// Reentrant lock guarding all internal state; reentrancy matters
    /// because `alloc_from_iter`'s iterator may itself allocate here.
    lock: Mutex<()>,
}
133
134impl<T> Default for TypedArena<T> {
135    /// Creates a new `TypedArena`.
136    fn default() -> TypedArena<T> {
137        TypedArena {
138            // We set both `ptr` and `end` to 0 so that the first call to
139            // alloc() will trigger a grow().
140            ptr: Cell::new(ptr::null_mut()),
141            end: Cell::new(ptr::null_mut()),
142            chunks: Default::default(),
143            _own: PhantomData,
144            lock: Mutex::create(()),
145        }
146    }
147}
148
impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        let _unused = self.lock.reentrant_lock();
        // `ptr == end` means the current chunk is full (or, on first use,
        // both are null), so allocate a new chunk first.
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if size_of::<T>() == 0 {
                // ZSTs occupy no storage: bump `ptr`'s *address* purely to
                // count allocations (consumed by `clear_last_chunk`), and
                // hand out a well-aligned dangling pointer.
                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().add(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    /// Returns whether the current chunk has room for `additional` more
    /// elements. Panics on arithmetic overflow of the byte count.
    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    /// Reserves space for `len` contiguous elements (growing if needed) and
    /// returns a pointer to the start of the reservation. The caller must
    /// initialize all `len` slots.
    #[inline]
    fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let start_ptr = self.ptr.get();
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        let _unused = self.lock.reentrant_lock();
        // Despite the similarlty with `DroplessArena`, we cannot reuse their fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track that some uninitialized elements are followed by some initialized elements,
        // else we might accidentally drop uninitialized memory if something panics or if the
        // iterator doesn't fill all the length we expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
        assert!(size_of::<T>() != 0);

        let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
        if vec.is_empty() {
            return &mut [];
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        let start_ptr = self.alloc_raw_slice(len);
        unsafe {
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            // `set_len(0)` forgets the moved-out elements so they are not
            // double-dropped when `vec` goes out of scope.
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }

    /// Grows the arena.
    ///
    /// Allocates a new chunk big enough for at least `additional` elements
    /// (doubling the previous chunk's size up to `HUGE_PAGE` bytes) and
    /// points `ptr`/`end` into it.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}
303
cfg_if::cfg_if! {
    if #[cfg(feature = "may_dangle")] {
        // `may_dangle` variant: see the matching impl on `ArenaChunk` for why
        // this is sound — drop only runs destructors of initialized elements.
        unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
            fn drop(&mut self) {
                unsafe {
                    // Determine how much was filled.
                    let mut chunks_borrow = self.chunks.borrow_mut();
                    if let Some(mut last_chunk) = chunks_borrow.pop() {
                        // Drop the contents of the last chunk: only it can be
                        // partially filled, and `self.ptr` marks the fill point.
                        self.clear_last_chunk(&mut last_chunk);
                        // The last chunk will be dropped. Destroy all other chunks.
                        // SAFETY: `grow` recorded `entries` for every full chunk.
                        for chunk in chunks_borrow.iter_mut() {
                            chunk.destroy(chunk.entries);
                        }
                    }
                    // Box handles deallocation of `last_chunk` and `self.chunks`.
                }
            }
        }
    } else {
        impl<T> Drop for TypedArena<T> {
            fn drop(&mut self) {
                unsafe {
                    // Determine how much was filled.
                    let mut chunks_borrow = self.chunks.borrow_mut();
                    if let Some(mut last_chunk) = chunks_borrow.pop() {
                        // Drop the contents of the last chunk: only it can be
                        // partially filled, and `self.ptr` marks the fill point.
                        self.clear_last_chunk(&mut last_chunk);
                        // The last chunk will be dropped. Destroy all other chunks.
                        // SAFETY: `grow` recorded `entries` for every full chunk.
                        for chunk in chunks_borrow.iter_mut() {
                            chunk.destroy(chunk.entries);
                        }
                    }
                    // Box handles deallocation of `last_chunk` and `self.chunks`.
                }
            }
        }
    }
}
343
// SAFETY: every public method takes `lock` before touching `ptr`/`end`/
// `chunks`, so the interior mutability is synchronized. Moving the arena
// across threads moves its `T`s, hence `T: Send`; sharing it additionally
// hands out references to `T` from multiple threads, hence `T: Sync`.
unsafe impl<T: Send> Send for TypedArena<T> {}
unsafe impl<T: Send + Sync> Sync for TypedArena<T> {}
346
/// Rounds `val` down to the nearest multiple of `align`.
/// `align` must be a power of two (checked in debug builds).
#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    let mask = align - 1;
    val & !mask
}
352
/// Rounds `val` up to the nearest multiple of `align`.
/// `align` must be a power of two (checked in debug builds).
/// Note: `val + align - 1` can overflow for `val` near `usize::MAX`,
/// matching the original's behavior (panic in debug, wrap in release).
#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    let mask = align - 1;
    (val + mask) & !mask
}
358
// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
/// Minimum alignment that `DroplessArena::end` is always kept at.
const DROPLESS_ALIGNMENT: usize = align_of::<usize>();
362
/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
///
/// Because nothing stored here needs `Drop`, chunks only track raw bytes and
/// dropping the arena frees storage without running destructors.
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,

    /// Reentrant lock guarding `start`/`end`/`chunks`; reentrancy matters
    /// because `alloc_from_iter`'s iterator may itself allocate here.
    lock: Mutex<()>,
}
384
// SAFETY: every public method takes `lock` before touching the cells, and the
// arena only stores `!needs_drop` plain data (asserted in `alloc`/`alloc_slice`/
// `alloc_from_iter`), so it is safe to move and share across threads.
unsafe impl Send for DroplessArena {}
unsafe impl Sync for DroplessArena {}
387
388impl Default for DroplessArena {
389    #[inline]
390    fn default() -> DroplessArena {
391        DroplessArena {
392            // We set both `start` and `end` to 0 so that the first call to
393            // alloc() will trigger a grow().
394            start: Cell::new(ptr::null_mut()),
395            end: Cell::new(ptr::null_mut()),
396            chunks: Default::default(),
397            lock: Mutex::create(()),
398        }
399    }
400}
401
impl DroplessArena {
    /// Allocates a fresh chunk with room for a `layout`-sized allocation
    /// (plus alignment slack) and resets `start`/`end` into it.
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
            self.start.set(chunk.start());

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the allocation
            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
            debug_assert!(chunk.start().addr() <= end);

            self.end.set(chunk.end().with_addr(end));

            chunks.push(chunk);
        }
    }

    /// Allocates `layout.size()` bytes aligned to `layout.align()` by bumping
    /// `end` downwards, growing a new chunk when the current one is exhausted.
    ///
    /// # Panics
    ///
    /// Panics if `layout.size() == 0`.
    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        let _unused = self.lock.reentrant_lock();
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
        loop {
            let start = self.start.get().addr();
            let old_end = self.end.get();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
                    // preserves alignment as both `end` and `bytes` are already
                    // aligned to DROPLESS_ALIGNMENT.
                    self.end.set(new_end);
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }

    /// Allocates a single value, returning a mutable reference into the arena.
    ///
    /// # Panics
    ///
    /// Panics if `T` needs drop or is zero-sized.
    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        let _unused = self.lock.reentrant_lock();
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    ///  - Zero-sized types
    ///  - Zero-length slices
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        let _unused = self.lock.reentrant_lock();
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            // SAFETY: `alloc_raw` returned a fresh region sized/aligned for
            // `slice`, so it cannot overlap the source.
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Used by `Lift` to check whether this slice is allocated
    /// in this arena.
    ///
    /// Note: only the slice's start address is tested against each chunk's
    /// byte range.
    #[inline]
    pub fn contains_slice<T>(&self, slice: &[T]) -> bool {
        let _unused = self.lock.reentrant_lock();
        for chunk in self.chunks.borrow_mut().iter_mut() {
            let ptr = slice.as_ptr().cast::<u8>().cast_mut();
            if chunk.start() <= ptr && chunk.end() >= ptr {
                return true;
            }
        }
        false
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it. Will panic if passed an empty string.
    ///
    /// Panics:
    ///
    ///  - Zero-length string
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let _unused = self.lock.reentrant_lock();
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len`, and that
    /// that memory stays allocated and not shared for the lifetime of `self`. This must hold even
    /// if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us, even
                        // though it was supposed to give us `len`
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }

    /// Allocates the iterator's elements into a contiguous slice.
    ///
    /// Fast path: when `size_hint` is exact, space is reserved up front and
    /// filled directly. Otherwise the elements are buffered in a `SmallVec`
    /// first (outlined, cold) to handle reentrancy.
    ///
    /// # Panics
    ///
    /// Panics if `T` is zero-sized or needs drop.
    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        let _unused = self.lock.reentrant_lock();
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => {
                outline(move || -> &mut [T] {
                    // Takes care of reentrancy.
                    let mut vec: SmallVec<[_; 8]> = iter.collect();
                    if vec.is_empty() {
                        return &mut [];
                    }
                    // Move the content to the arena by copying it and then forgetting
                    // the content of the SmallVec
                    unsafe {
                        let len = vec.len();
                        let start_ptr =
                            self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
                        vec.set_len(0);
                        slice::from_raw_parts_mut(start_ptr, len)
                    }
                })
            }
        }
    }
}
628
/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
// Fix: the marker types `IsCopy`/`IsNotCopy` are defined in *this* crate, so
// they must be referenced through `$crate` (as `DroplessArena`/`TypedArena`
// already are). The previous hard-coded `rustc_arena::` paths only resolved
// for users that happened to depend on a crate named `rustc_arena`.
#[macro_export]
macro_rules! declare_arena {
    ([$($a:tt $name:ident: $ty:ty,)*])=> {
        #[derive(Default)]
        pub struct Arena<'tcx> {
            pub dropless: $crate::DroplessArena,
            $($name: $crate::TypedArena<$ty>,)*
        }

        pub trait ArenaAllocatable<'tcx, C = $crate::IsNotCopy>: Sized {
            #[allow(clippy::mut_from_ref)]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self;
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self];
        }

        // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
        impl<'tcx, T: Copy> ArenaAllocatable<'tcx, $crate::IsCopy> for T {
            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                arena.dropless.alloc(self)
            }
            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self] {
                arena.dropless.alloc_from_iter(iter)
            }
        }
        $(
            impl<'tcx> ArenaAllocatable<'tcx, $crate::IsNotCopy> for $ty {
                #[inline]
                #[allow(clippy::mut_from_ref)]
                fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                    // `!needs_drop` values can use the cheaper dropless arena.
                    if !::std::mem::needs_drop::<Self>() {
                        arena.dropless.alloc(self)
                    } else {
                        arena.$name.alloc(self)
                    }
                }

                #[inline]
                #[allow(clippy::mut_from_ref)]
                fn allocate_from_iter(
                    arena: &'tcx Arena<'tcx>,
                    iter: impl ::std::iter::IntoIterator<Item = Self>,
                ) -> &'tcx mut [Self] {
                    if !::std::mem::needs_drop::<Self>() {
                        arena.dropless.alloc_from_iter(iter)
                    } else {
                        arena.$name.alloc_from_iter(iter)
                    }
                }
            }
        )*

        impl<'tcx> Arena<'tcx> {
            #[inline]
            #[allow(clippy::mut_from_ref)]
            pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&'tcx self, value: T) -> &mut T {
                value.allocate_on(self)
            }

            // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
            #[inline]
            #[allow(clippy::mut_from_ref)]
            pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
                if value.is_empty() {
                    return &mut [];
                }
                self.dropless.alloc_slice(value)
            }

            #[inline]
            pub fn alloc_str(&self, string: &str) -> &str {
                if string.is_empty() {
                    return "";
                }
                self.dropless.alloc_str(string)
            }

            #[allow(clippy::mut_from_ref)]
            pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
                &'tcx self,
                iter: impl ::std::iter::IntoIterator<Item = T>,
            ) -> &mut [T] {
                T::allocate_from_iter(self, iter)
            }
        }
    };
}
738
// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
/// Marker: the allocated type is `Copy`, so the `DroplessArena` is used.
pub struct IsCopy;
/// Marker: the allocated type is not `Copy`; its `TypedArena` is used when
/// the type needs drop.
pub struct IsNotCopy;
743
744#[cfg(test)]
745mod tests;