sync_arena/lib.rs
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
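//!
//! # Example
//!
//! A minimal usage sketch. The calls below assume this crate is compiled under
//! the name `sync_arena` (matching the directory name above).
//!
//! ```
//! // `TypedArena` owns its values and drops them when the arena is dropped.
//! let typed = sync_arena::TypedArena::default();
//! let s: &mut String = typed.alloc(String::from("hello"));
//! s.push_str(", world");
//! assert_eq!(s.as_str(), "hello, world");
//!
//! // `DroplessArena` holds values of many different types, as long as they
//! // don't need `Drop`.
//! let dropless = sync_arena::DroplessArena::default();
//! let n: &mut u32 = dropless.alloc(7);
//! assert_eq!(*n, 7);
//! ```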

use std::alloc::Layout;
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::sync::RwLock;
use std::{cmp, slice};

use smallvec::SmallVec;

/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

impl<T> Drop for ArenaChunk<T> {
    fn drop(&mut self) {
        unsafe { drop(Box::from_raw(self.storage.as_mut())) }
    }
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                // `slice[..len].assume_init_drop()` would be the natural way to
                // write this, but that method is unstable at the time of writing,
                // so use a local copy of `MaybeUninit::slice_assume_init_mut` and
                // drop the initialized prefix in place instead.

                /// See [`MaybeUninit::slice_assume_init_mut`].
                pub const unsafe fn slice_assume_init_mut<T>(
                    slice: &mut [MaybeUninit<T>],
                ) -> &mut [T] {
                    unsafe { &mut *(slice as *mut [MaybeUninit<T>] as *mut [T]) }
                }
                ptr::drop_in_place(slice_assume_init_mut(&mut slice[..len]));
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::without_provenance_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
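//
// For example (illustrative): with 4-byte elements a `TypedArena`'s chunk
// capacities grow as 1024, 2048, 4096, ... elements (4 KiB, 8 KiB, 16 KiB,
// ...); once a chunk reaches 524288 elements (2 MiB), later chunks stay at
// that size, unless a single allocation request needs even more.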
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: RwLock<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: RwLock<*mut T>,

    /// A vector of arena chunks.
    chunks: RwLock<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: RwLock::new(ptr::null_mut()),
            end: RwLock::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
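    ///
    /// A small usage sketch (this assumes the crate is named `sync_arena`):
    ///
    /// ```
    /// let arena = sync_arena::TypedArena::default();
    /// let v: &mut Vec<u32> = arena.alloc(vec![1, 2, 3]);
    /// v.push(4);
    /// assert_eq!(v.len(), 4);
    /// ```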
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if *self.ptr.read().unwrap() == *self.end.read().unwrap() {
            self.grow(1)
        }

        unsafe {
            if size_of::<T>() == 0 {
                {
                    let mut ptr = self.ptr.write().unwrap();
                    *ptr = ptr.wrapping_byte_add(1);
                }
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let mut p = self.ptr.write().unwrap();
                let ptr = *p;
                // Advance the pointer.
                *p = ptr.add(1);
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.read().unwrap().addr() - self.ptr.read().unwrap().addr();
        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    #[inline]
    fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let mut ptr = self.ptr.write().unwrap();
        let start_ptr = *ptr;
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { *ptr = start_ptr.add(len) }
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
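    ///
    /// A small usage sketch (this assumes the crate is named `sync_arena`):
    ///
    /// ```
    /// let arena = sync_arena::TypedArena::default();
    /// let strings: &mut [String] = arena.alloc_from_iter((0..3).map(|i| i.to_string()));
    /// assert_eq!(strings.len(), 3);
    /// assert_eq!(strings[2], "2");
    /// ```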
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Despite the similarity with `DroplessArena`, we cannot reuse their fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track that some uninitialized elements are followed by some initialized elements,
        // else we might accidentally drop uninitialized memory if something panics or if the
        // iterator doesn't fill all the length we expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
        assert!(size_of::<T>() != 0);

        let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
        if vec.is_empty() {
            return &mut [];
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        let start_ptr = self.alloc_raw_slice(len);
        unsafe {
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, size_of::<T>());
            let mut chunks = self.chunks.write().unwrap();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.read().unwrap().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            *self.ptr.write().unwrap() = chunk.start();
            *self.end.write().unwrap() = chunk.end();
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.read().unwrap().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        *self.ptr.write().unwrap() = last_chunk.start();
    }
}

impl<T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.write().unwrap();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}
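// Round `val` down (`align_down`) or up (`align_up`) to a multiple of `align`,
// which must be a power of two. For example, `align_down(13, 8)` is 8 and
// `align_up(13, 8)` is 16; already aligned values are returned unchanged.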
#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
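///
/// A small usage sketch (this assumes the crate is named `sync_arena`):
///
/// ```
/// let arena = sync_arena::DroplessArena::default();
/// let n = arena.alloc(42u32);
/// let bytes = arena.alloc_slice(&[1u8, 2, 3]);
/// let s = arena.alloc_str("hello");
/// assert_eq!(*n, 42);
/// assert_eq!(bytes.len(), 3);
/// assert_eq!(s, "hello");
/// ```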
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: RwLock<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
    end: RwLock<*mut u8>,

    /// A vector of arena chunks.
    chunks: RwLock<Vec<ArenaChunk>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            // We set both `start` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            start: RwLock::new(ptr::null_mut()),
            end: RwLock::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.write().unwrap();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
            *self.start.write().unwrap() = chunk.start();

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the allocation
            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
            debug_assert!(chunk.start().addr() <= end);

            *self.end.write().unwrap() = chunk.end().with_addr(end);

            chunks.push(chunk);
        }
    }

    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
        loop {
            let start = self.start.read().unwrap().addr();
            // Copy the end pointer out of the lock so the read guard is released
            // here: `self.end` is written below (and inside `grow`), and taking
            // the write lock while this thread still held a read guard on the
            // same `RwLock` could deadlock.
            let old_end = *self.end.read().unwrap();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
                    // preserves alignment as both `end` and `bytes` are already
                    // aligned to DROPLESS_ALIGNMENT.
                    *self.end.write().unwrap() = new_end;
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    /// - Zero-sized types
    /// - Zero-length slices
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Used by `Lift` to check whether this slice is allocated
    /// in this arena.
    #[inline]
    pub fn contains_slice<T>(&self, slice: &[T]) -> bool {
        for chunk in self.chunks.write().unwrap().iter_mut() {
            let ptr = slice.as_ptr().cast::<u8>().cast_mut();
            if chunk.start() <= ptr && chunk.end() >= ptr {
                return true;
            }
        }
        false
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it. Will panic if passed an empty string.
    ///
    /// Panics:
    ///
    /// - Zero-length string
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len`, and that
    /// that memory stays allocated and not shared for the lifetime of `self`. This must hold even
    /// if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us, even
                        // though it was supposed to give us `len`
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }
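    /// Allocates the elements of the iterator into a contiguous slice in the arena.
    ///
    /// A small usage sketch (this assumes the crate is named `sync_arena`):
    ///
    /// ```
    /// let arena = sync_arena::DroplessArena::default();
    /// let squares = arena.alloc_from_iter((1u32..=4).map(|i| i * i));
    /// assert_eq!(squares.to_vec(), vec![1, 4, 9, 16]);
    /// ```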
    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => {
                outline(move || -> &mut [T] {
                    // Takes care of reentrancy.
                    let mut vec: SmallVec<[_; 8]> = iter.collect();
                    if vec.is_empty() {
                        return &mut [];
                    }
                    // Move the content to the arena by copying it and then forgetting
                    // the content of the SmallVec
                    unsafe {
                        let len = vec.len();
                        let start_ptr =
                            self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
                        vec.set_len(0);
                        slice::from_raw_parts_mut(start_ptr, len)
                    }
                })
            }
        }
    }
}

/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
#[macro_export]
macro_rules! declare_arena {
    ([$($a:tt $name:ident: $ty:ty,)*]) => {
        #[derive(Default)]
        pub struct Arena<'tcx> {
            pub dropless: $crate::DroplessArena,
            $($name: $crate::TypedArena<$ty>,)*
        }

        pub trait ArenaAllocatable<'tcx, C = $crate::IsNotCopy>: Sized {
            #[allow(clippy::mut_from_ref)]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self;
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self];
        }

        // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
        impl<'tcx, T: Copy> ArenaAllocatable<'tcx, $crate::IsCopy> for T {
            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                arena.dropless.alloc(self)
            }
            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self] {
                arena.dropless.alloc_from_iter(iter)
            }
        }
        $(
            impl<'tcx> ArenaAllocatable<'tcx, $crate::IsNotCopy> for $ty {
                #[inline]
                fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                    if !::std::mem::needs_drop::<Self>() {
                        arena.dropless.alloc(self)
                    } else {
                        arena.$name.alloc(self)
                    }
                }

                #[inline]
                #[allow(clippy::mut_from_ref)]
                fn allocate_from_iter(
                    arena: &'tcx Arena<'tcx>,
                    iter: impl ::std::iter::IntoIterator<Item = Self>,
                ) -> &'tcx mut [Self] {
                    if !::std::mem::needs_drop::<Self>() {
                        arena.dropless.alloc_from_iter(iter)
                    } else {
                        arena.$name.alloc_from_iter(iter)
                    }
                }
            }
        )*

        impl<'tcx> Arena<'tcx> {
            #[inline]
            #[allow(clippy::mut_from_ref)]
            pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&'tcx self, value: T) -> &mut T {
                value.allocate_on(self)
            }

            // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
            #[inline]
            #[allow(clippy::mut_from_ref)]
            pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
                if value.is_empty() {
                    return &mut [];
                }
                self.dropless.alloc_slice(value)
            }

            #[inline]
            pub fn alloc_str(&self, string: &str) -> &str {
                if string.is_empty() {
                    return "";
                }
                self.dropless.alloc_str(string)
            }

            #[allow(clippy::mut_from_ref)]
            pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
                &'tcx self,
                iter: impl ::std::iter::IntoIterator<Item = T>,
            ) -> &mut [T] {
                T::allocate_from_iter(self, iter)
            }
        }
    };
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;

#[cfg(test)]
mod tests;