ra_ap_rustc_arena/lib.rs
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy all the objects within them at
//! once, when the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
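//!
//! A minimal usage sketch (illustrative only; both arena types are defined in
//! this file):
//!
//! ```ignore (illustrative)
//! let typed = TypedArena::default();
//! let s: &mut String = typed.alloc(String::from("hello"));
//! // `s` is freed only when `typed` is dropped, never individually.
//!
//! let dropless = DroplessArena::default();
//! let nums: &mut [u32] = dropless.alloc_slice(&[1, 2, 3]);
//! ```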

#![doc(
    html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
    test(no_crate_inject, attr(deny(warnings)))
)]
#![doc(rust_logo)]
#![feature(rustdoc_internals)]
#![feature(core_intrinsics)]
#![feature(dropck_eyepatch)]
#![feature(new_uninit)]
#![feature(maybe_uninit_slice)]
#![feature(decl_macro)]
#![feature(rustc_attrs)]
#![cfg_attr(test, feature(test))]
#![feature(strict_provenance)]
#![deny(unsafe_op_in_unsafe_fn)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
#![allow(internal_features)]
#![allow(clippy::mut_from_ref)] // Arena allocators are one of the places where this pattern is fine.

use smallvec::SmallVec;

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::slice;
use std::{cmp, intrinsics};

/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
    fn drop(&mut self) {
        unsafe { drop(Box::from_raw(self.storage.as_mut())) }
    }
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(&mut slice[..len]));
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if mem::size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::invalid_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;
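
// Illustrative progression (not from the source): with `size_of::<T>() == 8`, the
// chunk capacities chosen by `TypedArena::grow` below are roughly 512, 1024,
// 2048, ... elements (4 KiB, 8 KiB, 16 KiB, ...), doubling until a chunk holds
// HUGE_PAGE / 8 = 262_144 elements (2 MiB), after which the size stays flat.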

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if mem::size_of::<T>() == 0 {
                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().add(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    #[inline]
    fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(mem::size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let start_ptr = self.ptr.get();
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Despite the similarity with `DroplessArena`, we cannot reuse their fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track that some uninitialized elements are followed by some initialized elements,
        // else we might accidentally drop uninitialized memory if something panics or if the
        // iterator doesn't fill all the length we expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
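        //
        // (Illustrative reentrant call, not from the source: the iterator itself
        // allocates into the same arena while we iterate, e.g.
        // `arena.alloc_from_iter(items.iter().map(|it| lower(&arena, it)))`, where
        // the hypothetical `lower` calls `arena.alloc(..)` internally.)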
        assert!(mem::size_of::<T>() != 0);

        let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
        if vec.is_empty() {
            return &mut [];
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        let start_ptr = self.alloc_raw_slice(len);
        unsafe {
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, mem::size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / mem::size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if mem::size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / mem::size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}

#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}
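
// Illustrative values (not from the source): align_down(13, 8) == 8,
// align_up(13, 8) == 16, and already-aligned values are unchanged:
// align_up(16, 8) == 16.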

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = mem::align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
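///
/// A minimal usage sketch (illustrative only):
///
/// ```ignore (illustrative)
/// let arena = DroplessArena::default();
/// let n: &mut u32 = arena.alloc(7);                // a single `Copy` value
/// let xs: &mut [u32] = arena.alloc_slice(&[1, 2]); // a copied slice
/// let s: &str = arena.alloc_str("interned");       // copied string data
/// ```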
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            // We set both `start` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
            self.start.set(chunk.start());

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the
            // allocation is padded by at least `DROPLESS_ALIGNMENT - 1` bytes.
            debug_assert!(chunk.start().addr() <= end);

            self.end.set(chunk.end().with_addr(end));

            chunks.push(chunk);
        }
    }

    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
        loop {
            let start = self.start.get().addr();
            let old_end = self.end.get();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);
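            // Illustrative numbers (not from the source; 64-bit target, so
            // DROPLESS_ALIGNMENT == 8): with `end == 0x1000` and a
            // `Layout { size: 5, align: 4 }`, `bytes` is `align_up(5, 8) == 8`, so
            // `sub == 0xff8` and `new_end == align_down(0xff8, 4) == 0xff8`, which
            // is still 8-aligned.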

            // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
            unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
                    // preserves alignment as both `end` and `bytes` are already
                    // aligned to DROPLESS_ALIGNMENT.
                    self.end.set(new_end);
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());
        assert!(mem::size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    /// - Zero-sized types
    /// - Zero-length slices
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(mem::size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it. Will panic if passed an empty string.
    ///
    /// Panics:
    ///
    /// - Zero-length string
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len`, and that
    /// that memory stays allocated and not shared for the lifetime of `self`. This must hold even
    /// if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us, even
                        // though it was supposed to give us `len`
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }

    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(mem::size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => {
                outline(move || -> &mut [T] {
                    // Takes care of reentrancy.
                    let mut vec: SmallVec<[_; 8]> = iter.collect();
                    if vec.is_empty() {
                        return &mut [];
                    }
                    // Move the content to the arena by copying it and then forgetting
                    // the content of the SmallVec
                    unsafe {
                        let len = vec.len();
                        let start_ptr =
                            self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
                        vec.set_len(0);
                        slice::from_raw_parts_mut(start_ptr, len)
                    }
                })
            }
        }
    }
}

/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
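/// A hedged invocation sketch (the entry name and type are made up for
/// illustration; real callers pass their own list):
///
/// ```ignore (illustrative)
/// declare_arena!([
///     [] hir_nodes: Vec<String>,
/// ]);
/// ```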
#[rustc_macro_transparency = "semitransparent"]
pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
    #[derive(Default)]
    pub struct Arena<'tcx> {
        pub dropless: $crate::DroplessArena,
        $($name: $crate::TypedArena<$ty>,)*
    }

    pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
        #[allow(clippy::mut_from_ref)]
        fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self;
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter<'a>(
            arena: &'a Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'a mut [Self];
    }

    // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self {
            arena.dropless.alloc(self)
        }
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter<'a>(
            arena: &'a Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'a mut [Self] {
            arena.dropless.alloc_from_iter(iter)
        }
    }
    $(
        impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
            #[inline]
            fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc(self)
                } else {
                    arena.$name.alloc(self)
                }
            }

            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter<'a>(
                arena: &'a Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'a mut [Self] {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc_from_iter(iter)
                } else {
                    arena.$name.alloc_from_iter(iter)
                }
            }
        }
    )*

    impl<'tcx> Arena<'tcx> {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&self, value: T) -> &mut T {
            value.allocate_on(self)
        }

        // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
            if value.is_empty() {
                return &mut [];
            }
            self.dropless.alloc_slice(value)
        }

        #[inline]
        pub fn alloc_str(&self, string: &str) -> &str {
            if string.is_empty() {
                return "";
            }
            self.dropless.alloc_str(string)
        }

        #[allow(clippy::mut_from_ref)]
        pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
            &self,
            iter: impl ::std::iter::IntoIterator<Item = T>,
        ) -> &mut [T] {
            T::allocate_from_iter(self, iter)
        }
    }
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;

#[cfg(test)]
mod tests;