rustc_arena_modified/
lib.rs

//! Original code from [rustc_arena](https://doc.rust-lang.org/stable/nightly-rustc/rustc_arena/index.html).
//! There are some modifications, including converting unstable features to stable equivalents,
//! allocating shared references and supporting iteration, and adding the ability to filter and
//! coalesce elements behind a mutable reference.
//!
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at once, once the arena
//! itself is destroyed. They do not support deallocation of individual objects while the arena
//! itself is still alive. The benefit of an arena is very fast allocation; just a pointer bump.
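//!
//! For example, the following is a minimal usage sketch (not compiled as a doctest): it assumes
//! this crate's [`TypedArena`] keeps an `alloc` entry point like the upstream `rustc_arena`, with
//! the shared-reference behaviour mentioned above.
//!
//! ```ignore
//! use rustc_arena_modified::TypedArena;
//!
//! let arena: TypedArena<u32> = TypedArena::default();
//! let one = arena.alloc(1); // usually just a pointer bump
//! let two = arena.alloc(2);
//! assert_eq!(*one + *two, 3);
//! // `one`, `two`, and everything else in the arena are dropped together with `arena`.
//! ```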

// TODO:
//   - add tests for untested code (e.g. the dropless arena)

use std::mem::{align_of, transmute};

pub use self::typed_arena::{TypedArena, TypedArenaGen, TypedArenaMut};
pub use dropless_arena::DroplessArena;
#[cfg(feature = "slab")]
pub use slab_arena::SlabArena;

mod arena_chunk;
pub mod dropless_arena;
#[cfg(feature = "slab")]
pub mod slab_arena;
pub mod typed_arena;

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;
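
// The test below is only an illustration of the growth policy described above (it is not the
// code the arenas use to size their chunks): sizes double from PAGE and saturate at HUGE_PAGE.
#[cfg(test)]
mod chunk_growth_illustration {
    use super::{HUGE_PAGE, PAGE};

    #[test]
    fn chunk_sizes_double_then_saturate() {
        let mut size = PAGE;
        let mut sizes = Vec::new();
        for _ in 0..12 {
            sizes.push(size);
            size = (size * 2).min(HUGE_PAGE);
        }
        // 4 KiB, 8 KiB, 16 KiB, ... until the 2 MiB cap is reached and kept.
        assert_eq!(sizes[0], PAGE);
        assert!(sizes.windows(2).all(|w| w[1] == (w[0] * 2).min(HUGE_PAGE)));
        assert_eq!(*sizes.last().unwrap(), HUGE_PAGE);
    }
}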

/// Runs `f` on a rarely-taken path: `#[cold]` plus `#[inline(never)]` hint the optimizer to
/// keep the slow path's code out of the caller's hot path.
#[inline(never)]
#[cold]
fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

// region stable implementations of unstable functions
/// Stable stand-ins for the unstable pointer methods `wrapping_byte_offset`, `addr`, and
/// `with_addr` (hence the trailing underscores), implemented for both `*const T` and `*mut T`.
trait PtrUnstables<T: ?Sized> {
    #[must_use]
    fn wrapping_byte_offset_(self, count: isize) -> Self;
    #[must_use]
    fn addr_(self) -> usize;
    #[must_use]
    fn with_addr_(self, addr: usize) -> Self;
}

//noinspection DuplicatedCode
impl<T> PtrUnstables<T> for *const T {
    #[inline(always)]
    fn wrapping_byte_offset_(self, count: isize) -> Self {
        // Right now we can get away with using a regular wrapping offset and requiring alignment,
        // because we never use this with an unaligned count
        if count % align_of::<T>() as isize == 0 {
            self.wrapping_offset(count / align_of::<T>() as isize)
        } else {
            panic!("wrapping_byte_offset_ called with unaligned count")
        }
    }

    #[inline(always)]
    fn addr_(self) -> usize {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
        // provenance).
        #[allow(clippy::transmutes_expressible_as_ptr_casts)]
        unsafe {
            transmute(self.cast::<()>())
        }
    }

    #[inline]
    fn with_addr_(self, addr: usize) -> Self {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        //
        // In the meantime, this operation is defined to be "as if" it was
        // a wrapping_offset, so we can emulate it as such. This should properly
        // restore pointer provenance even under today's compiler.
        let self_addr = self.addr_() as isize;
        let dest_addr = addr as isize;
        let offset = dest_addr.wrapping_sub(self_addr);

        // This is the canonical desugaring of this operation
        self.wrapping_byte_offset_(offset)
    }
}

//noinspection DuplicatedCode
impl<T> PtrUnstables<T> for *mut T {
    #[inline(always)]
    fn wrapping_byte_offset_(self, count: isize) -> Self {
        // Right now we can get away with using a regular wrapping offset and requiring alignment,
        // because we never use this with an unaligned count
        if count % align_of::<T>() as isize == 0 {
            self.wrapping_offset(count / align_of::<T>() as isize)
        } else {
            panic!("wrapping_byte_offset_ called with unaligned count")
        }
    }

    #[inline(always)]
    fn addr_(self) -> usize {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
        // provenance).
        #[allow(clippy::transmutes_expressible_as_ptr_casts)]
        unsafe {
            transmute(self.cast::<()>())
        }
    }

    #[inline]
    fn with_addr_(self, addr: usize) -> Self {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        //
        // In the meantime, this operation is defined to be "as if" it was
        // a wrapping_offset, so we can emulate it as such. This should properly
        // restore pointer provenance even under today's compiler.
        let self_addr = self.addr_() as isize;
        let dest_addr = addr as isize;
        let offset = dest_addr.wrapping_sub(self_addr);

        // This is the canonical desugaring of this operation
        self.wrapping_byte_offset_(offset)
    }
}
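
// Small sanity checks (a sketch, not exhaustive) for the stable re-implementations above.
// They only exercise aligned offsets, which is the only way the helpers are meant to be called.
#[cfg(test)]
mod ptr_unstables_tests {
    use super::PtrUnstables;

    #[test]
    fn with_addr_round_trips_an_address() {
        let xs = [1u32, 2, 3, 4];
        let base: *const u32 = xs.as_ptr();
        let second = base.wrapping_offset(1);
        // Rebasing `base` onto `second`'s address should yield a pointer equal to `second`.
        assert_eq!(base.with_addr_(second.addr_()), second);
    }

    #[test]
    fn wrapping_byte_offset_moves_by_whole_elements() {
        let xs = [10u64, 20, 30];
        let base: *const u64 = xs.as_ptr();
        // Offsetting by two elements' worth of bytes lands on the third element.
        let third = base.wrapping_byte_offset_(2 * std::mem::size_of::<u64>() as isize);
        assert_eq!(unsafe { *third }, 30);
    }
}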