1#![allow(dead_code)]
2#![allow(unused_variables)]
3#![allow(unused_mut)]
4#![allow(unused_imports)]
5#![allow(clippy::legacy_numeric_constants)]
6#![allow(clippy::unwrap_or_default)]
7#![allow(clippy::collapsible_if)]
8#![allow(clippy::let_and_return)]
9#![allow(clippy::collapsible_else_if)]
10
11use cfg_if::cfg_if;
12use std::alloc::{alloc, dealloc, Layout};
13#[cfg(feature = "single_thread_fast")]
14use std::cell::Cell;
15use std::cell::UnsafeCell;
16use std::collections::HashMap;
17use std::marker::PhantomData;
18use std::mem::{self, MaybeUninit};
19use std::ptr::{self, NonNull};
20use std::slice;
21use std::sync::{
22 atomic::{AtomicUsize, Ordering},
23 Mutex,
24};
25use std::vec::Vec;
26
27#[cfg(feature = "arena_module")]
29pub mod arena;
30pub mod core;
31pub mod size_classes;
32
/// Callback type for receiving human-readable diagnostic messages.
///
/// Boxed so it can be stored as a field; `Send + Sync + 'static` so the
/// sink may be invoked from any thread for the lifetime of the program.
pub type DiagnosticsSink = Box<dyn Fn(&str) + Send + Sync + 'static>;
35
36pub mod error;
37
38#[cfg(feature = "lockfree")]
39pub mod lockfree;
40
41#[cfg(feature = "thread_local")]
42pub mod thread_local;
43
44#[cfg(feature = "virtual_memory")]
45pub mod virtual_memory;
46
47#[cfg(feature = "debug")]
48pub mod debug;
49
50#[cfg(feature = "slab")]
51pub mod slab;
52
53pub use crate::core::{ArenaCheckpoint, ArenaStats, AtomicCounter, Chunk, DebugStats, MemoryPool};
55
56#[cfg(feature = "arena_module")]
58pub use crate::arena::{Arena, ArenaBuilder, Scope};
59
60pub use crate::error::ArenaError;
62
63#[cfg(not(feature = "arena_module"))]
64pub use self::legacy_arena::{Arena, ArenaBuilder, FeatureBundle, Scope};
65
66#[cfg(target_arch = "x86_64")]
67use std::arch::x86_64::*;
68
69mod legacy_arena;
71
/// Alignment (bytes) used for chunk allocations. 64 matches the common
/// cache-line size on x86-64/aarch64.
const CHUNK_ALIGN: usize = 64;
/// Default size of a freshly allocated chunk (64 KiB).
const DEFAULT_CHUNK_SIZE: usize = 64 * 1024;
/// Smallest permitted chunk size (one typical 4 KiB page).
const MIN_CHUNK_SIZE: usize = 4096;
/// Largest permitted chunk size (16 MiB).
const MAX_CHUNK_SIZE: usize = 16 * 1024 * 1024;
/// Low-bit mask derived from `CHUNK_ALIGN`, for alignment arithmetic.
const ALIGNMENT_MASK: usize = CHUNK_ALIGN - 1;
// NOTE(review): threshold semantics inferred from the name — confirm at
// use sites elsewhere in the crate (likely the minimum size, in bytes,
// at which SIMD copy/fill paths are attempted).
const SIMD_THRESHOLD: usize = 1024;

// Compile-time flag: true on architectures where this crate has native
// SIMD code paths (x86_64 / aarch64), false everywhere else.
cfg_if! {
    if #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] {
        const HAS_NATIVE_SIMD: bool = true;
    } else {
        const HAS_NATIVE_SIMD: bool = false;
    }
}
87
88const FAST_ALLOC_THRESHOLD: usize = 1024; const PREFETCH_WARMUP_SIZE: usize = 8; use size_classes::SIZE_CLASSES;
93
/// Feature-switched "bytes used" counter.
///
/// In the default (multi-threaded) build this wraps an [`AtomicUsize`];
/// with the `single_thread_fast` feature it is a plain [`Cell<usize>`],
/// trading thread-safety for cheaper non-atomic updates. The
/// `#[repr(transparent)]` keeps the wrapper layout-identical to whichever
/// counter it contains. `Ordering` arguments are accepted in both builds
/// but ignored in the `Cell` build.
#[repr(transparent)]
struct UsedCounter {
    #[cfg(feature = "single_thread_fast")]
    inner: Cell<usize>,
    #[cfg(not(feature = "single_thread_fast"))]
    inner: AtomicUsize,
}

impl UsedCounter {
    /// Creates a counter starting at `initial`.
    #[inline]
    fn new(initial: usize) -> Self {
        #[cfg(feature = "single_thread_fast")]
        let inner = Cell::new(initial);
        #[cfg(not(feature = "single_thread_fast"))]
        let inner = AtomicUsize::new(initial);
        Self { inner }
    }

    /// Reads the current value. `order` is unused in the `Cell` build.
    #[inline]
    fn load(&self, order: Ordering) -> usize {
        #[cfg(feature = "single_thread_fast")]
        {
            self.inner.get()
        }
        #[cfg(not(feature = "single_thread_fast"))]
        {
            self.inner.load(order)
        }
    }

    /// Overwrites the counter with `v`. `order` is unused in the `Cell` build.
    #[inline]
    fn store(&self, v: usize, order: Ordering) {
        #[cfg(feature = "single_thread_fast")]
        {
            self.inner.set(v);
        }
        #[cfg(not(feature = "single_thread_fast"))]
        {
            self.inner.store(v, order);
        }
    }

    /// Adds `delta` and returns the value the counter held *before* the add,
    /// mirroring `AtomicUsize::fetch_add` in both builds.
    #[inline]
    fn fetch_add(&self, delta: usize, order: Ordering) -> usize {
        #[cfg(feature = "single_thread_fast")]
        {
            let prev = self.inner.get();
            self.inner.set(prev + delta);
            prev
        }
        #[cfg(not(feature = "single_thread_fast"))]
        {
            self.inner.fetch_add(delta, order)
        }
    }
}
151
/// Rounds `offset` up to the next multiple of `align`.
///
/// `align` must be a non-zero power of two — the mask arithmetic below is
/// only correct under that precondition, so it is now checked in debug
/// builds (previously an invalid `align` silently produced a wrong result).
/// The addition can overflow for offsets within `align` of `usize::MAX`;
/// debug builds will panic there as usual for arithmetic overflow.
#[inline]
fn align_up(offset: usize, align: usize) -> usize {
    debug_assert!(
        align.is_power_of_two(),
        "align_up requires a non-zero power-of-two alignment, got {align}"
    );
    (offset + align - 1) & !(align - 1)
}
156
/// Branch-prediction hint shim: currently the identity function.
///
/// Stable Rust exposes no `likely` intrinsic, so this exists only to mark
/// expected-true conditions at call sites; it has no effect on codegen.
#[inline]
fn likely(b: bool) -> bool {
    b
}
164
/// Point-in-time occupancy snapshot of a [`Pool`].
///
/// All fields are plain counters, so `Debug`, `Clone`, `Copy`, and equality
/// are derived (previously missing, which made the type awkward to log or
/// assert against in tests); adding derives is backward-compatible.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PoolStats {
    /// Total number of slots the pool currently owns (occupied + free).
    pub capacity: usize,
    /// Slots currently handed out as live `Pooled` handles.
    pub in_use: usize,
    /// Vacant slots available for reuse without growing storage.
    pub free: usize,
}
170
/// Backing state for [`Pool`], always accessed through the pool's
/// `UnsafeCell`.
struct PoolInner<T> {
    /// Slot array; `None` marks an empty (free or never-used) slot.
    storage: Vec<Option<T>>,
    /// Indices into `storage` that are vacant and ready for reuse.
    free: Vec<usize>,
    /// Number of occupied slots, maintained by `alloc`/`put_back`.
    in_use: usize,
}
176
/// A simple index-based object pool that hands out RAII [`Pooled`] handles
/// which return their slot to the pool on drop.
///
/// Interior mutability goes through `UnsafeCell`, which also makes the type
/// `!Sync`: a `Pool` is only usable from one thread at a time.
///
/// NOTE(review): `alloc` can grow `storage` (a `Vec`) while a `&T` obtained
/// from `Pooled::deref` on another handle is still live; a reallocation
/// would invalidate that reference. Audit whether this aliasing pattern is
/// sound or whether slots need stable addresses (e.g. boxed entries).
pub struct Pool<T> {
    inner: UnsafeCell<PoolInner<T>>,
}
180
/// Owning handle to a single slot of a [`Pool`].
///
/// Dereferences to the stored `T`; on `Drop` the slot is cleared and its
/// index is pushed back onto the pool's free list (see `Pool::put_back`).
pub struct Pooled<'pool, T> {
    /// Index of this handle's slot inside the pool's storage vector.
    index: usize,
    /// The owning pool, borrowed for the handle's lifetime.
    pool: &'pool Pool<T>,
}
185
186impl<T> Default for Pool<T> {
187 fn default() -> Self {
188 Self::new()
189 }
190}
191
impl<T> Pool<T> {
    /// Creates an empty pool with no preallocated slots.
    #[inline]
    pub fn new() -> Self {
        Self::with_capacity(0)
    }

    /// Creates a pool with `capacity` empty slots already allocated and
    /// queued on the free list, so the first `capacity` allocations will
    /// not grow the backing storage.
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        let mut storage = Vec::with_capacity(capacity);
        storage.resize_with(capacity, || None);
        let mut free = Vec::with_capacity(capacity);
        for i in 0..capacity {
            free.push(i);
        }
        Pool {
            inner: UnsafeCell::new(PoolInner {
                storage,
                free,
                in_use: 0,
            }),
        }
    }

    /// Stores `value` in a free slot (growing storage if none is free) and
    /// returns an RAII handle that releases the slot when dropped.
    ///
    /// NOTE(review): the `push` below may reallocate `storage` while `&T`
    /// references produced by `Pooled::deref` on other handles are live,
    /// which would leave them dangling — confirm soundness or switch to
    /// stable-address storage.
    pub fn alloc<'pool>(&'pool self, value: T) -> Pooled<'pool, T> {
        // SAFETY: `UnsafeCell` makes `Pool` `!Sync`, so no other thread can
        // touch `inner`; the exclusive reference is confined to this block
        // and no user code runs while it is held.
        unsafe {
            let inner = &mut *self.inner.get();
            // Prefer recycling a vacant index; otherwise append a new slot.
            let index = if let Some(i) = inner.free.pop() {
                i
            } else {
                let idx = inner.storage.len();
                inner.storage.push(None);
                idx
            };
            debug_assert!(inner.storage[index].is_none());
            inner.storage[index] = Some(value);
            inner.in_use += 1;
            Pooled { index, pool: self }
        }
    }

    /// Convenience wrapper: allocates `T::default()`.
    #[inline]
    pub fn alloc_default<'pool>(&'pool self) -> Pooled<'pool, T>
    where
        T: Default,
    {
        self.alloc(T::default())
    }

    /// Returns a snapshot of current slot usage.
    #[inline]
    pub fn stats(&self) -> PoolStats {
        // SAFETY: shared read of the cell; `Pool` is `!Sync`, so there is no
        // concurrent writer.
        unsafe {
            let inner = &*self.inner.get();
            PoolStats {
                capacity: inner.storage.len(),
                in_use: inner.in_use,
                free: inner.free.len(),
            }
        }
    }

    /// Clears slot `index` and returns it to the free list. Called from
    /// `Pooled::drop`; a no-op if the slot is already empty.
    #[inline]
    fn put_back(&self, index: usize) {
        // SAFETY: exclusive access for the duration of this block; see `alloc`.
        // NOTE(review): `storage[index] = None` runs `T::drop` while the
        // exclusive borrow of `inner` is held — a `Drop` impl that re-enters
        // this pool would alias it. Worth auditing.
        unsafe {
            let inner = &mut *self.inner.get();
            if inner.storage[index].is_some() {
                inner.storage[index] = None;
                inner.in_use -= 1;
                inner.free.push(index);
            }
        }
    }
}
264
impl<'pool, T> std::ops::Deref for Pooled<'pool, T> {
    type Target = T;

    /// Borrows the stored value. Panics only if the slot is empty, which
    /// would indicate a pool-internal invariant violation.
    fn deref(&self) -> &Self::Target {
        // SAFETY: shared read through the `UnsafeCell`; `Pool` is `!Sync`,
        // so no concurrent mutation.
        // NOTE(review): the returned `&T` points into the pool's `Vec` and
        // can dangle if `Pool::alloc` reallocates storage while it is live.
        unsafe {
            let inner = &*self.pool.inner.get();
            inner.storage[self.index]
                .as_ref()
                .expect("pooled slot empty")
        }
    }
}
277
impl<'pool, T> std::ops::DerefMut for Pooled<'pool, T> {
    /// Mutably borrows the stored value. Panics only if the slot is empty,
    /// which would indicate a pool-internal invariant violation.
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: `&mut self` guarantees this handle's slot is not borrowed
        // elsewhere through *this* handle; `Pool` is `!Sync`.
        // NOTE(review): forming `&mut PoolInner` here overlaps any `&T`
        // borrows other live handles hold into the same `Vec` — audit this
        // aliasing pattern.
        unsafe {
            let inner = &mut *self.pool.inner.get();
            inner.storage[self.index]
                .as_mut()
                .expect("pooled slot empty")
        }
    }
}
288
impl<'pool, T> Drop for Pooled<'pool, T> {
    /// Returns the slot to the pool, dropping the stored value.
    fn drop(&mut self) {
        self.pool.put_back(self.index);
    }
}
294
/// Mutex-protected wrapper that makes an [`Arena`] shareable across threads.
///
/// Compiled out when the `single_thread_fast` feature is enabled, since that
/// configuration forgoes cross-thread use.
#[cfg(not(feature = "single_thread_fast"))]
pub struct SyncArena {
    inner: Mutex<Arena>,
}
299
300#[cfg(not(feature = "single_thread_fast"))]
301impl Default for SyncArena {
302 fn default() -> Self {
303 Self::new()
304 }
305}
306
307#[cfg(not(feature = "single_thread_fast"))]
308impl SyncArena {
309 pub fn new() -> Self {
310 SyncArena {
311 inner: Mutex::new(Arena::new()),
312 }
313 }
314
315 pub fn with_capacity(bytes: usize) -> Self {
316 SyncArena {
317 inner: Mutex::new(Arena::with_capacity(bytes)),
318 }
319 }
320
321 pub fn scope<F, R>(&self, f: F) -> R
322 where
323 F: for<'scope, 'arena> FnOnce(&Scope<'scope, 'arena>) -> R,
324 {
325 let guard = self
326 .inner
327 .lock()
328 .unwrap_or_else(|poison| poison.into_inner());
329 guard.scope(f)
330 }
331
332 pub fn stats(&self) -> crate::core::ArenaStats {
333 let guard = self
334 .inner
335 .lock()
336 .unwrap_or_else(|poison| poison.into_inner());
337 crate::core::ArenaStats {
338 bytes_used: crate::core::AtomicCounter::new(guard.stats().bytes_used),
339 bytes_allocated: crate::core::AtomicCounter::new(guard.stats().bytes_allocated),
340 allocation_count: crate::core::AtomicCounter::new(guard.stats().allocation_count),
341 chunk_count: guard.stats().chunk_count,
342 }
343 }
344
345 pub fn bytes_allocated(&self) -> usize {
346 let guard = self
347 .inner
348 .lock()
349 .unwrap_or_else(|poison| poison.into_inner());
350 guard.stats().bytes_allocated
351 }
352}
353
354#[cfg(feature = "lockfree")]
360pub use lockfree::{LockFreeAllocator, LockFreeBuffer, LockFreePool, LockFreeStats, ThreadSlab};
361
362#[cfg(feature = "thread_local")]
364pub use thread_local::{
365 cleanup_thread_cache, clear_thread_cache, reset_thread_cache, try_thread_local_alloc,
366};
367
368#[cfg(feature = "virtual_memory")]
370pub use virtual_memory::VirtualMemoryRegion;
371
372#[cfg(feature = "debug")]
374pub use debug::{AllocationInfo, DEBUG_STATE, FREED_MAGIC, GUARD_MAGIC};