extern crate alloc;

use alloc::alloc::{alloc, dealloc, Layout};
use alloc::vec::Vec;
use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
use core::ptr::{self, NonNull};
use core::slice;
use core::sync::atomic::{AtomicUsize, Ordering};
use core::usize;
use std::sync::Mutex;

#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

const MIN_CHUNK_SIZE: usize = 64;
const MAX_CHUNK_SIZE: usize = 16 * 1024 * 1024;
const ALIGNMENT_MASK: usize = 63;
const PREFETCH_DISTANCE: usize = 8;
const PREFETCH_WARMUP_SIZE: usize = 64;

/// A fixed-capacity free list that recycles allocations for a single size class.
pub struct MemoryPool {
    slots: Vec<Option<NonNull<u8>>>,
    size_class: usize,
    capacity: usize,
}

impl MemoryPool {
    pub fn new(size_class: usize, capacity: usize) -> Self {
        Self {
            slots: Vec::with_capacity(capacity),
            size_class,
            capacity,
        }
    }

    /// Pops a previously recycled slot, if one is available.
    pub fn alloc(&mut self) -> Option<NonNull<u8>> {
        self.slots.pop().flatten()
    }

    /// Returns a slot to the pool; the pointer is dropped once the pool is full.
    pub fn dealloc(&mut self, ptr: NonNull<u8>) {
        if self.slots.len() < self.capacity {
            self.slots.push(Some(ptr));
        }
    }

    pub fn size_class(&self) -> usize {
        self.size_class
    }
}
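
// A minimal usage sketch for `MemoryPool` (illustrative only; the module name and
// test are not part of the original API surface): the pool starts empty, so
// `alloc` only returns pointers that were previously handed back via `dealloc`.
#[cfg(test)]
mod memory_pool_sketch {
    use super::*;

    #[test]
    fn recycles_returned_slots() {
        let mut pool = MemoryPool::new(64, 4);
        // Nothing has been returned yet, so there is nothing to hand out.
        assert!(pool.alloc().is_none());

        // Borrow a 64-byte block from a chunk and recycle it through the pool.
        let chunk = Chunk::new(64).expect("chunk allocation failed");
        pool.dealloc(chunk.ptr());
        assert_eq!(pool.alloc(), Some(chunk.ptr()));
    }
}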

pub struct AtomicCounter {
    value: AtomicUsize,
}

impl std::fmt::Debug for AtomicCounter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "AtomicCounter({})", self.value.load(Ordering::Relaxed))
    }
}

impl AtomicCounter {
    pub fn new(value: usize) -> Self {
        Self {
            value: AtomicUsize::new(value),
        }
    }

    pub fn load(&self, ordering: Ordering) -> usize {
        self.value.load(ordering)
    }

    pub fn store(&self, value: usize, ordering: Ordering) {
        self.value.store(value, ordering);
    }

    pub fn fetch_add(&self, value: usize, ordering: Ordering) -> usize {
        self.value.fetch_add(value, ordering)
    }
}

impl PartialEq<usize> for AtomicCounter {
    fn eq(&self, other: &usize) -> bool {
        self.load(Ordering::Acquire) == *other
    }
}

impl PartialEq for AtomicCounter {
    fn eq(&self, other: &Self) -> bool {
        self.load(Ordering::Acquire) == other.load(Ordering::Acquire)
    }
}

impl PartialOrd<usize> for AtomicCounter {
    fn partial_cmp(&self, other: &usize) -> Option<std::cmp::Ordering> {
        self.load(Ordering::Acquire).partial_cmp(other)
    }
}

impl std::fmt::Display for AtomicCounter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.load(Ordering::Acquire))
    }
}
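
// A small sketch (assumed usage, not from the original code) showing why
// `AtomicCounter` implements `PartialEq<usize>`, `PartialOrd<usize>`, and
// `Display`: counters can be compared against and printed as plain integers.
#[cfg(test)]
mod atomic_counter_sketch {
    use super::*;

    #[test]
    fn compares_against_plain_usize() {
        let counter = AtomicCounter::new(0);
        counter.fetch_add(3, Ordering::Relaxed);
        assert!(counter == 3);
        assert!(counter < 10);
        assert_eq!(format!("{}", counter), "3");
    }
}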

/// A 64-byte-aligned memory block that is carved up by atomic bump allocation.
#[repr(C)]
pub struct Chunk {
    ptr: NonNull<u8>,
    capacity: usize,
    used: AtomicUsize,
}

impl Chunk {
    pub fn new(capacity: usize) -> Result<Self, &'static str> {
        if capacity == 0 {
            return Err("Capacity must be nonzero");
        }
        let requested_capacity = capacity;
        // Round the capacity up to the next 64-byte boundary.
        let capacity = (capacity + ALIGNMENT_MASK) & !ALIGNMENT_MASK;
        eprintln!(
            "Attempting allocation: requested_capacity={} aligned_capacity={} alignment=64",
            requested_capacity, capacity
        );
        let layout = Layout::from_size_align(capacity, 64).map_err(|_| "Invalid layout")?;

        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            return Err("Failed to allocate memory");
        }

        Ok(Self {
            ptr: unsafe { NonNull::new_unchecked(ptr) },
            capacity,
            used: AtomicUsize::new(0),
        })
    }

    /// Bump-allocates `layout` from this chunk, retrying the CAS until it either
    /// succeeds or the chunk runs out of space.
    pub fn allocate(&self, layout: Layout) -> Option<*mut u8> {
        let size = layout.size();
        let align = layout.align();

        let mut current_used = self.used.load(Ordering::Acquire);
        loop {
            // Align the bump offset, then check that the allocation still fits.
            let start = (current_used + align - 1) & !(align - 1);
            let end = start + size;

            if end > self.capacity {
                return None;
            }

            match self.used.compare_exchange_weak(
                current_used,
                end,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => unsafe {
                    let ptr = self.ptr.as_ptr().add(start);
                    #[cfg(target_arch = "x86_64")]
                    if size >= PREFETCH_WARMUP_SIZE {
                        _mm_prefetch::<_MM_HINT_T0>(ptr as *const i8);
                    }
                    return Some(ptr);
                },
                // The CAS lost a race (or failed spuriously); retry with the
                // freshly observed offset instead of giving up.
                Err(observed) => current_used = observed,
            }
        }
    }

    pub fn reset(&self) {
        self.used.store(0, Ordering::Release);
    }

    pub fn capacity(&self) -> usize {
        self.capacity
    }

    pub fn ptr(&self) -> NonNull<u8> {
        self.ptr
    }

    pub fn used(&self) -> usize {
        self.used.load(Ordering::Acquire)
    }

    pub fn set_used(&self, value: usize) {
        self.used.store(value, Ordering::Release);
    }
}
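
// A minimal sketch of the bump-allocation path (illustrative; the module name
// is not part of the original code): offsets only move forward until `reset`
// rewinds them.
#[cfg(test)]
mod chunk_sketch {
    use super::*;

    #[test]
    fn bump_allocates_and_resets() {
        let chunk = Chunk::new(256).expect("chunk allocation failed");
        let layout = Layout::from_size_align(32, 8).expect("invalid layout");

        let first = chunk.allocate(layout).expect("first allocation failed");
        let second = chunk.allocate(layout).expect("second allocation failed");
        assert_ne!(first, second);
        assert_eq!(chunk.used(), 64);

        chunk.reset();
        assert_eq!(chunk.used(), 0);
    }
}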

impl Drop for Chunk {
    fn drop(&mut self) {
        unsafe {
            if self.capacity > 0 {
                if let Ok(layout) = Layout::from_size_align(self.capacity, 64) {
                    eprintln!(
                        "[Chunk::drop] dealloc ptr={:p} capacity={}",
                        self.ptr.as_ptr(),
                        self.capacity
                    );
                    eprintln!(
                        "[Chunk::drop] backtrace:\n{}",
                        std::backtrace::Backtrace::capture()
                    );
                    dealloc(self.ptr.as_ptr(), layout);
                } else {
                    // The same layout was validated in `Chunk::new`, so this branch is
                    // unreachable in practice. Deallocating with a different layout
                    // would be undefined behavior, so prefer leaking the chunk.
                    eprintln!(
                        "[Chunk::drop] could not reconstruct layout for ptr={:p} capacity={}; leaking chunk",
                        self.ptr.as_ptr(),
                        self.capacity
                    );
                }
            }
        }
    }
}

/// A saved position in the arena that allocations can later be rewound to.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ArenaCheckpoint {
    pub chunk_index: usize,
    pub chunk_offset: usize,
    pub checkpoint_id: usize,
    pub allocation_count: usize,
}

#[derive(Debug, Default, Clone)]
pub struct DebugStats {
    pub total_allocations: usize,
    pub active_checkpoints: usize,
    pub current_checkpoint_id: usize,
    pub corrupted_allocations: usize,
    pub leak_reports: usize,
}

pub struct ArenaStats {
    pub bytes_used: AtomicCounter,
    pub bytes_allocated: AtomicCounter,
    pub allocation_count: AtomicCounter,
    pub chunk_count: usize,
}

impl std::fmt::Debug for ArenaStats {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ArenaStats")
            .field("bytes_used", &self.bytes_used)
            .field("allocation_count", &self.allocation_count)
            .field("chunk_count", &self.chunk_count)
            .finish()
    }
}

impl ArenaStats {
    pub fn new() -> Self {
        Self {
            bytes_used: AtomicCounter::new(0),
            bytes_allocated: AtomicCounter::new(0),
            allocation_count: AtomicCounter::new(0),
            chunk_count: 0,
        }
    }

    pub fn bytes_used(&self) -> usize {
        self.bytes_used.load(Ordering::Acquire)
    }

    pub fn allocation_count(&self) -> usize {
        self.allocation_count.load(Ordering::Acquire)
    }
}

impl Default for ArenaStats {
    fn default() -> Self {
        Self::new()
    }
}

/// Core arena state: chunks, size-class pools, checkpoints, and feature-gated extras.
pub struct ArenaInner {
    pub chunks: Vec<Chunk>,
    pub current_chunk: AtomicUsize,
    pub checkpoints: Vec<ArenaCheckpoint>,
    pub current_checkpoint_id: usize,
    pub pools: Vec<MemoryPool>,
    #[cfg(feature = "stats")]
    pub stats: ArenaStats,
    #[cfg(feature = "virtual_memory")]
    pub virtual_region: Option<crate::virtual_memory::VirtualMemoryRegion>,
    #[cfg(feature = "thread_local")]
    pub thread_cache_active: bool,
    #[cfg(feature = "lockfree")]
    pub lockfree_buffer: Option<crate::lockfree::LockFreeBuffer>,
    #[cfg(feature = "lockfree")]
    pub lockfree_stats: crate::lockfree::LockFreeStats,
}

impl ArenaInner {
    pub fn new(initial_capacity: usize) -> Result<Self, &'static str> {
        let chunk = Chunk::new(initial_capacity)?;

        // One pool per power-of-two size class from 8 bytes up to 4 KiB.
        let mut pools = Vec::new();
        let mut size = 8;
        while size <= 4096 {
            pools.push(MemoryPool::new(size, 64));
            size *= 2;
        }

        let mut inner = Self {
            chunks: vec![chunk],
            current_chunk: AtomicUsize::new(0),
            checkpoints: Vec::new(),
            current_checkpoint_id: 0,
            pools,
            #[cfg(feature = "stats")]
            stats: ArenaStats::new(),
            #[cfg(feature = "virtual_memory")]
            virtual_region: None,
            #[cfg(feature = "thread_local")]
            thread_cache_active: true,
            #[cfg(feature = "lockfree")]
            lockfree_buffer: Some(crate::lockfree::LockFreeBuffer::new()),
            #[cfg(feature = "lockfree")]
            lockfree_stats: crate::lockfree::LockFreeStats::new(),
        };

        #[cfg(feature = "stats")]
        {
            inner
                .stats
                .bytes_allocated
                .store(initial_capacity, Ordering::Relaxed);
            inner.stats.chunk_count = inner.chunks.len();
        }

        Ok(inner)
    }

    pub fn allocate(&mut self, layout: Layout) -> Option<*mut u8> {
        let size = layout.size();

        // Small requests first try the free-list pool for the matching size class.
        if size <= 4096 {
            if let Some(ptr) = self
                .pools
                .iter_mut()
                .find(|p| p.size_class() >= size)
                .and_then(|p| p.alloc())
            {
                return Some(ptr.as_ptr());
            }
        }

        let current_chunk_idx = self.current_chunk.load(Ordering::Acquire);
        let chunk_idx = if current_chunk_idx >= self.chunks.len() {
            self.chunks.len().saturating_sub(1)
        } else {
            current_chunk_idx
        };

        if let Some(chunk) = self.chunks.get(chunk_idx) {
            if let Some(ptr) = chunk.allocate(layout) {
                #[cfg(feature = "stats")]
                {
                    self.stats.bytes_used.fetch_add(size, Ordering::Relaxed);
                    self.stats.allocation_count.fetch_add(1, Ordering::Relaxed);
                }
                return Some(ptr);
            }
        }

        None
    }

    pub fn add_chunk(&mut self, capacity: usize) -> Result<usize, &'static str> {
        let chunk = Chunk::new(capacity)?;
        let chunk_index = self.chunks.len();
        self.chunks.push(chunk);
        self.current_chunk.store(chunk_index, Ordering::Release);
        #[cfg(feature = "stats")]
        {
            self.stats.chunk_count = self.chunks.len();
            self.stats
                .bytes_allocated
                .fetch_add(capacity, Ordering::Relaxed);
        }
        Ok(chunk_index)
    }

    pub fn reset(&mut self) {
        for chunk in &mut self.chunks {
            chunk.reset();
        }
        self.current_chunk.store(0, Ordering::Release);
        self.checkpoints.clear();
        self.current_checkpoint_id = 0;

        #[cfg(feature = "stats")]
        {
            self.stats.bytes_used.store(0, Ordering::Release);
            self.stats.allocation_count.store(0, Ordering::Release);
        }

        #[cfg(feature = "thread_local")]
        {
            crate::thread_local::reset_thread_cache();
        }

        #[cfg(feature = "lockfree")]
        {
            if let Some(ref buffer) = self.lockfree_buffer {
                buffer.reset();
            }
        }
    }

    pub fn checkpoint(&mut self) -> ArenaCheckpoint {
        let current_chunk_idx = self.current_chunk.load(Ordering::Acquire);
        let current_chunk_idx = if current_chunk_idx >= self.chunks.len() {
            self.chunks.len().saturating_sub(1)
        } else {
            current_chunk_idx
        };
        let current_chunk = &self.chunks[current_chunk_idx];
        let chunk_offset = current_chunk.used();

        #[cfg(feature = "stats")]
        let alloc_count = self.stats.allocation_count.load(Ordering::Acquire);
        #[cfg(not(feature = "stats"))]
        let alloc_count = 0;

        let checkpoint = ArenaCheckpoint {
            chunk_index: current_chunk_idx,
            chunk_offset,
            checkpoint_id: self.current_checkpoint_id,
            allocation_count: alloc_count,
        };

        self.checkpoints.push(checkpoint);
        self.current_checkpoint_id += 1;

        checkpoint
    }

    pub fn rewind_to_checkpoint(&mut self, checkpoint: ArenaCheckpoint) {
        assert!(
            checkpoint.chunk_index < self.chunks.len(),
            "Invalid checkpoint: chunk index out of bounds"
        );
        assert!(
            checkpoint.chunk_offset <= self.chunks[checkpoint.chunk_index].capacity(),
            "Invalid checkpoint: offset exceeds chunk capacity"
        );

        self.current_chunk
            .store(checkpoint.chunk_index, Ordering::Release);
        for (idx, chunk) in self.chunks.iter_mut().enumerate() {
            if idx < checkpoint.chunk_index {
                continue;
            }
            if idx == checkpoint.chunk_index {
                // Restore the bump offset of the checkpointed chunk; later chunks
                // are reset entirely.
                chunk.set_used(checkpoint.chunk_offset);
            } else {
                chunk.reset();
            }
        }

        self.checkpoints.retain(|cp| {
            cp.chunk_index < checkpoint.chunk_index
                || (cp.chunk_index == checkpoint.chunk_index
                    && cp.chunk_offset <= checkpoint.chunk_offset)
        });

        #[cfg(feature = "debug")]
        {
            crate::debug::rewind_to_checkpoint(checkpoint.checkpoint_id);
            self.current_checkpoint_id = checkpoint.checkpoint_id + 1;
        }

        #[cfg(feature = "thread_local")]
        {
            crate::thread_local::reset_thread_cache();
        }

        #[cfg(feature = "lockfree")]
        {
            if let Some(ref buffer) = self.lockfree_buffer {
                buffer.reset();
            }
        }

        #[cfg(feature = "stats")]
        {
            let mut bytes_used = 0;
            for chunk in self.chunks.iter().take(checkpoint.chunk_index + 1) {
                bytes_used += chunk.used();
            }
            self.stats.bytes_used.store(bytes_used, Ordering::Release);
            self.stats
                .allocation_count
                .store(checkpoint.allocation_count, Ordering::Release);
        }
    }

    pub fn push_checkpoint(&mut self) -> ArenaCheckpoint {
        self.checkpoint()
    }

    pub fn pop_and_rewind(&mut self) -> Result<(), &'static str> {
        if let Some(checkpoint) = self.checkpoints.pop() {
            self.rewind_to_checkpoint(checkpoint);
            Ok(())
        } else {
            Err("No checkpoint to pop")
        }
    }

    #[cfg(feature = "stats")]
    pub fn stats(&self) -> &ArenaStats {
        &self.stats
    }

    #[cfg(feature = "debug")]
    pub fn debug_stats(&self) -> DebugStats {
        crate::debug::get_debug_stats()
    }

    #[cfg(feature = "lockfree")]
    pub fn lockfree_stats(&self) -> (usize, usize, usize, usize) {
        self.lockfree_stats.get()
    }
}
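
// A usage sketch for the checkpoint/rewind flow (illustrative; the module name
// and test are assumptions, and no optional features are needed for these
// calls): rewinding drops every bump allocation made after the checkpoint.
#[cfg(test)]
mod checkpoint_sketch {
    use super::*;

    #[test]
    fn rewind_restores_chunk_offset() {
        let mut arena = ArenaInner::new(4096).expect("arena allocation failed");
        let layout = Layout::from_size_align(128, 8).expect("invalid layout");

        arena.allocate(layout).expect("first allocation failed");
        let before = arena.chunks[0].used();

        let checkpoint = arena.checkpoint();
        arena.allocate(layout).expect("second allocation failed");
        assert!(arena.chunks[0].used() > before);

        arena.rewind_to_checkpoint(checkpoint);
        assert_eq!(arena.chunks[0].used(), before);
    }
}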

/// A small builder that configures the initial chunk capacity of an `Arena`.
pub struct ArenaBuilder {
    initial_capacity: usize,
}

impl ArenaBuilder {
    pub fn new() -> Self {
        Self {
            initial_capacity: 4096,
        }
    }

    pub fn initial_capacity(mut self, capacity: usize) -> Self {
        self.initial_capacity = capacity;
        self
    }

    pub fn build(self) -> crate::Arena {
        crate::Arena::with_capacity(self.initial_capacity)
    }
}

impl Default for ArenaBuilder {
    fn default() -> Self {
        Self::new()
    }
}
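
// A builder usage sketch (illustrative only): `build` simply forwards the
// configured capacity to `crate::Arena::with_capacity`, which is assumed to be
// defined elsewhere in the crate exactly as `build` already uses it.
#[cfg(test)]
mod builder_sketch {
    use super::*;

    #[test]
    fn builds_arena_with_custom_capacity() {
        let _arena = ArenaBuilder::new().initial_capacity(8 * 1024).build();
    }
}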

/// A guard that resets the borrowed arena when it goes out of scope.
pub struct Scope<'scope, 'arena> {
    arena: &'arena mut crate::Arena,
    _phantom: PhantomData<&'scope ()>,
}

impl<'scope, 'arena> Scope<'scope, 'arena> {
    pub fn new(arena: &'arena mut crate::Arena) -> Self {
        Self {
            arena,
            _phantom: PhantomData,
        }
    }
}

impl<'scope, 'arena> Drop for Scope<'scope, 'arena> {
    fn drop(&mut self) {
        unsafe {
            self.arena.reset();
        }
    }
}
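
// A scope usage sketch (illustrative; the module name is an assumption):
// allocations made while the `Scope` is alive are released when it drops,
// because `Drop` resets the whole arena. Only APIs already referenced in this
// file (`ArenaBuilder::build`, `Scope::new`) are used here.
#[cfg(test)]
mod scope_sketch {
    use super::*;

    #[test]
    fn arena_is_reset_when_scope_drops() {
        let mut arena = ArenaBuilder::new().initial_capacity(4096).build();
        {
            let _scope = Scope::new(&mut arena);
            // ... temporary allocations would go here ...
        } // `_scope` drops here and resets `arena`.
    }
}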