arena_allocator/lib.rs
use core::ffi::c_void;

/// Represents errors that can occur when using the `arena-allocator`.
///
/// The `ArenaError` enum encapsulates the different kinds of errors that may be encountered
/// while interacting with the `Arena` or `TypedArena`. These errors typically arise from issues
/// related to memory reservation, protection, or the arena running out of reserved memory.
///
/// # Variants
///
/// - `ReserveFailed(String)`: The initial reservation of virtual memory failed. The associated
///   string describes the underlying issue.
///
/// - `ProtectionFailed(String)`: A memory protection operation failed. This is especially
///   relevant in debug mode, where memory protection is used to detect use-after-free bugs.
///   The associated string provides a detailed explanation of the failure.
///
/// - `OutOfReservedMemory`: An allocation request exceeded the available reserved memory. The
///   arena has run out of its pre-reserved virtual memory and cannot accommodate additional
///   allocations without further action.
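///
/// # Example
///
/// A minimal sketch of matching on each variant:
///
/// ```
/// use arena_allocator::{Arena, ArenaError};
///
/// match Arena::new(64 * 1024) {
///     Ok(_arena) => println!("arena ready"),
///     Err(ArenaError::ReserveFailed(msg)) => eprintln!("reserve failed: {msg}"),
///     Err(ArenaError::ProtectionFailed(msg)) => eprintln!("protection failed: {msg}"),
///     Err(ArenaError::OutOfReservedMemory) => eprintln!("out of reserved memory"),
/// }
/// ```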
#[derive(Debug)]
pub enum ArenaError {
    ReserveFailed(String),
    ProtectionFailed(String),
    OutOfReservedMemory,
}

#[cfg(not(target_os = "windows"))]
mod posix {
    use crate::ArenaError;
    use core::ffi::{c_void, CStr};
    use core::ptr::null_mut;
    use libc::{c_char, mmap, mprotect, strerror_r, sysconf};
    use libc::{MAP_ANON, MAP_FAILED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, _SC_PAGESIZE};
    use std::io;

    pub(crate) fn get_page_size() -> usize {
        unsafe { sysconf(_SC_PAGESIZE) as usize }
    }

    fn get_last_error_code() -> i32 {
        io::Error::last_os_error().raw_os_error().unwrap_or(0)
    }

    fn get_last_error_message() -> String {
        let err_code = get_last_error_code();
        // `c_char` is `i8` on some targets and `u8` on others (e.g. aarch64 Linux),
        // so the buffer is declared in terms of `c_char` for portability.
        let mut buf = [0 as c_char; 256];
        unsafe {
            strerror_r(err_code, buf.as_mut_ptr(), buf.len());
            let c_str = CStr::from_ptr(buf.as_ptr());
            c_str.to_string_lossy().into_owned()
        }
    }

    pub(crate) fn reserve_range(size: usize) -> Result<*mut c_void, ArenaError> {
        // Reserve address space only: PROT_NONE keeps the pages inaccessible
        // until they are committed with `commit_memory`.
        let ptr = unsafe { mmap(null_mut(), size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0) };
        if ptr == MAP_FAILED {
            return Err(ArenaError::ReserveFailed(get_last_error_message()));
        }
        Ok(ptr)
    }

    pub(crate) fn commit_memory(ptr: *mut c_void, size: usize) -> Result<(), ArenaError> {
        let result = unsafe { mprotect(ptr, size, PROT_READ | PROT_WRITE) };
        if result != 0 {
            return Err(ArenaError::ProtectionFailed(get_last_error_message()));
        }
        Ok(())
    }

    pub(crate) fn decommit_memory(ptr: *mut c_void, size: usize) -> Result<(), ArenaError> {
        let result = unsafe { mprotect(ptr, size, PROT_NONE) };
        if result != 0 {
            return Err(ArenaError::ProtectionFailed(get_last_error_message()));
        }
        Ok(())
    }

    #[cfg(debug_assertions)]
    pub(crate) fn protect_memory(ptr: *mut c_void, size: usize) -> Result<(), ArenaError> {
        // Guard against zero-sized ranges, mirroring `unprotect_memory`.
        if size > 0 {
            let result = unsafe { mprotect(ptr, size, PROT_NONE) };
            if result != 0 {
                return Err(ArenaError::ProtectionFailed(get_last_error_message()));
            }
        }
        Ok(())
    }

    #[cfg(debug_assertions)]
    pub(crate) fn unprotect_memory(ptr: *mut c_void, size: usize) -> Result<(), ArenaError> {
        if size > 0 {
            let result = unsafe { mprotect(ptr, size, PROT_READ | PROT_WRITE) };
            if result != 0 {
                return Err(ArenaError::ProtectionFailed(get_last_error_message()));
            }
        }
        Ok(())
    }
}

#[cfg(target_os = "windows")]
mod windows {
    use crate::ArenaError;
    use core::{ffi::c_void, mem::zeroed};
    use std::{ffi::OsString, os::windows::ffi::OsStringExt, ptr::null_mut};

    const FORMAT_MESSAGE_ALLOCATE_BUFFER: u32 = 0x00000100;
    const FORMAT_MESSAGE_FROM_SYSTEM: u32 = 0x00001000;
    const FORMAT_MESSAGE_IGNORE_INSERTS: u32 = 0x00000200;

    const MEM_COMMIT: u32 = 0x00001000;
    const MEM_DECOMMIT: u32 = 0x00004000;
    const MEM_RESERVE: u32 = 0x00002000;
    const PAGE_NOACCESS: u32 = 0x01;
    const PAGE_READWRITE: u32 = 0x04;

    #[repr(C)]
    #[allow(non_snake_case)]
    struct SYSTEM_INFO {
        wProcessorArchitecture: u16,
        wReserved: u16,
        dwPageSize: u32,
        lpMinimumApplicationAddress: *mut u8,
        lpMaximumApplicationAddress: *mut u8,
        // ULONG_PTR: a pointer-sized unsigned integer.
        dwActiveProcessorMask: usize,
        dwNumberOfProcessors: u32,
        dwProcessorType: u32,
        dwAllocationGranularity: u32,
        wProcessorLevel: u16,
        wProcessorRevision: u16,
    }

    #[link(name = "kernel32")]
    extern "system" {
        fn GetSystemInfo(lpSystemInfo: *mut SYSTEM_INFO);
        fn GetLastError() -> u32;
        fn FormatMessageW(
            dwFlags: u32,
            lpSource: *const u16,
            dwMessageId: u32,
            dwLanguageId: u32,
            lpBuffer: *mut u16,
            nSize: u32,
            Arguments: *mut *mut u8,
        ) -> u32;
        fn LocalFree(hMem: *mut c_void) -> *mut c_void;
        fn VirtualAlloc(
            lpAddress: *mut c_void,
            dwSize: usize,
            flAllocationType: u32,
            flProtect: u32,
        ) -> *mut c_void;
        fn VirtualProtect(
            lpAddress: *mut c_void,
            dwSize: usize,
            flNewProtect: u32,
            lpflOldProtect: *mut u32,
        ) -> i32;
        fn VirtualFree(lpAddress: *mut c_void, dwSize: usize, dwFreeType: u32) -> i32;
    }

    fn get_system_info() -> SYSTEM_INFO {
        let mut info: SYSTEM_INFO = unsafe { zeroed() };
        unsafe {
            GetSystemInfo(&mut info);
        }
        info
    }

    pub(crate) fn get_page_size() -> usize {
        let info = get_system_info();
        info.dwPageSize as usize
    }

    fn get_last_error_message() -> String {
        unsafe {
            let error_code = GetLastError();
            if error_code == 0 {
                return String::new();
            }

            // With FORMAT_MESSAGE_ALLOCATE_BUFFER, lpBuffer receives a pointer to a
            // system-allocated buffer, so a pointer to `buf` is passed, cast to the
            // declared parameter type.
            let mut buf: *mut u16 = null_mut();
            let size = FormatMessageW(
                FORMAT_MESSAGE_ALLOCATE_BUFFER
                    | FORMAT_MESSAGE_FROM_SYSTEM
                    | FORMAT_MESSAGE_IGNORE_INSERTS,
                null_mut(),
                error_code,
                0,
                &mut buf as *mut *mut u16 as *mut u16,
                0,
                null_mut(),
            );

            if size == 0 {
                return format!("Unknown error code: {}", error_code);
            }

            let message = OsString::from_wide(core::slice::from_raw_parts(buf, size as usize))
                .to_string_lossy()
                .into_owned();
            LocalFree(buf as *mut _);
            message
        }
    }

    pub(crate) fn reserve_range(size: usize) -> Result<*mut c_void, ArenaError> {
        let ptr = unsafe { VirtualAlloc(null_mut(), size, MEM_RESERVE, PAGE_READWRITE) };
        if ptr.is_null() {
            return Err(ArenaError::ReserveFailed(get_last_error_message()));
        }
        Ok(ptr)
    }

    pub(crate) fn commit_memory(ptr: *mut c_void, size: usize) -> Result<(), ArenaError> {
        let success = unsafe { VirtualAlloc(ptr, size, MEM_COMMIT, PAGE_READWRITE) };
        if success.is_null() {
            return Err(ArenaError::ProtectionFailed(get_last_error_message()));
        }
        Ok(())
    }

    pub(crate) fn decommit_memory(ptr: *mut c_void, size: usize) -> Result<(), ArenaError> {
        let success = unsafe { VirtualFree(ptr, size, MEM_DECOMMIT) };
        if success == 0 {
            return Err(ArenaError::ProtectionFailed(get_last_error_message()));
        }
        Ok(())
    }

    #[cfg(debug_assertions)]
    pub(crate) fn protect_memory(ptr: *mut c_void, size: usize) -> Result<(), ArenaError> {
        // Guard against zero-sized ranges: VirtualProtect fails when dwSize is 0.
        if size > 0 {
            let mut old_protect = 0u32;
            let success = unsafe { VirtualProtect(ptr, size, PAGE_NOACCESS, &mut old_protect) };
            if success == 0 {
                return Err(ArenaError::ProtectionFailed(get_last_error_message()));
            }
        }
        Ok(())
    }

    #[cfg(debug_assertions)]
    pub(crate) fn unprotect_memory(ptr: *mut c_void, size: usize) -> Result<(), ArenaError> {
        if size > 0 {
            let mut old_protect = 0u32;
            let success = unsafe { VirtualProtect(ptr, size, PAGE_READWRITE, &mut old_protect) };
            if success == 0 {
                return Err(ArenaError::ProtectionFailed(get_last_error_message()));
            }
        }
        Ok(())
    }
}

#[cfg(not(target_os = "windows"))]
pub(crate) use posix::*;

#[cfg(target_os = "windows")]
pub(crate) use windows::*;

#[derive(Copy, Clone)]
struct VmRange<'a> {
    ptr: *mut c_void,
    reserved_size: usize,
    committed_size: usize,
    pos: usize,
    page_size: usize,
    marker: core::marker::PhantomData<&'a c_void>,
}

// Specifies whether the memory should be protected after it is decommitted.
// This is useful for debugging, as it can help catch use-after-free bugs:
// all of the decommitted memory is marked as "no access", so any code that
// touches it triggers an access-violation exception.
//
//enum UseSafetyRange {
//    Yes,
//    No,
//}

impl<'a> VmRange<'a> {
    pub fn new(reserved_size: usize) -> Result<Self, ArenaError> {
        let page_size = get_page_size();
        // Reserve at least one page, and store the size we actually reserved.
        let reserved_size = std::cmp::max(reserved_size, page_size);
        let ptr = reserve_range(reserved_size)?;
        Ok(Self {
            ptr,
            reserved_size,
            committed_size: 0,
            pos: 0,
            marker: core::marker::PhantomData,
            page_size,
        })
    }

    /// Rounds `x` up to the next multiple of `b`, which must be a power of two.
    /// For example, `align_pow2(13, 8) == 16`.
    #[inline]
    fn align_pow2(x: usize, b: usize) -> usize {
        (x + b - 1) & !(b - 1)
    }

    /// Allocates a raw memory block in the arena.
    ///
    /// # Safety
    /// The returned data is uninitialized. The caller must ensure that the data is
    /// properly initialized.
    pub(crate) unsafe fn alloc_raw(
        &mut self,
        size: usize,
        alignment: usize,
    ) -> Result<&'a mut [u8], ArenaError> {
        // Align the start of the allocation (not just its size) so that mixed
        // alignments within the same arena remain correct.
        let aligned_pos = Self::align_pow2(self.pos, alignment);
        let new_pos = aligned_pos + size;

        // Fast path: the requested range is already committed.
        if new_pos <= self.committed_size {
            let return_slice =
                std::slice::from_raw_parts_mut(self.ptr.add(aligned_pos) as *mut u8, size);
            self.pos = new_pos;
            return Ok(return_slice);
        }

        // Commit just enough whole pages to cover the new position.
        let commit_size = Self::align_pow2(new_pos - self.committed_size, self.page_size);
        if self.committed_size + commit_size > self.reserved_size {
            return Err(ArenaError::OutOfReservedMemory);
        }

        commit_memory(self.ptr.add(self.committed_size), commit_size)?;
        self.committed_size += commit_size;

        let return_slice =
            std::slice::from_raw_parts_mut(self.ptr.add(aligned_pos) as *mut u8, size);
        self.pos = new_pos;
        Ok(return_slice)
    }

    /// Allocates an array of `T` elements in the arena.
    ///
    /// # Safety
    /// The returned data is uninitialized. The caller must ensure that the data is
    /// properly initialized.
    pub(crate) unsafe fn alloc_array<T: Sized>(
        &mut self,
        count: usize,
    ) -> Result<&'a mut [T], ArenaError> {
        let size = count * core::mem::size_of::<T>();
        let alignment = core::mem::align_of::<T>();
        let slice = self.alloc_raw(size, alignment)?;
        let ptr = slice.as_mut_ptr() as *mut T;
        // The slice has `count` elements, not `size` (bytes).
        Ok(unsafe { std::slice::from_raw_parts_mut(ptr, count) })
    }

    /// Allocates an array of `T` elements in the arena and initializes them with the default
    /// value.
    pub(crate) fn alloc_array_init<T: Default + Sized>(
        &mut self,
        count: usize,
    ) -> Result<&'a mut [T], ArenaError> {
        let size = count * core::mem::size_of::<T>();
        let alignment = core::mem::align_of::<T>();
        let slice = unsafe { self.alloc_raw(size, alignment)? };
        let ptr = slice.as_mut_ptr() as *mut T;

        // Initialize via `ptr::write` so no (uninitialized) old value is dropped.
        for i in 0..count {
            unsafe { ptr.add(i).write(T::default()) };
        }

        Ok(unsafe { std::slice::from_raw_parts_mut(ptr, count) })
    }

    /// Allocates a single instance of `T` in the arena.
    ///
    /// # Safety
    /// The returned data is uninitialized. The caller must ensure that the data is
    /// properly initialized.
    pub(crate) unsafe fn alloc<T: Sized>(&mut self) -> Result<&'a mut T, ArenaError> {
        let size = core::mem::size_of::<T>();
        let alignment = core::mem::align_of::<T>();
        let slice = self.alloc_raw(size, alignment)?;
        let ptr = slice.as_mut_ptr() as *mut T;
        Ok(unsafe { &mut *ptr })
    }

    /// Allocates a single instance of `T` in the arena and initializes it with the default
    /// value.
    pub(crate) fn alloc_init<T: Default + Sized>(&mut self) -> Result<&'a mut T, ArenaError> {
        let size = core::mem::size_of::<T>();
        let alignment = core::mem::align_of::<T>();
        let slice = unsafe { self.alloc_raw(size, alignment)? };
        let ptr = slice.as_mut_ptr() as *mut T;
        unsafe { ptr.write(T::default()) };
        Ok(unsafe { &mut *ptr })
    }

    #[inline]
    pub(crate) fn rewind(&mut self) {
        self.pos = 0;
    }

    #[cfg(debug_assertions)]
    pub(crate) fn protect(&mut self) {
        protect_memory(self.ptr, self.committed_size).unwrap();
    }

    #[cfg(debug_assertions)]
    pub(crate) fn unprotect(&mut self) {
        unprotect_memory(self.ptr, self.committed_size).unwrap();
    }

    #[inline]
    pub(crate) fn decommit(&mut self) -> Result<(), ArenaError> {
        decommit_memory(self.ptr, self.committed_size)?;
        self.committed_size = 0;
        self.pos = 0;
        Ok(())
    }
}

/// A memory arena for efficient allocation management.
///
/// The `Arena` struct manages a reserved block of virtual memory, enabling fast, contiguous
/// allocations. This structure is particularly useful in scenarios where many small allocations
/// are required, as it minimizes overhead and fragmentation.
///
/// # Primary Use-Cases
///
/// The `Arena` is designed for two main allocation patterns:
///
/// 1. **Long-Lived Allocations**: When allocations are expected to persist until the end of the
///    program. This use-case benefits from the arena's efficient management of memory, avoiding
///    the overhead of frequent deallocations.
///
/// 2. **Very Short-Lived Allocations**: When allocations are needed temporarily and the
///    allocator is rewound once they are no longer needed. This pattern is ideal for scenarios
///    where large numbers of temporary objects are created and discarded in one go, as it allows
///    for quick cleanup and reuse of the allocated memory.
///
/// # Fields
///
/// - `current`: The active `VmRange` that tracks the currently allocated range within the
///   reserved memory. Allocations are performed from this range.
///
/// - `prev`: A secondary `VmRange` that exists only in debug mode. This range mirrors the
///   `current` range and is protected after being decommitted, allowing detection of
///   use-after-free errors. In release mode, this field is compiled out and memory protection is
///   disabled to maximize performance.
///
/// # Usage
///
/// The `Arena` is initialized with a specified size through the `Arena::new` function. While the
/// entire size is reserved in virtual memory, physical memory is only committed in page-sized
/// chunks as needed. This design ensures that the memory footprint remains minimal until actual
/// allocations occur.
///
/// In debug builds, additional memory protection is enabled to catch potential memory safety
/// issues such as use-after-free, though this comes at the cost of increased memory usage. This
/// feature is automatically disabled in release builds for optimal performance.
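///
/// # Example
///
/// A minimal sketch of both patterns:
///
/// ```
/// use arena_allocator::Arena;
///
/// let mut arena = Arena::new(1024 * 1024).unwrap();
///
/// // Long-lived: default-initialized and used from then on.
/// let value = arena.alloc_init::<u64>().unwrap();
/// *value = 7;
///
/// // Short-lived: scratch data that is discarded in one go.
/// let scratch = arena.alloc_array_init::<u8>(4096).unwrap();
/// scratch[0] = 1;
/// arena.rewind();
/// ```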
pub struct Arena<'a> {
    current: VmRange<'a>,
    #[cfg(debug_assertions)]
    prev: VmRange<'a>,
}

impl<'a> Arena<'a> {
    /// Initializes a new `Arena` with the specified size. The `size` parameter defines the amount
    /// of reserved virtual memory. It is recommended to choose a large size since this reservation
    /// does not immediately consume physical memory. On a 64-bit system, reserving a few gigabytes
    /// is generally acceptable. Physical memory is committed incrementally in page-sized chunks
    /// as allocations occur.
    ///
    /// In debug mode, decommitted memory is protected to detect use-after-free errors, resulting
    /// in double the memory reservation. This protection is disabled in release mode.
    pub fn new(size: usize) -> Result<Self, ArenaError> {
        let current = VmRange::new(size)?;
        #[cfg(debug_assertions)]
        let prev = VmRange::new(size)?;

        Ok(Self {
            current,
            #[cfg(debug_assertions)]
            prev,
        })
    }

    /// Allocates a raw memory block in the arena.
    ///
    /// This function allocates a block of uninitialized memory within the arena. The size and
    /// alignment of the block are specified by the caller. The allocated memory is contiguous
    /// and may be used for any purpose that requires raw, untyped data.
    ///
    /// # Safety
    /// The returned memory is uninitialized, and it is the caller's responsibility to ensure
    /// that the memory is properly initialized before it is used. Failing to do so may result in
    /// undefined behavior.
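    ///
    /// A minimal sketch of intended use (initialize before reading):
    ///
    /// ```
    /// use arena_allocator::Arena;
    ///
    /// let mut arena = Arena::new(16 * 1024).unwrap();
    /// let bytes = unsafe { arena.alloc_raw(64, 16) }.unwrap();
    /// bytes.fill(0); // the caller is responsible for initialization
    /// assert_eq!(bytes.len(), 64);
    /// ```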
    pub unsafe fn alloc_raw(
        &mut self,
        size: usize,
        alignment: usize,
    ) -> Result<&'a mut [u8], ArenaError> {
        self.current.alloc_raw(size, alignment)
    }

    /// Allocates an array of `T` elements in the arena.
    ///
    /// This function allocates uninitialized memory for an array of elements of type `T`. The
    /// number of elements is specified by the `count` parameter. The memory is contiguous and
    /// properly aligned for the type `T`.
    ///
    /// # Safety
    /// The returned array is uninitialized, and it is the caller's responsibility to initialize
    /// the elements before use. Using uninitialized data can lead to undefined behavior. After
    /// the arena is rewound, all references to this array become invalid.
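    ///
    /// A minimal sketch of intended use:
    ///
    /// ```
    /// use arena_allocator::Arena;
    ///
    /// let mut arena = Arena::new(16 * 1024).unwrap();
    /// let values = unsafe { arena.alloc_array::<u32>(8) }.unwrap();
    /// for v in values.iter_mut() {
    ///     *v = 0; // initialize every element before it is read
    /// }
    /// assert_eq!(values.len(), 8);
    /// ```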
    pub unsafe fn alloc_array<T: Sized>(
        &mut self,
        count: usize,
    ) -> Result<&'a mut [T], ArenaError> {
        self.current.alloc_array(count)
    }

    /// Allocates a single instance of `T` in the arena.
    ///
    /// This function allocates uninitialized memory for a single instance of type `T`.
    ///
    /// # Safety
    /// The returned instance is uninitialized, and the caller must ensure that it is initialized
    /// before any use. Uninitialized memory can lead to undefined behavior if accessed.
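    ///
    /// A minimal sketch of intended use (write before any read):
    ///
    /// ```
    /// use arena_allocator::Arena;
    ///
    /// let mut arena = Arena::new(4 * 1024).unwrap();
    /// let n = unsafe { arena.alloc::<u64>() }.unwrap();
    /// *n = 123;
    /// assert_eq!(*n, 123);
    /// ```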
    pub unsafe fn alloc<T: Sized>(&mut self) -> Result<&'a mut T, ArenaError> {
        self.current.alloc()
    }

    /// Allocates a single instance of `T` in the arena and initializes it with the default value.
    ///
    /// This function allocates memory for a single instance of type `T` and initializes it using
    /// `T::default()`.
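    ///
    /// For example:
    ///
    /// ```
    /// use arena_allocator::Arena;
    ///
    /// let mut arena = Arena::new(4 * 1024).unwrap();
    /// let counter = arena.alloc_init::<u32>().unwrap();
    /// assert_eq!(*counter, 0); // starts at `u32::default()`
    /// ```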
    pub fn alloc_init<T: Default + Sized>(&mut self) -> Result<&'a mut T, ArenaError> {
        self.current.alloc_init()
    }

    /// Allocates an array of `T` elements in the arena and initializes them with the default
    /// value.
    ///
    /// This function allocates memory for an array of elements of type `T`, and initializes each
    /// element using `T::default()`.
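    ///
    /// For example:
    ///
    /// ```
    /// use arena_allocator::Arena;
    ///
    /// let mut arena = Arena::new(4 * 1024).unwrap();
    /// let zeros = arena.alloc_array_init::<u8>(32).unwrap();
    /// assert!(zeros.iter().all(|&b| b == 0));
    /// ```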
    pub fn alloc_array_init<T: Default + Sized>(
        &mut self,
        count: usize,
    ) -> Result<&'a mut [T], ArenaError> {
        self.current.alloc_array_init(count)
    }

    /// Rewinds the arena to its initial state.
    ///
    /// This method resets the allocation position to the start of the arena without deallocating
    /// the memory. After calling `rewind`, all references to previously allocated memory in the
    /// arena should be considered invalid, as any subsequent allocation will overwrite this
    /// memory.
    ///
    /// # Memory Safety
    ///
    /// In debug mode, calling `rewind` will protect the memory that has been rewound, helping to
    /// catch use-after-free bugs. Any access to memory that was allocated before the `rewind`
    /// will result in a crash, as demonstrated in the example below:
    ///
    /// ```
    /// use arena_allocator::Arena;
    ///
    /// let mut arena = Arena::new(16 * 1024).unwrap();
    /// let t = arena.alloc_init::<u32>().unwrap();
    /// *t = 42;
    /// arena.rewind();
    /// //*t = 43; // This will crash in debug mode
    /// ```
    ///
    /// # Usage
    ///
    /// This method is particularly useful in scenarios where the arena is used for very
    /// short-lived allocations that are discarded en masse. By rewinding the arena, the
    /// allocator can quickly reset and reuse the reserved memory without the overhead of
    /// deallocation and reallocation.
    ///
    /// In release mode, the memory protection mechanism is disabled to ensure optimal
    /// performance, but in debug mode, the additional checks help identify improper memory
    /// usage patterns.
    #[cfg(debug_assertions)]
    pub fn rewind(&mut self) {
        // Protect the range we are done with so stale references fault.
        self.current.protect();

        std::mem::swap(&mut self.current, &mut self.prev);

        // Unprotect the new current range and rewind the position to the start.
        self.current.unprotect();
        self.current.rewind();
    }

    #[cfg(not(debug_assertions))]
    pub fn rewind(&mut self) {
        self.current.rewind();
    }
}

impl Drop for Arena<'_> {
    #[cfg(debug_assertions)]
    fn drop(&mut self) {
        self.current.decommit().unwrap();
        self.prev.decommit().unwrap();
    }

    #[cfg(not(debug_assertions))]
    fn drop(&mut self) {
        self.current.decommit().unwrap();
    }
}

/// A type-specific memory arena for efficient allocation of `T` elements.
///
/// `TypedArena` is a specialized memory allocator designed for managing objects of a single type
/// `T`. It builds upon the underlying `Arena`, providing type safety and automatic initialization
/// of allocated objects using `T::default()`. This makes it ideal for scenarios where a large
/// number of objects of type `T` need to be allocated efficiently, either for long-term storage
/// or for short-lived usage with rapid recycling.
///
/// # Type Parameters
///
/// - `T`: The type of objects that this arena will manage. `T` must implement the `Default` and
///   `Sized` traits, ensuring that instances can be created with default values and that their
///   size is known at compile-time.
///
/// # Primary Use-Cases
///
/// `TypedArena` is particularly useful in situations where:
///
/// 1. **Long-Lived Allocations**: Objects are allocated once and used until the end of the
///    program.
/// 2. **Short-Lived Allocations**: Objects are allocated and then quickly discarded, with the
///    entire arena being rewound for reuse. This is efficient for temporary data structures
///    that need to be quickly recycled.
///
/// # Example
///
/// ```rust
/// use arena_allocator::TypedArena;
///
/// let mut arena = TypedArena::<u32>::new(1024 * 1024).unwrap();
/// let item = arena.alloc().unwrap();
/// *item = 42;
///
/// let array = arena.alloc_array(10).unwrap();
/// for i in 0..10 {
///     array[i] = i as u32;
/// }
///
/// arena.rewind(); // All previous allocations are now invalid.
/// ```
pub struct TypedArena<'a, T: Default + Sized> {
    arena: Arena<'a>,
    ptr_type: core::marker::PhantomData<&'a T>,
}

impl<'a, T: Default + Sized> TypedArena<'a, T> {
    /// Creates a new `TypedArena` with the specified size.
    ///
    /// The `size` parameter specifies the amount of memory to reserve in the arena. It is
    /// recommended to choose a large size, especially for scenarios where many objects of
    /// type `T` will be allocated. The reserved memory is not immediately committed, so
    /// reserving more than necessary does not consume physical memory until allocations occur.
    ///
    /// # Errors
    /// This function will return an `ArenaError` if the underlying memory reservation fails.
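    ///
    /// For example (a minimal sketch; the reservation here is deliberately generous):
    ///
    /// ```
    /// use arena_allocator::TypedArena;
    ///
    /// // Physical pages are only committed as allocations occur.
    /// let _arena = TypedArena::<f32>::new(64 * 1024 * 1024).unwrap();
    /// ```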
    pub fn new(size: usize) -> Result<Self, ArenaError> {
        Ok(Self {
            arena: Arena::new(size)?,
            ptr_type: core::marker::PhantomData,
        })
    }

    /// Allocates a single instance of `T` in the arena and initializes it with the default value.
    ///
    /// This function allocates memory for an instance of `T` and initializes it using
    /// `T::default()`. The returned reference points to the initialized object, which can be
    /// used immediately.
    ///
    /// # Errors
    /// This function will return an `ArenaError` if the memory allocation fails.
    pub fn alloc(&mut self) -> Result<&'a mut T, ArenaError> {
        self.arena.alloc_init()
    }

    /// Allocates an array of `T` elements in the arena and initializes them with the default
    /// value.
    ///
    /// This function allocates memory for an array of `T` elements and initializes each element
    /// using `T::default()`. The returned slice points to the initialized array, which can be
    /// used immediately.
    ///
    /// # Errors
    /// This function will return an `ArenaError` if the memory allocation fails.
    pub fn alloc_array(&mut self, count: usize) -> Result<&'a mut [T], ArenaError> {
        self.arena.alloc_array_init(count)
    }

    /// Rewinds the arena to its initial state, invalidating all previous allocations.
    ///
    /// This method resets the arena, allowing it to be reused for new allocations. All previously
    /// allocated objects become invalid after this operation, and any attempt to access them will
    /// result in undefined behavior. In debug mode, the memory of the invalidated objects is
    /// protected to help catch use-after-free bugs.
    ///
    /// # Usage
    /// `rewind` is particularly useful in scenarios where the arena is used for temporary
    /// allocations that need to be quickly discarded and recycled.
    pub fn rewind(&mut self) {
        self.arena.rewind();
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_arena() {
        let mut arena = Arena::new(16 * 1024).unwrap();
        let slice = unsafe { arena.alloc_raw(1024, 16).unwrap() };
        assert_eq!(slice.len(), 1024);
        assert_eq!(slice.as_ptr() as usize % 16, 0);
        assert!(!slice.as_ptr().is_null());
    }

    #[test]
    fn test_fail_reserve() {
        let result = Arena::new(usize::MAX);
        assert!(result.is_err());
    }

    #[test]
    fn test_fail_commit() {
        let size = 16 * 1024;
        let mut arena = Arena::new(size).unwrap();
        let result = unsafe { arena.alloc_raw(size * 2, 16) };
        assert!(result.is_err());
    }

    #[test]
    fn test_typed_arena() {
        let mut arena = TypedArena::<u32>::new(32 * 1024).unwrap();
        let single = arena.alloc().unwrap();
        assert_eq!(*single, 0);
        *single = 42;
        assert_eq!(*single, 42);

        let array = arena.alloc_array(1024).unwrap();
        assert_eq!(array.len(), 1024);
        for i in 0..1024 {
            assert_eq!(array[i], 0);
            array[i] = i as u32;
        }
        for i in 0..1024 {
            assert_eq!(array[i], i as u32);
        }
    }
}

#[cfg(any(target_os = "linux", target_os = "macos"))]
#[cfg(test)]
mod macos_linux_tests {
    use super::*;
    use libc::{fork, waitpid, SIGSEGV, WIFEXITED, WIFSIGNALED, WTERMSIG};
    use std::process;

    // The protection that makes this crash only exists in debug builds.
    #[cfg(debug_assertions)]
    #[test]
    fn test_crash_handling() {
        unsafe {
            let pid = fork();
            if pid == -1 {
                panic!("Failed to fork process");
            } else if pid == 0 {
                // Child process: a write after `rewind` must hit protected memory.
                let mut arena = TypedArena::<u32>::new(32 * 1024).unwrap();
                let single = arena.alloc().unwrap();
                *single = 42;
                arena.rewind();
                *single = 43; // will crash here, as this writes to protected memory
                println!("Single: {}", *single);
                // Exit so the child does not keep running the test harness.
                process::exit(0);
            } else {
                // Parent process
                let mut status = 0;
                waitpid(pid, &mut status, 0);
                if WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV {
                    println!("Child process crashed as expected");
                } else if WIFEXITED(status) {
                    panic!("Child process exited normally, but a crash was expected");
                }
            }
        }
    }
}