#![expect(
clippy::borrow_as_ptr,
clippy::cast_ptr_alignment,
clippy::cast_sign_loss,
clippy::elidable_lifetime_names,
clippy::filter_map_next,
clippy::inconsistent_struct_constructor,
clippy::inline_always,
clippy::manual_div_ceil,
clippy::map_unwrap_or,
clippy::missing_errors_doc,
clippy::missing_panics_doc,
clippy::mut_from_ref,
clippy::needless_lifetimes,
clippy::ptr_as_ptr,
clippy::ptr_cast_constness,
clippy::ref_as_ptr,
clippy::semicolon_if_nothing_returned,
clippy::undocumented_unsafe_blocks,
clippy::uninlined_format_args,
clippy::unnecessary_safety_comment,
clippy::unused_self,
clippy::single_match_else,
unsafe_op_in_unsafe_fn
)]
#![deny(missing_debug_implementations)]
#![deny(missing_docs)]
#[doc(hidden)]
pub extern crate alloc as core_alloc;
use crate::bumpalo_alloc;
use core::cell::Cell;
use core::cmp::Ordering;
use core::fmt::Display;
use core::iter;
use core::marker::PhantomData;
use core::mem;
use core::ptr::{self, NonNull};
use core::slice;
use core::str;
use core_alloc::alloc::{Layout, alloc, dealloc};
use allocator_api2::alloc::{AllocError, Allocator};
pub use bumpalo_alloc::AllocErr;
/// Error returned by [`Bump::try_alloc_try_with`]: either the arena could not
/// provide memory, or the user-supplied initializer returned an error.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum AllocOrInitError<E> {
/// The bump allocation itself failed.
Alloc(AllocErr),
/// Memory was available, but the initialization closure returned `Err`.
Init(E),
}
impl<E> From<AllocErr> for AllocOrInitError<E> {
// Lets `?` convert plain allocation failures inside functions returning
// `Result<_, AllocOrInitError<E>>`.
fn from(e: AllocErr) -> Self {
Self::Alloc(e)
}
}
impl<E: Display> Display for AllocOrInitError<E> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
// Delegate to `AllocErr`'s own message for the allocation case.
AllocOrInitError::Alloc(err) => err.fmt(f),
AllocOrInitError::Init(err) => write!(f, "initialization failed: {}", err),
}
}
}
/// A bump-allocation arena. `MIN_ALIGN` is the minimum alignment every
/// returned pointer is guaranteed to have; it must be a power of two no
/// larger than the chunk alignment (checked in the constructors).
#[derive(Debug)]
pub struct Bump<const MIN_ALIGN: usize = 1> {
// Newest chunk; older chunks are reachable via `ChunkFooter::prev`.
// Points at the static `EMPTY_CHUNK` sentinel before the first allocation.
current_chunk_footer: Cell<NonNull<ChunkFooter>>,
// Optional soft cap (bytes, excluding footers) checked before allocating
// new chunks; `None` means unlimited.
allocation_limit: Cell<Option<usize>>,
}
// Bookkeeping stored at the *end* of each chunk's allocation; allocations are
// bumped downward from the footer toward `data`.
#[repr(C)]
#[repr(align(16))]
#[derive(Debug)]
struct ChunkFooter {
// Start (lowest address) of this chunk's usable memory.
data: NonNull<u8>,
// Layout this chunk was allocated with; required to free it.
layout: Layout,
// Next-older chunk, or the static empty-chunk sentinel.
prev: Cell<NonNull<ChunkFooter>>,
// The bump finger: the next allocation ends here and moves it downward.
ptr: Cell<NonNull<u8>>,
// Usable bytes (no footers) of this chunk plus all `prev` chunks.
allocated_bytes: usize,
}
// Wrapper whose only purpose is to let us declare a `static` `ChunkFooter`:
// `NonNull` is not `Sync`, so thread-safety is asserted manually below.
#[repr(transparent)]
struct EmptyChunkFooter(ChunkFooter);
// SAFETY: every mutation path first checks `is_empty()` (or replaces the
// pointer entirely), so the sentinel's interior cells are never written.
unsafe impl Sync for EmptyChunkFooter {}
// Sentinel footer shared by all `Bump`s with no real chunk yet. `data` and
// `ptr` both point at the footer itself (so its capacity computes as zero),
// and `prev` points back at itself, terminating chunk-list walks.
static EMPTY_CHUNK: EmptyChunkFooter = EmptyChunkFooter(ChunkFooter {
layout: Layout::new::<ChunkFooter>(),
data: unsafe { NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut u8) },
ptr: Cell::new(unsafe {
NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut u8)
}),
prev: Cell::new(unsafe {
NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut ChunkFooter)
}),
allocated_bytes: 0,
});
impl EmptyChunkFooter {
// `NonNull` view of the sentinel footer.
fn get(&'static self) -> NonNull<ChunkFooter> {
NonNull::from(&self.0)
}
}
impl ChunkFooter {
// Returns this chunk's occupied region as (start pointer, length). Because
// allocation bumps downward, the live bytes span from the current bump
// finger `ptr` up to the footer itself.
fn as_raw_parts(&self) -> (*const u8, usize) {
let data = self.data.as_ptr() as *const u8;
let ptr = self.ptr.get().as_ptr() as *const u8;
debug_assert!(data <= ptr);
debug_assert!(ptr <= self as *const ChunkFooter as *const u8);
// SAFETY: both pointers lie within this chunk's allocation (asserted
// above), so `offset_from` is in-bounds and non-negative.
let len = unsafe { (self as *const ChunkFooter as *const u8).offset_from(ptr) as usize };
(ptr, len)
}
// Whether this footer is the shared `EMPTY_CHUNK` sentinel, compared by
// address rather than by contents.
fn is_empty(&self) -> bool {
ptr::eq(self, EMPTY_CHUNK.get().as_ptr())
}
}
impl<const MIN_ALIGN: usize> Default for Bump<MIN_ALIGN> {
/// Equivalent to [`Bump::with_min_align`]: an arena with no chunk yet.
fn default() -> Self {
Self::with_min_align()
}
}
impl<const MIN_ALIGN: usize> Drop for Bump<MIN_ALIGN> {
// Frees every chunk in the list; the `EMPTY_CHUNK` sentinel terminates the
// walk and is never deallocated. Note: contents are NOT dropped, only the
// backing memory is released.
fn drop(&mut self) {
unsafe {
dealloc_chunk_list(self.current_chunk_footer.get());
}
}
}
// Walks the chunk list starting at `footer`, freeing each real chunk with the
// layout it was allocated with; stops at the `EMPTY_CHUNK` sentinel.
//
// Safety: `footer` must head a valid chunk list, and none of those chunks may
// be used again after this call.
#[inline]
unsafe fn dealloc_chunk_list(mut footer: NonNull<ChunkFooter>) {
while !footer.as_ref().is_empty() {
let f = footer;
// Read `prev` before freeing `f`: the footer lives inside the very
// allocation being released.
footer = f.as_ref().prev.get();
dealloc(f.as_ref().data.as_ptr(), f.as_ref().layout);
}
}
unsafe impl<const MIN_ALIGN: usize> Send for Bump<MIN_ALIGN> {}
// Whether `pointer`'s address is a multiple of `align` (a power of two).
#[inline]
fn is_pointer_aligned_to<T>(pointer: *mut T, align: usize) -> bool {
    debug_assert!(align.is_power_of_two());
    // A pointer is aligned exactly when rounding its address down to `align`
    // leaves it unchanged.
    let addr = pointer as usize;
    round_down_to(addr, align) == addr
}
/// Rounds `n` up to the nearest multiple of `divisor` (which must be a power
/// of two), returning `None` if the computation would overflow `usize`.
#[inline]
pub(crate) const fn round_up_to(n: usize, divisor: usize) -> Option<usize> {
    debug_assert!(divisor > 0);
    debug_assert!(divisor.is_power_of_two());
    let mask = divisor - 1;
    // `Option::map` is not callable in `const fn`, hence the explicit match.
    match n.checked_add(mask) {
        Some(bumped) => Some(bumped & !mask),
        None => None,
    }
}
// Like `round_up_to`, but assumes the rounding cannot overflow.
//
// Safety: `n + (divisor - 1)` must not overflow `usize`; otherwise this hits
// `unreachable_unchecked` and is undefined behavior in release builds.
#[inline]
pub(crate) unsafe fn round_up_to_unchecked(n: usize, divisor: usize) -> usize {
match round_up_to(n, divisor) {
Some(x) => x,
None => {
// Catch contract violations loudly in debug builds before the UB.
debug_assert!(false, "round_up_to_unchecked failed");
core::hint::unreachable_unchecked()
}
}
}
/// Rounds `n` down to the nearest multiple of `divisor` (a power of two).
#[inline]
pub(crate) fn round_down_to(n: usize, divisor: usize) -> usize {
    debug_assert!(divisor > 0);
    debug_assert!(divisor.is_power_of_two());
    // Clearing the low bits is exact for a power-of-two divisor.
    let mask = divisor - 1;
    n & !mask
}
/// Rounds `ptr` down to the nearest `divisor`-aligned address (`divisor` must
/// be a power of two), preserving the pointer's provenance.
#[inline]
pub(crate) fn round_mut_ptr_down_to(ptr: *mut u8, divisor: usize) -> *mut u8 {
    debug_assert!(divisor > 0);
    debug_assert!(divisor.is_power_of_two());
    // Subtract the misalignment; `wrapping_sub` keeps this a pure address
    // computation even at the edges of the address space.
    let misalignment = ptr as usize & (divisor - 1);
    ptr.wrapping_sub(misalignment)
}
// Rounds `ptr` up to the nearest `divisor`-aligned address (power of two).
//
// Safety: the rounded address must not overflow `usize` (see
// `round_up_to_unchecked`), and the result must remain inside the same
// allocation for `ptr.add` to be sound.
#[inline]
pub(crate) unsafe fn round_mut_ptr_up_to_unchecked(ptr: *mut u8, divisor: usize) -> *mut u8 {
debug_assert!(divisor > 0);
debug_assert!(divisor.is_power_of_two());
let aligned = round_up_to_unchecked(ptr as usize, divisor);
let delta = aligned - (ptr as usize);
ptr.add(delta)
}
// Heuristic page size used when sizing large chunks; not queried from the OS.
const TYPICAL_PAGE_SIZE: usize = 0x1000;
// Highest alignment the chunk-iteration API relies on for chunk data.
const SUPPORTED_ITER_ALIGNMENT: usize = 16;
// Every chunk allocation (and therefore each footer) is aligned to this.
const CHUNK_ALIGN: usize = SUPPORTED_ITER_ALIGNMENT;
const FOOTER_SIZE: usize = mem::size_of::<ChunkFooter>();
// Compile-time check that ChunkFooter's `#[repr(align(16))]` matches CHUNK_ALIGN.
const _FOOTER_ALIGN_ASSERTION: () = {
assert!(mem::align_of::<ChunkFooter>() == CHUNK_ALIGN);
};
// Assumed bookkeeping overhead of the system allocator; folded into chunk
// sizing so total requests land on friendly power-of-two / page totals.
const MALLOC_OVERHEAD: usize = 16;
// Combined malloc + footer overhead, rounded up to the chunk alignment.
const OVERHEAD: usize = match round_up_to(MALLOC_OVERHEAD + FOOTER_SIZE, CHUNK_ALIGN) {
Some(x) => x,
None => panic!(),
};
// Aim the first default-sized chunk (including overhead) at 16 KiB total.
const FIRST_ALLOCATION_GOAL: usize = 16 * 1024;
const DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER: usize = FIRST_ALLOCATION_GOAL - OVERHEAD;
// Precomputed sizing for a prospective chunk; see `new_chunk_memory_details`.
#[derive(Debug, Clone, Copy)]
struct NewChunkMemoryDetails {
// Usable bytes, i.e. `size` minus the trailing footer.
new_size_without_footer: usize,
// Alignment of the whole chunk allocation.
align: usize,
// Total bytes to request from the system allocator (footer included).
size: usize,
}
// Converts `Layout` construction failures into this crate's `AllocErr`.
#[inline]
fn layout_from_size_align(size: usize, align: usize) -> Result<Layout, AllocErr> {
Layout::from_size_align(size, align).map_err(|_| AllocErr)
}
// Shared cold panic path for size computations that overflow `usize`.
#[cold]
#[inline(never)]
fn allocation_size_overflow<T>() -> T {
panic!("requested allocation size overflowed")
}
impl Bump<1> {
/// Creates an arena with the default minimum alignment (1) and no
/// pre-allocated chunk.
pub fn new() -> Self {
Self::with_capacity(0)
}
/// Fallible version of [`Bump::new`].
pub fn try_new() -> Result<Self, AllocErr> {
Bump::try_with_capacity(0)
}
/// Creates an arena, pre-allocating room for `capacity` bytes.
/// Panics on allocation failure.
pub fn with_capacity(capacity: usize) -> Self {
Self::try_with_capacity(capacity).unwrap_or_else(|_| oom())
}
/// Fallible version of [`Bump::with_capacity`].
pub fn try_with_capacity(capacity: usize) -> Result<Self, AllocErr> {
Self::try_with_min_align_and_capacity(capacity)
}
}
impl<const MIN_ALIGN: usize> Bump<MIN_ALIGN> {
/// Creates an arena guaranteeing `MIN_ALIGN` for every allocation, without
/// allocating any chunk yet.
///
/// # Panics
///
/// Panics if `MIN_ALIGN` is not a power of two or exceeds the chunk alignment.
pub fn with_min_align() -> Self {
assert!(MIN_ALIGN.is_power_of_two(), "MIN_ALIGN must be a power of two; found {MIN_ALIGN}");
assert!(
MIN_ALIGN <= CHUNK_ALIGN,
"MIN_ALIGN may not be larger than {CHUNK_ALIGN}; found {MIN_ALIGN}"
);
Bump {
// Start on the shared sentinel; the first allocation creates a chunk.
current_chunk_footer: Cell::new(EMPTY_CHUNK.get()),
allocation_limit: Cell::new(None),
}
}
/// Like [`Bump::with_min_align`] but pre-allocates `capacity` bytes.
/// Panics on allocation failure (and on invalid `MIN_ALIGN`).
pub fn with_min_align_and_capacity(capacity: usize) -> Self {
Self::try_with_min_align_and_capacity(capacity).unwrap_or_else(|_| oom())
}
/// Fallible version of [`Bump::with_min_align_and_capacity`].
///
/// # Panics
///
/// Panics if `MIN_ALIGN` is not a power of two or exceeds the chunk alignment.
pub fn try_with_min_align_and_capacity(capacity: usize) -> Result<Self, AllocErr> {
assert!(MIN_ALIGN.is_power_of_two(), "MIN_ALIGN must be a power of two; found {MIN_ALIGN}");
assert!(
MIN_ALIGN <= CHUNK_ALIGN,
"MIN_ALIGN may not be larger than {CHUNK_ALIGN}; found {MIN_ALIGN}"
);
if capacity == 0 {
// Nothing to pre-allocate; defer chunk creation to first use.
return Ok(Bump {
current_chunk_footer: Cell::new(EMPTY_CHUNK.get()),
allocation_limit: Cell::new(None),
});
}
let layout = layout_from_size_align(capacity, MIN_ALIGN)?;
let chunk_footer = unsafe {
Self::new_chunk(
Self::new_chunk_memory_details(Some(capacity), layout).ok_or(AllocErr)?,
layout,
EMPTY_CHUNK.get(),
)
.ok_or(AllocErr)?
};
Ok(Bump {
current_chunk_footer: Cell::new(chunk_footer),
allocation_limit: Cell::new(None),
})
}
/// The minimum alignment guaranteed for all allocations from this arena.
#[inline]
pub fn min_align(&self) -> usize {
MIN_ALIGN
}
/// The currently configured allocation limit in bytes, if any.
pub fn allocation_limit(&self) -> Option<usize> {
self.allocation_limit.get()
}
/// Sets (or clears, with `None`) the soft limit on bytes this arena may
/// obtain for new chunks.
pub fn set_allocation_limit(&self, limit: Option<usize>) {
self.allocation_limit.set(limit);
}
// Bytes still available under the configured limit; `None` when no limit is
// set. NOTE(review): when already *over* the limit (it was lowered after
// allocating) this also yields `None`, which `chunk_fits_under_limit` treats
// as "no limit" — confirm this is the intended semantics.
fn allocation_limit_remaining(&self) -> Option<usize> {
self.allocation_limit.get().and_then(|allocation_limit| {
let allocated_bytes = self.allocated_bytes();
if allocated_bytes > allocation_limit {
None
} else {
// Equivalent to `allocation_limit - allocated_bytes` given the
// check above.
Some(usize::abs_diff(allocation_limit, allocated_bytes))
}
})
}
// Whether a prospective chunk's usable size fits within the remaining
// allocation budget; `None` means no limit is in force.
fn chunk_fits_under_limit(
    allocation_limit_remaining: Option<usize>,
    new_chunk_memory_details: NewChunkMemoryDetails,
) -> bool {
    match allocation_limit_remaining {
        None => true,
        Some(remaining) => remaining >= new_chunk_memory_details.new_size_without_footer,
    }
}
// Computes how large the next chunk should be so it can satisfy
// `requested_layout`: small chunks are padded so size + OVERHEAD is a power of
// two, large ones so it is a whole number of "pages".
fn new_chunk_memory_details(
new_size_without_footer: Option<usize>,
requested_layout: Layout,
) -> Option<NewChunkMemoryDetails> {
let align = CHUNK_ALIGN
.max(MIN_ALIGN)
.max(requested_layout.align());
let mut new_size_without_footer =
new_size_without_footer.unwrap_or(DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
// Ensure the chunk can hold the request even after alignment padding.
let requested_size =
round_up_to(requested_layout.size(), align).unwrap_or_else(allocation_size_overflow);
new_size_without_footer = new_size_without_footer.max(requested_size);
if new_size_without_footer < TYPICAL_PAGE_SIZE {
// Small chunk: make (size + overhead) a power of two.
new_size_without_footer =
(new_size_without_footer + OVERHEAD).next_power_of_two() - OVERHEAD;
} else {
// Large chunk: make (size + overhead) a multiple of the page size.
new_size_without_footer =
round_up_to(new_size_without_footer + OVERHEAD, TYPICAL_PAGE_SIZE)? - OVERHEAD;
}
debug_assert_eq!(align % CHUNK_ALIGN, 0);
debug_assert_eq!(new_size_without_footer % CHUNK_ALIGN, 0);
let size = new_size_without_footer
.checked_add(FOOTER_SIZE)
.unwrap_or_else(allocation_size_overflow);
Some(NewChunkMemoryDetails { new_size_without_footer, size, align })
}
// Allocates a fresh chunk from the system allocator and writes its footer at
// the end. Returns `None` if the layout is invalid or allocation fails.
//
// Safety: `new_chunk_memory_details` must be consistent with
// `requested_layout` (as produced by `new_chunk_memory_details`), and `prev`
// must head a valid chunk list.
unsafe fn new_chunk(
new_chunk_memory_details: NewChunkMemoryDetails,
requested_layout: Layout,
prev: NonNull<ChunkFooter>,
) -> Option<NonNull<ChunkFooter>> {
let NewChunkMemoryDetails { new_size_without_footer, align, size } =
new_chunk_memory_details;
let layout = layout_from_size_align(size, align).ok()?;
debug_assert!(size >= requested_layout.size());
let data = alloc(layout);
let data = NonNull::new(data)?;
// The footer lives at the very end of the allocation.
let footer_ptr = data.as_ptr().add(new_size_without_footer);
debug_assert_eq!((data.as_ptr() as usize) % align, 0);
debug_assert_eq!(footer_ptr as usize % CHUNK_ALIGN, 0);
let footer_ptr = footer_ptr as *mut ChunkFooter;
// The bump finger starts at the footer (allocation proceeds downward).
// Rounding down to MIN_ALIGN is a no-op here — the footer is
// CHUNK_ALIGN-aligned — but keeps the invariant explicit.
let ptr = round_mut_ptr_down_to(footer_ptr.cast::<u8>(), MIN_ALIGN);
debug_assert_eq!(ptr as usize % MIN_ALIGN, 0);
debug_assert!(
data.as_ptr() <= ptr,
"bump pointer {ptr:#p} should still be greater than or equal to the \
start of the bump chunk {data:#p}"
);
debug_assert_eq!((ptr as usize) - (data.as_ptr() as usize), new_size_without_footer);
let ptr = Cell::new(NonNull::new_unchecked(ptr));
// `allocated_bytes` is cumulative over the whole chunk list.
let allocated_bytes = prev.as_ref().allocated_bytes + new_size_without_footer;
ptr::write(
footer_ptr,
ChunkFooter { data, layout, prev: Cell::new(prev), ptr, allocated_bytes },
);
Some(NonNull::new_unchecked(footer_ptr))
}
/// Invalidates all existing allocations: frees every chunk except the most
/// recent one and rewinds that chunk's bump finger so its full capacity can
/// be reused. Contents are not dropped.
pub fn reset(&mut self) {
unsafe {
if self.current_chunk_footer.get().as_ref().is_empty() {
// Nothing was ever allocated.
return;
}
let mut cur_chunk = self.current_chunk_footer.get();
// Detach and free all older chunks.
let prev_chunk = cur_chunk.as_ref().prev.replace(EMPTY_CHUNK.get());
dealloc_chunk_list(prev_chunk);
debug_assert!(
is_pointer_aligned_to(cur_chunk.as_ptr(), MIN_ALIGN),
"bump pointer {cur_chunk:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}"
);
// Reset the finger to the footer: the whole chunk is free again.
cur_chunk.as_ref().ptr.set(cur_chunk.cast());
// Only this chunk's capacity counts now that older chunks are gone.
cur_chunk.as_mut().allocated_bytes = cur_chunk.as_ref().layout.size() - FOOTER_SIZE;
debug_assert!(
self.current_chunk_footer.get().as_ref().prev.get().as_ref().is_empty(),
"We should only have a single chunk"
);
debug_assert_eq!(
self.current_chunk_footer.get().as_ref().ptr.get(),
self.current_chunk_footer.get().cast(),
"Our chunk's bump finger should be reset to the start of its allocation"
);
}
}
/// Moves `val` into the arena and returns a mutable reference to it.
/// Panics on allocation failure.
#[inline(always)]
pub fn alloc<T>(&self, val: T) -> &mut T {
self.alloc_with(|| val)
}
/// Fallible version of [`Bump::alloc`].
#[inline(always)]
pub fn try_alloc<T>(&self, val: T) -> Result<&mut T, AllocErr> {
self.try_alloc_with(|| val)
}
/// Allocates space for a `T` and initializes it with `f()`.
/// Panics on allocation failure.
#[inline(always)]
pub fn alloc_with<F, T>(&self, f: F) -> &mut T
where
F: FnOnce() -> T,
{
// Encourages the compiler to construct `f()`'s result directly into the
// destination instead of on the stack first; a hint, not a guarantee.
#[inline(always)]
unsafe fn inner_writer<T, F>(ptr: *mut T, f: F)
where
F: FnOnce() -> T,
{
ptr::write(ptr, f());
}
let layout = Layout::new::<T>();
unsafe {
let p = self.alloc_layout(layout);
let p = p.as_ptr() as *mut T;
inner_writer(p, f);
&mut *p
}
}
/// Fallible version of [`Bump::alloc_with`].
#[inline(always)]
pub fn try_alloc_with<F, T>(&self, f: F) -> Result<&mut T, AllocErr>
where
F: FnOnce() -> T,
{
// Same in-place construction hint as in `alloc_with`.
#[inline(always)]
unsafe fn inner_writer<T, F>(ptr: *mut T, f: F)
where
F: FnOnce() -> T,
{
ptr::write(ptr, f());
}
let layout = Layout::new::<T>();
let p = self.try_alloc_layout(layout)?;
let p = p.as_ptr() as *mut T;
unsafe {
inner_writer(p, f);
Ok(&mut *p)
}
}
#[inline(always)]
pub fn alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, E>
where
F: FnOnce() -> Result<T, E>,
{
let rewind_footer = self.current_chunk_footer.get();
let rewind_ptr = unsafe { rewind_footer.as_ref() }.ptr.get();
let mut inner_result_ptr = NonNull::from(self.alloc_with(f));
match unsafe { inner_result_ptr.as_mut() } {
Ok(t) => Ok(unsafe {
&mut *(t as *mut _)
}),
Err(e) => unsafe {
if self.is_last_allocation(inner_result_ptr.cast()) {
let current_footer_p = self.current_chunk_footer.get();
let current_ptr = ¤t_footer_p.as_ref().ptr;
if current_footer_p == rewind_footer {
current_ptr.set(rewind_ptr);
} else {
current_ptr.set(current_footer_p.as_ref().data);
}
}
Err(ptr::read(e as *const _))
},
}
}
/// Like [`Bump::alloc_try_with`], but surfaces allocation failure through
/// [`AllocOrInitError::Alloc`] instead of panicking.
#[inline(always)]
pub fn try_alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, AllocOrInitError<E>>
where
    F: FnOnce() -> Result<T, E>,
{
    // Snapshot the bump finger so the `Result` slot can be reclaimed on `Err`.
    let rewind_footer = self.current_chunk_footer.get();
    let rewind_ptr = unsafe { rewind_footer.as_ref() }.ptr.get();
    let mut inner_result_ptr = NonNull::from(self.try_alloc_with(f)?);
    match unsafe { inner_result_ptr.as_mut() } {
        Ok(t) => Ok(unsafe {
            // Detach the borrow from the local `match` so it can live as long
            // as the arena borrow; the value resides in arena memory.
            &mut *(t as *mut _)
        }),
        Err(e) => unsafe {
            // Rewind only if nothing was allocated after the `Result` slot.
            if self.is_last_allocation(inner_result_ptr.cast()) {
                let current_footer_p = self.current_chunk_footer.get();
                // BUGFIX: `&current_footer_p` had been corrupted to
                // `¤t_footer_p` (the `&curren` prefix decoded as an HTML
                // entity), which is not valid Rust.
                let current_ptr = &current_footer_p.as_ref().ptr;
                if current_footer_p == rewind_footer {
                    // Same chunk as before the call: restore the saved finger.
                    current_ptr.set(rewind_ptr);
                } else {
                    // The slot forced a brand-new chunk: empty that chunk.
                    current_ptr.set(current_footer_p.as_ref().data);
                }
            }
            // Move the error out of the (now reclaimed) arena slot by value.
            Err(AllocOrInitError::Init(ptr::read(e as *const _)))
        },
    }
}
/// Copies `src` into the arena (single memcpy) and returns the new slice.
/// Panics on allocation failure.
#[inline(always)]
pub fn alloc_slice_copy<T>(&self, src: &[T]) -> &mut [T]
where
T: Copy,
{
let layout = Layout::for_value(src);
let dst = self.alloc_layout(layout).cast::<T>();
unsafe {
ptr::copy_nonoverlapping(src.as_ptr(), dst.as_ptr(), src.len());
slice::from_raw_parts_mut(dst.as_ptr(), src.len())
}
}
/// Fallible version of [`Bump::alloc_slice_copy`].
#[inline(always)]
pub fn try_alloc_slice_copy<T>(&self, src: &[T]) -> Result<&mut [T], AllocErr>
where
T: Copy,
{
let layout = Layout::for_value(src);
let dst = self.try_alloc_layout(layout)?.cast::<T>();
let result = unsafe {
core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_ptr(), src.len());
slice::from_raw_parts_mut(dst.as_ptr(), src.len())
};
Ok(result)
}
/// Clones `src` element-by-element into the arena.
/// Panics on allocation failure.
// NOTE(review): if a `clone()` panics mid-loop, already-written elements are
// never dropped — the arena runs no destructors.
#[inline(always)]
pub fn alloc_slice_clone<T>(&self, src: &[T]) -> &mut [T]
where
T: Clone,
{
let layout = Layout::for_value(src);
let dst = self.alloc_layout(layout).cast::<T>();
unsafe {
for (i, val) in src.iter().cloned().enumerate() {
ptr::write(dst.as_ptr().add(i), val);
}
slice::from_raw_parts_mut(dst.as_ptr(), src.len())
}
}
/// Fallible version of [`Bump::alloc_slice_clone`].
#[inline(always)]
pub fn try_alloc_slice_clone<T>(&self, src: &[T]) -> Result<&mut [T], AllocErr>
where
T: Clone,
{
let layout = Layout::for_value(src);
let dst = self.try_alloc_layout(layout)?.cast::<T>();
unsafe {
for (i, val) in src.iter().cloned().enumerate() {
ptr::write(dst.as_ptr().add(i), val);
}
Ok(slice::from_raw_parts_mut(dst.as_ptr(), src.len()))
}
}
/// Copies `src` into the arena and returns it as a mutable `str`.
/// Panics on allocation failure.
#[inline(always)]
pub fn alloc_str(&self, src: &str) -> &mut str {
let buffer = self.alloc_slice_copy(src.as_bytes());
unsafe {
// SAFETY: `buffer` is a byte-for-byte copy of a valid `&str`.
str::from_utf8_unchecked_mut(buffer)
}
}
/// Fallible version of [`Bump::alloc_str`].
#[inline(always)]
pub fn try_alloc_str(&self, src: &str) -> Result<&mut str, AllocErr> {
let buffer = self.try_alloc_slice_copy(src.as_bytes())?;
unsafe {
// SAFETY: `buffer` is a byte-for-byte copy of a valid `&str`.
Ok(str::from_utf8_unchecked_mut(buffer))
}
}
/// Allocates a slice of length `len`, initializing element `i` with `f(i)`.
/// Panics on allocation failure or if the array layout overflows.
#[inline(always)]
pub fn alloc_slice_fill_with<T, F>(&self, len: usize, mut f: F) -> &mut [T]
where
F: FnMut(usize) -> T,
{
let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom());
let dst = self.alloc_layout(layout).cast::<T>();
unsafe {
for i in 0..len {
ptr::write(dst.as_ptr().add(i), f(i));
}
let result = slice::from_raw_parts_mut(dst.as_ptr(), len);
debug_assert_eq!(Layout::for_value(result), layout);
result
}
}
/// Like [`Bump::alloc_slice_fill_with`] with a fallible initializer: on the
/// first `Err` the slice's memory is handed back (best effort) and the error
/// is returned.
// NOTE(review): elements written before the failure are not dropped.
#[inline(always)]
pub fn alloc_slice_try_fill_with<T, F, E>(&self, len: usize, mut f: F) -> Result<&mut [T], E>
where
F: FnMut(usize) -> Result<T, E>,
{
let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom());
let base_ptr = self.alloc_layout(layout);
let dst = base_ptr.cast::<T>();
unsafe {
for i in 0..len {
match f(i) {
Ok(el) => ptr::write(dst.as_ptr().add(i), el),
Err(e) => {
// Reclaims the slice only while it is the newest allocation.
self.dealloc(base_ptr, layout);
return Err(e);
}
}
}
let result = slice::from_raw_parts_mut(dst.as_ptr(), len);
debug_assert_eq!(Layout::for_value(result), layout);
Ok(result)
}
}
/// Fallible (allocation-wise) version of [`Bump::alloc_slice_fill_with`].
#[inline(always)]
pub fn try_alloc_slice_fill_with<T, F>(
&self,
len: usize,
mut f: F,
) -> Result<&mut [T], AllocErr>
where
F: FnMut(usize) -> T,
{
let layout = Layout::array::<T>(len).map_err(|_| AllocErr)?;
let dst = self.try_alloc_layout(layout)?.cast::<T>();
unsafe {
for i in 0..len {
ptr::write(dst.as_ptr().add(i), f(i));
}
let result = slice::from_raw_parts_mut(dst.as_ptr(), len);
debug_assert_eq!(Layout::for_value(result), layout);
Ok(result)
}
}
/// Allocates a slice of `len` copies of `value`. Panics on allocation failure.
#[inline(always)]
pub fn alloc_slice_fill_copy<T: Copy>(&self, len: usize, value: T) -> &mut [T] {
self.alloc_slice_fill_with(len, |_| value)
}
/// Fallible version of [`Bump::alloc_slice_fill_copy`].
#[inline(always)]
pub fn try_alloc_slice_fill_copy<T: Copy>(
&self,
len: usize,
value: T,
) -> Result<&mut [T], AllocErr> {
self.try_alloc_slice_fill_with(len, |_| value)
}
/// Allocates a slice of `len` clones of `value`. Panics on allocation failure.
#[inline(always)]
pub fn alloc_slice_fill_clone<T: Clone>(&self, len: usize, value: &T) -> &mut [T] {
self.alloc_slice_fill_with(len, |_| value.clone())
}
/// Fallible version of [`Bump::alloc_slice_fill_clone`].
#[inline(always)]
pub fn try_alloc_slice_fill_clone<T: Clone>(
&self,
len: usize,
value: &T,
) -> Result<&mut [T], AllocErr> {
self.try_alloc_slice_fill_with(len, |_| value.clone())
}
/// Collects an exact-size iterator into an arena slice.
///
/// # Panics
///
/// Panics on allocation failure, or if the iterator yields fewer elements
/// than its `len()` promised.
#[inline(always)]
pub fn alloc_slice_fill_iter<T, I>(&self, iter: I) -> &mut [T]
where
I: IntoIterator<Item = T>,
I::IntoIter: ExactSizeIterator,
{
let mut iter = iter.into_iter();
self.alloc_slice_fill_with(iter.len(), |_| {
iter.next().expect("Iterator supplied too few elements")
})
}
/// Like [`Bump::alloc_slice_fill_iter`] for iterators of `Result`s; returns
/// the first `Err` encountered.
#[inline(always)]
pub fn alloc_slice_try_fill_iter<T, I, E>(&self, iter: I) -> Result<&mut [T], E>
where
I: IntoIterator<Item = Result<T, E>>,
I::IntoIter: ExactSizeIterator,
{
let mut iter = iter.into_iter();
self.alloc_slice_try_fill_with(iter.len(), |_| {
iter.next().expect("Iterator supplied too few elements")
})
}
/// Fallible (allocation-wise) version of [`Bump::alloc_slice_fill_iter`].
#[inline(always)]
pub fn try_alloc_slice_fill_iter<T, I>(&self, iter: I) -> Result<&mut [T], AllocErr>
where
I: IntoIterator<Item = T>,
I::IntoIter: ExactSizeIterator,
{
let mut iter = iter.into_iter();
self.try_alloc_slice_fill_with(iter.len(), |_| {
iter.next().expect("Iterator supplied too few elements")
})
}
/// Allocates a slice of `len` default-constructed `T`s.
/// Panics on allocation failure.
#[inline(always)]
pub fn alloc_slice_fill_default<T: Default>(&self, len: usize) -> &mut [T] {
self.alloc_slice_fill_with(len, |_| T::default())
}
/// Fallible version of [`Bump::alloc_slice_fill_default`].
#[inline(always)]
pub fn try_alloc_slice_fill_default<T: Default>(
&self,
len: usize,
) -> Result<&mut [T], AllocErr> {
self.try_alloc_slice_fill_with(len, |_| T::default())
}
/// Allocates raw memory for `layout`; panics on allocation failure.
#[inline(always)]
pub fn alloc_layout(&self, layout: Layout) -> NonNull<u8> {
    match self.try_alloc_layout(layout) {
        Ok(p) => p,
        Err(_) => oom(),
    }
}
/// Allocates raw memory for `layout`, trying the current chunk first and
/// falling back to allocating a new chunk on the slow path.
#[inline(always)]
pub fn try_alloc_layout(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
    match self.try_alloc_layout_fast(layout) {
        Some(p) => Ok(p),
        None => self.alloc_layout_slow(layout).ok_or(AllocErr),
    }
}
// Fast path: carve the allocation out of the current chunk by moving the bump
// finger downward. Returns `None` when the chunk lacks capacity (the empty
// sentinel's capacity is zero, so it always falls through to the slow path).
#[inline(always)]
fn try_alloc_layout_fast(&self, layout: Layout) -> Option<NonNull<u8>> {
unsafe {
let footer_ptr = self.current_chunk_footer.get();
let footer = footer_ptr.as_ref();
let ptr = footer.ptr.get().as_ptr();
let start = footer.data.as_ptr();
debug_assert!(
start <= ptr,
"start pointer {start:#p} should be less than or equal to bump pointer {ptr:#p}"
);
debug_assert!(
ptr <= footer_ptr.cast::<u8>().as_ptr(),
"bump pointer {ptr:#p} should be less than or equal to footer pointer {footer_ptr:#p}"
);
debug_assert!(
is_pointer_aligned_to(ptr, MIN_ALIGN),
"bump pointer {ptr:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}"
);
// In every branch the size is rounded up so that the finger stays
// MIN_ALIGN-aligned after subtracting it.
let aligned_ptr = match layout.align().cmp(&MIN_ALIGN) {
Ordering::Less => {
// Request is less strict than our guarantee: padding the size
// to MIN_ALIGN is enough; the finger itself is already aligned.
let aligned_size = round_up_to(layout.size(), MIN_ALIGN)?;
let capacity = (ptr as usize) - (start as usize);
if aligned_size > capacity {
return None;
}
ptr.wrapping_sub(aligned_size)
}
Ordering::Equal => {
// A valid `Layout` has size <= isize::MAX, so rounding up to
// its own alignment cannot overflow.
let aligned_size = round_up_to_unchecked(layout.size(), layout.align());
let capacity = (ptr as usize) - (start as usize);
if aligned_size > capacity {
return None;
}
ptr.wrapping_sub(aligned_size)
}
Ordering::Greater => {
// Stricter alignment: first align the finger itself down, then
// carve the padded size below it.
let aligned_size = round_up_to_unchecked(layout.size(), layout.align());
let aligned_ptr = round_mut_ptr_down_to(ptr, layout.align());
let capacity = (aligned_ptr as usize).wrapping_sub(start as usize);
if aligned_ptr < start || aligned_size > capacity {
return None;
}
aligned_ptr.wrapping_sub(aligned_size)
}
};
debug_assert!(
is_pointer_aligned_to(aligned_ptr, layout.align()),
"pointer {aligned_ptr:#p} should be aligned to layout alignment of {:#}",
layout.align()
);
debug_assert!(
is_pointer_aligned_to(aligned_ptr, MIN_ALIGN),
"pointer {aligned_ptr:#p} should be aligned to minimum alignment of {:#}",
MIN_ALIGN
);
debug_assert!(
start <= aligned_ptr && aligned_ptr <= ptr,
"pointer {aligned_ptr:#p} should be in range {start:#p}..{ptr:#p}"
);
debug_assert!(!aligned_ptr.is_null());
let aligned_ptr = NonNull::new_unchecked(aligned_ptr);
// Commit: the new allocation occupies [aligned_ptr, old finger).
footer.ptr.set(aligned_ptr);
Some(aligned_ptr)
}
}
/// Remaining free bytes in the current chunk, i.e. how much more can be
/// allocated before a new chunk is required.
pub fn chunk_capacity(&self) -> usize {
let current_footer = self.current_chunk_footer.get();
let current_footer = unsafe { current_footer.as_ref() };
// Allocation bumps downward, so free space is finger minus chunk start.
current_footer.ptr.get().as_ptr() as usize - current_footer.data.as_ptr() as usize
}
// Slow path: allocate a new chunk and retry the fast path inside it. Tries
// progressively smaller candidate sizes (starting at double the previous
// chunk) until one both fits the allocation limit and is granted by the
// system allocator.
#[inline(never)]
#[cold]
fn alloc_layout_slow(&self, layout: Layout) -> Option<NonNull<u8>> {
unsafe {
let allocation_limit_remaining = self.allocation_limit_remaining();
let current_footer = self.current_chunk_footer.get();
let current_layout = current_footer.as_ref().layout;
// Want at least the default chunk size and at least the request.
let min_new_chunk_size = layout.size().max(DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
let mut base_size =
(current_layout.size() - FOOTER_SIZE).checked_mul(2)?.max(min_new_chunk_size);
// Candidate generator: halve after each failed attempt, normally down
// to `min_new_chunk_size`; tiny allocation limits on a fresh arena may
// bypass that floor so small requests can still succeed.
let chunk_memory_details = iter::from_fn(|| {
let bypass_min_chunk_size_for_small_limits = matches!(self.allocation_limit(), Some(limit) if layout.size() < limit
&& base_size >= layout.size()
&& limit < DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER
&& self.allocated_bytes() == 0);
if base_size >= min_new_chunk_size || bypass_min_chunk_size_for_small_limits {
let size = base_size;
base_size /= 2;
Self::new_chunk_memory_details(Some(size), layout)
} else {
None
}
});
// First candidate that fits the limit AND is actually allocatable.
let new_footer = chunk_memory_details
.filter_map(|chunk_memory_details| {
if Self::chunk_fits_under_limit(
allocation_limit_remaining,
chunk_memory_details,
) {
Self::new_chunk(chunk_memory_details, layout, current_footer)
} else {
None
}
})
.next()?;
debug_assert_eq!(new_footer.as_ref().data.as_ptr() as usize % layout.align(), 0);
self.current_chunk_footer.set(new_footer);
// The brand-new chunk is guaranteed large enough, so this succeeds.
let ptr = self.try_alloc_layout_fast(layout);
debug_assert!(ptr.is_some());
ptr
}
}
/// Iterates over each chunk's occupied bytes, newest chunk first. Takes
/// `&mut self` so no allocation can move the bump fingers mid-iteration.
pub fn iter_allocated_chunks(&mut self) -> ChunkIter<'_, MIN_ALIGN> {
let raw = unsafe { self.iter_allocated_chunks_raw() };
ChunkIter { raw, bump: PhantomData }
}
/// Raw `(pointer, length)` variant of [`Bump::iter_allocated_chunks`].
///
/// # Safety
///
/// The caller must not allocate from this arena while the iterator is in use
/// (the reported regions would go stale), and must only read the returned
/// regions within their reported lengths.
pub unsafe fn iter_allocated_chunks_raw(&self) -> ChunkRawIter<'_, MIN_ALIGN> {
ChunkRawIter { footer: self.current_chunk_footer.get(), bump: PhantomData }
}
/// Total usable bytes obtained from the system allocator across all chunks
/// (excluding footers) — chunk capacity, not bytes actually handed out.
pub fn allocated_bytes(&self) -> usize {
let footer = self.current_chunk_footer.get();
unsafe { footer.as_ref().allocated_bytes }
}
/// Like [`Bump::allocated_bytes`], plus one footer's size per chunk.
pub fn allocated_bytes_including_metadata(&self) -> usize {
let metadata_size =
unsafe { self.iter_allocated_chunks_raw().count() * mem::size_of::<ChunkFooter>() };
self.allocated_bytes() + metadata_size
}
// Whether `ptr` is the most recent allocation, i.e. sits exactly at the
// current chunk's bump finger.
#[inline]
unsafe fn is_last_allocation(&self, ptr: NonNull<u8>) -> bool {
let footer = self.current_chunk_footer.get();
let footer = footer.as_ref();
footer.ptr.get() == ptr
}
// Best-effort free: only the most recent allocation can be reclaimed, by
// moving the bump finger back up over it. Anything else is a no-op.
#[inline]
unsafe fn dealloc(&self, ptr: NonNull<u8>, layout: Layout) {
if self.is_last_allocation(ptr) {
let ptr = self.current_chunk_footer.get().as_ref().ptr.get();
let ptr = ptr.as_ptr().add(layout.size());
// Keep the finger MIN_ALIGN-aligned after skipping the freed bytes.
let ptr = round_mut_ptr_up_to_unchecked(ptr, MIN_ALIGN);
debug_assert!(
is_pointer_aligned_to(ptr, MIN_ALIGN),
"bump pointer {ptr:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}"
);
let ptr = NonNull::new_unchecked(ptr);
self.current_chunk_footer.get().as_ref().ptr.set(ptr);
}
}
// Shrinks `ptr`'s allocation to `new_layout`, reclaiming tail bytes when
// profitable. May return the pointer unchanged — a bump arena never *needs*
// to move a block on shrink.
#[inline]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<u8>, AllocErr> {
// Stricter new alignment: reuse the block if it happens to satisfy it,
// otherwise allocate fresh and copy.
if old_layout.align() < new_layout.align() {
return if is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()) {
Ok(ptr)
} else {
let new_ptr = self.try_alloc_layout(new_layout)?;
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_layout.size());
Ok(new_ptr)
};
}
debug_assert!(is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()));
let old_size = old_layout.size();
let new_size = new_layout.size();
// Bytes we could give back while keeping the block suitably aligned.
let delta = round_down_to(old_size - new_size, new_layout.align().max(MIN_ALIGN));
// Only bother when this is the newest allocation and at least half the
// block is reclaimed; the surviving bytes shift up by `delta`.
if self.is_last_allocation(ptr)
&& delta >= (old_size + 1) / 2
{
let footer = self.current_chunk_footer.get();
let footer = footer.as_ref();
let new_ptr = NonNull::new_unchecked(footer.ptr.get().as_ptr().add(delta));
debug_assert!(
is_pointer_aligned_to(new_ptr.as_ptr(), MIN_ALIGN),
"bump pointer {new_ptr:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}"
);
footer.ptr.set(new_ptr);
// Non-overlap holds: new_size <= old_size - delta <= delta, and the
// two ranges are exactly `delta` bytes apart.
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_size);
return Ok(new_ptr);
}
Ok(ptr)
}
// Grows `ptr`'s allocation to `new_layout`. When `ptr` is the newest
// allocation and the old alignment suffices, grows in place by bumping
// further down and sliding the contents; otherwise allocates fresh and copies.
#[inline]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<u8>, AllocErr> {
let old_size = old_layout.size();
let new_size = new_layout.size();
// Pad so the bump finger stays MIN_ALIGN-aligned on the in-place path.
let new_size = round_up_to(new_size, MIN_ALIGN).ok_or(AllocErr)?;
let align_is_compatible = old_layout.align() >= new_layout.align();
if align_is_compatible && self.is_last_allocation(ptr) {
// Allocate only the extra bytes, which land directly below the old
// block, then slide the old contents down to the new start.
let delta = new_size - old_size;
if let Some(p) =
self.try_alloc_layout_fast(layout_from_size_align(delta, old_layout.align())?)
{
// Regions overlap, hence `ptr::copy` (memmove); copy exactly
// `old_size` bytes — more would read past the old allocation.
ptr::copy(ptr.as_ptr(), p.as_ptr(), old_size);
return Ok(p);
}
}
// Fallback: fresh allocation (possibly in a new chunk) plus copy.
let new_ptr = self.try_alloc_layout(new_layout)?;
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_size);
Ok(new_ptr)
}
}
/// Iterator over each chunk's occupied bytes, newest chunk first. Items are
/// `MaybeUninit<u8>` because alignment padding between allocations is never
/// written.
#[derive(Debug)]
pub struct ChunkIter<'a, const MIN_ALIGN: usize = 1> {
raw: ChunkRawIter<'a, MIN_ALIGN>,
// Mutable borrow of the arena prevents allocation (and thus finger
// movement) for the iterator's lifetime.
bump: PhantomData<&'a mut Bump<MIN_ALIGN>>,
}
impl<'a, const MIN_ALIGN: usize> Iterator for ChunkIter<'a, MIN_ALIGN> {
type Item = &'a [mem::MaybeUninit<u8>];
fn next(&mut self) -> Option<Self::Item> {
unsafe {
let (ptr, len) = self.raw.next()?;
// SAFETY: the raw iterator reports in-bounds (ptr, len) pairs and
// the `&'a mut` borrow keeps the memory untouched for `'a`.
let slice = slice::from_raw_parts(ptr as *const mem::MaybeUninit<u8>, len);
Some(slice)
}
}
}
impl<'a, const MIN_ALIGN: usize> iter::FusedIterator for ChunkIter<'a, MIN_ALIGN> {}
/// Low-level chunk iterator yielding `(pointer, length)` pairs for each
/// chunk's occupied bytes, newest chunk first. Obtained from
/// [`Bump::iter_allocated_chunks_raw`]; see that method for caller obligations.
#[derive(Debug)]
pub struct ChunkRawIter<'a, const MIN_ALIGN: usize = 1> {
footer: NonNull<ChunkFooter>,
bump: PhantomData<&'a Bump<MIN_ALIGN>>,
}
impl<const MIN_ALIGN: usize> Iterator for ChunkRawIter<'_, MIN_ALIGN> {
type Item = (*mut u8, usize);
fn next(&mut self) -> Option<(*mut u8, usize)> {
unsafe {
let foot = self.footer.as_ref();
if foot.is_empty() {
// Reached the sentinel: end of the chunk list.
return None;
}
let (ptr, len) = foot.as_raw_parts();
// Advance to the next-older chunk.
self.footer = foot.prev.get();
Some((ptr as *mut u8, len))
}
}
}
impl<const MIN_ALIGN: usize> iter::FusedIterator for ChunkRawIter<'_, MIN_ALIGN> {}
// Shared cold panic path used by the infallible APIs when allocation fails.
#[inline(never)]
#[cold]
fn oom() -> ! {
panic!("out of memory")
}
// Adapter implementing this crate's low-level `Alloc` trait on top of `Bump`.
unsafe impl<'a, const MIN_ALIGN: usize> bumpalo_alloc::Alloc for &'a Bump<MIN_ALIGN> {
    #[inline(always)]
    unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
        self.try_alloc_layout(layout)
    }
    // Best-effort: only the most recent allocation is actually reclaimed.
    #[inline]
    unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
        Bump::<MIN_ALIGN>::dealloc(self, ptr, layout);
    }
    // Resizes the block at `ptr` (previously allocated with `layout`) to
    // `new_size` bytes, keeping the old alignment and the common prefix of
    // the contents.
    #[inline]
    unsafe fn realloc(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
        new_size: usize,
    ) -> Result<NonNull<u8>, AllocErr> {
        let old_size = layout.size();
        let new_layout = layout_from_size_align(new_size, layout.align())?;
        if old_size == 0 {
            // BUGFIX: this previously allocated with the *old* zero-size
            // `layout`, handing back a capacity-0 block even though the
            // caller asked for `new_size` bytes (a heap overflow once the
            // caller writes to it). There is nothing to copy or free, so a
            // fresh allocation of the new layout is all that is needed.
            return self.try_alloc_layout(new_layout);
        }
        if new_size <= old_size {
            Bump::shrink(self, ptr, layout, new_layout)
        } else {
            Bump::grow(self, ptr, layout, new_layout)
        }
    }
}
// Stub compiled only under `cfg(doctest)`; keeps that configuration exercised.
#[cfg(doctest)]
fn _doctest_only() {}
// `allocator_api2::Allocator` adapter. The returned fat `NonNull<[u8]>`
// pointers report the *requested* size, even if the arena reserved more.
unsafe impl<'a, const MIN_ALIGN: usize> Allocator for &'a Bump<MIN_ALIGN> {
#[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.try_alloc_layout(layout)
.map(|p| unsafe {
NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), layout.size()))
})
.map_err(|_| AllocError)
}
// Best-effort: only the most recent allocation is actually reclaimed.
#[inline]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
Bump::<MIN_ALIGN>::dealloc(self, ptr, layout)
}
#[inline]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
Bump::<MIN_ALIGN>::shrink(self, ptr, old_layout, new_layout)
.map(|p| unsafe {
NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
})
.map_err(|_| AllocError)
}
#[inline]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
Bump::<MIN_ALIGN>::grow(self, ptr, old_layout, new_layout)
.map(|p| unsafe {
NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
})
.map_err(|_| AllocError)
}
#[inline]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
let mut ptr = self.grow(ptr, old_layout, new_layout)?;
// Zero only the newly exposed tail; the prefix holds the old contents.
ptr.as_mut()[old_layout.size()..].fill(0);
Ok(ptr)
}
}
#[cfg(test)]
mod tests {
use super::*;
// Pin the footer's size so layout changes are caught; `Layout` is two words.
#[cfg(target_pointer_width = "64")]
#[test]
fn chunk_footer_is_six_words_on_64_bit() {
assert_eq!(mem::size_of::<ChunkFooter>(), mem::size_of::<usize>() * 6);
}
#[cfg(target_pointer_width = "32")]
#[test]
fn chunk_footer_is_eight_words_on_32_bit() {
assert_eq!(mem::size_of::<ChunkFooter>(), mem::size_of::<usize>() * 8);
}
// `allocated_bytes` reports chunk capacity, not bytes handed out, and
// `reset` keeps the remaining chunk's capacity.
#[test]
fn allocated_bytes() {
let mut b = Bump::new();
assert_eq!(b.allocated_bytes(), 0);
assert_eq!(b.allocated_bytes_including_metadata(), 0);
b.alloc(0u8);
assert_eq!(b.allocated_bytes(), DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
assert_eq!(
b.allocated_bytes_including_metadata(),
DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER + FOOTER_SIZE
);
b.reset();
assert_eq!(b.allocated_bytes(), DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
assert_eq!(
b.allocated_bytes_including_metadata(),
DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER + FOOTER_SIZE
);
}
// Exercises the shrink / in-place grow / new-chunk paths of `realloc`
// (MIN_ALIGN = 1 throughout).
#[test]
fn test_realloc() {
use crate::bumpalo_alloc::Alloc;
unsafe {
const CAPACITY: usize = DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER;
let mut b = Bump::<1>::with_min_align_and_capacity(CAPACITY);
// Shrink 100 -> 51: less than half reclaimed, so no move.
let layout = Layout::from_size_align(100, 1).unwrap();
let p = b.alloc_layout(layout);
let q = (&b).realloc(p, layout, 51).unwrap();
assert_eq!(p, q);
b.reset();
// Shrink 100 -> 50: at least half reclaimed, contents move up.
let layout = Layout::from_size_align(100, 1).unwrap();
let p = b.alloc_layout(layout);
let q = (&b).realloc(p, layout, 50).unwrap();
assert!(p != q);
b.reset();
// Grow 10 -> 11 in place: the block extends one byte downward.
let layout = Layout::from_size_align(10, 1).unwrap();
let p = b.alloc_layout(layout);
let q = (&b).realloc(p, layout, 11).unwrap();
assert_eq!(q.as_ptr() as usize, p.as_ptr() as usize - 1);
b.reset();
// Grow past the chunk's capacity: must land in a brand-new chunk.
let layout = Layout::from_size_align(1, 1).unwrap();
let p = b.alloc_layout(layout);
let q = (&b).realloc(p, layout, CAPACITY + 1).unwrap();
assert_ne!(q.as_ptr() as usize, p.as_ptr() as usize - CAPACITY);
b.reset();
// Grow when not the last allocation: no in-place path available.
let layout = Layout::from_size_align(1, 1).unwrap();
let p = b.alloc_layout(layout);
let _ = b.alloc_layout(layout);
let q = (&b).realloc(p, layout, 2).unwrap();
assert!(q.as_ptr() as usize != p.as_ptr() as usize - 1);
b.reset();
}
}
// Regression test: a growing realloc must copy only `old_size` bytes;
// copying the new size would read past the original allocation.
#[test]
fn invalid_read() {
use crate::bumpalo_alloc::Alloc;
let mut b = &Bump::new();
unsafe {
let l1 = Layout::from_size_align(12000, 4).unwrap();
let p1 = Alloc::alloc(&mut b, l1).unwrap();
let l2 = Layout::from_size_align(1000, 4).unwrap();
Alloc::alloc(&mut b, l2).unwrap();
let p1 = b.realloc(p1, l1, 24000).unwrap();
let l3 = Layout::from_size_align(24000, 4).unwrap();
b.realloc(p1, l3, 48000).unwrap();
}
}
}