//! `wee_alloc`: The Wasm-Enabled, Elfin Allocator.
//!
//! A small allocator implementation for targets where code size is a larger
//! concern than allocation performance, such as `wasm32`.
#![deny(missing_docs)]
#![cfg_attr(not(feature = "use_std_for_test_debugging"), no_std)]
#![feature(alloc, allocator_api, core_intrinsics, global_allocator)]
#![cfg_attr(target_arch = "wasm32", feature(link_llvm_intrinsics))]
extern crate alloc;
#[cfg(feature = "use_std_for_test_debugging")]
extern crate core;
#[cfg(all(unix, not(target_arch = "wasm32")))]
extern crate libc;
#[cfg(any(target_os = "linux", target_os = "macos"))]
extern crate mmap_alloc;
#[cfg(windows)]
extern crate winapi;
extern crate memory_units;
#[macro_use]
mod extra_assert;
mod const_init;
#[cfg(all(not(unix), not(windows), not(target_arch = "wasm32")))]
compile_error! {
"There is no `wee_alloc` implementation for this target; want to send a pull request? :)"
}
#[cfg(target_arch = "wasm32")]
mod imp_wasm32;
#[cfg(target_arch = "wasm32")]
use imp_wasm32 as imp;
#[cfg(all(unix, not(target_arch = "wasm32")))]
mod imp_unix;
#[cfg(all(unix, not(target_arch = "wasm32")))]
use imp_unix as imp;
#[cfg(windows)]
mod imp_windows;
#[cfg(windows)]
use imp_windows as imp;
#[cfg(feature = "size_classes")]
mod size_classes;
use alloc::heap::{Alloc, AllocErr, Layout};
use const_init::ConstInit;
use core::cmp;
use core::isize;
use core::marker::Sync;
use core::mem;
use core::ptr;
use memory_units::{size_of, Bytes, Pages, RoundUpTo, Words};
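/// The WebAssembly page size, in bytes.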
pub const PAGE_SIZE: Bytes = Bytes(65536);
extra_only! {
fn assert_is_word_aligned<T>(ptr: *mut T) {
assert_aligned_to(ptr, size_of::<usize>());
}
}
extra_only! {
fn assert_aligned_to<T>(ptr: *mut T, align: Bytes) {
extra_assert_eq!(
ptr as usize % align.0,
0,
"{:p} is not aligned to {}",
ptr,
align.0
);
}
}
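// Every heap cell is a `CellHeader` immediately followed by the cell's data.
// Cells form an intrusive, address-ordered doubly-linked list: `next_cell_raw`
// points at the next adjacent cell's header (with tag bits packed into its low
// bits), so a cell's size is implicitly the distance between its data and the
// next header (see `CellHeader::size`). Free cells additionally reuse their
// first data word as a `next_free_raw` link in a singly-linked free list.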
#[repr(C)]
struct CellHeader {
next_cell_raw: ptr::NonNull<CellHeader>,
prev_cell_raw: *mut CellHeader,
}
#[repr(C)]
struct AllocatedCell {
header: CellHeader,
}
#[test]
fn allocated_cell_layout() {
assert_eq!(
size_of::<CellHeader>(),
size_of::<AllocatedCell>(),
"Safety and correctness depends on AllocatedCell being the same as CellHeader"
);
assert_eq!(
mem::align_of::<CellHeader>(),
mem::align_of::<AllocatedCell>(),
"Safety and correctness depends on AllocatedCell and CellHeader having the same alignment"
);
}
#[repr(C)]
struct FreeCell {
header: CellHeader,
next_free_raw: *mut FreeCell,
}
#[test]
fn free_cell_layout() {
assert_eq!(
size_of::<CellHeader>() + Words(1),
size_of::<FreeCell>(),
"Safety and correctness depends on FreeCell being only one word larger than CellHeader"
);
assert_eq!(
mem::align_of::<CellHeader>(),
mem::align_of::<FreeCell>(),
"Safety and correctness depends on FreeCell and CellHeader having the same alignment"
);
}
#[cfg(feature = "extra_assertions")]
impl CellHeader {
#[cfg(feature = "size_classes")]
const SIZE_CLASS_FREE_PATTERN: u8 = 0x35;
const LARGE_FREE_PATTERN: u8 = 0x57;
}
#[test]
fn can_use_low_bits() {
assert!(
mem::align_of::<*mut u8>() >= 0b100,
"we rely on being able to stick tags into the lowest two bits"
);
}
impl CellHeader {
// The low two bits of `next_cell_raw` are a tag: `IS_ALLOCATED` records
// whether this cell is currently allocated, and `NEXT_CELL_IS_INVALID`
// records that the (masked) pointer is the end of this cell's memory
// region rather than a real cell header: it is still used to compute this
// cell's size, but must never be dereferenced. `can_use_low_bits` above
// checks that pointer alignment actually leaves these two bits free.
const IS_ALLOCATED: usize = 0b01;
const NEXT_CELL_IS_INVALID: usize = 0b10;
const MASK: usize = !0b11;
fn is_allocated(&self) -> bool {
self.next_cell_raw.as_ptr() as usize & Self::IS_ALLOCATED != 0
}
fn is_free(&self) -> bool {
!self.is_allocated()
}
fn set_allocated(&mut self) {
let next = self.next_cell_raw.as_ptr() as usize;
let next = next | Self::IS_ALLOCATED;
extra_assert!(next != 0);
self.next_cell_raw = unsafe { ptr::NonNull::new_unchecked(next as *mut CellHeader) };
}
fn set_free(&mut self) {
let next = self.next_cell_raw.as_ptr() as usize;
let next = next & !Self::IS_ALLOCATED;
extra_assert!(next != 0);
self.next_cell_raw = unsafe { ptr::NonNull::new_unchecked(next as *mut CellHeader) };
}
fn next_cell_is_invalid(&self) -> bool {
self.next_cell_raw.as_ptr() as usize & Self::NEXT_CELL_IS_INVALID != 0
}
fn next_cell_unchecked(&self) -> *mut CellHeader {
let ptr = self.next_cell_raw.as_ptr() as usize;
let ptr = ptr & Self::MASK;
let ptr = ptr as *mut CellHeader;
extra_assert!(!ptr.is_null());
assert_is_word_aligned(ptr);
ptr
}
fn next_cell(&self) -> Option<*mut CellHeader> {
if self.next_cell_is_invalid() {
None
} else {
Some(self.next_cell_unchecked())
}
}
fn prev_cell(&self) -> Option<*mut CellHeader> {
if self.prev_cell_raw.is_null() {
None
} else {
Some(self.prev_cell_raw)
}
}
fn size(&self) -> Bytes {
let data = unsafe { (self as *const CellHeader as *mut CellHeader).offset(1) };
assert_is_word_aligned(data);
let data = data as usize;
let next = self.next_cell_unchecked();
assert_is_word_aligned(next);
let next = next as usize;
extra_assert!(next > data);
Bytes(next - data)
}
#[cfg(feature = "extra_assertions")]
fn as_free_cell(&self) -> Option<&FreeCell> {
if self.is_free() {
Some(unsafe { mem::transmute(self) })
} else {
None
}
}
fn as_free_cell_mut(&mut self) -> Option<&mut FreeCell> {
if self.is_free() {
Some(unsafe { mem::transmute(self) })
} else {
None
}
}
unsafe fn unchecked_data(&self) -> *mut u8 {
(self as *const CellHeader).offset(1) as *const u8 as *mut u8
}
fn is_aligned_to<B: Into<Bytes>>(&self, align: B) -> bool {
let align = align.into();
extra_assert!(align.0.is_power_of_two());
let data = unsafe { self.unchecked_data() } as usize;
data & (align.0 - 1) == 0
}
}
impl FreeCell {
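// Tag bits in the low end of `next_free_raw`. When `NEXT_FREE_CELL_CAN_MERGE`
// is set, this cell's previous adjacent cell is also free and is the next
// entry in the free list, so the two can be merged into one larger free cell;
// the merge itself is deferred until the free list is next walked (see
// `walk_free_list`).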
const NEXT_FREE_CELL_CAN_MERGE: usize = 0b01;
const _RESERVED: usize = 0b10;
const MASK: usize = !0b11;
fn next_free_can_merge(&self) -> bool {
self.next_free_raw as usize & Self::NEXT_FREE_CELL_CAN_MERGE != 0
}
fn set_next_free_can_merge(&mut self) {
let next_free = self.next_free_raw as usize;
let next_free = next_free | Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw = next_free as *mut FreeCell;
}
fn next_free(&self) -> *mut FreeCell {
let next_free = self.next_free_raw as usize & Self::MASK;
next_free as *mut FreeCell
}
unsafe fn from_uninitialized(
raw: *mut u8,
next_cell: ptr::NonNull<CellHeader>,
prev_cell: Option<*mut CellHeader>,
next_free: Option<*mut FreeCell>,
policy: &AllocPolicy,
) -> *mut FreeCell {
extra_assert!(!raw.is_null());
assert_is_word_aligned(raw);
extra_assert!((raw as usize) < (next_cell.as_ptr() as usize));
extra_assert!((next_cell.as_ptr() as usize) - (raw as usize) >= size_of::<usize>().0);
let prev_cell = prev_cell.unwrap_or(ptr::null_mut());
let next_free = next_free.unwrap_or(ptr::null_mut());
let raw = raw as *mut FreeCell;
ptr::write(
raw,
FreeCell {
header: CellHeader {
next_cell_raw: next_cell,
prev_cell_raw: prev_cell,
},
next_free_raw: next_free,
},
);
write_free_pattern(&mut *raw, policy);
raw
}
fn into_allocated_cell(&mut self, policy: &AllocPolicy) -> &mut AllocatedCell {
assert_local_cell_invariants(&mut self.header);
assert_is_poisoned_with_free_pattern(self, policy);
self.header.set_allocated();
unsafe { mem::transmute(self) }
}
fn try_alloc<'a>(
&'a mut self,
previous: &mut *mut FreeCell,
alloc_size: Words,
align: Bytes,
policy: &AllocPolicy,
) -> Option<&'a mut AllocatedCell> {
extra_assert!(alloc_size.0 > 0);
extra_assert!(align.0 > 0);
extra_assert!(align.0.is_power_of_two());
let size: Bytes = alloc_size.into();
if self.header.size() < size {
return None;
}
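// Try to allocate by splitting this cell in two and handing out the
// second half. Allocating from the end of the cell, rather than the
// beginning, lets us satisfy alignment requests: we pick a split point
// such that the returned cell's data is aligned.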
let next = self.header.next_cell_unchecked() as usize;
let split_and_aligned = (next - size.0) & !(align.0 - 1);
let data = unsafe { self.header.unchecked_data() } as usize;
let min_cell_size: Bytes = policy.min_cell_size(alloc_size).into();
if data + size_of::<CellHeader>().0 + min_cell_size.0 <= split_and_aligned {
let split_cell_head = split_and_aligned - size_of::<CellHeader>().0;
let split_cell = unsafe {
&mut *FreeCell::from_uninitialized(
split_cell_head as *mut u8,
self.header.next_cell_raw,
Some(&mut self.header),
None,
policy,
)
};
if let Some(next) = self.header.next_cell() {
unsafe {
(*next).prev_cell_raw = &mut split_cell.header;
}
}
self.header.next_cell_raw =
unsafe { ptr::NonNull::new_unchecked(&mut split_cell.header) };
return Some(split_cell.into_allocated_cell(policy));
}
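// There isn't enough room to split this cell and still satisfy the
// requested allocation. The early size check means this cell is big
// enough, but is its data properly aligned as-is?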
if self.header.is_aligned_to(align) {
*previous = self.next_free();
let allocated = self.into_allocated_cell(policy);
assert_is_valid_free_list(*previous, policy);
return Some(allocated);
}
None
}
fn insert_into_free_list<'a>(
&'a mut self,
head: &'a mut *mut FreeCell,
policy: &AllocPolicy,
) -> &'a mut *mut FreeCell {
extra_assert!(!self.next_free_can_merge());
extra_assert!(self.next_free().is_null());
self.next_free_raw = *head;
*head = self;
assert_is_valid_free_list(*head, policy);
head
}
#[cfg(feature = "extra_assertions")]
fn tail_data(&mut self) -> *mut u8 {
let data = unsafe { (self as *mut FreeCell).offset(1) as *mut u8 };
assert_is_word_aligned(data);
data
}
#[cfg(feature = "extra_assertions")]
fn tail_data_size(&self) -> Bytes {
let size = self.header.size();
extra_assert!(size >= size_of::<usize>());
size - size_of::<usize>()
}
}
impl AllocatedCell {
unsafe fn into_free_cell(&mut self, policy: &AllocPolicy) -> &mut FreeCell {
assert_local_cell_invariants(&mut self.header);
self.header.set_free();
let free: &mut FreeCell = mem::transmute(self);
write_free_pattern(free, policy);
free.next_free_raw = ptr::null_mut();
free
}
fn data(&self) -> *mut u8 {
let cell = &self.header as *const CellHeader as *mut CellHeader;
extra_assert!(!cell.is_null());
assert_local_cell_invariants(cell);
unsafe { cell.offset(1) as *mut u8 }
}
}
extra_only! {
fn write_free_pattern(cell: &mut FreeCell, policy: &AllocPolicy) {
unsafe {
let data = cell.tail_data();
let size: Bytes = cell.tail_data_size();
let pattern = policy.free_pattern();
ptr::write_bytes(data, pattern, size.0);
}
}
}
extra_only! {
fn assert_is_poisoned_with_free_pattern(cell: &mut FreeCell, policy: &AllocPolicy) {
use core::slice;
unsafe {
let size: Bytes = cell.tail_data_size();
let data = cell.tail_data();
let data = slice::from_raw_parts(data, size.0);
let pattern = policy.free_pattern();
extra_assert!(data.iter().all(|byte| *byte == pattern));
}
}
}
extra_only! {
fn assert_local_cell_invariants(cell: *mut CellHeader) {
assert_is_word_aligned(cell);
unsafe {
if let Some(cell_ref) = cell.as_ref() {
assert!(cell_ref.size() >= size_of::<usize>());
if let Some(prev) = cell_ref.prev_cell().and_then(|p| p.as_ref()) {
assert!(prev.size() >= size_of::<usize>());
assert!(!prev.next_cell_is_invalid());
assert_eq!(prev.next_cell_unchecked(), cell, "next(prev(cell)) == cell");
}
if let Some(next) = cell_ref.next_cell() {
assert!(!next.is_null());
let next = &*next;
assert!(next.size() >= size_of::<usize>());
assert_eq!(next.prev_cell_raw, cell, "prev(next(cell)) == cell");
}
if let Some(free) = cell_ref.as_free_cell() {
if free.next_free_can_merge() {
let prev_cell = free.header.prev_cell().expect(
"if the next free cell (aka prev_cell) can merge, \
prev_cell had better exist"
);
assert!(!prev_cell.is_null());
assert!(
(*prev_cell).is_free(),
"prev_cell is free, when NEXT_FREE_CELL_CAN_MERGE bit is set"
);
assert_eq!(
free.next_free() as *mut CellHeader,
prev_cell,
"next_free == prev_cell, when NEXT_FREE_CAN_MERGE bit is set"
);
}
}
}
}
}
}
extra_only! {
fn assert_is_valid_free_list(head: *mut FreeCell, policy: &AllocPolicy) {
unsafe {
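// Walk the list with two cursors, advancing `right` two steps for every
// one step of `left` (Floyd's cycle detection): if the free list contains
// a cycle, the cursors must eventually collide and trip an assert.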
let mut left = head;
assert_local_cell_invariants(left as *mut CellHeader);
if left.is_null() {
return;
}
assert_is_poisoned_with_free_pattern(&mut *left, policy);
let mut right = (*left).next_free();
loop {
assert_local_cell_invariants(right as *mut CellHeader);
if right.is_null() {
return;
}
assert_is_poisoned_with_free_pattern(&mut *right, policy);
assert!(left != right, "free list should not have cycles");
assert!((*right).header.is_free(), "cells in free list should never be allocated");
assert!((*left).header.is_free(), "cells in free list should never be allocated");
right = (*right).next_free();
assert_local_cell_invariants(right as *mut CellHeader);
if right.is_null() {
return;
}
assert_is_poisoned_with_free_pattern(&mut *right, policy);
left = (*left).next_free();
assert_local_cell_invariants(left as *mut CellHeader);
assert_is_poisoned_with_free_pattern(&mut *left, policy);
assert!(left != right, "free list should not have cycles");
assert!((*right).header.is_free(), "cells in free list should never be allocated");
assert!((*left).header.is_free(), "cells in free list should never be allocated");
right = (*right).next_free();
}
}
}
}
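// An allocation policy abstracts over where new free cells come from and how
// allocations are sized. `LargeAllocPolicy` grabs fresh pages for large
// allocations; when the "size_classes" feature is enabled, small allocations
// are instead served from per-size free lists via `SizeClassAllocPolicy`.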
trait AllocPolicy {
unsafe fn new_cell_for_free_list(&self, size: Words, align: Bytes)
-> Result<*mut FreeCell, ()>;
fn min_cell_size(&self, alloc_size: Words) -> Words;
fn should_merge_adjacent_free_cells(&self) -> bool;
#[cfg(feature = "extra_assertions")]
fn free_pattern(&self) -> u8;
}
struct LargeAllocPolicy;
static LARGE_ALLOC_POLICY: LargeAllocPolicy = LargeAllocPolicy;
impl LargeAllocPolicy {
#[cfg(feature = "size_classes")]
const MIN_CELL_SIZE: Words = Words(size_classes::SizeClasses::NUM_SIZE_CLASSES * 2);
#[cfg(not(feature = "size_classes"))]
const MIN_CELL_SIZE: Words = Words(16);
}
impl AllocPolicy for LargeAllocPolicy {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*mut FreeCell, ()> {
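// To guarantee that an allocation will succeed after refilling the free
// list with this new cell, allocate enough to satisfy the requested
// alignment and still have at least the minimum cell size left over.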
let size: Bytes = cmp::max(
size.into(),
((align + Self::MIN_CELL_SIZE) * Words(2)).into(),
);
let pages: Pages = (size + size_of::<CellHeader>()).round_up_to();
let new_pages = imp::alloc_pages(pages)?;
let allocated_size: Bytes = pages.into();
let next_cell = new_pages.offset(allocated_size.0 as isize);
let next_cell = next_cell as usize | CellHeader::NEXT_CELL_IS_INVALID;
extra_assert!(next_cell != 0);
let next_cell = ptr::NonNull::new_unchecked(next_cell as *mut CellHeader);
Ok(FreeCell::from_uninitialized(
new_pages,
next_cell,
None,
None,
self as &AllocPolicy,
))
}
fn min_cell_size(&self, _alloc_size: Words) -> Words {
Self::MIN_CELL_SIZE
}
fn should_merge_adjacent_free_cells(&self) -> bool {
true
}
#[cfg(feature = "extra_assertions")]
fn free_pattern(&self) -> u8 {
CellHeader::LARGE_FREE_PATTERN
}
}
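// Walk the free list, performing any deferred merges that were recorded via
// `NEXT_FREE_CELL_CAN_MERGE`, and invoke `f` on each `(previous, current)`
// pair of free-list pointers until it returns `Some`. Returns `Err(())` when
// the list is exhausted without `f` producing a result.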
unsafe fn walk_free_list<F, T>(
head: &mut *mut FreeCell,
policy: &AllocPolicy,
mut f: F,
) -> Result<T, ()>
where
F: FnMut(&mut *mut FreeCell, &mut FreeCell) -> Option<T>,
{
let mut previous_free = head;
loop {
let current_free = *previous_free;
assert_local_cell_invariants(current_free as *mut CellHeader);
if current_free.is_null() {
return Err(());
}
let mut current_free = &mut *current_free;
while current_free.next_free_can_merge() {
extra_assert!(policy.should_merge_adjacent_free_cells());
let prev_adjacent = current_free.header.prev_cell_raw as *mut FreeCell;
extra_assert_eq!(prev_adjacent, current_free.next_free());
let prev_adjacent = &mut *prev_adjacent;
(*prev_adjacent).header.next_cell_raw = current_free.header.next_cell_raw;
if let Some(next) = current_free.header.next_cell() {
(*next).prev_cell_raw = &mut prev_adjacent.header;
}
*previous_free = prev_adjacent;
current_free = prev_adjacent;
write_free_pattern(current_free, policy);
assert_local_cell_invariants(&mut current_free.header);
}
if let Some(result) = f(previous_free, current_free) {
return Ok(result);
}
previous_free = &mut current_free.next_free_raw;
}
}
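// First-fit allocation: scan the free list and return the data pointer of the
// first cell that can satisfy `size` with alignment `align`.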
unsafe fn alloc_first_fit(
size: Words,
align: Bytes,
head: &mut *mut FreeCell,
policy: &AllocPolicy,
) -> Result<*mut u8, ()> {
extra_assert!(size.0 > 0);
walk_free_list(head, policy, |previous, current| {
extra_assert_eq!(*previous, current as *mut _);
if let Some(allocated) = current.try_alloc(previous, size, align, policy) {
assert_aligned_to(allocated.data(), align);
return Some(allocated.data());
}
None
})
}
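// First-fit allocation with fallback: if no existing free cell fits, ask the
// policy for a fresh cell, push it onto the free list, and retry. The retry
// cannot fail, since `new_cell_for_free_list` sizes the new cell to fit.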
unsafe fn alloc_with_refill(
size: Words,
align: Bytes,
head: &mut *mut FreeCell,
policy: &AllocPolicy,
) -> Result<*mut u8, ()> {
if let Ok(result) = alloc_first_fit(size, align, head, policy) {
return Ok(result);
}
let cell = policy.new_cell_for_free_list(size, align)?;
let head = (*cell).insert_into_free_list(head, policy);
let result = alloc_first_fit(size, align, head, policy);
extra_assert!(
result.is_ok(),
"if refilling the free list succeeds, then retrying the allocation \
should also always succeed"
);
result
}
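/// A wee allocator.
///
/// A minimal sketch of installing it as a program's global allocator
/// (assuming a nightly toolchain with the `global_allocator` feature, which
/// this crate itself already enables):
///
/// ```ignore
/// extern crate wee_alloc;
///
/// #[global_allocator]
/// static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
/// ```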
pub struct WeeAlloc {
head: imp::Exclusive<*mut FreeCell>,
#[cfg(feature = "size_classes")]
size_classes: size_classes::SizeClasses,
}
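// Safety: the allocator's mutable state is guarded by `imp::Exclusive`, which
// is expected to provide the needed mutual exclusion on each target (or
// single-threaded exclusivity on `wasm32`).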
unsafe impl Sync for WeeAlloc {}
impl ConstInit for WeeAlloc {
const INIT: WeeAlloc = WeeAlloc {
head: imp::Exclusive::INIT,
#[cfg(feature = "size_classes")]
size_classes: size_classes::SizeClasses::INIT,
};
}
impl WeeAlloc {
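/// An initial, `const` default construction of a `WeeAlloc` allocator.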
pub const INIT: Self = <Self as ConstInit>::INIT;
#[cfg(feature = "size_classes")]
unsafe fn with_free_list_and_policy_for_size<F, T>(&self, size: Words, align: Bytes, f: F) -> T
where
F: for<'a> FnOnce(&'a mut *mut FreeCell, &'a AllocPolicy) -> T,
{
extra_assert!(size.0 > 0);
extra_assert!(align.0 > 0);
let align: Bytes = align.into();
if align <= size_of::<usize>() {
if let Some(head) = self.size_classes.get(size) {
let policy = size_classes::SizeClassAllocPolicy(&self.head);
let policy = &policy as &AllocPolicy;
return head.with_exclusive_access(|head| f(head, policy));
}
}
let policy = &LARGE_ALLOC_POLICY as &AllocPolicy;
self.head.with_exclusive_access(|head| f(head, policy))
}
#[cfg(not(feature = "size_classes"))]
unsafe fn with_free_list_and_policy_for_size<F, T>(&self, size: Words, _align: Bytes, f: F) -> T
where
F: for<'a> FnOnce(&'a mut *mut FreeCell, &'a AllocPolicy) -> T,
{
extra_assert!(size.0 > 0);
let policy = &LARGE_ALLOC_POLICY as &AllocPolicy;
self.head.with_exclusive_access(|head| f(head, policy))
}
}
unsafe impl<'a> Alloc for &'a WeeAlloc {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
let size = Bytes(layout.size());
let align = if layout.align() == 0 {
Bytes(1)
} else {
Bytes(layout.align())
};
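// Zero-sized allocations are not backed by heap memory, so hand back a
// dangling but well-aligned pointer: the alignment value itself is a
// non-null, suitably aligned address.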
if size.0 == 0 {
return Ok(align.0 as *mut u8);
}
let size: Words = size.round_up_to();
self.with_free_list_and_policy_for_size(size, align, |head, policy| {
assert_is_valid_free_list(*head, policy);
alloc_with_refill(size, align, head, policy)
.map_err(|()| AllocErr::Exhausted { request: layout })
})
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
let size = Bytes(layout.size());
if size.0 == 0 || ptr.is_null() {
return;
}
let size: Words = size.round_up_to();
let align = Bytes(layout.align());
self.with_free_list_and_policy_for_size(size, align, |head, policy| {
let cell = (ptr as *mut CellHeader).offset(-1);
let cell = &mut *cell;
extra_assert!(cell.size() >= size.into());
extra_assert!(cell.is_allocated());
let cell: &mut AllocatedCell = mem::transmute(cell);
let free = cell.into_free_cell(policy);
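// Merging with the previous adjacent cell can happen eagerly: that cell
// is already linked into a free list, and this cell never needs to be.
// Merging with the next adjacent cell is deferred instead; we only record
// that the merge is possible (see `NEXT_FREE_CELL_CAN_MERGE`) and let a
// later `walk_free_list` carry it out.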
if policy.should_merge_adjacent_free_cells() {
if let Some(prev) = free.header
.prev_cell()
.and_then(|p| (*p).as_free_cell_mut())
{
prev.header.next_cell_raw = free.header.next_cell_raw;
if let Some(next) = free.header.next_cell() {
(*next).prev_cell_raw = &mut prev.header;
}
write_free_pattern(prev, policy);
assert_is_valid_free_list(*head, policy);
return;
}
if let Some(next) = free.header
.next_cell()
.and_then(|n| (*n).as_free_cell_mut())
{
free.next_free_raw = next.next_free();
next.next_free_raw = free;
next.set_next_free_can_merge();
assert_is_valid_free_list(*head, policy);
return;
}
}
let _head = free.insert_into_free_list(head, policy);
});
}
}