//! `wee_alloc`: The Wasm-Enabled, Elfin Allocator. A small (~1K uncompressed
//! `.wasm`) allocator implementation for when code size is a greater concern
//! than allocation performance.
#![deny(missing_docs)]
#![cfg_attr(not(feature = "use_std_for_test_debugging"), no_std)]
#![cfg_attr(feature = "nightly", feature(allocator_api, core_intrinsics))]
#[macro_use]
extern crate cfg_if;
#[cfg(feature = "nightly")]
extern crate alloc;
#[cfg(feature = "use_std_for_test_debugging")]
extern crate core;
#[cfg(feature = "static_array_backend")]
extern crate spin;
extern crate memory_units;
#[macro_use]
mod extra_assert;
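// Select the platform-specific implementation of page allocation and
// exclusive access: a fixed static array, raw `wasm32` memory growth, or the
// host OS's virtual memory facilities.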
cfg_if! {
if #[cfg(feature = "static_array_backend")] {
mod imp_static_array;
use imp_static_array as imp;
} else if #[cfg(target_arch = "wasm32")] {
mod imp_wasm32;
use imp_wasm32 as imp;
} else if #[cfg(unix)] {
extern crate libc;
mod imp_unix;
use imp_unix as imp;
} else if #[cfg(windows)] {
extern crate winapi;
mod imp_windows;
use imp_windows as imp;
} else {
compile_error! {
"There is no `wee_alloc` implementation for this target; want to send a pull request? :)"
}
}
}
mod const_init;
mod neighbors;
#[cfg(feature = "size_classes")]
mod size_classes;
cfg_if! {
if #[cfg(feature = "nightly")] {
use core::alloc::{Alloc, AllocErr};
} else {
pub(crate) struct AllocErr;
}
}
use const_init::ConstInit;
use core::alloc::{GlobalAlloc, Layout};
use core::cell::Cell;
use core::cmp;
use core::marker::Sync;
use core::mem;
use core::ptr::{self, NonNull};
use memory_units::{size_of, Bytes, Pages, RoundUpTo, Words};
use neighbors::Neighbors;
/// The page size, in bytes: 64KiB, the WebAssembly page size.
pub const PAGE_SIZE: Bytes = Bytes(65536);
extra_only! {
fn assert_is_word_aligned<T>(ptr: *const T) {
assert_aligned_to(ptr, size_of::<usize>());
}
}
extra_only! {
fn assert_aligned_to<T>(ptr: *const T, align: Bytes) {
extra_assert_eq!(
ptr as usize % align.0,
0,
"{:p} is not aligned to {}",
ptr,
align.0
);
}
}
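// Every cell, whether allocated or free, begins with a `CellHeader`. Its
// intrusive `Neighbors` links are always word-aligned, so they donate their
// low pointer bits for bookkeeping: bit 1 of the `next` link records whether
// this cell is allocated, and bit 2 records whether the `next` pointer is
// invalid (as it is for the last cell in a region).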
#[repr(C)]
#[derive(Default, Debug)]
struct CellHeader<'a> {
neighbors: Neighbors<'a, CellHeader<'a>>,
}
impl<'a> AsRef<Neighbors<'a, CellHeader<'a>>> for CellHeader<'a> {
fn as_ref(&self) -> &Neighbors<'a, CellHeader<'a>> {
&self.neighbors
}
}
unsafe impl<'a> neighbors::HasNeighbors<'a, CellHeader<'a>> for CellHeader<'a> {
#[inline]
unsafe fn next_checked(
neighbors: &Neighbors<'a, CellHeader<'a>>,
next: *const CellHeader<'a>,
) -> Option<&'a CellHeader<'a>> {
if next.is_null() || CellHeader::next_cell_is_invalid(neighbors) {
None
} else {
Some(&*next)
}
}
#[inline]
unsafe fn prev_checked(
_neighbors: &Neighbors<'a, CellHeader<'a>>,
prev: *const CellHeader<'a>,
) -> Option<&'a CellHeader<'a>> {
if prev.is_null() {
None
} else {
Some(&*prev)
}
}
}
#[repr(C)]
#[derive(Debug)]
struct AllocatedCell<'a> {
header: CellHeader<'a>,
}
#[test]
fn allocated_cell_layout() {
assert_eq!(
size_of::<CellHeader>(),
size_of::<AllocatedCell>(),
"Safety and correctness depend on `AllocatedCell` being the same size as `CellHeader`"
);
assert_eq!(
mem::align_of::<CellHeader>(),
mem::align_of::<AllocatedCell>()
);
}
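// A free cell is a `CellHeader` plus one extra word: a pointer to the next
// cell in the free list, which is not necessarily its neighbor in memory.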
#[repr(C)]
#[derive(Debug)]
struct FreeCell<'a> {
header: CellHeader<'a>,
next_free_raw: Cell<*const FreeCell<'a>>,
}
#[test]
fn free_cell_layout() {
assert_eq!(
size_of::<CellHeader>() + Words(1),
size_of::<FreeCell>(),
"Safety and correctness depend on `FreeCell` being only one word larger than `CellHeader`"
);
assert_eq!(
mem::align_of::<CellHeader>(),
mem::align_of::<FreeCell>()
);
}
#[cfg(feature = "extra_assertions")]
impl<'a> CellHeader<'a> {
#[cfg(feature = "size_classes")]
const SIZE_CLASS_FREE_PATTERN: u8 = 0x35;
const LARGE_FREE_PATTERN: u8 = 0x57;
}
impl<'a> CellHeader<'a> {
fn is_allocated(&self) -> bool {
self.neighbors.get_next_bit_1()
}
fn is_free(&self) -> bool {
!self.is_allocated()
}
fn set_allocated(neighbors: &Neighbors<'a, Self>) {
neighbors.set_next_bit_1();
}
fn set_free(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_1();
}
fn next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) -> bool {
neighbors.get_next_bit_2()
}
fn set_next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.set_next_bit_2();
}
fn clear_next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_2();
}
fn size(&self) -> Bytes {
let data = unsafe { (self as *const CellHeader<'a>).offset(1) };
assert_is_word_aligned(data);
let data = data as usize;
let next = self.neighbors.next_unchecked();
assert_is_word_aligned(next);
let next = next as usize;
extra_assert!(
next > data,
"the next cell ({:p}) should always be after the data ({:p})",
next as *const (),
data as *const ()
);
Bytes(next - data)
}
fn as_free_cell(&self) -> Option<&FreeCell<'a>> {
if self.is_free() {
Some(unsafe { mem::transmute(self) })
} else {
None
}
}
unsafe fn unchecked_data(&self) -> *const u8 {
(self as *const CellHeader).offset(1) as *const u8
}
fn is_aligned_to<B: Into<Bytes>>(&self, align: B) -> bool {
let align = align.into();
extra_assert!(align.0.is_power_of_two());
let data = unsafe { self.unchecked_data() } as usize;
data & (align.0 - 1) == 0
}
}
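// Free cells are word-aligned, so the two low bits of `next_free_raw` are
// always zero and are borrowed as flags. `NEXT_FREE_CELL_CAN_MERGE` marks
// that the next cell in the free list is also this cell's previous neighbor
// in memory, and that the two can be merged lazily during the next walk of
// the free list.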
impl<'a> FreeCell<'a> {
const NEXT_FREE_CELL_CAN_MERGE: usize = 0b01;
const _RESERVED: usize = 0b10;
const MASK: usize = !0b11;
fn next_free_can_merge(&self) -> bool {
self.next_free_raw.get() as usize & Self::NEXT_FREE_CELL_CAN_MERGE != 0
}
fn set_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free | Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn clear_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free & !Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn next_free(&self) -> *const FreeCell<'a> {
let next_free = self.next_free_raw.get() as usize & Self::MASK;
next_free as *const FreeCell<'a>
}
unsafe fn from_uninitialized(
raw: NonNull<u8>,
size: Bytes,
next_free: Option<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> *const FreeCell<'a> {
assert_is_word_aligned(raw.as_ptr() as *mut u8);
let next_free = next_free.unwrap_or(ptr::null_mut());
let raw = raw.as_ptr() as *mut FreeCell;
ptr::write(
raw,
FreeCell {
header: CellHeader::default(),
next_free_raw: Cell::new(next_free),
},
);
write_free_pattern(&*raw, size, policy);
raw
}
fn into_allocated_cell(&self, policy: &dyn AllocPolicy<'a>) -> &AllocatedCell<'a> {
assert_local_cell_invariants(&self.header);
assert_is_poisoned_with_free_pattern(self, policy);
CellHeader::set_allocated(&self.header.neighbors);
unsafe { mem::transmute(self) }
}
fn try_alloc<'b>(
&'b self,
previous: &'b Cell<*const FreeCell<'a>>,
alloc_size: Words,
align: Bytes,
policy: &dyn AllocPolicy<'a>,
) -> Option<&'b AllocatedCell<'a>> {
extra_assert!(alloc_size.0 > 0);
extra_assert!(align.0 > 0);
extra_assert!(align.0.is_power_of_two());
let size: Bytes = alloc_size.into();
if self.header.size() < size {
return None;
}
let next = self.header.neighbors.next_unchecked() as usize;
let split_and_aligned = (next - size.0) & !(align.0 - 1);
let data = unsafe { self.header.unchecked_data() } as usize;
let min_cell_size: Bytes = policy.min_cell_size(alloc_size).into();
if data + size_of::<CellHeader>().0 + min_cell_size.0 <= split_and_aligned {
let split_cell_head = split_and_aligned - size_of::<CellHeader>().0;
let split_cell = unsafe {
&*FreeCell::from_uninitialized(
unchecked_unwrap(NonNull::new(split_cell_head as *mut u8)),
Bytes(next - split_cell_head) - size_of::<CellHeader>(),
None,
policy,
)
};
Neighbors::append(&self.header, &split_cell.header);
self.clear_next_free_can_merge();
if CellHeader::next_cell_is_invalid(&self.header.neighbors) {
CellHeader::clear_next_cell_is_invalid(&self.header.neighbors);
CellHeader::set_next_cell_is_invalid(&split_cell.header.neighbors);
}
return Some(split_cell.into_allocated_cell(policy));
}
if self.header.is_aligned_to(align) {
previous.set(self.next_free());
let allocated = self.into_allocated_cell(policy);
assert_is_valid_free_list(previous.get(), policy);
return Some(allocated);
}
None
}
fn insert_into_free_list<'b>(
&'b self,
head: &'b Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> &'b Cell<*const FreeCell<'a>> {
extra_assert!(!self.next_free_can_merge());
extra_assert!(self.next_free().is_null());
self.next_free_raw.set(head.get());
head.set(self);
assert_is_valid_free_list(head.get(), policy);
head
}
#[cfg(feature = "extra_assertions")]
fn tail_data(&self) -> *const u8 {
let data = unsafe { (self as *const FreeCell).offset(1) as *const u8 };
assert_is_word_aligned(data);
data
}
#[cfg(feature = "extra_assertions")]
fn tail_data_size(&self) -> Bytes {
let size = self.header.size();
extra_assert!(size >= size_of::<usize>());
size - size_of::<usize>()
}
}
impl<'a> AllocatedCell<'a> {
unsafe fn into_free_cell(&self, policy: &dyn AllocPolicy<'a>) -> &FreeCell<'a> {
assert_local_cell_invariants(&self.header);
CellHeader::set_free(&self.header.neighbors);
let free: &FreeCell = mem::transmute(self);
write_free_pattern(free, free.header.size(), policy);
free.next_free_raw.set(ptr::null_mut());
free
}
fn data(&self) -> *const u8 {
let cell = &self.header as *const CellHeader;
assert_local_cell_invariants(cell);
unsafe { cell.offset(1) as *const u8 }
}
}
extra_only! {
fn write_free_pattern(cell: &FreeCell, size: Bytes, policy: &dyn AllocPolicy) {
unsafe {
let data = cell.tail_data();
let pattern = policy.free_pattern();
ptr::write_bytes(
data as *mut u8,
pattern,
(size - (size_of::<FreeCell>() - size_of::<CellHeader>())).0
);
}
}
}
extra_only! {
fn assert_is_poisoned_with_free_pattern(cell: &FreeCell, policy: &dyn AllocPolicy) {
use core::slice;
unsafe {
let size: Bytes = cell.tail_data_size();
let data = cell.tail_data();
let data = slice::from_raw_parts(data, size.0);
let pattern = policy.free_pattern();
extra_assert!(data.iter().all(|byte| *byte == pattern));
}
}
}
extra_only! {
fn assert_local_cell_invariants(cell: *const CellHeader) {
assert_is_word_aligned(cell);
unsafe {
if let Some(cell_ref) = cell.as_ref() {
assert!(cell_ref.size() >= size_of::<usize>());
if let Some(prev) = cell_ref.neighbors.prev() {
assert!(prev.size() >= size_of::<usize>());
assert!(!CellHeader::next_cell_is_invalid(&prev.neighbors));
assert_eq!(prev.neighbors.next_unchecked(), cell, "next(prev(cell)) == cell");
}
if let Some(next) = cell_ref.neighbors.next() {
assert!(next.size() >= size_of::<usize>());
assert_eq!(next.neighbors.prev_unchecked(), cell, "prev(next(cell)) == cell");
}
if let Some(free) = cell_ref.as_free_cell() {
if free.next_free_can_merge() {
let prev_cell = free.header.neighbors.prev().expect(
"if the next free cell (aka prev_cell) can merge, \
prev_cell had better exist"
);
assert!(
prev_cell.is_free(),
"prev_cell is free, when NEXT_FREE_CELL_CAN_MERGE bit is set"
);
assert_eq!(
free.next_free() as *const CellHeader,
prev_cell as *const _,
"next_free == prev_cell, when NEXT_FREE_CAN_MERGE bit is set"
);
}
}
}
}
}
}
extra_only! {
fn assert_is_valid_free_list(head: *const FreeCell, policy: &dyn AllocPolicy) {
unsafe {
let mut left = head;
assert_local_cell_invariants(left as *const CellHeader);
if left.is_null() {
return;
}
assert_is_poisoned_with_free_pattern(&*left, policy);
let mut right = (*left).next_free();
loop {
assert_local_cell_invariants(right as *const CellHeader);
if right.is_null() {
return;
}
assert_is_poisoned_with_free_pattern(&*right, policy);
assert!(left != right, "free list should not have cycles");
assert!((*right).header.is_free(), "cells in free list should never be allocated");
assert!((*left).header.is_free(), "cells in free list should never be allocated");
right = (*right).next_free();
assert_local_cell_invariants(right as *const CellHeader);
if right.is_null() {
return;
}
assert_is_poisoned_with_free_pattern(&*right, policy);
left = (*left).next_free();
assert_local_cell_invariants(left as *const CellHeader);
assert_is_poisoned_with_free_pattern(&*left, policy);
assert!(left != right, "free list should not have cycles");
assert!((*right).header.is_free(), "cells in free list should never be allocated");
assert!((*left).header.is_free(), "cells in free list should never be allocated");
right = (*right).next_free();
}
}
}
}
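// An allocation policy abstracts over the differences between the fallback
// free list for large allocations and (with the `size_classes` feature) the
// per-size-class free lists: how to acquire a new cell when a free list is
// exhausted, the minimum cell size, whether adjacent free cells should be
// merged, and (under `extra_assertions`) the byte pattern used to poison
// free memory.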
trait AllocPolicy<'a> {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr>;
fn min_cell_size(&self, alloc_size: Words) -> Words;
fn should_merge_adjacent_free_cells(&self) -> bool;
#[cfg(feature = "extra_assertions")]
fn free_pattern(&self) -> u8;
}
struct LargeAllocPolicy;
static LARGE_ALLOC_POLICY: LargeAllocPolicy = LargeAllocPolicy;
impl LargeAllocPolicy {
#[cfg(feature = "size_classes")]
const MIN_CELL_SIZE: Words = Words(size_classes::SizeClasses::NUM_SIZE_CLASSES * 2);
#[cfg(not(feature = "size_classes"))]
const MIN_CELL_SIZE: Words = Words(16);
}
impl<'a> AllocPolicy<'a> for LargeAllocPolicy {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr> {
let size: Bytes = cmp::max(size.into(), (align + Self::MIN_CELL_SIZE) * Words(2));
let pages: Pages = (size + size_of::<CellHeader>()).round_up_to();
let new_pages = imp::alloc_pages(pages)?;
let allocated_size: Bytes = pages.into();
let free_cell = &*FreeCell::from_uninitialized(
new_pages,
allocated_size - size_of::<CellHeader>(),
None,
self as &dyn AllocPolicy<'a>,
);
let next_cell = (new_pages.as_ptr() as *const u8).add(allocated_size.0);
free_cell
.header
.neighbors
.set_next(next_cell as *const CellHeader);
CellHeader::set_next_cell_is_invalid(&free_cell.header.neighbors);
Ok(free_cell)
}
fn min_cell_size(&self, _alloc_size: Words) -> Words {
Self::MIN_CELL_SIZE
}
fn should_merge_adjacent_free_cells(&self) -> bool {
true
}
#[cfg(feature = "extra_assertions")]
fn free_pattern(&self) -> u8 {
CellHeader::LARGE_FREE_PATTERN
}
}
cfg_if! {
if #[cfg(any(debug_assertions, feature = "extra_assertions"))] {
unsafe fn unchecked_unwrap<T>(o: Option<T>) -> T {
o.unwrap()
}
} else {
#[inline]
unsafe fn unchecked_unwrap<T>(o: Option<T>) -> T {
match o {
Some(t) => t,
None => core::hint::unreachable_unchecked(),
}
}
}
}
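// Walk the free list, performing any pending lazy merges of neighboring free
// cells along the way, and invoke `f` on each cell (along with the free-list
// link that points to it) until `f` returns `Some`. Reaching the end of the
// list is an allocation error.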
unsafe fn walk_free_list<'a, F, T>(
head: &Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
mut f: F,
) -> Result<T, AllocErr>
where
F: FnMut(&Cell<*const FreeCell<'a>>, &FreeCell<'a>) -> Option<T>,
{
let mut previous_free = head;
loop {
let current_free = previous_free.get();
assert_local_cell_invariants(current_free as *const CellHeader);
if current_free.is_null() {
return Err(AllocErr);
}
let current_free = Cell::new(current_free);
while (*current_free.get()).next_free_can_merge() {
extra_assert!(policy.should_merge_adjacent_free_cells());
let current = &*current_free.get();
current.clear_next_free_can_merge();
let prev_neighbor = unchecked_unwrap(
current
.header
.neighbors
.prev()
.and_then(|p| p.as_free_cell()),
);
current.header.neighbors.remove();
if CellHeader::next_cell_is_invalid(&current.header.neighbors) {
CellHeader::set_next_cell_is_invalid(&prev_neighbor.header.neighbors);
}
previous_free.set(prev_neighbor);
current_free.set(prev_neighbor);
write_free_pattern(
&*current_free.get(),
(*current_free.get()).header.size(),
policy,
);
assert_local_cell_invariants(&(*current_free.get()).header);
}
if let Some(result) = f(previous_free, &*current_free.get()) {
return Ok(result);
}
previous_free = &(*current_free.get()).next_free_raw;
}
}
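// First-fit allocation: take the first cell in the free list that can
// satisfy the requested size and alignment.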
unsafe fn alloc_first_fit<'a>(
size: Words,
align: Bytes,
head: &Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
extra_assert!(size.0 > 0);
walk_free_list(head, policy, |previous, current| {
extra_assert_eq!(previous.get(), current as *const _);
if let Some(allocated) = current.try_alloc(previous, size, align, policy) {
assert_aligned_to(allocated.data(), align);
return Some(unchecked_unwrap(NonNull::new(allocated.data() as *mut u8)));
}
None
})
}
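// If first-fit allocation fails, ask the policy for a new cell, push it onto
// the free list, and retry; new cells are sized such that the retry must
// succeed.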
unsafe fn alloc_with_refill<'a, 'b>(
size: Words,
align: Bytes,
head: &'b Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
if let Ok(result) = alloc_first_fit(size, align, head, policy) {
return Ok(result);
}
let cell = policy.new_cell_for_free_list(size, align)?;
let head = (*cell).insert_into_free_list(head, policy);
let result = alloc_first_fit(size, align, head, policy);
extra_assert!(
result.is_ok(),
"if refilling the free list succeeds, then retrying the allocation \
should also always succeed"
);
result
}
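/// A wee allocator.
///
/// A typical setup installs it as the global allocator:
///
/// ```
/// use wee_alloc::WeeAlloc;
///
/// #[global_allocator]
/// static ALLOC: WeeAlloc = WeeAlloc::INIT;
/// ```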
pub struct WeeAlloc<'a> {
head: imp::Exclusive<*const FreeCell<'a>>,
#[cfg(feature = "size_classes")]
size_classes: size_classes::SizeClasses<'a>,
}
unsafe impl<'a> Sync for WeeAlloc<'a> {}
impl<'a> ConstInit for WeeAlloc<'a> {
const INIT: WeeAlloc<'a> = WeeAlloc {
head: imp::Exclusive::INIT,
#[cfg(feature = "size_classes")]
size_classes: size_classes::SizeClasses::INIT,
};
}
impl<'a> WeeAlloc<'a> {
/// An initial `WeeAlloc` value: use it to initialize a `static`.
pub const INIT: Self = <Self as ConstInit>::INIT;
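// Find the free list and allocation policy to use for the given allocation
// size and alignment: a size-class free list when the size has a class and
// the alignment is no stricter than word alignment, otherwise the fallback
// large-allocation free list.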
#[cfg(feature = "size_classes")]
unsafe fn with_free_list_and_policy_for_size<F, T>(&self, size: Words, align: Bytes, f: F) -> T
where
F: for<'b> FnOnce(&'b Cell<*const FreeCell<'a>>, &'b dyn AllocPolicy<'a>) -> T,
{
extra_assert!(size.0 > 0);
extra_assert!(align.0 > 0);
if align <= size_of::<usize>() {
if let Some(head) = self.size_classes.get(size) {
let policy = size_classes::SizeClassAllocPolicy(&self.head);
let policy = &policy as &dyn AllocPolicy<'a>;
return head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
});
}
}
let policy = &LARGE_ALLOC_POLICY as &dyn AllocPolicy<'a>;
self.head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
})
}
#[cfg(not(feature = "size_classes"))]
unsafe fn with_free_list_and_policy_for_size<F, T>(&self, size: Words, _align: Bytes, f: F) -> T
where
F: for<'b> FnOnce(&'b Cell<*const FreeCell<'a>>, &'b dyn AllocPolicy<'a>) -> T,
{
extra_assert!(size.0 > 0);
let policy = &LARGE_ALLOC_POLICY as &dyn AllocPolicy;
self.head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
})
}
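// Zero-sized allocations are not backed by any memory; they get a dangling
// pointer with the requested alignment. (`Layout` guarantees a nonzero,
// power-of-two alignment, so the `align == 0` branch below is purely
// defensive.)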
unsafe fn alloc_impl(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
let size = Bytes(layout.size());
let align = if layout.align() == 0 {
Bytes(1)
} else {
Bytes(layout.align())
};
if size.0 == 0 {
extra_assert!(align.0 > 0);
return Ok(NonNull::new_unchecked(align.0 as *mut u8));
}
let size: Words = size.round_up_to();
self.with_free_list_and_policy_for_size(size, align, |head, policy| {
assert_is_valid_free_list(head.get(), policy);
alloc_with_refill(size, align, head, policy)
})
}
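// Deallocation recovers the `CellHeader` sitting just before the data
// pointer and returns the cell to the appropriate free list. Merging with a
// free previous neighbor in memory happens eagerly here; merging with a free
// next neighbor is deferred by flagging it `NEXT_FREE_CELL_CAN_MERGE` (see
// `walk_free_list`).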
unsafe fn dealloc_impl(&self, ptr: NonNull<u8>, layout: Layout) {
let size = Bytes(layout.size());
if size.0 == 0 {
return;
}
let size: Words = size.round_up_to();
let align = Bytes(layout.align());
self.with_free_list_and_policy_for_size(size, align, |head, policy| {
let cell = (ptr.as_ptr() as *mut CellHeader<'a> as *const CellHeader<'a>).offset(-1);
let cell = &*cell;
extra_assert!(cell.size() >= size.into());
extra_assert!(cell.is_allocated());
let cell: &AllocatedCell<'a> = mem::transmute(cell);
let free = cell.into_free_cell(policy);
if policy.should_merge_adjacent_free_cells() {
if let Some(prev) = free
.header
.neighbors
.prev()
.and_then(|p| (*p).as_free_cell())
{
free.header.neighbors.remove();
if CellHeader::next_cell_is_invalid(&free.header.neighbors) {
CellHeader::set_next_cell_is_invalid(&prev.header.neighbors);
}
write_free_pattern(prev, prev.header.size(), policy);
assert_is_valid_free_list(head.get(), policy);
return;
}
if let Some(next) = free
.header
.neighbors
.next()
.and_then(|n| (*n).as_free_cell())
{
free.next_free_raw.set(next.next_free_raw.get());
next.next_free_raw.set(free);
next.set_next_free_can_merge();
assert_is_valid_free_list(head.get(), policy);
return;
}
}
let _head = free.insert_into_free_list(head, policy);
});
}
}
#[cfg(feature = "nightly")]
unsafe impl<'a, 'b> Alloc for &'b WeeAlloc<'a>
where
'a: 'b,
{
unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
self.alloc_impl(layout)
}
unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
self.dealloc_impl(ptr, layout)
}
}
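// The stable `GlobalAlloc` interface: allocation failure is reported as a
// null pointer rather than an `Err`.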
unsafe impl GlobalAlloc for WeeAlloc<'static> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
match self.alloc_impl(layout) {
Ok(ptr) => ptr.as_ptr(),
Err(AllocErr) => ptr::null_mut(),
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if let Some(ptr) = NonNull::new(ptr) {
self.dealloc_impl(ptr, layout);
}
}
}