use crate::core::{
alloc::{AllocError, Layout},
fmt, mem,
num::NonZeroUsize,
ptr::NonNull,
};
#[cfg(all(any(feature = "alloc", test), feature = "unstable"))]
use alloc::alloc::Global;
#[cfg(all(any(feature = "alloc", test), not(feature = "unstable")))]
use crate::{core::alloc::LayoutExt, Global};
#[cfg(feature = "unstable")]
use crate::core::alloc::Allocator;
#[cfg(not(feature = "unstable"))]
use crate::core::ptr::NonNullStrict;
use crate::{AllocInitError, BackingAllocator, BasePtr, BlockLink, Raw};
/// A slab allocator: a fixed-size memory region carved into equally sized
/// blocks, handed out from an intrusive singly linked free list.
pub struct Slab<A: BackingAllocator> {
    // Base pointer of the managed region.
    base: BasePtr,
    // Address of the first free block, or `None` when all blocks are in use.
    free_list: Option<NonZeroUsize>,
    // Size of each block in bytes (rounded up to `BlockLink` alignment at
    // construction; see `region_layout`).
    block_size: u32,
    // Alignment guaranteed for every block.
    block_align: u32,
    // Total number of blocks in the region.
    num_blocks: u32,
    // Number of blocks currently allocated and not yet deallocated.
    outstanding: u32,
    // Allocator that provided the region; used to release it on drop.
    backing_allocator: A,
}
// SAFETY: a `Slab` has exclusive ownership of the region behind its raw
// pointers (presumably — NOTE(review): confirm no aliasing handles escape),
// so it may move to another thread when the backing allocator is `Send`.
unsafe impl<A> Send for Slab<A> where A: BackingAllocator + Send {}
// SAFETY: all mutation goes through `&mut self` methods, so shared `&Slab`
// access is read-only and sound when the backing allocator is `Sync`.
unsafe impl<A> Sync for Slab<A> where A: BackingAllocator + Sync {}
impl Slab<Raw> {
    /// Constructs a slab over a caller-provided memory region, with no
    /// backing allocator (the caller retains ownership of the region).
    ///
    /// # Errors
    ///
    /// Returns an error if the configuration is invalid (see
    /// `Slab::region_layout`) or the region's end would wrap the address
    /// space.
    ///
    /// # Safety
    ///
    /// `region` must be aligned to `BlockLink` (asserted by
    /// `RawSlab::try_new`) and must be valid for reads and writes of
    /// `Slab::region_layout(block_size, num_blocks)` bytes for the lifetime
    /// of the returned slab. NOTE(review): contract inferred from
    /// `RawSlab::try_new`'s body — confirm against its full requirements.
    pub unsafe fn new_raw(
        region: NonNull<u8>,
        block_size: usize,
        num_blocks: usize,
    ) -> Result<Slab<Raw>, AllocInitError> {
        unsafe {
            RawSlab::try_new(region, block_size, num_blocks).map(|s| s.with_backing_allocator(Raw))
        }
    }
}
#[cfg(all(any(feature = "alloc", test), not(feature = "unstable")))]
impl Slab<Global> {
    /// Creates a slab of `num_blocks` blocks of `block_size` bytes each,
    /// backed by the global allocator.
    ///
    /// # Errors
    ///
    /// Returns `InvalidConfig` if the configuration is rejected by
    /// `region_layout`, or `AllocFailed` if the backing allocation fails.
    #[cfg_attr(docs_rs, doc(cfg(feature = "alloc")))]
    pub fn try_new(block_size: usize, num_blocks: usize) -> Result<Slab<Global>, AllocInitError> {
        let region_layout = Self::region_layout(block_size, num_blocks)
            .map_err(|_| AllocInitError::InvalidConfig)?;
        unsafe {
            let region_ptr = if region_layout.size() == 0 {
                // A zero-sized region needs no real allocation; a dangling,
                // well-aligned pointer suffices.
                region_layout.dangling()
            } else {
                let region_raw = alloc::alloc::alloc(region_layout);
                // BUGFIX: the previous code called `dealloc(region_raw, …)`
                // inside this failure branch, where `region_raw` is null.
                // Deallocating a pointer that was never returned by `alloc`
                // is undefined behavior per the `GlobalAlloc` contract;
                // on allocation failure there is nothing to free.
                NonNull::new(region_raw).ok_or(AllocInitError::AllocFailed(region_layout))?
            };
            match RawSlab::try_new(region_ptr, block_size, num_blocks) {
                Ok(s) => Ok(s.with_backing_allocator(Global)),
                Err(e) => {
                    // Initialization failed after a successful allocation:
                    // release the region before propagating the error.
                    if region_layout.size() != 0 {
                        alloc::alloc::dealloc(region_ptr.as_ptr(), region_layout);
                    }
                    Err(e)
                }
            }
        }
    }
}
#[cfg(all(any(feature = "alloc", test), feature = "unstable"))]
impl Slab<Global> {
    /// Creates a slab of `num_blocks` blocks of `block_size` bytes each,
    /// backed by the global allocator.
    ///
    /// Thin wrapper over [`Slab::try_new_in`] with `Global`; see that method
    /// for error conditions.
    #[cfg_attr(docs_rs, doc(cfg(feature = "alloc")))]
    pub fn try_new(block_size: usize, num_blocks: usize) -> Result<Slab<Global>, AllocInitError> {
        Self::try_new_in(block_size, num_blocks, Global)
    }
}
#[cfg(feature = "unstable")]
impl<A> Slab<A>
where
    A: Allocator,
{
    /// Creates a slab of `num_blocks` blocks of `block_size` bytes each,
    /// backed by `backing_allocator`.
    ///
    /// # Errors
    ///
    /// Returns `InvalidConfig` if the configuration is rejected by
    /// `region_layout`, or `AllocFailed` if the backing allocation fails.
    #[cfg_attr(docs_rs, doc(cfg(feature = "unstable")))]
    pub fn try_new_in(
        block_size: usize,
        num_blocks: usize,
        backing_allocator: A,
    ) -> Result<Slab<A>, AllocInitError> {
        let region_layout = Self::region_layout(block_size, num_blocks)
            .map_err(|_| AllocInitError::InvalidConfig)?;
        unsafe {
            let region_ptr = if region_layout.size() == 0 {
                // Zero-sized region: no allocation needed, use a dangling but
                // well-aligned pointer.
                region_layout.dangling()
            } else {
                // On failure nothing was allocated, so there is nothing to free.
                backing_allocator
                    .allocate(region_layout)
                    .map_err(|_| AllocInitError::AllocFailed(region_layout))?
                    .cast()
            };
            match RawSlab::try_new(region_ptr, block_size, num_blocks) {
                Ok(s) => Ok(s.with_backing_allocator(backing_allocator)),
                Err(e) => {
                    // Initialization failed after a successful allocation:
                    // return the region to the allocator before bailing out.
                    if region_layout.size() != 0 {
                        backing_allocator.deallocate(region_ptr, region_layout);
                    }
                    Err(e)
                }
            }
        }
    }
}
impl<A> Slab<A>
where
    A: BackingAllocator,
{
    /// Computes the layout of the backing region for this configuration.
    ///
    /// The block size is rounded up to `BlockLink`'s alignment so every
    /// block can hold a free-list link; the region alignment is the largest
    /// power of two dividing the rounded block size.
    ///
    /// # Errors
    ///
    /// Returns `InvalidConfig` if the rounded block size is too small to
    /// hold a `BlockLink`, if any intermediate value overflows, or if the
    /// total size or alignment does not fit in a `u32`.
    pub fn region_layout(block_size: usize, num_blocks: usize) -> Result<Layout, AllocInitError> {
        // Round the block size up to a multiple of `BlockLink`'s alignment.
        let block_size = {
            let align = mem::align_of::<BlockLink>();
            let up = block_size
                .checked_add(align)
                .ok_or(AllocInitError::InvalidConfig)?
                - 1;
            up & !(align - 1)
        };
        if block_size < mem::size_of::<BlockLink>() {
            return Err(AllocInitError::InvalidConfig);
        }
        let total_size = block_size
            .checked_mul(num_blocks)
            .ok_or(AllocInitError::InvalidConfig)?;
        // Sizes and alignments are stored in `u32` fields; reject anything wider.
        u32::try_from(total_size).map_err(|_| AllocInitError::InvalidConfig)?;
        let align = 1_usize
            .checked_shl(block_size.trailing_zeros())
            .ok_or(AllocInitError::InvalidConfig)?;
        u32::try_from(align).map_err(|_| AllocInitError::InvalidConfig)?;
        Layout::from_size_align(total_size, align).map_err(|_| AllocInitError::InvalidConfig)
    }

    /// Pops the head of the free list and marks one more block outstanding.
    ///
    /// Shared by `allocate` and `allocate_block` (previously duplicated).
    fn pop_free_block(&mut self) -> Result<NonZeroUsize, AllocError> {
        let old_head = self.free_list.take().ok_or(AllocError)?;
        // SAFETY: every address placed on the free list was initialized as a
        // `BlockLink` inside the region by `RawSlab::try_new` or `deallocate`.
        unsafe {
            let link_mut = self.base.link_mut(old_head);
            self.free_list = link_mut.next.take();
        }
        self.outstanding += 1;
        Ok(old_head)
    }

    /// Allocates a block to hold a value of `layout`.
    ///
    /// # Errors
    ///
    /// Returns `AllocError` if `layout` exceeds the block size or alignment,
    /// or if no blocks are free.
    pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        if layout.size() > self.block_size as usize || layout.align() > self.block_align as usize {
            return Err(AllocError);
        }
        let old_head = self.pop_free_block()?;
        // The returned slice is sized to the request, not the full block.
        Ok(self.base.with_addr_and_size(old_head, layout.size()))
    }

    /// Allocates one full block.
    ///
    /// # Errors
    ///
    /// Returns `AllocError` if no blocks are free.
    pub fn allocate_block(&mut self) -> Result<NonNull<[u8]>, AllocError> {
        let old_head = self.pop_free_block()?;
        Ok(self.base.with_addr_and_size(old_head, self.block_size()))
    }

    /// Returns a block to the slab, pushing it onto the head of the free list.
    ///
    /// # Safety
    ///
    /// `ptr` must denote a block previously allocated from this slab and not
    /// yet deallocated; otherwise the free list is corrupted and
    /// `outstanding` underflows.
    pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>) {
        let addr = ptr.addr();
        // SAFETY: per the caller contract, `addr` is a block in this region,
        // properly aligned to hold a `BlockLink`.
        unsafe {
            self.base.link_mut(addr).next = self.free_list;
            self.free_list = Some(addr);
        }
        self.outstanding -= 1;
    }

    /// Size in bytes of each block.
    #[inline]
    pub fn block_size(&self) -> usize {
        self.block_size as usize
    }

    /// Guaranteed alignment of each block.
    #[inline]
    pub fn block_align(&self) -> usize {
        self.block_align as usize
    }

    /// Total number of blocks managed by this slab.
    #[inline]
    pub fn num_blocks(&self) -> usize {
        self.num_blocks as usize
    }

    /// Total size in bytes of the managed region.
    #[inline]
    pub fn size(&self) -> usize {
        self.block_size() * self.num_blocks()
    }

    /// One-past-the-end address of the region, or `None` if the region runs
    /// to the end of the address space.
    #[inline]
    pub fn limit(&self) -> Option<NonZeroUsize> {
        self.base
            .addr()
            .get()
            .checked_add(self.size())
            .and_then(NonZeroUsize::new)
    }

    /// Returns `true` if `ptr` falls within the managed region.
    #[inline]
    pub fn contains_ptr(&self, ptr: NonNull<u8>) -> bool {
        // A `None` limit means the region extends to the end of the address
        // space, so any address at or above `base` is inside.
        self.base.addr() <= ptr.addr() && self.limit().map(|lim| ptr.addr() < lim).unwrap_or(true)
    }

    /// Returns `true` if at least one block is free.
    #[inline]
    pub fn can_allocate(&self) -> bool {
        self.free_list.is_some()
    }

    /// Number of blocks currently allocated.
    #[inline]
    pub fn outstanding(&self) -> usize {
        self.outstanding.try_into().unwrap()
    }
}
impl<A> fmt::Debug for Slab<A>
where
    A: BackingAllocator,
{
    // Shows the slab's identifying configuration; the free-list state and
    // backing allocator are intentionally omitted.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("Slab");
        builder.field("base", &self.base.ptr());
        builder.field("block_size", &self.block_size);
        builder.field("num_blocks", &self.num_blocks);
        builder.finish()
    }
}
impl<A> Drop for Slab<A>
where
    A: BackingAllocator,
{
    fn drop(&mut self) {
        // Recompute the region layout from the stored configuration. The
        // `unwrap` cannot fail: the same computation succeeded at
        // construction time, and rounding the already-rounded stored
        // `block_size` is idempotent.
        let region_layout = Self::region_layout(self.block_size(), self.num_blocks()).unwrap();
        if region_layout.size() != 0 {
            // SAFETY: non-empty regions were obtained from the backing
            // allocator with exactly this layout; zero-sized regions use a
            // dangling pointer and must not be deallocated.
            unsafe {
                self.backing_allocator
                    .deallocate(self.base.ptr(), region_layout)
            }
        }
    }
}
/// Allocator-agnostic slab state: everything a `Slab` holds except the
/// backing allocator. Produced by `RawSlab::try_new` and converted into a
/// `Slab` via `with_backing_allocator`.
struct RawSlab {
    // Base pointer of the managed region.
    base: BasePtr,
    // Head of the free list, or `None` when there are no blocks.
    free_list: Option<NonZeroUsize>,
    // Size of each block in bytes, rounded to `BlockLink` alignment.
    block_size: u32,
    // Alignment guaranteed for every block.
    block_align: u32,
    // Total number of blocks in the region.
    num_blocks: u32,
}
impl RawSlab {
    /// Initializes slab bookkeeping over `region`, threading every block
    /// into an intrusive free list (each block points at the next-higher
    /// block; the last block's link is `None`).
    ///
    /// # Errors
    ///
    /// Returns `InvalidConfig` if the block size is too small to hold a
    /// `BlockLink` or any size computation overflows, and `InvalidLocation`
    /// if the region's end would wrap the address space.
    ///
    /// # Safety
    ///
    /// `region` must be valid for reads and writes of
    /// `Slab::region_layout(block_size, num_blocks)` bytes for the lifetime
    /// of the returned value. It must also be aligned to `BlockLink`
    /// (checked by the assert below).
    unsafe fn try_new(
        region: NonNull<u8>,
        block_size: usize,
        num_blocks: usize,
    ) -> Result<RawSlab, AllocInitError> {
        // Precondition: the region must be aligned for free-list links.
        assert_eq!(region.addr().get() % mem::align_of::<BlockLink>(), 0);
        if block_size < mem::size_of::<BlockLink>() {
            return Err(AllocInitError::InvalidConfig);
        }
        // Round the block size up to `BlockLink` alignment, mirroring
        // `Slab::region_layout`.
        let block_size = {
            let align = mem::align_of::<BlockLink>();
            // BUGFIX: report overflow as `InvalidConfig` instead of
            // panicking via `unwrap`. The `try_new`/`try_new_in` paths
            // pre-validate this sum through `region_layout`, but
            // `Slab::new_raw` reaches here directly and previously could
            // panic on `block_size` near `usize::MAX`.
            let up = block_size
                .checked_add(align)
                .ok_or(AllocInitError::InvalidConfig)?
                - 1;
            up & !(align - 1)
        };
        assert_eq!(block_size % mem::align_of::<BlockLink>(), 0);
        let layout = Slab::<Raw>::region_layout(block_size, num_blocks)
            .map_err(|_| AllocInitError::InvalidConfig)?;
        let region_end = region
            .addr()
            .get()
            .checked_add(layout.size())
            .and_then(NonZeroUsize::new)
            .ok_or(AllocInitError::InvalidLocation)?;
        let base = BasePtr::new(region, layout.size());
        // Write a `BlockLink` at the start of every block. The range is
        // empty when `num_blocks == 0`, so the subtraction below never runs
        // in that case.
        for block_addr in (region.addr().get()..region_end.get()).step_by(block_size) {
            let block_addr = NonZeroUsize::new(block_addr).unwrap();
            // The last block starts at `region_end - block_size` and gets no
            // successor.
            let is_not_last = block_addr.get() < region_end.get() - block_size;
            let next = is_not_last.then(|| {
                let next_addr = block_addr.get() + block_size;
                NonZeroUsize::new(next_addr).unwrap()
            });
            assert_eq!(block_addr.get() % mem::align_of::<BlockLink>(), 0);
            // SAFETY: `block_addr` lies within the region the caller
            // guarantees valid, and is aligned per the assert above.
            unsafe { base.init_link_at(block_addr, BlockLink { next }) };
        }
        // The guaranteed block alignment is the largest power of two that
        // divides the (rounded) block size.
        let block_align = 1_u32
            .checked_shl(block_size.trailing_zeros())
            .ok_or(AllocInitError::InvalidConfig)?;
        Ok(RawSlab {
            base,
            // The free list starts at the region base unless there are no blocks.
            free_list: (num_blocks > 0).then(|| base.addr()),
            block_size: block_size
                .try_into()
                .map_err(|_| AllocInitError::InvalidConfig)?,
            block_align,
            num_blocks: num_blocks
                .try_into()
                .map_err(|_| AllocInitError::InvalidConfig)?,
        })
    }

    /// Pairs the raw state with a backing allocator, producing a complete
    /// `Slab` with zero outstanding allocations.
    fn with_backing_allocator<A: BackingAllocator>(self, backing_allocator: A) -> Slab<A> {
        Slab {
            base: self.base,
            free_list: self.free_list,
            block_size: self.block_size,
            block_align: self.block_align,
            num_blocks: self.num_blocks,
            outstanding: 0,
            backing_allocator,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Block sizes smaller than a free-list link must be rejected regardless
    // of block count.
    #[test]
    fn too_small_block_size_errors() {
        Slab::<Global>::try_new(0, 0).unwrap_err();
        Slab::<Global>::try_new(0, 1).unwrap_err();
        Slab::<Global>::try_new(1, 0).unwrap_err();
        Slab::<Global>::try_new(1, 1).unwrap_err();
        Slab::<Global>::try_new(mem::size_of::<BlockLink>() - 1, 0).unwrap_err();
        Slab::<Global>::try_new(mem::size_of::<BlockLink>() - 1, 1).unwrap_err();
    }

    // A total size that overflows `usize` must be rejected, not wrap.
    #[test]
    fn overflow_address_space_errors() {
        Slab::<Global>::try_new(usize::MAX, 2).unwrap_err();
    }

    // A slab with zero blocks is constructible but can never allocate.
    #[test]
    fn zero_blocks() {
        let mut slab = Slab::<Global>::try_new(128, 0).unwrap();
        slab.allocate(Layout::from_size_align(0, 1).unwrap())
            .unwrap_err();
    }

    // Exercises exhaustion, deallocation, and the over-sized-layout check on
    // a single-block slab.
    #[test]
    fn one_block() {
        const BLK_SIZE: usize = 128;
        let mut slab = Slab::<Global>::try_new(BLK_SIZE, 1).unwrap();
        assert!(slab.can_allocate());
        let layout = Layout::from_size_align(0, 1).unwrap();
        let b = slab.allocate(layout).unwrap();
        assert_eq!(slab.outstanding(), 1);
        assert!(!slab.can_allocate());
        // Exhausted: further allocation fails and outstanding is unchanged.
        slab.allocate(layout).unwrap_err();
        assert_eq!(slab.outstanding(), 1);
        unsafe { slab.deallocate(b.cast()) };
        assert_eq!(slab.outstanding(), 0);
        // A layout larger than the block size is rejected even when free.
        slab.allocate(Layout::from_size_align(BLK_SIZE + 1, 1).unwrap())
            .unwrap_err();
    }

    // Exercises the free list across two full allocate/deallocate cycles,
    // returning blocks in both FIFO and LIFO order.
    #[test]
    fn two_blocks() {
        const BLK_SIZE: usize = 128;
        let mut slab = Slab::<Global>::try_new(BLK_SIZE, 2).unwrap();
        let layout = Layout::from_size_align(1, 1).unwrap();
        // First cycle: drain both blocks, then free in allocation order.
        assert!(slab.can_allocate());
        let a = slab.allocate(layout).unwrap();
        assert_eq!(slab.outstanding(), 1);
        assert!(slab.can_allocate());
        let b = slab.allocate(layout).unwrap();
        assert_eq!(slab.outstanding(), 2);
        assert!(!slab.can_allocate());
        slab.allocate(layout).unwrap_err();
        assert_eq!(slab.outstanding(), 2);
        unsafe {
            slab.deallocate(a.cast());
            assert_eq!(slab.outstanding(), 1);
            assert!(slab.can_allocate());
            slab.deallocate(b.cast());
            assert_eq!(slab.outstanding(), 0);
            assert!(slab.can_allocate());
        }
        // Second cycle: drain again, then free in reverse order.
        assert!(slab.can_allocate());
        let a = slab.allocate(layout).unwrap();
        assert_eq!(slab.outstanding(), 1);
        assert!(slab.can_allocate());
        let b = slab.allocate(layout).unwrap();
        assert_eq!(slab.outstanding(), 2);
        assert!(!slab.can_allocate());
        slab.allocate(layout).unwrap_err();
        assert_eq!(slab.outstanding(), 2);
        unsafe {
            slab.deallocate(b.cast());
            assert_eq!(slab.outstanding(), 1);
            assert!(slab.can_allocate());
            slab.deallocate(a.cast());
            assert_eq!(slab.outstanding(), 0);
            assert!(slab.can_allocate());
        }
    }
}