use crate::*;
use core::sync::atomic::{AtomicU64, Ordering};
/// Lock-free bitmap operations used to track which object slots of a page
/// are in use. A set bit means "allocated", a cleared bit means "free";
/// bits beyond the page's real capacity stay permanently set (see
/// `initialize`) so they can never be handed out.
pub(crate) trait Bitfield {
    /// Marks every bit allocated, then frees the first
    /// `capacity / for_size` bits — the slots that actually fit.
    fn initialize(&mut self, for_size: usize, capacity: usize);
    /// Finds the first free slot whose address satisfies `layout`'s size
    /// and alignment; returns `(slot index, absolute address)`.
    fn first_fit(
        &self,
        base_addr: usize,
        layout: Layout,
        page_size: usize,
    ) -> Option<(usize, usize)>;
    /// True if the bit at `idx` is set (slot allocated).
    fn is_allocated(&self, idx: usize) -> bool;
    /// Atomically sets bit `idx` (marks the slot allocated).
    fn set_bit(&self, idx: usize);
    /// Atomically clears bit `idx` (marks the slot free).
    fn clear_bit(&self, idx: usize);
    /// True when every bit (including sentinel bits) is set.
    fn is_full(&self) -> bool;
    /// True when the first `relevant_bits` bits are all clear.
    fn all_free(&self, relevant_bits: usize) -> bool;
}
impl Bitfield for [AtomicU64] {
    /// Marks every bit allocated, then clears the bits of the
    /// `capacity / for_size` slots that actually fit in the page.
    fn initialize(&mut self, for_size: usize, capacity: usize) {
        // Start from "everything allocated"; bits past the real capacity
        // stay set forever and act as sentinels.
        for bitmap in self.iter_mut() {
            *bitmap = AtomicU64::new(u64::max_value());
        }
        // Free exactly the slots that fit (capped by the number of bits).
        let relevant_bits = core::cmp::min(capacity / for_size, self.len() * 64);
        for idx in 0..relevant_bits {
            self.clear_bit(idx);
        }
    }

    /// Scans for the first free slot whose address satisfies `layout`'s
    /// alignment, returning `(slot index, absolute address)`.
    ///
    /// Fixes over the previous version:
    /// - When the lowest free bit of a word failed the alignment check, the
    ///   remaining free bits of that word were skipped entirely, spuriously
    ///   reporting "no fit". Every free bit is now considered.
    /// - The offset bound is computed with `checked_sub`, so a layout larger
    ///   than the usable data area yields `None` instead of a debug-mode
    ///   subtraction-overflow panic.
    #[inline(always)]
    fn first_fit(
        &self,
        base_addr: usize,
        layout: Layout,
        page_size: usize,
    ) -> Option<(usize, usize)> {
        // Largest offset at which an object of this size still fits inside
        // the data area (the metadata lives in the page's trailing bytes).
        let max_offset =
            (page_size - OBJECT_PAGE_METADATA_OVERHEAD).checked_sub(layout.size())?;
        for (base_idx, b) in self.iter().enumerate() {
            let bitval = b.load(Ordering::Relaxed);
            // `free` has a 1 for every currently free slot in this word.
            let mut free = !bitval;
            while free != 0 {
                let first_free = free.trailing_zeros() as usize;
                let idx: usize = base_idx * 64 + first_free;
                let offset = idx * layout.size();
                if offset > max_offset {
                    // Offsets only grow from here on; nothing later can fit.
                    return None;
                }
                let addr: usize = base_addr + offset;
                if addr % layout.align() == 0 {
                    return Some((idx, addr));
                }
                // Misaligned: discard this candidate, try the next free bit.
                free &= !(1 << first_free);
            }
        }
        None
    }

    /// True if slot `idx` is currently marked allocated.
    #[inline(always)]
    fn is_allocated(&self, idx: usize) -> bool {
        let base_idx = idx / 64;
        let bit_idx = idx % 64;
        (self[base_idx].load(Ordering::Relaxed) & (1 << bit_idx)) > 0
    }

    /// Atomically marks slot `idx` allocated.
    #[inline(always)]
    fn set_bit(&self, idx: usize) {
        let base_idx = idx / 64;
        let bit_idx = idx % 64;
        self[base_idx].fetch_or(1 << bit_idx, Ordering::Relaxed);
    }

    /// Atomically marks slot `idx` free.
    #[inline(always)]
    fn clear_bit(&self, idx: usize) {
        let base_idx = idx / 64;
        let bit_idx = idx % 64;
        self[base_idx].fetch_and(!(1 << bit_idx), Ordering::Relaxed);
    }

    /// True when every bit is set. Short-circuits on the first word with a
    /// free bit (the old `filter(..).count() == 0` always scanned all words).
    #[inline(always)]
    fn is_full(&self) -> bool {
        self.iter()
            .all(|x| x.load(Ordering::Relaxed) == u64::max_value())
    }

    /// True when the first `relevant_bits` bits are all clear, i.e. no slot
    /// of the page is allocated.
    #[inline(always)]
    fn all_free(&self, relevant_bits: usize) -> bool {
        for (idx, bitmap) in self.iter().enumerate() {
            // Half-open bit range [start, end) covered by this word.
            let checking_bit_range = (idx * 64, (idx + 1) * 64);
            if relevant_bits >= checking_bit_range.0 && relevant_bits < checking_bit_range.1 {
                // The relevant prefix ends inside this word: only its low
                // `relevant_bits - start` bits must be clear. Bits past the
                // prefix are permanently-set sentinels (see `initialize`),
                // so no later word needs inspection.
                let bits_that_should_be_free = relevant_bits - checking_bit_range.0;
                let free_mask = (1 << bits_that_should_be_free) - 1;
                return (free_mask & bitmap.load(Ordering::Relaxed)) == 0;
            }
            // This word lies entirely inside the prefix: must be all clear.
            if bitmap.load(Ordering::Relaxed) == 0 {
                continue;
            } else {
                return false;
            }
        }
        true
    }
}
/// A fixed-size page of memory from which equally-sized objects are carved.
///
/// Implementors supply the storage, an allocation bitfield, and intrusive
/// prev/next links; the provided methods build allocation on top of those
/// accessors.
pub trait AllocablePage {
    /// Total page size in bytes.
    const SIZE: usize;

    /// One bit per object slot; set = allocated.
    fn bitfield(&self) -> &[AtomicU64; 8];
    fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8];
    /// Intrusive link to the previous page in a `PageList`.
    fn prev(&mut self) -> &mut Rawlink<Self>
    where
        Self: core::marker::Sized;
    /// Intrusive link to the next page in a `PageList`.
    fn next(&mut self) -> &mut Rawlink<Self>
    where
        Self: core::marker::Sized;

    /// Locates the first free, suitably aligned slot for `layout` without
    /// claiming it; returns `(slot index, absolute address)`.
    fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> {
        let base_addr = (self as *const Self as *const u8) as usize;
        self.bitfield().first_fit(base_addr, layout, Self::SIZE)
    }

    /// Allocates one object of `layout` from this page; null when no
    /// suitable slot exists.
    fn allocate(&mut self, layout: Layout) -> *mut u8 {
        if let Some((idx, addr)) = self.first_fit(layout) {
            // Claim the slot before handing out its address.
            self.bitfield().set_bit(idx);
            addr as *mut u8
        } else {
            ptr::null_mut()
        }
    }

    /// True when no free slot remains on this page.
    fn is_full(&self) -> bool {
        self.bitfield().is_full()
    }

    /// True when none of the page's `relevant_bits` slots is allocated.
    fn is_empty(&self, relevant_bits: usize) -> bool {
        self.bitfield().all_free(relevant_bits)
    }

    /// Returns the object at `ptr` (allocated with `layout`) to this page.
    ///
    /// Panics if `ptr` is not slot-aligned for `layout` or the slot is not
    /// currently marked allocated.
    fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
        trace!(
            "AllocablePage deallocating ptr = {:p} with {:?}",
            ptr,
            layout
        );
        // Offset of the object inside the page (pages are SIZE-aligned).
        let page_offset = (ptr.as_ptr() as usize) & (Self::SIZE - 1);
        assert!(page_offset % layout.size() == 0);
        let idx = page_offset / layout.size();
        assert!(
            self.bitfield().is_allocated(idx),
            "{:p} not marked allocated?",
            ptr
        );
        self.bitfield().clear_bit(idx);
        Ok(())
    }
}
/// A page for large objects: raw object storage first, followed by the
/// intrusive list links and the allocation bitfield. `repr(C)` keeps `data`
/// at offset 0 so slot addresses can be derived from the page address.
#[repr(C)]
pub struct LargeObjectPage<'a> {
    /// Object storage; the page minus `OBJECT_PAGE_METADATA_OVERHEAD`
    /// trailing bytes of metadata.
    #[allow(dead_code)]
    data: [u8; LARGE_OBJECT_PAGE_SIZE - OBJECT_PAGE_METADATA_OVERHEAD],
    /// Next page in the owning `PageList`.
    next: Rawlink<LargeObjectPage<'a>>,
    /// Previous page in the owning `PageList`.
    prev: Rawlink<LargeObjectPage<'a>>,
    /// One bit per object slot; set = allocated.
    pub(crate) bitfield: [AtomicU64; 8],
}
// SAFETY(review): the bitfield is only mutated through atomics; the raw
// prev/next links appear to be managed under the allocator's own
// synchronization — confirm that invariant before relying on cross-thread
// sharing of pages.
unsafe impl<'a> Send for LargeObjectPage<'a> {}
unsafe impl<'a> Sync for LargeObjectPage<'a> {}
// Straightforward field accessors wiring LargeObjectPage into the
// AllocablePage machinery.
impl<'a> AllocablePage for LargeObjectPage<'a> {
    /// Total page size in bytes.
    const SIZE: usize = LARGE_OBJECT_PAGE_SIZE;

    fn bitfield(&self) -> &[AtomicU64; 8] {
        &self.bitfield
    }

    fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8] {
        &mut self.bitfield
    }

    fn prev(&mut self) -> &mut Rawlink<Self> {
        &mut self.prev
    }

    fn next(&mut self) -> &mut Rawlink<Self> {
        &mut self.next
    }
}
impl<'a> Default for LargeObjectPage<'a> {
    /// A fully zeroed page: empty data, null links, all-clear bitfield.
    fn default() -> LargeObjectPage<'a> {
        // SAFETY: every field (u8 array, raw-pointer links, AtomicU64s) is a
        // valid value when zero-initialized, so the zeroed bit pattern is a
        // legal LargeObjectPage.
        unsafe { mem::MaybeUninit::zeroed().assume_init() }
    }
}
impl<'a> fmt::Debug for LargeObjectPage<'a> {
    // Pages are opaque blobs of object slots; just print the type name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("LargeObjectPage")
    }
}
/// A page for small objects: raw object storage first, followed by the
/// intrusive list links and the allocation bitfield. `repr(C)` keeps `data`
/// at offset 0 so slot addresses can be derived from the page address.
#[repr(C)]
pub struct ObjectPage<'a> {
    /// Object storage; the page minus `OBJECT_PAGE_METADATA_OVERHEAD`
    /// trailing bytes of metadata.
    #[allow(dead_code)]
    data: [u8; OBJECT_PAGE_SIZE - OBJECT_PAGE_METADATA_OVERHEAD],
    /// Next page in the owning `PageList`.
    next: Rawlink<ObjectPage<'a>>,
    /// Previous page in the owning `PageList`.
    prev: Rawlink<ObjectPage<'a>>,
    /// One bit per object slot; set = allocated.
    pub(crate) bitfield: [AtomicU64; 8],
}
// SAFETY(review): same reasoning as for LargeObjectPage — atomic bitfield,
// links presumed externally synchronized; confirm before relying on it.
unsafe impl<'a> Send for ObjectPage<'a> {}
unsafe impl<'a> Sync for ObjectPage<'a> {}
// Straightforward field accessors wiring ObjectPage into the AllocablePage
// machinery.
impl<'a> AllocablePage for ObjectPage<'a> {
    /// Total page size in bytes.
    const SIZE: usize = OBJECT_PAGE_SIZE;

    fn bitfield(&self) -> &[AtomicU64; 8] {
        &self.bitfield
    }

    fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8] {
        &mut self.bitfield
    }

    fn prev(&mut self) -> &mut Rawlink<Self> {
        &mut self.prev
    }

    fn next(&mut self) -> &mut Rawlink<Self> {
        &mut self.next
    }
}
impl<'a> Default for ObjectPage<'a> {
    /// A fully zeroed page: empty data, null links, all-clear bitfield.
    fn default() -> ObjectPage<'a> {
        // SAFETY: every field (u8 array, raw-pointer links, AtomicU64s) is a
        // valid value when zero-initialized, so the zeroed bit pattern is a
        // legal ObjectPage.
        unsafe { mem::MaybeUninit::zeroed().assume_init() }
    }
}
impl<'a> fmt::Debug for ObjectPage<'a> {
    // Pages are opaque blobs of object slots; just print the type name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("ObjectPage")
    }
}
/// An intrusive doubly-linked list of pages, threaded through each page's
/// own `prev`/`next` links.
pub(crate) struct PageList<'a, T: AllocablePage> {
    /// First page of the list, if any.
    pub(crate) head: Option<&'a mut T>,
    /// Number of pages currently on the list.
    pub(crate) elements: usize,
}
impl<'a, T: AllocablePage> PageList<'a, T> {
    /// Creates an empty list (const variant for the `unstable` feature).
    #[cfg(feature = "unstable")]
    pub(crate) const fn new() -> PageList<'a, T> {
        PageList {
            head: None,
            elements: 0,
        }
    }

    /// Creates an empty list.
    #[cfg(not(feature = "unstable"))]
    pub(crate) fn new() -> PageList<'a, T> {
        PageList {
            head: None,
            elements: 0,
        }
    }

    /// Returns an iterator yielding `&mut` to every page, by walking the
    /// raw `next` links starting at `head`.
    ///
    /// NOTE(review): the `'b: 'a` bound lets the iterator outlive the
    /// borrow of `self`, so aliasing mutable references to pages are
    /// possible in principle — confirm callers never hold overlapping
    /// borrows.
    pub(crate) fn iter_mut<'b: 'a>(&mut self) -> ObjectPageIterMut<'b, T> {
        let m = match self.head {
            None => Rawlink::none(),
            Some(ref mut m) => Rawlink::some(*m),
        };
        ObjectPageIterMut {
            head: m,
            phantom: core::marker::PhantomData,
        }
    }

    /// Pushes `new_head` onto the front of the list.
    pub(crate) fn insert_front<'b>(&'b mut self, mut new_head: &'a mut T) {
        match self.head {
            None => {
                // First element: no neighbors to fix up.
                *new_head.prev() = Rawlink::none();
                self.head = Some(new_head);
            }
            Some(ref mut head) => {
                *new_head.prev() = Rawlink::none();
                // Old head's prev now points at the incoming page.
                *head.prev() = Rawlink::some(new_head);
                // Swap so `self.head` owns the incoming page; the local
                // `new_head` binding is left holding the old head...
                mem::swap(head, &mut new_head);
                // ...which becomes the second element.
                *head.next() = Rawlink::some(new_head);
            }
        }
        self.elements += 1;
    }

    /// Unlinks `slab_page` from this list, fixing up both neighbors and
    /// `head`, and clears the page's own links.
    ///
    /// NOTE(review): `elements` is decremented unconditionally — callers
    /// must only pass pages that are actually on this list.
    pub(crate) fn remove_from_list(&mut self, slab_page: &mut T) {
        unsafe {
            // Detach from the predecessor (or advance `head` if it was
            // the first element).
            match slab_page.prev().resolve_mut() {
                None => {
                    self.head = slab_page.next().resolve_mut();
                }
                Some(prev) => {
                    *prev.next() = match slab_page.next().resolve_mut() {
                        None => Rawlink::none(),
                        Some(next) => Rawlink::some(next),
                    };
                }
            }
            // Detach from the successor, if any.
            match slab_page.next().resolve_mut() {
                None => (),
                Some(next) => {
                    *next.prev() = match slab_page.prev().resolve_mut() {
                        None => Rawlink::none(),
                        Some(prev) => Rawlink::some(prev),
                    };
                }
            }
        }
        // The removed page no longer belongs to any list.
        *slab_page.prev() = Rawlink::none();
        *slab_page.next() = Rawlink::none();
        self.elements -= 1;
    }

    /// Pops the first page off the list, or `None` when empty.
    pub(crate) fn pop<'b>(&'b mut self) -> Option<&'a mut T> {
        match self.head {
            None => None,
            Some(ref mut head) => {
                let head_next = head.next();
                let mut new_head = unsafe { head_next.resolve_mut() };
                // After the swap, `self.head` is the second element and the
                // local `new_head` holds the page being popped.
                mem::swap(&mut self.head, &mut new_head);
                // New first element (if any) has no predecessor.
                let _ = self.head.as_mut().map(|n| {
                    *n.prev() = Rawlink::none();
                });
                self.elements -= 1;
                // Detach the popped page's links before handing it out.
                new_head.map(|node| {
                    *node.prev() = Rawlink::none();
                    *node.next() = Rawlink::none();
                    node
                })
            }
        }
    }

    /// True if the page at address `s` is currently linked into this list
    /// (pointer identity, O(n) walk).
    pub(crate) fn contains(&mut self, s: *const T) -> bool {
        for slab_page in self.iter_mut() {
            if core::ptr::eq(slab_page, s) {
                return true;
            }
        }
        false
    }
}
/// Mutable iterator over the pages of a `PageList`, following each page's
/// raw `next` link.
pub(crate) struct ObjectPageIterMut<'a, P: AllocablePage> {
    /// Link to the next page the iterator will yield.
    head: Rawlink<P>,
    /// Ties the yielded `&'a mut P` references to the iterator's lifetime.
    phantom: core::marker::PhantomData<&'a P>,
}
impl<'a, P: AllocablePage + 'a> Iterator for ObjectPageIterMut<'a, P> {
    type Item = &'a mut P;

    /// Yields the current page and advances to its successor.
    #[inline]
    fn next(&mut self) -> Option<&'a mut P> {
        // SAFETY(review): materializes `&'a mut P` from a raw link; this is
        // sound only if the list's pages outlive 'a and callers never hold
        // two yielded references to the same page — confirm upstream.
        unsafe {
            self.head.resolve_mut().map(|next| {
                // Advance the cursor before handing out the current page.
                self.head = match next.next().resolve_mut() {
                    None => Rawlink::none(),
                    Some(ref mut sp) => Rawlink::some(*sp),
                };
                next
            })
        }
    }
}
/// A nullable raw pointer used as an intrusive list link; `none` is
/// represented by a null pointer.
pub struct Rawlink<T> {
    p: *mut T,
}

impl<T> Default for Rawlink<T> {
    // The default link points at nothing.
    fn default() -> Self {
        Rawlink {
            p: core::ptr::null_mut(),
        }
    }
}

impl<T> Rawlink<T> {
    /// An empty (null) link.
    pub(crate) fn none() -> Rawlink<T> {
        Rawlink {
            p: core::ptr::null_mut(),
        }
    }

    /// A link pointing at `n`.
    pub(crate) fn some(n: &mut T) -> Rawlink<T> {
        Rawlink { p: n as *mut T }
    }

    /// Dereferences the link as a shared reference, `None` if null.
    ///
    /// # Safety
    /// The pointee must still be alive, and the unconstrained lifetime `'a`
    /// must not be allowed to outlive it.
    #[allow(dead_code)]
    pub(crate) unsafe fn resolve<'a>(&self) -> Option<&'a T> {
        if self.p.is_null() {
            None
        } else {
            Some(&*self.p)
        }
    }

    /// Dereferences the link as an exclusive reference, `None` if null.
    ///
    /// # Safety
    /// Same as [`Rawlink::resolve`], and no other live reference to the
    /// pointee may exist.
    pub(crate) unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> {
        if self.p.is_null() {
            None
        } else {
            Some(&mut *self.p)
        }
    }

    /// Steals the link's pointer, leaving `self` empty.
    #[allow(dead_code)]
    pub(crate) fn take(&mut self) -> Rawlink<T> {
        let stolen = Rawlink { p: self.p };
        self.p = core::ptr::null_mut();
        stolen
    }
}