#![cfg_attr(not(any(feature = "std", test)), no_std)]
#![deny(missing_docs)]
#![deny(clippy::all)]
#![feature(option_expect_none, option_unwrap_none, map_first_last)]
#![cfg_attr(test, feature(slice_fill, vec_into_raw_parts))]
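//! A general-purpose buffer pool, tracking which ranges of its
//! externally-provided memory are in use, and guarding in-flight buffers
//! against reclamation while they are used (for example by a pending
//! syscall or an in-flight future).
//!
//! The pool never allocates by itself: backing memory is registered with
//! [`BufferPool::begin_expand`] and [`ExpansionHandle::initialize`], and is
//! then handed out as [`BufferSlice`]s.
//!
//! The following is a minimal usage sketch. It assumes the crate is named
//! `buffer_pool`, and supplies the backing memory by leaking a boxed slice,
//! as the unit tests do; a real consumer would typically mmap memory
//! instead.
//!
//! ```ignore
//! use buffer_pool::{AllocationStrategy, BufferPool, NoGuard, NoHandle};
//!
//! // A pool indexed by u32 offsets, with no close handle and no per-mmap
//! // extra data.
//! let pool: BufferPool<u32, NoHandle, ()> = BufferPool::new(None);
//!
//! // Register 4096 bytes of backing memory.
//! let memory = Box::leak(vec![0u8; 4096].into_boxed_slice());
//! unsafe {
//!     pool.begin_expand(4096)
//!         .unwrap()
//!         .initialize(core::ptr::NonNull::new(memory.as_mut_ptr()).unwrap(), ());
//! }
//!
//! // Acquire a 64-byte slice with no particular alignment requirement.
//! let slice = pool
//!     .acquire_borrowed_slice::<NoGuard>(64, 1, AllocationStrategy::Optimal)
//!     .unwrap();
//! assert_eq!(slice.len(), 64);
//! ```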
use core::borrow::{Borrow, BorrowMut};
use core::cell::UnsafeCell;
use core::convert::{TryFrom, TryInto};
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::ptr::NonNull;
use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use core::{cmp, fmt, mem, ops, ptr, slice};
extern crate alloc;
use alloc::collections::{BTreeMap, BTreeSet};
use alloc::sync::{Arc, Weak};
#[cfg(any(test, feature = "std"))]
use parking_lot::{RwLock, RwLockUpgradableReadGuard};
#[cfg(not(any(test, feature = "std")))]
use spinning::{RwLock, RwLockUpgradableReadGuard};
pub use guard_trait::{Guard, Guardable, GuardableExclusive, GuardableShared, NoGuard};
mod private {
use core::{fmt, ops};
pub trait Sealed {}
pub trait IntegerRequirements:
Sized
+ From<u8>
+ Copy
+ Clone
+ fmt::Debug
+ fmt::Display
+ Eq
+ PartialEq<Self>
+ PartialOrd<Self>
+ Ord
+ ops::Add<Self, Output = Self>
+ ops::AddAssign
+ ops::Sub<Self, Output = Self>
+ ops::Rem<Output = Self>
+ ops::Shl<u8, Output = Self>
+ ops::Shl<u32, Output = Self>
+ ops::Shr<u8, Output = Self>
+ ops::Shr<u32, Output = Self>
+ ops::Not<Output = Self>
+ ops::BitAnd<Output = Self>
+ ops::BitAndAssign
+ ops::BitOr<Output = Self>
+ ops::BitOrAssign
+ ops::BitXor<Self, Output = Self>
{
fn zero() -> Self {
Self::from(0u8)
}
const MAX: Self;
fn trailing_zeros(self) -> u32;
fn try_into_usize(self) -> Option<usize>;
fn checked_add(self, rhs: Self) -> Option<Self>;
fn checked_sub(self, rhs: Self) -> Option<Self>;
fn checked_div(self, rhs: Self) -> Option<Self>;
fn checked_mul(self, rhs: Self) -> Option<Self>;
fn is_power_of_two(self) -> bool;
}
}
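/// An unsigned integer type that the pool can use for offsets and sizes.
///
/// This trait is sealed; it is implemented for `u16`, `u32`, `u64`, `u128`,
/// and `usize`.
///
/// # Safety
///
/// The required arithmetic operations must behave like those of the built-in
/// unsigned integers, since the pool relies on them when carving up and
/// merging memory ranges.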
pub unsafe trait Integer: private::Sealed + private::IntegerRequirements {}
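// The occ map reserves the highest bit of each key to mark whether the range
// is in use; the remaining lower bits store the offset itself.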
fn occ_map_ready_shift<I: Integer>() -> u32 {
let bit_count = (mem::size_of::<I>() * 8) as u32;
bit_count - 1
}
fn occ_map_used_bit<I: Integer>() -> I {
I::from(1u8) << occ_map_ready_shift::<I>()
}
fn occ_map_off_mask<I: Integer>() -> I {
!occ_map_used_bit::<I>()
}
macro_rules! impl_integer_for_primitive(
($primitive:ident) => {
impl private::IntegerRequirements for $primitive {
fn trailing_zeros(self) -> u32 {
Self::trailing_zeros(self)
}
const MAX: Self = Self::MAX;
fn try_into_usize(self) -> Option<usize> {
usize::try_from(self).ok()
}
fn checked_add(self, rhs: Self) -> Option<Self> {
Self::checked_add(self, rhs)
}
fn checked_sub(self, rhs: Self) -> Option<Self> {
Self::checked_sub(self, rhs)
}
fn checked_div(self, rhs: Self) -> Option<Self> {
Self::checked_div(self, rhs)
}
fn checked_mul(self, rhs: Self) -> Option<Self> {
Self::checked_mul(self, rhs)
}
fn is_power_of_two(self) -> bool {
Self::is_power_of_two(self)
}
}
unsafe impl Integer for $primitive {}
}
);
impl_integer_for_primitive!(u16);
impl_integer_for_primitive!(u32);
impl_integer_for_primitive!(u64);
impl_integer_for_primitive!(u128);
impl_integer_for_primitive!(usize);
impl private::Sealed for u16 {}
impl private::Sealed for u32 {}
impl private::Sealed for u64 {}
impl private::Sealed for u128 {}
impl private::Sealed for usize {}
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct Offset<I>(I);
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct Size<I>(I);
impl<I: Integer> Size<I> {
fn size(&self) -> I {
self.0
}
fn from_size(size: I) -> Self {
Self(size)
}
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct FreeEntry<I> {
size: I,
offset: I,
}
impl<I: Integer> FreeEntry<I> {
fn size(&self) -> I {
self.size
}
fn offset(&self) -> I {
self.offset
}
fn log2_of_alignment(&self) -> I {
I::from(self.offset.trailing_zeros() as u8)
}
fn set_size(&mut self, size: I) {
self.size = size;
}
fn set_offset(&mut self, offset: I) {
self.offset = offset;
}
fn from_size_offset(size: I, offset: I) -> Self {
Self { size, offset }
}
}
impl<I: Integer> Ord for FreeEntry<I> {
fn cmp(&self, other: &Self) -> cmp::Ordering {
Ord::cmp(&self.size(), &other.size())
.then(Ord::cmp(
&self.log2_of_alignment(),
&other.log2_of_alignment(),
))
.then(Ord::cmp(&self.offset, &other.offset))
}
}
impl<I: Integer> PartialOrd<Self> for FreeEntry<I> {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(Ord::cmp(self, other))
}
}
#[derive(Clone, Copy, Eq, Hash, PartialEq)]
struct OccOffset<I>(I);
impl<I: Integer> fmt::Debug for OccOffset<I> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccOffset")
.field("offset", &self.offset())
.field("is_used", &self.is_used())
.finish()
}
}
impl<I: Integer> OccOffset<I> {
fn offset(&self) -> I {
self.0 & occ_map_off_mask::<I>()
}
fn is_used(&self) -> bool {
self.0 & occ_map_used_bit::<I>() == occ_map_used_bit::<I>()
}
fn set_offset(&mut self, offset: I) {
assert_eq!(offset & occ_map_off_mask::<I>(), offset);
self.0 &= !occ_map_off_mask::<I>();
self.0 |= offset;
}
fn set_used(&mut self, used: bool) {
self.0 &= !occ_map_used_bit::<I>();
if used {
self.0 |= occ_map_used_bit::<I>();
}
}
fn from_offset_used(offset: I, used: bool) -> Self {
let mut this = Self(I::zero());
this.set_offset(offset);
this.set_used(used);
this
}
}
impl<I: Integer> Ord for OccOffset<I> {
fn cmp(&self, other: &Self) -> cmp::Ordering {
Ord::cmp(&self.is_used(), &other.is_used()).then(Ord::cmp(&self.offset(), &other.offset()))
}
}
impl<I: Integer> PartialOrd for OccOffset<I> {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(Ord::cmp(self, other))
}
}
struct MmapInfo<I, E> {
size: Size<I>,
extra: UnsafeCell<MaybeUninit<E>>,
addr: AtomicPtr<u8>,
}
impl<I, E> fmt::Debug for MmapInfo<I, E>
where
I: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("MmapInfo")
.field("size", &self.size)
.field("addr", &self.addr.load(Ordering::Relaxed))
.finish()
}
}
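/// Various options used by the buffer pool, limiting the sizes and
/// alignments that allocations may request. Alignments are stored as
/// logarithms of two.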
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub struct BufferPoolOptions<I> {
log2_minimum_alignment: u8,
log2_maximum_alignment: u8,
minimum_size: I,
maximum_size: I,
}
impl<I: Integer> BufferPoolOptions<I> {
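/// Create a new options struct with the default values.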
pub fn new() -> Self {
Self::default()
}
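/// Set the maximum alignment that allocations may request. The alignment
/// must be a nonzero power of two; the minimum alignment is lowered to fit
/// if necessary.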
pub fn with_maximum_alignment(mut self, alignment: I) -> Self {
assert!(alignment.is_power_of_two());
assert_ne!(alignment, I::zero());
let log2_align = alignment.trailing_zeros();
self.log2_maximum_alignment = log2_align.try_into().unwrap();
self.log2_minimum_alignment =
cmp::min(self.log2_minimum_alignment, self.log2_maximum_alignment);
self
}
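/// Allow allocations of any alignment, i.e. a minimum alignment of 1.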
pub fn with_no_minimum_alignment(self) -> Self {
self.with_minimum_alignment(I::from(1u8))
}
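/// Set the minimum alignment that every allocation is rounded up to. The
/// alignment must be a nonzero power of two; the maximum alignment is raised
/// to fit if necessary.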
pub fn with_minimum_alignment(mut self, alignment: I) -> Self {
assert!(alignment.is_power_of_two());
assert_ne!(alignment, I::zero());
let log2_align = alignment.trailing_zeros();
self.log2_minimum_alignment = log2_align.try_into().unwrap();
self.log2_maximum_alignment =
cmp::max(self.log2_minimum_alignment, self.log2_maximum_alignment);
self
}
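/// Allow the highest alignment representable by the integer type `I`.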
pub fn with_no_maximum_alignment(self) -> Self {
self.with_maximum_alignment(I::from(1u8) << ((mem::size_of::<I>() * 8 - 1) as u32))
}
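/// Set the maximum size that allocations may request, lowering the minimum
/// size to fit if necessary.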
pub fn with_maximum_size(mut self, size: I) -> Self {
self.maximum_size = size;
self.minimum_size = cmp::min(self.minimum_size, self.maximum_size);
self
}
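/// Set the minimum size that allocations may request, raising the maximum
/// size to fit if necessary.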
pub fn with_minimum_size(mut self, size: I) -> Self {
self.minimum_size = size;
self.maximum_size = cmp::max(self.minimum_size, self.maximum_size);
self
}
}
impl<I: Integer> Default for BufferPoolOptions<I> {
fn default() -> Self {
let log2_minimum_alignment = mem::align_of::<usize>().trailing_zeros() as u8;
let log2_maximum_alignment = (mem::size_of::<I>() * 8 - 1) as u8;
Self {
log2_minimum_alignment,
log2_maximum_alignment: cmp::max(log2_minimum_alignment, log2_maximum_alignment),
maximum_size: I::MAX,
minimum_size: I::from(1u8),
}
}
}
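/// A general-purpose buffer pool, tracking which ranges of its backing
/// memory are occupied, which are free, and which mmap each range belongs
/// to. The pool is generic over the index type `I`, the handle `H` used to
/// close mmaps, and the extra per-mmap data `E`.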
#[derive(Debug)]
pub struct BufferPool<I, H, E>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
{
handle: Option<H>,
options: BufferPoolOptions<I>,
guarded_occ_count: AtomicUsize,
occ_map: RwLock<BTreeMap<OccOffset<I>, Size<I>>>,
free_map: RwLock<BTreeSet<FreeEntry<I>>>,
mmap_map: RwLock<BTreeMap<Offset<I>, MmapInfo<I, E>>>,
}
unsafe impl<I, H, E> Send for BufferPool<I, H, E>
where
I: Integer + Send,
H: Handle<I, E> + Send,
E: Copy + Send,
{
}
unsafe impl<I, H, E> Sync for BufferPool<I, H, E>
where
I: Integer + Sync,
H: Handle<I, E> + Sync,
E: Copy + Sync,
{
}
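/// A placeholder handle for pools whose memory is managed entirely by the
/// caller. It is uninhabited and can never be constructed.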
#[derive(Debug)]
pub enum NoHandle {}
impl<I, E> Handle<I, E> for NoHandle
where
E: Copy,
{
type Error = ::core::convert::Infallible;
fn close(&mut self, _entries: MmapEntries<I, E>) -> Result<(), Self::Error> {
unreachable!("NoHandle cannot be initialized")
}
}
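/// A trait for types that contain a [`BufferPool`], such as the pool itself
/// or a wrapper around it. This allows strongly- and weakly-owned buffer
/// slices to reference a wrapper type rather than the pool directly.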
pub trait AsBufferPool<I, H, E>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
{
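/// Get a reference to the inner buffer pool.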
fn as_buffer_pool(&self) -> &BufferPool<I, H, E>;
}
impl<I, H, E> AsBufferPool<I, H, E> for BufferPool<I, H, E>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
{
fn as_buffer_pool(&self) -> &BufferPool<I, H, E> {
self
}
}
impl<T, I, H, E> AsBufferPool<I, H, E> for T
where
I: Integer,
H: Handle<I, E>,
E: Copy,
T: AsRef<BufferPool<I, H, E>>,
{
fn as_buffer_pool(&self) -> &BufferPool<I, H, E> {
self.as_ref()
}
}
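/// A slice of memory acquired from a [`BufferPool`]. It tracks its own
/// allocation range, the mmap that backs it, and an optional guard that
/// prevents reclamation while the memory is in use. The mode parameter `M`
/// selects between exclusive (mutable) and shared (aliasable) access.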
#[derive(Debug)]
pub struct BufferSlice<
'pool,
I,
H,
E,
G = NoGuard,
C = BufferPool<I, H, E>,
M: guard_trait::marker::Mode = guard_trait::marker::Exclusive,
> where
I: Integer,
H: Handle<I, E>,
E: Copy,
G: Guard,
C: AsBufferPool<I, H, E>,
{
alloc_start: I,
alloc_capacity: I,
alloc_len: I,
mmap_start: I,
mmap_size: I,
pointer: *mut u8,
extra: E,
pool: PoolRefKind<'pool, I, H, E, C>,
guard: Option<G>,
_marker: PhantomData<M>,
}
#[derive(Debug)]
enum PoolRefKind<'pool, I: Integer, H: Handle<I, E>, E: Copy, C: AsBufferPool<I, H, E>> {
Ref(&'pool BufferPool<I, H, E>),
Strong(Arc<C>),
Weak(Weak<C>),
}
impl<'pool, I, H, E, C> Clone for PoolRefKind<'pool, I, H, E, C>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
C: AsBufferPool<I, H, E>,
{
fn clone(&self) -> Self {
match *self {
Self::Ref(r) => Self::Ref(r),
Self::Strong(ref arc) => Self::Strong(Arc::clone(arc)),
Self::Weak(ref weak) => Self::Weak(Weak::clone(weak)),
}
}
}
unsafe impl<'pool, I, H, E, G, C, M> Send for BufferSlice<'pool, I, H, E, G, C, M>
where
I: Integer,
H: Handle<I, E> + Send + Sync,
E: Copy + Send,
G: Guard + Send,
C: AsBufferPool<I, H, E>,
M: guard_trait::marker::Mode,
{
}
unsafe impl<'pool, I, H, E, G, C, M> Sync for BufferSlice<'pool, I, H, E, G, C, M>
where
I: Integer,
H: Send + Sync + Handle<I, E>,
E: Copy + Sync,
G: Sync + Guard,
C: AsBufferPool<I, H, E>,
M: guard_trait::marker::Mode,
{
}
impl<'pool, I, H, E, G, C, M> BufferSlice<'pool, I, H, E, G, C, M>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
G: Guard,
C: AsBufferPool<I, H, E>,
M: guard_trait::marker::Mode,
{
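/// Check whether the pool that this slice belongs to is still alive. This
/// can only return `false` for weakly-owned slices.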
pub fn pool_is_alive(&self) -> bool {
match self.pool {
PoolRefKind::Weak(ref w) => w.strong_count() > 0,
PoolRefKind::Ref(_) | PoolRefKind::Strong(_) => true,
}
}
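/// Get the bytes of this slice without checking that the pool is alive or
/// that the guard state permits access.
///
/// # Safety
///
/// The caller must ensure that the pool has not been dropped, and that the
/// aliasing rules of the mode `M` are upheld.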
pub unsafe fn as_slice_unchecked(&self) -> &[u8] {
debug_assert!(self.pool_is_alive());
debug_assert!(M::IS_ALIASABLE || !self.has_guard());
debug_assert!(self.alloc_capacity >= self.alloc_len);
slice::from_raw_parts(
self.pointer as *const u8,
self.alloc_len.try_into_usize().expect(
"the buffer pool integer type is too large to fit within the system pointer width",
),
)
}
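/// Try to get the bytes of this slice, returning `None` if the pool has died
/// or if a guard currently forbids access in this mode.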
pub fn try_as_slice(&self) -> Option<&[u8]> {
if !self.pool_is_alive() {
return None;
}
if !M::IS_ALIASABLE && self.has_guard() {
return None;
}
Some(unsafe { self.as_slice_unchecked() })
}
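/// Get the bytes of this slice mutably, without checking that the pool is
/// alive or that the slice is unguarded.
///
/// # Safety
///
/// The caller must ensure that the pool has not been dropped and that the
/// slice is not currently guarded.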
pub unsafe fn as_slice_mut_unchecked(&mut self) -> &mut [u8] {
debug_assert!(self.pool_is_alive());
debug_assert!(self.alloc_capacity >= self.alloc_len);
debug_assert!(!self.has_guard());
slice::from_raw_parts_mut(
self.pointer,
self.alloc_len.try_into_usize().expect(
"the buffer pool integer type is too large to fit within the system pointer width",
),
)
}
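/// Try to get the bytes of this slice mutably, returning `None` if the pool
/// has died or if a guard currently forbids access in this mode.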
pub fn try_as_slice_mut(&mut self) -> Option<&mut [u8]> {
if !self.pool_is_alive() {
return None;
}
if !M::IS_ALIASABLE && self.has_guard() {
return None;
}
Some(unsafe { self.as_slice_mut_unchecked() })
}
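/// Remove the guard of this slice, if any, without asking the guard whether
/// it is safe to do so.
///
/// # Safety
///
/// The caller must guarantee that nothing, such as a pending syscall or an
/// in-flight future, still relies on the memory being protected.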
pub unsafe fn unguard_unchecked(&mut self) -> Option<G> {
match self.guard.take() {
Some(g) => {
let arc;
let pool = match self.pool {
PoolRefKind::Ref(reference) => reference,
PoolRefKind::Strong(ref arc) => arc.as_buffer_pool(),
PoolRefKind::Weak(ref weak) => {
arc = weak.upgrade().expect(
"calling unguard on a weakly-owned buffer slice where the pool died",
);
arc.as_buffer_pool()
}
};
let prev = pool.guarded_occ_count.fetch_sub(1, Ordering::Release);
assert_ne!(prev, 0, "someone forgot to increment the guarded_occ_count, now I'm getting a subtraction overflow!");
Some(g)
}
None => None,
}
}
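/// Attach a guard to this slice, preventing it from being reclaimed until
/// the guard agrees to be released. Fails if a guard is already attached.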
pub fn guard(&mut self, guard: G) -> Result<(), WithGuardError<G>> {
if self.guard.is_some() {
return Err(WithGuardError { this: guard });
}
self.guard = Some(guard);
let arc;
let pool = match self.pool {
PoolRefKind::Ref(pool) => pool,
PoolRefKind::Strong(ref arc) => arc.as_buffer_pool(),
PoolRefKind::Weak(ref pool_weak) => {
arc = pool_weak.upgrade().expect(
"trying to guard weakly-owned buffer slice which pool has been dropped",
);
arc.as_buffer_pool()
}
};
pool.guarded_occ_count.fetch_add(1, Ordering::Release);
Ok(())
}
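/// Consume this slice and return it back with a guard of a possibly
/// different type attached. Fails if a guard is already attached.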
pub fn with_guard<OtherGuard: Guard>(
self,
other: OtherGuard,
) -> WithGuardResult<'pool, I, H, E, G, OtherGuard, C, M> {
if self.has_guard() {
return Err(WithGuardError { this: self });
}
let alloc_start = self.alloc_start;
let alloc_capacity = self.alloc_capacity;
let alloc_len = self.alloc_len;
let mmap_start = self.mmap_start;
let mmap_size = self.mmap_size;
let pointer = self.pointer;
let pool = self.pool.clone();
let extra = self.extra;
mem::forget(self);
let mut slice = BufferSlice {
alloc_start,
alloc_capacity,
alloc_len,
mmap_start,
mmap_size,
pointer,
pool,
extra,
guard: None,
_marker: PhantomData::<M>,
};
slice.guard(other).unwrap();
Ok(slice)
}
fn reclaim_inner(&mut self) -> bool {
let arc;
let pool = match self.pool {
PoolRefKind::Ref(reference) => reference,
PoolRefKind::Strong(ref aliased_arc) => {
arc = Arc::clone(aliased_arc);
arc.as_buffer_pool()
}
PoolRefKind::Weak(ref weak) => {
arc = match weak.upgrade() {
Some(a) => a,
None => return true,
};
arc.as_buffer_pool()
}
};
let (was_guarded, can_be_reclaimed) = match self.guard {
Some(ref mut guard) => (true, guard.try_release()),
None => (false, true),
};
if can_be_reclaimed {
unsafe {
if was_guarded {
self.unguard_unchecked().unwrap();
}
pool.reclaim_slice_inner(&*self);
}
true
} else {
false
}
}
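/// Try to hand this slice back to its pool, failing if an attached guard
/// refuses to be released.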
pub fn reclaim(mut self) -> Result<(), ReclaimError<Self>> {
match self.reclaim_inner() {
true => {
mem::forget(self);
Ok(())
}
false => Err(ReclaimError { this: self }),
}
}
/// Get the offset of this slice within the pool.
pub fn offset(&self) -> I {
self.alloc_start
}
/// Get the length of this slice.
pub fn len(&self) -> I {
self.alloc_len
}
/// Get the capacity of the allocation backing this slice, which may exceed
/// its length.
pub fn capacity(&self) -> I {
self.alloc_capacity
}
/// Check whether this slice is empty.
pub fn is_empty(&self) -> bool {
self.len() == I::zero()
}
/// Get the offset of the mmap that backs this slice.
pub fn mmap_offset(&self) -> I {
self.mmap_start
}
/// Get the size of the mmap that backs this slice.
pub fn mmap_size(&self) -> I {
self.mmap_size
}
/// Get the extra data associated with the mmap that backs this slice.
pub fn extra(&self) -> E {
self.extra
}
/// Check whether this slice currently has a guard attached.
pub fn has_guard(&self) -> bool {
self.guard.is_some()
}
}
impl<'pool, I, H, E, G, C> BufferSlice<'pool, I, H, E, G, C, guard_trait::marker::Shared>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
G: Guard,
C: AsBufferPool<I, H, E>,
{
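/// Get the bytes of this aliasable slice.
///
/// # Panics
///
/// Panics if the pool has been dropped, which is only possible for
/// weakly-owned slices.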
#[inline]
pub fn as_slice(&self) -> &[u8] {
assert!(self.pool_is_alive());
unsafe { self.as_slice_unchecked() }
}
}
impl<'pool, I, H, E, C> BufferSlice<'pool, I, H, E, NoGuard, C, guard_trait::marker::Exclusive>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
C: AsBufferPool<I, H, E>,
{
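/// Get the bytes of this unguarded, exclusive slice.
///
/// # Panics
///
/// Panics if the pool has been dropped, which is only possible for
/// weakly-owned slices.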
#[inline]
pub fn as_slice(&self) -> &[u8] {
assert!(self.pool_is_alive());
unsafe { self.as_slice_unchecked() }
}
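/// Get the bytes of this unguarded, exclusive slice, mutably.
///
/// # Panics
///
/// Panics if the pool has been dropped, which is only possible for
/// weakly-owned slices.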
#[inline]
pub fn as_slice_mut(&mut self) -> &mut [u8] {
assert!(self.pool_is_alive());
unsafe { self.as_slice_mut_unchecked() }
}
}
impl<'pool, I, H, E, G, C, M> Drop for BufferSlice<'pool, I, H, E, G, C, M>
where
I: Integer,
G: Guard,
E: Copy,
H: Handle<I, E>,
C: AsBufferPool<I, H, E>,
M: guard_trait::marker::Mode,
{
fn drop(&mut self) {
match self.reclaim_inner() {
true => (),
false => {
log::debug!("Trying to drop a BufferSlice that is in use, leaking memory");
}
}
}
}
impl<'pool, I, H, E, G, C> ops::Deref
for BufferSlice<'pool, I, H, E, G, C, guard_trait::marker::Shared>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
G: Guard,
C: AsBufferPool<I, H, E>,
{
type Target = [u8];
fn deref(&self) -> &Self::Target {
self.as_slice()
}
}
impl<'pool, I, H, E, C> ops::Deref
for BufferSlice<'pool, I, H, E, NoGuard, C, guard_trait::marker::Exclusive>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
C: AsBufferPool<I, H, E>,
{
type Target = [u8];
fn deref(&self) -> &Self::Target {
self.as_slice()
}
}
impl<'pool, I, H, E, C> ops::DerefMut
for BufferSlice<'pool, I, H, E, NoGuard, C, guard_trait::marker::Exclusive>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
C: AsBufferPool<I, H, E>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
self.as_slice_mut()
}
}
impl<'pool, I, H, E, G, C> Borrow<[u8]>
for BufferSlice<'pool, I, H, E, G, C, guard_trait::marker::Shared>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
G: Guard,
C: AsBufferPool<I, H, E>,
{
fn borrow(&self) -> &[u8] {
self.as_slice()
}
}
impl<'pool, I, H, E, C> Borrow<[u8]>
for BufferSlice<'pool, I, H, E, NoGuard, C, guard_trait::marker::Exclusive>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
C: AsBufferPool<I, H, E>,
{
fn borrow(&self) -> &[u8] {
self.as_slice()
}
}
impl<'pool, I, H, E, C> BorrowMut<[u8]> for BufferSlice<'pool, I, H, E, NoGuard, C>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
C: AsBufferPool<I, H, E>,
{
fn borrow_mut(&mut self) -> &mut [u8] {
self.as_slice_mut()
}
}
impl<'pool, I, H, E, G, C> AsRef<[u8]>
for BufferSlice<'pool, I, H, E, G, C, guard_trait::marker::Shared>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
G: Guard,
C: AsBufferPool<I, H, E>,
{
fn as_ref(&self) -> &[u8] {
self.as_slice()
}
}
impl<'pool, I, H, E, C> AsRef<[u8]>
for BufferSlice<'pool, I, H, E, NoGuard, C, guard_trait::marker::Exclusive>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
C: AsBufferPool<I, H, E>,
{
fn as_ref(&self) -> &[u8] {
self.as_slice()
}
}
impl<'pool, I, H, E, C> AsMut<[u8]>
for BufferSlice<'pool, I, H, E, NoGuard, C, guard_trait::marker::Exclusive>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
C: AsBufferPool<I, H, E>,
{
fn as_mut(&mut self) -> &mut [u8] {
self.as_slice_mut()
}
}
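/// A handle to a pending pool expansion, returned by
/// [`BufferPool::begin_expand`]. The reserved range only becomes available
/// once [`initialize`](Self::initialize) is called with the actual memory;
/// dropping the handle cancels the expansion.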
pub struct ExpansionHandle<'pool, I, H, E>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
{
offset: I,
len: I,
pool: &'pool BufferPool<I, H, E>,
}
impl<'pool, I, H, E> ExpansionHandle<'pool, I, H, E>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
{
/// Get the length of the range reserved by this expansion.
pub fn len(&self) -> I {
self.len
}
/// Check whether the reserved range is empty, which it never is, since
/// [`BufferPool::begin_expand`] rejects zero sizes.
pub fn is_empty(&self) -> bool {
self.len == I::zero()
}
/// Get the offset of the range reserved by this expansion.
pub fn offset(&self) -> I {
self.offset
}
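/// Provide the pool with the memory backing this expansion, making the
/// reserved range available for allocation.
///
/// # Safety
///
/// The pointer must reference at least `self.len()` bytes of valid memory
/// that stays alive, and is not aliased mutably elsewhere, for as long as
/// the pool may hand it out.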
pub unsafe fn initialize(self, pointer: NonNull<u8>, extra: E) {
{
let mmap_read_guard = self.pool.mmap_map.read();
let key = Offset(self.offset());
let value = mmap_read_guard
.get(&key)
.expect("expected ExpansionHandle to already have a pending mmap");
ptr::write(value.extra.get(), MaybeUninit::new(extra));
value.addr.store(pointer.as_ptr(), Ordering::Release);
}
let mut occ_write_guard = self.pool.occ_map.write();
let mut free_write_guard = self.pool.free_map.write();
occ_write_guard
.insert(
OccOffset::from_offset_used(self.offset(), false),
Size(self.len()),
)
.expect_none("expected newly-acquired slice not to conflict with any existing one");
let updated =
free_write_guard.insert(FreeEntry::from_size_offset(self.len(), self.offset()));
assert!(updated);
mem::forget(self);
}
}
impl<'pool, I, H, E> Drop for ExpansionHandle<'pool, I, H, E>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
{
fn drop(&mut self) {
let key = Offset(self.offset());
let mut mmap_write_guard = self.pool.mmap_map.write();
let _ = mmap_write_guard.remove(&key);
}
}
/// The strategy to use when allocating a slice from the pool.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum AllocationStrategy<I> {
/// Pick the smallest fitting free range, keeping fragmentation low at the
/// cost of a linear scan of the free map.
Optimal,
/// Pick a fitting free range via an ordered lookup in the free map, which
/// is faster but may fragment memory more.
Greedy,
/// Allocate at exactly the given offset, or fail.
Fixed(I),
}
impl<I> Default for AllocationStrategy<I> {
fn default() -> Self {
Self::Optimal
}
}
type AcquireSliceRet<I, E> = (ops::Range<I>, I, ops::Range<I>, *mut u8, E);
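/// The result of [`BufferPool::try_close`]: the handle (if any) together
/// with the mmap entries on success, or the pool itself on failure.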
pub type CloseResult<I, H, E> =
Result<(Option<H>, MmapEntries<I, E>), CloseError<BufferPool<I, H, E>>>;
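/// The result of [`BufferSlice::with_guard`]: the slice with the new guard
/// type on success, or the original slice on failure.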
pub type WithGuardResult<'pool, I, H, E, OldGuard, NewGuard, C, M> = Result<
BufferSlice<'pool, I, H, E, NewGuard, C, M>,
WithGuardError<BufferSlice<'pool, I, H, E, OldGuard, C, M>>,
>;
impl<I, H, E> BufferPool<I, H, E>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
{
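/// Reserve a new range at the end of the pool's address space, to be backed
/// by memory afterwards. The returned [`ExpansionHandle`] must be
/// initialized with a pointer to the actual memory before the range becomes
/// allocatable; dropping it cancels the expansion. Fails with
/// [`BeginExpandError`] if the pool's address space is exhausted.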
#[must_use = "calling begin_expand alone only reserves a range; you need to allocate some actually memory, using the handle"]
pub fn begin_expand(
&self,
additional: I,
) -> Result<ExpansionHandle<'_, I, H, E>, BeginExpandError> {
assert_ne!(additional, I::zero());
let new_offset = {
let mmap_intent_guard = self.mmap_map.upgradable_read();
let greatest_offset = mmap_intent_guard.last_key_value().map_or(
Result::<I, BeginExpandError>::Ok(I::zero()),
|(last_key, last_value)| {
let start = last_key
.0
.checked_add(last_value.size.0)
.ok_or(BeginExpandError)?;
let _end = start.checked_add(additional).ok_or(BeginExpandError)?;
Ok(start)
},
);
let new_offset = match greatest_offset {
Ok(o) => o,
Err(_) => {
mmap_intent_guard
.iter()
.find_map(|(k, v)| {
let start = k.0.checked_add(v.size.0)?;
let _end = start.checked_add(additional)?;
Some(start)
})
.ok_or(BeginExpandError)?
}
};
let mut mmap_write_guard = RwLockUpgradableReadGuard::upgrade(mmap_intent_guard);
let new_info = MmapInfo {
addr: AtomicPtr::new(ptr::null_mut()),
extra: UnsafeCell::new(MaybeUninit::uninit()),
size: Size::from_size(additional),
};
let prev = mmap_write_guard.insert(Offset(new_offset), new_info);
assert!(prev.is_none());
new_offset
};
Ok(ExpansionHandle {
offset: new_offset,
len: additional,
pool: self,
})
}
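/// Get a reference to the handle that this pool was created with, if any.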
pub fn handle(&self) -> Option<&H> {
self.handle.as_ref()
}
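/// Try to close the pool, returning its handle together with an iterator
/// over its mmap regions. Fails, returning the pool, if any guarded slice is
/// still alive.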
pub fn try_close(mut self) -> CloseResult<I, H, E> {
if self.guarded_occ_count.load(Ordering::Acquire) > 0 {
return Err(CloseError { this: self });
}
let handle = self.handle.take();
let mmap_map = mem::replace(self.mmap_map.get_mut(), BTreeMap::new());
let entries = MmapEntries {
inner: mmap_map.into_iter(),
};
Ok((handle, entries))
}
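/// Create a new, empty buffer pool, optionally together with a handle used
/// to close its mmaps when the pool is dropped.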
pub fn new(handle: Option<H>) -> Self {
Self {
occ_map: RwLock::new(BTreeMap::new()),
mmap_map: RwLock::new(BTreeMap::new()),
free_map: RwLock::new(BTreeSet::new()),
guarded_occ_count: AtomicUsize::new(0),
handle,
options: BufferPoolOptions::default(),
}
}
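/// Replace the options of this pool, builder-style.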
pub fn with_options(mut self, options: BufferPoolOptions<I>) -> Self {
self.options = options;
self
}
fn acquire_slice(
&self,
len: I,
alignment: I,
strategy: AllocationStrategy<I>,
) -> Option<AcquireSliceRet<I, E>> {
assert_ne!(len, I::zero());
if len > self.options.maximum_size {
return None;
}
if alignment > (I::from(1u8) << self.options.log2_maximum_alignment) {
return None;
}
let alignment = cmp::max(
alignment,
I::from(1u8) << self.options.log2_minimum_alignment,
);
let occ_intent_guard = self.occ_map.upgradable_read();
let free_intent_guard = self.free_map.upgradable_read();
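// Round `off` up to the next multiple of `alignment` (a nonzero power of
// two), returning `None` on overflow.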
fn align<I: Integer>(off: I, alignment: I) -> Option<I> {
assert_ne!(alignment, I::from(0u8));
assert!(alignment.is_power_of_two());
if alignment == I::from(1u8) {
return Some(off);
}
off.checked_add(alignment - I::from(1u8))?
.checked_div(alignment)?
.checked_mul(alignment)
}
let (occ_k, occ_v, free_e, advancement) = if let AllocationStrategy::Fixed(at) = strategy {
assert_eq!(at % alignment, I::zero());
let (base_occ_k, base_occ_v) = occ_intent_guard
.range(..=OccOffset::from_offset_used(at, false))
.next_back()?;
if base_occ_k.is_used() {
return None;
}
if base_occ_k.offset() + base_occ_v.size() < at {
return None;
}
let advancement = at.checked_sub(base_occ_k.offset()).expect(
"expected the preceding free entry to actually come before the fixed offset",
);
let available_size = base_occ_v.size().checked_sub(advancement)?;
if available_size < len {
return None;
}
let free_e = free_intent_guard
.get(&FreeEntry::from_size_offset(
base_occ_v.0,
base_occ_k.offset(),
))
.expect("expected occ map to contain a corresponding entry for the free entry");
(base_occ_k, base_occ_v, *free_e, advancement)
} else {
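// O(n) search: linearly scan the free map for the first entry that can
// still fit `len` bytes once its offset is rounded up to `alignment`.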
fn find_o_n<I: Integer>(
free_map: &BTreeSet<FreeEntry<I>>,
len: I,
alignment: I,
) -> Option<&FreeEntry<I>> {
free_map.iter().find(|e| {
e.size() >= len
&& align(e.offset(), alignment)
.map_or(false, |aligned| e.size() - (aligned - e.offset()) >= len)
})
}
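// O(log n) search: the free map is ordered primarily by size, so a range
// query starting at (len, alignment) usually lands directly on a fitting
// entry; fall back to the linear search when the candidate's alignment
// does not work out.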
fn find_o_logn<I: Integer>(
free_map: &BTreeSet<FreeEntry<I>>,
len: I,
alignment: I,
) -> Option<&FreeEntry<I>> {
let item = free_map
.range(FreeEntry::from_size_offset(len, alignment)..)
.next()?;
let check_if_misalignment_would_work =
|aligned| item.size() - (aligned - item.offset()) >= len;
if item.size() >= len
&& align(item.offset(), alignment)
.map_or(false, check_if_misalignment_would_work)
{
Some(item)
} else {
find_o_n(free_map, len, alignment)
}
}
let free_e = if let AllocationStrategy::Greedy = strategy {
find_o_logn(&*free_intent_guard, len, alignment)?
} else if let AllocationStrategy::Optimal = strategy {
find_o_n(&*free_intent_guard, len, alignment)?
} else {
unreachable!()
};
let (occ_k, occ_v) = occ_intent_guard
.get_key_value(&OccOffset::from_offset_used(free_e.offset(), false))
.expect("expected free map to contain a corresponding entry for the occ entry");
(occ_k, occ_v, *free_e, I::zero())
};
assert!(!occ_k.is_used());
assert_eq!(occ_k.offset(), free_e.offset());
assert_eq!(
I::from(u8::try_from(occ_k.offset().trailing_zeros()).unwrap()),
free_e.log2_of_alignment()
);
assert_eq!(occ_v.0, free_e.size());
let original_off = free_e.offset();
let advanced_off = original_off.checked_add(advancement)?;
let aligned_off =
align(advanced_off, alignment).expect("bypassed alignment check in iterator");
let misalignment = aligned_off - advanced_off;
let new_offset = aligned_off;
assert!(
new_offset < free_e.offset() + free_e.size(),
"assertion failed: {} < {} + {}",
new_offset,
free_e.offset(),
free_e.size()
);
let total_advancement = misalignment.checked_add(advancement)?;
let new_offset = {
let mut occ_write_guard = RwLockUpgradableReadGuard::upgrade(occ_intent_guard);
let mut free_write_guard = RwLockUpgradableReadGuard::upgrade(free_intent_guard);
let had_prev = free_write_guard.remove(&free_e);
assert!(had_prev);
let prev = occ_write_guard.remove(&OccOffset::from_offset_used(original_off, false));
assert!(prev.is_some());
if free_e.size() - total_advancement > len {
let mut upper_free_e = free_e;
upper_free_e.set_size(free_e.size() - len - total_advancement);
upper_free_e.set_offset(free_e.offset() + len + total_advancement);
let updated = free_write_guard.insert(upper_free_e);
assert!(updated);
let prev = occ_write_guard.insert(
OccOffset::from_offset_used(upper_free_e.offset(), false),
Size(upper_free_e.size()),
);
assert_eq!(prev, None);
}
if total_advancement > I::zero() {
let new_free_e = FreeEntry::from_size_offset(total_advancement, original_off);
let updated = free_write_guard.insert(new_free_e);
assert!(
updated,
"somehow the small alignment region was already mapped"
);
let prev = occ_write_guard.insert(
OccOffset::from_offset_used(original_off, false),
Size(new_free_e.size()),
);
assert!(prev.is_none());
}
let new_offset = aligned_off;
let new_occ_k = OccOffset::from_offset_used(new_offset, true);
let new_occ_v = Size(len);
occ_write_guard
.insert(new_occ_k, new_occ_v)
.expect_none("expected new entry not to already be inserted");
new_offset
};
let (mmap_range, pointer, extra) = {
let mmap_read_guard = self.mmap_map.read();
let (mmap_k, mmap_v) = mmap_read_guard
.range(..=Offset(new_offset))
.next_back()
.expect(
"expected all free entries in the occ map to have a corresponding mmap entry",
);
let mmap_start = mmap_k.0;
let mmap_size = mmap_v.size;
let mmap_end = mmap_start
.checked_add(mmap_size.0)
.expect("expected mmap end not to overflow u32::MAX");
assert!(mmap_start <= new_offset);
assert!(mmap_end >= new_offset + len);
let (extra, pointer) = unsafe {
assert_ne!(
mmap_v.size.0,
I::zero(),
"expected found slice to not have size zero"
);
let base_pointer: *mut u8 = mmap_v.addr.load(Ordering::Acquire);
assert!(!base_pointer.is_null());
let extra = ptr::read(mmap_v.extra.get()).assume_init();
let pointer = base_pointer.add((new_offset - mmap_start).try_into_usize().unwrap())
as *mut u8;
(extra, pointer)
};
(mmap_start..mmap_end, pointer, extra)
};
let offset = aligned_off;
let actual_len = len;
Some((offset..offset + actual_len, len, mmap_range, pointer, extra))
}
fn construct_buffer_slice<G: Guard, C: AsBufferPool<I, H, E>>(
alloc_range: ops::Range<I>,
alloc_len: I,
mmap_range: ops::Range<I>,
pointer: *mut u8,
extra: E,
pool: PoolRefKind<I, H, E, C>,
) -> BufferSlice<'_, I, H, E, G, C> {
debug_assert!(alloc_len <= alloc_range.end - alloc_range.start);
BufferSlice {
alloc_start: alloc_range.start,
alloc_capacity: alloc_range.end - alloc_range.start,
alloc_len,
mmap_start: mmap_range.start,
mmap_size: mmap_range.end - mmap_range.start,
pointer,
pool,
extra,
guard: None,
_marker: PhantomData,
}
}
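/// Acquire a slice that borrows the pool, with the given length, alignment,
/// and allocation strategy. Returns `None` if no free range satisfies the
/// request or if the request violates the pool's options.
///
/// A minimal sketch, assuming a pool set up as in the crate-level example:
///
/// ```ignore
/// use buffer_pool::{AllocationStrategy, NoGuard};
///
/// let slice = pool
///     .acquire_borrowed_slice::<NoGuard>(128, 8, AllocationStrategy::Greedy)
///     .expect("pool out of space");
/// assert_eq!(slice.len(), 128);
/// assert_eq!(slice.offset() % 8, 0);
/// ```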
pub fn acquire_borrowed_slice<G>(
&self,
len: I,
alignment: I,
strategy: AllocationStrategy<I>,
) -> Option<BufferSlice<'_, I, H, E, G>>
where
G: Guard,
{
let (alloc_range, alloc_len, mmap_range, pointer, extra) =
self.acquire_slice(len, alignment, strategy)?;
Some(Self::construct_buffer_slice(
alloc_range,
alloc_len,
mmap_range,
pointer,
extra,
PoolRefKind::Ref(self),
))
}
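/// Acquire a slice that holds a weak reference to the pool, allowing the
/// pool to be dropped while the slice is alive; accessing the slice
/// afterwards fails gracefully, and the memory is leaked rather than
/// reclaimed.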
pub fn acquire_weak_slice<G, C>(
this: &Arc<C>,
len: I,
alignment: I,
strategy: AllocationStrategy<I>,
) -> Option<BufferSlice<'static, I, H, E, G, C>>
where
G: Guard,
C: AsBufferPool<I, H, E>,
{
let (alloc_range, alloc_len, mmap_range, pointer, extra) = this
.as_buffer_pool()
.acquire_slice(len, alignment, strategy)?;
Some(Self::construct_buffer_slice(
alloc_range,
alloc_len,
mmap_range,
pointer,
extra,
PoolRefKind::Weak(Arc::downgrade(this)),
))
}
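/// Acquire a slice that holds a strong reference to the pool, keeping the
/// pool alive for at least as long as the slice.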
pub fn acquire_strong_slice<G, C>(
this: &Arc<C>,
len: I,
alignment: I,
strategy: AllocationStrategy<I>,
) -> Option<BufferSlice<'static, I, H, E, G, C>>
where
G: Guard,
C: AsBufferPool<I, H, E>,
{
let (alloc_range, alloc_len, mmap_range, pointer, extra) = this
.as_buffer_pool()
.acquire_slice(len, alignment, strategy)?;
Some(Self::construct_buffer_slice(
alloc_range,
alloc_len,
mmap_range,
pointer,
extra,
PoolRefKind::Strong(Arc::clone(this)),
))
}
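// Try to merge the free range described by (*start, *size) with a directly
// preceding free range within the same mmap, updating *start and *size on
// success. Returns false once no further merge is possible.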
fn remove_free_offset_below(
free_map: &mut BTreeSet<FreeEntry<I>>,
occ_map: &mut BTreeMap<OccOffset<I>, Size<I>>,
mmap_map: &BTreeMap<Offset<I>, MmapInfo<I, E>>,
start: &mut I,
size: &mut I,
) -> bool {
let previous_start = *start;
let (cur_mmap_k, cur_mmap_v) = mmap_map
.range(..=Offset(previous_start))
.next_back()
.unwrap();
assert!(cur_mmap_k.0 + cur_mmap_v.size.0 > previous_start);
assert!(cur_mmap_k.0 <= previous_start);
let partial_k = OccOffset::from_offset_used(previous_start, false);
let range = ..partial_k;
if let Some((lower_occ_k, lower_occ_v)) = occ_map.range(range).next_back() {
let lower_occ_k = *lower_occ_k;
let lower_occ_v = *lower_occ_v;
assert!(!lower_occ_k.is_used());
if lower_occ_k.offset() + lower_occ_v.0 != previous_start {
return false;
}
let (mmap_k, _) = mmap_map
.range(..=Offset(lower_occ_k.offset()))
.next_back()
.unwrap();
if mmap_k != cur_mmap_k {
return false;
}
let lower_occ_v_again = occ_map
.remove(&lower_occ_k)
.expect("expected previously found key to exist in the b-tree map");
assert_eq!(lower_occ_v_again, lower_occ_v);
let had_prev = free_map.remove(&FreeEntry::from_size_offset(
lower_occ_v.size(),
lower_occ_k.offset(),
));
assert!(had_prev);
*start = lower_occ_k.offset();
*size += lower_occ_v.size();
true
} else {
false
}
}
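// Try to merge the free range described by (*start, *size) with a directly
// following free range within the same mmap, growing *size on success.
// Returns false once no further merge is possible.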
fn remove_free_offset_above(
free_map: &mut BTreeSet<FreeEntry<I>>,
occ_map: &mut BTreeMap<OccOffset<I>, Size<I>>,
mmap_map: &BTreeMap<Offset<I>, MmapInfo<I, E>>,
start: &mut I,
size: &mut I,
) -> bool {
assert_ne!(*size, I::zero());
let end = *start + *size;
let higher_occ_k = OccOffset::from_offset_used(end, false);
let (cur_mmap_k, cur_mmap_v) = mmap_map.range(..Offset(end)).next_back().unwrap();
assert!(cur_mmap_k.0 + cur_mmap_v.size.0 >= end);
assert!(cur_mmap_k.0 <= end);
if cur_mmap_k.0 + cur_mmap_v.size.0 == end {
return false;
}
if let Some(higher_occ_v) = occ_map.remove(&higher_occ_k) {
let had_prev = free_map.remove(&FreeEntry::from_size_offset(
higher_occ_v.size(),
higher_occ_k.offset(),
));
assert!(had_prev);
*size += higher_occ_v.size();
true
} else {
false
}
}
unsafe fn reclaim_slice_inner<G, C, M>(&self, slice: &BufferSlice<'_, I, H, E, G, C, M>)
where
C: AsBufferPool<I, H, E>,
G: Guard,
M: guard_trait::marker::Mode,
{
let mut occ_write_guard = self.occ_map.write();
let mut free_write_guard = self.free_map.write();
let mut start = slice.alloc_start;
let mut size = slice.alloc_capacity;
let occ_v = occ_write_guard
.remove(&OccOffset::from_offset_used(start, true))
.expect("expected occ map to contain buffer slice when reclaiming it");
let mmap_guard = self.mmap_map.read();
assert_eq!(occ_v.size(), slice.alloc_capacity);
while Self::remove_free_offset_below(
&mut *free_write_guard,
&mut *occ_write_guard,
&*mmap_guard,
&mut start,
&mut size,
) {}
while Self::remove_free_offset_above(
&mut *free_write_guard,
&mut *occ_write_guard,
&*mmap_guard,
&mut start,
&mut size,
) {}
let new_free_e = FreeEntry::from_size_offset(size, start);
let updated = free_write_guard.insert(new_free_e);
assert!(
updated,
"expected newly resized free range not to start existing again before insertion",
);
let new_occ_k = OccOffset::from_offset_used(start, false);
let new_occ_v = Size::from_size(size);
occ_write_guard.insert(new_occ_k, new_occ_v).unwrap_none();
}
fn drop_impl(&mut self) {
let count = self.guarded_occ_count.load(Ordering::Acquire);
if count == 0 {
if let Some(h) = self.handle.take() {
let entries = mem::replace(self.mmap_map.get_mut(), BTreeMap::new());
let _ = h.close_all(MmapEntries {
inner: entries.into_iter(),
});
}
} else {
log::warn!("Leaking parts of the buffer pool, since there were {} slices that were guarded by futures that haven't been completed", count);
}
}
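/// Get the number of slices in this pool that are currently guarded.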
pub fn active_guard_count(&self) -> usize {
self.guarded_occ_count.load(Ordering::Relaxed)
}
}
impl<I: Integer, H: Handle<I, E>, E: Copy> Drop for BufferPool<I, H, E> {
fn drop(&mut self) {
self.drop_impl();
}
}
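/// An iterator over the mmap regions of a pool, yielded when the pool is
/// closed or dropped so that a handle can release them.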
#[derive(Debug)]
pub struct MmapEntries<I, E>
where
E: Copy,
{
inner: ::alloc::collections::btree_map::IntoIter<Offset<I>, MmapInfo<I, E>>,
}
/// A single mmap region that was registered with a pool.
#[derive(Debug)]
pub struct MmapEntry<I, E> {
/// The offset of this region within the pool's address space.
pub pool_offset: I,
/// The size of this region in bytes.
pub size: I,
/// The base pointer of this region.
pub pointer: NonNull<u8>,
/// The extra data that was associated with this region at initialization.
pub extra: E,
}
impl<I, E> Iterator for MmapEntries<I, E>
where
E: Copy,
{
type Item = MmapEntry<I, E>;
fn next(&mut self) -> Option<Self::Item> {
'entries: loop {
let (offset, info) = self.inner.next()?;
let pointer = info.addr.into_inner();
let pointer = match NonNull::new(pointer) {
Some(p) => p,
None => continue 'entries,
};
return Some(MmapEntry {
pool_offset: offset.0,
size: info.size.0,
pointer,
extra: unsafe { info.extra.into_inner().assume_init() },
});
}
}
}
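/// A handle used by a [`BufferPool`] to release its mmap regions when the
/// pool is closed or dropped.
///
/// A minimal sketch of a custom handle, assuming the crate is named
/// `buffer_pool` and that `unmap` is a hypothetical stand-in for whatever
/// actually releases the memory:
///
/// ```ignore
/// use buffer_pool::{Handle, MmapEntries};
///
/// struct Unmapper;
///
/// impl Handle<u32, ()> for Unmapper {
///     type Error = ();
///
///     fn close(&mut self, entries: MmapEntries<u32, ()>) -> Result<(), ()> {
///         for entry in entries {
///             // `unmap` is hypothetical; substitute your own unmapping call.
///             unsafe { unmap(entry.pointer.as_ptr(), entry.size as usize) };
///         }
///         Ok(())
///     }
/// }
/// ```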
pub trait Handle<I, E>
where
E: Copy,
Self: Sized,
{
/// The error type returned when closing the mmap regions fails.
type Error;
/// Close the given mmap regions.
fn close(&mut self, mmap_entries: MmapEntries<I, E>) -> Result<(), Self::Error>;
/// Close the given mmap regions, consuming the handle. By default, this
/// simply delegates to [`close`](Self::close).
fn close_all(mut self, mmap_entries: MmapEntries<I, E>) -> Result<(), Self::Error> {
self.close(mmap_entries)
}
}
/// The error returned when attaching a guard to a slice that is already
/// guarded.
pub struct WithGuardError<T> {
/// The value that could not be guarded, handed back to the caller.
pub this: T,
}
impl<T> fmt::Debug for WithGuardError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("WithGuardError")
.finish()
}
}
impl<T> fmt::Display for WithGuardError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"failed to replace guard, due to another guard already existing"
)
}
}
#[cfg(any(test, feature = "std"))]
impl<T> std::error::Error for WithGuardError<T> {}
/// The error returned when a slice cannot be reclaimed because its guard
/// refuses to be released.
pub struct ReclaimError<T> {
/// The slice that could not be reclaimed, handed back to the caller.
pub this: T,
}
impl<T> fmt::Display for ReclaimError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "failed to reclaim buffer slice, since it was in use")
}
}
impl<T> fmt::Debug for ReclaimError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ReclaimError").finish()
}
}
#[cfg(any(test, feature = "std"))]
impl<T> std::error::Error for ReclaimError<T> {}
/// The error returned when a pool cannot be closed because guarded slices
/// are still alive.
#[derive(Debug)]
pub struct CloseError<T> {
/// The pool that could not be closed, handed back to the caller.
pub this: T,
}
impl<T> fmt::Display for CloseError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "failed to close since buffers were in use")
}
}
/// The error returned by [`BufferPool::begin_expand`] when the pool's
/// address space is exhausted.
#[derive(Debug)]
pub struct BeginExpandError;
impl fmt::Display for BeginExpandError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"failed to expand buffer: no more buffer pool memory (arithmetic overflow)"
)
}
}
#[cfg(any(test, feature = "std"))]
impl std::error::Error for BeginExpandError {}
#[cfg(feature = "redox")]
mod libc_error_impls {
use super::*;
use syscall::error::Error;
use syscall::error::{EADDRINUSE, EEXIST, ENOMEM};
impl From<BeginExpandError> for Error {
fn from(_: BeginExpandError) -> Error {
Error::new(ENOMEM)
}
}
impl<T> From<CloseError<T>> for Error {
fn from(_: CloseError<T>) -> Error {
Error::new(EADDRINUSE)
}
}
impl<T> From<ReclaimError<T>> for Error {
fn from(_: ReclaimError<T>) -> Error {
Error::new(EADDRINUSE)
}
}
impl<T> From<WithGuardError<T>> for Error {
fn from(_: WithGuardError<T>) -> Error {
Error::new(EEXIST)
}
}
}
unsafe impl<'pool, I, E, G, H, C, M> Guardable<G, [u8]> for BufferSlice<'pool, I, H, E, G, C, M>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
G: Guard,
C: AsBufferPool<I, H, E>,
M: guard_trait::marker::Mode,
{
fn try_guard(&mut self, guard: G) -> Result<(), G> {
match self.guard(guard) {
Ok(()) => Ok(()),
Err(WithGuardError { this: new_guard }) => unsafe {
let mut existing_guard = self.unguard_unchecked().expect(
"expected a BufferSlice not to contain a guard, if the guard method failed",
);
if existing_guard.try_release() {
self.guard(new_guard).expect(
"expected no guard to exist in BufferSlice, if it just was released",
);
Ok(())
} else {
self.guard(existing_guard).expect(
"expected no guard to exist in BufferSlice, if it just was released",
);
Err(new_guard)
}
},
}
}
#[inline]
fn try_get_data(&self) -> Option<&[u8]> {
self.try_as_slice()
}
}
unsafe impl<'pool, I, E, G, H, C> GuardableShared<G, [u8]>
for BufferSlice<'pool, I, H, E, G, C, guard_trait::marker::Shared>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
G: Guard,
C: AsBufferPool<I, H, E>,
{
#[inline]
fn data_shared(&self) -> &[u8] {
self.as_slice()
}
}
unsafe impl<'pool, I, E, G, H, C> GuardableExclusive<G, [u8]>
for BufferSlice<'pool, I, H, E, G, C, guard_trait::marker::Exclusive>
where
I: Integer,
H: Handle<I, E>,
E: Copy,
G: Guard,
C: AsBufferPool<I, H, E>,
{
#[inline]
fn try_get_data_mut(&mut self) -> Option<&mut [u8]> {
self.try_as_slice_mut()
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::{mem, thread};
fn setup_pool(
maps: impl IntoIterator<Item = Vec<u8>>,
options: BufferPoolOptions<u32>,
) -> (BufferPool<u32, NoHandle, ()>, u32) {
let pool = BufferPool::new(None);
let mut total_size = 0;
for map in maps {
let mut slice = map.into_boxed_slice();
let ptr = slice.as_mut_ptr();
let len = slice.len();
total_size += u32::try_from(len).unwrap();
let _raw_slice = Box::into_raw(slice);
unsafe {
pool.begin_expand(u32::try_from(len).unwrap())
.unwrap()
.initialize(std::ptr::NonNull::new(ptr).unwrap(), ())
}
}
(pool.with_options(options), total_size)
}
fn setup_default_pool(options: BufferPoolOptions<u32>) -> (BufferPool<u32, NoHandle, ()>, u32) {
setup_pool(
vec![vec![0u8; 32768], vec![0u8; 4096], vec![0u8; 65536]],
options,
)
}
#[test]
fn occ_map_acquisition_single_mmap_optimal() {
occ_map_acquisition_single_mmap(AllocationStrategy::Optimal)
}
#[test]
fn occ_map_acquisition_single_mmap_greedy() {
occ_map_acquisition_single_mmap(AllocationStrategy::Greedy)
}
fn occ_map_acquisition_single_mmap(strategy: AllocationStrategy<u32>) {
let (pool, _) = setup_default_pool(Default::default());
let mut slices = Vec::new();
loop {
let mut slice = match pool.acquire_borrowed_slice::<NoGuard>(4096, 1, strategy) {
Some(s) => s,
None => break,
};
let text = b"Hello, world!";
slice[..text.len()].copy_from_slice(text);
assert_eq!(&slice[..text.len()], text);
slices.push(slice);
}
drop(slices);
mem::forget(pool);
}
#[test]
fn occ_multithreaded_optimal() {
occ_multithreaded(AllocationStrategy::Optimal)
}
#[test]
fn occ_multithreaded_greedy() {
occ_multithreaded(AllocationStrategy::Greedy)
}
fn occ_multithreaded(strategy: AllocationStrategy<u32>) {
let (pool, _) = setup_default_pool(Default::default());
let pool = Arc::new(pool);
const THREAD_COUNT: usize = 8;
#[cfg(not(miri))]
const N: usize = 1000;
#[cfg(miri)]
const N: usize = 128;
let threads = (0..THREAD_COUNT).map(|_| {
let pool = Arc::clone(&pool);
thread::spawn(move || {
use rand::Rng;
let mut thread_rng = rand::thread_rng();
for _ in 0..N {
'retry: loop {
let len = thread_rng.gen_range(64, 4096);
let align = 1 << thread_rng.gen_range(0, 3);
match pool.acquire_borrowed_slice::<NoGuard>(len, align, strategy) {
Some(_) => break 'retry,
None => continue 'retry,
}
}
}
})
});
for thread in threads {
thread.join().unwrap();
}
}
#[test]
fn no_aliasing_optimal() {
no_aliasing(AllocationStrategy::Optimal)
}
#[test]
fn no_aliasing_greedy() {
no_aliasing(AllocationStrategy::Greedy)
}
fn no_aliasing(strategy: AllocationStrategy<u32>) {
let (pool, _) = setup_default_pool(Default::default());
const SIZE: u32 = 512;
let mut slices = Vec::new();
loop {
let slice = match pool.acquire_borrowed_slice::<NoGuard>(SIZE, 1, strategy) {
Some(s) => s,
None => break,
};
slices.push(slice);
}
for slice in &mut slices {
assert!(slice.iter().all(|&byte| byte == 0));
slice.fill(63);
}
}
#[test]
fn alignment_greedy() {
alignment(AllocationStrategy::Greedy)
}
#[test]
fn alignment_optimal() {
alignment(AllocationStrategy::Optimal)
}
fn alignment(strategy: AllocationStrategy<u32>) {
let options = BufferPoolOptions::default().with_minimum_alignment(1);
let (pool, _) = setup_pool(vec![vec![0u8; 4096]], options);
fn get_and_check_slice(
pool: &BufferPool<u32, NoHandle, ()>,
size: u32,
align: u32,
fill_byte: u8,
strategy: AllocationStrategy<u32>,
) -> BufferSlice<u32, NoHandle, ()> {
let mut slice = pool.acquire_borrowed_slice(size, align, strategy).unwrap();
assert!(slice.iter().all(|&byte| byte == 0));
slice.fill(fill_byte);
assert!(slice.iter().all(|&byte| byte == fill_byte));
assert_eq!(slice.len(), size);
assert_eq!(slice.offset() % align, 0);
slice
}
{
let _small_begin_slice = get_and_check_slice(&pool, 64, 1, 0x01, strategy);
let _aligned_slice = get_and_check_slice(&pool, 128, 128, 0x02, strategy);
let _half_page = get_and_check_slice(&pool, 2048, 2048, 0xFE, strategy);
}
}
#[test]
fn free_entry() {
let mut entry = FreeEntry::from_size_offset(1024u32, 64);
assert_eq!(entry.size(), 1024);
assert_eq!(entry.offset(), 64);
assert_eq!(entry.log2_of_alignment(), 6);
entry.set_offset(128);
assert_eq!(entry.size(), 1024);
assert_eq!(entry.offset(), 128);
assert_eq!(entry.log2_of_alignment(), 7);
entry.set_offset(3);
entry.set_size(4);
assert_eq!(entry.size(), 4);
assert_eq!(entry.offset(), 3);
assert_eq!(entry.log2_of_alignment(), 0);
}
#[test]
fn fixed_allocations() {
use rand::Rng;
let options = BufferPoolOptions::new().with_minimum_alignment(1);
let (pool, _) = setup_pool(vec![vec![0u8; 4096]], options);
let mut thread_rng = rand::thread_rng();
const N: usize = 1000;
for _ in 0..N {
let alignment = 1 << thread_rng.gen_range(0, 3);
let unaligned_offset = thread_rng.gen_range(0, 2048 - alignment);
let offset = (unaligned_offset + alignment - 1) / alignment * alignment;
let len = thread_rng.gen_range(0, 2048 - offset);
if len == 0 {
continue;
}
let slice = pool
.acquire_borrowed_slice::<NoGuard>(
len,
alignment,
AllocationStrategy::Fixed(offset),
)
.unwrap();
assert_eq!(slice.offset(), offset);
assert_eq!(slice.len(), len);
assert_eq!(slice.offset() % alignment, 0);
assert_eq!(slice.mmap_offset(), 0);
assert_eq!(slice.mmap_size(), 4096);
}
}
}