use crate::{
sync::{AtomicMut, AtomicPtr, AtomicU32, AtomicU64, Ordering},
NODE_ALIGNMENT_FACTOR,
};
use core::{
mem,
ptr::{self, NonNull},
slice,
};
#[allow(unused_imports)]
use std::boxed::Box;
use crossbeam_utils::CachePadded;
mod shared;
use shared::{Shared, SharedMeta};
// Sizes of the metadata fields persisted with a file-backed mmap so a map can
// be reopened later (see `Arena::mmap` / `Shared::unmount`).
#[cfg(all(feature = "memmap", not(target_family = "wasm")))]
const HEIGHT_ENCODED_SIZE: usize = mem::size_of::<u8>();
#[cfg(all(feature = "memmap", not(target_family = "wasm")))]
const LEN_ENCODED_SIZE: usize = mem::size_of::<u32>();
#[cfg(all(feature = "memmap", not(target_family = "wasm")))]
const MAX_VERSION_ENCODED_SIZE: usize = mem::size_of::<u64>();
#[cfg(all(feature = "memmap", not(target_family = "wasm")))]
const CHECKSUM_ENCODED_SIZE: usize = mem::size_of::<u32>();
// Total extra bytes reserved in file-backed maps for the encoded height, len,
// max version and checksum.
#[cfg(all(feature = "memmap", not(target_family = "wasm")))]
const MMAP_OVERHEAD: usize =
    HEIGHT_ENCODED_SIZE + LEN_ENCODED_SIZE + MAX_VERSION_ENCODED_SIZE + CHECKSUM_ENCODED_SIZE;
/// Error returned when an allocation request cannot be satisfied because the
/// arena has exhausted its fixed capacity.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct ArenaError;
impl core::fmt::Display for ArenaError {
    /// Writes the fixed out-of-capacity message.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.write_str("allocation failed because arena is full")
    }
}
// `ArenaError` carries no payload or source, so the derived `Debug` plus the
// `Display` impl above satisfy the `Error` contract with an empty body.
#[cfg(feature = "std")]
impl std::error::Error for ArenaError {}
/// Lock-free bump allocator over a shared memory region (heap `Vec`,
/// anonymous mmap, or file-backed mmap — see `Shared`).
///
/// The arena hands out `u32` offsets into the region rather than pointers;
/// offset 0 is reserved as a null sentinel (see `get_pointer`).
pub struct Arena {
    // Base address for writes. Dangling when the backing storage exposes no
    // mutable pointer (read-only map) — must never be dereferenced then.
    write_data_ptr: NonNull<u8>,
    // Base address for reads into the shared region.
    read_data_ptr: *const u8,
    // Bump cursor: bytes handed out so far. Cache-padded so the hot counter
    // does not false-share a line with the other fields.
    n: CachePadded<AtomicU64>,
    // Current skiplist height; read back during `Drop` to persist metadata.
    pub(super) height: CachePadded<AtomicU32>,
    // Entry count; read back during `Drop` to persist metadata.
    pub(super) len: AtomicU32,
    // Largest version seen; read back during `Drop` to persist metadata.
    pub(super) max_version: AtomicU64,
    // Type-erased `Box<Shared>` installed in `new`; ownership is shared via
    // `Shared::refs` and the box is freed by the last `Drop`.
    inner: AtomicPtr<()>,
    // Total capacity of the region in bytes.
    cap: usize,
}
impl core::fmt::Debug for Arena {
    /// Renders the capacity, the current bump cursor, and the raw allocated
    /// bytes of the arena.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let allocated = self.size();
        // SAFETY: `read_data_ptr` is the base of the backing region and
        // `allocated` never exceeds `cap`, so the slice stays in bounds.
        // NOTE(review): concurrent allocators may be writing these bytes while
        // we read them — confirm Debug is only used on quiescent arenas.
        let data = unsafe { slice::from_raw_parts(self.read_data_ptr, allocated) };
        f.debug_struct("Arena")
            .field("cap", &self.cap)
            .field("allocated", &allocated)
            .field("data", &data)
            .finish()
    }
}
impl Arena {
    /// Number of bytes handed out so far (the bump cursor position).
    #[inline]
    pub fn size(&self) -> usize {
        let allocated = self.n.load(Ordering::Acquire);
        allocated as usize
    }

    /// Total capacity of the backing region in bytes.
    #[inline]
    pub const fn capacity(&self) -> usize {
        self.cap
    }

    /// Bytes still available for allocation; never underflows.
    #[inline]
    pub fn remaining(&self) -> usize {
        let used = self.size();
        self.cap.saturating_sub(used)
    }
}
impl Arena {
    /// Creates an arena backed by an owned heap buffer of at least
    /// `n.max(min_cap)` bytes, aligned for both `u64` metadata and nodes.
    #[inline]
    pub(super) fn new_vec(n: usize, min_cap: usize) -> Self {
        Self::new(
            Shared::new_vec(
                n.max(min_cap),
                mem::align_of::<u64>().max(NODE_ALIGNMENT_FACTOR),
            ),
            None,
        )
    }

    /// Creates an arena backed by a writable file mmap at `path`, reserving
    /// `MMAP_OVERHEAD` extra bytes for the persisted metadata (height, len,
    /// max version, checksum). `lock` is forwarded to `Shared::mmap_mut`
    /// (presumably an advisory file lock — confirm there).
    ///
    /// # Errors
    /// Propagates any I/O error from creating or mapping the file.
    #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
    #[inline]
    pub(super) fn mmap_mut<P: AsRef<std::path::Path>>(
        n: usize,
        min_cap: usize,
        path: P,
        lock: bool,
    ) -> std::io::Result<Self> {
        let n = n.saturating_add(MMAP_OVERHEAD);
        Shared::mmap_mut(n.max(min_cap.saturating_add(MMAP_OVERHEAD)), path, lock)
            .map(|shared| Self::new(shared, None))
    }

    /// Opens an existing file mmap read-only, restoring the metadata
    /// (`SharedMeta`) recorded by a previous unmount.
    ///
    /// # Errors
    /// Propagates any I/O error from opening or mapping the file.
    #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
    #[inline]
    pub(super) fn mmap<P: AsRef<std::path::Path>>(
        min_cap: usize,
        path: P,
        lock: bool,
    ) -> std::io::Result<Self> {
        // `saturating_add` for consistency with `mmap_mut`: a bare `+` would
        // panic in debug builds (and wrap in release) on overflow.
        Shared::mmap(min_cap.saturating_add(MMAP_OVERHEAD), path, lock)
            .map(|(meta, shared)| Self::new(shared, Some(meta)))
    }

    /// Creates an arena backed by an anonymous (non-file) mmap.
    #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
    #[inline]
    pub(super) fn new_anonymous_mmap(n: usize, min_cap: usize) -> std::io::Result<Self> {
        Shared::new_mmaped_anon(n.max(min_cap)).map(|shared| Self::new(shared, None))
    }

    /// Offset the very first allocation receives: replays `alloc`'s
    /// arithmetic with the bump cursor at its initial value of 1.
    /// `align` must be a power of two.
    #[inline]
    fn head_offset(&self, max_node_size: u32, align: u32) -> u32 {
        let padded = max_node_size as u64 + align as u64 - 1;
        let new_size = 1 + padded;
        (new_size as u32 - max_node_size) & !(align - 1)
    }

    /// Pointer and offset of the head node (the arena's first allocation).
    pub(super) fn head_ptr(&self, max_node_size: u32, align: u32) -> (*const u8, u32) {
        let offset = self.head_offset(max_node_size, align);
        (unsafe { self.get_pointer(offset as usize) }, offset)
    }

    /// Pointer and offset of the tail node, assumed to be laid out right
    /// after the head node.
    /// NOTE(review): this replays the second allocation's arithmetic by hand;
    /// it must stay in lock-step with `alloc` and with the allocation order
    /// the parent module actually performs — confirm against callers.
    pub(super) fn tail_ptr(&self, max_node_size: u32, align: u32) -> (*const u8, u32) {
        let padded = max_node_size as u64 + align as u64 - 1;
        let new_size = self.head_offset(max_node_size, align) as u64 + padded + max_node_size as u64;
        let offset = (new_size as u32 - max_node_size) & !(align - 1);
        (unsafe { self.get_pointer(offset as usize) }, offset)
    }

    /// Builds an `Arena` over `shared`, restoring metadata when reopening an
    /// existing map (`meta = Some(..)`) or starting fresh otherwise.
    ///
    /// A fresh arena starts with `allocated = 1` so that offset 0 can serve
    /// as the null sentinel (see `get_pointer`).
    #[inline]
    fn new(mut shared: Shared, meta: Option<SharedMeta>) -> Self {
        let read_data_ptr = shared.as_ptr();
        // Read-only maps expose no mutable pointer; store a dangling NonNull
        // that must never be dereferenced in that mode.
        let write_data_ptr = shared
            .as_mut_ptr()
            .map(|p| unsafe { NonNull::new_unchecked(p) })
            .unwrap_or_else(NonNull::dangling);
        let (height, allocated, len, max_version) = match meta {
            Some(meta) => (meta.height, meta.allocated, meta.len, meta.max_version),
            None => {
                let height = 1;
                let len = 0;
                let max_version = 0;
                // Reserve offset 0 as the null offset.
                let allocated = 1;
                (height, allocated, len, max_version)
            }
        };
        Self {
            cap: shared.cap(),
            // Box the `Shared` and type-erase it; ownership is tracked by
            // `Shared::refs` and released by the last `Drop`.
            inner: AtomicPtr::new(Box::into_raw(Box::new(shared)) as _),
            write_data_ptr,
            read_data_ptr,
            height: CachePadded::new(AtomicU32::new(height as u32)),
            len: AtomicU32::new(len),
            n: CachePadded::new(AtomicU64::new(allocated)),
            max_version: AtomicU64::new(max_version),
        }
    }

    /// Synchronously flushes the backing map to disk.
    #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
    pub(super) fn flush(&self) -> std::io::Result<()> {
        let shared = self.inner.load(Ordering::Acquire);
        {
            let shared: *mut Shared = shared.cast();
            // SAFETY: `inner` always holds the live `Box<Shared>` installed in
            // `new`; it is only freed by the last `Drop`.
            unsafe { (*shared).flush() }
        }
    }

    /// Asynchronously flushes the backing map to disk.
    #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
    pub(super) fn flush_async(&self) -> std::io::Result<()> {
        let shared = self.inner.load(Ordering::Acquire);
        {
            let shared: *mut Shared = shared.cast();
            // SAFETY: same invariant as `flush` — the pointer is live until
            // the last `Drop`.
            unsafe { (*shared).flush_async() }
        }
    }

    /// Bump-allocates `size` bytes aligned to `align` (a power of two),
    /// refusing to encroach on the final `overflow` bytes of the arena.
    ///
    /// Returns `(offset, padded_size)`: the aligned start of the allocation
    /// and the total bytes consumed from the cursor (including padding).
    ///
    /// # Errors
    /// Returns `ArenaError` when the request does not fit within `cap`.
    #[inline]
    pub(super) fn alloc(
        &self,
        size: u32,
        align: u32,
        overflow: u32,
    ) -> Result<(u32, u32), ArenaError> {
        // Worst-case bytes needed so an aligned `size`-byte span always fits.
        let padded = size as u64 + align as u64 - 1;
        let mut current_allocated = self.n.load(Ordering::Acquire);
        // Fast-path capacity check before entering the CAS loop.
        if current_allocated + padded + overflow as u64 > self.cap as u64 {
            return Err(ArenaError);
        }
        loop {
            let want = current_allocated + padded;
            match self.n.compare_exchange_weak(
                current_allocated,
                want,
                Ordering::SeqCst,
                Ordering::Acquire,
            ) {
                Ok(current) => {
                    // `current` is the pre-CAS value, so `new_size == want`.
                    let new_size = current + padded;
                    // Round the end-minus-size down to `align` to find the
                    // aligned start of the span.
                    // NOTE(review): the `as u32` casts assume `cap` (and thus
                    // `new_size`) fits in u32 — confirm upstream invariant.
                    let offset = (new_size as u32 - size) & !(align - 1);
                    return Ok((offset, padded as u32));
                }
                Err(x) => {
                    // Lost the race; re-check capacity with the fresh cursor.
                    if x + padded + overflow as u64 > self.cap as u64 {
                        return Err(ArenaError);
                    }
                    current_allocated = x;
                }
            }
        }
    }

    /// Returns the bytes at `offset..offset + size`; offset 0 (the null
    /// sentinel) yields an empty slice.
    ///
    /// # Safety
    /// `offset + size` must lie within the allocated region.
    #[inline]
    pub(super) unsafe fn get_bytes(&self, offset: usize, size: usize) -> &[u8] {
        if offset == 0 {
            return &[];
        }
        let ptr = self.get_pointer(offset);
        slice::from_raw_parts(ptr, size)
    }

    /// Mutable variant of `get_bytes`; offset 0 yields an empty slice.
    ///
    /// # Safety
    /// `offset + size` must lie within the allocated region of a writable
    /// arena, and callers must not create aliasing mutable slices.
    #[allow(clippy::mut_from_ref)]
    #[inline]
    pub(super) unsafe fn get_bytes_mut(&self, offset: usize, size: usize) -> &mut [u8] {
        if offset == 0 {
            return &mut [];
        }
        let ptr = self.get_pointer_mut(offset);
        slice::from_raw_parts_mut(ptr, size)
    }

    /// Translates an offset into a read pointer; offset 0 maps to null.
    ///
    /// # Safety
    /// `offset` must be within the allocated region.
    #[inline]
    pub(super) unsafe fn get_pointer(&self, offset: usize) -> *const u8 {
        if offset == 0 {
            return ptr::null();
        }
        self.read_data_ptr.add(offset)
    }

    /// Translates an offset into a write pointer; offset 0 maps to null.
    ///
    /// # Safety
    /// `offset` must be within the allocated region, and the arena must be
    /// writable (`write_data_ptr` is dangling for read-only maps).
    #[inline]
    pub(super) unsafe fn get_pointer_mut(&self, offset: usize) -> *mut u8 {
        if offset == 0 {
            return ptr::null_mut();
        }
        self.write_data_ptr.as_ptr().add(offset)
    }
}
impl Drop for Arena {
    /// Decrements the shared reference count; the last owner persists the
    /// current metadata and frees the backing storage.
    fn drop(&mut self) {
        unsafe {
            self.inner.with_mut(|shared| {
                let shared: *mut Shared = shared.cast();
                // `Release` on the decrement publishes this owner's writes;
                // any owner that is not the last one simply bails out.
                if (*shared).refs.fetch_sub(1, Ordering::Release) != 1 {
                    return;
                }
                // Acquire load pairs with the Release decrements above so all
                // owners' writes are visible before teardown (the same
                // fence-via-load pattern `Arc::drop` uses).
                (*shared).refs.load(Ordering::Acquire);
                // SAFETY: the refcount just hit zero, so we have exclusive
                // ownership of the `Box<Shared>` allocated in `Arena::new`.
                let mut shared = Box::from_raw(shared);
                // Hand the final metadata (height, len, allocated bytes, max
                // version) to `unmount` so it can be persisted if applicable.
                shared.unmount(
                    self.height.load(Ordering::Acquire) as u8,
                    self.len.load(Ordering::Acquire),
                    self.n.load(Ordering::Acquire),
                    self.max_version.load(Ordering::Acquire),
                );
            });
        }
    }
}
/// Smoke test: a fresh vec-backed arena renders the expected `Debug` output,
/// and `ArenaError` renders its fixed `Display` message.
#[cfg(test)]
#[test]
fn test_debug() {
    let arena = Arena::new_vec(1024, 1024);
    let rendered = std::format!("{:?}", arena);
    // A fresh arena has allocated exactly 1 byte (the reserved null offset),
    // and that byte is zeroed.
    assert_eq!(rendered, "Arena { cap: 1024, allocated: 1, data: [0] }");
    assert_eq!(
        std::format!("{}", ArenaError),
        "allocation failed because arena is full"
    );
}