use std::alloc::{alloc, dealloc, Layout};
use std::cell::Cell;
use std::cmp;
use std::fmt;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::ptr::{self, NonNull};
use std::slice;
#[cfg(feature = "serde")]
use serde::{Serialize, Serializer};
/// Errors reported by the arena implementation.
#[derive(Debug)]
pub enum ArenaError {
/// The backing memory for the arena could not be allocated.
AllocationFailed,
/// NOTE(review): presumably returned when exclusive access to the arena is
/// already held — confirm against the arena type that uses this enum.
AlreadyLocked,
/// NOTE(review): presumably returned when the arena cannot be reset while
/// allocations are still live — confirm against the arena type.
CannotClear,
}
/// Strategy used to obtain the arena's backing memory.
#[derive(Debug)]
pub enum ArenaBacking {
/// Reserve memory through the OS virtual-memory APIs
/// (`mmap` on unix, `VirtualAlloc` on windows — see `create_mapping`).
MemoryMap,
/// Obtain memory from the global allocator (see `create_mapping_alloc`).
SystemAllocation,
}
/// A handle to an allocator that hands out typed blocks of memory.
pub trait AllocHandle {
/// Allocate uninitialized space for `count` values of type `T`.
fn allocate<T>(&self, count: usize) -> NonNull<T>;
/// Grow the allocation at `ptr` from `old_count` to `count` elements.
/// Implementations may extend in place; when the returned pointer differs
/// from `ptr`, the caller is responsible for copying the old contents over
/// (see `SliceVec::reserve`, which does exactly that).
fn allocate_or_extend<T>(&self, ptr: NonNull<T>, old_count: usize, count: usize) -> NonNull<T>;
}
/// A fixed-length, arena-backed slice of `T`s.
///
/// Owns `len` initialized elements starting at `ptr`; the storage itself is
/// managed by the allocator behind `handle`. Elements are dropped on `Drop`,
/// but the storage is not freed per-slice (see the `Drop` impl).
pub struct Slice<T, H> {
// Points to `len` initialized elements; dangling when `len == 0`.
ptr: NonNull<T>,
// Number of initialized elements reachable through `ptr`.
len: usize,
// Allocator handle used to obtain (and grow) the storage.
handle: H,
}
/// A growable vector backed by an arena allocation: an inner `Slice` (whose
/// `len` is the initialized length) plus a separately tracked capacity.
pub struct SliceVec<T, H> {
// Initialized prefix of the allocation (`slice.len <= capacity`).
slice: Slice<T, H>,
// Total number of element slots available at `slice.ptr`.
capacity: usize,
}
impl<T, H: AllocHandle> Slice<T, H> {
    /// Create a slice of `len` default-initialized elements allocated from
    /// `handle`.
    pub fn new(handle: H, len: usize) -> Self
    where
        T: Default,
    {
        let mut res = unsafe { Self::new_empty(handle, len) };
        for i in 0..len {
            // SAFETY: `new_empty` allocated room for `len` elements and slot
            // `i` is uninitialized, so a raw write (no drop of an old value)
            // is correct here.
            unsafe {
                ptr::write(res.ptr.as_ptr().add(i), T::default());
            }
        }
        // Publish the length only after every slot is initialized, so that a
        // panic in `T::default()` cannot make `Drop` read uninitialized
        // memory (previously `len` was set before the loop).
        res.len = len;
        res
    }

    /// Allocate room for `real_len` elements but leave them uninitialized
    /// (`len` stays 0). Zero-length requests use a dangling pointer and do
    /// not touch the allocator.
    ///
    /// # Safety
    /// The caller must initialize elements before exposing them via `len`,
    /// and must never read the uninitialized region.
    unsafe fn new_empty(handle: H, real_len: usize) -> Self {
        let ptr: NonNull<T> = if real_len == 0 {
            NonNull::dangling()
        } else {
            handle.allocate(real_len)
        };
        Slice {
            ptr,
            len: 0,
            handle,
        }
    }
}
impl<T: Clone, H: AllocHandle + Clone> Clone for Slice<T, H> {
    fn clone(&self) -> Self {
        // Mirror `new_empty`: an empty slice gets a dangling pointer instead
        // of asking the allocator for a zero-element allocation (which could
        // spuriously panic on a full arena). Previously `allocate(0)` was
        // called unconditionally.
        let ptr: NonNull<T> = if self.len == 0 {
            NonNull::dangling()
        } else {
            self.handle.allocate(self.len)
        };
        for (i, item) in self.iter().enumerate() {
            // SAFETY: the new allocation holds `self.len` slots and slot `i`
            // is uninitialized.
            unsafe {
                ptr::write(ptr.as_ptr().add(i), item.clone());
            }
        }
        Slice {
            ptr,
            len: self.len,
            handle: self.handle.clone(),
        }
    }
}
impl<T: fmt::Debug, H> fmt::Debug for Slice<T, H> {
    /// Format exactly like a plain `[T]` slice.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
impl<T, H> Deref for Slice<T, H> {
type Target = [T];
fn deref(&self) -> &[T] {
// SAFETY: `ptr` points to `len` initialized elements for the lifetime of
// `self` (invariant maintained by the constructors and mutators).
unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
}
}
impl<T, H> DerefMut for Slice<T, H> {
fn deref_mut(&mut self) -> &mut [T] {
// SAFETY: `ptr` points to `len` initialized elements, and `&mut self`
// guarantees exclusive access for the duration of the borrow.
unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
}
}
impl<T: Eq, H> Eq for Slice<T, H> {}

impl<T: PartialEq, H> PartialEq for Slice<T, H> {
    /// Element-wise equality, delegated to `[T]`.
    fn eq(&self, other: &Self) -> bool {
        **self == **other
    }
}

impl<T: PartialOrd, H> PartialOrd for Slice<T, H> {
    /// Lexicographic ordering, delegated to `[T]`.
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        (**self).partial_cmp(&**other)
    }
}
impl<'a, T, H> IntoIterator for &'a Slice<T, H> {
    type Item = &'a T;
    type IntoIter = slice::Iter<'a, T>;
    /// Iterate over shared references to the elements.
    fn into_iter(self) -> Self::IntoIter {
        let items: &'a [T] = self;
        items.iter()
    }
}

impl<'a, T, H> IntoIterator for &'a mut Slice<T, H> {
    type Item = &'a mut T;
    type IntoIter = slice::IterMut<'a, T>;
    /// Iterate over mutable references to the elements.
    fn into_iter(self) -> Self::IntoIter {
        let items: &'a mut [T] = self;
        items.iter_mut()
    }
}
#[cfg(feature = "serde")]
impl<T, H> Serialize for Slice<T, H>
where
T: Serialize,
{
/// Serialize as a plain sequence of elements.
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.collect_seq(self.iter())
}
}
impl<T, H> Drop for Slice<T, H> {
fn drop(&mut self) {
// Drop the initialized elements in place. The backing memory is NOT
// returned to the allocator here — arena-style storage is reclaimed
// wholesale when the arena itself is destroyed or cleared.
unsafe {
ptr::drop_in_place(&mut self[..]);
}
}
}
impl<T, H> SliceVec<T, H> {
pub fn iter<'a>(&'a self) -> slice::Iter<'a, T> {
self.slice.iter()
}
pub fn iter_mut<'a>(&'a mut self) -> slice::IterMut<'a, T> {
self.slice.iter_mut()
}
}
impl<T, H: AllocHandle> SliceVec<T, H> {
    /// Create an empty vector with no capacity.
    pub fn new(handle: H) -> Self {
        Self::with_capacity(handle, 0)
    }

    /// Create an empty vector with room for `capacity` elements.
    pub fn with_capacity(handle: H, capacity: usize) -> Self {
        SliceVec {
            // SAFETY: the slice starts with `len == 0`, so no uninitialized
            // slot is ever observable.
            slice: unsafe { Slice::new_empty(handle, capacity) },
            capacity,
        }
    }

    /// Number of elements the vector can hold without reallocating.
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Ensure capacity for at least `additional` more elements, growing
    /// geometrically (doubling, starting at 4) to amortize reallocations.
    pub fn reserve(&mut self, additional: usize) {
        let ptr = self.slice.ptr;
        let size = self.slice.len + additional;
        if self.capacity >= size {
            return;
        }
        let mut new_capacity = if self.capacity > 0 { self.capacity } else { 4 };
        while new_capacity < size {
            new_capacity *= 2;
        }
        let new_ptr: NonNull<T> =
            self.slice.handle.allocate_or_extend(ptr, self.capacity, new_capacity);
        if ptr != new_ptr {
            // The allocation moved: transfer the initialized prefix.
            // SAFETY: old and new allocations are distinct and both hold at
            // least `len` elements.
            unsafe {
                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), self.slice.len());
            }
            self.slice.ptr = new_ptr;
        }
        self.capacity = new_capacity;
    }

    /// Shorten the vector to `len` elements, dropping the tail.
    /// No-op when `len >= self.len()`.
    pub fn truncate(&mut self, len: usize) {
        let old_len = self.slice.len;
        if len < old_len {
            // SAFETY: elements `len..old_len` are initialized and become
            // unreachable once `len` is updated below.
            unsafe {
                ptr::drop_in_place(&mut self.slice[len..old_len]);
            }
            self.slice.len = len;
        }
    }

    /// Remove and return the element at `index`, replacing it with the last
    /// element (O(1), does not preserve order).
    /// Panics if `index` is out of bounds.
    pub fn swap_remove(&mut self, index: usize) -> T {
        // Bounds-checked by slice indexing (panics on out-of-range).
        let hole: *mut T = &mut self[index];
        self.slice.len -= 1;
        // SAFETY: the (former) last element is moved into the hole; the slot
        // past the new `len` is no longer reachable through the slice.
        unsafe {
            let last = ptr::read(self.slice.ptr.as_ptr().add(self.slice.len));
            ptr::replace(hole, last)
        }
    }

    /// Append an element, growing the capacity if needed.
    pub fn push(&mut self, elem: T) {
        if self.slice.len == self.capacity {
            let new_capacity = if self.capacity == 0 {
                4
            } else {
                self.capacity * 2
            };
            self.reserve(new_capacity - self.capacity);
        }
        // SAFETY: the slot at `len` is within capacity and uninitialized.
        unsafe {
            ptr::write(self.slice.ptr.as_ptr().add(self.slice.len()), elem);
        }
        self.slice.len = self.slice.len() + 1;
    }

    /// Remove and return the last element, or `None` if empty.
    pub fn pop(&mut self) -> Option<T> {
        if self.is_empty() {
            return None;
        }
        // SAFETY: `len - 1` indexes an initialized element, which becomes
        // unreachable once `len` is decremented.
        unsafe {
            self.slice.len -= 1;
            Some(ptr::read(self.slice.ptr.as_ptr().add(self.slice.len)))
        }
    }

    /// Move all elements of `other` into `self`, leaving `other` empty.
    pub fn append(&mut self, other: &mut Self) {
        let count = other.len();
        self.reserve(count);
        let len = self.len();
        // SAFETY: `self` has room for `len + count` elements after the
        // reserve above, and `self` and `other` are distinct vectors.
        unsafe {
            ptr::copy_nonoverlapping(
                other.slice.ptr.as_ptr(),
                self.slice.ptr.as_ptr().add(len),
                count,
            );
        }
        // BUG FIX: account for the moved elements — previously `self`'s
        // length was left unchanged, silently losing everything appended.
        self.slice.len = len + count;
        // Ownership was transferred bitwise, so `other` must forget them.
        other.slice.len = 0;
    }

    /// Drop all elements, keeping the capacity.
    pub fn clear(&mut self) {
        // SAFETY: all `len` elements are initialized; `len` is reset below
        // so they cannot be dropped twice.
        unsafe {
            ptr::drop_in_place(&mut self.slice[..]);
        }
        self.slice.len = 0;
    }

    /// Number of initialized elements.
    pub fn len(&self) -> usize {
        self.slice.len
    }

    /// `true` when the vector holds no elements.
    pub fn is_empty(&self) -> bool {
        self.slice.len == 0
    }

    /// Split the vector at `at`, returning a new vector with the elements
    /// `at..len` and leaving `self` with `0..at` (same contract as
    /// `Vec::split_off`). Panics if `at > self.len()`.
    pub fn split_off(&mut self, at: usize) -> Self
    where
        H: Clone,
    {
        // Explicit check: without it, `len - at` underflows (wrapping in
        // release builds) when `at > len`.
        assert!(at <= self.slice.len, "split_off index out of bounds");
        let mut ret = Self::with_capacity(self.slice.handle.clone(), self.slice.len - at);
        ret.slice.len = self.slice.len - at;
        // SAFETY: source and destination are distinct allocations and `ret`
        // has capacity for exactly `len - at` elements.
        unsafe {
            ptr::copy_nonoverlapping(
                self.slice.ptr.as_ptr().add(at),
                ret.slice.ptr.as_ptr(),
                ret.len(),
            );
        }
        // BUG FIX: relinquish ownership of the moved tail — previously
        // `self.len` was left unchanged, so the tail elements would have
        // been dropped twice (once by `ret`, once by `self`).
        self.slice.len = at;
        ret
    }

    /// Resize to `len` elements, filling new slots with values produced by
    /// `f` and dropping surplus ones.
    pub fn resize_with<F>(&mut self, len: usize, mut f: F)
    where
        F: FnMut() -> T,
    {
        let old_len = self.slice.len;
        if self.capacity < len {
            self.reserve(len - old_len);
        }
        for i in old_len..len.saturating_sub(1) {
            // SAFETY: slot `i` is within capacity and uninitialized.
            unsafe { ptr::write(self.slice.ptr.as_ptr().add(i), f()) }
        }
        if len > old_len {
            // The final new slot is written separately (mirrors `resize`,
            // where this allows moving the fill value instead of cloning).
            unsafe {
                ptr::write(self.slice.ptr.as_ptr().add(len - 1), f());
            }
        } else if len < old_len {
            // SAFETY: surplus elements are initialized and become
            // unreachable once `len` is updated below.
            unsafe {
                ptr::drop_in_place(&mut self.slice[len..old_len]);
            }
        }
        self.slice.len = len;
    }

    /// Resize to `len` elements, cloning `value` into new slots and dropping
    /// surplus ones.
    pub fn resize(&mut self, len: usize, value: T)
    where
        T: Clone,
    {
        let old_len = self.slice.len;
        if self.capacity < len {
            self.reserve(len - old_len);
        }
        for i in old_len..len.saturating_sub(1) {
            // SAFETY: slot `i` is within capacity and uninitialized.
            unsafe { ptr::write(self.slice.ptr.as_ptr().add(i), value.clone()) }
        }
        if len > old_len {
            // Move `value` into the last new slot to save one clone.
            unsafe {
                ptr::write(self.slice.ptr.as_ptr().add(len - 1), value);
            }
        } else if len < old_len {
            unsafe {
                ptr::drop_in_place(&mut self.slice[len..old_len]);
            }
        }
        self.slice.len = len;
    }

    /// Append clones of every element in `other`.
    pub fn extend_from_slice(&mut self, other: &[T])
    where
        T: Clone,
    {
        // Reserve once up front instead of growing push-by-push.
        self.reserve(other.len());
        for e in other {
            self.push(e.clone());
        }
    }
}
impl<T: Clone, H: AllocHandle + Clone> Clone for SliceVec<T, H> {
    /// Cloning preserves both contents and capacity.
    fn clone(&self) -> Self {
        let mut copy: SliceVec<T, H> =
            SliceVec::with_capacity(self.slice.handle.clone(), self.capacity);
        for (i, item) in self.slice.iter().enumerate() {
            // SAFETY: `copy` has capacity for at least `self.len()` elements
            // and slot `i` is uninitialized.
            unsafe {
                ptr::write(copy.slice.ptr.as_ptr().add(i), item.clone());
            }
        }
        // Publish the length after all slots are written.
        copy.slice.len = self.slice.len();
        copy
    }
}
impl<T: fmt::Debug, H> fmt::Debug for SliceVec<T, H> {
    /// Format exactly like a plain `[T]` slice.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
impl<T, H> Deref for SliceVec<T, H> {
type Target = [T];
/// View the initialized prefix (`0..len`) as a shared slice.
fn deref(&self) -> &[T] {
self.slice.deref()
}
}
impl<T, H> DerefMut for SliceVec<T, H> {
/// View the initialized prefix (`0..len`) as a mutable slice.
fn deref_mut(&mut self) -> &mut [T] {
self.slice.deref_mut()
}
}
impl<T: Eq, H> Eq for SliceVec<T, H> {}

impl<T: PartialEq, H> PartialEq for SliceVec<T, H> {
    /// Element-wise equality, delegated to `[T]`.
    fn eq(&self, other: &Self) -> bool {
        **self == **other
    }
}

impl<T: PartialOrd, H> PartialOrd for SliceVec<T, H> {
    /// Lexicographic ordering, delegated to `[T]`.
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        (**self).partial_cmp(&**other)
    }
}
impl<'a, T: 'a, H> IntoIterator for &'a SliceVec<T, H> {
    type Item = &'a T;
    type IntoIter = slice::Iter<'a, T>;
    /// Iterate over shared references to the elements.
    fn into_iter(self) -> Self::IntoIter {
        let items: &'a [T] = self;
        items.iter()
    }
}

impl<'a, T: 'a, H> IntoIterator for &'a mut SliceVec<T, H> {
    type Item = &'a mut T;
    type IntoIter = slice::IterMut<'a, T>;
    /// Iterate over mutable references to the elements.
    fn into_iter(self) -> Self::IntoIter {
        let items: &'a mut [T] = self;
        items.iter_mut()
    }
}
#[cfg(feature = "serde")]
impl<T, H> Serialize for SliceVec<T, H>
where
T: Serialize,
{
/// Serialize as a plain sequence (same representation as the inner `Slice`).
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.slice.serialize(serializer)
}
}
/// Query the OS page size via `sysconf(_SC_PAGESIZE)`.
#[cfg(unix)]
pub(crate) fn get_page_size() -> usize {
// NOTE(review): sysconf returns -1 on error, which would wrap to a huge
// usize here; in practice _SC_PAGESIZE is always available on unix.
unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
}
/// Query the OS page size from `GetSystemInfo`.
#[cfg(windows)]
pub(crate) fn get_page_size() -> usize {
use std::mem;
use winapi::um::sysinfoapi::GetSystemInfo;
unsafe {
// SAFETY: SYSTEM_INFO is a plain C struct; all-zero bytes are a valid
// initial value and GetSystemInfo overwrites it.
let mut info = mem::zeroed();
GetSystemInfo(&mut info);
info.dwPageSize as usize
}
}
/// Create a private, anonymous read/write mapping of `capacity` bytes.
/// Returns a null pointer if the mapping could not be created.
#[cfg(unix)]
pub(crate) fn create_mapping(capacity: usize) -> *mut u8 {
    let ptr = unsafe {
        libc::mmap(
            ptr::null_mut(),
            capacity,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_ANON | libc::MAP_PRIVATE,
            -1,
            0,
        )
    };
    // BUG FIX: mmap signals failure with MAP_FAILED ((void*)-1), *not* with
    // null. Normalize to null so callers can use a single null check for
    // both the mmap and VirtualAlloc (windows) backends.
    if ptr == libc::MAP_FAILED {
        ptr::null_mut()
    } else {
        ptr as *mut u8
    }
}
/// Reserve and commit a read/write region of at least `capacity` bytes via
/// `VirtualAlloc`, rounding the length up to a whole number of pages.
/// Returns a null pointer on failure (VirtualAlloc's failure value).
#[cfg(windows)]
pub(crate) fn create_mapping(capacity: usize) -> *mut u8 {
use std::ptr;
use winapi::shared::basetsd::SIZE_T;
use winapi::shared::minwindef::LPVOID;
use winapi::um::memoryapi::VirtualAlloc;
use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_READWRITE};
// Null address lets the OS choose where to place the region.
let lpAddress: LPVOID = ptr::null_mut();
let page_size = get_page_size();
// Round `capacity` up to the next multiple of the page size.
let len = if capacity % page_size == 0 {
capacity
} else {
capacity + page_size - (capacity % page_size)
};
let flAllocationType = MEM_COMMIT | MEM_RESERVE;
let flProtect = PAGE_READWRITE;
let r = unsafe { VirtualAlloc(lpAddress, len as SIZE_T, flAllocationType, flProtect) };
r as *mut u8
}
/// Obtain `capacity` bytes, page-aligned, from the global allocator.
/// Returns null on allocation failure (per `std::alloc::alloc`'s contract).
pub(crate) fn create_mapping_alloc(capacity: usize) -> *mut u8 {
// NOTE(review): `from_size_align_unchecked` skips validation — this assumes
// `capacity > 0` and that `capacity` rounded up to the page size does not
// overflow; confirm callers guarantee this before calling.
unsafe { alloc(Layout::from_size_align_unchecked(capacity, get_page_size())) }
}
/// Release a mapping previously created with `create_mapping`.
/// `capacity` must be the same value that was passed at creation time.
#[cfg(unix)]
pub(crate) fn destroy_mapping(base: NonNull<u8>, capacity: usize) {
let res = unsafe { libc::munmap(base.as_ptr() as *mut libc::c_void, capacity) };
// munmap only fails on invalid arguments, which would indicate a bug here.
debug_assert_eq!(res, 0);
}
/// Release a mapping previously created with `create_mapping`.
/// `capacity` is intentionally unused: `VirtualFree` with MEM_RELEASE
/// requires a size of 0 and frees the entire reserved region.
#[cfg(windows)]
pub(crate) fn destroy_mapping(base: NonNull<u8>, capacity: usize) {
use winapi::shared::minwindef::LPVOID;
use winapi::um::memoryapi::VirtualFree;
use winapi::um::winnt::MEM_RELEASE;
let res = unsafe { VirtualFree(base.as_ptr() as LPVOID, 0, MEM_RELEASE) };
// VirtualFree returns nonzero on success.
debug_assert_ne!(res, 0);
}
/// Return memory obtained from `create_mapping_alloc` to the global
/// allocator. `capacity` must match the value used at allocation time so
/// that the reconstructed layout agrees with the one used to allocate.
pub(crate) fn destroy_mapping_alloc(base: NonNull<u8>, capacity: usize) {
unsafe {
let layout = Layout::from_size_align_unchecked(capacity, get_page_size());
dealloc(base.as_ptr(), layout);
}
}
/// Bump-allocate space for `count` values of `T` from the arena region that
/// starts at `head` and spans `cap` bytes, advancing `position`.
///
/// The result is aligned for `T` *relative to `head`*, so `head` itself must
/// be at least as aligned as any `T` allocated from it.
///
/// # Panics
/// Panics when the request does not fit in the remaining `cap - position`
/// bytes ("arena overflow").
pub(crate) fn allocate_inner<T>(
    head: NonNull<u8>,
    position: &Cell<usize>,
    cap: usize,
    count: usize) -> NonNull<T>
{
    let layout = Layout::new::<T>();
    let mask = layout.align() - 1;
    let pos = position.get();
    debug_assert!(layout.align().is_power_of_two());
    // BUG FIX: pad up to the next multiple of the *type's* alignment. The
    // previous code used a hard-coded 64 here, which wasted up to 63 bytes
    // per allocation for align <= 64 and underflowed for larger alignments.
    let mut skip = layout.align() - (pos & mask);
    if skip == layout.align() {
        // `pos` is already aligned: no padding needed.
        skip = 0;
    }
    let additional = skip + layout.size() * count;
    assert!(
        pos + additional <= cap,
        "arena overflow: {} > {}",
        pos + additional,
        cap
    );
    position.set(pos + additional);
    // SAFETY: `pos + skip` is within the `cap`-byte region per the assert.
    let ret = unsafe { head.as_ptr().add(pos + skip) as *mut T };
    assert!((ret as usize) >= head.as_ptr() as usize);
    // The whole allocation (not just its start) must lie inside the region;
    // phrased with `<=` so zero-sized requests at the very end are legal.
    assert!((ret as usize) + layout.size() * count <= head.as_ptr() as usize + cap);
    // SAFETY: `head` is non-null and the offset stays in bounds, so `ret`
    // cannot be null.
    unsafe { NonNull::new_unchecked(ret) }
}

/// Grow the allocation `ptr` (currently `old_count` elements of `T`) to
/// `count` elements. When `ptr + old_count` is exactly the arena's bump
/// position, the allocation is extended in place; otherwise a fresh block is
/// bump-allocated and the caller is responsible for copying the contents.
pub(crate) fn allocate_or_extend_inner<T>(
    head: NonNull<u8>,
    position: &Cell<usize>,
    cap: usize,
    ptr: NonNull<T>,
    old_count: usize,
    count: usize) -> NonNull<T>
{
    // Shrinking is not supported; `count - old_count` below would underflow.
    debug_assert!(count >= old_count);
    let pos = position.get();
    let next = unsafe { head.as_ptr().add(pos) };
    let end = unsafe { ptr.as_ptr().add(old_count) };
    if next == end as *mut u8 {
        let new_pos = pos + (count - old_count) * mem::size_of::<T>();
        // BUG FIX: the in-place extension previously skipped the capacity
        // check, silently bumping `position` past `cap`.
        assert!(new_pos <= cap, "arena overflow: {} > {}", new_pos, cap);
        position.set(new_pos);
        ptr
    } else {
        allocate_inner(head, position, cap, count)
    }
}