use std::mem::MaybeUninit;
use std::ptr::NonNull;
use std::sync::atomic::Ordering::*;
use std::sync::atomic::AtomicPtr;
use std::sync::Arc;
use std::cell::Cell;
use crate::block::Block;
use crate::page::arena::{PageArena, drop_page};
use crate::common::{Pointer, BLOCK_PER_PAGE};
use crate::{ArenaRc, ArenaBox, ArenaArc};
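/// A memory pool that allocates values of `T` in fixed-size pages.
///
/// Pages are tracked on three lists:
/// - `free_list`: pages with available blocks, used only by the owner thread,
/// - `pending_free_list`: a shared list of pages that regained free blocks
///   while not on `free_list` (for example from drops on other threads),
///   waiting to be picked up,
/// - `full_list`: every page owned by the arena, walked when shrinking,
///   computing statistics and dropping.
///
/// A minimal usage sketch (marked `ignore` because the import path depends
/// on the crate name):
///
/// ```ignore
/// let arena = Arena::<usize>::with_capacity(128);
/// let value: ArenaBox<usize> = arena.alloc(1);
/// assert_eq!(*value, 1);
/// ```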
pub struct Arena<T: Sized> {
free_list: Pointer<PageArena<T>>,
pending_free_list: Arc<AtomicPtr<PageArena<T>>>,
full_list: AtomicPtr<PageArena<T>>,
npages: Cell<usize>,
}
unsafe impl<T: Sized> Send for Arena<T> {}
impl<T: Sized> Arena<T> {
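/// Allocates a new batch of pages, pushing them onto both `full_list`
/// and `free_list`.
///
/// The batch size equals the current number of pages (so the arena
/// roughly doubles), clamped to at least 1 and at most 900_000 pages.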
fn alloc_new_page(&self) {
let to_allocate = self.npages
.get()
.max(1)
.min(900_000);
let (first, mut last) = PageArena::make_list(to_allocate, &self.pending_free_list);
let first_ptr = first.as_ptr();
let last_ref = unsafe { last.as_mut() };
let current = self.full_list.load(Relaxed);
last_ref.next.store(current, Relaxed);
self.full_list.swap(first_ptr, AcqRel);
let current = self.free_list.get();
assert!(current.is_null(), "Arena.free_list isn't null");
self.free_list.set(first_ptr);
self.npages.set(self.npages.get() + to_allocate);
}
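/// Finds a free block to write a value into.
///
/// The local `free_list` is searched first; when it is exhausted, the
/// shared `pending_free_list` is stolen, and as a last resort new pages
/// are allocated.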
fn find_place(&self) -> NonNull<Block<T>> {
loop {
while let Some(page) = unsafe { self.free_list.get().as_mut() } {
if let Some(block) = page.acquire_free_block() {
return block;
}
let next = page.next_free.load(Acquire);
self.free_list.set(next);
page.in_free_list.store(false, Release);
}
// Pages whose blocks were released while the page was not on the local
// free list end up on the shared `pending_free_list`; take the whole
// list in one swap and reuse it as the local free list.
let pending = self.pending_free_list.load(Relaxed);
if !pending.is_null() {
let pending = self.pending_free_list.swap(std::ptr::null_mut(), AcqRel);
self.free_list.set(pending);
} else {
self.alloc_new_page();
}
}
}
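/// Creates an arena with enough pages to hold at least `cap` values
/// without further page allocation.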
pub fn with_capacity(cap: usize) -> Arena<T> {
let npages = ((cap.max(1) - 1) / BLOCK_PER_PAGE) + 1;
let pending_free = Arc::new(AtomicPtr::new(std::ptr::null_mut()));
let (first, _) = PageArena::make_list(npages, &pending_free);
Arena {
npages: Cell::new(npages),
free_list: Cell::new(first.as_ptr()),
pending_free_list: pending_free,
full_list: AtomicPtr::new(first.as_ptr()),
}
}
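/// Creates an arena with a single page.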
pub fn new() -> Arena<T> {
Arena::with_capacity(BLOCK_PER_PAGE)
}
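/// Writes `value` into a free block and returns an [`ArenaBox`]
/// pointing to it.
///
/// A minimal sketch (marked `ignore`, import path depends on the crate):
///
/// ```ignore
/// let arena = Arena::<usize>::new();
/// let one = arena.alloc(1);
/// assert_eq!(*one, 1);
/// ```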
pub fn alloc(&self, value: T) -> ArenaBox<T> {
let block = self.find_place();
unsafe {
let ptr = block.as_ref().value.get();
ptr.write(value);
}
ArenaBox::new(block)
}
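/// Allocates a block and lets `initializer` write the value in place,
/// avoiding an intermediate copy of `T` on the stack.
///
/// `initializer` receives the uninitialized block and must return a
/// reference to that same block once initialized; otherwise this method
/// panics. A sketch of a valid initializer, where `compute_value` is a
/// hypothetical constructor:
///
/// ```ignore
/// let data = arena.alloc_with(|uninit| uninit.write(compute_value()));
/// ```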
pub fn alloc_with<F>(&self, initializer: F) -> ArenaBox<T>
where
F: Fn(&mut MaybeUninit<T>) -> &T
{
let block = self.find_place();
let result = ArenaBox::new(block);
unsafe {
let ptr = block.as_ref().value.get();
let reference = initializer(&mut *(ptr as *mut MaybeUninit<T>));
assert_eq!(
ptr as *const T,
reference as *const T,
"`initializer` must return a reference to its parameter"
);
}
result
}
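/// Writes `value` into a free block and returns an [`ArenaArc`], the
/// atomically reference-counted counterpart of [`ArenaBox`].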
pub fn alloc_arc(&self, value: T) -> ArenaArc<T> {
let block = self.find_place();
unsafe {
let ptr = block.as_ref().value.get();
ptr.write(value);
}
ArenaArc::new(block)
}
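/// Like [`Arena::alloc_with`], but returns an [`ArenaArc`].
///
/// The same contract applies: `initializer` must return a reference to
/// the block it was given, or this method panics.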
pub fn alloc_arc_with<F>(&self, initializer: F) -> ArenaArc<T>
where
F: Fn(&mut MaybeUninit<T>) -> &T
{
let block = self.find_place();
let result = ArenaArc::new(block);
unsafe {
let ptr = block.as_ref().value.get();
let reference = initializer(&mut *(ptr as *mut MaybeUninit<T>));
assert_eq!(
ptr as *const T,
reference as *const T,
"`initializer` must return a reference to its parameter"
);
}
result
}
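/// Writes `value` into a free block and returns an [`ArenaRc`], the
/// non-atomic reference-counted counterpart of [`ArenaArc`].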
pub fn alloc_rc(&self, value: T) -> ArenaRc<T> {
let block = self.find_place();
unsafe {
let ptr = block.as_ref().value.get();
ptr.write(value);
}
ArenaRc::new(block)
}
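/// Like [`Arena::alloc_with`], but returns an [`ArenaRc`].
///
/// The same contract applies: `initializer` must return a reference to
/// the block it was given, or this method panics.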
pub fn alloc_rc_with<F>(&self, initializer: F) -> ArenaRc<T>
where
F: Fn(&mut MaybeUninit<T>) -> &T
{
let block = self.find_place();
let result = ArenaRc::new(block);
unsafe {
let ptr = block.as_ref().value.get();
let reference = initializer(&mut *(ptr as *mut MaybeUninit<T>));
assert_eq!(
ptr as *const T,
reference as *const T,
"`initializer` must return a reference to its parameter"
);
}
result
}
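/// Releases fully unused pages found on the local free list.
///
/// Pages that still contain live values are kept, so existing
/// [`ArenaBox`], [`ArenaArc`] and [`ArenaRc`] handles stay valid.
/// Pages sitting on the pending free list are not reclaimed until they
/// have been moved back to the local list. Always returns `true`.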
pub fn shrink_to_fit(&self) -> bool {
// Detach the free list behind a temporary head so the first page can
// be unlinked with the same compare_exchange path as any other page.
let mut current: &AtomicPtr<PageArena<T>> = &AtomicPtr::new(self.free_list.get());
self.free_list.set(std::ptr::null_mut());
let start = current;
let mut to_drop = Vec::with_capacity(self.npages.get());
while let Some(current_value) = unsafe { current.load(Relaxed).as_mut() } {
let next = &current_value.next_free;
let next_value = next.load(Relaxed);
// A page can be dropped when every block is free in both bitfields.
if (current_value.bitfield.get() | current_value.bitfield_atomic.load(Acquire)) == !0 {
if current.compare_exchange(
current_value as *const _ as *mut _, next_value, AcqRel, Relaxed
).is_ok() {
to_drop.push(current_value as *const _ as *mut PageArena<T>);
}
} else {
current = next;
}
}
let mut current: &AtomicPtr<PageArena<T>> = &self.full_list;
while let Some(current_value) = unsafe { current.load(Relaxed).as_mut() } {
let next = &current_value.next;
let next_value = next.load(Relaxed);
if to_drop.contains(&(current_value as *const _ as *mut PageArena<T>)) {
current.compare_exchange(
current_value as *const _ as *mut _, next_value, AcqRel, Relaxed
).expect("Something went wrong in shrinking.");
} else {
current = next;
}
}
for page in &to_drop {
let page_ref = unsafe { page.as_ref().unwrap() };
assert_eq!(page_ref.bitfield.get() | page_ref.bitfield_atomic.load(Acquire), !0);
drop_page(*page);
}
self.free_list.set(start.load(Relaxed));
self.npages.set(self.npages.get() - to_drop.len());
true
}
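/// Returns `(used, free)`: how many blocks are currently allocated and
/// how many are still available, summed over every page in the arena.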
pub fn stats(&self) -> (usize, usize) {
let mut next = self.full_list.load(Relaxed);
let mut used = 0;
let mut npages = 0;
while let Some(next_ref) = unsafe { next.as_mut() } {
let next_next = next_ref.next.load(Relaxed);
let bitfield = next_ref.bitfield.get();
let bitfield_atomic = next_ref.bitfield_atomic.load(Relaxed);
let zeros = (bitfield | bitfield_atomic).count_zeros() as usize;
used += zeros;
next = next_next;
npages += 1;
}
assert!(npages == self.npages.get());
let free = (npages * BLOCK_PER_PAGE) - used;
(used, free)
}
#[cfg(target_pointer_width = "64")]
#[cfg(test)]
pub(crate) fn size_lists(&self) -> (usize, usize, usize) {
let mut next = self.full_list.load(Relaxed);
let mut size = 0;
while let Some(next_ref) = unsafe { next.as_mut() } {
next = next_ref.next.load(Relaxed);
size += 1;
}
let mut next = self.free_list.get();
let mut free = 0;
while let Some(next_ref) = unsafe { next.as_mut() } {
next = next_ref.next_free.load(Relaxed);
free += 1;
}
let mut next = self.pending_free_list.load(Relaxed);
let mut pending = 0;
while let Some(next_ref) = unsafe { next.as_mut() } {
next = next_ref.next_free.load(Relaxed);
pending += 1;
}
(size, free, pending)
}
#[allow(dead_code)]
#[cfg(test)]
pub(crate) fn display_list(&self) {
let mut full = vec![];
let mut next = self.full_list.load(Relaxed);
while let Some(next_ref) = unsafe { next.as_mut() } {
full.push(next);
next = next_ref.next.load(Relaxed);
}
let mut list_free = vec![];
let mut next = self.free_list.get();
while let Some(next_ref) = unsafe { next.as_mut() } {
list_free.push(next);
next = next_ref.next_free.load(Relaxed);
}
println!("FULL {} {:#?}", full.len(), full);
println!("FREE {} {:#?}", list_free.len(), list_free);
}
}
impl<T: Sized> Drop for Arena<T> {
fn drop(&mut self) {
let mut next = self.full_list.load(Relaxed);
while let Some(next_ref) = unsafe { next.as_mut() } {
let next_next = next_ref.next.load(Relaxed);
drop_page(next);
next = next_next;
}
}
}
impl<T: Sized> Default for Arena<T> {
fn default() -> Arena<T> {
Arena::new()
}
}
impl<T> std::fmt::Debug for Arena<T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
struct Page {
free: usize,
used: usize,
}
impl std::fmt::Debug for Page {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Page {{ free: {} used: {} }}", self.free, self.used)
}
}
let npages = self.npages.get();
let mut vec = Vec::with_capacity(npages);
let mut next = self.full_list.load(Relaxed);
while let Some(next_ref) = unsafe { next.as_mut() } {
let used = (next_ref.bitfield.get() | next_ref.bitfield_atomic.load(Relaxed)).count_zeros() as usize;
vec.push(Page {
used,
free: BLOCK_PER_PAGE - used
});
next = next_ref.next.load(Relaxed);
}
let blocks_used: usize = vec.iter().map(|p| p.used).sum();
let blocks_free: usize = vec.iter().map(|p| p.free).sum();
f.debug_struct("Arena")
.field("blocks_free", &blocks_free)
.field("blocks_used", &blocks_used)
.field("npages", &npages)
.field("pages", &vec)
.finish()
}
}
#[allow(dead_code)]
fn arena_fail() {}
#[cfg(test)]
mod tests {
use super::Arena;
use std::mem::MaybeUninit;
use std::ptr;
#[cfg(target_pointer_width = "64")]
#[test]
fn arena_shrink() {
let arena = Arena::<usize>::with_capacity(1000);
assert_eq!(arena.stats(), (0, 1008));
arena.shrink_to_fit();
assert_eq!(arena.stats(), (0, 0));
}
#[cfg(target_pointer_width = "64")]
#[test]
fn arena_shrink2() {
let arena = Arena::<usize>::with_capacity(1000);
println!("A");
let _a = arena.alloc(1);
arena.shrink_to_fit();
assert_eq!(arena.stats(), (1, 62));
println!("A1");
let _a = arena.alloc(1);
arena.shrink_to_fit();
assert_eq!(arena.stats(), (2, 61));
println!("A2");
let mut values = Vec::with_capacity(64);
for _ in 0..64 {
values.push(arena.alloc(1));
}
println!("A3");
assert_eq!(arena.stats(), (66, 60));
println!("A32");
arena.shrink_to_fit();
println!("A33");
assert_eq!(arena.stats(), (66, 60));
println!("A4");
std::mem::drop(values);
println!("A5");
assert_eq!(arena.stats(), (2, 124));
println!("A6");
arena.shrink_to_fit();
println!("A7");
assert_eq!(arena.stats(), (2, 61));
}
#[cfg(target_pointer_width = "64")]
#[test]
fn arena_size() {
let arena = Arena::<usize>::with_capacity(1000);
assert_eq!(arena.size_lists(), (16, 16, 0));
let a = arena.alloc(1);
assert_eq!(arena.size_lists(), (16, 16, 0));
let mut values = Vec::with_capacity(539);
for _ in 0..539 {
values.push(arena.alloc(1));
}
assert_eq!(arena.size_lists(), (16, 8, 0));
arena.shrink_to_fit();
assert_eq!(arena.size_lists(), (9, 1, 0));
values.truncate(503);
arena.shrink_to_fit();
assert_eq!(arena.size_lists(), (8, 0, 0));
std::mem::drop(a);
for _ in 0..62 {
values.remove(0);
}
assert_eq!(arena.size_lists(), (8, 0, 1));
arena.shrink_to_fit();
assert_eq!(arena.size_lists(), (8, 0, 1));
values.clear();
assert_eq!(arena.size_lists(), (8, 0, 8));
arena.shrink_to_fit();
println!("LA", );
assert_eq!(arena.size_lists(), (8, 0, 8));
println!("LA2", );
{
let _a = arena.alloc(1);
println!("LA3", );
assert_eq!(arena.size_lists(), (8, 8, 0));
println!("{:?}", arena);
arena.display_list();
}
assert_eq!(arena.size_lists(), (8, 8, 0));
arena.shrink_to_fit();
assert_eq!(arena.size_lists(), (0, 0, 0));
let mut values = Vec::with_capacity(126);
for _ in 0..126 {
values.push(arena.alloc(1));
}
assert_eq!(arena.size_lists(), (2, 1, 0));
values.remove(0);
assert_eq!(arena.size_lists(), (2, 1, 1));
values.push(arena.alloc(1));
assert_eq!(arena.size_lists(), (2, 1, 0));
}
#[test]
fn alloc_with_initializer() {
struct MyData {
a: usize
}
fn initialize_data<'d>(uninit: &'d mut MaybeUninit<MyData>, source: &MyData) -> &'d MyData {
unsafe {
let ptr = uninit.as_mut_ptr();
ptr::copy(source, ptr, 1);
&*ptr
}
}
let arena = Arena::<MyData>::new();
let source = MyData { a: 101 };
let data = arena.alloc_with(|uninit| {
initialize_data(uninit, &source)
});
assert!(data.a == 101);
let source = MyData { a: 102 };
let data = arena.alloc_rc_with(|uninit| {
initialize_data(uninit, &source)
});
assert!(data.a == 102);
let source = MyData { a: 103 };
let data = arena.alloc_arc_with(|uninit| {
initialize_data(uninit, &source)
});
assert!(data.a == 103);
}
#[test]
#[should_panic]
fn alloc_with_panic() {
let arena = Arena::<usize>::new();
const SOURCE: usize = 10;
let _ = arena.alloc_with(|_| {
&SOURCE
});
}
#[test]
#[should_panic]
fn alloc_rc_with_panic() {
let arena = Arena::<usize>::new();
const SOURCE: usize = 10;
let _ = arena.alloc_rc_with(|_| {
&SOURCE
});
}
#[test]
#[should_panic]
fn alloc_arc_with_panic() {
let arena = Arena::<usize>::new();
const SOURCE: usize = 10;
let _ = arena.alloc_arc_with(|_| {
&SOURCE
});
}
#[test]
fn alloc_fns() {
let arena = Arena::<usize>::new();
use std::ptr;
let a = arena.alloc_with(|place| unsafe {
ptr::copy(&101, place.as_mut_ptr(), 1);
&*place.as_mut_ptr()
});
assert!(*a == 101);
let a = arena.alloc_arc_with(|place| unsafe {
ptr::copy(&102, place.as_mut_ptr(), 1);
&*place.as_mut_ptr()
});
assert!(*a == 102);
let a = arena.alloc_rc_with(|place| unsafe {
ptr::copy(&103, place.as_mut_ptr(), 1);
&*place.as_mut_ptr()
});
assert!(*a == 103);
let a = arena.alloc(104);
assert!(*a == 104);
let a = arena.alloc_arc(105);
assert!(*a == 105);
let a = arena.alloc_rc(106);
assert!(*a == 106);
}
#[test]
fn drop_arena_with_valid_allocated() {
let (a, b, c, d) = {
let arena = Arena::<usize>::new();
use std::ptr;
let a = arena.alloc_with(|place| unsafe {
ptr::copy(&101, place.as_mut_ptr(), 1);
&*place.as_mut_ptr()
});
let b = arena.alloc_arc_with(|place| unsafe {
ptr::copy(&102, place.as_mut_ptr(), 1);
&*place.as_mut_ptr()
});
let c = arena.alloc(103);
let d = arena.alloc_arc(104);
(a, b, c, d)
};
assert_eq!((*a, *b, *c, *d), (101, 102, 103, 104))
}
#[test]
#[cfg_attr(miri, ignore)]
fn arena_with_threads() {
test_with_threads(12, 1024 * 64, false);
}
#[test]
#[cfg_attr(miri, ignore)]
fn arena_with_threads_and_shrinks() {
test_with_threads(12, 1024 * 4, true);
}
#[test]
fn miri_arena_with_threads() {
test_with_threads(12, 128, false);
}
#[test]
fn miri_arena_with_threads_and_shrinks() {
test_with_threads(12, 64, true);
}
fn test_with_threads(nthreads: usize, nallocs: usize, with_shrink: bool) {
use std::sync::{Arc, Barrier};
use std::thread;
use std::time::{SystemTime, UNIX_EPOCH};
fn get_random_number(max: usize) -> usize {
let nanos = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.subsec_nanos() as usize;
nanos % max
}
let arena = Arena::<usize>::default();
let mut values_for_threads = Vec::with_capacity(nthreads);
for _ in 0..(nthreads + 1) {
let mut values = Vec::with_capacity(nallocs);
for _ in 0..values.capacity() {
values.push(arena.alloc(1));
}
values_for_threads.push(values);
}
let mut handles = Vec::with_capacity(nthreads);
let barrier = Arc::new(Barrier::new(nthreads + 1));
for _ in 0..nthreads {
let c = barrier.clone();
let mut values = values_for_threads.pop().unwrap();
handles.push(thread::spawn(move || {
c.wait();
while !values.is_empty() {
values.pop();
}
}));
}
let mut values = values_for_threads.pop().unwrap();
barrier.wait();
while !values.is_empty() {
let rand = get_random_number(values.len());
if with_shrink && rand % 200 == 0 {
arena.shrink_to_fit();
}
values.pop();
}
for handle in handles {
handle.join().unwrap();
}
}
}