use std::{mem, ops, ptr, slice};
use std::cell::UnsafeCell;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};

pub use reset::{Reset, Dirty};

mod reset;
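
/// A fixed-capacity pool of reusable values.
///
/// Values live in one contiguous allocation. Each entry may be followed by a
/// region of raw "extra" byte storage, and free entries are threaded through
/// a lock-free, index-based free list.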
pub struct Pool<T: Reset> {
    inner: Arc<UnsafeCell<PoolInner<T>>>,
}

impl<T: Reset> Pool<T> {
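    /// Creates a pool with `count` entries, each initialized by `init` and
    /// each followed by at least `extra` bytes of zeroed scratch storage
    /// (the amount is rounded up to the entry alignment).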
    pub fn with_capacity<F>(count: usize, mut extra: usize, init: F) -> Pool<T>
        where F: Fn() -> T,
    {
        let mut inner = PoolInner::with_capacity(count, extra);

        // `PoolInner` may round `extra` up for alignment; recover the
        // actual per-entry byte count from the final entry size.
        extra = inner.entry_size - mem::size_of::<Entry<T>>();

        for i in 0..count {
            unsafe {
                // Each entry initially points at its successor, forming the
                // free list; the last entry points at `count`, the "empty"
                // sentinel.
                ptr::write(inner.entry_mut(i), Entry {
                    data: init(),
                    next: i + 1,
                    extra: extra,
                });
            }

            // Track how many values are live so `PoolInner::drop` knows how
            // many to destruct, even if `init` panics partway through.
            inner.init += 1;
        }

        Pool { inner: Arc::new(UnsafeCell::new(inner)) }
    }
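
    /// Checks out a value from the pool, or returns `None` if the pool is
    /// exhausted. The value is reset (via `Reset::reset`) before being
    /// handed out, and returns to the pool when the `Checkout` is dropped.
    ///
    /// A minimal usage sketch, assuming this crate is built as `pool` and
    /// that `Reset` exposes a `reset(&mut self)` method (as the call in the
    /// body below implies):
    ///
    /// ```ignore
    /// use pool::{Pool, Reset};
    ///
    /// struct Buffer(Vec<u8>);
    ///
    /// impl Reset for Buffer {
    ///     fn reset(&mut self) { self.0.clear(); }
    /// }
    ///
    /// let mut pool = Pool::with_capacity(4, 0, || Buffer(Vec::new()));
    /// let mut buf = pool.checkout().expect("pool exhausted");
    /// buf.0.extend_from_slice(b"hello");
    /// // `buf` is returned to the pool here; it is reset on the next checkout.
    /// ```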
    pub fn checkout(&mut self) -> Option<Checkout<T>> {
        self.inner_mut().checkout().map(|ptr| {
            let mut checkout = Checkout {
                entry: ptr,
                inner: self.inner.clone(),
            };

            checkout.reset();
            checkout
        })
    }
    fn inner_mut(&self) -> &mut PoolInner<T> {
        unsafe { &mut *self.inner.get() }
    }
}
unsafe impl<T: Send + Reset> Send for Pool<T> { }
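
/// A handle to a value checked out of a `Pool`.
///
/// Dereferences to the pooled value. When the handle is dropped, the entry
/// is pushed back onto the pool's free list.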
pub struct Checkout<T> {
    entry: *mut Entry<T>,
    inner: Arc<UnsafeCell<PoolInner<T>>>,
}

impl<T> Checkout<T> {
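    /// Returns the entry's extra byte storage, located immediately after
    /// the pooled value in memory.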
    pub fn extra(&self) -> &[u8] {
        self.entry().extra()
    }

    /// Returns the entry's extra byte storage mutably.
    pub fn extra_mut(&mut self) -> &mut [u8] {
        self.entry_mut().extra_mut()
    }

    fn entry(&self) -> &Entry<T> {
        unsafe { &*self.entry }
    }

    fn entry_mut(&mut self) -> &mut Entry<T> {
        unsafe { &mut *self.entry }
    }

    fn inner(&self) -> &mut PoolInner<T> {
        unsafe { &mut *self.inner.get() }
    }
}

impl<T> ops::Deref for Checkout<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.entry().data
    }
}

impl<T> ops::DerefMut for Checkout<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.entry_mut().data
    }
}
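
// Dropping a checkout returns its entry to the pool's free list; the value
// itself is recycled, not destructed.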
impl<T> Drop for Checkout<T> {
    fn drop(&mut self) {
        self.inner().checkin(self.entry);
    }
}

unsafe impl<T: Send> Send for Checkout<T> { }
unsafe impl<T: Sync> Sync for Checkout<T> { }
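
// Shared pool state: one allocation holding every entry, plus the atomic
// head of the index-based free list.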
struct PoolInner<T> {
    // Owns the backing allocation; never read directly after setup.
    #[allow(dead_code)]
    memory: Box<[u8]>,

    // Index of the head of the free list; `count` means the pool is empty.
    next: AtomicUsize,

    // First properly aligned entry within `memory`.
    ptr: *mut Entry<T>,

    // Number of entries that have been initialized with live values.
    init: usize,

    // Total number of entries in the pool.
    count: usize,

    // Byte stride between entries: `size_of::<Entry<T>>()` plus the
    // (alignment-rounded) extra storage.
    entry_size: usize,
}
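
// Upper bound on both the entry count and the total byte size of the pool's
// backing allocation.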
const MAX: usize = usize::MAX >> 1;
impl<T> PoolInner<T> {
    fn with_capacity(count: usize, mut extra: usize) -> PoolInner<T> {
        let align = mem::align_of::<Entry<T>>();

        assert!(count < MAX, "requested pool size too big");
        assert!(align > 0, "entry alignment must be non-zero");

        // Round `extra` up to the entry alignment so entries spaced
        // `entry_size` bytes apart all remain properly aligned.
        let mask = align - 1;

        if extra & mask != 0 {
            extra = (extra + align) & !mask;
        }

        let entry_size = mem::size_of::<Entry<T>>() + extra;
        assert!(entry_size & mask == 0, "entry size is not aligned");

        let size = entry_size.checked_mul(count)
            .expect("requested pool capacity too big");
        assert!(size < MAX, "requested pool capacity too big");

        // `alloc` hands back zero-initialized memory, so the entry slots
        // and their extra regions start out zeroed.
        let (memory, ptr) = alloc(size, align);

        PoolInner {
            memory: memory,
            next: AtomicUsize::new(0),
            ptr: ptr as *mut Entry<T>,
            init: 0,
            count: count,
            entry_size: entry_size,
        }
    }
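
    // Pops an entry off the free list, returning `None` when the list is
    // empty; the lock-free "pop" half of a Treiber-style stack of indices.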
    fn checkout(&mut self) -> Option<*mut Entry<T>> {
        let mut idx = self.next.load(Ordering::Acquire);

        loop {
            debug_assert!(idx <= self.count, "invalid index: {}", idx);

            // `count` is the sentinel marking an empty free list.
            if idx == self.count {
                return None;
            }

            let nxt = self.entry_mut(idx).next;
            debug_assert!(nxt <= self.count, "invalid next index: {}", nxt);

            // Try to pop the head. On failure another thread raced us;
            // retry from the head it installed. Acquire ordering on the
            // failed exchange makes that entry's `next` field visible
            // before the retry.
            match self.next.compare_exchange(idx, nxt, Ordering::Acquire, Ordering::Acquire) {
                Ok(_) => break,
                Err(actual) => idx = actual,
            }
        }

        Some(self.entry_mut(idx) as *mut Entry<T>)
    }
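
    // Pushes an entry back onto the free list: the lock-free "push" half.
    // Called from `Checkout::drop`, possibly on another thread.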
    fn checkin(&self, ptr: *mut Entry<T>) {
        // Recover the entry's index from its byte offset into the pool.
        let idx = (ptr as usize - self.ptr as usize) / self.entry_size;
        let entry = unsafe { &mut *ptr };

        debug_assert!(idx < self.count, "invalid index; idx={}", idx);

        let mut nxt = self.next.load(Ordering::Relaxed);

        loop {
            // Link the entry in front of the current head, then attempt to
            // publish it. Release ordering makes the entry's contents
            // visible to whichever thread pops it next.
            entry.next = nxt;

            match self.next.compare_exchange(nxt, idx, Ordering::Release, Ordering::Relaxed) {
                Ok(_) => break,
                Err(actual) => nxt = actual,
            }
        }
    }

    fn entry(&self, idx: usize) -> &Entry<T> {
        debug_assert!(idx < self.count, "invalid index");

        unsafe {
            // Entries are `entry_size` bytes apart, which exceeds
            // `size_of::<Entry<T>>()` whenever extra storage was requested,
            // so index with explicit byte arithmetic rather than `offset`
            // on the typed pointer.
            let ptr = (self.ptr as *const u8).offset((idx * self.entry_size) as isize);
            &*(ptr as *const Entry<T>)
        }
    }

    fn entry_mut(&mut self, idx: usize) -> &mut Entry<T> {
        debug_assert!(idx < self.count, "invalid index");

        unsafe {
            let ptr = (self.ptr as *mut u8).offset((idx * self.entry_size) as isize);
            &mut *(ptr as *mut Entry<T>)
        }
    }
}
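
// Only the `init` entries that were actually constructed are dropped; if
// `with_capacity` panicked partway through initialization, the remaining
// slots hold no live values.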
impl<T> Drop for PoolInner<T> {
    fn drop(&mut self) {
        for i in 0..self.init {
            unsafe {
                // `ptr::read` moves each value out so its destructor runs.
                let _ = ptr::read(self.entry(i));
            }
        }
    }
}
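
// One slot in the pool: this header is followed in memory by `extra` bytes
// of scratch storage.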
struct Entry<T> {
    // The pooled value.
    data: T,

    // Index of the next free entry while this one sits on the free list.
    next: usize,

    // Number of extra bytes that follow this header.
    extra: usize,
}

impl<T> Entry<T> {
    fn extra(&self) -> &[u8] {
        unsafe {
            // The extra region starts immediately after the entry header.
            let ptr = (self as *const Entry<T> as *const u8)
                .offset(mem::size_of::<Entry<T>>() as isize);

            slice::from_raw_parts(ptr, self.extra)
        }
    }

    fn extra_mut(&mut self) -> &mut [u8] {
        unsafe {
            let ptr = (self as *mut Entry<T> as *mut u8)
                .offset(mem::size_of::<Entry<T>>() as isize);

            slice::from_raw_parts_mut(ptr, self.extra)
        }
    }
}
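
// Allocates `size` bytes of zeroed storage aligned to at least `align`,
// returning the owning buffer together with the first aligned pointer
// into it.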
fn alloc(mut size: usize, align: usize) -> (Box<[u8]>, *mut u8) {
    // Over-allocate by `align` bytes so an aligned pointer can always be
    // carved out of the buffer, wherever the allocator places it.
    size += align;

    // Zero-initialized storage. (`vec!` avoids the undefined behavior of
    // exposing uninitialized bytes via `set_len`.)
    let mut mem = vec![0u8; size].into_boxed_slice();
    let ptr = mem.as_mut_ptr();

    // Round the pointer up to the requested alignment if necessary.
    let p = ptr as usize;
    let m = align - 1;

    if p & m != 0 {
        let p = (p + align) & !m;
        return (mem, p as *mut u8);
    }

    (mem, ptr)
}