use core::default::Default;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr;
use core::slice;

use alloc::heap;
use cbuf::CBuf;

use super::raw_pool::{RawPool, Index, Block, Full, DisplayPool};
use super::types::{Result, Error, IndexLoc, BlockLoc};
/// Integer ceiling division: the smallest `q` with `q * b >= a`.
///
/// Written as quotient-plus-remainder-check rather than `(a + b - 1) / b`,
/// which could overflow for large `a`.
fn ceil(a: usize, b: usize) -> usize {
    let quotient = a / b;
    if a % b == 0 { quotient } else { quotient + 1 }
}
/// Owning wrapper around a heap-allocated `RawPool`.
///
/// All backing memory (the `RawPool` header, index array, block array and
/// index cache) is allocated in `Pool::new` and released again in `Drop`,
/// so a `Pool` must originate from `Pool::new` (or `from_raw` with memory
/// laid out the same way).
pub struct Pool {
// Heap pointer created by `Pool::new`; freed together with the pool's
// index/block/cache arrays in `Drop`.
raw: *mut RawPool,
}
impl Drop for Pool {
// Frees every heap region `Pool::new` allocated: the index cache, the
// index array, the block array, and finally the `RawPool` header itself.
// Sizes and alignment must mirror the values used at allocation time.
fn drop(&mut self) {
unsafe {
// Everything was allocated with word alignment in `Pool::new`.
let align = mem::size_of::<usize>();
let size_raw = mem::size_of::<RawPool>();
let raw = &mut *self.raw;
// Recompute each region's byte size from the live pool state.
// NOTE(review): assumes `raw.index_cache.len()` reports the full
// capacity allocated in `Pool::new` (cache_len) -- TODO confirm.
let size_indexes = raw.len_indexes() as usize * mem::size_of::<Index>();
let size_blocks = raw.len_blocks() as usize * mem::size_of::<Block>();
let size_cache = raw.index_cache.len() as usize * mem::size_of::<IndexLoc>();
heap::deallocate(raw.index_cache.as_mut_ptr() as *mut u8, size_cache, align);
heap::deallocate(raw._indexes as *mut u8, size_indexes, align);
heap::deallocate(raw._blocks as *mut u8, size_blocks, align);
// The header is freed last, since `raw` borrows out of it.
heap::deallocate(self.raw as *mut u8, size_raw, align);
}
}
}
impl Pool {
    /// Allocates a new pool holding at least `size` bytes of block data,
    /// with `indexes` allocation indexes and an index cache of
    /// `index_cache` slots (clamped to the range `1..=indexes`).
    ///
    /// Returns `Error::InvalidSize` if the index or block count exceeds
    /// half its addressable range, and `Error::OutOfMemory` if any of the
    /// four heap allocations fails (partial allocations are rolled back).
    pub fn new(size: usize, indexes: IndexLoc, index_cache: IndexLoc) -> Result<Pool> {
        // The cache needs at least one slot and never more than `indexes`.
        let cache_len = if index_cache == 0 {
            1
        } else if index_cache > indexes {
            indexes
        } else {
            index_cache
        };
        let num_blocks = ceil(size, mem::size_of::<Block>());
        // Only half of each address range may be used; presumably the
        // upper range is reserved by `RawPool` -- TODO confirm.
        if indexes > IndexLoc::max_value() / 2 || num_blocks > BlockLoc::max_value() as usize / 2 {
            return Err(Error::InvalidSize);
        }
        let num_indexes = indexes;
        unsafe {
            let align = mem::size_of::<usize>();
            let size_raw = mem::size_of::<RawPool>();
            let size_indexes = num_indexes as usize * mem::size_of::<Index>();
            let size_blocks = num_blocks * mem::size_of::<Block>();
            let size_cache = cache_len as usize * mem::size_of::<IndexLoc>();
            let pool = heap::allocate(size_raw, align);
            let indexes = heap::allocate(size_indexes, align);
            let blocks = heap::allocate(size_blocks, align);
            let cache = heap::allocate(size_cache, align);
            if pool.is_null() || indexes.is_null() || blocks.is_null() || cache.is_null() {
                // Roll back whichever allocations did succeed.
                if !cache.is_null() {
                    heap::deallocate(cache, size_cache as usize, align);
                }
                if !blocks.is_null() {
                    heap::deallocate(blocks, size_blocks, align);
                }
                if !indexes.is_null() {
                    heap::deallocate(indexes, size_indexes, align);
                }
                if !pool.is_null() {
                    heap::deallocate(pool, size_raw, align);
                }
                return Err(Error::OutOfMemory);
            }
            let pool = pool as *mut RawPool;
            let indexes = indexes as *mut Index;
            let blocks = blocks as *mut Block;
            let cache = cache as *mut IndexLoc;
            let cache_slice: &'static mut [IndexLoc] =
                slice::from_raw_parts_mut(cache, cache_len as usize);
            let index_cache = CBuf::new(cache_slice);
            // `pool` points at uninitialized memory, so use `ptr::write`
            // instead of `*pool = ...`: plain assignment would first drop
            // the garbage "old" value, which is undefined behavior.
            ptr::write(pool,
                       RawPool::new(indexes, num_indexes, blocks, num_blocks as u16, index_cache));
            Ok(Pool::from_raw(pool))
        }
    }

    /// Wraps an existing `RawPool` pointer.
    ///
    /// # Safety
    /// `raw` (and its index/block/cache arrays) must have been allocated
    /// exactly as in `Pool::new`, since `Drop` deallocates all of them.
    pub unsafe fn from_raw(raw: *mut RawPool) -> Pool {
        Pool { raw: raw }
    }

    /// Allocates a default-initialized `T` from the pool.
    pub fn alloc<T: Default>(&self) -> Result<Mutex<T>> {
        self._alloc(false)
    }

    /// Like `alloc`, but passes `fast = true` to `RawPool::alloc_index`.
    pub fn alloc_fast<T: Default>(&self) -> Result<Mutex<T>> {
        self._alloc(true)
    }

    /// Shared implementation of `alloc` / `alloc_fast`.
    #[inline]
    fn _alloc<T: Default>(&self, fast: bool) -> Result<Mutex<T>> {
        unsafe {
            // Footprint is the `Full` header plus the value, rounded up to
            // whole blocks.
            let actual_size: usize = mem::size_of::<Full>() + mem::size_of::<T>();
            let blocks = ceil(actual_size, mem::size_of::<Block>());
            if blocks > (*self.raw).len_blocks() as usize {
                return Err(Error::InvalidSize);
            }
            let i = try!((*self.raw).alloc_index(blocks as u16, fast));
            let index = (*self.raw).index(i);
            let p = (*self.raw).data(index.block()) as *mut T;
            // The freshly allocated block is uninitialized: `ptr::write`
            // initializes it without dropping the garbage it contains
            // (plain `*p = ...` would be UB for `T: Drop`).
            ptr::write(p, T::default());
            Ok(Mutex {
                index: i,
                pool: self,
                _type: PhantomData,
            })
        }
    }

    /// Allocates a slice of `len` default-initialized `T`s.
    pub fn alloc_slice<T: Default>(&self, len: BlockLoc) -> Result<SliceMutex<T>> {
        self._alloc_slice(len, false)
    }

    /// Like `alloc_slice`, but passes `fast = true` to `RawPool::alloc_index`.
    pub fn alloc_slice_fast<T: Default>(&self, len: BlockLoc) -> Result<SliceMutex<T>> {
        self._alloc_slice(len, true)
    }

    /// Shared implementation of `alloc_slice` / `alloc_slice_fast`.
    #[inline]
    fn _alloc_slice<T: Default>(&self, len: BlockLoc, fast: bool) -> Result<SliceMutex<T>> {
        unsafe {
            let actual_size: usize = mem::size_of::<Full>() + mem::size_of::<T>() * len as usize;
            let blocks = ceil(actual_size, mem::size_of::<Block>());
            if blocks > (*self.raw).len_blocks() as usize {
                return Err(Error::InvalidSize);
            }
            let i = try!((*self.raw).alloc_index(blocks as u16, fast));
            let index = (*self.raw).index(i);
            let mut p = (*self.raw).data(index.block()) as *mut T;
            // Initialize every element in place; `ptr::write` avoids
            // dropping the uninitialized memory that assignment would.
            for _ in 0..len {
                ptr::write(p, T::default());
                p = p.offset(1);
            }
            Ok(SliceMutex {
                index: i,
                len: len,
                pool: self,
                _type: PhantomData,
            })
        }
    }

    /// Delegates to `RawPool::display` for a view of the pool's state.
    pub fn display(&self) -> DisplayPool {
        unsafe { (*self.raw).display() }
    }

    /// Delegates to `RawPool::clean`.
    pub fn clean(&self) {
        unsafe { (*self.raw).clean() }
    }

    /// Delegates to `RawPool::defrag`.
    pub fn defrag(&self) {
        unsafe { (*self.raw).defrag() }
    }

    /// Pool data size in bytes, per `RawPool::size`.
    pub fn size(&self) -> usize {
        unsafe { (*self.raw).size() }
    }

    /// Number of allocation indexes, per `RawPool::len_indexes`.
    pub fn len_indexes(&self) -> IndexLoc {
        unsafe { (*self.raw).len_indexes() }
    }
}
/// Handle to a single `T` allocated from a `Pool`.
///
/// Despite the name this is not a thread-safe mutex: `lock` sets a lock
/// bit on the underlying block and `assert!`s it was clear, so a double
/// lock panics.  Dropping the handle returns the allocation to the pool.
pub struct Mutex<'a, T> {
// Location of this allocation's index within the pool.
index: IndexLoc,
// Pool the value lives in; must outlive the handle.
pool: &'a Pool,
// Marks the handle as logically containing a `T` without storing one.
_type: PhantomData<T>,
}
impl<'mutex, T> Mutex<'mutex, T> {
    /// Marks the allocation's block as locked and returns a guard that
    /// dereferences to the stored value.
    ///
    /// Panics (via `assert!`) if the block is already locked.
    pub fn lock<'a>(&'a mut self) -> Value<'a, 'mutex, T> {
        unsafe {
            let raw = &*self.pool.raw;
            let full = raw.full_mut(raw.index(self.index).block());
            assert!(!full.is_locked());
            full.set_lock();
            assert!(full.is_locked());
            Value { __lock: self }
        }
    }
}
impl<'a, T> Drop for Mutex<'a, T> {
    /// Returns the allocation's index (and its blocks) to the pool.
    fn drop(&mut self) {
        // SAFETY: `pool.raw` is valid for this handle's lifetime `'a`.
        let raw = self.pool.raw;
        unsafe {
            (*raw).dealloc_index(self.index);
        }
    }
}
/// RAII guard returned by `Mutex::lock`; clears the lock bit on drop.
///
/// Holds the handle mutably — matching the `Slice` guard for
/// `SliceMutex` — so the guard has exclusive access to the handle (and
/// therefore to the stored value, which it exposes via `DerefMut`) for
/// its whole lifetime.  `Mutex::lock` already takes `&'a mut self`, so
/// this is a drop-in change; the field is private, so callers are
/// unaffected.
pub struct Value<'a, 'mutex: 'a, T: 'mutex> {
    __lock: &'a mut Mutex<'mutex, T>,
}
impl<'a, 'mutex: 'a, T: 'mutex> Drop for Value<'a, 'mutex, T> {
    /// Clears the lock bit on the underlying block when the guard dies.
    fn drop(&mut self) {
        // SAFETY: the pool outlives the mutex, which outlives this guard.
        unsafe {
            let raw = &mut *self.__lock.pool.raw;
            let block = raw.index(self.__lock.index).block();
            raw.full_mut(block).clear_lock();
        }
    }
}
impl<'a, 'mutex: 'a, T: 'mutex> Deref for Value<'a, 'mutex, T> {
    type Target = T;

    /// Borrows the stored value by resolving the index to its data block.
    fn deref(&self) -> &T {
        unsafe {
            let raw = &*self.__lock.pool.raw;
            let block = raw.index(self.__lock.index).block();
            &*(raw.data(block) as *const T)
        }
    }
}
impl<'a, 'mutex: 'a, T: 'mutex> DerefMut for Value<'a, 'mutex, T> {
    /// Mutably borrows the stored value.
    fn deref_mut(&mut self) -> &mut T {
        unsafe {
            let raw = &*self.__lock.pool.raw;
            let block = raw.index(self.__lock.index).block();
            &mut *(raw.data(block) as *mut T)
        }
    }
}
/// Handle to a `[T]` slice allocated from a `Pool`.
///
/// Like `Mutex`, this is not a thread-safe lock: `lock` sets a lock bit
/// and panics if it was already set.  Dropping the handle returns the
/// allocation to the pool.
pub struct SliceMutex<'a, T> {
// Location of this allocation's index within the pool.
index: IndexLoc,
// Pool the slice lives in; must outlive the handle.
pool: &'a Pool,
// Number of `T` elements in the slice.
len: BlockLoc,
// Marks the handle as logically containing `T`s without storing them.
_type: PhantomData<T>,
}
impl<'a, T> Drop for SliceMutex<'a, T> {
    /// Returns the slice's index (and its blocks) to the pool.
    fn drop(&mut self) {
        // SAFETY: `pool.raw` is valid for this handle's lifetime `'a`.
        let raw = self.pool.raw;
        unsafe {
            (*raw).dealloc_index(self.index);
        }
    }
}
impl<'mutex, T> SliceMutex<'mutex, T> {
    /// Marks the allocation's block as locked and returns a guard that
    /// dereferences to the stored slice.
    ///
    /// Panics (via `assert!`) if the block is already locked.
    pub fn lock<'a>(&'a mut self) -> Slice<'a, 'mutex, T> {
        unsafe {
            let raw = &*self.pool.raw;
            let full = raw.full_mut(raw.index(self.index).block());
            assert!(!full.is_locked());
            full.set_lock();
            assert!(full.is_locked());
            Slice { __lock: self }
        }
    }
}
/// RAII guard returned by `SliceMutex::lock`; clears the lock bit on drop.
pub struct Slice<'a, 'mutex: 'a, T: 'mutex> {
// Exclusive borrow of the handle for the guard's lifetime.
__lock: &'a mut SliceMutex<'mutex, T>,
}
impl<'a, 'mutex: 'a, T: 'mutex> Drop for Slice<'a, 'mutex, T> {
    /// Clears the lock bit on the underlying block when the guard dies.
    fn drop(&mut self) {
        // SAFETY: the pool outlives the mutex, which outlives this guard.
        unsafe {
            let raw = &mut *self.__lock.pool.raw;
            let block = raw.index(self.__lock.index).block();
            raw.full_mut(block).clear_lock();
        }
    }
}
impl<'a, 'mutex: 'a, T: 'mutex> Deref for Slice<'a, 'mutex, T> {
type Target = [T];
fn deref(&self) -> &[T] {
unsafe {
let pool = &*self.__lock.pool.raw;
let index = &pool.index(self.__lock.index);
let t: *const T = mem::transmute(pool.data(index.block()));
slice::from_raw_parts(t, self.__lock.len as usize)
}
}
}
impl<'a, 'mutex: 'a, T: 'mutex> DerefMut for Slice<'a, 'mutex, T> {
fn deref_mut(&mut self) -> &mut [T] {
unsafe {
let pool = &*self.__lock.pool.raw;
let index = &pool.index(self.__lock.index);
let t: *mut T = mem::transmute(pool.data(index.block()));
slice::from_raw_parts_mut(t, self.__lock.len as usize)
}
}
}
#[test]
fn test_alloc() {
    // Round-trip two values of different types through lock guards.
    let pool = Pool::new(4096, 256, 25).unwrap();

    let want_u32 = 0x01010101;
    let mut handle_u32 = pool.alloc::<u32>().unwrap();
    let mut guard_u32 = handle_u32.lock();
    *guard_u32.deref_mut() = want_u32;
    assert_eq!(guard_u32.deref(), &want_u32);

    let want_i64 = -1000;
    let mut handle_i64 = pool.alloc::<i64>().unwrap();
    let mut guard_i64 = handle_i64.lock();
    *guard_i64.deref_mut() = want_i64;
    assert_eq!(guard_i64.deref(), &want_i64);
}
#[test]
fn test_alloc_slice() {
    // Fill a 10k-element slice and read it back within the same lock.
    let pool = Pool::new(4096 * mem::size_of::<Block>(), 256, 25).unwrap();
    let mut handle = pool.alloc_slice::<u16>(10000).unwrap();
    let mut guard = handle.lock();
    {
        let data = guard.deref_mut();
        for i in 0..10000 {
            // Freshly allocated memory must be default-initialized.
            assert_eq!(data[i], 0);
            data[i] = i as u16;
        }
    }
    {
        let data = guard.deref_mut();
        for i in 0..10000 {
            assert_eq!(data[i], i as u16);
        }
    }
}