use std::cmp;
use std::ptr;
use std::sync::atomic::AtomicPtr;

/// A thin wrapper around `Vec<T>`, used purely as a raw, fixed-capacity
/// allocation; the queue manages initialization and dropping of the items
#[derive(Debug)]
pub struct RawVec<T> {
    buf: Vec<T>,
}

impl<T> RawVec<T> {
    /// allocate an uninitialized buffer that can hold `cap` items
    pub fn with_capacity(cap: usize) -> Self {
        RawVec {
            buf: Vec::with_capacity(cap),
        }
    }

    /// raw pointer to the start of the buffer; callers must stay in
    /// bounds and uphold aliasing rules when writing through it
    pub fn ptr(&self) -> *mut T {
        self.buf.as_ptr() as *mut T
    }
}

/// log2 of the block size
pub const BLOCK_SHIFT: usize = 8;
/// number of items stored in one block node
pub const BLOCK_SIZE: usize = 1 << BLOCK_SHIFT;
/// mask that maps a global index to its offset within a block
pub const BLOCK_MASK: usize = BLOCK_SIZE - 1;
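
// Worked example, assuming BLOCK_SHIFT == 8 as defined above: a global
// index of 600 maps to in-block offset 600 & BLOCK_MASK == 88, and its
// block number would be 600 >> BLOCK_SHIFT == 2. The methods below only
// ever apply the mask; selecting the right block is left to the caller.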

/// A block node holds a fixed-size array of items. Batching items per
/// node keeps malloc/free traffic low, and the contiguous array speeds
/// up bulk list operations.
pub struct BlockNode<T> {
    pub next: AtomicPtr<BlockNode<T>>,
    // we use RawVec to manage the memory; a plain array would come with
    // its own drop semantics, which is not what we want here
    data: RawVec<T>,
}

/// Note: `BlockNode` deliberately has no `Drop` impl. The owning queue
/// is responsible for dropping all items, which it does by calling
/// `get()` on each initialized slot.
impl<T> BlockNode<T> {
    /// create a new heap-allocated `BlockNode` with uninitialized slots,
    /// returning a raw pointer that the caller owns
    pub fn new() -> *mut BlockNode<T> {
        Box::into_raw(Box::new(BlockNode {
            next: AtomicPtr::new(ptr::null_mut()),
            data: RawVec::with_capacity(BLOCK_SIZE),
        }))
    }

    /// write `v` into the slot at `index`
    #[inline]
    pub fn set(&self, index: usize, v: T) {
        unsafe {
            let data = self.data.ptr().add(index & BLOCK_MASK);
            ptr::write(data, v);
        }
    }

    /// move the value at `index` out of the block; the caller takes
    /// ownership, so the underlying data is dropped when the returned
    /// value goes out of scope
    #[inline]
    pub fn get(&self, index: usize) -> T {
        unsafe {
            let data = self.data.ptr().add(index & BLOCK_MASK);
            ptr::read(data)
        }
    }

    /// bulk-read values from `start` up to (but not including) `end`
    /// into `vec`, returning the number of items moved
    ///
    /// # Safety
    /// `end` must not run past the end of this block; compute it with
    /// `bulk_end()`
    #[inline]
    pub unsafe fn bulk_get<V: Extend<T>>(&self, start: usize, end: usize, vec: &mut V) -> usize {
        let size = end.wrapping_sub(start);
        let mut p_data = self.data.ptr().add(start & BLOCK_MASK);
        for _ in 0..size {
            vec.extend(Some(ptr::read(p_data)));
            p_data = p_data.add(1);
        }
        size
    }
}

/// compute the end index for a bulk operation starting at `start`:
/// clamp `expect` (0 means "as many as possible") so that the range
/// neither passes `end` nor crosses a block boundary
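///
/// For example, with `BLOCK_SIZE == 256`: `bulk_end(250, 600, 0)` returns
/// `256`, because although 350 items are available, only 6 slots remain
/// before the block boundary at index 256.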
#[inline]
pub fn bulk_end(start: usize, end: usize, mut expect: usize) -> usize {
    if expect == 0 {
        expect = BLOCK_SIZE;
    }
    // items actually available in [start, end)
    let size0 = end.wrapping_sub(start);
    // slots left before the end of start's block
    let size1 = BLOCK_SIZE - (start & BLOCK_MASK);
    // only pop within a single block
    expect = cmp::min(size0, expect);
    expect = cmp::min(size1, expect);
    start.wrapping_add(expect)
}

// A read-only `Index` impl, kept commented out: unlike `get()` it would
// borrow the value in place, so the underlying data would not be dropped.
//
// impl<T> Index<usize> for BlockNode<T> {
//     type Output = T;
//
//     #[inline]
//     fn index(&self, index: usize) -> &T {
//         unsafe { &*self.data.ptr().add(index & BLOCK_MASK) }
//     }
// }

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn block_node_sanity() {
        let node = BlockNode::<Box<usize>>::new();
        let b = unsafe { &*node };
        for i in 0..BLOCK_SIZE {
            b.set(i, Box::new(i));
            // get() moves the value out, so the box is dropped right here
            assert_eq!(i, *b.get(i));
        }
        // free the node itself; every slot was already drained by get()
        unsafe { drop(Box::from_raw(node)) };
    }
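
    // A usage sketch for bulk_get(): fill part of a block, then drain it
    // into a Vec in one call. bulk_end() supplies an end index that
    // satisfies bulk_get()'s safety contract.
    #[test]
    fn bulk_get_drains_into_vec() {
        let node = BlockNode::<usize>::new();
        let b = unsafe { &*node };
        for i in 0..100 {
            b.set(i, i);
        }
        let mut out = Vec::new();
        // expect == 0 asks for as much as possible; here that is clamped
        // to the 100 items available
        let end = bulk_end(0, 100, 0);
        assert_eq!(end, 100);
        let moved = unsafe { b.bulk_get(0, end, &mut out) };
        assert_eq!(moved, 100);
        assert_eq!(out, (0..100).collect::<Vec<_>>());
        // all values were moved out above; only the node allocation remains
        unsafe { drop(Box::from_raw(node)) };
    }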
}