//! linked-buffer: yet another linked buffer implementation.
//!
//! `bytes::Buf` and `bytes::BufMut` implementations for the crate's
//! `Buffer` and `BufferMut` types.
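//!
//! A minimal usage sketch (hypothetical doc-test: it assumes this crate is
//! imported as `linked_buffer` and that `BufferMut` is re-exported at the
//! crate root, as the `use crate::...` import below suggests):
//!
//! ```
//! use bytes::{Buf, BufMut};
//! use linked_buffer::BufferMut;
//!
//! let mut b = BufferMut::new();
//! b.put_u16(0xabcd);               // write through the BufMut impl
//! assert_eq!(b.chunk(), &[0xab, 0xcd]);
//! assert_eq!(b.get_u16(), 0xabcd); // read back through the Buf impl
//! ```
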
use bytes::{Buf, BufMut};
use std::io::IoSlice;

use crate::{block::BLOCK_CAP, Buffer, BufferMut};

macro_rules! impl_buf {
    ($t:ty) => {
        impl Buf for $t {
            #[inline]
            fn remaining(&self) -> usize {
                self.len()
            }

            #[inline]
            fn chunk(&self) -> &[u8] {
                if self.is_empty() {
                    return &[];
                }

                let begin = self.read_offset;
                let end = BLOCK_CAP.min(self.read_offset + self.len);

                // head must be Some since the buffer is not empty.
                let node = self.head.unwrap();
                if begin == end {
                    // begin == end means the head block is fully consumed;
                    // since the buffer is not empty, the next block must
                    // exist and the readable bytes start there.
                    let next = unsafe { node.as_ref() }.next.unwrap();
                    &unsafe { next.as_ref().block.as_ref() }.get_buf()[0..self.len.min(BLOCK_CAP)]
                } else {
                    &unsafe { node.as_ref().block.as_ref() }.get_buf()[begin..end]
                }
            }

            #[inline]
            fn advance(&mut self, cnt: usize) {
                self.advance(cnt)
            }

            #[inline]
            fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
                // nothing to read from an empty buffer
                if self.is_empty() {
                    return 0;
                }

                // current node
                let mut maybe_node = self.head;
                // begin index within the current block
                let mut block_begin_offset = self.read_offset;
                // skip the first block if it has been fully consumed
                if block_begin_offset == BLOCK_CAP {
                    // unwrap is safe: the buffer is not empty, so head exists
                    maybe_node = unsafe { maybe_node.unwrap().as_ref() }.next;
                    block_begin_offset = 0;
                }

                let target_offset = block_begin_offset + self.len();
                // number of IoSlices written so far
                let mut cnt = 0;
                // byte offset of the current block, measured from the start
                // of the first block
                let mut current_offset = 0;

                while let Some(node) = maybe_node {
                    if cnt >= dst.len() {
                        // no room left in dst
                        break;
                    }

                    let block_end_offset = BLOCK_CAP.min(target_offset - current_offset);
                    // an empty chunk means we have reached the end of the data.
                    if block_begin_offset == block_end_offset {
                        break;
                    }

                    // write IoSlice
                    dst[cnt] = IoSlice::new(
                        &unsafe { node.as_ref().block.as_ref() }.get_buf()
                            [block_begin_offset..block_end_offset],
                    );
                    cnt += 1;

                    // advance to the next node; its data starts at offset 0,
                    // one full block further from the start
                    maybe_node = unsafe { node.as_ref() }.next;
                    block_begin_offset = 0;
                    current_offset += BLOCK_CAP;
                }
                cnt
            }
        }
    };
}

impl_buf!(BufferMut);
impl_buf!(Buffer);

unsafe impl BufMut for BufferMut {
    #[inline]
    fn remaining_mut(&self) -> usize {
        // the buffer grows on demand, so writable space is bounded
        // only by the maximum representable total length
        usize::MAX - self.len()
    }

    #[inline]
    unsafe fn advance_mut(&mut self, cnt: usize) {
        self.advance_mut(cnt)
    }

    #[inline]
    fn chunk_mut(&mut self) -> &mut bytes::buf::UninitSlice {
        if self.cap == self.len {
            // out of capacity: grow by linking a fresh block
            self.reserve_block();
        }
        if self.write_offset == BLOCK_CAP {
            // The current tail block is full, so move to the next one.
            // tail must be Some: after the reserve above, capacity exceeds
            // len, and since this block has no free space, a next block
            // must exist.
            self.tail = unsafe { self.tail.unwrap().as_ref() }.next;
            self.write_offset = 0;
        }
        // current tail must be Some.
        let slice = &mut unsafe { self.tail.unwrap().as_mut().block.as_mut() }.get_mut_buf()
            [self.write_offset..];
        unsafe { bytes::buf::UninitSlice::from_raw_parts_mut(slice.as_mut_ptr(), slice.len()) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn chunk_vectored() {
        const SIZE: usize = BLOCK_CAP * 5 / 2; // 2.5 blocks

        let mut b = BufferMut::with_capacity(BLOCK_CAP * 3);
        unsafe { b.set_len(SIZE) };
        b.advance(10);

        // pre-initialized with empty slices; avoids set_len on
        // uninitialized memory, which is undefined behavior
        let mut v = vec![IoSlice::new(&[]); 4];
        assert_eq!(b.chunks_vectored(&mut v), 3);
        assert_eq!(v[0].len(), BLOCK_CAP - 10);
        assert_eq!(v[1].len(), BLOCK_CAP);
        assert_eq!(v[2].len(), SIZE - BLOCK_CAP * 2);
    }
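
    // A sketch of how the vectored chunks fit together: taken as a whole
    // they cover exactly the readable bytes, in block order. This uses only
    // APIs exercised above; the data pattern is arbitrary.
    #[test]
    fn chunks_vectored_covers_remaining() {
        let mut b = BufferMut::new();
        for i in 0..BLOCK_CAP * 2 + 7 {
            b.put_u8((i % 251) as u8);
        }

        let mut v = vec![IoSlice::new(&[]); 4];
        let n = b.chunks_vectored(&mut v);
        // two full blocks plus a partial third one: three slices
        assert_eq!(n, 3);
        let total: usize = v[..n].iter().map(|s| s.len()).sum();
        assert_eq!(total, b.remaining());
    }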

    #[test]
    fn empty_chunk_vectored() {
        // pre-initialized with empty slices (see chunk_vectored above)
        let mut v = vec![IoSlice::new(&[]); 4];

        // empty
        let b = BufferMut::new();
        assert!(b.is_empty());
        assert_eq!(b.chunks_vectored(&mut v), 0);

        // empty and used
        let mut b = BufferMut::with_capacity(BLOCK_CAP);
        unsafe { b.set_len(BLOCK_CAP) };
        b.advance(BLOCK_CAP);
        assert!(b.is_empty());
        assert_eq!(b.chunks_vectored(&mut v), 0);

        // empty but has more capacity
        let mut b = BufferMut::with_capacity(BLOCK_CAP);
        unsafe { b.set_len(BLOCK_CAP) };
        b.advance(BLOCK_CAP);
        b.reserve_block();
        assert!(b.is_empty());
        assert_eq!(b.chunks_vectored(&mut v), 0);
    }

    #[test]
    fn advance_mut() {
        let mut b = BufferMut::with_capacity(BLOCK_CAP * 2);
        unsafe { b.advance_mut(BLOCK_CAP + 1) };
        assert_eq!(b.write_offset, 1);

        // advancing to an exact block boundary leaves write_offset at
        // BLOCK_CAP; moving to the next block is deferred to chunk_mut
        let mut b = BufferMut::with_capacity(BLOCK_CAP * 2);
        unsafe { b.advance_mut(BLOCK_CAP * 2) };
        assert_eq!(b.write_offset, BLOCK_CAP);
    }

    #[test]
    fn chunk_mut() {
        let mut b = BufferMut::new();
        let slice = b.chunk_mut();
        assert_eq!(slice.len(), BLOCK_CAP);

        b.put_u8(1);
        let slice = b.chunk_mut();
        assert_eq!(slice.len(), BLOCK_CAP - 1);
    }
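
    // A sketch of the low-level write pattern behind put_u8 and friends:
    // fill the spare tail chunk, then commit the bytes with advance_mut.
    #[test]
    fn write_via_chunk_mut() {
        let mut b = BufferMut::new();
        let data = [1u8, 2, 3, 4];

        let chunk = b.chunk_mut();
        assert!(chunk.len() >= data.len());
        for (i, &byte) in data.iter().enumerate() {
            chunk.write_byte(i, byte);
        }
        // SAFETY: the first data.len() bytes were just initialized above
        unsafe { b.advance_mut(data.len()) };

        assert_eq!(b.len(), data.len());
        assert_eq!(b.chunk(), &data);
    }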

    #[test]
    fn put_get() {
        let mut b = BufferMut::new();
        b.reserve(100);
        b.put_u16(0xabcd);

        assert_eq!(b.len(), 2);
        assert_eq!(b.chunk(), &[0xab, 0xcd]);
        assert_eq!(b.get_u16(), 0xabcd);
        assert_eq!(b.len(), 0);
    }

    #[test]
    fn put_get_across_blocks() {
        let mut b = BufferMut::new();
        while b.len() < BLOCK_CAP - 1 {
            b.put_u8(0xff);
        }
        assert_eq!(b.len(), BLOCK_CAP - 1);
        b.advance(b.len());
        assert!(b.is_empty());

        b.put_u32(0xabcd1234);
        assert_eq!(b.len(), 4);
        assert_eq!(b.get_u32(), 0xabcd1234);
    }

    #[test]
    fn put_get_big() {
        const LOOPS: usize = 100000;
        let mut b = BufferMut::new();
        for _ in 0..LOOPS {
            b.put_u64(0xdeadbeefcafebabe);
        }
        for _ in 0..LOOPS {
            assert_eq!(b.get_u64(), 0xdeadbeefcafebabe);
        }
    }
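
    // remaining_mut shrinks as the buffer grows, matching the
    // usize::MAX - len() definition in the BufMut impl above.
    #[test]
    fn remaining_mut_tracks_len() {
        let mut b = BufferMut::new();
        assert_eq!(b.remaining_mut(), usize::MAX);
        b.put_u8(0);
        assert_eq!(b.remaining_mut(), usize::MAX - 1);
    }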
}