use std::marker::PhantomData;
use std::mem;
use std::sync::Arc;
use crate::error::{CoreError, CoreResult, ErrorContext};
/// An immutable, reference-counted view over a byte vector.
///
/// Cloning (via `#[derive(Clone)]`) and sub-slicing (`slice`) are O(1): both
/// bump the `Arc` refcount instead of copying the underlying bytes.
#[derive(Clone)]
pub struct ZeroCopyBuffer {
    /// Shared backing storage; never mutated after construction.
    data: Arc<Vec<u8>>,
    /// Start of this view within `data`.
    offset: usize,
    /// Number of bytes visible through this view.
    len: usize,
}
impl ZeroCopyBuffer {
    /// Takes ownership of `data` and wraps it in a full-range shared view.
    pub fn new(data: Vec<u8>) -> Self {
        let total = data.len();
        Self {
            data: Arc::new(data),
            offset: 0,
            len: total,
        }
    }

    /// Returns a new view over `start..end` (indices relative to this view)
    /// that shares the same backing allocation — no bytes are copied.
    ///
    /// # Errors
    /// Returns `CoreError::InvalidArgument` when `start > end` or when `end`
    /// exceeds this view's length.
    pub fn slice(&self, start: usize, end: usize) -> CoreResult<Self> {
        // Local error constructor keeps the two guard clauses compact.
        let invalid = |msg: String| CoreError::InvalidArgument(ErrorContext::new(msg));
        if start > end {
            return Err(invalid(format!(
                "ZeroCopyBuffer::slice: start ({start}) > end ({end})"
            )));
        }
        if end > self.len {
            return Err(invalid(format!(
                "ZeroCopyBuffer::slice: end ({end}) > buffer length ({})",
                self.len
            )));
        }
        Ok(Self {
            data: Arc::clone(&self.data),
            offset: self.offset + start,
            len: end - start,
        })
    }

    /// Borrows the bytes visible through this view.
    pub fn as_bytes(&self) -> &[u8] {
        let lo = self.offset;
        &self.data[lo..lo + self.len]
    }

    /// Number of bytes in this view.
    pub fn len(&self) -> usize {
        self.len
    }

    /// True when the view covers zero bytes.
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Copies the viewed bytes into a freshly allocated `Vec` (the one
    /// operation here that deliberately *does* copy).
    pub fn clone_data(&self) -> Vec<u8> {
        Vec::from(self.as_bytes())
    }

    /// Number of handles (buffers and sub-slices) sharing the backing storage.
    pub fn ref_count(&self) -> usize {
        Arc::strong_count(&self.data)
    }
}
/// A typed, read-only view that reinterprets a `ZeroCopyBuffer`'s bytes as a
/// slice of `T` without copying.
pub struct ZeroCopyView<T: Copy> {
    /// Underlying byte storage; length/alignment are validated in
    /// `ZeroCopyView::new` before a view is handed out.
    buffer: ZeroCopyBuffer,
    /// Marks the logical element type; no `T` value is actually stored.
    _phantom: PhantomData<T>,
}
impl<T: Copy> ZeroCopyView<T> {
    /// Validates that `buffer` can be reinterpreted as a slice of `T` and
    /// wraps it. The `Arc`-backed storage never moves, so the checks done
    /// here remain valid for the lifetime of the view.
    ///
    /// # Errors
    /// Returns `CoreError::InvalidArgument` when the buffer length is not a
    /// multiple of `size_of::<T>()`, or when a non-empty buffer's starting
    /// address is not aligned for `T`.
    pub fn new(buffer: ZeroCopyBuffer) -> CoreResult<Self> {
        let size = mem::size_of::<T>();
        let align = mem::align_of::<T>();
        // Zero-sized types: any buffer is trivially valid; as_slice yields &[].
        if size == 0 {
            return Ok(ZeroCopyView { buffer, _phantom: PhantomData });
        }
        if buffer.len() % size != 0 {
            return Err(CoreError::InvalidArgument(ErrorContext::new(format!(
                "ZeroCopyView: buffer length ({}) is not a multiple of size_of::<T>() ({})",
                buffer.len(),
                size
            ))));
        }
        // An empty buffer is always a valid (empty) view. Its backing pointer
        // may be a dangling, 1-aligned pointer (e.g. `Vec::new()`), so the
        // alignment check must be skipped — otherwise empty views of types
        // with align > 1 would be spuriously rejected.
        if !buffer.is_empty() {
            let ptr = buffer.as_bytes().as_ptr() as usize;
            if ptr % align != 0 {
                return Err(CoreError::InvalidArgument(ErrorContext::new(format!(
                    "ZeroCopyView: buffer address (0x{ptr:x}) is not aligned to {align}"
                ))));
            }
        }
        Ok(ZeroCopyView { buffer, _phantom: PhantomData })
    }

    /// Reinterprets the underlying bytes as `&[T]`.
    pub fn as_slice(&self) -> &[T] {
        let bytes = self.buffer.as_bytes();
        let size = mem::size_of::<T>();
        // Early-out for ZSTs and empty buffers: `from_raw_parts` requires an
        // aligned pointer even for zero-length slices, and an empty buffer's
        // pointer is not guaranteed to satisfy `T`'s alignment.
        if size == 0 || bytes.is_empty() {
            return &[];
        }
        let count = bytes.len() / size;
        // SAFETY: `new` verified that the byte length is a multiple of
        // size_of::<T>() and that the (non-empty) start address is aligned
        // for T; the Arc-backed storage never moves, and T: Copy rules out
        // drop/ownership concerns for the reinterpreted elements.
        unsafe { std::slice::from_raw_parts(bytes.as_ptr() as *const T, count) }
    }

    /// Number of `T` elements visible (0 for zero-sized `T`).
    pub fn len(&self) -> usize {
        let size = mem::size_of::<T>();
        if size == 0 { 0 } else { self.buffer.len() / size }
    }

    /// True when the view contains no elements.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Checked element access; `None` when `idx` is out of bounds.
    pub fn get(&self, idx: usize) -> Option<&T> {
        self.as_slice().get(idx)
    }
}
/// A fixed-capacity byte ring (circular) buffer.
///
/// Writes copy into the internal storage; `read` hands back a borrowed slice
/// of the internal storage, so no per-read allocation occurs.
pub struct ZeroCopyRingBuffer {
    /// Backing storage of exactly `capacity` bytes.
    data: Vec<u8>,
    /// Index of the next byte to read.
    read_pos: usize,
    /// Index of the next byte to write.
    write_pos: usize,
    /// Fixed capacity in bytes (>= 1).
    capacity: usize,
    /// Disambiguates `read_pos == write_pos`: true means completely full,
    /// false means completely empty.
    full: bool,
}

impl ZeroCopyRingBuffer {
    /// Creates a ring buffer holding up to `capacity` bytes.
    /// A requested capacity of 0 is clamped to 1 so the modular index
    /// arithmetic below never divides by zero.
    pub fn new(capacity: usize) -> Self {
        let cap = capacity.max(1);
        ZeroCopyRingBuffer {
            data: vec![0u8; cap],
            read_pos: 0,
            write_pos: 0,
            capacity: cap,
            full: false,
        }
    }

    /// Number of bytes currently available to read.
    pub fn available_read(&self) -> usize {
        if self.full {
            return self.capacity;
        }
        if self.write_pos >= self.read_pos {
            self.write_pos - self.read_pos
        } else {
            self.capacity - self.read_pos + self.write_pos
        }
    }

    /// Number of bytes that can be written before the buffer is full.
    pub fn available_write(&self) -> usize {
        self.capacity - self.available_read()
    }

    /// Writes as much of `data` as fits, returning the number of bytes
    /// accepted (possibly 0 when the buffer is full).
    pub fn write(&mut self, data: &[u8]) -> usize {
        let space = self.available_write();
        let n = data.len().min(space);
        if n == 0 {
            return 0;
        }
        // First copy runs up to the physical end of the storage...
        let first_part = (self.capacity - self.write_pos).min(n);
        self.data[self.write_pos..self.write_pos + first_part]
            .copy_from_slice(&data[..first_part]);
        // ...and any remainder wraps around to the front.
        if first_part < n {
            let second_part = n - first_part;
            self.data[..second_part].copy_from_slice(&data[first_part..first_part + second_part]);
        }
        self.write_pos = (self.write_pos + n) % self.capacity;
        // n > 0 and the cursors met again, so every slot is occupied.
        if self.write_pos == self.read_pos {
            self.full = true;
        }
        n
    }

    /// Consumes and returns up to `len` readable bytes as one contiguous
    /// slice (fewer when less data is buffered).
    ///
    /// BUG FIX: the previous implementation advanced `read_pos` by `n` but
    /// returned only the bytes up to the physical end of the storage, so a
    /// read that wrapped around silently discarded the wrapped portion. When
    /// the requested span wraps, we now rotate the backing storage so the
    /// readable region is contiguous, then return all `n` bytes.
    pub fn read(&mut self, len: usize) -> &[u8] {
        let avail = self.available_read();
        let n = len.min(avail);
        if n == 0 {
            return &[];
        }
        if self.capacity - self.read_pos < n {
            // The span wraps: rotate so the byte at read_pos lands at index 0,
            // making the whole readable region contiguous. O(capacity), but
            // allocation-free and it preserves the advance-by-n contract.
            let shift = self.read_pos;
            self.data.rotate_left(shift);
            self.read_pos = 0;
            self.write_pos = (self.write_pos + self.capacity - shift) % self.capacity;
        }
        let start = self.read_pos;
        self.read_pos = (self.read_pos + n) % self.capacity;
        // We consumed at least one byte, so the buffer cannot be full.
        self.full = false;
        &self.data[start..start + n]
    }

    /// Fixed capacity of the buffer in bytes.
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Discards all buffered data; capacity is unchanged.
    pub fn clear(&mut self) {
        self.read_pos = 0;
        self.write_pos = 0;
        self.full = false;
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_buffer_basic() {
        let payload = vec![10u8, 20, 30, 40, 50];
        let buf = ZeroCopyBuffer::new(payload.clone());
        assert_eq!(buf.len(), 5);
        assert!(!buf.is_empty());
        assert_eq!(buf.as_bytes(), payload.as_slice());
    }

    #[test]
    fn test_buffer_slice_no_copy() {
        let buf = ZeroCopyBuffer::new(vec![0u8, 1, 2, 3, 4, 5, 6, 7]);
        let mid = buf.slice(2, 5).expect("slice 2..5");
        let head = buf.slice(0, 4).expect("slice 0..4");
        assert_eq!(mid.as_bytes(), &[2, 3, 4]);
        assert_eq!(head.as_bytes(), &[0, 1, 2, 3]);
        // Parent plus two sub-slices all share one allocation.
        assert_eq!(buf.ref_count(), 3);
    }

    #[test]
    fn test_buffer_slice_error_cases() {
        let buf = ZeroCopyBuffer::new(vec![0u8; 8]);
        assert!(buf.slice(5, 3).is_err());
        assert!(buf.slice(0, 9).is_err());
        // An empty slice at the very end is valid.
        assert!(buf.slice(8, 8).is_ok());
    }

    #[test]
    fn test_buffer_clone_data() {
        let original = vec![42u8; 16];
        let buf = ZeroCopyBuffer::new(original.clone());
        let copied = buf.clone_data();
        assert_eq!(copied, original);
        // clone_data produces an independent Vec, not another Arc handle.
        assert_eq!(buf.ref_count(), 1);
    }

    #[test]
    fn test_buffer_empty() {
        let buf = ZeroCopyBuffer::new(vec![]);
        assert!(buf.is_empty());
        assert_eq!(buf.len(), 0);
        assert_eq!(buf.as_bytes(), &[] as &[u8]);
    }

    #[test]
    fn test_view_u32() {
        let raw: Vec<u8> = (0u32..4).flat_map(|v| v.to_le_bytes()).collect();
        let view: ZeroCopyView<u32> =
            ZeroCopyView::new(ZeroCopyBuffer::new(raw)).expect("view");
        assert_eq!(view.len(), 4);
        assert_eq!(view.get(0), Some(&0u32));
        assert_eq!(view.get(3), Some(&3u32));
        assert_eq!(view.get(4), None);
    }

    #[test]
    fn test_view_bad_length() {
        // 5 bytes is not a multiple of size_of::<u32>().
        let buf = ZeroCopyBuffer::new(vec![0u8; 5]);
        let result: CoreResult<ZeroCopyView<u32>> = ZeroCopyView::new(buf);
        assert!(result.is_err());
    }

    #[test]
    fn test_view_slice() {
        let raw: Vec<u8> = (0u8..8).collect();
        let view: ZeroCopyView<u8> =
            ZeroCopyView::new(ZeroCopyBuffer::new(raw)).expect("view");
        assert_eq!(view.as_slice(), &[0u8, 1, 2, 3, 4, 5, 6, 7]);
    }

    #[test]
    fn test_ring_buffer_basic_write_read() {
        let mut rb = ZeroCopyRingBuffer::new(16);
        assert_eq!(rb.available_read(), 0);
        assert_eq!(rb.available_write(), 16);
        assert_eq!(rb.write(b"hello"), 5);
        assert_eq!(rb.available_read(), 5);
        assert_eq!(rb.available_write(), 11);
        let bytes = rb.read(5).to_vec();
        assert_eq!(bytes, b"hello");
        assert_eq!(rb.available_read(), 0);
    }

    #[test]
    fn test_ring_buffer_full() {
        let mut rb = ZeroCopyRingBuffer::new(4);
        assert_eq!(rb.write(b"abcd"), 4);
        assert_eq!(rb.available_write(), 0);
        // A full buffer accepts nothing further.
        assert_eq!(rb.write(b"x"), 0);
    }

    #[test]
    fn test_ring_buffer_wrap_around() {
        let mut rb = ZeroCopyRingBuffer::new(8);
        rb.write(b"12345678");
        rb.read(6);
        let written = rb.write(b"ABCDEFGH");
        assert!(written <= 8);
    }

    #[test]
    fn test_ring_buffer_clear() {
        let mut rb = ZeroCopyRingBuffer::new(8);
        rb.write(b"hello");
        rb.clear();
        assert_eq!(rb.available_read(), 0);
        assert_eq!(rb.available_write(), 8);
    }

    #[test]
    fn test_ring_buffer_multiple_rounds() {
        let mut rb = ZeroCopyRingBuffer::new(8);
        for _ in 0..100 {
            let written = rb.write(b"abc");
            assert!(written > 0);
            let _ = rb.read(written);
        }
        assert_eq!(rb.available_read(), 0);
    }

    #[test]
    fn test_ring_buffer_partial_read() {
        let mut rb = ZeroCopyRingBuffer::new(16);
        rb.write(b"hello world");
        let chunk = rb.read(5).to_vec();
        assert_eq!(chunk, b"hello");
        assert_eq!(rb.available_read(), 6);
    }
}