use core::fmt;
use core::mem::MaybeUninit;
use crate::allocate::Allocator;
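
/// A buffer into which bytes are written, tracking a filled prefix and how
/// far the underlying memory is known to be initialized.
///
/// Invariant: `filled <= initialized <= buf.len()`. The range `..filled`
/// holds produced output, `filled..initialized` is initialized but unused,
/// and `initialized..` may be uninitialized memory.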
pub struct ReadBuf<'a> {
buf: &'a mut [MaybeUninit<u8>],
filled: usize,
initialized: usize,
}
impl<'a> ReadBuf<'a> {
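    /// Creates a `ReadBuf` backed by a fully initialized byte slice. The
    /// buffer starts empty (`filled == 0`), but all of it counts as
    /// initialized.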
#[inline]
pub fn new(buf: &'a mut [u8]) -> ReadBuf<'a> {
        let initialized = buf.len();
        // SAFETY: the input slice is fully initialized, and `ReadBuf`'s safe
        // API never writes uninitialized bytes into the buffer.
        let buf = unsafe { slice_to_uninit_mut(buf) };
ReadBuf {
buf,
filled: 0,
initialized,
}
}
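    /// Returns a pointer to the start of the unfilled region.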
#[inline]
pub fn next_out(&mut self) -> *mut MaybeUninit<u8> {
self.buf[self.filled..].as_mut_ptr()
}
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut MaybeUninit<u8> {
self.buf.as_mut_ptr()
}
#[inline]
pub fn capacity(&self) -> usize {
self.buf.len()
}
#[inline]
pub fn len(&self) -> usize {
self.filled
}
#[inline]
pub fn is_empty(&self) -> bool {
self.filled == 0
}
#[inline]
pub fn filled(&self) -> &[u8] {
        let slice = &self.buf[..self.filled];
        // SAFETY: `filled <= initialized`, so this range is initialized.
        unsafe { slice_assume_init(slice) }
}
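    /// Returns the entire backing buffer, including any uninitialized tail.
    ///
    /// # Safety
    ///
    /// The caller must not de-initialize previously initialized bytes (i.e.
    /// anything in the `..initialized` range).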
#[inline]
pub unsafe fn inner_mut(&mut self) -> &mut [MaybeUninit<u8>] {
self.buf
}
#[inline]
pub fn remaining(&self) -> usize {
self.capacity() - self.filled
}
#[inline]
pub fn clear(&mut self) {
self.filled = 0;
}
#[inline]
#[track_caller]
pub fn advance(&mut self, n: usize) {
let new = self.filled.checked_add(n).expect("filled overflow");
self.set_filled(new);
}
#[inline]
#[track_caller]
pub fn set_filled(&mut self, n: usize) {
assert!(
n <= self.initialized,
"filled must not become larger than initialized"
);
self.filled = n;
}
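    /// Asserts that the first `n` unfilled bytes of the buffer are initialized.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `n` bytes past `filled` have in fact been
    /// initialized, e.g. by writing through `next_out`.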
#[inline]
pub unsafe fn assume_init(&mut self, n: usize) {
self.initialized = Ord::max(self.initialized, self.filled + n);
}
#[track_caller]
pub fn push(&mut self, byte: u8) {
assert!(
self.remaining() >= 1,
"read_buf is full ({} bytes)",
self.capacity()
);
self.buf[self.filled] = MaybeUninit::new(byte);
self.initialized = Ord::max(self.initialized, self.filled + 1);
self.filled += 1;
}
#[inline(always)]
#[track_caller]
pub fn extend(&mut self, buf: &[u8]) {
assert!(
self.remaining() >= buf.len(),
"buf.len() must fit in remaining()"
);
self.buf[self.filled..][..buf.len()].copy_from_slice(slice_to_uninit(buf));
let end = self.filled + buf.len();
self.initialized = Ord::max(self.initialized, end);
self.filled = end;
}
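    /// Appends `length` bytes to the filled region, copied from the bytes
    /// starting `offset_from_end` bytes before its current end (an LZ77-style
    /// match copy). If `length > offset_from_end`, the source overlaps the
    /// destination and the pattern repeats: with `ab` filled, copying 4 bytes
    /// at offset 2 produces `ababab`.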
#[inline(always)]
pub fn copy_match(&mut self, offset_from_end: usize, length: usize) {
        // Use the widest chunk type the current CPU supports; fall back to
        // plain 64-bit copies otherwise.
        #[cfg(target_arch = "x86_64")]
if crate::cpu_features::is_enabled_avx512() {
return self.copy_match_help::<core::arch::x86_64::__m512i>(offset_from_end, length);
}
#[cfg(target_arch = "x86_64")]
if crate::cpu_features::is_enabled_avx2() {
return self.copy_match_help::<core::arch::x86_64::__m256i>(offset_from_end, length);
}
#[cfg(target_arch = "x86_64")]
if crate::cpu_features::is_enabled_sse() {
return self.copy_match_help::<core::arch::x86_64::__m128i>(offset_from_end, length);
}
self.copy_match_help::<u64>(offset_from_end, length)
}
    fn copy_match_help<C: Chunk>(&mut self, offset_from_end: usize, length: usize) {
        let current = self.filled;

        let start = current.checked_sub(offset_from_end).expect("in bounds");
        let end = start.checked_add(length).expect("in bounds");

        if end > current {
            // The source range extends into the destination: the copy must run
            // front to back so that bytes written earlier in the copy feed the
            // later ones (the repeating-pattern case of an LZ77 match).
            if offset_from_end == 1 {
                // A run of a single repeated byte; `fill` beats the loop below.
                let element = self.buf[current - 1];
                self.buf[current..][..length].fill(element);
            } else {
                for i in 0..length {
                    self.buf[current + i] = self.buf[start + i];
                }
            }
        } else {
            // Source and destination are disjoint; copy in wide chunks.
            Self::copy_chunked_within::<C>(self.buf, current, start, end)
        }

        // SAFETY: the `length` bytes after the old `filled` were just written.
        unsafe { self.assume_init(length) };
        self.advance(length);
    }
    #[inline(always)]
    fn copy_chunked_within<C: Chunk>(
        buf: &mut [MaybeUninit<u8>],
        current: usize,
        start: usize,
        end: usize,
    ) {
        // The chunked copy may write up to one whole chunk past the
        // `end - start` bytes it is asked for; only take that path if the
        // overshoot still fits within the buffer.
        if (end - start).next_multiple_of(core::mem::size_of::<C>()) <= (buf.len() - current) {
            // SAFETY: the bounds check above guarantees that the source and
            // destination, including the chunk-sized overshoot, stay in `buf`.
            unsafe {
                Self::copy_chunk_unchecked::<C>(
                    buf.as_ptr().add(start),
                    buf.as_mut_ptr().add(current),
                    buf.as_ptr().add(end),
                )
            }
        } else {
            buf.copy_within(start..end, current);
        }
    }
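    /// # Safety
    ///
    /// Copies whole chunks of `size_of::<C>()` bytes and may overshoot `end`
    /// by up to one chunk, so `src..end` rounded up to a chunk multiple, and
    /// the destination range of the same size, must both be in bounds.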
#[inline(always)]
unsafe fn copy_chunk_unchecked<C: Chunk>(
mut src: *const MaybeUninit<u8>,
mut dst: *mut MaybeUninit<u8>,
end: *const MaybeUninit<u8>,
) {
while src < end {
let chunk = C::load_chunk(src);
C::store_chunk(dst, chunk);
src = src.add(core::mem::size_of::<C>());
dst = dst.add(core::mem::size_of::<C>());
}
}
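    /// Allocates an empty `ReadBuf` of capacity `len` in `alloc`; returns
    /// `None` if the allocation fails.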
pub(crate) fn new_in(alloc: &Allocator<'a>, len: usize) -> Option<Self> {
let buf = alloc.allocate_slice::<u8>(len)?;
Some(Self {
buf,
filled: 0,
initialized: 0,
})
}
pub(crate) fn clone_in(&self, alloc: &Allocator<'a>) -> Option<Self> {
let mut clone = Self::new_in(alloc, self.buf.len())?;
clone.buf.copy_from_slice(self.buf);
clone.filled = self.filled;
clone.initialized = self.initialized;
Some(clone)
}
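    /// # Safety
    ///
    /// The buffer must have been allocated in `alloc` (via `new_in` or
    /// `clone_in`), and must not be used again after this call.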
pub(crate) unsafe fn drop_in(&mut self, alloc: &Allocator<'a>) {
if !self.buf.is_empty() {
let buf = core::mem::take(&mut self.buf);
alloc.deallocate(buf.as_mut_ptr(), buf.len());
}
}
}
impl fmt::Debug for ReadBuf<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ReadBuf")
.field("filled", &self.filled)
.field("initialized", &self.initialized)
.field("capacity", &self.capacity())
.finish()
}
}
fn slice_to_uninit(slice: &[u8]) -> &[MaybeUninit<u8>] {
    // SAFETY: `MaybeUninit<u8>` and `u8` have the same layout, and a shared
    // slice cannot be used to de-initialize the bytes.
    unsafe { &*(slice as *const [u8] as *const [MaybeUninit<u8>]) }
}

/// # Safety
///
/// The caller must not write uninitialized values into the returned slice,
/// as that would de-initialize the underlying `&mut [u8]`.
unsafe fn slice_to_uninit_mut(slice: &mut [u8]) -> &mut [MaybeUninit<u8>] {
    &mut *(slice as *mut [u8] as *mut [MaybeUninit<u8>])
}

/// # Safety
///
/// Every element of `slice` must be initialized.
unsafe fn slice_assume_init(slice: &[MaybeUninit<u8>]) -> &[u8] {
    &*(slice as *const [MaybeUninit<u8>] as *const [u8])
}
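
/// A unit of copying: plain `u64` as the portable fallback, or a SIMD
/// register type on x86_64. All loads and stores are unaligned.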
trait Chunk {
unsafe fn load_chunk(from: *const MaybeUninit<u8>) -> Self;
unsafe fn store_chunk(out: *mut MaybeUninit<u8>, chunk: Self);
}
impl Chunk for u64 {
    unsafe fn load_chunk(from: *const MaybeUninit<u8>) -> Self {
        // Paired with `to_le_bytes` in `store_chunk`, the `to_le` makes the
        // load/store pair a plain byte copy on both little- and big-endian
        // targets.
        u64::to_le(core::ptr::read_unaligned(from.cast()))
}
unsafe fn store_chunk(out: *mut MaybeUninit<u8>, chunk: Self) {
core::ptr::copy_nonoverlapping(
chunk.to_le_bytes().as_ptr().cast(),
out,
core::mem::size_of::<Self>(),
)
}
}
#[cfg(target_arch = "x86_64")]
impl Chunk for core::arch::x86_64::__m128i {
#[inline(always)]
unsafe fn load_chunk(from: *const MaybeUninit<u8>) -> Self {
core::arch::x86_64::_mm_loadu_si128(from.cast())
}
#[inline(always)]
unsafe fn store_chunk(out: *mut MaybeUninit<u8>, chunk: Self) {
core::arch::x86_64::_mm_storeu_si128(out as *mut Self, chunk);
}
}
#[cfg(target_arch = "x86_64")]
impl Chunk for core::arch::x86_64::__m256i {
#[inline(always)]
unsafe fn load_chunk(from: *const MaybeUninit<u8>) -> Self {
core::arch::x86_64::_mm256_loadu_si256(from.cast())
}
#[inline(always)]
unsafe fn store_chunk(out: *mut MaybeUninit<u8>, chunk: Self) {
core::arch::x86_64::_mm256_storeu_si256(out as *mut Self, chunk);
}
}
#[cfg(target_arch = "x86_64")]
impl Chunk for core::arch::x86_64::__m512i {
#[inline(always)]
unsafe fn load_chunk(from: *const MaybeUninit<u8>) -> Self {
core::ptr::read_unaligned(from.cast())
}
#[inline(always)]
unsafe fn store_chunk(out: *mut MaybeUninit<u8>, chunk: Self) {
core::ptr::write_unaligned(out.cast(), chunk)
}
}
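
// An illustrative test of the API above (a minimal sketch, not part of the
// original module): fill with `extend`, then replay an overlapping
// LZ77-style match with `copy_match`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn extend_then_copy_match() {
        let mut storage = [0u8; 16];
        let mut buf = ReadBuf::new(&mut storage);

        assert_eq!(buf.capacity(), 16);
        assert!(buf.is_empty());

        buf.extend(b"ab");
        assert_eq!(buf.filled(), b"ab");

        // length (4) > offset_from_end (2): the two-byte pattern repeats.
        buf.copy_match(2, 4);
        assert_eq!(buf.filled(), b"ababab");

        buf.push(b'!');
        assert_eq!(buf.filled(), b"ababab!");
        assert_eq!(buf.remaining(), 16 - 7);
    }
}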