use std::{
cmp,
mem::{self, MaybeUninit},
ptr, slice,
};
use super::panic_advance;
/// A trait for values into which bytes can be written sequentially.
///
/// Mirrors the `bytes::BufMut` contract: [`chunk_mut`](Self::chunk_mut)
/// exposes the next writable (possibly uninitialized) region,
/// [`advance_mut`](Self::advance_mut) commits bytes written into it, and
/// [`remaining_mut`](Self::remaining_mut) reports how many more bytes may
/// be written in total. All `put_*` convenience methods return the number
/// of bytes written.
///
/// # Safety
///
/// Implementations must ensure `chunk_mut` returns valid writable memory
/// and that `advance_mut(cnt)` is sound once the first `cnt` bytes of that
/// chunk have been initialized.
pub unsafe trait BtMut {
    /// Returns the number of bytes that can still be written to `self`.
    fn remaining_mut(&self) -> usize;

    /// Advances the write cursor by `cnt` bytes.
    ///
    /// # Safety
    ///
    /// The caller must have initialized the next `cnt` bytes returned by
    /// [`chunk_mut`](Self::chunk_mut) before calling this.
    unsafe fn advance_mut(&mut self, cnt: usize);

    /// Returns a mutable slice of the next writable region.
    ///
    /// The returned memory may be uninitialized, hence `MaybeUninit<u8>`.
    fn chunk_mut(&mut self) -> &mut [MaybeUninit<u8>];

    /// Returns `true` if at least one more byte can be written.
    fn has_remaining_mut(&self) -> bool {
        self.remaining_mut() > 0
    }

    /// Transfers all remaining bytes from `src` into `self`, returning the
    /// number of bytes copied.
    ///
    /// # Panics
    ///
    /// Panics if `self` cannot hold `src.remaining()` more bytes.
    fn put<T: super::Bt>(&mut self, src: &mut T) -> usize
    where
        Self: Sized,
    {
        // Message kept consistent with `inner_put_slice` for easier debugging.
        assert!(
            self.remaining_mut() >= src.remaining(),
            "buffer overflow; remaining = {}; src = {}",
            self.remaining_mut(),
            src.remaining()
        );
        let len = src.remaining();
        while src.has_remaining() {
            let l;
            // SAFETY: we copy at most `min(s.len(), d.len())` bytes between
            // two non-overlapping buffers (`src` and `self` are distinct
            // objects), staying within both slices' bounds.
            unsafe {
                let s = src.chunk();
                let d = self.chunk_mut();
                l = cmp::min(s.len(), d.len());
                ptr::copy_nonoverlapping(s.as_ptr(), d.as_mut_ptr() as *mut u8, l);
            }
            src.advance(l);
            // SAFETY: exactly `l` bytes of the current chunk were initialized
            // by the copy above.
            unsafe {
                self.advance_mut(l);
            }
        }
        len
    }

    /// Shared implementation of [`put_slice`](Self::put_slice): copies all
    /// of `src` into `self`, chunk by chunk, returning `src.len()`.
    ///
    /// # Panics
    ///
    /// Panics if `self` cannot hold `src.len()` more bytes.
    fn inner_put_slice(&mut self, src: &[u8]) -> usize {
        let mut off = 0;
        assert!(
            self.remaining_mut() >= src.len(),
            "buffer overflow; remaining = {}; src = {}",
            self.remaining_mut(),
            src.len()
        );
        while off < src.len() {
            let cnt;
            // SAFETY: `cnt <= dst.len()` and `cnt <= src.len() - off`, so
            // both pointers stay in bounds; `src` is a shared borrow and
            // `dst` a unique one, so the ranges cannot overlap.
            unsafe {
                let dst = self.chunk_mut();
                cnt = cmp::min(dst.len(), src.len() - off);
                ptr::copy_nonoverlapping(src[off..].as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
                off += cnt;
            }
            // SAFETY: exactly `cnt` bytes of the chunk were just initialized.
            unsafe {
                self.advance_mut(cnt);
            }
        }
        src.len()
    }

    /// Writes the whole of `src` into `self`, returning `src.len()`.
    fn put_slice(&mut self, src: &[u8]) -> usize {
        self.inner_put_slice(src)
    }

    /// Writes `cnt` copies of the byte `val`, returning `cnt`.
    fn put_bytes(&mut self, val: u8, cnt: usize) -> usize {
        for _ in 0..cnt {
            self.put_u8(val);
        }
        cnt
    }

    /// Writes a bool as a single byte (`1` for true, `0` for false).
    fn put_bool(&mut self, b: bool) -> usize {
        let src = [u8::from(b)];
        self.put_slice(&src);
        1
    }

    /// Writes a single unsigned byte.
    fn put_u8(&mut self, n: u8) -> usize {
        let src = [n];
        self.put_slice(&src);
        1
    }

    /// Writes a single signed byte.
    fn put_i8(&mut self, n: i8) -> usize {
        let src = [n as u8];
        self.put_slice(&src);
        1
    }

    /// Writes a `u16` in big-endian byte order.
    fn put_u16(&mut self, n: u16) -> usize {
        self.put_slice(&n.to_be_bytes());
        2
    }

    /// Writes a `u16` in little-endian byte order.
    fn put_u16_le(&mut self, n: u16) -> usize {
        self.put_slice(&n.to_le_bytes());
        2
    }

    /// Writes a `u16` in native-endian byte order.
    fn put_u16_ne(&mut self, n: u16) -> usize {
        self.put_slice(&n.to_ne_bytes());
        2
    }

    /// Writes an `i16` in big-endian byte order.
    fn put_i16(&mut self, n: i16) -> usize {
        self.put_slice(&n.to_be_bytes());
        2
    }

    /// Writes an `i16` in little-endian byte order.
    fn put_i16_le(&mut self, n: i16) -> usize {
        self.put_slice(&n.to_le_bytes());
        2
    }

    /// Writes an `i16` in native-endian byte order.
    fn put_i16_ne(&mut self, n: i16) -> usize {
        self.put_slice(&n.to_ne_bytes());
        2
    }

    /// Writes a `u32` in big-endian byte order.
    fn put_u32(&mut self, n: u32) -> usize {
        self.put_slice(&n.to_be_bytes());
        4
    }

    /// Writes a `u32` in little-endian byte order.
    fn put_u32_le(&mut self, n: u32) -> usize {
        self.put_slice(&n.to_le_bytes());
        4
    }

    /// Writes a `u32` in native-endian byte order.
    fn put_u32_ne(&mut self, n: u32) -> usize {
        self.put_slice(&n.to_ne_bytes());
        4
    }

    /// Writes an `i32` in big-endian byte order.
    fn put_i32(&mut self, n: i32) -> usize {
        self.put_slice(&n.to_be_bytes());
        4
    }

    /// Writes an `i32` in little-endian byte order.
    fn put_i32_le(&mut self, n: i32) -> usize {
        self.put_slice(&n.to_le_bytes());
        4
    }

    /// Writes an `i32` in native-endian byte order.
    fn put_i32_ne(&mut self, n: i32) -> usize {
        self.put_slice(&n.to_ne_bytes());
        4
    }

    /// Writes a `u64` in big-endian byte order.
    fn put_u64(&mut self, n: u64) -> usize {
        self.put_slice(&n.to_be_bytes());
        8
    }

    /// Writes a `u64` in little-endian byte order.
    fn put_u64_le(&mut self, n: u64) -> usize {
        self.put_slice(&n.to_le_bytes());
        8
    }

    /// Writes a `u64` in native-endian byte order.
    fn put_u64_ne(&mut self, n: u64) -> usize {
        self.put_slice(&n.to_ne_bytes());
        8
    }

    /// Writes an `i64` in big-endian byte order.
    fn put_i64(&mut self, n: i64) -> usize {
        self.put_slice(&n.to_be_bytes());
        8
    }

    /// Writes an `i64` in little-endian byte order.
    fn put_i64_le(&mut self, n: i64) -> usize {
        self.put_slice(&n.to_le_bytes());
        8
    }

    /// Writes an `i64` in native-endian byte order.
    fn put_i64_ne(&mut self, n: i64) -> usize {
        self.put_slice(&n.to_ne_bytes());
        8
    }

    /// Writes a `u128` in big-endian byte order.
    fn put_u128(&mut self, n: u128) -> usize {
        self.put_slice(&n.to_be_bytes());
        16
    }

    /// Writes a `u128` in little-endian byte order.
    fn put_u128_le(&mut self, n: u128) -> usize {
        self.put_slice(&n.to_le_bytes());
        16
    }

    /// Writes a `u128` in native-endian byte order.
    fn put_u128_ne(&mut self, n: u128) -> usize {
        self.put_slice(&n.to_ne_bytes());
        16
    }

    /// Writes an `i128` in big-endian byte order.
    fn put_i128(&mut self, n: i128) -> usize {
        self.put_slice(&n.to_be_bytes());
        16
    }

    /// Writes an `i128` in little-endian byte order.
    fn put_i128_le(&mut self, n: i128) -> usize {
        self.put_slice(&n.to_le_bytes());
        16
    }

    /// Writes an `i128` in native-endian byte order.
    fn put_i128_ne(&mut self, n: i128) -> usize {
        self.put_slice(&n.to_ne_bytes());
        16
    }

    /// Writes the low `nbytes` bytes of `n` in big-endian byte order.
    ///
    /// # Panics
    ///
    /// Panics if `nbytes > 8`. (Previously this failed with an opaque
    /// arithmetic-overflow/slice-index panic; the message is now explicit.)
    fn put_uint(&mut self, n: u64, nbytes: usize) -> usize {
        assert!(
            nbytes <= mem::size_of::<u64>(),
            "put_uint: nbytes ({}) exceeds size of u64 ({})",
            nbytes,
            mem::size_of::<u64>()
        );
        self.put_slice(&n.to_be_bytes()[mem::size_of::<u64>() - nbytes..]);
        nbytes
    }

    /// Writes the low `nbytes` bytes of `n` in little-endian byte order.
    fn put_uint_le(&mut self, n: u64, nbytes: usize) -> usize {
        self.put_slice(&n.to_le_bytes()[0..nbytes]);
        nbytes
    }

    /// Writes the low `nbytes` bytes of `n` in native-endian byte order.
    fn put_uint_ne(&mut self, n: u64, nbytes: usize) -> usize {
        if cfg!(target_endian = "big") {
            self.put_uint(n, nbytes)
        } else {
            self.put_uint_le(n, nbytes)
        }
    }

    /// Writes the low `nbytes` bytes of `n` in big-endian byte order.
    ///
    /// # Panics
    ///
    /// Panics if `nbytes > 8`. (Previously this failed with an opaque
    /// arithmetic-overflow/slice-index panic; the message is now explicit.)
    fn put_int(&mut self, n: i64, nbytes: usize) -> usize {
        assert!(
            nbytes <= mem::size_of::<i64>(),
            "put_int: nbytes ({}) exceeds size of i64 ({})",
            nbytes,
            mem::size_of::<i64>()
        );
        self.put_slice(&n.to_be_bytes()[mem::size_of::<i64>() - nbytes..]);
        nbytes
    }

    /// Writes the low `nbytes` bytes of `n` in little-endian byte order.
    fn put_int_le(&mut self, n: i64, nbytes: usize) -> usize {
        self.put_slice(&n.to_le_bytes()[0..nbytes]);
        nbytes
    }

    /// Writes the low `nbytes` bytes of `n` in native-endian byte order.
    fn put_int_ne(&mut self, n: i64, nbytes: usize) -> usize {
        if cfg!(target_endian = "big") {
            self.put_int(n, nbytes)
        } else {
            self.put_int_le(n, nbytes)
        }
    }

    /// Writes an `f32` (IEEE 754 bit pattern) in big-endian byte order.
    ///
    /// Fixed to return the byte count (4) like every other `put_*` method;
    /// it previously returned `()`, inconsistent with its siblings.
    fn put_f32(&mut self, n: f32) -> usize {
        self.put_u32(n.to_bits());
        4
    }

    /// Writes an `f32` (IEEE 754 bit pattern) in little-endian byte order.
    fn put_f32_le(&mut self, n: f32) -> usize {
        self.put_u32_le(n.to_bits());
        4
    }

    /// Writes an `f32` (IEEE 754 bit pattern) in native-endian byte order.
    fn put_f32_ne(&mut self, n: f32) -> usize {
        self.put_u32_ne(n.to_bits());
        4
    }

    /// Writes an `f64` (IEEE 754 bit pattern) in big-endian byte order.
    fn put_f64(&mut self, n: f64) -> usize {
        self.put_u64(n.to_bits());
        8
    }

    /// Writes an `f64` (IEEE 754 bit pattern) in little-endian byte order.
    fn put_f64_le(&mut self, n: f64) -> usize {
        self.put_u64_le(n.to_bits());
        8
    }

    /// Writes an `f64` (IEEE 754 bit pattern) in native-endian byte order.
    fn put_f64_ne(&mut self, n: f64) -> usize {
        self.put_u64_ne(n.to_bits());
        8
    }
}
// SAFETY: `chunk_mut` always returns the spare capacity of the vector
// (growing it if full), and `advance_mut` only extends `len` after
// verifying the bytes lie within the allocated capacity.
unsafe impl BtMut for Vec<u8> {
    /// A `Vec` can keep growing until its length reaches `isize::MAX`
    /// bytes, the maximum allocation size Rust permits.
    #[inline]
    fn remaining_mut(&self) -> usize {
        // `isize::MAX` replaces the deprecated `core::isize::MAX` constant.
        isize::MAX as usize - self.len()
    }

    #[inline]
    unsafe fn advance_mut(&mut self, cnt: usize) {
        let len = self.len();
        let remaining = self.capacity() - len;
        if remaining < cnt {
            panic_advance(cnt, remaining);
        }
        // SAFETY: `len + cnt <= capacity` was checked above, and the caller
        // guarantees (per the trait contract) that these bytes are initialized.
        self.set_len(len + cnt);
    }

    #[inline]
    fn chunk_mut(&mut self) -> &mut [MaybeUninit<u8>] {
        // Guarantee the returned chunk is never empty so `put`/`put_slice`
        // loops make progress: grow when length has caught up to capacity.
        if self.capacity() == self.len() {
            self.reserve(64);
        }
        let cap = self.capacity();
        let len = self.len();
        let ptr = self.as_mut_ptr();
        // SAFETY: `ptr.add(len)..ptr.add(cap)` is the vector's own spare
        // capacity — allocated, uniquely borrowed, and safe to expose as
        // uninitialized bytes.
        unsafe {
            slice::from_raw_parts_mut(ptr.add(len) as *mut MaybeUninit<u8>, cap - len)
        }
    }
}
// SAFETY: all calls are forwarded to the underlying `T`, whose own `BtMut`
// implementation upholds the trait's invariants.
unsafe impl<T: BtMut> BtMut for &mut T {
    /// Delegates to the wrapped buffer.
    fn remaining_mut(&self) -> usize {
        (**self).remaining_mut()
    }

    /// Delegates to the wrapped buffer.
    ///
    /// # Safety
    ///
    /// Same contract as [`BtMut::advance_mut`] on the underlying `T`.
    unsafe fn advance_mut(&mut self, cnt: usize) {
        (**self).advance_mut(cnt)
    }

    /// Delegates to the wrapped buffer.
    fn chunk_mut(&mut self) -> &mut [MaybeUninit<u8>] {
        (**self).chunk_mut()
    }
}