/// Maximum number of valid bits the 64-bit accumulator may hold.
///
/// Capped at 63 rather than 64 so the flush path's `bitbuf >>= bitcount & !7`
/// never shifts a `u64` by its full width (shift-by-64 is an overflow in
/// Rust: panic in debug, masked shift amount in release).
pub(crate) const BITBUF_NBITS: u32 = 63;

/// Returns `true` if `n` more bits are guaranteed to fit in the accumulator
/// immediately after a flush (a flush leaves at most 7 residual bits, so up
/// to `BITBUF_NBITS - 7` = 56 bits can always be added).
///
/// Written as `n <= BITBUF_NBITS - 7` instead of the previous
/// `7 + n <= BITBUF_NBITS`: the addition could wrap for very large `n` in
/// release builds (e.g. `can_buffer(u32::MAX)` wrapped to `6 <= 63` = true)
/// and panic in debug builds. This form cannot overflow.
#[allow(dead_code)]
pub(crate) const fn can_buffer(n: u32) -> bool {
    n <= BITBUF_NBITS - 7
}
/// Little-endian bitstream writer over a caller-provided byte buffer.
///
/// Bits are accumulated LSB-first in `bitbuf` and drained into `buf` in
/// whole bytes by `flush_bits`. Writes that would run past the end of
/// `buf` set `overflow` instead of panicking, so the caller must check
/// `overflow` when the stream is finished.
pub(crate) struct OutputBitstream<'a> {
// Pending bits not yet written to `buf`, packed LSB-first.
pub bitbuf: u64,
// Number of valid low bits currently held in `bitbuf`
// (kept <= BITBUF_NBITS; at most 7 right after a flush).
pub bitcount: u32,
// Next write position (byte index) into `buf`.
pub pos: usize,
// Destination byte buffer owned by the caller.
pub buf: &'a mut [u8],
// Sticky flag: set when any write would exceed `buf`; output is
// truncated from that point on.
pub overflow: bool,
}
impl<'a> OutputBitstream<'a> {
    /// Creates a writer that appends to `buf` starting at byte 0 with an
    /// empty accumulator and the overflow flag cleared.
    pub fn new(buf: &'a mut [u8]) -> Self {
        Self { bitbuf: 0, bitcount: 0, pos: 0, buf, overflow: false }
    }

    /// Appends the low `n` bits of `bits` to the accumulator, LSB-first.
    ///
    /// The caller must ensure the bits fit (see `can_buffer`); the debug
    /// assertion verifies the running count stays within `BITBUF_NBITS`.
    #[inline(always)]
    pub fn add_bits(&mut self, bits: u32, n: u32) {
        self.bitbuf |= (bits as u64) << self.bitcount;
        self.bitcount += n;
        debug_assert!(self.bitcount <= BITBUF_NBITS);
    }

    /// Drains every complete byte from the accumulator into the buffer,
    /// leaving at most 7 pending bits in `bitbuf`.
    #[inline(always)]
    pub fn flush_bits(&mut self) {
        if self.pos + 8 > self.buf.len() {
            // Tail of the buffer: fall back to byte-at-a-time writes with
            // explicit bounds checks; flag overflow once the buffer is full.
            while self.bitcount >= 8 {
                if self.pos >= self.buf.len() {
                    self.overflow = true;
                    return;
                }
                self.buf[self.pos] = self.bitbuf as u8;
                self.pos += 1;
                self.bitcount -= 8;
                self.bitbuf >>= 8;
            }
            return;
        }
        // Fast path: unconditionally store all 8 accumulator bytes (the
        // bytes beyond the complete ones are overwritten by later flushes),
        // then advance by the number of complete bytes only.
        crate::fast_bytes::store_u64_le(self.buf, self.pos, self.bitbuf);
        let whole_bytes = self.bitcount >> 3;
        self.pos += whole_bytes as usize;
        self.bitbuf >>= whole_bytes << 3; // same shift amount as `bitcount & !7`
        self.bitcount &= 7;
    }

    /// Emits one raw byte, or sets `overflow` if the buffer is exhausted.
    #[inline(always)]
    pub fn write_byte(&mut self, b: u8) {
        match self.buf.get_mut(self.pos) {
            Some(slot) => {
                *slot = b;
                self.pos += 1;
            }
            None => self.overflow = true,
        }
    }

    /// Copies `data` verbatim at the current position, or sets `overflow`
    /// if it does not fit. Shared helper for the multi-byte writers.
    #[inline(always)]
    fn put_slice(&mut self, data: &[u8]) {
        let end = self.pos + data.len();
        if end <= self.buf.len() {
            self.buf[self.pos..end].copy_from_slice(data);
            self.pos = end;
        } else {
            self.overflow = true;
        }
    }

    /// Emits `v` as two little-endian bytes.
    #[inline(always)]
    pub fn write_le16(&mut self, v: u16) {
        self.put_slice(&v.to_le_bytes());
    }

    /// Emits `v` as four little-endian bytes.
    #[inline(always)]
    #[allow(dead_code)]
    pub fn write_le32(&mut self, v: u32) {
        self.put_slice(&v.to_le_bytes());
    }

    /// Emits `data` verbatim.
    #[inline]
    pub fn write_bytes(&mut self, data: &[u8]) {
        self.put_slice(data);
    }

    /// Number of unwritten bytes left in the output buffer.
    pub fn remaining(&self) -> usize {
        let len = self.buf.len();
        if self.pos < len { len - self.pos } else { 0 }
    }

    /// One past the last position at which the 8-byte fast-path store of
    /// `flush_bits` is still in bounds.
    #[allow(dead_code)]
    pub fn fast_end(&self) -> usize {
        self.buf.len().saturating_sub(7)
    }
}