use cloudflare_zlib_sys::{MAX_MEM_LEVEL, Z_BEST_COMPRESSION, Z_BUF_ERROR, Z_DEFAULT_STRATEGY, Z_DEFLATED, Z_FINISH, Z_NO_FLUSH, Z_OK, Z_STREAM_END, deflateEnd, deflateInit2, deflateTune, uInt, z_stream};
use crate::ZError;
use std::mem;
use std::os::raw::c_int;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;
/// One-shot convenience wrapper: compresses `data` in a single call using
/// the default settings (best compression, default strategy, 15-bit window)
/// and returns the deflated bytes.
///
/// # Errors
/// Propagates any `ZError` from stream construction or compression.
pub fn deflate(data: &[u8]) -> Result<Vec<u8>, ZError> {
    let mut compressor = Deflate::new_default()?;
    compressor.compress(data)?;
    compressor.finish()
}
/// Streaming deflate compressor that accumulates its output in an
/// internal `Vec<u8>`.
pub struct Deflate {
    // Heap-allocated z_stream so its address stays stable when the
    // `Deflate` value is moved — presumably zlib's internal state holds
    // pointers related to the stream; TODO(review) confirm against zlib docs.
    stream: Box<z_stream>,
    // Compressed output; bytes are appended into the spare capacity by
    // `compress_internal` and the length is bumped with `set_len`.
    buf: Vec<u8>,
}
impl Deflate {
#[inline]
pub fn new_default() -> Result<Self, ZError> {
Self::new(Z_BEST_COMPRESSION, Z_DEFAULT_STRATEGY, 15)
}
#[inline]
pub fn new(level: c_int, strategy: c_int, window_bits: c_int) -> Result<Self, ZError> {
Self::new_with_vec(level, strategy, window_bits, Vec::with_capacity(1<<16))
}
pub fn new_with_vec(level: c_int, strategy: c_int, window_bits: c_int, buf: Vec<u8>) -> Result<Self, ZError> {
if !crate::is_supported() {
return Err(ZError::IncompatibleCPU);
}
unsafe {
let mut stream: Box<z_stream> = Box::new(mem::zeroed());
let res = deflateInit2(
&mut *stream,
level,
Z_DEFLATED,
window_bits,
MAX_MEM_LEVEL,
strategy,
);
if Z_OK != res {
return Err(ZError::new(res));
}
Ok(Deflate { stream, buf })
}
}
#[inline]
pub fn tune(&mut self, good_length: i32, max_lazy: i32, nice_length: i32, max_chain: i32) -> Result<(), ZError> {
unsafe {
match deflateTune(&mut *self.stream, good_length, max_lazy, nice_length, max_chain) {
Z_OK => Ok(()),
other => Err(ZError::new(other)),
}
}
}
#[inline]
pub fn reserve(&mut self, compressed_size: usize) {
self.buf.reserve(compressed_size);
}
#[inline]
pub fn compress(&mut self, data: &[u8]) -> Result<(), ZError> {
self.compress_internal(data, None, false)
}
#[inline]
pub fn compress_with_limit(&mut self, data: &[u8], max_size: &AtomicUsize) -> Result<(), ZError> {
self.compress_internal(data, Some(max_size), false)
}
fn compress_internal(&mut self, data: &[u8], max_size: Option<&AtomicUsize>, finish: bool) -> Result<(), ZError> {
assert!(data.len() < uInt::max_value() as usize);
self.stream.next_in = data.as_ptr() as *mut _;
self.stream.avail_in = data.len() as uInt;
loop {
let total_out_before = self.stream.total_out as usize;
let remaining = max_size.map(|max| max.load(Relaxed).saturating_sub(total_out_before));
unsafe {
let len = self.buf.len();
let mut avail_out = self.buf.capacity() - len;
if let Some(r) = remaining {
avail_out = avail_out.min(r);
}
self.stream.avail_out = avail_out as uInt;
self.stream.next_out = self.buf[len..].as_mut_ptr();
let res = cloudflare_zlib_sys::deflate(&mut *self.stream, if finish {Z_FINISH} else {Z_NO_FLUSH});
let total_out_written = self.stream.total_out as usize;
if total_out_written > total_out_before {
self.buf.set_len(len + total_out_written - total_out_before);
} else {
debug_assert_eq!(total_out_before, self.stream.total_out as usize);
}
match res {
Z_STREAM_END => {
debug_assert_eq!(0, self.stream.avail_in);
return Ok(())
},
Z_OK | Z_BUF_ERROR => {
if !finish && self.stream.avail_in == 0 {
return Ok(());
}
let mut reserve = self.buf.capacity().max(1<<16);
if let Some(rem) = remaining {
if rem == 0 {
return Err(ZError::DeflatedDataTooLarge(total_out_written));
}
reserve = reserve.min(rem);
}
self.buf.reserve(reserve);
},
other => {
return Err(ZError::new(other));
}
}
}
}
}
#[inline]
pub fn finish(mut self) -> Result<Vec<u8>, ZError> {
self.compress_internal(&[], None, true)?;
Ok(mem::take(&mut self.buf))
}
}
impl Drop for Deflate {
    #[inline]
    fn drop(&mut self) {
        unsafe {
            // Release zlib's internal allocations for this stream; the
            // return code is intentionally ignored in a destructor.
            deflateEnd(&mut *self.stream);
        }
    }
}
#[test]
fn compress_test() {
    // Fast settings: level 1, strategy 0, 15-bit window.
    let mut deflater = Deflate::new(1, 0, 15).unwrap();
    deflater.reserve(1);
    // Piecewise input, including an empty chunk and a size-limited chunk.
    deflater.compress(b"a").unwrap();
    deflater.compress(b"").unwrap();
    let limit = AtomicUsize::new(999);
    deflater.compress_with_limit(b"zxcvbnm", &limit).unwrap();
    // Round-trip through the crate's inflater must restore the full input.
    let compressed = deflater.finish().unwrap();
    let restored = crate::inf::inflate(&compressed).unwrap();
    assert_eq!(&restored, b"azxcvbnm");
}