// cloudflare_zlib/def.rs

use cloudflare_zlib_sys::{MAX_MEM_LEVEL, Z_BEST_COMPRESSION, Z_BUF_ERROR, Z_DEFAULT_STRATEGY, Z_DEFLATED, Z_FINISH, Z_NO_FLUSH, Z_OK, Z_STREAM_END, deflateEnd, deflateInit2, deflateTune, uInt, z_stream};
use crate::ZError;
use std::mem;
use std::os::raw::c_int;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;

/// Compress data to `Vec` using default settings.
/// Use a `Deflate` object if you need to customize compression.
pub fn deflate(data: &[u8]) -> Result<Vec<u8>, ZError> {
    let mut stream = Deflate::new_default()?;
    stream.compress(data)?;
    stream.finish()
}

/// Streaming compressor that appends deflate output to an internal `Vec<u8>`.
pub struct Deflate {
    stream: Box<z_stream>,
    buf: Vec<u8>,
}

impl Deflate {
    #[inline]
    pub fn new_default() -> Result<Self, ZError> {
        Self::new(Z_BEST_COMPRESSION, Z_DEFAULT_STRATEGY, 15)
    }

    /// Use zlib's magic constants:
    ///  * level = `Z_BEST_SPEED` (1) to `Z_BEST_COMPRESSION` (9)
    ///  * strategy = `Z_DEFAULT_STRATEGY`, `Z_FILTERED`, `Z_HUFFMAN_ONLY`, `Z_RLE`, `Z_FIXED`
    ///  * window_bits = 15
    #[inline]
    pub fn new(level: c_int, strategy: c_int, window_bits: c_int) -> Result<Self, ZError> {
        Self::new_with_vec(level, strategy, window_bits, Vec::with_capacity(1<<16))
    }

    /// Same as `new`, but appends compressed data to the given `Vec`
    pub fn new_with_vec(level: c_int, strategy: c_int, window_bits: c_int, buf: Vec<u8>) -> Result<Self, ZError> {
        if !crate::is_supported() {
            return Err(ZError::IncompatibleCPU);
        }
        unsafe {
            let mut stream: Box<z_stream> = Box::new(mem::zeroed());
            // Z_DEFLATED is the only method zlib supports; MAX_MEM_LEVEL trades
            // memory for the best speed/ratio.
            let res = deflateInit2(
                &mut *stream,
                level,
                Z_DEFLATED,
                window_bits,
                MAX_MEM_LEVEL,
                strategy,
            );
            if Z_OK != res {
                return Err(ZError::new(res));
            }
            Ok(Deflate { stream, buf })
        }
    }

    /// Change compression parameters to fine-tune the space-speed tradeoff.
    #[inline]
    pub fn tune(&mut self, good_length: i32, max_lazy: i32, nice_length: i32, max_chain: i32) -> Result<(), ZError> {
        unsafe {
            match deflateTune(&mut *self.stream, good_length, max_lazy, nice_length, max_chain) {
                Z_OK => Ok(()),
                other => Err(ZError::new(other)),
            }
        }
    }

    /// Expect (remaining) data to take this much space after compression
    #[inline]
    pub fn reserve(&mut self, compressed_size: usize) {
        self.buf.reserve(compressed_size);
    }

    /// Add bytes from `data` to compressed data
    #[inline]
    pub fn compress(&mut self, data: &[u8]) -> Result<(), ZError> {
        self.compress_internal(data, None, false)
    }

    /// Add bytes from `data` to compressed data, unless the total compressed output would exceed `max_size`
    #[inline]
    pub fn compress_with_limit(&mut self, data: &[u8], max_size: &AtomicUsize) -> Result<(), ZError> {
        self.compress_internal(data, Some(max_size), false)
    }

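    // Core loop shared by `compress`, `compress_with_limit` and `finish`:
    // hand zlib the whole input, let it write into the Vec's spare capacity,
    // and grow the buffer whenever zlib runs out of output room, optionally
    // bounded by `max_size`.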
    fn compress_internal(&mut self, data: &[u8], max_size: Option<&AtomicUsize>, finish: bool) -> Result<(), ZError> {
        assert!(data.len() < uInt::max_value() as usize);
        self.stream.next_in = data.as_ptr() as *mut _;
        self.stream.avail_in = data.len() as uInt;

        loop {
            // if we know max size, we don't want to compress or reserve more space than that
            let total_out_before = self.stream.total_out as usize;
            let remaining = max_size.map(|max| max.load(Relaxed).saturating_sub(total_out_before));
            unsafe {
                // unsafe - this is writing to the _reserved_ length of the vector,
                // and updating size only after the write.
                // this way uninitialized memory is never exposed to safe Rust.
                let len = self.buf.len();
                let mut avail_out = self.buf.capacity() - len;
                if let Some(r) = remaining {
                    avail_out = avail_out.min(r);
                }
                self.stream.avail_out = avail_out as uInt;
                self.stream.next_out = self.buf[len..].as_mut_ptr();

                let res = cloudflare_zlib_sys::deflate(&mut *self.stream, if finish { Z_FINISH } else { Z_NO_FLUSH });

                // extend the vec length by number of bytes written by zlib
                let total_out_written = self.stream.total_out as usize;
                if total_out_written > total_out_before {
                    self.buf.set_len(len + total_out_written - total_out_before);
                } else {
                    debug_assert_eq!(total_out_before, self.stream.total_out as usize);
                }

                match res {
                    Z_STREAM_END => {
                        debug_assert_eq!(0, self.stream.avail_in);
                        return Ok(())
                    },
                    Z_OK | Z_BUF_ERROR => {
                        if !finish && self.stream.avail_in == 0 {
                            return Ok(());
                        }

                        // by default doubles the buffer (or 64kb for empty vec)
                        let mut reserve = self.buf.capacity().max(1<<16);

                        if let Some(rem) = remaining {
                            if rem == 0 {
                                return Err(ZError::DeflatedDataTooLarge(total_out_written));
                            }
                            reserve = reserve.min(rem);
                        }
                        self.buf.reserve(reserve);
                    },
                    other => {
                        return Err(ZError::new(other));
                    }
                }
            }
        }
    }

    #[inline]
    pub fn finish(mut self) -> Result<Vec<u8>, ZError> {
        self.compress_internal(&[], None, true)?;
        // it's like option.take(), but cheaper
        Ok(mem::take(&mut self.buf))
    }
}

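// Releasing zlib's internal state in `Drop` keeps the stream from leaking
// even when `finish()` is never called (e.g. after an error).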
impl Drop for Deflate {
    #[inline]
    fn drop(&mut self) {
        unsafe {
            deflateEnd(&mut *self.stream);
        }
    }
}
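
// A hedged sketch of the streaming API with non-default settings: level 1 and
// strategy 1 are zlib's `Z_BEST_SPEED` and `Z_FILTERED` written as literals, and
// the `tune` arguments are illustrative values, not tuning recommendations.
#[test]
fn tuned_compress_test() {
    let mut d = Deflate::new(1, 1, 15).unwrap();
    // fine-tune good_length/max_lazy/nice_length/max_chain, then compress as usual
    d.tune(8, 16, 128, 128).unwrap();
    d.reserve(64);
    let data: &[u8] = b"the quick brown fox jumps over the lazy dog";
    d.compress(data).unwrap();
    let out = d.finish().unwrap();
    assert_eq!(crate::inf::inflate(&out).unwrap(), data);
}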

#[test]
fn compress_test() {
    let mut d = Deflate::new(1, 0, 15).unwrap();
    d.reserve(1);
    d.compress(b"a").unwrap();
    d.compress(b"").unwrap();
    d.compress_with_limit(b"zxcvbnm", &AtomicUsize::new(999)).unwrap();
    let vec = d.finish().unwrap();

    let res = crate::inf::inflate(&vec).unwrap();
    assert_eq!(&res, b"azxcvbnm");
}
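
// Round-trip sketch for the module-level `deflate` convenience helper defined at
// the top of this file, mirroring the inflate check in `compress_test` above.
#[test]
fn deflate_helper_test() {
    let data: &[u8] = b"hello hello hello hello";
    let compressed = deflate(data).unwrap();
    let restored = crate::inf::inflate(&compressed).unwrap();
    assert_eq!(restored, data);
}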