//! Raw in-memory compression and decompression streams.
use alloc::vec::Vec;
use core::error::Error;
use core::fmt;
use core::mem::MaybeUninit;

use crate::ffi::{self, Backend, Deflate, DeflateBackend, ErrorMessage, Inflate, InflateBackend};
use crate::Compression;
8
/// Raw in-memory compression stream for blocks of data.
///
/// This type is the building block for the I/O streams in the rest of this
/// crate. It requires more management than the [`Read`]/[`Write`] API but is
/// maximally flexible in terms of accepting input from any source and being
/// able to produce output to any memory location.
///
/// It is recommended to use the I/O stream adaptors over this type as they're
/// easier to use.
///
/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
#[derive(Debug)]
pub struct Compress {
    // Backend-selected deflate state machine (see `crate::ffi`).
    inner: Deflate,
}
25
/// Raw in-memory decompression stream for blocks of data.
///
/// This type is the building block for the I/O streams in the rest of this
/// crate. It requires more management than the [`Read`]/[`Write`] API but is
/// maximally flexible in terms of accepting input from any source and being
/// able to produce output to any memory location.
///
/// It is recommended to use the I/O stream adaptors over this type as they're
/// easier to use.
///
/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
#[derive(Debug)]
pub struct Decompress {
    // Backend-selected inflate state machine (see `crate::ffi`).
    inner: Inflate,
}
42
/// Values which indicate the form of flushing to be used when compressing
/// in-memory data.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[non_exhaustive]
#[allow(clippy::unnecessary_cast)]
pub enum FlushCompress {
    /// A typical parameter for passing to compression/decompression functions,
    /// this allows the underlying stream to decide how much data to
    /// accumulate before producing output in order to maximize compression.
    None = ffi::MZ_NO_FLUSH as isize,

    /// All pending output is flushed to the output buffer, but the output is
    /// not aligned to a byte boundary.
    ///
    /// All input data so far will be available to the decompressor (as with
    /// `Flush::Sync`). This completes the current deflate block and follows it
    /// with an empty fixed codes block that is 10 bits long, and it assures
    /// that enough bytes are output in order for the decompressor to finish the
    /// block before the empty fixed code block.
    Partial = ffi::MZ_PARTIAL_FLUSH as isize,

    /// All pending output is flushed to the output buffer and the output is
    /// aligned on a byte boundary so that the decompressor can get all input
    /// data available so far.
    ///
    /// Flushing may degrade compression for some compression algorithms and so
    /// it should only be used when necessary. This will complete the current
    /// deflate block and follow it with an empty stored block.
    Sync = ffi::MZ_SYNC_FLUSH as isize,

    /// All output is flushed as with `Flush::Sync` and the compression state is
    /// reset so decompression can restart from this point if previous
    /// compressed data has been damaged or if random access is desired.
    ///
    /// Using this option too often can seriously degrade compression.
    Full = ffi::MZ_FULL_FLUSH as isize,

    /// Pending input is processed and pending output is flushed.
    ///
    /// The return value may indicate that the stream is not yet done and more
    /// data has yet to be processed.
    Finish = ffi::MZ_FINISH as isize,
}
86
/// Values which indicate the form of flushing to be used when
/// decompressing in-memory data.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[non_exhaustive]
#[allow(clippy::unnecessary_cast)]
pub enum FlushDecompress {
    /// A typical parameter for passing to compression/decompression functions,
    /// this allows the underlying stream to decide how much data to
    /// accumulate before producing output in order to maximize compression.
    None = ffi::MZ_NO_FLUSH as isize,

    /// All pending output is flushed to the output buffer and the output is
    /// aligned on a byte boundary so that the decompressor can get all input
    /// data available so far.
    ///
    /// Flushing may degrade compression for some compression algorithms and so
    /// it should only be used when necessary. This will complete the current
    /// deflate block and follow it with an empty stored block.
    Sync = ffi::MZ_SYNC_FLUSH as isize,

    /// Pending input is processed and pending output is flushed.
    ///
    /// The return value may indicate that the stream is not yet done and more
    /// data has yet to be processed.
    Finish = ffi::MZ_FINISH as isize,
}
113
/// The inner state for an error when decompressing
#[derive(Clone, Debug)]
pub(crate) enum DecompressErrorInner {
    /// A generic decompression failure, carrying the backend's message (if any).
    General { msg: ErrorMessage },
    /// The stream requires a preset dictionary; the payload is the Adler-32
    /// checksum of the required dictionary.
    NeedsDictionary(u32),
}
120
/// Error returned when a decompression object finds that the input stream of
/// bytes was not a valid input stream of bytes.
// Newtype over the crate-private inner enum so the variants stay hidden from
// the public API.
#[derive(Clone, Debug)]
pub struct DecompressError(pub(crate) DecompressErrorInner);
125
126impl DecompressError {
127    /// Indicates whether decompression failed due to requiring a dictionary.
128    ///
129    /// The resulting integer is the Adler-32 checksum of the dictionary
130    /// required.
131    pub fn needs_dictionary(&self) -> Option<u32> {
132        match self.0 {
133            DecompressErrorInner::NeedsDictionary(adler) => Some(adler),
134            _ => None,
135        }
136    }
137}
138
139#[inline]
140pub(crate) fn decompress_failed<T>(msg: ErrorMessage) -> Result<T, DecompressError> {
141    Err(DecompressError(DecompressErrorInner::General { msg }))
142}
143
144#[inline]
145pub(crate) fn decompress_need_dict<T>(adler: u32) -> Result<T, DecompressError> {
146    Err(DecompressError(DecompressErrorInner::NeedsDictionary(
147        adler,
148    )))
149}
150
/// Error returned when a compression object is used incorrectly or otherwise
/// generates an error.
#[derive(Clone, Debug)]
pub struct CompressError {
    // Backend-provided failure message; may be empty (see `message()`).
    pub(crate) msg: ErrorMessage,
}
157
158#[inline]
159pub(crate) fn compress_failed<T>(msg: ErrorMessage) -> Result<T, CompressError> {
160    Err(CompressError { msg })
161}
162
/// Possible status results of compressing some data or successfully
/// decompressing a block of data.
///
/// Returned (inside a `Result`) by [`Compress::compress`] and
/// [`Decompress::decompress`] and their `_uninit`/`_vec` variants.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Status {
    /// Indicates success.
    ///
    /// Means that more input may be needed but isn't available
    /// and/or there's more output to be written but the output buffer is full.
    Ok,

    /// Indicates that forward progress is not possible due to input or output
    /// buffers being empty.
    ///
    /// For compression it means the input buffer needs some more data or the
    /// output buffer needs to be freed up before trying again.
    ///
    /// For decompression this means that more input is needed to continue or
    /// the output buffer isn't large enough to contain the result. The function
    /// can be called again after fixing both.
    BufError,

    /// Indicates that all input has been consumed and all output bytes have
    /// been written. Decompression/compression should not be called again.
    ///
    /// For decompression with zlib streams the adler-32 of the decompressed
    /// data has also been verified.
    StreamEnd,
}
191
impl Compress {
    /// Creates a new object ready for compressing data that it's given.
    ///
    /// The `level` argument here indicates what level of compression is going
    /// to be performed, and the `zlib_header` argument indicates whether the
    /// output data should have a zlib header or not.
    pub fn new(level: Compression, zlib_header: bool) -> Compress {
        Compress {
            inner: Deflate::make(level, zlib_header, ffi::MZ_DEFAULT_WINDOW_BITS as u8),
        }
    }

    /// Creates a new object ready for compressing data that it's given.
    ///
    /// The `level` argument here indicates what level of compression is going
    /// to be performed, and the `zlib_header` argument indicates whether the
    /// output data should have a zlib header or not. The `window_bits` parameter
    /// indicates the base-2 logarithm of the sliding window size and must be
    /// between 9 and 15.
    ///
    /// # Panics
    ///
    /// If `window_bits` does not fall into the range 9 ..= 15,
    /// this function will panic.
    #[cfg(feature = "any_zlib")]
    pub fn new_with_window_bits(
        level: Compression,
        zlib_header: bool,
        window_bits: u8,
    ) -> Compress {
        assert!(
            window_bits > 8 && window_bits < 16,
            "window_bits must be within 9 ..= 15"
        );
        Compress {
            inner: Deflate::make(level, zlib_header, window_bits),
        }
    }

    /// Creates a new object ready for compressing data that it's given.
    ///
    /// The `level` argument here indicates what level of compression is going
    /// to be performed.
    ///
    /// The Compress object produced by this constructor outputs gzip headers
    /// for the compressed data.
    ///
    /// # Panics
    ///
    /// If `window_bits` does not fall into the range 9 ..= 15,
    /// this function will panic.
    #[cfg(feature = "any_zlib")]
    pub fn new_gzip(level: Compression, window_bits: u8) -> Compress {
        assert!(
            window_bits > 8 && window_bits < 16,
            "window_bits must be within 9 ..= 15"
        );
        Compress {
            // Adding 16 to `window_bits` selects gzip (rather than zlib)
            // framing in zlib's `windowBits` encoding.
            inner: Deflate::make(level, true, window_bits + 16),
        }
    }

    /// Returns the total number of input bytes which have been processed by
    /// this compression object.
    pub fn total_in(&self) -> u64 {
        self.inner.total_in()
    }

    /// Returns the total number of output bytes which have been produced by
    /// this compression object.
    pub fn total_out(&self) -> u64 {
        self.inner.total_out()
    }

    /// Specifies the compression dictionary to use.
    ///
    /// Returns the Adler-32 checksum of the dictionary.
    #[cfg(feature = "any_c_zlib")]
    pub fn set_dictionary(&mut self, dictionary: &[u8]) -> Result<u32, CompressError> {
        // SAFETY: The field `inner` must always be accessed as a raw pointer,
        // since it points to a cyclic structure. No copies of `inner` can be
        // retained for longer than the lifetime of `self.inner.inner.stream_wrapper`.
        let stream = self.inner.inner.stream_wrapper.inner;
        let rc = unsafe {
            // Clear any stale message so a later `msg()` reflects this call.
            (*stream).msg = core::ptr::null_mut();
            // Guard against truncation in the `usize -> uInt` cast below.
            assert!(dictionary.len() < ffi::uInt::MAX as usize);
            ffi::deflateSetDictionary(stream, dictionary.as_ptr(), dictionary.len() as ffi::uInt)
        };

        match rc {
            ffi::MZ_STREAM_ERROR => compress_failed(self.inner.inner.msg()),
            #[allow(clippy::unnecessary_cast)]
            ffi::MZ_OK => Ok(unsafe { (*stream).adler } as u32),
            c => panic!("unknown return code: {}", c),
        }
    }

    /// Specifies the compression dictionary to use.
    ///
    /// Returns the Adler-32 checksum of the dictionary.
    #[cfg(all(not(feature = "any_c_zlib"), feature = "zlib-rs"))]
    pub fn set_dictionary(&mut self, dictionary: &[u8]) -> Result<u32, CompressError> {
        self.inner.set_dictionary(dictionary)
    }

    /// Quickly resets this compressor without having to reallocate anything.
    ///
    /// This is equivalent to dropping this object and then creating a new one.
    pub fn reset(&mut self) {
        self.inner.reset();
    }

    /// Dynamically updates the compression level.
    ///
    /// This can be used to switch between compression levels for different
    /// kinds of data, or it can be used in conjunction with a call to reset
    /// to reuse the compressor.
    ///
    /// This may return an error if there wasn't enough output space to complete
    /// the compression of the available input data before changing the
    /// compression level. Flushing the stream before calling this method
    /// ensures that the function will succeed on the first call.
    #[cfg(feature = "any_zlib")]
    pub fn set_level(&mut self, level: Compression) -> Result<(), CompressError> {
        // The `zlib-rs` backend exposes this natively; the C backends go
        // through `deflateParams` below.
        #[cfg(all(not(feature = "any_c_zlib"), feature = "zlib-rs"))]
        {
            self.inner.set_level(level)
        }

        #[cfg(feature = "any_c_zlib")]
        {
            use core::ffi::c_int;
            // SAFETY: The field `inner` must always be accessed as a raw pointer,
            // since it points to a cyclic structure. No copies of `inner` can be
            // retained for longer than the lifetime of `self.inner.inner.stream_wrapper`.
            let stream = self.inner.inner.stream_wrapper.inner;
            unsafe {
                (*stream).msg = core::ptr::null_mut();
            }
            let rc =
                unsafe { ffi::deflateParams(stream, level.0 as c_int, ffi::MZ_DEFAULT_STRATEGY) };

            match rc {
                ffi::MZ_OK => Ok(()),
                ffi::MZ_BUF_ERROR => compress_failed(self.inner.inner.msg()),
                c => panic!("unknown return code: {}", c),
            }
        }
    }

    /// Compresses the input data into the output, consuming only as much
    /// input as needed and writing as much output as possible.
    ///
    /// The flush option can be any of the available `FlushCompress` parameters.
    ///
    /// To learn how much data was consumed or how much output was produced, use
    /// the `total_in` and `total_out` functions before/after this is called.
    pub fn compress(
        &mut self,
        input: &[u8],
        output: &mut [u8],
        flush: FlushCompress,
    ) -> Result<Status, CompressError> {
        self.inner.compress(input, output, flush)
    }

    /// Similar to [`Self::compress`] but accepts uninitialized buffer.
    ///
    /// If you want to avoid the overhead of zero initializing the
    /// buffer and you don't want to use a [`Vec`], then please use
    /// this API.
    pub fn compress_uninit(
        &mut self,
        input: &[u8],
        output: &mut [MaybeUninit<u8>],
        flush: FlushCompress,
    ) -> Result<Status, CompressError> {
        self.inner.compress_uninit(input, output, flush)
    }

    /// Compresses the input data into the extra space of the output, consuming
    /// only as much input as needed and writing as much output as possible.
    ///
    /// This function has the same semantics as `compress`, except that the
    /// length of `vec` is managed by this function. This will not reallocate
    /// the vector provided or attempt to grow it, so space for the output must
    /// be reserved in the output vector by the caller before calling this
    /// function.
    pub fn compress_vec(
        &mut self,
        input: &[u8],
        output: &mut Vec<u8>,
        flush: FlushCompress,
    ) -> Result<Status, CompressError> {
        // SAFETY: bytes_written is the number of bytes written into `out`
        unsafe {
            write_to_spare_capacity_of_vec(output, |out| {
                // The backend tracks totals; the delta is how much it wrote.
                let before = self.total_out();
                let ret = self.compress_uninit(input, out, flush);
                let bytes_written = self.total_out() - before;
                (bytes_written as usize, ret)
            })
        }
    }
}
397
impl Decompress {
    /// Creates a new object ready for decompressing data that it's given.
    ///
    /// The `zlib_header` argument indicates whether the input data is expected
    /// to have a zlib header or not.
    pub fn new(zlib_header: bool) -> Decompress {
        Decompress {
            inner: Inflate::make(zlib_header, ffi::MZ_DEFAULT_WINDOW_BITS as u8),
        }
    }

    /// Creates a new object ready for decompressing data that it's given.
    ///
    /// The `zlib_header` argument indicates whether the input data is expected
    /// to have a zlib header or not. The `window_bits` parameter indicates the
    /// base-2 logarithm of the sliding window size and must be between 9 and 15.
    ///
    /// # Panics
    ///
    /// If `window_bits` does not fall into the range 9 ..= 15,
    /// this function will panic.
    #[cfg(feature = "any_zlib")]
    pub fn new_with_window_bits(zlib_header: bool, window_bits: u8) -> Decompress {
        assert!(
            window_bits > 8 && window_bits < 16,
            "window_bits must be within 9 ..= 15"
        );
        Decompress {
            inner: Inflate::make(zlib_header, window_bits),
        }
    }

    /// Creates a new object ready for decompressing data that it's given.
    ///
    /// The Decompress object produced by this constructor expects gzip headers
    /// for the compressed data.
    ///
    /// # Panics
    ///
    /// If `window_bits` does not fall into the range 9 ..= 15,
    /// this function will panic.
    #[cfg(feature = "any_zlib")]
    pub fn new_gzip(window_bits: u8) -> Decompress {
        assert!(
            window_bits > 8 && window_bits < 16,
            "window_bits must be within 9 ..= 15"
        );
        Decompress {
            // Adding 16 to `window_bits` selects gzip (rather than zlib)
            // framing in zlib's `windowBits` encoding.
            inner: Inflate::make(true, window_bits + 16),
        }
    }

    /// Returns the total number of input bytes which have been processed by
    /// this decompression object.
    pub fn total_in(&self) -> u64 {
        self.inner.total_in()
    }

    /// Returns the total number of output bytes which have been produced by
    /// this decompression object.
    pub fn total_out(&self) -> u64 {
        self.inner.total_out()
    }

    /// Decompresses the input data into the output, consuming only as much
    /// input as needed and writing as much output as possible.
    ///
    /// The flush option can be any of the available `FlushDecompress` parameters.
    ///
    /// If the first call passes `FlushDecompress::Finish` it is assumed that
    /// the input and output buffers are both sized large enough to decompress
    /// the entire stream in a single call.
    ///
    /// A flush value of `FlushDecompress::Finish` indicates that there are no
    /// more source bytes available beside what's already in the input buffer,
    /// and the output buffer is large enough to hold the rest of the
    /// decompressed data.
    ///
    /// To learn how much data was consumed or how much output was produced, use
    /// the `total_in` and `total_out` functions before/after this is called.
    ///
    /// # Errors
    ///
    /// If the input data to this instance of `Decompress` is not a valid
    /// zlib/deflate stream then this function may return an instance of
    /// `DecompressError` to indicate that the stream of input bytes is corrupted.
    pub fn decompress(
        &mut self,
        input: &[u8],
        output: &mut [u8],
        flush: FlushDecompress,
    ) -> Result<Status, DecompressError> {
        self.inner.decompress(input, output, flush)
    }

    /// Similar to [`Self::decompress`] but accepts uninitialized buffer
    ///
    /// If you want to avoid the overhead of zero initializing the
    /// buffer and you don't want to use a [`Vec`], then please use
    /// this API.
    pub fn decompress_uninit(
        &mut self,
        input: &[u8],
        output: &mut [MaybeUninit<u8>],
        flush: FlushDecompress,
    ) -> Result<Status, DecompressError> {
        self.inner.decompress_uninit(input, output, flush)
    }

    /// Decompresses the input data into the extra space in the output vector
    /// specified by `output`.
    ///
    /// This function has the same semantics as `decompress`, except that the
    /// length of `vec` is managed by this function. This will not reallocate
    /// the vector provided or attempt to grow it, so space for the output must
    /// be reserved in the output vector by the caller before calling this
    /// function.
    ///
    /// # Errors
    ///
    /// If the input data to this instance of `Decompress` is not a valid
    /// zlib/deflate stream then this function may return an instance of
    /// `DecompressError` to indicate that the stream of input bytes is corrupted.
    pub fn decompress_vec(
        &mut self,
        input: &[u8],
        output: &mut Vec<u8>,
        flush: FlushDecompress,
    ) -> Result<Status, DecompressError> {
        // SAFETY: bytes_written is the number of bytes written into `out`
        unsafe {
            write_to_spare_capacity_of_vec(output, |out| {
                // The backend tracks totals; the delta is how much it wrote.
                let before = self.total_out();
                let ret = self.decompress_uninit(input, out, flush);
                let bytes_written = self.total_out() - before;
                (bytes_written as usize, ret)
            })
        }
    }

    /// Specifies the decompression dictionary to use.
    #[cfg(feature = "any_c_zlib")]
    pub fn set_dictionary(&mut self, dictionary: &[u8]) -> Result<u32, DecompressError> {
        // SAFETY: The field `inner` must always be accessed as a raw pointer,
        // since it points to a cyclic structure. No copies of `inner` can be
        // retained for longer than the lifetime of `self.inner.inner.stream_wrapper`.
        let stream = self.inner.inner.stream_wrapper.inner;
        let rc = unsafe {
            // Clear any stale message so a later `msg()` reflects this call.
            (*stream).msg = core::ptr::null_mut();
            // Guard against truncation in the `usize -> uInt` cast below.
            assert!(dictionary.len() < ffi::uInt::MAX as usize);
            ffi::inflateSetDictionary(stream, dictionary.as_ptr(), dictionary.len() as ffi::uInt)
        };

        #[allow(clippy::unnecessary_cast)]
        match rc {
            ffi::MZ_STREAM_ERROR => decompress_failed(self.inner.inner.msg()),
            // `MZ_DATA_ERROR` here means the supplied dictionary's checksum
            // didn't match the one the stream expects.
            ffi::MZ_DATA_ERROR => decompress_need_dict(unsafe { (*stream).adler } as u32),
            ffi::MZ_OK => Ok(unsafe { (*stream).adler } as u32),
            c => panic!("unknown return code: {}", c),
        }
    }

    /// Specifies the decompression dictionary to use.
    #[cfg(all(not(feature = "any_c_zlib"), feature = "zlib-rs"))]
    pub fn set_dictionary(&mut self, dictionary: &[u8]) -> Result<u32, DecompressError> {
        self.inner.set_dictionary(dictionary)
    }

    /// Performs the equivalent of replacing this decompression state with a
    /// freshly allocated copy.
    ///
    /// This function may not allocate memory, though, and attempts to reuse any
    /// previously existing resources.
    ///
    /// The argument provided here indicates whether the reset state will
    /// attempt to decode a zlib header first or not.
    pub fn reset(&mut self, zlib_header: bool) {
        self.inner.reset(zlib_header);
    }
}
578
579impl Error for DecompressError {}
580
581impl DecompressError {
582    /// Retrieve the implementation's message about why the operation failed, if one exists.
583    pub fn message(&self) -> Option<&str> {
584        match &self.0 {
585            DecompressErrorInner::General { msg } => msg.get(),
586            _ => None,
587        }
588    }
589}
590
impl From<DecompressError> for crate::io::Error {
    fn from(_data: DecompressError) -> crate::io::Error {
        #[cfg(feature = "std")]
        {
            // With `std` available, preserve the original error as the source.
            crate::io::Error::other(_data)
        }
        #[cfg(not(feature = "std"))]
        {
            // Without `std` there is no boxed-error support, so fall back to a
            // static description.
            crate::io::Error::new(crate::io::ErrorKind::Other, "deflate decompression error")
        }
    }
}
603
604impl fmt::Display for DecompressError {
605    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
606        let msg = match &self.0 {
607            DecompressErrorInner::General { msg } => msg.get(),
608            DecompressErrorInner::NeedsDictionary { .. } => Some("requires a dictionary"),
609        };
610        match msg {
611            Some(msg) => write!(f, "deflate decompression error: {msg}"),
612            None => write!(f, "deflate decompression error"),
613        }
614    }
615}
616
617impl Error for CompressError {}
618
619impl CompressError {
620    /// Retrieve the implementation's message about why the operation failed, if one exists.
621    pub fn message(&self) -> Option<&str> {
622        self.msg.get()
623    }
624}
625
impl From<CompressError> for crate::io::Error {
    fn from(_data: CompressError) -> crate::io::Error {
        #[cfg(feature = "std")]
        {
            // With `std` available, preserve the original error as the source.
            crate::io::Error::other(_data)
        }
        #[cfg(not(feature = "std"))]
        {
            // Without `std` there is no boxed-error support, so fall back to a
            // static description.
            crate::io::Error::new(crate::io::ErrorKind::Other, "deflate compression error")
        }
    }
}
638
639impl fmt::Display for CompressError {
640    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
641        match self.msg.get() {
642            Some(msg) => write!(f, "deflate compression error: {msg}"),
643            None => write!(f, "deflate compression error"),
644        }
645    }
646}
647
/// Allows `writer` to write data into the spare capacity of the `output` vector.
/// This will not reallocate the vector provided or attempt to grow it, so space
/// for the `output` must be reserved by the caller before calling this
/// function.
///
/// `writer` needs to return the number of bytes written (and can also return
/// another arbitrary return value).
///
/// # Safety:
///
/// The length returned by the `writer` must be equal to actual number of bytes written
/// to the uninitialized slice passed in and initialized.
unsafe fn write_to_spare_capacity_of_vec<T>(
    output: &mut Vec<u8>,
    writer: impl FnOnce(&mut [MaybeUninit<u8>]) -> (usize, T),
) -> T {
    let capacity = output.capacity();
    let old_len = output.len();

    let (bytes_written, result) = writer(output.spare_capacity_mut());

    // Clamp to the capacity so a buggy `bytes_written` can never extend the
    // length past the allocation.
    let new_len = usize::min(capacity, old_len + bytes_written);
    output.set_len(new_len);

    result
}
672
#[cfg(all(test, feature = "std"))]
mod tests {
    use std::io::Write;

    use crate::write;
    use crate::{Compression, Decompress, FlushDecompress};

    use crate::{Compress, FlushCompress};

    #[test]
    fn issue51() {
        // A complete gzip stream (note the 0x1f 0x8b gzip magic at the start).
        let data = [
            0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xb3, 0xc9, 0x28, 0xc9,
            0xcd, 0xb1, 0xe3, 0xe5, 0xb2, 0xc9, 0x48, 0x4d, 0x4c, 0xb1, 0xb3, 0x29, 0xc9, 0x2c,
            0xc9, 0x49, 0xb5, 0x33, 0x31, 0x30, 0x51, 0xf0, 0xcb, 0x2f, 0x51, 0x70, 0xcb, 0x2f,
            0xcd, 0x4b, 0xb1, 0xd1, 0x87, 0x08, 0xda, 0xe8, 0x83, 0x95, 0x00, 0x95, 0x26, 0xe5,
            0xa7, 0x54, 0x2a, 0x24, 0xa5, 0x27, 0xe7, 0xe7, 0xe4, 0x17, 0xd9, 0x2a, 0x95, 0x67,
            0x64, 0x96, 0xa4, 0x2a, 0x81, 0x8c, 0x48, 0x4e, 0xcd, 0x2b, 0x49, 0x2d, 0xb2, 0xb3,
            0xc9, 0x30, 0x44, 0x37, 0x01, 0x28, 0x62, 0xa3, 0x0f, 0x95, 0x06, 0xd9, 0x05, 0x54,
            0x04, 0xe5, 0xe5, 0xa5, 0x67, 0xe6, 0x55, 0xe8, 0x1b, 0xea, 0x99, 0xe9, 0x19, 0x21,
            0xab, 0xd0, 0x07, 0xd9, 0x01, 0x32, 0x53, 0x1f, 0xea, 0x3e, 0x00, 0x94, 0x85, 0xeb,
            0xe4, 0xa8, 0x00, 0x00, 0x00,
        ];

        let mut decoded = Vec::with_capacity(data.len() * 2);

        let mut d = Decompress::new(false);
        // decompressed whole deflate stream (skipping the 10-byte gzip header)
        d.decompress_vec(&data[10..], &mut decoded, FlushDecompress::Finish)
            .unwrap();

        // decompress data that has nothing to do with the deflate stream (this
        // used to panic)
        drop(d.decompress_vec(&[0], &mut decoded, FlushDecompress::None));
    }

    #[test]
    fn reset() {
        let string = "hello world".as_bytes();
        let mut zlib = Vec::new();
        let mut deflate = Vec::new();

        let comp = Compression::default();
        write::ZlibEncoder::new(&mut zlib, comp)
            .write_all(string)
            .unwrap();
        write::DeflateEncoder::new(&mut deflate, comp)
            .write_all(string)
            .unwrap();

        // First decode the zlib-framed stream, then reset and decode the raw
        // deflate stream with the same decoder instance.
        let mut dst = [0; 1024];
        let mut decoder = Decompress::new(true);
        decoder
            .decompress(&zlib, &mut dst, FlushDecompress::Finish)
            .unwrap();
        assert_eq!(decoder.total_out(), string.len() as u64);
        assert!(dst.starts_with(string));

        decoder.reset(false);
        decoder
            .decompress(&deflate, &mut dst, FlushDecompress::Finish)
            .unwrap();
        assert_eq!(decoder.total_out(), string.len() as u64);
        assert!(dst.starts_with(string));
    }

    #[cfg(feature = "any_zlib")]
    #[test]
    fn test_gzip_flate() {
        let string = "hello, hello!".as_bytes();

        let mut encoded = Vec::with_capacity(1024);

        let mut encoder = Compress::new_gzip(Compression::default(), 9);

        encoder
            .compress_vec(string, &mut encoded, FlushCompress::Finish)
            .unwrap();

        assert_eq!(encoder.total_in(), string.len() as u64);
        assert_eq!(encoder.total_out(), encoded.len() as u64);

        let mut decoder = Decompress::new_gzip(9);

        let mut decoded = [0; 1024];
        decoder
            .decompress(&encoded, &mut decoded, FlushDecompress::Finish)
            .unwrap();

        assert_eq!(&decoded[..decoder.total_out() as usize], string);
    }

    #[cfg(feature = "any_zlib")]
    #[test]
    fn test_error_message() {
        let mut decoder = Decompress::new(false);
        let mut decoded = [0; 128];
        let garbage = b"xbvxzi";

        let err = decoder
            .decompress(garbage, &mut decoded, FlushDecompress::Finish)
            .unwrap_err();

        assert_eq!(err.message(), Some("invalid stored block lengths"));
    }

    // Shared driver: compress two identical incompressible blocks with the
    // given flush type between them, returning the full compressed output.
    fn compress_with_flush(flush: FlushCompress) -> Vec<u8> {
        let incompressible = (0..=255).collect::<Vec<u8>>();
        let mut output = vec![0; 1024];

        // Feed in the incompressible data followed by the indicated flush type.
        let mut w = Compress::new(Compression::default(), false);
        w.compress(&incompressible, &mut output, flush).unwrap();

        if flush != FlushCompress::None {
            // The first instance of incompressible input should have been written uncompressed.
            assert!(w.total_out() >= 261);
            assert_eq!(&output[0..5], &[0, 0, 1, 0xff, !1]);
            assert_eq!(&output[5..261], &incompressible);
        }

        // Feed in the same data again.
        let len = w.total_out() as usize;
        w.compress(&incompressible, &mut output[len..], FlushCompress::Finish)
            .unwrap();

        if flush != FlushCompress::Full {
            // This time, the data should have been compressed (because it is an exact duplicate of
            // the earlier block).
            assert!(w.total_out() < 300);
        }

        // Assert that all input has been processed.
        assert_eq!(w.total_in(), 256 * 2);

        output.resize(w.total_out() as usize, 0);
        output
    }

    #[test]
    fn test_partial_flush() {
        let output = compress_with_flush(FlushCompress::Partial);

        // Check for partial flush marker.
        assert_eq!(output[261], 0x2);
        assert_eq!(output[262] & 0x7, 0x4);
    }

    #[test]
    fn test_sync_flush() {
        let output = compress_with_flush(FlushCompress::Sync);

        // Check for sync flush marker.
        assert_eq!(&output[261..][..5], &[0, 0, 0, 0xff, 0xff]);
    }

    #[test]
    fn test_full_flush() {
        let output = compress_with_flush(FlushCompress::Full);
        assert_eq!(output.len(), 527);

        // Check for sync flush marker.
        assert_eq!(&output[261..][..5], &[0, 0, 0, 0xff, 0xff]);

        // Check that the second instance of incompressible input was also written uncompressed.
        assert_eq!(&output[266..][..5], &[1, 0, 1, 0xff, !1]);
    }
}