zlib_rs/deflate.rs

#![warn(unsafe_op_in_unsafe_fn)]
use core::{ffi::CStr, marker::PhantomData, mem::MaybeUninit, ops::ControlFlow};

use crate::{
    adler32::adler32,
    allocate::Allocator,
    c_api::{gz_header, internal_state, z_checksum, z_stream},
    crc32::{crc32, Crc32Fold},
    read_buf::ReadBuf,
    trace,
    weak_slice::{WeakArrayMut, WeakSliceMut},
    DeflateFlush, ReturnCode, ADLER32_INITIAL_VALUE, CRC32_INITIAL_VALUE, MAX_WBITS, MIN_WBITS,
};

use self::{
    algorithm::CONFIGURATION_TABLE,
    hash_calc::{HashCalcVariant, RollHashCalc, StandardHashCalc},
    pending::Pending,
    trees_tbl::STATIC_LTREE,
    window::Window,
};

mod algorithm;
mod compare256;
mod hash_calc;
mod longest_match;
mod pending;
mod slide_hash;
mod trees_tbl;
mod window;

// Position relative to the current window
pub(crate) type Pos = u16;

// SAFETY: This struct must have the same layout as [`z_stream`], so that casts and transmutations
// between the two can work without UB.
#[repr(C)]
pub struct DeflateStream<'a> {
    pub(crate) next_in: *mut crate::c_api::Bytef,
    pub(crate) avail_in: crate::c_api::uInt,
    pub(crate) total_in: crate::c_api::z_size,
    pub(crate) next_out: *mut crate::c_api::Bytef,
    pub(crate) avail_out: crate::c_api::uInt,
    pub(crate) total_out: crate::c_api::z_size,
    pub(crate) msg: *const core::ffi::c_char,
    pub(crate) state: &'a mut State<'a>,
    pub(crate) alloc: Allocator<'a>,
    pub(crate) data_type: core::ffi::c_int,
    pub(crate) adler: crate::c_api::z_checksum,
    pub(crate) reserved: crate::c_api::uLong,
}

impl<'a> DeflateStream<'a> {
    // z_stream and DeflateStream must have the same layout. Do our best to check if this is true.
    // (imperfect check, but should catch most mistakes.)
    const _S: () = assert!(core::mem::size_of::<z_stream>() == core::mem::size_of::<Self>());
    const _A: () = assert!(core::mem::align_of::<z_stream>() == core::mem::align_of::<Self>());

    /// # Safety
    ///
    /// Behavior is undefined if any of the following conditions are violated:
    ///
    /// - `strm` satisfies the conditions of [`pointer::as_mut`]
    /// - if not `NULL`, `strm` was initialized using [`init`] or similar
    ///
    /// [`pointer::as_mut`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.as_mut
    #[inline(always)]
    pub unsafe fn from_stream_mut(strm: *mut z_stream) -> Option<&'a mut Self> {
        {
            // Safety: ptr points to a valid value of type z_stream (if non-null)
            let stream = unsafe { strm.as_ref() }?;

            if stream.zalloc.is_none() || stream.zfree.is_none() {
                return None;
            }

            if stream.state.is_null() {
                return None;
            }
        }

        // SAFETY: DeflateStream has an equivalent layout as z_stream
        unsafe { strm.cast::<DeflateStream>().as_mut() }
    }

    /// # Safety
    ///
    /// Behavior is undefined if any of the following conditions are violated:
    ///
    /// - `strm` satisfies the conditions of [`pointer::as_ref`]
    /// - if not `NULL`, `strm` was initialized using [`init`] or similar
    ///
    /// [`pointer::as_ref`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.as_ref
    #[inline(always)]
    pub unsafe fn from_stream_ref(strm: *const z_stream) -> Option<&'a Self> {
        {
            // Safety: ptr points to a valid value of type z_stream (if non-null)
            let stream = unsafe { strm.as_ref() }?;

            if stream.zalloc.is_none() || stream.zfree.is_none() {
                return None;
            }

            if stream.state.is_null() {
                return None;
            }
        }

        // SAFETY: DeflateStream has an equivalent layout as z_stream
        unsafe { strm.cast::<DeflateStream>().as_ref() }
    }

    fn as_z_stream_mut(&mut self) -> &mut z_stream {
        // SAFETY: a valid &mut DeflateStream is also a valid &mut z_stream
        unsafe { &mut *(self as *mut DeflateStream as *mut z_stream) }
    }

    pub fn pending(&self) -> (usize, u8) {
        (
            self.state.bit_writer.pending.pending,
            self.state.bit_writer.bits_used,
        )
    }
}
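
// The `#[repr(C)]` layout equivalence above is what makes `from_stream_mut` sound: a
// `*mut z_stream` handed over the FFI boundary can be reinterpreted as a `*mut DeflateStream`
// without copying. A minimal sketch of that round trip (hypothetical test code, assuming
// `z_stream` implements `Default` and a default allocator feature is enabled):
//
//     let mut strm = z_stream::default();
//     assert_eq!(init(&mut strm, DeflateConfig::default()), ReturnCode::Ok);
//     // SAFETY: `strm` was initialized by `init`, so the cast in `from_stream_mut` is valid.
//     let stream = unsafe { DeflateStream::from_stream_mut(&mut strm) }.unwrap();
//     assert_eq!(stream.pending(), (0, 0)); // nothing buffered yet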

/// number of elements in hash table
pub(crate) const HASH_SIZE: usize = 65536;
/// log2(HASH_SIZE)
const HASH_BITS: usize = 16;

/// Maximum value for memLevel in deflateInit2
const MAX_MEM_LEVEL: i32 = 9;
const DEF_MEM_LEVEL: i32 = if MAX_MEM_LEVEL > 8 { 8 } else { MAX_MEM_LEVEL };

#[repr(i32)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)]
#[cfg_attr(feature = "__internal-fuzz", derive(arbitrary::Arbitrary))]
pub enum Method {
    #[default]
    Deflated = 8,
}

impl TryFrom<i32> for Method {
    type Error = ();

    fn try_from(value: i32) -> Result<Self, Self::Error> {
        match value {
            8 => Ok(Self::Deflated),
            _ => Err(()),
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "__internal-fuzz", derive(arbitrary::Arbitrary))]
pub struct DeflateConfig {
    pub level: i32,
    pub method: Method,
    pub window_bits: i32,
    pub mem_level: i32,
    pub strategy: Strategy,
}

#[cfg(any(test, feature = "__internal-test"))]
impl quickcheck::Arbitrary for DeflateConfig {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        let mem_levels: Vec<_> = (1..=9).collect();
        let levels: Vec<_> = (0..=9).collect();

        let mut window_bits = Vec::new();
        window_bits.extend(9..=15); // zlib
        window_bits.extend(9 + 16..=15 + 16); // gzip
        window_bits.extend(-15..=-9); // raw

        Self {
            level: *g.choose(&levels).unwrap(),
            method: Method::Deflated,
            window_bits: *g.choose(&window_bits).unwrap(),
            mem_level: *g.choose(&mem_levels).unwrap(),
            strategy: *g
                .choose(&[
                    Strategy::Default,
                    Strategy::Filtered,
                    Strategy::HuffmanOnly,
                    Strategy::Rle,
                    Strategy::Fixed,
                ])
                .unwrap(),
        }
    }
}

impl DeflateConfig {
    pub fn new(level: i32) -> Self {
        Self {
            level,
            ..Self::default()
        }
    }
}

impl Default for DeflateConfig {
    fn default() -> Self {
        Self {
            level: crate::c_api::Z_DEFAULT_COMPRESSION,
            method: Method::Deflated,
            window_bits: MAX_WBITS,
            mem_level: DEF_MEM_LEVEL,
            strategy: Strategy::Default,
        }
    }
}
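
// A config is a plain value; `window_bits` selects the wrapper format just like zlib's
// `deflateInit2`. A small sketch (the `31` for gzip is `15 + 16`, as decoded below in `init`):
//
//     let zlib = DeflateConfig::new(6); // window_bits 15 -> zlib wrapper
//     let gzip = DeflateConfig { window_bits: 31, ..DeflateConfig::default() };
//     let raw = DeflateConfig { window_bits: -15, ..DeflateConfig::default() };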

pub fn init(stream: &mut z_stream, config: DeflateConfig) -> ReturnCode {
    let DeflateConfig {
        mut level,
        method: _,
        mut window_bits,
        mem_level,
        strategy,
    } = config;

    /* Todo: ignore strm->next_in if we use it as window */
    stream.msg = core::ptr::null_mut();

    // For safety we must make sure that alloc and free are consistent.
    // This is a (slight) deviation from stock zlib: in this crate we pick the Rust
    // allocator as the default, but `libz-rs-sys` always explicitly sets an allocator,
    // and can configure the C allocator.
    #[cfg(feature = "rust-allocator")]
    if stream.zalloc.is_none() || stream.zfree.is_none() {
        stream.configure_default_rust_allocator()
    }

    #[cfg(feature = "c-allocator")]
    if stream.zalloc.is_none() || stream.zfree.is_none() {
        stream.configure_default_c_allocator()
    }

    if stream.zalloc.is_none() || stream.zfree.is_none() {
        return ReturnCode::StreamError;
    }

    if level == crate::c_api::Z_DEFAULT_COMPRESSION {
        level = 6;
    }

    let wrap = if window_bits < 0 {
        if window_bits < -MAX_WBITS {
            return ReturnCode::StreamError;
        }
        window_bits = -window_bits;

        0
    } else if window_bits > MAX_WBITS {
        window_bits -= 16;
        2
    } else {
        1
    };
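
    // In other words (example values, mirroring zlib's deflateInit2 conventions):
    //
    //     window_bits: -15 => wrap = 0 (raw deflate), window_bits becomes 15
    //     window_bits:  15 => wrap = 1 (zlib wrapper)
    //     window_bits:  31 => wrap = 2 (gzip wrapper), window_bits becomes 15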

    if (!(1..=MAX_MEM_LEVEL).contains(&mem_level))
        || !(MIN_WBITS..=MAX_WBITS).contains(&window_bits)
        || !(0..=9).contains(&level)
        || (window_bits == 8 && wrap != 1)
    {
        return ReturnCode::StreamError;
    }

    let window_bits = if window_bits == 8 {
        9 /* until 256-byte window bug fixed */
    } else {
        window_bits as usize
    };

    let alloc = Allocator {
        zalloc: stream.zalloc.unwrap(),
        zfree: stream.zfree.unwrap(),
        opaque: stream.opaque,
        _marker: PhantomData,
    };

    // allocated here to have the same order as zlib
    let Some(state_allocation) = alloc.allocate_raw::<State>() else {
        return ReturnCode::MemError;
    };

    let w_size = 1 << window_bits;
    let window = Window::new_in(&alloc, window_bits);

    let prev = alloc.allocate_slice_raw::<u16>(w_size);
    let head = alloc.allocate_raw::<[u16; HASH_SIZE]>();

    let lit_bufsize = 1 << (mem_level + 6); // 16K elements by default
    let pending = Pending::new_in(&alloc, 4 * lit_bufsize);

    // zlib-ng overlays the pending_buf and sym_buf. We cannot really do that safely
    let sym_buf = ReadBuf::new_in(&alloc, 3 * lit_bufsize);

    // if any allocation failed, clean up allocations that did succeed
    let (window, prev, head, pending, sym_buf) = match (window, prev, head, pending, sym_buf) {
        (Some(window), Some(prev), Some(head), Some(pending), Some(sym_buf)) => {
            (window, prev, head, pending, sym_buf)
        }
        (window, prev, head, pending, sym_buf) => {
            // SAFETY: these pointers/structures are discarded after deallocation.
            unsafe {
                if let Some(mut sym_buf) = sym_buf {
                    alloc.deallocate(sym_buf.as_mut_ptr(), sym_buf.capacity())
                }
                if let Some(mut pending) = pending {
                    pending.drop_in(&alloc);
                }
                if let Some(head) = head {
                    alloc.deallocate(head.as_ptr(), 1)
                }
                if let Some(prev) = prev {
                    alloc.deallocate(prev.as_ptr(), w_size)
                }
                if let Some(mut window) = window {
                    window.drop_in(&alloc);
                }

                alloc.deallocate(state_allocation.as_ptr(), 1);
            }

            return ReturnCode::MemError;
        }
    };

    // zero initialize the memory
    let prev = prev.as_ptr(); // FIXME: write_bytes is stable for NonNull since 1.80.0
    unsafe { prev.write_bytes(0, w_size) };
    let prev = unsafe { WeakSliceMut::from_raw_parts_mut(prev, w_size) };

    // zero out head's first element
    let head = head.as_ptr(); // FIXME: write_bytes is stable for NonNull since 1.80.0
    unsafe { head.write_bytes(0, 1) };
    let head = unsafe { WeakArrayMut::<u16, HASH_SIZE>::from_ptr(head) };

    let state = State {
        status: Status::Init,

        // window
        w_size,

        // allocated values
        window,
        prev,
        head,
        bit_writer: BitWriter::from_pending(pending),

        //
        lit_bufsize,

        //
        sym_buf,

        //
        level: level as i8, // set to zero again for testing?
        strategy,

        // these fields are not set explicitly at this point
        last_flush: 0,
        wrap,
        strstart: 0,
        block_start: 0,
        block_open: 0,
        window_size: 0,
        insert: 0,
        matches: 0,
        opt_len: 0,
        static_len: 0,
        lookahead: 0,
        ins_h: 0,
        max_chain_length: 0,
        max_lazy_match: 0,
        good_match: 0,
        nice_match: 0,

        //
        l_desc: TreeDesc::EMPTY,
        d_desc: TreeDesc::EMPTY,
        bl_desc: TreeDesc::EMPTY,

        //
        crc_fold: Crc32Fold::new(),
        gzhead: None,
        gzindex: 0,

        //
        match_start: 0,
        prev_match: 0,
        match_available: false,
        prev_length: 0,

        // just provide a valid default; gets set properly later
        hash_calc_variant: HashCalcVariant::Standard,
        _cache_line_0: (),
        _cache_line_1: (),
        _cache_line_2: (),
        _cache_line_3: (),
        _padding_0: [0; 16],
    };

    unsafe { state_allocation.as_ptr().write(state) }; // FIXME: write is stable for NonNull since 1.80.0

    stream.state = state_allocation.as_ptr() as *mut internal_state;

    let Some(stream) = (unsafe { DeflateStream::from_stream_mut(stream) }) else {
        if cfg!(debug_assertions) {
            unreachable!("we should have initialized the stream properly");
        }
        return ReturnCode::StreamError;
    };

    reset(stream)
}

pub fn params(stream: &mut DeflateStream, level: i32, strategy: Strategy) -> ReturnCode {
    let level = if level == crate::c_api::Z_DEFAULT_COMPRESSION {
        6
    } else {
        level
    };

    if !(0..=9).contains(&level) {
        return ReturnCode::StreamError;
    }

    let level = level as i8;

    let func = CONFIGURATION_TABLE[stream.state.level as usize].func;

    let state = &mut stream.state;

    // FIXME: use fn_addr_eq when it's available in our MSRV. The comparison returning false here
    // is not functionally incorrect, but would be inconsistent with zlib-ng.
    #[allow(unpredictable_function_pointer_comparisons)]
    if (strategy != state.strategy || func != CONFIGURATION_TABLE[level as usize].func)
        && state.last_flush != -2
    {
        // Flush the last buffer.
        let err = deflate(stream, DeflateFlush::Block);
        if err == ReturnCode::StreamError {
            return err;
        }

        let state = &mut stream.state;

        if stream.avail_in != 0
            || ((state.strstart as isize - state.block_start) + state.lookahead as isize) != 0
        {
            return ReturnCode::BufError;
        }
    }

    let state = &mut stream.state;

    if state.level != level {
        if state.level == 0 && state.matches != 0 {
            if state.matches == 1 {
                self::slide_hash::slide_hash(state);
            } else {
                state.head.as_mut_slice().fill(0);
            }
            state.matches = 0;
        }

        lm_set_level(state, level);
    }

    state.strategy = strategy;

    ReturnCode::Ok
}

pub fn set_dictionary(stream: &mut DeflateStream, mut dictionary: &[u8]) -> ReturnCode {
    let state = &mut stream.state;

    let wrap = state.wrap;

    if wrap == 2 || (wrap == 1 && state.status != Status::Init) || state.lookahead != 0 {
        return ReturnCode::StreamError;
    }

    // when using zlib wrappers, compute Adler-32 for provided dictionary
    if wrap == 1 {
        stream.adler = adler32(stream.adler as u32, dictionary) as z_checksum;
    }

    // avoid computing Adler-32 in read_buf
    state.wrap = 0;

    // if dictionary would fill window, just replace the history
    if dictionary.len() >= state.window.capacity() {
        if wrap == 0 {
            // clear the hash table
            state.head.as_mut_slice().fill(0);

            state.strstart = 0;
            state.block_start = 0;
            state.insert = 0;
        } else {
            /* already empty otherwise */
        }

        // use the tail
        dictionary = &dictionary[dictionary.len() - state.w_size..];
    }

    // insert dictionary into window and hash
    let avail = stream.avail_in;
    let next = stream.next_in;
    stream.avail_in = dictionary.len() as _;
    stream.next_in = dictionary.as_ptr() as *mut u8;
    fill_window(stream);

    while stream.state.lookahead >= STD_MIN_MATCH {
        let str = stream.state.strstart;
        let n = stream.state.lookahead - (STD_MIN_MATCH - 1);
        stream.state.insert_string(str, n);
        stream.state.strstart = str + n;
        stream.state.lookahead = STD_MIN_MATCH - 1;
        fill_window(stream);
    }

    let state = &mut stream.state;

    state.strstart += state.lookahead;
    state.block_start = state.strstart as _;
    state.insert = state.lookahead;
    state.lookahead = 0;
    state.prev_length = 0;
    state.match_available = false;

    // restore the state
    stream.next_in = next;
    stream.avail_in = avail;
    state.wrap = wrap;

    ReturnCode::Ok
}

pub fn prime(stream: &mut DeflateStream, mut bits: i32, value: i32) -> ReturnCode {
    // our logic actually supports up to 32 bits.
    debug_assert!(bits <= 16, "zlib only supports up to 16 bits here");

    let mut value64 = value as u64;

    let state = &mut stream.state;

    if bits < 0
        || bits > BitWriter::BIT_BUF_SIZE as i32
        || bits > (core::mem::size_of_val(&value) << 3) as i32
    {
        return ReturnCode::BufError;
    }

    let mut put;

    loop {
        put = BitWriter::BIT_BUF_SIZE - state.bit_writer.bits_used;
        let put = Ord::min(put as i32, bits);

        if state.bit_writer.bits_used == 0 {
            state.bit_writer.bit_buffer = value64;
        } else {
            state.bit_writer.bit_buffer |=
                (value64 & ((1 << put) - 1)) << state.bit_writer.bits_used;
        }

        state.bit_writer.bits_used += put as u8;
        state.bit_writer.flush_bits();
        value64 >>= put;
        bits -= put;

        if bits == 0 {
            break;
        }
    }

    ReturnCode::Ok
}
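
// `prime` mirrors zlib's `deflatePrime`: it pushes the `bits` low-order bits of `value` into
// the output stream ahead of anything deflate itself writes. A hypothetical usage sketch,
// assuming an already-initialized `stream`:
//
//     // insert the 3 low bits 0b101 before the next deflate output
//     assert_eq!(prime(stream, 3, 0b101), ReturnCode::Ok);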

pub fn copy<'a>(
    dest: &mut MaybeUninit<DeflateStream<'a>>,
    source: &mut DeflateStream<'a>,
) -> ReturnCode {
    // SAFETY: source and dest are both mutable references, so guaranteed not to overlap.
    // dest being a reference to maybe uninitialized memory makes a copy of 1 DeflateStream valid.
    unsafe {
        core::ptr::copy_nonoverlapping(source, dest.as_mut_ptr(), 1);
    }

    let alloc = &source.alloc;

    // allocated here to have the same order as zlib
    let Some(state_allocation) = alloc.allocate_raw::<State>() else {
        return ReturnCode::MemError;
    };

    let source_state = &source.state;

    let window = source_state.window.clone_in(alloc);

    let prev = alloc.allocate_slice_raw::<u16>(source_state.w_size);
    let head = alloc.allocate_raw::<[u16; HASH_SIZE]>();

    let pending = source_state.bit_writer.pending.clone_in(alloc);
    let sym_buf = source_state.sym_buf.clone_in(alloc);

    // if any allocation failed, clean up allocations that did succeed
    let (window, prev, head, pending, sym_buf) = match (window, prev, head, pending, sym_buf) {
        (Some(window), Some(prev), Some(head), Some(pending), Some(sym_buf)) => {
            (window, prev, head, pending, sym_buf)
        }
        (window, prev, head, pending, sym_buf) => {
            // SAFETY: this access is in-bounds
            let field_ptr = unsafe { core::ptr::addr_of_mut!((*dest.as_mut_ptr()).state) };
            unsafe { core::ptr::write(field_ptr as *mut *mut State, core::ptr::null_mut()) };

            // SAFETY: it is an assumption on DeflateStream that (de)allocation does not cause UB.
            unsafe {
                if let Some(mut sym_buf) = sym_buf {
                    alloc.deallocate(sym_buf.as_mut_ptr(), sym_buf.capacity())
                }
                if let Some(mut pending) = pending {
                    pending.drop_in(alloc);
                }
                if let Some(head) = head {
                    alloc.deallocate(head.as_ptr(), HASH_SIZE)
                }
                if let Some(prev) = prev {
                    alloc.deallocate(prev.as_ptr(), source_state.w_size)
                }
                if let Some(mut window) = window {
                    window.drop_in(alloc);
                }

                alloc.deallocate(state_allocation.as_ptr(), 1);
            }

            return ReturnCode::MemError;
        }
    };

    let prev = unsafe {
        let prev = prev.as_ptr();
        prev.copy_from_nonoverlapping(source_state.prev.as_ptr(), source_state.prev.len());
        WeakSliceMut::from_raw_parts_mut(prev, source_state.prev.len())
    };

    // FIXME: write_bytes is stable for NonNull since 1.80.0
    let head = unsafe {
        let head = head.as_ptr();
        head.write_bytes(0, 1);
        head.cast::<u16>().write(source_state.head.as_slice()[0]);
        WeakArrayMut::from_ptr(head)
    };

    let mut bit_writer = BitWriter::from_pending(pending);
    bit_writer.bits_used = source_state.bit_writer.bits_used;
    bit_writer.bit_buffer = source_state.bit_writer.bit_buffer;

    let dest_state = State {
        status: source_state.status,
        bit_writer,
        last_flush: source_state.last_flush,
        wrap: source_state.wrap,
        strategy: source_state.strategy,
        level: source_state.level,
        good_match: source_state.good_match,
        nice_match: source_state.nice_match,
        l_desc: source_state.l_desc.clone(),
        d_desc: source_state.d_desc.clone(),
        bl_desc: source_state.bl_desc.clone(),
        prev_match: source_state.prev_match,
        match_available: source_state.match_available,
        strstart: source_state.strstart,
        match_start: source_state.match_start,
        prev_length: source_state.prev_length,
        max_chain_length: source_state.max_chain_length,
        max_lazy_match: source_state.max_lazy_match,
        block_start: source_state.block_start,
        block_open: source_state.block_open,
        window,
        sym_buf,
        lit_bufsize: source_state.lit_bufsize,
        window_size: source_state.window_size,
        matches: source_state.matches,
        opt_len: source_state.opt_len,
        static_len: source_state.static_len,
        insert: source_state.insert,
        w_size: source_state.w_size,
        lookahead: source_state.lookahead,
        prev,
        head,
        ins_h: source_state.ins_h,
        hash_calc_variant: source_state.hash_calc_variant,
        crc_fold: source_state.crc_fold,
        gzhead: None,
        gzindex: source_state.gzindex,
        _cache_line_0: (),
        _cache_line_1: (),
        _cache_line_2: (),
        _cache_line_3: (),
        _padding_0: source_state._padding_0,
    };

    // write the cloned state into state_ptr
    unsafe { state_allocation.as_ptr().write(dest_state) }; // FIXME: write is stable for NonNull since 1.80.0

    // insert the state_ptr into `dest`
    let field_ptr = unsafe { core::ptr::addr_of_mut!((*dest.as_mut_ptr()).state) };
    unsafe { core::ptr::write(field_ptr as *mut *mut State, state_allocation.as_ptr()) };

    // update the gzhead field (it contains a mutable reference, so we need to be careful)
    let field_ptr = unsafe { core::ptr::addr_of_mut!((*dest.as_mut_ptr()).state.gzhead) };
    unsafe { core::ptr::copy(&source_state.gzhead, field_ptr, 1) };

    ReturnCode::Ok
}

/// # Returns
///
/// - Err when deflate is not done. A common cause is insufficient output space
/// - Ok otherwise
pub fn end<'a>(stream: &'a mut DeflateStream) -> Result<&'a mut z_stream, &'a mut z_stream> {
    let status = stream.state.status;

    let alloc = stream.alloc;

    // deallocate in reverse order of allocations
    unsafe {
        // SAFETY: we make sure that these fields are not used (by invalidating the state pointer)
        stream.state.sym_buf.drop_in(&alloc);
        stream.state.bit_writer.pending.drop_in(&alloc);
        alloc.deallocate(stream.state.head.as_mut_ptr(), 1);
        if !stream.state.prev.is_empty() {
            alloc.deallocate(stream.state.prev.as_mut_ptr(), stream.state.prev.len());
        }
        stream.state.window.drop_in(&alloc);
    }

    let stream = stream.as_z_stream_mut();
    let state = core::mem::replace(&mut stream.state, core::ptr::null_mut());

    // SAFETY: `state` is not used later
    unsafe {
        alloc.deallocate(state as *mut State, 1);
    }

    match status {
        Status::Busy => Err(stream),
        _ => Ok(stream),
    }
}

pub fn reset(stream: &mut DeflateStream) -> ReturnCode {
    let ret = reset_keep(stream);

    if ret == ReturnCode::Ok {
        lm_init(stream.state);
    }

    ret
}

fn reset_keep(stream: &mut DeflateStream) -> ReturnCode {
    stream.total_in = 0;
    stream.total_out = 0;
    stream.msg = core::ptr::null_mut();
    stream.data_type = crate::c_api::Z_UNKNOWN;

    let state = &mut stream.state;

    state.bit_writer.pending.reset_keep();

    // can be made negative by deflate(..., Z_FINISH);
    state.wrap = state.wrap.abs();

    state.status = match state.wrap {
        2 => Status::GZip,
        _ => Status::Init,
    };

    stream.adler = match state.wrap {
        2 => {
            state.crc_fold = Crc32Fold::new();
            CRC32_INITIAL_VALUE as _
        }
        _ => ADLER32_INITIAL_VALUE as _,
    };

    state.last_flush = -2;

    state.zng_tr_init();

    ReturnCode::Ok
}

fn lm_init(state: &mut State) {
    state.window_size = 2 * state.w_size;

    // zlib uses CLEAR_HASH here
    state.head.as_mut_slice().fill(0);

    // Set the default configuration parameters:
    lm_set_level(state, state.level);

    state.strstart = 0;
    state.block_start = 0;
    state.lookahead = 0;
    state.insert = 0;
    state.prev_length = 0;
    state.match_available = false;
    state.match_start = 0;
    state.ins_h = 0;
}

fn lm_set_level(state: &mut State, level: i8) {
    state.max_lazy_match = CONFIGURATION_TABLE[level as usize].max_lazy;
    state.good_match = CONFIGURATION_TABLE[level as usize].good_length;
    state.nice_match = CONFIGURATION_TABLE[level as usize].nice_length;
    state.max_chain_length = CONFIGURATION_TABLE[level as usize].max_chain;

    state.hash_calc_variant = HashCalcVariant::for_max_chain_length(state.max_chain_length);
    state.level = level;
}

pub fn tune(
    stream: &mut DeflateStream,
    good_length: usize,
    max_lazy: usize,
    nice_length: usize,
    max_chain: usize,
) -> ReturnCode {
    stream.state.good_match = good_length as u16;
    stream.state.max_lazy_match = max_lazy as u16;
    stream.state.nice_match = nice_length as u16;
    stream.state.max_chain_length = max_chain as u16;

    ReturnCode::Ok
}

#[repr(C)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) struct Value {
    a: u16,
    b: u16,
}

impl Value {
    pub(crate) const fn new(a: u16, b: u16) -> Self {
        Self { a, b }
    }

    pub(crate) fn freq_mut(&mut self) -> &mut u16 {
        &mut self.a
    }

    pub(crate) fn code_mut(&mut self) -> &mut u16 {
        &mut self.a
    }

    pub(crate) fn dad_mut(&mut self) -> &mut u16 {
        &mut self.b
    }

    pub(crate) fn len_mut(&mut self) -> &mut u16 {
        &mut self.b
    }

    #[inline(always)]
    pub(crate) const fn freq(self) -> u16 {
        self.a
    }

    #[inline(always)]
    pub(crate) const fn code(self) -> u16 {
        self.a
    }

    #[inline(always)]
    pub(crate) const fn dad(self) -> u16 {
        self.b
    }

    #[inline(always)]
    pub(crate) const fn len(self) -> u16 {
        self.b
    }
}
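
// `Value` plays the role of zlib's `ct_data`: field `a` holds the symbol frequency while the
// Huffman tree is being built and is then overwritten with the assigned code; field `b` is
// likewise reused, first as the parent ("dad") index, then as the code length. A small sketch:
//
//     let mut v = Value::new(0, 0);
//     *v.freq_mut() += 1;      // counting phase: `a` is a frequency
//     *v.code_mut() = 0b101;   // after tree construction: `a` is reused for the code
//     *v.len_mut() = 3;        // ... and `b` for its bit length
//     assert_eq!((v.code(), v.len()), (0b101, 3));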

/// number of length codes, not counting the special END_BLOCK code
pub(crate) const LENGTH_CODES: usize = 29;

/// number of literal bytes 0..255
const LITERALS: usize = 256;

/// number of Literal or Length codes, including the END_BLOCK code
pub(crate) const L_CODES: usize = LITERALS + 1 + LENGTH_CODES;

/// number of distance codes
pub(crate) const D_CODES: usize = 30;

/// number of codes used to transfer the bit lengths
const BL_CODES: usize = 19;

/// maximum heap size
const HEAP_SIZE: usize = 2 * L_CODES + 1;

/// all codes must not exceed MAX_BITS bits
const MAX_BITS: usize = 15;

/// Bit length codes must not exceed MAX_BL_BITS bits
const MAX_BL_BITS: usize = 7;

pub(crate) const DIST_CODE_LEN: usize = 512;

struct BitWriter<'a> {
    pub(crate) pending: Pending<'a>, // output still pending
    pub(crate) bit_buffer: u64,
    pub(crate) bits_used: u8,

    /// total bit length of compressed file (NOTE: zlib-ng uses a 32-bit integer here)
    #[cfg(feature = "ZLIB_DEBUG")]
    compressed_len: usize,
    /// bit length of compressed data sent (NOTE: zlib-ng uses a 32-bit integer here)
    #[cfg(feature = "ZLIB_DEBUG")]
    bits_sent: usize,
}

#[inline]
const fn encode_len(ltree: &[Value], lc: u8) -> (u64, usize) {
    let mut lc = lc as usize;

    /* Send the length code, len is the match length - STD_MIN_MATCH */
    let code = self::trees_tbl::LENGTH_CODE[lc] as usize;
    let c = code + LITERALS + 1;
    assert!(c < L_CODES, "bad l_code");
    // send_code_trace(s, c);

    let lnode = ltree[c];
    let mut match_bits: u64 = lnode.code() as u64;
    let mut match_bits_len = lnode.len() as usize;
    let extra = StaticTreeDesc::EXTRA_LBITS[code] as usize;
    if extra != 0 {
        lc -= self::trees_tbl::BASE_LENGTH[code] as usize;
        match_bits |= (lc as u64) << match_bits_len;
        match_bits_len += extra;
    }

    (match_bits, match_bits_len)
}

#[inline]
const fn encode_dist(dtree: &[Value], mut dist: u16) -> (u64, usize) {
    dist -= 1; /* dist is now the match distance - 1 */
    let code = State::d_code(dist as usize) as usize;
    assert!(code < D_CODES, "bad d_code");
    // send_code_trace(s, code);

    /* Send the distance code */
    let dnode = dtree[code];
    let mut match_bits = dnode.code() as u64;
    let mut match_bits_len = dnode.len() as usize;
    let extra = StaticTreeDesc::EXTRA_DBITS[code] as usize;
    if extra != 0 {
        dist -= self::trees_tbl::BASE_DIST[code];
        match_bits |= (dist as u64) << match_bits_len;
        match_bits_len += extra;
    }

    (match_bits, match_bits_len)
}

impl<'a> BitWriter<'a> {
    pub(crate) const BIT_BUF_SIZE: u8 = 64;

    fn from_pending(pending: Pending<'a>) -> Self {
        Self {
            pending,
            bit_buffer: 0,
            bits_used: 0,

            #[cfg(feature = "ZLIB_DEBUG")]
            compressed_len: 0,
            #[cfg(feature = "ZLIB_DEBUG")]
            bits_sent: 0,
        }
    }

    fn flush_bits(&mut self) {
        debug_assert!(self.bits_used <= 64);
        let removed = self.bits_used.saturating_sub(7).next_multiple_of(8);
        let keep_bytes = self.bits_used / 8; // can never divide by zero

        let src = &self.bit_buffer.to_le_bytes();
        self.pending.extend(&src[..keep_bytes as usize]);

        self.bits_used -= removed;
        self.bit_buffer = self.bit_buffer.checked_shr(removed as u32).unwrap_or(0);
    }
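
    // A worked example of `flush_bits`: with `bits_used == 45`, `removed` is
    // `(45 - 7).next_multiple_of(8) == 40` and `keep_bytes` is `45 / 8 == 5`, so the 5
    // complete little-endian bytes (40 bits) move into `pending`, and the 5 leftover bits
    // stay at the bottom of `bit_buffer` with `bits_used == 5`.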

    fn emit_align(&mut self) {
        debug_assert!(self.bits_used <= 64);
        let keep_bytes = self.bits_used.div_ceil(8);
        let src = &self.bit_buffer.to_le_bytes();
        self.pending.extend(&src[..keep_bytes as usize]);

        self.bits_used = 0;
        self.bit_buffer = 0;

        self.sent_bits_align();
    }

    fn send_bits_trace(&self, _value: u64, _len: u8) {
        trace!(" l {:>2} v {:>4x} ", _len, _value);
    }

    fn cmpr_bits_add(&mut self, _len: usize) {
        #[cfg(feature = "ZLIB_DEBUG")]
        {
            self.compressed_len += _len;
        }
    }

    fn cmpr_bits_align(&mut self) {
        #[cfg(feature = "ZLIB_DEBUG")]
        {
            self.compressed_len = self.compressed_len.next_multiple_of(8);
        }
    }

    fn sent_bits_add(&mut self, _len: usize) {
        #[cfg(feature = "ZLIB_DEBUG")]
        {
            self.bits_sent += _len;
        }
    }

    fn sent_bits_align(&mut self) {
        #[cfg(feature = "ZLIB_DEBUG")]
        {
            self.bits_sent = self.bits_sent.next_multiple_of(8);
        }
    }

    #[inline(always)]
    fn send_bits(&mut self, val: u64, len: u8) {
        debug_assert!(len <= 64);
        debug_assert!(self.bits_used <= 64);

        let total_bits = len + self.bits_used;

        self.send_bits_trace(val, len);
        self.sent_bits_add(len as usize);

        if total_bits < Self::BIT_BUF_SIZE {
            self.bit_buffer |= val << self.bits_used;
            self.bits_used = total_bits;
        } else {
            self.send_bits_overflow(val, total_bits);
        }
    }
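
    // Bits are packed LSB-first, as the deflate format requires. For example, after
    // `send_bits(0b1, 1)` followed by `send_bits(0b10, 2)`, the buffer holds `0b101` with
    // `bits_used == 3`: the second value lands above the first.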

    fn send_bits_overflow(&mut self, val: u64, total_bits: u8) {
        if self.bits_used == Self::BIT_BUF_SIZE {
            self.pending.extend(&self.bit_buffer.to_le_bytes());
            self.bit_buffer = val;
            self.bits_used = total_bits - Self::BIT_BUF_SIZE;
        } else {
            self.bit_buffer |= val << self.bits_used;
            self.pending.extend(&self.bit_buffer.to_le_bytes());
            self.bit_buffer = val >> (Self::BIT_BUF_SIZE - self.bits_used);
            self.bits_used = total_bits - Self::BIT_BUF_SIZE;
        }
    }

    fn send_code(&mut self, code: usize, tree: &[Value]) {
        let node = tree[code];
        self.send_bits(node.code() as u64, node.len() as u8)
    }

    /// Send one empty static block to give enough lookahead for inflate.
    /// This takes 10 bits, of which 7 may remain in the bit buffer.
    pub fn align(&mut self) {
        self.emit_tree(BlockType::StaticTrees, false);
        self.emit_end_block(&STATIC_LTREE, false);
        self.flush_bits();
    }

    pub(crate) fn emit_tree(&mut self, block_type: BlockType, is_last_block: bool) {
        let header_bits = ((block_type as u64) << 1) | (is_last_block as u64);
        self.send_bits(header_bits, 3);
        trace!("\n--- Emit Tree: Last: {}\n", is_last_block as u8);
    }

    pub(crate) fn emit_end_block_and_align(&mut self, ltree: &[Value], is_last_block: bool) {
        self.emit_end_block(ltree, is_last_block);

        if is_last_block {
            self.emit_align();
        }
    }

    fn emit_end_block(&mut self, ltree: &[Value], _is_last_block: bool) {
        const END_BLOCK: usize = 256;
        self.send_code(END_BLOCK, ltree);

        trace!(
            "\n+++ Emit End Block: Last: {} Pending: {} Total Out: {}\n",
            _is_last_block as u8,
            self.pending.pending().len(),
            "<unknown>"
        );
    }

    pub(crate) fn emit_lit(&mut self, ltree: &[Value], c: u8) -> u16 {
        self.send_code(c as usize, ltree);

        #[cfg(feature = "ZLIB_DEBUG")]
        if let Some(c) = char::from_u32(c as u32) {
            if isgraph(c as u8) {
                trace!(" '{}' ", c);
            }
        }

        ltree[c as usize].len()
    }

    pub(crate) fn emit_dist(
        &mut self,
        ltree: &[Value],
        dtree: &[Value],
        lc: u8,
        dist: u16,
    ) -> usize {
        let (mut match_bits, mut match_bits_len) = encode_len(ltree, lc);

        let (dist_match_bits, dist_match_bits_len) = encode_dist(dtree, dist);

        match_bits |= dist_match_bits << match_bits_len;
        match_bits_len += dist_match_bits_len;

        self.send_bits(match_bits, match_bits_len as u8);

        match_bits_len
    }

    pub(crate) fn emit_dist_static(&mut self, lc: u8, dist: u16) -> usize {
        let precomputed_len = trees_tbl::STATIC_LTREE_ENCODINGS[lc as usize];
        let mut match_bits = precomputed_len.code() as u64;
        let mut match_bits_len = precomputed_len.len() as usize;

        let dtree = self::trees_tbl::STATIC_DTREE.as_slice();
        let (dist_match_bits, dist_match_bits_len) = encode_dist(dtree, dist);

        match_bits |= dist_match_bits << match_bits_len;
        match_bits_len += dist_match_bits_len;

        self.send_bits(match_bits, match_bits_len as u8);

        match_bits_len
    }

    fn compress_block_help(&mut self, sym_buf: &[u8], ltree: &[Value], dtree: &[Value]) {
        for chunk in sym_buf.chunks_exact(3) {
            let [dist_low, dist_high, lc] = *chunk else {
                unreachable!("out of bound access on the symbol buffer");
            };

            match u16::from_le_bytes([dist_low, dist_high]) {
                0 => self.emit_lit(ltree, lc) as usize,
                dist => self.emit_dist(ltree, dtree, lc, dist),
            };
        }

        self.emit_end_block(ltree, false)
    }
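
    // Each symbol occupies 3 bytes in `sym_buf`: a little-endian u16 distance followed by one
    // byte that is either a literal (when the distance is 0) or the match length minus
    // STD_MIN_MATCH. For example, the literal b'a' is stored as [0, 0, 97], and a match with
    // distance 1 and length 3 as [1, 0, 0].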

    fn send_tree(&mut self, tree: &[Value], bl_tree: &[Value], max_code: usize) {
        /* tree: the tree to be scanned */
        /* max_code: the largest code of non-zero frequency */
        let mut prevlen: isize = -1; /* last emitted length */
        let mut curlen; /* length of current code */
        let mut nextlen = tree[0].len(); /* length of next code */
        let mut count = 0; /* repeat count of the current code */
        let mut max_count = 7; /* max repeat count */
        let mut min_count = 4; /* min repeat count */

        /* tree[max_code+1].Len = -1; */
        /* guard already set */
        if nextlen == 0 {
            max_count = 138;
            min_count = 3;
        }

        for n in 0..=max_code {
            curlen = nextlen;
            nextlen = tree[n + 1].len();
            count += 1;
            if count < max_count && curlen == nextlen {
                continue;
            } else if count < min_count {
                loop {
                    self.send_code(curlen as usize, bl_tree);

                    count -= 1;
                    if count == 0 {
                        break;
                    }
                }
            } else if curlen != 0 {
                if curlen as isize != prevlen {
                    self.send_code(curlen as usize, bl_tree);
                    count -= 1;
                }
                assert!((3..=6).contains(&count), " 3_6?");
                self.send_code(REP_3_6, bl_tree);
                self.send_bits(count - 3, 2);
            } else if count <= 10 {
                self.send_code(REPZ_3_10, bl_tree);
                self.send_bits(count - 3, 3);
            } else {
                self.send_code(REPZ_11_138, bl_tree);
                self.send_bits(count - 11, 7);
            }

            count = 0;
            prevlen = curlen as isize;

            if nextlen == 0 {
                max_count = 138;
                min_count = 3;
            } else if curlen == nextlen {
                max_count = 6;
                min_count = 3;
            } else {
                max_count = 7;
                min_count = 4;
            }
        }
    }
}
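
// `send_tree` run-length encodes the sequence of code lengths, per the deflate spec: a run of
// equal non-zero lengths becomes the length itself followed by REP_3_6 (repeat 3..=6 times),
// and runs of zeros become REPZ_3_10 or REPZ_11_138. For example, the lengths
// [5, 5, 5, 5, 5, 0, 0, 0] are sent as: literal 5, then REP_3_6 with extra bits 4 - 3 = 1
// (four more 5s), then REPZ_3_10 with extra bits 3 - 3 = 0 (three zeros).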

#[repr(C, align(64))]
pub(crate) struct State<'a> {
    status: Status,

    last_flush: i8, /* value of flush param for previous deflate call */

    pub(crate) wrap: i8, /* bit 0 true for zlib, bit 1 true for gzip */

    pub(crate) strategy: Strategy,
    pub(crate) level: i8,

    /// Whether or not a block is currently open for the QUICK deflation scheme.
    /// 0 if the block is closed, 1 if there is an active block, or 2 if there
    /// is an active block and it is the last block.
    pub(crate) block_open: u8,

    pub(crate) hash_calc_variant: HashCalcVariant,

    pub(crate) match_available: bool, /* set if previous match exists */

    /// Use a faster search when the previous match is longer than this
    pub(crate) good_match: u16,

    /// Stop searching when current match exceeds this
    pub(crate) nice_match: u16,

    pub(crate) match_start: Pos, /* start of matching string */
    pub(crate) prev_match: Pos,  /* previous match */
    pub(crate) strstart: usize,  /* start of string to insert */

    pub(crate) window: Window<'a>,
    pub(crate) w_size: usize, /* LZ77 window size (32K by default) */

    pub(crate) lookahead: usize, /* number of valid bytes ahead in window */

    _cache_line_0: (),

    /// prev[N], where N is an offset in the current window, contains the offset in the window
    /// of the previous 4-byte sequence that hashes to the same value as the 4-byte sequence
    /// starting at N. Together with head, prev forms a chained hash table that can be used
    /// to find earlier strings in the window that are potential matches for new input being
    /// deflated.
    pub(crate) prev: WeakSliceMut<'a, u16>,
    /// head[H] contains the offset of the last 4-character sequence seen so far in
    /// the current window that hashes to H (as calculated using the hash_calc_variant).
    pub(crate) head: WeakArrayMut<'a, u16, HASH_SIZE>,

    /// Length of the best match at previous step. Matches not greater than this
    /// are discarded. This is used in the lazy match evaluation.
    pub(crate) prev_length: u16,

    /// To speed up deflation, hash chains are never searched beyond this length.
    /// A higher limit improves compression ratio but degrades the speed.
    pub(crate) max_chain_length: u16,

    // TODO untangle this mess! zlib uses the same field differently based on compression level
    // we should just have 2 fields for clarity!
    //
    // Insert new strings in the hash table only if the match length is not
    // greater than this length. This saves time but degrades compression.
    // max_insert_length is used only for compression levels <= 3.
    // define max_insert_length  max_lazy_match
    /// Attempt to find a better match only when the current match is strictly smaller
    /// than this value. This mechanism is used only for compression levels >= 4.
    pub(crate) max_lazy_match: u16,

    /// number of string matches in current block
    /// NOTE: this is a saturating 8-bit counter, to help keep the struct compact. The code that
    /// makes decisions based on this field only cares whether the count is greater than 2, so
    /// an 8-bit counter is sufficient.
    pub(crate) matches: u8,

    /// Window position at the beginning of the current output block. Gets
    /// negative when the window is moved backwards.
    pub(crate) block_start: isize,

    pub(crate) sym_buf: ReadBuf<'a>,

    _cache_line_1: (),

    /// Size of match buffer for literals/lengths.  There are 4 reasons for
    /// limiting lit_bufsize to 64K:
    ///   - frequencies can be kept in 16 bit counters
    ///   - if compression is not successful for the first block, all input
    ///     data is still in the window so we can still emit a stored block even
    ///     when input comes from standard input.  (This can also be done for
    ///     all blocks if lit_bufsize is not greater than 32K.)
    ///   - if compression is not successful for a file smaller than 64K, we can
    ///     even emit a stored file instead of a stored block (saving 5 bytes).
    ///     This is applicable only for zip (not gzip or zlib).
    ///   - creating new Huffman trees less frequently may not provide fast
    ///     adaptation to changes in the input data statistics. (Take for
    ///     example a binary file with poorly compressible code followed by
    ///     a highly compressible string table.) Smaller buffer sizes give
    ///     fast adaptation but have of course the overhead of transmitting
    ///     trees more frequently.
    ///   - I can't count above 4
    lit_bufsize: usize,

    /// Actual size of window: 2*w_size, except when the user input buffer is directly used as sliding window.
    pub(crate) window_size: usize,

    bit_writer: BitWriter<'a>,

    _cache_line_2: (),

    /// bit length of current block with optimal trees
    opt_len: usize,
    /// bit length of current block with static trees
    static_len: usize,

    /// bytes at end of window left to insert
    pub(crate) insert: usize,

    /// hash index of string to be inserted
    pub(crate) ins_h: u32,

    gzhead: Option<&'a mut gz_header>,
    gzindex: usize,

    _padding_0: [u8; 16],

    _cache_line_3: (),

    crc_fold: crate::crc32::Crc32Fold,

    l_desc: TreeDesc<HEAP_SIZE>,             /* literal and length tree */
    d_desc: TreeDesc<{ 2 * D_CODES + 1 }>,   /* distance tree */
    bl_desc: TreeDesc<{ 2 * BL_CODES + 1 }>, /* Huffman tree for bit lengths */
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Default)]
#[cfg_attr(feature = "__internal-fuzz", derive(arbitrary::Arbitrary))]
pub enum Strategy {
    #[default]
    Default = 0,
    Filtered = 1,
    HuffmanOnly = 2,
    Rle = 3,
    Fixed = 4,
}

impl TryFrom<i32> for Strategy {
    type Error = ();

    fn try_from(value: i32) -> Result<Self, Self::Error> {
        match value {
            0 => Ok(Strategy::Default),
            1 => Ok(Strategy::Filtered),
            2 => Ok(Strategy::HuffmanOnly),
            3 => Ok(Strategy::Rle),
            4 => Ok(Strategy::Fixed),
            _ => Err(()),
        }
    }
}

#[derive(Debug, PartialEq, Eq)]
enum DataType {
    Binary = 0,
    Text = 1,
    Unknown = 2,
}

impl<'a> State<'a> {
    pub const BIT_BUF_SIZE: u8 = BitWriter::BIT_BUF_SIZE;

    // log2(w_size) (in the range MIN_WBITS..=MAX_WBITS)
    pub(crate) fn w_bits(&self) -> u32 {
        self.w_size.trailing_zeros()
    }

    pub(crate) fn w_mask(&self) -> usize {
        self.w_size - 1
    }

    pub(crate) fn max_dist(&self) -> usize {
        self.w_size - MIN_LOOKAHEAD
    }

    // TODO untangle this mess! zlib uses the same field differently based on compression level
    // we should just have 2 fields for clarity!
    pub(crate) fn max_insert_length(&self) -> usize {
        self.max_lazy_match as usize
    }

    /// Total size of the pending buf. In zlib-ng, `pending` shares memory with `sym_buf`, so
    /// there this is not the number of bytes actually in `pending`; in this crate the two
    /// buffers are allocated separately (see `init`).
    pub(crate) fn pending_buf_size(&self) -> usize {
        self.lit_bufsize * 4
    }

    #[inline(always)]
    pub(crate) fn update_hash(&self, h: u32, val: u32) -> u32 {
        match self.hash_calc_variant {
            HashCalcVariant::Standard => StandardHashCalc::update_hash(h, val),
            HashCalcVariant::Roll => RollHashCalc::update_hash(h, val),
        }
    }

    #[inline(always)]
    pub(crate) fn quick_insert_string(&mut self, string: usize) -> u16 {
        match self.hash_calc_variant {
            HashCalcVariant::Standard => StandardHashCalc::quick_insert_string(self, string),
            HashCalcVariant::Roll => RollHashCalc::quick_insert_string(self, string),
        }
    }

    #[inline(always)]
    pub(crate) fn insert_string(&mut self, string: usize, count: usize) {
        match self.hash_calc_variant {
            HashCalcVariant::Standard => StandardHashCalc::insert_string(self, string, count),
            HashCalcVariant::Roll => RollHashCalc::insert_string(self, string, count),
        }
    }

    #[inline(always)]
    pub(crate) fn tally_lit(&mut self, unmatched: u8) -> bool {
        Self::tally_lit_help(&mut self.sym_buf, &mut self.l_desc, unmatched)
    }

    #[inline(always)]
    pub(crate) fn tally_lit_help(
        sym_buf: &mut ReadBuf<'a>,
        l_desc: &mut TreeDesc<HEAP_SIZE>,
        unmatched: u8,
    ) -> bool {
        sym_buf.push_lit(unmatched);

        *l_desc.dyn_tree[unmatched as usize].freq_mut() += 1;

        assert!(
            unmatched as usize <= STD_MAX_MATCH - STD_MIN_MATCH,
            "zng_tr_tally: bad literal"
        );

        // signal that the current block should be flushed
        sym_buf.len() == sym_buf.capacity() - 3
    }

    const fn d_code(dist: usize) -> u8 {
        let index = if dist < 256 { dist } else { 256 + (dist >> 7) };
        self::trees_tbl::DIST_CODE[index]
    }
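
    // `d_code` maps a distance (minus 1) to its distance code via the 512-entry lookup table:
    // distances below 256 index the table directly, larger ones are scaled down by 128.
    // For example, dist = 300 uses index 256 + (300 >> 7) = 258.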

    #[inline(always)]
    pub(crate) fn tally_dist(&mut self, mut dist: usize, len: usize) -> bool {
        self.sym_buf.push_dist(dist as u16, len as u8);

        self.matches = self.matches.saturating_add(1);
        dist -= 1;

        assert!(
            dist < self.max_dist() && Self::d_code(dist) < D_CODES as u8,
            "tally_dist: bad match"
        );

        let index = self::trees_tbl::LENGTH_CODE[len] as usize + LITERALS + 1;
        *self.l_desc.dyn_tree[index].freq_mut() += 1;

        *self.d_desc.dyn_tree[Self::d_code(dist) as usize].freq_mut() += 1;

        // signal that the current block should be flushed
        self.sym_buf.len() == self.sym_buf.capacity() - 3
    }

    fn detect_data_type(dyn_tree: &[Value]) -> DataType {
        // set bits 0..6, 14..25, and 28..31
        // 0xf3ffc07f = binary 11110011111111111100000001111111
        const NON_TEXT: u64 = 0xf3ffc07f;
        let mut mask = NON_TEXT;

        /* Check for non-textual bytes. */
        for value in &dyn_tree[0..32] {
            if (mask & 1) != 0 && value.freq() != 0 {
                return DataType::Binary;
            }

            mask >>= 1;
        }

        /* Check for textual bytes. */
        if dyn_tree[9].freq() != 0 || dyn_tree[10].freq() != 0 || dyn_tree[13].freq() != 0 {
            return DataType::Text;
        }

        if dyn_tree[32..LITERALS].iter().any(|v| v.freq() != 0) {
            return DataType::Text;
        }

        // there are no explicit text or non-text bytes. The stream is either empty or has only
        // tolerated bytes
        DataType::Binary
    }

    fn compress_block_static_trees(&mut self) {
        let ltree = self::trees_tbl::STATIC_LTREE.as_slice();
        for chunk in self.sym_buf.filled().chunks_exact(3) {
            let [dist_low, dist_high, lc] = *chunk else {
                unreachable!("out of bound access on the symbol buffer");
            };

            match u16::from_le_bytes([dist_low, dist_high]) {
                0 => self.bit_writer.emit_lit(ltree, lc) as usize,
                dist => self.bit_writer.emit_dist_static(lc, dist),
            };
        }

        self.bit_writer.emit_end_block(ltree, false)
    }

    fn compress_block_dynamic_trees(&mut self) {
        self.bit_writer.compress_block_help(
            self.sym_buf.filled(),
            &self.l_desc.dyn_tree,
            &self.d_desc.dyn_tree,
        );
    }

    fn header(&self) -> u16 {
        // preset dictionary flag in zlib header
        const PRESET_DICT: u16 = 0x20;

        // The deflate compression method (the only one supported in this version)
        const Z_DEFLATED: u16 = 8;

        let dict = match self.strstart {
            0 => 0,
            _ => PRESET_DICT,
        };

        let h = ((Z_DEFLATED + ((self.w_bits() as u16 - 8) << 4)) << 8)
            | (self.level_flags() << 6)
            | dict;

        h + 31 - (h % 31)
    }
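
    // Worked example: for level 6, w_bits 15, and no preset dictionary,
    // h = ((8 + (7 << 4)) << 8) | (2 << 6) = 0x7880; rounding up so the header is divisible
    // by 31 gives 0x7880 + 31 - (0x7880 % 31) = 0x789C, the familiar zlib header bytes 78 9c.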
1586
1587    fn level_flags(&self) -> u16 {
1588        if self.strategy >= Strategy::HuffmanOnly || self.level < 2 {
1589            0
1590        } else if self.level < 6 {
1591            1
1592        } else if self.level == 6 {
1593            2
1594        } else {
1595            3
1596        }
1597    }
1598
1599    fn zng_tr_init(&mut self) {
1600        self.l_desc.stat_desc = &StaticTreeDesc::L;
1601
1602        self.d_desc.stat_desc = &StaticTreeDesc::D;
1603
1604        self.bl_desc.stat_desc = &StaticTreeDesc::BL;
1605
1606        self.bit_writer.bit_buffer = 0;
1607        self.bit_writer.bits_used = 0;
1608
1609        #[cfg(feature = "ZLIB_DEBUG")]
1610        {
1611            self.bit_writer.compressed_len = 0;
1612            self.bit_writer.bits_sent = 0;
1613        }
1614
1615        // Initialize the first block of the first file:
1616        self.init_block();
1617    }
1618
1619    /// initializes a new block
1620    fn init_block(&mut self) {
1621        // Initialize the trees.
1622        // TODO would a memset also work here?
1623
1624        for value in &mut self.l_desc.dyn_tree[..L_CODES] {
1625            *value.freq_mut() = 0;
1626        }
1627
1628        for value in &mut self.d_desc.dyn_tree[..D_CODES] {
1629            *value.freq_mut() = 0;
1630        }
1631
1632        for value in &mut self.bl_desc.dyn_tree[..BL_CODES] {
1633            *value.freq_mut() = 0;
1634        }
1635
1636        // end of block literal code
1637        const END_BLOCK: usize = 256;
1638
1639        *self.l_desc.dyn_tree[END_BLOCK].freq_mut() = 1;
1640        self.opt_len = 0;
1641        self.static_len = 0;
1642        self.sym_buf.clear();
1643        self.matches = 0;
1644    }
1645}
1646
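/// The state machine of the deflate wrapper. A gzip stream steps through
/// GZip -> Extra -> Name -> Comment -> Hcrc while its header is written, before
/// reaching Busy; zlib and raw streams start in Init and move to Busy once the
/// (optional) zlib header has been written. Finish is entered once the caller
/// requests `DeflateFlush::Finish`.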
1647#[repr(u8)]
1648#[derive(Debug, Clone, Copy, PartialEq, Eq)]
1649enum Status {
1650    Init = 1,
1651
1652    GZip = 4,
1653    Extra = 5,
1654    Name = 6,
1655    Comment = 7,
1656    Hcrc = 8,
1657
1658    Busy = 2,
1659    Finish = 3,
1660}
1661
1662const fn rank_flush(f: i8) -> i8 {
1663    // rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH
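    // With the flush constants (NO_FLUSH = 0, PARTIAL = 1, SYNC = 2, FULL = 3,
    // FINISH = 4, BLOCK = 5), this maps 0 -> 0, 5 -> 1, 1 -> 2, 2 -> 4, 3 -> 6,
    // 4 -> 8, so only Z_BLOCK is moved out of numeric order.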
1664    (f * 2) - if f > 4 { 9 } else { 0 }
1665}
1666
1667#[derive(Debug)]
1668pub(crate) enum BlockState {
1669    /// block not completed, need more input or more output
1670    NeedMore = 0,
1671    /// block flush performed
1672    BlockDone = 1,
1673    /// finish started, need only more output at next deflate
1674    FinishStarted = 2,
1675    /// finish done, accept no more input or output
1676    FinishDone = 3,
1677}
1678
1679// Maximum stored block length in deflate format (not including header).
1680pub(crate) const MAX_STORED: usize = 65535; // i.e. u16::MAX
1681
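/// Copies up to `size` bytes from the stream input into the window at `offset`,
/// updating the running checksum as it goes (CRC32 when writing a gzip wrapper,
/// Adler-32 when writing a zlib wrapper). Returns the number of bytes consumed.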
1682pub(crate) fn read_buf_window(stream: &mut DeflateStream, offset: usize, size: usize) -> usize {
1683    let len = Ord::min(stream.avail_in as usize, size);
1684
1685    if len == 0 {
1686        return 0;
1687    }
1688
1689    stream.avail_in -= len as u32;
1690
1691    if stream.state.wrap == 2 {
1692        // We likely cannot fuse the crc32 computation and the copy here, because the input can
1693        // be modified by a concurrent thread; hence it must not be turned into a `&[u8]` slice.
1694        let window = &mut stream.state.window;
1695        // SAFETY: len is bounded by avail_in, so this copy is in bounds.
1696        unsafe { window.copy_and_initialize(offset..offset + len, stream.next_in) };
1697
1698        let data = &stream.state.window.filled()[offset..][..len];
1699        stream.state.crc_fold.fold(data, CRC32_INITIAL_VALUE);
1700    } else if stream.state.wrap == 1 {
1701        // We likely cannot fuse the adler32 computation and the copy here, because the input can
1702        // be modified by a concurrent thread; hence it must not be turned into a `&[u8]` slice.
1703        let window = &mut stream.state.window;
1704        // SAFETY: len is bounded by avail_in, so this copy is in bounds.
1705        unsafe { window.copy_and_initialize(offset..offset + len, stream.next_in) };
1706
1707        let data = &stream.state.window.filled()[offset..][..len];
1708        stream.adler = adler32(stream.adler as u32, data) as _;
1709    } else {
1710        let window = &mut stream.state.window;
1711        // SAFETY: len is bounded by avail_in, so this copy is in bounds.
1712        unsafe { window.copy_and_initialize(offset..offset + len, stream.next_in) };
1713    }
1714
1715    stream.next_in = stream.next_in.wrapping_add(len);
1716    stream.total_in += len as crate::c_api::z_size;
1717
1718    len
1719}
1720
1721pub(crate) enum BlockType {
1722    StoredBlock = 0,
1723    StaticTrees = 1,
1724    DynamicTrees = 2,
1725}
1726
1727pub(crate) fn zng_tr_stored_block(
1728    state: &mut State,
1729    window_range: core::ops::Range<usize>,
1730    is_last: bool,
1731) {
1732    // send block type
1733    state.bit_writer.emit_tree(BlockType::StoredBlock, is_last);
1734
1735    // align on byte boundary
1736    state.bit_writer.emit_align();
1737
1738    state.bit_writer.cmpr_bits_align();
1739
1740    let input_block: &[u8] = &state.window.filled()[window_range];
1741    let stored_len = input_block.len() as u16;
1742
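    // A stored block (RFC 1951, section 3.2.4) encodes LEN and then NLEN, its
    // one's complement, as little-endian u16 values, followed by the raw bytes.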
1743    state.bit_writer.pending.extend(&stored_len.to_le_bytes());
1744    state
1745        .bit_writer
1746        .pending
1747        .extend(&(!stored_len).to_le_bytes());
1748
1749    state.bit_writer.cmpr_bits_add(32);
1750    state.bit_writer.sent_bits_add(32);
1751    if stored_len > 0 {
1752        state.bit_writer.pending.extend(input_block);
        // widen before shifting: a u16 shift would drop high bits for blocks over 8191 bytes
1753        state.bit_writer.cmpr_bits_add((stored_len as usize) << 3);
1754        state.bit_writer.sent_bits_add((stored_len as usize) << 3);
1755    }
1756}
1757
1758/// The minimum match length mandated by the deflate standard
1759pub(crate) const STD_MIN_MATCH: usize = 3;
1760/// The maximum match length mandated by the deflate standard
1761pub(crate) const STD_MAX_MATCH: usize = 258;
1762
1763/// The minimum wanted match length, affects deflate_quick, deflate_fast, deflate_medium and deflate_slow
1764pub(crate) const WANT_MIN_MATCH: usize = 4;
1765
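/// Minimum amount of lookahead, except at the end of the input.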
1766pub(crate) const MIN_LOOKAHEAD: usize = STD_MAX_MATCH + STD_MIN_MATCH + 1;
1767
1768#[inline]
1769pub(crate) fn fill_window(stream: &mut DeflateStream) {
1770    debug_assert!(stream.state.lookahead < MIN_LOOKAHEAD);
1771
1772    let wsize = stream.state.w_size;
1773
1774    loop {
1775        let state = &mut *stream.state;
1776        let mut more = state.window_size - state.lookahead - state.strstart;
1777
1778        // If the window is almost full and there is insufficient lookahead,
1779        // move the upper half to the lower one to make room in the upper half.
1780        if state.strstart >= wsize + state.max_dist() {
1781            // shift the window to the left
1782            let (old, new) = state.window.filled_mut()[..2 * wsize].split_at_mut(wsize);
1783            old.copy_from_slice(new);
1784
1785            state.match_start = state.match_start.saturating_sub(wsize as u16);
1786            if state.match_start == 0 {
1787                state.prev_length = 0;
1788            }
1789
1790            state.strstart -= wsize; /* we now have strstart >= MAX_DIST */
1791            state.block_start -= wsize as isize;
1792            state.insert = Ord::min(state.insert, state.strstart);
1793
1794            self::slide_hash::slide_hash(state);
1795
1796            more += wsize;
1797        }
1798
1799        if stream.avail_in == 0 {
1800            break;
1801        }
1802
1803        // If there was no sliding:
1804        //    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
1805        //    more == window_size - lookahead - strstart
1806        // => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
1807        // => more >= window_size - 2*WSIZE + 2
1808        // In the BIG_MEM or MMAP case (not yet supported),
1809        //   window_size == input_size + MIN_LOOKAHEAD  &&
1810        //   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
1811        // Otherwise, window_size == 2*WSIZE so more >= 2.
1812        // If there was sliding, more >= WSIZE. So in all cases, more >= 2.
1813        assert!(more >= 2, "more < 2");
1814
1815        let n = read_buf_window(stream, stream.state.strstart + stream.state.lookahead, more);
1816
1817        let state = &mut *stream.state;
1818        state.lookahead += n;
1819
1820        // Initialize the hash value now that we have some input:
1821        if state.lookahead + state.insert >= STD_MIN_MATCH {
1822            let string = state.strstart - state.insert;
1823            if state.max_chain_length > 1024 {
1824                let v0 = state.window.filled()[string] as u32;
1825                let v1 = state.window.filled()[string + 1] as u32;
1826                state.ins_h = state.update_hash(v0, v1);
1827            } else if string >= 1 {
1828                state.quick_insert_string(string + 2 - STD_MIN_MATCH);
1829            }
1830            let mut count = state.insert;
1831            if state.lookahead == 1 {
1832                count -= 1;
1833            }
1834            if count > 0 {
1835                state.insert_string(string, count);
1836                state.insert -= count;
1837            }
1838        }
1839
1840        // If the whole input has less than STD_MIN_MATCH bytes, ins_h is garbage,
1841        // but this is not important since only literal bytes will be emitted.
1842
1843        if !(stream.state.lookahead < MIN_LOOKAHEAD && stream.avail_in != 0) {
1844            break;
1845        }
1846    }
1847
1848    assert!(
1849        stream.state.strstart <= stream.state.window_size - MIN_LOOKAHEAD,
1850        "not enough room for search"
1851    );
1852}
1853
1854pub(crate) struct StaticTreeDesc {
1855    /// static tree, or empty if unused
1856    pub(crate) static_tree: &'static [Value],
1857    /// extra bits for each code, or empty if unused
1858    extra_bits: &'static [u8],
1859    /// base index for extra_bits
1860    extra_base: usize,
1861    /// max number of elements in the tree
1862    elems: usize,
1863    /// max bit length for the codes
1864    max_length: u16,
1865}
1866
1867impl StaticTreeDesc {
1868    const EMPTY: Self = Self {
1869        static_tree: &[],
1870        extra_bits: &[],
1871        extra_base: 0,
1872        elems: 0,
1873        max_length: 0,
1874    };
1875
1876    /// extra bits for each length code
1877    const EXTRA_LBITS: [u8; LENGTH_CODES] = [
1878        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0,
1879    ];
1880
1881    /// extra bits for each distance code
1882    const EXTRA_DBITS: [u8; D_CODES] = [
1883        0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12,
1884        13, 13,
1885    ];
1886
1887    /// extra bits for each bit length code
1888    const EXTRA_BLBITS: [u8; BL_CODES] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7];
1889
1890    /// The lengths of the bit length codes are sent in order of decreasing
1891    /// probability, to avoid transmitting the lengths for unused bit length codes.
1892    const BL_ORDER: [u8; BL_CODES] = [
1893        16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15,
1894    ];
1895
1896    pub(crate) const L: Self = Self {
1897        static_tree: &self::trees_tbl::STATIC_LTREE,
1898        extra_bits: &Self::EXTRA_LBITS,
1899        extra_base: LITERALS + 1,
1900        elems: L_CODES,
1901        max_length: MAX_BITS as u16,
1902    };
1903
1904    pub(crate) const D: Self = Self {
1905        static_tree: &self::trees_tbl::STATIC_DTREE,
1906        extra_bits: &Self::EXTRA_DBITS,
1907        extra_base: 0,
1908        elems: D_CODES,
1909        max_length: MAX_BITS as u16,
1910    };
1911
1912    pub(crate) const BL: Self = Self {
1913        static_tree: &[],
1914        extra_bits: &Self::EXTRA_BLBITS,
1915        extra_base: 0,
1916        elems: BL_CODES,
1917        max_length: MAX_BL_BITS as u16,
1918    };
1919}
1920
1921#[derive(Clone)]
1922pub(crate) struct TreeDesc<const N: usize> {
1923    dyn_tree: [Value; N],
1924    max_code: usize,
1925    stat_desc: &'static StaticTreeDesc,
1926}
1927
1928impl<const N: usize> TreeDesc<N> {
1929    const EMPTY: Self = Self {
1930        dyn_tree: [Value::new(0, 0); N],
1931        max_code: 0,
1932        stat_desc: &StaticTreeDesc::EMPTY,
1933    };
1934}
1935
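/// Constructs a Huffman tree for the given descriptor: symbols with nonzero
/// frequency become leaves, the two least frequent nodes are merged repeatedly,
/// and finally bit lengths and bit codes are assigned to `desc.dyn_tree`.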
1936fn build_tree<const N: usize>(state: &mut State, desc: &mut TreeDesc<N>) {
1937    let tree = &mut desc.dyn_tree;
1938    let stree = desc.stat_desc.static_tree;
1939    let elements = desc.stat_desc.elems;
1940
1941    let mut heap = Heap::new();
1942    let mut max_code = heap.initialize(&mut tree[..elements]);
1943
1944    // The pkzip format requires that at least one distance code exists,
1945    // and that at least one bit should be sent even if there is only one
1946    // possible code. So to avoid special checks later on we force at least
1947    // two codes of non zero frequency.
1948    while heap.heap_len < 2 {
1949        heap.heap_len += 1;
1950        let node = if max_code < 2 {
1951            max_code += 1;
1952            max_code
1953        } else {
1954            0
1955        };
1956
1957        debug_assert!(node >= 0);
1958        let node = node as usize;
1959
1960        heap.heap[heap.heap_len] = node as u32;
1961        *tree[node].freq_mut() = 1;
1962        heap.depth[node] = 0;
1963        state.opt_len -= 1;
1964        if !stree.is_empty() {
1965            state.static_len -= stree[node].len() as usize;
1966        }
1967        /* node is 0 or 1 so it does not have extra bits */
1968    }
1969
1970    debug_assert!(max_code >= 0);
1971    let max_code = max_code as usize;
1972    desc.max_code = max_code;
1973
1974    // The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
1975    // establish sub-heaps of increasing lengths:
1976    let mut n = heap.heap_len / 2;
1977    while n >= 1 {
1978        heap.pqdownheap(tree, n);
1979        n -= 1;
1980    }
1981
1982    heap.construct_huffman_tree(tree, elements);
1983
1984    // At this point, the fields freq and dad are set. We can now
1985    // generate the bit lengths.
1986    let bl_count = gen_bitlen(state, &mut heap, desc);
1987
1988    // The field len is now set, we can generate the bit codes
1989    gen_codes(&mut desc.dyn_tree, max_code, &bl_count);
1990}
1991
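/// Computes the optimal bit length for every code in the tree, capping lengths at
/// `stat_desc.max_length` and redistributing overflowed leaves so that all codes
/// fit. Updates `opt_len` and `static_len`, and returns the count of codes per
/// bit length, which `gen_codes` uses to assign the actual code values.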
1992fn gen_bitlen<const N: usize>(
1993    state: &mut State,
1994    heap: &mut Heap,
1995    desc: &mut TreeDesc<N>,
1996) -> [u16; MAX_BITS + 1] {
1997    let tree = &mut desc.dyn_tree;
1998    let max_code = desc.max_code;
1999    let stree = desc.stat_desc.static_tree;
2000    let extra = desc.stat_desc.extra_bits;
2001    let base = desc.stat_desc.extra_base;
2002    let max_length = desc.stat_desc.max_length;
2003
2004    let mut bl_count = [0u16; MAX_BITS + 1];
2005
2006    // In a first pass, compute the optimal bit lengths (which may
2007    // overflow in the case of the bit length tree).
2008    *tree[heap.heap[heap.heap_max] as usize].len_mut() = 0; /* root of the heap */
2009
2010    // number of elements with bit length too large
2011    let mut overflow: i32 = 0;
2012
2013    for h in heap.heap_max + 1..HEAP_SIZE {
2014        let n = heap.heap[h] as usize;
2015        let mut bits = tree[tree[n].dad() as usize].len() + 1;
2016
2017        if bits > max_length {
2018            bits = max_length;
2019            overflow += 1;
2020        }
2021
2022        // We overwrite tree[n].Dad which is no longer needed
2023        *tree[n].len_mut() = bits;
2024
2025        // not a leaf node
2026        if n > max_code {
2027            continue;
2028        }
2029
2030        bl_count[bits as usize] += 1;
2031        let mut xbits = 0;
2032        if n >= base {
2033            xbits = extra[n - base] as usize;
2034        }
2035
2036        let f = tree[n].freq() as usize;
2037        state.opt_len += f * (bits as usize + xbits);
2038
2039        if !stree.is_empty() {
2040            state.static_len += f * (stree[n].len() as usize + xbits);
2041        }
2042    }
2043
2044    if overflow == 0 {
2045        return bl_count;
2046    }
2047
2048    /* Find the first bit length which could increase: */
2049    loop {
2050        let mut bits = max_length as usize - 1;
2051        while bl_count[bits] == 0 {
2052            bits -= 1;
2053        }
2054        bl_count[bits] -= 1; /* move one leaf down the tree */
2055        bl_count[bits + 1] += 2; /* move one overflow item as its brother */
2056        bl_count[max_length as usize] -= 1;
2057        /* The brother of the overflow item also moves one step up,
2058         * but this does not affect bl_count[max_length]
2059         */
2060        overflow -= 2;
2061
2062        if overflow <= 0 {
2063            break;
2064        }
2065    }
2066
2067    // Now recompute all bit lengths, scanning in increasing frequency.
2068    // h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
2069    // lengths instead of fixing only the wrong ones. This idea is taken
2070    // from 'ar' written by Haruhiko Okumura.)
2071    let mut h = HEAP_SIZE;
2072    for bits in (1..=max_length).rev() {
2073        let mut n = bl_count[bits as usize];
2074        while n != 0 {
2075            h -= 1;
2076            let m = heap.heap[h] as usize;
2077            if m > max_code {
2078                continue;
2079            }
2080
2081            if tree[m].len() != bits {
2082                // Tracev((stderr, "code %d bits %d->%u\n", m, tree[m].Len, bits));
                // widen before multiplying: the u16 product can overflow for large frequencies
2083                state.opt_len += bits as usize * tree[m].freq() as usize;
2084                state.opt_len -= tree[m].len() as usize * tree[m].freq() as usize;
2085                *tree[m].len_mut() = bits;
2086            }
2087
2088            n -= 1;
2089        }
2090    }
2091    bl_count
2092}
2093
2094/// Checks that symbol is a printing character (excluding space)
2095#[allow(unused)]
2096fn isgraph(c: u8) -> bool {
2097    (c > 0x20) && (c <= 0x7E)
2098}
2099
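// Generates canonical Huffman codes from the computed code lengths (RFC 1951,
// section 3.2.2). Illustrative example (not from the original source): with one
// code of length 1, one of length 2 and two of length 3 (bl_count = [_, 1, 1, 2]),
// the first loop yields next_code = [_, 0b0, 0b10, 0b110], so the assigned codes
// are 0, 10, 110 and 111. Each code is then bit-reversed, because deflate writes
// Huffman codes starting from the least significant bit.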
2100fn gen_codes(tree: &mut [Value], max_code: usize, bl_count: &[u16]) {
2101    /* tree: the tree to decorate */
2102    /* max_code: largest code with non zero frequency */
2103    /* bl_count: number of codes at each bit length */
2104    let mut next_code = [0; MAX_BITS + 1]; /* next code value for each bit length */
2105    let mut code = 0; /* running code value */
2106
2107    /* The distribution counts are first used to generate the code values
2108     * without bit reversal.
2109     */
2110    for bits in 1..=MAX_BITS {
2111        code = (code + bl_count[bits - 1]) << 1;
2112        next_code[bits] = code;
2113    }
2114
2115    /* Check that the bit counts in bl_count are consistent. The last code
2116     * must be all ones.
2117     */
2118    assert!(
2119        code + bl_count[MAX_BITS] - 1 == (1 << MAX_BITS) - 1,
2120        "inconsistent bit counts"
2121    );
2122
2123    trace!("\ngen_codes: max_code {max_code} ");
2124
2125    for n in 0..=max_code {
2126        let len = tree[n].len();
2127        if len == 0 {
2128            continue;
2129        }
2130
2131        /* Now reverse the bits */
2132        assert!((1..=15).contains(&len), "code length must be 1-15");
2133        *tree[n].code_mut() = next_code[len as usize].reverse_bits() >> (16 - len);
2134        next_code[len as usize] += 1;
2135
2136        if tree != self::trees_tbl::STATIC_LTREE.as_slice() {
2137            trace!(
2138                "\nn {:>3} {} l {:>2} c {:>4x} ({:x}) ",
2139                n,
2140                if isgraph(n as u8) {
2141                    char::from_u32(n as u32).unwrap()
2142                } else {
2143                    ' '
2144                },
2145                len,
2146                tree[n].code(),
2147                next_code[len as usize] - 1
2148            );
2149        }
2150    }
2151}
2152
2153/// repeat previous bit length 3-6 times (2 bits of repeat count)
2154const REP_3_6: usize = 16;
2155
2156/// repeat a zero length 3-10 times  (3 bits of repeat count)
2157const REPZ_3_10: usize = 17;
2158
2159/// repeat a zero length 11-138 times  (7 bits of repeat count)
2160const REPZ_11_138: usize = 18;
2161
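// Scans a literal or distance tree to gather frequencies for the bit length
// tree, run-length encoding the sequence of code lengths: a repeat of the
// previous nonzero length maps to REP_3_6, and runs of zeros map to REPZ_3_10
// or REPZ_11_138 depending on their length. For example (illustrative), six
// equal nonzero lengths are tallied as one literal length plus one REP_3_6 symbol.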
2162fn scan_tree(bl_desc: &mut TreeDesc<{ 2 * BL_CODES + 1 }>, tree: &mut [Value], max_code: usize) {
2163    /* tree: the tree to be scanned */
2164    /* max_code: and its largest code of non zero frequency */
2165    let mut prevlen = -1isize; /* last emitted length */
2166    let mut curlen: isize; /* length of current code */
2167    let mut nextlen = tree[0].len(); /* length of next code */
2168    let mut count = 0; /* repeat count of the current code */
2169    let mut max_count = 7; /* max repeat count */
2170    let mut min_count = 4; /* min repeat count */
2171
2172    if nextlen == 0 {
2173        max_count = 138;
2174        min_count = 3;
2175    }
2176
2177    *tree[max_code + 1].len_mut() = 0xffff; /* guard */
2178
2179    let bl_tree = &mut bl_desc.dyn_tree;
2180
2181    for n in 0..=max_code {
2182        curlen = nextlen as isize;
2183        nextlen = tree[n + 1].len();
2184        count += 1;
2185        if count < max_count && curlen == nextlen as isize {
2186            continue;
2187        } else if count < min_count {
2188            *bl_tree[curlen as usize].freq_mut() += count;
2189        } else if curlen != 0 {
2190            if curlen != prevlen {
2191                *bl_tree[curlen as usize].freq_mut() += 1;
2192            }
2193            *bl_tree[REP_3_6].freq_mut() += 1;
2194        } else if count <= 10 {
2195            *bl_tree[REPZ_3_10].freq_mut() += 1;
2196        } else {
2197            *bl_tree[REPZ_11_138].freq_mut() += 1;
2198        }
2199
2200        count = 0;
2201        prevlen = curlen;
2202
2203        if nextlen == 0 {
2204            max_count = 138;
2205            min_count = 3;
2206        } else if curlen == nextlen as isize {
2207            max_count = 6;
2208            min_count = 3;
2209        } else {
2210            max_count = 7;
2211            min_count = 4;
2212        }
2213    }
2214}
2215
2216fn send_all_trees(state: &mut State, lcodes: usize, dcodes: usize, blcodes: usize) {
2217    assert!(
2218        lcodes >= 257 && dcodes >= 1 && blcodes >= 4,
2219        "not enough codes"
2220    );
2221    assert!(
2222        lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
2223        "too many codes"
2224    );
2225
2226    trace!("\nbl counts: ");
2227    state.bit_writer.send_bits(lcodes as u64 - 257, 5); /* not +255 as stated in appnote.txt */
2228    state.bit_writer.send_bits(dcodes as u64 - 1, 5);
2229    state.bit_writer.send_bits(blcodes as u64 - 4, 4); /* not -3 as stated in appnote.txt */
2230
2231    for rank in 0..blcodes {
2232        trace!("\nbl code {:>2} ", StaticTreeDesc::BL_ORDER[rank]);
2233        state.bit_writer.send_bits(
2234            state.bl_desc.dyn_tree[StaticTreeDesc::BL_ORDER[rank] as usize].len() as u64,
2235            3,
2236        );
2237    }
2238    trace!("\nbl tree: sent {}", state.bit_writer.bits_sent);
2239
2240    // literal tree
2241    state
2242        .bit_writer
2243        .send_tree(&state.l_desc.dyn_tree, &state.bl_desc.dyn_tree, lcodes - 1);
2244    trace!("\nlit tree: sent {}", state.bit_writer.bits_sent);
2245
2246    // distance tree
2247    state
2248        .bit_writer
2249        .send_tree(&state.d_desc.dyn_tree, &state.bl_desc.dyn_tree, dcodes - 1);
2250    trace!("\ndist tree: sent {}", state.bit_writer.bits_sent);
2251}
2252
2253/// Construct the Huffman tree for the bit lengths and return the index in
2254/// bl_order of the last bit length code to send.
2255fn build_bl_tree(state: &mut State) -> usize {
2256    /* Determine the bit length frequencies for literal and distance trees */
2257
2258    scan_tree(
2259        &mut state.bl_desc,
2260        &mut state.l_desc.dyn_tree,
2261        state.l_desc.max_code,
2262    );
2263
2264    scan_tree(
2265        &mut state.bl_desc,
2266        &mut state.d_desc.dyn_tree,
2267        state.d_desc.max_code,
2268    );
2269
2270    /* Build the bit length tree: */
2271    {
2272        let mut tmp = TreeDesc::EMPTY;
2273        core::mem::swap(&mut tmp, &mut state.bl_desc);
2274        build_tree(state, &mut tmp);
2275        core::mem::swap(&mut tmp, &mut state.bl_desc);
2276    }
2277
2278    /* opt_len now includes the length of the tree representations, except
2279     * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
2280     */
2281
2282    /* Determine the number of bit length codes to send. The pkzip format
2283     * requires that at least 4 bit length codes be sent. (appnote.txt says
2284     * 3 but the actual value used is 4.)
2285     */
2286    let mut max_blindex = BL_CODES - 1;
2287    while max_blindex >= 3 {
2288        let index = StaticTreeDesc::BL_ORDER[max_blindex] as usize;
2289        if state.bl_desc.dyn_tree[index].len() != 0 {
2290            break;
2291        }
2292
2293        max_blindex -= 1;
2294    }
2295
2296    /* Update opt_len to include the bit length tree and counts */
2297    state.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;
2298    trace!(
2299        "\ndyn trees: dyn {}, stat {}",
2300        state.opt_len,
2301        state.static_len
2302    );
2303
2304    max_blindex
2305}
2306
2307fn zng_tr_flush_block(
2308    stream: &mut DeflateStream,
2309    window_offset: Option<usize>,
2310    stored_len: u32,
2311    last: bool,
2312) {
2313    /* window_offset: offset of the input block into the window */
2314    /* stored_len: length of input block */
2315    /* last: one if this is the last block for a file */
2316
2317    let mut opt_lenb;
2318    let static_lenb;
2319    let mut max_blindex = 0;
2320
2321    let state = &mut stream.state;
2322
2323    if state.sym_buf.is_empty() {
2324        opt_lenb = 0;
2325        static_lenb = 0;
2326        state.static_len = 7;
2327    } else if state.level > 0 {
2328        if stream.data_type == DataType::Unknown as i32 {
2329            stream.data_type = State::detect_data_type(&state.l_desc.dyn_tree) as i32;
2330        }
2331
2332        {
2333            let mut tmp = TreeDesc::EMPTY;
2334            core::mem::swap(&mut tmp, &mut state.l_desc);
2335
2336            build_tree(state, &mut tmp);
2337            core::mem::swap(&mut tmp, &mut state.l_desc);
2338
2339            trace!(
2340                "\nlit data: dyn {}, stat {}",
2341                state.opt_len,
2342                state.static_len
2343            );
2344        }
2345
2346        {
2347            let mut tmp = TreeDesc::EMPTY;
2348            core::mem::swap(&mut tmp, &mut state.d_desc);
2349            build_tree(state, &mut tmp);
2350            core::mem::swap(&mut tmp, &mut state.d_desc);
2351
2352            trace!(
2353                "\ndist data: dyn {}, stat {}",
2354                state.opt_len,
2355                state.static_len
2356            );
2357        }
2358
2359        // Build the bit length tree for the above two trees, and get the index
2360        // in bl_order of the last bit length code to send.
2361        max_blindex = build_bl_tree(state);
2362
2363        // Determine the best encoding. Compute the block lengths in bytes.
2364        opt_lenb = (state.opt_len + 3 + 7) >> 3;
2365        static_lenb = (state.static_len + 3 + 7) >> 3;
2366
2367        trace!(
2368            "\nopt {}({}) stat {}({}) stored {} lit {} ",
2369            opt_lenb,
2370            state.opt_len,
2371            static_lenb,
2372            state.static_len,
2373            stored_len,
2374            state.sym_buf.len() / 3
2375        );
2376
2377        if static_lenb <= opt_lenb || state.strategy == Strategy::Fixed {
2378            opt_lenb = static_lenb;
2379        }
2380    } else {
2381        assert!(window_offset.is_some(), "lost buf");
2382        /* force a stored block */
2383        opt_lenb = stored_len as usize + 5;
2384        static_lenb = stored_len as usize + 5;
2385    }
2386
2387    if stored_len as usize + 4 <= opt_lenb && window_offset.is_some() {
2388        /* 4: two words for the lengths
2389         * The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
2390         * Otherwise we can't have processed more than WSIZE input bytes since
2391         * the last block flush, because compression would have been
2392         * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
2393         * transform a block into a stored block.
2394         */
2395        let window_offset = window_offset.unwrap();
2396        let range = window_offset..window_offset + stored_len as usize;
2397        zng_tr_stored_block(state, range, last);
2398    } else if static_lenb == opt_lenb {
2399        state.bit_writer.emit_tree(BlockType::StaticTrees, last);
2400        state.compress_block_static_trees();
2401    // cmpr_bits_add(s, s.static_len);
2402    } else {
2403        state.bit_writer.emit_tree(BlockType::DynamicTrees, last);
2404        send_all_trees(
2405            state,
2406            state.l_desc.max_code + 1,
2407            state.d_desc.max_code + 1,
2408            max_blindex + 1,
2409        );
2410
2411        state.compress_block_dynamic_trees();
2412    }
2413
2414    // TODO
2415    // This check is made mod 2^32, for files larger than 512 MB and unsigned long implemented on 32 bits.
2416    // assert_eq!(state.compressed_len, state.bits_sent, "bad compressed size");
2417
2418    state.init_block();
2419    if last {
2420        state.bit_writer.emit_align();
2421    }
2422
2423    // Tracev((stderr, "\ncomprlen {}(%lu) ", s->compressed_len>>3, s->compressed_len-7*last));
2424}
2425
2426pub(crate) fn flush_block_only(stream: &mut DeflateStream, is_last: bool) {
2427    zng_tr_flush_block(
2428        stream,
2429        (stream.state.block_start >= 0).then_some(stream.state.block_start as usize),
2430        (stream.state.strstart as isize - stream.state.block_start) as u32,
2431        is_last,
2432    );
2433
2434    stream.state.block_start = stream.state.strstart as isize;
2435    flush_pending(stream)
2436}
2437
2438fn flush_bytes(stream: &mut DeflateStream, mut bytes: &[u8]) -> ControlFlow<ReturnCode> {
2439    let mut state = &mut stream.state;
2440
2441    // we'll be using the pending buffer as temporary storage
2442    let mut beg = state.bit_writer.pending.pending().len(); /* start of bytes to update crc */
2443
2444    while state.bit_writer.pending.remaining() < bytes.len() {
2445        let copy = state.bit_writer.pending.remaining();
2446
2447        state.bit_writer.pending.extend(&bytes[..copy]);
2448
2449        stream.adler = crc32(
2450            stream.adler as u32,
2451            &state.bit_writer.pending.pending()[beg..],
2452        ) as z_checksum;
2453
2454        state.gzindex += copy;
2455        flush_pending(stream);
2456        state = &mut stream.state;
2457
2458        // could not flush all the pending output
2459        if !state.bit_writer.pending.pending().is_empty() {
2460            state.last_flush = -1;
2461            return ControlFlow::Break(ReturnCode::Ok);
2462        }
2463
2464        beg = 0;
2465        bytes = &bytes[copy..];
2466    }
2467
2468    state.bit_writer.pending.extend(bytes);
2469
2470    stream.adler = crc32(
2471        stream.adler as u32,
2472        &state.bit_writer.pending.pending()[beg..],
2473    ) as z_checksum;
2474    state.gzindex = 0;
2475
2476    ControlFlow::Continue(())
2477}
2478
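/// Compresses as much data as possible, stopping when the input is exhausted or
/// the output buffer is full, mirroring zlib's `deflate`. The caller drives the
/// loop: once all input has been provided, repeated calls with
/// [`DeflateFlush::Finish`] eventually return [`ReturnCode::StreamEnd`].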
2479pub fn deflate(stream: &mut DeflateStream, flush: DeflateFlush) -> ReturnCode {
2480    if stream.next_out.is_null()
2481        || (stream.avail_in != 0 && stream.next_in.is_null())
2482        || (stream.state.status == Status::Finish && flush != DeflateFlush::Finish)
2483    {
2484        let err = ReturnCode::StreamError;
2485        stream.msg = err.error_message();
2486        return err;
2487    }
2488
2489    if stream.avail_out == 0 {
2490        let err = ReturnCode::BufError;
2491        stream.msg = err.error_message();
2492        return err;
2493    }
2494
2495    let old_flush = stream.state.last_flush;
2496    stream.state.last_flush = flush as i8;
2497
2498    /* Flush as much pending output as possible */
2499    if !stream.state.bit_writer.pending.pending().is_empty() {
2500        flush_pending(stream);
2501        if stream.avail_out == 0 {
2502            /* Since avail_out is 0, deflate will be called again with
2503             * more output space, but possibly with both pending and
2504             * avail_in equal to zero. There won't be anything to do,
2505             * but this is not an error situation so make sure we
2506             * return OK instead of BUF_ERROR at next call of deflate:
2507             */
2508            stream.state.last_flush = -1;
2509            return ReturnCode::Ok;
2510        }
2511
2512        /* Make sure there is something to do and avoid duplicate consecutive
2513         * flushes. For repeated and useless calls with Z_FINISH, we keep
2514         * returning Z_STREAM_END instead of Z_BUF_ERROR.
2515         */
2516    } else if stream.avail_in == 0
2517        && rank_flush(flush as i8) <= rank_flush(old_flush)
2518        && flush != DeflateFlush::Finish
2519    {
2520        let err = ReturnCode::BufError;
2521        stream.msg = err.error_message();
2522        return err;
2523    }
2524
2525    /* User must not provide more input after the first FINISH: */
2526    if stream.state.status == Status::Finish && stream.avail_in != 0 {
2527        let err = ReturnCode::BufError;
2528        stream.msg = err.error_message();
2529        return err;
2530    }
2531
2532    /* Write the header */
2533    if stream.state.status == Status::Init && stream.state.wrap == 0 {
2534        stream.state.status = Status::Busy;
2535    }
2536
2537    if stream.state.status == Status::Init {
2538        let header = stream.state.header();
2539        stream
2540            .state
2541            .bit_writer
2542            .pending
2543            .extend(&header.to_be_bytes());
2544
2545        /* Save the adler32 of the preset dictionary: */
2546        if stream.state.strstart != 0 {
2547            let adler = stream.adler as u32;
2548            stream.state.bit_writer.pending.extend(&adler.to_be_bytes());
2549        }
2550
2551        stream.adler = ADLER32_INITIAL_VALUE as _;
2552        stream.state.status = Status::Busy;
2553
2554        // compression must start with an empty pending buffer
2555        flush_pending(stream);
2556
2557        if !stream.state.bit_writer.pending.pending().is_empty() {
2558            stream.state.last_flush = -1;
2559
2560            return ReturnCode::Ok;
2561        }
2562    }
2563
2564    if stream.state.status == Status::GZip {
2565        /* gzip header */
2566        stream.state.crc_fold = Crc32Fold::new();
2567
2568        stream.state.bit_writer.pending.extend(&[31, 139, 8]);
2569
2570        let extra_flags = if stream.state.level == 9 {
2571            2
2572        } else if stream.state.strategy >= Strategy::HuffmanOnly || stream.state.level < 2 {
2573            4
2574        } else {
2575            0
2576        };
2577
2578        match &stream.state.gzhead {
2579            None => {
2580                let bytes = [0, 0, 0, 0, 0, extra_flags, gz_header::OS_CODE];
2581                stream.state.bit_writer.pending.extend(&bytes);
2582                stream.state.status = Status::Busy;
2583
2584                /* Compression must start with an empty pending buffer */
2585                flush_pending(stream);
2586                if !stream.state.bit_writer.pending.pending().is_empty() {
2587                    stream.state.last_flush = -1;
2588                    return ReturnCode::Ok;
2589                }
2590            }
2591            Some(gzhead) => {
2592                stream.state.bit_writer.pending.extend(&[gzhead.flags()]);
2593                let bytes = (gzhead.time as u32).to_le_bytes();
2594                stream.state.bit_writer.pending.extend(&bytes);
2595                stream
2596                    .state
2597                    .bit_writer
2598                    .pending
2599                    .extend(&[extra_flags, gzhead.os as u8]);
2600
2601                if !gzhead.extra.is_null() {
2602                    let bytes = (gzhead.extra_len as u16).to_le_bytes();
2603                    stream.state.bit_writer.pending.extend(&bytes);
2604                }
2605
2606                if gzhead.hcrc != 0 {
2607                    stream.adler = crc32(
2608                        stream.adler as u32,
2609                        stream.state.bit_writer.pending.pending(),
2610                    ) as z_checksum
2611                }
2612
2613                stream.state.gzindex = 0;
2614                stream.state.status = Status::Extra;
2615            }
2616        }
2617    }
2618
2619    if stream.state.status == Status::Extra {
2620        if let Some(gzhead) = stream.state.gzhead.as_ref() {
2621            if !gzhead.extra.is_null() {
2622                let gzhead_extra = gzhead.extra;
2623
2624                let extra = unsafe {
2625                    core::slice::from_raw_parts(
2626                        // SAFETY: gzindex is always less than extra_len, and the user
2627                        // guarantees the pointer is valid for extra_len.
2628                        gzhead_extra.add(stream.state.gzindex),
2629                        (gzhead.extra_len & 0xffff) as usize - stream.state.gzindex,
2630                    )
2631                };
2632
2633                if let ControlFlow::Break(err) = flush_bytes(stream, extra) {
2634                    return err;
2635                }
2636            }
2637        }
2638        stream.state.status = Status::Name;
2639    }
2640
2641    if stream.state.status == Status::Name {
2642        if let Some(gzhead) = stream.state.gzhead.as_ref() {
2643            if !gzhead.name.is_null() {
2644                // SAFETY: user satisfies precondition that gzhead.name is a C string.
2645                let gzhead_name = unsafe { CStr::from_ptr(gzhead.name.cast()) };
2646                let bytes = gzhead_name.to_bytes_with_nul();
2647                if let ControlFlow::Break(err) = flush_bytes(stream, bytes) {
2648                    return err;
2649                }
2650            }
2651            stream.state.status = Status::Comment;
2652        }
2653    }
2654
2655    if stream.state.status == Status::Comment {
2656        if let Some(gzhead) = stream.state.gzhead.as_ref() {
2657            if !gzhead.comment.is_null() {
2658                // SAFETY: user satisfies precondition that gzhead.comment is a C string.
2659                let gzhead_comment = unsafe { CStr::from_ptr(gzhead.comment.cast()) };
2660                let bytes = gzhead_comment.to_bytes_with_nul();
2661                if let ControlFlow::Break(err) = flush_bytes(stream, bytes) {
2662                    return err;
2663                }
2664            }
2665            stream.state.status = Status::Hcrc;
2666        }
2667    }
2668
2669    if stream.state.status == Status::Hcrc {
2670        if let Some(gzhead) = stream.state.gzhead.as_ref() {
2671            if gzhead.hcrc != 0 {
2672                let bytes = (stream.adler as u16).to_le_bytes();
2673                if let ControlFlow::Break(err) = flush_bytes(stream, &bytes) {
2674                    return err;
2675                }
2676            }
2677        }
2678
2679        stream.state.status = Status::Busy;
2680
2681        // compression must start with an empty pending buffer
2682        flush_pending(stream);
2683        if !stream.state.bit_writer.pending.pending().is_empty() {
2684            stream.state.last_flush = -1;
2685            return ReturnCode::Ok;
2686        }
2687    }
2688
2689    // Start a new block or continue the current one.
2690    let state = &mut stream.state;
2691    if stream.avail_in != 0
2692        || state.lookahead != 0
2693        || (flush != DeflateFlush::NoFlush && state.status != Status::Finish)
2694    {
2695        let bstate = self::algorithm::run(stream, flush);
2696
2697        let state = &mut stream.state;
2698
2699        if matches!(bstate, BlockState::FinishStarted | BlockState::FinishDone) {
2700            state.status = Status::Finish;
2701        }
2702
2703        match bstate {
2704            BlockState::NeedMore | BlockState::FinishStarted => {
2705                if stream.avail_out == 0 {
2706                    state.last_flush = -1; /* avoid BUF_ERROR next call, see above */
2707                }
2708                return ReturnCode::Ok;
2709                /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
2710                 * of deflate should use the same flush parameter to make sure
2711                 * that the flush is complete. So we don't have to output an
2712                 * empty block here, this will be done at next call. This also
2713                 * ensures that for a very small output buffer, we emit at most
2714                 * one empty block.
2715                 */
2716            }
2717            BlockState::BlockDone => {
2718                match flush {
2719                    DeflateFlush::NoFlush => unreachable!("condition of inner surrounding if"),
2720                    DeflateFlush::PartialFlush => {
2721                        state.bit_writer.align();
2722                    }
2723                    DeflateFlush::SyncFlush => {
2724                        // add an empty stored block that is marked as not final. This is useful for
2725                        // parallel deflate where we want to make sure the intermediate blocks are not
2726                        // marked as "last block".
2727                        zng_tr_stored_block(state, 0..0, false);
2728                    }
2729                    DeflateFlush::FullFlush => {
2730                        // add an empty stored block that is marked as not final. This is useful for
2731                        // parallel deflate where we want to make sure the intermediate blocks are not
2732                        // marked as "last block".
2733                        zng_tr_stored_block(state, 0..0, false);
2734
2735                        state.head.as_mut_slice().fill(0); // forget history
2736
2737                        if state.lookahead == 0 {
2738                            state.strstart = 0;
2739                            state.block_start = 0;
2740                            state.insert = 0;
2741                        }
2742                    }
2743                    DeflateFlush::Block => { /* fall through */ }
2744                    DeflateFlush::Finish => unreachable!("condition of outer surrounding if"),
2745                }
2746
2747                flush_pending(stream);
2748
2749                if stream.avail_out == 0 {
2750                    stream.state.last_flush = -1; /* avoid BUF_ERROR at next call, see above */
2751                    return ReturnCode::Ok;
2752                }
2753            }
2754            BlockState::FinishDone => { /* do nothing */ }
2755        }
2756    }
2757
2758    if flush != DeflateFlush::Finish {
2759        return ReturnCode::Ok;
2760    }
2761
2762    // write the trailer
2763    if stream.state.wrap == 2 {
2764        let crc_fold = core::mem::take(&mut stream.state.crc_fold);
2765        stream.adler = crc_fold.finish() as z_checksum;
2766
2767        let adler = stream.adler as u32;
2768        stream.state.bit_writer.pending.extend(&adler.to_le_bytes());
2769
2770        let total_in = stream.total_in as u32;
2771        stream
2772            .state
2773            .bit_writer
2774            .pending
2775            .extend(&total_in.to_le_bytes());
2776    } else if stream.state.wrap == 1 {
2777        let adler = stream.adler as u32;
2778        stream.state.bit_writer.pending.extend(&adler.to_be_bytes());
2779    }
2780
2781    flush_pending(stream);
2782
2783    // If avail_out is zero, the application will call deflate again to flush the rest.
2784    if stream.state.wrap > 0 {
2785        stream.state.wrap = -stream.state.wrap; /* write the trailer only once! */
2786    }
2787
2788    if stream.state.bit_writer.pending.pending().is_empty() {
2789        assert_eq!(stream.state.bit_writer.bits_used, 0, "bi_buf not flushed");
2790        return ReturnCode::StreamEnd;
2791    }
2792    ReturnCode::Ok
2793}
2794
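/// Moves as many pending output bytes as fit into `next_out`, updating
/// `avail_out` and `total_out`. Bytes that do not fit remain queued for the
/// next call.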
2795pub(crate) fn flush_pending(stream: &mut DeflateStream) {
2796    let state = &mut stream.state;
2797
2798    state.bit_writer.flush_bits();
2799
2800    let pending = state.bit_writer.pending.pending();
2801    let len = Ord::min(pending.len(), stream.avail_out as usize);
2802
2803    if len == 0 {
2804        return;
2805    }
2806
2807    trace!("\n[FLUSH {len} bytes]");
2808    // SAFETY: len is min(pending, stream.avail_out), so we won't overrun next_out.
2809    unsafe { core::ptr::copy_nonoverlapping(pending.as_ptr(), stream.next_out, len) };
2810
2811    stream.next_out = stream.next_out.wrapping_add(len);
2812    stream.total_out += len as crate::c_api::z_size;
2813    stream.avail_out -= len as crate::c_api::uInt;
2814
2815    state.bit_writer.pending.advance(len);
2816}
2817
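/// Compresses `input` into `output` in a single call, returning the written
/// prefix of `output` together with a [`ReturnCode`].
///
/// A minimal usage sketch (illustrative; assumes the `Default` impl of
/// [`DeflateConfig`] selects reasonable defaults):
///
/// ```
/// use zlib_rs::deflate::{compress_slice, DeflateConfig};
/// use zlib_rs::ReturnCode;
///
/// let mut output = [0u8; 64];
/// let (compressed, err) = compress_slice(&mut output, b"hello world", DeflateConfig::default());
/// assert_eq!(err, ReturnCode::Ok);
/// assert!(!compressed.is_empty());
/// ```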
2818pub fn compress_slice<'a>(
2819    output: &'a mut [u8],
2820    input: &[u8],
2821    config: DeflateConfig,
2822) -> (&'a mut [u8], ReturnCode) {
2823    // SAFETY: a [u8] is a valid [MaybeUninit<u8>].
2824    let output_uninit = unsafe {
2825        core::slice::from_raw_parts_mut(output.as_mut_ptr() as *mut MaybeUninit<u8>, output.len())
2826    };
2827
2828    compress(output_uninit, input, config)
2829}
2830
2831pub fn compress<'a>(
2832    output: &'a mut [MaybeUninit<u8>],
2833    input: &[u8],
2834    config: DeflateConfig,
2835) -> (&'a mut [u8], ReturnCode) {
2836    compress_with_flush(output, input, config, DeflateFlush::Finish)
2837}
2838
2839pub fn compress_slice_with_flush<'a>(
2840    output: &'a mut [u8],
2841    input: &[u8],
2842    config: DeflateConfig,
2843    flush: DeflateFlush,
2844) -> (&'a mut [u8], ReturnCode) {
2845    // SAFETY: a [u8] is a valid [MaybeUninit<u8>], and `compress_with_flush` never uninitializes previously initialized memory.
2846    let output_uninit = unsafe {
2847        core::slice::from_raw_parts_mut(output.as_mut_ptr() as *mut MaybeUninit<u8>, output.len())
2848    };
2849
2850    compress_with_flush(output_uninit, input, config, flush)
2851}
2852
2853pub fn compress_with_flush<'a>(
2854    output: &'a mut [MaybeUninit<u8>],
2855    input: &[u8],
2856    config: DeflateConfig,
2857    final_flush: DeflateFlush,
2858) -> (&'a mut [u8], ReturnCode) {
2859    let mut stream = z_stream {
2860        next_in: input.as_ptr() as *mut u8,
2861        avail_in: 0, // for special logic in the first iteration
2862        total_in: 0,
2863        next_out: output.as_mut_ptr() as *mut u8,
2864        avail_out: 0, // for special logic on the first iteration
2865        total_out: 0,
2866        msg: core::ptr::null_mut(),
2867        state: core::ptr::null_mut(),
2868        zalloc: None,
2869        zfree: None,
2870        opaque: core::ptr::null_mut(),
2871        data_type: 0,
2872        adler: 0,
2873        reserved: 0,
2874    };
2875
2876    let err = init(&mut stream, config);
2877    if err != ReturnCode::Ok {
2878        return (&mut [], err);
2879    }
2880
2881    let max = core::ffi::c_uint::MAX as usize;
2882
2883    let mut left = output.len();
2884    let mut source_len = input.len();
2885
2886    loop {
2887        if stream.avail_out == 0 {
2888            stream.avail_out = Ord::min(left, max) as _;
2889            left -= stream.avail_out as usize;
2890        }
2891
2892        if stream.avail_in == 0 {
2893            stream.avail_in = Ord::min(source_len, max) as _;
2894            source_len -= stream.avail_in as usize;
2895        }
2896
2897        let flush = if source_len > 0 {
2898            DeflateFlush::NoFlush
2899        } else {
2900            final_flush
2901        };
2902
2903        let err = if let Some(stream) = unsafe { DeflateStream::from_stream_mut(&mut stream) } {
2904            deflate(stream, flush)
2905        } else {
2906            ReturnCode::StreamError
2907        };
2908
2909        if err != ReturnCode::Ok {
2910            break;
2911        }
2912    }
2913
2914    // SAFETY: we have now initialized these bytes
2915    let output_slice = unsafe {
2916        core::slice::from_raw_parts_mut(output.as_mut_ptr() as *mut u8, stream.total_out as usize)
2917    };
2918
2919    // `end` may return DataError if there was insufficient output space
2920    let return_code = if let Some(stream) = unsafe { DeflateStream::from_stream_mut(&mut stream) } {
2921        match end(stream) {
2922            Ok(_) => ReturnCode::Ok,
2923            Err(_) => ReturnCode::DataError,
2924        }
2925    } else {
2926        ReturnCode::Ok
2927    };
2928
2929    (output_slice, return_code)
2930}
2931
2932pub const fn compress_bound(source_len: usize) -> usize {
2933    compress_bound_help(source_len, ZLIB_WRAPLEN)
2934}
2935
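// Worked example (illustrative): for source_len = 100 with the zlib wrapper the
// bound is 100 + 0 + 0 + 13 + 3 + 6 = 122 bytes, where 13 = (100 * 1 + 7) >> 3 is
// the deflate_quick overhead and 3 is the deflate block overhead. An empty input
// gives 0 + 1 + 1 + 0 + 3 + 6 = 11 bytes.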
2936const fn compress_bound_help(source_len: usize, wrap_len: usize) -> usize {
2937    source_len // The source size itself
2938        // Always at least one byte for any input
2939        .wrapping_add(if source_len == 0 { 1 } else { 0 })
2940        // One extra byte for lengths less than 9
2941        .wrapping_add(if source_len < 9 { 1 } else { 0 })
2942        // Source encoding overhead, padded to next full byte
2943        .wrapping_add(deflate_quick_overhead(source_len))
2944        // Deflate block overhead bytes
2945        .wrapping_add(DEFLATE_BLOCK_OVERHEAD)
2946        // none, zlib or gzip wrapper
2947        .wrapping_add(wrap_len)
2948}
2949
2950/// A heap used to build the Huffman trees.
2951///
2952/// The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
2953/// The same heap array is used to build all trees.
2954#[derive(Clone)]
2955struct Heap {
2956    heap: [u32; 2 * L_CODES + 1],
2957
2958    /// number of elements in the heap
2959    heap_len: usize,
2960
2961    /// element of the largest frequency
2962    heap_max: usize,
2963
2964    depth: [u8; 2 * L_CODES + 1],
2965}
2966
2967impl Heap {
2968    // an empty heap
2969    fn new() -> Self {
2970        Self {
2971            heap: [0; 2 * L_CODES + 1],
2972            heap_len: 0,
2973            heap_max: 0,
2974            depth: [0; 2 * L_CODES + 1],
2975        }
2976    }
2977
2978    /// Construct the initial heap, with least frequent element in
2979    /// heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
2980    fn initialize(&mut self, tree: &mut [Value]) -> isize {
2981        let mut max_code = -1;
2982
2983        self.heap_len = 0;
2984        self.heap_max = HEAP_SIZE;
2985
2986        for (n, node) in tree.iter_mut().enumerate() {
2987            if node.freq() > 0 {
2988                self.heap_len += 1;
2989                self.heap[self.heap_len] = n as u32;
2990                max_code = n as isize;
2991                self.depth[n] = 0;
2992            } else {
2993                *node.len_mut() = 0;
2994            }
2995        }
2996
2997        max_code
2998    }
2999
3000    /// Index within the heap array of least frequent node in the Huffman tree
3001    const SMALLEST: usize = 1;
3002
3003    fn pqdownheap(&mut self, tree: &[Value], mut k: usize) {
3004        /* tree: the tree to restore */
3005        /* k: node to move down */
3006
3007        // Given the index $i of a node in the tree, pack the node's frequency and depth
3008        // into a single integer. The heap ordering logic uses a primary sort on frequency
3009        // and a secondary sort on depth, so packing both into one integer makes it
3010        // possible to sort with fewer comparison operations.
3011        macro_rules! freq_and_depth {
3012            ($i:expr) => {
3013                (tree[$i as usize].freq() as u32) << 8 | self.depth[$i as usize] as u32
3014            };
3015        }
3016
3017        let v = self.heap[k];
3018        let v_val = freq_and_depth!(v);
3019        let mut j = k << 1; /* left son of k */
3020
3021        while j <= self.heap_len {
3022            /* Set j to the smallest of the two sons: */
3023            let mut j_val = freq_and_depth!(self.heap[j]);
3024            if j < self.heap_len {
3025                let j1_val = freq_and_depth!(self.heap[j + 1]);
3026                if j1_val <= j_val {
3027                    j += 1;
3028                    j_val = j1_val;
3029                }
3030            }
3031
3032            /* Exit if v is smaller than both sons */
3033            if v_val <= j_val {
3034                break;
3035            }
3036
3037            /* Exchange v with the smallest son */
3038            self.heap[k] = self.heap[j];
3039            k = j;
3040
3041            /* And continue down the tree, setting j to the left son of k */
3042            j <<= 1;
3043        }
3044
3045        self.heap[k] = v;
3046    }
3047
3048    /// Remove the smallest element from the heap and recreate the heap with
3049    /// one less element. Updates heap and heap_len.
3050    fn pqremove(&mut self, tree: &[Value]) -> u32 {
3051        let top = self.heap[Self::SMALLEST];
3052        self.heap[Self::SMALLEST] = self.heap[self.heap_len];
3053        self.heap_len -= 1;
3054
3055        self.pqdownheap(tree, Self::SMALLEST);
3056
3057        top
3058    }
3059
3060    /// Construct the Huffman tree by repeatedly combining the least two frequent nodes.
3061    fn construct_huffman_tree(&mut self, tree: &mut [Value], mut node: usize) {
3062        loop {
3063            let n = self.pqremove(tree) as usize; /* n = node of least frequency */
3064            let m = self.heap[Heap::SMALLEST] as usize; /* m = node of next least frequency */
3065
3066            self.heap_max -= 1;
3067            self.heap[self.heap_max] = n as u32; /* keep the nodes sorted by frequency */
3068            self.heap_max -= 1;
3069            self.heap[self.heap_max] = m as u32;
3070
3071            /* Create a new node father of n and m */
3072            *tree[node].freq_mut() = tree[n].freq() + tree[m].freq();
3073            self.depth[node] = Ord::max(self.depth[n], self.depth[m]) + 1;
3074
3075            *tree[n].dad_mut() = node as u16;
3076            *tree[m].dad_mut() = node as u16;
3077
3078            /* and insert the new node in the heap */
3079            self.heap[Heap::SMALLEST] = node as u32;
3080            node += 1;
3081
3082            self.pqdownheap(tree, Heap::SMALLEST);
3083
3084            if self.heap_len < 2 {
3085                break;
3086            }
3087        }
3088
3089        self.heap_max -= 1;
3090        self.heap[self.heap_max] = self.heap[Heap::SMALLEST];
3091    }
3092}
3093
3094/// # Safety
3095///
3096/// The caller must guarantee:
3097///
3098/// * If `head` is `Some`
3099///     - `head.extra` is `NULL` or is readable for at least `head.extra_len` bytes
3100///     - `head.name` is `NULL` or satisfies the requirements of [`core::ffi::CStr::from_ptr`]
3101///     - `head.comment` is `NULL` or satisfies the requirements of [`core::ffi::CStr::from_ptr`]
3102pub unsafe fn set_header<'a>(
3103    stream: &mut DeflateStream<'a>,
3104    head: Option<&'a mut gz_header>,
3105) -> ReturnCode {
3106    if stream.state.wrap != 2 {
3107        ReturnCode::StreamError
3108    } else {
3109        stream.state.gzhead = head;
3110        ReturnCode::Ok
3111    }
3112}
3113
3114// zlib format overhead
3115const ZLIB_WRAPLEN: usize = 6;
3116// gzip format overhead
3117const GZIP_WRAPLEN: usize = 18;
3118
3119const DEFLATE_HEADER_BITS: usize = 3;
3120const DEFLATE_EOBS_BITS: usize = 15;
3121const DEFLATE_PAD_BITS: usize = 6;
3122const DEFLATE_BLOCK_OVERHEAD: usize =
3123    (DEFLATE_HEADER_BITS + DEFLATE_EOBS_BITS + DEFLATE_PAD_BITS) >> 3;
3124
3125const DEFLATE_QUICK_LIT_MAX_BITS: usize = 9;
3126const fn deflate_quick_overhead(x: usize) -> usize {
3127    let sum = x
3128        .wrapping_mul(DEFLATE_QUICK_LIT_MAX_BITS - 8)
3129        .wrapping_add(7);
3130
3131    // imitate zlib-ng rounding behavior (on windows, c_ulong is 32 bits)
3132    (sum as core::ffi::c_ulong >> 3) as usize
3133}
3134
3135/// For the default windowBits of 15 and memLevel of 8, this function returns
3136/// a close to exact, as well as small, upper bound on the compressed size.
3137/// They are coded as constants here for a reason: if the underlying constants
3138/// are changed, then this function needs to be changed as well. The return
3139/// value for 15 and 8 only works for those exact settings.
3140///
3141/// For any setting other than those defaults for windowBits and memLevel,
3142/// the value returned is a conservative worst case for the maximum expansion
3143/// resulting from using fixed blocks instead of stored blocks, which deflate
3144/// can emit on compressed data for some combinations of the parameters.
3145///
3146/// This function could be more sophisticated to provide closer upper bounds for
3147/// every combination of windowBits and memLevel.  But even the conservative
3148/// upper bound of about 14% expansion does not seem onerous for output buffer
3149/// allocation.
3150pub fn bound(stream: Option<&mut DeflateStream>, source_len: usize) -> usize {
3151    // on windows, c_ulong is only a 32-bit integer
3152    let mask = core::ffi::c_ulong::MAX as usize;
3153
3154    // conservative upper bound for compressed data
3155    let comp_len = source_len
3156        .wrapping_add((source_len.wrapping_add(7) & mask) >> 3)
3157        .wrapping_add((source_len.wrapping_add(63) & mask) >> 6)
3158        .wrapping_add(5);
3159
3160    let Some(stream) = stream else {
3161        // return conservative bound plus zlib wrapper
3162        return comp_len.wrapping_add(ZLIB_WRAPLEN);
3163    };

    /* compute wrapper length */
    let wrap_len = match stream.state.wrap {
        0 => {
            // raw deflate
            0
        }
        1 => {
            // zlib wrapper
            if stream.state.strstart != 0 {
                ZLIB_WRAPLEN + 4
            } else {
                ZLIB_WRAPLEN
            }
        }
        2 => {
            // gzip wrapper
            let mut gz_wrap_len = GZIP_WRAPLEN;

            if let Some(header) = &stream.state.gzhead {
                if !header.extra.is_null() {
                    gz_wrap_len += 2 + header.extra_len as usize;
                }

                let mut c_string = header.name;
                if !c_string.is_null() {
                    loop {
                        gz_wrap_len += 1;
                        // SAFETY: user guarantees header.name is a valid C string.
                        unsafe {
                            if *c_string == 0 {
                                break;
                            }
                            c_string = c_string.add(1);
                        }
                    }
                }

                let mut c_string = header.comment;
                if !c_string.is_null() {
                    loop {
                        gz_wrap_len += 1;
                        // SAFETY: user guarantees header.comment is a valid C string.
                        unsafe {
                            if *c_string == 0 {
                                break;
                            }
                            c_string = c_string.add(1);
                        }
                    }
                }

                if header.hcrc != 0 {
                    gz_wrap_len += 2;
                }
            }

            gz_wrap_len
        }
        _ => {
            // default
            ZLIB_WRAPLEN
        }
    };

    if stream.state.w_bits() != MAX_WBITS as u32 || HASH_BITS < 15 {
        if stream.state.level == 0 {
            /* upper bound for stored blocks with length 127 (memLevel == 1): ~4% overhead plus a small constant */
            source_len
                .wrapping_add(source_len >> 5)
                .wrapping_add(source_len >> 7)
                .wrapping_add(source_len >> 11)
                .wrapping_add(7)
                .wrapping_add(wrap_len)
        } else {
            comp_len.wrapping_add(wrap_len)
        }
    } else {
        compress_bound_help(source_len, wrap_len)
    }
}

/// # Safety
///
/// `dictionary` must be `NULL`, or it must be writable for at least the stream's window
/// size (`w_size`) bytes; at most that many bytes are copied out.
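///
/// A minimal sketch (marked `ignore`): copy the current dictionary into a buffer that is
/// big enough for the largest possible window (`1 << MAX_WBITS` bytes):
///
/// ```ignore
/// let mut dictionary = [0u8; 32 * 1024];
/// let len = unsafe { get_dictionary(stream, dictionary.as_mut_ptr()) };
/// let dictionary = &dictionary[..len];
/// ```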
pub unsafe fn get_dictionary(stream: &DeflateStream<'_>, dictionary: *mut u8) -> usize {
    let s = &stream.state;
    let len = Ord::min(s.strstart + s.lookahead, s.w_size);

    if !dictionary.is_null() && len > 0 {
        // SAFETY: the source range lies within the window buffer, and the caller
        // guarantees that `dictionary` is writable for at least `len <= w_size` bytes.
        unsafe {
            core::ptr::copy_nonoverlapping(
                s.window.as_ptr().add(s.strstart + s.lookahead - len),
                dictionary,
                len,
            );
        }
    }

    len
}

#[cfg(test)]
mod test {
    use crate::{
        inflate::{uncompress_slice, InflateConfig, InflateStream},
        InflateFlush,
    };

    use super::*;

    use core::{ffi::CStr, sync::atomic::AtomicUsize};

    #[test]
    fn detect_data_type_basic() {
        let empty = || [Value::new(0, 0); LITERALS];

        assert_eq!(State::detect_data_type(&empty()), DataType::Binary);

        let mut binary = empty();
        binary[0] = Value::new(1, 0);
        assert_eq!(State::detect_data_type(&binary), DataType::Binary);

        let mut text = empty();
        text[b'\r' as usize] = Value::new(1, 0);
        assert_eq!(State::detect_data_type(&text), DataType::Text);

        let mut text = empty();
        text[b'a' as usize] = Value::new(1, 0);
        assert_eq!(State::detect_data_type(&text), DataType::Text);

        let mut non_text = empty();
        non_text[7] = Value::new(1, 0);
        assert_eq!(State::detect_data_type(&non_text), DataType::Binary);
    }

    #[test]
    fn from_stream_mut() {
        unsafe {
            assert!(DeflateStream::from_stream_mut(core::ptr::null_mut()).is_none());

            // the allocator functions are not set, and state is still NULL
            let mut stream = z_stream::default();
            assert!(DeflateStream::from_stream_mut(&mut stream).is_none());

            init(&mut stream, DeflateConfig::default());
            let stream = DeflateStream::from_stream_mut(&mut stream);
            assert!(stream.is_some());

            assert!(end(stream.unwrap()).is_ok());
        }
    }

    unsafe extern "C" fn fail_nth_allocation<const N: usize>(
        opaque: crate::c_api::voidpf,
        items: crate::c_api::uInt,
        size: crate::c_api::uInt,
    ) -> crate::c_api::voidpf {
        let count = unsafe { &*(opaque as *const AtomicUsize) };

        if count.fetch_add(1, core::sync::atomic::Ordering::Relaxed) != N {
            // We must use the C allocator internally: (de)allocation behavior is chosen
            // based on the function pointer values, and because we don't use the rust
            // allocator directly, the allocation logic will store a pointer to the start
            // of the allocation at its start.
            unsafe { (crate::allocate::Allocator::C.zalloc)(opaque, items, size) }
        } else {
            core::ptr::null_mut()
        }
    }

    #[test]
    fn init_invalid_allocator() {
        {
            let atomic = AtomicUsize::new(0);
            let mut stream = z_stream {
                zalloc: Some(fail_nth_allocation::<0>),
                zfree: Some(crate::allocate::Allocator::C.zfree),
                opaque: &atomic as *const _ as *const core::ffi::c_void as *mut _,
                ..z_stream::default()
            };
            assert_eq!(
                init(&mut stream, DeflateConfig::default()),
                ReturnCode::MemError
            );
        }

        {
            let atomic = AtomicUsize::new(0);
            let mut stream = z_stream {
                zalloc: Some(fail_nth_allocation::<3>),
                zfree: Some(crate::allocate::Allocator::C.zfree),
                opaque: &atomic as *const _ as *const core::ffi::c_void as *mut _,
                ..z_stream::default()
            };
            assert_eq!(
                init(&mut stream, DeflateConfig::default()),
                ReturnCode::MemError
            );
        }

        {
            let atomic = AtomicUsize::new(0);
            let mut stream = z_stream {
                zalloc: Some(fail_nth_allocation::<5>),
                zfree: Some(crate::allocate::Allocator::C.zfree),
                opaque: &atomic as *const _ as *const core::ffi::c_void as *mut _,
                ..z_stream::default()
            };
            assert_eq!(
                init(&mut stream, DeflateConfig::default()),
                ReturnCode::MemError
            );
        }
    }

    mod copy_invalid_allocator {
        use super::*;

        #[test]
        fn fail_0() {
            let mut stream = z_stream::default();

            let atomic = AtomicUsize::new(0);
            stream.opaque = &atomic as *const _ as *const core::ffi::c_void as *mut _;
            stream.zalloc = Some(fail_nth_allocation::<6>);
            stream.zfree = Some(crate::allocate::Allocator::C.zfree);

            // init performs 6 allocations; we don't want those to fail
            assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok);

            let Some(stream) = (unsafe { DeflateStream::from_stream_mut(&mut stream) }) else {
                unreachable!()
            };

            let mut stream_copy = MaybeUninit::<DeflateStream>::zeroed();

            assert_eq!(copy(&mut stream_copy, stream), ReturnCode::MemError);

            assert!(end(stream).is_ok());
        }

        #[test]
        fn fail_3() {
            let mut stream = z_stream::default();

            let atomic = AtomicUsize::new(0);
            stream.zalloc = Some(fail_nth_allocation::<{ 6 + 3 }>);
            stream.zfree = Some(crate::allocate::Allocator::C.zfree);
            stream.opaque = &atomic as *const _ as *const core::ffi::c_void as *mut _;

            // init performs 6 allocations; we don't want those to fail
            assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok);

            let Some(stream) = (unsafe { DeflateStream::from_stream_mut(&mut stream) }) else {
                unreachable!()
            };

            let mut stream_copy = MaybeUninit::<DeflateStream>::zeroed();

            assert_eq!(copy(&mut stream_copy, stream), ReturnCode::MemError);

            assert!(end(stream).is_ok());
        }

        #[test]
        fn fail_5() {
            let mut stream = z_stream::default();

            let atomic = AtomicUsize::new(0);
            stream.zalloc = Some(fail_nth_allocation::<{ 6 + 5 }>);
            stream.zfree = Some(crate::allocate::Allocator::C.zfree);
            stream.opaque = &atomic as *const _ as *const core::ffi::c_void as *mut _;

            // init performs 6 allocations; we don't want those to fail
            assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok);

            let Some(stream) = (unsafe { DeflateStream::from_stream_mut(&mut stream) }) else {
                unreachable!()
            };

            let mut stream_copy = MaybeUninit::<DeflateStream>::zeroed();

            assert_eq!(copy(&mut stream_copy, stream), ReturnCode::MemError);

            assert!(end(stream).is_ok());
        }
    }

    mod invalid_deflate_config {
        use super::*;

        #[test]
        fn sanity_check() {
            let mut stream = z_stream::default();
            assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok);

            assert!(stream.zalloc.is_some());
            assert!(stream.zfree.is_some());

            // this should be the default level
            let stream = unsafe { DeflateStream::from_stream_mut(&mut stream) }.unwrap();
            assert_eq!(stream.state.level, 6);

            assert!(end(stream).is_ok());
        }

        #[test]
        fn window_bits_correction() {
            // window_bits of 8 gets turned into 9 internally
            let mut stream = z_stream::default();
            let config = DeflateConfig {
                window_bits: 8,
                ..Default::default()
            };
            assert_eq!(init(&mut stream, config), ReturnCode::Ok);
            let stream = unsafe { DeflateStream::from_stream_mut(&mut stream) }.unwrap();
            assert_eq!(stream.state.w_bits(), 9);

            assert!(end(stream).is_ok());
        }

        #[test]
        fn window_bits_too_low() {
            let mut stream = z_stream::default();
            let config = DeflateConfig {
                window_bits: -16,
                ..Default::default()
            };
            assert_eq!(init(&mut stream, config), ReturnCode::StreamError);
        }

        #[test]
        fn window_bits_too_high() {
            // window bits too high
            let mut stream = z_stream::default();
            let config = DeflateConfig {
                window_bits: 42,
                ..Default::default()
            };
            assert_eq!(init(&mut stream, config), ReturnCode::StreamError);
        }
    }

    #[test]
    fn end_data_error() {
        let mut stream = z_stream::default();
        assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok);
        let stream = unsafe { DeflateStream::from_stream_mut(&mut stream) }.unwrap();

        // next, deflate into too little space
        let input = b"Hello World\n";
        stream.next_in = input.as_ptr() as *mut u8;
        stream.avail_in = input.len() as _;
        let output = &mut [0, 0, 0];
        stream.next_out = output.as_mut_ptr();
        stream.avail_out = output.len() as _;

        // the deflate is fine
        assert_eq!(deflate(stream, DeflateFlush::NoFlush), ReturnCode::Ok);

        // but end is not
        assert!(end(stream).is_err());
    }
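
    #[test]
    fn bound_is_sufficient() {
        // a sketch exercising `bound`: an output buffer sized by the conservative
        // (stream-independent) estimate is large enough for a small input.
        let input = b"Hello World\n";

        let mut output = vec![0; bound(None, input.len())];

        let (output, err) = compress_slice(&mut output, input, DeflateConfig::default());
        assert_eq!(err, ReturnCode::Ok);
        assert!(output.len() <= bound(None, input.len()));
    }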

    #[test]
    fn test_reset_keep() {
        let mut stream = z_stream::default();
        assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok);
        let stream = unsafe { DeflateStream::from_stream_mut(&mut stream) }.unwrap();

        // deflate some input to completion
        let input = b"Hello World\n";
        stream.next_in = input.as_ptr() as *mut u8;
        stream.avail_in = input.len() as _;

        let output = &mut [0; 1024];
        stream.next_out = output.as_mut_ptr();
        stream.avail_out = output.len() as _;
        assert_eq!(deflate(stream, DeflateFlush::Finish), ReturnCode::StreamEnd);

        assert_eq!(reset_keep(stream), ReturnCode::Ok);

        // after the reset, the stream can immediately run another deflate round
        let output = &mut [0; 1024];
        stream.next_out = output.as_mut_ptr();
        stream.avail_out = output.len() as _;
        assert_eq!(deflate(stream, DeflateFlush::Finish), ReturnCode::StreamEnd);

        assert!(end(stream).is_ok());
    }
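
    #[test]
    fn get_dictionary_smoke() {
        // a sketch exercising `get_dictionary`: the reported length never exceeds the
        // window size, so a 32k buffer (the largest window for MAX_WBITS) always fits.
        let mut stream = z_stream::default();
        assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok);
        let stream = unsafe { DeflateStream::from_stream_mut(&mut stream) }.unwrap();

        let input = b"Hello World\n";
        stream.next_in = input.as_ptr() as *mut u8;
        stream.avail_in = input.len() as _;

        let output = &mut [0; 64];
        stream.next_out = output.as_mut_ptr();
        stream.avail_out = output.len() as _;
        assert_eq!(deflate(stream, DeflateFlush::Finish), ReturnCode::StreamEnd);

        let mut dictionary = [0u8; 32 * 1024];
        let len = unsafe { get_dictionary(stream, dictionary.as_mut_ptr()) };
        assert!(len <= dictionary.len());

        assert!(end(stream).is_ok());
    }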

    #[test]
    fn hello_world_huffman_only() {
        const EXPECTED: &[u8] = &[
            0x78, 0x01, 0xf3, 0x48, 0xcd, 0xc9, 0xc9, 0x57, 0x08, 0xcf, 0x2f, 0xca, 0x49, 0x51,
            0xe4, 0x02, 0x00, 0x20, 0x91, 0x04, 0x48,
        ];

        let input = "Hello World!\n";

        let mut output = vec![0; 128];

        let config = DeflateConfig {
            level: 6,
            method: Method::Deflated,
            window_bits: crate::MAX_WBITS,
            mem_level: DEF_MEM_LEVEL,
            strategy: Strategy::HuffmanOnly,
        };

        let (output, err) = compress_slice(&mut output, input.as_bytes(), config);

        assert_eq!(err, ReturnCode::Ok);

        assert_eq!(output.len(), EXPECTED.len());

        assert_eq!(EXPECTED, output);
    }

    #[test]
    fn hello_world_quick() {
        const EXPECTED: &[u8] = &[
            0x78, 0x01, 0xf3, 0x48, 0xcd, 0xc9, 0xc9, 0x57, 0x08, 0xcf, 0x2f, 0xca, 0x49, 0x51,
            0xe4, 0x02, 0x00, 0x20, 0x91, 0x04, 0x48,
        ];

        let input = "Hello World!\n";

        let mut output = vec![0; 128];

        let config = DeflateConfig {
            level: 1,
            method: Method::Deflated,
            window_bits: crate::MAX_WBITS,
            mem_level: DEF_MEM_LEVEL,
            strategy: Strategy::Default,
        };

        let (output, err) = compress_slice(&mut output, input.as_bytes(), config);

        assert_eq!(err, ReturnCode::Ok);

        assert_eq!(output.len(), EXPECTED.len());

        assert_eq!(EXPECTED, output);
    }

    #[test]
    fn hello_world_quick_random() {
        const EXPECTED: &[u8] = &[
            0x78, 0x01, 0x53, 0xe1, 0x50, 0x51, 0xe1, 0x52, 0x51, 0x51, 0x01, 0x00, 0x03, 0xec,
            0x00, 0xeb,
        ];

        let input = "$\u{8}$$\n$$$";

        let mut output = vec![0; 128];

        let config = DeflateConfig {
            level: 1,
            method: Method::Deflated,
            window_bits: crate::MAX_WBITS,
            mem_level: DEF_MEM_LEVEL,
            strategy: Strategy::Default,
        };

        let (output, err) = compress_slice(&mut output, input.as_bytes(), config);

        assert_eq!(err, ReturnCode::Ok);

        assert_eq!(output.len(), EXPECTED.len());

        assert_eq!(EXPECTED, output);
    }

    fn fuzz_based_test(input: &[u8], config: DeflateConfig, expected: &[u8]) {
        let mut output_rs = [0; 1 << 17];
        let (output_rs, err) = compress_slice(&mut output_rs, input, config);
        assert_eq!(err, ReturnCode::Ok);

        assert_eq!(output_rs, expected);
    }

    #[test]
    fn simple_rle() {
        fuzz_based_test(
            "\0\0\0\0\u{6}".as_bytes(),
            DeflateConfig {
                level: -1,
                method: Method::Deflated,
                window_bits: 11,
                mem_level: 4,
                strategy: Strategy::Rle,
            },
            &[56, 17, 99, 0, 2, 54, 0, 0, 11, 0, 7],
        )
    }

    #[test]
    fn fill_window_out_of_bounds() {
        const INPUT: &[u8] = &[
            0x71, 0x71, 0x71, 0x71, 0x71, 0x6a, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71,
            0x71, 0x71, 0x71, 0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1d, 0x1d, 0x1d, 0x1d, 0x63,
            0x63, 0x63, 0x63, 0x63, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d,
            0x1d, 0x27, 0x0, 0x0, 0x0, 0x1d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71,
            0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x31, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x1d, 0x1d, 0x0, 0x0, 0x0, 0x0, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
            0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x48, 0x50,
            0x50, 0x50, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2c, 0x0, 0x0, 0x0, 0x0, 0x4a,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71,
            0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x70, 0x71, 0x71, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x6a, 0x0, 0x0, 0x0, 0x0,
            0x71, 0x0, 0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x31, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71,
            0x71, 0x71, 0x0, 0x4a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71,
            0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71,
            0x70, 0x71, 0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71,
            0x6a, 0x0, 0x0, 0x0, 0x0, 0x71, 0x0, 0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x31, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1d, 0x1d, 0x0, 0x0, 0x0, 0x0,
            0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
            0x50, 0x50, 0x50, 0x50, 0x48, 0x50, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x3b, 0x3f, 0x71,
            0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x50, 0x50, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x2c, 0x0, 0x0, 0x0, 0x0, 0x4a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71,
            0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71,
            0x71, 0x70, 0x71, 0x71, 0x0, 0x0, 0x0, 0x6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71,
            0x71, 0x71, 0x71, 0x70, 0x71, 0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x3b, 0x3f, 0x71, 0x71, 0x71,
            0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71,
            0x71, 0x3b, 0x3f, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x20, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x71, 0x75, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71,
            0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x10, 0x0, 0x71, 0x71,
            0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x3b, 0x71, 0x71, 0x71, 0x71, 0x71,
            0x71, 0x76, 0x71, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x71, 0x71, 0x71, 0x71, 0x71,
            0x71, 0x71, 0x71, 0x10, 0x0, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71,
            0x71, 0x3b, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x76, 0x71, 0x34, 0x34, 0x34, 0x34,
            0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34,
            0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x0, 0x0, 0x0, 0x0, 0x0, 0x34, 0x34, 0x34, 0x34,
            0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34,
            0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34,
            0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34,
            0x34, 0x34, 0x30, 0x34, 0x34, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x71, 0x0, 0x0, 0x0, 0x0, 0x6,
        ];

        fuzz_based_test(
            INPUT,
            DeflateConfig {
                level: -1,
                method: Method::Deflated,
                window_bits: 9,
                mem_level: 1,
                strategy: Strategy::HuffmanOnly,
            },
            &[
                0x18, 0x19, 0x4, 0xc1, 0x21, 0x1, 0xc4, 0x0, 0x10, 0x3, 0xb0, 0x18, 0x29, 0x1e,
                0x7e, 0x17, 0x83, 0xf5, 0x70, 0x6c, 0xac, 0xfe, 0xc9, 0x27, 0xdb, 0xb6, 0x6f, 0xdb,
                0xb6, 0x6d, 0xdb, 0x80, 0x24, 0xb9, 0xbb, 0xbb, 0x24, 0x49, 0x92, 0x24, 0xf, 0x2,
                0xd8, 0x36, 0x0, 0xf0, 0x3, 0x0, 0x0, 0x24, 0xd0, 0xb6, 0x6d, 0xdb, 0xb6, 0x6d,
                0xdb, 0xbe, 0x6d, 0xf9, 0x13, 0x4, 0xc7, 0x4, 0x0, 0x80, 0x30, 0x0, 0xc3, 0x22,
                0x68, 0xf, 0x36, 0x90, 0xc2, 0xb5, 0xfa, 0x7f, 0x48, 0x80, 0x81, 0xb, 0x40, 0x55,
                0x55, 0x55, 0xd5, 0x16, 0x80, 0xaa, 0x7, 0x9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
                0xe, 0x7c, 0x82, 0xe0, 0x98, 0x0, 0x0, 0x0, 0x4, 0x60, 0x10, 0xf9, 0x8c, 0xe2,
                0xe5, 0xfa, 0x3f, 0x2, 0x54, 0x55, 0x55, 0x65, 0x0, 0xa8, 0xaa, 0xaa, 0xaa, 0xba,
                0x2, 0x50, 0xb5, 0x90, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x78, 0x82, 0xe0, 0xd0,
                0x8a, 0x41, 0x0, 0x0, 0xa2, 0x58, 0x54, 0xb7, 0x60, 0x83, 0x9a, 0x6a, 0x4, 0x96,
                0x87, 0xba, 0x51, 0xf8, 0xfb, 0x9b, 0x26, 0xfc, 0x0, 0x1c, 0x7, 0x6c, 0xdb, 0xb6,
                0x6d, 0xdb, 0xb6, 0x6d, 0xf7, 0xa8, 0x3a, 0xaf, 0xaa, 0x6a, 0x3, 0xf8, 0xc2, 0x3,
                0x40, 0x55, 0x55, 0x55, 0xd5, 0x5b, 0xf8, 0x80, 0xaa, 0x7a, 0xb, 0x0, 0x7f, 0x82,
                0xe0, 0x98, 0x0, 0x40, 0x18, 0x0, 0x82, 0xd8, 0x49, 0x40, 0x2, 0x22, 0x7e, 0xeb,
                0x80, 0xa6, 0xc, 0xa0, 0x9f, 0xa4, 0x2a, 0x38, 0xf, 0x0, 0x0, 0xe7, 0x1, 0xdc,
                0x55, 0x95, 0x17, 0x0, 0x0, 0xae, 0x0, 0x38, 0xc0, 0x67, 0xdb, 0x36, 0x80, 0x2b,
                0x0, 0xe, 0xf0, 0xd9, 0xf6, 0x13, 0x4, 0xc7, 0x4, 0x0, 0x0, 0x30, 0xc, 0x83, 0x22,
                0x69, 0x7, 0xc6, 0xea, 0xff, 0x19, 0x0, 0x0, 0x80, 0xaa, 0x0, 0x0, 0x0, 0x0, 0x0,
                0x0, 0x8e, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0x6a,
                0xf5, 0x63, 0x60, 0x60, 0x3, 0x0, 0xee, 0x8a, 0x88, 0x67,
            ],
        )
    }

    #[test]
    fn gzip_no_header() {
        let config = DeflateConfig {
            level: 9,
            method: Method::Deflated,
            window_bits: 31, // gzip
            ..Default::default()
        };

        let input = b"Hello World!";
        let os = gz_header::OS_CODE;

        fuzz_based_test(
            input,
            config,
            &[
                31, 139, 8, 0, 0, 0, 0, 0, 2, os, 243, 72, 205, 201, 201, 87, 8, 207, 47, 202, 73,
                81, 4, 0, 163, 28, 41, 28, 12, 0, 0, 0,
            ],
        )
    }
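
    #[test]
    fn set_header_requires_gzip() {
        // a sketch of the `set_header` error path: the default config produces a zlib
        // stream (wrap != 2), so attaching a gzip header reports `StreamError`.
        let mut header = gz_header {
            text: 0,
            time: 0,
            xflags: 0,
            os: 0,
            extra: core::ptr::null_mut(),
            extra_len: 0,
            extra_max: 0,
            name: core::ptr::null_mut(),
            name_max: 0,
            comment: core::ptr::null_mut(),
            comm_max: 0,
            hcrc: 0,
            done: 0,
        };

        let mut stream = z_stream::default();
        assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok);
        let stream = unsafe { DeflateStream::from_stream_mut(&mut stream) }.unwrap();

        assert_eq!(
            unsafe { set_header(stream, Some(&mut header)) },
            ReturnCode::StreamError
        );

        assert!(end(stream).is_ok());
    }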

    #[test]
    #[rustfmt::skip]
    fn gzip_stored_block_checksum() {
        fuzz_based_test(
            &[
                27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 9, 0,
            ],
            DeflateConfig {
                level: 0,
                method: Method::Deflated,
                window_bits: 26,
                mem_level: 6,
                strategy: Strategy::Default,
            },
            &[
                31, 139, 8, 0, 0, 0, 0, 0, 4, gz_header::OS_CODE, 1, 18, 0, 237, 255, 27, 27, 27, 27, 27, 27, 27,
                27, 27, 27, 27, 27, 27, 27, 27, 27, 9, 0, 60, 101, 156, 55, 18, 0, 0, 0,
            ],
        )
    }

    #[test]
    fn gzip_header_pending_flush() {
        let extra = "aaaaaaaaaaaaaaaaaaaa\0";
        let name = "bbbbbbbbbbbbbbbbbbbb\0";
        let comment = "cccccccccccccccccccc\0";

        let mut header = gz_header {
            text: 0,
            time: 0,
            xflags: 0,
            os: 0,
            extra: extra.as_ptr() as *mut _,
            extra_len: extra.len() as _,
            extra_max: 0,
            name: name.as_ptr() as *mut _,
            name_max: 0,
            comment: comment.as_ptr() as *mut _,
            comm_max: 0,
            hcrc: 1,
            done: 0,
        };

        let config = DeflateConfig {
            window_bits: 31,
            mem_level: 1,
            ..Default::default()
        };

        let mut stream = z_stream::default();
        assert_eq!(init(&mut stream, config), ReturnCode::Ok);

        let Some(stream) = (unsafe { DeflateStream::from_stream_mut(&mut stream) }) else {
            unreachable!()
        };

        unsafe { set_header(stream, Some(&mut header)) };

        let input = b"Hello World\n";
        stream.next_in = input.as_ptr() as *mut _;
        stream.avail_in = input.len() as _;

        let mut output = [0u8; 1024];
        stream.next_out = output.as_mut_ptr();
        stream.avail_out = 100;

        assert_eq!(stream.state.bit_writer.pending.capacity(), 512);

        // only 12 bytes remain, so to write the name the pending buffer must be flushed,
        // but there is insufficient output space to flush (only 100 bytes)
        stream.state.bit_writer.pending.extend(&[0; 500]);

        assert_eq!(deflate(stream, DeflateFlush::Finish), ReturnCode::Ok);

        // now try that again, but with sufficient output space
        stream.avail_out = output.len() as _;
        assert_eq!(deflate(stream, DeflateFlush::Finish), ReturnCode::StreamEnd);

        let n = stream.total_out as usize;

        assert!(end(stream).is_ok());

        let output_rs = &mut output[..n];

        assert_eq!(output_rs.len(), 500 + 99);
    }

    #[test]
    fn gzip_with_header() {
        // this test is here mostly so that Miri exercises the gzip header code paths. A
        // test that compares behavior with zlib-ng is in the libz-rs-sys test suite.

        let extra = "some extra stuff\0";
        let name = "nomen est omen\0";
        let comment = "such comment\0";

        let mut header = gz_header {
            text: 0,
            time: 0,
            xflags: 0,
            os: 0,
            extra: extra.as_ptr() as *mut _,
            extra_len: extra.len() as _,
            extra_max: 0,
            name: name.as_ptr() as *mut _,
            name_max: 0,
            comment: comment.as_ptr() as *mut _,
            comm_max: 0,
            hcrc: 1,
            done: 0,
        };

        let config = DeflateConfig {
            window_bits: 31,
            ..Default::default()
        };

        let mut stream = z_stream::default();
        assert_eq!(init(&mut stream, config), ReturnCode::Ok);

        let Some(stream) = (unsafe { DeflateStream::from_stream_mut(&mut stream) }) else {
            unreachable!()
        };

        unsafe { set_header(stream, Some(&mut header)) };

        let input = b"Hello World\n";
        stream.next_in = input.as_ptr() as *mut _;
        stream.avail_in = input.len() as _;

        let mut output = [0u8; 256];
        stream.next_out = output.as_mut_ptr();
        stream.avail_out = output.len() as _;

        assert_eq!(deflate(stream, DeflateFlush::Finish), ReturnCode::StreamEnd);

        let n = stream.total_out as usize;

        assert!(end(stream).is_ok());

        let output_rs = &mut output[..n];

        assert_eq!(output_rs.len(), 81);

        {
            let mut stream = z_stream::default();

            let config = InflateConfig {
                window_bits: config.window_bits,
            };

            assert_eq!(crate::inflate::init(&mut stream, config), ReturnCode::Ok);

            let Some(stream) = (unsafe { InflateStream::from_stream_mut(&mut stream) }) else {
                unreachable!();
            };

            stream.next_in = output_rs.as_mut_ptr() as _;
            stream.avail_in = output_rs.len() as _;

            let mut output = [0u8; 12];
            stream.next_out = output.as_mut_ptr();
            stream.avail_out = output.len() as _;

            let mut extra_buf = [0u8; 64];
            let mut name_buf = [0u8; 64];
            let mut comment_buf = [0u8; 64];

            let mut header = gz_header {
                text: 0,
                time: 0,
                xflags: 0,
                os: 0,
                extra: extra_buf.as_mut_ptr(),
                extra_len: 0,
                extra_max: extra_buf.len() as _,
                name: name_buf.as_mut_ptr(),
                name_max: name_buf.len() as _,
                comment: comment_buf.as_mut_ptr(),
                comm_max: comment_buf.len() as _,
                hcrc: 0,
                done: 0,
            };

            assert_eq!(
                unsafe { crate::inflate::get_header(stream, Some(&mut header)) },
                ReturnCode::Ok
            );

            assert_eq!(
                unsafe { crate::inflate::inflate(stream, InflateFlush::Finish) },
                ReturnCode::StreamEnd
            );

            crate::inflate::end(stream);

            assert!(!header.comment.is_null());
            assert_eq!(
                unsafe { CStr::from_ptr(header.comment.cast()) }
                    .to_str()
                    .unwrap(),
                comment.trim_end_matches('\0')
            );

            assert!(!header.name.is_null());
            assert_eq!(
                unsafe { CStr::from_ptr(header.name.cast()) }
                    .to_str()
                    .unwrap(),
                name.trim_end_matches('\0')
            );

            assert!(!header.extra.is_null());
            assert_eq!(
                unsafe { CStr::from_ptr(header.extra.cast()) }
                    .to_str()
                    .unwrap(),
                extra.trim_end_matches('\0')
            );
        }
    }

    #[test]
    fn insufficient_compress_space() {
        const DATA: &[u8] = include_bytes!("deflate/test-data/inflate_buf_error.dat");

        fn helper(deflate_buf: &mut [u8]) -> ReturnCode {
            let config = DeflateConfig {
                level: 0,
                method: Method::Deflated,
                window_bits: 10,
                mem_level: 6,
                strategy: Strategy::Default,
            };

            let (output, err) = compress_slice(deflate_buf, DATA, config);
            assert_eq!(err, ReturnCode::Ok);

            let config = InflateConfig {
                window_bits: config.window_bits,
            };

            let mut uncompr = [0; 1 << 17];
            let (uncompr, err) = uncompress_slice(&mut uncompr, output, config);

            if err == ReturnCode::Ok {
                assert_eq!(DATA, uncompr);
            }

            err
        }

        let mut output = [0; 1 << 17];

        // this is too little space
        assert_eq!(helper(&mut output[..1 << 16]), ReturnCode::DataError);

        // this is sufficient space
        assert_eq!(helper(&mut output), ReturnCode::Ok);
    }

    fn test_flush(flush: DeflateFlush, expected: &[u8]) {
        let input = b"Hello World!\n";

        let config = DeflateConfig {
            level: 6,
            method: Method::Deflated,
            window_bits: 16 + crate::MAX_WBITS, // use the gzip wrapper
            mem_level: DEF_MEM_LEVEL,
            strategy: Strategy::Default,
        };

        let mut output_rs = vec![0; 128];

        // with the flush modes that we test here, the deflate process still has `Status::Busy`,
        // and the `deflateEnd` function will return `DataError`.
        let expected_err = ReturnCode::DataError;

        let (rs, err) = compress_slice_with_flush(&mut output_rs, input, config, flush);
        assert_eq!(expected_err, err);

        assert_eq!(rs, expected);
    }

    #[test]
    #[rustfmt::skip]
    fn sync_flush() {
        test_flush(
            DeflateFlush::SyncFlush,
            &[
                31, 139, 8, 0, 0, 0, 0, 0, 0, gz_header::OS_CODE, 242, 72, 205, 201, 201, 87, 8, 207, 47, 202, 73,
                81, 228, 2, 0, 0, 0, 255, 255,
            ],
        )
    }

    #[test]
    #[rustfmt::skip]
    fn partial_flush() {
        test_flush(
            DeflateFlush::PartialFlush,
            &[
                31, 139, 8, 0, 0, 0, 0, 0, 0, gz_header::OS_CODE, 242, 72, 205, 201, 201, 87, 8, 207, 47, 202, 73,
                81, 228, 2, 8,
            ],
        );
    }

    #[test]
    #[rustfmt::skip]
    fn full_flush() {
        test_flush(
            DeflateFlush::FullFlush,
            &[
                31, 139, 8, 0, 0, 0, 0, 0, 0, gz_header::OS_CODE, 242, 72, 205, 201, 201, 87, 8, 207, 47, 202, 73,
                81, 228, 2, 0, 0, 0, 255, 255,
            ],
        );
    }

    #[test]
    #[rustfmt::skip]
    fn block_flush() {
        test_flush(
            DeflateFlush::Block,
            &[
                31, 139, 8, 0, 0, 0, 0, 0, 0, gz_header::OS_CODE, 242, 72, 205, 201, 201, 87, 8, 207, 47, 202, 73,
                81, 228, 2,
            ],
        );
    }

    #[test]
    // splits the input into two, deflates them separately, and then joins the deflated byte
    // streams into something that can be correctly inflated again. This is the basic idea
    // behind pigz, and allows for parallel compression.
    fn split_deflate() {
        let input = "Hello World!\n";

        let (input1, input2) = input.split_at(6);

        let mut output1 = vec![0; 128];
        let mut output2 = vec![0; 128];

        let config = DeflateConfig {
            level: 6,
            method: Method::Deflated,
            window_bits: 16 + crate::MAX_WBITS, // use the gzip wrapper
            mem_level: DEF_MEM_LEVEL,
            strategy: Strategy::Default,
        };

        // see also the docs on `SyncFlush`. It makes sure everything is flushed, ends on a byte
        // boundary, and that the final block does not have the "last block" bit set.
        let (prefix, err) = compress_slice_with_flush(
            &mut output1,
            input1.as_bytes(),
            config,
            DeflateFlush::SyncFlush,
        );
        assert_eq!(err, ReturnCode::DataError);

        let (output2, err) = compress_slice_with_flush(
            &mut output2,
            input2.as_bytes(),
            config,
            DeflateFlush::Finish,
        );
        assert_eq!(err, ReturnCode::Ok);

        let inflate_config = crate::inflate::InflateConfig {
            window_bits: 16 + 15,
        };

        // cut off the 8-byte trailer (the 4-byte crc followed by the 4-byte length)
        let (suffix, end) = output2.split_at(output2.len() - 8);
        let (crc2, len2) = end.split_at(4);
        let crc2 = u32::from_le_bytes(crc2.try_into().unwrap());

        // cut off the gzip header (10 bytes) from the front
        let suffix = &suffix[10..];

        let mut result: Vec<u8> = Vec::new();
        result.extend(prefix.iter());
        result.extend(suffix);

        // it would be more proper to use `stream.total_in` here, but the slice helpers hide
        // the stream, so we cheat a bit
        let len1 = input1.len() as u32;
        let len2 = u32::from_le_bytes(len2.try_into().unwrap());
        assert_eq!(len2 as usize, input2.len());

        let crc1 = crate::crc32(0, input1.as_bytes());
        let crc = crate::crc32_combine(crc1, crc2, len2 as u64);

        // the combined crc of the parts should be the crc of the whole
        let crc_cheating = crate::crc32(0, input.as_bytes());
        assert_eq!(crc, crc_cheating);

        // write the trailer
        result.extend(crc.to_le_bytes());
        result.extend((len1 + len2).to_le_bytes());

        let mut output = vec![0; 128];
        let (output, err) = crate::inflate::uncompress_slice(&mut output, &result, inflate_config);
        assert_eq!(err, ReturnCode::Ok);

        assert_eq!(output, input.as_bytes());
    }

    #[test]
    fn inflate_window_copy_slice() {
        let uncompressed = [
            9, 126, 0, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 76, 33, 8, 2, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12,
            10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 76, 33, 8, 2, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 12, 10, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
            69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 14, 0, 0, 0, 0, 0, 69, 69, 69, 69, 69, 69, 69,
            69, 69, 69, 69, 69, 69, 69, 69, 9, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
            69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
            69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
            69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 12, 28, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 12, 10, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
            69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 14, 0, 0, 0, 0, 0, 69, 69, 69, 69, 69, 69, 69,
            69, 69, 69, 69, 69, 69, 69, 69, 9, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
            69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
            69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
            69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 12, 28, 0, 2, 0, 0, 0, 63, 1, 0, 12, 2,
            36, 0, 28, 0, 0, 0, 1, 0, 0, 63, 63, 13, 0, 0, 0, 0, 0, 0, 0, 63, 63, 63, 63, 0, 0, 0,
            0, 0, 0, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 91, 0, 0, 0, 9, 0, 0, 0, 9, 0, 0, 12, 33, 2, 0, 0, 8,
            0, 4, 0, 0, 0, 12, 10, 41, 12, 10, 47,
        ];

        let compressed = &[
            31, 139, 8, 0, 0, 0, 0, 0, 4, 3, 181, 193, 49, 14, 194, 32, 24, 128, 209, 175, 192, 0,
            228, 151, 232, 206, 66, 226, 226, 96, 60, 2, 113, 96, 235, 13, 188, 139, 103, 23, 106,
            104, 108, 100, 49, 169, 239, 185, 39, 11, 199, 7, 51, 39, 171, 248, 118, 226, 63, 52,
            157, 120, 86, 102, 78, 86, 209, 104, 58, 241, 84, 129, 166, 12, 4, 154, 178, 229, 202,
            30, 36, 130, 166, 19, 79, 21, 104, 202, 64, 160, 41, 91, 174, 236, 65, 34, 10, 200, 19,
            162, 206, 68, 96, 130, 156, 15, 188, 229, 138, 197, 157, 161, 35, 3, 87, 126, 245, 0,
            28, 224, 64, 146, 2, 139, 1, 196, 95, 196, 223, 94, 10, 96, 92, 33, 86, 2, 0, 0,
        ];

        let config = InflateConfig { window_bits: 25 };

        let mut dest_vec_rs = vec![0u8; uncompressed.len()];
        let (output_rs, error) =
            crate::inflate::uncompress_slice(&mut dest_vec_rs, compressed, config);

        assert_eq!(ReturnCode::Ok, error);
        assert_eq!(output_rs, uncompressed);
    }

    #[test]
    fn hash_calc_difference() {
        let input = [
            0, 0, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0, 0, 0, 0, 0,
            0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 55, 0, 64, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 102, 102, 102, 102, 102,
            102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
            102, 102, 102, 102, 102, 102, 102, 102, 112, 102, 102, 102, 102, 102, 102, 102, 102,
            102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 0,
            0, 0, 0, 0, 0, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 55, 0, 64, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0,
            50, 0,
        ];

        let config = DeflateConfig {
            level: 6,
            method: Method::Deflated,
            window_bits: 9,
            mem_level: 8,
            strategy: Strategy::Default,
        };

        let expected = [
            24, 149, 99, 96, 96, 96, 96, 208, 6, 17, 112, 138, 129, 193, 128, 1, 29, 24, 50, 208,
            1, 200, 146, 169, 79, 24, 74, 59, 96, 147, 52, 71, 22, 70, 246, 88, 26, 94, 80, 128,
            83, 6, 162, 219, 144, 76, 183, 210, 5, 8, 67, 105, 36, 159, 35, 128, 57, 118, 97, 100,
            160, 197, 192, 192, 96, 196, 0, 0, 3, 228, 25, 128,
        ];

        fuzz_based_test(&input, config, &expected);
    }

    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    mod _cache_lines {
        use super::State;
        // FIXME: once the zlib-rs Minimum Supported Rust Version is >= 1.77, switch to
        // core::mem::offset_of and move this _cache_lines module up a level, out of the
        // test module and into super::.
        use memoffset::offset_of;

        const _: () = assert!(offset_of!(State, status) == 0);
        const _: () = assert!(offset_of!(State, _cache_line_0) == 64);
        const _: () = assert!(offset_of!(State, _cache_line_1) == 128);
        const _: () = assert!(offset_of!(State, _cache_line_2) == 192);
        const _: () = assert!(offset_of!(State, _cache_line_3) == 256);
    }
}