vortex_btrblocks/integer.rs

// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright the Vortex contributors

pub mod dictionary;
mod stats;

use std::fmt::Debug;
use std::hash::Hash;

pub use stats::IntegerStats;
use vortex_array::arrays::{ConstantArray, PrimitiveArray, PrimitiveVTable};
use vortex_array::{ArrayRef, IntoArray, ToCanonical};
use vortex_dict::DictArray;
use vortex_error::{VortexExpect, VortexResult, VortexUnwrap, vortex_bail, vortex_err};
use vortex_fastlanes::{FoRArray, bit_width_histogram, bitpack_encode, find_best_bit_width};
use vortex_runend::RunEndArray;
use vortex_runend::compress::runend_encode;
use vortex_scalar::Scalar;
use vortex_sequence::sequence_encode;
use vortex_sparse::{SparseArray, SparseVTable};
use vortex_zigzag::{ZigZagArray, zigzag_encode};

use crate::integer::dictionary::dictionary_encode;
use crate::patches::compress_patches;
use crate::{
    Compressor, CompressorStats, GenerateStatsOptions, Scheme,
    estimate_compression_ratio_with_sampling,
};

/// [`Compressor`] for signed and unsigned integers.
pub struct IntCompressor;

impl Compressor for IntCompressor {
    type ArrayVTable = PrimitiveVTable;
    type SchemeType = dyn IntegerScheme;
    type StatsType = IntegerStats;

    fn schemes() -> &'static [&'static dyn IntegerScheme] {
        &[
            &ConstantScheme,
            &FORScheme,
            &ZigZagScheme,
            &BitPackingScheme,
            &SparseScheme,
            &DictScheme,
            &RunEndScheme,
            &SequenceScheme,
        ]
    }

    fn default_scheme() -> &'static Self::SchemeType {
        &UncompressedScheme
    }

    fn dict_scheme_code() -> IntCode {
        DICT_SCHEME
    }
}

impl IntCompressor {
    pub(crate) fn compress_no_dict(
        array: &PrimitiveArray,
        is_sample: bool,
        allowed_cascading: usize,
        excludes: &[IntCode],
    ) -> VortexResult<ArrayRef> {
        let stats = IntegerStats::generate_opts(
            array,
            GenerateStatsOptions {
                count_distinct_values: false,
            },
        );

        let scheme = Self::choose_scheme(&stats, is_sample, allowed_cascading, excludes)?;
        let output = scheme.compress(&stats, is_sample, allowed_cascading, excludes)?;

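        // Keep the compressed output only if it is strictly smaller than the input array.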
        if output.nbytes() < array.nbytes() {
            Ok(output)
        } else {
            log::debug!("resulting tree too large: {}", output.display_tree());
            Ok(array.to_array())
        }
    }
}

pub trait IntegerScheme: Scheme<StatsType = IntegerStats, CodeType = IntCode> {}

// Auto-impl
impl<T> IntegerScheme for T where T: Scheme<StatsType = IntegerStats, CodeType = IntCode> {}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct IntCode(u8);

const UNCOMPRESSED_SCHEME: IntCode = IntCode(0);
const CONSTANT_SCHEME: IntCode = IntCode(1);
const FOR_SCHEME: IntCode = IntCode(2);
const ZIGZAG_SCHEME: IntCode = IntCode(3);
const BITPACKING_SCHEME: IntCode = IntCode(4);
const SPARSE_SCHEME: IntCode = IntCode(5);
const DICT_SCHEME: IntCode = IntCode(6);
const RUNEND_SCHEME: IntCode = IntCode(7);
const SEQUENCE_SCHEME: IntCode = IntCode(8);

#[derive(Debug, Copy, Clone)]
pub struct UncompressedScheme;

#[derive(Debug, Copy, Clone)]
pub struct ConstantScheme;

#[derive(Debug, Copy, Clone)]
pub struct FORScheme;

#[derive(Debug, Copy, Clone)]
pub struct ZigZagScheme;

#[derive(Debug, Copy, Clone)]
pub struct BitPackingScheme;

#[derive(Debug, Copy, Clone)]
pub struct SparseScheme;

#[derive(Debug, Copy, Clone)]
pub struct DictScheme;

#[derive(Debug, Copy, Clone)]
pub struct RunEndScheme;

#[derive(Debug, Copy, Clone)]
pub struct SequenceScheme;

/// Threshold for the average run length in an array before we consider run-end encoding.
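/// Illustrative example (not from the original source): an array like
/// `[7, 7, 7, 7, 9, 9, 9, 9]` has an average run length of 4 and clears the
/// threshold, while `[1, 2, 3, 4]` (average run length 1) does not.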
const RUN_END_THRESHOLD: u32 = 4;

impl Scheme for UncompressedScheme {
    type StatsType = IntegerStats;
    type CodeType = IntCode;

    fn code(&self) -> IntCode {
        UNCOMPRESSED_SCHEME
    }

    fn expected_compression_ratio(
        &self,
        _stats: &IntegerStats,
        _is_sample: bool,
        _allowed_cascading: usize,
        _excludes: &[IntCode],
    ) -> VortexResult<f64> {
        // no compression
        Ok(1.0)
    }

    fn compress(
        &self,
        stats: &IntegerStats,
        _is_sample: bool,
        _allowed_cascading: usize,
        _excludes: &[IntCode],
    ) -> VortexResult<ArrayRef> {
        Ok(stats.source().to_array())
    }
}

impl Scheme for ConstantScheme {
    type StatsType = IntegerStats;
    type CodeType = IntCode;

    fn code(&self) -> IntCode {
        CONSTANT_SCHEME
    }

    fn is_constant(&self) -> bool {
        true
    }

    fn expected_compression_ratio(
        &self,
        stats: &IntegerStats,
        is_sample: bool,
        _allowed_cascading: usize,
        _excludes: &[IntCode],
    ) -> VortexResult<f64> {
        // Never yield ConstantScheme for a sample; it could be a false positive.
        if is_sample {
            return Ok(0.0);
        }

        // Only arrays with exactly one distinct value can be constant compressed.
        if stats.distinct_values_count != 1 {
            return Ok(0.0);
        }

        // Cannot have a mix of nulls and non-nulls
        if stats.null_count > 0 && stats.value_count > 0 {
            return Ok(0.0);
        }

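        // Illustrative note (assumption, not from the original source): the estimate is
        // the value count because a constant array stores a single scalar regardless of
        // length, e.g. 1_000 identical values are estimated to compress roughly 1_000x.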
        Ok(stats.value_count as f64)
    }

    fn compress(
        &self,
        stats: &IntegerStats,
        _is_sample: bool,
        _allowed_cascading: usize,
        _excludes: &[IntCode],
    ) -> VortexResult<ArrayRef> {
        // We only use Constant encoding if the entire array is constant, never if one of
        // the child arrays yields a constant value.
        let scalar = stats
            .source()
            .as_constant()
            .vortex_expect("constant array expected");

        Ok(ConstantArray::new(scalar, stats.src.len()).into_array())
    }
}

impl Scheme for FORScheme {
    type StatsType = IntegerStats;
    type CodeType = IntCode;

    fn code(&self) -> IntCode {
        FOR_SCHEME
    }

    fn expected_compression_ratio(
        &self,
        stats: &IntegerStats,
        _is_sample: bool,
        allowed_cascading: usize,
        _excludes: &[IntCode],
    ) -> VortexResult<f64> {
        // Only apply if we are not at the leaf
        if allowed_cascading == 0 {
            return Ok(0.0);
        }

        // All-null arrays cannot be FOR compressed.
        if stats.value_count == 0 {
            return Ok(0.0);
        }

        // Only apply when the min is not already zero.
        if stats.typed.min_is_zero() {
            return Ok(0.0);
        }

        // Difference between max and min
        let full_width: u32 = stats.src.ptype().bit_width().try_into().vortex_unwrap();
        let bw = match stats.typed.max_minus_min().checked_ilog2() {
            Some(l) => l + 1,
            // If max - min == 0, we should use a different compression scheme,
            // as we don't want to bitpack down to 0 bits.
            None => return Ok(0.0),
        };

        // If we're not saving at least 1 byte, don't bother with FOR
        if full_width - bw < 8 {
            return Ok(0.0);
        }

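        // Illustrative example (not from the original source): for i64 values in
        // [1_000_000, 1_000_255], max - min = 255, so bw = 8 while full_width = 64,
        // giving an estimated ratio of 64 / 8 = 8.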
        Ok(full_width as f64 / bw as f64)
    }

    fn compress(
        &self,
        stats: &IntegerStats,
        is_sample: bool,
        _allowed_cascading: usize,
        excludes: &[IntCode],
    ) -> VortexResult<ArrayRef> {
        let for_array = FoRArray::encode(stats.src.clone())?;
        let biased = for_array.encoded().to_primitive();
        let biased_stats = IntegerStats::generate_opts(
            &biased,
            GenerateStatsOptions {
                count_distinct_values: false,
            },
        );

        // Immediately bitpack. If any other scheme was preferable, it would be chosen instead
        // of bitpacking.
        // NOTE: we could delegate in the future if we had another downstream codec that performs
        //  as well.
        let compressed = BitPackingScheme.compress(&biased_stats, is_sample, 0, excludes)?;

        Ok(FoRArray::try_new(compressed, for_array.reference_scalar().clone())?.into_array())
    }
}

impl Scheme for ZigZagScheme {
    type StatsType = IntegerStats;
    type CodeType = IntCode;

    fn code(&self) -> IntCode {
        ZIGZAG_SCHEME
    }

    fn expected_compression_ratio(
        &self,
        stats: &IntegerStats,
        is_sample: bool,
        allowed_cascading: usize,
        excludes: &[IntCode],
    ) -> VortexResult<f64> {
        // ZigZag is only useful when we cascade it with another encoding
        if allowed_cascading == 0 {
            return Ok(0.0);
        }

        // Don't try to compress all-null arrays
        if stats.value_count == 0 {
            return Ok(0.0);
        }

        // ZigZag is only useful when there are negative values.
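        // Illustrative note (general ZigZag behaviour, assumed to apply here): ZigZag maps
        // 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ..., so small negative values become small
        // unsigned values that downstream schemes such as bitpacking handle well.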
        if !stats.typed.min_is_negative() {
            return Ok(0.0);
        }

        // Run compression on a sample to see how it performs.
        estimate_compression_ratio_with_sampling(
            self,
            stats,
            is_sample,
            allowed_cascading,
            excludes,
        )
    }

    fn compress(
        &self,
        stats: &IntegerStats,
        is_sample: bool,
        allowed_cascading: usize,
        excludes: &[IntCode],
    ) -> VortexResult<ArrayRef> {
        // Zigzag encode the values, then recursively compress the inner values.
        let zag = zigzag_encode(stats.src.clone())?;
        let encoded = zag.encoded().to_primitive();

        // ZigZag should be after Dict, RunEnd or Sparse.
        // We should only do these "container" style compressors once.
        let mut new_excludes = vec![
            ZigZagScheme.code(),
            DictScheme.code(),
            RunEndScheme.code(),
            SparseScheme.code(),
        ];
        new_excludes.extend_from_slice(excludes);

        let compressed =
            IntCompressor::compress(&encoded, is_sample, allowed_cascading - 1, &new_excludes)?;

        log::debug!("zigzag output: {}", compressed.display_tree());

        Ok(ZigZagArray::try_new(compressed)?.into_array())
    }
}

impl Scheme for BitPackingScheme {
    type StatsType = IntegerStats;
    type CodeType = IntCode;

    fn code(&self) -> IntCode {
        BITPACKING_SCHEME
    }

    #[allow(clippy::cast_possible_truncation)]
    fn expected_compression_ratio(
        &self,
        stats: &IntegerStats,
        is_sample: bool,
        allowed_cascading: usize,
        excludes: &[IntCode],
    ) -> VortexResult<f64> {
        // BitPacking only works for non-negative values
        if stats.typed.min_is_negative() {
            return Ok(0.0);
        }

        // Don't compress all-null arrays
        if stats.value_count == 0 {
            return Ok(0.0);
        }

        estimate_compression_ratio_with_sampling(
            self,
            stats,
            is_sample,
            allowed_cascading,
            excludes,
        )
    }

    #[allow(clippy::cast_possible_truncation)]
    fn compress(
        &self,
        stats: &IntegerStats,
        _is_sample: bool,
        _allowed_cascading: usize,
        _excludes: &[IntCode],
    ) -> VortexResult<ArrayRef> {
        let histogram = bit_width_histogram(stats.source())?;
        let bw = find_best_bit_width(stats.source().ptype(), &histogram)?;
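        // Illustrative note (assumption about find_best_bit_width, not from the original
        // source): for u32 values that are mostly below 2^10 with a few large outliers,
        // a bit width around 10 is typically chosen and the outliers are carried as
        // patches below.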
        // If the best bit width is the current bit width, return the original array.
        if bw as usize == stats.source().ptype().bit_width() {
            return Ok(stats.source().clone().into_array());
        }
        let mut packed = bitpack_encode(stats.source(), bw, Some(&histogram))?;

        let patches = packed.patches().map(compress_patches).transpose()?;
        packed.replace_patches(patches);

        Ok(packed.into_array())
    }
}

impl Scheme for SparseScheme {
    type StatsType = IntegerStats;
    type CodeType = IntCode;

    fn code(&self) -> IntCode {
        SPARSE_SCHEME
    }

    // We can avoid asserting the encoding tree instead.
    fn expected_compression_ratio(
        &self,
        stats: &IntegerStats,
        _is_sample: bool,
        _allowed_cascading: usize,
        _excludes: &[IntCode],
    ) -> VortexResult<f64> {
        if stats.value_count == 0 {
            // All nulls should use ConstantScheme
            return Ok(0.0);
        }

        // If the majority of values are null, this will compress well.
        if stats.null_count as f64 / stats.src.len() as f64 > 0.9 {
            return Ok(stats.src.len() as f64 / stats.value_count as f64);
        }

        // See if the top value accounts for >= 90% of the set values.
        let (_, top_count) = stats.typed.top_value_and_count();

        if top_count == stats.value_count {
            // top_value is the only value, should use ConstantScheme instead
            return Ok(0.0);
        }

        let freq = top_count as f64 / stats.value_count as f64;
        if freq >= 0.9 {
            // We only store the positions of the non-top values.
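            // Illustrative example (not from the original source): with 1_000 set values
            // of which 950 equal the top value, only the 50 exceptions are kept, for an
            // estimated ratio of 1_000 / 50 = 20.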
            return Ok(stats.value_count as f64 / (stats.value_count - top_count) as f64);
        }

        Ok(0.0)
    }

    fn compress(
        &self,
        stats: &IntegerStats,
        is_sample: bool,
        allowed_cascading: usize,
        excludes: &[IntCode],
    ) -> VortexResult<ArrayRef> {
        assert!(allowed_cascading > 0);
        let (top_pvalue, top_count) = stats.typed.top_value_and_count();
        if top_count as usize == stats.src.len() {
            // top_value is the only value, use ConstantScheme
            return Ok(ConstantArray::new(
                Scalar::primitive_value(
                    top_pvalue,
                    top_pvalue.ptype(),
                    stats.src.dtype().nullability(),
                ),
                stats.src.len(),
            )
            .into_array());
        }

        let sparse_encoded = SparseArray::encode(
            stats.src.as_ref(),
            Some(Scalar::primitive_value(
                top_pvalue,
                top_pvalue.ptype(),
                stats.src.dtype().nullability(),
            )),
        )?;

        if let Some(sparse) = sparse_encoded.as_opt::<SparseVTable>() {
            // Compress the values
            let mut new_excludes = vec![SparseScheme.code()];
            new_excludes.extend_from_slice(excludes);

            let compressed_values = IntCompressor::compress_no_dict(
                &sparse.patches().values().to_primitive(),
                is_sample,
                allowed_cascading - 1,
                &new_excludes,
            )?;

            let indices = sparse.patches().indices().to_primitive().downcast()?;

            let compressed_indices = IntCompressor::compress_no_dict(
                &indices,
                is_sample,
                allowed_cascading - 1,
                &new_excludes,
            )?;

            SparseArray::try_new(
                compressed_indices,
                compressed_values,
                sparse.len(),
                sparse.fill_scalar().clone(),
            )
            .map(|a| a.into_array())
        } else {
            Ok(sparse_encoded)
        }
    }
}

impl Scheme for DictScheme {
    type StatsType = IntegerStats;
    type CodeType = IntCode;

    fn code(&self) -> IntCode {
        DICT_SCHEME
    }

    fn expected_compression_ratio(
        &self,
        stats: &IntegerStats,
        _is_sample: bool,
        allowed_cascading: usize,
        _excludes: &[IntCode],
    ) -> VortexResult<f64> {
        // Dict should not be terminal.
        if allowed_cascading == 0 {
            return Ok(0.0);
        }

        if stats.value_count == 0 {
            return Ok(0.0);
        }

        // If > 50% of the values are distinct, skip dict.
        if stats.distinct_values_count > stats.value_count / 2 {
            return Ok(0.0);
        }

        // Ignore the encoding of nulls for the estimate; we only focus on values.
        let values_size = stats.source().ptype().bit_width() * stats.distinct_values_count as usize;

        // Assume codes are compressed RLE + BitPacking.
        let codes_bw = usize::BITS - stats.distinct_values_count.leading_zeros();

        let n_runs = stats.value_count / stats.average_run_length;

        // Assume that codes will either be BitPack or RLE-BitPack
        let codes_size_bp = (codes_bw * stats.value_count) as usize;
        let codes_size_rle_bp = (codes_bw + 32) * n_runs;

        let codes_size = usize::min(codes_size_bp, codes_size_rle_bp as usize);

        let before = stats.value_count as usize * stats.source().ptype().bit_width();

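        // Illustrative example (assumed numbers, not from the original source): for 1M
        // u64 values with 1_000 distinct values and an average run length of 4, the
        // dictionary costs roughly 1_000 * 64 bits while each code only needs enough
        // bits to index 1_000 entries, so the estimated ratio comes out well above 1.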
        Ok(before as f64 / (values_size + codes_size) as f64)
    }

    fn compress(
        &self,
        stats: &IntegerStats,
        is_sample: bool,
        allowed_cascading: usize,
        excludes: &[IntCode],
    ) -> VortexResult<ArrayRef> {
        assert!(allowed_cascading > 0);

        // TODO(aduffy): we can be more prescriptive: we know that codes will EITHER be
        //    RLE or FOR + BP. Cascading probably wastes some time here.

        let dict = dictionary_encode(stats);

        // Cascade the codes child
        // Don't allow SequenceArray as the codes child as it merely adds extra indirection without actually compressing data.
        let mut new_excludes = vec![DICT_SCHEME, SEQUENCE_SCHEME];
        new_excludes.extend_from_slice(excludes);

        let compressed_codes = IntCompressor::compress_no_dict(
            &dict.codes().to_primitive().downcast()?,
            is_sample,
            allowed_cascading - 1,
            &new_excludes,
        )?;

        // SAFETY: compressing codes does not change their values
        unsafe {
            Ok(DictArray::new_unchecked(compressed_codes, dict.values().clone()).into_array())
        }
    }
}

impl Scheme for RunEndScheme {
    type StatsType = IntegerStats;
    type CodeType = IntCode;

    fn code(&self) -> IntCode {
        RUNEND_SCHEME
    }

    fn expected_compression_ratio(
        &self,
        stats: &IntegerStats,
        is_sample: bool,
        allowed_cascading: usize,
        excludes: &[IntCode],
    ) -> VortexResult<f64> {
        // If the average run length is below the threshold, skip run-end encoding.
        if stats.average_run_length < RUN_END_THRESHOLD {
            return Ok(0.0);
        }

        if allowed_cascading == 0 {
            return Ok(0.0);
        }

        // Run compression on a sample to see how it performs.
        estimate_compression_ratio_with_sampling(
            self,
            stats,
            is_sample,
            allowed_cascading,
            excludes,
        )
    }

    fn compress(
        &self,
        stats: &IntegerStats,
        is_sample: bool,
        allowed_cascading: usize,
        excludes: &[IntCode],
    ) -> VortexResult<ArrayRef> {
        assert!(allowed_cascading > 0);

        // Run-end encode the array into ends and values.
        let (ends, values) = runend_encode(&stats.src);

        let mut new_excludes = vec![RunEndScheme.code(), DictScheme.code()];
        new_excludes.extend_from_slice(excludes);

        let ends_stats = IntegerStats::generate_opts(
            &ends.to_primitive(),
            GenerateStatsOptions {
                count_distinct_values: false,
            },
        );
        let ends_scheme = IntCompressor::choose_scheme(
            &ends_stats,
            is_sample,
            allowed_cascading - 1,
            &new_excludes,
        )?;
        let compressed_ends =
            ends_scheme.compress(&ends_stats, is_sample, allowed_cascading - 1, &new_excludes)?;

        let compressed_values = IntCompressor::compress_no_dict(
            &values.to_primitive(),
            is_sample,
            allowed_cascading - 1,
            &new_excludes,
        )?;

        // SAFETY: compression doesn't affect invariants
        unsafe {
            Ok(
                RunEndArray::new_unchecked(compressed_ends, compressed_values, 0, stats.src.len())
                    .into_array(),
            )
        }
    }
}

impl Scheme for SequenceScheme {
    type StatsType = IntegerStats;
    type CodeType = IntCode;

    fn code(&self) -> Self::CodeType {
        SEQUENCE_SCHEME
    }

    fn expected_compression_ratio(
        &self,
        stats: &Self::StatsType,
        _is_sample: bool,
        _allowed_cascading: usize,
        _excludes: &[Self::CodeType],
    ) -> VortexResult<f64> {
        if stats.null_count > 0 {
            return Ok(0.0);
        }
        // Since two values are required to store the base and the multiplier, the
        // compression ratio is the array length divided by 2.
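        // Illustrative example (not from the original source): [3, 5, 7, 9] is the
        // sequence with base 3 and multiplier 2, so 4 values collapse to 2 for an
        // estimated ratio of 4 / 2 = 2.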
        Ok(sequence_encode(&stats.src)?
            .map(|_| stats.src.len() as f64 / 2.0)
            .unwrap_or(0.0))
    }

    fn compress(
        &self,
        stats: &Self::StatsType,
        _is_sample: bool,
        _allowed_cascading: usize,
        _excludes: &[Self::CodeType],
    ) -> VortexResult<ArrayRef> {
        if stats.null_count > 0 {
            vortex_bail!("sequence encoding does not support nulls");
        }
        sequence_encode(&stats.src)?.ok_or_else(|| vortex_err!("cannot sequence encode array"))
    }
}

#[cfg(test)]
mod tests {
    use itertools::Itertools;
    use log::LevelFilter;
    use rand::rngs::StdRng;
    use rand::{RngCore, SeedableRng};
    use vortex_array::arrays::PrimitiveArray;
    use vortex_array::validity::Validity;
    use vortex_array::vtable::ValidityHelper;
    use vortex_array::{Array, IntoArray, ToCanonical};
    use vortex_buffer::{Buffer, BufferMut, buffer, buffer_mut};
    use vortex_dict::DictEncoding;
    use vortex_sequence::SequenceEncoding;
    use vortex_sparse::SparseEncoding;
    use vortex_utils::aliases::hash_set::HashSet;

    use crate::integer::{IntCompressor, IntegerStats, SequenceScheme, SparseScheme};
    use crate::{Compressor, CompressorStats, Scheme};

    #[test]
    fn test_empty() {
        // Make sure empty array compression does not fail
        let result = IntCompressor::compress(
            &PrimitiveArray::new(Buffer::<i32>::empty(), Validity::NonNullable),
            false,
            3,
            &[],
        )
        .unwrap();

        assert!(result.is_empty());
    }

    #[test]
    fn test_dict_encodable() {
        let mut codes = BufferMut::<i32>::with_capacity(65_535);
        // Write short runs (lengths 0-4) of a handful of different values, interrupted by
        // some one-off values.

        let numbers = [0, 10, 50, 100, 1000, 3000]
            .into_iter()
            .map(|i| 1234 * i)
            .collect_vec();

        let mut rng = StdRng::seed_from_u64(1u64);
        while codes.len() < 64000 {
            let run_length = rng.next_u32() % 5;
            let value = numbers[rng.next_u32() as usize % numbers.len()];
            for _ in 0..run_length {
                codes.push(value);
            }
        }

        let primitive = codes.freeze().into_array().to_primitive();
        let compressed = IntCompressor::compress(&primitive, false, 3, &[]).unwrap();
        assert_eq!(compressed.encoding_id(), DictEncoding.id());
    }

    #[test]
    fn test_window_name() {
        env_logger::builder()
            .filter(None, LevelFilter::Debug)
            .try_init()
            .ok();

        // A test that's meant to mirror the WindowName column from ClickBench.
        let mut values = buffer_mut![-1i32; 1_000_000];
        let mut visited = HashSet::new();
        let mut rng = StdRng::seed_from_u64(1u64);
        while visited.len() < 223 {
            let random = (rng.next_u32() as usize) % 1_000_000;
            if visited.contains(&random) {
                continue;
            }
            visited.insert(random);
            // Insert one of 100 possible values (multiples of 5) at this position.
            values[random] = 5 * (rng.next_u64() % 100) as i32;
        }

        let array = values.freeze().into_array().to_primitive();
        let compressed = IntCompressor::compress(&array, false, 3, &[]).unwrap();
        log::info!("WindowName compressed: {}", compressed.display_tree());
    }

    #[test]
    fn sparse_with_nulls() {
        let array = PrimitiveArray::new(
            buffer![189u8, 189, 189, 0, 46],
            Validity::from_iter(vec![true, true, true, true, false]),
        );
        let compressed = SparseScheme
            .compress(&IntegerStats::generate(&array), false, 3, &[])
            .unwrap();
        assert_eq!(compressed.encoding_id(), SparseEncoding.id());
        let decoded = compressed.to_primitive();
        let expected = [189u8, 189, 189, 0, 0];
        assert_eq!(decoded.as_slice::<u8>(), &expected);
        assert_eq!(decoded.validity(), array.validity());
    }

    #[test]
    fn sparse_mostly_nulls() {
        let array = PrimitiveArray::new(
            buffer![189u8, 189, 189, 189, 189, 189, 189, 189, 189, 0, 46],
            Validity::from_iter(vec![
                false, false, false, false, false, false, false, false, false, false, true,
            ]),
        );
        let compressed = SparseScheme
            .compress(&IntegerStats::generate(&array), false, 3, &[])
            .unwrap();
        assert_eq!(compressed.encoding_id(), SparseEncoding.id());
        let decoded = compressed.to_primitive();
        let expected = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 46];
        assert_eq!(decoded.as_slice::<u8>(), &expected);
        assert_eq!(decoded.validity(), array.validity());
    }

    #[test]
    fn nullable_sequence() {
        let values = (0i32..20).step_by(7).collect_vec();
        let array = PrimitiveArray::from_option_iter(values.clone().into_iter().map(Some));
        let compressed = SequenceScheme
            .compress(&IntegerStats::generate(&array), false, 3, &[])
            .unwrap();
        assert_eq!(compressed.encoding_id(), SequenceEncoding.id());
        let decoded = compressed.to_primitive();
        assert_eq!(decoded.as_slice::<i32>(), values.as_slice());
    }
}