structured-zstd 0.0.5

Pure Rust zstd implementation — an actively maintained fork of ruzstd, with dictionary decompression support and no FFI dependencies.
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
//! Roundtrip integrity tests: compress → decompress → verify data unchanged.
//!
//! Tests run 1000 iterations with random data of varying sizes and patterns
//! to ensure no data corruption in the compress/decompress pipeline.

extern crate std;

#[allow(unused_imports)]
use alloc::vec;
use alloc::vec::Vec;

use crate::decoding::StreamingDecoder;
use crate::encoding::{CompressionLevel, FrameCompressor, compress_to_vec};
use crate::io::Read;

/// Generate deterministic pseudo-random data using a simple LCG.
///
/// The same `(seed, len)` pair always yields identical bytes, so test
/// fixtures are reproducible without an RNG dependency.
fn generate_data(seed: u64, len: usize) -> Vec<u8> {
    // Knuth's MMIX LCG constants; the output byte is taken from the high
    // half of the state (bits 33..41), which mixes better than the low bits.
    let mut state = seed;
    (0..len)
        .map(|_| {
            state = state
                .wrapping_mul(6364136223846793005)
                .wrapping_add(1442695040888963407);
            (state >> 33) as u8
        })
        .collect()
}

/// Generate highly compressible data (repeating patterns).
///
/// The pattern length is 1..=16 and is derived from `seed`, so different
/// seeds produce different repetition periods as well as different bytes.
fn generate_compressible(seed: u64, len: usize) -> Vec<u8> {
    let pattern_len = ((seed % 16) + 1) as usize;
    let pattern = generate_data(seed, pattern_len);
    // Tile the pattern across the whole output.
    pattern.iter().copied().cycle().take(len).collect()
}

/// Roundtrip using compress_to_vec at the given level.
///
/// Compresses `data` with the one-shot API, decodes the resulting frame with
/// `StreamingDecoder`, and returns the decoded bytes for the caller to compare.
fn roundtrip_at_level(data: &[u8], level: CompressionLevel) -> Vec<u8> {
    let compressed = compress_to_vec(data, level);
    let mut decoded = Vec::new();
    StreamingDecoder::new(compressed.as_slice())
        .unwrap()
        .read_to_end(&mut decoded)
        .unwrap();
    decoded
}

/// Convenience wrapper: one-shot compress/decompress roundtrip at `Fastest`.
fn roundtrip_simple(data: &[u8]) -> Vec<u8> {
    roundtrip_at_level(data, CompressionLevel::Fastest)
}

/// Compress `data` with the streaming `FrameCompressor` at the `Fastest`
/// level and return the raw frame bytes (no decoding).
fn compress_streaming(data: &[u8]) -> Vec<u8> {
    let mut frame = Vec::new();
    let mut encoder = FrameCompressor::new(CompressionLevel::Fastest);
    encoder.set_source(data);
    encoder.set_drain(&mut frame);
    encoder.compress();
    frame
}

/// Roundtrip using FrameCompressor at the given level.
///
/// Drives the streaming compressor into a `Vec`, then decodes the frame back
/// and returns the decoded bytes for the caller to compare.
fn roundtrip_streaming_at_level(data: &[u8], level: CompressionLevel) -> Vec<u8> {
    let mut frame = Vec::new();
    {
        // Scope the encoder so its &mut borrow of `frame` ends before decoding.
        let mut encoder = FrameCompressor::new(level);
        encoder.set_source(data);
        encoder.set_drain(&mut frame);
        encoder.compress();
    }
    let mut decoded = Vec::new();
    StreamingDecoder::new(frame.as_slice())
        .unwrap()
        .read_to_end(&mut decoded)
        .unwrap();
    decoded
}

/// Convenience wrapper: streaming-compressor roundtrip at `Fastest`.
fn roundtrip_streaming(data: &[u8]) -> Vec<u8> {
    roundtrip_streaming_at_level(data, CompressionLevel::Fastest)
}

/// Convenience wrapper: one-shot roundtrip at `Default`.
fn roundtrip_default(data: &[u8]) -> Vec<u8> {
    roundtrip_at_level(data, CompressionLevel::Default)
}

/// Convenience wrapper: one-shot roundtrip at `Better`.
fn roundtrip_better(data: &[u8]) -> Vec<u8> {
    roundtrip_at_level(data, CompressionLevel::Better)
}

/// Convenience wrapper: streaming-compressor roundtrip at `Better`.
fn roundtrip_better_streaming(data: &[u8]) -> Vec<u8> {
    roundtrip_streaming_at_level(data, CompressionLevel::Better)
}

/// Convenience wrapper: one-shot roundtrip at `Best`.
fn roundtrip_best(data: &[u8]) -> Vec<u8> {
    roundtrip_at_level(data, CompressionLevel::Best)
}

/// Convenience wrapper: streaming-compressor roundtrip at `Best`.
fn roundtrip_best_streaming(data: &[u8]) -> Vec<u8> {
    roundtrip_streaming_at_level(data, CompressionLevel::Best)
}

/// Generate data with limited alphabet for better Huffman compressibility
/// but enough variety to avoid RLE path.
///
/// Bytes are drawn from `0..alphabet_size` using the same MMIX LCG as
/// `generate_data`, so output is deterministic per `(seed, len)`.
fn generate_huffman_friendly(seed: u64, len: usize, alphabet_size: u8) -> Vec<u8> {
    // `% 0` below would panic anyway; fail fast with a clear message instead.
    assert!(alphabet_size > 0, "alphabet_size must be non-zero");
    let mut state = seed;
    (0..len)
        .map(|_| {
            state = state
                .wrapping_mul(6364136223846793005)
                .wrapping_add(1442695040888963407);
            ((state >> 33) as u8) % alphabet_size
        })
        .collect()
}

/// Build `chunks` copies of `pattern`, each followed by a little-endian
/// 2-byte chunk counter (truncated to `u16`).
///
/// The changing sentinel keeps consecutive chunks near-identical but not
/// mergeable into one long match, so the encoder must re-emit offsets.
fn repeat_offset_fixture(pattern: &[u8], chunks: usize) -> Vec<u8> {
    (0..chunks)
        .flat_map(|i| pattern.iter().copied().chain((i as u16).to_le_bytes()))
        .collect()
}

// Cross-validation tests (pure Rust ↔ C FFI) are in tests/cross_validation.rs
// because dev-dependencies (zstd) aren't available in library test modules.

#[test]
fn roundtrip_random_data_1000_iterations() {
    // Lengths stride through 0..64KB with a step of 67, so empty, tiny, and
    // multi-KB inputs are all covered across the 1000 seeds.
    (0..1000u64).for_each(|i| {
        let len = (i * 67 % 65536) as usize;
        let data = generate_data(i, len);
        let result = roundtrip_simple(&data);
        assert_eq!(
            data, result,
            "Simple API roundtrip failed at iteration {i}, len={len}"
        );
    });
}

#[test]
fn roundtrip_compressible_data_1000_iterations() {
    // Same shape as the random-data sweep, but with repeating-pattern inputs
    // so the match-finding paths are exercised instead of pure literals.
    let mut i = 0u64;
    while i < 1000 {
        let len = (i * 131 % 65536) as usize;
        let data = generate_compressible(i, len);
        let result = roundtrip_simple(&data);
        assert_eq!(
            data, result,
            "Compressible roundtrip failed at iteration {i}, len={len}"
        );
        i += 1;
    }
}

#[test]
fn roundtrip_streaming_api_1000_iterations() {
    // Seeds are offset by 0xDEAD so the streaming sweep uses fixtures
    // distinct from the one-shot sweeps above.
    (0..1000u64).for_each(|i| {
        let len = (i * 97 % 32768) as usize;
        let data = generate_data(i.wrapping_add(0xDEAD), len);
        let result = roundtrip_streaming(&data);
        assert_eq!(
            data, result,
            "Streaming API roundtrip failed at iteration {i}, len={len}"
        );
    });
}

#[test]
fn roundtrip_edge_cases() {
    // Each input must survive a Fastest-level one-shot roundtrip unchanged.
    let check = |input: Vec<u8>| assert_eq!(roundtrip_simple(&input), input);

    // Empty data
    check(Vec::new());
    // Single byte
    check(vec![0x42]);
    // All zeros (maximally compressible)
    check(vec![0u8; 100_000]);
    // All 0xFF
    check(vec![0xFFu8; 100_000]);
    // Ascending bytes (moderately compressible)
    check((0..=255u8).cycle().take(100_000).collect());
    // 1 byte repeated (RLE-like)
    check(vec![0xABu8; 1_000_000]);
}

/// Roundtrip tests with large inputs that produce large literal sections.
///
/// The encoder uses `compress_literals` (Huffman) for literals > 1024 bytes,
/// so these inputs exercise the 14-bit (0b10) and 18-bit (0b11) size formats.
/// The exact literals size depends on how many matches the encoder finds,
/// so we verify roundtrip correctness rather than specific format selection.
#[test]
fn roundtrip_large_literals() {
    // ~1KB input — just above the raw→Huffman threshold.
    let data_1025 = generate_huffman_friendly(42, 1025, 16);
    assert_eq!(roundtrip_simple(&data_1025), data_1025);
    assert_eq!(roundtrip_streaming(&data_1025), data_1025);

    // ~16KB inputs — one byte either side of the 14-bit/18-bit boundary.
    let data_16383 = generate_huffman_friendly(43, 16383, 32);
    assert_eq!(roundtrip_simple(&data_16383), data_16383);

    let data_16384 = generate_huffman_friendly(44, 16384, 32);
    assert_eq!(roundtrip_simple(&data_16384), data_16384);
    assert_eq!(roundtrip_streaming(&data_16384), data_16384);

    // 64KB input — well within the 18-bit range.
    let data_64k = generate_huffman_friendly(45, 65536, 64);
    assert_eq!(roundtrip_simple(&data_64k), data_64k);

    // 128KB input — MAX_BLOCK_SIZE, the largest single block.
    let data_128k = generate_huffman_friendly(46, 128 * 1024, 64);
    assert_eq!(roundtrip_simple(&data_128k), data_128k);
    assert_eq!(roundtrip_streaming(&data_128k), data_128k);
}

/// Multi-block data larger than MAX_BLOCK_SIZE that exercises the 4-stream
/// Huffman encoding across multiple blocks, each with large literal sections.
#[test]
fn roundtrip_multi_block_large_literals() {
    // 512KB of Huffman-friendly data — will be split into multiple 128KB blocks,
    // each exercising the 18-bit (0b11) size format with 4-stream encoding.
    let data = generate_huffman_friendly(100, 512 * 1024, 48);
    for rt in [roundtrip_simple as fn(&[u8]) -> Vec<u8>, roundtrip_streaming] {
        assert_eq!(rt(&data), data);
    }
}

/// Repeat offset encoding: data with many repeated match offsets should compress
/// better than data where every offset is unique, and must roundtrip correctly.
#[test]
fn roundtrip_repeat_offsets() {
    // Break each repeated chunk with a changing 2-byte sentinel so the matcher
    // has to re-emit the same offset instead of collapsing everything into one
    // maximal match.
    let data = repeat_offset_fixture(b"ABCDE12345", 10_000);

    // Verify with both the one-shot and the streaming API.
    assert_eq!(data, roundtrip_simple(&data), "Repeat offset roundtrip failed");
    assert_eq!(
        data,
        roundtrip_streaming(&data),
        "Repeat offset streaming roundtrip failed"
    );
}

/// Verify that highly repetitive data compresses significantly better than random data.
#[test]
fn repetitive_data_compresses_better_than_random() {
    // Same-size inputs: one dominated by fixed-offset matches separated by a
    // changing sentinel, one pure LCG noise (incompressible).
    let repetitive = repeat_offset_fixture(b"ABCDE12345", 5_000);
    let random = generate_data(999, repetitive.len());

    let compressed_size = |input: &[u8]| compress_to_vec(input, CompressionLevel::Fastest).len();
    let rep_len = compressed_size(&repetitive);
    let rand_len = compressed_size(&random);

    // Repetitive data should still beat random data, without pinning an exact
    // ratio that may drift as encoder heuristics evolve.
    assert!(
        rep_len < rand_len,
        "Repetitive input should compress better than random input. \
         repetitive={} bytes, random={} bytes",
        rep_len,
        rand_len
    );
}

/// Multi-block data exercises FSE table reuse across blocks and offset history
/// persistence across block boundaries.
#[test]
fn roundtrip_multi_block_repeat_offsets() {
    // 512KB of data with fixed-offset repeats broken by a changing sentinel —
    // spans multiple 128KB blocks, so offset history and FSE tables must
    // persist correctly across block boundaries. Each fixture chunk is
    // 12 bytes (10-byte pattern + 2-byte sentinel), hence the /12 sizing.
    let mut data = repeat_offset_fixture(b"HelloWorld", (512 * 1024) / 12 + 1);
    data.truncate(512 * 1024);

    let result = roundtrip_simple(&data);
    assert_eq!(data, result, "Multi-block repeat offset roundtrip failed");

    let result = roundtrip_streaming(&data);
    assert_eq!(
        data, result,
        "Multi-block repeat offset streaming roundtrip failed"
    );

    // Size comparison: one whole frame vs independently compressed 128KB
    // chunks. Compressing the empty input measures the fixed per-frame
    // overhead; subtracting it from every chunk (and adding it back once)
    // compares payload bytes rather than counting extra frame headers.
    let whole_frame = compress_streaming(&data);
    let frame_overhead = compress_to_vec(&[][..], CompressionLevel::Fastest).len();
    let independent_chunks: usize = data
        .chunks(128 * 1024)
        .map(|chunk| {
            compress_to_vec(chunk, CompressionLevel::Fastest)
                .len()
                .saturating_sub(frame_overhead)
        })
        .sum::<usize>()
        .saturating_add(frame_overhead);
    assert!(
        whole_frame.len() < independent_chunks,
        "Cross-block reuse should beat per-block resets. whole={} bytes, split={} bytes",
        whole_frame.len(),
        independent_chunks
    );
}

/// Zero literal length sequences (back-to-back matches with no literals between them)
/// exercise the shifted repeat-offset remap path instead of only generic new offsets.
#[test]
fn roundtrip_zero_literal_length_sequences() {
    // Alternate a base prefix with a one-byte-shifted version so the encoder
    // sees back-to-back zero-literal matches that must use a shifted repeat
    // remap path instead of only generic new offsets.
    let mut data = Vec::with_capacity(10_000);
    // Initial unique segment: bytes 0..100 appear exactly once up front.
    for i in 0..100u8 {
        data.push(i);
    }
    // Repeat the first 50 bytes, then alternate with a shifted 50-byte window.
    // `prefix` is bytes 0..50 and `shifted_prefix` is bytes 1..51 of the
    // segment above, so candidate match offsets differ by exactly one byte.
    let prefix = data[..50].to_vec();
    let shifted_prefix = data[1..51].to_vec();
    data.extend_from_slice(&prefix);
    for _ in 0..100 {
        data.extend_from_slice(&shifted_prefix);
        data.extend_from_slice(&prefix);
    }

    let result = roundtrip_simple(&data);
    assert_eq!(data, result, "Zero ll sequence roundtrip failed");
}

/// Reusing the same `FrameCompressor` across frames must reset per-frame FSE repeat tables.
#[test]
fn roundtrip_reused_frame_compressor_across_frames() {
    let first = generate_huffman_friendly(700, 512 * 1024, 48);
    let second = generate_huffman_friendly(701, 512 * 1024, 48);

    let mut first_compressed = Vec::new();
    let mut second_compressed = Vec::new();
    {
        // One compressor instance drives both frames back-to-back; stale
        // per-frame state carried into the second frame would corrupt it.
        let mut compressor = FrameCompressor::new(CompressionLevel::Fastest);
        compressor.set_source(first.as_slice());
        compressor.set_drain(&mut first_compressed);
        compressor.compress();

        compressor.set_source(second.as_slice());
        compressor.set_drain(&mut second_compressed);
        compressor.compress();
    }
    // The scope above ends before decoding so the compressor's &mut borrows
    // of the output buffers are released.

    let mut decoder = StreamingDecoder::new(first_compressed.as_slice()).unwrap();
    let mut first_roundtrip = Vec::new();
    decoder.read_to_end(&mut first_roundtrip).unwrap();
    assert_eq!(
        first, first_roundtrip,
        "First reused-frame roundtrip failed"
    );

    let mut decoder = StreamingDecoder::new(second_compressed.as_slice()).unwrap();
    let mut second_roundtrip = Vec::new();
    decoder.read_to_end(&mut second_roundtrip).unwrap();
    assert_eq!(
        second, second_roundtrip,
        "Second reused-frame roundtrip failed"
    );
}

/// Regression guard: 64KB compressible input roundtrips at the Default level.
#[test]
fn roundtrip_default_level_regression() {
    let data = generate_compressible(777, 64 * 1024);
    assert_eq!(roundtrip_default(&data), data);
}

/// Regression guard: 512KB compressible input (spanning multiple 128KB
/// blocks) roundtrips at the Default level.
#[test]
fn roundtrip_default_level_multi_block_regression() {
    let data = generate_compressible(1337, 512 * 1024);
    assert_eq!(roundtrip_default(&data), data);
}

/// Standard roundtrip test suite for a compression level. Generates 7 tests
/// covering compressible, random, multi-block, streaming, edge-case,
/// repeat-offset, and large-literal inputs inside a named module.
///
/// `$seed_base` offsets the data-generator seeds so each instantiation runs
/// against distinct fixtures.
macro_rules! level_roundtrip_suite {
    (mod $mod_name:ident, $level:expr, $seed_base:expr) => {
        mod $mod_name {
            use super::*;

            // One-shot and streaming roundtrip helpers pinned to `$level`.
            fn rt(data: &[u8]) -> Vec<u8> {
                roundtrip_at_level(data, $level)
            }
            fn rt_stream(data: &[u8]) -> Vec<u8> {
                roundtrip_streaming_at_level(data, $level)
            }

            #[test]
            fn compressible() {
                let data = generate_compressible($seed_base, 64 * 1024);
                assert_eq!(rt(&data), data);
            }
            #[test]
            fn random() {
                let data = generate_data($seed_base + 111, 64 * 1024);
                assert_eq!(rt(&data), data);
            }
            #[test]
            fn multi_block() {
                // 512KB spans multiple 128KB blocks.
                let data = generate_compressible($seed_base + 222, 512 * 1024);
                assert_eq!(rt(&data), data);
            }
            #[test]
            fn streaming() {
                let data = generate_compressible($seed_base + 333, 64 * 1024);
                assert_eq!(rt_stream(&data), data);
            }
            #[test]
            fn edge_cases() {
                assert_eq!(rt(&[]), Vec::<u8>::new());
                assert_eq!(rt(&[0x42]), vec![0x42]);
                let zeros = vec![0u8; 100_000];
                assert_eq!(rt(&zeros), zeros);
                let ascending: Vec<u8> = (0..=255u8).cycle().take(100_000).collect();
                assert_eq!(rt(&ascending), ascending);
            }
            #[test]
            fn repeat_offsets() {
                let data = repeat_offset_fixture(b"ABCDE12345", 10_000);
                assert_eq!(rt(&data), data);
            }
            #[test]
            fn large_literals() {
                let data = generate_huffman_friendly($seed_base + 444, 128 * 1024, 64);
                assert_eq!(rt(&data), data);
            }
        }
    };
}

// Instantiate the shared suite for the Better and Best levels; the Fastest
// and Default levels are exercised by the standalone tests above.
level_roundtrip_suite!(mod better_level, CompressionLevel::Better, 888);
level_roundtrip_suite!(mod best_level, CompressionLevel::Best, 1111);

/// Better (lazy2) should compress close to or better than Default (lazy) on
/// structured, compressible data. Lazy2 may be marginally worse on some inputs
/// due to skipping otherwise-adequate matches while looking further ahead.
#[test]
fn better_level_compresses_close_to_default() {
    let data = repeat_offset_fixture(b"HelloWorld", (256 * 1024) / 12 + 1);
    let size_at = |level| compress_to_vec(&data[..], level).len() as u64;
    let better = size_at(CompressionLevel::Better);
    let default = size_at(CompressionLevel::Default);
    // Allow up to 1% regression; lazy2 optimizes for broader data patterns.
    assert!(
        better * 100 <= default * 101,
        "Better level should stay within 1% of Default. \
         better={} bytes, default={} bytes",
        better,
        default,
    );
}

/// Exercise the 8 MiB window: place a repeated pattern beyond Default's
/// 4 MiB window so only Better (8 MiB) can match it.
#[test]
fn roundtrip_better_level_large_window() {
    // Two identical 256 KiB regions separated by a 4.5 MiB compressible gap.
    // The gap uses a different seed so it doesn't share patterns with the
    // regions, but being compressible means hash chains aren't fully
    // destroyed by random noise. Better's 8 MiB window can still reach the
    // first region; Default's 4 MiB window cannot.
    let region = generate_compressible(42, 256 * 1024);
    let gap = generate_compressible(9999, 4 * 1024 * 1024 + 512 * 1024);
    let data = [region.as_slice(), gap.as_slice(), region.as_slice()].concat();

    assert_eq!(roundtrip_better(&data), data);

    // Better should compress the duplicated region; Default cannot reach it.
    let size_at = |level| compress_to_vec(&data[..], level).len();
    let better = size_at(CompressionLevel::Better);
    let default = size_at(CompressionLevel::Default);
    assert!(
        better < default,
        "Better (8 MiB window) should beat Default (4 MiB) across 4.5 MiB gap. \
         better={} default={}",
        better,
        default,
    );
}

/// Best must not regress vs Better on this repetitive fixture. Equal
/// output is expected here (HC finds identical matches at any depth);
/// the strict Best < Better check lives in cross_validation.rs on the
/// more diverse decodecorpus sample.
#[test]
fn best_level_does_not_regress_vs_better() {
    let data = repeat_offset_fixture(b"HelloWorld", (256 * 1024) / 12 + 1);
    let size_at = |level| compress_to_vec(&data[..], level).len();
    let best = size_at(CompressionLevel::Best);
    let better = size_at(CompressionLevel::Better);
    assert!(
        best <= better,
        "Best must not regress vs Better. best={} bytes, better={} bytes",
        best,
        better,
    );
}

/// Exercise the 16 MiB window: place a repeated pattern beyond Better's
/// 8 MiB window so only Best (16 MiB) can match it.
#[test]
fn roundtrip_best_level_large_window() {
    // Two identical 256 KiB high-entropy regions separated by a 9 MiB
    // compressible gap. The region is random so the only way to compress
    // the second copy is via long-distance matching (window reach).
    // Best's 16 MiB window can still reach the first region;
    // Better's 8 MiB window cannot.
    //
    // Unlike the Better-level window test above, the regions here are random
    // rather than compressible, which isolates window reach as the only
    // source of savings on the second copy.
    let region = generate_data(42, 256 * 1024);
    let gap = generate_compressible(7777, 9 * 1024 * 1024);
    let mut data = Vec::with_capacity(region.len() + gap.len() + region.len());
    data.extend_from_slice(&region);
    data.extend_from_slice(&gap);
    data.extend_from_slice(&region);

    assert_eq!(roundtrip_best(&data), data);

    // Best should compress the duplicated region; Better cannot reach it.
    let compressed_best = compress_to_vec(&data[..], CompressionLevel::Best);
    let compressed_better = compress_to_vec(&data[..], CompressionLevel::Better);
    assert!(
        compressed_best.len() < compressed_better.len(),
        "Best (16 MiB window) should beat Better (8 MiB) across 9 MiB gap. \
         best={} better={}",
        compressed_best.len(),
        compressed_better.len(),
    );
}

/// Best level streaming should produce identical decompressed output.
#[test]
fn roundtrip_best_level_streaming_multi_block() {
    // 512KB spans multiple 128KB blocks through the streaming path.
    let data = generate_compressible(5555, 512 * 1024);
    let decoded = roundtrip_best_streaming(&data);
    assert_eq!(decoded, data);
}