// hermes_core/compression/zstd.rs

//! Zstd compression backend with dictionary support
//!
//! For static indexes, we use:
//! - Maximum compression level (22) for best compression ratio
//! - Trained dictionaries for even better compression of similar documents
//! - Larger block sizes to improve compression efficiency

use std::io::{self, Write};

/// Zstd compression level (valid range: 1-22).
#[derive(Debug, Clone, Copy)]
pub struct CompressionLevel(pub i32);

impl CompressionLevel {
    /// Fast compression (level 1)
    pub const FAST: Self = Self(1);
    /// Default compression (level 3)
    pub const DEFAULT: Self = Self(3);
    /// Better compression (level 9)
    pub const BETTER: Self = Self(9);
    /// Best compression (level 19)
    pub const BEST: Self = Self(19);
    /// Maximum compression (level 22) - slowest but smallest
    pub const MAX: Self = Self(22);
}

impl Default for CompressionLevel {
    fn default() -> Self {
        // Static indexes trade compression time for ratio, so the
        // default is the maximum level rather than zstd's usual 3.
        Self::MAX
    }
}

33/// Trained Zstd dictionary for improved compression
34#[derive(Clone)]
35pub struct CompressionDict {
36    raw_dict: Vec<u8>,
37}
38
39impl CompressionDict {
40    /// Train a dictionary from sample data
41    ///
42    /// For best results, provide many small samples (e.g., serialized documents)
43    /// The dictionary size should typically be 16KB-112KB
44    pub fn train(samples: &[&[u8]], dict_size: usize) -> io::Result<Self> {
45        let raw_dict = zstd::dict::from_samples(samples, dict_size).map_err(io::Error::other)?;
46        Ok(Self { raw_dict })
47    }
48
49    /// Create dictionary from raw bytes (for loading saved dictionaries)
50    pub fn from_bytes(bytes: Vec<u8>) -> Self {
51        Self { raw_dict: bytes }
52    }
53
54    /// Get raw dictionary bytes (for saving)
55    pub fn as_bytes(&self) -> &[u8] {
56        &self.raw_dict
57    }
58
59    /// Dictionary size in bytes
60    pub fn len(&self) -> usize {
61        self.raw_dict.len()
62    }
63
64    /// Check if dictionary is empty
65    pub fn is_empty(&self) -> bool {
66        self.raw_dict.is_empty()
67    }
68}
69
70/// Compress data using Zstd
71pub fn compress(data: &[u8], level: CompressionLevel) -> io::Result<Vec<u8>> {
72    zstd::encode_all(data, level.0).map_err(io::Error::other)
73}
74
75/// Compress data using Zstd with a trained dictionary
76pub fn compress_with_dict(
77    data: &[u8],
78    level: CompressionLevel,
79    dict: &CompressionDict,
80) -> io::Result<Vec<u8>> {
81    let mut encoder = zstd::Encoder::with_dictionary(Vec::new(), level.0, &dict.raw_dict)
82        .map_err(io::Error::other)?;
83    encoder.write_all(data)?;
84    encoder.finish().map_err(io::Error::other)
85}
86
87/// Decompress data using Zstd
88///
89/// Uses the bulk (single-shot) API which reads the content size from
90/// the frame header and allocates the exact output buffer upfront,
91/// avoiding the repeated reallocations of the streaming API.
92pub fn decompress(data: &[u8]) -> io::Result<Vec<u8>> {
93    // 512KB upper bound covers store blocks (256KB uncompressed).
94    // The bulk API reads the frame's content-size field if available
95    // (zstd always writes it) and allocates exactly, falling back to
96    // this capacity as a hard limit.
97    zstd::bulk::decompress(data, 512 * 1024).map_err(io::Error::other)
98}
99
100/// Decompress data using Zstd with a trained dictionary
101///
102/// Uses the bulk API with pre-allocated output to avoid repeated
103/// reallocations of the streaming `read_to_end` approach.
104pub fn decompress_with_dict(data: &[u8], dict: &CompressionDict) -> io::Result<Vec<u8>> {
105    let mut decompressor =
106        zstd::bulk::Decompressor::with_dictionary(&dict.raw_dict).map_err(io::Error::other)?;
107    decompressor
108        .decompress(data, 512 * 1024)
109        .map_err(io::Error::other)
110}
111
#[cfg(test)]
mod tests {
    use super::*;

    // Compress then decompress should reproduce the input exactly, and
    // repetitive data should actually shrink.
    #[test]
    fn test_roundtrip() {
        let original = b"Hello, World! This is a test of compression.".repeat(100);
        let packed = compress(&original, CompressionLevel::default()).unwrap();
        let unpacked = decompress(&packed).unwrap();
        assert_eq!(original, unpacked.as_slice());
        assert!(packed.len() < original.len());
    }

    // An empty payload must round-trip to an empty payload.
    #[test]
    fn test_empty_data() {
        let empty: &[u8] = &[];
        let packed = compress(empty, CompressionLevel::default()).unwrap();
        assert!(decompress(&packed).unwrap().is_empty());
    }

    // Every supported named level must round-trip losslessly.
    #[test]
    fn test_compression_levels() {
        let payload = b"Test data for compression levels".repeat(100);
        for lvl in [1, 3, 9, 19] {
            let packed = compress(&payload, CompressionLevel(lvl)).unwrap();
            let unpacked = decompress(&packed).unwrap();
            assert_eq!(payload.as_slice(), unpacked.as_slice());
        }
    }
}