// hermes_core/compression/zstd.rs
//! Zstd compression backend with dictionary support
//!
//! For static indexes, we use:
//! - Maximum compression level (22) for best compression ratio
//! - Trained dictionaries for even better compression of similar documents
//! - Larger block sizes to improve compression efficiency

use std::io::{self, Write};

/// Compression level (1-22 for zstd)
///
/// Thin newtype over the raw zstd level so callers can't accidentally
/// pass an arbitrary integer where a level is expected.
#[derive(Debug, Clone, Copy)]
pub struct CompressionLevel(pub i32);

impl CompressionLevel {
    /// Fast compression (level 1)
    pub const FAST: Self = CompressionLevel(1);
    /// Default compression (level 3)
    pub const DEFAULT: Self = CompressionLevel(3);
    /// Better compression (level 9)
    pub const BETTER: Self = CompressionLevel(9);
    /// Best compression (level 19)
    pub const BEST: Self = CompressionLevel(19);
    /// Maximum compression (level 22) - slowest but smallest
    pub const MAX: Self = CompressionLevel(22);
}
26
27impl Default for CompressionLevel {
28    fn default() -> Self {
29        Self::MAX // Use maximum compression for static indexes
30    }
31}
32
/// Trained Zstd dictionary for improved compression
///
/// Wraps the raw dictionary blob in `OwnedBytes`, so it can hold either a
/// heap-allocated `Vec<u8>` (see `from_bytes`) or an existing buffer such as
/// an mmap region (see `from_owned_bytes`) without copying.
#[derive(Clone)]
pub struct CompressionDict {
    // Raw dictionary bytes as produced by `zstd::dict::from_samples` in
    // `train`, or loaded back from disk via the `from_*` constructors.
    raw_dict: crate::directories::OwnedBytes,
}
38
39impl CompressionDict {
40    /// Train a dictionary from sample data
41    ///
42    /// For best results, provide many small samples (e.g., serialized documents)
43    /// The dictionary size should typically be 16KB-112KB
44    pub fn train(samples: &[&[u8]], dict_size: usize) -> io::Result<Self> {
45        let raw_dict = zstd::dict::from_samples(samples, dict_size).map_err(io::Error::other)?;
46        Ok(Self {
47            raw_dict: crate::directories::OwnedBytes::new(raw_dict),
48        })
49    }
50
51    /// Create dictionary from raw bytes (for loading saved dictionaries)
52    pub fn from_bytes(bytes: Vec<u8>) -> Self {
53        Self {
54            raw_dict: crate::directories::OwnedBytes::new(bytes),
55        }
56    }
57
58    /// Create dictionary from OwnedBytes (zero-copy for mmap)
59    pub fn from_owned_bytes(bytes: crate::directories::OwnedBytes) -> Self {
60        Self { raw_dict: bytes }
61    }
62
63    /// Get raw dictionary bytes (for saving)
64    pub fn as_bytes(&self) -> &[u8] {
65        self.raw_dict.as_slice()
66    }
67
68    /// Dictionary size in bytes
69    pub fn len(&self) -> usize {
70        self.raw_dict.len()
71    }
72
73    /// Check if dictionary is empty
74    pub fn is_empty(&self) -> bool {
75        self.raw_dict.is_empty()
76    }
77}
78
79/// Compress data using Zstd
80pub fn compress(data: &[u8], level: CompressionLevel) -> io::Result<Vec<u8>> {
81    zstd::encode_all(data, level.0).map_err(io::Error::other)
82}
83
84/// Compress data using Zstd with a trained dictionary
85pub fn compress_with_dict(
86    data: &[u8],
87    level: CompressionLevel,
88    dict: &CompressionDict,
89) -> io::Result<Vec<u8>> {
90    let mut encoder = zstd::Encoder::with_dictionary(Vec::new(), level.0, dict.raw_dict.as_slice())
91        .map_err(io::Error::other)?;
92    encoder.write_all(data)?;
93    encoder.finish().map_err(io::Error::other)
94}
95
/// Upper bound for decompressed output (512KB covers 256KB store blocks).
///
/// Used as the output-buffer bound for the bulk decompress calls below.
const DECOMPRESS_CAPACITY: usize = 512 * 1024;
98
99/// Decompress data using Zstd
100///
101/// Reuses a thread-local `Decompressor` to avoid re-initializing the
102/// zstd context on every call. The bulk API reads the content-size
103/// field from the frame header and allocates the exact output buffer.
104pub fn decompress(data: &[u8]) -> io::Result<Vec<u8>> {
105    thread_local! {
106        static DECOMPRESSOR: std::cell::RefCell<zstd::bulk::Decompressor<'static>> =
107            std::cell::RefCell::new(zstd::bulk::Decompressor::new().unwrap());
108    }
109    DECOMPRESSOR.with(|dc| {
110        dc.borrow_mut()
111            .decompress(data, DECOMPRESS_CAPACITY)
112            .map_err(io::Error::other)
113    })
114}
115
116/// Decompress data using Zstd with a trained dictionary
117///
118/// Note: dictionary decompressors are NOT reused via thread-local because
119/// each store/sstable may use a different dictionary. The caller (block
120/// cache) ensures this is called only on cache misses.
121pub fn decompress_with_dict(data: &[u8], dict: &CompressionDict) -> io::Result<Vec<u8>> {
122    let mut decompressor = zstd::bulk::Decompressor::with_dictionary(dict.raw_dict.as_slice())
123        .map_err(io::Error::other)?;
124    decompressor
125        .decompress(data, DECOMPRESS_CAPACITY)
126        .map_err(io::Error::other)
127}
128
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_roundtrip() {
        // Compressing repetitive text must shrink it and round-trip exactly.
        let original = b"Hello, World! This is a test of compression.".repeat(100);
        let packed = compress(&original, CompressionLevel::default()).unwrap();
        let unpacked = decompress(&packed).unwrap();
        assert_eq!(original, unpacked.as_slice());
        assert!(packed.len() < original.len());
    }

    #[test]
    fn test_empty_data() {
        // An empty input must survive the round trip as an empty output.
        let empty: &[u8] = &[];
        let packed = compress(empty, CompressionLevel::default()).unwrap();
        assert!(decompress(&packed).unwrap().is_empty());
    }

    #[test]
    fn test_compression_levels() {
        // Every supported level must produce a decodable frame.
        let payload = b"Test data for compression levels".repeat(100);
        for lvl in [1, 3, 9, 19] {
            let packed = compress(&payload, CompressionLevel(lvl)).unwrap();
            let unpacked = decompress(&packed).unwrap();
            assert_eq!(payload.as_slice(), unpacked.as_slice());
        }
    }
}
159}