// hermes_core/compression/zstd.rs
//! Zstd compression backend with dictionary support
//!
//! For static indexes, we use:
//! - Maximum compression level (22) for best compression ratio
//! - Trained dictionaries for even better compression of similar documents
//! - Larger block sizes to improve compression efficiency

use std::io::{self, Write};

/// Compression level (1-22 for zstd)
///
/// Thin newtype over the raw zstd level. Derives comparison and hashing so
/// callers can order levels (`FAST < MAX`) and use them as map keys.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct CompressionLevel(pub i32);

impl CompressionLevel {
    /// Fast compression (level 1)
    pub const FAST: Self = Self(1);
    /// Default compression (level 3)
    pub const DEFAULT: Self = Self(3);
    /// Better compression (level 9)
    pub const BETTER: Self = Self(9);
    /// Best compression (level 19)
    pub const BEST: Self = Self(19);
    /// Maximum compression (level 22) - slowest but smallest
    pub const MAX: Self = Self(22);
}

impl Default for CompressionLevel {
    fn default() -> Self {
        Self::MAX // Use maximum compression for static indexes
    }
}
32
33/// Trained Zstd dictionary for improved compression
34#[derive(Clone)]
35pub struct CompressionDict {
36    raw_dict: crate::directories::OwnedBytes,
37}
38
39impl CompressionDict {
40    /// Train a dictionary from sample data
41    ///
42    /// For best results, provide many small samples (e.g., serialized documents)
43    /// The dictionary size should typically be 16KB-112KB
44    pub fn train(samples: &[&[u8]], dict_size: usize) -> io::Result<Self> {
45        let raw_dict = zstd::dict::from_samples(samples, dict_size).map_err(io::Error::other)?;
46        Ok(Self {
47            raw_dict: crate::directories::OwnedBytes::new(raw_dict),
48        })
49    }
50
51    /// Create dictionary from raw bytes (for loading saved dictionaries)
52    pub fn from_bytes(bytes: Vec<u8>) -> Self {
53        Self {
54            raw_dict: crate::directories::OwnedBytes::new(bytes),
55        }
56    }
57
58    /// Create dictionary from OwnedBytes (zero-copy for mmap)
59    pub fn from_owned_bytes(bytes: crate::directories::OwnedBytes) -> Self {
60        Self { raw_dict: bytes }
61    }
62
63    /// Get raw dictionary bytes (for saving)
64    pub fn as_bytes(&self) -> &[u8] {
65        self.raw_dict.as_slice()
66    }
67
68    /// Dictionary size in bytes
69    pub fn len(&self) -> usize {
70        self.raw_dict.len()
71    }
72
73    /// Check if dictionary is empty
74    pub fn is_empty(&self) -> bool {
75        self.raw_dict.is_empty()
76    }
77}
78
79/// Compress data using Zstd
80pub fn compress(data: &[u8], level: CompressionLevel) -> io::Result<Vec<u8>> {
81    zstd::encode_all(data, level.0).map_err(io::Error::other)
82}
83
84/// Compress data using Zstd with a trained dictionary
85pub fn compress_with_dict(
86    data: &[u8],
87    level: CompressionLevel,
88    dict: &CompressionDict,
89) -> io::Result<Vec<u8>> {
90    let mut encoder = zstd::Encoder::with_dictionary(Vec::new(), level.0, dict.raw_dict.as_slice())
91        .map_err(io::Error::other)?;
92    encoder.write_all(data)?;
93    encoder.finish().map_err(io::Error::other)
94}
95
/// Upper bound for decompressed output (512KB covers 256KB store blocks).
///
/// NOTE(review): the bulk decompressor below errors out if a frame expands
/// beyond this capacity — assumes no stored block decompresses past 512KB;
/// confirm against the store writer's block-size limit.
const DECOMPRESS_CAPACITY: usize = 512 * 1024;
98
99/// Decompress data using Zstd
100///
101/// Reuses a thread-local `Decompressor` to avoid re-initializing the
102/// zstd context on every call. The bulk API reads the content-size
103/// field from the frame header and allocates the exact output buffer.
104pub fn decompress(data: &[u8]) -> io::Result<Vec<u8>> {
105    thread_local! {
106        static DECOMPRESSOR: std::cell::RefCell<zstd::bulk::Decompressor<'static>> =
107            std::cell::RefCell::new(zstd::bulk::Decompressor::new().unwrap());
108    }
109    DECOMPRESSOR.with(|dc| {
110        dc.borrow_mut()
111            .decompress(data, DECOMPRESS_CAPACITY)
112            .map_err(io::Error::other)
113    })
114}
115
116/// Decompress data using Zstd with a trained dictionary
117///
118/// Caches the dictionary decompressor in a thread-local, keyed by the
119/// dictionary's data pointer. Since a given `AsyncStoreReader` always holds
120/// the same `CompressionDict` (behind `Arc<OwnedBytes>`), the pointer is
121/// stable for the reader's lifetime. The decompressor is only rebuilt when
122/// a different dictionary is encountered (e.g., switching between segments).
123pub fn decompress_with_dict(data: &[u8], dict: &CompressionDict) -> io::Result<Vec<u8>> {
124    thread_local! {
125        static DICT_DC: std::cell::RefCell<Option<(usize, zstd::bulk::Decompressor<'static>)>> =
126            const { std::cell::RefCell::new(None) };
127    }
128    // Use the raw dict slice pointer as a stable identity key.
129    let dict_key = dict.as_bytes().as_ptr() as usize;
130
131    DICT_DC.with(|cell| {
132        let mut slot = cell.borrow_mut();
133        // Rebuild decompressor only if dict changed
134        if slot.as_ref().is_none_or(|(k, _)| *k != dict_key) {
135            let dc = zstd::bulk::Decompressor::with_dictionary(dict.as_bytes())
136                .map_err(io::Error::other)?;
137            *slot = Some((dict_key, dc));
138        }
139        slot.as_mut()
140            .unwrap()
141            .1
142            .decompress(data, DECOMPRESS_CAPACITY)
143            .map_err(io::Error::other)
144    })
145}
146
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_roundtrip() {
        let data = b"Hello, World! This is a test of compression.".repeat(100);
        let compressed = compress(&data, CompressionLevel::default()).unwrap();
        let decompressed = decompress(&compressed).unwrap();
        assert_eq!(data, decompressed.as_slice());
        assert!(compressed.len() < data.len());
    }

    #[test]
    fn test_empty_data() {
        let data: &[u8] = &[];
        let compressed = compress(data, CompressionLevel::default()).unwrap();
        let decompressed = decompress(&compressed).unwrap();
        assert!(decompressed.is_empty());
    }

    #[test]
    fn test_compression_levels() {
        let data = b"Test data for compression levels".repeat(100);
        // Cover every named level, including MAX (22), which the previous
        // loop omitted.
        for level in [1, 3, 9, 19, 22] {
            let compressed = compress(&data, CompressionLevel(level)).unwrap();
            let decompressed = decompress(&compressed).unwrap();
            assert_eq!(data.as_slice(), decompressed.as_slice());
        }
    }

    #[test]
    fn test_dict_roundtrip() {
        // Raw-content dictionary (no trained-dict magic); zstd loads such
        // bytes as a raw dictionary for both compression and decompression.
        let dict = CompressionDict::from_bytes(b"common prefix for documents ".repeat(32));
        let data = b"common prefix for documents plus a unique tail".repeat(50);
        let compressed = compress_with_dict(&data, CompressionLevel::DEFAULT, &dict).unwrap();
        let decompressed = decompress_with_dict(&compressed, &dict).unwrap();
        assert_eq!(data.as_slice(), decompressed.as_slice());
        // Cached decompressor path: a second call with the same dict reuses it.
        let again = decompress_with_dict(&compressed, &dict).unwrap();
        assert_eq!(data.as_slice(), again.as_slice());
    }
}
177}