blosc-rs 0.1.0

Rust bindings for blosc - a blocking, shuffling and lossless compression library
Documentation
#![cfg_attr(deny_warnings, deny(warnings))]
#![cfg_attr(deny_warnings, deny(missing_docs))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]

//! Rust bindings for blosc - a blocking, shuffling and lossless compression library.
//!
//! Provides a safe interface to the [blosc](https://github.com/Blosc/c-blosc) library.
//! The crate has zero dependencies.
//!
//! # Getting Started
//!
//! To use this library, add the following to your `Cargo.toml`:
//! ```toml
//! [dependencies]
//! blosc-rs = "0.1"
//!
//! # Or alternatively, rename the crate to `blosc`
//! blosc = { package = "blosc-rs", version = "0.1" }
//! ```
//!
//! In the following example we compress a vector of integers and then decompress it back:
//! ```rust
//! use blosc_rs::{CLevel, CompressAlgo, Shuffle, compress, decompress};
//!
//! let data: [i32; 7] = [1, 2, 3, 4, 5, 6, 7];
//!
//! let data_bytes = unsafe {
//!     std::slice::from_raw_parts(
//!         data.as_ptr() as *const u8,
//!         data.len() * std::mem::size_of::<i32>(),
//!     )
//! };
//! let numinternalthreads = 4;
//! let compressed = compress(
//!     CLevel::L5,
//!     Shuffle::Byte,
//!     std::mem::size_of::<i32>(), // itemsize
//!     data_bytes,
//!     &CompressAlgo::Blosclz,
//!     None, // automatic block size
//!     numinternalthreads,
//! )
//! .unwrap();
//!
//! let decompressed = decompress(&compressed, numinternalthreads).unwrap();
//! // SAFETY: we know the data is of type i32
//! let decompressed: &[i32] = unsafe {
//!     std::slice::from_raw_parts(
//!         decompressed.as_ptr() as *const i32,
//!         decompressed.len() / std::mem::size_of::<i32>(),
//!     )
//! };
//!
//! assert_eq!(data, *decompressed);
//! ```

use std::ffi::{CStr, CString};
use std::mem::MaybeUninit;
use std::num::NonZeroUsize;

/// The version of the underlying C-blosc library used by this crate.
pub const BLOSC_C_VERSION: &str = {
    // `BLOSC_VERSION_STRING` is a NUL-terminated byte string exported by
    // `blosc-rs-sys`. Both conversions are infallible for it; a failure would
    // abort const evaluation (i.e. fail the build) rather than panic at run time.
    match CStr::from_bytes_until_nul(blosc_rs_sys::BLOSC_VERSION_STRING) {
        Ok(version) => match version.to_str() {
            Ok(s) => s,
            Err(_) => panic!("BLOSC_VERSION_STRING is not valid UTF-8"),
        },
        Err(_) => panic!("BLOSC_VERSION_STRING has no NUL terminator"),
    }
};

/// Compress a block of data in the `src` buffer and returns the compressed data.
///
/// Note that this function allocates a new `Vec<u8>` for the compressed data with the maximum possible size required
/// for it (uncompressed size + 16), which may be larger than what's actually needed. If this function is used in a
/// critical performance path, consider using `compress_into` instead, allowing you to provide a pre-allocated
/// buffer which can be used repeatedly without the overhead of allocations.
///
/// # Arguments
///
/// * `clevel`: The desired compression level.
/// * `shuffle`: Specifies which (if any) shuffle compression filters should be applied.
/// * `typesize`: The number of bytes for the atomic type in the binary `src` buffer. This is mainly useful for the
///   shuffle filters. For implementation reasons, only a `typesize` in the range 1 < `typesize` < 256 will allow the
///   shuffle filter to work. When `typesize` is not in this range, shuffle will be silently disabled.
/// * `src`: The source data to compress.
/// * `compressor`: The compression algorithm to use.
/// * `blocksize`: Optional block size for compression. If `None`, an automatic block size will be used.
/// * `numinternalthreads`: The number of threads to use internally.
///
/// # Returns
///
/// A `Result` containing the compressed data as a `Vec<u8>`, or a `CompressError` if an error occurs.
pub fn compress(
    clevel: CLevel,
    shuffle: Shuffle,
    typesize: usize,
    src: &[u8],
    compressor: &CompressAlgo,
    blocksize: Option<NonZeroUsize>,
    numinternalthreads: u32,
) -> Result<Vec<u8>, CompressError> {
    // Worst-case output size: blosc never needs more than the input size plus a
    // fixed header overhead (`BLOSC_MAX_OVERHEAD` bytes).
    let dst_max_len = src.len() + blosc_rs_sys::BLOSC_MAX_OVERHEAD as usize;
    let mut dst = Vec::<MaybeUninit<u8>>::with_capacity(dst_max_len);
    // SAFETY: `MaybeUninit<u8>` is allowed to be uninitialized, and the capacity
    // was reserved just above.
    unsafe { dst.set_len(dst_max_len) };

    let len = compress_into(
        clevel,
        shuffle,
        typesize,
        src,
        dst.as_mut_slice(),
        compressor,
        blocksize,
        numinternalthreads,
    )?;
    assert!(len <= dst_max_len);

    // Reinterpret `Vec<MaybeUninit<u8>>` as `Vec<u8>` without copying. A plain
    // `mem::transmute` of a `Vec` is not sound: the std docs explicitly do not
    // guarantee that `Vec<T>` and `Vec<U>` share a layout, even for same-sized
    // element types. Rebuilding the vector from its raw parts is the documented
    // sound way to do this conversion.
    let mut dst = std::mem::ManuallyDrop::new(dst);
    // SAFETY: the first `len` bytes were initialized by `compress_into`,
    // `MaybeUninit<u8>` has the same size and alignment as `u8`, the pointer and
    // capacity come from the same live allocation, and the original vector is
    // never dropped (it is wrapped in `ManuallyDrop`).
    let vec = unsafe { Vec::from_raw_parts(dst.as_mut_ptr() as *mut u8, len, dst.capacity()) };
    Ok(vec)
}

/// Compress a block of data in the `src` buffer into the `dst` buffer.
///
/// # Arguments
///
/// * `clevel`: The desired compression level.
/// * `shuffle`: Specifies which (if any) shuffle compression filters should be applied.
/// * `typesize`: The number of bytes for the atomic type in the binary `src` buffer. This is mainly useful for the
///   shuffle filters. For implementation reasons, only a `typesize` in the range 1 < `typesize` < 256 will allow the
///   shuffle filter to work. When `typesize` is not in this range, shuffle will be silently disabled.
/// * `src`: The source data to compress.
/// * `dst`: The destination buffer where the compressed data will be written.
/// * `compressor`: The compression algorithm to use.
/// * `blocksize`: Optional block size for compression. If `None`, an automatic block size will be used.
/// * `numinternalthreads`: The number of threads to use internally.
///
/// # Returns
///
/// A `Result` containing the number of bytes written to the `dst` buffer, or a `CompressError` if an error occurs.
#[allow(clippy::too_many_arguments)]
pub fn compress_into(
    clevel: CLevel,
    shuffle: Shuffle,
    typesize: usize,
    src: &[u8],
    dst: &mut [MaybeUninit<u8>],
    compressor: &CompressAlgo,
    blocksize: Option<NonZeroUsize>,
    numinternalthreads: u32,
) -> Result<usize, CompressError> {
    // SAFETY: the pointer/length pairs describe the `src` and `dst` slices
    // exactly, and the compressor name is a valid NUL-terminated C string.
    let status = unsafe {
        blosc_rs_sys::blosc_compress_ctx(
            clevel as i32 as std::ffi::c_int,
            shuffle as u32 as std::ffi::c_int,
            typesize,
            src.len(),
            src.as_ptr() as *const std::ffi::c_void,
            dst.as_mut_ptr() as *mut std::ffi::c_void,
            dst.len(),
            compressor.as_ref().as_ptr(),
            blocksize.map_or(0, NonZeroUsize::get),
            numinternalthreads as std::ffi::c_int,
        )
    };
    if status > 0 {
        // A positive status is the number of compressed bytes written to `dst`.
        let written = status as usize;
        assert!(written <= dst.len());
        Ok(written)
    } else if status == 0 {
        // Zero means the compressed output did not fit in `dst`.
        Err(CompressError::DestinationBufferTooSmall)
    } else {
        // Any negative status is an internal blosc failure code.
        Err(CompressError::InternalError(status))
    }
}

/// Error that can occur during compression.
// Standard derives added for consistency with `Shuffle`/`CompressAlgo`, so
// callers can compare, clone and store these errors.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CompressError {
    /// Error indicating that the destination buffer is too small to hold the compressed data.
    DestinationBufferTooSmall,
    /// blosc internal error, carrying the raw negative status code returned by the C library.
    InternalError(i32),
}
impl std::fmt::Display for CompressError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            CompressError::DestinationBufferTooSmall => {
                f.write_str("destination buffer is too small")
            }
            CompressError::InternalError(status) => write!(f, "blosc internal error: {status}"),
        }
    }
}
impl std::error::Error for CompressError {}

/// Represents the compression levels used by Blosc.
///
/// The levels range from 0 to 9, where 0 is no compression and 9 is maximum compression.
#[allow(missing_docs)]
// Derives added for consistency with `Shuffle`; `PartialOrd`/`Ord` follow the
// natural 0..=9 ordering of the discriminants.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(i32)]
pub enum CLevel {
    L0 = 0,
    L1 = 1,
    L2 = 2,
    L3 = 3,
    L4 = 4,
    L5 = 5,
    L6 = 6,
    L7 = 7,
    L8 = 8,
    L9 = 9,
}
impl TryFrom<i32> for CLevel {
    type Error = ();

    /// Converts an integer in the range `0..=9` to the matching compression
    /// level; any other value yields `Err(())`.
    fn try_from(value: i32) -> Result<Self, Self::Error> {
        match value {
            0 => Ok(CLevel::L0),
            1 => Ok(CLevel::L1),
            2 => Ok(CLevel::L2),
            3 => Ok(CLevel::L3),
            4 => Ok(CLevel::L4),
            5 => Ok(CLevel::L5),
            6 => Ok(CLevel::L6),
            7 => Ok(CLevel::L7),
            8 => Ok(CLevel::L8),
            9 => Ok(CLevel::L9),
            _ => Err(()),
        }
    }
}

/// Represents the shuffle filters used by Blosc.
///
/// Shuffle filters rearrange the input before compression, grouping together
/// the same byte (or bit) position of adjacent items of size `typesize`, which
/// usually improves compression ratios for binary numeric data.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u32)]
pub enum Shuffle {
    /// No shuffle filter is applied; bytes are compressed as-is.
    None = blosc_rs_sys::BLOSC_NOSHUFFLE,
    /// Byte-wise shuffle filter.
    Byte = blosc_rs_sys::BLOSC_SHUFFLE,
    /// Bit-wise shuffle filter (bit-granularity variant of the shuffle).
    Bit = blosc_rs_sys::BLOSC_BITSHUFFLE,
}

/// Represents the compression algorithms supported by Blosc.
///
/// `Other` allows passing an arbitrary compressor name to the C library, for
/// codecs this enum does not name explicitly.
#[derive(Debug, Clone, PartialEq, Eq)]
#[allow(missing_docs)]
pub enum CompressAlgo {
    Blosclz,
    Lz4,
    Lz4hc,
    // Snappy,
    Zlib,
    Zstd,
    Other(CString),
}
impl AsRef<CStr> for CompressAlgo {
    /// Returns the NUL-terminated compressor name expected by the C library.
    fn as_ref(&self) -> &CStr {
        use CompressAlgo::*;
        match self {
            Blosclz => c"blosclz",
            Lz4 => c"lz4",
            Lz4hc => c"lz4hc",
            // Snappy => c"snappy",
            Zlib => c"zlib",
            Zstd => c"zstd",
            Other(name) => name.as_c_str(),
        }
    }
}

/// Decompress a block of compressed data in `src` and returns the decompressed data.
///
/// # Arguments
///
/// * `src`: The compressed data to decompress.
/// * `numinternalthreads`: The number of threads to use internally.
///
/// # Returns
///
/// A `Result` containing the decompressed data as a `Vec<u8>`, or a `DecompressError` if an error occurs.
pub fn decompress(src: &[u8], numinternalthreads: u32) -> Result<Vec<u8>, DecompressError> {
    // Validate the header up front so the output buffer can be sized exactly.
    let dst_len = validate_compressed_slice_and_get_uncompressed_len(src)
        .ok_or(DecompressError::DecompressingError)?;
    let mut dst = Vec::<MaybeUninit<u8>>::with_capacity(dst_len);
    // SAFETY: `MaybeUninit<u8>` is allowed to be uninitialized, and the capacity
    // was reserved just above.
    unsafe { dst.set_len(dst_len) };

    // SAFETY: `src` was validated above and `dst` is exactly the advertised
    // decompressed size.
    let len = unsafe { decompress_into_unchecked(src, dst.as_mut_slice(), numinternalthreads)? };
    assert!(len <= dst_len);

    // Reinterpret `Vec<MaybeUninit<u8>>` as `Vec<u8>` without copying. A plain
    // `mem::transmute` of a `Vec` is not sound: the std docs explicitly do not
    // guarantee that `Vec<T>` and `Vec<U>` share a layout, even for same-sized
    // element types. Rebuilding the vector from its raw parts is the documented
    // sound way to do this conversion.
    let mut dst = std::mem::ManuallyDrop::new(dst);
    // SAFETY: the first `len` bytes were initialized by the decompression call,
    // `MaybeUninit<u8>` has the same size and alignment as `u8`, the pointer and
    // capacity come from the same live allocation, and the original vector is
    // never dropped (it is wrapped in `ManuallyDrop`).
    let vec = unsafe { Vec::from_raw_parts(dst.as_mut_ptr() as *mut u8, len, dst.capacity()) };
    Ok(vec)
}

/// Decompress a block of compressed data in `src` into the `dst` buffer.
///
/// # Arguments
///
/// * `src`: The compressed data to decompress.
/// * `dst`: The destination buffer where the decompressed data will be written.
/// * `numinternalthreads`: The number of threads to use internally.
///
/// # Returns
///
/// A `Result` containing the number of bytes written to the `dst` buffer, or a `DecompressError` if an error occurs.
pub fn decompress_into(
    src: &[u8],
    dst: &mut [MaybeUninit<u8>],
    numinternalthreads: u32,
) -> Result<usize, DecompressError> {
    // Reject malformed input and learn the advertised decompressed size.
    let Some(expected_len) = validate_compressed_slice_and_get_uncompressed_len(src) else {
        return Err(DecompressError::DecompressingError);
    };
    if dst.len() < expected_len {
        return Err(DecompressError::DestinationBufferTooSmall);
    }
    // SAFETY: `src` was just validated and `dst` is large enough to hold the
    // advertised decompressed payload.
    let written = unsafe { decompress_into_unchecked(src, dst, numinternalthreads) }?;
    assert!(written <= expected_len);
    Ok(written)
}

/// Decompress `src` into `dst` without validating `src` first.
///
/// Returns the number of bytes written into `dst` on success.
///
/// # Safety
///
/// `src` must be a well-formed blosc compressed buffer: the C library reads
/// length fields from its header, so a corrupted or truncated buffer may cause
/// out-of-bounds reads. Callers in this crate establish this by calling
/// `validate_compressed_slice_and_get_uncompressed_len` first.
unsafe fn decompress_into_unchecked(
    src: &[u8],
    dst: &mut [MaybeUninit<u8>],
    numinternalthreads: u32,
) -> Result<usize, DecompressError> {
    // `dst.len()` is passed as the destination capacity, so the C library will
    // not write past the end of `dst`.
    let status = unsafe {
        blosc_rs_sys::blosc_decompress_ctx(
            src.as_ptr() as *const std::ffi::c_void,
            dst.as_mut_ptr() as *mut std::ffi::c_void,
            dst.len(),
            numinternalthreads as std::ffi::c_int,
        )
    };
    // A non-negative status is the number of decompressed bytes (0 is a valid
    // empty payload); a negative status is an internal blosc error code.
    match status {
        len if len >= 0 => Ok(len as usize),
        _ => Err(DecompressError::InternalError(status)),
    }
}

/// Error that can occur during decompression.
// Standard derives added for consistency with `Shuffle`/`CompressAlgo`, so
// callers can compare, clone and store these errors.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DecompressError {
    /// Error indicating that the destination buffer is too small to hold the decompressed data.
    DestinationBufferTooSmall,
    /// Error indicating that the data could not be decompressed.
    DecompressingError,
    /// blosc internal error, carrying the raw negative status code returned by the C library.
    InternalError(i32),
}
impl std::fmt::Display for DecompressError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            DecompressError::DestinationBufferTooSmall => {
                f.write_str("destination buffer is too small")
            }
            DecompressError::DecompressingError => f.write_str("failed to decompress the data"),
            DecompressError::InternalError(status) => write!(f, "blosc internal error: {status}"),
        }
    }
}
impl std::error::Error for DecompressError {}

/// Checks that `src` holds a valid blosc compressed buffer and, if so, returns
/// the decompressed size in bytes recorded in its header.
fn validate_compressed_slice_and_get_uncompressed_len(src: &[u8]) -> Option<usize> {
    let mut nbytes = 0;
    // SAFETY: the pointer/length pair describes the `src` slice exactly, and
    // `nbytes` is a valid out-parameter for the call.
    let status = unsafe {
        blosc_rs_sys::blosc_cbuffer_validate(
            src.as_ptr() as *const std::ffi::c_void,
            src.len(),
            &mut nbytes,
        )
    };
    // A negative status means the buffer failed validation.
    (status >= 0).then_some(nbytes)
}

#[cfg(test)]
mod tests {
    use std::num::NonZeroUsize;

    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};

    use crate::{CLevel, CompressAlgo, Shuffle};

    /// Randomized round-trip test: compress random buffers with random
    /// parameters and check that decompression restores the original bytes.
    #[test]
    fn round_trip() {
        // Fixed seed so any failure is reproducible.
        let mut rand = StdRng::seed_from_u64(0xb1ba0c326dc4dbba);

        for _ in 0..100 {
            // Pick a length bound spread across several orders of magnitude.
            let src_len = {
                let max_lens = [0x1, 0x10, 0x100, 0x1000, 0x10000, 0x100000];
                let max_len = max_lens[rand.random_range(0..max_lens.len())];
                rand.random_range(0..=max_len)
            };
            // NOTE(review): the actual length is re-drawn from 0..=src_len here,
            // so `src_len` is only an upper bound on the buffer size.
            let src = (0..rand.random_range(0..=src_len))
                .map(|_| rand.random_range(0..=255) as u8)
                .collect::<Vec<u8>>();
            let clevel: CLevel = rand.random_range(0..=9).try_into().unwrap();
            let shuffle = {
                let shuffles = [Shuffle::None, Shuffle::Byte, Shuffle::Bit];
                shuffles[rand.random_range(0..shuffles.len())]
            };
            // Choose a random typesize that divides the source length. The last
            // candidate range is 1..=1, so `find` always succeeds (1 divides
            // every length, including 0).
            let typesize = (1..=8)
                .map(|i| rand.random_range(1..=(1 << (8 - i))))
                .find(|&ts| src.len() % ts == 0)
                .unwrap();
            let compressor = {
                let compressors = [
                    CompressAlgo::Blosclz,
                    CompressAlgo::Lz4,
                    CompressAlgo::Lz4hc,
                    // CompressAlgo::Snappy,
                    CompressAlgo::Zlib,
                    CompressAlgo::Zstd,
                ];
                compressors[rand.random_range(0..compressors.len())].clone()
            };
            // Exercise both the automatic block size (None) and explicit sizes.
            let blocksize = {
                let blocksizes = [
                    Option::<NonZeroUsize>::None,
                    Some(1.try_into().unwrap()),
                    Some(64.try_into().unwrap()),
                    Some(4096.try_into().unwrap()),
                    Some(262144.try_into().unwrap()),
                    Some(rand.random_range(1..4096).try_into().unwrap()),
                ];
                blocksizes[rand.random_range(0..blocksizes.len())]
            };
            let numinternalthreads = rand.random_range(1..=16);

            let compressed = crate::compress(
                clevel,
                shuffle,
                typesize,
                &src,
                &compressor,
                blocksize,
                numinternalthreads,
            )
            .unwrap();

            let decompressed = crate::decompress(&compressed, numinternalthreads).unwrap();

            // The round trip must be lossless.
            assert_eq!(src, decompressed);
        }
    }
}