//! Compression codecs behind a common [`Codec`] trait, with optional
//! GPU-accelerated (nvcomp) backends gated on the `nvcomp-gpu` feature.

// The GPU backends are FFI-based and require `unsafe`; when the `nvcomp-gpu`
// feature is enabled, also require explicit `unsafe` blocks inside unsafe fns.
#![cfg_attr(feature = "nvcomp-gpu", deny(unsafe_op_in_unsafe_fn))]
#![allow(unsafe_code)]
#![allow(dead_code)]
mod algo;
mod error;
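
// GPU (nvcomp) backend modules, compiled only with the `nvcomp-gpu` feature.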
#[cfg(feature = "nvcomp-gpu")]
pub mod nvcomp_sys;
#[cfg(feature = "nvcomp-gpu")]
mod nvcomp;
#[cfg(feature = "nvcomp-gpu")]
mod nvcomp_hlif;
#[cfg(feature = "nvcomp-gpu")]
mod bitcomp_device;
#[cfg(feature = "nvcomp-gpu")]
mod slab_alloc;
pub use algo::{Algo, BitcompDataType, Tier};
pub use error::{Error, Result};
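
// Re-exports from the GPU backends, also gated on the `nvcomp-gpu` feature.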
#[cfg(feature = "nvcomp-gpu")]
pub use nvcomp::NvcompCodec;
#[cfg(feature = "nvcomp-gpu")]
pub use nvcomp_hlif::{
BitcompHlifBackend, DEFAULT_HLIF_CHUNK_SIZE, ZstdHlifBackend, cuda_available,
};
#[cfg(feature = "nvcomp-gpu")]
pub use bitcomp_device::BitcompDeviceCodec;
#[cfg(feature = "nvcomp-gpu")]
pub use slab_alloc::{SLAB_MAX_BUCKET_BYTES, SLAB_MIN_BUCKET_BYTES, SlabAllocator};
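
/// Common interface implemented by the compression backends in this crate.
///
/// `compress` and `decompress` operate on a single buffer, writing the result
/// into the caller-provided `Vec<u8>`; the `*_batch` methods work over
/// parallel slices of inputs and outputs.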
pub trait Codec: Send + Sync {
    /// The compression algorithm this codec implements.
    fn algo(&self) -> Algo;

    fn compress(&self, input: &[u8], output: &mut Vec<u8>) -> Result<()>;

    fn decompress(&self, input: &[u8], output: &mut Vec<u8>) -> Result<()>;

    /// Compresses `inputs[i]` into `outputs[i]`.
    ///
    /// The default implementation rejects mismatched slice lengths and then
    /// compresses each pair in turn; backends may override it with a true
    /// batched path.
    fn compress_batch(&self, inputs: &[&[u8]], outputs: &mut [Vec<u8>]) -> Result<()> {
        if inputs.len() != outputs.len() {
            return Err(Error::BatchLenMismatch {
                inputs: inputs.len(),
                outputs: outputs.len(),
            });
        }
        for (input, output) in inputs.iter().zip(outputs.iter_mut()) {
            self.compress(input, output)?;
        }
        Ok(())
    }

    /// Decompresses `inputs[i]` into `outputs[i]`; see [`Codec::compress_batch`]
    /// for the behaviour of the default implementation.
    fn decompress_batch(&self, inputs: &[&[u8]], outputs: &mut [Vec<u8>]) -> Result<()> {
        if inputs.len() != outputs.len() {
            return Err(Error::BatchLenMismatch {
                inputs: inputs.len(),
                outputs: outputs.len(),
            });
        }
        for (input, output) in inputs.iter().zip(outputs.iter_mut()) {
            self.decompress(input, output)?;
        }
        Ok(())
    }

    /// Upper bound on the compressed size of `uncompressed_len` input bytes,
    /// suitable for pre-allocating output buffers.
    fn max_compressed_len(&self, uncompressed_len: usize) -> usize;
}
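
// A minimal usage sketch, not part of this crate's API: it shows how a caller
// might drive any `Codec` through a compress/decompress round trip. The
// concrete codec (e.g. one of the feature-gated GPU backends) is assumed to
// be constructed elsewhere; `roundtrip_sketch` is a hypothetical helper name.
#[allow(dead_code)]
fn roundtrip_sketch(codec: &dyn Codec, payload: &[u8]) -> Result<Vec<u8>> {
    // Pre-size the output using the codec's worst-case bound.
    let mut compressed = Vec::with_capacity(codec.max_compressed_len(payload.len()));
    codec.compress(payload, &mut compressed)?;

    // Decompress back into a fresh buffer and return it to the caller.
    let mut restored = Vec::new();
    codec.decompress(&compressed, &mut restored)?;
    Ok(restored)
}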