pub(crate) mod binary_index;
pub(crate) mod decoder;
mod encoder;
pub mod hash_index;
mod header;
mod offset;
mod trailer;
mod r#type;
pub(crate) use decoder::{Decodable, Decoder, ParsedItem};
pub(crate) use encoder::{Encodable, Encoder};
pub use header::Header;
pub use offset::BlockOffset;
pub(crate) use trailer::{TRAILER_START_MARKER, Trailer};
pub use r#type::BlockType;
#[cfg(zstd_any)]
use crate::compression::CompressionProvider as _;
use crate::{
Checksum, CompressionType, Slice,
coding::{Decode, Encode},
encryption::EncryptionProvider,
fs::FsFile,
table::BlockHandle,
};
const MAX_DECOMPRESSION_SIZE: u32 = 256 * 1024 * 1024;
/// A single decoded block: the on-disk [`Header`] plus the payload after
/// decryption and decompression (see [`Block::from_reader`] / [`Block::from_file`]).
#[derive(Clone)]
pub struct Block {
    /// Header exactly as read from (or written to) disk.
    pub header: Header,
    /// Decoded payload (decrypted and decompressed).
    pub data: Slice,
}
impl Block {
#[must_use]
pub fn size(&self) -> usize {
self.data.len()
}
/// Serializes `data` as one block into `writer`: optional compression, then
/// optional encryption, then a [`Header`] followed by the processed payload.
///
/// Pipeline order is compress → encrypt, and the checksum is computed over
/// the final on-disk payload (after both steps), so readers can verify
/// integrity before decrypting.
///
/// Returns the header that was written (with `data_length` and `checksum`
/// filled in).
///
/// # Errors
///
/// - [`crate::Error::DecompressedSizeTooLarge`] if the input (or the final
///   payload) exceeds the size limit a reader would accept.
/// - [`crate::Error::ZstdDictMismatch`] if `ZstdDict` compression is requested
///   without a matching dictionary.
/// - I/O, compression, or encryption errors from the underlying providers.
pub fn write_into<W: std::io::Write>(
    mut writer: &mut W,
    data: &[u8],
    block_type: BlockType,
    compression: CompressionType,
    encryption: Option<&dyn EncryptionProvider>,
    #[cfg(zstd_any)] zstd_dict: Option<&crate::compression::ZstdDictionary>,
) -> crate::Result<Header> {
    // Reject inputs a reader would later refuse to decompress.
    if data.len() > MAX_DECOMPRESSION_SIZE as usize {
        return Err(crate::Error::DecompressedSizeTooLarge {
            declared: data.len() as u64,
            limit: u64::from(MAX_DECOMPRESSION_SIZE),
        });
    }
    // `checksum` and `data_length` are placeholders until the final payload is known.
    let mut header = Header {
        block_type,
        checksum: Checksum::from_raw(0), data_length: 0,
        #[expect(clippy::cast_possible_truncation, reason = "blocks are limited to u32")]
        uncompressed_length: data.len() as u32,
    };
    // Owned compressed payload; `None` means "use `data` as-is".
    #[cfg(any(feature = "lz4", zstd_any))]
    let mut compressed_buf: Option<Vec<u8>> = None;
    match compression {
        CompressionType::None => {}
        #[cfg(feature = "lz4")]
        CompressionType::Lz4 => {
            compressed_buf = Some(lz4_flex::compress(data));
        }
        #[cfg(zstd_any)]
        CompressionType::Zstd(level) => {
            compressed_buf = Some(crate::compression::ZstdBackend::compress(data, level)?);
        }
        #[cfg(zstd_any)]
        CompressionType::ZstdDict { level, dict_id } => {
            // The caller must supply the exact dictionary the block is declared with.
            let dict = zstd_dict.ok_or(crate::Error::ZstdDictMismatch {
                expected: dict_id,
                got: None,
            })?;
            if dict.id() != dict_id {
                return Err(crate::Error::ZstdDictMismatch {
                    expected: dict_id,
                    got: Some(dict.id()),
                });
            }
            compressed_buf = Some(crate::compression::ZstdBackend::compress_with_dict(
                data,
                level,
                dict.raw(),
            )?);
        }
    }
    // Encryption runs after compression; it consumes the compressed buffer
    // when one exists, otherwise encrypts the raw input.
    let encrypted_buf: Option<Vec<u8>>;
    #[cfg(any(feature = "lz4", zstd_any))]
    {
        encrypted_buf = if let Some(enc) = encryption {
            Some(match compressed_buf.take() {
                Some(owned) => enc.encrypt_vec(owned)?,
                None => enc.encrypt(data)?,
            })
        } else {
            None
        };
    }
    #[cfg(not(any(feature = "lz4", zstd_any)))]
    {
        encrypted_buf = encryption.map(|enc| enc.encrypt(data)).transpose()?;
    }
    // Final on-disk payload: encrypted > compressed > raw, in that priority.
    let payload: &[u8] = if let Some(ref enc) = encrypted_buf {
        enc
    } else {
        #[cfg(any(feature = "lz4", zstd_any))]
        {
            compressed_buf.as_deref().unwrap_or(data)
        }
        #[cfg(not(any(feature = "lz4", zstd_any)))]
        {
            data
        }
    };
    // Encryption may add fixed overhead (e.g. nonce/tag), so the on-disk cap
    // sits slightly above MAX_DECOMPRESSION_SIZE — clamped to what fits in u32
    // because `data_length` is a u32 field.
    let max_payload = (u64::from(MAX_DECOMPRESSION_SIZE)
        + encryption.map_or(0u64, |enc| u64::from(enc.max_overhead())))
    .min(u64::from(u32::MAX));
    if payload.len() as u64 > max_payload {
        return Err(crate::Error::DecompressedSizeTooLarge {
            declared: payload.len() as u64,
            limit: max_payload,
        });
    }
    #[expect(clippy::cast_possible_truncation, reason = "bounded by check above")]
    let payload_len = payload.len() as u32;
    header.data_length = payload_len;
    // Checksum covers the payload exactly as stored on disk (post-encryption),
    // so readers can verify before decrypting.
    header.checksum = Checksum::from_raw(crate::hash::hash128(payload));
    header.encode_into(&mut writer)?;
    writer.write_all(payload)?;
    log::trace!(
        "Writing block with size {}B (on-disk: {}B) (excluding header of {}B)",
        header.uncompressed_length,
        header.data_length,
        Header::serialized_len(),
    );
    Ok(header)
}
/// Reads and decodes a single block from `reader`.
///
/// On-disk layout: a [`Header`] followed by `header.data_length` payload
/// bytes. The checksum is verified over the raw on-disk payload — i.e.
/// before decryption and decompression — mirroring the write path. The
/// payload is then decrypted (when `encryption` is set) and decompressed
/// according to `compression`.
///
/// # Errors
///
/// Fails on oversized header length fields, checksum mismatch, decryption or
/// decompression failure, a decoded length that differs from
/// `uncompressed_length`, or a dictionary mismatch for `ZstdDict` blocks.
#[expect(
    clippy::too_many_lines,
    reason = "encrypt/no-encrypt branches duplicate compression match — see comment above"
)]
pub fn from_reader<R: std::io::Read>(
    reader: &mut R,
    compression: CompressionType,
    encryption: Option<&dyn EncryptionProvider>,
    #[cfg(zstd_any)] zstd_dict: Option<&crate::compression::ZstdDictionary>,
) -> crate::Result<Self> {
    let header = Header::decode_from(reader)?;
    // Validate both length fields BEFORE allocating, so a corrupted header
    // cannot trigger a huge allocation. The on-disk cap allows for the
    // encryption provider's per-block overhead.
    let enc_overhead = encryption.map_or(0u64, |e| u64::from(e.max_overhead()));
    let max_data_length = u64::from(MAX_DECOMPRESSION_SIZE) + enc_overhead;
    if u64::from(header.data_length) > max_data_length {
        return Err(crate::Error::DecompressedSizeTooLarge {
            declared: u64::from(header.data_length),
            limit: max_data_length,
        });
    }
    if header.uncompressed_length > MAX_DECOMPRESSION_SIZE {
        return Err(crate::Error::DecompressedSizeTooLarge {
            declared: u64::from(header.uncompressed_length),
            limit: u64::from(MAX_DECOMPRESSION_SIZE),
        });
    }
    let data = if let Some(enc) = encryption {
        // Encrypted path: read ciphertext, verify its checksum, decrypt,
        // then decompress the plaintext.
        let mut raw_vec = vec![0u8; header.data_length as usize];
        reader.read_exact(&mut raw_vec)?;
        let checksum = Checksum::from_raw(crate::hash::hash128(&raw_vec));
        checksum.check(header.checksum).inspect_err(|_| {
            log::error!(
                "Checksum mismatch for <bufreader>, got={}, expected={}",
                checksum,
                header.checksum,
            );
        })?;
        let decrypted = enc.decrypt_vec(raw_vec)?;
        match compression {
            CompressionType::None => {
                #[expect(
                    clippy::cast_possible_truncation,
                    reason = "values are u32 length max"
                )]
                let actual_len = decrypted.len() as u32;
                // Uncompressed data must match the declared length exactly.
                if header.uncompressed_length != actual_len {
                    return Err(crate::Error::InvalidHeader("Block"));
                }
                Slice::from(decrypted)
            }
            #[cfg(feature = "lz4")]
            CompressionType::Lz4 => {
                let mut buf = vec![0u8; header.uncompressed_length as usize];
                let bytes_written = lz4_flex::decompress_into(&decrypted, &mut buf)
                    .map_err(|_| crate::Error::Decompress(compression))?;
                // A short result means the header lied about the size.
                if bytes_written != header.uncompressed_length as usize {
                    return Err(crate::Error::Decompress(compression));
                }
                Slice::from(buf)
            }
            #[cfg(zstd_any)]
            CompressionType::Zstd(_) => {
                let decompressed = crate::compression::ZstdBackend::decompress(
                    &decrypted,
                    header.uncompressed_length as usize,
                )
                .map_err(|_| crate::Error::Decompress(compression))?;
                if decompressed.len() != header.uncompressed_length as usize {
                    return Err(crate::Error::Decompress(compression));
                }
                Slice::from(decompressed)
            }
            #[cfg(zstd_any)]
            CompressionType::ZstdDict { dict_id, .. } => {
                // Dictionary-compressed blocks require the exact dictionary
                // they were written with.
                let dict = zstd_dict.ok_or(crate::Error::ZstdDictMismatch {
                    expected: dict_id,
                    got: None,
                })?;
                if dict.id() != dict_id {
                    return Err(crate::Error::ZstdDictMismatch {
                        expected: dict_id,
                        got: Some(dict.id()),
                    });
                }
                let decompressed = crate::compression::ZstdBackend::decompress_with_dict(
                    &decrypted,
                    dict.raw(),
                    header.uncompressed_length as usize,
                )
                .map_err(|_| crate::Error::Decompress(compression))?;
                if decompressed.len() != header.uncompressed_length as usize {
                    return Err(crate::Error::Decompress(compression));
                }
                Slice::from(decompressed)
            }
        }
    } else {
        // Plain path: same as above, minus decryption. Reads straight into a
        // `Slice` so the uncompressed case needs no extra copy.
        let raw_data = Slice::from_reader(reader, header.data_length as usize)?;
        let checksum = Checksum::from_raw(crate::hash::hash128(&raw_data));
        checksum.check(header.checksum).inspect_err(|_| {
            log::error!(
                "Checksum mismatch for <bufreader>, got={}, expected={}",
                checksum,
                header.checksum,
            );
        })?;
        match compression {
            CompressionType::None => {
                #[expect(
                    clippy::cast_possible_truncation,
                    reason = "values are u32 length max"
                )]
                let actual_len = raw_data.len() as u32;
                if header.uncompressed_length != actual_len {
                    return Err(crate::Error::InvalidHeader("Block"));
                }
                raw_data
            }
            #[cfg(feature = "lz4")]
            CompressionType::Lz4 => {
                let mut buf = vec![0u8; header.uncompressed_length as usize];
                let bytes_written = lz4_flex::decompress_into(&raw_data, &mut buf)
                    .map_err(|_| crate::Error::Decompress(compression))?;
                if bytes_written != header.uncompressed_length as usize {
                    return Err(crate::Error::Decompress(compression));
                }
                Slice::from(buf)
            }
            #[cfg(zstd_any)]
            CompressionType::Zstd(_) => {
                let decompressed = crate::compression::ZstdBackend::decompress(
                    &raw_data,
                    header.uncompressed_length as usize,
                )
                .map_err(|_| crate::Error::Decompress(compression))?;
                if decompressed.len() != header.uncompressed_length as usize {
                    return Err(crate::Error::Decompress(compression));
                }
                Slice::from(decompressed)
            }
            #[cfg(zstd_any)]
            CompressionType::ZstdDict { dict_id, .. } => {
                let dict = zstd_dict.ok_or(crate::Error::ZstdDictMismatch {
                    expected: dict_id,
                    got: None,
                })?;
                if dict.id() != dict_id {
                    return Err(crate::Error::ZstdDictMismatch {
                        expected: dict_id,
                        got: Some(dict.id()),
                    });
                }
                let decompressed = crate::compression::ZstdBackend::decompress_with_dict(
                    &raw_data,
                    dict.raw(),
                    header.uncompressed_length as usize,
                )
                .map_err(|_| crate::Error::Decompress(compression))?;
                if decompressed.len() != header.uncompressed_length as usize {
                    return Err(crate::Error::Decompress(compression));
                }
                Slice::from(decompressed)
            }
        }
    };
    Ok(Self { header, data })
}
/// Reads and decodes a single block from `file` at the position described by
/// `handle`, via a single positioned read of the whole block (header +
/// payload).
///
/// Validation mirrors [`Block::from_reader`]: the handle size is bounded
/// before allocating, the header's `data_length` must match the bytes
/// actually present, and the checksum is verified over the raw on-disk
/// payload before any decryption/decompression.
///
/// # Errors
///
/// Fails on an oversized or inconsistent handle/header, short reads,
/// checksum mismatch, decryption or decompression failure, or a dictionary
/// mismatch for `ZstdDict` blocks.
#[expect(
    clippy::too_many_lines,
    reason = "encrypt/no-encrypt branches duplicate compression match — see from_reader"
)]
pub fn from_file(
    file: &dyn FsFile,
    handle: BlockHandle,
    compression: CompressionType,
    encryption: Option<&dyn EncryptionProvider>,
    #[cfg(zstd_any)] zstd_dict: Option<&crate::compression::ZstdDictionary>,
) -> crate::Result<Self> {
    // Bound the total on-disk size (payload cap + header + encryption
    // overhead) before any allocation happens.
    let enc_overhead = encryption.map_or(0u64, |e| u64::from(e.max_overhead()));
    let max_on_disk_size =
        u64::from(MAX_DECOMPRESSION_SIZE) + Header::serialized_len() as u64 + enc_overhead;
    if u64::from(handle.size()) > max_on_disk_size {
        return Err(crate::Error::DecompressedSizeTooLarge {
            declared: u64::from(handle.size()),
            limit: max_on_disk_size,
        });
    }
    let (header, data) = if let Some(enc) = encryption {
        // Encrypted path: read header + ciphertext in one shot, verify the
        // checksum over the ciphertext, then decrypt and decompress.
        let header_len = Header::serialized_len();
        let block_size = handle.size() as usize;
        if block_size < header_len {
            return Err(crate::Error::InvalidHeader("Block"));
        }
        let mut buf = vec![0u8; block_size];
        let n = file.read_at(&mut buf, *handle.offset())?;
        // `read_at` may return fewer bytes than requested; treat that as EOF.
        if n != block_size {
            return Err(crate::Error::Io(std::io::Error::new(
                std::io::ErrorKind::UnexpectedEof,
                format!(
                    "block read_at: expected {block_size} bytes, got {n} at offset {}",
                    *handle.offset(),
                ),
            )));
        }
        #[expect(
            clippy::indexing_slicing,
            reason = "buf.len() == block_size == handle.size() ≥ Header::serialized_len()"
        )]
        let parsed_header = Header::decode_from(&mut &buf[..header_len])?;
        // The header's declared payload length must match what the handle
        // actually covers.
        let actual_data_len = block_size.saturating_sub(header_len);
        if parsed_header.data_length as usize != actual_data_len {
            return Err(crate::Error::InvalidHeader("Block"));
        }
        if parsed_header.uncompressed_length > MAX_DECOMPRESSION_SIZE {
            return Err(crate::Error::DecompressedSizeTooLarge {
                declared: u64::from(parsed_header.uncompressed_length),
                limit: u64::from(MAX_DECOMPRESSION_SIZE),
            });
        }
        #[expect(clippy::indexing_slicing, reason = "header was decoded from buf")]
        let checksum = Checksum::from_raw(crate::hash::hash128(&buf[header_len..]));
        checksum.check(parsed_header.checksum).inspect_err(|_| {
            log::error!(
                "Checksum mismatch for block {handle:?}, got={}, expected={}",
                checksum,
                parsed_header.checksum,
            );
        })?;
        // Shift the payload to the front and truncate, reusing `buf` for the
        // ciphertext instead of allocating a second buffer.
        buf.copy_within(header_len.., 0);
        buf.truncate(actual_data_len);
        let decrypted = enc.decrypt_vec(buf)?;
        let data = match compression {
            CompressionType::None => {
                #[expect(
                    clippy::cast_possible_truncation,
                    reason = "values are u32 length max"
                )]
                let actual_len = decrypted.len() as u32;
                // Uncompressed data must match the declared length exactly.
                if parsed_header.uncompressed_length != actual_len {
                    return Err(crate::Error::InvalidHeader("Block"));
                }
                Slice::from(decrypted)
            }
            #[cfg(feature = "lz4")]
            CompressionType::Lz4 => {
                let mut decompressed = vec![0u8; parsed_header.uncompressed_length as usize];
                let bytes_written = lz4_flex::decompress_into(&decrypted, &mut decompressed)
                    .map_err(|_| crate::Error::Decompress(compression))?;
                // A short result means the header lied about the size.
                if bytes_written != parsed_header.uncompressed_length as usize {
                    return Err(crate::Error::Decompress(compression));
                }
                Slice::from(decompressed)
            }
            #[cfg(zstd_any)]
            CompressionType::Zstd(_) => {
                let decompressed = crate::compression::ZstdBackend::decompress(
                    &decrypted,
                    parsed_header.uncompressed_length as usize,
                )
                .map_err(|_| crate::Error::Decompress(compression))?;
                if decompressed.len() != parsed_header.uncompressed_length as usize {
                    return Err(crate::Error::Decompress(compression));
                }
                Slice::from(decompressed)
            }
            #[cfg(zstd_any)]
            CompressionType::ZstdDict { dict_id, .. } => {
                // Dictionary-compressed blocks require the exact dictionary
                // they were written with.
                let dict = zstd_dict.ok_or(crate::Error::ZstdDictMismatch {
                    expected: dict_id,
                    got: None,
                })?;
                if dict.id() != dict_id {
                    return Err(crate::Error::ZstdDictMismatch {
                        expected: dict_id,
                        got: Some(dict.id()),
                    });
                }
                let decompressed = crate::compression::ZstdBackend::decompress_with_dict(
                    &decrypted,
                    dict.raw(),
                    parsed_header.uncompressed_length as usize,
                )
                .map_err(|_| crate::Error::Decompress(compression))?;
                if decompressed.len() != parsed_header.uncompressed_length as usize {
                    return Err(crate::Error::Decompress(compression));
                }
                Slice::from(decompressed)
            }
        };
        (parsed_header, data)
    } else {
        // Plain path: same validation as above, minus decryption. The
        // uncompressed case can return a zero-copy sub-slice of the read.
        let buf = crate::file::read_exact(file, *handle.offset(), handle.size() as usize)?;
        let parsed_header = Header::decode_from(&mut &buf[..])?;
        let actual_data_len = buf.len().saturating_sub(Header::serialized_len());
        if parsed_header.data_length as usize != actual_data_len {
            return Err(crate::Error::InvalidHeader("Block"));
        }
        if parsed_header.uncompressed_length > MAX_DECOMPRESSION_SIZE {
            return Err(crate::Error::DecompressedSizeTooLarge {
                declared: u64::from(parsed_header.uncompressed_length),
                limit: u64::from(MAX_DECOMPRESSION_SIZE),
            });
        }
        #[expect(clippy::indexing_slicing, reason = "header was decoded from buf")]
        let checksum =
            Checksum::from_raw(crate::hash::hash128(&buf[Header::serialized_len()..]));
        checksum.check(parsed_header.checksum).inspect_err(|_| {
            log::error!(
                "Checksum mismatch for block {handle:?}, got={}, expected={}",
                checksum,
                parsed_header.checksum,
            );
        })?;
        let data = match compression {
            CompressionType::None => {
                let value = buf.slice(Header::serialized_len()..);
                #[expect(
                    clippy::cast_possible_truncation,
                    reason = "values are u32 length max"
                )]
                let actual_len = value.len() as u32;
                if parsed_header.uncompressed_length != actual_len {
                    return Err(crate::Error::InvalidHeader("Block"));
                }
                value
            }
            #[cfg(feature = "lz4")]
            CompressionType::Lz4 => {
                #[expect(clippy::indexing_slicing, reason = "header was decoded from buf")]
                let compressed_data = &buf[Header::serialized_len()..];
                let mut decompressed = vec![0u8; parsed_header.uncompressed_length as usize];
                let bytes_written =
                    lz4_flex::decompress_into(compressed_data, &mut decompressed)
                        .map_err(|_| crate::Error::Decompress(compression))?;
                if bytes_written != parsed_header.uncompressed_length as usize {
                    return Err(crate::Error::Decompress(compression));
                }
                Slice::from(decompressed)
            }
            #[cfg(zstd_any)]
            CompressionType::Zstd(_) => {
                #[expect(clippy::indexing_slicing, reason = "header was decoded from buf")]
                let compressed_data = &buf[Header::serialized_len()..];
                let decompressed = crate::compression::ZstdBackend::decompress(
                    compressed_data,
                    parsed_header.uncompressed_length as usize,
                )
                .map_err(|_| crate::Error::Decompress(compression))?;
                if decompressed.len() != parsed_header.uncompressed_length as usize {
                    return Err(crate::Error::Decompress(compression));
                }
                Slice::from(decompressed)
            }
            #[cfg(zstd_any)]
            CompressionType::ZstdDict { dict_id, .. } => {
                #[expect(clippy::indexing_slicing, reason = "header was decoded from buf")]
                let compressed_data = &buf[Header::serialized_len()..];
                let dict = zstd_dict.ok_or(crate::Error::ZstdDictMismatch {
                    expected: dict_id,
                    got: None,
                })?;
                if dict.id() != dict_id {
                    return Err(crate::Error::ZstdDictMismatch {
                        expected: dict_id,
                        got: Some(dict.id()),
                    });
                }
                let decompressed = crate::compression::ZstdBackend::decompress_with_dict(
                    compressed_data,
                    dict.raw(),
                    parsed_header.uncompressed_length as usize,
                )
                .map_err(|_| crate::Error::Decompress(compression))?;
                if decompressed.len() != parsed_header.uncompressed_length as usize {
                    return Err(crate::Error::Decompress(compression));
                }
                Slice::from(decompressed)
            }
        };
        (parsed_header, data)
    };
    Ok(Self { header, data })
}
}
#[cfg(test)]
#[allow(
clippy::unwrap_used,
clippy::indexing_slicing,
clippy::useless_vec,
clippy::cast_possible_truncation,
clippy::expect_used,
reason = "test code"
)]
mod tests {
use super::*;
use test_log::test;
#[test]
fn block_from_file_roundtrip_uncompressed() -> crate::Result<()> {
use std::io::Write;
let data = b"abcdefabcdefabcdef";
let mut buf = vec![];
let header = Block::write_into(
&mut buf,
data,
BlockType::Data,
CompressionType::None,
None,
#[cfg(zstd_any)]
None,
)?;
let dir = tempfile::tempdir()?;
let path = dir.path().join("block");
let mut file = std::fs::File::create(&path)?;
file.write_all(&buf)?;
file.sync_all()?;
drop(file);
let file = std::fs::File::open(&path)?;
let handle = crate::table::BlockHandle::new(
BlockOffset(0),
header.data_length + Header::serialized_len() as u32,
);
let block = Block::from_file(
&file,
handle,
CompressionType::None,
None,
#[cfg(zstd_any)]
None,
)?;
assert_eq!(data, &*block.data);
Ok(())
}
#[test]
#[cfg(feature = "lz4")]
fn block_from_file_roundtrip_lz4() -> crate::Result<()> {
use std::io::Write;
let data = b"abcdefabcdefabcdef";
let mut buf = vec![];
let header = Block::write_into(
&mut buf,
data,
BlockType::Data,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
)?;
let dir = tempfile::tempdir()?;
let path = dir.path().join("block");
let mut file = std::fs::File::create(&path)?;
file.write_all(&buf)?;
file.sync_all()?;
drop(file);
let file = std::fs::File::open(&path)?;
let handle = crate::table::BlockHandle::new(
BlockOffset(0),
header.data_length + Header::serialized_len() as u32,
);
let block = Block::from_file(
&file,
handle,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
)?;
assert_eq!(data, &*block.data);
Ok(())
}
#[test]
#[cfg(zstd_any)]
fn block_from_file_roundtrip_zstd() -> crate::Result<()> {
use std::io::Write;
let data = b"abcdefabcdefabcdef";
let mut buf = vec![];
let header = Block::write_into(
&mut buf,
data,
BlockType::Data,
CompressionType::Zstd(3),
None,
None,
)?;
let dir = tempfile::tempdir()?;
let path = dir.path().join("block");
let mut file = std::fs::File::create(&path)?;
file.write_all(&buf)?;
file.sync_all()?;
drop(file);
let file = std::fs::File::open(&path)?;
let handle = crate::table::BlockHandle::new(
BlockOffset(0),
header.data_length + Header::serialized_len() as u32,
);
let block = Block::from_file(&file, handle, CompressionType::Zstd(3), None, None)?;
assert_eq!(data, &*block.data);
Ok(())
}
#[test]
fn block_roundtrip_uncompressed() -> crate::Result<()> {
let mut writer = vec![];
Block::write_into(
&mut writer,
b"abcdefabcdefabcdef",
BlockType::Data,
CompressionType::None,
None,
#[cfg(zstd_any)]
None,
)?;
{
let mut reader = &writer[..];
let block = Block::from_reader(
&mut reader,
CompressionType::None,
None,
#[cfg(zstd_any)]
None,
)?;
assert_eq!(b"abcdefabcdefabcdef", &*block.data);
}
Ok(())
}
#[test]
#[cfg(feature = "lz4")]
fn block_roundtrip_lz4() -> crate::Result<()> {
let mut writer = vec![];
Block::write_into(
&mut writer,
b"abcdefabcdefabcdef",
BlockType::Data,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
)?;
{
let mut reader = &writer[..];
let block = Block::from_reader(
&mut reader,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
)?;
assert_eq!(b"abcdefabcdefabcdef", &*block.data);
}
Ok(())
}
#[test]
#[cfg(feature = "lz4")]
fn block_reject_absurd_uncompressed_length() {
use crate::coding::Encode;
let mut buf = vec![];
Block::write_into(
&mut buf,
b"hello",
BlockType::Data,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
)
.unwrap();
let mut reader = &buf[..];
let mut header = Header::decode_from(&mut reader).unwrap();
let compressed_payload: Vec<u8> = reader.to_vec();
header.uncompressed_length = u32::MAX;
let mut tampered = header.encode_into_vec();
tampered.extend_from_slice(&compressed_payload);
let mut r = &tampered[..];
let result = Block::from_reader(
&mut r,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
);
assert!(
matches!(&result, Err(crate::Error::DecompressedSizeTooLarge { .. })),
"expected DecompressedSizeTooLarge, got: {:?}",
result.err(),
);
}
#[test]
#[cfg(feature = "lz4")]
fn block_zero_uncompressed_length_with_data_fails_decompress() {
use crate::coding::Encode;
let mut buf = vec![];
Block::write_into(
&mut buf,
b"hello",
BlockType::Data,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
)
.unwrap();
let mut reader = &buf[..];
let mut header = Header::decode_from(&mut reader).unwrap();
let compressed_payload: Vec<u8> = reader.to_vec();
header.uncompressed_length = 0;
let mut tampered = header.encode_into_vec();
tampered.extend_from_slice(&compressed_payload);
let mut r = &tampered[..];
let result = Block::from_reader(
&mut r,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
);
assert!(
matches!(&result, Err(crate::Error::Decompress(_))),
"expected Decompress error, got: {:?}",
result.err(),
);
}
#[test]
#[cfg(feature = "lz4")]
fn lz4_corrupted_uncompressed_length_triggers_decompress_error() {
use crate::coding::Encode;
use std::io::Cursor;
let payload: &[u8] = b"hello world";
let compressed = lz4_flex::compress(payload);
let data_length = compressed.len() as u32;
let uncompressed_length_correct = payload.len() as u32;
let uncompressed_length_corrupted = uncompressed_length_correct + 1;
let checksum = Checksum::from_raw(crate::hash::hash128(&compressed));
let header = Header {
data_length,
uncompressed_length: uncompressed_length_corrupted,
checksum,
block_type: BlockType::Data,
};
let mut buf = header.encode_into_vec();
buf.extend_from_slice(&compressed);
let mut cursor = Cursor::new(buf);
let result = Block::from_reader(
&mut cursor,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
);
match result {
Err(crate::Error::Decompress(CompressionType::Lz4)) => { }
Ok(_) => panic!("expected Error::Decompress, but got Ok(Block)"),
Err(other) => panic!("expected Error::Decompress, got different error: {other:?}"),
}
}
#[test]
#[cfg(feature = "lz4")]
fn block_from_file_reject_absurd_uncompressed_length() {
use crate::coding::Encode;
use std::io::Write;
let mut buf = vec![];
Block::write_into(
&mut buf,
b"hello",
BlockType::Data,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
)
.unwrap();
let mut reader = &buf[..];
let mut header = Header::decode_from(&mut reader).unwrap();
let compressed_payload: Vec<u8> = reader.to_vec();
header.uncompressed_length = u32::MAX;
let mut tampered = header.encode_into_vec();
tampered.extend_from_slice(&compressed_payload);
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(&tampered).unwrap();
tmp.flush().unwrap();
let file = std::fs::File::open(tmp.path()).unwrap();
let handle = crate::table::BlockHandle::new(BlockOffset(0), tampered.len() as u32);
let result = Block::from_file(
&file,
handle,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
);
assert!(
matches!(&result, Err(crate::Error::DecompressedSizeTooLarge { .. })),
"expected DecompressedSizeTooLarge, got: {:?}",
result.err(),
);
}
#[test]
#[cfg(feature = "lz4")]
fn block_from_file_zero_uncompressed_length_with_data_fails_decompress() {
use crate::coding::Encode;
use std::io::Write;
let mut buf = vec![];
Block::write_into(
&mut buf,
b"hello",
BlockType::Data,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
)
.unwrap();
let mut reader = &buf[..];
let mut header = Header::decode_from(&mut reader).unwrap();
let compressed_payload: Vec<u8> = reader.to_vec();
header.uncompressed_length = 0;
let mut tampered = header.encode_into_vec();
tampered.extend_from_slice(&compressed_payload);
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(&tampered).unwrap();
tmp.flush().unwrap();
let file = std::fs::File::open(tmp.path()).unwrap();
let handle = crate::table::BlockHandle::new(BlockOffset(0), tampered.len() as u32);
let result = Block::from_file(
&file,
handle,
CompressionType::Lz4,
None,
#[cfg(zstd_any)]
None,
);
assert!(
matches!(&result, Err(crate::Error::Decompress(_))),
"expected Decompress error, got: {:?}",
result.err(),
);
}
#[test]
fn block_from_reader_reject_absurd_data_length() {
use crate::coding::Encode;
let mut buf = vec![];
Block::write_into(
&mut buf,
b"hello",
BlockType::Data,
CompressionType::None,
None,
#[cfg(zstd_any)]
None,
)
.unwrap();
let mut reader = &buf[..];
let mut header = Header::decode_from(&mut reader).unwrap();
let payload: Vec<u8> = reader.to_vec();
header.data_length = MAX_DECOMPRESSION_SIZE + 1;
let mut tampered = header.encode_into_vec();
tampered.extend_from_slice(&payload);
let mut r = &tampered[..];
let result = Block::from_reader(
&mut r,
CompressionType::None,
None,
#[cfg(zstd_any)]
None,
);
assert!(
matches!(&result, Err(crate::Error::DecompressedSizeTooLarge { .. })),
"expected DecompressedSizeTooLarge, got: {:?}",
result.err(),
);
}
#[test]
fn block_from_file_reject_oversized_handle() {
use std::io::Write;
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(b"dummy").unwrap();
tmp.flush().unwrap();
let file = std::fs::File::open(tmp.path()).unwrap();
let handle = crate::table::BlockHandle::new(BlockOffset(0), u32::MAX);
let result = Block::from_file(
&file,
handle,
CompressionType::None,
None,
#[cfg(zstd_any)]
None,
);
assert!(
matches!(&result, Err(crate::Error::DecompressedSizeTooLarge { .. })),
"expected DecompressedSizeTooLarge, got: {:?}",
result.err(),
);
}
#[test]
#[cfg(zstd_any)]
fn zstd_corrupted_uncompressed_length_triggers_decompress_error() {
use crate::coding::Encode;
use std::io::Cursor;
let payload: &[u8] = b"hello world";
let compressed =
crate::compression::ZstdBackend::compress(payload, 3).expect("zstd compress failed");
let data_length = compressed.len() as u32;
let uncompressed_length_corrupted = payload.len() as u32 + 1;
let checksum = Checksum::from_raw(crate::hash::hash128(&compressed));
let header = Header {
data_length,
uncompressed_length: uncompressed_length_corrupted,
checksum,
block_type: BlockType::Data,
};
let mut buf = header.encode_into_vec();
buf.extend_from_slice(&compressed);
let mut cursor = Cursor::new(buf);
let result = Block::from_reader(&mut cursor, CompressionType::Zstd(3), None, None);
match result {
Err(crate::Error::Decompress(CompressionType::Zstd(_))) => { }
Ok(_) => panic!("expected Error::Decompress, but got Ok(Block)"),
Err(other) => panic!("expected Error::Decompress, got different error: {other:?}"),
}
}
#[test]
#[cfg(zstd_any)]
fn zstd_decreased_uncompressed_length_triggers_decompress_error() {
use crate::coding::Encode;
use std::io::Cursor;
let payload: &[u8] = b"hello world hello world hello world";
let compressed =
crate::compression::ZstdBackend::compress(payload, 3).expect("zstd compress failed");
let data_length = compressed.len() as u32;
let uncompressed_length_too_small = payload.len() as u32 - 1;
let checksum = Checksum::from_raw(crate::hash::hash128(&compressed));
let header = Header {
data_length,
uncompressed_length: uncompressed_length_too_small,
checksum,
block_type: BlockType::Data,
};
let mut buf = header.encode_into_vec();
buf.extend_from_slice(&compressed);
let mut cursor = Cursor::new(buf);
let result = Block::from_reader(&mut cursor, CompressionType::Zstd(3), None, None);
match result {
Err(crate::Error::Decompress(CompressionType::Zstd(_))) => { }
Ok(_) => panic!("expected Error::Decompress, but got Ok(Block)"),
Err(other) => panic!("expected Error::Decompress, got different error: {other:?}"),
}
}
#[test]
#[cfg(zstd_any)]
fn block_roundtrip_zstd() -> crate::Result<()> {
let mut writer = vec![];
Block::write_into(
&mut writer,
b"abcdefabcdefabcdef",
BlockType::Data,
CompressionType::Zstd(3),
None,
None,
)?;
{
let mut reader = &writer[..];
let block = Block::from_reader(&mut reader, CompressionType::Zstd(3), None, None)?;
assert_eq!(b"abcdefabcdefabcdef", &*block.data);
}
Ok(())
}
#[test]
fn block_write_rejects_oversized_payload() {
let oversized = vec![0u8; MAX_DECOMPRESSION_SIZE as usize + 1];
let mut sink = std::io::sink();
let result = Block::write_into(
&mut sink,
&oversized,
BlockType::Data,
CompressionType::None,
None,
#[cfg(zstd_any)]
None,
);
assert!(
matches!(result, Err(crate::Error::DecompressedSizeTooLarge { .. })),
"expected DecompressedSizeTooLarge, got: {result:?}",
);
}
#[test]
#[cfg(zstd_any)]
fn block_roundtrip_zstd_large_data() -> crate::Result<()> {
let data = vec![0xABu8; 64 * 1024]; let mut writer = vec![];
Block::write_into(
&mut writer,
&data,
BlockType::Data,
CompressionType::Zstd(3),
None,
None,
)?;
assert!(
writer.len() < data.len(),
"zstd should compress repeated data"
);
{
let mut reader = &writer[..];
let block = Block::from_reader(&mut reader, CompressionType::Zstd(3), None, None)?;
assert_eq!(&*block.data, &data[..]);
}
Ok(())
}
#[cfg(feature = "encryption")]
mod encrypted {
use crate::table::block::*;
fn test_provider() -> crate::encryption::Aes256GcmProvider {
crate::encryption::Aes256GcmProvider::new(&[0x42; 32])
}
#[test]
fn block_roundtrip_encrypted_uncompressed() -> crate::Result<()> {
let enc = test_provider();
let data = b"plaintext block data for encryption test";
let mut writer = vec![];
Block::write_into(
&mut writer,
data,
BlockType::Data,
CompressionType::None,
Some(&enc),
#[cfg(zstd_any)]
None,
)?;
let mut reader = &writer[..];
let block = Block::from_reader(
&mut reader,
CompressionType::None,
Some(&enc),
#[cfg(zstd_any)]
None,
)?;
assert_eq!(data, &*block.data);
Ok(())
}
#[test]
#[cfg(feature = "lz4")]
fn block_roundtrip_encrypted_lz4() -> crate::Result<()> {
let enc = test_provider();
let data = b"abcdefabcdefabcdef";
let mut writer = vec![];
Block::write_into(
&mut writer,
data,
BlockType::Data,
CompressionType::Lz4,
Some(&enc),
#[cfg(zstd_any)]
None,
)?;
let mut reader = &writer[..];
let block = Block::from_reader(
&mut reader,
CompressionType::Lz4,
Some(&enc),
#[cfg(zstd_any)]
None,
)?;
assert_eq!(data, &*block.data);
Ok(())
}
#[test]
#[cfg(zstd_any)]
fn block_roundtrip_encrypted_zstd() -> crate::Result<()> {
let enc = test_provider();
let data = b"abcdefabcdefabcdef";
let mut writer = vec![];
Block::write_into(
&mut writer,
data,
BlockType::Data,
CompressionType::Zstd(3),
Some(&enc),
#[cfg(zstd_any)]
None,
)?;
let mut reader = &writer[..];
let block = Block::from_reader(
&mut reader,
CompressionType::Zstd(3),
Some(&enc),
#[cfg(zstd_any)]
None,
)?;
assert_eq!(data, &*block.data);
Ok(())
}
#[test]
fn block_from_file_encrypted_uncompressed() -> crate::Result<()> {
use std::io::Write;
let enc = test_provider();
let data = b"plaintext block data for from_file encryption test";
let mut buf = vec![];
let header = Block::write_into(
&mut buf,
data,
BlockType::Data,
CompressionType::None,
Some(&enc),
#[cfg(zstd_any)]
None,
)?;
let dir = tempfile::tempdir()?;
let path = dir.path().join("block");
let mut file = std::fs::File::create(&path)?;
file.write_all(&buf)?;
file.sync_all()?;
drop(file);
let file = std::fs::File::open(&path)?;
let handle = crate::table::BlockHandle::new(
BlockOffset(0),
header.data_length + Header::serialized_len() as u32,
);
let block = Block::from_file(
&file,
handle,
CompressionType::None,
Some(&enc),
#[cfg(zstd_any)]
None,
)?;
assert_eq!(data, &*block.data);
Ok(())
}
#[test]
#[cfg(feature = "lz4")]
fn block_from_file_encrypted_lz4() -> crate::Result<()> {
use std::io::Write;
let enc = test_provider();
let data = b"abcdefabcdefabcdef";
let mut buf = vec![];
let header = Block::write_into(
&mut buf,
data,
BlockType::Data,
CompressionType::Lz4,
Some(&enc),
#[cfg(zstd_any)]
None,
)?;
let dir = tempfile::tempdir()?;
let path = dir.path().join("block");
let mut file = std::fs::File::create(&path)?;
file.write_all(&buf)?;
file.sync_all()?;
drop(file);
let file = std::fs::File::open(&path)?;
let handle = crate::table::BlockHandle::new(
BlockOffset(0),
header.data_length + Header::serialized_len() as u32,
);
let block = Block::from_file(
&file,
handle,
CompressionType::Lz4,
Some(&enc),
#[cfg(zstd_any)]
None,
)?;
assert_eq!(data, &*block.data);
Ok(())
}
#[test]
#[cfg(zstd_any)]
fn block_from_file_encrypted_zstd() -> crate::Result<()> {
    use std::io::Write;

    let enc = test_provider();
    let payload = b"abcdefabcdefabcdef";

    // Encode the block (zstd level 3, encrypted) into an in-memory buffer.
    let mut encoded = Vec::new();
    let header = Block::write_into(
        &mut encoded,
        payload,
        BlockType::Data,
        CompressionType::Zstd(3),
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    )?;

    // Flush the buffer to a temporary file.
    let dir = tempfile::tempdir()?;
    let path = dir.path().join("block");
    {
        let mut out = std::fs::File::create(&path)?;
        out.write_all(&encoded)?;
        out.sync_all()?;
    }

    // Read the block back from disk and check the payload survived.
    let file = std::fs::File::open(&path)?;
    let total_len = header.data_length + Header::serialized_len() as u32;
    let handle = crate::table::BlockHandle::new(BlockOffset(0), total_len);
    let block = Block::from_file(
        &file,
        handle,
        CompressionType::Zstd(3),
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    )?;

    assert_eq!(payload, &*block.data);
    Ok(())
}
#[test]
fn block_from_file_encrypted_wrong_key_fails() -> crate::Result<()> {
    use std::io::Write;

    let write_provider = test_provider();
    let read_provider = crate::encryption::Aes256GcmProvider::new(&[0x99; 32]);
    let payload = b"encrypted block data";

    // Encrypt and serialize the block with the writer's key.
    let mut encoded = vec![];
    let header = Block::write_into(
        &mut encoded,
        payload,
        BlockType::Data,
        CompressionType::None,
        Some(&write_provider),
        #[cfg(zstd_any)]
        None,
    )?;

    let dir = tempfile::tempdir()?;
    let path = dir.path().join("block");
    {
        let mut out = std::fs::File::create(&path)?;
        out.write_all(&encoded)?;
        out.sync_all()?;
    }

    // Attempting to decode with a different key must yield a Decrypt error.
    let file = std::fs::File::open(&path)?;
    let handle = crate::table::BlockHandle::new(
        BlockOffset(0),
        header.data_length + Header::serialized_len() as u32,
    );
    let result = Block::from_file(
        &file,
        handle,
        CompressionType::None,
        Some(&read_provider),
        #[cfg(zstd_any)]
        None,
    );

    assert!(
        matches!(result, Err(crate::Error::Decrypt(_))),
        "expected Decrypt error for wrong key, got: {:?}",
        result.err(),
    );
    Ok(())
}
#[test]
fn block_from_reader_encrypted_wrong_key_fails() -> crate::Result<()> {
    let write_provider = test_provider();
    let read_provider = crate::encryption::Aes256GcmProvider::new(&[0x99; 32]);
    let payload = b"encrypted block data";

    // Encode with one key.
    let mut encoded = vec![];
    Block::write_into(
        &mut encoded,
        payload,
        BlockType::Data,
        CompressionType::None,
        Some(&write_provider),
        #[cfg(zstd_any)]
        None,
    )?;

    // Decoding the same bytes with a mismatched key must fail with Decrypt.
    let mut reader = &encoded[..];
    let result = Block::from_reader(
        &mut reader,
        CompressionType::None,
        Some(&read_provider),
        #[cfg(zstd_any)]
        None,
    );

    assert!(
        matches!(result, Err(crate::Error::Decrypt(_))),
        "expected Decrypt error for wrong key, got: {:?}",
        result.err(),
    );
    Ok(())
}
#[test]
fn block_from_file_encrypted_checksum_tamper_detected() -> crate::Result<()> {
    use std::io::Write;

    let enc = test_provider();
    let payload = b"data for tamper test";

    let mut encoded = vec![];
    let header = Block::write_into(
        &mut encoded,
        payload,
        BlockType::Data,
        CompressionType::None,
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    )?;

    // Invert one byte just past the header to simulate on-disk corruption;
    // `get_mut` is a no-op if the buffer is too short, same as the bounds
    // check in the original formulation.
    let tamper_pos = Header::serialized_len() + 1;
    if let Some(byte) = encoded.get_mut(tamper_pos) {
        *byte ^= 0xFF;
    }

    let dir = tempfile::tempdir()?;
    let path = dir.path().join("block");
    {
        let mut out = std::fs::File::create(&path)?;
        out.write_all(&encoded)?;
        out.sync_all()?;
    }

    // The corrupted block must be rejected with a checksum mismatch.
    let file = std::fs::File::open(&path)?;
    let handle = crate::table::BlockHandle::new(
        BlockOffset(0),
        header.data_length + Header::serialized_len() as u32,
    );
    let result = Block::from_file(
        &file,
        handle,
        CompressionType::None,
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    );

    assert!(
        matches!(result, Err(crate::Error::ChecksumMismatch { .. })),
        "expected ChecksumMismatch for tampered data, got: {:?}",
        result.err(),
    );
    Ok(())
}
#[test]
fn block_from_file_encrypted_undersized_handle_rejected() -> crate::Result<()> {
    use std::io::Write;

    let enc = test_provider();

    // Write a file far too small to contain even a block header.
    let dir = tempfile::tempdir()?;
    let path = dir.path().join("block");
    {
        let mut out = std::fs::File::create(&path)?;
        out.write_all(b"tiny")?;
        out.sync_all()?;
    }

    // A handle whose length is smaller than the header must be rejected.
    let file = std::fs::File::open(&path)?;
    let handle = crate::table::BlockHandle::new(BlockOffset(0), 2);
    let result = Block::from_file(
        &file,
        handle,
        CompressionType::None,
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    );

    assert!(
        matches!(result, Err(crate::Error::InvalidHeader(_))),
        "expected InvalidHeader for undersized handle, got: {:?}",
        result.err(),
    );
    Ok(())
}
#[test]
fn block_from_file_encrypted_uncompressed_large_payload() -> crate::Result<()> {
    use std::io::Write;

    let enc = test_provider();
    // 32 KiB of a repeated byte — larger than the other fixtures in this module.
    let payload = vec![0xBB_u8; 32 * 1024];

    let mut encoded = vec![];
    let header = Block::write_into(
        &mut encoded,
        &payload,
        BlockType::Data,
        CompressionType::None,
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    )?;

    let dir = tempfile::tempdir()?;
    let path = dir.path().join("block");
    {
        let mut out = std::fs::File::create(&path)?;
        out.write_all(&encoded)?;
        out.sync_all()?;
    }

    let file = std::fs::File::open(&path)?;
    let handle = crate::table::BlockHandle::new(
        BlockOffset(0),
        header.data_length + Header::serialized_len() as u32,
    );
    let block = Block::from_file(
        &file,
        handle,
        CompressionType::None,
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    )?;

    assert_eq!(&*block.data, &payload[..]);
    Ok(())
}
#[test]
fn block_roundtrip_encrypted_uncompressed_large() -> crate::Result<()> {
    let enc = test_provider();
    // 32 KiB payload, encrypted but uncompressed.
    let payload = vec![0xCC_u8; 32 * 1024];

    let mut encoded = vec![];
    Block::write_into(
        &mut encoded,
        &payload,
        BlockType::Data,
        CompressionType::None,
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    )?;

    // Roundtrip through the reader-based decode path.
    let mut reader = &encoded[..];
    let block = Block::from_reader(
        &mut reader,
        CompressionType::None,
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    )?;

    assert_eq!(&*block.data, &payload[..]);
    Ok(())
}
#[test]
#[cfg(feature = "lz4")]
fn block_roundtrip_encrypted_lz4_large() -> crate::Result<()> {
    let enc = test_provider();
    // 32 KiB payload, encrypted and LZ4-compressed.
    let payload = vec![0xDD_u8; 32 * 1024];

    let mut encoded = vec![];
    Block::write_into(
        &mut encoded,
        &payload,
        BlockType::Data,
        CompressionType::Lz4,
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    )?;

    // Roundtrip through the reader-based decode path.
    let mut reader = &encoded[..];
    let block = Block::from_reader(
        &mut reader,
        CompressionType::Lz4,
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    )?;

    assert_eq!(&*block.data, &payload[..]);
    Ok(())
}
#[test]
#[cfg(zstd_any)]
fn block_roundtrip_encrypted_zstd_large() -> crate::Result<()> {
    let enc = test_provider();
    // 32 KiB payload, encrypted and zstd-compressed at level 3.
    let payload = vec![0xEE_u8; 32 * 1024];

    let mut encoded = vec![];
    Block::write_into(
        &mut encoded,
        &payload,
        BlockType::Data,
        CompressionType::Zstd(3),
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    )?;

    // Roundtrip through the reader-based decode path.
    let mut reader = &encoded[..];
    let block = Block::from_reader(
        &mut reader,
        CompressionType::Zstd(3),
        Some(&enc),
        #[cfg(zstd_any)]
        None,
    )?;

    assert_eq!(&*block.data, &payload[..]);
    Ok(())
}
}
#[cfg(feature = "zstd")]
mod zstd_dict {
    use super::*;
    use crate::compression::ZstdDictionary;
    use test_log::test;

    /// Trains a dictionary from 500 synthetic `key-NNNNN`/`val-NNNNN` samples.
    fn test_dict() -> ZstdDictionary {
        let samples: Vec<u8> = (0u32..500)
            .flat_map(|i| format!("key-{i:05}val-{i:05}").into_bytes())
            .collect();
        ZstdDictionary::new(&samples)
    }

    /// Dictionary compression settings (level 3) bound to `dict`'s id.
    fn test_compression(dict: &ZstdDictionary) -> CompressionType {
        CompressionType::ZstdDict {
            level: 3,
            dict_id: dict.id(),
        }
    }

    #[test]
    fn block_roundtrip_zstd_dict_reader() -> crate::Result<()> {
        let dict = test_dict();
        let compression = test_compression(&dict);
        let payload = b"abcdefabcdefabcdef";

        let mut encoded = vec![];
        Block::write_into(
            &mut encoded,
            payload,
            BlockType::Data,
            compression,
            None,
            Some(&dict),
        )?;

        let mut reader = &encoded[..];
        let decoded = Block::from_reader(&mut reader, compression, None, Some(&dict))?;
        assert_eq!(payload, &*decoded.data);
        Ok(())
    }

    #[test]
    fn block_roundtrip_zstd_dict_file() -> crate::Result<()> {
        use std::io::Write;

        let dict = test_dict();
        let compression = test_compression(&dict);
        let payload = b"abcdefabcdefabcdef";

        let mut encoded = vec![];
        let header = Block::write_into(
            &mut encoded,
            payload,
            BlockType::Data,
            compression,
            None,
            Some(&dict),
        )?;

        // Persist, then re-read through the file-based decode path.
        let dir = tempfile::tempdir()?;
        let path = dir.path().join("block");
        {
            let mut out = std::fs::File::create(&path)?;
            out.write_all(&encoded)?;
            out.sync_all()?;
        }

        let file = std::fs::File::open(&path)?;
        let handle = crate::table::BlockHandle::new(
            BlockOffset(0),
            header.data_length + Header::serialized_len() as u32,
        );
        let decoded = Block::from_file(&file, handle, compression, None, Some(&dict))?;
        assert_eq!(payload, &*decoded.data);
        Ok(())
    }

    #[test]
    fn block_roundtrip_zstd_dict_large_data() -> crate::Result<()> {
        let dict = test_dict();
        let compression = test_compression(&dict);
        // 64 KiB of a repeated byte compresses very well.
        let payload = vec![0xAB_u8; 64 * 1024];

        let mut encoded = vec![];
        Block::write_into(
            &mut encoded,
            &payload,
            BlockType::Data,
            compression,
            None,
            Some(&dict),
        )?;
        assert!(
            encoded.len() < payload.len(),
            "dict compression should reduce size"
        );

        let mut reader = &encoded[..];
        let decoded = Block::from_reader(&mut reader, compression, None, Some(&dict))?;
        assert_eq!(&*decoded.data, &payload[..]);
        Ok(())
    }

    #[test]
    fn block_zstd_dict_missing_returns_error() -> crate::Result<()> {
        let dict = test_dict();
        let compression = test_compression(&dict);

        let mut encoded = vec![];
        Block::write_into(
            &mut encoded,
            b"hello",
            BlockType::Data,
            compression,
            None,
            Some(&dict),
        )?;

        // Reading dict-compressed data without supplying a dictionary fails.
        let mut reader = &encoded[..];
        let result = Block::from_reader(&mut reader, compression, None, None);
        assert!(
            matches!(
                result,
                Err(crate::Error::ZstdDictMismatch { got: None, .. })
            ),
            "expected ZstdDictMismatch with got=None",
        );
        Ok(())
    }

    #[test]
    fn block_zstd_dict_wrong_dict_returns_error() -> crate::Result<()> {
        let dict = test_dict();
        let compression = test_compression(&dict);
        let wrong_dict = ZstdDictionary::new(b"completely different dictionary bytes");

        let mut encoded = vec![];
        Block::write_into(
            &mut encoded,
            b"hello",
            BlockType::Data,
            compression,
            None,
            Some(&dict),
        )?;

        // A dictionary whose id does not match the one used at write time fails.
        let mut reader = &encoded[..];
        let result = Block::from_reader(&mut reader, compression, None, Some(&wrong_dict));
        assert!(
            matches!(
                result,
                Err(crate::Error::ZstdDictMismatch { got: Some(_), .. })
            ),
            "expected ZstdDictMismatch with got=Some",
        );
        Ok(())
    }

    #[test]
    fn block_write_zstd_dict_missing_returns_error() {
        let dict = test_dict();
        let compression = test_compression(&dict);

        // Writing dict-compressed data also requires the dictionary up front.
        let mut sink = std::io::sink();
        let result = Block::write_into(
            &mut sink,
            b"hello",
            BlockType::Data,
            compression,
            None,
            None,
        );
        assert!(
            matches!(
                result,
                Err(crate::Error::ZstdDictMismatch { got: None, .. })
            ),
            "expected ZstdDictMismatch, got: {result:?}",
        );
    }

    #[test]
    #[cfg(feature = "encryption")]
    fn block_roundtrip_zstd_dict_encrypted_reader() -> crate::Result<()> {
        let enc = crate::Aes256GcmProvider::new(&[0x42; 32]);
        let dict = test_dict();
        let compression = test_compression(&dict);
        let payload = b"encrypted-dict-compressed-data-for-test";

        let mut encoded = vec![];
        Block::write_into(
            &mut encoded,
            payload,
            BlockType::Data,
            compression,
            Some(&enc),
            Some(&dict),
        )?;

        let mut reader = &encoded[..];
        let decoded = Block::from_reader(&mut reader, compression, Some(&enc), Some(&dict))?;
        assert_eq!(payload, &*decoded.data);
        Ok(())
    }

    #[test]
    #[cfg(feature = "encryption")]
    fn block_roundtrip_zstd_dict_encrypted_file() -> crate::Result<()> {
        use std::io::Write;

        let enc = crate::Aes256GcmProvider::new(&[0x42; 32]);
        let dict = test_dict();
        let compression = test_compression(&dict);
        // 16 KiB payload, encrypted and dict-compressed.
        let payload = vec![0xCC_u8; 16 * 1024];

        let mut encoded = vec![];
        let header = Block::write_into(
            &mut encoded,
            &payload,
            BlockType::Data,
            compression,
            Some(&enc),
            Some(&dict),
        )?;

        let dir = tempfile::tempdir()?;
        let path = dir.path().join("block");
        {
            let mut out = std::fs::File::create(&path)?;
            out.write_all(&encoded)?;
            out.sync_all()?;
        }

        let file = std::fs::File::open(&path)?;
        let handle = crate::table::BlockHandle::new(
            BlockOffset(0),
            header.data_length + Header::serialized_len() as u32,
        );
        let decoded = Block::from_file(&file, handle, compression, Some(&enc), Some(&dict))?;
        assert_eq!(&*decoded.data, &payload[..]);
        Ok(())
    }
}
}