use crate::blob_store::sorted_uint_vec::{SortedUintVec, SortedUintVecConfig};
use crate::blob_store::traits::{
BlobStore, BlobStoreStats, CompressedBlobStore, CompressionStats,
};
use crate::containers::FastVec;
use crate::error::{Result, ZiporaError};
use crate::memory::SecureMemoryPool;
use crate::memory::simd_ops::{fast_copy, fast_compare, fast_fill};
use crate::RecordId;
use std::io::{Read, Write};
use std::path::Path;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
// 20-byte magic signature identifying a zipora blob-store file (3 NUL pad bytes).
const MAGIC_SIGNATURE: &[u8; 20] = b"zipora-blob-store\0\0\0";
// 20-byte class-name tag stored after the magic (2 NUL pad bytes).
const CLASS_NAME: &[u8; 20] = b"ZipOffsetBlobStore\0\0";
// On-disk format version packed into the header's combined records field.
const FORMAT_VERSION: u16 = 1;
// Serialized header size in bytes; `FileHeader::to_bytes` zero-pads to this.
const HEADER_SIZE: usize = 128;
// Reserved trailing footer size accounted for in `file_size`.
const FOOTER_SIZE: usize = 64;
// Minimum buffer length (bytes) before SIMD memory routines are preferred
// over plain scalar/stdlib operations.
const SIMD_THRESHOLD: usize = 64;
/// Configuration for a [`ZipOffsetBlobStore`].
///
/// Controls compression effort, checksum strictness, the offset-index block
/// layout, and whether secure-memory and SIMD fast paths are used.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ZipOffsetBlobStoreConfig {
    /// ZSTD compression level, 0 (off) to 22 (see `validate`).
    pub compress_level: u8,
    /// Checksum level, 0-3. Levels 2 and 3 store a 4-byte per-record checksum
    /// (see `BlobStore::size`, which subtracts it).
    pub checksum_level: u8,
    /// Block layout configuration for the compressed offset index.
    pub offset_config: SortedUintVecConfig,
    /// Request allocations from a `SecureMemoryPool` when available.
    pub use_secure_memory: bool,
    /// Enable SIMD copy/compare/fill fast paths above `SIMD_THRESHOLD` bytes.
    pub enable_simd: bool,
}
impl Default for ZipOffsetBlobStoreConfig {
fn default() -> Self {
Self {
compress_level: 3, checksum_level: 2, offset_config: SortedUintVecConfig::default(),
use_secure_memory: true,
enable_simd: true,
}
}
}
impl ZipOffsetBlobStoreConfig {
    /// Preset tuned for throughput: light compression, cheap checksums,
    /// performance-optimized offset layout.
    pub fn performance_optimized() -> Self {
        Self {
            compress_level: 1,
            checksum_level: 1,
            offset_config: SortedUintVecConfig::performance_optimized(),
            ..Self::default()
        }
    }

    /// Preset tuned for minimal footprint: high compression effort, full
    /// checksums, memory-optimized offset layout.
    pub fn compression_optimized() -> Self {
        Self {
            compress_level: 9,
            checksum_level: 3,
            offset_config: SortedUintVecConfig::memory_optimized(),
            ..Self::default()
        }
    }

    /// Preset tuned for safety: strong checksums and SIMD paths disabled.
    pub fn security_optimized() -> Self {
        Self {
            compress_level: 6,
            checksum_level: 3,
            enable_simd: false,
            ..Self::default()
        }
    }

    /// Check that every field is within its supported range.
    ///
    /// # Errors
    /// Returns `ZiporaError::invalid_data` when `compress_level` exceeds 22,
    /// `checksum_level` exceeds 3, or the offset configuration is invalid.
    pub fn validate(&self) -> Result<()> {
        if self.compress_level > 22 {
            Err(ZiporaError::invalid_data("compress_level must be 0-22"))
        } else if self.checksum_level > 3 {
            Err(ZiporaError::invalid_data("checksum_level must be 0-3"))
        } else {
            self.offset_config.validate()
        }
    }
}
/// In-memory mirror of the on-disk file header.
///
/// `repr(C, packed)` keeps the field layout deterministic; u64 fields are
/// therefore unaligned and must always be read by value, never by reference.
/// Serialization goes through `to_bytes`/`from_bytes`, which use a fixed
/// 128-byte (`HEADER_SIZE`) little-endian layout.
///
/// NOTE(review): with this padding the struct itself is 112 bytes, not
/// HEADER_SIZE (128); `to_bytes` zero-fills the remaining bytes, so the
/// discrepancy is harmless but worth confirming as intentional.
#[repr(C, packed)]
#[derive(Debug, Clone)]
struct FileHeader {
    magic: [u8; 20],
    class_name: [u8; 20],
    // Total serialized file size (header + content + padding + offsets + footer).
    file_size: u64,
    // Total uncompressed payload size across all records.
    unzip_size: u64,
    // Packed field: bits 0-39 record count, bits 40-47 checksum level,
    // bits 48-63 format version.
    records_checksum_version: u64,
    // Length in bytes of the (possibly compressed) content section.
    content_bytes: u64,
    // Serialized size of the offset index.
    offsets_bytes: u64,
    // log2 of the offset-index block size, copied from the config.
    offsets_log2_block_units: u8,
    checksum_level: u8,
    compress_level: u8,
    _padding: [u8; 29],
}
impl FileHeader {
    /// Build a header describing a serialized store.
    ///
    /// The record count, checksum level and format version share one u64:
    /// bits 0-39 hold the record count (so at most 2^40 - 1 records),
    /// bits 40-47 the checksum level, bits 48-63 the format version.
    fn new(
        file_size: u64,
        unzip_size: u64,
        records: u64,
        content_bytes: u64,
        offsets_bytes: u64,
        config: &ZipOffsetBlobStoreConfig,
    ) -> Self {
        let records_checksum_version = (records & 0xFFFFFFFFFF)
            | ((config.checksum_level as u64) << 40)
            | ((FORMAT_VERSION as u64) << 48);
        Self {
            magic: *MAGIC_SIGNATURE,
            class_name: *CLASS_NAME,
            file_size,
            unzip_size,
            records_checksum_version,
            content_bytes,
            offsets_bytes,
            offsets_log2_block_units: config.offset_config.log2_block_units,
            checksum_level: config.checksum_level,
            compress_level: config.compress_level,
            _padding: [0; 29],
        }
    }

    /// Record count (low 40 bits of the packed field).
    fn records(&self) -> u64 {
        self.records_checksum_version & 0xFFFFFFFFFF
    }

    /// Checksum level stored in the packed field (bits 40-47).
    fn checksum_type(&self) -> u8 {
        ((self.records_checksum_version >> 40) & 0xFF) as u8
    }

    /// On-disk format version (bits 48-63).
    fn format_version(&self) -> u16 {
        ((self.records_checksum_version >> 48) & 0xFFFF) as u16
    }

    /// Total serialized file size in bytes.
    fn file_size(&self) -> u64 {
        self.file_size
    }

    /// Total uncompressed payload size in bytes.
    fn unzip_size(&self) -> u64 {
        self.unzip_size
    }

    /// Verify the magic signature, class name and format version.
    ///
    /// # Errors
    /// Returns `ZiporaError::invalid_data` when any of the three fields does
    /// not match what this build writes.
    fn validate(&self) -> Result<()> {
        if self.magic != *MAGIC_SIGNATURE {
            return Err(ZiporaError::invalid_data("invalid magic signature"));
        }
        if self.class_name != *CLASS_NAME {
            return Err(ZiporaError::invalid_data("invalid class name"));
        }
        if self.format_version() != FORMAT_VERSION {
            return Err(ZiporaError::invalid_data("unsupported format version"));
        }
        Ok(())
    }

    /// Serialize to the fixed 128-byte little-endian on-disk layout.
    /// Bytes 83..128 stay zero (reserved padding).
    fn to_bytes(&self) -> [u8; HEADER_SIZE] {
        let mut bytes = [0u8; HEADER_SIZE];
        bytes[0..20].copy_from_slice(&self.magic);
        bytes[20..40].copy_from_slice(&self.class_name);
        bytes[40..48].copy_from_slice(&self.file_size.to_le_bytes());
        bytes[48..56].copy_from_slice(&self.unzip_size.to_le_bytes());
        bytes[56..64].copy_from_slice(&self.records_checksum_version.to_le_bytes());
        bytes[64..72].copy_from_slice(&self.content_bytes.to_le_bytes());
        bytes[72..80].copy_from_slice(&self.offsets_bytes.to_le_bytes());
        bytes[80] = self.offsets_log2_block_units;
        bytes[81] = self.checksum_level;
        bytes[82] = self.compress_level;
        bytes
    }

    /// Deserialize from the fixed 128-byte layout written by `to_bytes`.
    fn from_bytes(bytes: &[u8; HEADER_SIZE]) -> Self {
        // All ranges below are in-bounds for HEADER_SIZE (128), so the
        // slice-to-array conversions cannot fail.
        let read_u64 = |at: usize| {
            u64::from_le_bytes(bytes[at..at + 8].try_into().expect("8-byte header slice"))
        };
        let mut magic = [0u8; 20];
        magic.copy_from_slice(&bytes[0..20]);
        let mut class_name = [0u8; 20];
        class_name.copy_from_slice(&bytes[20..40]);
        Self {
            magic,
            class_name,
            file_size: read_u64(40),
            unzip_size: read_u64(48),
            records_checksum_version: read_u64(56),
            content_bytes: read_u64(64),
            offsets_bytes: read_u64(72),
            offsets_log2_block_units: bytes[80],
            checksum_level: bytes[81],
            compress_level: bytes[82],
            _padding: [0u8; 29],
        }
    }
}
/// One-block cache of decoded record offsets used by `get_record_cached`.
///
/// Holds the `block_size` offsets of a single offset-index block plus one
/// sentinel slot for the start of the following block, so consecutive
/// lookups within the same block avoid re-decoding the index.
#[derive(Debug)]
struct CacheOffsets {
    // Index of the cached block; `usize::MAX` means "no block cached yet".
    block_id: usize,
    // `block_size + 1` entries: the block's offsets plus the next block's
    // first offset (or total content length for the final block).
    offsets: Vec<u64>,
}
impl CacheOffsets {
fn new(block_size: usize) -> Self {
Self {
block_id: usize::MAX, offsets: vec![0; block_size + 1], }
}
}
/// Read-only blob store with (optionally ZSTD-compressed) record payloads and
/// a delta-compressed offset index.
///
/// Records are laid out back-to-back in `content`; record `i` occupies
/// `offsets[i]..offsets[i+1]`. The store is populated by a builder or loaded
/// from a serialized file; `put`/`remove` always fail.
pub struct ZipOffsetBlobStore {
    // Concatenated record payloads (plus per-record checksum trailers when
    // checksum level 2/3 is configured).
    content: FastVec<u8>,
    // Compressed sorted offset index; entry i is the start of record i.
    offsets: SortedUintVec,
    config: ZipOffsetBlobStoreConfig,
    // Aggregate compression statistics (sizes, ratio, record count).
    stats: CompressionStats,
    // Optional secure pool handle; held when constructed via `with_pool`.
    pool: Option<SecureMemoryPool>,
    // Lazily-created single-block offset cache (see `enable_offset_cache`).
    offset_cache: Option<CacheOffsets>,
}
impl ZipOffsetBlobStore {
    /// Create an empty store with the default configuration.
    pub fn new() -> Result<Self> {
        Self::with_config(ZipOffsetBlobStoreConfig::default())
    }

    /// Create an empty store with an explicit configuration.
    ///
    /// # Errors
    /// Fails when the configuration is invalid or the offset index cannot be
    /// constructed.
    pub fn with_config(config: ZipOffsetBlobStoreConfig) -> Result<Self> {
        config.validate()?;
        // NOTE(review): `config.use_secure_memory` is not consulted here — no
        // pool is created even when it is true. Confirm whether the pool is
        // meant to be supplied only via `with_pool`.
        let pool: Option<SecureMemoryPool> = None;
        let offsets = SortedUintVec::with_config(config.offset_config.clone())?;
        Ok(Self {
            content: FastVec::new(),
            offsets,
            config,
            stats: CompressionStats::default(),
            pool,
            offset_cache: None,
        })
    }

    /// Create an empty store that holds on to the given secure memory pool.
    pub fn with_pool(config: ZipOffsetBlobStoreConfig, pool: SecureMemoryPool) -> Result<Self> {
        config.validate()?;
        let offsets = SortedUintVec::with_config(config.offset_config.clone())?;
        Ok(Self {
            content: FastVec::new(),
            offsets,
            config,
            stats: CompressionStats::default(),
            pool: Some(pool),
            offset_cache: None,
        })
    }

    /// Load a serialized store from a file on disk.
    pub fn load_from_file<P: AsRef<Path>>(path: P) -> Result<Self> {
        let mut file = std::fs::File::open(path)?;
        Self::load_from_reader(&mut file)
    }

    /// Load a serialized store from an arbitrary reader.
    ///
    /// Reads the 128-byte header, the content section, and the content
    /// padding (content is aligned to 16 bytes on disk), then restores the
    /// compression statistics from the header.
    ///
    /// NOTE(review): the offsets section and the footer are NOT read here,
    /// so the returned store's offset index is empty even though the header
    /// records `offsets_bytes` — confirm whether offset deserialization is
    /// handled elsewhere or is missing (see the matching gap in
    /// `save_to_writer`).
    pub fn load_from_reader<R: Read>(reader: &mut R) -> Result<Self> {
        let mut header_bytes = [0u8; HEADER_SIZE];
        reader.read_exact(&mut header_bytes)?;
        let header = FileHeader::from_bytes(&header_bytes);
        header.validate()?;
        // Rebuild a config from the persisted header fields.
        let config = ZipOffsetBlobStoreConfig {
            compress_level: header.compress_level,
            checksum_level: header.checksum_level,
            offset_config: SortedUintVecConfig {
                log2_block_units: header.offsets_log2_block_units,
                ..Default::default()
            },
            use_secure_memory: true,
            enable_simd: true,
        };
        let mut store = Self::with_config(config)?;
        store.content.reserve(header.content_bytes as usize)?;
        let mut content_bytes = vec![0u8; header.content_bytes as usize];
        reader.read_exact(&mut content_bytes)?;
        if store.should_use_simd(content_bytes.len()) {
            // SIMD path: resize first, then bulk-copy into the new tail.
            let current_len = store.content.len();
            store.content.resize(current_len + content_bytes.len(), 0)?;
            {
                let content_slice = &mut store.content.as_mut_slice()[current_len..];
                if let Err(_) = fast_copy(&content_bytes, content_slice) {
                    // Fall back to element-wise extend if the SIMD copy fails.
                    // NOTE(review): `drop` on a reference is a no-op; the
                    // borrow ends here regardless.
                    drop(content_slice);
                    store.content.resize(current_len, 0)?;
                    store.content.extend(content_bytes.into_iter())?;
                }
            }
        } else {
            store.content.extend(content_bytes.into_iter())?;
        }
        // Content is padded to a 16-byte boundary on disk; skip the padding.
        let content_padding = (16 - (header.content_bytes % 16)) % 16;
        if content_padding > 0 {
            let mut padding = vec![0u8; content_padding as usize];
            reader.read_exact(&mut padding)?;
        }
        store.stats.uncompressed_size = header.unzip_size as usize;
        store.stats.compressed_size = header.content_bytes as usize;
        store.stats.compressed_count = header.records() as usize;
        store.stats.compression_ratio = store.stats.ratio();
        Ok(store)
    }

    /// Serialize the store to a file on disk.
    pub fn save_to_file<P: AsRef<Path>>(&self, path: P) -> Result<()> {
        let mut file = std::fs::File::create(path)?;
        self.save_to_writer(&mut file)
    }

    /// Serialize the store to an arbitrary writer.
    ///
    /// Writes the header, the content section, and zero padding aligning the
    /// content to 16 bytes.
    ///
    /// NOTE(review): although `file_size` accounts for the offsets section
    /// and a `FOOTER_SIZE`-byte footer, neither is written here — mirrors the
    /// gap in `load_from_reader`; confirm whether this is intentional.
    pub fn save_to_writer<W: Write>(&self, writer: &mut W) -> Result<()> {
        let content_bytes = self.content.len() as u64;
        let offsets_bytes = self.offsets.memory_usage() as u64;
        let content_padding = (16 - (content_bytes % 16)) % 16;
        let file_size = HEADER_SIZE as u64 + content_bytes + content_padding + offsets_bytes + FOOTER_SIZE as u64;
        let header = FileHeader::new(
            file_size,
            self.stats.uncompressed_size as u64,
            self.offsets.len() as u64,
            content_bytes,
            offsets_bytes,
            &self.config,
        );
        writer.write_all(&header.to_bytes())?;
        writer.write_all(&self.content)?;
        if content_padding > 0 {
            let mut padding = vec![0u8; content_padding as usize];
            self.simd_fill(&mut padding, 0);
            writer.write_all(&padding)?;
        }
        Ok(())
    }

    /// The configuration this store was created with.
    pub fn config(&self) -> &ZipOffsetBlobStoreConfig {
        &self.config
    }

    /// Approximate resident memory of the store in bytes.
    ///
    /// NOTE(review): uses `content.len()` rather than its capacity, so this
    /// understates usage when the content buffer is over-allocated.
    #[inline]
    pub fn memory_usage(&self) -> usize {
        self.content.len() +
        self.offsets.memory_usage() +
        std::mem::size_of::<Self>()
    }

    /// Core record fetch, monomorphized over the store configuration.
    ///
    /// * `COMPRESS` — record payloads are ZSTD-compressed; decompress before
    ///   returning.
    /// * `CHECKSUM_LEN` — trailing checksum bytes per record (0 or 4); when 4
    ///   the checksum is verified before any decompression.
    /// * `FIBER_PREFETCH` — issue a cache prefetch hint for the record bytes
    ///   (x86_64 only).
    ///
    /// # Errors
    /// Out-of-range id, corrupt offsets, short records, checksum mismatch,
    /// or decompression failure.
    fn get_record_impl<const COMPRESS: bool, const CHECKSUM_LEN: u8, const FIBER_PREFETCH: bool>(
        &self,
        id: RecordId,
    ) -> Result<Vec<u8>> {
        if id as usize >= self.offsets.len() {
            return Err(ZiporaError::invalid_data("record ID out of bounds"));
        }
        // Consecutive offsets delimit the record's byte range in `content`.
        let (start_offset, end_offset) = self.offsets.get2(id as usize)?;
        let mut record_len = (end_offset - start_offset) as usize;
        if start_offset >= self.content.len() as u64 || end_offset > self.content.len() as u64 {
            return Err(ZiporaError::invalid_data("offset out of bounds"));
        }
        let record_data = &self.content.as_slice()[start_offset as usize..end_offset as usize];
        if FIBER_PREFETCH && self.config.enable_simd {
            #[cfg(target_arch = "x86_64")]
            // SAFETY: prefetch is a pure cache hint; any address is valid to
            // prefetch and it has no architectural side effects.
            unsafe {
                std::arch::x86_64::_mm_prefetch(
                    record_data.as_ptr() as *const i8,
                    std::arch::x86_64::_MM_HINT_T0,
                );
            }
        }
        if CHECKSUM_LEN > 0 {
            if record_len < CHECKSUM_LEN as usize {
                return Err(ZiporaError::invalid_data("record too small for checksum"));
            }
            // Strip the checksum trailer from the payload length.
            record_len -= CHECKSUM_LEN as usize;
            let data_part = &record_data[..record_len];
            let checksum_part = &record_data[record_len..];
            if CHECKSUM_LEN == 4 {
                if !self.verify_checksum(data_part, checksum_part)? {
                    return Err(ZiporaError::invalid_data("checksum verification failed"));
                }
            }
        }
        let final_data = &record_data[..record_len];
        if COMPRESS {
            #[cfg(feature = "zstd")]
            {
                zstd::decode_all(final_data)
                    .map_err(|e| ZiporaError::io_error(format!("ZSTD decompression failed: {}", e)))
            }
            #[cfg(not(feature = "zstd"))]
            {
                Err(ZiporaError::invalid_data("ZSTD support not enabled"))
            }
        } else {
            Ok(final_data.to_vec())
        }
    }

    /// Compute the per-record checksum of `data`.
    ///
    /// NOTE(review): despite the name this is a simple wrapping byte-sum, not
    /// CRC32C — both branches fold every byte identically (the chunking in
    /// the "SIMD" branch does not change the result). Swapping in a real
    /// CRC32C would change the on-disk format; confirm before changing.
    fn calculate_crc32c(&self, data: &[u8]) -> u32 {
        if self.should_use_simd(data.len()) {
            let mut checksum = 0u32;
            let chunk_size = 64;
            for chunk in data.chunks(chunk_size) {
                checksum = chunk.iter().fold(checksum, |acc, &byte| acc.wrapping_add(byte as u32));
            }
            checksum
        } else {
            data.iter().fold(0u32, |acc, &byte| acc.wrapping_add(byte as u32))
        }
    }

    /// Recompute the checksum of `data` and compare it (little-endian) with
    /// the 4 stored trailer bytes. Returns `Ok(true)` on match.
    fn verify_checksum(&self, data: &[u8], stored_checksum: &[u8]) -> Result<bool> {
        let calculated = self.calculate_crc32c(data);
        let calculated_bytes = calculated.to_le_bytes();
        let comparison_result = self.simd_compare(&calculated_bytes, stored_checksum);
        Ok(comparison_result == 0)
    }

    /// Lazily allocate the single-block offset cache used by
    /// `get_record_cached`. Idempotent.
    pub fn enable_offset_cache(&mut self) {
        if self.offset_cache.is_none() {
            let block_size = self.config.offset_config.block_size();
            self.offset_cache = Some(CacheOffsets::new(block_size));
        }
    }

    /// True when SIMD is enabled and the buffer is at least
    /// `SIMD_THRESHOLD` bytes.
    #[inline]
    fn should_use_simd(&self, size: usize) -> bool {
        self.config.enable_simd && size >= SIMD_THRESHOLD
    }

    /// Copy `src` into `dst` (equal lengths required), using the SIMD fast
    /// path for large buffers.
    fn simd_copy(&self, src: &[u8], dst: &mut [u8]) -> Result<()> {
        if self.should_use_simd(src.len()) {
            fast_copy(src, dst)
        } else {
            if src.len() != dst.len() {
                return Err(ZiporaError::invalid_data("source and destination length mismatch"));
            }
            dst.copy_from_slice(src);
            Ok(())
        }
    }

    /// Three-way comparison of two byte slices (memcmp-style: <0, 0, >0).
    /// Falls back to a length-then-lexicographic scalar compare for small
    /// inputs.
    fn simd_compare(&self, a: &[u8], b: &[u8]) -> i32 {
        if self.should_use_simd(a.len().min(b.len())) {
            fast_compare(a, b)
        } else {
            match a.len().cmp(&b.len()) {
                std::cmp::Ordering::Less => -1,
                std::cmp::Ordering::Greater => 1,
                std::cmp::Ordering::Equal => {
                    for (av, bv) in a.iter().zip(b.iter()) {
                        match av.cmp(bv) {
                            std::cmp::Ordering::Less => return -1,
                            std::cmp::Ordering::Greater => return 1,
                            std::cmp::Ordering::Equal => continue,
                        }
                    }
                    0
                }
            }
        }
    }

    /// Fill `slice` with `value`, using the SIMD fast path for large buffers.
    fn simd_fill(&self, slice: &mut [u8], value: u8) {
        if self.should_use_simd(slice.len()) {
            fast_fill(slice, value);
        } else {
            slice.fill(value);
        }
    }

    /// Cached variant of `get_record_impl`: record offsets are served from a
    /// decoded single-block cache, refreshed when the lookup crosses into a
    /// different offset-index block.
    ///
    /// NOTE(review): not currently called from this file (the `BlobStore`
    /// impl uses the uncached path); presumably reserved for a sequential-
    /// scan API. Also unlike `get_record_impl` it performs no bounds checks
    /// on `id` or on the cached offsets before slicing `content` — confirm
    /// callers guarantee validity.
    fn get_record_cached<const COMPRESS: bool, const CHECKSUM_LEN: u8>(
        &mut self,
        id: RecordId,
    ) -> Result<Vec<u8>> {
        if self.offset_cache.is_none() {
            self.enable_offset_cache();
        }
        let cache = self.offset_cache.as_mut().expect("cache initialized when caching enabled");
        // Split id into (block index, index within block).
        let block_idx = (id as usize) >> self.config.offset_config.log2_block_units;
        let offset_idx = (id as usize) & self.config.offset_config.block_mask();
        if block_idx != cache.block_id {
            // Cache miss: decode the whole block, plus a sentinel holding the
            // next block's first offset (or total content length at the end).
            self.offsets.get_block(block_idx, &mut cache.offsets[..self.config.offset_config.block_size()])?;
            if block_idx + 1 < self.offsets.num_blocks() {
                cache.offsets[self.config.offset_config.block_size()] = self.offsets.get((block_idx + 1) << self.config.offset_config.log2_block_units)?;
            } else {
                cache.offsets[self.config.offset_config.block_size()] = self.content.len() as u64;
            }
            cache.block_id = block_idx;
        }
        let start_offset = cache.offsets[offset_idx];
        let end_offset = cache.offsets[offset_idx + 1];
        let mut record_len = (end_offset - start_offset) as usize;
        let record_data = &self.content.as_slice()[start_offset as usize..end_offset as usize];
        if CHECKSUM_LEN > 0 {
            if record_len < CHECKSUM_LEN as usize {
                return Err(ZiporaError::invalid_data("record too small for checksum"));
            }
            record_len -= CHECKSUM_LEN as usize;
            let data_part = &record_data[..record_len];
            let checksum_part = &record_data[record_len..];
            if CHECKSUM_LEN == 4 {
                if !self.verify_checksum(data_part, checksum_part)? {
                    return Err(ZiporaError::invalid_data("checksum verification failed"));
                }
            }
        }
        let final_data = &record_data[..record_len];
        if COMPRESS {
            #[cfg(feature = "zstd")]
            {
                zstd::decode_all(final_data)
                    .map_err(|e| ZiporaError::io_error(format!("ZSTD decompression failed: {}", e)))
            }
            #[cfg(not(feature = "zstd"))]
            {
                Err(ZiporaError::invalid_data("ZSTD support not enabled"))
            }
        } else {
            Ok(final_data.to_vec())
        }
    }
}
impl BlobStore for ZipOffsetBlobStore {
    /// Fetch a record by id.
    ///
    /// Dispatches to a const-generic specialization of `get_record_impl`:
    /// `COMPRESS` is true when `compress_level > 0`, and `CHECKSUM_LEN` is 4
    /// exactly when the checksum level stores a per-record trailer (levels 2
    /// and 3 — the same rule `size` uses when subtracting the trailer).
    ///
    /// Bug fix: the previous match had a `_` catch-all that routed
    /// (compressed, checksum_level == 1) to the uncompressed/no-checksum
    /// specialization, returning raw compressed bytes instead of decompressed
    /// data. Compression and checksum handling are now decided independently,
    /// so every configuration honors the compression flag.
    fn get(&self, id: RecordId) -> Result<Vec<u8>> {
        let compressed = self.config.compress_level > 0;
        let per_record_checksum = matches!(self.config.checksum_level, 2 | 3);
        match (compressed, per_record_checksum) {
            (true, true) => self.get_record_impl::<true, 4, false>(id),
            (true, false) => self.get_record_impl::<true, 0, false>(id),
            (false, true) => self.get_record_impl::<false, 4, false>(id),
            (false, false) => self.get_record_impl::<false, 0, false>(id),
        }
    }

    /// The store is immutable; records must be added through the builder.
    fn put(&mut self, _data: &[u8]) -> Result<RecordId> {
        Err(ZiporaError::invalid_operation(
            "ZipOffsetBlobStore is read-only, use builder to create",
        ))
    }

    /// Removal is never supported for this read-only store.
    fn remove(&mut self, _id: RecordId) -> Result<()> {
        Err(ZiporaError::invalid_operation(
            "ZipOffsetBlobStore doesn't support removal",
        ))
    }

    /// A record exists iff its id falls within the offset index.
    fn contains(&self, id: RecordId) -> bool {
        (id as usize) < self.offsets.len()
    }

    /// Stored (possibly compressed) size of a record, excluding the 4-byte
    /// checksum trailer when checksum level 2 or 3 is configured. Returns
    /// `Ok(None)` for unknown ids.
    fn size(&self, id: RecordId) -> Result<Option<usize>> {
        if !self.contains(id) {
            return Ok(None);
        }
        let (start_offset, end_offset) = self.offsets.get2(id as usize)?;
        let mut size = (end_offset - start_offset) as usize;
        if self.config.checksum_level == 2 || self.config.checksum_level == 3 {
            // The checksum trailer is not part of the record payload.
            size = size.saturating_sub(4);
        }
        Ok(Some(size))
    }

    /// Number of records in the store.
    fn len(&self) -> usize {
        self.offsets.len()
    }

    /// No-op: the store is read-only and fully resident in memory.
    fn flush(&mut self) -> Result<()> {
        Ok(())
    }

    /// Aggregate statistics. Access counters (get/put/remove, cache hits)
    /// are not tracked by this store and always report zero.
    fn stats(&self) -> BlobStoreStats {
        BlobStoreStats {
            blob_count: self.len(),
            total_size: self.stats.uncompressed_size,
            average_size: if self.len() > 0 {
                self.stats.uncompressed_size as f64 / self.len() as f64
            } else {
                0.0
            },
            get_count: 0,
            put_count: 0,
            remove_count: 0,
            cache_hit_ratio: 0.0,
        }
    }
}
impl CompressedBlobStore for ZipOffsetBlobStore {
    /// Store-wide compression ratio reported for any existing record
    /// (per-record ratios are not tracked); `Ok(None)` for unknown ids.
    fn compression_ratio(&self, id: RecordId) -> Result<Option<f32>> {
        Ok(self.contains(id).then(|| self.stats.compression_ratio))
    }

    /// On-disk (compressed) byte length of a record, including any checksum
    /// trailer; `Ok(None)` for unknown ids.
    fn compressed_size(&self, id: RecordId) -> Result<Option<usize>> {
        if !self.contains(id) {
            return Ok(None);
        }
        let (start, end) = self.offsets.get2(id as usize)?;
        let stored_len = (end - start) as usize;
        Ok(Some(stored_len))
    }

    /// Snapshot of the aggregate compression statistics.
    fn compression_stats(&self) -> CompressionStats {
        self.stats.clone()
    }
}
impl Default for ZipOffsetBlobStore {
    /// Construct an empty store with default configuration.
    ///
    /// # Panics
    /// Panics if store creation fails (e.g. under severe memory pressure);
    /// use `ZipOffsetBlobStore::new` to handle the error instead.
    fn default() -> Self {
        match Self::new() {
            Ok(store) => store,
            Err(e) => panic!(
                "ZipOffsetBlobStore creation failed in Default: {}. This indicates severe memory pressure.",
                e
            ),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Config presets validate and carry their advertised levels.
    #[test]
    fn test_zip_offset_blob_store_config() {
        let config = ZipOffsetBlobStoreConfig::default();
        assert!(config.validate().is_ok());
        assert_eq!(config.compress_level, 3);
        assert_eq!(config.checksum_level, 2);
        let perf_config = ZipOffsetBlobStoreConfig::performance_optimized();
        assert!(perf_config.validate().is_ok());
        assert_eq!(perf_config.compress_level, 1);
        let compression_config = ZipOffsetBlobStoreConfig::compression_optimized();
        assert!(compression_config.validate().is_ok());
        assert_eq!(compression_config.compress_level, 9);
        let security_config = ZipOffsetBlobStoreConfig::security_optimized();
        assert!(security_config.validate().is_ok());
        assert_eq!(security_config.checksum_level, 3);
    }

    // Header field packing and to_bytes/from_bytes round-trip.
    #[test]
    fn test_file_header() {
        let config = ZipOffsetBlobStoreConfig::default();
        let header = FileHeader::new(1000, 800, 10, 600, 100, &config);
        assert_eq!(header.magic, *MAGIC_SIGNATURE);
        assert_eq!(header.class_name, *CLASS_NAME);
        assert_eq!(header.file_size(), 1000);
        assert_eq!(header.unzip_size(), 800);
        assert_eq!(header.records(), 10);
        assert_eq!(header.checksum_type(), config.checksum_level);
        assert_eq!(header.format_version(), FORMAT_VERSION);
        let bytes = header.to_bytes();
        let header2 = FileHeader::from_bytes(&bytes);
        assert!(header2.validate().is_ok());
        assert_eq!(header2.file_size(), header.file_size());
    }

    // New stores start empty under both default and preset configs.
    #[test]
    fn test_zip_offset_blob_store_creation() {
        let store = ZipOffsetBlobStore::new().unwrap();
        assert_eq!(store.len(), 0);
        assert!(store.is_empty());
        let config = ZipOffsetBlobStoreConfig::performance_optimized();
        let store = ZipOffsetBlobStore::with_config(config).unwrap();
        assert_eq!(store.len(), 0);
    }

    // put/remove must always fail on the read-only store.
    #[test]
    fn test_zip_offset_blob_store_read_only() {
        let mut store = ZipOffsetBlobStore::new().unwrap();
        assert!(store.put(b"test data").is_err());
        assert!(store.remove(0).is_err());
    }

    // Lookups on an empty store report absence rather than panicking.
    #[test]
    fn test_zip_offset_blob_store_bounds_checking() {
        let store = ZipOffsetBlobStore::new().unwrap();
        assert!(!store.contains(0));
        assert!(store.get(0).is_err());
        assert_eq!(store.size(0).unwrap(), None);
    }

    // Cache starts invalid (usize::MAX) with block_size + 1 sentinel slots.
    #[test]
    fn test_cache_offsets() {
        let mut cache = CacheOffsets::new(64);
        assert_eq!(cache.block_id, usize::MAX);
        assert_eq!(cache.offsets.len(), 65);
    }

    // memory_usage must at least account for the struct itself.
    #[test]
    fn test_zip_offset_blob_store_memory_usage() {
        let store = ZipOffsetBlobStore::new().unwrap();
        let usage = store.memory_usage();
        assert!(usage >= std::mem::size_of::<ZipOffsetBlobStore>());
    }

    // enable_offset_cache lazily creates the cache.
    #[test]
    fn test_zip_offset_blob_store_enable_cache() {
        let mut store = ZipOffsetBlobStore::new().unwrap();
        assert!(store.offset_cache.is_none());
        store.enable_offset_cache();
        assert!(store.offset_cache.is_some());
    }

    // SIMD kicks in exactly at SIMD_THRESHOLD (64) and only when enabled.
    #[test]
    fn test_simd_optimization_threshold() {
        let store = ZipOffsetBlobStore::new().unwrap();
        assert!(!store.should_use_simd(32));
        assert!(!store.should_use_simd(63));
        assert!(store.should_use_simd(64));
        assert!(store.should_use_simd(128));
        assert!(store.should_use_simd(4096));
        let config = ZipOffsetBlobStoreConfig {
            enable_simd: false,
            ..Default::default()
        };
        let store_no_simd = ZipOffsetBlobStore::with_config(config).unwrap();
        assert!(!store_no_simd.should_use_simd(128));
    }

    // Copy/compare/fill helpers behave on small (scalar-path) buffers.
    #[test]
    fn test_simd_memory_operations() {
        let store = ZipOffsetBlobStore::new().unwrap();
        let src = vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let mut dst = vec![0u8; src.len()];
        assert!(store.simd_copy(&src, &mut dst).is_ok());
        assert_eq!(src, dst);
        let a = vec![1u8, 2, 3, 4];
        let b = vec![1u8, 2, 3, 4];
        let c = vec![1u8, 2, 3, 5];
        assert_eq!(store.simd_compare(&a, &b), 0);
        assert!(store.simd_compare(&a, &c) < 0);
        assert!(store.simd_compare(&c, &a) > 0);
        let mut buffer = vec![1u8; 10];
        store.simd_fill(&mut buffer, 42);
        assert_eq!(buffer, vec![42u8; 10]);
    }

    // Checksum round-trips and rejects a wrong stored value.
    #[test]
    fn test_simd_checksum_operations() {
        let store = ZipOffsetBlobStore::new().unwrap();
        let data = vec![1u8, 2, 3, 4, 5, 6, 7, 8];
        let checksum = store.calculate_crc32c(&data);
        assert!(checksum > 0);
        let checksum_bytes = checksum.to_le_bytes();
        assert!(store.verify_checksum(&data, &checksum_bytes).unwrap());
        let invalid_checksum = [0u8, 0, 0, 0];
        assert!(!store.verify_checksum(&data, &invalid_checksum).unwrap());
    }

    // Same helpers on buffers large enough to take the SIMD path.
    #[test]
    fn test_simd_with_large_data() {
        let store = ZipOffsetBlobStore::new().unwrap();
        let large_data = vec![42u8; 1024];
        let mut dst = vec![0u8; large_data.len()];
        assert!(store.simd_copy(&large_data, &mut dst).is_ok());
        assert_eq!(large_data, dst);
        let comparison_result = store.simd_compare(&large_data, &dst);
        assert_eq!(comparison_result, 0);
        let mut buffer = vec![0u8; 1024];
        store.simd_fill(&mut buffer, 255);
        assert_eq!(buffer, vec![255u8; 1024]);
        let checksum = store.calculate_crc32c(&large_data);
        assert!(checksum > 0);
    }

    // Scalar fallback: length mismatch errors on copy, orders on compare.
    #[test]
    fn test_simd_fallback_behavior() {
        let store = ZipOffsetBlobStore::new().unwrap();
        let src = vec![1u8, 2, 3];
        let mut dst = vec![0u8; 5];
        assert!(store.simd_copy(&src, &mut dst).is_err());
        let a = vec![1u8, 2, 3];
        let b = vec![1u8, 2];
        let result = store.simd_compare(&a, &b);
        assert!(result != 0);
    }
}