use crate::traits::BlockStore;
use async_trait::async_trait;
use dashmap::DashMap;
use ipfrs_core::{Block, Cid, Error, Result};
use parking_lot::RwLock;
use std::sync::Arc;
/// Parameters controlling content-defined chunk boundaries.
#[derive(Debug, Clone)]
pub struct ChunkingConfig {
    /// Smallest chunk the chunker will emit; boundary search starts here.
    pub min_chunk_size: usize,
    /// Preferred average chunk size the boundary masks aim for.
    pub target_chunk_size: usize,
    /// Hard upper bound; a boundary is forced at this size.
    pub max_chunk_size: usize,
    /// Bit mask applied to the rolling hash when testing for a boundary.
    pub hash_mask: u32,
}

impl Default for ChunkingConfig {
    /// General-purpose sizing: 256 KiB min, 1 MiB target, 4 MiB max,
    /// 16-bit boundary mask.
    fn default() -> Self {
        ChunkingConfig {
            min_chunk_size: 256 << 10,
            target_chunk_size: 1 << 20,
            max_chunk_size: 4 << 20,
            hash_mask: 0xFFFF,
        }
    }
}

impl ChunkingConfig {
    /// Smaller chunks (64 KiB – 1 MiB): finer dedup granularity, more
    /// per-chunk overhead.
    pub fn small() -> Self {
        ChunkingConfig {
            min_chunk_size: 64 << 10,
            target_chunk_size: 256 << 10,
            max_chunk_size: 1 << 20,
            hash_mask: 0x3FFF,
        }
    }

    /// Larger chunks (1 MiB – 16 MiB): less overhead, coarser dedup.
    pub fn large() -> Self {
        ChunkingConfig {
            min_chunk_size: 1 << 20,
            target_chunk_size: 4 << 20,
            max_chunk_size: 16 << 20,
            hash_mask: 0x1FFFF,
        }
    }
}
/// Per-chunk bookkeeping entry held in the in-memory chunk index.
#[derive(Debug, Clone)]
struct ChunkMeta {
    // CID of the stored chunk (same value as the index key).
    cid: Cid,
    // Number of block manifests currently referencing this chunk.
    ref_count: usize,
    // Chunk payload size in bytes; used to maintain `bytes_stored` stats.
    size: usize,
}
/// Maps a logical block to the ordered list of chunk CIDs it was split into.
#[derive(Debug, Clone)]
struct BlockManifest {
    // Size of the original (un-chunked) block payload in bytes.
    original_size: usize,
    // Chunk CIDs in order; concatenating their payloads reproduces the block.
    chunks: Vec<Cid>,
}
/// Aggregate counters describing deduplication effectiveness.
#[derive(Debug, Clone, Default)]
pub struct DedupStats {
    /// Logical blocks currently stored (i.e. manifests held).
    pub blocks_stored: usize,
    /// Total size of all stored blocks before chunk deduplication.
    pub bytes_original: usize,
    /// Bytes actually written to the inner store for unique chunks.
    pub bytes_stored: usize,
    /// Distinct chunks currently held in the index.
    pub unique_chunks: usize,
    /// Chunk writes skipped because an identical chunk already existed.
    pub duplicate_chunks_avoided: usize,
}

impl DedupStats {
    /// Fraction of original bytes saved by deduplication; `0.0` when
    /// nothing has been stored yet.
    pub fn dedup_ratio(&self) -> f64 {
        match self.bytes_original {
            0 => 0.0,
            original => 1.0 - self.bytes_stored as f64 / original as f64,
        }
    }

    /// Absolute number of bytes saved (never underflows below zero).
    pub fn bytes_saved(&self) -> usize {
        self.bytes_original.checked_sub(self.bytes_stored).unwrap_or(0)
    }

    /// Mean stored-chunk size, or `0` when no chunks exist.
    pub fn avg_chunk_size(&self) -> usize {
        self.bytes_stored.checked_div(self.unique_chunks).unwrap_or(0)
    }
}
// Gear table for gear-based content-defined chunking (FastCDC-style rolling
// hashes). Currently unused — `Chunker::find_boundary` uses an FNV-style hash
// instead — hence the `dead_code` allowance.
// NOTE(review): every entry fits in 32 bits even though the table is typed
// `u64`; presumably reserved for a future gear-hash implementation — confirm
// the values against a vetted gear table before switching to it.
#[allow(dead_code)]
const GEAR: [u64; 256] = [
    0x5c95c078, 0x22408989, 0x2d48a214, 0x12842087, 0x530f8afb, 0x2aaa3f86, 0x7f1bd89f, 0x62534467,
    0x22c4b83b, 0x3e36d3e7, 0x4c9fa05b, 0x0b20f0e3, 0x441c8a8c, 0x7cc27988, 0x5505c6c0, 0x3c9ae0da,
    0x153e46cd, 0x0d05f5b5, 0x51c9c3b5, 0x02e57b86, 0x74a8d4ba, 0x6f16cbb5, 0x2ffc27ea, 0x5fa83e0f,
    0x75ab67e2, 0x3ff15813, 0x2ec58ac7, 0x6f1f0520, 0x0c5d7dba, 0x4a9f5e76, 0x4ec58e64, 0x6a470c8e,
    0x40edf2ca, 0x1a1c0c8d, 0x4e32e5e4, 0x6c7a7fda, 0x4b3be9e4, 0x64d8e67b, 0x2ef8ad98, 0x34d9f7e5,
    0x7e7e4a36, 0x1a1c54d1, 0x5e2a9e7a, 0x3e5f0a8e, 0x0e01d1a0, 0x1f31aa27, 0x049c9e3e, 0x7c38f56e,
    0x4b8d9ef0, 0x0b9c4d05, 0x55f59f0d, 0x3e8e02ae, 0x25c46f84, 0x6e6fdc6f, 0x440ae4a7, 0x3e38a0e6,
    0x5b96c3d1, 0x72a06105, 0x52cd5e2d, 0x3d015fb3, 0x4d7c7064, 0x1c8c169c, 0x5c95e834, 0x0c4d9d42,
    0x3c9c8ea3, 0x10a5d9d6, 0x7dcb9d63, 0x3ecf9e96, 0x1f5c9e5f, 0x7e7854c5, 0x48a05ae3, 0x0c4e9419,
    0x6b5c9b6f, 0x7e1a6dc0, 0x3b8f9fe8, 0x6f6e8e3f, 0x39f48adb, 0x7b8d9e72, 0x29e18dc5, 0x7e6c3fc4,
    0x5d9c4ab8, 0x1f6e9dc2, 0x3e8f9fc3, 0x7d9c8ea6, 0x0e1f8d9c, 0x5f9d8e72, 0x3e9f8dcb, 0x7d8e9f72,
    0x2f9d8ea5, 0x6e8f9dc4, 0x3d9f8ec5, 0x7e8d9f63, 0x1f9e8dc3, 0x6d8f9ec4, 0x3e9d8fc5, 0x7d9e8f62,
    0x2e9f8dc4, 0x6f8d9ec5, 0x3d9e8fc3, 0x7e9d8f64, 0x1f8e9dc5, 0x6e9f8dc4, 0x3d8e9fc5, 0x7d9f8e63,
    0x2f8d9ec4, 0x6e8f9dc5, 0x3e9d8fc4, 0x7d8e9f65, 0x1f9d8ec5, 0x6d9f8dc4, 0x3e8d9fc5, 0x7e9f8d62,
    0x2d8e9fc4, 0x6f9d8ec5, 0x3d8f9dc4, 0x7e8d9f66, 0x1e9f8dc5, 0x6d8e9fc4, 0x3f9d8ec5, 0x7d9e8f61,
    0x2f9d8ec4, 0x6e8d9fc5, 0x3d9f8dc4, 0x7e8f9d67, 0x1f8d9ec5, 0x6e9d8fc4, 0x3d8e9fc5, 0x7f9d8e60,
    0x2e8f9dc4, 0x6f9e8dc5, 0x3d8d9fc4, 0x7e9f8d68, 0x1d9e8fc5, 0x6f8d9ec4, 0x3e9f8dc5, 0x7d8e9f5f,
    0x2f8e9dc4, 0x6d9f8ec5, 0x3e8d9fc4, 0x7f9e8d69, 0x1f9d8ec5, 0x6e8f9dc4, 0x3d9e8fc5, 0x7e8d9f5e,
    0x2d9f8ec4, 0x6f8e9dc5, 0x3d8f9fc4, 0x7e9d8e6a, 0x1e8f9dc5, 0x6d9e8fc4, 0x3f8d9ec5, 0x7d9f8e5d,
    0x2f8d9fc4, 0x6e9f8ec5, 0x3d8e9dc4, 0x7f8d9e6b, 0x1f8e9fc5, 0x6e8d9ec4, 0x3d9f8fc5, 0x7e9e8d5c,
    0x2e9d8fc4, 0x6f8e9dc5, 0x3e8f9ec4, 0x7d9e8f6c, 0x1f9e8dc5, 0x6d8f9fc4, 0x3e9d8ec5, 0x7d8f9e5b,
    0x2f9e8dc4, 0x6e8d9fc5, 0x3d9f8ec4, 0x7e8f9d6d, 0x1e9d8fc5, 0x6f8e9dc4, 0x3d8f9ec5, 0x7e9d8f5a,
    0x2d8f9ec4, 0x6e9d8fc5, 0x3f8e9dc4, 0x7d9f8e6e, 0x1f8d9fc5, 0x6e9e8dc4, 0x3d8f9fc5, 0x7f8e9d59,
    0x2e8d9fc4, 0x6f9e8dc5, 0x3d9f8ec4, 0x7e8d9f6f, 0x1d9f8ec5, 0x6f8d9dc4, 0x3e8e9fc5, 0x7d9f8e58,
    0x2f8e9fc4, 0x6d9f8dc5, 0x3e8d9ec4, 0x7f9e8d70, 0x1f8e9dc5, 0x6d8f9ec4, 0x3f9d8fc5, 0x7e8f9d57,
    0x2d9e8fc4, 0x6f8e9dc5, 0x3d8f9ec4, 0x7e9d8f71, 0x1e9f8dc5, 0x6f8d9ec4, 0x3d9e8fc5, 0x7f8d9e56,
    0x2f8d9ec4, 0x6e9f8dc5, 0x3e8d9fc4, 0x7d9e8f72, 0x1f9d8fc5, 0x6e8f9dc4, 0x3d8e9fc5, 0x7e9f8d55,
    0x2e8f9fc4, 0x6d9e8dc5, 0x3f8d9ec4, 0x7e8f9d73, 0x1d9f8fc5, 0x6f8e9dc4, 0x3e8d9fc5, 0x7d8f9e54,
    0x2f9e8dc4, 0x6e8f9fc5, 0x3d9d8ec4, 0x7f8e9d74, 0x1e8d9fc5, 0x6d9f8ec4, 0x3f8e9dc5, 0x7e9d8f53,
    0x2d8e9fc4, 0x6f9d8ec5, 0x3d8f9fc4, 0x7e9f8d75, 0x1f8d9ec5, 0x6e9d8fc4, 0x3d9f8ec5, 0x7f8e9d52,
    0x2e9f8dc4, 0x6d8e9fc5, 0x3f9d8ec4, 0x7d8f9e76, 0x1f9e8dc5, 0x6f8d9ec4, 0x3e9f8fc5, 0x7d9e8f51,
    0x2f8d9fc4, 0x6e9e8dc5, 0x3d8f9ec4, 0x7e8d9f77, 0x1e9f8dc5, 0x6d8f9fc4, 0x3f8e9dc5, 0x7e9d8e50,
];
/// Content-defined chunker that places boundaries with an FNV-style hash.
struct Chunker {
    config: ChunkingConfig,
}

impl Chunker {
    /// Creates a chunker with the given size/mask configuration.
    fn new(config: ChunkingConfig) -> Self {
        Self { config }
    }

    /// Splits `data` into content-defined chunks.
    ///
    /// Inputs no larger than `min_chunk_size` are returned as a single chunk.
    /// Every chunk except possibly the last is between `min_chunk_size` and
    /// `max_chunk_size` bytes; a tail smaller than `min_chunk_size` is
    /// emitted whole as the final chunk.
    fn chunk(&self, data: &[u8]) -> Vec<Vec<u8>> {
        if data.len() <= self.config.min_chunk_size {
            return vec![data.to_vec()];
        }
        let mut chunks = Vec::new();
        let mut start = 0;
        while start < data.len() {
            let remaining = data.len() - start;
            if remaining <= self.config.min_chunk_size {
                // Tail too small to scan for a boundary; keep it whole.
                chunks.push(data[start..].to_vec());
                break;
            }
            let boundary = self.find_boundary(&data[start..]);
            let end = start + boundary;
            chunks.push(data[start..end].to_vec());
            start = end;
        }
        chunks
    }

    /// Returns the length of the next chunk starting at `data[0]`.
    ///
    /// Scans bytes from `min_chunk_size` up to `max_chunk_size` (or the end
    /// of `data`), folding each into an FNV-style hash. Normalized chunking:
    /// before `nc_level` the full mask (more bits) makes a boundary less
    /// likely; past it a mask with one fewer bit roughly doubles the boundary
    /// probability, steering sizes toward the target. Falls back to
    /// `max_scan` when no boundary fires.
    fn find_boundary(&self, data: &[u8]) -> usize {
        let max_scan = self.config.max_chunk_size.min(data.len());
        let min_size = self.config.min_chunk_size.min(data.len());
        // saturating_sub guards against a misconfigured target < min, which
        // previously underflowed (panicking in debug builds).
        let nc_level = min_size + self.config.target_chunk_size.saturating_sub(min_size) / 4;
        let mut hash: u64 = 0;
        // FNV 32-bit prime.
        const PRIME: u64 = 0x01000193;
        let mask_s = self.config.hash_mask as u64;
        let mask_l = (self.config.hash_mask >> 1) as u64;
        for (offset, &byte) in data[min_size..max_scan].iter().enumerate() {
            let idx = min_size + offset;
            hash = hash.wrapping_mul(PRIME) ^ (byte as u64);
            let mask = if idx < nc_level { mask_s } else { mask_l };
            if (hash & mask) == 0 {
                return idx + 1;
            }
        }
        max_scan
    }
}
/// A deduplicating wrapper around another block store.
///
/// Incoming blocks are split into content-defined chunks; identical chunks
/// are written to the inner store only once and shared between blocks via
/// reference counting.
/// NOTE(review): `chunk_index` and `manifests` live only in memory — stored
/// blocks appear lost after restart even though chunks persist; confirm
/// whether persistence of the index is handled elsewhere.
pub struct DedupBlockStore<S> {
    // Underlying store that holds the chunk payloads.
    inner: S,
    // Chunking parameters used when splitting incoming blocks.
    config: ChunkingConfig,
    // Chunk CID -> metadata (ref count, size); concurrent sharded map.
    chunk_index: Arc<DashMap<Cid, ChunkMeta>>,
    // Block CID -> manifest describing how to reassemble the block.
    manifests: Arc<DashMap<Cid, BlockManifest>>,
    // Aggregate deduplication statistics.
    stats: Arc<RwLock<DedupStats>>,
}
impl<S: BlockStore> DedupBlockStore<S> {
pub fn new(inner: S, config: ChunkingConfig) -> Self {
Self {
inner,
config,
chunk_index: Arc::new(DashMap::new()),
manifests: Arc::new(DashMap::new()),
stats: Arc::new(RwLock::new(DedupStats::default())),
}
}
pub fn with_defaults(inner: S) -> Self {
Self::new(inner, ChunkingConfig::default())
}
pub fn stats(&self) -> DedupStats {
self.stats.read().clone()
}
pub fn into_inner(self) -> S {
self.inner
}
pub fn inner(&self) -> &S {
&self.inner
}
async fn store_chunk(&self, chunk_data: &[u8]) -> Result<Cid> {
let chunk_block = Block::new(bytes::Bytes::copy_from_slice(chunk_data))?;
let chunk_cid = *chunk_block.cid();
if let Some(mut meta) = self.chunk_index.get_mut(&chunk_cid) {
meta.ref_count += 1;
let mut stats = self.stats.write();
stats.duplicate_chunks_avoided += 1;
return Ok(meta.cid);
}
self.inner.put(&chunk_block).await?;
self.chunk_index.insert(
chunk_cid,
ChunkMeta {
cid: chunk_cid,
ref_count: 1,
size: chunk_data.len(),
},
);
let mut stats = self.stats.write();
stats.unique_chunks += 1;
stats.bytes_stored += chunk_data.len();
Ok(chunk_cid)
}
async fn reconstruct_block(&self, manifest: &BlockManifest) -> Result<Block> {
let mut data = Vec::with_capacity(manifest.original_size);
for chunk_cid in &manifest.chunks {
let chunk_block = self
.inner
.get(chunk_cid)
.await?
.ok_or_else(|| Error::BlockNotFound(chunk_cid.to_string()))?;
data.extend_from_slice(chunk_block.data());
}
Block::new(bytes::Bytes::from(data))
}
async fn decrement_chunk_refs(&self, chunk_cids: &[Cid]) -> Result<()> {
let mut to_delete = Vec::new();
for cid in chunk_cids {
let should_delete = {
if let Some(mut entry) = self.chunk_index.get_mut(cid) {
entry.ref_count = entry.ref_count.saturating_sub(1);
entry.ref_count == 0
} else {
false
}
};
if should_delete {
to_delete.push(*cid);
}
}
for cid in to_delete {
if let Some((_, meta)) = self.chunk_index.remove(&cid) {
self.inner.delete(&cid).await?;
let mut stats = self.stats.write();
stats.unique_chunks = stats.unique_chunks.saturating_sub(1);
stats.bytes_stored = stats.bytes_stored.saturating_sub(meta.size);
}
}
Ok(())
}
}
#[async_trait]
impl<S: BlockStore> BlockStore for DedupBlockStore<S> {
    /// Stores a block by chunking it; only previously unseen chunks are
    /// written to the inner store. Re-putting an already-stored CID is a
    /// no-op.
    async fn put(&self, block: &Block) -> Result<()> {
        let data = block.data();
        let original_size = data.len();
        let block_cid = *block.cid();
        // NOTE(review): check-then-insert on `manifests` is not atomic; two
        // concurrent puts of the same new block could both chunk it and
        // inflate chunk ref counts. Reads stay correct, but confirm whether
        // concurrent duplicate puts can occur here.
        let is_new_block = !self.manifests.contains_key(&block_cid);
        if !is_new_block {
            return Ok(());
        }
        let chunker = Chunker::new(self.config.clone());
        let chunks = chunker.chunk(data);
        let mut chunk_cids = Vec::new();
        for chunk in chunks {
            let cid = self.store_chunk(&chunk).await?;
            chunk_cids.push(cid);
        }
        let manifest = BlockManifest {
            original_size,
            chunks: chunk_cids,
        };
        self.manifests.insert(block_cid, manifest);
        let mut stats = self.stats.write();
        stats.blocks_stored += 1;
        stats.bytes_original += original_size;
        Ok(())
    }
    /// Reassembles a block from its manifest; `None` if never stored here.
    async fn get(&self, cid: &Cid) -> Result<Option<Block>> {
        // Clone the manifest so no map guard is held across the awaits below.
        let manifest = match self.manifests.get(cid) {
            Some(m) => m.clone(),
            None => return Ok(None),
        };
        let block = self.reconstruct_block(&manifest).await?;
        Ok(Some(block))
    }
    /// A block exists iff a manifest is held for it (chunk CIDs are
    /// internal and not reported).
    async fn has(&self, cid: &Cid) -> Result<bool> {
        Ok(self.manifests.contains_key(cid))
    }
    /// Removes a block's manifest and drops its chunk references, deleting
    /// chunks whose count reaches zero. Unknown CIDs are a no-op.
    async fn delete(&self, cid: &Cid) -> Result<()> {
        let manifest = match self.manifests.remove(cid) {
            Some((_, m)) => m,
            None => return Ok(()),
        };
        self.decrement_chunk_refs(&manifest.chunks).await?;
        let mut stats = self.stats.write();
        stats.blocks_stored = stats.blocks_stored.saturating_sub(1);
        stats.bytes_original = stats.bytes_original.saturating_sub(manifest.original_size);
        Ok(())
    }
    /// Lists logical block CIDs (manifest keys), not internal chunk CIDs.
    fn list_cids(&self) -> Result<Vec<Cid>> {
        let cids: Vec<Cid> = self.manifests.iter().map(|entry| *entry.key()).collect();
        Ok(cids)
    }
    /// Number of logical blocks stored (not the chunk count).
    fn len(&self) -> usize {
        self.manifests.len()
    }
    fn is_empty(&self) -> bool {
        self.manifests.is_empty()
    }
    /// Flushes the inner store; the in-memory index needs no flushing.
    async fn flush(&self) -> Result<()> {
        self.inner.flush().await
    }
    async fn close(&self) -> Result<()> {
        self.inner.close().await
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::blockstore::{BlockStoreConfig, SledBlockStore};
    use std::path::PathBuf;

    // NOTE(review): the async tests below use fixed /tmp paths; concurrent
    // runs on the same machine could collide — consider temp dirs.

    // Sanity-checks the three built-in chunking presets relative to each other.
    #[test]
    fn test_chunking_config() {
        let config = ChunkingConfig::default();
        assert_eq!(config.min_chunk_size, 256 * 1024);
        assert_eq!(config.target_chunk_size, 1024 * 1024);
        let small = ChunkingConfig::small();
        assert!(small.min_chunk_size < config.min_chunk_size);
        let large = ChunkingConfig::large();
        assert!(large.min_chunk_size > config.min_chunk_size);
    }

    // Sub-minimum input must come back as one chunk, and chunking must be
    // deterministic: identical data -> identical chunks -> identical CIDs.
    #[test]
    fn test_chunker_basic() {
        let config = ChunkingConfig {
            min_chunk_size: 16 * 1024,
            target_chunk_size: 64 * 1024,
            max_chunk_size: 128 * 1024,
            hash_mask: 0xFFF,
        };
        let chunker = Chunker::new(config.clone());
        let small_data: Vec<u8> = (0..10240).map(|i| (i % 256) as u8).collect();
        let chunks = chunker.chunk(&small_data);
        assert_eq!(chunks.len(), 1, "10KB data should be 1 chunk (min is 16KB)");
        assert_eq!(chunks[0].len(), 10240);
        let small_data2: Vec<u8> = (0..10240).map(|i| (i % 256) as u8).collect();
        let chunks2 = chunker.chunk(&small_data2);
        assert_eq!(chunks2.len(), 1);
        assert_eq!(
            chunks[0], chunks2[0],
            "Identical data should produce identical chunks"
        );
        let chunk_block1 = Block::new(bytes::Bytes::copy_from_slice(&chunks[0])).unwrap();
        let chunk_block2 = Block::new(bytes::Bytes::copy_from_slice(&chunks2[0])).unwrap();
        assert_eq!(
            chunk_block1.cid(),
            chunk_block2.cid(),
            "Identical chunks should have same CID"
        );
    }

    // Exercises the derived statistics helpers on a hand-built sample.
    #[test]
    fn test_dedup_stats() {
        let stats = DedupStats {
            blocks_stored: 0,
            bytes_original: 1000,
            bytes_stored: 600,
            unique_chunks: 0,
            duplicate_chunks_avoided: 0,
        };
        assert_eq!(stats.dedup_ratio(), 0.4);
        assert_eq!(stats.bytes_saved(), 400);
    }

    // Verifies chunk-size invariants: every chunk except the last stays
    // within [min_chunk_size, max_chunk_size] for varied multi-chunk input.
    #[test]
    fn test_chunker() {
        let config = ChunkingConfig::small();
        let chunker = Chunker::new(config.clone());
        // 32 KiB is below small()'s 64 KiB minimum -> single chunk.
        let small_data = vec![0u8; 32 * 1024];
        let chunks = chunker.chunk(&small_data);
        assert_eq!(chunks.len(), 1);
        let mut large_data = Vec::new();
        for i in 0..500 {
            let block: Vec<u8> = (0..1024).map(|j| ((i * 1024 + j) % 256) as u8).collect();
            large_data.extend_from_slice(&block);
        }
        let chunks = chunker.chunk(&large_data);
        assert!(
            chunks.len() > 1,
            "Expected multiple chunks for 500KB of varied data"
        );
        for (i, chunk) in chunks.iter().enumerate() {
            // The final chunk may legitimately be smaller than the minimum.
            if i < chunks.len() - 1 {
                assert!(
                    chunk.len() >= config.min_chunk_size,
                    "Chunk {} size {} < min {}",
                    i,
                    chunk.len(),
                    config.min_chunk_size
                );
                assert!(
                    chunk.len() <= config.max_chunk_size,
                    "Chunk {} size {} > max {}",
                    i,
                    chunk.len(),
                    config.max_chunk_size
                );
            }
        }
    }

    // Round-trips a single block through the dedup store and checks stats.
    #[tokio::test]
    async fn test_dedup_blockstore_basic() {
        let config = BlockStoreConfig {
            path: PathBuf::from("/tmp/ipfrs-test-dedup-basic"),
            cache_size: 1024 * 1024,
        };
        // Best-effort cleanup of a previous run; error ignored on first run.
        let _ = std::fs::remove_dir_all(&config.path);
        let inner = SledBlockStore::new(config).unwrap();
        let store = DedupBlockStore::with_defaults(inner);
        let data = bytes::Bytes::from(vec![1u8; 100 * 1024]);
        let block = Block::new(data.clone()).unwrap();
        store.put(&block).await.unwrap();
        let retrieved = store.get(block.cid()).await.unwrap().unwrap();
        assert_eq!(retrieved.data(), block.data());
        let stats = store.stats();
        assert_eq!(stats.blocks_stored, 1);
        assert_eq!(stats.bytes_original, 100 * 1024);
    }

    // Stores a block, then a block containing the same payload twice, and
    // checks that shared chunks are detected and both blocks round-trip.
    #[tokio::test]
    async fn test_dedup_duplicate_blocks() {
        let config = BlockStoreConfig {
            path: PathBuf::from("/tmp/ipfrs-test-dedup-duplicates"),
            cache_size: 1024 * 1024,
        };
        let _ = std::fs::remove_dir_all(&config.path);
        let inner = SledBlockStore::new(config).unwrap();
        // Small chunk sizes so the 40 KiB pattern spans at least one chunk.
        let chunk_config = ChunkingConfig {
            min_chunk_size: 32 * 1024,
            target_chunk_size: 64 * 1024,
            max_chunk_size: 128 * 1024,
            hash_mask: 0x1FFF,
        };
        let store = DedupBlockStore::new(inner, chunk_config);
        let mut chunk_data = Vec::new();
        for i in 0..40 {
            let pattern: Vec<u8> = (0..1024).map(|j| ((i * 1024 + j) % 256) as u8).collect();
            chunk_data.extend_from_slice(&pattern);
        }
        let block1 = Block::new(bytes::Bytes::from(chunk_data.clone())).unwrap();
        // block2 is block1's payload twice, so its chunks overlap block1's.
        let mut data2 = chunk_data.clone();
        data2.extend_from_slice(&chunk_data);
        let block2 = Block::new(bytes::Bytes::from(data2)).unwrap();
        store.put(&block1).await.unwrap();
        let stats_after_first = store.stats();
        let first_chunks = stats_after_first.unique_chunks;
        assert!(first_chunks >= 1, "Expected at least 1 chunk");
        store.put(&block2).await.unwrap();
        let stats = store.stats();
        assert_eq!(stats.blocks_stored, 2);
        assert!(
            stats.duplicate_chunks_avoided > 0,
            "Expected some duplicate chunks to be avoided"
        );
        let retrieved1 = store.get(block1.cid()).await.unwrap().unwrap();
        let retrieved2 = store.get(block2.cid()).await.unwrap().unwrap();
        assert_eq!(retrieved1.data(), block1.data());
        assert_eq!(retrieved2.data(), block2.data());
    }

    // Deleting a block must update stats and make the block unretrievable.
    #[tokio::test]
    async fn test_dedup_delete() {
        let config = BlockStoreConfig {
            path: PathBuf::from("/tmp/ipfrs-test-dedup-delete"),
            cache_size: 1024 * 1024,
        };
        let _ = std::fs::remove_dir_all(&config.path);
        let inner = SledBlockStore::new(config).unwrap();
        let store = DedupBlockStore::with_defaults(inner);
        let data = bytes::Bytes::from(vec![3u8; 200 * 1024]);
        let block = Block::new(data).unwrap();
        store.put(&block).await.unwrap();
        let stats_before = store.stats();
        assert_eq!(stats_before.blocks_stored, 1);
        store.delete(block.cid()).await.unwrap();
        let stats_after = store.stats();
        assert_eq!(stats_after.blocks_stored, 0);
        let retrieved = store.get(block.cid()).await.unwrap();
        assert!(retrieved.is_none());
    }

    // Walks reference counting end to end: identical blocks share chunks,
    // distinct blocks add chunks, and deletes release them one by one.
    #[tokio::test]
    async fn test_dedup_reference_counting() {
        let config = BlockStoreConfig {
            path: PathBuf::from("/tmp/ipfrs-test-dedup-refcount"),
            cache_size: 1024 * 1024,
        };
        let _ = std::fs::remove_dir_all(&config.path);
        let inner = SledBlockStore::new(config).unwrap();
        let chunk_config = ChunkingConfig {
            min_chunk_size: 16 * 1024,
            target_chunk_size: 64 * 1024,
            max_chunk_size: 128 * 1024,
            hash_mask: 0xFFF,
        };
        let store = DedupBlockStore::new(inner, chunk_config);
        // data1 == data2 (same CID); data3 differs (different CID).
        // All are 10 KiB, below the 16 KiB minimum -> exactly one chunk each.
        let data1: Vec<u8> = (0..10240).map(|i| (i % 256) as u8).collect();
        let data2 = data1.clone();
        let data3: Vec<u8> = (0..10240).map(|i| ((i + 100) % 256) as u8).collect();
        let block1 = Block::new(bytes::Bytes::from(data1)).unwrap();
        let block2 = Block::new(bytes::Bytes::from(data2)).unwrap();
        let block3 = Block::new(bytes::Bytes::from(data3)).unwrap();
        assert_eq!(block1.cid(), block2.cid());
        assert_ne!(block1.cid(), block3.cid());
        store.put(&block1).await.unwrap();
        let stats1 = store.stats();
        assert_eq!(stats1.unique_chunks, 1, "block1 should be 1 chunk");
        assert_eq!(stats1.blocks_stored, 1);
        store.put(&block2).await.unwrap();
        let stats2 = store.stats();
        assert_eq!(
            stats2.unique_chunks, 1,
            "block2 is same as block1 (same CID)"
        );
        assert_eq!(stats2.blocks_stored, 1, "Still 1 block (same CID)");
        assert_eq!(
            stats2.duplicate_chunks_avoided, 0,
            "No chunking happened for duplicate CID"
        );
        store.put(&block3).await.unwrap();
        let stats3 = store.stats();
        assert_eq!(stats3.unique_chunks, 2, "block3 adds a new unique chunk");
        assert_eq!(stats3.blocks_stored, 2, "Now have 2 different blocks");
        let retrieved1 = store.get(block1.cid()).await.unwrap().unwrap();
        assert_eq!(retrieved1.data(), block1.data());
        let retrieved3 = store.get(block3.cid()).await.unwrap().unwrap();
        assert_eq!(retrieved3.data(), block3.data());
        store.delete(block1.cid()).await.unwrap();
        let stats_after_delete = store.stats();
        assert_eq!(
            stats_after_delete.unique_chunks, 1,
            "Only block3's chunk remains"
        );
        assert_eq!(stats_after_delete.blocks_stored, 1);
        store.delete(block3.cid()).await.unwrap();
        let stats_final = store.stats();
        assert_eq!(stats_final.unique_chunks, 0);
        assert_eq!(stats_final.bytes_stored, 0);
        assert_eq!(stats_final.blocks_stored, 0);
    }
}