use crate::{
RepairingChunkSet,
chunk::{self, ProofCarryingChunk},
chunkset::{self, ChunkSet},
consts::DECDS_BINCODE_CONFIG,
errors::DecdsError,
merkle_tree::MerkleTree,
};
use blake3;
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, ops::RangeBounds, usize};
/// Metadata committing to an encoded blob: its exact size, content digest,
/// and the Merkle commitments needed to verify any chunk against the blob.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct BlobHeader {
    /// Exact byte length of the original (unpadded) blob data.
    byte_length: usize,
    /// Number of chunksets the blob was split into; must equal
    /// `chunkset_root_commitments.len()` (checked in `from_bytes`).
    num_chunksets: usize,
    /// blake3 digest of the blob's raw bytes.
    digest: blake3::Hash,
    /// Merkle root over all chunkset root commitments.
    root_commitment: blake3::Hash,
    /// Per-chunkset Merkle root commitments, indexed by chunkset id.
    chunkset_root_commitments: Vec<blake3::Hash>,
}
impl BlobHeader {
    /// Returns the exact byte length of the original blob data.
    pub fn get_blob_size(&self) -> usize {
        self.byte_length
    }

    /// Returns the number of chunksets the blob was split into.
    pub fn get_num_chunksets(&self) -> usize {
        self.num_chunksets
    }

    /// Returns the total number of erasure-coded chunks across all chunksets.
    pub fn get_num_chunks(&self) -> usize {
        self.get_num_chunksets() * chunkset::ChunkSet::NUM_ERASURE_CODED_CHUNKS
    }

    /// Returns the blake3 digest of the whole blob's raw bytes.
    pub fn get_blob_digest(&self) -> blake3::Hash {
        self.digest
    }

    /// Returns the Merkle root committing to all chunkset root commitments.
    pub fn get_root_commitment(&self) -> blake3::Hash {
        self.root_commitment
    }

    /// Returns the root commitment of chunkset `chunkset_id`, or
    /// `InvalidChunksetId` if the id is out of range.
    pub fn get_chunkset_commitment(&self, chunkset_id: usize) -> Result<blake3::Hash, DecdsError> {
        self.chunkset_root_commitments
            .get(chunkset_id)
            .copied()
            .ok_or(DecdsError::InvalidChunksetId(chunkset_id, self.get_num_chunksets()))
    }

    /// Returns the effective (unpadded) byte length of chunkset `chunkset_id`.
    /// The final chunkset may be shorter than `ChunkSet::BYTE_LENGTH`.
    pub fn get_chunkset_size(&self, chunkset_id: usize) -> Result<usize, DecdsError> {
        // Delegate so the range arithmetic lives in exactly one place.
        self.get_byte_range_for_chunkset(chunkset_id).map(|(from, to)| to - from)
    }

    /// Returns the half-open byte range `(from, to)` that chunkset
    /// `chunkset_id` covers within the blob.
    pub fn get_byte_range_for_chunkset(&self, chunkset_id: usize) -> Result<(usize, usize), DecdsError> {
        if chunkset_id < self.get_num_chunksets() {
            let from = chunkset_id * ChunkSet::BYTE_LENGTH;
            // The last chunkset is clamped to the blob's true size.
            let to = (from + ChunkSet::BYTE_LENGTH).min(self.get_blob_size());
            Ok((from, to))
        } else {
            Err(DecdsError::InvalidChunksetId(chunkset_id, self.get_num_chunksets()))
        }
    }

    /// Returns the ids of all chunksets overlapping `byte_range`.
    ///
    /// The start bound must be included or unbounded (treated as 0). The end
    /// bound must be bounded: an unbounded end is rejected with
    /// `InvalidEndBound(usize::MAX)`, and an excluded end of 0 (an empty
    /// range) with `InvalidEndBound(0)`.
    pub fn get_chunkset_ids_for_byte_range(&self, byte_range: impl RangeBounds<usize>) -> Result<Vec<usize>, DecdsError> {
        let start = match byte_range.start_bound() {
            std::ops::Bound::Unbounded => 0,
            std::ops::Bound::Included(&x) => x,
            _ => return Err(DecdsError::InvalidStartBound),
        };
        // Normalize the end bound to the last *included* byte index.
        let end = match byte_range.end_bound() {
            std::ops::Bound::Included(&x) => x,
            std::ops::Bound::Excluded(&x) => {
                if x == 0 {
                    return Err(DecdsError::InvalidEndBound(x));
                }
                x - 1
            }
            _ => return Err(DecdsError::InvalidEndBound(usize::MAX)),
        };
        let start_chunkset_id = start / ChunkSet::BYTE_LENGTH;
        let end_chunkset_id = end / ChunkSet::BYTE_LENGTH;
        if end_chunkset_id >= self.get_num_chunksets() {
            return Err(DecdsError::InvalidChunksetId(end_chunkset_id, self.get_num_chunksets()));
        }
        Ok((start_chunkset_id..=end_chunkset_id).collect())
    }

    /// Serializes the header with the crate's bincode configuration.
    pub fn to_bytes(&self) -> Result<Vec<u8>, DecdsError> {
        bincode::serde::encode_to_vec(self, DECDS_BINCODE_CONFIG).map_err(|err| DecdsError::BlobHeaderSerializationFailed(err.to_string()))
    }

    /// Deserializes a header from `bytes`, returning it along with the number
    /// of bytes consumed. Rejects headers whose chunkset count disagrees with
    /// the number of stored commitments, preserving the struct invariant.
    pub fn from_bytes(bytes: &[u8]) -> Result<(Self, usize), DecdsError> {
        match bincode::serde::decode_from_slice::<BlobHeader, bincode::config::Configuration>(bytes, DECDS_BINCODE_CONFIG) {
            Ok((header, n)) => {
                if header.num_chunksets != header.chunkset_root_commitments.len() {
                    return Err(DecdsError::BlobHeaderDeserializationFailed(
                        "number of chunksets and root commitments do not match".to_string(),
                    ));
                }
                Ok((header, n))
            }
            Err(err) => Err(DecdsError::BlobHeaderDeserializationFailed(err.to_string())),
        }
    }

    /// Validates a chunk against this header: its inclusion proof must verify
    /// against the blob root, its chunkset id must be in range, and its proof
    /// must verify against that chunkset's root commitment.
    pub fn validate_chunk(&self, chunk: &chunk::ProofCarryingChunk) -> bool {
        // `.get(..)` subsumes the id bound check and removes the panic path
        // the direct indexing had.
        chunk.validate_inclusion_in_blob(self.root_commitment)
            && self
                .chunkset_root_commitments
                .get(chunk.get_chunkset_id())
                .is_some_and(|&commitment| chunk.validate_inclusion_in_chunkset(commitment))
    }
}
/// Incremental builder that absorbs blob bytes via `update`, emitting
/// erasure-coded chunks as chunksets fill, and finishes with a `BlobHeader`.
pub struct BlobBuilder {
    /// Running blake3 hasher over every byte absorbed so far.
    hasher: blake3::Hasher,
    /// Total number of bytes absorbed via `update`.
    num_bytes_absorbed: usize,
    /// Number of complete chunksets produced so far.
    num_chunksets: usize,
    /// Number of staged bytes currently held at the front of `buffer`.
    offset: usize,
    /// Staging buffer for a partially-filled chunkset
    /// (always `ChunkSet::BYTE_LENGTH` bytes long).
    buffer: Vec<u8>,
    /// Root commitment of each chunkset produced so far, in order.
    chunkset_root_commitments: Vec<blake3::Hash>,
}
impl BlobBuilder {
    /// Creates an empty builder with a zeroed, chunkset-sized staging buffer.
    pub fn init() -> Self {
        BlobBuilder {
            hasher: blake3::Hasher::new(),
            num_bytes_absorbed: 0,
            num_chunksets: 0,
            offset: 0,
            buffer: vec![0u8; ChunkSet::BYTE_LENGTH],
            chunkset_root_commitments: vec![],
        }
    }

    /// Total number of blob bytes absorbed via `update` so far.
    pub fn num_bytes_absorbed_so_far(&self) -> usize {
        self.num_bytes_absorbed
    }

    /// Absorbs `data` into the blob. Whenever enough bytes accumulate to
    /// complete one or more chunksets, the freshly erasure-coded chunks are
    /// returned; otherwise `None`. Any leftover tail is staged in `buffer`
    /// for the next call (or for `finalize`).
    pub fn update(&mut self, data: &[u8]) -> Option<Vec<ProofCarryingChunk>> {
        if data.is_empty() {
            return None;
        }
        self.hasher.update(data);
        self.num_bytes_absorbed += data.len();

        let total_num_bytes = self.offset + data.len();
        let num_chunksets = total_num_bytes / ChunkSet::BYTE_LENGTH;
        if num_chunksets == 0 {
            // Not enough for a full chunkset yet; stage everything.
            self.buffer[self.offset..total_num_bytes].copy_from_slice(data);
            self.offset = total_num_bytes;
            None
        } else {
            let remaining_num_bytes = total_num_bytes - num_chunksets * ChunkSet::BYTE_LENGTH;
            let dont_use_from_idx = data.len() - remaining_num_bytes;
            let mut chunks = Vec::with_capacity(num_chunksets * ChunkSet::NUM_ERASURE_CODED_CHUNKS);
            if num_chunksets == 1 {
                // Exactly one chunkset completes: top up the staging buffer
                // and hand it off, swapping in a fresh zeroed buffer.
                self.buffer[self.offset..].copy_from_slice(&data[..dont_use_from_idx]);
                let chunkset_id = self.num_chunksets;
                let owned_buffer = std::mem::replace(&mut self.buffer, vec![0u8; ChunkSet::BYTE_LENGTH]);
                // SAFETY: `owned_buffer` is exactly `ChunkSet::BYTE_LENGTH` bytes, so construction cannot fail.
                let chunkset = unsafe { chunkset::ChunkSet::new(chunkset_id, owned_buffer).unwrap_unchecked() };
                // SAFETY: `chunk_id` only ranges over valid chunk indices of the chunkset.
                chunks.extend((0..ChunkSet::NUM_ERASURE_CODED_CHUNKS).map(|chunk_id| unsafe { chunkset.get_chunk(chunk_id).unwrap_unchecked().clone() }));
                self.chunkset_root_commitments.push(chunkset.get_root_commitment());
                self.num_chunksets += 1;
            } else {
                // Multiple chunksets complete: lay them out contiguously and
                // erasure-code them in parallel.
                let mut working_mem = vec![0u8; num_chunksets * ChunkSet::BYTE_LENGTH];
                working_mem[..self.offset].copy_from_slice(&self.buffer[..self.offset]);
                working_mem[self.offset..].copy_from_slice(&data[..dont_use_from_idx]);

                let mut chunkset_root_commitments = Vec::with_capacity(num_chunksets);
                let mut nested_chunks: Vec<Vec<ProofCarryingChunk>> = Vec::with_capacity(num_chunksets);
                working_mem
                    .par_chunks_exact(ChunkSet::BYTE_LENGTH)
                    .enumerate()
                    .map(|(data_chunk_idx, data_chunk)| {
                        let chunkset_id = self.num_chunksets + data_chunk_idx;
                        // SAFETY: `par_chunks_exact` guarantees every chunk is exactly `ChunkSet::BYTE_LENGTH` bytes.
                        let chunkset = unsafe { chunkset::ChunkSet::new(chunkset_id, data_chunk.to_vec()).unwrap_unchecked() };
                        (
                            chunkset.get_root_commitment(),
                            (0..ChunkSet::NUM_ERASURE_CODED_CHUNKS)
                                // SAFETY: `chunk_id` only ranges over valid chunk indices of the chunkset.
                                .map(|chunk_id| unsafe { chunkset.get_chunk(chunk_id).unwrap_unchecked().clone() })
                                .collect(),
                        )
                    })
                    .unzip_into_vecs(&mut chunkset_root_commitments, &mut nested_chunks);

                self.chunkset_root_commitments.append(&mut chunkset_root_commitments);
                chunks.extend(nested_chunks.into_iter().flatten());
                self.num_chunksets += num_chunksets;
            }
            // Stage the tail for the next call. The offset is reset
            // unconditionally: previously it was only assigned when bytes
            // remained, so an update landing exactly on a chunkset boundary
            // left a stale non-zero offset behind and `finalize` would emit a
            // spurious all-zero chunkset.
            if remaining_num_bytes > 0 {
                self.buffer[..remaining_num_bytes].copy_from_slice(&data[dont_use_from_idx..]);
            }
            self.offset = remaining_num_bytes;
            Some(chunks)
        }
    }

    /// Flushes any partially-filled chunkset (zero-padded to full length),
    /// then produces the final `BlobHeader` committing to all chunksets.
    ///
    /// # Errors
    /// Returns `EmptyDataForBlob` if no bytes were ever absorbed.
    pub fn finalize(mut self) -> Result<(Vec<ProofCarryingChunk>, BlobHeader), DecdsError> {
        if self.num_bytes_absorbed == 0 {
            return Err(DecdsError::EmptyDataForBlob);
        }
        let chunks = if self.offset != 0 {
            // Zero-pad the staged tail into one last full chunkset.
            self.buffer[self.offset..].fill(0);
            let chunkset_id = self.num_chunksets;
            // SAFETY: the staging buffer is exactly `ChunkSet::BYTE_LENGTH` bytes.
            let chunkset = unsafe { chunkset::ChunkSet::new(chunkset_id, self.buffer).unwrap_unchecked() };
            self.chunkset_root_commitments.push(chunkset.get_root_commitment());
            self.num_chunksets += 1;
            (0..ChunkSet::NUM_ERASURE_CODED_CHUNKS)
                // SAFETY: `chunk_id` only ranges over valid chunk indices of the chunkset.
                .map(|chunk_id| unsafe { chunkset.get_chunk(chunk_id).unwrap_unchecked().clone() })
                .collect()
        } else {
            Vec::new()
        };
        let blob_digest = self.hasher.finalize();
        let merkle_tree = MerkleTree::new(self.chunkset_root_commitments.clone())?;
        let blob_root_commitment = merkle_tree.get_root_commitment();
        Ok((
            chunks,
            BlobHeader {
                byte_length: self.num_bytes_absorbed,
                num_chunksets: self.num_chunksets,
                digest: blob_digest,
                root_commitment: blob_root_commitment,
                chunkset_root_commitments: self.chunkset_root_commitments,
            },
        ))
    }
}
/// Collects proof-carrying chunks for a blob and repairs whole chunksets
/// once enough chunks have been gathered.
pub struct RepairingBlob {
    /// Header the incoming chunks are validated against.
    header: BlobHeader,
    /// Per-chunkset repair state, keyed by chunkset id; a `None` value marks
    /// a chunkset that has already been repaired and consumed.
    body: HashMap<usize, Option<chunkset::RepairingChunkSet>>,
}
impl RepairingBlob {
    /// Creates a repairer for the blob described by `header`, with one
    /// pending `RepairingChunkSet` per chunkset.
    pub fn new(header: BlobHeader) -> Self {
        RepairingBlob {
            body: HashMap::from_iter((0..header.get_num_chunksets()).map(|chunkset_id| {
                let commitment = header
                    .get_chunkset_commitment(chunkset_id)
                    .expect("chunkset_id < num_chunksets, so commitment lookup cannot fail");
                (chunkset_id, Some(RepairingChunkSet::new(chunkset_id, commitment)))
            })),
            header,
        }
    }

    /// Validates `chunk` against the header and feeds it to its chunkset.
    ///
    /// # Errors
    /// - `InvalidChunksetId` if the chunk's chunkset id is out of range.
    /// - `ChunksetAlreadyRepaired` if that chunkset was already repaired.
    /// - `ChunksetReadyToRepair` if it already has enough chunks.
    /// - `InvalidProofInChunk` if the chunk's proofs do not verify.
    pub fn add_chunk(&mut self, chunk: &chunk::ProofCarryingChunk) -> Result<(), DecdsError> {
        let chunkset_id = chunk.get_chunkset_id();
        match self
            .body
            .get_mut(&chunkset_id)
            .ok_or(DecdsError::InvalidChunksetId(chunkset_id, self.header.get_num_chunksets()))?
        {
            Some(chunkset) => {
                if self.header.validate_chunk(chunk) {
                    if !chunkset.is_ready_to_repair() {
                        // Proofs were verified above, so skip re-validation.
                        chunkset.add_chunk_unvalidated(chunk)
                    } else {
                        Err(DecdsError::ChunksetReadyToRepair(chunkset_id))
                    }
                } else {
                    Err(DecdsError::InvalidProofInChunk(chunkset_id))
                }
            }
            None => Err(DecdsError::ChunksetAlreadyRepaired(chunkset_id)),
        }
    }

    /// Whether chunkset `chunkset_id` has enough chunks to be repaired.
    /// A chunkset that was already repaired reports `false`.
    pub fn is_chunkset_ready_to_repair(&self, chunkset_id: usize) -> Result<bool, DecdsError> {
        Ok(self
            .body
            .get(&chunkset_id)
            .ok_or(DecdsError::InvalidChunksetId(chunkset_id, self.header.get_num_chunksets()))?
            .as_ref()
            .is_some_and(|x| x.is_ready_to_repair()))
    }

    /// Whether chunkset `chunkset_id` has already been repaired and consumed.
    pub fn is_chunkset_already_repaired(&self, chunkset_id: usize) -> Result<bool, DecdsError> {
        Ok(self
            .body
            .get(&chunkset_id)
            .ok_or(DecdsError::InvalidChunksetId(chunkset_id, self.header.get_num_chunksets()))?
            .is_none())
    }

    /// Repairs chunkset `chunkset_id` and returns its original bytes,
    /// truncated to the chunkset's effective (unpadded) size. The chunkset is
    /// consumed: its slot is replaced with `None`.
    pub fn get_repaired_chunkset(&mut self, chunkset_id: usize) -> Result<Vec<u8>, DecdsError> {
        if self.is_chunkset_already_repaired(chunkset_id)? {
            return Err(DecdsError::ChunksetAlreadyRepaired(chunkset_id));
        }
        if !self.is_chunkset_ready_to_repair(chunkset_id)? {
            return Err(DecdsError::ChunksetNotYetReadyToRepair(chunkset_id));
        }
        // The two checks above guarantee the slot exists and still holds a
        // chunkset, so this cannot fail.
        let chunkset = self
            .body
            .insert(chunkset_id, None)
            .flatten()
            .expect("slot verified present and unrepaired");
        let mut repaired = chunkset.repair()?;
        // `chunkset_id` was validated above, so the size lookup cannot fail.
        let effective_size = self.header.get_chunkset_size(chunkset_id)?;
        repaired.truncate(effective_size);
        Ok(repaired)
    }
}
#[cfg(test)]
mod tests {
    use crate::{
        BlobHeader, RepairingBlob, blob::BlobBuilder, chunk::ProofCarryingChunk, chunkset::ChunkSet, errors::DecdsError,
        merkle_tree::MerkleTree,
    };
    use blake3;
    use rand::Rng;
    use rayon::prelude::*;
    use std::collections::HashMap;

    /// Runs `data` through a `BlobBuilder`, returning every erasure-coded
    /// chunk together with the finalized header.
    fn build_blob(data: &[u8]) -> (Vec<ProofCarryingChunk>, BlobHeader) {
        let mut all_chunks = Vec::new();
        let mut builder = BlobBuilder::init();
        if let Some(chunks) = builder.update(data) {
            all_chunks.extend(chunks);
        }
        let (tail_chunks, header) = builder.finalize().expect("Must be able to prepare blob");
        all_chunks.extend(tail_chunks);
        (all_chunks, header)
    }

    /// Rebuilds the blob-level Merkle tree from the header's chunkset
    /// commitments and appends an inclusion proof to every chunk.
    fn attach_blob_proofs(chunks: &mut [ProofCarryingChunk], header: &BlobHeader) {
        let chunkset_root_commitments = (0..header.get_num_chunksets())
            .map(|chunkset_id| header.get_chunkset_commitment(chunkset_id).expect("valid chunkset id"))
            .collect();
        let merkle_tree = MerkleTree::new(chunkset_root_commitments).expect("Must be able to build Merkle tree");
        let merkle_proofs = (0..header.get_num_chunksets())
            .into_par_iter()
            .map(|chunkset_id| {
                let proof = merkle_tree.generate_proof(chunkset_id).expect("valid chunkset id");
                (chunkset_id, proof)
            })
            .collect::<HashMap<usize, Vec<blake3::Hash>>>();
        chunks.par_iter_mut().for_each(|chunk| {
            chunk.append_proof_to_blob_root(&merkle_proofs[&chunk.get_chunkset_id()]);
        });
    }

    /// Feeds chunks belonging to `chunkset_id` into `repairer` until it
    /// reports the chunkset as ready to repair.
    fn fill_until_ready(repairer: &mut RepairingBlob, chunks: &[ProofCarryingChunk], chunkset_id: usize) {
        for chunk in chunks {
            if chunk.get_chunkset_id() == chunkset_id {
                let _ = repairer.add_chunk(chunk);
                if repairer.is_chunkset_ready_to_repair(chunkset_id).unwrap() {
                    break;
                }
            }
        }
    }

    #[test]
    fn prop_test_blob_preparation_and_commitment_works() {
        const NUM_TEST_ITERATIONS: usize = 10;
        const MIN_BLOB_DATA_BYTE_LEN: usize = 1usize;
        const MAX_BLOB_DATA_BYTE_LEN: usize = 1usize << 30;

        let mut rng = rand::rng();
        for _ in 0..NUM_TEST_ITERATIONS {
            let blob_byte_len = rng.random_range(MIN_BLOB_DATA_BYTE_LEN..=MAX_BLOB_DATA_BYTE_LEN);
            let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();

            let (mut chunks, blob_header) = build_blob(&blob_data);
            attach_blob_proofs(&mut chunks, &blob_header);

            // Every chunk must validate against the finalized header.
            assert!(chunks.iter().all(|chunk| blob_header.validate_chunk(chunk)));
        }
    }

    #[test]
    fn test_get_chunkset_commitment() {
        let mut rng = rand::rng();
        let blob_byte_len = ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2;
        let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();
        let (_, header) = build_blob(&blob_data);

        assert!(header.get_chunkset_commitment(0).is_ok());
        assert!(header.get_chunkset_commitment(1).is_ok());
        // Ids at or past num_chunksets are rejected.
        assert_eq!(
            header.get_chunkset_commitment(header.get_num_chunksets()),
            Err(DecdsError::InvalidChunksetId(header.get_num_chunksets(), header.get_num_chunksets()))
        );
    }

    #[test]
    fn test_get_chunkset_size() {
        let mut rng = rand::rng();
        let blob_byte_len = ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2;
        let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();
        let (_, header) = build_blob(&blob_data);

        assert_eq!(header.get_chunkset_size(0).unwrap(), ChunkSet::BYTE_LENGTH);
        assert_eq!(header.get_chunkset_size(1).unwrap(), ChunkSet::BYTE_LENGTH);
        // The final chunkset holds only the leftover half chunkset of data.
        assert_eq!(header.get_chunkset_size(2).unwrap(), ChunkSet::BYTE_LENGTH / 2);
        assert_eq!(
            header.get_chunkset_size(header.get_num_chunksets()).unwrap_err(),
            DecdsError::InvalidChunksetId(header.get_num_chunksets(), header.get_num_chunksets())
        );
    }

    #[test]
    fn test_get_byte_range_for_chunkset() {
        let mut rng = rand::rng();
        let blob_byte_len = ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2;
        let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();
        let (_, header) = build_blob(&blob_data);

        assert_eq!(header.get_byte_range_for_chunkset(0).unwrap(), (0, ChunkSet::BYTE_LENGTH));
        assert_eq!(
            header.get_byte_range_for_chunkset(1).unwrap(),
            (ChunkSet::BYTE_LENGTH, ChunkSet::BYTE_LENGTH * 2)
        );
        // The final chunkset's range is clamped to the blob's true size.
        assert_eq!(header.get_byte_range_for_chunkset(2).unwrap(), (ChunkSet::BYTE_LENGTH * 2, blob_byte_len));
        assert_eq!(
            header.get_byte_range_for_chunkset(header.get_num_chunksets()).unwrap_err(),
            DecdsError::InvalidChunksetId(header.get_num_chunksets(), header.get_num_chunksets())
        );
    }

    #[test]
    fn test_get_chunkset_ids_for_byte_range() {
        let mut rng = rand::rng();
        let blob_byte_len = ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2;
        let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();
        let (_, header) = build_blob(&blob_data);

        assert_eq!(header.get_chunkset_ids_for_byte_range(0..10).unwrap(), vec![0]);
        assert_eq!(
            header
                .get_chunkset_ids_for_byte_range(ChunkSet::BYTE_LENGTH + 10..ChunkSet::BYTE_LENGTH + 20)
                .unwrap(),
            vec![1]
        );
        // Ranges straddling a chunkset boundary report both chunksets.
        assert_eq!(
            header.get_chunkset_ids_for_byte_range(10..(ChunkSet::BYTE_LENGTH * 1 + 10)).unwrap(),
            vec![0, 1]
        );
        assert_eq!(header.get_chunkset_ids_for_byte_range(10..blob_byte_len).unwrap(), vec![0, 1, 2]);
        // Exclusive vs. inclusive end bounds agree on the same last byte.
        assert_eq!(header.get_chunkset_ids_for_byte_range(0..ChunkSet::BYTE_LENGTH).unwrap(), vec![0]);
        assert_eq!(header.get_chunkset_ids_for_byte_range(0..=(ChunkSet::BYTE_LENGTH - 1)).unwrap(), vec![0]);
        // Empty range is rejected; inclusive single-byte range is fine.
        assert_eq!(header.get_chunkset_ids_for_byte_range(0..0).unwrap_err(), DecdsError::InvalidEndBound(0));
        assert_eq!(header.get_chunkset_ids_for_byte_range(0..=0).unwrap(), vec![0]);
        // Ranges reaching past the blob report the offending chunkset id.
        let end_beyond_blob = header.get_blob_size() + ChunkSet::BYTE_LENGTH;
        let expected_end_chunkset_id = end_beyond_blob.saturating_sub(1) / ChunkSet::BYTE_LENGTH;
        assert_eq!(
            header.get_chunkset_ids_for_byte_range(0..end_beyond_blob).unwrap_err(),
            DecdsError::InvalidChunksetId(expected_end_chunkset_id, header.get_num_chunksets())
        );
        // Unbounded end bounds are rejected.
        assert_eq!(header.get_chunkset_ids_for_byte_range(..).unwrap_err(), DecdsError::InvalidEndBound(usize::MAX));
        assert_eq!(
            header.get_chunkset_ids_for_byte_range(0..).unwrap_err(),
            DecdsError::InvalidEndBound(usize::MAX)
        );
    }

    #[test]
    fn test_blob_header_serialization_deserialization() {
        let mut rng = rand::rng();
        let blob_byte_len = ChunkSet::BYTE_LENGTH * 3;
        let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();
        let (_, original_header) = build_blob(&blob_data);

        let serialized_header = original_header.to_bytes().expect("Header serialization failed");
        let (deserialized_header, bytes_read) = BlobHeader::from_bytes(&serialized_header).expect("Header deserialization failed");

        assert_eq!(original_header, deserialized_header);
        assert_eq!(serialized_header.len(), bytes_read);
        // Truncated input must fail to deserialize.
        assert!(BlobHeader::from_bytes(&serialized_header[..(serialized_header.len() / 2)]).is_err());
    }

    #[test]
    fn test_blob_new_empty_data() {
        assert_eq!(BlobBuilder::init().finalize().err(), Some(DecdsError::EmptyDataForBlob));
    }

    #[test]
    fn test_repairing_blob_new() {
        let mut rng = rand::rng();
        let blob_byte_len = ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2;
        let blob_data: Vec<u8> = (0..blob_byte_len).map(|_| rng.random()).collect();
        let (_, header) = build_blob(&blob_data);

        let repairer = RepairingBlob::new(header.clone());
        assert_eq!(repairer.header.get_blob_size(), header.get_blob_size());
        assert_eq!(repairer.header.get_num_chunksets(), header.get_num_chunksets());
        assert_eq!(repairer.body.len(), header.get_num_chunksets());
        // A fresh repairer has every chunkset pending: not ready, not repaired.
        for i in 0..header.get_num_chunksets() {
            assert!(repairer.body.get(&i).unwrap().is_some());
            assert!(!repairer.is_chunkset_ready_to_repair(i).unwrap());
            assert!(!repairer.is_chunkset_already_repaired(i).unwrap());
        }
    }

    #[test]
    fn test_repairing_blob_add_chunk() {
        let mut rng = rand::rng();
        let blob_data: Vec<u8> = (0..ChunkSet::BYTE_LENGTH * 2).map(|_| rng.random()).collect();
        let (mut chunks, blob_header) = build_blob(&blob_data);
        attach_blob_proofs(&mut chunks, &blob_header);

        // A fully-proved chunk is accepted.
        let mut repairer = RepairingBlob::new(blob_header.clone());
        let chunk_to_add = &chunks[0];
        assert!(repairer.add_chunk(chunk_to_add).is_ok());

        // A chunk must fail validation against a header with a forged root.
        let mut invalid_header = blob_header.clone();
        invalid_header.root_commitment = blake3::hash(b"fake_root_commitment");
        let mut repairer_invalid_header = RepairingBlob::new(invalid_header);
        assert_eq!(
            repairer_invalid_header.add_chunk(chunk_to_add).unwrap_err(),
            DecdsError::InvalidProofInChunk(chunk_to_add.get_chunkset_id())
        );

        // Once a chunkset is ready, extra chunks for it are rejected.
        let mut repairer_ready = RepairingBlob::new(blob_header.clone());
        let chunkset_id = chunks[0].get_chunkset_id();
        fill_until_ready(&mut repairer_ready, &chunks, chunkset_id);
        assert!(repairer_ready.is_chunkset_ready_to_repair(chunkset_id).unwrap());
        let extra_chunk = chunks
            .iter()
            .find(|c| c.get_chunkset_id() == chunkset_id && c.get_global_chunk_id() != chunks[0].get_global_chunk_id())
            .unwrap();
        assert_eq!(
            repairer_ready.add_chunk(extra_chunk).unwrap_err(),
            DecdsError::ChunksetReadyToRepair(chunkset_id)
        );

        // After repairing, further chunks for that chunkset are rejected too.
        repairer_ready.get_repaired_chunkset(chunkset_id).unwrap();
        assert!(!repairer_ready.is_chunkset_ready_to_repair(chunkset_id).unwrap());
        assert!(repairer_ready.is_chunkset_already_repaired(chunkset_id).unwrap());
        assert_eq!(
            repairer_ready.add_chunk(chunk_to_add).unwrap_err(),
            DecdsError::ChunksetAlreadyRepaired(chunkset_id)
        );
    }

    #[test]
    fn test_repairing_blob_get_repaired_chunkset() {
        let mut rng = rand::rng();
        let blob_data: Vec<u8> = (0..(ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2)).map(|_| rng.random()).collect();
        let (mut chunks, blob_header) = build_blob(&blob_data);
        attach_blob_proofs(&mut chunks, &blob_header);

        let mut repairer = RepairingBlob::new(blob_header.clone());

        // Repairing before enough chunks arrive is an error.
        let chunkset_id_0 = 0;
        assert_eq!(
            repairer.get_repaired_chunkset(chunkset_id_0).unwrap_err(),
            DecdsError::ChunksetNotYetReadyToRepair(chunkset_id_0)
        );

        // Chunkset 0 repairs back to the first full chunkset of input data.
        fill_until_ready(&mut repairer, &chunks, chunkset_id_0);
        assert!(repairer.is_chunkset_ready_to_repair(chunkset_id_0).unwrap());
        let repaired_data_0 = repairer.get_repaired_chunkset(chunkset_id_0).unwrap();
        assert_eq!(repaired_data_0, blob_data[0..ChunkSet::BYTE_LENGTH].to_vec());
        assert!(repairer.is_chunkset_already_repaired(chunkset_id_0).unwrap());
        // Repairing the same chunkset twice is an error.
        assert_eq!(
            repairer.get_repaired_chunkset(chunkset_id_0).unwrap_err(),
            DecdsError::ChunksetAlreadyRepaired(chunkset_id_0)
        );

        // Chunkset 2 is the short tail: its repaired output is truncated.
        let chunkset_id_2 = 2;
        fill_until_ready(&mut repairer, &chunks, chunkset_id_2);
        assert!(repairer.is_chunkset_ready_to_repair(chunkset_id_2).unwrap());
        let repaired_data_2 = repairer.get_repaired_chunkset(chunkset_id_2).unwrap();
        assert_eq!(repaired_data_2, blob_data[ChunkSet::BYTE_LENGTH * 2..].to_vec());

        // Out-of-range chunkset ids are rejected.
        let invalid_chunkset_id = blob_header.get_num_chunksets();
        assert_eq!(
            repairer.get_repaired_chunkset(invalid_chunkset_id).unwrap_err(),
            DecdsError::InvalidChunksetId(invalid_chunkset_id, blob_header.get_num_chunksets())
        );
    }
}