1use crate::{
2 RepairingChunkSet,
3 chunk::{self, ProofCarryingChunk},
4 chunkset::{self, ChunkSet},
5 consts::DECDS_BINCODE_CONFIG,
6 errors::DecdsError,
7 merkle_tree::MerkleTree,
8};
9use blake3;
10use rayon::prelude::*;
11use serde::{Deserialize, Serialize};
12use std::{collections::HashMap, ops::RangeBounds, usize};
13
/// Self-describing metadata for an erasure-coded blob: sizes, content digest,
/// and the commitment hierarchy (per-chunkset roots plus the blob-level
/// Merkle root) needed to verify individual chunks.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct BlobHeader {
    /// Byte length of the original blob content (before any zero-padding).
    byte_length: usize,
    /// Number of chunksets the blob was split into.
    num_chunksets: usize,
    /// blake3 digest of the full blob content.
    digest: blake3::Hash,
    /// Merkle root computed over `chunkset_root_commitments`.
    root_commitment: blake3::Hash,
    /// Root commitment of each chunkset, indexed by chunkset id.
    chunkset_root_commitments: Vec<blake3::Hash>,
}
25
26impl BlobHeader {
27 pub fn get_blob_size(&self) -> usize {
29 self.byte_length
30 }
31
32 pub fn get_num_chunksets(&self) -> usize {
34 self.num_chunksets
35 }
36
37 pub fn get_num_chunks(&self) -> usize {
39 self.get_num_chunksets() * chunkset::ChunkSet::NUM_ERASURE_CODED_CHUNKS
40 }
41
42 pub fn get_blob_digest(&self) -> blake3::Hash {
44 self.digest
45 }
46
47 pub fn get_root_commitment(&self) -> blake3::Hash {
51 self.root_commitment
52 }
53
54 pub fn get_chunkset_commitment(&self, chunkset_id: usize) -> Result<blake3::Hash, DecdsError> {
66 self.chunkset_root_commitments
67 .get(chunkset_id)
68 .and_then(|&v| Some(v))
69 .ok_or(DecdsError::InvalidChunksetId(chunkset_id, self.get_num_chunksets()))
70 }
71
72 pub fn get_chunkset_size(&self, chunkset_id: usize) -> Result<usize, DecdsError> {
85 if chunkset_id < self.get_num_chunksets() {
86 let from = chunkset_id * ChunkSet::BYTE_LENGTH;
87 let to = (from + ChunkSet::BYTE_LENGTH).min(self.get_blob_size());
88 let effective_len = to - from;
89
90 Ok(effective_len)
91 } else {
92 Err(DecdsError::InvalidChunksetId(chunkset_id, self.get_num_chunksets()))
93 }
94 }
95
96 pub fn get_byte_range_for_chunkset(&self, chunkset_id: usize) -> Result<(usize, usize), DecdsError> {
109 if chunkset_id < self.get_num_chunksets() {
110 let from = chunkset_id * ChunkSet::BYTE_LENGTH;
111 let to = (from + ChunkSet::BYTE_LENGTH).min(self.get_blob_size());
112
113 Ok((from, to))
114 } else {
115 Err(DecdsError::InvalidChunksetId(chunkset_id, self.get_num_chunksets()))
116 }
117 }
118
119 pub fn get_chunkset_ids_for_byte_range(&self, byte_range: impl RangeBounds<usize>) -> Result<Vec<usize>, DecdsError> {
133 let start = match byte_range.start_bound() {
134 std::ops::Bound::Unbounded => 0,
135 std::ops::Bound::Included(&x) => x,
136 _ => return Err(DecdsError::InvalidStartBound),
137 };
138
139 let end = match byte_range.end_bound() {
140 std::ops::Bound::Included(&x) => x,
141 std::ops::Bound::Excluded(&x) => {
142 if x == 0 {
143 return Err(DecdsError::InvalidEndBound(x));
144 }
145
146 x - 1
147 }
148 _ => return Err(DecdsError::InvalidEndBound(usize::MAX)),
149 };
150
151 let start_chunkset_id = start / ChunkSet::BYTE_LENGTH;
152 let end_chunkset_id = end / ChunkSet::BYTE_LENGTH;
153
154 if end_chunkset_id >= self.get_num_chunksets() {
155 return Err(DecdsError::InvalidChunksetId(end_chunkset_id, self.get_num_chunksets()));
156 }
157
158 Ok((start_chunkset_id..=end_chunkset_id).collect())
159 }
160
161 pub fn to_bytes(&self) -> Result<Vec<u8>, DecdsError> {
169 bincode::serde::encode_to_vec(self, DECDS_BINCODE_CONFIG).map_err(|err| DecdsError::BlobHeaderSerializationFailed(err.to_string()))
170 }
171
172 pub fn from_bytes(bytes: &[u8]) -> Result<(Self, usize), DecdsError> {
185 match bincode::serde::decode_from_slice::<BlobHeader, bincode::config::Configuration>(bytes, DECDS_BINCODE_CONFIG) {
186 Ok((header, n)) => {
187 if header.num_chunksets != header.chunkset_root_commitments.len() {
188 return Err(DecdsError::BlobHeaderDeserializationFailed(
189 "number of chunksets and root commitments do not match".to_string(),
190 ));
191 }
192
193 Ok((header, n))
194 }
195 Err(err) => Err(DecdsError::BlobHeaderDeserializationFailed(err.to_string())),
196 }
197 }
198
199 pub fn validate_chunk(&self, chunk: &chunk::ProofCarryingChunk) -> bool {
212 chunk.validate_inclusion_in_blob(self.root_commitment)
213 && (chunk.get_chunkset_id() < self.num_chunksets)
214 && chunk.validate_inclusion_in_chunkset(self.chunkset_root_commitments[chunk.get_chunkset_id()])
215 }
216}
217
/// Incremental builder that absorbs blob bytes, erasure-codes them chunkset by
/// chunkset, and finally produces a [`BlobHeader`] committing to the whole blob.
pub struct BlobBuilder {
    /// Running blake3 hasher over every byte absorbed so far.
    hasher: blake3::Hasher,
    /// Total number of input bytes absorbed via `update`.
    num_bytes_absorbed: usize,
    /// Number of chunksets fully encoded so far.
    num_chunksets: usize,
    /// Number of valid staged bytes at the front of `buffer`.
    offset: usize,
    /// Staging buffer for a partially-filled chunkset (`ChunkSet::BYTE_LENGTH` bytes).
    buffer: Vec<u8>,
    /// Root commitment of each encoded chunkset, in order.
    chunkset_root_commitments: Vec<blake3::Hash>,
}
230
231impl BlobBuilder {
232 pub fn init() -> Self {
234 BlobBuilder {
235 hasher: blake3::Hasher::new(),
236 num_bytes_absorbed: 0,
237 num_chunksets: 0,
238 offset: 0,
239 buffer: vec![0u8; ChunkSet::BYTE_LENGTH],
240 chunkset_root_commitments: vec![],
241 }
242 }
243
244 pub fn num_bytes_absorbed_so_far(&self) -> usize {
245 self.num_bytes_absorbed
246 }
247
248 pub fn update(&mut self, data: &[u8]) -> Option<Vec<ProofCarryingChunk>> {
268 if data.is_empty() {
269 return None;
270 }
271
272 self.hasher.update(data);
273 self.num_bytes_absorbed += data.len();
274
275 let total_num_bytes = self.offset + data.len();
276 let num_chunksets = total_num_bytes / ChunkSet::BYTE_LENGTH;
277
278 if num_chunksets == 0 {
279 self.buffer[self.offset..total_num_bytes].copy_from_slice(data);
280 self.offset = total_num_bytes;
281
282 return None;
283 } else {
284 let remaining_num_bytes = total_num_bytes - num_chunksets * ChunkSet::BYTE_LENGTH;
285 let dont_use_from_idx = data.len() - remaining_num_bytes;
286
287 let mut chunks = Vec::with_capacity(num_chunksets * ChunkSet::NUM_ERASURE_CODED_CHUNKS);
288
289 if num_chunksets == 1 {
290 self.buffer[self.offset..].copy_from_slice(&data[..dont_use_from_idx]);
291
292 let chunkset_id = self.num_chunksets;
293 let owned_buffer = std::mem::replace(&mut self.buffer, vec![0u8; ChunkSet::BYTE_LENGTH]);
294 let chunkset = unsafe { chunkset::ChunkSet::new(chunkset_id, owned_buffer).unwrap_unchecked() };
295
296 chunks.extend((0..ChunkSet::NUM_ERASURE_CODED_CHUNKS).map(|chunk_id| unsafe { chunkset.get_chunk(chunk_id).unwrap_unchecked().clone() }));
297 self.chunkset_root_commitments.push(chunkset.get_root_commitment());
298
299 self.num_chunksets += 1;
300 } else {
301 let mut working_mem = vec![0u8; num_chunksets * ChunkSet::BYTE_LENGTH];
302 working_mem[..self.offset].copy_from_slice(&self.buffer[..self.offset]);
303 working_mem[self.offset..].copy_from_slice(&data[..dont_use_from_idx]);
304
305 let mut chunkset_root_commitments = Vec::with_capacity(num_chunksets);
306 let mut nested_chunks: Vec<Vec<ProofCarryingChunk>> = Vec::with_capacity(num_chunksets);
307
308 working_mem
309 .par_chunks_exact(ChunkSet::BYTE_LENGTH)
310 .enumerate()
311 .map(|(data_chunk_idx, data_chunk)| {
312 let chunkset_id = self.num_chunksets + data_chunk_idx;
313 let chunkset = unsafe { chunkset::ChunkSet::new(chunkset_id, data_chunk.to_vec()).unwrap_unchecked() };
314
315 (
316 chunkset.get_root_commitment(),
317 (0..ChunkSet::NUM_ERASURE_CODED_CHUNKS)
318 .map(|chunk_id| unsafe { chunkset.get_chunk(chunk_id).unwrap_unchecked().clone() })
319 .collect(),
320 )
321 })
322 .unzip_into_vecs(&mut chunkset_root_commitments, &mut nested_chunks);
323
324 self.chunkset_root_commitments.append(&mut chunkset_root_commitments);
325 chunks.extend(nested_chunks.into_iter().flatten());
326
327 self.num_chunksets += num_chunksets;
328 }
329
330 if remaining_num_bytes > 0 {
331 self.buffer[..remaining_num_bytes].copy_from_slice(&data[dont_use_from_idx..]);
332 self.offset = remaining_num_bytes;
333 }
334
335 Some(chunks)
336 }
337 }
338
339 pub fn finalize(mut self) -> Result<(Vec<ProofCarryingChunk>, BlobHeader), DecdsError> {
355 if self.num_bytes_absorbed == 0 {
356 return Err(DecdsError::EmptyDataForBlob);
357 }
358
359 let chunks = if self.offset != 0 {
360 self.buffer[self.offset..].fill(0);
361
362 let chunkset_id = self.num_chunksets;
363 let chunkset = unsafe { chunkset::ChunkSet::new(chunkset_id, self.buffer).unwrap_unchecked() };
364
365 self.chunkset_root_commitments.push(chunkset.get_root_commitment());
366 self.num_chunksets += 1;
367
368 (0..ChunkSet::NUM_ERASURE_CODED_CHUNKS)
369 .map(|chunk_id| unsafe { chunkset.get_chunk(chunk_id).unwrap_unchecked().clone() })
370 .collect()
371 } else {
372 Vec::new()
373 };
374
375 let blob_digest = self.hasher.finalize();
376
377 let merkle_tree = MerkleTree::new(self.chunkset_root_commitments.clone())?;
378 let blob_root_commitment = merkle_tree.get_root_commitment();
379
380 Ok((
381 chunks,
382 BlobHeader {
383 byte_length: self.num_bytes_absorbed,
384 num_chunksets: self.num_chunksets,
385 digest: blob_digest,
386 root_commitment: blob_root_commitment,
387 chunkset_root_commitments: self.chunkset_root_commitments,
388 },
389 ))
390 }
391}
392
/// Reconstructs a blob chunkset-by-chunkset from validated proof-carrying chunks.
pub struct RepairingBlob {
    /// Header the incoming chunks are validated against.
    header: BlobHeader,
    /// Per-chunkset repair state, keyed by chunkset id; `None` marks a
    /// chunkset that was already repaired (and consumed).
    body: HashMap<usize, Option<chunkset::RepairingChunkSet>>,
}
399
400impl RepairingBlob {
401 pub fn new(header: BlobHeader) -> Self {
415 RepairingBlob {
416 body: HashMap::from_iter((0..header.get_num_chunksets()).map(|chunkset_id| {
417 (
418 chunkset_id,
419 Some(RepairingChunkSet::new(chunkset_id, unsafe {
420 header.get_chunkset_commitment(chunkset_id).unwrap_unchecked()
421 })),
422 )
423 })),
424 header: header,
425 }
426 }
427
428 pub fn add_chunk(&mut self, chunk: &chunk::ProofCarryingChunk) -> Result<(), DecdsError> {
447 let chunkset_id = chunk.get_chunkset_id();
448
449 match self
450 .body
451 .get_mut(&chunkset_id)
452 .ok_or(DecdsError::InvalidChunksetId(chunkset_id, self.header.get_num_chunksets()))?
453 {
454 Some(chunkset) => {
455 if self.header.validate_chunk(chunk) {
456 if !chunkset.is_ready_to_repair() {
457 chunkset.add_chunk_unvalidated(chunk)
458 } else {
459 Err(DecdsError::ChunksetReadyToRepair(chunkset_id))
460 }
461 } else {
462 Err(DecdsError::InvalidProofInChunk(chunkset_id))
463 }
464 }
465 None => Err(DecdsError::ChunksetAlreadyRepaired(chunkset_id)),
466 }
467 }
468
469 pub fn is_chunkset_ready_to_repair(&self, chunkset_id: usize) -> Result<bool, DecdsError> {
481 Ok(self
482 .body
483 .get(&chunkset_id)
484 .ok_or(DecdsError::InvalidChunksetId(chunkset_id, self.header.get_num_chunksets()))?
485 .as_ref()
486 .is_some_and(|x| x.is_ready_to_repair()))
487 }
488
489 pub fn is_chunkset_already_repaired(&self, chunkset_id: usize) -> Result<bool, DecdsError> {
501 Ok(self
502 .body
503 .get(&chunkset_id)
504 .ok_or(DecdsError::InvalidChunksetId(chunkset_id, self.header.get_num_chunksets()))?
505 .is_none())
506 }
507
508 pub fn get_repaired_chunkset(&mut self, chunkset_id: usize) -> Result<Vec<u8>, DecdsError> {
525 self.is_chunkset_already_repaired(chunkset_id).and_then(|yes| {
526 if yes {
527 Err(DecdsError::ChunksetAlreadyRepaired(chunkset_id))
528 } else {
529 self.is_chunkset_ready_to_repair(chunkset_id).and_then(|yes| unsafe {
530 if yes {
531 self.body
532 .insert(chunkset_id, None)
533 .unwrap_unchecked()
534 .unwrap_unchecked()
535 .repair()
536 .map(|mut repaired| {
537 repaired.truncate(self.header.get_chunkset_size(chunkset_id).unwrap_unchecked());
538 repaired
539 })
540 } else {
541 Err(DecdsError::ChunksetNotYetReadyToRepair(chunkset_id))
542 }
543 })
544 }
545 })
546 }
547}
548
549#[cfg(test)]
550mod tests {
551 use crate::{BlobHeader, RepairingBlob, blob::BlobBuilder, chunkset::ChunkSet, errors::DecdsError, merkle_tree::MerkleTree};
552 use blake3;
553 use rand::Rng;
554 use rayon::prelude::*;
555 use std::collections::HashMap;
556
    /// End-to-end property test: random blobs survive the full
    /// prepare -> commit -> prove -> validate pipeline.
    #[test]
    fn prop_test_blob_preparation_and_commitment_works() {
        const NUM_TEST_ITERATIONS: usize = 10;

        const MIN_BLOB_DATA_BYTE_LEN: usize = 1usize;
        // NOTE(review): up to 1 GiB of random data per iteration makes this test
        // potentially very slow and memory-hungry — confirm this bound is intended.
        const MAX_BLOB_DATA_BYTE_LEN: usize = 1usize << 30;

        let mut rng = rand::rng();

        (0..NUM_TEST_ITERATIONS).for_each(|_| {
            let blob_byte_len = rng.random_range(MIN_BLOB_DATA_BYTE_LEN..=MAX_BLOB_DATA_BYTE_LEN);
            let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();

            // Build the blob with a single `update` call followed by `finalize`.
            let (mut chunks, blob_header) = {
                let mut all_chunks = Vec::new();

                let mut blob_builder = BlobBuilder::init();
                if let Some(chunks) = blob_builder.update(&blob_data) {
                    all_chunks.extend(chunks);
                }

                let (chunks, blob_header) = blob_builder.finalize().expect("Must be able to prepare blob");
                all_chunks.extend(chunks);

                (all_chunks, blob_header)
            };

            // Rebuild the blob-level Merkle tree from the per-chunkset commitments.
            let chunkset_root_commitments = (0..blob_header.get_num_chunksets())
                .map(|chunkset_id| unsafe { blob_header.get_chunkset_commitment(chunkset_id).unwrap_unchecked() })
                .collect();

            let merkle_tree = MerkleTree::new(chunkset_root_commitments).expect("Must be able to build Merkle tree");
            let merkle_proofs = (0..blob_header.get_num_chunksets())
                .into_par_iter()
                .map(|chunkset_id| unsafe { (chunkset_id, merkle_tree.generate_proof(chunkset_id).unwrap_unchecked()) })
                .collect::<HashMap<usize, Vec<blake3::Hash>>>();

            // Attach each chunk's inclusion path to the blob root; afterwards
            // every chunk must validate against the header.
            chunks.par_iter_mut().for_each(|chunk| {
                chunk.append_proof_to_blob_root(&merkle_proofs[&chunk.get_chunkset_id()]);
            });

            assert!(chunks.iter().all(|chunk| { blob_header.validate_chunk(chunk) }));
        });
    }
601
602 #[test]
603 fn test_get_chunkset_commitment() {
604 let mut rng = rand::rng();
605
606 let blob_byte_len = ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2;
607 let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();
608
609 let (_, header) = {
610 let mut all_chunks = Vec::new();
611
612 let mut blob_builder = BlobBuilder::init();
613 if let Some(chunks) = blob_builder.update(&blob_data) {
614 all_chunks.extend(chunks);
615 }
616
617 let (chunks, header) = blob_builder.finalize().expect("Must be able to prepare blob");
618 all_chunks.extend(chunks);
619
620 (all_chunks, header)
621 };
622
623 let commitment = header.get_chunkset_commitment(0);
625 assert!(commitment.is_ok());
626
627 let commitment = header.get_chunkset_commitment(1);
628 assert!(commitment.is_ok());
629
630 let err = header.get_chunkset_commitment(header.get_num_chunksets());
632 assert_eq!(err, Err(DecdsError::InvalidChunksetId(header.get_num_chunksets(), header.get_num_chunksets())));
633 }
634
635 #[test]
636 fn test_get_chunkset_size() {
637 let mut rng = rand::rng();
638
639 let blob_byte_len = ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2;
641 let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();
642
643 let (_, header) = {
644 let mut all_chunks = Vec::new();
645
646 let mut blob_builder = BlobBuilder::init();
647 if let Some(chunks) = blob_builder.update(&blob_data) {
648 all_chunks.extend(chunks);
649 }
650
651 let (chunks, header) = blob_builder.finalize().expect("Must be able to prepare blob");
652 all_chunks.extend(chunks);
653
654 (all_chunks, header)
655 };
656
657 assert_eq!(header.get_chunkset_size(0).unwrap(), ChunkSet::BYTE_LENGTH);
659 assert_eq!(header.get_chunkset_size(1).unwrap(), ChunkSet::BYTE_LENGTH);
660
661 assert_eq!(header.get_chunkset_size(2).unwrap(), ChunkSet::BYTE_LENGTH / 2);
663
664 assert_eq!(
666 header.get_chunkset_size(header.get_num_chunksets()).unwrap_err(),
667 DecdsError::InvalidChunksetId(header.get_num_chunksets(), header.get_num_chunksets())
668 );
669 }
670
671 #[test]
672 fn test_get_byte_range_for_chunkset() {
673 let mut rng = rand::rng();
674
675 let blob_byte_len = ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2;
676 let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();
677
678 let (_, header) = {
679 let mut all_chunks = Vec::new();
680
681 let mut blob_builder = BlobBuilder::init();
682 if let Some(chunks) = blob_builder.update(&blob_data) {
683 all_chunks.extend(chunks);
684 }
685
686 let (chunks, header) = blob_builder.finalize().expect("Must be able to prepare blob");
687 all_chunks.extend(chunks);
688
689 (all_chunks, header)
690 };
691
692 assert_eq!(header.get_byte_range_for_chunkset(0).unwrap(), (0, ChunkSet::BYTE_LENGTH));
694
695 assert_eq!(
697 header.get_byte_range_for_chunkset(1).unwrap(),
698 (ChunkSet::BYTE_LENGTH, ChunkSet::BYTE_LENGTH * 2)
699 );
700
701 assert_eq!(header.get_byte_range_for_chunkset(2).unwrap(), (ChunkSet::BYTE_LENGTH * 2, blob_byte_len));
703
704 assert_eq!(
706 header.get_byte_range_for_chunkset(header.get_num_chunksets()).unwrap_err(),
707 DecdsError::InvalidChunksetId(header.get_num_chunksets(), header.get_num_chunksets())
708 );
709 }
710
711 #[test]
712 fn test_get_chunkset_ids_for_byte_range() {
713 let mut rng = rand::rng();
714
715 let blob_byte_len = ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2;
716 let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();
717
718 let (_, header) = {
719 let mut all_chunks = Vec::new();
720
721 let mut blob_builder = BlobBuilder::init();
722 if let Some(chunks) = blob_builder.update(&blob_data) {
723 all_chunks.extend(chunks);
724 }
725
726 let (chunks, header) = blob_builder.finalize().expect("Must be able to prepare blob");
727 all_chunks.extend(chunks);
728
729 (all_chunks, header)
730 };
731
732 assert_eq!(header.get_chunkset_ids_for_byte_range(0..10).unwrap(), vec![0]);
734 assert_eq!(
735 header
736 .get_chunkset_ids_for_byte_range(ChunkSet::BYTE_LENGTH + 10..ChunkSet::BYTE_LENGTH + 20)
737 .unwrap(),
738 vec![1]
739 );
740
741 assert_eq!(
743 header.get_chunkset_ids_for_byte_range(10..(ChunkSet::BYTE_LENGTH * 1 + 10)).unwrap(),
744 vec![0, 1]
745 );
746 assert_eq!(header.get_chunkset_ids_for_byte_range(10..blob_byte_len).unwrap(), vec![0, 1, 2]);
747
748 assert_eq!(header.get_chunkset_ids_for_byte_range(0..ChunkSet::BYTE_LENGTH).unwrap(), vec![0]);
750 assert_eq!(header.get_chunkset_ids_for_byte_range(0..=(ChunkSet::BYTE_LENGTH - 1)).unwrap(), vec![0]);
751
752 assert_eq!(header.get_chunkset_ids_for_byte_range(0..0).unwrap_err(), DecdsError::InvalidEndBound(0));
754 assert_eq!(header.get_chunkset_ids_for_byte_range(0..=0).unwrap(), vec![0]); let end_beyond_blob = header.get_blob_size() + ChunkSet::BYTE_LENGTH;
758 let expected_end_chunkset_id = end_beyond_blob.saturating_sub(1) / ChunkSet::BYTE_LENGTH;
759 assert_eq!(
760 header.get_chunkset_ids_for_byte_range(0..end_beyond_blob).unwrap_err(),
761 DecdsError::InvalidChunksetId(expected_end_chunkset_id, header.get_num_chunksets())
762 );
763
764 assert_eq!(header.get_chunkset_ids_for_byte_range(..).unwrap_err(), DecdsError::InvalidEndBound(usize::MAX));
766 assert_eq!(
767 header.get_chunkset_ids_for_byte_range(0..).unwrap_err(),
768 DecdsError::InvalidEndBound(usize::MAX)
769 );
770 }
771
772 #[test]
773 fn test_blob_header_serialization_deserialization() {
774 let mut rng = rand::rng();
775
776 let blob_byte_len = ChunkSet::BYTE_LENGTH * 3;
777 let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();
778
779 let (_, original_header) = {
780 let mut all_chunks = Vec::new();
781
782 let mut blob_builder = BlobBuilder::init();
783 if let Some(chunks) = blob_builder.update(&blob_data) {
784 all_chunks.extend(chunks);
785 }
786
787 let (chunks, header) = blob_builder.finalize().expect("Must be able to prepare blob");
788 all_chunks.extend(chunks);
789
790 (all_chunks, header)
791 };
792
793 let serialized_header = original_header.to_bytes().expect("Header serialization failed");
794 let (deserialized_header, bytes_read) = BlobHeader::from_bytes(&serialized_header).expect("Header deserialization failed");
795
796 assert_eq!(original_header, deserialized_header);
797 assert_eq!(serialized_header.len(), bytes_read);
798
799 assert!(BlobHeader::from_bytes(&serialized_header[..(serialized_header.len() / 2)]).is_err());
801 }
802
803 #[test]
804 fn test_blob_new_empty_data() {
805 assert_eq!(BlobBuilder::init().finalize().err(), Some(DecdsError::EmptyDataForBlob));
806 }
807
808 #[test]
809 fn test_repairing_blob_new() {
810 let mut rng = rand::rng();
811
812 let blob_byte_len = ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2;
813 let blob_data: Vec<u8> = (0..blob_byte_len).map(|_| rng.random()).collect();
814
815 let (_, header) = {
816 let mut all_chunks = Vec::new();
817
818 let mut blob_builder = BlobBuilder::init();
819 if let Some(chunks) = blob_builder.update(&blob_data) {
820 all_chunks.extend(chunks);
821 }
822
823 let (chunks, header) = blob_builder.finalize().expect("Must be able to prepare blob");
824 all_chunks.extend(chunks);
825
826 (all_chunks, header)
827 };
828
829 let repairer = RepairingBlob::new(header.clone());
830
831 assert_eq!(repairer.header.get_blob_size(), header.get_blob_size());
832 assert_eq!(repairer.header.get_num_chunksets(), header.get_num_chunksets());
833 assert_eq!(repairer.body.len(), header.get_num_chunksets());
834
835 for i in 0..header.get_num_chunksets() {
836 assert!(repairer.body.get(&i).unwrap().is_some());
837
838 assert!(!repairer.is_chunkset_ready_to_repair(i).unwrap());
839 assert!(!repairer.is_chunkset_already_repaired(i).unwrap());
840 }
841 }
842
843 #[test]
844 fn test_repairing_blob_add_chunk() {
845 let mut rng = rand::rng();
846
847 let blob_data: Vec<u8> = (0..ChunkSet::BYTE_LENGTH * 2).map(|_| rng.random()).collect(); let (mut chunks, blob_header) = {
850 let mut all_chunks = Vec::new();
851
852 let mut blob_builder = BlobBuilder::init();
853 if let Some(chunks) = blob_builder.update(&blob_data) {
854 all_chunks.extend(chunks);
855 }
856
857 let (chunks, header) = blob_builder.finalize().expect("Must be able to prepare blob");
858 all_chunks.extend(chunks);
859
860 (all_chunks, header)
861 };
862
863 let chunkset_root_commitments = (0..blob_header.get_num_chunksets())
864 .map(|chunkset_id| unsafe { blob_header.get_chunkset_commitment(chunkset_id).unwrap_unchecked() })
865 .collect();
866
867 let merkle_tree = MerkleTree::new(chunkset_root_commitments).expect("Must be able to build Merkle tree");
868 let merkle_proofs = (0..blob_header.get_num_chunksets())
869 .into_par_iter()
870 .map(|chunkset_id| unsafe { (chunkset_id, merkle_tree.generate_proof(chunkset_id).unwrap_unchecked()) })
871 .collect::<HashMap<usize, Vec<blake3::Hash>>>();
872
873 chunks.par_iter_mut().for_each(|chunk| {
874 chunk.append_proof_to_blob_root(&merkle_proofs[&chunk.get_chunkset_id()]);
875 });
876
877 let mut repairer = RepairingBlob::new(blob_header.clone());
878
879 let chunk_to_add = &chunks[0];
881 assert!(repairer.add_chunk(chunk_to_add).is_ok());
882
883 let mut invalid_header = blob_header.clone();
885 invalid_header.root_commitment = blake3::hash(b"fake_root_commitment");
886
887 let mut repairer_invalid_header = RepairingBlob::new(invalid_header);
888 assert_eq!(
889 repairer_invalid_header.add_chunk(chunk_to_add).unwrap_err(),
890 DecdsError::InvalidProofInChunk(chunk_to_add.get_chunkset_id())
891 );
892
893 let mut repairer_ready = RepairingBlob::new(blob_header.clone());
895 let chunkset_id = chunks[0].get_chunkset_id();
896
897 for chunk in &chunks {
898 if chunk.get_chunkset_id() == chunkset_id {
899 let _ = repairer_ready.add_chunk(chunk);
900
901 if repairer_ready.is_chunkset_ready_to_repair(chunkset_id).unwrap() {
902 break;
903 }
904 }
905 }
906
907 assert!(repairer_ready.is_chunkset_ready_to_repair(chunkset_id).unwrap());
908
909 let extra_chunk = &chunks
911 .iter()
912 .find(|c| c.get_chunkset_id() == chunkset_id && c.get_global_chunk_id() != chunks[0].get_global_chunk_id())
913 .unwrap();
914
915 assert_eq!(
916 repairer_ready.add_chunk(extra_chunk).unwrap_err(),
917 DecdsError::ChunksetReadyToRepair(chunkset_id)
918 );
919
920 repairer_ready.get_repaired_chunkset(chunkset_id).unwrap();
922
923 assert!(!repairer_ready.is_chunkset_ready_to_repair(chunkset_id).unwrap());
924 assert!(repairer_ready.is_chunkset_already_repaired(chunkset_id).unwrap());
925 assert_eq!(
926 repairer_ready.add_chunk(chunk_to_add).unwrap_err(),
927 DecdsError::ChunksetAlreadyRepaired(chunkset_id)
928 );
929 }
930
    /// Repaired chunksets reproduce the original bytes (truncated for the
    /// padded tail); premature, duplicate, and invalid-id repairs all fail.
    #[test]
    fn test_repairing_blob_get_repaired_chunkset() {
        let mut rng = rand::rng();

        // Two full chunksets plus a half-full (padded) last chunkset.
        let blob_data: Vec<u8> = (0..(ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2)).map(|_| rng.random()).collect();
        let original_blob_data_copy = blob_data.clone();

        let (mut chunks, blob_header) = {
            let mut all_chunks = Vec::new();

            let mut blob_builder = BlobBuilder::init();
            if let Some(chunks) = blob_builder.update(&blob_data) {
                all_chunks.extend(chunks);
            }

            let (chunks, header) = blob_builder.finalize().expect("Must be able to prepare blob");
            all_chunks.extend(chunks);

            (all_chunks, header)
        };

        let chunkset_root_commitments = (0..blob_header.get_num_chunksets())
            .map(|chunkset_id| unsafe { blob_header.get_chunkset_commitment(chunkset_id).unwrap_unchecked() })
            .collect();

        let merkle_tree = MerkleTree::new(chunkset_root_commitments).expect("Must be able to build Merkle tree");
        let merkle_proofs = (0..blob_header.get_num_chunksets())
            .into_par_iter()
            .map(|chunkset_id| unsafe { (chunkset_id, merkle_tree.generate_proof(chunkset_id).unwrap_unchecked()) })
            .collect::<HashMap<usize, Vec<blake3::Hash>>>();

        chunks.par_iter_mut().for_each(|chunk| {
            chunk.append_proof_to_blob_root(&merkle_proofs[&chunk.get_chunkset_id()]);
        });

        let mut repairer = RepairingBlob::new(blob_header.clone());

        // Repairing before enough chunks were added must fail.
        let chunkset_id_0 = 0;
        assert_eq!(
            repairer.get_repaired_chunkset(chunkset_id_0).unwrap_err(),
            DecdsError::ChunksetNotYetReadyToRepair(chunkset_id_0)
        );

        for chunk in &chunks {
            if chunk.get_chunkset_id() == chunkset_id_0 {
                let _ = repairer.add_chunk(chunk);

                if repairer.is_chunkset_ready_to_repair(chunkset_id_0).unwrap() {
                    break;
                }
            }
        }
        assert!(repairer.is_chunkset_ready_to_repair(chunkset_id_0).unwrap());

        // A full chunkset repairs to exactly its original bytes.
        let repaired_data_0 = repairer.get_repaired_chunkset(chunkset_id_0).unwrap();
        let expected_data_0 = original_blob_data_copy[0..ChunkSet::BYTE_LENGTH].to_vec();

        assert_eq!(repaired_data_0, expected_data_0);
        assert!(repairer.is_chunkset_already_repaired(chunkset_id_0).unwrap());

        // Repairing the same chunkset twice must fail.
        assert_eq!(
            repairer.get_repaired_chunkset(chunkset_id_0).unwrap_err(),
            DecdsError::ChunksetAlreadyRepaired(chunkset_id_0)
        );

        // The padded last chunkset must come back truncated to its effective length.
        let chunkset_id_2 = 2;

        for chunk in &chunks {
            if chunk.get_chunkset_id() == chunkset_id_2 {
                let _ = repairer.add_chunk(chunk);

                if repairer.is_chunkset_ready_to_repair(chunkset_id_2).unwrap() {
                    break;
                }
            }
        }
        assert!(repairer.is_chunkset_ready_to_repair(chunkset_id_2).unwrap());

        let repaired_data_2 = repairer.get_repaired_chunkset(chunkset_id_2).unwrap();
        let expected_data_2 = original_blob_data_copy[ChunkSet::BYTE_LENGTH * 2..].to_vec();
        assert_eq!(repaired_data_2, expected_data_2);

        // Unknown chunkset ids are reported together with the valid range.
        let invalid_chunkset_id = blob_header.get_num_chunksets();
        assert_eq!(
            repairer.get_repaired_chunkset(invalid_chunkset_id).unwrap_err(),
            DecdsError::InvalidChunksetId(invalid_chunkset_id, blob_header.get_num_chunksets())
        );
    }
1025}