1use crate::{
2 RepairingChunkSet,
3 chunk::{self, ProofCarryingChunk},
4 chunkset::{self, ChunkSet},
5 consts::{DECDS_BINCODE_CONFIG, DECDS_NUM_ERASURE_CODED_SHARES},
6 errors::DecdsError,
7 merkle_tree::MerkleTree,
8};
9use blake3;
10use rayon::prelude::*;
11use serde::{Deserialize, Serialize};
12use std::{collections::HashMap, ops::RangeBounds, usize};
13
/// Self-describing metadata for an erasure-coded blob: sizes, content digest,
/// and the Merkle commitments needed to validate any chunk against the blob.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct BlobHeader {
    /// Original (un-padded) blob length in bytes.
    byte_length: usize,
    /// Number of fixed-size chunksets the blob was split into.
    num_chunksets: usize,
    /// blake3 digest of the original blob data (before zero-padding).
    digest: blake3::Hash,
    /// Merkle root commitment over all chunkset root commitments.
    root_commitment: blake3::Hash,
    /// Per-chunkset Merkle root commitments; length must equal `num_chunksets`.
    chunkset_root_commitments: Vec<blake3::Hash>,
}
25
26impl BlobHeader {
27 pub fn get_blob_size(&self) -> usize {
29 self.byte_length
30 }
31
32 pub fn get_num_chunksets(&self) -> usize {
34 self.num_chunksets
35 }
36
37 pub fn get_num_chunks(&self) -> usize {
39 self.get_num_chunksets() * chunkset::ChunkSet::NUM_ERASURE_CODED_CHUNKS
40 }
41
42 pub fn get_blob_digest(&self) -> blake3::Hash {
44 self.digest
45 }
46
47 pub fn get_root_commitment(&self) -> blake3::Hash {
51 self.root_commitment
52 }
53
54 pub fn get_chunkset_commitment(&self, chunkset_id: usize) -> Result<blake3::Hash, DecdsError> {
66 self.chunkset_root_commitments
67 .get(chunkset_id)
68 .and_then(|&v| Some(v))
69 .ok_or(DecdsError::InvalidChunksetId(chunkset_id, self.get_num_chunksets()))
70 }
71
72 pub fn get_chunkset_size(&self, chunkset_id: usize) -> Result<usize, DecdsError> {
85 if chunkset_id < self.get_num_chunksets() {
86 let from = chunkset_id * ChunkSet::BYTE_LENGTH;
87 let to = (from + ChunkSet::BYTE_LENGTH).min(self.get_blob_size());
88 let effective_len = to - from;
89
90 Ok(effective_len)
91 } else {
92 Err(DecdsError::InvalidChunksetId(chunkset_id, self.get_num_chunksets()))
93 }
94 }
95
96 pub fn get_byte_range_for_chunkset(&self, chunkset_id: usize) -> Result<(usize, usize), DecdsError> {
109 if chunkset_id < self.get_num_chunksets() {
110 let from = chunkset_id * ChunkSet::BYTE_LENGTH;
111 let to = (from + ChunkSet::BYTE_LENGTH).min(self.get_blob_size());
112
113 Ok((from, to))
114 } else {
115 Err(DecdsError::InvalidChunksetId(chunkset_id, self.get_num_chunksets()))
116 }
117 }
118
119 pub fn get_chunkset_ids_for_byte_range(&self, byte_range: impl RangeBounds<usize>) -> Result<Vec<usize>, DecdsError> {
133 let start = match byte_range.start_bound() {
134 std::ops::Bound::Unbounded => 0,
135 std::ops::Bound::Included(&x) => x,
136 _ => return Err(DecdsError::InvalidStartBound),
137 };
138
139 let end = match byte_range.end_bound() {
140 std::ops::Bound::Included(&x) => x,
141 std::ops::Bound::Excluded(&x) => {
142 if x == 0 {
143 return Err(DecdsError::InvalidEndBound(x));
144 }
145
146 x - 1
147 }
148 _ => return Err(DecdsError::InvalidEndBound(usize::MAX)),
149 };
150
151 let start_chunkset_id = start / ChunkSet::BYTE_LENGTH;
152 let end_chunkset_id = end / ChunkSet::BYTE_LENGTH;
153
154 if end_chunkset_id >= self.get_num_chunksets() {
155 return Err(DecdsError::InvalidChunksetId(end_chunkset_id, self.get_num_chunksets()));
156 }
157
158 Ok((start_chunkset_id..=end_chunkset_id).collect())
159 }
160
161 pub fn to_bytes(&self) -> Result<Vec<u8>, DecdsError> {
169 bincode::serde::encode_to_vec(self, DECDS_BINCODE_CONFIG).map_err(|err| DecdsError::BlobHeaderSerializationFailed(err.to_string()))
170 }
171
172 pub fn from_bytes(bytes: &[u8]) -> Result<(Self, usize), DecdsError> {
185 match bincode::serde::decode_from_slice::<BlobHeader, bincode::config::Configuration>(bytes, DECDS_BINCODE_CONFIG) {
186 Ok((header, n)) => {
187 if header.num_chunksets != header.chunkset_root_commitments.len() {
188 return Err(DecdsError::BlobHeaderDeserializationFailed(
189 "number of chunksets and root commitments do not match".to_string(),
190 ));
191 }
192
193 Ok((header, n))
194 }
195 Err(err) => Err(DecdsError::BlobHeaderDeserializationFailed(err.to_string())),
196 }
197 }
198
199 pub fn validate_chunk(&self, chunk: &chunk::ProofCarryingChunk) -> bool {
212 chunk.validate_inclusion_in_blob(self.root_commitment)
213 && (chunk.get_chunkset_id() < self.num_chunksets)
214 && chunk.validate_inclusion_in_chunkset(self.chunkset_root_commitments[chunk.get_chunkset_id()])
215 }
216}
217
/// A fully prepared blob: its header plus every erasure-coded chunkset,
/// each carrying blob-level inclusion proofs.
pub struct Blob {
    /// Commitment metadata describing `body`.
    header: BlobHeader,
    /// One chunkset per `ChunkSet::BYTE_LENGTH`-sized slice of the (padded) data.
    body: Vec<chunkset::ChunkSet>,
}
224
225impl Blob {
226 pub fn new(mut data: Vec<u8>) -> Result<Self, DecdsError> {
245 if data.is_empty() {
246 return Err(DecdsError::EmptyDataForBlob);
247 }
248
249 let blob_digest = blake3::hash(&data);
250 let blob_length = data.len();
251
252 let num_chunksets = blob_length.div_ceil(chunkset::ChunkSet::BYTE_LENGTH);
253 let zero_padded_blob_len = num_chunksets * chunkset::ChunkSet::BYTE_LENGTH;
254 data.resize(zero_padded_blob_len, 0);
255
256 let mut chunksets = (0..num_chunksets)
257 .into_par_iter()
258 .map(|chunkset_id| {
259 let offset = chunkset_id * chunkset::ChunkSet::BYTE_LENGTH;
260 let till = offset + chunkset::ChunkSet::BYTE_LENGTH;
261
262 unsafe { chunkset::ChunkSet::new(chunkset_id, data[offset..till].to_vec()).unwrap_unchecked() }
263 })
264 .collect::<Vec<chunkset::ChunkSet>>();
265
266 let merkle_leaves = chunksets.iter().map(|chunkset| chunkset.get_root_commitment()).collect::<Vec<blake3::Hash>>();
267 let merkle_tree = MerkleTree::new(merkle_leaves)?;
268 let commitment = merkle_tree.get_root_commitment();
269
270 chunksets.par_iter_mut().enumerate().for_each(|(chunkset_idx, chunkset)| {
271 let blob_proof = unsafe { merkle_tree.generate_proof(chunkset_idx).unwrap_unchecked() };
272 chunkset.append_blob_inclusion_proof(&blob_proof);
273 });
274
275 Ok(Blob {
276 header: BlobHeader {
277 byte_length: blob_length,
278 num_chunksets,
279 digest: blob_digest,
280 root_commitment: commitment,
281 chunkset_root_commitments: chunksets.iter().map(|chunkset| chunkset.get_root_commitment()).collect(),
282 },
283 body: chunksets,
284 })
285 }
286
287 pub fn get_blob_header(&self) -> &BlobHeader {
289 &self.header
290 }
291
292 pub fn get_share(&self, share_id: usize) -> Result<Vec<ProofCarryingChunk>, DecdsError> {
307 if share_id >= DECDS_NUM_ERASURE_CODED_SHARES {
308 return Err(DecdsError::InvalidErasureCodedShareId(share_id));
309 }
310
311 Ok((0..self.header.num_chunksets)
312 .map(|chunkset_id| unsafe {
313 let chunkset = &self.body[chunkset_id];
314 chunkset.get_chunk(share_id).unwrap_unchecked().clone()
315 })
316 .collect::<Vec<ProofCarryingChunk>>())
317 }
318}
319
/// Incrementally rebuilds a blob from validated proof-carrying chunks.
pub struct RepairingBlob {
    /// Header the incoming chunks are validated against.
    header: BlobHeader,
    /// Per-chunkset repair state, keyed by chunkset id. `Some(..)` is a repair
    /// in progress; `None` marks a chunkset already repaired and consumed.
    body: HashMap<usize, Option<chunkset::RepairingChunkSet>>,
}
326
327impl RepairingBlob {
328 pub fn new(header: BlobHeader) -> Self {
342 RepairingBlob {
343 body: HashMap::from_iter((0..header.get_num_chunksets()).map(|chunkset_id| {
344 (
345 chunkset_id,
346 Some(RepairingChunkSet::new(chunkset_id, unsafe {
347 header.get_chunkset_commitment(chunkset_id).unwrap_unchecked()
348 })),
349 )
350 })),
351 header: header,
352 }
353 }
354
355 pub fn add_chunk(&mut self, chunk: &chunk::ProofCarryingChunk) -> Result<(), DecdsError> {
374 let chunkset_id = chunk.get_chunkset_id();
375
376 match self
377 .body
378 .get_mut(&chunkset_id)
379 .ok_or(DecdsError::InvalidChunksetId(chunkset_id, self.header.get_num_chunksets()))?
380 {
381 Some(chunkset) => {
382 if self.header.validate_chunk(chunk) {
383 if !chunkset.is_ready_to_repair() {
384 chunkset.add_chunk_unvalidated(chunk)
385 } else {
386 Err(DecdsError::ChunksetReadyToRepair(chunkset_id))
387 }
388 } else {
389 Err(DecdsError::InvalidProofInChunk(chunkset_id))
390 }
391 }
392 None => Err(DecdsError::ChunksetAlreadyRepaired(chunkset_id)),
393 }
394 }
395
396 pub fn is_chunkset_ready_to_repair(&self, chunkset_id: usize) -> Result<bool, DecdsError> {
408 Ok(self
409 .body
410 .get(&chunkset_id)
411 .ok_or(DecdsError::InvalidChunksetId(chunkset_id, self.header.get_num_chunksets()))?
412 .as_ref()
413 .is_some_and(|x| x.is_ready_to_repair()))
414 }
415
416 pub fn is_chunkset_already_repaired(&self, chunkset_id: usize) -> Result<bool, DecdsError> {
428 Ok(self
429 .body
430 .get(&chunkset_id)
431 .ok_or(DecdsError::InvalidChunksetId(chunkset_id, self.header.get_num_chunksets()))?
432 .is_none())
433 }
434
435 pub fn get_repaired_chunkset(&mut self, chunkset_id: usize) -> Result<Vec<u8>, DecdsError> {
452 self.is_chunkset_already_repaired(chunkset_id).and_then(|yes| {
453 if yes {
454 Err(DecdsError::ChunksetAlreadyRepaired(chunkset_id))
455 } else {
456 self.is_chunkset_ready_to_repair(chunkset_id).and_then(|yes| unsafe {
457 if yes {
458 self.body
459 .insert(chunkset_id, None)
460 .unwrap_unchecked()
461 .unwrap_unchecked()
462 .repair()
463 .map(|mut repaired| {
464 repaired.truncate(self.header.get_chunkset_size(chunkset_id).unwrap_unchecked());
465 repaired
466 })
467 } else {
468 Err(DecdsError::ChunksetNotYetReadyToRepair(chunkset_id))
469 }
470 })
471 }
472 })
473 }
474}
475
#[cfg(test)]
mod tests {
    use crate::{BlobHeader, ProofCarryingChunk, RepairingBlob, blob::Blob, chunkset::ChunkSet, consts, errors::DecdsError};
    use blake3;
    use rand::Rng;

    /// Property test: every chunk of every erasure-coded share of a freshly
    /// prepared blob must validate against that blob's header.
    #[test]
    fn prop_test_blob_preparation_and_commitment_works() {
        const NUM_TEST_ITERATIONS: usize = 10;

        const MIN_BLOB_DATA_BYTE_LEN: usize = 1usize;
        const MAX_BLOB_DATA_BYTE_LEN: usize = 1usize << 30;

        let mut rng = rand::rng();

        (0..NUM_TEST_ITERATIONS).for_each(|_| {
            let blob_byte_len = rng.random_range(MIN_BLOB_DATA_BYTE_LEN..=MAX_BLOB_DATA_BYTE_LEN);
            let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();

            let blob = Blob::new(blob_data).expect("Must be able to prepare blob");
            let blob_header = blob.get_blob_header();

            assert!(
                (0..consts::DECDS_NUM_ERASURE_CODED_SHARES)
                    .flat_map(|share_id| blob.get_share(share_id).expect("Must be able to get erasure coded shares"))
                    .all(|share| blob_header.validate_chunk(&share))
            );
        });
    }

    /// Commitment lookup succeeds for valid ids and fails past the last chunkset.
    #[test]
    fn test_get_chunkset_commitment() {
        let mut rng = rand::rng();

        // 2.5 chunksets => 3 chunksets, the last one partially filled.
        let blob_byte_len = (ChunkSet::BYTE_LENGTH * 2) + (ChunkSet::BYTE_LENGTH / 2);
        let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();

        let blob = Blob::new(blob_data).unwrap();
        let header = blob.get_blob_header();

        let commitment = header.get_chunkset_commitment(0);
        assert!(commitment.is_ok());

        let commitment = header.get_chunkset_commitment(1);
        assert!(commitment.is_ok());

        // First out-of-range id must be rejected.
        let err = header.get_chunkset_commitment(header.get_num_chunksets());
        assert_eq!(err, Err(DecdsError::InvalidChunksetId(header.get_num_chunksets(), header.get_num_chunksets())));
    }

    /// Full-size chunksets report BYTE_LENGTH; the trailing partial one reports
    /// its effective (un-padded) size.
    #[test]
    fn test_get_chunkset_size() {
        let mut rng = rand::rng();

        let blob_byte_len = (ChunkSet::BYTE_LENGTH * 2) + (ChunkSet::BYTE_LENGTH / 2);
        let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();

        let blob = Blob::new(blob_data).unwrap();
        let header = blob.get_blob_header();

        assert_eq!(header.get_chunkset_size(0).unwrap(), ChunkSet::BYTE_LENGTH);
        assert_eq!(header.get_chunkset_size(1).unwrap(), ChunkSet::BYTE_LENGTH);

        // Last chunkset is only half full.
        assert_eq!(header.get_chunkset_size(2).unwrap(), ChunkSet::BYTE_LENGTH / 2);

        assert_eq!(
            header.get_chunkset_size(header.get_num_chunksets()).unwrap_err(),
            DecdsError::InvalidChunksetId(header.get_num_chunksets(), header.get_num_chunksets())
        );
    }

    /// Byte ranges line up with chunkset boundaries; the last range is clamped
    /// to the real blob size.
    #[test]
    fn test_get_byte_range_for_chunkset() {
        let mut rng = rand::rng();

        let blob_byte_len = (ChunkSet::BYTE_LENGTH * 2) + (ChunkSet::BYTE_LENGTH / 2);
        let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();

        let blob = Blob::new(blob_data).unwrap();
        let header = blob.get_blob_header();

        assert_eq!(header.get_byte_range_for_chunkset(0).unwrap(), (0, ChunkSet::BYTE_LENGTH));

        assert_eq!(
            header.get_byte_range_for_chunkset(1).unwrap(),
            (ChunkSet::BYTE_LENGTH, ChunkSet::BYTE_LENGTH * 2)
        );

        // Trailing partial chunkset: range ends at the blob's true length.
        assert_eq!(header.get_byte_range_for_chunkset(2).unwrap(), (ChunkSet::BYTE_LENGTH * 2, blob_byte_len));

        assert_eq!(
            header.get_byte_range_for_chunkset(header.get_num_chunksets()).unwrap_err(),
            DecdsError::InvalidChunksetId(header.get_num_chunksets(), header.get_num_chunksets())
        );
    }

    /// Exercises range-to-chunkset-id mapping, including inclusive/exclusive
    /// ends, empty ranges, out-of-range ends, and unbounded ends.
    #[test]
    fn test_get_chunkset_ids_for_byte_range() {
        let mut rng = rand::rng();

        let blob_byte_len = (ChunkSet::BYTE_LENGTH * 2) + (ChunkSet::BYTE_LENGTH / 2);
        let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();

        let blob = Blob::new(blob_data).unwrap();
        let header = blob.get_blob_header();

        // Ranges wholly inside one chunkset.
        assert_eq!(header.get_chunkset_ids_for_byte_range(0..10).unwrap(), vec![0]);
        assert_eq!(
            header
                .get_chunkset_ids_for_byte_range(ChunkSet::BYTE_LENGTH + 10..ChunkSet::BYTE_LENGTH + 20)
                .unwrap(),
            vec![1]
        );

        // Ranges straddling chunkset boundaries.
        assert_eq!(
            header.get_chunkset_ids_for_byte_range(10..(ChunkSet::BYTE_LENGTH * 1 + 10)).unwrap(),
            vec![0, 1]
        );
        assert_eq!(header.get_chunkset_ids_for_byte_range(10..blob_byte_len).unwrap(), vec![0, 1, 2]);

        // Exclusive vs inclusive end at a chunkset boundary are equivalent.
        assert_eq!(header.get_chunkset_ids_for_byte_range(0..ChunkSet::BYTE_LENGTH).unwrap(), vec![0]);
        assert_eq!(header.get_chunkset_ids_for_byte_range(0..=(ChunkSet::BYTE_LENGTH - 1)).unwrap(), vec![0]);

        // `0..0` is empty and rejected; `0..=0` covers one byte and is valid.
        assert_eq!(header.get_chunkset_ids_for_byte_range(0..0).unwrap_err(), DecdsError::InvalidEndBound(0));
        assert_eq!(header.get_chunkset_ids_for_byte_range(0..=0).unwrap(), vec![0]);

        // End past the blob must fail with the computed (invalid) end chunkset id.
        let end_beyond_blob = header.get_blob_size() + ChunkSet::BYTE_LENGTH;
        let expected_end_chunkset_id = end_beyond_blob.saturating_sub(1) / ChunkSet::BYTE_LENGTH;
        assert_eq!(
            header.get_chunkset_ids_for_byte_range(0..end_beyond_blob).unwrap_err(),
            DecdsError::InvalidChunksetId(expected_end_chunkset_id, header.get_num_chunksets())
        );

        // Unbounded ends are unsupported.
        assert_eq!(header.get_chunkset_ids_for_byte_range(..).unwrap_err(), DecdsError::InvalidEndBound(usize::MAX));
        assert_eq!(
            header.get_chunkset_ids_for_byte_range(0..).unwrap_err(),
            DecdsError::InvalidEndBound(usize::MAX)
        );
    }

    /// Round-trips a header through bincode and rejects truncated input.
    #[test]
    fn test_blob_header_serialization_deserialization() {
        let mut rng = rand::rng();

        let blob_byte_len = ChunkSet::BYTE_LENGTH * 3;
        let blob_data = (0..blob_byte_len).map(|_| rng.random()).collect::<Vec<u8>>();

        let blob = Blob::new(blob_data).unwrap();
        let original_header = blob.get_blob_header().clone();

        let serialized_header = original_header.to_bytes().expect("Header serialization failed");
        let (deserialized_header, bytes_read) = BlobHeader::from_bytes(&serialized_header).expect("Header deserialization failed");

        assert_eq!(original_header, deserialized_header);
        assert_eq!(serialized_header.len(), bytes_read);

        // A truncated byte stream must fail to deserialize.
        assert!(BlobHeader::from_bytes(&serialized_header[..(serialized_header.len() / 2)]).is_err());
    }

    /// Empty input is rejected up front.
    #[test]
    fn test_blob_new_empty_data() {
        assert_eq!(Blob::new(Vec::new()).err(), Some(DecdsError::EmptyDataForBlob));
    }

    /// Share ids at and beyond the share count are rejected.
    #[test]
    fn test_blob_get_share_invalid_id() {
        let mut rng = rand::rng();

        let blob_data: Vec<u8> = (0..(ChunkSet::BYTE_LENGTH * 2)).map(|_| rng.random()).collect();
        let blob = Blob::new(blob_data).unwrap();

        let invalid_share_id = consts::DECDS_NUM_ERASURE_CODED_SHARES;
        assert_eq!(
            blob.get_share(invalid_share_id).unwrap_err(),
            DecdsError::InvalidErasureCodedShareId(invalid_share_id)
        );

        let large_invalid_share_id = consts::DECDS_NUM_ERASURE_CODED_SHARES + 100;
        assert_eq!(
            blob.get_share(large_invalid_share_id).unwrap_err(),
            DecdsError::InvalidErasureCodedShareId(large_invalid_share_id)
        );
    }

    /// A fresh repairer has one in-progress slot per chunkset, with nothing
    /// ready or repaired yet.
    #[test]
    fn test_repairing_blob_new() {
        let mut rng = rand::rng();

        let blob_data: Vec<u8> = (0..(ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2)).map(|_| rng.random()).collect();
        let blob = Blob::new(blob_data).unwrap();
        let header = blob.get_blob_header().clone();

        let repairer = RepairingBlob::new(header.clone());

        assert_eq!(repairer.header.get_blob_size(), header.get_blob_size());
        assert_eq!(repairer.header.get_num_chunksets(), header.get_num_chunksets());
        assert_eq!(repairer.body.len(), header.get_num_chunksets());

        for i in 0..header.get_num_chunksets() {
            assert!(repairer.body.get(&i).unwrap().is_some());

            assert!(!repairer.is_chunkset_ready_to_repair(i).unwrap());
            assert!(!repairer.is_chunkset_already_repaired(i).unwrap());
        }
    }

    /// Covers add_chunk's error paths: bad proofs, chunkset already ready,
    /// and chunkset already repaired.
    #[test]
    fn test_repairing_blob_add_chunk() {
        let mut rng = rand::rng();

        let blob_data: Vec<u8> = (0..(ChunkSet::BYTE_LENGTH * 2)).map(|_| rng.random()).collect();
        let blob = Blob::new(blob_data).unwrap();

        let blob_header = blob.get_blob_header().clone();
        let mut repairer = RepairingBlob::new(blob_header.clone());

        let all_chunks: Vec<ProofCarryingChunk> = (0..consts::DECDS_NUM_ERASURE_CODED_SHARES)
            .flat_map(|share_id| blob.get_share(share_id).unwrap())
            .collect();

        // A valid chunk is accepted.
        let chunk_to_add = &all_chunks[0];
        assert!(repairer.add_chunk(chunk_to_add).is_ok());

        // A header with a forged root commitment must reject every chunk.
        let mut invalid_header = blob_header.clone();
        invalid_header.root_commitment = blake3::hash(b"fake_root_commitment");

        let mut repairer_invalid_header = RepairingBlob::new(invalid_header);
        assert_eq!(
            repairer_invalid_header.add_chunk(chunk_to_add).unwrap_err(),
            DecdsError::InvalidProofInChunk(chunk_to_add.get_chunkset_id())
        );

        // Fill one chunkset until it is ready to repair.
        let mut repairer_ready = RepairingBlob::new(blob_header.clone());
        let chunkset_id = all_chunks[0].get_chunkset_id();

        for chunk in &all_chunks {
            if chunk.get_chunkset_id() == chunkset_id {
                repairer_ready.add_chunk(chunk).unwrap();

                if repairer_ready.is_chunkset_ready_to_repair(chunkset_id).unwrap() {
                    break;
                }
            }
        }

        assert!(repairer_ready.is_chunkset_ready_to_repair(chunkset_id).unwrap());

        // Any further chunk for a ready chunkset is rejected.
        let extra_chunk = &all_chunks
            .iter()
            .find(|c| c.get_chunkset_id() == chunkset_id && c.get_global_chunk_id() != all_chunks[0].get_global_chunk_id())
            .unwrap();

        assert_eq!(
            repairer_ready.add_chunk(extra_chunk).unwrap_err(),
            DecdsError::ChunksetReadyToRepair(chunkset_id)
        );

        // After repairing, adding chunks for that chunkset is rejected too.
        repairer_ready.get_repaired_chunkset(chunkset_id).unwrap();

        assert!(!repairer_ready.is_chunkset_ready_to_repair(chunkset_id).unwrap());
        assert!(repairer_ready.is_chunkset_already_repaired(chunkset_id).unwrap());
        assert_eq!(
            repairer_ready.add_chunk(chunk_to_add).unwrap_err(),
            DecdsError::ChunksetAlreadyRepaired(chunkset_id)
        );
    }

    /// End-to-end repair: recovered bytes must equal the original data,
    /// including padding truncation on the final partial chunkset.
    #[test]
    fn test_repairing_blob_get_repaired_chunkset() {
        let mut rng = rand::rng();

        let blob_data: Vec<u8> = (0..(ChunkSet::BYTE_LENGTH * 2 + ChunkSet::BYTE_LENGTH / 2)).map(|_| rng.random()).collect();
        let blob = Blob::new(blob_data.clone()).unwrap();

        let blob_header = blob.get_blob_header().clone();
        let mut repairer = RepairingBlob::new(blob_header.clone());

        let all_chunks: Vec<ProofCarryingChunk> = (0..consts::DECDS_NUM_ERASURE_CODED_SHARES)
            .flat_map(|share_id| blob.get_share(share_id).unwrap())
            .collect();

        // Repairing before enough chunks are added must fail.
        let chunkset_id_0 = 0;
        assert_eq!(
            repairer.get_repaired_chunkset(chunkset_id_0).unwrap_err(),
            DecdsError::ChunksetNotYetReadyToRepair(chunkset_id_0)
        );

        for chunk in &all_chunks {
            if chunk.get_chunkset_id() == chunkset_id_0 {
                repairer.add_chunk(chunk).unwrap();

                if repairer.is_chunkset_ready_to_repair(chunkset_id_0).unwrap() {
                    break;
                }
            }
        }
        assert!(repairer.is_chunkset_ready_to_repair(chunkset_id_0).unwrap());

        // Full first chunkset round-trips exactly.
        let repaired_data_0 = repairer.get_repaired_chunkset(chunkset_id_0).unwrap();
        let expected_data_0 = blob_data[0..ChunkSet::BYTE_LENGTH].to_vec();

        assert_eq!(repaired_data_0, expected_data_0);
        assert!(repairer.is_chunkset_already_repaired(chunkset_id_0).unwrap());

        // A second repair of the same chunkset must fail.
        assert_eq!(
            repairer.get_repaired_chunkset(chunkset_id_0).unwrap_err(),
            DecdsError::ChunksetAlreadyRepaired(chunkset_id_0)
        );

        // The partial trailing chunkset must come back without the zero padding.
        let chunkset_id_2 = 2;

        for chunk in &all_chunks {
            if chunk.get_chunkset_id() == chunkset_id_2 {
                repairer.add_chunk(chunk).unwrap();

                if repairer.is_chunkset_ready_to_repair(chunkset_id_2).unwrap() {
                    break;
                }
            }
        }
        assert!(repairer.is_chunkset_ready_to_repair(chunkset_id_2).unwrap());

        let repaired_data_2 = repairer.get_repaired_chunkset(chunkset_id_2).unwrap();
        let expected_data_2 = blob_data[ChunkSet::BYTE_LENGTH * 2..].to_vec();
        assert_eq!(repaired_data_2, expected_data_2);

        // Unknown chunkset ids are rejected.
        let invalid_chunkset_id = blob_header.get_num_chunksets();
        assert_eq!(
            repairer.get_repaired_chunkset(invalid_chunkset_id).unwrap_err(),
            DecdsError::InvalidChunksetId(invalid_chunkset_id, blob_header.get_num_chunksets())
        );
    }
}