commonware_coding/reed_solomon/
mod.rs

//! A SIMD-optimized Reed-Solomon coder that emits [Chunk]s that can be proven against a [bmt].
//!
//! # Behavior
//!
//! The encoder takes input data, splits it into `k` data shards, and generates `m` recovery
//! shards using [Reed-Solomon encoding](https://en.wikipedia.org/wiki/Reed%E2%80%93Solomon_error_correction).
//! All `n = k + m` shards are then used to build a [bmt], producing a single root hash. Each shard
//! is packaged as a [Chunk] containing the shard data, its index, and a Merkle proof against the [bmt] root.
//!
//! ## Encoding
//!
//! ```text
//!               +--------------------------------------+
//!               |         Original Data (Bytes)        |
//!               +--------------------------------------+
//!                                  |
//!                                  v
//!               +--------------------------------------+
//!               | [Length Prefix | Original Data...]   |
//!               +--------------------------------------+
//!                                  |
//!                                  v
//!              +----------+ +----------+    +-----------+
//!              |  Shard 0 | |  Shard 1 | .. | Shard k-1 |  (Data Shards)
//!              +----------+ +----------+    +-----------+
//!                     |            |             |
//!                     |            |             |
//!                     +------------+-------------+
//!                                  |
//!                                  v
//!                        +------------------+
//!                        | Reed-Solomon     |
//!                        | Encoder (k, m)   |
//!                        +------------------+
//!                                  |
//!                                  v
//!              +----------+ +----------+    +-----------+
//!              |  Shard k | | Shard k+1| .. | Shard n-1 |  (Recovery Shards)
//!              +----------+ +----------+    +-----------+
//! ```
//!
//! ## Merkle Tree Construction
//!
//! All `n` shards (data and recovery) are hashed and used as leaves to build a [bmt].
//!
//! ```text
//! Shards:    [Shard 0, Shard 1, ..., Shard n-1]
//!             |        |              |
//!             v        v              v
//! Hashes:    [H(S_0), H(S_1), ..., H(S_n-1)]
//!             \       / \       /
//!              \     /   \     /
//!               +---+     +---+
//!                 |         |
//!                 \         /
//!                  \       /
//!                   +-----+
//!                      |
//!                      v
//!                +----------+
//!                |   Root   |
//!                +----------+
//! ```
//!
//! The final output is the [bmt] root and a set of `n` [Chunk]s.
//!
//! `(Root, [Chunk 0, Chunk 1, ..., Chunk n-1])`
//!
//! Each [Chunk] contains:
//! - `shard`: The shard data (original or recovery).
//! - `index`: The shard's original index (0 to n-1).
//! - `proof`: A Merkle proof of the shard's inclusion in the [bmt].
//!
//! ## Decoding and Verification
//!
//! The decoder requires any `k` [Chunk]s to reconstruct the original data.
//! 1. Each [Chunk]'s Merkle proof is verified against the [bmt] root.
//! 2. The shards from the valid [Chunk]s are used to reconstruct the original `k` data shards.
//! 3. To ensure consistency, the recovered data shards are re-encoded, and a new [bmt] root is
//!    generated. This new root MUST match the original [bmt] root. This prevents attacks where
//!    an adversary provides a valid set of chunks that decode to different data.
//! 4. If the roots match, the original data is extracted from the reconstructed data shards.
//!
//! # Example
//!
//! ```rust
//! use commonware_coding::reed_solomon::{encode, decode};
//! use commonware_cryptography::Sha256;
//!
//! // Generate data to encode
//! let data = b"Hello, world! This is a test of Reed-Solomon encoding.";
//!
//! // Configure the encoder to generate 7 total chunks, with a minimum of 4 required for decoding
//! let total = 7u16;
//! let min = 4u16;
//!
//! // Encode the data
//! let (root, chunks) = encode::<Sha256>(total, min, data.to_vec()).unwrap();
//!
//! // Pick a few chunks to recover from (a mix of original and recovery shards)
//! let some_chunks = vec![
//!     chunks[0].clone(), // original
//!     chunks[2].clone(), // original
//!     chunks[5].clone(), // recovery
//!     chunks[6].clone(), // recovery
//! ];
//!
//! // Decode the data from the subset of chunks
//! let decoded_data = decode::<Sha256>(total, min, &root, some_chunks).unwrap();
//!
//! // Verify that the decoded data matches the original data
//! assert_eq!(decoded_data, data);
//! ```

use bytes::{Buf, BufMut};
use commonware_codec::{EncodeSize, FixedSize, Read, ReadExt, ReadRangeExt, Write};
use commonware_cryptography::Hasher;
use commonware_storage::bmt::{self, Builder};
use reed_solomon_simd::{Error as RsError, ReedSolomonDecoder, ReedSolomonEncoder};
use std::collections::HashSet;
use thiserror::Error;

/// Errors that can occur when interacting with the Reed-Solomon coder.
#[derive(Error, Debug)]
pub enum Error {
    #[error("reed-solomon error: {0}")]
    ReedSolomon(#[from] RsError),
    #[error("inconsistent")]
    Inconsistent,
    #[error("invalid proof")]
    InvalidProof,
    #[error("not enough chunks")]
    NotEnoughChunks,
    #[error("duplicate chunk index: {0}")]
    DuplicateIndex(u16),
    #[error("invalid data length: {0}")]
    InvalidDataLength(usize),
    #[error("invalid index: {0}")]
    InvalidIndex(u16),
}

/// Data that has been encoded using a Reed-Solomon coder and inserted into a [bmt].
#[derive(Clone)]
pub struct Chunk<H: Hasher> {
    /// The shard of encoded data.
    pub shard: Vec<u8>,

    /// The index of the shard within the full set of encoded shards (data and recovery).
    pub index: u16,

    /// The proof of the shard in the [bmt] at the given index.
    pub proof: bmt::Proof<H>,
}

impl<H: Hasher> Chunk<H> {
    /// Create a new [Chunk] from the given shard, index, and proof.
    pub fn new(shard: Vec<u8>, index: u16, proof: bmt::Proof<H>) -> Self {
        Self {
            shard,
            index,
            proof,
        }
    }

    /// Verify a [Chunk] against the given root.
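    ///
    /// # Example
    ///
    /// A minimal sketch mirroring the module-level example:
    ///
    /// ```rust
    /// use commonware_coding::reed_solomon::encode;
    /// use commonware_cryptography::Sha256;
    ///
    /// let (root, chunks) = encode::<Sha256>(5, 3, b"Hello, world!".to_vec()).unwrap();
    ///
    /// // Every chunk produced by `encode` verifies against the returned root.
    /// assert!(chunks.iter().all(|chunk| chunk.verify(&root)));
    /// ```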
    pub fn verify(&self, root: &H::Digest) -> bool {
        // Compute shard digest
        let mut hasher = H::new();
        hasher.update(&self.shard);
        let shard_digest = hasher.finalize();

        // Verify proof
        self.proof
            .verify(&mut hasher, &shard_digest, self.index as u32, root)
            .is_ok()
    }
}

impl<H: Hasher> Write for Chunk<H> {
    fn write(&self, writer: &mut impl BufMut) {
        self.shard.write(writer);
        self.index.write(writer);
        self.proof.write(writer);
    }
}

impl<H: Hasher> Read for Chunk<H> {
    /// The maximum allowed size of a shard, in bytes.
    type Cfg = usize;

    fn read_cfg(reader: &mut impl Buf, cfg: &Self::Cfg) -> Result<Self, commonware_codec::Error> {
        let shard = Vec::<u8>::read_range(reader, ..=*cfg)?;
        let index = u16::read(reader)?;
        let proof = bmt::Proof::<H>::read(reader)?;
        Ok(Self {
            shard,
            index,
            proof,
        })
    }
}

impl<H: Hasher> EncodeSize for Chunk<H> {
    fn encode_size(&self) -> usize {
        self.shard.encode_size() + self.index.encode_size() + self.proof.encode_size()
    }
}

/// Prepare data for encoding.
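///
/// The data is prefixed with its length as a big-endian `u32`, split into `k`
/// equally sized shards, and zero-padded. The shard length is rounded up to an
/// even number of bytes (required for optimizations in `reed-solomon-simd`).
///
/// As a worked example of the arithmetic below (illustrative values): for 55
/// bytes of data and `k = 4`, the prefixed length is `4 + 55 = 59` bytes,
/// `59.div_ceil(4) = 15`, and the final shard length is rounded up to 16.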
fn prepare_data(data: Vec<u8>, k: usize, m: usize) -> Vec<Vec<u8>> {
    // Compute shard length
    let data_len = data.len();
    let prefixed_len = u32::SIZE + data_len;
    let mut shard_len = prefixed_len.div_ceil(k);

    // Ensure shard length is even (required for optimizations in `reed-solomon-simd`)
    if shard_len % 2 != 0 {
        shard_len += 1;
    }

    // Prepare data
    let length_bytes = (data_len as u32).to_be_bytes();
    let mut src = length_bytes.into_iter().chain(data);
    let mut shards = Vec::with_capacity(k + m); // reserve room for the recovery shards appended later
    for _ in 0..k {
        let mut shard = Vec::with_capacity(shard_len);
        for _ in 0..shard_len {
            shard.push(src.next().unwrap_or(0));
        }
        shards.push(shard);
    }
    shards
}

/// Extract data from encoded shards.
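///
/// Reverses [prepare_data]: concatenates the first `k` shards, reads the
/// big-endian `u32` length prefix, and returns that many bytes of data
/// (discarding any zero padding).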
fn extract_data(shards: Vec<Vec<u8>>, k: usize) -> Vec<u8> {
    // Concatenate shards
    let mut data = shards.into_iter().take(k).flatten();

    // Extract length prefix
    let data_len = (&mut data)
        .take(u32::SIZE)
        .collect::<Vec<_>>()
        .try_into()
        .expect("insufficient data");
    let data_len = u32::from_be_bytes(data_len) as usize;

    // Extract data
    data.take(data_len).collect()
}

/// Encode data using a Reed-Solomon coder and insert it into a [bmt].
///
/// # Parameters
///
/// - `total`: The total number of chunks to generate.
/// - `min`: The minimum number of chunks required to decode the data.
/// - `data`: The data to encode.
///
/// # Returns
///
/// - `root`: The root of the [bmt].
/// - `chunks`: [Chunk]s of encoded data (that can be proven against `root`).
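///
/// # Panics
///
/// Panics if `total <= min` or `min == 0`.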
pub fn encode<H: Hasher>(
    total: u16,
    min: u16,
    data: Vec<u8>,
) -> Result<(H::Digest, Vec<Chunk<H>>), Error> {
    // Validate parameters
    assert!(total > min);
    assert!(min > 0);
    let n = total as usize;
    let k = min as usize;
    let m = n - k;
    if data.len() > u32::MAX as usize {
        return Err(Error::InvalidDataLength(data.len()));
    }

    // Prepare data
    let mut shards = prepare_data(data, k, m);
    let shard_len = shards[0].len();

    // Create encoder
    let mut encoder = ReedSolomonEncoder::new(k, m, shard_len).map_err(Error::ReedSolomon)?;
    for shard in &shards {
        encoder
            .add_original_shard(shard)
            .map_err(Error::ReedSolomon)?;
    }

    // Compute recovery shards
    let encoding = encoder.encode().map_err(Error::ReedSolomon)?;
    let recovery_shards: Vec<Vec<u8>> = encoding
        .recovery_iter()
        .map(|shard| shard.to_vec())
        .collect();
    shards.extend(recovery_shards);

    // Build Merkle tree
    let mut builder = Builder::<H>::new(n);
    let mut hasher = H::new();
    for shard in &shards {
        builder.add(&{
            hasher.update(shard);
            hasher.finalize()
        });
    }
    let tree = builder.build();
    let root = tree.root();

    // Generate chunks
    let mut chunks = Vec::with_capacity(n);
    for (i, shard) in shards.into_iter().enumerate() {
        let proof = tree.proof(i as u32).map_err(|_| Error::InvalidProof)?;
        chunks.push(Chunk::new(shard, i as u16, proof));
    }

    Ok((root, chunks))
}

/// Decode data from a set of [Chunk]s.
///
/// # Parameters
///
/// - `total`: The total number of chunks generated during encoding.
/// - `min`: The minimum number of chunks required to decode the data.
/// - `root`: The root of the [bmt].
/// - `chunks`: [Chunk]s of encoded data (that can be proven against `root`).
///
/// # Returns
///
/// - `data`: The decoded data.
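///
/// # Panics
///
/// Panics if `total <= min` or `min == 0`.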
pub fn decode<H: Hasher>(
    total: u16,
    min: u16,
    root: &H::Digest,
    chunks: Vec<Chunk<H>>,
) -> Result<Vec<u8>, Error> {
    // Validate parameters
    assert!(total > min);
    assert!(min > 0);
    let n = total as usize;
    let k = min as usize;
    let m = n - k;
    if chunks.len() < k {
        return Err(Error::NotEnoughChunks);
    }

    // Verify chunks
    let shard_len = chunks[0].shard.len();
    let mut seen = HashSet::new();
    let mut provided_originals: Vec<(usize, Vec<u8>)> = Vec::new();
    let mut provided_recoveries: Vec<(usize, Vec<u8>)> = Vec::new();
    for chunk in chunks {
        // Check for duplicate index
        let index = chunk.index;
        if index >= total {
            return Err(Error::InvalidIndex(index));
        }
        if seen.contains(&index) {
            return Err(Error::DuplicateIndex(index));
        }
        seen.insert(index);

        // Verify Merkle proof
        if !chunk.verify(root) {
            return Err(Error::InvalidProof);
        }

        // Add to provided shards
        if index < min {
            provided_originals.push((index as usize, chunk.shard));
        } else {
            provided_recoveries.push((index as usize - k, chunk.shard));
        }
    }

    // Decode original data
    let mut decoder = ReedSolomonDecoder::new(k, m, shard_len).map_err(Error::ReedSolomon)?;
    for (idx, ref shard) in &provided_originals {
        decoder
            .add_original_shard(*idx, shard)
            .map_err(Error::ReedSolomon)?;
    }
    for (idx, ref shard) in &provided_recoveries {
        decoder
            .add_recovery_shard(*idx, shard)
            .map_err(Error::ReedSolomon)?;
    }
    let decoding = decoder.decode().map_err(Error::ReedSolomon)?;

    // Reconstruct all original shards
    let mut shards = Vec::with_capacity(n);
    shards.resize(k, Vec::new());
    for (idx, shard) in provided_originals {
        shards[idx] = shard;
    }
    for (idx, shard) in decoding.restored_original_iter() {
        shards[idx] = shard.to_vec();
    }

    // Encode recovered data to get recovery shards
    let mut encoder = ReedSolomonEncoder::new(k, m, shard_len).map_err(Error::ReedSolomon)?;
    for shard in shards.iter().take(k) {
        encoder
            .add_original_shard(shard)
            .map_err(Error::ReedSolomon)?;
    }
    let encoding = encoder.encode().map_err(Error::ReedSolomon)?;
    let recovery_shards: Vec<Vec<u8>> = encoding
        .recovery_iter()
        .map(|shard| shard.to_vec())
        .collect();
    shards.extend(recovery_shards);

    // Build Merkle tree
    let mut builder = Builder::<H>::new(n);
    let mut hasher = H::new();
    for shard in &shards {
        builder.add(&{
            hasher.update(shard);
            hasher.finalize()
        });
    }
    let computed_tree = builder.build();

    // Confirm root is consistent
    if computed_tree.root() != *root {
        return Err(Error::Inconsistent);
    }

    // Extract original data
    Ok(extract_data(shards, k))
}

#[cfg(test)]
mod tests {
    use super::*;
    use commonware_cryptography::Sha256;

    #[test]
    fn test_basic() {
        let data = b"Hello, Reed-Solomon!";
        let total = 7u16;
        let min = 4u16;

        // Encode the data
        let (root, chunks) = encode::<Sha256>(total, min, data.to_vec()).unwrap();
        assert_eq!(chunks.len(), total as usize);

        // Try to decode with exactly min (all original shards)
        let minimal = chunks.into_iter().take(min as usize).collect();
        let decoded = decode::<Sha256>(total, min, &root, minimal).unwrap();
        assert_eq!(decoded, data);
    }

    #[test]
    fn test_moderate() {
        let data = b"Testing with more pieces than minimum";
        let total = 10u16;
        let min = 4u16;

        // Encode the data
        let (root, chunks) = encode::<Sha256>(total, min, data.to_vec()).unwrap();

        // Try to decode with min (all original shards)
        let minimal = chunks.into_iter().take(min as usize).collect();
        let decoded = decode::<Sha256>(total, min, &root, minimal).unwrap();
        assert_eq!(decoded, data);
    }

    #[test]
    fn test_recovery() {
        let data = b"Testing recovery pieces";
        let total = 8u16;
        let min = 3u16;

        // Encode the data
        let (root, chunks) = encode::<Sha256>(total, min, data.to_vec()).unwrap();

        // Use a mix of original and recovery pieces
        let pieces: Vec<_> = vec![
            chunks[0].clone(), // original
            chunks[4].clone(), // recovery
            chunks[6].clone(), // recovery
        ];

        // Try to decode with a mix of original and recovery pieces
        let decoded = decode::<Sha256>(total, min, &root, pieces).unwrap();
        assert_eq!(decoded, data);
    }

    #[test]
    fn test_not_enough_pieces() {
        let data = b"Test insufficient pieces";
        let total = 6u16;
        let min = 4u16;

        // Encode data
        let (root, chunks) = encode::<Sha256>(total, min, data.to_vec()).unwrap();

        // Try with fewer than min
        let pieces: Vec<_> = chunks.into_iter().take(2).collect();

        // Fail to decode
        let result = decode::<Sha256>(total, min, &root, pieces);
        assert!(matches!(result, Err(Error::NotEnoughChunks)));
    }

    #[test]
    fn test_duplicate_index() {
        let data = b"Test duplicate detection";
        let total = 5u16;
        let min = 3u16;

        // Encode data
        let (root, chunks) = encode::<Sha256>(total, min, data.to_vec()).unwrap();

        // Include duplicate index by cloning the first chunk
        let pieces = vec![chunks[0].clone(), chunks[0].clone(), chunks[1].clone()];

        // Fail to decode
        let result = decode::<Sha256>(total, min, &root, pieces);
        assert!(matches!(result, Err(Error::DuplicateIndex(0))));
    }

    #[test]
    #[should_panic(expected = "assertion failed: total > min")]
    fn test_invalid_total() {
        let data = b"Test parameter validation";

        // total <= min should panic
        encode::<Sha256>(3, 3, data.to_vec()).unwrap();
    }

    #[test]
    #[should_panic(expected = "assertion failed: min > 0")]
    fn test_invalid_min() {
        let data = b"Test parameter validation";

        // min = 0 should panic
        encode::<Sha256>(5, 0, data.to_vec()).unwrap();
    }

    #[test]
    fn test_empty_data() {
        let data = b"";
        let total = 100u16;
        let min = 30u16;

        // Encode data
        let (root, chunks) = encode::<Sha256>(total, min, data.to_vec()).unwrap();

        // Try to decode with min
        let minimal = chunks.into_iter().take(min as usize).collect();
        let decoded = decode::<Sha256>(total, min, &root, minimal).unwrap();
        assert_eq!(decoded, data);
    }

    #[test]
    fn test_large_data() {
        let data = vec![42u8; 1000]; // 1KB of data
        let total = 7u16;
        let min = 4u16;

        // Encode data
        let (root, chunks) = encode::<Sha256>(total, min, data.clone()).unwrap();

        // Try to decode with min
        let minimal = chunks.into_iter().take(min as usize).collect();
        let decoded = decode::<Sha256>(total, min, &root, minimal).unwrap();
        assert_eq!(decoded, data);
    }

    #[test]
    fn test_malicious_root_detection() {
        let data = b"Original data that should be protected";
        let total = 7u16;
        let min = 4u16;

        // Encode data correctly to get valid chunks
        let (_correct_root, chunks) = encode::<Sha256>(total, min, data.to_vec()).unwrap();

        // Create a malicious/fake root (simulating a malicious encoder)
        let mut hasher = Sha256::new();
        hasher.update(b"malicious_data_that_wasnt_actually_encoded");
        let malicious_root = hasher.finalize();

        // Collect valid pieces (these are legitimate fragments)
        let minimal = chunks.into_iter().take(min as usize).collect();

        // Attempt to decode with malicious root
        let result = decode::<Sha256>(total, min, &malicious_root, minimal);
        assert!(matches!(result, Err(Error::InvalidProof)));
    }

    #[test]
    fn test_manipulated_chunk_detection() {
        let data = b"Data integrity must be maintained";
        let total = 6u16;
        let min = 3u16;

        // Encode data
        let (root, mut chunks) = encode::<Sha256>(total, min, data.to_vec()).unwrap();

        // Tamper with one of the chunks by modifying the shard data
        if !chunks[1].shard.is_empty() {
            chunks[1].shard[0] ^= 0xFF; // Flip bits in first byte
        }

        // Try to decode with the tampered chunk
        let result = decode::<Sha256>(total, min, &root, chunks);
        assert!(matches!(result, Err(Error::InvalidProof)));
    }

    #[test]
    fn test_inconsistent_shards() {
        let data = b"Test data for malicious encoding";
        let total = 5u16;
        let min = 3u16;
        let m = total - min;

        // Compute original data encoding
        let shards = prepare_data(data.to_vec(), min as usize, total as usize - min as usize);
        let shard_size = shards[0].len();

        // Re-encode the data
        let mut encoder = ReedSolomonEncoder::new(min as usize, m as usize, shard_size).unwrap();
        for shard in &shards {
            encoder.add_original_shard(shard).unwrap();
        }
        let recovery_result = encoder.encode().unwrap();
        let mut recovery_shards: Vec<Vec<u8>> = recovery_result
            .recovery_iter()
            .map(|s| s.to_vec())
            .collect();

        // Tamper with one recovery shard
        if !recovery_shards[0].is_empty() {
            recovery_shards[0][0] ^= 0xFF;
        }

        // Build malicious shards
        let mut malicious_shards = shards.clone();
        malicious_shards.extend(recovery_shards);

        // Build malicious tree
        let mut builder = Builder::<Sha256>::new(total as usize);
        for shard in &malicious_shards {
            let mut hasher = Sha256::new();
            hasher.update(shard);
            builder.add(&hasher.finalize());
        }
        let malicious_tree = builder.build();
        let malicious_root = malicious_tree.root();

        // Generate chunks for min pieces, including the tampered recovery
        let selected_indices = vec![0, 1, 3]; // originals 0,1 and recovery 0 (index 3)
        let mut pieces = Vec::new();
        for &i in &selected_indices {
            let merkle_proof = malicious_tree.proof(i as u32).unwrap();
            let shard = malicious_shards[i].clone();
            let chunk = Chunk::new(shard, i as u16, merkle_proof);
            pieces.push(chunk);
        }

        // Fail to decode
        let result = decode::<Sha256>(total, min, &malicious_root, pieces);
        assert!(matches!(result, Err(Error::Inconsistent)));
    }

    #[test]
    fn test_odd_shard_len() {
        let data = b"a";
        let total = 3u16;
        let min = 2u16;

        // Encode the data
        let (root, chunks) = encode::<Sha256>(total, min, data.to_vec()).unwrap();

        // Use a mix of original and recovery pieces
        let pieces: Vec<_> = vec![
            chunks[0].clone(), // original
            chunks[2].clone(), // recovery
        ];

        // Try to decode with a mix of original and recovery pieces
        let decoded = decode::<Sha256>(total, min, &root, pieces).unwrap();
        assert_eq!(decoded, data);
    }

    #[test]
    fn test_invalid_index() {
        let data = b"Testing recovery pieces";
        let total = 8u16;
        let min = 3u16;

        // Encode the data
        let (root, mut chunks) = encode::<Sha256>(total, min, data.to_vec()).unwrap();

        // Tamper with a chunk's index so it falls outside the valid range (0..total)
        chunks[1].index = 8;
        let pieces: Vec<_> = vec![
            chunks[0].clone(), // original
            chunks[1].clone(), // original with out-of-range index
            chunks[6].clone(), // recovery
        ];

        // Fail to decode
        let result = decode::<Sha256>(total, min, &root, pieces);
        assert!(matches!(result, Err(Error::InvalidIndex(8))));
    }

    #[test]
    fn test_max_chunks() {
        let data = vec![42u8; 1000]; // 1KB of data
        let total = u16::MAX;
        let min = u16::MAX / 2;

        // Encode data
        let (root, chunks) = encode::<Sha256>(total, min, data.clone()).unwrap();

        // Try to decode with min
        let minimal = chunks.into_iter().take(min as usize).collect();
        let decoded = decode::<Sha256>(total, min, &root, minimal).unwrap();
        assert_eq!(decoded, data);
    }

    #[test]
    fn test_too_many_chunks() {
        let data = vec![42u8; 1000]; // 1KB of data
        let total = u16::MAX;
        let min = u16::MAX / 2 - 1;

        // Encode data
        let result = encode::<Sha256>(total, min, data.clone());
        assert!(matches!(
            result,
            Err(Error::ReedSolomon(
                reed_solomon_simd::Error::UnsupportedShardCount {
                    original_count: _,
                    recovery_count: _,
                }
            ))
        ));
    }
}