Skip to main content

rust_par2/
repair.rs

//! PAR2 repair engine.
//!
//! Repairs damaged or missing files using Reed-Solomon recovery data.
//!
//! Algorithm (D×D reduced approach — D = damaged blocks, N = total blocks):
//! 1. Verify to identify damaged/missing blocks
//! 2. Load recovery blocks from volume files
//! 3. Build a D×D Vandermonde submatrix (only damaged columns × recovery rows)
//! 4. Invert the D×D matrix — O(D³) instead of O(N²D) for the full N×N
//! 5. Compute adjusted recovery: subtract intact-block contributions from recovery data
//! 6. Apply the D×D inverse to produce repaired blocks
//! 7. Write repaired blocks back to files

14use std::collections::HashMap;
15use std::collections::hash_map::Entry;
16use std::fmt;
17use std::io::{Read, Seek, SeekFrom, Write};
18use std::path::Path;
19
20use rayon::prelude::*;
21use tracing::{debug, info};
22
23use crate::gf;
24use crate::gf_simd;
25use crate::matrix::{GfMatrix, par2_input_constants};
26use crate::recovery::{RecoveryBlock, load_recovery_blocks};
27use crate::types::{Par2FileSet, VerifyResult};
28use crate::verify;
29
/// Outcome of a repair attempt.
#[derive(Debug)]
pub struct RepairResult {
    /// True when every file in the set is intact after the repair.
    pub success: bool,
    /// How many data blocks were reconstructed.
    pub blocks_repaired: u32,
    /// How many distinct files received reconstructed data.
    pub files_repaired: usize,
    /// Human-readable summary of the outcome.
    pub message: String,
}
42
43impl fmt::Display for RepairResult {
44    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
45        if self.success {
46            write!(
47                f,
48                "Repair complete: {} blocks repaired across {} files",
49                self.blocks_repaired, self.files_repaired
50            )
51        } else {
52            write!(f, "Repair failed: {}", self.message)
53        }
54    }
55}
56
/// Errors that can occur during repair.
#[derive(Debug, thiserror::Error)]
pub enum RepairError {
    /// An underlying filesystem read, write, or open failed.
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),
    /// Fewer recovery blocks are available than damaged blocks to rebuild.
    #[error("Insufficient recovery data: need {needed} blocks, have {available}")]
    InsufficientRecovery { needed: u32, available: u32 },
    /// The D×D Vandermonde decode submatrix could not be inverted.
    #[error("Decode matrix is singular — cannot repair with these recovery blocks")]
    SingularMatrix,
    /// Verification reported every file intact; there is nothing to repair.
    #[error("No damage detected — nothing to repair")]
    NoDamage,
    /// The post-repair verification pass still found problems.
    #[error("Verification after repair failed: {0}")]
    VerifyFailed(String),
}
71
72/// Repair damaged/missing files in a PAR2 set.
73///
74/// This is a blocking operation. For async contexts, wrap in `spawn_blocking`.
75///
76/// Runs verification internally to identify damage. If you already have a
77/// [`VerifyResult`] from a prior [`verify()`](crate::verify) call, use
78/// [`repair_from_verify`] instead to skip the redundant verification pass.
79pub fn repair(file_set: &Par2FileSet, dir: &Path) -> Result<RepairResult, RepairError> {
80    let verify_result = verify::verify(file_set, dir);
81    repair_from_verify_inner(file_set, dir, &verify_result, true)
82}
83
84/// Repair using a pre-computed [`VerifyResult`].
85///
86/// This skips the initial verification pass, saving significant time when the
87/// caller has already called [`verify()`](crate::verify). The `verify_result`
88/// must have been computed against the same `file_set` and `dir`.
89///
90/// Uses a reduced D×D matrix approach where D = number of damaged blocks.
91/// The full N×N decode matrix is never constructed — only a small Vandermonde
92/// submatrix covering damaged positions is built and inverted. This reduces
93/// matrix inversion from O(N²D) to O(D³), which is the difference between
94/// minutes and milliseconds for typical repair scenarios.
95///
96/// After repair, a full MD5 verification pass confirms all files are correct.
97/// To skip re-verification, use [`repair_from_verify_no_reverify`].
98///
99/// This is a blocking operation. For async contexts, wrap in `spawn_blocking`.
100pub fn repair_from_verify(
101    file_set: &Par2FileSet,
102    dir: &Path,
103    verify_result: &VerifyResult,
104) -> Result<RepairResult, RepairError> {
105    repair_from_verify_inner(file_set, dir, verify_result, true)
106}
107
108/// Like [`repair_from_verify`], but skips re-verification after repair.
109///
110/// The Reed-Solomon math is deterministic — if the matrix inverted and I/O
111/// succeeded, the output is correct. Skipping re-verify saves a full read
112/// of all files (~6s per 5GB).
113pub fn repair_from_verify_no_reverify(
114    file_set: &Par2FileSet,
115    dir: &Path,
116    verify_result: &VerifyResult,
117) -> Result<RepairResult, RepairError> {
118    repair_from_verify_inner(file_set, dir, verify_result, false)
119}
120
/// Shared implementation behind [`repair`], [`repair_from_verify`], and
/// [`repair_from_verify_no_reverify`]. `re_verify` controls whether a full
/// verification pass runs after the repaired blocks are written.
fn repair_from_verify_inner(
    file_set: &Par2FileSet,
    dir: &Path,
    verify_result: &VerifyResult,
    re_verify: bool,
) -> Result<RepairResult, RepairError> {
    // Nothing to do: surfaced as an error so callers can distinguish
    // "already intact" from "repaired".
    if verify_result.all_correct() {
        return Err(RepairError::NoDamage);
    }

    let blocks_needed = verify_result.blocks_needed();
    info!(
        blocks_needed,
        damaged = verify_result.damaged.len(),
        missing = verify_result.missing.len(),
        "Repair: damage detected"
    );

    // Step 1: Load recovery blocks from all volume files
    let recovery_blocks = load_recovery_blocks(dir, &file_set.recovery_set_id, file_set.slice_size);

    if (recovery_blocks.len() as u32) < blocks_needed {
        return Err(RepairError::InsufficientRecovery {
            needed: blocks_needed,
            available: recovery_blocks.len() as u32,
        });
    }

    // Step 2: Map files to global block indices
    let block_map = build_block_map(file_set);
    let total_input_blocks = block_map.total_blocks as usize;

    let damaged_indices = find_damaged_block_indices(verify_result, &block_map);
    let num_damaged = damaged_indices.len();
    info!(
        damaged_block_count = num_damaged,
        total_input_blocks, "Mapped damaged blocks to global indices"
    );

    // Select exactly D recovery blocks (one per damaged block).
    // NOTE(review): the sufficiency check above compares against
    // `blocks_needed`, but `take(num_damaged)` is what sizes the system —
    // confirm `blocks_needed` always equals `num_damaged`, otherwise a short
    // take would yield an underdetermined D×D matrix.
    let recovery_to_use: Vec<&RecoveryBlock> = recovery_blocks.iter().take(num_damaged).collect();
    let recovery_exponents: Vec<u32> = recovery_to_use.iter().map(|b| b.exponent).collect();

    // Step 3: Build and invert the D×D Vandermonde submatrix.
    //
    // The full encoding equation for recovery block e is:
    //   R_e = Σ_i (c_i^exp_e × D_i)   for all input blocks i
    //
    // Rearranging for the damaged blocks only:
    //   R_e - Σ_{intact i} (c_i^exp_e × D_i) = Σ_{damaged j} (c_j^exp_e × D_j)
    //
    // The D×D matrix V[e][j] = c_{damaged_j}^{exp_e} is what we invert.
    let constants = par2_input_constants(total_input_blocks);

    let mut vandermonde = GfMatrix::zeros(num_damaged, num_damaged);
    for (e, &exp) in recovery_exponents.iter().enumerate() {
        for (j, &dmg_idx) in damaged_indices.iter().enumerate() {
            vandermonde.set(e, j, gf::pow(constants[dmg_idx], exp));
        }
    }

    let inverse = vandermonde.invert().ok_or(RepairError::SingularMatrix)?;

    info!(
        "D×D decode matrix inverted ({}×{})",
        num_damaged, num_damaged
    );

    // Step 4: Compute adjusted recovery buffers.
    //
    // adjusted[e] = R_e ⊕ Σ_{intact i} (c_i^exp_e × D_i)
    //
    // In GF(2^16), subtraction = addition = XOR, so we XOR-accumulate the
    // intact-block contributions into the recovery data.
    //
    // Cache-tiled approach: the D output buffers (D × slice_size) typically
    // exceed L3 cache. Iterating source-major (one source at a time against
    // all D outputs) thrashes cache on every source. Instead we read batches
    // of B source blocks, then process each output buffer against the entire
    // batch — keeping the output buffer hot in L1/L2 while applying B sources
    // via mul_add_multi's pair-batched AVX2 path.

    let slice_size = file_set.slice_size as usize;

    // Build a fast lookup for damaged indices
    let damaged_set: std::collections::HashSet<usize> = damaged_indices.iter().copied().collect();

    // Initialize adjusted recovery from the actual recovery block data
    let mut adjusted: Vec<Vec<u8>> = recovery_to_use.iter().map(|rb| rb.data.clone()).collect();

    // Collect intact block indices for batch reading
    let intact_indices: Vec<usize> = (0..total_input_blocks)
        .filter(|i| !damaged_set.contains(i))
        .collect();

    // Batch size: chosen so B × slice_size fits comfortably in L3 cache (~16-32MB).
    // With 768KB slices, 24 blocks = ~18MB. Leaves room for output buffer in L1/L2.
    const BATCH_SIZE: usize = 24;

    // Read and process source blocks in cache-friendly batches.
    // I/O is sequential so OS readahead keeps the pipeline full.
    // Handles are cached across batches to avoid reopening files per block.
    let mut file_handles: HashMap<String, std::fs::File> = HashMap::new();

    for batch in intact_indices.chunks(BATCH_SIZE) {
        // Read this batch of source blocks (collect::<Result> short-circuits
        // on the first I/O error, which `?` then propagates).
        let batch_data: Vec<Vec<u8>> = batch
            .iter()
            .map(|&idx| read_source_block(dir, &block_map, idx, slice_size, &mut file_handles))
            .collect::<std::io::Result<Vec<_>>>()?;

        let batch_refs: Vec<&[u8]> = batch_data.iter().map(|v| v.as_slice()).collect();

        // For each output buffer (parallelized across cores), apply all sources
        // in this batch at once. The output buffer stays in L1/L2 for the whole
        // batch, and mul_add_multi uses pair-batched AVX2 to halve dst bandwidth.
        adjusted
            .par_iter_mut()
            .enumerate()
            .for_each(|(e, adj_buf)| {
                // Per-source coefficient c_i^exp_e for this recovery row.
                let coeffs: Vec<u16> = batch
                    .iter()
                    .map(|&src_idx| gf::pow(constants[src_idx], recovery_exponents[e]))
                    .collect();
                gf_simd::mul_add_multi(adj_buf, &batch_refs, &coeffs);
            });
    }

    info!("Intact-block contributions subtracted from recovery data");

    // Step 5: Apply the D×D inverse to produce repaired blocks.
    //
    // repaired[j] = Σ_e (V^{-1}[j][e] × adjusted[e])
    //
    // This is D × D mul-adds of slice_size — small relative to the streaming phase.

    let adj_refs: Vec<&[u8]> = adjusted.iter().map(|v| v.as_slice()).collect();

    let mut outputs: Vec<Vec<u8>> = (0..num_damaged).map(|_| vec![0u8; slice_size]).collect();

    outputs.par_iter_mut().enumerate().for_each(|(j, dst)| {
        let coeffs: Vec<u16> = (0..num_damaged).map(|e| inverse.get(j, e)).collect();
        gf_simd::mul_add_multi(dst, &adj_refs, &coeffs);
    });

    info!("Repaired blocks reconstructed via D×D inverse");

    // Pair each repaired buffer with its global block index for the write pass.
    let repaired_blocks: Vec<(usize, Vec<u8>)> =
        damaged_indices.iter().copied().zip(outputs).collect();

    // Step 6: Write repaired blocks back to files
    let mut files_touched = std::collections::HashSet::new();

    for (global_idx, data) in &repaired_blocks {
        let (filename, file_offset, write_len) = block_map.global_to_file(*global_idx, slice_size);

        let file_path = dir.join(&filename);
        debug!(
            filename,
            global_block = global_idx,
            offset = file_offset,
            len = write_len,
            "Writing repaired block"
        );

        // create(true) so entirely-missing files are recreated;
        // truncate(false) so other (intact) blocks of the file survive.
        let mut f = std::fs::OpenOptions::new()
            .create(true)
            .truncate(false)
            .write(true)
            .open(&file_path)?;

        // Ensure file is the right size (for missing files)
        let expected_size = block_map
            .files
            .iter()
            .find(|bf| bf.filename == filename)
            .map(|bf| bf.file_size)
            .unwrap_or(0);
        let current_size = f.metadata()?.len();
        if current_size < expected_size {
            f.set_len(expected_size)?;
        }

        // Only `write_len` bytes are written: the last block of a file may be
        // shorter than a full slice, and the buffer tail is padding.
        f.seek(SeekFrom::Start(file_offset as u64))?;
        f.write_all(&data[..write_len])?;
        files_touched.insert(filename.clone());
    }

    // Step 7: Optional re-verify
    if re_verify {
        let verification = verify::verify(file_set, dir);
        if verification.all_correct() {
            info!(
                blocks = repaired_blocks.len(),
                files = files_touched.len(),
                "Repair successful — all files verified"
            );
            Ok(RepairResult {
                success: true,
                blocks_repaired: repaired_blocks.len() as u32,
                files_repaired: files_touched.len(),
                message: "All files repaired and verified".to_string(),
            })
        } else {
            Err(RepairError::VerifyFailed(format!("{verification}")))
        }
    } else {
        info!(
            blocks = repaired_blocks.len(),
            files = files_touched.len(),
            "Repair complete (re-verify skipped)"
        );
        Ok(RepairResult {
            success: true,
            blocks_repaired: repaired_blocks.len() as u32,
            files_repaired: files_touched.len(),
            message: "All files repaired (re-verify skipped)".to_string(),
        })
    }
}
340
341// ---------------------------------------------------------------------------
342// Block mapping
343// ---------------------------------------------------------------------------
344
/// Maps between global block indices and per-file block positions.
struct BlockMap {
    // Per-file layout entries, in file-ID order (see `build_block_map`).
    files: Vec<BlockFile>,
    // Sum of `block_count` over all files.
    total_blocks: u32,
}
350
/// Block-layout metadata for one file in the recovery set.
struct BlockFile {
    // File name relative to the working directory.
    filename: String,
    // Exact size in bytes (the last block may cover less than a full slice).
    file_size: u64,
    // Number of slice-sized blocks spanning the file.
    block_count: u32,
    /// First global block index for this file.
    start_block: u32,
}
358
359fn build_block_map(file_set: &Par2FileSet) -> BlockMap {
360    let slice_size = file_set.slice_size;
361    let mut files = Vec::new();
362    let mut block_offset = 0u32;
363
364    // Sort files by file ID for deterministic ordering (same as par2cmdline)
365    let mut sorted_files: Vec<_> = file_set.files.values().collect();
366    sorted_files.sort_by_key(|f| f.file_id);
367
368    for f in sorted_files {
369        let block_count = if slice_size == 0 {
370            0
371        } else {
372            f.size.div_ceil(slice_size) as u32
373        };
374        files.push(BlockFile {
375            filename: f.filename.clone(),
376            file_size: f.size,
377            block_count,
378            start_block: block_offset,
379        });
380        block_offset += block_count;
381    }
382
383    BlockMap {
384        files,
385        total_blocks: block_offset,
386    }
387}
388
389impl BlockMap {
390    /// Convert a global block index to (filename, file_byte_offset, bytes_to_write).
391    fn global_to_file(&self, global_idx: usize, slice_size: usize) -> (String, usize, usize) {
392        let global = global_idx as u32;
393        for f in &self.files {
394            if global >= f.start_block && global < f.start_block + f.block_count {
395                let local_block = (global - f.start_block) as usize;
396                let file_offset = local_block * slice_size;
397                // Last block may be shorter than slice_size
398                let remaining = f.file_size as usize - file_offset;
399                let write_len = remaining.min(slice_size);
400                return (f.filename.clone(), file_offset, write_len);
401            }
402        }
403        panic!("Global block index {global_idx} out of range");
404    }
405}
406
407fn find_damaged_block_indices(verify_result: &VerifyResult, block_map: &BlockMap) -> Vec<usize> {
408    let mut indices = Vec::new();
409
410    for damaged in &verify_result.damaged {
411        if let Some(bf) = block_map
412            .files
413            .iter()
414            .find(|f| f.filename == damaged.filename)
415        {
416            if damaged.damaged_block_indices.is_empty() {
417                // No per-block info — assume all blocks damaged
418                for i in 0..bf.block_count {
419                    indices.push((bf.start_block + i) as usize);
420                }
421            } else {
422                // Use precise per-block damage info
423                for &local_idx in &damaged.damaged_block_indices {
424                    indices.push((bf.start_block + local_idx) as usize);
425                }
426            }
427        }
428    }
429
430    for missing in &verify_result.missing {
431        if let Some(bf) = block_map
432            .files
433            .iter()
434            .find(|f| f.filename == missing.filename)
435        {
436            for i in 0..bf.block_count {
437                indices.push((bf.start_block + i) as usize);
438            }
439        }
440    }
441
442    indices.sort();
443    indices.dedup();
444    indices
445}
446
447/// Read a single source block from disk, reusing file handles.
448fn read_source_block(
449    dir: &Path,
450    block_map: &BlockMap,
451    global_idx: usize,
452    slice_size: usize,
453    file_handles: &mut HashMap<String, std::fs::File>,
454) -> std::io::Result<Vec<u8>> {
455    let (filename, file_offset, _) = block_map.global_to_file(global_idx, slice_size);
456
457    let handle = match file_handles.entry(filename.clone()) {
458        Entry::Occupied(e) => e.into_mut(),
459        Entry::Vacant(e) => {
460            let path = dir.join(&filename);
461            e.insert(std::fs::File::open(&path)?)
462        }
463    };
464    handle.seek(SeekFrom::Start(file_offset as u64))?;
465
466    let mut buf = vec![0u8; slice_size]; // zero-initialized for last-block padding
467    let mut total = 0;
468    while total < slice_size {
469        match handle.read(&mut buf[total..]) {
470            Ok(0) => break,
471            Ok(n) => total += n,
472            Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
473            Err(e) => return Err(e),
474        }
475    }
476    Ok(buf)
477}
478
#[cfg(test)]
mod tests {
    use super::*;

    /// Test that the basic RS encode→decode round-trip works with the D×D approach.
    /// 2 data blocks, 2 recovery blocks, lose both data blocks, recover.
    #[test]
    fn test_rs_roundtrip_simple() {
        // 2 input "blocks" of 4 bytes each (2 u16 values)
        let input0: Vec<u8> = vec![0x01, 0x00, 0x02, 0x00]; // [1, 2] as u16 LE
        let input1: Vec<u8> = vec![0x03, 0x00, 0x04, 0x00]; // [3, 4] as u16 LE

        let input_count = 2;
        let recovery_exponents = vec![0u32, 1u32];

        // Build encoding matrix to compute recovery blocks
        let enc = GfMatrix::par2_encoding_matrix(input_count, &recovery_exponents);

        // Compute recovery blocks
        let slice_size = 4;
        let u16_per_slice = slice_size / 2;
        let inputs = [&input0, &input1];

        let mut recovery0 = vec![0u8; slice_size];
        let mut recovery1 = vec![0u8; slice_size];

        // Encode word-by-word. Recovery rows of `enc` start at row
        // `input_count` — rows 2 and 3 here for the two exponents.
        for pos in 0..u16_per_slice {
            let off = pos * 2;
            let mut r0: u16 = 0;
            let mut r1: u16 = 0;
            for (i, inp) in inputs.iter().enumerate() {
                let val = u16::from_le_bytes([inp[off], inp[off + 1]]);
                r0 = gf::add(r0, gf::mul(enc.get(2, i), val));
                r1 = gf::add(r1, gf::mul(enc.get(3, i), val));
            }
            // Store little-endian, matching the input encoding.
            recovery0[off] = r0 as u8;
            recovery0[off + 1] = (r0 >> 8) as u8;
            recovery1[off] = r1 as u8;
            recovery1[off + 1] = (r1 >> 8) as u8;
        }

        // Now "lose" both input blocks. Use D×D approach to recover.
        // D = 2 (both blocks damaged), N = 2.
        // Since all blocks are damaged, there are no intact blocks to subtract.
        // adjusted[e] = recovery[e] (no intact contributions to remove).

        let constants = par2_input_constants(input_count);
        let damaged_indices = [0usize, 1usize];

        // Build D×D Vandermonde: V[e][j] = c_{damaged_j}^{exp_e}
        let num_damaged = damaged_indices.len();
        let mut vandermonde = GfMatrix::zeros(num_damaged, num_damaged);
        for (e, &exp) in recovery_exponents.iter().enumerate() {
            for (j, &dmg_idx) in damaged_indices.iter().enumerate() {
                vandermonde.set(e, j, gf::pow(constants[dmg_idx], exp));
            }
        }

        let inv = vandermonde.invert().expect("Should be invertible");

        // Apply inverse: repaired[j] = Σ_e inv[j][e] * adjusted[e]
        let adjusted = [&recovery0[..], &recovery1[..]];
        let mut result0 = vec![0u8; slice_size];
        let mut result1 = vec![0u8; slice_size];

        for pos in 0..u16_per_slice {
            let off = pos * 2;
            let mut out0: u16 = 0;
            let mut out1: u16 = 0;
            for (e, adj) in adjusted.iter().enumerate() {
                let val = u16::from_le_bytes([adj[off], adj[off + 1]]);
                out0 = gf::add(out0, gf::mul(inv.get(0, e), val));
                out1 = gf::add(out1, gf::mul(inv.get(1, e), val));
            }
            result0[off] = out0 as u8;
            result0[off + 1] = (out0 >> 8) as u8;
            result1[off] = out1 as u8;
            result1[off + 1] = (out1 >> 8) as u8;
        }

        assert_eq!(result0, input0, "Recovered block 0 should match original");
        assert_eq!(result1, input1, "Recovered block 1 should match original");
    }

    /// Test D×D approach with partial damage (some blocks intact).
    /// 4 data blocks, lose 2, recover using 2 recovery blocks.
    #[test]
    fn test_rs_roundtrip_partial_damage() {
        let slice_size = 4;
        let input_count = 4;
        let recovery_exponents = vec![0u32, 1u32];

        let inputs: Vec<Vec<u8>> = vec![
            vec![0x01, 0x00, 0x02, 0x00],
            vec![0x03, 0x00, 0x04, 0x00],
            vec![0x05, 0x00, 0x06, 0x00],
            vec![0x07, 0x00, 0x08, 0x00],
        ];

        let enc = GfMatrix::par2_encoding_matrix(input_count, &recovery_exponents);

        // Compute recovery blocks
        let mut recovery = vec![vec![0u8; slice_size]; 2];
        for pos in 0..(slice_size / 2) {
            let off = pos * 2;
            for (e, rec) in recovery.iter_mut().enumerate() {
                let mut val: u16 = 0;
                for (i, inp) in inputs.iter().enumerate() {
                    let d = u16::from_le_bytes([inp[off], inp[off + 1]]);
                    val = gf::add(val, gf::mul(enc.get(input_count + e, i), d));
                }
                rec[off] = val as u8;
                rec[off + 1] = (val >> 8) as u8;
            }
        }

        // "Lose" blocks 1 and 3. Blocks 0 and 2 are intact.
        let damaged_indices = [1usize, 3usize];
        let intact_indices: Vec<usize> = (0..input_count)
            .filter(|i| !damaged_indices.contains(i))
            .collect();
        let num_damaged = damaged_indices.len();

        let constants = par2_input_constants(input_count);

        // Build D×D Vandermonde
        let mut vandermonde = GfMatrix::zeros(num_damaged, num_damaged);
        for (e, &exp) in recovery_exponents.iter().enumerate() {
            for (j, &dmg_idx) in damaged_indices.iter().enumerate() {
                vandermonde.set(e, j, gf::pow(constants[dmg_idx], exp));
            }
        }
        let inv = vandermonde.invert().expect("Should be invertible");

        // Compute adjusted recovery: subtract intact contributions.
        // (In GF(2^16) subtraction is XOR, so mul_add doubles as mul_sub.)
        let mut adjusted = recovery.clone();
        for &intact_idx in &intact_indices {
            let c_i = constants[intact_idx];
            for (e, adj) in adjusted.iter_mut().enumerate() {
                let coeff = gf::pow(c_i, recovery_exponents[e]);
                gf_simd::mul_add_buffer(adj, &inputs[intact_idx], coeff);
            }
        }

        // Apply inverse — mirrors the production path in repair_from_verify_inner.
        let adj_refs: Vec<&[u8]> = adjusted.iter().map(|v| v.as_slice()).collect();
        let mut outputs: Vec<Vec<u8>> = (0..num_damaged).map(|_| vec![0u8; slice_size]).collect();

        for (j, dst) in outputs.iter_mut().enumerate() {
            let coeffs: Vec<u16> = (0..num_damaged).map(|e| inv.get(j, e)).collect();
            gf_simd::mul_add_multi(dst, &adj_refs, &coeffs);
        }

        // Verify recovery
        assert_eq!(outputs[0], inputs[1], "Recovered block 1 should match");
        assert_eq!(outputs[1], inputs[3], "Recovered block 3 should match");
    }
}