// Primitive polynomial x^8 + x^4 + x^3 + x^2 + 1 generating GF(2^8).
const PRIM_POLY: u16 = 0x11D;
// Full codeword length n for RS codes over GF(2^8).
const N_MAX: usize = 255;
// Default number of data symbols per block (k).
const K_DEFAULT: usize = 191;
// Default number of parity symbols per block (n - k = 64).
const PARITY_LEN: usize = N_MAX - K_DEFAULT;
// Maximum correctable symbol errors per block at the default rate (t = 32).
pub const T_MAX: usize = PARITY_LEN / 2;
// Parity lengths supported by the adaptive-rate encode/decode API.
pub const PARITY_TIERS: [usize; 4] = [64, 128, 192, 240];
/// Precomputed GF(2^8) antilog/log tables for fast field arithmetic.
struct GfTables {
// exp[i] = alpha^i; doubled (512 entries) so exp[log a + log b] needs no mod-255.
exp: [u8; 512],
// log[x] = discrete log of x base alpha; log[0] is unused and stays 0.
log: [u8; 256],
}
/// Builds the GF(2^8) exp/log tables for the field generated by PRIM_POLY.
/// The exp table is doubled so the sum of two logs (at most 508) can be
/// looked up directly without a mod-255 reduction.
fn build_gf_tables() -> GfTables {
    let mut tables = GfTables {
        exp: [0u8; 512],
        log: [0u8; 256],
    };
    let mut value: u16 = 1;
    for power in 0..255usize {
        let sym = value as u8;
        tables.exp[power] = sym;
        tables.exp[power + 255] = sym;
        tables.log[sym as usize] = power as u8;
        // Multiply by alpha (i.e. by x), reducing by the primitive polynomial.
        value <<= 1;
        if value & 0x100 != 0 {
            value ^= PRIM_POLY;
        }
    }
    // The loop fills indices 0..=509; mirror the cycle start into the tail.
    tables.exp[510] = tables.exp[0];
    tables.exp[511] = tables.exp[1];
    tables
}
/// Returns the process-wide GF(2^8) tables, built lazily on first use.
fn gf_tables() -> &'static GfTables {
    static TABLES: std::sync::OnceLock<GfTables> = std::sync::OnceLock::new();
    TABLES.get_or_init(build_gf_tables)
}
/// Multiplies two GF(2^8) elements via the log/antilog tables.
fn gf_mul(a: u8, b: u8) -> u8 {
    match (a, b) {
        // Zero annihilates; the log table has no entry for 0.
        (0, _) | (_, 0) => 0,
        (a, b) => {
            let tab = gf_tables();
            let idx = tab.log[a as usize] as usize + tab.log[b as usize] as usize;
            // The doubled exp table absorbs the mod-255 reduction.
            tab.exp[idx]
        }
    }
}
/// Adds two GF(2^8) elements. In characteristic 2 addition is XOR
/// and is identical to subtraction.
fn gf_add(a: u8, b: u8) -> u8 {
    b ^ a
}
/// Multiplicative inverse in GF(2^8): a^-1 = alpha^(255 - log a).
///
/// # Panics
/// Panics if `a` is zero, which has no inverse.
fn gf_inv(a: u8) -> u8 {
    assert_ne!(a, 0, "cannot invert zero in GF(2^8)");
    let tab = gf_tables();
    let idx = 255 - tab.log[a as usize] as usize;
    tab.exp[idx]
}
/// Raises `a` to the `n`-th power in GF(2^8), using the convention
/// 0^0 = 1 and 0^n = 0 for n > 0. Test-only helper.
#[cfg(test)]
fn gf_pow(a: u8, n: u32) -> u8 {
    if a == 0 {
        return if n == 0 { 1 } else { 0 };
    }
    let t = gf_tables();
    // Widen to u64 before multiplying: log_a * n is up to 254 * u32::MAX,
    // which overflows u32 and would panic in debug builds for large n.
    let log_a = t.log[a as usize] as u64;
    let exp_idx = (log_a * n as u64) % 255;
    t.exp[exp_idx as usize]
}
/// Evaluates a polynomial at `x` over GF(2^8) using Horner's scheme.
/// Coefficients are in descending order (highest degree first).
fn poly_eval(poly: &[u8], x: u8) -> u8 {
    poly.iter()
        .fold(0u8, |acc, &coeff| gf_add(gf_mul(acc, x), coeff))
}
/// Multiplies two polynomials over GF(2^8) by coefficient convolution.
/// Coefficient order (ascending vs descending) is preserved from the inputs.
///
/// Returns the empty polynomial if either factor is empty; the original
/// `a.len() + b.len() - 1` sizing would underflow and panic in that case.
fn poly_mul(a: &[u8], b: &[u8]) -> Vec<u8> {
    if a.is_empty() || b.is_empty() {
        return Vec::new();
    }
    let mut result = vec![0u8; a.len() + b.len() - 1];
    for (i, &ac) in a.iter().enumerate() {
        for (j, &bc) in b.iter().enumerate() {
            result[i + j] = gf_add(result[i + j], gf_mul(ac, bc));
        }
    }
    result
}
/// Builds the degree-`parity_len` generator polynomial
/// g(x) = (x - alpha^0)(x - alpha^1)...(x - alpha^(parity_len-1)),
/// with coefficients in descending order.
fn build_gen_poly(parity_len: usize) -> Vec<u8> {
    let tab = gf_tables();
    (0..parity_len).fold(vec![1u8], |acc, i| {
        // In GF(2^8), -alpha^i == alpha^i, so each factor is [1, alpha^i].
        poly_mul(&acc, &[1, tab.exp[i]])
    })
}
/// Returns the cached generator polynomial for the default parity length.
fn gen_poly() -> &'static Vec<u8> {
    static GEN: std::sync::OnceLock<Vec<u8>> = std::sync::OnceLock::new();
    GEN.get_or_init(|| build_gen_poly(PARITY_LEN))
}
/// Returns a cached generator polynomial for one of the supported parity
/// lengths (the PARITY_TIERS values plus a few smaller powers of two),
/// building it lazily on first use.
///
/// # Panics
/// Panics for any parity length that is neither listed below nor PARITY_LEN.
fn gen_poly_for(parity_len: usize) -> &'static Vec<u8> {
use std::sync::OnceLock;
// One lazily-initialized cache slot per supported parity length.
static GEN_4: OnceLock<Vec<u8>> = OnceLock::new();
static GEN_8: OnceLock<Vec<u8>> = OnceLock::new();
static GEN_16: OnceLock<Vec<u8>> = OnceLock::new();
static GEN_32: OnceLock<Vec<u8>> = OnceLock::new();
static GEN_64: OnceLock<Vec<u8>> = OnceLock::new();
static GEN_128: OnceLock<Vec<u8>> = OnceLock::new();
static GEN_192: OnceLock<Vec<u8>> = OnceLock::new();
static GEN_240: OnceLock<Vec<u8>> = OnceLock::new();
match parity_len {
4 => GEN_4.get_or_init(|| build_gen_poly(4)),
8 => GEN_8.get_or_init(|| build_gen_poly(8)),
16 => GEN_16.get_or_init(|| build_gen_poly(16)),
32 => GEN_32.get_or_init(|| build_gen_poly(32)),
64 => GEN_64.get_or_init(|| build_gen_poly(64)),
128 => GEN_128.get_or_init(|| build_gen_poly(128)),
192 => GEN_192.get_or_init(|| build_gen_poly(192)),
240 => GEN_240.get_or_init(|| build_gen_poly(240)),
_ => {
// PARITY_LEN (64) is already covered above, but keep this fallback in
// case the default is ever changed to a value outside the tier list.
if parity_len == PARITY_LEN {
gen_poly()
} else {
panic!("unsupported parity length: {parity_len}")
}
}
}
}
/// Systematic RS encode at the default rate: returns `data` followed by
/// PARITY_LEN parity symbols (the remainder of data * x^PARITY_LEN
/// divided by the generator polynomial).
///
/// # Panics
/// Panics if `data` holds more than K_DEFAULT symbols.
pub fn rs_encode(data: &[u8]) -> Vec<u8> {
    assert!(
        data.len() <= K_DEFAULT,
        "data length {} exceeds max {}",
        data.len(),
        K_DEFAULT
    );
    let gpoly = gen_poly();
    let mut parity = vec![0u8; PARITY_LEN];
    // Polynomial long division implemented as an LFSR over the parity register:
    // shift left, then add feedback times the generator's tail coefficients.
    for &symbol in data {
        let feedback = gf_add(symbol, parity[0]);
        parity.rotate_left(1);
        parity[PARITY_LEN - 1] = 0;
        for (cell, &coeff) in parity.iter_mut().zip(&gpoly[1..]) {
            *cell = gf_add(*cell, gf_mul(feedback, coeff));
        }
    }
    let mut codeword = Vec::with_capacity(data.len() + PARITY_LEN);
    codeword.extend_from_slice(data);
    codeword.extend_from_slice(&parity);
    codeword
}
/// Splits `payload` into chunks of up to K_DEFAULT bytes and concatenates
/// the RS encoding of each chunk.
pub fn rs_encode_blocks(payload: &[u8]) -> Vec<u8> {
    payload
        .chunks(K_DEFAULT)
        .flat_map(|chunk| rs_encode(chunk))
        .collect()
}
/// Evaluates the received word at alpha^0 .. alpha^(PARITY_LEN-1).
/// A valid codeword yields all-zero syndromes.
fn compute_syndromes(received: &[u8]) -> Vec<u8> {
    let tab = gf_tables();
    (0..PARITY_LEN)
        .map(|i| poly_eval(received, tab.exp[i]))
        .collect()
}
/// True when no syndrome is nonzero, i.e. no errors were detected.
fn syndromes_are_zero(syndromes: &[u8]) -> bool {
    !syndromes.iter().any(|&s| s != 0)
}
/// Berlekamp-Massey: computes the shortest LFSR whose taps reproduce the
/// syndrome sequence — the error-locator polynomial sigma(x), returned in
/// ascending coefficient order with sigma[0] == 1. The returned length
/// minus one is the estimated number of errors.
fn berlekamp_massey(syndromes: &[u8]) -> Vec<u8> {
let n = syndromes.len();
// c = current connection polynomial; b = copy from the last length change.
let mut c = vec![0u8; n + 1];
c[0] = 1;
let mut c_len = 1usize;
let mut b = vec![0u8; n + 1];
b[0] = 1;
let mut b_len = 1usize;
// ell = current LFSR length, bval = discrepancy at the last length change,
// m = number of iterations since that change.
let mut ell = 0usize; let mut bval = 1u8; let mut m = 1usize;
for r in 0..n {
// Discrepancy: how far c fails to predict syndrome r.
let mut delta = syndromes[r];
for i in 1..c_len {
delta = gf_add(delta, gf_mul(c[i], syndromes[r - i]));
}
if delta == 0 {
// Prediction already exact; widen the gap and continue.
m += 1;
continue;
}
let factor = gf_mul(delta, gf_inv(bval));
if 2 * ell <= r {
// LFSR must grow: c <- c + factor * x^m * b, and b takes the old c.
let old_c = c[..c_len].to_vec();
let old_c_len = c_len;
let new_len = (b_len + m).max(c_len);
c_len = new_len;
for j in 0..b_len {
c[j + m] = gf_add(c[j + m], gf_mul(factor, b[j]));
}
b[..old_c_len].copy_from_slice(&old_c[..old_c_len]);
// Clear any stale high coefficients left over in b.
for j in old_c_len..b.len() {
b[j] = 0;
}
b_len = old_c_len;
ell = r + 1 - ell;
bval = delta;
m = 1;
} else {
// Length unchanged: cancel the discrepancy in place.
let new_len = (b_len + m).max(c_len);
c_len = new_len;
for j in 0..b_len {
c[j + m] = gf_add(c[j + m], gf_mul(factor, b[j]));
}
m += 1;
}
}
c[..c_len].to_vec()
}
/// Evaluates a polynomial with ascending-order coefficients
/// (poly[0] is the constant term) at `x` over GF(2^8).
fn eval_asc(poly: &[u8], x: u8) -> u8 {
    // Carry (accumulator, current power of x) through the fold.
    poly.iter()
        .fold((0u8, 1u8), |(acc, power), &coeff| {
            (gf_add(acc, gf_mul(coeff, power)), gf_mul(power, x))
        })
        .0
}
/// Chien search: finds the roots of the error locator sigma(x) by trying
/// every candidate position 0..n. A root at alpha^(-p) marks an error at
/// GF position p; the paired value is the corresponding array index
/// (position 0 is the highest-degree coefficient, i.e. array index n-1).
///
/// Returns None if the number of distinct roots does not match
/// deg(sigma) — the error pattern is uncorrectable.
fn chien_search(sigma_asc: &[u8], n: usize) -> Option<Vec<(usize, usize)>> {
if n == 0 {
return None;
}
let tab = gf_tables();
let num_errors = sigma_asc.len() - 1;
let mut found = Vec::with_capacity(num_errors);
for p in 0..n {
// x = alpha^(-p); p == 0 gives alpha^0 == 1 directly.
let x = if p == 0 {
1u8
} else {
tab.exp[(255 - (p % 255)) % 255] };
if eval_asc(sigma_asc, x) == 0 {
// (GF exponent position, array index from the front of the block)
found.push((p, n - 1 - p));
}
}
// Fewer roots than deg(sigma) means sigma has repeated/invalid roots.
if found.len() != num_errors {
return None;
}
Some(found)
}
/// Forney's algorithm: computes the error magnitude at each located
/// position as e_j = X_j * omega(X_j^-1) / sigma'(X_j^-1), where
/// omega = sigma * S truncated to degree < 2t.
fn forney(
sigma_asc: &[u8],
syndromes: &[u8],
found: &[(usize, usize)],
) -> Vec<u8> {
let tab = gf_tables();
let two_t = syndromes.len();
// omega(x) = sigma(x) * S(x) mod x^(2t): truncated polynomial product.
let mut omega = vec![0u8; two_t];
for i in 0..sigma_asc.len().min(two_t) {
for j in 0..two_t {
if i + j < two_t {
omega[i + j] = gf_add(omega[i + j], gf_mul(sigma_asc[i], syndromes[j]));
}
}
}
// Formal derivative of sigma: in characteristic 2 the even-degree terms
// vanish, leaving sigma'[i-1] = sigma[i] for odd i.
let deriv_len = sigma_asc.len().saturating_sub(1);
let mut sigma_prime = vec![0u8; deriv_len];
for i in (1..sigma_asc.len()).step_by(2) {
sigma_prime[i - 1] = sigma_asc[i];
}
let mut magnitudes = Vec::with_capacity(found.len());
for &(gf_pos, _) in found {
// X_j = alpha^gf_pos and its inverse X_j^-1 = alpha^(-gf_pos).
let x_val = if gf_pos == 0 {
1u8
} else {
tab.exp[gf_pos % 255] };
let x_inv = if gf_pos == 0 {
1u8
} else {
tab.exp[(255 - (gf_pos % 255)) % 255] };
let omega_val = eval_asc(&omega, x_inv);
let sp_val = eval_asc(&sigma_prime, x_inv);
// A zero derivative would require dividing by zero; emit a zero
// magnitude and let the caller's syndrome re-check reject the result.
if sp_val == 0 {
magnitudes.push(0);
continue;
}
magnitudes.push(gf_mul(x_val, gf_mul(omega_val, gf_inv(sp_val))));
}
magnitudes
}
/// Error returned when a block contains more symbol errors than the code
/// can correct, or when a candidate correction fails verification.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct RsDecodeError;

impl core::fmt::Display for RsDecodeError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "Reed-Solomon: too many errors to correct")
    }
}

// Implement the standard error trait so callers can use `?`,
// `Box<dyn Error>`, and error-reporting crates with this type.
impl std::error::Error for RsDecodeError {}
/// Decodes one (possibly shortened) RS block at the default rate.
/// Returns the recovered data bytes and the number of corrected errors.
///
/// # Panics
/// Panics if `received.len() != data_len + PARITY_LEN`.
///
/// # Errors
/// Returns `RsDecodeError` when more than T_MAX errors are present or the
/// error pattern cannot be resolved.
pub fn rs_decode(received: &[u8], data_len: usize) -> Result<(Vec<u8>, usize), RsDecodeError> {
let block_len = data_len + PARITY_LEN;
assert_eq!(
received.len(),
block_len,
"received length {} != expected {}",
received.len(),
block_len
);
// Shortened code: treat the block as a full RS(255, 191) codeword whose
// leading `padding` data symbols are implicitly zero.
let padding = N_MAX - block_len;
let mut full_block = vec![0u8; N_MAX];
full_block[padding..].copy_from_slice(received);
let syndromes = compute_syndromes(&full_block);
// Fast path: already a valid codeword.
if syndromes_are_zero(&syndromes) {
return Ok((received[..data_len].to_vec(), 0));
}
let sigma_asc = berlekamp_massey(&syndromes);
let num_errors = sigma_asc.len() - 1;
if num_errors > T_MAX {
return Err(RsDecodeError);
}
let found = chien_search(&sigma_asc, N_MAX).ok_or(RsDecodeError)?;
let magnitudes = forney(&sigma_asc, &syndromes, &found);
let mut corrected = full_block;
for (i, &(_, array_pos)) in found.iter().enumerate() {
// An "error" located in the implicit zero padding cannot be real.
if array_pos < padding {
return Err(RsDecodeError);
}
corrected[array_pos] = gf_add(corrected[array_pos], magnitudes[i]);
}
// Verify the correction actually produced a codeword; otherwise the
// error pattern exceeded the code's capability.
let check_syndromes = compute_syndromes(&corrected);
if !syndromes_are_zero(&check_syndromes) {
return Err(RsDecodeError);
}
Ok((corrected[padding..padding + data_len].to_vec(), num_errors))
}
/// Aggregate statistics from decoding a sequence of RS blocks.
#[derive(Debug, Clone, Default)]
pub struct RsDecodeStats {
// Total symbol errors corrected across all blocks.
pub total_errors: usize,
// Combined correction capacity: t errors per block times num_blocks.
pub error_capacity: usize,
// Largest number of errors corrected in any single block.
pub max_block_errors: usize,
// Number of blocks processed.
pub num_blocks: usize,
}
/// Decodes consecutive default-rate RS blocks produced by
/// `rs_encode_blocks`, returning the recovered payload and per-run
/// error statistics.
///
/// # Errors
/// Returns `RsDecodeError` if `encoded` is too short for the expected
/// blocks or any block fails to decode.
pub fn rs_decode_blocks(encoded: &[u8], total_data_len: usize) -> Result<(Vec<u8>, RsDecodeStats), RsDecodeError> {
    let mut payload = Vec::with_capacity(total_data_len);
    let mut stats = RsDecodeStats::default();
    let mut cursor = 0usize;
    let mut left = total_data_len;
    while left > 0 {
        let data_len = left.min(K_DEFAULT);
        let block_len = data_len + PARITY_LEN;
        // `get` returns None on a truncated final block.
        let block = encoded
            .get(cursor..cursor + block_len)
            .ok_or(RsDecodeError)?;
        let (data, errors) = rs_decode(block, data_len)?;
        payload.extend_from_slice(&data);
        stats.num_blocks += 1;
        stats.total_errors += errors;
        stats.max_block_errors = stats.max_block_errors.max(errors);
        cursor += block_len;
        left -= data_len;
    }
    stats.error_capacity = stats.num_blocks * T_MAX;
    Ok((payload, stats))
}
/// Encoded length for `data_len` payload bytes at the default rate:
/// the payload plus PARITY_LEN parity bytes for every (possibly partial)
/// block. Zero-length payloads produce zero output.
pub fn rs_encoded_len(data_len: usize) -> usize {
    // Ceiling division: number of blocks needed, counting a partial tail.
    let num_blocks = (data_len + K_DEFAULT - 1) / K_DEFAULT;
    data_len + num_blocks * PARITY_LEN
}
/// Number of parity symbols per block at the default code rate.
pub const fn parity_len() -> usize {
PARITY_LEN
}
/// Systematic RS encode with a caller-chosen parity length.
/// A parity length of 0 disables coding and returns the data unchanged.
///
/// # Panics
/// Panics if `parity_len` exceeds 240 (see PARITY_TIERS / gen_poly_for)
/// or if `data` does not fit in a single block.
pub fn rs_encode_with_parity(data: &[u8], parity_len: usize) -> Vec<u8> {
    if parity_len == 0 {
        return data.to_vec();
    }
    // Validate parity_len BEFORE computing k_max: the original order meant
    // parity_len > 255 died with an unhelpful subtraction-overflow panic in
    // `N_MAX - parity_len` instead of this message.
    assert!(parity_len <= 240, "parity_len {} exceeds 240", parity_len);
    let k_max = N_MAX - parity_len;
    assert!(
        data.len() <= k_max,
        "data length {} exceeds max {} for parity_len={}",
        data.len(),
        k_max,
        parity_len
    );
    let gpoly = gen_poly_for(parity_len);
    // LFSR division of data * x^parity_len by the generator polynomial.
    let mut shift_reg = vec![0u8; parity_len];
    for &byte in data {
        let feedback = gf_add(byte, shift_reg[0]);
        for j in 0..parity_len - 1 {
            shift_reg[j] = gf_add(shift_reg[j + 1], gf_mul(feedback, gpoly[j + 1]));
        }
        shift_reg[parity_len - 1] = gf_mul(feedback, gpoly[parity_len]);
    }
    let mut encoded = Vec::with_capacity(data.len() + parity_len);
    encoded.extend_from_slice(data);
    encoded.extend_from_slice(&shift_reg);
    encoded
}
/// Decodes one (possibly shortened) RS block with a caller-chosen parity
/// length; the adaptive-rate counterpart of `rs_decode`.
/// Returns the recovered data and the number of corrected errors.
///
/// # Panics
/// Panics if `received.len() != data_len + parity_len`.
/// NOTE(review): `N_MAX - block_len` below also panics (overflow) if
/// data_len + parity_len > 255 — presumably callers guarantee this; confirm.
///
/// # Errors
/// Returns `RsDecodeError` when more than parity_len/2 errors are present
/// or the error pattern cannot be resolved.
pub fn rs_decode_with_parity(
received: &[u8],
data_len: usize,
parity_len: usize,
) -> Result<(Vec<u8>, usize), RsDecodeError> {
let block_len = data_len + parity_len;
assert_eq!(
received.len(),
block_len,
"received length {} != expected {}",
received.len(),
block_len
);
// Shortened code: left-pad to a full 255-symbol codeword with zeros.
let padding = N_MAX - block_len;
let mut full_block = vec![0u8; N_MAX];
full_block[padding..].copy_from_slice(received);
let tab = gf_tables();
// Syndromes at alpha^0 .. alpha^(parity_len-1).
let mut syndromes = vec![0u8; parity_len];
for i in 0..parity_len {
syndromes[i] = poly_eval(&full_block, tab.exp[i]);
}
// Fast path: already a valid codeword.
if syndromes.iter().all(|&s| s == 0) {
return Ok((received[..data_len].to_vec(), 0));
}
let t_max = parity_len / 2;
let sigma_asc = berlekamp_massey(&syndromes);
let num_errors = sigma_asc.len() - 1;
if num_errors > t_max {
return Err(RsDecodeError);
}
let found = chien_search(&sigma_asc, N_MAX).ok_or(RsDecodeError)?;
let magnitudes = forney(&sigma_asc, &syndromes, &found);
let mut corrected = full_block;
for (i, &(_, array_pos)) in found.iter().enumerate() {
// An "error" located in the implicit zero padding cannot be real.
if array_pos < padding {
return Err(RsDecodeError);
}
corrected[array_pos] = gf_add(corrected[array_pos], magnitudes[i]);
}
// Re-check the syndromes to reject miscorrections.
let mut check_ok = true;
for i in 0..parity_len {
if poly_eval(&corrected, tab.exp[i]) != 0 {
check_ok = false;
break;
}
}
if !check_ok {
return Err(RsDecodeError);
}
Ok((corrected[padding..padding + data_len].to_vec(), num_errors))
}
/// Encodes `payload` as consecutive RS blocks sized for the given parity
/// length (k = 255 - parity_len data bytes per block).
pub fn rs_encode_blocks_with_parity(payload: &[u8], parity_len: usize) -> Vec<u8> {
    payload
        .chunks(N_MAX - parity_len)
        .flat_map(|chunk| rs_encode_with_parity(chunk, parity_len))
        .collect()
}
/// Decodes consecutive RS blocks produced by `rs_encode_blocks_with_parity`
/// at the given parity length, returning the payload plus error statistics.
///
/// # Errors
/// Returns `RsDecodeError` if `encoded` is too short for the expected
/// blocks or any block fails to decode.
pub fn rs_decode_blocks_with_parity(
encoded: &[u8],
total_data_len: usize,
parity_len: usize,
) -> Result<(Vec<u8>, RsDecodeStats), RsDecodeError> {
let k_max = N_MAX - parity_len;
let t_max = parity_len / 2;
let mut decoded = Vec::with_capacity(total_data_len);
let mut remaining_data = total_data_len;
let mut offset = 0;
let mut stats = RsDecodeStats::default();
while remaining_data > 0 {
// All blocks carry k_max data bytes except a possibly-shorter tail.
let chunk_data_len = remaining_data.min(k_max);
let block_len = chunk_data_len + parity_len;
// Truncated input: the final block is missing bytes.
if offset + block_len > encoded.len() {
return Err(RsDecodeError);
}
let block = &encoded[offset..offset + block_len];
let (data, errors) = rs_decode_with_parity(block, chunk_data_len, parity_len)?;
decoded.extend_from_slice(&data);
stats.total_errors += errors;
stats.num_blocks += 1;
if errors > stats.max_block_errors {
stats.max_block_errors = errors;
}
offset += block_len;
remaining_data -= chunk_data_len;
}
stats.error_capacity = stats.num_blocks * t_max;
Ok((decoded, stats))
}
/// Encoded length for `data_len` payload bytes at the given parity tier:
/// the payload plus `parity_len` bytes for every (possibly partial) block.
pub fn rs_encoded_len_with_parity(data_len: usize, parity_len: usize) -> usize {
    let k_max = N_MAX - parity_len;
    // Ceiling division: number of blocks needed, counting a partial tail.
    let num_blocks = (data_len + k_max - 1) / k_max;
    data_len + num_blocks * parity_len
}
/// Picks the largest parity tier whose encoded frame, measured in bits,
/// still fits within `num_units`. Tiers are tried in ascending order and
/// the scan stops at the first tier that does not fit.
/// NOTE(review): if even the smallest tier does not fit, this still
/// returns PARITY_TIERS[0] — presumably an intentional minimum-protection
/// floor; confirm with callers.
pub fn choose_parity_tier(frame_len: usize, num_units: usize) -> usize {
let mut best = PARITY_TIERS[0]; for &tier in &PARITY_TIERS {
// Encoded size in bits at this tier (8 bits per encoded byte).
let rs_bits = rs_encoded_len_with_parity(frame_len, tier) * 8;
if rs_bits <= num_units {
best = tier;
} else {
break;
}
}
best
}
// Unit tests: GF(2^8) arithmetic, single-block encode/decode (full and
// shortened), multi-block framing, length helpers, and the adaptive
// parity-tier API.
#[cfg(test)]
mod tests {
use super::*;
// ---- GF(2^8) arithmetic ----
#[test]
fn gf_mul_identity() {
for a in 0..=255u16 {
assert_eq!(gf_mul(a as u8, 1), a as u8);
assert_eq!(gf_mul(1, a as u8), a as u8);
}
}
#[test]
fn gf_mul_zero() {
for a in 0..=255u16 {
assert_eq!(gf_mul(a as u8, 0), 0);
assert_eq!(gf_mul(0, a as u8), 0);
}
}
#[test]
fn gf_inverse_roundtrip() {
for a in 1..=255u16 {
let inv = gf_inv(a as u8);
assert_eq!(gf_mul(a as u8, inv), 1, "a={a}, inv={inv}");
}
}
// a^255 == 1 for every nonzero a (multiplicative group order 255).
#[test]
fn gf_pow_consistency() {
let t = gf_tables();
for a in 1..=255u16 {
assert_eq!(gf_pow(a as u8, 1), a as u8);
assert_eq!(gf_pow(a as u8, 0), 1);
assert_eq!(gf_pow(a as u8, 255), 1, "a={a}");
}
let _ = t;
}
// ---- single-block encode/decode at the default rate ----
#[test]
fn encode_decode_no_errors() {
let data = b"Hello, Reed-Solomon!";
let encoded = rs_encode(data);
let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
assert_eq!(decoded, data);
assert_eq!(errors, 0);
}
#[test]
fn encode_decode_with_errors() {
let data = b"Test message for RS error correction.";
let mut encoded = rs_encode(data);
// Corrupt 7 data symbols and 3 parity symbols (10 total, under t = 32).
encoded[0] ^= 0xFF;
encoded[5] ^= 0xAA;
encoded[10] ^= 0x55;
encoded[15] ^= 0x11;
encoded[20] ^= 0x22;
encoded[25] ^= 0x33;
encoded[30] ^= 0x01;
encoded[data.len()] ^= 0x77; encoded[data.len() + 10] ^= 0x88;
encoded[data.len() + 30] ^= 0x99;
let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
assert_eq!(decoded, data);
assert_eq!(errors, 10);
}
// Exactly t = 32 errors: the maximum the default code can correct.
#[test]
fn encode_decode_max_correctable() {
let data = vec![42u8; 100];
let mut encoded = rs_encode(&data);
for i in 0..32 {
encoded[i * 3] ^= 0xFF;
}
let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
assert_eq!(decoded, data);
assert_eq!(errors, 32);
}
// t + 1 = 33 errors must be rejected.
#[test]
fn too_many_errors_fails() {
let data = vec![0u8; 50];
let mut encoded = rs_encode(&data);
for i in 0..33 {
encoded[i] ^= 0xFF;
}
assert!(rs_decode(&encoded, data.len()).is_err());
}
// ---- shortened codes (data shorter than K_DEFAULT) ----
#[test]
fn shortened_code_works() {
let data = b"Hi";
let encoded = rs_encode(data);
assert_eq!(encoded.len(), data.len() + PARITY_LEN);
let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
assert_eq!(decoded, data);
assert_eq!(errors, 0);
}
#[test]
fn shortened_code_with_errors() {
let data = b"Short";
let mut encoded = rs_encode(data);
encoded[0] ^= 0xFF;
encoded[2] ^= 0xAA;
let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
assert_eq!(decoded, data);
assert_eq!(errors, 2);
}
// ---- multi-block framing ----
#[test]
fn blocks_roundtrip() {
let data: Vec<u8> = (0..400).map(|i| (i % 256) as u8).collect();
let encoded = rs_encode_blocks(&data);
assert_eq!(encoded.len(), rs_encoded_len(data.len()));
let (decoded, stats) = rs_decode_blocks(&encoded, data.len()).unwrap();
assert_eq!(decoded, data);
assert_eq!(stats.total_errors, 0);
}
#[test]
fn blocks_with_errors() {
let data: Vec<u8> = (0..400).map(|i| (i % 256) as u8).collect();
let mut encoded = rs_encode_blocks(&data);
encoded[10] ^= 0xFF;
encoded[100] ^= 0xAA;
encoded[260] ^= 0x55;
encoded[300] ^= 0x11;
encoded[520] ^= 0x33;
let (decoded, stats) = rs_decode_blocks(&encoded, data.len()).unwrap();
assert_eq!(decoded, data);
assert_eq!(stats.total_errors, 5);
assert!(stats.max_block_errors <= 2);
}
// Zero-length data still round-trips (parity-only block).
#[test]
fn empty_data() {
let data: &[u8] = &[];
let encoded = rs_encode(data);
assert_eq!(encoded.len(), PARITY_LEN);
let (decoded, errors) = rs_decode(&encoded, 0).unwrap();
assert_eq!(decoded, data);
assert_eq!(errors, 0);
}
// ---- length helpers ----
#[test]
fn rs_encoded_len_correct() {
assert_eq!(rs_encoded_len(100), 100 + 64);
assert_eq!(rs_encoded_len(191), 191 + 64);
assert_eq!(rs_encoded_len(192), (191 + 64) + (1 + 64));
assert_eq!(rs_encoded_len(400), 2 * (191 + 64) + (18 + 64));
}
#[test]
fn rs_encoded_len_edge_cases() {
assert_eq!(rs_encoded_len(0), 0);
assert_eq!(rs_encoded_len(1), 1 + 64);
assert_eq!(rs_encoded_len(191), 191 + 64);
assert_eq!(rs_encoded_len(192), (191 + 64) + (1 + 64));
}
// ---- small error counts, full and shortened blocks ----
#[test]
fn single_error_full_block() {
let data = vec![42u8; K_DEFAULT];
let mut encoded = rs_encode(&data);
encoded[50] ^= 0x01;
let (decoded, errors) = rs_decode(&encoded, K_DEFAULT).unwrap();
assert_eq!(decoded, data);
assert_eq!(errors, 1);
}
#[test]
fn single_error_shortened() {
let data = b"Short";
let mut encoded = rs_encode(data);
encoded[0] ^= 0xFF;
let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
assert_eq!(decoded, data);
assert_eq!(errors, 1);
}
#[test]
fn two_errors_full_block() {
let data = vec![42u8; K_DEFAULT];
let mut encoded = rs_encode(&data);
encoded[0] ^= 0xFF;
encoded[50] ^= 0xAA;
let (decoded, errors) = rs_decode(&encoded, K_DEFAULT).unwrap();
assert_eq!(decoded, data);
assert_eq!(errors, 2);
}
#[test]
fn two_errors_shortened() {
let data = b"Short";
let mut encoded = rs_encode(data);
encoded[0] ^= 0xFF;
encoded[2] ^= 0xAA;
let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
assert_eq!(decoded, data);
assert_eq!(errors, 2);
}
// The generator polynomial must vanish at alpha^0 .. alpha^(2t-1).
#[test]
fn generator_polynomial_correct() {
let gpoly = gen_poly();
assert_eq!(gpoly.len(), PARITY_LEN + 1);
assert_eq!(gpoly[0], 1);
let t = gf_tables();
for i in 0..PARITY_LEN {
assert_eq!(poly_eval(gpoly, t.exp[i]), 0, "root alpha^{i} failed");
}
}
// ---- adaptive parity tiers ----
#[test]
fn adaptive_rs_roundtrip_each_tier() {
for &parity in &PARITY_TIERS {
let k_max = N_MAX - parity;
let data: Vec<u8> = (0..k_max.min(100)).map(|i| (i % 256) as u8).collect();
let encoded = rs_encode_with_parity(&data, parity);
assert_eq!(encoded.len(), data.len() + parity);
let (decoded, errors) = rs_decode_with_parity(&encoded, data.len(), parity).unwrap();
assert_eq!(decoded, data, "parity={parity}");
assert_eq!(errors, 0, "parity={parity}");
}
}
#[test]
fn adaptive_rs_corrects_errors_at_each_tier() {
for &parity in &PARITY_TIERS {
let k_max = N_MAX - parity;
let t = parity / 2;
let data: Vec<u8> = (0..k_max.min(50)).map(|i| (i % 256) as u8).collect();
let mut encoded = rs_encode_with_parity(&data, parity);
// Flip t/2 symbols, spread across the block, staying within capacity.
let num_errors = (t / 2).min(encoded.len());
let elen = encoded.len();
for i in 0..num_errors {
encoded[i * 2 % elen] ^= 0xFF;
}
let (decoded, errors) = rs_decode_with_parity(&encoded, data.len(), parity).unwrap();
assert_eq!(decoded, data, "parity={parity}");
assert!(errors > 0, "parity={parity}");
}
}
#[test]
fn adaptive_rs_blocks_roundtrip() {
let data: Vec<u8> = (0..200).map(|i| (i % 256) as u8).collect();
for &parity in &PARITY_TIERS {
let encoded = rs_encode_blocks_with_parity(&data, parity);
assert_eq!(encoded.len(), rs_encoded_len_with_parity(data.len(), parity));
let (decoded, stats) = rs_decode_blocks_with_parity(&encoded, data.len(), parity).unwrap();
assert_eq!(decoded, data, "parity={parity}");
assert_eq!(stats.total_errors, 0, "parity={parity}");
}
}
#[test]
fn rs_encoded_len_with_parity_correct() {
assert_eq!(rs_encoded_len_with_parity(100, 128), 100 + 128);
assert_eq!(rs_encoded_len_with_parity(127, 128), 127 + 128);
assert_eq!(rs_encoded_len_with_parity(128, 128), (127 + 128) + (1 + 128));
}
#[test]
fn choose_parity_tier_picks_largest_fitting() {
let tier = choose_parity_tier(100, 10000);
assert_eq!(tier, 192);
}
}