/// Primitive polynomial for GF(2^8): x^8 + x^4 + x^3 + x^2 + 1 (0x11D),
/// used to reduce products when building the exp/log tables.
const PRIM_POLY: u16 = 0x11D;

/// Full codeword length n over GF(2^8): 2^8 - 1 = 255 symbols.
const N_MAX: usize = 255;

/// Default number of data symbols per block (k), i.e. RS(255, 191).
const K_DEFAULT: usize = 191;

/// Default parity symbols per block: n - k = 64.
const PARITY_LEN: usize = N_MAX - K_DEFAULT;
/// Maximum correctable symbol errors per default block: t = (n - k) / 2 = 32.
pub const T_MAX: usize = PARITY_LEN / 2;

/// Selectable parity sizes for the adaptive encoder, in ascending strength.
pub const PARITY_TIERS: [usize; 4] = [64, 128, 192, 240];
/// Precomputed exponent/log tables for GF(2^8) arithmetic.
struct GfTables {
    // exp[i] = alpha^i. Doubled beyond 255 entries so that
    // log[a] + log[b] (at most 508) indexes without a modulo reduction.
    exp: [u8; 512],
    // log[x] = discrete log of x base alpha; log[0] is never written and
    // stays 0 (callers guard against multiplying/inverting zero).
    log: [u8; 256],
}
36
37fn build_gf_tables() -> GfTables {
40 let mut exp = [0u8; 512];
41 let mut log = [0u8; 256];
42
43 let mut x: u16 = 1;
44 for i in 0..255u16 {
45 exp[i as usize] = x as u8;
46 exp[(i + 255) as usize] = x as u8; log[x as usize] = i as u8;
48 x <<= 1;
49 if x & 0x100 != 0 {
50 x ^= PRIM_POLY;
51 }
52 }
53 exp[510] = exp[0];
56 exp[511] = exp[1];
57
58 GfTables { exp, log }
59}
60
61fn gf_tables() -> &'static GfTables {
62 use std::sync::OnceLock;
63 static TABLES: OnceLock<GfTables> = OnceLock::new();
64 TABLES.get_or_init(build_gf_tables)
65}
66
67fn gf_mul(a: u8, b: u8) -> u8 {
69 if a == 0 || b == 0 {
70 return 0;
71 }
72 let t = gf_tables();
73 let log_sum = t.log[a as usize] as usize + t.log[b as usize] as usize;
74 t.exp[log_sum]
75}
76
/// Addition in GF(2^8) is carry-less, i.e. XOR; it is also its own inverse.
fn gf_add(a: u8, b: u8) -> u8 {
    a ^ b
}
81
82fn gf_inv(a: u8) -> u8 {
84 assert_ne!(a, 0, "cannot invert zero in GF(2^8)");
85 let t = gf_tables();
86 t.exp[255 - t.log[a as usize] as usize]
87}
88
/// Exponentiation in GF(2^8) via log tables: a^n = alpha^(n * log a mod 255).
/// Test helper only. By convention 0^0 == 1 and 0^n == 0 for n > 0.
#[cfg(test)]
fn gf_pow(a: u8, n: u32) -> u8 {
    if a == 0 {
        return if n == 0 { 1 } else { 0 };
    }
    let t = gf_tables();
    // Widen to u64 before multiplying: log_a (up to 254) times a large u32 n
    // overflows u32 (panic in debug, wrong result in release).
    let log_a = t.log[a as usize] as u64;
    let exp_idx = (log_a * n as u64) % 255;
    t.exp[exp_idx as usize]
}
100
101fn poly_eval(poly: &[u8], x: u8) -> u8 {
103 let mut result = 0u8;
104 for &coeff in poly {
105 result = gf_add(gf_mul(result, x), coeff);
106 }
107 result
108}
109
110fn poly_mul(a: &[u8], b: &[u8]) -> Vec<u8> {
112 let mut result = vec![0u8; a.len() + b.len() - 1];
113 for (i, &ac) in a.iter().enumerate() {
114 for (j, &bc) in b.iter().enumerate() {
115 result[i + j] = gf_add(result[i + j], gf_mul(ac, bc));
116 }
117 }
118 result
119}
120
121fn build_gen_poly(parity_len: usize) -> Vec<u8> {
126 let t = gf_tables();
127 let mut gpoly = vec![1u8]; for i in 0..parity_len {
130 let root = t.exp[i]; gpoly = poly_mul(&gpoly, &[1, root]);
132 }
133 gpoly
134}
135
136fn gen_poly() -> &'static Vec<u8> {
137 use std::sync::OnceLock;
138 static GEN: OnceLock<Vec<u8>> = OnceLock::new();
139 GEN.get_or_init(|| build_gen_poly(PARITY_LEN))
140}
141
/// Returns the cached generator polynomial for the given parity length.
///
/// Each supported size gets its own lazily-initialized static, so every
/// polynomial is built at most once per process.
///
/// # Panics
/// Panics for parity lengths outside the supported set
/// {4, 8, 16, 32, 64, 128, 192, 240, PARITY_LEN}.
fn gen_poly_for(parity_len: usize) -> &'static Vec<u8> {
    use std::sync::OnceLock;
    static GEN_4: OnceLock<Vec<u8>> = OnceLock::new();
    static GEN_8: OnceLock<Vec<u8>> = OnceLock::new();
    static GEN_16: OnceLock<Vec<u8>> = OnceLock::new();
    static GEN_32: OnceLock<Vec<u8>> = OnceLock::new();
    static GEN_64: OnceLock<Vec<u8>> = OnceLock::new();
    static GEN_128: OnceLock<Vec<u8>> = OnceLock::new();
    static GEN_192: OnceLock<Vec<u8>> = OnceLock::new();
    static GEN_240: OnceLock<Vec<u8>> = OnceLock::new();

    match parity_len {
        4 => GEN_4.get_or_init(|| build_gen_poly(4)),
        8 => GEN_8.get_or_init(|| build_gen_poly(8)),
        16 => GEN_16.get_or_init(|| build_gen_poly(16)),
        32 => GEN_32.get_or_init(|| build_gen_poly(32)),
        64 => GEN_64.get_or_init(|| build_gen_poly(64)),
        128 => GEN_128.get_or_init(|| build_gen_poly(128)),
        192 => GEN_192.get_or_init(|| build_gen_poly(192)),
        240 => GEN_240.get_or_init(|| build_gen_poly(240)),
        _ => {
            // NOTE(review): with the current PARITY_LEN == 64 this branch is
            // unreachable (64 is matched above); it only takes effect if
            // K_DEFAULT/PARITY_LEN are ever changed.
            if parity_len == PARITY_LEN {
                gen_poly()
            } else {
                panic!("unsupported parity length: {parity_len}")
            }
        }
    }
}
174
175pub fn rs_encode(data: &[u8]) -> Vec<u8> {
193 assert!(
194 data.len() <= K_DEFAULT,
195 "data length {} exceeds max {}",
196 data.len(),
197 K_DEFAULT
198 );
199
200 let gpoly = gen_poly();
201 let parity_len = PARITY_LEN;
202
203 let mut shift_reg = vec![0u8; parity_len];
206
207 for &byte in data {
208 let feedback = gf_add(byte, shift_reg[0]);
209 for j in 0..parity_len - 1 {
211 shift_reg[j] = gf_add(shift_reg[j + 1], gf_mul(feedback, gpoly[j + 1]));
212 }
213 shift_reg[parity_len - 1] = gf_mul(feedback, gpoly[parity_len]);
214 }
215
216 let mut encoded = Vec::with_capacity(data.len() + parity_len);
218 encoded.extend_from_slice(data);
219 encoded.extend_from_slice(&shift_reg);
220 encoded
221}
222
223pub fn rs_encode_blocks(payload: &[u8]) -> Vec<u8> {
229 let mut encoded = Vec::new();
230 for chunk in payload.chunks(K_DEFAULT) {
231 encoded.extend_from_slice(&rs_encode(chunk));
232 }
233 encoded
234}
235
236fn compute_syndromes(received: &[u8]) -> Vec<u8> {
241 let tab = gf_tables();
242 let two_t = PARITY_LEN;
243 let mut syndromes = vec![0u8; two_t];
244 for i in 0..two_t {
245 syndromes[i] = poly_eval(received, tab.exp[i]); }
247 syndromes
248}
249
/// True when every syndrome is zero, i.e. the codeword is error-free.
fn syndromes_are_zero(syndromes: &[u8]) -> bool {
    !syndromes.iter().any(|&s| s != 0)
}
253
/// Berlekamp–Massey: derives the error-locator polynomial sigma(x) from the
/// syndromes. Returned with ascending coefficients, sigma[0] == 1; the
/// degree (len - 1) is the estimated number of errors.
fn berlekamp_massey(syndromes: &[u8]) -> Vec<u8> {
    let n = syndromes.len();
    // c: current connection polynomial; b: the copy of c saved at the most
    // recent length change. Fixed buffers with explicit logical lengths.
    let mut c = vec![0u8; n + 1];
    c[0] = 1;
    let mut c_len = 1usize;

    let mut b = vec![0u8; n + 1];
    b[0] = 1;
    let mut b_len = 1usize;

    let mut ell = 0usize; // current LFSR length L
    let mut bval = 1u8;   // discrepancy recorded at the last length change
    let mut m = 1usize;   // iterations since the last length change
    for r in 0..n {
        // Discrepancy: delta = S_r + sum_{i=1..c_len-1} c_i * S_{r-i}.
        let mut delta = syndromes[r];
        for i in 1..c_len {
            delta = gf_add(delta, gf_mul(c[i], syndromes[r - i]));
        }

        // Current polynomial already annihilates this syndrome.
        if delta == 0 {
            m += 1;
            continue;
        }

        let factor = gf_mul(delta, gf_inv(bval));

        if 2 * ell <= r {
            // Length change: c <- c + factor * x^m * b, then b <- old c.
            let old_c = c[..c_len].to_vec();
            let old_c_len = c_len;

            let new_len = (b_len + m).max(c_len);
            c_len = new_len;
            for j in 0..b_len {
                c[j + m] = gf_add(c[j + m], gf_mul(factor, b[j]));
            }

            // Save the pre-update c into b and zero out b's tail.
            b[..old_c_len].copy_from_slice(&old_c[..old_c_len]);
            for j in old_c_len..b.len() {
                b[j] = 0;
            }
            b_len = old_c_len;
            ell = r + 1 - ell;
            bval = delta;
            m = 1;
        } else {
            // No length change: only update c in place.
            let new_len = (b_len + m).max(c_len);
            c_len = new_len;
            for j in 0..b_len {
                c[j + m] = gf_add(c[j + m], gf_mul(factor, b[j]));
            }
            m += 1;
        }
    }

    c[..c_len].to_vec()
}
319
320fn eval_asc(poly: &[u8], x: u8) -> u8 {
322 let mut result = 0u8;
323 let mut x_pow = 1u8;
324 for &coeff in poly {
325 result = gf_add(result, gf_mul(coeff, x_pow));
326 x_pow = gf_mul(x_pow, x);
327 }
328 result
329}
330
331fn chien_search(sigma_asc: &[u8], n: usize) -> Option<Vec<(usize, usize)>> {
339 if n == 0 {
340 return None;
341 }
342 let tab = gf_tables();
343 let num_errors = sigma_asc.len() - 1;
344 let mut found = Vec::with_capacity(num_errors);
345
346 for p in 0..n {
349 let x = if p == 0 {
350 1u8
351 } else {
352 tab.exp[(255 - (p % 255)) % 255] };
354 if eval_asc(sigma_asc, x) == 0 {
355 found.push((p, n - 1 - p));
356 }
357 }
358
359 if found.len() != num_errors {
360 return None;
361 }
362
363 Some(found)
364}
365
/// Forney algorithm: computes the error magnitude for each located error.
///
/// Builds the error-evaluator omega(x) = sigma(x) * S(x) mod x^(2t) and the
/// formal derivative sigma'(x) (in characteristic 2, only odd-degree terms of
/// sigma survive). The magnitude at GF position p, with X = alpha^p, is
/// X * omega(X^-1) / sigma'(X^-1); the leading X factor corresponds to the
/// generator's first root being alpha^0 here (see `build_gen_poly`).
fn forney(
    sigma_asc: &[u8],
    syndromes: &[u8],
    found: &[(usize, usize)],
) -> Vec<u8> {
    let tab = gf_tables();
    let two_t = syndromes.len();

    // omega = sigma * S, truncated to degree < 2t.
    let mut omega = vec![0u8; two_t];
    for i in 0..sigma_asc.len().min(two_t) {
        for j in 0..two_t {
            if i + j < two_t {
                omega[i + j] = gf_add(omega[i + j], gf_mul(sigma_asc[i], syndromes[j]));
            }
        }
    }

    // Formal derivative: sigma'[i-1] = sigma[i] for odd i; even terms vanish
    // because i * coeff == 0 when i is even in GF(2^8).
    let deriv_len = sigma_asc.len().saturating_sub(1);
    let mut sigma_prime = vec![0u8; deriv_len];
    for i in (1..sigma_asc.len()).step_by(2) {
        sigma_prime[i - 1] = sigma_asc[i];
    }

    let mut magnitudes = Vec::with_capacity(found.len());
    for &(gf_pos, _) in found {
        // X = alpha^gf_pos and X^-1 = alpha^(-gf_pos).
        let x_val = if gf_pos == 0 {
            1u8
        } else {
            tab.exp[gf_pos % 255]
        };
        let x_inv = if gf_pos == 0 {
            1u8
        } else {
            tab.exp[(255 - (gf_pos % 255)) % 255]
        };

        let omega_val = eval_asc(&omega, x_inv);
        let sp_val = eval_asc(&sigma_prime, x_inv);

        // A zero derivative would make the division undefined; emit a zero
        // magnitude and let the caller's syndrome re-check reject the result.
        if sp_val == 0 {
            magnitudes.push(0);
            continue;
        }

        magnitudes.push(gf_mul(x_val, gf_mul(omega_val, gf_inv(sp_val))));
    }

    magnitudes
}
426
/// Error returned when a Reed-Solomon block contains more errors than the
/// code can correct (or correction failed verification).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct RsDecodeError;

impl core::fmt::Display for RsDecodeError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "Reed-Solomon: too many errors to correct")
    }
}

// With Debug + Display in place, implementing the standard Error trait lets
// callers use `?` into `Box<dyn Error>` / anyhow-style error types.
impl std::error::Error for RsDecodeError {}
436
/// Decodes one (possibly shortened) RS(255, 191) block.
///
/// `received` must be exactly `data_len + PARITY_LEN` bytes: data followed by
/// parity. On success returns the recovered data and the number of symbol
/// errors corrected; returns `RsDecodeError` if the block is uncorrectable.
///
/// # Panics
/// Panics if `received.len() != data_len + PARITY_LEN`.
pub fn rs_decode(received: &[u8], data_len: usize) -> Result<(Vec<u8>, usize), RsDecodeError> {
    let block_len = data_len + PARITY_LEN;
    assert_eq!(
        received.len(),
        block_len,
        "received length {} != expected {}",
        received.len(),
        block_len
    );

    // Shortened code: re-expand to a full 255-symbol codeword by prepending
    // zero symbols (zeros do not change syndromes or parity).
    let padding = N_MAX - block_len;
    let mut full_block = vec![0u8; N_MAX];
    full_block[padding..].copy_from_slice(received);

    let syndromes = compute_syndromes(&full_block);

    // All-zero syndromes: the codeword is already valid, nothing to fix.
    if syndromes_are_zero(&syndromes) {
        return Ok((received[..data_len].to_vec(), 0));
    }

    // Error-locator polynomial (ascending order, sigma[0] == 1); its degree
    // is the estimated error count.
    let sigma_asc = berlekamp_massey(&syndromes);
    let num_errors = sigma_asc.len() - 1;

    if num_errors > T_MAX {
        return Err(RsDecodeError);
    }

    // Locate error positions; not finding exactly deg(sigma) roots means the
    // error pattern exceeds the code's correction capability.
    let found = chien_search(&sigma_asc, N_MAX).ok_or(RsDecodeError)?;

    let magnitudes = forney(&sigma_asc, &syndromes, &found);

    let mut corrected = full_block;
    for (i, &(_, array_pos)) in found.iter().enumerate() {
        // An "error" located inside the zero padding of a shortened block is
        // impossible for a correctable word — treat as decode failure.
        if array_pos < padding {
            return Err(RsDecodeError);
        }
        // Errors are additive in GF(2^8), so XOR-ing the magnitude fixes them.
        corrected[array_pos] = gf_add(corrected[array_pos], magnitudes[i]);
    }

    // Re-check syndromes to catch silent miscorrection.
    let check_syndromes = compute_syndromes(&corrected);
    if !syndromes_are_zero(&check_syndromes) {
        return Err(RsDecodeError);
    }

    Ok((corrected[padding..padding + data_len].to_vec(), num_errors))
}
511
/// Aggregate statistics gathered while decoding a sequence of RS blocks.
#[derive(Debug, Clone, Default)]
pub struct RsDecodeStats {
    /// Total symbol errors corrected across all blocks.
    pub total_errors: usize,
    /// Total correction budget over all blocks: num_blocks * t.
    pub error_capacity: usize,
    /// Largest number of errors corrected in any single block.
    pub max_block_errors: usize,
    /// Number of blocks decoded.
    pub num_blocks: usize,
}
524
525pub fn rs_decode_blocks(encoded: &[u8], total_data_len: usize) -> Result<(Vec<u8>, RsDecodeStats), RsDecodeError> {
539 let mut decoded = Vec::with_capacity(total_data_len);
540 let mut remaining_data = total_data_len;
541 let mut offset = 0;
542 let mut stats = RsDecodeStats::default();
543
544 while remaining_data > 0 {
545 let chunk_data_len = remaining_data.min(K_DEFAULT);
546 let block_len = chunk_data_len + PARITY_LEN;
547
548 if offset + block_len > encoded.len() {
549 return Err(RsDecodeError);
550 }
551
552 let block = &encoded[offset..offset + block_len];
553 let (data, errors) = rs_decode(block, chunk_data_len)?;
554 decoded.extend_from_slice(&data);
555
556 stats.total_errors += errors;
557 stats.num_blocks += 1;
558 if errors > stats.max_block_errors {
559 stats.max_block_errors = errors;
560 }
561
562 offset += block_len;
563 remaining_data -= chunk_data_len;
564 }
565
566 stats.error_capacity = stats.num_blocks * T_MAX;
567 Ok((decoded, stats))
568}
569
570pub fn rs_encoded_len(data_len: usize) -> usize {
572 let full_blocks = data_len / K_DEFAULT;
573 let remainder = data_len % K_DEFAULT;
574 let mut total = full_blocks * (K_DEFAULT + PARITY_LEN);
575 if remainder > 0 {
576 total += remainder + PARITY_LEN;
577 }
578 total
579}
580
/// Returns the default parity length (bytes per block) used by
/// `rs_encode`/`rs_decode`.
pub const fn parity_len() -> usize {
    PARITY_LEN
}
585
586pub fn rs_encode_with_parity(data: &[u8], parity_len: usize) -> Vec<u8> {
597 if parity_len == 0 { return data.to_vec(); }
598 let k_max = N_MAX - parity_len;
599 assert!(
600 data.len() <= k_max,
601 "data length {} exceeds max {} for parity_len={}",
602 data.len(),
603 k_max,
604 parity_len
605 );
606 assert!(parity_len <= 240, "parity_len {} exceeds 240", parity_len);
607
608 let gpoly = gen_poly_for(parity_len);
609 let mut shift_reg = vec![0u8; parity_len];
610
611 for &byte in data {
612 let feedback = gf_add(byte, shift_reg[0]);
613 for j in 0..parity_len - 1 {
614 shift_reg[j] = gf_add(shift_reg[j + 1], gf_mul(feedback, gpoly[j + 1]));
615 }
616 shift_reg[parity_len - 1] = gf_mul(feedback, gpoly[parity_len]);
617 }
618
619 let mut encoded = Vec::with_capacity(data.len() + parity_len);
620 encoded.extend_from_slice(data);
621 encoded.extend_from_slice(&shift_reg);
622 encoded
623}
624
/// Decodes one (possibly shortened) RS block with a caller-chosen parity
/// length; mirrors `rs_decode` but computes `parity_len` syndromes inline.
///
/// On success returns the recovered data and the number of symbol errors
/// corrected; returns `RsDecodeError` when uncorrectable.
///
/// # Panics
/// Panics if `received.len() != data_len + parity_len`.
pub fn rs_decode_with_parity(
    received: &[u8],
    data_len: usize,
    parity_len: usize,
) -> Result<(Vec<u8>, usize), RsDecodeError> {
    let block_len = data_len + parity_len;
    assert_eq!(
        received.len(),
        block_len,
        "received length {} != expected {}",
        received.len(),
        block_len
    );

    // Re-expand the shortened block to the full 255-symbol codeword with
    // leading zeros (zeros do not affect syndromes or parity).
    let padding = N_MAX - block_len;
    let mut full_block = vec![0u8; N_MAX];
    full_block[padding..].copy_from_slice(received);

    // Syndromes S_i = r(alpha^i) for i in 0..parity_len.
    let tab = gf_tables();
    let mut syndromes = vec![0u8; parity_len];
    for i in 0..parity_len {
        syndromes[i] = poly_eval(&full_block, tab.exp[i]);
    }

    // No errors present.
    if syndromes.iter().all(|&s| s == 0) {
        return Ok((received[..data_len].to_vec(), 0));
    }

    // This tier corrects at most parity_len / 2 symbol errors.
    let t_max = parity_len / 2;
    let sigma_asc = berlekamp_massey(&syndromes);
    let num_errors = sigma_asc.len() - 1;

    if num_errors > t_max {
        return Err(RsDecodeError);
    }

    let found = chien_search(&sigma_asc, N_MAX).ok_or(RsDecodeError)?;
    let magnitudes = forney(&sigma_asc, &syndromes, &found);

    let mut corrected = full_block;
    for (i, &(_, array_pos)) in found.iter().enumerate() {
        // Errors "located" in the zero padding indicate a failed decode.
        if array_pos < padding {
            return Err(RsDecodeError);
        }
        corrected[array_pos] = gf_add(corrected[array_pos], magnitudes[i]);
    }

    // Verify the corrected word to catch miscorrections.
    let mut check_ok = true;
    for i in 0..parity_len {
        if poly_eval(&corrected, tab.exp[i]) != 0 {
            check_ok = false;
            break;
        }
    }
    if !check_ok {
        return Err(RsDecodeError);
    }

    Ok((corrected[padding..padding + data_len].to_vec(), num_errors))
}
696
697pub fn rs_encode_blocks_with_parity(payload: &[u8], parity_len: usize) -> Vec<u8> {
699 let k_max = N_MAX - parity_len;
700 let mut encoded = Vec::new();
701 for chunk in payload.chunks(k_max) {
702 encoded.extend_from_slice(&rs_encode_with_parity(chunk, parity_len));
703 }
704 encoded
705}
706
707pub fn rs_decode_blocks_with_parity(
709 encoded: &[u8],
710 total_data_len: usize,
711 parity_len: usize,
712) -> Result<(Vec<u8>, RsDecodeStats), RsDecodeError> {
713 let k_max = N_MAX - parity_len;
714 let t_max = parity_len / 2;
715 let mut decoded = Vec::with_capacity(total_data_len);
716 let mut remaining_data = total_data_len;
717 let mut offset = 0;
718 let mut stats = RsDecodeStats::default();
719
720 while remaining_data > 0 {
721 let chunk_data_len = remaining_data.min(k_max);
722 let block_len = chunk_data_len + parity_len;
723
724 if offset + block_len > encoded.len() {
725 return Err(RsDecodeError);
726 }
727
728 let block = &encoded[offset..offset + block_len];
729 let (data, errors) = rs_decode_with_parity(block, chunk_data_len, parity_len)?;
730 decoded.extend_from_slice(&data);
731
732 stats.total_errors += errors;
733 stats.num_blocks += 1;
734 if errors > stats.max_block_errors {
735 stats.max_block_errors = errors;
736 }
737
738 offset += block_len;
739 remaining_data -= chunk_data_len;
740 }
741
742 stats.error_capacity = stats.num_blocks * t_max;
743 Ok((decoded, stats))
744}
745
746pub fn rs_encoded_len_with_parity(data_len: usize, parity_len: usize) -> usize {
748 let k_max = N_MAX - parity_len;
749 let full_blocks = data_len / k_max;
750 let remainder = data_len % k_max;
751 let mut total = full_blocks * (k_max + parity_len);
752 if remainder > 0 {
753 total += remainder + parity_len;
754 }
755 total
756}
757
758pub fn choose_parity_tier(frame_len: usize, num_units: usize) -> usize {
763 let mut best = PARITY_TIERS[0]; for &tier in &PARITY_TIERS {
765 let rs_bits = rs_encoded_len_with_parity(frame_len, tier) * 8;
766 if rs_bits <= num_units {
767 best = tier;
768 } else {
769 break;
770 }
771 }
772 best
773}
774
#[cfg(test)]
mod tests {
    use super::*;

    // --- GF(2^8) arithmetic ---

    #[test]
    fn gf_mul_identity() {
        for a in 0..=255u16 {
            assert_eq!(gf_mul(a as u8, 1), a as u8);
            assert_eq!(gf_mul(1, a as u8), a as u8);
        }
    }

    #[test]
    fn gf_mul_zero() {
        for a in 0..=255u16 {
            assert_eq!(gf_mul(a as u8, 0), 0);
            assert_eq!(gf_mul(0, a as u8), 0);
        }
    }

    #[test]
    fn gf_inverse_roundtrip() {
        // a * a^-1 == 1 for every nonzero element.
        for a in 1..=255u16 {
            let inv = gf_inv(a as u8);
            assert_eq!(gf_mul(a as u8, inv), 1, "a={a}, inv={inv}");
        }
    }

    #[test]
    fn gf_pow_consistency() {
        let t = gf_tables();
        for a in 1..=255u16 {
            assert_eq!(gf_pow(a as u8, 1), a as u8);
            assert_eq!(gf_pow(a as u8, 0), 1);
            // The multiplicative group has order 255: a^255 == 1.
            assert_eq!(gf_pow(a as u8, 255), 1, "a={a}");
        }
        let _ = t;
    }

    // --- Default RS(255, 191) encode/decode ---

    #[test]
    fn encode_decode_no_errors() {
        let data = b"Hello, Reed-Solomon!";
        let encoded = rs_encode(data);
        let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
        assert_eq!(decoded, data);
        assert_eq!(errors, 0);
    }

    #[test]
    fn encode_decode_with_errors() {
        let data = b"Test message for RS error correction.";
        let mut encoded = rs_encode(data);

        // Corrupt 7 data bytes and 3 parity bytes (10 total, within t=32).
        encoded[0] ^= 0xFF;
        encoded[5] ^= 0xAA;
        encoded[10] ^= 0x55;
        encoded[15] ^= 0x11;
        encoded[20] ^= 0x22;
        encoded[25] ^= 0x33;
        encoded[30] ^= 0x01;
        encoded[data.len()] ^= 0x77;
        encoded[data.len() + 10] ^= 0x88;
        encoded[data.len() + 30] ^= 0x99;

        let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
        assert_eq!(decoded, data);
        assert_eq!(errors, 10);
    }

    #[test]
    fn encode_decode_max_correctable() {
        // Exactly t = 32 errors must still decode.
        let data = vec![42u8; 100];
        let mut encoded = rs_encode(&data);

        for i in 0..32 {
            encoded[i * 3] ^= 0xFF;
        }

        let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
        assert_eq!(decoded, data);
        assert_eq!(errors, 32);
    }

    #[test]
    fn too_many_errors_fails() {
        // t + 1 = 33 errors exceed the correction capability.
        let data = vec![0u8; 50];
        let mut encoded = rs_encode(&data);

        for i in 0..33 {
            encoded[i] ^= 0xFF;
        }

        assert!(rs_decode(&encoded, data.len()).is_err());
    }

    // --- Shortened blocks ---

    #[test]
    fn shortened_code_works() {
        let data = b"Hi";
        let encoded = rs_encode(data);
        assert_eq!(encoded.len(), data.len() + PARITY_LEN);

        let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
        assert_eq!(decoded, data);
        assert_eq!(errors, 0);
    }

    #[test]
    fn shortened_code_with_errors() {
        let data = b"Short";
        let mut encoded = rs_encode(data);
        encoded[0] ^= 0xFF;
        encoded[2] ^= 0xAA;

        let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
        assert_eq!(decoded, data);
        assert_eq!(errors, 2);
    }

    // --- Multi-block helpers ---

    #[test]
    fn blocks_roundtrip() {
        let data: Vec<u8> = (0..400).map(|i| (i % 256) as u8).collect();
        let encoded = rs_encode_blocks(&data);

        assert_eq!(encoded.len(), rs_encoded_len(data.len()));

        let (decoded, stats) = rs_decode_blocks(&encoded, data.len()).unwrap();
        assert_eq!(decoded, data);
        assert_eq!(stats.total_errors, 0);
    }

    #[test]
    fn blocks_with_errors() {
        let data: Vec<u8> = (0..400).map(|i| (i % 256) as u8).collect();
        let mut encoded = rs_encode_blocks(&data);

        // Errors spread across multiple blocks, at most 2 per block.
        encoded[10] ^= 0xFF;
        encoded[100] ^= 0xAA;
        encoded[260] ^= 0x55;
        encoded[300] ^= 0x11;
        encoded[520] ^= 0x33;

        let (decoded, stats) = rs_decode_blocks(&encoded, data.len()).unwrap();
        assert_eq!(decoded, data);
        assert_eq!(stats.total_errors, 5);
        assert!(stats.max_block_errors <= 2);
    }

    #[test]
    fn empty_data() {
        let data: &[u8] = &[];
        let encoded = rs_encode(data);
        assert_eq!(encoded.len(), PARITY_LEN);
        let (decoded, errors) = rs_decode(&encoded, 0).unwrap();
        assert_eq!(decoded, data);
        assert_eq!(errors, 0);
    }

    #[test]
    fn rs_encoded_len_correct() {
        assert_eq!(rs_encoded_len(100), 100 + 64);
        assert_eq!(rs_encoded_len(191), 191 + 64);
        assert_eq!(rs_encoded_len(192), (191 + 64) + (1 + 64));
        assert_eq!(rs_encoded_len(400), 2 * (191 + 64) + (18 + 64));
    }

    #[test]
    fn rs_encoded_len_edge_cases() {
        assert_eq!(rs_encoded_len(0), 0);
        assert_eq!(rs_encoded_len(1), 1 + 64);
        assert_eq!(rs_encoded_len(191), 191 + 64);
        assert_eq!(rs_encoded_len(192), (191 + 64) + (1 + 64));
    }

    #[test]
    fn single_error_full_block() {
        let data = vec![42u8; K_DEFAULT];
        let mut encoded = rs_encode(&data);
        encoded[50] ^= 0x01;
        let (decoded, errors) = rs_decode(&encoded, K_DEFAULT).unwrap();
        assert_eq!(decoded, data);
        assert_eq!(errors, 1);
    }

    #[test]
    fn single_error_shortened() {
        let data = b"Short";
        let mut encoded = rs_encode(data);
        encoded[0] ^= 0xFF;
        let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
        assert_eq!(decoded, data);
        assert_eq!(errors, 1);
    }

    #[test]
    fn two_errors_full_block() {
        let data = vec![42u8; K_DEFAULT];
        let mut encoded = rs_encode(&data);
        encoded[0] ^= 0xFF;
        encoded[50] ^= 0xAA;
        let (decoded, errors) = rs_decode(&encoded, K_DEFAULT).unwrap();
        assert_eq!(decoded, data);
        assert_eq!(errors, 2);
    }

    #[test]
    fn two_errors_shortened() {
        let data = b"Short";
        let mut encoded = rs_encode(data);
        encoded[0] ^= 0xFF;
        encoded[2] ^= 0xAA;
        let (decoded, errors) = rs_decode(&encoded, data.len()).unwrap();
        assert_eq!(decoded, data);
        assert_eq!(errors, 2);
    }

    #[test]
    fn generator_polynomial_correct() {
        // g(x) must be monic of degree PARITY_LEN with roots alpha^0..alpha^(2t-1).
        let gpoly = gen_poly();
        assert_eq!(gpoly.len(), PARITY_LEN + 1);
        assert_eq!(gpoly[0], 1);
        let t = gf_tables();
        for i in 0..PARITY_LEN {
            assert_eq!(poly_eval(gpoly, t.exp[i]), 0, "root alpha^{i} failed");
        }
    }

    // --- Adaptive parity tiers ---

    #[test]
    fn adaptive_rs_roundtrip_each_tier() {
        for &parity in &PARITY_TIERS {
            let k_max = N_MAX - parity;
            let data: Vec<u8> = (0..k_max.min(100)).map(|i| (i % 256) as u8).collect();
            let encoded = rs_encode_with_parity(&data, parity);
            assert_eq!(encoded.len(), data.len() + parity);
            let (decoded, errors) = rs_decode_with_parity(&encoded, data.len(), parity).unwrap();
            assert_eq!(decoded, data, "parity={parity}");
            assert_eq!(errors, 0, "parity={parity}");
        }
    }

    #[test]
    fn adaptive_rs_corrects_errors_at_each_tier() {
        for &parity in &PARITY_TIERS {
            let k_max = N_MAX - parity;
            let t = parity / 2;
            let data: Vec<u8> = (0..k_max.min(50)).map(|i| (i % 256) as u8).collect();
            let mut encoded = rs_encode_with_parity(&data, parity);

            // Inject up to t/2 errors at distinct even offsets.
            let num_errors = (t / 2).min(encoded.len());
            let elen = encoded.len();
            for i in 0..num_errors {
                encoded[i * 2 % elen] ^= 0xFF;
            }

            let (decoded, errors) = rs_decode_with_parity(&encoded, data.len(), parity).unwrap();
            assert_eq!(decoded, data, "parity={parity}");
            assert!(errors > 0, "parity={parity}");
        }
    }

    #[test]
    fn adaptive_rs_blocks_roundtrip() {
        let data: Vec<u8> = (0..200).map(|i| (i % 256) as u8).collect();
        for &parity in &PARITY_TIERS {
            let encoded = rs_encode_blocks_with_parity(&data, parity);
            assert_eq!(encoded.len(), rs_encoded_len_with_parity(data.len(), parity));
            let (decoded, stats) = rs_decode_blocks_with_parity(&encoded, data.len(), parity).unwrap();
            assert_eq!(decoded, data, "parity={parity}");
            assert_eq!(stats.total_errors, 0, "parity={parity}");
        }
    }

    #[test]
    fn rs_encoded_len_with_parity_correct() {
        assert_eq!(rs_encoded_len_with_parity(100, 128), 100 + 128);
        assert_eq!(rs_encoded_len_with_parity(127, 128), 127 + 128);
        assert_eq!(rs_encoded_len_with_parity(128, 128), (127 + 128) + (1 + 128));
    }

    #[test]
    fn choose_parity_tier_picks_largest_fitting() {
        // 100-byte frame at parity 192 is 292 bytes = 2336 bits <= 10000,
        // while parity 240 needs 1780 bytes = 14240 bits > 10000.
        let tier = choose_parity_tier(100, 10000);
        assert_eq!(tier, 192);
    }
}