// zfec_rs/lib.rs

/* Copyright (C) 2022, Walker Thornley
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

//! A pure Rust implementation of the Zfec library.
//!
//! The general concept of Zfec is to break a message into blocks, or "chunks", then generate additional chunks
//! with parity information that can be used to reconstruct any missing chunks.
//!
//! Notice: Zfec only provides forward error correction, not encryption. If confidentiality is needed, the
//! message should be encrypted before it is encoded with Zfec.
//!
//! Implemented directly from <https://github.com/tahoe-lafs/zfec>
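//!
//! # Example
//!
//! A minimal round trip, encoding a message and decoding it while all chunks are present:
//!
//! ```
//! use zfec_rs::Fec;
//!
//! let message = b"some data worth protecting";
//!
//! // 5 chunks total, any 3 of which are enough to rebuild the message
//! let fec = Fec::new(3, 5).unwrap();
//!
//! let (chunks, padding) = fec.encode(&message[..]).unwrap();
//! let decoded = fec.decode(&chunks, padding).unwrap();
//!
//! assert_eq!(message.to_vec(), decoded);
//! ```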

#[cfg(test)]
mod tests;

use std::fmt;

/*
 * Primitive polynomials - see Lin & Costello, Appendix A,
 * and Lee & Messerschmitt, p. 453.
 */
const PP: &[u8; 9] = b"101110001";

/* To make sure that we stay within cache in the inner loops of fec_encode().  (It would
probably help to also do this for fec_decode().) */
const STRIDE: usize = 8192;

//static UNROLL: usize = 16; /* 1, 4, 8, 16 */
// TODO: Implement unrolling
// could be done at build time. Run some basic unrolling tests
// in the build.rs, whichever unrolling is fastest, have a macro
// that does the unrolling

//const FEC_MAGIC: u32 = 0xFECC0DEC;

/// Possible errors
#[derive(Debug)]
pub enum Error {
    ZeroK,
    ZeroM,
    BigN,
    KGtN,
    NotEnoughChunks,
    Tbd,
}
impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "Zfec error: {}",
            match self {
                Self::ZeroK => "'k' must be greater than 0",
                Self::ZeroM => "'m' must be greater than 0",
                Self::BigN => "'m' must be less than 257",
                Self::KGtN => "'k' must not be greater than 'm'",
                Self::NotEnoughChunks => "Not enough chunks were provided",
                Self::Tbd => "Unknown error",
            }
        )
    }
}
impl std::error::Error for Error {}

type Gf = u8;
type Result<T> = std::result::Result<T, Error>;

/// A chunk of encoded data
///
/// A `Chunk` can be deconstructed into a `(Vec<u8>, usize)` tuple
///
/// # Example
///
/// ```
/// use zfec_rs::Chunk;
///
/// let val: Vec<u8> = vec![0, 1, 2, 3, 4];
/// let chunk = Chunk::new(val.clone(), 0);
/// let (chunk_vec, chunk_i): (Vec<u8>, usize) = chunk.into();
/// assert_eq!(val, chunk_vec);
/// ```
#[derive(Debug, Clone)]
pub struct Chunk {
    pub data: Vec<u8>,
    pub index: usize,
}
impl Chunk {
    /// Creates a new chunk
    pub fn new(data: Vec<u8>, index: usize) -> Self {
        Self { data, index }
    }
}
impl From<Chunk> for (Vec<Gf>, usize) {
    fn from(val: Chunk) -> Self {
        (val.data, val.index)
    }
}
impl std::fmt::Display for Chunk {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}, {:?}", self.index, self.data)
    }
}

/// Forward Error Correcting encoder/decoder.
///
/// The encoder is defined by two parameters: `k` and `m`
///
/// * `k` is the number of chunks needed to reconstruct the original message
/// * `m` is the total number of chunks that will be produced
///
/// The first `k` chunks contain the original, unaltered data, so if all of the original chunks are
/// available on the decoding end, no decoding needs to take place.
///
/// The final `(m-k)` chunks contain the parity coding; any `k` of the `m` chunks are sufficient to
/// reconstruct the original message.
///
/// Coding is done with respect to the chunk's location within the encoded data. This means that each chunk's
/// sequence number is needed for correct reconstruction.
///
/// # Example
///
/// ```
/// use zfec_rs::Fec;
///
/// let message = b"Message to be sent";
///
/// let fec = Fec::new(5, 8).unwrap();
///
/// let (mut encoded_chunks, padding) = fec.encode(&message[..]).unwrap();
/// encoded_chunks.remove(2);
/// let decoded_message = fec.decode(&encoded_chunks, padding).unwrap();
///
/// assert_eq!(message.to_vec(), decoded_message);
/// ```
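///
/// Because the code is systematic, the first `k` encoded chunks are just the padded message; a quick
/// check of that property:
///
/// ```
/// use zfec_rs::Fec;
///
/// let message = b"Message to be sent";
/// let fec = Fec::new(5, 8).unwrap();
/// let (chunks, padding) = fec.encode(&message[..]).unwrap();
///
/// // concatenate the first `k` chunks and drop the zero padding at the end
/// let first_k: Vec<u8> = chunks[..5].iter().flat_map(|c| c.data.clone()).collect();
/// assert_eq!(&first_k[..first_k.len() - padding], &message[..]);
/// ```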
pub struct Fec {
    // magic: u64, // I'm not sure what magic does. It's never used except in new and free. My guess is it's some kind of way to make sure you're freeing the right memory?
    k: usize,
    m: usize,
    enc_matrix: Vec<Gf>,

    /*
     * To speed up computations, we have tables for logarithm, exponent and
     * inverse of a number.  We use a table for multiplication as well (it takes
     * 64K, no big deal even on a PDA, especially because it can be
     * pre-initialized and put into a ROM!), otherwise we use a table of
     * logarithms. In any case the gf_mul(x, y) function takes care of
     * multiplications.
     */
    // gf_exp: [Gf; 510],
    // gf_log: [u32; 256],
    // inverse: [Gf; 256],
    // gf_mul_table: [[Gf; 256]; 256],
    statics: Statics,
}
impl Fec {
    /*
     * This section contains the proper FEC encoding/decoding routines.
     * The encoding matrix is computed starting with a Vandermonde matrix,
     * and then transforming it into a systematic matrix.
     */
    /*
     * param k the number of blocks required to reconstruct
     * param m the total number of blocks created
     */
    /// Generates a new encoder/decoder
    pub fn new(k: usize, m: usize) -> Result<Fec> {
        //// eprintln!("Creating new - k: {}, n: {}", k, n);
        if k < 1 {
            return Err(Error::ZeroK);
        }
        if m < 1 {
            return Err(Error::ZeroM);
        }
        if m > 256 {
            return Err(Error::BigN);
        }
        if k > m {
            return Err(Error::KGtN);
        }
        let mut tmp_m: Vec<Gf> = vec![0; m * k];

        // let mut gf_exp = [0; 510];
        // let mut gf_log = [0; 256];
        // let mut inverse = [0; 256];
        // let mut gf_mul_table = [[0; 256]; 256];

        // Self::init_fec(&mut gf_mul_table, &mut gf_exp, &mut gf_log, &mut inverse);
        let statics = Statics::new();

        // m rows by k columns
        let mut enc_matrix: Vec<Gf> = vec![0; m * k];

        let mut ret_val = Fec {
            k,
            m,
            enc_matrix: vec![], // needs to be added in below
            // gf_exp: gf_exp,
            // gf_log: gf_log,
            // inverse: inverse,
            // // magic: (((FEC_MAGIC ^ k as u32) ^ m as u32) ^ (enc_matrix)) as u64,
            // gf_mul_table: gf_mul_table,
            statics,
        };

        /*
         * fill the matrix with powers of field elements, starting from 0.
         * The first row is special, cannot be computed with exp. table.
         */
        tmp_m[0] = 1;
        for col in 1..k {
            tmp_m[col] = 0;
        }
        for row in 0..(m - 1) {
            //// eprintln!("row: {}", row);
            let p: &mut [u8] = &mut tmp_m[(row + 1) * k..];
            for col in 0..k {
                p[col] = ret_val.statics.gf_exp[Statics::modnn((row * col) as i32) as usize];
            }
        }

        /*
         * quick code to build systematic matrix: invert the top
         * k*k vandermonde matrix, multiply right the bottom n-k rows
         * by the inverse, and construct the identity matrix at the top.
         */
        // eprintln!("tmp_m: {:02x?}", tmp_m);
        ret_val.statics._invert_vdm(&mut tmp_m, k); /* much faster than _invert_mat */
        ret_val.statics._matmul(
            &tmp_m[k * k..],
            &tmp_m[..],
            &mut enc_matrix[k * k..],
            m - k,
            k,
            k,
        );
        /*
         * the upper matrix is I so do not bother with a slow multiply
         */
        // the Vec is initialized to 0's when defined
        // memset(retval->enc_matrix, '\0', k * k * sizeof(gf));
        for i in 0..k {
            //// eprintln!("i: {}", i);
            enc_matrix[i * (k + 1)] = 1;
        }

        // unnecessary in Rust, tmp_m gets dropped
        // free(tmp_m);

        ret_val.enc_matrix = enc_matrix;

        Ok(ret_val)
    }
    /// Performs the encoding, returning the encoded chunks and the amount of padding
    ///
    /// Because all chunks need to be the same size, the data is padded with `0`s at the end as needed
    pub fn encode(&self, data: &[u8]) -> Result<(Vec<Chunk>, usize)> {
        // eprintln!("\nEncoding k: {}, m: {}", self.k, self.m);
        // clean side
        let chunk_size = self.chunk_size(data.len());
        // eprintln!("chunk_size: {}", chunk_size);
        let data_slice = &data[..];

        let mut chunks = vec![];

        // eprintln!("data: {:02x?}", data);
        // eprintln!("data len: {:02x?}", data.len());
        let mut padding = 0;
        for i in 0..self.k {
            let mut temp_vec = vec![];
            if (i * chunk_size) >= data_slice.len() {
                // eprintln!("empty chunk");
                temp_vec.append(&mut vec![0; chunk_size]);
                padding += chunk_size;
            } else if ((i * chunk_size) < data_slice.len())
                && (((i + 1) * chunk_size) > data_slice.len())
            {
                // finish current chunk
                temp_vec.append(&mut data_slice[i * chunk_size..].to_vec());
                // add padding
                let added = ((i + 1) * chunk_size) - data_slice.len();
                // eprint!("final slice, padding");
                for _ in 0..added {
                    // eprint!(".");
                    temp_vec.push(0);
                }
                padding += added;
            } else {
                let new_chunk = &data_slice[(i * chunk_size)..((i + 1) * chunk_size)];
                // eprintln!("normal chunk: {:02x?}", new_chunk);
                temp_vec.append(&mut new_chunk.to_vec());
            }
            chunks.push(temp_vec);
        }
        // eprintln!("Finished chunking");

        let num_check_blocks_produced = self.m - self.k;
        let mut check_blocks_produced = vec![vec![0; chunk_size]; num_check_blocks_produced];
        let check_block_ids: Vec<usize> = (self.k..self.m).collect();
        // eprintln!("num: {}", num_check_blocks_produced);
        // eprintln!("blocks: {:?}", check_blocks_produced);
        // eprintln!("ids: {:?}", check_block_ids);

        ///////// internals

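        // Work through the chunks in STRIDE-sized slices so the inner addmul loops stay in cache
        // (see the comment on `STRIDE` above).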
        let mut k = 0;
        while k < chunk_size {
            let stride = if (chunk_size - k) < STRIDE {
                chunk_size - k
            } else {
                STRIDE
            };
            for i in 0..num_check_blocks_produced {
                let fecnum = check_block_ids[i];
                if fecnum < self.k {
                    return Err(Error::Tbd);
                }
                let p = &self.enc_matrix[fecnum * self.k..];
                // eprintln!("enc_matrix: {:02x?}", &self.enc_matrix);
                // eprintln!("p: {:02x?}", p);
                for j in 0..self.k {
                    // eprintln!("Loc 2");
                    self.statics.addmul(
                        &mut check_blocks_produced[i][k..],
                        &chunks[j][k..k + stride],
                        p[j],
                        stride,
                    );
                }
            }

            k += STRIDE;
        }

        ///////// end internals

        let mut ret_chunks = vec![];
        ret_chunks.append(&mut chunks);
        ret_chunks.append(&mut check_blocks_produced);
        // eprintln!("ret_chunks: {:02x?}", ret_chunks);
        let mut ret_vec = vec![];
        for (i, chunk) in ret_chunks.iter().enumerate() {
            ret_vec.push(Chunk {
                index: i,
                data: chunk.to_vec(),
            });
        }
        Ok((ret_vec, padding))
    }
    /// Performs the decoding, returning the reconstructed message with the padding removed
    pub fn decode(&self, encoded_data: &[Chunk], padding: usize) -> Result<Vec<u8>> {
        // eprintln!("\nDecoding");
        if encoded_data.len() < self.k {
            return Err(Error::NotEnoughChunks);
        }

        let mut share_nums: Vec<usize> = vec![];
        let mut chunks: Vec<Vec<u8>> = vec![vec![]; self.m];

        for chunk in encoded_data {
            let num = chunk.index;
            share_nums.push(num);
            chunks[num] = chunk.data.clone();
        }
        // eprintln!("encoded data: {:02x?}", encoded_data);
        // eprintln!("share_nums: {:02x?}", share_nums);
        // eprintln!("chunks: {:02x?}", chunks);

        let sz = chunks[share_nums[0]].len();
        let mut ret_chunks = vec![vec![0; sz]; self.k];

        let mut complete = true;
        let mut missing = std::collections::VecDeque::new();
        let mut replaced = vec![];
        // check which of the original chunks are missing
        for i in 0..self.k {
            if !share_nums.contains(&i) {
                complete = false;
                missing.push_back(i);
                // eprintln!("Missing {}", i);
            }
        }

        // replace the missing chunks with fec chunks
        for i in self.k..self.m {
            if !chunks[i].is_empty() {
                if let Some(index) = missing.pop_front() {
                    // eprintln!("Moving {} to {}", i, index);
                    replaced.push(index);
                    share_nums.insert(index, i);
                    chunks[index] = chunks[i].to_vec();
                    // eprintln!("share_nums: {:02x?}", share_nums);
                    // eprintln!("chunks: {:02x?}", chunks);
                }
            }
        }

        if complete {
            let flat = Self::flatten(&mut chunks[..self.k].to_vec());
            return Ok(flat[..flat.len() - padding].to_vec());
        }

        /////////////// internal decode

        let mut m_dec = vec![0; self.k * self.k];
        let mut outix = 0;

        self.build_decode_matrix_into_space(&share_nums, self.k, &mut m_dec[..]);

        for row in 0..self.k {
            assert!((share_nums[row] >= self.k) || (share_nums[row] == row));
            if share_nums[row] >= self.k {
                // if it's not a normal block
                // memset(outpkts[outix], 0, sz);
                ret_chunks[outix].fill(0);
                for col in 0..self.k {
                    // eprintln!("Loc 2");
                    self.statics.addmul(
                        &mut ret_chunks[outix][..],
                        &chunks[col][..],
                        m_dec[row * self.k + col],
                        sz,
                    );
                }
                outix += 1;
            }
        }

        /////////////// end internal decode

        // eprintln!("replaced: {:02x?}", replaced);
        // eprintln!("ret_chunks: {:02x?}", ret_chunks);
        // fix the replaced chunks
        for (i, &idx) in replaced.iter().enumerate() {
            chunks[idx] = ret_chunks[i].to_vec();
            // eprintln!("chunks: {:02x?}", chunks);
        }
        let ret_vec = Self::flatten(&mut chunks[0..self.k].to_vec());

        // remove padding
        Ok(ret_vec[..ret_vec.len() - padding].to_vec())
    }
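    // One chunk holds `data_len / k` bytes, rounded up so that `k` chunks always cover the whole message.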
    fn chunk_size(&self, data_len: usize) -> usize {
        (data_len + self.k - 1) / self.k
    }
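    // Concatenates a list of chunks into one contiguous byte vector, emptying each chunk as it goes.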
    fn flatten(square: &mut Vec<Vec<u8>>) -> Vec<u8> {
        let mut ret_vec = vec![];
        for chunk in square {
            ret_vec.append(chunk);
        }
        ret_vec
    }
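    // Builds the k*k decoding matrix for the chunk indices we actually have (identity rows for intact
    // data chunks, encoding-matrix rows for check chunks), then inverts it in place.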
    fn build_decode_matrix_into_space(&self, index: &[usize], k: usize, matrix: &mut [Gf]) {
        for i in 0..k {
            let p = &mut matrix[i * k..];
            if index[i] < k {
                // we'll assume it's already 0
                // memset(p, 0, k);
                p[i] = 1;
            } else {
                // memcpy(p, &(code->enc_matrix[index[i] * code->k]), k);
                for j in 0..k {
                    p[j] = self.enc_matrix[(index[i] * self.k) + j];
                }
            }
        }
        self.statics._invert_mat(matrix, k);
    }
}

/*
 * To speed up computations, we have tables for logarithm, exponent and
 * inverse of a number.  We use a table for multiplication as well (it takes
 * 64K, no big deal even on a PDA, especially because it can be
 * pre-initialized and put into a ROM!), otherwise we use a table of
 * logarithms. In any case the gf_mul(x, y) function takes care of
 * multiplications.
 */
struct Statics {
    gf_exp: [Gf; 510],
    //gf_log: [i32; 256],
    inverse: [Gf; 256],
    gf_mul_table: [[Gf; 256]; 256],
}
impl Statics {
    pub fn new() -> Self {
        let mut gf_exp: [Gf; 510] = [0; 510];
        // only used in initializing other fields
        let mut gf_log: [i32; 256] = [0; 256];
        let mut inverse: [Gf; 256] = [0; 256];
        let mut gf_mul_table: [[Gf; 256]; 256] = [[0; 256]; 256];
        Self::generate_gf(&mut gf_exp, &mut gf_log, &mut inverse);
        Self::_init_mul_table(&mut gf_mul_table, &mut gf_exp, &mut gf_log);
        Self {
            gf_exp,
            //gf_log: gf_log,
            inverse,
            gf_mul_table,
        }
    }
    /// Initialize the data structures used for computations in GF
    fn generate_gf(gf_exp: &mut [Gf; 510], gf_log: &mut [i32; 256], inverse: &mut [Gf; 256]) {
        let mut mask: Gf;

        mask = 1; /* x ** 0 = 1 */
        gf_exp[8] = 0; /* will be updated at the end of the 1st loop */
        /*
         * first, generate the (polynomial representation of) powers of \alpha,
         * which are stored in gf_exp[i] = \alpha ** i .
         * At the same time build gf_log[gf_exp[i]] = i .
         * The first 8 powers are simply bits shifted to the left.
         */
        for i in 0..8 {
            gf_exp[i] = mask;
            gf_log[gf_exp[i] as usize] = i as i32;
            /*
             * If Pp[i] == 1 then \alpha ** i occurs in poly-repr
             * gf_exp[8] = \alpha ** 8
             */
            if PP[i] == b'1' {
                gf_exp[8] ^= mask;
            }
            mask <<= 1;
        }
        /*
         * now gf_exp[8] = \alpha ** 8 is complete, so can also
         * compute its inverse.
         */
        gf_log[gf_exp[8] as usize] = 8;

        /*
         * Poly-repr of \alpha ** (i+1) is given by poly-repr of
         * \alpha ** i shifted left one-bit and accounting for any
         * \alpha ** 8 term that may occur when poly-repr of
         * \alpha ** i is shifted.
         */
        mask = 1 << 7;
        for i in 9..255 {
            if gf_exp[i - 1] >= mask {
                gf_exp[i] = gf_exp[8] ^ ((gf_exp[i - 1] ^ mask) << 1);
            } else {
                gf_exp[i] = gf_exp[i - 1] << 1;
            }
            gf_log[gf_exp[i] as usize] = i as i32;
        }
        /*
         * log(0) is not defined, so use a special value
         */
        gf_log[0] = 255;
        /* set the extended gf_exp values for fast multiply */
        for i in 0..255 {
            gf_exp[i + 255] = gf_exp[i];
        }
        /*
         * again special cases. 0 has no inverse. This used to
         * be initialized to 255, but it should make no difference
         * since no one is supposed to read from here.
         */
        inverse[0] = 0;
        inverse[1] = 1;
        for i in 2..=255 {
            inverse[i] = gf_exp[255 - gf_log[i] as usize];
        }
    }
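    // Builds the full 256x256 multiplication table from the log/exp tables; row 0 and column 0 are then
    // forced to 0, since multiplying by 0 always gives 0.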
    fn _init_mul_table(
        gf_mul_table: &mut [[Gf; 256]; 256],
        gf_exp: &[Gf; 510],
        gf_log: &[i32; 256],
    ) {
        for i in 0..256 {
            for j in 0..256 {
                gf_mul_table[i][j] = gf_exp[Self::modnn(gf_log[i] + gf_log[j]) as usize];
            }
        }
        for j in 0..256 {
            gf_mul_table[j][0] = 0;
            gf_mul_table[0][j] = 0;
        }
    }
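    // Reduces `x` modulo 255, the order of the multiplicative group of GF(2^8).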
    fn modnn(mut x: i32) -> Gf {
        while x >= 255 {
            x -= 255;
            x = (x >> 8) + (x & 255);
        }
        x as Gf
    }
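    // Computes dst[i] ^= c * src[i] over GF(2^8) for the first `sz` bytes; a no-op when c == 0.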
    pub fn addmul(&self, dst: &mut [Gf], src: &[Gf], c: Gf, sz: usize) {
        // eprintln!("c: {:02x}, sz: {}", c, sz);
        // eprintln!("dst: {:02x?}", dst);
        // eprintln!("src: {:02x?}", src);
        if c != 0 {
            self._addmul1(dst, src, c, sz);
        }
    }
    fn _addmul1(&self, dst: &mut [Gf], src: &[Gf], c: Gf, sz: usize) {
        if !src.is_empty() {
            let mulc = self.gf_mul_table[c as usize];
            //let lim = &dst[sz - UNROLL + 1..];
            // they unroll, for now I'll just do it directly
            for i in 0..sz {
                dst[i] ^= mulc[src[i] as usize];
            }
            // eprintln!("dst: {:02x?}", dst);
        }
    }
    /*
     * computes C = AB where A is n*k, B is k*m, C is n*m
     */
    fn _matmul(&self, a: &[Gf], b: &[Gf], c: &mut [Gf], n: usize, k: usize, m: usize) {
        // eprintln!("a: {:02x?}", a);
        // eprintln!("b: {:02x?}", b);
        // eprintln!("c: {:02x?}", c);
        for row in 0..n {
            for col in 0..m {
                let mut acc: Gf = 0;
                for i in 0..k {
                    let pa: Gf = a[(row * k) + i];
                    let pb: Gf = b[col + (i * m)];
                    acc ^= self.gf_mul(pa, pb);
                }
                c[row * m + col] = acc;
            }
        }
        // eprintln!("c: {:02x?}", c);
    }
    /*
     * fast code for inverting a vandermonde matrix.
     *
     * NOTE: It assumes that the matrix is not singular and _IS_ a vandermonde
     * matrix. Only uses the second column of the matrix, containing the p_i's.
     *
     * Algorithm borrowed from "Numerical recipes in C" -- sec.2.8, but largely
     * revised for my purposes.
     * p = coefficients of the matrix (p_i)
     * q = values of the polynomial (known)
     */
    fn _invert_vdm(&self, src: &mut Vec<Gf>, k: usize) {
        /*
         * b holds the coefficient for the matrix inversion
         * c holds the coefficient of P(x) = Prod (x - p_i), i=0..k-1
         */
        let (mut b, mut c, mut p): (Vec<Gf>, Vec<Gf>, Vec<Gf>) =
            (vec![0; k], vec![0; k], vec![0; k]);
        let (mut t, mut xx): (Gf, Gf);

        /* degenerate case, matrix must be p^0 = 1 */
        if k == 1 {
            return;
        }
        let mut j = 1;
        for i in 0..k {
            c[i] = 0;
            p[i] = src[j];
            j += k;
        }

        /*
         * construct coeffs. recursively. We know c[k] = 1 (implicit)
         * and start P_0 = x - p_0, then at each stage multiply by
         * x - p_i generating P_i = x P_{i-1} - p_i P_{i-1}
         * After k steps we are done.
         */
        c[k - 1] = p[0]; /* really -p(0), but x = -x in GF(2^m) */
        for i in 1..k {
            let p_i = p[i]; /* see above comment */
            for j in (k - 1 - (i - 1))..(k - 1) {
                c[j] ^= self.gf_mul(p_i, c[j + 1]);
            }
            c[k - 1] ^= p_i;
        }

        for row in 0..k {
            /*
             * synthetic division etc.
             */
            xx = p[row];
            t = 1;
            b[k - 1] = 1; /* this is in fact c[k] */
            for i in (1..=(k - 1)).rev() {
                b[i - 1] = c[i] ^ self.gf_mul(xx, b[i]);
                t = self.gf_mul(xx, t) ^ b[i - 1];
            }
            for col in 0..k {
                src[col * k + row] = self.gf_mul(self.inverse[t as usize], b[col]);
            }
        }

        // unnecessary for Rust
        // free(c);
        // free(b);
        // free(p);
    }
    fn gf_mul(&self, x: Gf, y: Gf) -> Gf {
        self.gf_mul_table[x as usize][y as usize]
    }
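    // Gauss-Jordan inversion of a k*k matrix over GF(2^8), done in place with full pivoting
    // (ported from the C implementation).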
    fn _invert_mat(&self, src: &mut [Gf], k: usize) {
        let mut c: Gf;
        let (mut irow, mut icol) = (0, 0);

        let mut indxc = vec![0; k];
        let mut indxr = vec![0; k];
        let mut ipiv = vec![0; k];
        let mut id_row = vec![0; k];

        /*
         * ipiv marks elements already used as pivots.
         */
        for i in 0..k {
            ipiv[i] = 0;
        }

        for col in 0..k {
            let mut piv_found: bool = false;

            /*
             * Zeroing column 'col', look for a non-zero element.
             * First try on the diagonal, if it fails, look elsewhere.
             */
            if ipiv[col] != 1 && src[col * k + col] != 0 {
                irow = col;
                icol = col;
                piv_found = true;
            }
            if !piv_found {
                for row in 0..k {
                    if ipiv[row] != 1 {
                        for ix in 0..k {
                            if ipiv[ix] == 0 {
                                if src[row * k + ix] != 0 {
                                    irow = row;
                                    icol = ix;
                                    piv_found = true;
                                }
                            } else {
                                assert!(ipiv[ix] <= 1);
                            }
                            if piv_found {
                                break;
                            }
                        }
                    }
                    if piv_found {
                        break;
                    }
                }
            }

            // found_piv:
            ipiv[icol] += 1;
            /*
             * swap rows irow and icol, so afterwards the diagonal
             * element will be correct. Rarely done, not worth
             * optimizing.
             */
            if irow != icol {
                for ix in 0..k {
                    // stands in for the C "SWAP" macro
                    src.swap(irow * k + ix, icol * k + ix);
                }
            }
            indxr[col] = irow;
            indxc[col] = icol;
            let pivot_row = &mut src[icol * k..(icol + 1) * k];
            c = pivot_row[icol];
            assert!(c != 0);
            if c != 1 {
                /* otherwise this is a NOP */
                /*
                 * this is done often, but optimizing is not so
                 * fruitful, at least in the obvious ways (unrolling)
                 */
                c = self.inverse[c as usize];
                pivot_row[icol] = 1;
                for ix in 0..k {
                    pivot_row[ix] = self.gf_mul(c, pivot_row[ix]);
                }
            }
            /*
             * from all rows, remove multiples of the selected row
             * to zero the relevant entry (in fact, the entry is not zero
             * because we know it must be zero).
             * (Here, if we know that the pivot_row is the identity,
             * we can optimize the addmul).
             */
            id_row[icol] = 1;
            if pivot_row != id_row {
                // copy the pivot row so `src` can be borrowed mutably again below
                let pivot_clone = pivot_row.to_vec();

                for ix in 0..k {
                    if ix != icol {
                        let p = &mut src[ix * k..(ix + 1) * k];
                        c = p[icol];
                        p[icol] = 0;
                        // eprintln!("Loc 1");
                        self.addmul(p, &pivot_clone, c, k);
                    }
                }
            }
            id_row[icol] = 0;
        } /* done all columns */
        for col in (0..k).rev() {
            if indxr[col] != indxc[col] {
                for row in 0..k {
                    // stands in for the C "SWAP" macro
                    src.swap(row * k + indxr[col], row * k + indxc[col]);
                }
            }
        }
    }
}