// guff_ida/lib.rs

//! # A crate for working with Cauchy and Vandermonde matrices
//!
//! A small collection of routines for creating matrices that can be
//! used to implement erasure (error-correction) schemes or threshold
//! schemes using Galois fields.
//!
//! Note that this crate only provides data for insertion into a
//! matrix. For the matrix types and operations themselves, see:
//!
//! * [guff](https://crates.io/crates/guff) : basic operations over
//!   finite fields, including vector operations
//! * [guff-matrix](https://crates.io/crates/guff-matrix) : full set
//!   of matrix types and operations
//!
//! # Using finite field matrix operations for threshold schemes
//!
//! A "threshold scheme" is a mathematical method for securely
//! splitting a secret into a number of "shares" such that:
//!
//! * if a set number (the "threshold") of shares are combined, the
//!   original secret can be recovered
//!
//! * if fewer shares than the threshold are combined, no information
//!   about the secret is revealed
//!
//! ## Module Focus
//!
//! This module focuses in particular on Michael O. Rabin's
//! "Information Dispersal Algorithm" (IDA). In it, splitting a secret
//! is achieved by:
//!
//! * creating a transform matrix that has the required threshold property
//!
//! * placing the secret into an input matrix, padding it if needed
//!
//! * calculating transform x input to get an output matrix
//!
//! * reading off each share as a row of the transform matrix and the
//!   corresponding row of the output matrix
//!
//! To reconstruct the secret:
//!
//! * take the supplied transform matrix rows and put them into a
//!   square matrix
//!
//! * calculate the inverse of the matrix
//!
//! * form a new input matrix from the corresponding output data rows
//!
//! * calculate inverse x input
//!
//! * read the secret back from the output matrix
//!
//! More details of the algorithm can be found [later](todo).
//! A minimal sketch of the split step is shown below.
//!
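//! ## A minimal split sketch
//!
//! The following sketch mirrors this crate's test code. It is marked
//! `ignore` because the matrix types and multiply routine live in the
//! separate [guff-matrix](https://crates.io/crates/guff-matrix) crate,
//! which this crate only uses as a dev dependency:
//!
//! ```ignore
//! use guff::*;
//! use guff_ida::*;
//! use guff_matrix::*;
//! use guff_matrix::x86::*;
//!
//! // 4,n threshold scheme: any 4 shares reconstruct the secret
//! let key = vec![ 1u8, 2, 3, 4, 5, 6, 7, 8 ];
//! let field = new_gf8(0x11b, 0x1b);
//!
//! // transform matrix with the required threshold property
//! let cauchy_data = cauchy_matrix(&field, &key, 4, 4);
//! let mut xform = X86SimpleMatrix::<x86::X86u8x16Long0x11b>::new(4, 4, true);
//! xform.fill(&cauchy_data);
//!
//! // the (padded) secret goes into the input matrix
//! let mut input = X86SimpleMatrix::<x86::X86u8x16Long0x11b>::new(4, 5, false);
//! input.fill(&[1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10,
//!              11, 12, 13, 14, 15, 16, 17, 18, 19, 20]);
//!
//! // transform x input = output; share i is row i of the transform
//! // matrix plus row i of the output matrix
//! let mut output = X86SimpleMatrix::<x86::X86u8x16Long0x11b>::new(4, 5, true);
//! reference_matrix_multiply(&mut xform, &mut input, &mut output, &field);
//! ```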

// will need to use external gf_2px
//use num;
use num_traits::identities::{One,Zero};
use guff::*;

//impl From<u32> for NumericOps {
//    fn from(val: u32) -> Self { val as Self }
//}


/// ## Vandermonde-form matrix
/// ```ascii
///
/// |  0    1    2        k-1 |
/// | 0    0    0    ... 0    |
/// |                         |
/// |  0    1    2        k-1 |
/// | 1    1    1    ... 1    |
/// |                         |
/// | :    :    :     :   :   |
/// |                         |
/// |    0      1      2            k-1 |
/// | n-1    n-1    n-1    ... n-1      |
/// ```
///
/// Can be used for Reed-Solomon coding, or at least a version of it.
/// This is not the most general form of a Vandermonde matrix, but it
/// is useful as a particular case since it doesn't require any
/// parameters to produce it.
///
/// The return value is a single vector of n rows, each of k elements.
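///
/// A minimal usage sketch (using `guff`'s `new_gf8` constructor, as in
/// this crate's tests):
///
/// ```
/// use guff::*;
/// use guff_ida::vandermonde_matrix;
///
/// let f = new_gf8(0x11b, 0x1b);
/// // k = 3 columns (powers 0..=2), n = 4 rows (bases 0..=3)
/// let v = vandermonde_matrix(&f, 3, 4);
/// assert_eq!(v.len(), 12);
/// // column 0 is all ones (with 0**0 taken as 1)
/// assert_eq!(v[0], 1);
/// ```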

pub fn vandermonde_matrix<G> (field : &G, k : usize, n : usize)
    -> Vec<G::E>
where G : GaloisField, G::E : Into<usize>, G::EE : Into<usize>
{
    let zero = G::E::zero();
    let one  = G::E::one();

    let mut v = Vec::<G::E>::new();
    if k < 1 || n < 1 {
        return v
    }

    // If pow() is expensive, can use repeated multiplications below.

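    // A hedged sketch of that alternative (not enabled here): keep a
    // running power per row instead of calling pow() per element:
    //
    //    let mut elem = one;                  // row**0
    //    for _ in 0..k {
    //        v.push(elem);
    //        elem = field.mul(elem, row);
    //    }
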
    // range op won't work, so use while loop

    let mut row = zero;
    while row.into() < n {
        let mut col = G::EE::zero();
        while col.into() < k {
            v.push(field.pow(row, col));
            col = col + G::EE::one();
        }
        row = row + one
    }
    v
}

/// ## Cauchy-form matrix generated from a key
///
/// ```ascii
///                      k columns
///
/// |    1         1                  1    |
/// | -------   -------    ...    ------- |
/// | x1 + y1   x1 + y2            x1 + yk |
/// |                                      |
/// |    1         1                  1    |
/// | -------   -------    ...    ------- |
/// | x2 + y1   x2 + y2            x2 + yk |   n rows
/// |                                      |
/// |    :         :         :       :    |
/// |                                      |
/// |    1         1                  1    |
/// | -------   -------    ...    ------- |
/// | xn + y1   xn + y2            xn + yk |
/// ```
///
/// All [y1 .. yk, x1 .. xn] field values must be distinct, non-zero
/// values.
///
/// **TODO**: Check that all input values are distinct. One option is
/// to put all the elements on a heap and then check that no parent
/// node has a child node equal to it. Alternatively, check the
/// condition as we're building the heap; that should work, too.
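///
/// A minimal distinctness check, shown here as a simpler alternative
/// to the heap idea, using the standard library's `HashSet`:
///
/// ```
/// use std::collections::HashSet;
///
/// let key = vec![ 1u8, 2, 3, 4, 5, 6, 7, 8 ];
/// let mut seen = HashSet::new();
/// let all_distinct = key.iter().all(|e| seen.insert(e));
/// let all_nonzero  = key.iter().all(|&e| e != 0);
/// assert!(all_distinct && all_nonzero);
/// ```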
///
/// **TODO**: use a random number generator to select k + n distinct
/// field elements (eg, Floyd's algorithm for shuffling/selection from
/// an array of all field elements if the field size is small, or a
/// modification of the heap approach above for when it's impractical
/// to list all the field elements)
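///
/// A sketch of Floyd's sampling algorithm for GF(2**8) (assuming the
/// `rand` crate, which is not currently a dependency, hence the
/// `ignore` below):
///
/// ```ignore
/// use rand::Rng;
/// use std::collections::HashSet;
///
/// // choose m distinct values from the 255 non-zero field elements
/// fn sample_distinct(m : u16) -> HashSet<u8> {
///     let mut rng = rand::thread_rng();
///     let mut s = HashSet::new();
///     // Floyd: for each j, insert a random t in 1..=j, or j itself
///     // if t has already been taken
///     for j in (255 - m + 1)..=255u16 {
///         let t = rng.gen_range(1..=j) as u8;
///         if !s.insert(t) { s.insert(j as u8); }
///     }
///     s
/// }
/// ```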
///
/// I've ordered the elements with y values first since these will be
/// reused across all rows. I will allow passing in a vector that has
/// more x values than are required.
///
/// We don't operate on k, n to produce field values, so they can be
/// passed in as regular types.
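///
/// A minimal usage sketch (key values as in this crate's tests):
///
/// ```
/// use guff::*;
/// use guff_ida::cauchy_matrix;
///
/// // 4 y values followed by 4 x values, all distinct and non-zero
/// let key = vec![ 1u8, 2, 3, 4, 5, 6, 7, 8 ];
/// let field = new_gf8(0x11b, 0x1b);
/// let m = cauchy_matrix(&field, &key, 4, 4);
/// assert_eq!(m.len(), 16);    // 4 rows of 4 columns, row-wise
/// ```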
pub fn cauchy_matrix<G>
    (field : &G, key : &Vec<G::E>, k : usize, n : usize)
    -> Vec<G::E>
where G : GaloisField,
{
    let mut v = Vec::<G::E>::with_capacity(k * n);
    let vlen = key.len();

    // I can change the signature to return a Result later, but for
    // now return an empty vector to indicate failure.
    let min_key_size = k + n;
    if vlen < min_key_size {
        println!("Key should have at least {} elements", min_key_size);
        return v
    }

    // slice format?
    let y : &[G::E] = &key[..k];
    let x : &[G::E] = &key[k..];

    // populate the vector row by row; addition is XOR in GF(2^x)
    for i in 0..n {
        for j in 0..k {
            v.push(field.inv(x[i] ^ y[j]))
        }
    }
    v
}

/// ## Generate inverse Cauchy matrix using a key
///
/// If the "key" used to generate the forward Cauchy matrix is saved,
/// it can be used to calculate the inverse more efficiently than
/// doing full Gaussian elimination.
///
/// See:
/// * <https://en.wikipedia.org/wiki/Cauchy_matrix>
/// * <https://proofwiki.org/wiki/Inverse_of_Cauchy_Matrix>
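///
/// The closed form implemented below is (with `+` denoting XOR, since
/// the field has characteristic 2):
///
/// ```ascii
///                 prod_k (xj + yk)(xk + yi)
/// b[i][j] = ---------------------------------------------------------
///           (xj + yi) prod_{k!=j} (xj + xk) prod_{k!=i} (yi + yk)
/// ```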
///
/// Note that the inverse is a k\*k matrix, so 2\*k distinct values
/// must be passed in:
///
/// * the fixed `y1 .. yk` values
/// * a selection of k x values corresponding to the
///   k rows being combined
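///
/// A minimal usage sketch (same key as the [`cauchy_matrix`] example):
///
/// ```
/// use guff::*;
/// use guff_ida::cauchy_inverse_matrix;
///
/// // k y values followed by k x values
/// let key = vec![ 1u8, 2, 3, 4, 5, 6, 7, 8 ];
/// let field = new_gf8(0x11b, 0x1b);
/// let inv = cauchy_inverse_matrix(&field, &key, 4);
/// assert_eq!(inv.len(), 16);    // k*k matrix
/// ```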

pub fn cauchy_inverse_matrix<G>
    (field : &G, key : &Vec<G::E>, k : usize) -> Vec<G::E>
where G : GaloisField
{
    let one = G::E::one();

    let mut v = Vec::<G::E>::new();

    let vlen = key.len();
    if vlen != 2 * k {
        println!("Must supply k y values and k x values");
        return v
    }

    // slice as before
    let y : &[G::E] = &key[..k];
    let x : &[G::E] = &key[k..];

    // the reference version works, but the optimised version
    // doesn't. Only enabling the reference version for now.
    if k != 0 { // was k < 3
        // this is the basic reference algorithm
        let n = k; // alias to use i, j, k for loops
        for i in 0..n {          // row
            for j in 0..n {      // column
                let mut top = one;
                for k in 0..n {
                    top = field.mul(top, x[j] ^ y[k]);
                    top = field.mul(top, x[k] ^ y[i]);
                }
                let mut bot = x[j] ^ y[i];
                for k in 0..n {
                    if k == j { continue }
                    bot = field.mul(bot, x[j] ^ x[k]);
                }
                for k in 0..n {
                    if k == i { continue }
                    bot = field.mul(bot, y[i] ^ y[k]);
                }
                top = field.mul(top, field.inv(bot));
                v.push(top); // row-wise
            }
        }

    } else {
        // TODO: re-test this code. The likely bug: `top` was being
        // seeded with the k == 0 factors and then the loop below
        // multiplied them in again, double-counting them. Seeding
        // with `one` (as in the reference version) should fix it.
        //
        // optimised version of the above that notes that we
        // calculate the following in the inner loop:
        //
        // * product of row except for ... (n-1 multiplications)
        // * product of col except for ... (n-1 multiplications)
        //
        // These can be calculated outside the main loop and reused.
        //
        let n = k;
        let mut imemo = Vec::<G::E>::with_capacity(k);
        for i in 0..n {
            let mut bot = one;
            let yi = y[i];
            for k in 0..n {
                if k == i { continue }
                bot = field.mul(bot, yi ^ y[k]);
            }
            imemo.push(bot)
        }
        let mut jmemo = Vec::<G::E>::with_capacity(k);
        for j in 0..n {
            let mut bot = one;
            let xj = x[j];
            for k in 0..n {
                if k == j { continue }
                bot = field.mul(bot, xj ^ x[k]);
            }
            jmemo.push(bot)
        }
        for i in 0..n {
            for j in 0..n {
                let mut top = one; // was seeded with k == 0 factors (bug)
                for k in 0..n {
                    top = field.mul(top, x[j] ^ y[k]);
                    top = field.mul(top, x[k] ^ y[i]);
                }
                let mut bot = x[j] ^ y[i];
                // inner loops eliminated:
                bot = field.mul(bot, imemo[i]);
                bot = field.mul(bot, jmemo[j]);
                top = field.mul(top, field.inv(bot));
                v.push(top)
            }
        }
    }
    v
}


// I guess that I might have to implement a matrix here for now. The
// easiest way to test that the two Cauchy fns are correct is to
// calculate the forward and inverse matrices from the same key,
// multiply them together and check if we have an identity matrix.
//
// I'll need a matrix inversion routine if I want to check the
// correctness of the Vandermonde matrix code, though. I wonder,
// though, whether there are any short-cuts to calculating the inverse
// of that.

// Can't derive Copy due to Vec not implementing it. Will need to
// implement a copy constructor so.
// #[derive(Debug)]
// pub struct Matrix<T, P>
//     where T : NumericOps, P : NumericOps {
//     rows : usize,
//     cols : usize,
//     data : Vec<T>,
//     _phantom : P,
//     rowwise : bool,
// }

// Vector stuff done in guff crate
/*
// vector product ... multiply each vector element -> new vector
fn vector_mul<G>(field : &G,
                 dst : &mut [G::E], a : &[G::E], b : &[G::E])
where G : GaloisField {
    // let prod = T::one;
    let (mut a_iter, mut b_iter) = (a.iter(), b.iter());
    for d in dst.iter_mut() {
        *d = field.mul(*a_iter.next().unwrap(), *b_iter.next().unwrap())
    }
}

// dot product ... sum of items in vector product -> value
fn dot_product<G>(field : &G,
                  a : &[G::E], b : &[G::E]) -> G::E
where G : GaloisField {
    let mut sum = G::E::zero();
    for (a_item, b_item) in a.iter().zip(b) {
        sum = sum ^ field.mul(*a_item, *b_item);
    }
    sum
}
 */

// Matrix stuff done in guff-matrix

// I think that I'll make rowwise and colwise matrices different
// types. Different type constraints, anyway. This makes sense for
// matrices that are passed into a multiplication routine, but I'm not
// sure about return values...
//
// There are two patterns I'm interested in.
//
// Split pattern:
//
//        k              window              window
//     +----+       +----- … -----+     +----- … -----+
//     |->  |       ||            |     |->           |   > contiguous
//   n |    |     k |v            |   n |->           |
//     |    |       +----- … -----+     |             |
//     +----+        v contiguous       +----- … -----+
//
//   transform   x      input       =       output
//
//    ROWWISE          COLWISE             ROWWISE
//
//
// Combine pattern:
//
//        k              window              window
//     +----+       +----- … -----+     +----- … -----+
//     |->  |       |->           |     ||            |   v contiguous
//   k |    |     k |->           |   k |v            |
//     +----+       +----- … -----+     +----- … -----+
//                   > contiguous
//
//   transform   x      input       =       output
//
//    ROWWISE          ROWWISE             COLWISE
//
//
// The arrows show the optimal organisation for I/O: not necessarily
// for actual matrix multiplication.
//
// viz.:
//
// When splitting, the single input stream is read into a contiguous
// block of memory, one column at a time, while the several output
// streams are also contiguous.
//
// When combining, each of the several (rowwise) input streams is
// contiguous, and the single (colwise) output stream is contiguous.
//
// So basically the split version has an unavoidable scatter pattern
// at the output, while the combine has an unavoidable gather pattern
// at the input. We could try out some buffering strategy that
// internally transposes sub-matrix blocks (storing them in SIMD
// registers and working on them there) and translates their
// reads/writes so that they're in the "correct" form in
// memory. That's for another day, though.
//
// The most important thing from the above is that (ignoring the
// organisation of the transform matrix, which is always ROWWISE) our
// multiply will always take input data in one layout and output a
// matrix of the opposite layout. That basically answers my question
// about whether it's a good idea to have ROWWISE/COLWISE variants of
// matrices. It does seem to be.
//

// sketch of the above idea...

/*
struct RowWiseAdaptedMatrix<T> {
    // copy of all fields that a regular Matrix has, except layout
}
struct ColWiseAdaptedMatrix<T> {
    // copy of all fields that a regular Matrix has, except layout
}

trait MatrixOps<T> {
    // default, non-specialised stuff
    fn rows();
    // all the accessors and regular stuff that a Matrix does above.

    // Then, gaps for specialised (composable) methods below:
    fn kernel();
}
impl MatrixOps<T,P> {
    // default implementations

}

impl<T> MatrixOps<T> for RowWiseAdaptedMatrix<T> {

    fn kernel () {
        // specialised kernel goes here
    }
}

// Another way to do it is to just use type constraints on
// functions. That is probably simpler:

fn split<T, P, R, C>(field : &impl GenericField<T,P>,
                     transform : &R, input : &C, output : &mut R)
where T : NumericOps,
      P : NumericOps,
      R : MatrixOps<T> + RowWise,
      C : MatrixOps<T> + ColWise
{
    // write split-specific kernel here
}

// This would be supported by:
// * empty traits for RowWise and ColWise
// * two specialised structs that have the traits composed in
// * different constructors for giving us concrete instances
// * coordination between other matrix fns that always make
//   clear which subtypes of matrix they expect/provide
// * helper functions for swapping (transposing) or reinterpreting(?)
//   matrix subtype
 */

// comment out matrix code

/*
// Put Accessors into a separate trait. I'm trying to see if I can get
// a default implementation that has the same members. Mmm... probably
// not. I suppose that a macro is the only solution to avoiding all
// the boilerplate.
// Needs to be generic on T due to dealing with Vec<T>
pub trait Accessors<T : NumericOps> {
    fn rows(&self) -> usize;
    fn cols(&self) -> usize;
    fn rowcol(&self) -> (usize, usize) {
        (self.rows(), self.cols())
    }

    // internally, assume all implementations work with Vec<T>
    fn vec(&self) -> &Vec<T>;
    fn vec_as_mut(&mut self) -> &mut Vec<T>;
    fn vec_as_mut_slice(&mut self) -> &[T];
}


// Things that we can do with a matrix without knowing its layout
pub trait LayoutAgnostic<T : NumericOps> : Accessors<T>
{
    fn zero(&mut self) {
        let slice : &mut [T] = &mut self.vec_as_mut()[..];
        for elem in slice.iter_mut() { *elem = T::zero() }
    }
    fn one(&mut self) { // sets matrix as identity
        let (rows, cols) = self.rowcol();
        if rows != cols {
            panic!("Must have rows==cols to set up as identity matrix")
        }
        let slice : &mut [T] = &mut self.vec_as_mut()[..];
        let diag = rows + 1; // count mod this
        let mut index = 0;
        for elem in slice.iter_mut() {
            if index == 0 {
                *elem = T::one()
            } else {
                *elem = T::zero()
            }
            index = (index + 1) % diag
        }
    }

    // will have an external transpose() for non-square matrices which
    // will create a new matrix with a new layout. This one flips
    // values along the diagonal without changing the layout
    fn transpose_square(&mut self) {
        let rows = self.rows();
        if rows != self.cols() {
            panic!("Matrix is not square")
        }
        let v = self.vec_as_mut();
        // swap values at i,j with j,i for all i != j
        // we don't need to know anything about layout
        for i in 1..rows {
            for j in 0..i {
                // safe way of swapping values without temp variable?
                // should be...
                // (v[i * rows + j], v[j * rows + i])
                //     = (v[j * rows + i], v[i * rows + j])
                let t = v[i * rows + j];
                v[i * rows + j] = v[j * rows + i];
                v[j * rows + i] = t;
            }
        }
    }
}

// Things that require knowing about the matrix's layout
pub trait LayoutSpecific<T : NumericOps> : Accessors<T> {

    // These essentially fix the layout
    fn is_rowwise(&self) -> bool;
    fn is_colwise(&self) -> bool { ! self.is_rowwise() }
    fn _rowcol_to_index(&self, row : usize, col : usize) -> usize;

    // cursor/index pointer into vec (derived from above; no overflow
    // checks since Vec will catch it). The use case for these is to
    // make one initial call to whichever/both you want to use,
    // setting the index to 0. Then, in your loops, simply add the
    // correct offset. Calling move_* inside your loop isn't too bad
    // either, assuming LLVM can notice that self.rows()/.cols() is a
    // loop invariant. It might even do the same for .is_rowwise().
    fn move_right(&self, index : usize) -> usize {
        if self.is_rowwise() { index + 1 } else { index + self.rows() }
    }
    fn move_down(&self, index : usize) -> usize {
        if self.is_rowwise() { index + self.cols() } else { index + 1 }
    }

    // higher-level users of the first three
    fn getval(&self, row : usize, col : usize) -> T
    {
        self.vec()[self._rowcol_to_index(row, col)]
    }
    fn setval(&mut self, row : usize, col : usize, val : T)
    {
        // need to use two statements below because you can't use
        // self both mutably (updating the vector) and immutably
        let index = self._rowcol_to_index(row, col);
        self.vec_as_mut()[index] = val;
    }
}

// What do they call fns in Rust that are called with
// module::something()? Whatever they're called, it probably makes
// more sense to call things like matrix multiply that way.

/*
// All variants will use the same structure format, but we give them
// different names to distinguish them
struct RowwiseMatrix<T : NumericOps> {
    rows : usize,
    cols : usize,
    v : Vec<T>,
}
impl<T : NumericOps> Accessors<T> for RowwiseMatrix<T> {
    fn rows(&self) -> usize { self.rows }
    fn cols(&self) -> usize { self.cols }
    fn vec_as_mut_slice(&mut self) -> &[T] { &self.v[..] }
    fn vec_as_mut(&mut self) -> &mut Vec<T> { &mut (self.v) }
}
struct ColwiseMatrix<T : NumericOps> {
    rows : usize,
    cols : usize,
    v : Vec<T>,
}
impl<T : NumericOps> Accessors<T> for ColwiseMatrix<T> {
    fn rows(&self) -> usize { self.rows }
    fn cols(&self) -> usize { self.cols }
    fn vec_as_mut_slice(&mut self) -> &[T] { &self.v[..] }
    fn vec_as_mut(&mut self) -> &mut Vec<T> { &mut (self.v) }
}

*/
pub struct CheckedMatrix<T : NumericOps> {
    rows : usize,
    cols : usize,
    v : Vec<T>,
    is_rowwise : bool, // checked at run time
}
impl<T : NumericOps> Accessors<T> for CheckedMatrix<T> {
    fn rows(&self) -> usize { self.rows }
    fn cols(&self) -> usize { self.cols }
    fn vec(&self) -> &Vec<T> { &self.v }
    fn vec_as_mut(&mut self) -> &mut Vec<T> { &mut (self.v) }
    fn vec_as_mut_slice(&mut self) -> &[T] { &self.v[..] }
}
impl<T : NumericOps> LayoutAgnostic<T> for CheckedMatrix<T> { }
impl<T : NumericOps> LayoutSpecific<T> for CheckedMatrix<T> {
    fn is_rowwise(&self) -> bool { self.is_rowwise }
    fn _rowcol_to_index(&self, row : usize, col : usize) -> usize {
        // won't check that row/col are within allowed range
        if self.is_rowwise {
            row * self.cols + col
        } else {
            col * self.rows + row
        }
    }
}

// what should our return type be?
//
// * just the struct type?
// * an impl line?
// x combine both? (can't have -> Struct : Foo Bar)

fn construct_checked_matrix<T : NumericOps>
    (rows : usize, cols : usize, rowwise : bool)
    -> CheckedMatrix<T>
// where T : NumericOps, M : Accessors<T> + LayoutAgnostic<T> + LayoutSpecific<T>
{
    let v = vec![T::zero(); rows * cols];

    CheckedMatrix::<T> { // gains a concrete type
        rows : rows, cols : cols,
        v : v, is_rowwise : rowwise
    }
}

*/

// Following on from the names above, I think I'll rename the two
// traits that each struct should implement according to whether it's
// layout-neutral or layout-specific/-sensitive.

// Is this a good approach?
//
// I think it's OK. In "normal" OO, we'd have a base "Matrix" class
// from which we'd extend into row-wise and column-wise forms. Then
// the constructors would return types that are still classed as
// "Matrix" objects, no matter what the subclass name.
//
// We can't do that in Rust because there's no object inheritance.
//
// What we can do is return an object that satisfies trait interfaces.
//
// A problem arises when we want to store different struct variants in
// a single vector or variable. Even if all the variants are the same
// size, a return type of "implements Foo" does not guarantee that,
// and the type system can't go and check.
//
// Actually, though, that's also a problem in standard OO. The
// solution there is that we allocate objects on the heap and store
// pointers to them.
//
// I think that the way I'm handling it is the proper Rust way of
// doing things. For example, if the distinction between the different
// object variants matters, we can call the appropriate constructor
// (or manually construct the object) and store it with the
// appropriate struct type. As an example, a "transform" object that
// does encoding or decoding using IDA or Reed-Solomon coding will
// probably want to use specific row-wise or column-wise variants for
// the input, transform and output matrices.
//
// If the application doesn't care about the internal details, it can
// cast the returned object as something that "implements matrix
// operations on <some numeric type>". It loses the ability to
// ascertain the exact type of the underlying struct from that point
// on, though.
//
// There is a bit of extra boilerplate for all three matrix types
// above. The accessors have to be attached to the structs (because
// traits deal with methods, not attributes).

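// A hedged sketch of the heap-pointer approach mentioned above, using
// the (currently commented-out) Accessors trait and CheckedMatrix
// constructor. Trait objects let us store different variants together,
// at the cost of dynamic dispatch and of losing the concrete type:
/*
fn collect_matrices<T : NumericOps>() -> Vec<Box<dyn Accessors<T>>> {
    let mut v : Vec<Box<dyn Accessors<T>>> = Vec::new();
    v.push(Box::new(construct_checked_matrix::<T>(3, 4, true)));
    v.push(Box::new(construct_checked_matrix::<T>(4, 3, false)));
    v
}
 */
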

#[cfg(test)]
mod tests {
    use super::*;
    // use num_traits::identities::{One,Zero};

    // External crate only used in development/testing
    use guff_matrix::*;
    use guff_matrix::x86::*;

    #[test]
    fn vandermonde_works() {
        let f = new_gf8(0x11b, 0x1b);
        let v = vandermonde_matrix(&f, 5, 5);
        for i in 5..10 {
            assert_eq!(1, v[i], "expect row 1 all 1s");
        }
        for i in 0..5 {
            // check column 0, including 0**0 = 1
            assert_eq!(1, v[i * 5], "expect col 0 all 1s");
            // check column 1
            let index = i * 5 + 1;
            assert_eq!(i, v[index].into(), "expect col 1 ascending 0..n");
        }
    }

    // The only sure-fire way to test the functions is to do a matrix
    // multiplication, then do the inverse and check whether the
    // result is the same as the original...
    //
    // I should have most of what I need in guff-matrix crate

    const SAMPLE_DATA : [u8; 20] = [
        1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10,
        11, 12, 13, 14, 15, 16, 17, 18, 19, 20
    ];

    // Direct copy from guff_matrix::simulator
    pub fn interleave_streams(dest : &mut [u8], slices : &Vec<&[u8]>) {

        let cols = dest.len() / slices.len();
        let mut dest = dest.iter_mut();
        let mut slice_iters : Vec::<_> = Vec::with_capacity(slices.len());
        for s in slices {
            let iter = s.iter();
            slice_iters.push(iter);
        }

        for _ in 0 .. cols {
            for slice in &mut slice_iters {
                *dest.next().unwrap() = *slice.next().unwrap();
            }
        }
    }

    #[test]
    fn test_cauchy_transform() {

        // 4x4 xform matrix is the smallest allowed right now
        let key = vec![ 1, 2, 3, 4, 5, 6, 7, 8 ];
        let field = new_gf8(0x11b, 0x1b);
        let cauchy_data = cauchy_matrix(&field, &key, 4, 4);

        // guff-matrix needs either/both/all:
        // * architecture-neutral matrix type (NoSimd option)
        // * automatic selection of arch-specific type (with fallback)
        // * new_matrix() type constructors?
        //
        // For now, though, the only concrete matrix types that are
        // implemented are for supporting x86 simd operation, so use
        // those (clunky) names...

        let mut xform = X86SimpleMatrix::<x86::X86u8x16Long0x11b>
            ::new(4, 4, true);
        xform.fill(&cauchy_data);

        // must choose cols appropriately (gcd requirement)
        let mut input = X86SimpleMatrix::<x86::X86u8x16Long0x11b>
            ::new(4, 5, false);
        input.fill(&SAMPLE_DATA);

        let mut output = X86SimpleMatrix::<x86::X86u8x16Long0x11b>
            ::new(4, 5, true);

        // use non-SIMD multiply

        reference_matrix_multiply(&mut xform, &mut input,
                                  &mut output, &field);

        // We will also need a transposition (interleaving) step to
        // convert the rowwise output matrix from above into colwise
        // format for the inverse transform.

        // Right now, the only implementation of the interleaver is in
        // the simulation module... copying it in here.

        // we need to do some up-front work to use it:
        let array = output.as_slice();
        let slices : Vec<&[u8]> = array.chunks(5).collect();
        let mut dest = [0u8; 20];

        interleave_streams(&mut dest, &slices);

        // Do the inverse transform (same key)

        let cauchy_data = cauchy_inverse_matrix(&field, &key, 4);
        let mut xform = X86SimpleMatrix::<x86::X86u8x16Long0x11b>
            ::new(4, 4, true);
        xform.fill(&cauchy_data);

        let mut input = X86SimpleMatrix::<x86::X86u8x16Long0x11b>
            ::new(4, 5, false);
        input.fill(&dest);

        let mut output = X86SimpleMatrix::<x86::X86u8x16Long0x11b>
            ::new(4, 5, false);

        // use non-SIMD multiply
        reference_matrix_multiply(&mut xform, &mut input,
                                  &mut output, &field);

        assert_eq!(output.as_slice(), &SAMPLE_DATA);

        unsafe {
            // use SIMD multiply
            simd_warm_multiply(&mut xform, &mut input,
                               &mut output);
        }

        assert_eq!(output.as_slice(), &SAMPLE_DATA);

    }

    // Another test I could do would be to multiply the Cauchy matrix
    // by its inverse and check that the result is the identity
    // matrix. However, as it currently stands, guff-matrix doesn't
    // allow for general-purpose matrix multiply yet, since it's
    // optimised for xform/input matrix pairs that satisfy the gcd
    // property.
    //
    // Ah, I think I can ... no gcd checks in matrix_multiply
    #[test]
    fn test_inv_inv_cauchy() {
        // 4x4 xform matrix is the smallest allowed right now
        let key = vec![ 1, 2, 3, 4, 5, 6, 7, 8 ];
        let field = new_gf8(0x11b, 0x1b);

        let forward_data = cauchy_matrix(&field, &key, 4, 4);
        let mut xform = X86SimpleMatrix::<x86::X86u8x16Long0x11b>
            ::new(4, 4, true);
        xform.fill(&forward_data);

        // data returned from cauchy_inverse_matrix needs to be
        // interleaved if it's in the 'input' position

        let inverse_data = cauchy_inverse_matrix(&field, &key, 4);

        let array = inverse_data;
        let slices : Vec<&[u8]> = array.chunks(4).collect();
        let mut dest = [0u8; 16];

        interleave_streams(&mut dest, &slices);

        let mut input = X86SimpleMatrix::<x86::X86u8x16Long0x11b>
            ::new(4, 4, false);
        input.fill(&dest);

        let mut output = X86SimpleMatrix::<x86::X86u8x16Long0x11b>
            ::new(4, 4, true);

        // use non-SIMD multiply
        reference_matrix_multiply(&mut xform, &mut input,
                                  &mut output, &field);

        assert_eq!(output.as_slice(),
                   [ 1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1 ])

    }


    // Can't test Vandermonde yet because I haven't implemented matrix
    // inversion here or in guff-matrix

    /*
    // Might as well start writing some test cases
    #[test]
    fn test_making_vectors() {
        // can we convert u8 to T?
        let a = [0u8, 1, 2, 3, 4];
        // do I have to clone() to prevent getting refs?
        let v1 : Vec<u8> = a.iter().cloned().collect();
        let v2 : Vec<u8> = a.iter().cloned().collect();
        let mut v3 : Vec<u8> = a.iter().cloned().collect();
        let f = new_gf8(0x11b, 0x1b);

        // should change v3
        assert_eq!(v1, v3);
        vector_mul(&f, &mut v3, &v1, &v2);
        assert_ne!(v1, v3);
    }
     */

    /*

    #[test]
    fn test_dot_products() {
        // I'll only use mul by zero or one so that I can mentally
        // calculate the result without needing to know the results of
        // more complex field multiplications
        let a = [0u8, 1, 1, 9, 4, 8, 4];
        let b = [0u8, 0, 2, 0, 1, 1, 1];
        // do I have to clone() to prevent getting refs?
        let v1 : Vec<u8> = a.iter().cloned().collect();
        let v2 : Vec<u8> = b.iter().cloned().collect();
        let f = new_gf8(0x11b, 0x1b);

        assert_ne!(v1, v2);
        let sum = dot_product(&f, &v1, &v2);
        assert_eq!(sum, 0 ^ 0 ^ 2 ^ 0 ^ 4 ^ 8 ^ 4);
    }
    #[test]
    fn test_construct_checked_matrix() {
        let mat = construct_checked_matrix::<u8>(3, 4, false);
        assert_eq!(3, mat.rows());
        assert_eq!(4, mat.cols());
        assert_eq!(false, mat.is_rowwise());
        assert_eq!(true , mat.is_colwise());
    }

    // fn sig that requires a certain trait be implemented
    fn looking_for_accessors<T>(mat : &impl Accessors<T>)
    where T : NumericOps {
        assert!(mat.rows() > 0)
    }

    // we get a struct, so pass it to looking_for_accessors() above
    #[test]
    fn test_impl_accessors_satisfied() {
        let mat = construct_checked_matrix::<u8>(3, 4, false);
        // no compile error, and the assert above passes
        looking_for_accessors(&mat);
    }

    // Various simple things to test ...
    //
    // * initial matrix is zeroed
    // * identity works
    // * we can compare output of vec accessor with a list

    #[test]
    fn basic_vec_comparison() {
        let mut mat = construct_checked_matrix::<u8>(3, 3, true);
        // it seems we have to compare as slice ... no big deal
        assert_eq!([0u8; 9], &mat.vec()[..]);

        mat.one();
        assert_eq!([1u8, 0, 0, 0, 1, 0, 0, 0, 1], &mat.vec()[..]);

        mat.zero();
        assert_eq!([0u8; 9], &mat.vec()[..]);

        // What if we compare a shared ref with a mutable slice?
        // Surprisingly, this works with no compiler complaint.
        assert_eq!([0u8; 9], mat.vec_as_mut_slice());
    }

    #[test]
    fn mutate_single_vec_elements() {
        let mut mat = construct_checked_matrix::<u8>(3, 3, true);
        { // drop v after use: see below
            let v = mat.vec_as_mut();
            v[0] = 1; v[4] = 1; v[8] = 1;
        }
        let mut identity = construct_checked_matrix::<u8>(3, 3, true);
        identity.one(); // maybe make this a fluent interface?

        assert_eq!(mat.vec(), identity.vec());

        // without the braces above, the assignment below would cause
        // the compiler to complain about mixing mutable/immutable
        // borrows. So we drop the original v and then recreate it
        // here after the immutable borrow in mat.vec() above.
        let v = mat.vec_as_mut();
        v[0] = 1;
    }

    // basic test of _rowcol_to_index (suffices for all matrix sizes)
    #[test]
    fn test_rowwise_checked() {
        // 2x2 matrix gives a truth table of sorts
        let rowwise = construct_checked_matrix::<u8>(2, 2, true);
        assert_eq!(0, rowwise._rowcol_to_index(0,0));
        assert_eq!(1, rowwise._rowcol_to_index(0,1));
        assert_eq!(2, rowwise._rowcol_to_index(1,0));
        assert_eq!(3, rowwise._rowcol_to_index(1,1));

        let colwise = construct_checked_matrix::<u8>(2, 2, false);
        assert_eq!(0, colwise._rowcol_to_index(0,0));
        assert_eq!(1, colwise._rowcol_to_index(1,0)); // these two
        assert_eq!(2, colwise._rowcol_to_index(0,1)); // swapped
        assert_eq!(3, colwise._rowcol_to_index(1,1));
    }

    // "cursors" are another way to traverse rows/columns. If these
    // work, we don't need to check any larger matrix sizes.
    #[test]
    fn test_cursor_checked() {
        // 2x2 matrix gives a truth table of sorts
        let rowwise = construct_checked_matrix::<u8>(2, 2, true);
        assert_eq!(1, rowwise.move_right(0));
        assert_eq!(2, rowwise.move_down(0));

        let colwise = construct_checked_matrix::<u8>(2, 2, false);
        assert_eq!(2, colwise.move_right(0));
        assert_eq!(1, colwise.move_down(0));
    }

    // Other stuff relating to orientation also needs checking. We
    // can't assume anything about how any other layout-specific
    // functions will reason about it, and which of the above
    // functions they use.
    //
    // I might refactor the above tests so that it's easier to reuse
    // them when I implement the other two (non-checked) layouts
    //
    // something like "assert_rowwise_cursor(mat)"

    // I can nearly test the cauchy functions ... just need a multiply routine
    #[test]
    fn test_cauchy_inverse_identity() {
        todo!()
    }
    */
}