feanor_math/algorithms/convolution/mod.rs

use std::alloc::{Allocator, Global};
use std::ops::Deref;

use crate::ring::*;
use crate::seq::subvector::SubvectorView;
use crate::seq::*;

use karatsuba::*;

///
/// Contains an optimized implementation of Karatsuba's algorithm for computing convolutions.
/// 
pub mod karatsuba;

///
/// Contains an implementation of computing convolutions using complex floating-point FFTs.
/// 
pub mod fft;

///
/// Contains an implementation of computing convolutions using NTTs, i.e. FFTs over
/// a finite field that has suitable roots of unity.
/// 
pub mod ntt;

///
/// Contains an implementation of computing convolutions by considering them modulo
/// various primes that are either smaller or allow for suitable roots of unity.
/// 
pub mod rns;

///
/// Trait for objects that can compute a convolution over some ring.
/// 
/// # Example
/// ```rust
/// # use std::cmp::{min, max};
/// # use feanor_math::ring::*;
/// # use feanor_math::primitive_int::*;
/// # use feanor_math::seq::*;
/// # use feanor_math::algorithms::convolution::*;
/// struct NaiveConvolution;
/// // we support all rings!
/// impl<R: ?Sized + RingBase> ConvolutionAlgorithm<R> for NaiveConvolution {
///     fn compute_convolution<S: RingStore<Type = R>, V1: VectorView<R::Element>, V2: VectorView<R::Element>>(&self, lhs: V1, rhs: V2, dst: &mut [R::Element], ring: S) {
///         for i in 0..(lhs.len() + rhs.len() - 1) {
///             for j in max(0, i as isize - rhs.len() as isize + 1)..min(lhs.len() as isize, i as isize + 1) {
///                 ring.add_assign(&mut dst[i], ring.mul_ref(lhs.at(j as usize), rhs.at(i - j as usize)));
///             }
///         }
///     }
///     fn supports_ring<S: RingStore<Type = R>>(&self, _: S) -> bool
///         where S: Copy
///     { true }
/// }
/// let lhs = [1, 2, 3, 4, 5];
/// let rhs = [2, 3, 4, 5, 6];
/// let mut expected = [0; 10];
/// let mut actual = [0; 10];
/// STANDARD_CONVOLUTION.compute_convolution(lhs, rhs, &mut expected, StaticRing::<i64>::RING);
/// NaiveConvolution.compute_convolution(lhs, rhs, &mut actual, StaticRing::<i64>::RING);
/// assert_eq!(expected, actual);
/// ```
/// 
pub trait ConvolutionAlgorithm<R: ?Sized + RingBase> {

    ///
    /// Additional data associated to a list of ring elements, which can be used to speed
    /// up the computation of convolutions that have this list as one of their operands.
    ///
    /// For more details, see [`ConvolutionAlgorithm::prepare_convolution_operand()`].
    /// Note that a `PreparedConvolutionOperand` can only be used for convolutions
    /// with the same list of values it was created for.
    /// 
    #[stability::unstable(feature = "enable")]
    type PreparedConvolutionOperand = ();

    ///
    /// Elementwise adds the convolution of `lhs` and `rhs` to `dst`.
    /// 
    /// In other words, computes `dst[i] += sum_j lhs[j] * rhs[i - j]` for all `i`, where
    /// `j` runs through all non-negative integers for which `lhs[j]` and `rhs[i - j]` are
    /// defined, i.e. not out-of-bounds.
    /// 
    /// Mathematically, `dst.len() >= lhs.len() + rhs.len() - 1` would suffice to store the
    /// result. However, to allow for more efficient implementations, callers are instead
    /// required to ensure that `dst.len() >= lhs.len() + rhs.len()`.
    /// 
    /// # Panics
    /// 
    /// Panics if `dst` is shorter than `lhs.len() + rhs.len() - 1`. May panic if `dst` is shorter
    /// than `lhs.len() + rhs.len()`.
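    /// 
    /// # Example
    /// 
    /// The convolution is added to the existing content of `dst`; here over the integers,
    /// using [`STANDARD_CONVOLUTION`]:
    /// ```rust
    /// # use feanor_math::ring::*;
    /// # use feanor_math::primitive_int::*;
    /// # use feanor_math::algorithms::convolution::*;
    /// // note that dst.len() = lhs.len() + rhs.len() = 4
    /// let mut dst = [100; 4];
    /// STANDARD_CONVOLUTION.compute_convolution([1, 2], [3, 4], &mut dst, StaticRing::<i64>::RING);
    /// // the convolution of [1, 2] and [3, 4] is [3, 10, 8]
    /// assert_eq!([103, 110, 108, 100], dst);
    /// ```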
    /// 
    /// TODO: On next breaking release, just take slices instead of [`VectorView`]s.
    /// This might require the user to copy the data once, but so far most algorithms copy
    /// it anyway, because this will make subsequent memory accesses more predictable and
    /// better optimized.
    /// 
    /// Maybe also consider taking the ring by `&RingBase`, since this would then allow
    /// for dynamic dispatch.
    /// 
    fn compute_convolution<S: RingStore<Type = R> + Copy, V1: VectorView<R::Element>, V2: VectorView<R::Element>>(&self, lhs: V1, rhs: V2, dst: &mut [R::Element], ring: S);

    ///
    /// Returns whether this convolution algorithm supports computations over
    /// the given ring.
    /// 
    /// Note that most algorithms will support all rings of type `R`. However, in some cases,
    /// e.g. for finite fields, required data might only be precomputed for some moduli,
    /// and thus only these will be supported.
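    /// 
    /// # Example
    /// 
    /// For example, the Karatsuba-based [`STANDARD_CONVOLUTION`] supports every ring:
    /// ```rust
    /// # use feanor_math::ring::*;
    /// # use feanor_math::primitive_int::*;
    /// # use feanor_math::algorithms::convolution::*;
    /// assert!(STANDARD_CONVOLUTION.supports_ring(StaticRing::<i64>::RING));
    /// ```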
    /// 
    fn supports_ring<S: RingStore<Type = R> + Copy>(&self, ring: S) -> bool;

    ///
    /// Takes an input list of values and computes an opaque [`ConvolutionAlgorithm::PreparedConvolutionOperand`],
    /// which can be used to compute future convolutions with this list of values faster.
    /// 
    /// Although the [`ConvolutionAlgorithm::PreparedConvolutionOperand`] does not have any explicit reference
    /// to the list of values it was created for, passing it to [`ConvolutionAlgorithm::compute_convolution_prepared()`]
    /// with another list of values will give erroneous results.
    /// 
    /// # Length-dependence when preparing a convolution
    /// 
    /// For some algorithms, different data is required to speed up the convolution with an operand, depending on the 
    /// length of the other operand. For example, for FFT-based convolutions, the prepared data would consist of the
    /// Fourier transform of the list of values, zero-padded to a length that can store the complete result of the
    /// (future) convolution.
    /// 
    /// To handle this, implementations can make use of the `length_hint`, which, if given, should be an upper bound
    /// on the length of the output of any future convolution that uses the given operand. Alternatively, implementations
    /// are encouraged to not compute any data during [`ConvolutionAlgorithm::prepare_convolution_operand()`],
    /// but to initialize an object with interior mutability, and use it to cache data computed during
    /// [`ConvolutionAlgorithm::compute_convolution_prepared()`].
    /// 
    /// TODO: At next breaking release, remove the default implementation
    /// 
    /// TODO: On next breaking release, just take slices instead of [`VectorView`]s.
    /// This might require the user to copy the data once, but so far most algorithms copy
    /// it anyway, because this will make subsequent memory accesses more predictable and
    /// better optimized.
    /// 
    /// # Example
    /// 
    /// ```rust
    /// # use feanor_math::ring::*;
    /// # use feanor_math::algorithms::convolution::*;
    /// # use feanor_math::algorithms::convolution::ntt::*;
    /// # use feanor_math::rings::zn::*;
    /// # use feanor_math::rings::zn::zn_64::*;
    /// # use feanor_math::rings::finite::*;
    /// let ring = Zn::new(65537);
    /// let convolution = NTTConvolution::new(ring);
    /// let lhs = ring.elements().take(10).collect::<Vec<_>>();
    /// let rhs = ring.elements().take(10).collect::<Vec<_>>();
    /// // "standard" use
    /// let mut expected = (0..20).map(|_| ring.zero()).collect::<Vec<_>>();
    /// convolution.compute_convolution(&lhs, &rhs, &mut expected, ring);
    /// 
    /// // "prepared" variant
    /// let lhs_prep = convolution.prepare_convolution_operand(&lhs, None, ring);
    /// let rhs_prep = convolution.prepare_convolution_operand(&rhs, None, ring);
    /// let mut actual = (0..20).map(|_| ring.zero()).collect::<Vec<_>>();
    /// // this will now be faster than `convolution.compute_convolution()`
    /// convolution.compute_convolution_prepared(&lhs, Some(&lhs_prep), &rhs, Some(&rhs_prep), &mut actual, ring);
    /// assert!(expected.iter().zip(actual.iter()).all(|(l, r)| ring.eq_el(l, r)));
    /// ```
    /// 
    /// Maybe also consider taking the ring by `&RingBase`, since this would then allow
    /// for dynamic dispatch.
    /// 
    #[stability::unstable(feature = "enable")]
    fn prepare_convolution_operand<S, V>(&self, _val: V, _length_hint: Option<usize>, _ring: S) -> Self::PreparedConvolutionOperand
        where S: RingStore<Type = R> + Copy, V: VectorView<R::Element>
    {
        // specialization trick: produce `()` if `PreparedConvolutionOperand` is the default `()`,
        // and panic otherwise, since a custom operand type requires a custom implementation
        struct ProduceUnitType;
        trait ProduceValue<T> {
            fn produce() -> T;
        }
        impl<T> ProduceValue<T> for ProduceUnitType {
            default fn produce() -> T {
                panic!("if you specialize ConvolutionAlgorithm::PreparedConvolutionOperand, you must also specialize ConvolutionAlgorithm::prepare_convolution_operand()")
            }
        }
        impl ProduceValue<()> for ProduceUnitType {
            fn produce() -> () {}
        }
        return <ProduceUnitType as ProduceValue<Self::PreparedConvolutionOperand>>::produce();
    }
    
    ///
    /// Elementwise adds the convolution of `lhs` and `rhs` to `dst`. If provided, the given
    /// prepared convolution operands are used for a faster computation.
    /// 
    /// When called with `None` for both prepared convolution operands, this is exactly
    /// equivalent to [`ConvolutionAlgorithm::compute_convolution()`].
    /// 
    /// TODO: On next breaking release, just take slices instead of [`VectorView`]s.
    /// This might require the user to copy the data once, but so far most algorithms copy
    /// it anyway, because this will make subsequent memory accesses more predictable and
    /// better optimized.
    /// 
    #[stability::unstable(feature = "enable")]
    fn compute_convolution_prepared<S, V1, V2>(&self, lhs: V1, _lhs_prep: Option<&Self::PreparedConvolutionOperand>, rhs: V2, _rhs_prep: Option<&Self::PreparedConvolutionOperand>, dst: &mut [R::Element], ring: S)
        where S: RingStore<Type = R> + Copy, V1: VectorView<R::Element>, V2: VectorView<R::Element>
    {
        self.compute_convolution(lhs, rhs, dst, ring)
    }

    ///
    /// Computes a convolution for each tuple in the given sequence, and adds the sum of
    /// all these convolutions to `dst`.
    /// 
    /// In other words, this computes `dst[k] += sum_l sum_(i + j = k) lhs_l[i] * rhs_l[j]`,
    /// where `(lhs_l, _, rhs_l, _)` is the `l`-th entry of `values`. This can be faster than
    /// computing each convolution separately via [`ConvolutionAlgorithm::compute_convolution_prepared()`].
    /// 
    /// TODO: On next breaking release, just take slices instead of [`VectorView`]s.
    /// This might require the user to copy the data once, but so far most algorithms copy
    /// it anyway, because this will make subsequent memory accesses more predictable and
    /// better optimized.
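    /// 
    /// # Example
    /// 
    /// For instance, summing two convolutions over the integers, without prepared operands:
    /// ```rust
    /// # use feanor_math::ring::*;
    /// # use feanor_math::primitive_int::*;
    /// # use feanor_math::algorithms::convolution::*;
    /// let pairs = [([1, 2], [3, 4]), ([5, 6], [7, 8])];
    /// let mut dst = [0; 4];
    /// STANDARD_CONVOLUTION.compute_convolution_sum(
    ///     pairs.iter().map(|(l, r)| (l, None, r, None)),
    ///     &mut dst,
    ///     StaticRing::<i64>::RING
    /// );
    /// // conv([1, 2], [3, 4]) = [3, 10, 8] and conv([5, 6], [7, 8]) = [35, 82, 48]
    /// assert_eq!([38, 92, 56, 0], dst);
    /// ```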
    /// 
    #[stability::unstable(feature = "enable")]
    fn compute_convolution_sum<'a, S, I, V1, V2>(&self, values: I, dst: &mut [R::Element], ring: S) 
        where S: RingStore<Type = R> + Copy, 
            I: ExactSizeIterator<Item = (V1, Option<&'a Self::PreparedConvolutionOperand>, V2, Option<&'a Self::PreparedConvolutionOperand>)>,
            V1: VectorView<R::Element>,
            V2: VectorView<R::Element>,
            Self: 'a,
            R: 'a
    {
        for (lhs, lhs_prep, rhs, rhs_prep) in values {
            self.compute_convolution_prepared(lhs, lhs_prep, rhs, rhs_prep, dst, ring)
        }
    }
}

impl<R, C> ConvolutionAlgorithm<R> for C
    where R: ?Sized + RingBase,
        C: Deref,
        C::Target: ConvolutionAlgorithm<R>
{
    type PreparedConvolutionOperand = <C::Target as ConvolutionAlgorithm<R>>::PreparedConvolutionOperand;

    fn compute_convolution<S: RingStore<Type = R> + Copy, V1: VectorView<R::Element>, V2: VectorView<R::Element>>(&self, lhs: V1, rhs: V2, dst: &mut [R::Element], ring: S) {
        (**self).compute_convolution(lhs, rhs, dst, ring)
    }

    fn supports_ring<S: RingStore<Type = R> + Copy>(&self, ring: S) -> bool {
        (**self).supports_ring(ring)
    }

    fn prepare_convolution_operand<S, V>(&self, val: V, len_hint: Option<usize>, ring: S) -> Self::PreparedConvolutionOperand
        where S: RingStore<Type = R> + Copy, V: VectorView<R::Element>
    {
        (**self).prepare_convolution_operand(val, len_hint, ring)
    }

    fn compute_convolution_prepared<S, V1, V2>(&self, lhs: V1, lhs_prep: Option<&Self::PreparedConvolutionOperand>, rhs: V2, rhs_prep: Option<&Self::PreparedConvolutionOperand>, dst: &mut [R::Element], ring: S)
        where S: RingStore<Type = R> + Copy, V1: VectorView<R::Element>, V2: VectorView<R::Element>
    {
        (**self).compute_convolution_prepared(lhs, lhs_prep, rhs, rhs_prep, dst, ring);
    }

    fn compute_convolution_sum<'b, S, I, V1, V2>(&self, values: I, dst: &mut [R::Element], ring: S) 
        where S: RingStore<Type = R> + Copy, 
            I: ExactSizeIterator<Item = (V1, Option<&'b Self::PreparedConvolutionOperand>, V2, Option<&'b Self::PreparedConvolutionOperand>)>,
            V1: VectorView<R::Element>,
            V2: VectorView<R::Element>,
            Self: 'b,
            R: 'b
    {
        (**self).compute_convolution_sum(values, dst, ring);
    }
}

///
/// Implementation of convolutions that uses Karatsuba's algorithm
/// with a threshold defined by [`KaratsubaHint`].
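/// 
/// # Example
/// 
/// The ready-made instance [`STANDARD_CONVOLUTION`] uses the global allocator:
/// ```rust
/// # use feanor_math::ring::*;
/// # use feanor_math::primitive_int::*;
/// # use feanor_math::algorithms::convolution::*;
/// let mut dst = [0; 4];
/// STANDARD_CONVOLUTION.compute_convolution([1, 2], [3, 4], &mut dst, StaticRing::<i64>::RING);
/// assert_eq!([3, 10, 8, 0], dst);
/// ```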
/// 
#[derive(Clone, Copy, Debug)]
pub struct KaratsubaAlgorithm<A: Allocator = Global> {
    allocator: A
}

///
/// Good default algorithm for computing convolutions, using Karatsuba's algorithm
/// with a threshold defined by [`KaratsubaHint`].
/// 
pub const STANDARD_CONVOLUTION: KaratsubaAlgorithm = KaratsubaAlgorithm::new(Global);

impl<A: Allocator> KaratsubaAlgorithm<A> {
    
    #[stability::unstable(feature = "enable")]
    pub const fn new(allocator: A) -> Self {
        Self { allocator }
    }
}

impl<R: ?Sized + RingBase, A: Allocator> ConvolutionAlgorithm<R> for KaratsubaAlgorithm<A> {

    fn compute_convolution<S: RingStore<Type = R>, V1: VectorView<<R as RingBase>::Element>, V2: VectorView<<R as RingBase>::Element>>(&self, lhs: V1, rhs: V2, dst: &mut [<R as RingBase>::Element], ring: S) {
        karatsuba(ring.get_ring().karatsuba_threshold(), dst, SubvectorView::new(&lhs), SubvectorView::new(&rhs), &ring, &self.allocator)
    }

    fn supports_ring<S: RingStore<Type = R> + Copy>(&self, _ring: S) -> bool {
        true
    }
}

///
/// Trait to allow rings to customize the parameters with which [`KaratsubaAlgorithm`] will
/// compute convolutions over the ring.
/// 
#[stability::unstable(feature = "enable")]
pub trait KaratsubaHint: RingBase {

    ///
    /// Defines the threshold above which [`KaratsubaAlgorithm`] will use Karatsuba's algorithm.
    /// 
    /// Concretely, when this returns `k`, [`KaratsubaAlgorithm`] will reduce the convolution
    /// down to ones on slices of size `2^k`, and compute those naively. The default value is
    /// `0`, but if the considered rings have fast multiplication (compared to addition), then
    /// setting it higher may result in a performance gain.
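    /// 
    /// For example, a ring type with comparatively expensive multiplication could raise the
    /// threshold by overriding the specializable blanket implementation. A minimal sketch,
    /// where `MyRingBase` stands for a hypothetical [`RingBase`] implementor:
    /// ```rust,ignore
    /// impl KaratsubaHint for MyRingBase {
    ///     fn karatsuba_threshold(&self) -> usize {
    ///         // compute convolutions naively on slices of length up to 2^4 = 16
    ///         4
    ///     }
    /// }
    /// ```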
    /// 
    fn karatsuba_threshold(&self) -> usize;
}

impl<R: RingBase + ?Sized> KaratsubaHint for R {

    default fn karatsuba_threshold(&self) -> usize {
        0
    }
}

#[cfg(test)]
use test;
#[cfg(test)]
use crate::primitive_int::*;

#[bench]
fn bench_naive_mul(bencher: &mut test::Bencher) {
    let a: Vec<i32> = (0..32).collect();
    let b: Vec<i32> = (0..32).collect();
    let mut c: Vec<i32> = (0..64).collect();
    bencher.iter(|| {
        c.clear();
        c.resize(64, 0);
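        // threshold 10: slices of length up to 2^10 = 1024 are multiplied naively,
        // so on these length-32 inputs this benchmarks the schoolbook algorithm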
        karatsuba(10, &mut c[..], &a[..], &b[..], StaticRing::<i32>::RING, &Global);
        assert_eq!(c[31], 31 * 31 * 32 / 2 - 31 * (31 + 1) * (31 * 2 + 1) / 6);
        assert_eq!(c[62], 31 * 31);
    });
}

#[bench]
fn bench_karatsuba_mul(bencher: &mut test::Bencher) {
    let a: Vec<i32> = (0..32).collect();
    let b: Vec<i32> = (0..32).collect();
    let mut c: Vec<i32> = (0..64).collect();
    bencher.iter(|| {
        c.clear();
        c.resize(64, 0);
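        // threshold 4: recurse with Karatsuba down to slices of length 2^4 = 16,
        // which are then multiplied naively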
        karatsuba(4, &mut c[..], &a[..], &b[..], StaticRing::<i32>::RING, &Global);
        assert_eq!(c[31], 31 * 31 * 32 / 2 - 31 * (31 + 1) * (31 * 2 + 1) / 6);
        assert_eq!(c[62], 31 * 31);
    });
}


#[allow(missing_docs)]
#[cfg(any(test, feature = "generic_tests"))]
pub mod generic_tests {
    use std::cmp::min;
    use crate::homomorphism::*;
    use crate::ring::*;
    use super::*;

    pub fn test_convolution<C, R>(convolution: C, ring: R, scale: El<R>)
        where C: ConvolutionAlgorithm<R::Type>,
            R: RingStore
    {
        for lhs_len in [2, 3, 4, 15] {
            for rhs_len in [2, 3, 4, 15, 31, 32, 33] {
                let lhs = (0..lhs_len).map(|i| ring.mul_ref_snd(ring.int_hom().map(i), &scale)).collect::<Vec<_>>();
                let rhs = (0..rhs_len).map(|i| ring.mul_ref_snd(ring.int_hom().map(i), &scale)).collect::<Vec<_>>();
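                // `expected[i]` is the closed form of `sum_j lhs[j] * rhs[i - j] = scale^2 * sum_j j * (i - j)`,
                // where `j` ranges over `max(0, i - rhs_len + 1)..=min(i, lhs_len - 1)`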
                let expected = (0..(lhs_len + rhs_len)).map(|i| if i < lhs_len + rhs_len {
                    min(i, lhs_len - 1) * (min(i, lhs_len - 1) + 1) * (3 * i - 2 * min(i, lhs_len - 1) - 1) / 6 - 
                    (i - 1 - min(i, rhs_len - 1)) * (i - min(i, rhs_len - 1)) * (i + 2 * min(i, rhs_len - 1) + 1) / 6
                } else {
                    0
                }).map(|x| ring.mul(ring.int_hom().map(x), ring.pow(ring.clone_el(&scale), 2))).collect::<Vec<_>>();
    
                let mut actual = Vec::new();
                actual.resize_with((lhs_len + rhs_len) as usize, || ring.zero());
                convolution.compute_convolution(&lhs, &rhs, &mut actual, &ring);
                for i in 0..(lhs_len + rhs_len) {
                    assert_el_eq!(&ring, &expected[i as usize], &actual[i as usize]);
                }

                // here, `actual` is initialized with nonzero entries `i^2 * scale^2`, so the
                // expected result contains the additional term `i * i`
                let expected = (0..(lhs_len + rhs_len)).map(|i| if i < lhs_len + rhs_len {
                    i * i +
                    min(i, lhs_len - 1) * (min(i, lhs_len - 1) + 1) * (3 * i - 2 * min(i, lhs_len - 1) - 1) / 6 - 
                    (i - 1 - min(i, rhs_len - 1)) * (i - min(i, rhs_len - 1)) * (i + 2 * min(i, rhs_len - 1) + 1) / 6
                } else {
                    0
                }).map(|x| ring.mul(ring.int_hom().map(x), ring.pow(ring.clone_el(&scale), 2))).collect::<Vec<_>>();
    
                let mut actual = Vec::new();
                actual.extend((0..(lhs_len + rhs_len)).map(|i| ring.mul(ring.int_hom().map(i * i), ring.pow(ring.clone_el(&scale), 2))));
                convolution.compute_convolution(&lhs, &rhs, &mut actual, &ring);
                for i in 0..(lhs_len + rhs_len) {
                    assert_el_eq!(&ring, &expected[i as usize], &actual[i as usize]);
                }
            }
        }
        test_prepared_convolution(convolution, ring, scale);
    }

    fn test_prepared_convolution<C, R>(convolution: C, ring: R, scale: El<R>)
        where C: ConvolutionAlgorithm<R::Type>,
            R: RingStore
    {
        for lhs_len in [2, 3, 4, 14, 15] {
            for rhs_len in [2, 3, 4, 15, 31, 32, 33] {
                let lhs = (0..lhs_len).map(|i| ring.mul_ref_snd(ring.int_hom().map(i), &scale)).collect::<Vec<_>>();
                let rhs = (0..rhs_len).map(|i| ring.mul_ref_snd(ring.int_hom().map(i), &scale)).collect::<Vec<_>>();
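                // same closed form of `scale^2 * sum_j j * (i - j)` as in `test_convolution()`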
                let expected = (0..(lhs_len + rhs_len)).map(|i| if i < lhs_len + rhs_len {
                    min(i, lhs_len - 1) * (min(i, lhs_len - 1) + 1) * (3 * i - 2 * min(i, lhs_len - 1) - 1) / 6 - 
                    (i - 1 - min(i, rhs_len - 1)) * (i - min(i, rhs_len - 1)) * (i + 2 * min(i, rhs_len - 1) + 1) / 6
                } else {
                    0
                }).map(|x| ring.mul(ring.int_hom().map(x), ring.pow(ring.clone_el(&scale), 2))).collect::<Vec<_>>();
    
                let mut actual = Vec::new();
                actual.resize_with((lhs_len + rhs_len) as usize, || ring.zero());
                convolution.compute_convolution_prepared(
                    &lhs,
                    Some(&convolution.prepare_convolution_operand(&lhs, None, &ring)),
                    &rhs,
                    Some(&convolution.prepare_convolution_operand(&rhs, None, &ring)),
                    &mut actual, 
                    &ring
                );
                for i in 0..(lhs_len + rhs_len) {
                    assert_el_eq!(&ring, &expected[i as usize], &actual[i as usize]);
                }

                let mut actual = Vec::new();
                actual.resize_with((lhs_len + rhs_len) as usize, || ring.zero());
                convolution.compute_convolution_prepared(
                    &lhs,
                    Some(&convolution.prepare_convolution_operand(&lhs, None, &ring)),
                    &rhs,
                    None,
                    &mut actual, 
                    &ring
                );
                for i in 0..(lhs_len + rhs_len) {
                    assert_el_eq!(&ring, &expected[i as usize], &actual[i as usize]);
                }
                
                let mut actual = Vec::new();
                actual.resize_with((lhs_len + rhs_len) as usize, || ring.zero());
                convolution.compute_convolution_prepared(
                    &lhs,
                    None,
                    &rhs,
                    Some(&convolution.prepare_convolution_operand(&rhs, None, &ring)),
                    &mut actual, 
                    &ring
                );
                for i in 0..(lhs_len + rhs_len) {
                    assert_el_eq!(&ring, &expected[i as usize], &actual[i as usize]);
                }

                let mut actual = Vec::new();
                actual.resize_with((lhs_len + rhs_len) as usize, || ring.zero());
                let data = [
                    (&lhs[..], Some(convolution.prepare_convolution_operand(&lhs, None, &ring)), &rhs[..], Some(convolution.prepare_convolution_operand(&rhs, None, &ring))),
                    (&rhs[..], None, &lhs[..], None)
                ];
                convolution.compute_convolution_sum(
                    data.as_fn().map_fn(|(l, l_prep, r, r_prep): &(_, _, _, _)| (l, l_prep.as_ref(), r, r_prep.as_ref())).iter(),
                    &mut actual, 
                    &ring
                );
                for i in 0..(lhs_len + rhs_len) {
                    assert_el_eq!(&ring, &ring.add_ref(&expected[i as usize], &expected[i as usize]), &actual[i as usize]);
                }

                let mut actual = Vec::new();
                actual.resize_with((lhs_len + rhs_len) as usize, || ring.zero());
                let data = [
                    (&lhs[..], Some(convolution.prepare_convolution_operand(&lhs, None, &ring)), &rhs[..], None),
                    (&rhs[..], None, &lhs[..], Some(convolution.prepare_convolution_operand(&lhs, None, &ring)))
                ];
                convolution.compute_convolution_sum(
                    data.as_fn().map_fn(|(l, l_prep, r, r_prep)| (l, l_prep.as_ref(), r, r_prep.as_ref())).iter(),
                    &mut actual, 
                    &ring
                );
                for i in 0..(lhs_len + rhs_len) {
                    assert_el_eq!(&ring, &ring.add_ref(&expected[i as usize], &expected[i as usize]), &actual[i as usize]);
                }
            }
        }
    }
}
512}