poulpy_hal/delegates/
vec_znx.rs

1use crate::{
2    api::{
3        VecZnxAdd, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalar, VecZnxAddScalarInplace, VecZnxAutomorphism,
4        VecZnxAutomorphismInplace, VecZnxAutomorphismInplaceTmpBytes, VecZnxCopy, VecZnxFillNormal, VecZnxFillUniform, VecZnxLsh,
5        VecZnxLshInplace, VecZnxLshTmpBytes, VecZnxMergeRings, VecZnxMergeRingsTmpBytes, VecZnxMulXpMinusOne,
6        VecZnxMulXpMinusOneInplace, VecZnxMulXpMinusOneInplaceTmpBytes, VecZnxNegate, VecZnxNegateInplace, VecZnxNormalize,
7        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes,
8        VecZnxRsh, VecZnxRshInplace, VecZnxRshTmpBytes, VecZnxSplitRing, VecZnxSplitRingTmpBytes, VecZnxSub, VecZnxSubABInplace,
9        VecZnxSubBAInplace, VecZnxSubScalar, VecZnxSubScalarInplace, VecZnxSwitchRing,
10    },
11    layouts::{Backend, Module, ScalarZnxToRef, Scratch, VecZnxToMut, VecZnxToRef},
12    oep::{
13        VecZnxAddImpl, VecZnxAddInplaceImpl, VecZnxAddNormalImpl, VecZnxAddScalarImpl, VecZnxAddScalarInplaceImpl,
14        VecZnxAutomorphismImpl, VecZnxAutomorphismInplaceImpl, VecZnxAutomorphismInplaceTmpBytesImpl, VecZnxCopyImpl,
15        VecZnxFillNormalImpl, VecZnxFillUniformImpl, VecZnxLshImpl, VecZnxLshInplaceImpl, VecZnxLshTmpBytesImpl,
16        VecZnxMergeRingsImpl, VecZnxMergeRingsTmpBytesImpl, VecZnxMulXpMinusOneImpl, VecZnxMulXpMinusOneInplaceImpl,
17        VecZnxMulXpMinusOneInplaceTmpBytesImpl, VecZnxNegateImpl, VecZnxNegateInplaceImpl, VecZnxNormalizeImpl,
18        VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl,
19        VecZnxRotateInplaceTmpBytesImpl, VecZnxRshImpl, VecZnxRshInplaceImpl, VecZnxRshTmpBytesImpl, VecZnxSplitRingImpl,
20        VecZnxSplitRingTmpBytesImpl, VecZnxSubABInplaceImpl, VecZnxSubBAInplaceImpl, VecZnxSubImpl, VecZnxSubScalarImpl,
21        VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl,
22    },
23    source::Source,
24};
25
impl<B> VecZnxNormalizeTmpBytes for Module<B>
where
    B: Backend + VecZnxNormalizeTmpBytesImpl<B>,
{
    /// Scratch-size query for the normalize operation; forwards to the
    /// backend's `vec_znx_normalize_tmp_bytes_impl`.
    fn vec_znx_normalize_tmp_bytes(&self) -> usize {
        B::vec_znx_normalize_tmp_bytes_impl(self)
    }
}
34
impl<B> VecZnxNormalize<B> for Module<B>
where
    B: Backend + VecZnxNormalizeImpl<B>,
{
    /// Normalizes column `a_col` of `a` into column `res_col` of `res`, using
    /// `scratch` for temporaries; pure delegation to `vec_znx_normalize_impl`.
    /// `basek` is the limb base parameter passed through unchanged.
    fn vec_znx_normalize<R, A>(&self, basek: usize, res: &mut R, res_col: usize, a: &A, a_col: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_normalize_impl(self, basek, res, res_col, a, a_col, scratch)
    }
}
47
impl<B> VecZnxNormalizeInplace<B> for Module<B>
where
    B: Backend + VecZnxNormalizeInplaceImpl<B>,
{
    /// In-place variant of normalize on column `a_col` of `a`; forwards to the
    /// backend's `vec_znx_normalize_inplace_impl`.
    fn vec_znx_normalize_inplace<A>(&self, basek: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_normalize_inplace_impl(self, basek, a, a_col, scratch)
    }
}
59
impl<B> VecZnxAdd for Module<B>
where
    B: Backend + VecZnxAddImpl<B>,
{
    /// Addition delegate: combines column `a_col` of `a` and `b_col` of `b`
    /// into column `res_col` of `res` via the backend's `vec_znx_add_impl`.
    fn vec_znx_add<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
        C: VecZnxToRef,
    {
        B::vec_znx_add_impl(self, res, res_col, a, a_col, b, b_col)
    }
}
73
impl<B> VecZnxAddInplace for Module<B>
where
    B: Backend + VecZnxAddInplaceImpl<B>,
{
    /// In-place addition delegate (`res` accumulates `a`); forwards to the
    /// backend's `vec_znx_add_inplace_impl`.
    fn vec_znx_add_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_add_inplace_impl(self, res, res_col, a, a_col)
    }
}
86
impl<B> VecZnxAddScalar for Module<B>
where
    B: Backend + VecZnxAddScalarImpl<B>,
{
    /// Scalar-addition delegate. Note the generic roles: `a` is the scalar
    /// operand (`ScalarZnxToRef`) and `b` the vector operand (`VecZnxToRef`);
    /// `b_limb` selects a limb of `b`. Forwards to `vec_znx_add_scalar_impl`.
    fn vec_znx_add_scalar<R, A, D>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &D, b_col: usize, b_limb: usize)
    where
        R: VecZnxToMut,
        A: ScalarZnxToRef,
        D: VecZnxToRef,
    {
        B::vec_znx_add_scalar_impl(self, res, res_col, a, a_col, b, b_col, b_limb)
    }
}
100
impl<B> VecZnxAddScalarInplace for Module<B>
where
    B: Backend + VecZnxAddScalarInplaceImpl<B>,
{
    /// In-place scalar addition targeting limb `res_limb` of column `res_col`;
    /// forwards to the backend's `vec_znx_add_scalar_inplace_impl`.
    fn vec_znx_add_scalar_inplace<R, A>(&self, res: &mut R, res_col: usize, res_limb: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: ScalarZnxToRef,
    {
        B::vec_znx_add_scalar_inplace_impl(self, res, res_col, res_limb, a, a_col)
    }
}
113
impl<B> VecZnxSub for Module<B>
where
    B: Backend + VecZnxSubImpl<B>,
{
    /// Subtraction delegate (`a` minus `b` by argument order — exact semantics
    /// live in the backend); forwards to `vec_znx_sub_impl`.
    fn vec_znx_sub<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
        C: VecZnxToRef,
    {
        B::vec_znx_sub_impl(self, res, res_col, a, a_col, b, b_col)
    }
}
127
impl<B> VecZnxSubABInplace for Module<B>
where
    B: Backend + VecZnxSubABInplaceImpl<B>,
{
    /// In-place subtraction, "AB" operand order (distinguished from the "BA"
    /// variant below by the backend); forwards to `vec_znx_sub_ab_inplace_impl`.
    fn vec_znx_sub_ab_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_sub_ab_inplace_impl(self, res, res_col, a, a_col)
    }
}
140
impl<B> VecZnxSubBAInplace for Module<B>
where
    B: Backend + VecZnxSubBAInplaceImpl<B>,
{
    /// In-place subtraction, "BA" operand order; forwards to the backend's
    /// `vec_znx_sub_ba_inplace_impl`.
    fn vec_znx_sub_ba_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_sub_ba_inplace_impl(self, res, res_col, a, a_col)
    }
}
153
impl<B> VecZnxSubScalar for Module<B>
where
    B: Backend + VecZnxSubScalarImpl<B>,
{
    /// Scalar-subtraction delegate; mirrors [`VecZnxAddScalar`]'s argument
    /// layout (`a` scalar, `b` vector, `b_limb` limb index). Forwards to
    /// `vec_znx_sub_scalar_impl`.
    fn vec_znx_sub_scalar<R, A, D>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &D, b_col: usize, b_limb: usize)
    where
        R: VecZnxToMut,
        A: ScalarZnxToRef,
        D: VecZnxToRef,
    {
        B::vec_znx_sub_scalar_impl(self, res, res_col, a, a_col, b, b_col, b_limb)
    }
}
167
impl<B> VecZnxSubScalarInplace for Module<B>
where
    B: Backend + VecZnxSubScalarInplaceImpl<B>,
{
    /// In-place scalar subtraction targeting limb `res_limb` of column
    /// `res_col`; forwards to `vec_znx_sub_scalar_inplace_impl`.
    fn vec_znx_sub_scalar_inplace<R, A>(&self, res: &mut R, res_col: usize, res_limb: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: ScalarZnxToRef,
    {
        B::vec_znx_sub_scalar_inplace_impl(self, res, res_col, res_limb, a, a_col)
    }
}
180
impl<B> VecZnxNegate for Module<B>
where
    B: Backend + VecZnxNegateImpl<B>,
{
    /// Negation delegate (out-of-place); forwards to the backend's
    /// `vec_znx_negate_impl`.
    fn vec_znx_negate<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_negate_impl(self, res, res_col, a, a_col)
    }
}
193
impl<B> VecZnxNegateInplace for Module<B>
where
    B: Backend + VecZnxNegateInplaceImpl<B>,
{
    /// In-place negation of column `a_col` of `a`; forwards to the backend's
    /// `vec_znx_negate_inplace_impl`.
    fn vec_znx_negate_inplace<A>(&self, a: &mut A, a_col: usize)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_negate_inplace_impl(self, a, a_col)
    }
}
205
impl<B> VecZnxRshTmpBytes for Module<B>
where
    B: Backend + VecZnxRshTmpBytesImpl<B>,
{
    /// Scratch-size query for the right-shift operation; forwards to the
    /// backend's `vec_znx_rsh_tmp_bytes_impl`.
    fn vec_znx_rsh_tmp_bytes(&self) -> usize {
        B::vec_znx_rsh_tmp_bytes_impl(self)
    }
}
214
impl<B> VecZnxLshTmpBytes for Module<B>
where
    B: Backend + VecZnxLshTmpBytesImpl<B>,
{
    /// Scratch-size query for the left-shift operation; forwards to the
    /// backend's `vec_znx_lsh_tmp_bytes_impl`.
    fn vec_znx_lsh_tmp_bytes(&self) -> usize {
        B::vec_znx_lsh_tmp_bytes_impl(self)
    }
}
223
224impl<B> VecZnxLsh<B> for Module<B>
225where
226    B: Backend + VecZnxLshImpl<B>,
227{
228    fn vec_znx_lsh<R, A>(
229        &self,
230        basek: usize,
231        k: usize,
232        res: &mut R,
233        res_col: usize,
234        a: &A,
235        a_col: usize,
236        scratch: &mut Scratch<B>,
237    ) where
238        R: VecZnxToMut,
239        A: VecZnxToRef,
240    {
241        B::vec_znx_lsh_inplace_impl(self, basek, k, res, res_col, a, a_col, scratch);
242    }
243}
244
245impl<B> VecZnxRsh<B> for Module<B>
246where
247    B: Backend + VecZnxRshImpl<B>,
248{
249    fn vec_znx_rsh<R, A>(
250        &self,
251        basek: usize,
252        k: usize,
253        res: &mut R,
254        res_col: usize,
255        a: &A,
256        a_col: usize,
257        scratch: &mut Scratch<B>,
258    ) where
259        R: VecZnxToMut,
260        A: VecZnxToRef,
261    {
262        B::vec_znx_rsh_inplace_impl(self, basek, k, res, res_col, a, a_col, scratch);
263    }
264}
265
impl<B> VecZnxLshInplace<B> for Module<B>
where
    B: Backend + VecZnxLshInplaceImpl<B>,
{
    /// In-place left shift by `k` (base `basek`) on column `a_col` of `a`;
    /// forwards to the backend's `vec_znx_lsh_inplace_impl`.
    fn vec_znx_lsh_inplace<A>(&self, basek: usize, k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_lsh_inplace_impl(self, basek, k, a, a_col, scratch)
    }
}
277
impl<B> VecZnxRshInplace<B> for Module<B>
where
    B: Backend + VecZnxRshInplaceImpl<B>,
{
    /// In-place right shift by `k` (base `basek`) on column `a_col` of `a`;
    /// forwards to the backend's `vec_znx_rsh_inplace_impl`.
    fn vec_znx_rsh_inplace<A>(&self, basek: usize, k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_rsh_inplace_impl(self, basek, k, a, a_col, scratch)
    }
}
289
impl<B> VecZnxRotate for Module<B>
where
    B: Backend + VecZnxRotateImpl<B>,
{
    /// Rotation delegate; `k` is a signed amount (so both directions are
    /// representable). Forwards to the backend's `vec_znx_rotate_impl`.
    fn vec_znx_rotate<R, A>(&self, k: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_rotate_impl(self, k, res, res_col, a, a_col)
    }
}
302
impl<B> VecZnxRotateInplaceTmpBytes for Module<B>
where
    B: Backend + VecZnxRotateInplaceTmpBytesImpl<B>,
{
    /// Scratch-size query for the in-place rotation; forwards to the backend's
    /// `vec_znx_rotate_inplace_tmp_bytes_impl`.
    fn vec_znx_rotate_inplace_tmp_bytes(&self) -> usize {
        B::vec_znx_rotate_inplace_tmp_bytes_impl(self)
    }
}
311
impl<B> VecZnxRotateInplace<B> for Module<B>
where
    B: Backend + VecZnxRotateInplaceImpl<B>,
{
    /// In-place rotation by signed amount `k` on column `a_col` of `a`;
    /// forwards to the backend's `vec_znx_rotate_inplace_impl`.
    fn vec_znx_rotate_inplace<A>(&self, k: i64, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_rotate_inplace_impl(self, k, a, a_col, scratch)
    }
}
323
impl<B> VecZnxAutomorphism for Module<B>
where
    B: Backend + VecZnxAutomorphismImpl<B>,
{
    /// Automorphism delegate; `k` is the (signed) automorphism parameter,
    /// interpreted by the backend. Forwards to `vec_znx_automorphism_impl`.
    fn vec_znx_automorphism<R, A>(&self, k: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_automorphism_impl(self, k, res, res_col, a, a_col)
    }
}
336
impl<B> VecZnxAutomorphismInplaceTmpBytes for Module<B>
where
    B: Backend + VecZnxAutomorphismInplaceTmpBytesImpl<B>,
{
    /// Scratch-size query for the in-place automorphism; forwards to the
    /// backend's `vec_znx_automorphism_inplace_tmp_bytes_impl`.
    fn vec_znx_automorphism_inplace_tmp_bytes(&self) -> usize {
        B::vec_znx_automorphism_inplace_tmp_bytes_impl(self)
    }
}
345
impl<B> VecZnxAutomorphismInplace<B> for Module<B>
where
    B: Backend + VecZnxAutomorphismInplaceImpl<B>,
{
    /// In-place automorphism with parameter `k` on column `res_col` of `res`;
    /// forwards to the backend's `vec_znx_automorphism_inplace_impl`.
    fn vec_znx_automorphism_inplace<R>(&self, k: i64, res: &mut R, res_col: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxToMut,
    {
        B::vec_znx_automorphism_inplace_impl(self, k, res, res_col, scratch)
    }
}
357
impl<B> VecZnxMulXpMinusOne for Module<B>
where
    B: Backend + VecZnxMulXpMinusOneImpl<B>,
{
    /// Multiplication-by-(X^p - 1) delegate (per the trait name; exact ring
    /// semantics live in the backend). Forwards to
    /// `vec_znx_mul_xp_minus_one_impl` with signed exponent `p`.
    fn vec_znx_mul_xp_minus_one<R, A>(&self, p: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_mul_xp_minus_one_impl(self, p, res, res_col, a, a_col);
    }
}
370
impl<B> VecZnxMulXpMinusOneInplaceTmpBytes for Module<B>
where
    B: Backend + VecZnxMulXpMinusOneInplaceTmpBytesImpl<B>,
{
    /// Scratch-size query for the in-place (X^p - 1) multiplication; forwards
    /// to the backend's `vec_znx_mul_xp_minus_one_inplace_tmp_bytes_impl`.
    fn vec_znx_mul_xp_minus_one_inplace_tmp_bytes(&self) -> usize {
        B::vec_znx_mul_xp_minus_one_inplace_tmp_bytes_impl(self)
    }
}
379
impl<B> VecZnxMulXpMinusOneInplace<B> for Module<B>
where
    B: Backend + VecZnxMulXpMinusOneInplaceImpl<B>,
{
    /// In-place (X^p - 1) multiplication on column `res_col` of `res`;
    /// forwards to the backend's `vec_znx_mul_xp_minus_one_inplace_impl`.
    fn vec_znx_mul_xp_minus_one_inplace<R>(&self, p: i64, res: &mut R, res_col: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxToMut,
    {
        B::vec_znx_mul_xp_minus_one_inplace_impl(self, p, res, res_col, scratch);
    }
}
391
impl<B> VecZnxSplitRingTmpBytes for Module<B>
where
    B: Backend + VecZnxSplitRingTmpBytesImpl<B>,
{
    /// Scratch-size query for the ring-split operation; forwards to the
    /// backend's `vec_znx_split_ring_tmp_bytes_impl`.
    fn vec_znx_split_ring_tmp_bytes(&self) -> usize {
        B::vec_znx_split_ring_tmp_bytes_impl(self)
    }
}
400
impl<B> VecZnxSplitRing<B> for Module<B>
where
    B: Backend + VecZnxSplitRingImpl<B>,
{
    /// Ring-split delegate: one input column fanned out into a *slice* of
    /// destinations (`res: &mut [R]`). Forwards to `vec_znx_split_ring_impl`.
    fn vec_znx_split_ring<R, A>(&self, res: &mut [R], res_col: usize, a: &A, a_col: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_split_ring_impl(self, res, res_col, a, a_col, scratch)
    }
}
413
impl<B> VecZnxMergeRingsTmpBytes for Module<B>
where
    B: Backend + VecZnxMergeRingsTmpBytesImpl<B>,
{
    /// Scratch-size query for the ring-merge operation; forwards to the
    /// backend's `vec_znx_merge_rings_tmp_bytes_impl`.
    fn vec_znx_merge_rings_tmp_bytes(&self) -> usize {
        B::vec_znx_merge_rings_tmp_bytes_impl(self)
    }
}
422
impl<B> VecZnxMergeRings for Module<B>
where
    B: Backend + VecZnxMergeRingsImpl<B>,
{
    /// Ring-merge delegate: a *slice* of inputs (`a: &[A]`) combined into one
    /// destination — the inverse shape of [`VecZnxSplitRing`]. Forwards to
    /// `vec_znx_merge_rings_impl`.
    fn vec_znx_merge_rings<R, A>(&self, res: &mut R, res_col: usize, a: &[A], a_col: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_merge_rings_impl(self, res, res_col, a, a_col, scratch)
    }
}
435
impl<B> VecZnxSwitchRing for Module<B>
where
    B: Backend + VecZnxSwitchRingImpl<B>,
{
    /// Ring-switch delegate (no scratch required by this signature); forwards
    /// to the backend's `vec_znx_switch_ring_impl`.
    fn vec_znx_switch_ring<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_switch_ring_impl(self, res, res_col, a, a_col)
    }
}
448
impl<B> VecZnxCopy for Module<B>
where
    B: Backend + VecZnxCopyImpl<B>,
{
    /// Column-copy delegate (`a[a_col]` into `res[res_col]`); forwards to the
    /// backend's `vec_znx_copy_impl`.
    fn vec_znx_copy<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_copy_impl(self, res, res_col, a, a_col)
    }
}
461
impl<B> VecZnxFillUniform for Module<B>
where
    B: Backend + VecZnxFillUniformImpl<B>,
{
    /// Fills column `res_col` of `res` with uniform randomness drawn from
    /// `source` (mutated); forwards to the backend's
    /// `vec_znx_fill_uniform_impl`.
    fn vec_znx_fill_uniform<R>(&self, basek: usize, res: &mut R, res_col: usize, source: &mut Source)
    where
        R: VecZnxToMut,
    {
        B::vec_znx_fill_uniform_impl(self, basek, res, res_col, source);
    }
}
473
impl<B> VecZnxFillNormal for Module<B>
where
    B: Backend + VecZnxFillNormalImpl<B>,
{
    /// Fills column `res_col` of `res` with normal (Gaussian) samples from
    /// `source`, with standard deviation `sigma` and rejection/clamp `bound`
    /// (exact bound semantics are backend-defined). `basek`/`k` are precision
    /// parameters passed through unchanged. Forwards to
    /// `vec_znx_fill_normal_impl`.
    fn vec_znx_fill_normal<R>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        sigma: f64,
        bound: f64,
    ) where
        R: VecZnxToMut,
    {
        B::vec_znx_fill_normal_impl(self, basek, res, res_col, k, source, sigma, bound);
    }
}
493
impl<B> VecZnxAddNormal for Module<B>
where
    B: Backend + VecZnxAddNormalImpl<B>,
{
    /// Additive counterpart of [`VecZnxFillNormal`]: same parameters, but the
    /// backend accumulates the samples into `res` rather than overwriting it
    /// (per the trait name — behavior is backend-defined). Forwards to
    /// `vec_znx_add_normal_impl`.
    fn vec_znx_add_normal<R>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        sigma: f64,
        bound: f64,
    ) where
        R: VecZnxToMut,
    {
        B::vec_znx_add_normal_impl(self, basek, res, res_col, k, source, sigma, bound);
    }
}
512}