use crate::{
    api::{
        VecZnxAdd, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalar, VecZnxAddScalarInplace, VecZnxAutomorphism,
        VecZnxAutomorphismInplace, VecZnxAutomorphismInplaceTmpBytes, VecZnxCopy, VecZnxFillNormal, VecZnxFillUniform, VecZnxLsh,
        VecZnxLshInplace, VecZnxLshTmpBytes, VecZnxMergeRings, VecZnxMergeRingsTmpBytes, VecZnxMulXpMinusOne,
        VecZnxMulXpMinusOneInplace, VecZnxMulXpMinusOneInplaceTmpBytes, VecZnxNegate, VecZnxNegateInplace, VecZnxNormalize,
        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes,
        VecZnxRsh, VecZnxRshInplace, VecZnxRshTmpBytes, VecZnxSplitRing, VecZnxSplitRingTmpBytes, VecZnxSub, VecZnxSubInplace,
        VecZnxSubNegateInplace, VecZnxSubScalar, VecZnxSubScalarInplace, VecZnxSwitchRing,
    },
    layouts::{Backend, Module, ScalarZnxToRef, Scratch, VecZnxToMut, VecZnxToRef},
    oep::{
        VecZnxAddImpl, VecZnxAddInplaceImpl, VecZnxAddNormalImpl, VecZnxAddScalarImpl, VecZnxAddScalarInplaceImpl,
        VecZnxAutomorphismImpl, VecZnxAutomorphismInplaceImpl, VecZnxAutomorphismInplaceTmpBytesImpl, VecZnxCopyImpl,
        VecZnxFillNormalImpl, VecZnxFillUniformImpl, VecZnxLshImpl, VecZnxLshInplaceImpl, VecZnxLshTmpBytesImpl,
        VecZnxMergeRingsImpl, VecZnxMergeRingsTmpBytesImpl, VecZnxMulXpMinusOneImpl, VecZnxMulXpMinusOneInplaceImpl,
        VecZnxMulXpMinusOneInplaceTmpBytesImpl, VecZnxNegateImpl, VecZnxNegateInplaceImpl, VecZnxNormalizeImpl,
        VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl,
        VecZnxRotateInplaceTmpBytesImpl, VecZnxRshImpl, VecZnxRshInplaceImpl, VecZnxRshTmpBytesImpl, VecZnxSplitRingImpl,
        VecZnxSplitRingTmpBytesImpl, VecZnxSubImpl, VecZnxSubInplaceImpl, VecZnxSubNegateInplaceImpl, VecZnxSubScalarImpl,
        VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl,
    },
    source::Source,
};
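// Blanket impls that forward each public `VecZnx*` API trait on `Module<B>` to
// the backend's corresponding `*Impl` entry point in the `oep` layer, so a
// backend exposes the whole vec-ZnX API by implementing only the `*Impl`
// traits.

// Normalization: re-normalizes the base-2^k limb representation of a single
// column; `vec_znx_normalize_tmp_bytes` reports the scratch space required.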
impl<B> VecZnxNormalizeTmpBytes for Module<B>
where
    B: Backend + VecZnxNormalizeTmpBytesImpl<B>,
{
    fn vec_znx_normalize_tmp_bytes(&self) -> usize {
        B::vec_znx_normalize_tmp_bytes_impl(self)
    }
}

impl<B> VecZnxNormalize<B> for Module<B>
where
    B: Backend + VecZnxNormalizeImpl<B>,
{
    #[allow(clippy::too_many_arguments)]
    fn vec_znx_normalize<R, A>(
        &self,
        res_basek: usize,
        res: &mut R,
        res_col: usize,
        a_basek: usize,
        a: &A,
        a_col: usize,
        scratch: &mut Scratch<B>,
    ) where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_normalize_impl(self, res_basek, res, res_col, a_basek, a, a_col, scratch)
    }
}

impl<B> VecZnxNormalizeInplace<B> for Module<B>
where
    B: Backend + VecZnxNormalizeInplaceImpl<B>,
{
    fn vec_znx_normalize_inplace<A>(&self, base2k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_normalize_inplace_impl(self, base2k, a, a_col, scratch)
    }
}
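// Addition: out-of-place (`res = a + b`), accumulating (`res += a`), and the
// `_scalar` variants, which (per their signatures) add a `ScalarZnx` operand
// into a vector at the designated limb (`b_limb` / `res_limb`).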
impl<B> VecZnxAdd for Module<B>
where
    B: Backend + VecZnxAddImpl<B>,
{
    fn vec_znx_add<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
        C: VecZnxToRef,
    {
        B::vec_znx_add_impl(self, res, res_col, a, a_col, b, b_col)
    }
}

impl<B> VecZnxAddInplace for Module<B>
where
    B: Backend + VecZnxAddInplaceImpl<B>,
{
    fn vec_znx_add_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_add_inplace_impl(self, res, res_col, a, a_col)
    }
}

impl<B> VecZnxAddScalar for Module<B>
where
    B: Backend + VecZnxAddScalarImpl<B>,
{
    fn vec_znx_add_scalar<R, A, D>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &D, b_col: usize, b_limb: usize)
    where
        R: VecZnxToMut,
        A: ScalarZnxToRef,
        D: VecZnxToRef,
    {
        B::vec_znx_add_scalar_impl(self, res, res_col, a, a_col, b, b_col, b_limb)
    }
}

impl<B> VecZnxAddScalarInplace for Module<B>
where
    B: Backend + VecZnxAddScalarInplaceImpl<B>,
{
    fn vec_znx_add_scalar_inplace<R, A>(&self, res: &mut R, res_col: usize, res_limb: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: ScalarZnxToRef,
    {
        B::vec_znx_add_scalar_inplace_impl(self, res, res_col, res_limb, a, a_col)
    }
}
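// Subtraction: mirrors the addition family; by its naming,
// `vec_znx_sub_negate_inplace` computes the negated difference
// (`res = a - res`) rather than `res = res - a`.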
impl<B> VecZnxSub for Module<B>
where
    B: Backend + VecZnxSubImpl<B>,
{
    fn vec_znx_sub<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
        C: VecZnxToRef,
    {
        B::vec_znx_sub_impl(self, res, res_col, a, a_col, b, b_col)
    }
}

impl<B> VecZnxSubInplace for Module<B>
where
    B: Backend + VecZnxSubInplaceImpl<B>,
{
    fn vec_znx_sub_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_sub_inplace_impl(self, res, res_col, a, a_col)
    }
}

impl<B> VecZnxSubNegateInplace for Module<B>
where
    B: Backend + VecZnxSubNegateInplaceImpl<B>,
{
    fn vec_znx_sub_negate_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_sub_negate_inplace_impl(self, res, res_col, a, a_col)
    }
}

impl<B> VecZnxSubScalar for Module<B>
where
    B: Backend + VecZnxSubScalarImpl<B>,
{
    fn vec_znx_sub_scalar<R, A, D>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &D, b_col: usize, b_limb: usize)
    where
        R: VecZnxToMut,
        A: ScalarZnxToRef,
        D: VecZnxToRef,
    {
        B::vec_znx_sub_scalar_impl(self, res, res_col, a, a_col, b, b_col, b_limb)
    }
}

impl<B> VecZnxSubScalarInplace for Module<B>
where
    B: Backend + VecZnxSubScalarInplaceImpl<B>,
{
    fn vec_znx_sub_scalar_inplace<R, A>(&self, res: &mut R, res_col: usize, res_limb: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: ScalarZnxToRef,
    {
        B::vec_znx_sub_scalar_inplace_impl(self, res, res_col, res_limb, a, a_col)
    }
}
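// Negation of a single column, out-of-place and in-place.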
impl<B> VecZnxNegate for Module<B>
where
    B: Backend + VecZnxNegateImpl<B>,
{
    fn vec_znx_negate<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_negate_impl(self, res, res_col, a, a_col)
    }
}

impl<B> VecZnxNegateInplace for Module<B>
where
    B: Backend + VecZnxNegateInplaceImpl<B>,
{
    fn vec_znx_negate_inplace<A>(&self, a: &mut A, a_col: usize)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_negate_inplace_impl(self, a, a_col)
    }
}
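// Shifts: `lsh`/`rsh` shift a column's value left or right by `k` bits across
// its base-2^k limbs; the `*_tmp_bytes` methods report the scratch space the
// shift routines need.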
impl<B> VecZnxRshTmpBytes for Module<B>
where
    B: Backend + VecZnxRshTmpBytesImpl<B>,
{
    fn vec_znx_rsh_tmp_bytes(&self) -> usize {
        B::vec_znx_rsh_tmp_bytes_impl(self)
    }
}

impl<B> VecZnxLshTmpBytes for Module<B>
where
    B: Backend + VecZnxLshTmpBytesImpl<B>,
{
    fn vec_znx_lsh_tmp_bytes(&self) -> usize {
        B::vec_znx_lsh_tmp_bytes_impl(self)
    }
}

impl<B> VecZnxLsh<B> for Module<B>
where
    B: Backend + VecZnxLshImpl<B>,
{
    fn vec_znx_lsh<R, A>(
        &self,
        base2k: usize,
        k: usize,
        res: &mut R,
        res_col: usize,
        a: &A,
        a_col: usize,
        scratch: &mut Scratch<B>,
    ) where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_lsh_impl(self, base2k, k, res, res_col, a, a_col, scratch);
    }
}

impl<B> VecZnxRsh<B> for Module<B>
where
    B: Backend + VecZnxRshImpl<B>,
{
    fn vec_znx_rsh<R, A>(
        &self,
        base2k: usize,
        k: usize,
        res: &mut R,
        res_col: usize,
        a: &A,
        a_col: usize,
        scratch: &mut Scratch<B>,
    ) where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_rsh_impl(self, base2k, k, res, res_col, a, a_col, scratch);
    }
}

impl<B> VecZnxLshInplace<B> for Module<B>
where
    B: Backend + VecZnxLshInplaceImpl<B>,
{
    fn vec_znx_lsh_inplace<A>(&self, base2k: usize, k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_lsh_inplace_impl(self, base2k, k, a, a_col, scratch)
    }
}

impl<B> VecZnxRshInplace<B> for Module<B>
where
    B: Backend + VecZnxRshInplaceImpl<B>,
{
    fn vec_znx_rsh_inplace<A>(&self, base2k: usize, k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_rsh_inplace_impl(self, base2k, k, a, a_col, scratch)
    }
}
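// Rotation: multiplies a column by the monomial X^k; the in-place variant
// draws temporary space from `scratch`, sized by
// `vec_znx_rotate_inplace_tmp_bytes`.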
impl<B> VecZnxRotate for Module<B>
where
    B: Backend + VecZnxRotateImpl<B>,
{
    fn vec_znx_rotate<R, A>(&self, k: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_rotate_impl(self, k, res, res_col, a, a_col)
    }
}

impl<B> VecZnxRotateInplaceTmpBytes for Module<B>
where
    B: Backend + VecZnxRotateInplaceTmpBytesImpl<B>,
{
    fn vec_znx_rotate_inplace_tmp_bytes(&self) -> usize {
        B::vec_znx_rotate_inplace_tmp_bytes_impl(self)
    }
}

impl<B> VecZnxRotateInplace<B> for Module<B>
where
    B: Backend + VecZnxRotateInplaceImpl<B>,
{
    fn vec_znx_rotate_inplace<A>(&self, k: i64, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_rotate_inplace_impl(self, k, a, a_col, scratch)
    }
}
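// Automorphism: applies the ring map X -> X^k to a column.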
impl<B> VecZnxAutomorphism for Module<B>
where
    B: Backend + VecZnxAutomorphismImpl<B>,
{
    fn vec_znx_automorphism<R, A>(&self, k: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_automorphism_impl(self, k, res, res_col, a, a_col)
    }
}

impl<B> VecZnxAutomorphismInplaceTmpBytes for Module<B>
where
    B: Backend + VecZnxAutomorphismInplaceTmpBytesImpl<B>,
{
    fn vec_znx_automorphism_inplace_tmp_bytes(&self) -> usize {
        B::vec_znx_automorphism_inplace_tmp_bytes_impl(self)
    }
}

impl<B> VecZnxAutomorphismInplace<B> for Module<B>
where
    B: Backend + VecZnxAutomorphismInplaceImpl<B>,
{
    fn vec_znx_automorphism_inplace<R>(&self, k: i64, res: &mut R, res_col: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxToMut,
    {
        B::vec_znx_automorphism_inplace_impl(self, k, res, res_col, scratch)
    }
}
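// Multiplication of a column by the polynomial (X^p - 1).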
impl<B> VecZnxMulXpMinusOne for Module<B>
where
    B: Backend + VecZnxMulXpMinusOneImpl<B>,
{
    fn vec_znx_mul_xp_minus_one<R, A>(&self, p: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_mul_xp_minus_one_impl(self, p, res, res_col, a, a_col);
    }
}

impl<B> VecZnxMulXpMinusOneInplaceTmpBytes for Module<B>
where
    B: Backend + VecZnxMulXpMinusOneInplaceTmpBytesImpl<B>,
{
    fn vec_znx_mul_xp_minus_one_inplace_tmp_bytes(&self) -> usize {
        B::vec_znx_mul_xp_minus_one_inplace_tmp_bytes_impl(self)
    }
}

impl<B> VecZnxMulXpMinusOneInplace<B> for Module<B>
where
    B: Backend + VecZnxMulXpMinusOneInplaceImpl<B>,
{
    fn vec_znx_mul_xp_minus_one_inplace<R>(&self, p: i64, res: &mut R, res_col: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxToMut,
    {
        B::vec_znx_mul_xp_minus_one_inplace_impl(self, p, res, res_col, scratch);
    }
}
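// Ring conversions: as the slice signatures suggest, `split_ring` scatters one
// column of `a` across the smaller-ring outputs in `res`, `merge_rings`
// performs the inverse, and `switch_ring` converts a single column between
// ring degrees.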
impl<B> VecZnxSplitRingTmpBytes for Module<B>
where
    B: Backend + VecZnxSplitRingTmpBytesImpl<B>,
{
    fn vec_znx_split_ring_tmp_bytes(&self) -> usize {
        B::vec_znx_split_ring_tmp_bytes_impl(self)
    }
}

impl<B> VecZnxSplitRing<B> for Module<B>
where
    B: Backend + VecZnxSplitRingImpl<B>,
{
    fn vec_znx_split_ring<R, A>(&self, res: &mut [R], res_col: usize, a: &A, a_col: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_split_ring_impl(self, res, res_col, a, a_col, scratch)
    }
}

impl<B> VecZnxMergeRingsTmpBytes for Module<B>
where
    B: Backend + VecZnxMergeRingsTmpBytesImpl<B>,
{
    fn vec_znx_merge_rings_tmp_bytes(&self) -> usize {
        B::vec_znx_merge_rings_tmp_bytes_impl(self)
    }
}

impl<B> VecZnxMergeRings<B> for Module<B>
where
    B: Backend + VecZnxMergeRingsImpl<B>,
{
    fn vec_znx_merge_rings<R, A>(&self, res: &mut R, res_col: usize, a: &[A], a_col: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_merge_rings_impl(self, res, res_col, a, a_col, scratch)
    }
}

impl<B> VecZnxSwitchRing for Module<B>
where
    B: Backend + VecZnxSwitchRingImpl<B>,
{
    fn vec_znx_switch_ring<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_switch_ring_impl(self, res, res_col, a, a_col)
    }
}
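// Column-wise copy from `a` into `res`.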
impl<B> VecZnxCopy for Module<B>
where
    B: Backend + VecZnxCopyImpl<B>,
{
    fn vec_znx_copy<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_copy_impl(self, res, res_col, a, a_col)
    }
}
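// Sampling: `fill_uniform` fills a column with fresh uniform base-2^k limbs;
// `fill_normal` and `add_normal` respectively overwrite or accumulate Gaussian
// noise with standard deviation `sigma`, truncated at `bound`, at precision
// `k` (going by the parameter names; the exact scaling is backend-defined).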
impl<B> VecZnxFillUniform for Module<B>
where
    B: Backend + VecZnxFillUniformImpl<B>,
{
    fn vec_znx_fill_uniform<R>(&self, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
    where
        R: VecZnxToMut,
    {
        B::vec_znx_fill_uniform_impl(self, base2k, res, res_col, source);
    }
}

impl<B> VecZnxFillNormal for Module<B>
where
    B: Backend + VecZnxFillNormalImpl<B>,
{
    fn vec_znx_fill_normal<R>(
        &self,
        base2k: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        sigma: f64,
        bound: f64,
    ) where
        R: VecZnxToMut,
    {
        B::vec_znx_fill_normal_impl(self, base2k, res, res_col, k, source, sigma, bound);
    }
}

impl<B> VecZnxAddNormal for Module<B>
where
    B: Backend + VecZnxAddNormalImpl<B>,
{
    fn vec_znx_add_normal<R>(
        &self,
        base2k: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        sigma: f64,
        bound: f64,
    ) where
        R: VecZnxToMut,
    {
        B::vec_znx_add_normal_impl(self, base2k, res, res_col, k, source, sigma, bound);
    }
}
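
// A minimal usage sketch of the forwarded API. `FFT64`, `Module::new`, and
// `VecZnx::alloc` are hypothetical names used for illustration only; the
// trait-method calls themselves match the signatures above.
//
//     let module = Module::<FFT64>::new(n);          // hypothetical constructor
//     let mut res = VecZnx::alloc(n, cols, size);    // hypothetical constructor
//     module.vec_znx_add(&mut res, 0, &a, 0, &b, 0); // column 0: res = a + b
//     module.vec_znx_normalize_inplace(base2k, &mut res, 0, &mut scratch);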