use rand_distr::Distribution;

use crate::{
    api::{
        VecZnxBigAdd, VecZnxBigAddDistF64, VecZnxBigAddInplace, VecZnxBigAddNormal, VecZnxBigAddSmall, VecZnxBigAddSmallInplace,
        VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigAutomorphism, VecZnxBigAutomorphismInplace, VecZnxBigFillDistF64,
        VecZnxBigFillNormal, VecZnxBigFromBytes, VecZnxBigNegateInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
        VecZnxBigSub, VecZnxBigSubABInplace, VecZnxBigSubBAInplace, VecZnxBigSubSmallA, VecZnxBigSubSmallAInplace,
        VecZnxBigSubSmallB, VecZnxBigSubSmallBInplace,
    },
    layouts::{Backend, Module, Scratch, VecZnxBigOwned, VecZnxBigToMut, VecZnxBigToRef, VecZnxToMut, VecZnxToRef},
    oep::{
        VecZnxBigAddDistF64Impl, VecZnxBigAddImpl, VecZnxBigAddInplaceImpl, VecZnxBigAddNormalImpl, VecZnxBigAddSmallImpl,
        VecZnxBigAddSmallInplaceImpl, VecZnxBigAllocBytesImpl, VecZnxBigAllocImpl, VecZnxBigAutomorphismImpl,
        VecZnxBigAutomorphismInplaceImpl, VecZnxBigFillDistF64Impl, VecZnxBigFillNormalImpl, VecZnxBigFromBytesImpl,
        VecZnxBigNegateInplaceImpl, VecZnxBigNormalizeImpl, VecZnxBigNormalizeTmpBytesImpl, VecZnxBigSubABInplaceImpl,
        VecZnxBigSubBAInplaceImpl, VecZnxBigSubImpl, VecZnxBigSubSmallAImpl, VecZnxBigSubSmallAInplaceImpl,
        VecZnxBigSubSmallBImpl, VecZnxBigSubSmallBInplaceImpl,
    },
    source::Source,
};

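// Blanket implementations of the `VecZnxBig*` frontend traits (from `api`) for
// `Module<B>`. Every method simply forwards to the matching `*Impl` trait from
// `oep`, so any backend `B` that provides those implementations automatically
// exposes the whole big-vector API through `Module<B>`.
//
// Usage sketch (illustrative only; `module`, `a`, `b`, `res`, `n`, `cols`, `size`,
// `basek` and `scratch` are hypothetical values supplied by the caller, and the
// backend is assumed to implement the required `*Impl` traits):
//
//     let mut big = module.vec_znx_big_alloc(n, cols, size);
//     module.vec_znx_big_add(&mut big, 0, &a, 0, &b, 0); // big[0] = a[0] + b[0]
//     module.vec_znx_big_normalize(basek, &mut res, 0, &big, 0, scratch);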
impl<B> VecZnxBigAlloc<B> for Module<B>
where
    B: Backend + VecZnxBigAllocImpl<B>,
{
    fn vec_znx_big_alloc(&self, n: usize, cols: usize, size: usize) -> VecZnxBigOwned<B> {
        B::vec_znx_big_alloc_impl(n, cols, size)
    }
}

impl<B> VecZnxBigFromBytes<B> for Module<B>
where
    B: Backend + VecZnxBigFromBytesImpl<B>,
{
    fn vec_znx_big_from_bytes(&self, n: usize, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxBigOwned<B> {
        B::vec_znx_big_from_bytes_impl(n, cols, size, bytes)
    }
}

impl<B> VecZnxBigAllocBytes for Module<B>
where
    B: Backend + VecZnxBigAllocBytesImpl<B>,
{
    fn vec_znx_big_alloc_bytes(&self, n: usize, cols: usize, size: usize) -> usize {
        B::vec_znx_big_alloc_bytes_impl(n, cols, size)
    }
}

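// Noise-sampling operations: the `add_*` variants below accumulate sampled values
// onto the existing contents of column `res_col`, while the `fill_*` variants
// overwrite it. `*_dist_f64` samples from an arbitrary `Distribution<f64>`,
// `*_normal` from a Gaussian with standard deviation `sigma`; `bound` caps the
// magnitude of the samples (exact semantics are defined by the backend `*Impl`).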
impl<B> VecZnxBigAddDistF64<B> for Module<B>
where
    B: Backend + VecZnxBigAddDistF64Impl<B>,
{
    fn vec_znx_big_add_dist_f64<R: VecZnxBigToMut<B>, D: Distribution<f64>>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
    ) {
        B::add_dist_f64_impl(self, basek, res, res_col, k, source, dist, bound);
    }
}

impl<B> VecZnxBigAddNormal<B> for Module<B>
where
    B: Backend + VecZnxBigAddNormalImpl<B>,
{
    fn vec_znx_big_add_normal<R: VecZnxBigToMut<B>>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        sigma: f64,
        bound: f64,
    ) {
        B::add_normal_impl(self, basek, res, res_col, k, source, sigma, bound);
    }
}

impl<B> VecZnxBigFillDistF64<B> for Module<B>
where
    B: Backend + VecZnxBigFillDistF64Impl<B>,
{
    fn vec_znx_big_fill_dist_f64<R: VecZnxBigToMut<B>, D: Distribution<f64>>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
    ) {
        B::fill_dist_f64_impl(self, basek, res, res_col, k, source, dist, bound);
    }
}

impl<B> VecZnxBigFillNormal<B> for Module<B>
where
    B: Backend + VecZnxBigFillNormalImpl<B>,
{
    fn vec_znx_big_fill_normal<R: VecZnxBigToMut<B>>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        sigma: f64,
        bound: f64,
    ) {
        B::fill_normal_impl(self, basek, res, res_col, k, source, sigma, bound);
    }
}

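// Coefficient-wise arithmetic between big-vector columns. Out-of-place variants
// write `res[res_col]` from `a[a_col]` (and `b[b_col]`); `_inplace` variants use
// `res[res_col]` as both destination and operand.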
impl<B> VecZnxBigAdd<B> for Module<B>
where
    B: Backend + VecZnxBigAddImpl<B>,
{
    fn vec_znx_big_add<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
        C: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_add_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

impl<B> VecZnxBigAddInplace<B> for Module<B>
where
    B: Backend + VecZnxBigAddInplaceImpl<B>,
{
    fn vec_znx_big_add_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_add_inplace_impl(self, res, res_col, a, a_col);
    }
}

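// `*_small*` variants mix operand types: operands bounded by `VecZnxToRef` are
// plain (small) `VecZnx` columns, while `VecZnxBigToRef`/`VecZnxBigToMut` operands
// are big-vector columns.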
impl<B> VecZnxBigAddSmall<B> for Module<B>
where
    B: Backend + VecZnxBigAddSmallImpl<B>,
{
    fn vec_znx_big_add_small<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
        C: VecZnxToRef,
    {
        B::vec_znx_big_add_small_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

impl<B> VecZnxBigAddSmallInplace<B> for Module<B>
where
    B: Backend + VecZnxBigAddSmallInplaceImpl<B>,
{
    fn vec_znx_big_add_small_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxToRef,
    {
        B::vec_znx_big_add_small_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxBigSub<B> for Module<B>
where
    B: Backend + VecZnxBigSubImpl<B>,
{
    fn vec_znx_big_sub<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
        C: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_sub_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

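// In-place subtraction comes in two flavours: the `ab`/`ba` suffixes distinguish
// the two possible operand orders when `res[res_col]` also serves as an operand
// (see the backend `*Impl` traits for the exact convention).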
impl<B> VecZnxBigSubABInplace<B> for Module<B>
where
    B: Backend + VecZnxBigSubABInplaceImpl<B>,
{
    fn vec_znx_big_sub_ab_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_sub_ab_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxBigSubBAInplace<B> for Module<B>
where
    B: Backend + VecZnxBigSubBAInplaceImpl<B>,
{
    fn vec_znx_big_sub_ba_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_sub_ba_inplace_impl(self, res, res_col, a, a_col);
    }
}

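// Mixed-type subtraction: in `sub_small_a` the `a` operand is a small `VecZnx` and
// `b` is a big vector, while `sub_small_b` is the mirror case (big `a`, small `b`),
// as reflected in the trait bounds below.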
impl<B> VecZnxBigSubSmallA<B> for Module<B>
where
    B: Backend + VecZnxBigSubSmallAImpl<B>,
{
    fn vec_znx_big_sub_small_a<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxToRef,
        C: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_sub_small_a_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

impl<B> VecZnxBigSubSmallAInplace<B> for Module<B>
where
    B: Backend + VecZnxBigSubSmallAInplaceImpl<B>,
{
    fn vec_znx_big_sub_small_a_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxToRef,
    {
        B::vec_znx_big_sub_small_a_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxBigSubSmallB<B> for Module<B>
where
    B: Backend + VecZnxBigSubSmallBImpl<B>,
{
    fn vec_znx_big_sub_small_b<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
        C: VecZnxToRef,
    {
        B::vec_znx_big_sub_small_b_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

impl<B> VecZnxBigSubSmallBInplace<B> for Module<B>
where
    B: Backend + VecZnxBigSubSmallBInplaceImpl<B>,
{
    fn vec_znx_big_sub_small_b_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxToRef,
    {
        B::vec_znx_big_sub_small_b_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxBigNegateInplace<B> for Module<B>
where
    B: Backend + VecZnxBigNegateInplaceImpl<B>,
{
    fn vec_znx_big_negate_inplace<A>(&self, a: &mut A, a_col: usize)
    where
        A: VecZnxBigToMut<B>,
    {
        B::vec_znx_big_negate_inplace_impl(self, a, a_col);
    }
}

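// Normalization carries a big-vector column back into a small `VecZnx` column with
// respect to the `basek` decomposition. It needs temporary space:
// `vec_znx_big_normalize_tmp_bytes(n)` reports how many scratch bytes the backend
// requires for ring dimension `n`, and the `Scratch` passed to
// `vec_znx_big_normalize` should provide at least that much.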
impl<B> VecZnxBigNormalizeTmpBytes for Module<B>
where
    B: Backend + VecZnxBigNormalizeTmpBytesImpl<B>,
{
    fn vec_znx_big_normalize_tmp_bytes(&self, n: usize) -> usize {
        B::vec_znx_big_normalize_tmp_bytes_impl(self, n)
    }
}

impl<B> VecZnxBigNormalize<B> for Module<B>
where
    B: Backend + VecZnxBigNormalizeImpl<B>,
{
    fn vec_znx_big_normalize<R, A>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        a: &A,
        a_col: usize,
        scratch: &mut Scratch<B>,
    ) where
        R: VecZnxToMut,
        A: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_normalize_impl(self, basek, res, res_col, a, a_col, scratch);
    }
}

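// Galois automorphisms of the ring, parameterized by `k` (conventionally the map
// X -> X^k), in out-of-place and in-place form.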
impl<B> VecZnxBigAutomorphism<B> for Module<B>
where
    B: Backend + VecZnxBigAutomorphismImpl<B>,
{
    fn vec_znx_big_automorphism<R, A>(&self, k: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_automorphism_impl(self, k, res, res_col, a, a_col);
    }
}

impl<B> VecZnxBigAutomorphismInplace<B> for Module<B>
where
    B: Backend + VecZnxBigAutomorphismInplaceImpl<B>,
{
    fn vec_znx_big_automorphism_inplace<A>(&self, k: i64, a: &mut A, a_col: usize)
    where
        A: VecZnxBigToMut<B>,
    {
        B::vec_znx_big_automorphism_inplace_impl(self, k, a, a_col);
    }
}