#![cfg_attr(rustfmt, rustfmt::skip)]
use core::cmp::Ordering;
use core::ops::{Add, AddAssign, Mul, MulAssign};
use subtle::ConditionallySelectable;

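// Debug-only check that `$x` fits within `$n` bits (i.e. `$x >> $n == 0`).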
macro_rules! debug_assert_bits {
    ($x: expr, $n: expr) => {
        debug_assert!($x >> $n == 0);
    }
}

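/// Field element for the secp256k1 base field, i.e. an integer modulo
/// p = 2^256 - 0x1000003D1, stored as ten 26-bit limbs: the represented
/// value is sum(i = 0..9, n[i] * 2^(26*i)). `magnitude` is an upper bound on
/// how far the limbs may exceed their nominal 26-bit range (it grows with
/// additions, negations and `mul_int`, and is reset to 1 by the
/// normalization and multiplication routines), and `normalized` records
/// whether the element is known to be fully reduced modulo p.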
#[derive(Debug, Clone, Copy)]
pub struct Field {
    n: [u32; 10],
    magnitude: u32,
    normalized: bool,
}

impl Field {
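    /// Builds a field element directly from ten 26-bit limbs, most
    /// significant limb first. No masking or reduction is performed; the
    /// result is tagged with magnitude 1 and left unnormalized.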
    pub const fn new_raw(
        d9: u32, d8: u32, d7: u32, d6: u32, d5: u32, d4: u32, d3: u32, d2: u32, d1: u32, d0: u32,
    ) -> Self {
        Self {
            n: [d0, d1, d2, d3, d4, d5, d6, d7, d8, d9],
            magnitude: 1,
            normalized: false,
        }
    }

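    /// Builds a field element from eight 32-bit words, most significant word
    /// first, repacking them into 26-bit limbs. The caller must supply a
    /// value that is already below the field prime p, because the result is
    /// marked normalized without any reduction check.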
    pub const fn new(
        d7: u32, d6: u32, d5: u32, d4: u32, d3: u32, d2: u32, d1: u32, d0: u32
    ) -> Self {
        Self {
            n: [
                d0 & 0x3ffffff,
                (d0 >> 26) | ((d1 & 0xfffff) << 6),
                (d1 >> 20) | ((d2 & 0x3fff) << 12),
                (d2 >> 14) | ((d3 & 0xff) << 18),
                (d3 >> 8) | ((d4 & 0x3) << 24),
                (d4 >> 2) & 0x3ffffff,
                (d4 >> 28) | ((d5 & 0x3fffff) << 4),
                (d5 >> 22) | ((d6 & 0xffff) << 10),
                (d6 >> 16) | ((d7 & 0x3ff) << 16),
                (d7 >> 10),
            ],
            magnitude: 1,
            normalized: true,
        }
    }

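    /// Builds a normalized field element from a small integer (at most 26 bits).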
    pub fn from_int(a: u32) -> Field {
        let mut f = Field::default();
        f.set_int(a);
        f
    }

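    /// Checks the representation invariants implied by `magnitude` and
    /// `normalized`: limb bounds, the overall magnitude limit, and, for
    /// normalized elements, that the value is below the field prime p.
    /// Used by the `debug_assert!` calls throughout this module.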
    fn verify(&self) -> bool {
        let m = if self.normalized { 1 } else { 2 } * self.magnitude;
        let mut r = true;
        r = r && (self.n[0] <= 0x3ffffff * m);
        r = r && (self.n[1] <= 0x3ffffff * m);
        r = r && (self.n[2] <= 0x3ffffff * m);
        r = r && (self.n[3] <= 0x3ffffff * m);
        r = r && (self.n[4] <= 0x3ffffff * m);
        r = r && (self.n[5] <= 0x3ffffff * m);
        r = r && (self.n[6] <= 0x3ffffff * m);
        r = r && (self.n[7] <= 0x3ffffff * m);
        r = r && (self.n[8] <= 0x3ffffff * m);
        r = r && (self.n[9] <= 0x03fffff * m);
        r = r && (self.magnitude <= 32);
        if self.normalized {
            r = r && self.magnitude <= 1;
            if r && (self.n[9] == 0x03fffff) {
                let mid = self.n[8] & self.n[7] & self.n[6] & self.n[5] & self.n[4] & self.n[3] & self.n[2];
                if mid == 0x3ffffff {
                    r = r && ((self.n[1] + 0x40 + ((self.n[0] + 0x3d1) >> 26)) <= 0x3ffffff)
                }
            }
        }
        r
    }

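    /// Fully reduces the element to its canonical representative modulo
    /// p = 2^256 - 0x1000003D1, leaving it with magnitude 1 and marking it
    /// as normalized.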
    pub fn normalize(&mut self) {
        let mut t0 = self.n[0];
        let mut t1 = self.n[1];
        let mut t2 = self.n[2];
        let mut t3 = self.n[3];
        let mut t4 = self.n[4];
        let mut t5 = self.n[5];
        let mut t6 = self.n[6];
        let mut t7 = self.n[7];
        let mut t8 = self.n[8];
        let mut t9 = self.n[9];

        let mut m: u32;
        let mut x = t9 >> 22;
        t9 &= 0x03fffff;

        t0 += x * 0x3d1; t1 += x << 6;
        t1 += t0 >> 26; t0 &= 0x3ffffff;
        t2 += t1 >> 26; t1 &= 0x3ffffff;
        t3 += t2 >> 26; t2 &= 0x3ffffff; m = t2;
        t4 += t3 >> 26; t3 &= 0x3ffffff; m &= t3;
        t5 += t4 >> 26; t4 &= 0x3ffffff; m &= t4;
        t6 += t5 >> 26; t5 &= 0x3ffffff; m &= t5;
        t7 += t6 >> 26; t6 &= 0x3ffffff; m &= t6;
        t8 += t7 >> 26; t7 &= 0x3ffffff; m &= t7;
        t9 += t8 >> 26; t8 &= 0x3ffffff; m &= t8;

        debug_assert!(t9 >> 23 == 0);

        x = (t9 >> 22) | (if t9 == 0x03fffff { 1 } else { 0 } & if m == 0x3ffffff { 1 } else { 0 } & (if (t1 + 0x40 + ((t0 + 0x3d1) >> 26)) > 0x3ffffff { 1 } else { 0 }));

        t0 += x * 0x3d1; t1 += x << 6;
        t1 += t0 >> 26; t0 &= 0x3ffffff;
        t2 += t1 >> 26; t1 &= 0x3ffffff;
        t3 += t2 >> 26; t2 &= 0x3ffffff;
        t4 += t3 >> 26; t3 &= 0x3ffffff;
        t5 += t4 >> 26; t4 &= 0x3ffffff;
        t6 += t5 >> 26; t5 &= 0x3ffffff;
        t7 += t6 >> 26; t6 &= 0x3ffffff;
        t8 += t7 >> 26; t7 &= 0x3ffffff;
        t9 += t8 >> 26; t8 &= 0x3ffffff;

        debug_assert!(t9 >> 22 == x);

        t9 &= 0x03fffff;

        self.n = [t0, t1, t2, t3, t4, t5, t6, t7, t8, t9];
        self.magnitude = 1;
        self.normalized = true;
        debug_assert!(self.verify());
    }

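    /// Weakly normalizes the element: folds the top-limb overflow back into
    /// the low limbs so the magnitude drops to 1, without guaranteeing the
    /// canonical (fully reduced) representative.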
    pub fn normalize_weak(&mut self) {
        let mut t0 = self.n[0];
        let mut t1 = self.n[1];
        let mut t2 = self.n[2];
        let mut t3 = self.n[3];
        let mut t4 = self.n[4];
        let mut t5 = self.n[5];
        let mut t6 = self.n[6];
        let mut t7 = self.n[7];
        let mut t8 = self.n[8];
        let mut t9 = self.n[9];

        let x = t9 >> 22; t9 &= 0x03fffff;

        t0 += x * 0x3d1; t1 += x << 6;
        t1 += t0 >> 26; t0 &= 0x3ffffff;
        t2 += t1 >> 26; t1 &= 0x3ffffff;
        t3 += t2 >> 26; t2 &= 0x3ffffff;
        t4 += t3 >> 26; t3 &= 0x3ffffff;
        t5 += t4 >> 26; t4 &= 0x3ffffff;
        t6 += t5 >> 26; t5 &= 0x3ffffff;
        t7 += t6 >> 26; t6 &= 0x3ffffff;
        t8 += t7 >> 26; t7 &= 0x3ffffff;
        t9 += t8 >> 26; t8 &= 0x3ffffff;

        debug_assert!(t9 >> 23 == 0);

        self.n = [t0, t1, t2, t3, t4, t5, t6, t7, t8, t9];
        self.magnitude = 1;
        debug_assert!(self.verify());
    }

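    /// Variable-time variant of `normalize`: the final conditional reduction
    /// is only executed when it is actually needed.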
    pub fn normalize_var(&mut self) {
        let mut t0 = self.n[0];
        let mut t1 = self.n[1];
        let mut t2 = self.n[2];
        let mut t3 = self.n[3];
        let mut t4 = self.n[4];
        let mut t5 = self.n[5];
        let mut t6 = self.n[6];
        let mut t7 = self.n[7];
        let mut t8 = self.n[8];
        let mut t9 = self.n[9];

        let mut m: u32;
        let mut x = t9 >> 22; t9 &= 0x03fffff;

        t0 += x * 0x3d1; t1 += x << 6;
        t1 += t0 >> 26; t0 &= 0x3ffffff;
        t2 += t1 >> 26; t1 &= 0x3ffffff;
        t3 += t2 >> 26; t2 &= 0x3ffffff; m = t2;
        t4 += t3 >> 26; t3 &= 0x3ffffff; m &= t3;
        t5 += t4 >> 26; t4 &= 0x3ffffff; m &= t4;
        t6 += t5 >> 26; t5 &= 0x3ffffff; m &= t5;
        t7 += t6 >> 26; t6 &= 0x3ffffff; m &= t6;
        t8 += t7 >> 26; t7 &= 0x3ffffff; m &= t7;
        t9 += t8 >> 26; t8 &= 0x3ffffff; m &= t8;

        debug_assert!(t9 >> 23 == 0);

        x = (t9 >> 22) | (if t9 == 0x03fffff { 1 } else { 0 } & if m == 0x3ffffff { 1 } else { 0 } & (if (t1 + 0x40 + ((t0 + 0x3d1) >> 26)) > 0x3ffffff { 1 } else { 0 }));

        if x > 0 {
            t0 += 0x3d1; t1 += x << 6;
            t1 += t0 >> 26; t0 &= 0x3ffffff;
            t2 += t1 >> 26; t1 &= 0x3ffffff;
            t3 += t2 >> 26; t2 &= 0x3ffffff;
            t4 += t3 >> 26; t3 &= 0x3ffffff;
            t5 += t4 >> 26; t4 &= 0x3ffffff;
            t6 += t5 >> 26; t5 &= 0x3ffffff;
            t7 += t6 >> 26; t6 &= 0x3ffffff;
            t8 += t7 >> 26; t7 &= 0x3ffffff;
            t9 += t8 >> 26; t8 &= 0x3ffffff;

            debug_assert!(t9 >> 22 == x);

            t9 &= 0x03fffff;
        }

        self.n = [t0, t1, t2, t3, t4, t5, t6, t7, t8, t9];
        self.magnitude = 1;
        self.normalized = true;
        debug_assert!(self.verify());
    }

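    /// Returns whether the element reduces to zero modulo p, without
    /// modifying it.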
    pub fn normalizes_to_zero(&self) -> bool {
        let mut t0 = self.n[0];
        let mut t1 = self.n[1];
        let mut t2 = self.n[2];
        let mut t3 = self.n[3];
        let mut t4 = self.n[4];
        let mut t5 = self.n[5];
        let mut t6 = self.n[6];
        let mut t7 = self.n[7];
        let mut t8 = self.n[8];
        let mut t9 = self.n[9];

        let mut z0: u32; let mut z1: u32;

        let x = t9 >> 22; t9 &= 0x03fffff;

        t0 += x * 0x3d1; t1 += x << 6;
        t1 += t0 >> 26; t0 &= 0x3ffffff; z0 = t0; z1 = t0 ^ 0x3d0;
        t2 += t1 >> 26; t1 &= 0x3ffffff; z0 |= t1; z1 &= t1 ^ 0x40;
        t3 += t2 >> 26; t2 &= 0x3ffffff; z0 |= t2; z1 &= t2;
        t4 += t3 >> 26; t3 &= 0x3ffffff; z0 |= t3; z1 &= t3;
        t5 += t4 >> 26; t4 &= 0x3ffffff; z0 |= t4; z1 &= t4;
        t6 += t5 >> 26; t5 &= 0x3ffffff; z0 |= t5; z1 &= t5;
        t7 += t6 >> 26; t6 &= 0x3ffffff; z0 |= t6; z1 &= t6;
        t8 += t7 >> 26; t7 &= 0x3ffffff; z0 |= t7; z1 &= t7;
        t9 += t8 >> 26; t8 &= 0x3ffffff; z0 |= t8; z1 &= t8;
        z0 |= t9; z1 &= t9 ^ 0x3c00000;

        debug_assert!(t9 >> 23 == 0);

        return z0 == 0 || z1 == 0x3ffffff;
    }

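    /// Variable-time variant of `normalizes_to_zero` that returns early when
    /// the low limb already rules out both 0 and p.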
    pub fn normalizes_to_zero_var(&self) -> bool {
        let mut t0: u32; let mut t1: u32;
        let mut t2: u32; let mut t3: u32;
        let mut t4: u32; let mut t5: u32;
        let mut t6: u32; let mut t7: u32;
        let mut t8: u32; let mut t9: u32;
        let mut z0: u32; let mut z1: u32;
        let x: u32;

        t0 = self.n[0];
        t9 = self.n[9];

        x = t9 >> 22;
        t0 += x * 0x3d1;

        z0 = t0 & 0x3ffffff;
        z1 = z0 ^ 0x3d0;

        if z0 != 0 && z1 != 0x3ffffff {
            return false;
        }

        t1 = self.n[1];
        t2 = self.n[2];
        t3 = self.n[3];
        t4 = self.n[4];
        t5 = self.n[5];
        t6 = self.n[6];
        t7 = self.n[7];
        t8 = self.n[8];

        t9 &= 0x03fffff;
        t1 += x << 6;

        t1 += t0 >> 26;
        t2 += t1 >> 26; t1 &= 0x3ffffff; z0 |= t1; z1 &= t1 ^ 0x40;
        t3 += t2 >> 26; t2 &= 0x3ffffff; z0 |= t2; z1 &= t2;
        t4 += t3 >> 26; t3 &= 0x3ffffff; z0 |= t3; z1 &= t3;
        t5 += t4 >> 26; t4 &= 0x3ffffff; z0 |= t4; z1 &= t4;
        t6 += t5 >> 26; t5 &= 0x3ffffff; z0 |= t5; z1 &= t5;
        t7 += t6 >> 26; t6 &= 0x3ffffff; z0 |= t6; z1 &= t6;
        t8 += t7 >> 26; t7 &= 0x3ffffff; z0 |= t7; z1 &= t7;
        t9 += t8 >> 26; t8 &= 0x3ffffff; z0 |= t8; z1 &= t8;
        z0 |= t9; z1 &= t9 ^ 0x3c00000;

        debug_assert!(t9 >> 23 == 0);

        return z0 == 0 || z1 == 0x3ffffff;
    }

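    /// Sets the element to the given small integer (at most 26 bits).
    /// The result is normalized.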
    pub fn set_int(&mut self, a: u32) {
        self.n = [a, 0, 0, 0, 0, 0, 0, 0, 0, 0];
        self.magnitude = 1;
        self.normalized = true;
        debug_assert!(self.verify());
    }

    pub fn is_zero(&self) -> bool {
        debug_assert!(self.normalized);
        debug_assert!(self.verify());
        return (self.n[0] | self.n[1] | self.n[2] | self.n[3] | self.n[4] | self.n[5] | self.n[6] | self.n[7] | self.n[8] | self.n[9]) == 0;
    }

    pub fn is_odd(&self) -> bool {
        debug_assert!(self.normalized);
        debug_assert!(self.verify());
        return self.n[0] & 1 != 0;
    }

    pub fn clear(&mut self) {
        self.magnitude = 0;
        self.normalized = true;
        self.n = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    }

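    /// Sets the element from a 32-byte big-endian encoding. Returns `true`
    /// and marks the element normalized if the encoded value is below the
    /// field prime p; returns `false` otherwise.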
    #[must_use]
    pub fn set_b32(&mut self, a: &[u8; 32]) -> bool {
        self.n[0] = (a[31] as u32) | ((a[30] as u32) << 8) | ((a[29] as u32) << 16) | (((a[28] & 0x3) as u32) << 24);
        self.n[1] = (((a[28] >> 2) & 0x3f) as u32) | ((a[27] as u32) << 6) | ((a[26] as u32) << 14) | (((a[25] & 0xf) as u32) << 22);
        self.n[2] = (((a[25] >> 4) & 0xf) as u32) | ((a[24] as u32) << 4) | ((a[23] as u32) << 12) | (((a[22] as u32) & 0x3f) << 20);
        self.n[3] = (((a[22] >> 6) & 0x3) as u32) | ((a[21] as u32) << 2) | ((a[20] as u32) << 10) | ((a[19] as u32) << 18);
        self.n[4] = (a[18] as u32) | ((a[17] as u32) << 8) | ((a[16] as u32) << 16) | (((a[15] & 0x3) as u32) << 24);
        self.n[5] = (((a[15] >> 2) & 0x3f) as u32) | ((a[14] as u32) << 6) | ((a[13] as u32) << 14) | (((a[12] as u32) & 0xf) << 22);
        self.n[6] = (((a[12] >> 4) & 0xf) as u32) | ((a[11] as u32) << 4) | ((a[10] as u32) << 12) | (((a[9] & 0x3f) as u32) << 20);
        self.n[7] = (((a[9] >> 6) & 0x3) as u32) | ((a[8] as u32) << 2) | ((a[7] as u32) << 10) | ((a[6] as u32) << 18);
        self.n[8] = (a[5] as u32) | ((a[4] as u32) << 8) | ((a[3] as u32) << 16) | (((a[2] & 0x3) as u32) << 24);
        self.n[9] = (((a[2] >> 2) & 0x3f) as u32) | ((a[1] as u32) << 6) | ((a[0] as u32) << 14);

        if self.n[9] == 0x03fffff && (self.n[8] & self.n[7] & self.n[6] & self.n[5] & self.n[4] & self.n[3] & self.n[2]) == 0x3ffffff && (self.n[1] + 0x40 + ((self.n[0] + 0x3d1) >> 26)) > 0x3ffffff {
            return false;
        }

        self.magnitude = 1;
        self.normalized = true;
        debug_assert!(self.verify());

        return true;
    }

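    /// Writes the 32-byte big-endian encoding of a normalized element into `r`.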
    pub fn fill_b32(&self, r: &mut [u8; 32]) {
        debug_assert!(self.normalized);
        debug_assert!(self.verify());

        r[0] = ((self.n[9] >> 14) & 0xff) as u8;
        r[1] = ((self.n[9] >> 6) & 0xff) as u8;
        r[2] = (((self.n[9] & 0x3f) << 2) | ((self.n[8] >> 24) & 0x3)) as u8;
        r[3] = ((self.n[8] >> 16) & 0xff) as u8;
        r[4] = ((self.n[8] >> 8) & 0xff) as u8;
        r[5] = (self.n[8] & 0xff) as u8;
        r[6] = ((self.n[7] >> 18) & 0xff) as u8;
        r[7] = ((self.n[7] >> 10) & 0xff) as u8;
        r[8] = ((self.n[7] >> 2) & 0xff) as u8;
        r[9] = (((self.n[7] & 0x3) << 6) | ((self.n[6] >> 20) & 0x3f)) as u8;
        r[10] = ((self.n[6] >> 12) & 0xff) as u8;
        r[11] = ((self.n[6] >> 4) & 0xff) as u8;
        r[12] = (((self.n[6] & 0xf) << 4) | ((self.n[5] >> 22) & 0xf)) as u8;
        r[13] = ((self.n[5] >> 14) & 0xff) as u8;
        r[14] = ((self.n[5] >> 6) & 0xff) as u8;
        r[15] = (((self.n[5] & 0x3f) << 2) | ((self.n[4] >> 24) & 0x3)) as u8;
        r[16] = ((self.n[4] >> 16) & 0xff) as u8;
        r[17] = ((self.n[4] >> 8) & 0xff) as u8;
        r[18] = (self.n[4] & 0xff) as u8;
        r[19] = ((self.n[3] >> 18) & 0xff) as u8;
        r[20] = ((self.n[3] >> 10) & 0xff) as u8;
        r[21] = ((self.n[3] >> 2) & 0xff) as u8;
        r[22] = (((self.n[3] & 0x3) << 6) | ((self.n[2] >> 20) & 0x3f)) as u8;
        r[23] = ((self.n[2] >> 12) & 0xff) as u8;
        r[24] = ((self.n[2] >> 4) & 0xff) as u8;
        r[25] = (((self.n[2] & 0xf) << 4) | ((self.n[1] >> 22) & 0xf)) as u8;
        r[26] = ((self.n[1] >> 14) & 0xff) as u8;
        r[27] = ((self.n[1] >> 6) & 0xff) as u8;
        r[28] = (((self.n[1] & 0x3f) << 2) | ((self.n[0] >> 24) & 0x3)) as u8;
        r[29] = ((self.n[0] >> 16) & 0xff) as u8;
        r[30] = ((self.n[0] >> 8) & 0xff) as u8;
        r[31] = (self.n[0] & 0xff) as u8;
    }

    pub fn b32(&self) -> [u8; 32] {
        let mut r = [0u8; 32];
        self.fill_b32(&mut r);
        r
    }

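    /// Sets `self` to the negation of `other`, where `m` is an upper bound
    /// on `other`'s magnitude. The result has magnitude `m + 1` and is not
    /// normalized.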
    pub fn neg_in_place(&mut self, other: &Field, m: u32) {
        debug_assert!(other.magnitude <= m);
        debug_assert!(other.verify());

        self.n[0] = 0x3fffc2f * 2 * (m + 1) - other.n[0];
        self.n[1] = 0x3ffffbf * 2 * (m + 1) - other.n[1];
        self.n[2] = 0x3ffffff * 2 * (m + 1) - other.n[2];
        self.n[3] = 0x3ffffff * 2 * (m + 1) - other.n[3];
        self.n[4] = 0x3ffffff * 2 * (m + 1) - other.n[4];
        self.n[5] = 0x3ffffff * 2 * (m + 1) - other.n[5];
        self.n[6] = 0x3ffffff * 2 * (m + 1) - other.n[6];
        self.n[7] = 0x3ffffff * 2 * (m + 1) - other.n[7];
        self.n[8] = 0x3ffffff * 2 * (m + 1) - other.n[8];
        self.n[9] = 0x03fffff * 2 * (m + 1) - other.n[9];

        self.magnitude = m + 1;
        self.normalized = false;
        debug_assert!(self.verify());
    }

    pub fn neg(&self, m: u32) -> Field {
        let mut ret = Field::default();
        ret.neg_in_place(self, m);
        ret
    }

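    /// Multiplies the element by a small integer constant. The magnitude is
    /// multiplied by the same constant and the result is not normalized.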
    pub fn mul_int(&mut self, a: u32) {
        self.n[0] *= a;
        self.n[1] *= a;
        self.n[2] *= a;
        self.n[3] *= a;
        self.n[4] *= a;
        self.n[5] *= a;
        self.n[6] *= a;
        self.n[7] *= a;
        self.n[8] *= a;
        self.n[9] *= a;

        self.magnitude *= a;
        self.normalized = false;
        debug_assert!(self.verify());
    }

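    /// Compares two normalized elements as 256-bit integers, in variable time.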
    pub fn cmp_var(&self, other: &Field) -> Ordering {
        debug_assert!(self.normalized);
        debug_assert!(other.normalized);
        debug_assert!(self.verify());
        debug_assert!(other.verify());

        for i in (0..10).rev() {
            if self.n[i] > other.n[i] {
                return Ordering::Greater;
            }
            if self.n[i] < other.n[i] {
                return Ordering::Less;
            }
        }
        return Ordering::Equal;
    }

    pub fn eq_var(&self, other: &Field) -> bool {
        let mut na = self.neg(1);
        na += other;
        return na.normalizes_to_zero_var();
    }

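    /// Schoolbook 10x26 multiplication with interleaved reduction modulo
    /// p = 2^256 - 0x1000003D1. Since 2^256 = 0x1000003D1 (mod p), overflow
    /// beyond 2^256 is folded back in using 0x1000003D10 = 16 * 0x1000003D1,
    /// split into its low and high 26-bit halves R0 = 0x3d10 and R1 = 0x400.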
    fn mul_inner(&mut self, a: &Field, b: &Field) {
        const M: u64 = 0x3ffffff;
        const R0: u64 = 0x3d10;
        const R1: u64 = 0x400;

        let (mut c, mut d): (u64, u64);
        let (v0, v1, v2, v3, v4, v5, v6, v7, v8): (u64, u64, u64, u64, u64, u64, u64, u64, u64);
        let (t9, t1, t0, t2, t3, t4, t5, t6, t7): (u32, u32, u32, u32, u32, u32, u32, u32, u32);

        debug_assert_bits!(a.n[0], 30);
        debug_assert_bits!(a.n[1], 30);
        debug_assert_bits!(a.n[2], 30);
        debug_assert_bits!(a.n[3], 30);
        debug_assert_bits!(a.n[4], 30);
        debug_assert_bits!(a.n[5], 30);
        debug_assert_bits!(a.n[6], 30);
        debug_assert_bits!(a.n[7], 30);
        debug_assert_bits!(a.n[8], 30);
        debug_assert_bits!(a.n[9], 26);
        debug_assert_bits!(b.n[0], 30);
        debug_assert_bits!(b.n[1], 30);
        debug_assert_bits!(b.n[2], 30);
        debug_assert_bits!(b.n[3], 30);
        debug_assert_bits!(b.n[4], 30);
        debug_assert_bits!(b.n[5], 30);
        debug_assert_bits!(b.n[6], 30);
        debug_assert_bits!(b.n[7], 30);
        debug_assert_bits!(b.n[8], 30);
        debug_assert_bits!(b.n[9], 26);

        d = ((a.n[0] as u64) * (b.n[9] as u64)).wrapping_add(
            (a.n[1] as u64) * (b.n[8] as u64)).wrapping_add(
            (a.n[2] as u64) * (b.n[7] as u64)).wrapping_add(
            (a.n[3] as u64) * (b.n[6] as u64)).wrapping_add(
            (a.n[4] as u64) * (b.n[5] as u64)).wrapping_add(
            (a.n[5] as u64) * (b.n[4] as u64)).wrapping_add(
            (a.n[6] as u64) * (b.n[3] as u64)).wrapping_add(
            (a.n[7] as u64) * (b.n[2] as u64)).wrapping_add(
            (a.n[8] as u64) * (b.n[1] as u64)).wrapping_add(
            (a.n[9] as u64) * (b.n[0] as u64));
        t9 = (d & M) as u32; d >>= 26;
        debug_assert_bits!(t9, 26);
        debug_assert_bits!(d, 38);
        c = (a.n[0] as u64) * (b.n[0] as u64);
        debug_assert_bits!(c, 60);
        d = d.wrapping_add(
            (a.n[1] as u64) * (b.n[9] as u64)).wrapping_add(
            (a.n[2] as u64) * (b.n[8] as u64)).wrapping_add(
            (a.n[3] as u64) * (b.n[7] as u64)).wrapping_add(
            (a.n[4] as u64) * (b.n[6] as u64)).wrapping_add(
            (a.n[5] as u64) * (b.n[5] as u64)).wrapping_add(
            (a.n[6] as u64) * (b.n[4] as u64)).wrapping_add(
            (a.n[7] as u64) * (b.n[3] as u64)).wrapping_add(
            (a.n[8] as u64) * (b.n[2] as u64)).wrapping_add(
            (a.n[9] as u64) * (b.n[1] as u64));
        debug_assert_bits!(d, 63);
        v0 = d & M; d >>= 26; c += v0 * R0;
        debug_assert_bits!(v0, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 61);
        t0 = (c & M) as u32; c >>= 26; c += v0 * R1;

        debug_assert_bits!(t0, 26);
        debug_assert_bits!(c, 37);
        c = c.wrapping_add(
            (a.n[0] as u64) * (b.n[1] as u64)).wrapping_add(
            (a.n[1] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 62);
        d = d.wrapping_add(
            (a.n[2] as u64) * (b.n[9] as u64)).wrapping_add(
            (a.n[3] as u64) * (b.n[8] as u64)).wrapping_add(
            (a.n[4] as u64) * (b.n[7] as u64)).wrapping_add(
            (a.n[5] as u64) * (b.n[6] as u64)).wrapping_add(
            (a.n[6] as u64) * (b.n[5] as u64)).wrapping_add(
            (a.n[7] as u64) * (b.n[4] as u64)).wrapping_add(
            (a.n[8] as u64) * (b.n[3] as u64)).wrapping_add(
            (a.n[9] as u64) * (b.n[2] as u64));
        debug_assert_bits!(d, 63);
        v1 = d & M; d >>= 26; c += v1 * R0;
        debug_assert_bits!(v1, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        t1 = (c & M) as u32; c >>= 26; c += v1 * R1;
        debug_assert_bits!(t1, 26);
        debug_assert_bits!(c, 38);
        c = c.wrapping_add(
            (a.n[0] as u64) * (b.n[2] as u64)).wrapping_add(
            (a.n[1] as u64) * (b.n[1] as u64)).wrapping_add(
            (a.n[2] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 62);
        d = d.wrapping_add(
            (a.n[3] as u64) * (b.n[9] as u64)).wrapping_add(
            (a.n[4] as u64) * (b.n[8] as u64)).wrapping_add(
            (a.n[5] as u64) * (b.n[7] as u64)).wrapping_add(
            (a.n[6] as u64) * (b.n[6] as u64)).wrapping_add(
            (a.n[7] as u64) * (b.n[5] as u64)).wrapping_add(
            (a.n[8] as u64) * (b.n[4] as u64)).wrapping_add(
            (a.n[9] as u64) * (b.n[3] as u64));
        debug_assert_bits!(d, 63);
        v2 = d & M; d >>= 26; c += v2 * R0;
        debug_assert_bits!(v2, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        t2 = (c & M) as u32; c >>= 26; c += v2 * R1;
        debug_assert_bits!(t2, 26);
        debug_assert_bits!(c, 38);
        c = c.wrapping_add(
            (a.n[0] as u64) * (b.n[3] as u64)).wrapping_add(
            (a.n[1] as u64) * (b.n[2] as u64)).wrapping_add(
            (a.n[2] as u64) * (b.n[1] as u64)).wrapping_add(
            (a.n[3] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 63);
        d = d.wrapping_add(
            (a.n[4] as u64) * (b.n[9] as u64)).wrapping_add(
            (a.n[5] as u64) * (b.n[8] as u64)).wrapping_add(
            (a.n[6] as u64) * (b.n[7] as u64)).wrapping_add(
            (a.n[7] as u64) * (b.n[6] as u64)).wrapping_add(
            (a.n[8] as u64) * (b.n[5] as u64)).wrapping_add(
            (a.n[9] as u64) * (b.n[4] as u64));
        debug_assert_bits!(d, 63);
        v3 = d & M; d >>= 26; c += v3 * R0;
        debug_assert_bits!(v3, 26);
        debug_assert_bits!(d, 37);
        t3 = (c & M) as u32; c >>= 26; c += v3 * R1;
        debug_assert_bits!(t3, 26);
        debug_assert_bits!(c, 39);
        c = c.wrapping_add(
            (a.n[0] as u64) * (b.n[4] as u64)).wrapping_add(
            (a.n[1] as u64) * (b.n[3] as u64)).wrapping_add(
            (a.n[2] as u64) * (b.n[2] as u64)).wrapping_add(
            (a.n[3] as u64) * (b.n[1] as u64)).wrapping_add(
            (a.n[4] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 63);
        d = d.wrapping_add(
            (a.n[5] as u64) * (b.n[9] as u64)).wrapping_add(
            (a.n[6] as u64) * (b.n[8] as u64)).wrapping_add(
            (a.n[7] as u64) * (b.n[7] as u64)).wrapping_add(
            (a.n[8] as u64) * (b.n[6] as u64)).wrapping_add(
            (a.n[9] as u64) * (b.n[5] as u64));
        debug_assert_bits!(d, 62);
        v4 = d & M; d >>= 26; c += v4 * R0;
        debug_assert_bits!(v4, 26);
        debug_assert_bits!(d, 36);
        t4 = (c & M) as u32; c >>= 26; c += v4 * R1;
        debug_assert_bits!(t4, 26);
        debug_assert_bits!(c, 39);
        c = c.wrapping_add(
            (a.n[0] as u64) * (b.n[5] as u64)).wrapping_add(
            (a.n[1] as u64) * (b.n[4] as u64)).wrapping_add(
            (a.n[2] as u64) * (b.n[3] as u64)).wrapping_add(
            (a.n[3] as u64) * (b.n[2] as u64)).wrapping_add(
            (a.n[4] as u64) * (b.n[1] as u64)).wrapping_add(
            (a.n[5] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 63);
        d = d.wrapping_add(
            (a.n[6] as u64) * (b.n[9] as u64)).wrapping_add(
            (a.n[7] as u64) * (b.n[8] as u64)).wrapping_add(
            (a.n[8] as u64) * (b.n[7] as u64)).wrapping_add(
            (a.n[9] as u64) * (b.n[6] as u64));
        debug_assert_bits!(d, 62);
        v5 = d & M; d >>= 26; c += v5 * R0;
        debug_assert_bits!(v5, 26);
        debug_assert_bits!(d, 36);
        t5 = (c & M) as u32; c >>= 26; c += v5 * R1;
        debug_assert_bits!(t5, 26);
        debug_assert_bits!(c, 39);
        c = c.wrapping_add(
            (a.n[0] as u64) * (b.n[6] as u64)).wrapping_add(
            (a.n[1] as u64) * (b.n[5] as u64)).wrapping_add(
            (a.n[2] as u64) * (b.n[4] as u64)).wrapping_add(
            (a.n[3] as u64) * (b.n[3] as u64)).wrapping_add(
            (a.n[4] as u64) * (b.n[2] as u64)).wrapping_add(
            (a.n[5] as u64) * (b.n[1] as u64)).wrapping_add(
            (a.n[6] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 63);
        d = d.wrapping_add(
            (a.n[7] as u64) * (b.n[9] as u64)).wrapping_add(
            (a.n[8] as u64) * (b.n[8] as u64)).wrapping_add(
            (a.n[9] as u64) * (b.n[7] as u64));
        debug_assert_bits!(d, 61);
        v6 = d & M; d >>= 26; c += v6 * R0;
        debug_assert_bits!(v6, 26);
        debug_assert_bits!(d, 35);
        t6 = (c & M) as u32; c >>= 26; c += v6 * R1;
        debug_assert_bits!(t6, 26);
        debug_assert_bits!(c, 39);
        c = c.wrapping_add(
            (a.n[0] as u64) * (b.n[7] as u64)).wrapping_add(
            (a.n[1] as u64) * (b.n[6] as u64)).wrapping_add(
            (a.n[2] as u64) * (b.n[5] as u64)).wrapping_add(
            (a.n[3] as u64) * (b.n[4] as u64)).wrapping_add(
            (a.n[4] as u64) * (b.n[3] as u64)).wrapping_add(
            (a.n[5] as u64) * (b.n[2] as u64)).wrapping_add(
            (a.n[6] as u64) * (b.n[1] as u64)).wrapping_add(
            (a.n[7] as u64) * (b.n[0] as u64));
        debug_assert!(c <= 0x8000007c00000007);
        d = d.wrapping_add(
            (a.n[8] as u64) * (b.n[9] as u64)).wrapping_add(
            (a.n[9] as u64) * (b.n[8] as u64));
        debug_assert_bits!(d, 58);
        v7 = d & M; d >>= 26; c += v7 * R0;
        debug_assert_bits!(v7, 26);
        debug_assert_bits!(d, 32);
        debug_assert!(c <= 0x800001703fffc2f7);
        t7 = (c & M) as u32; c >>= 26; c += v7 * R1;
        debug_assert_bits!(t7, 26);
        debug_assert_bits!(c, 38);
        c = c.wrapping_add(
            (a.n[0] as u64) * (b.n[8] as u64)).wrapping_add(
            (a.n[1] as u64) * (b.n[7] as u64)).wrapping_add(
            (a.n[2] as u64) * (b.n[6] as u64)).wrapping_add(
            (a.n[3] as u64) * (b.n[5] as u64)).wrapping_add(
            (a.n[4] as u64) * (b.n[4] as u64)).wrapping_add(
            (a.n[5] as u64) * (b.n[3] as u64)).wrapping_add(
            (a.n[6] as u64) * (b.n[2] as u64)).wrapping_add(
            (a.n[7] as u64) * (b.n[1] as u64)).wrapping_add(
            (a.n[8] as u64) * (b.n[0] as u64));
        debug_assert!(c <= 0x9000007b80000008);
        d = d.wrapping_add((a.n[9] as u64) * (b.n[9] as u64));
        debug_assert_bits!(d, 57);
        v8 = d & M; d >>= 26; c += v8 * R0;
        debug_assert_bits!(v8, 26);
        debug_assert_bits!(d, 31);
        debug_assert!(c <= 0x9000016fbfffc2f8);
        self.n[3] = t3;
        debug_assert_bits!(self.n[3], 26);
        self.n[4] = t4;
        debug_assert_bits!(self.n[4], 26);
        self.n[5] = t5;
        debug_assert_bits!(self.n[5], 26);
        self.n[6] = t6;
        debug_assert_bits!(self.n[6], 26);
        self.n[7] = t7;
        debug_assert_bits!(self.n[7], 26);
        self.n[8] = (c & M) as u32; c >>= 26; c += v8 * R1;
        debug_assert_bits!(self.n[8], 26);
        debug_assert_bits!(c, 39);
        c += d * R0 + t9 as u64;
        debug_assert_bits!(c, 45);
        self.n[9] = (c & (M >> 4)) as u32; c >>= 22; c += d * (R1 << 4);
        debug_assert_bits!(self.n[9], 22);
        debug_assert_bits!(c, 46);
        d = c * (R0 >> 4) + t0 as u64;
        debug_assert_bits!(d, 56);
        self.n[0] = (d & M) as u32; d >>= 26;
        debug_assert_bits!(self.n[0], 26);
        debug_assert_bits!(d, 30);
        d += c * (R1 >> 4) + t1 as u64;
        debug_assert_bits!(d, 53);
        debug_assert!(d <= 0x10000003ffffbf);
        self.n[1] = (d & M) as u32; d >>= 26;
        debug_assert_bits!(self.n[1], 26);
        debug_assert_bits!(d, 27);
        debug_assert!(d <= 0x4000000);
        d += t2 as u64;
        debug_assert_bits!(d, 27);
        self.n[2] = d as u32;
        debug_assert_bits!(self.n[2], 27);
    }

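    /// Squaring analogue of `mul_inner`: the symmetric cross terms
    /// `a[i] * a[j]` (i != j) are computed once and doubled, roughly halving
    /// the number of multiplications.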
    fn sqr_inner(&mut self, a: &Field) {
        const M: u64 = 0x3ffffff;
        const R0: u64 = 0x3d10;
        const R1: u64 = 0x400;

        let (mut c, mut d): (u64, u64);
        let (v0, v1, v2, v3, v4, v5, v6, v7, v8): (u64, u64, u64, u64, u64, u64, u64, u64, u64);
        let (t9, t0, t1, t2, t3, t4, t5, t6, t7): (u32, u32, u32, u32, u32, u32, u32, u32, u32);

        debug_assert_bits!(a.n[0], 30);
        debug_assert_bits!(a.n[1], 30);
        debug_assert_bits!(a.n[2], 30);
        debug_assert_bits!(a.n[3], 30);
        debug_assert_bits!(a.n[4], 30);
        debug_assert_bits!(a.n[5], 30);
        debug_assert_bits!(a.n[6], 30);
        debug_assert_bits!(a.n[7], 30);
        debug_assert_bits!(a.n[8], 30);
        debug_assert_bits!(a.n[9], 26);

        d = (((a.n[0]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[7] as u64)).wrapping_add(
            ((a.n[3]*2) as u64) * (a.n[6] as u64)).wrapping_add(
            ((a.n[4]*2) as u64) * (a.n[5] as u64));
        t9 = (d & M) as u32; d >>= 26;
        debug_assert_bits!(t9, 26);
        debug_assert_bits!(d, 38);
        c = (a.n[0] as u64) * (a.n[0] as u64);
        debug_assert_bits!(c, 60);
        d = d.wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            ((a.n[3]*2) as u64) * (a.n[7] as u64)).wrapping_add(
            ((a.n[4]*2) as u64) * (a.n[6] as u64)).wrapping_add(
            (a.n[5] as u64) * (a.n[5] as u64));
        debug_assert_bits!(d, 63);
        v0 = d & M; d >>= 26; c += v0 * R0;
        debug_assert_bits!(v0, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 61);
        t0 = (c & M) as u32; c >>= 26; c += v0 * R1;
        debug_assert_bits!(t0, 26);
        debug_assert_bits!(c, 37);
        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[1] as u64));
        debug_assert_bits!(c, 62);
        d = d.wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[3]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            ((a.n[4]*2) as u64) * (a.n[7] as u64)).wrapping_add(
            ((a.n[5]*2) as u64) * (a.n[6] as u64));
        debug_assert_bits!(d, 63);
        v1 = d & M; d >>= 26; c += v1 * R0;
        debug_assert_bits!(v1, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        t1 = (c & M) as u32; c >>= 26; c += v1 * R1;
        debug_assert_bits!(t1, 26);
        debug_assert_bits!(c, 38);
        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[2] as u64)).wrapping_add(
            (a.n[1] as u64) * (a.n[1] as u64));
        debug_assert_bits!(c, 62);
        d = d.wrapping_add(
            ((a.n[3]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[4]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            ((a.n[5]*2) as u64) * (a.n[7] as u64)).wrapping_add(
            (a.n[6] as u64) * (a.n[6] as u64));
        debug_assert_bits!(d, 63);
        v2 = d & M; d >>= 26; c += v2 * R0;
        debug_assert_bits!(v2, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        t2 = (c & M) as u32; c >>= 26; c += v2 * R1;
        debug_assert_bits!(t2, 26);
        debug_assert_bits!(c, 38);
        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[3] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[2] as u64));
        debug_assert_bits!(c, 63);
        d = d.wrapping_add(
            ((a.n[4]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[5]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            ((a.n[6]*2) as u64) * (a.n[7] as u64));
        debug_assert_bits!(d, 63);
        v3 = d & M; d >>= 26; c += v3 * R0;
        debug_assert_bits!(v3, 26);
        debug_assert_bits!(d, 37);
        t3 = (c & M) as u32; c >>= 26; c += v3 * R1;
        debug_assert_bits!(t3, 26);
        debug_assert_bits!(c, 39);
        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[4] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[3] as u64)).wrapping_add(
            (a.n[2] as u64) * (a.n[2] as u64));
        debug_assert_bits!(c, 63);
        d = d.wrapping_add(
            ((a.n[5]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[6]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            (a.n[7] as u64) * (a.n[7] as u64));
        debug_assert_bits!(d, 62);
        v4 = d & M; d >>= 26; c += v4 * R0;
        debug_assert_bits!(v4, 26);
        debug_assert_bits!(d, 36);
        t4 = (c & M) as u32; c >>= 26; c += v4 * R1;
        debug_assert_bits!(t4, 26);
        debug_assert_bits!(c, 39);
        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[5] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[4] as u64)).wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[3] as u64));
        debug_assert_bits!(c, 63);
        d = d.wrapping_add(
            ((a.n[6]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[7]*2) as u64) * (a.n[8] as u64));
        debug_assert_bits!(d, 62);
        v5 = d & M; d >>= 26; c += v5 * R0;
        debug_assert_bits!(v5, 26);
        debug_assert_bits!(d, 36);
        t5 = (c & M) as u32; c >>= 26; c += v5 * R1;
        debug_assert_bits!(t5, 26);
        debug_assert_bits!(c, 39);
        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[6] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[5] as u64)).wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[4] as u64)).wrapping_add(
            (a.n[3] as u64) * (a.n[3] as u64));
        debug_assert_bits!(c, 63);
        d = d.wrapping_add(
            ((a.n[7]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            (a.n[8] as u64) * (a.n[8] as u64));
        debug_assert_bits!(d, 61);
        v6 = d & M; d >>= 26; c += v6 * R0;
        debug_assert_bits!(v6, 26);
        debug_assert_bits!(d, 35);
        t6 = (c & M) as u32; c >>= 26; c += v6 * R1;
        debug_assert_bits!(t6, 26);
        debug_assert_bits!(c, 39);
        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[7] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[6] as u64)).wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[5] as u64)).wrapping_add(
            ((a.n[3]*2) as u64) * (a.n[4] as u64));
        debug_assert!(c <= 0x8000007C00000007);
        d = d.wrapping_add(
            ((a.n[8]*2) as u64) * (a.n[9] as u64));
        debug_assert_bits!(d, 58);
        v7 = d & M; d >>= 26; c += v7 * R0;
        debug_assert_bits!(v7, 26);
        debug_assert_bits!(d, 32);
        debug_assert!(c <= 0x800001703FFFC2F7);
        t7 = (c & M) as u32; c >>= 26; c += v7 * R1;
        debug_assert_bits!(t7, 26);
        debug_assert_bits!(c, 38);
        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[7] as u64)).wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[6] as u64)).wrapping_add(
            ((a.n[3]*2) as u64) * (a.n[5] as u64)).wrapping_add(
            (a.n[4] as u64) * (a.n[4] as u64));
        debug_assert!(c <= 0x9000007B80000008);
        d = d.wrapping_add(
            (a.n[9] as u64) * (a.n[9] as u64));
        debug_assert_bits!(d, 57);
        v8 = d & M; d >>= 26; c += v8 * R0;
        debug_assert_bits!(v8, 26);
        debug_assert_bits!(d, 31);
        debug_assert!(c <= 0x9000016FBFFFC2F8);
        self.n[3] = t3;
        debug_assert_bits!(self.n[3], 26);
        self.n[4] = t4;
        debug_assert_bits!(self.n[4], 26);
        self.n[5] = t5;
        debug_assert_bits!(self.n[5], 26);
        self.n[6] = t6;
        debug_assert_bits!(self.n[6], 26);
        self.n[7] = t7;
        debug_assert_bits!(self.n[7], 26);
        self.n[8] = (c & M) as u32; c >>= 26; c += v8 * R1;
        debug_assert_bits!(self.n[8], 26);
        debug_assert_bits!(c, 39);
        c += d * R0 + t9 as u64;
        debug_assert_bits!(c, 45);
        self.n[9] = (c & (M >> 4)) as u32; c >>= 22; c += d * (R1 << 4);
        debug_assert_bits!(self.n[9], 22);
        debug_assert_bits!(c, 46);
        d = c * (R0 >> 4) + t0 as u64;
        debug_assert_bits!(d, 56);
        self.n[0] = (d & M) as u32; d >>= 26;
        debug_assert_bits!(self.n[0], 26);
        debug_assert_bits!(d, 30);
        d += c * (R1 >> 4) + t1 as u64;
        debug_assert_bits!(d, 53);
        debug_assert!(d <= 0x10000003FFFFBF);
        self.n[1] = (d & M) as u32; d >>= 26;
        debug_assert_bits!(self.n[1], 26);
        debug_assert_bits!(d, 27);
        debug_assert!(d <= 0x4000000);
        d += t2 as u64;
        debug_assert_bits!(d, 27);
        self.n[2] = d as u32;
        debug_assert_bits!(self.n[2], 27);
    }

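    /// Sets `self` to the product `a * b`. Both inputs must have magnitude
    /// at most 8; the result has magnitude 1 and is not normalized.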
    pub fn mul_in_place(&mut self, a: &Field, b: &Field) {
        debug_assert!(a.magnitude <= 8);
        debug_assert!(b.magnitude <= 8);
        debug_assert!(a.verify());
        debug_assert!(b.verify());
        self.mul_inner(a, b);
        self.magnitude = 1;
        self.normalized = false;
        debug_assert!(self.verify());
    }

    pub fn sqr_in_place(&mut self, a: &Field) {
        debug_assert!(a.magnitude <= 8);
        debug_assert!(a.verify());
        self.sqr_inner(a);
        self.magnitude = 1;
        self.normalized = false;
        debug_assert!(self.verify());
    }

    pub fn sqr(&self) -> Field {
        let mut ret = Field::default();
        ret.sqr_in_place(self);
        ret
    }

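    /// Computes a square root candidate by raising the element to
    /// (p + 1) / 4 (valid because p = 3 mod 4) with a fixed addition chain,
    /// and returns it together with a flag telling whether the input really
    /// is a quadratic residue (i.e. whether the candidate squares back to
    /// the input). The input must have magnitude at most 8.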
    pub fn sqrt(&self) -> (Field, bool) {
        let mut x2 = self.sqr();
        x2 *= self;

        let mut x3 = x2.sqr();
        x3 *= self;

        let mut x6 = x3.clone();
        for _ in 0..3 {
            x6 = x6.sqr();
        }
        x6 *= &x3;

        let mut x9 = x6.clone();
        for _ in 0..3 {
            x9 = x9.sqr();
        }
        x9 *= &x3;

        let mut x11 = x9.clone();
        for _ in 0..2 {
            x11 = x11.sqr();
        }
        x11 *= &x2;

        let mut x22 = x11.clone();
        for _ in 0..11 {
            x22 = x22.sqr();
        }
        x22 *= &x11;

        let mut x44 = x22.clone();
        for _ in 0..22 {
            x44 = x44.sqr();
        }
        x44 *= &x22;

        let mut x88 = x44.clone();
        for _ in 0..44 {
            x88 = x88.sqr();
        }
        x88 *= &x44;

        let mut x176 = x88.clone();
        for _ in 0..88 {
            x176 = x176.sqr();
        }
        x176 *= &x88;

        let mut x220 = x176.clone();
        for _ in 0..44 {
            x220 = x220.sqr();
        }
        x220 *= &x44;

        let mut x223 = x220.clone();
        for _ in 0..3 {
            x223 = x223.sqr();
        }
        x223 *= &x3;

        let mut t1 = x223;
        for _ in 0..23 {
            t1 = t1.sqr();
        }
        t1 *= &x22;
        for _ in 0..6 {
            t1 = t1.sqr();
        }
        t1 *= &x2;
        t1 = t1.sqr();
        let r = t1.sqr();

        t1 = r.sqr();
        (r, &t1 == self)
    }

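    /// Computes the modular inverse by raising the element to p - 2
    /// (Fermat's little theorem) with a fixed addition chain. The inverse of
    /// zero is zero. The input must have magnitude at most 8.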
    pub fn inv(&self) -> Field {
        let mut x2 = self.sqr();
        x2 *= self;

        let mut x3 = x2.sqr();
        x3 *= self;

        let mut x6 = x3.clone();
        for _ in 0..3 {
            x6 = x6.sqr();
        }
        x6 *= &x3;

        let mut x9 = x6.clone();
        for _ in 0..3 {
            x9 = x9.sqr();
        }
        x9 *= &x3;

        let mut x11 = x9.clone();
        for _ in 0..2 {
            x11 = x11.sqr();
        }
        x11 *= &x2;

        let mut x22 = x11.clone();
        for _ in 0..11 {
            x22 = x22.sqr();
        }
        x22 *= &x11;

        let mut x44 = x22.clone();
        for _ in 0..22 {
            x44 = x44.sqr();
        }
        x44 *= &x22;

        let mut x88 = x44.clone();
        for _ in 0..44 {
            x88 = x88.sqr();
        }
        x88 *= &x44;

        let mut x176 = x88.clone();
        for _ in 0..88 {
            x176 = x176.sqr();
        }
        x176 *= &x88;

        let mut x220 = x176.clone();
        for _ in 0..44 {
            x220 = x220.sqr();
        }
        x220 *= &x44;

        let mut x223 = x220.clone();
        for _ in 0..3 {
            x223 = x223.sqr();
        }
        x223 *= &x3;

        let mut t1 = x223.clone();
        for _ in 0..23 {
            t1 = t1.sqr();
        }
        t1 *= &x22;
        for _ in 0..5 {
            t1 = t1.sqr();
        }
        t1 *= self;
        for _ in 0..3 {
            t1 = t1.sqr();
        }
        t1 *= &x2;
        for _ in 0..2 {
            t1 = t1.sqr();
        }
        let r = self * &t1;
        r
    }

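    /// Variable-time inversion; currently implemented as a plain call to
    /// `inv`.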
    pub fn inv_var(&self) -> Field {
        self.inv()
    }

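    /// Returns whether the element is a quadratic residue, by attempting a
    /// square root.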
    pub fn is_quad_var(&self) -> bool {
        let (_, ret) = self.sqrt();
        ret
    }

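    /// If `flag` is true, assigns `other` to `self`; otherwise leaves `self`
    /// unchanged.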
    pub fn cmov(&mut self, other: &Field, flag: bool) {
        self.n[0] = if flag { other.n[0] } else { self.n[0] };
        self.n[1] = if flag { other.n[1] } else { self.n[1] };
        self.n[2] = if flag { other.n[2] } else { self.n[2] };
        self.n[3] = if flag { other.n[3] } else { self.n[3] };
        self.n[4] = if flag { other.n[4] } else { self.n[4] };
        self.n[5] = if flag { other.n[5] } else { self.n[5] };
        self.n[6] = if flag { other.n[6] } else { self.n[6] };
        self.n[7] = if flag { other.n[7] } else { self.n[7] };
        self.n[8] = if flag { other.n[8] } else { self.n[8] };
        self.n[9] = if flag { other.n[9] } else { self.n[9] };
        self.magnitude = if flag { other.magnitude } else { self.magnitude };
        self.normalized = if flag { other.normalized } else { self.normalized };
    }
}

impl Default for Field {
    fn default() -> Field {
        Self {
            n: [0u32; 10],
            magnitude: 0,
            normalized: true,
        }
    }
}

impl Add<Field> for Field {
    type Output = Field;
    fn add(self, other: Field) -> Field {
        let mut ret = self.clone();
        ret.add_assign(&other);
        ret
    }
}

impl<'a, 'b> Add<&'a Field> for &'b Field {
    type Output = Field;
    fn add(self, other: &'a Field) -> Field {
        let mut ret = self.clone();
        ret.add_assign(other);
        ret
    }
}

impl<'a> AddAssign<&'a Field> for Field {
    fn add_assign(&mut self, other: &'a Field) {
        self.n[0] += other.n[0];
        self.n[1] += other.n[1];
        self.n[2] += other.n[2];
        self.n[3] += other.n[3];
        self.n[4] += other.n[4];
        self.n[5] += other.n[5];
        self.n[6] += other.n[6];
        self.n[7] += other.n[7];
        self.n[8] += other.n[8];
        self.n[9] += other.n[9];

        self.magnitude += other.magnitude;
        self.normalized = false;
        debug_assert!(self.verify());
    }
}

impl AddAssign<Field> for Field {
    fn add_assign(&mut self, other: Field) {
        self.add_assign(&other)
    }
}

impl Mul<Field> for Field {
    type Output = Field;
    fn mul(self, other: Field) -> Field {
        let mut ret = Field::default();
        ret.mul_in_place(&self, &other);
        ret
    }
}

impl<'a, 'b> Mul<&'a Field> for &'b Field {
    type Output = Field;
    fn mul(self, other: &'a Field) -> Field {
        let mut ret = Field::default();
        ret.mul_in_place(self, other);
        ret
    }
}

impl<'a> MulAssign<&'a Field> for Field {
    fn mul_assign(&mut self, other: &'a Field) {
        let mut ret = Field::default();
        ret.mul_in_place(self, other);
        *self = ret;
    }
}

impl MulAssign<Field> for Field {
    fn mul_assign(&mut self, other: Field) {
        self.mul_assign(&other)
    }
}

impl PartialEq for Field {
    fn eq(&self, other: &Field) -> bool {
        let mut na = self.neg(self.magnitude);
        na += other;
        return na.normalizes_to_zero();
    }
}

impl Eq for Field { }

impl Ord for Field {
    fn cmp(&self, other: &Field) -> Ordering {
        self.cmp_var(other)
    }
}

impl PartialOrd for Field {
    fn partial_cmp(&self, other: &Field) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

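/// Compact storage form of a normalized field element: eight 32-bit words,
/// least significant word first.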
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct FieldStorage(pub [u32; 8]);

impl Default for FieldStorage {
    fn default() -> FieldStorage {
        FieldStorage([0; 8])
    }
}

impl FieldStorage {
    pub const fn new(
        d7: u32, d6: u32, d5: u32, d4: u32, d3: u32, d2: u32, d1: u32, d0: u32
    ) -> Self {
        Self([d0, d1, d2, d3, d4, d5, d6, d7])
    }

    pub fn cmov(&mut self, other: &FieldStorage, flag: bool) {
        self.0[0] = if flag { other.0[0] } else { self.0[0] };
        self.0[1] = if flag { other.0[1] } else { self.0[1] };
        self.0[2] = if flag { other.0[2] } else { self.0[2] };
        self.0[3] = if flag { other.0[3] } else { self.0[3] };
        self.0[4] = if flag { other.0[4] } else { self.0[4] };
        self.0[5] = if flag { other.0[5] } else { self.0[5] };
        self.0[6] = if flag { other.0[6] } else { self.0[6] };
        self.0[7] = if flag { other.0[7] } else { self.0[7] };
    }
}

impl From<FieldStorage> for Field {
    fn from(a: FieldStorage) -> Field {
        let mut r = Field::default();

        r.n[0] = a.0[0] & 0x3FFFFFF;
        r.n[1] = a.0[0] >> 26 | ((a.0[1] << 6) & 0x3FFFFFF);
        r.n[2] = a.0[1] >> 20 | ((a.0[2] << 12) & 0x3FFFFFF);
        r.n[3] = a.0[2] >> 14 | ((a.0[3] << 18) & 0x3FFFFFF);
        r.n[4] = a.0[3] >> 8 | ((a.0[4] << 24) & 0x3FFFFFF);
        r.n[5] = (a.0[4] >> 2) & 0x3FFFFFF;
        r.n[6] = a.0[4] >> 28 | ((a.0[5] << 4) & 0x3FFFFFF);
        r.n[7] = a.0[5] >> 22 | ((a.0[6] << 10) & 0x3FFFFFF);
        r.n[8] = a.0[6] >> 16 | ((a.0[7] << 16) & 0x3FFFFFF);
        r.n[9] = a.0[7] >> 10;

        r.magnitude = 1;
        r.normalized = true;

        r
    }
}

impl Into<FieldStorage> for Field {
    fn into(self) -> FieldStorage {
        debug_assert!(self.normalized);
        let mut r = FieldStorage::default();

        r.0[0] = self.n[0] | self.n[1] << 26;
        r.0[1] = self.n[1] >> 6 | self.n[2] << 20;
        r.0[2] = self.n[2] >> 12 | self.n[3] << 14;
        r.0[3] = self.n[3] >> 18 | self.n[4] << 8;
        r.0[4] = self.n[4] >> 24 | self.n[5] << 2 | self.n[6] << 28;
        r.0[5] = self.n[6] >> 4 | self.n[7] << 22;
        r.0[6] = self.n[7] >> 10 | self.n[8] << 16;
        r.0[7] = self.n[8] >> 16 | self.n[9] << 10;

        r
    }
}

impl subtle::ConditionallySelectable for Field {
    fn conditional_select(a: &Self, b: &Self, choice: subtle::Choice) -> Self {
        Field {
            n: [
                u32::conditional_select(&a.n[0], &b.n[0], choice),
                u32::conditional_select(&a.n[1], &b.n[1], choice),
                u32::conditional_select(&a.n[2], &b.n[2], choice),
                u32::conditional_select(&a.n[3], &b.n[3], choice),
                u32::conditional_select(&a.n[4], &b.n[4], choice),
                u32::conditional_select(&a.n[5], &b.n[5], choice),
                u32::conditional_select(&a.n[6], &b.n[6], choice),
                u32::conditional_select(&a.n[7], &b.n[7], choice),
                u32::conditional_select(&a.n[8], &b.n[8], choice),
                u32::conditional_select(&a.n[9], &b.n[9], choice),
            ],
            magnitude: u32::conditional_select(&a.magnitude, &b.magnitude, choice),
            normalized: u8::conditional_select(&(a.normalized as u8), &(b.normalized as u8), choice) != 0,
        }
    }
}
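
// Minimal usage sketch: sanity tests for the API above (a hedged example,
// assuming the enclosing crate is built with the usual `cargo test` setup;
// the test names and values are illustrative only).
#[cfg(test)]
mod sanity_tests {
    use super::{Field, FieldStorage};

    #[test]
    fn small_integer_multiplication() {
        // 5 * 7 = 35, computed through the field code and normalized.
        let a = Field::from_int(5);
        let b = Field::from_int(7);
        let mut c = a * b;
        c.normalize();
        assert_eq!(c, Field::from_int(35));
    }

    #[test]
    fn b32_round_trip() {
        // Serializing to 32 big-endian bytes and parsing back is lossless.
        let x = Field::from_int(0x1234);
        let bytes = x.b32();
        let mut y = Field::default();
        assert!(y.set_b32(&bytes));
        assert_eq!(x, y);
    }

    #[test]
    fn storage_round_trip() {
        // Field -> FieldStorage -> Field preserves the value.
        let x = Field::from_int(42);
        let s: FieldStorage = x.into();
        assert_eq!(x, Field::from(s));
    }
}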