use core::{
    cmp::Ordering,
    ops::{Add, AddAssign, Mul, MulAssign},
};

macro_rules! debug_assert_bits {
    ($x:expr, $n:expr) => {
        debug_assert!($x >> $n == 0);
    };
}

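/// Field element for secp256k1: the value is stored as ten 26-bit limbs in
/// little-endian order (`n[i]` carries bits `26*i..26*(i+1)`), together with
/// a `magnitude` bound on how far each limb may exceed its canonical range
/// and a `normalized` flag recording whether the element is fully reduced
/// modulo the field prime.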
#[derive(Debug, Clone, Copy)]
pub struct Field {
    n: [u32; 10],
    magnitude: u32,
    normalized: bool,
}

impl Field {
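    /// Build a field element directly from ten 26-bit limbs in
    /// little-endian order. The limbs are taken as-is; the result is
    /// marked magnitude 1 and not normalized.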
    pub const fn new_raw(
        d9: u32,
        d8: u32,
        d7: u32,
        d6: u32,
        d5: u32,
        d4: u32,
        d3: u32,
        d2: u32,
        d1: u32,
        d0: u32,
    ) -> Self {
        Self {
            n: [d0, d1, d2, d3, d4, d5, d6, d7, d8, d9],
            magnitude: 1,
            normalized: false,
        }
    }

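    /// Build a normalized field element from eight 32-bit words given in
    /// big-endian word order (`d7` is the most significant word),
    /// repacking them into ten 26-bit limbs.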
    pub const fn new(
        d7: u32,
        d6: u32,
        d5: u32,
        d4: u32,
        d3: u32,
        d2: u32,
        d1: u32,
        d0: u32,
    ) -> Self {
        Self {
            n: [
                d0 & 0x3ffffff,
                (d0 >> 26) | ((d1 & 0xfffff) << 6),
                (d1 >> 20) | ((d2 & 0x3fff) << 12),
                (d2 >> 14) | ((d3 & 0xff) << 18),
                (d3 >> 8) | ((d4 & 0x3) << 24),
                (d4 >> 2) & 0x3ffffff,
                (d4 >> 28) | ((d5 & 0x3fffff) << 4),
                (d5 >> 22) | ((d6 & 0xffff) << 10),
                (d6 >> 16) | ((d7 & 0x3ff) << 16),
                d7 >> 10,
            ],
            magnitude: 1,
            normalized: true,
        }
    }

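    /// Build a normalized field element from a small integer.
    ///
    /// A minimal usage sketch (marked `ignore` since the surrounding crate
    /// layout is not shown in this file):
    ///
    /// ```ignore
    /// let five = Field::from_int(5);
    /// assert!(five.is_odd());
    /// assert!(!five.is_zero());
    /// ```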
    pub fn from_int(a: u32) -> Field {
        let mut f = Field::default();
        f.set_int(a);
        f
    }

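    /// Check the representation invariants: every limb must lie within the
    /// bound implied by `magnitude` (doubled when the element is not
    /// normalized), and a normalized element must additionally be strictly
    /// below the field prime.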
    fn verify(&self) -> bool {
        let m = if self.normalized { 1 } else { 2 } * self.magnitude;
        let mut r = true;
        r = r && (self.n[0] <= 0x3ffffff * m);
        r = r && (self.n[1] <= 0x3ffffff * m);
        r = r && (self.n[2] <= 0x3ffffff * m);
        r = r && (self.n[3] <= 0x3ffffff * m);
        r = r && (self.n[4] <= 0x3ffffff * m);
        r = r && (self.n[5] <= 0x3ffffff * m);
        r = r && (self.n[6] <= 0x3ffffff * m);
        r = r && (self.n[7] <= 0x3ffffff * m);
        r = r && (self.n[8] <= 0x3ffffff * m);
        r = r && (self.n[9] <= 0x03fffff * m);
        r = r && (self.magnitude <= 32);
        if self.normalized {
            r = r && self.magnitude <= 1;
            if r && (self.n[9] == 0x03fffff) {
                let mid = self.n[8]
                    & self.n[7]
                    & self.n[6]
                    & self.n[5]
                    & self.n[4]
                    & self.n[3]
                    & self.n[2];
                if mid == 0x3ffffff {
                    r = r && ((self.n[1] + 0x40 + ((self.n[0] + 0x3d1) >> 26)) <= 0x3ffffff);
                }
            }
        }
        r
    }

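    /// Fully normalize the element: reduce it to its canonical
    /// representative modulo the field prime, leaving magnitude 1 with the
    /// `normalized` flag set. Runs in constant time.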
    pub fn normalize(&mut self) {
        let mut t0 = self.n[0];
        let mut t1 = self.n[1];
        let mut t2 = self.n[2];
        let mut t3 = self.n[3];
        let mut t4 = self.n[4];
        let mut t5 = self.n[5];
        let mut t6 = self.n[6];
        let mut t7 = self.n[7];
        let mut t8 = self.n[8];
        let mut t9 = self.n[9];

        let mut m: u32;
        let mut x = t9 >> 22;
        t9 &= 0x03fffff;

        t0 += x * 0x3d1;
        t1 += x << 6;
        t1 += t0 >> 26;
        t0 &= 0x3ffffff;
        t2 += t1 >> 26;
        t1 &= 0x3ffffff;
        t3 += t2 >> 26;
        t2 &= 0x3ffffff;
        m = t2;
        t4 += t3 >> 26;
        t3 &= 0x3ffffff;
        m &= t3;
        t5 += t4 >> 26;
        t4 &= 0x3ffffff;
        m &= t4;
        t6 += t5 >> 26;
        t5 &= 0x3ffffff;
        m &= t5;
        t7 += t6 >> 26;
        t6 &= 0x3ffffff;
        m &= t6;
        t8 += t7 >> 26;
        t7 &= 0x3ffffff;
        m &= t7;
        t9 += t8 >> 26;
        t8 &= 0x3ffffff;
        m &= t8;

        debug_assert!(t9 >> 23 == 0);

        x = (t9 >> 22)
            | (if t9 == 0x03fffff { 1 } else { 0 }
                & if m == 0x3ffffff { 1 } else { 0 }
                & (if (t1 + 0x40 + ((t0 + 0x3d1) >> 26)) > 0x3ffffff {
                    1
                } else {
                    0
                }));

        t0 += x * 0x3d1;
        t1 += x << 6;
        t1 += t0 >> 26;
        t0 &= 0x3ffffff;
        t2 += t1 >> 26;
        t1 &= 0x3ffffff;
        t3 += t2 >> 26;
        t2 &= 0x3ffffff;
        t4 += t3 >> 26;
        t3 &= 0x3ffffff;
        t5 += t4 >> 26;
        t4 &= 0x3ffffff;
        t6 += t5 >> 26;
        t5 &= 0x3ffffff;
        t7 += t6 >> 26;
        t6 &= 0x3ffffff;
        t8 += t7 >> 26;
        t7 &= 0x3ffffff;
        t9 += t8 >> 26;
        t8 &= 0x3ffffff;

        debug_assert!(t9 >> 22 == x);

        t9 &= 0x03fffff;

        self.n = [t0, t1, t2, t3, t4, t5, t6, t7, t8, t9];
        self.magnitude = 1;
        self.normalized = true;
        debug_assert!(self.verify());
    }

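    /// Weakly normalize the element: bring it down to magnitude 1, without
    /// guaranteeing that the result is the canonical representative below
    /// the field prime. Runs in constant time.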
    pub fn normalize_weak(&mut self) {
        let mut t0 = self.n[0];
        let mut t1 = self.n[1];
        let mut t2 = self.n[2];
        let mut t3 = self.n[3];
        let mut t4 = self.n[4];
        let mut t5 = self.n[5];
        let mut t6 = self.n[6];
        let mut t7 = self.n[7];
        let mut t8 = self.n[8];
        let mut t9 = self.n[9];

        let x = t9 >> 22;
        t9 &= 0x03fffff;

        t0 += x * 0x3d1;
        t1 += x << 6;
        t1 += t0 >> 26;
        t0 &= 0x3ffffff;
        t2 += t1 >> 26;
        t1 &= 0x3ffffff;
        t3 += t2 >> 26;
        t2 &= 0x3ffffff;
        t4 += t3 >> 26;
        t3 &= 0x3ffffff;
        t5 += t4 >> 26;
        t4 &= 0x3ffffff;
        t6 += t5 >> 26;
        t5 &= 0x3ffffff;
        t7 += t6 >> 26;
        t6 &= 0x3ffffff;
        t8 += t7 >> 26;
        t7 &= 0x3ffffff;
        t9 += t8 >> 26;
        t8 &= 0x3ffffff;

        debug_assert!(t9 >> 23 == 0);

        self.n = [t0, t1, t2, t3, t4, t5, t6, t7, t8, t9];
        self.magnitude = 1;
        debug_assert!(self.verify());
    }

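    /// Fully normalize the element, like [`Field::normalize`], but skip
    /// the final reduction pass when it is not needed, so the running time
    /// may depend on the value.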
    pub fn normalize_var(&mut self) {
        let mut t0 = self.n[0];
        let mut t1 = self.n[1];
        let mut t2 = self.n[2];
        let mut t3 = self.n[3];
        let mut t4 = self.n[4];
        let mut t5 = self.n[5];
        let mut t6 = self.n[6];
        let mut t7 = self.n[7];
        let mut t8 = self.n[8];
        let mut t9 = self.n[9];

        let mut m: u32;
        let mut x = t9 >> 22;
        t9 &= 0x03fffff;

        t0 += x * 0x3d1;
        t1 += x << 6;
        t1 += t0 >> 26;
        t0 &= 0x3ffffff;
        t2 += t1 >> 26;
        t1 &= 0x3ffffff;
        t3 += t2 >> 26;
        t2 &= 0x3ffffff;
        m = t2;
        t4 += t3 >> 26;
        t3 &= 0x3ffffff;
        m &= t3;
        t5 += t4 >> 26;
        t4 &= 0x3ffffff;
        m &= t4;
        t6 += t5 >> 26;
        t5 &= 0x3ffffff;
        m &= t5;
        t7 += t6 >> 26;
        t6 &= 0x3ffffff;
        m &= t6;
        t8 += t7 >> 26;
        t7 &= 0x3ffffff;
        m &= t7;
        t9 += t8 >> 26;
        t8 &= 0x3ffffff;
        m &= t8;

        debug_assert!(t9 >> 23 == 0);

        x = (t9 >> 22)
            | (if t9 == 0x03fffff { 1 } else { 0 }
                & if m == 0x3ffffff { 1 } else { 0 }
                & (if (t1 + 0x40 + ((t0 + 0x3d1) >> 26)) > 0x3ffffff {
                    1
                } else {
                    0
                }));

        if x > 0 {
            t0 += 0x3d1;
            t1 += x << 6;
            t1 += t0 >> 26;
            t0 &= 0x3ffffff;
            t2 += t1 >> 26;
            t1 &= 0x3ffffff;
            t3 += t2 >> 26;
            t2 &= 0x3ffffff;
            t4 += t3 >> 26;
            t3 &= 0x3ffffff;
            t5 += t4 >> 26;
            t4 &= 0x3ffffff;
            t6 += t5 >> 26;
            t5 &= 0x3ffffff;
            t7 += t6 >> 26;
            t6 &= 0x3ffffff;
            t8 += t7 >> 26;
            t7 &= 0x3ffffff;
            t9 += t8 >> 26;
            t8 &= 0x3ffffff;

            debug_assert!(t9 >> 22 == x);

            t9 &= 0x03fffff;
        }

        self.n = [t0, t1, t2, t3, t4, t5, t6, t7, t8, t9];
        self.magnitude = 1;
        self.normalized = true;
        debug_assert!(self.verify());
    }

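    /// Return whether the element would normalize to zero, without
    /// modifying it. Runs in constant time.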
    pub fn normalizes_to_zero(&self) -> bool {
        let mut t0 = self.n[0];
        let mut t1 = self.n[1];
        let mut t2 = self.n[2];
        let mut t3 = self.n[3];
        let mut t4 = self.n[4];
        let mut t5 = self.n[5];
        let mut t6 = self.n[6];
        let mut t7 = self.n[7];
        let mut t8 = self.n[8];
        let mut t9 = self.n[9];

        let mut z0: u32;
        let mut z1: u32;

        let x = t9 >> 22;
        t9 &= 0x03fffff;

        t0 += x * 0x3d1;
        t1 += x << 6;
        t1 += t0 >> 26;
        t0 &= 0x3ffffff;
        z0 = t0;
        z1 = t0 ^ 0x3d0;
        t2 += t1 >> 26;
        t1 &= 0x3ffffff;
        z0 |= t1;
        z1 &= t1 ^ 0x40;
        t3 += t2 >> 26;
        t2 &= 0x3ffffff;
        z0 |= t2;
        z1 &= t2;
        t4 += t3 >> 26;
        t3 &= 0x3ffffff;
        z0 |= t3;
        z1 &= t3;
        t5 += t4 >> 26;
        t4 &= 0x3ffffff;
        z0 |= t4;
        z1 &= t4;
        t6 += t5 >> 26;
        t5 &= 0x3ffffff;
        z0 |= t5;
        z1 &= t5;
        t7 += t6 >> 26;
        t6 &= 0x3ffffff;
        z0 |= t6;
        z1 &= t6;
        t8 += t7 >> 26;
        t7 &= 0x3ffffff;
        z0 |= t7;
        z1 &= t7;
        t9 += t8 >> 26;
        t8 &= 0x3ffffff;
        z0 |= t8;
        z1 &= t8;
        z0 |= t9;
        z1 &= t9 ^ 0x3c00000;

        debug_assert!(t9 >> 23 == 0);

        z0 == 0 || z1 == 0x3ffffff
    }

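    /// Return whether the element would normalize to zero, with an early
    /// exit on the lowest limb that makes the running time
    /// value-dependent.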
    pub fn normalizes_to_zero_var(&self) -> bool {
        let mut t0: u32;
        let mut t1: u32;
        let mut t2: u32;
        let mut t3: u32;
        let mut t4: u32;
        let mut t5: u32;
        let mut t6: u32;
        let mut t7: u32;
        let mut t8: u32;
        let mut t9: u32;
        let mut z0: u32;
        let mut z1: u32;

        t0 = self.n[0];
        t9 = self.n[9];

        let x = t9 >> 22;
        t0 += x * 0x3d1;

        z0 = t0 & 0x3ffffff;
        z1 = z0 ^ 0x3d0;

        if z0 != 0 && z1 != 0x3ffffff {
            return false;
        }

        t1 = self.n[1];
        t2 = self.n[2];
        t3 = self.n[3];
        t4 = self.n[4];
        t5 = self.n[5];
        t6 = self.n[6];
        t7 = self.n[7];
        t8 = self.n[8];

        t9 &= 0x03fffff;
        t1 += x << 6;

        t1 += t0 >> 26;
        t2 += t1 >> 26;
        t1 &= 0x3ffffff;
        z0 |= t1;
        z1 &= t1 ^ 0x40;
        t3 += t2 >> 26;
        t2 &= 0x3ffffff;
        z0 |= t2;
        z1 &= t2;
        t4 += t3 >> 26;
        t3 &= 0x3ffffff;
        z0 |= t3;
        z1 &= t3;
        t5 += t4 >> 26;
        t4 &= 0x3ffffff;
        z0 |= t4;
        z1 &= t4;
        t6 += t5 >> 26;
        t5 &= 0x3ffffff;
        z0 |= t5;
        z1 &= t5;
        t7 += t6 >> 26;
        t6 &= 0x3ffffff;
        z0 |= t6;
        z1 &= t6;
        t8 += t7 >> 26;
        t7 &= 0x3ffffff;
        z0 |= t7;
        z1 &= t7;
        t9 += t8 >> 26;
        t8 &= 0x3ffffff;
        z0 |= t8;
        z1 &= t8;
        z0 |= t9;
        z1 &= t9 ^ 0x3c00000;

        debug_assert!(t9 >> 23 == 0);

        z0 == 0 || z1 == 0x3ffffff
    }

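    /// Set the element to the given small integer; the result is
    /// normalized.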
    pub fn set_int(&mut self, a: u32) {
        self.n = [a, 0, 0, 0, 0, 0, 0, 0, 0, 0];
        self.magnitude = 1;
        self.normalized = true;
        debug_assert!(self.verify());
    }

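    /// Return whether the element is zero. The element must be normalized.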
    pub fn is_zero(&self) -> bool {
        debug_assert!(self.normalized);
        debug_assert!(self.verify());
        (self.n[0]
            | self.n[1]
            | self.n[2]
            | self.n[3]
            | self.n[4]
            | self.n[5]
            | self.n[6]
            | self.n[7]
            | self.n[8]
            | self.n[9])
            == 0
    }

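    /// Return whether the element is odd. The element must be normalized.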
    pub fn is_odd(&self) -> bool {
        debug_assert!(self.normalized);
        debug_assert!(self.verify());
        self.n[0] & 1 != 0
    }

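    /// Reset the element to zero, clearing its magnitude.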
    pub fn clear(&mut self) {
        self.magnitude = 0;
        self.normalized = true;
        self.n = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    }

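    /// Load the element from a 32-byte big-endian value. Returns `false`
    /// if the value is not below the field prime (the limbs are loaded,
    /// but the magnitude and normalization flags are left untouched);
    /// otherwise the element is marked normalized and `true` is returned.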
    #[must_use]
    pub fn set_b32(&mut self, a: &[u8; 32]) -> bool {
        self.n[0] = (a[31] as u32)
            | ((a[30] as u32) << 8)
            | ((a[29] as u32) << 16)
            | (((a[28] & 0x3) as u32) << 24);
        self.n[1] = (((a[28] >> 2) & 0x3f) as u32)
            | ((a[27] as u32) << 6)
            | ((a[26] as u32) << 14)
            | (((a[25] & 0xf) as u32) << 22);
        self.n[2] = (((a[25] >> 4) & 0xf) as u32)
            | ((a[24] as u32) << 4)
            | ((a[23] as u32) << 12)
            | (((a[22] as u32) & 0x3f) << 20);
        self.n[3] = (((a[22] >> 6) & 0x3) as u32)
            | ((a[21] as u32) << 2)
            | ((a[20] as u32) << 10)
            | ((a[19] as u32) << 18);
        self.n[4] = (a[18] as u32)
            | ((a[17] as u32) << 8)
            | ((a[16] as u32) << 16)
            | (((a[15] & 0x3) as u32) << 24);
        self.n[5] = (((a[15] >> 2) & 0x3f) as u32)
            | ((a[14] as u32) << 6)
            | ((a[13] as u32) << 14)
            | (((a[12] as u32) & 0xf) << 22);
        self.n[6] = (((a[12] >> 4) & 0xf) as u32)
            | ((a[11] as u32) << 4)
            | ((a[10] as u32) << 12)
            | (((a[9] & 0x3f) as u32) << 20);
        self.n[7] = (((a[9] >> 6) & 0x3) as u32)
            | ((a[8] as u32) << 2)
            | ((a[7] as u32) << 10)
            | ((a[6] as u32) << 18);
        self.n[8] = (a[5] as u32)
            | ((a[4] as u32) << 8)
            | ((a[3] as u32) << 16)
            | (((a[2] & 0x3) as u32) << 24);
        self.n[9] = (((a[2] >> 2) & 0x3f) as u32) | ((a[1] as u32) << 6) | ((a[0] as u32) << 14);

        if self.n[9] == 0x03fffff
            && (self.n[8] & self.n[7] & self.n[6] & self.n[5] & self.n[4] & self.n[3] & self.n[2])
                == 0x3ffffff
            && (self.n[1] + 0x40 + ((self.n[0] + 0x3d1) >> 26)) > 0x3ffffff
        {
            return false;
        }

        self.magnitude = 1;
        self.normalized = true;
        debug_assert!(self.verify());

        true
    }

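    /// Serialize the element into a 32-byte big-endian array. The element
    /// must be normalized.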
    pub fn fill_b32(&self, r: &mut [u8; 32]) {
        debug_assert!(self.normalized);
        debug_assert!(self.verify());

        r[0] = ((self.n[9] >> 14) & 0xff) as u8;
        r[1] = ((self.n[9] >> 6) & 0xff) as u8;
        r[2] = (((self.n[9] & 0x3f) << 2) | ((self.n[8] >> 24) & 0x3)) as u8;
        r[3] = ((self.n[8] >> 16) & 0xff) as u8;
        r[4] = ((self.n[8] >> 8) & 0xff) as u8;
        r[5] = (self.n[8] & 0xff) as u8;
        r[6] = ((self.n[7] >> 18) & 0xff) as u8;
        r[7] = ((self.n[7] >> 10) & 0xff) as u8;
        r[8] = ((self.n[7] >> 2) & 0xff) as u8;
        r[9] = (((self.n[7] & 0x3) << 6) | ((self.n[6] >> 20) & 0x3f)) as u8;
        r[10] = ((self.n[6] >> 12) & 0xff) as u8;
        r[11] = ((self.n[6] >> 4) & 0xff) as u8;
        r[12] = (((self.n[6] & 0xf) << 4) | ((self.n[5] >> 22) & 0xf)) as u8;
        r[13] = ((self.n[5] >> 14) & 0xff) as u8;
        r[14] = ((self.n[5] >> 6) & 0xff) as u8;
        r[15] = (((self.n[5] & 0x3f) << 2) | ((self.n[4] >> 24) & 0x3)) as u8;
        r[16] = ((self.n[4] >> 16) & 0xff) as u8;
        r[17] = ((self.n[4] >> 8) & 0xff) as u8;
        r[18] = (self.n[4] & 0xff) as u8;
        r[19] = ((self.n[3] >> 18) & 0xff) as u8;
        r[20] = ((self.n[3] >> 10) & 0xff) as u8;
        r[21] = ((self.n[3] >> 2) & 0xff) as u8;
        r[22] = (((self.n[3] & 0x3) << 6) | ((self.n[2] >> 20) & 0x3f)) as u8;
        r[23] = ((self.n[2] >> 12) & 0xff) as u8;
        r[24] = ((self.n[2] >> 4) & 0xff) as u8;
        r[25] = (((self.n[2] & 0xf) << 4) | ((self.n[1] >> 22) & 0xf)) as u8;
        r[26] = ((self.n[1] >> 14) & 0xff) as u8;
        r[27] = ((self.n[1] >> 6) & 0xff) as u8;
        r[28] = (((self.n[1] & 0x3f) << 2) | ((self.n[0] >> 24) & 0x3)) as u8;
        r[29] = ((self.n[0] >> 16) & 0xff) as u8;
        r[30] = ((self.n[0] >> 8) & 0xff) as u8;
        r[31] = (self.n[0] & 0xff) as u8;
    }

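    /// Return the element as a 32-byte big-endian array. The element must
    /// be normalized.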
    pub fn b32(&self) -> [u8; 32] {
        let mut r = [0u8; 32];
        self.fill_b32(&mut r);
        r
    }

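    /// Set this element to the negation of `other`, which must have
    /// magnitude at most `m`. The result has magnitude `m + 1` and is not
    /// normalized.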
    pub fn neg_in_place(&mut self, other: &Field, m: u32) {
        debug_assert!(other.magnitude <= m);
        debug_assert!(other.verify());

        self.n[0] = 0x3fffc2f * 2 * (m + 1) - other.n[0];
        self.n[1] = 0x3ffffbf * 2 * (m + 1) - other.n[1];
        self.n[2] = 0x3ffffff * 2 * (m + 1) - other.n[2];
        self.n[3] = 0x3ffffff * 2 * (m + 1) - other.n[3];
        self.n[4] = 0x3ffffff * 2 * (m + 1) - other.n[4];
        self.n[5] = 0x3ffffff * 2 * (m + 1) - other.n[5];
        self.n[6] = 0x3ffffff * 2 * (m + 1) - other.n[6];
        self.n[7] = 0x3ffffff * 2 * (m + 1) - other.n[7];
        self.n[8] = 0x3ffffff * 2 * (m + 1) - other.n[8];
        self.n[9] = 0x03fffff * 2 * (m + 1) - other.n[9];

        self.magnitude = m + 1;
        self.normalized = false;
        debug_assert!(self.verify());
    }

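    /// Return the negation of the element, which must have magnitude at
    /// most `m`.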
    pub fn neg(&self, m: u32) -> Field {
        let mut ret = Field::default();
        ret.neg_in_place(self, m);
        ret
    }

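    /// Multiply the element by a small integer. The magnitude scales by
    /// the same factor, and the result is not normalized.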
    pub fn mul_int(&mut self, a: u32) {
        self.n[0] *= a;
        self.n[1] *= a;
        self.n[2] *= a;
        self.n[3] *= a;
        self.n[4] *= a;
        self.n[5] *= a;
        self.n[6] *= a;
        self.n[7] *= a;
        self.n[8] *= a;
        self.n[9] *= a;

        self.magnitude *= a;
        self.normalized = false;
        debug_assert!(self.verify());
    }

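    /// Compare two normalized elements limb by limb, in variable time.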
    pub fn cmp_var(&self, other: &Field) -> Ordering {
        debug_assert!(self.normalized);
        debug_assert!(other.normalized);
        debug_assert!(self.verify());
        debug_assert!(other.verify());

        for i in (0..10).rev() {
            if self.n[i] > other.n[i] {
                return Ordering::Greater;
            }
            if self.n[i] < other.n[i] {
                return Ordering::Less;
            }
        }
        Ordering::Equal
    }

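    /// Test two elements for equality in variable time, by negating `self`
    /// (which must have magnitude at most 1), adding `other`, and checking
    /// whether the sum normalizes to zero.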
    pub fn eq_var(&self, other: &Field) -> bool {
        let mut na = self.neg(1);
        na += other;
        na.normalizes_to_zero_var()
    }

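    /// Core multiplication in the 10x26 representation. Partial products
    /// are accumulated in 64-bit variables, and the limbs above 2^256 are
    /// folded back in using 2^256 = 0x1000003d1 (mod p), encoded here as
    /// the pair of constants `R0` and `R1`. Inputs must have magnitude at
    /// most 8.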
    fn mul_inner(&mut self, a: &Field, b: &Field) {
        const M: u64 = 0x3ffffff;
        const R0: u64 = 0x3d10;
        const R1: u64 = 0x400;

        let (mut c, mut d): (u64, u64);
        let (v0, v1, v2, v3, v4, v5, v6, v7, v8): (u64, u64, u64, u64, u64, u64, u64, u64, u64);
        let (t9, t1, t0, t2, t3, t4, t5, t6, t7): (u32, u32, u32, u32, u32, u32, u32, u32, u32);

        debug_assert_bits!(a.n[0], 30);
        debug_assert_bits!(a.n[1], 30);
        debug_assert_bits!(a.n[2], 30);
        debug_assert_bits!(a.n[3], 30);
        debug_assert_bits!(a.n[4], 30);
        debug_assert_bits!(a.n[5], 30);
        debug_assert_bits!(a.n[6], 30);
        debug_assert_bits!(a.n[7], 30);
        debug_assert_bits!(a.n[8], 30);
        debug_assert_bits!(a.n[9], 26);
        debug_assert_bits!(b.n[0], 30);
        debug_assert_bits!(b.n[1], 30);
        debug_assert_bits!(b.n[2], 30);
        debug_assert_bits!(b.n[3], 30);
        debug_assert_bits!(b.n[4], 30);
        debug_assert_bits!(b.n[5], 30);
        debug_assert_bits!(b.n[6], 30);
        debug_assert_bits!(b.n[7], 30);
        debug_assert_bits!(b.n[8], 30);
        debug_assert_bits!(b.n[9], 26);

        d = ((a.n[0] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[0] as u64));
        t9 = (d & M) as u32;
        d >>= 26;
        debug_assert_bits!(t9, 26);
        debug_assert_bits!(d, 38);
        c = (a.n[0] as u64) * (b.n[0] as u64);
        debug_assert_bits!(c, 60);
        d = d
            .wrapping_add((a.n[1] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[1] as u64));
        debug_assert_bits!(d, 63);
        v0 = d & M;
        d >>= 26;
        c += v0 * R0;
        debug_assert_bits!(v0, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 61);
        t0 = (c & M) as u32;
        c >>= 26;
        c += v0 * R1;

        debug_assert_bits!(t0, 26);
        debug_assert_bits!(c, 37);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 62);
        d = d
            .wrapping_add((a.n[2] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[2] as u64));
        debug_assert_bits!(d, 63);
        v1 = d & M;
        d >>= 26;
        c += v1 * R0;
        debug_assert_bits!(v1, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        t1 = (c & M) as u32;
        c >>= 26;
        c += v1 * R1;
        debug_assert_bits!(t1, 26);
        debug_assert_bits!(c, 38);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 62);
        d = d
            .wrapping_add((a.n[3] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[3] as u64));
        debug_assert_bits!(d, 63);
        v2 = d & M;
        d >>= 26;
        c += v2 * R0;
        debug_assert_bits!(v2, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        t2 = (c & M) as u32;
        c >>= 26;
        c += v2 * R1;
        debug_assert_bits!(t2, 26);
        debug_assert_bits!(c, 38);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add((a.n[4] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[4] as u64));
        debug_assert_bits!(d, 63);
        v3 = d & M;
        d >>= 26;
        c += v3 * R0;
        debug_assert_bits!(v3, 26);
        debug_assert_bits!(d, 37);
        t3 = (c & M) as u32;
        c >>= 26;
        c += v3 * R1;
        debug_assert_bits!(t3, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add((a.n[5] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[5] as u64));
        debug_assert_bits!(d, 62);
        v4 = d & M;
        d >>= 26;
        c += v4 * R0;
        debug_assert_bits!(v4, 26);
        debug_assert_bits!(d, 36);
        t4 = (c & M) as u32;
        c >>= 26;
        c += v4 * R1;
        debug_assert_bits!(t4, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add((a.n[6] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[6] as u64));
        debug_assert_bits!(d, 62);
        v5 = d & M;
        d >>= 26;
        c += v5 * R0;
        debug_assert_bits!(v5, 26);
        debug_assert_bits!(d, 36);
        t5 = (c & M) as u32;
        c >>= 26;
        c += v5 * R1;
        debug_assert_bits!(t5, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add((a.n[7] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[7] as u64));
        debug_assert_bits!(d, 61);
        v6 = d & M;
        d >>= 26;
        c += v6 * R0;
        debug_assert_bits!(v6, 26);
        debug_assert_bits!(d, 35);
        t6 = (c & M) as u32;
        c >>= 26;
        c += v6 * R1;
        debug_assert_bits!(t6, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[0] as u64));
        debug_assert!(c <= 0x8000007c00000007);
        d = d
            .wrapping_add((a.n[8] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[8] as u64));
        debug_assert_bits!(d, 58);
        v7 = d & M;
        d >>= 26;
        c += v7 * R0;
        debug_assert_bits!(v7, 26);
        debug_assert_bits!(d, 32);
        debug_assert!(c <= 0x800001703fffc2f7);
        t7 = (c & M) as u32;
        c >>= 26;
        c += v7 * R1;
        debug_assert_bits!(t7, 26);
        debug_assert_bits!(c, 38);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[0] as u64));
        debug_assert!(c <= 0x9000007b80000008);
        d = d.wrapping_add((a.n[9] as u64) * (b.n[9] as u64));
        debug_assert_bits!(d, 57);
        v8 = d & M;
        d >>= 26;
        c += v8 * R0;
        debug_assert_bits!(v8, 26);
        debug_assert_bits!(d, 31);
        debug_assert!(c <= 0x9000016fbfffc2f8);
        self.n[3] = t3;
        debug_assert_bits!(self.n[3], 26);
        self.n[4] = t4;
        debug_assert_bits!(self.n[4], 26);
        self.n[5] = t5;
        debug_assert_bits!(self.n[5], 26);
        self.n[6] = t6;
        debug_assert_bits!(self.n[6], 26);
        self.n[7] = t7;
        debug_assert_bits!(self.n[7], 26);
        self.n[8] = (c & M) as u32;
        c >>= 26;
        c += v8 * R1;
        debug_assert_bits!(self.n[8], 26);
        debug_assert_bits!(c, 39);
        c += d * R0 + t9 as u64;
        debug_assert_bits!(c, 45);
        self.n[9] = (c & (M >> 4)) as u32;
        c >>= 22;
        c += d * (R1 << 4);
        debug_assert_bits!(self.n[9], 22);
        debug_assert_bits!(c, 46);
        d = c * (R0 >> 4) + t0 as u64;
        debug_assert_bits!(d, 56);
        self.n[0] = (d & M) as u32;
        d >>= 26;
        debug_assert_bits!(self.n[0], 26);
        debug_assert_bits!(d, 30);
        d += c * (R1 >> 4) + t1 as u64;
        debug_assert_bits!(d, 53);
        debug_assert!(d <= 0x10000003ffffbf);
        self.n[1] = (d & M) as u32;
        d >>= 26;
        debug_assert_bits!(self.n[1], 26);
        debug_assert_bits!(d, 27);
        debug_assert!(d <= 0x4000000);
        d += t2 as u64;
        debug_assert_bits!(d, 27);
        self.n[2] = d as u32;
        debug_assert_bits!(self.n[2], 27);
    }

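    /// Core squaring in the 10x26 representation; the same reduction
    /// strategy as `mul_inner`, with the symmetric cross terms doubled.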
    fn sqr_inner(&mut self, a: &Field) {
        const M: u64 = 0x3ffffff;
        const R0: u64 = 0x3d10;
        const R1: u64 = 0x400;

        let (mut c, mut d): (u64, u64);
        let (v0, v1, v2, v3, v4, v5, v6, v7, v8): (u64, u64, u64, u64, u64, u64, u64, u64, u64);
        let (t9, t0, t1, t2, t3, t4, t5, t6, t7): (u32, u32, u32, u32, u32, u32, u32, u32, u32);

        debug_assert_bits!(a.n[0], 30);
        debug_assert_bits!(a.n[1], 30);
        debug_assert_bits!(a.n[2], 30);
        debug_assert_bits!(a.n[3], 30);
        debug_assert_bits!(a.n[4], 30);
        debug_assert_bits!(a.n[5], 30);
        debug_assert_bits!(a.n[6], 30);
        debug_assert_bits!(a.n[7], 30);
        debug_assert_bits!(a.n[8], 30);
        debug_assert_bits!(a.n[9], 26);

        d = (((a.n[0] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[7] as u64))
            .wrapping_add(((a.n[3] * 2) as u64) * (a.n[6] as u64))
            .wrapping_add(((a.n[4] * 2) as u64) * (a.n[5] as u64));
        t9 = (d & M) as u32;
        d >>= 26;
        debug_assert_bits!(t9, 26);
        debug_assert_bits!(d, 38);
        c = (a.n[0] as u64) * (a.n[0] as u64);
        debug_assert_bits!(c, 60);
        d = d
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add(((a.n[3] * 2) as u64) * (a.n[7] as u64))
            .wrapping_add(((a.n[4] * 2) as u64) * (a.n[6] as u64))
            .wrapping_add((a.n[5] as u64) * (a.n[5] as u64));
        debug_assert_bits!(d, 63);
        v0 = d & M;
        d >>= 26;
        c += v0 * R0;
        debug_assert_bits!(v0, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 61);
        t0 = (c & M) as u32;
        c >>= 26;
        c += v0 * R1;
        debug_assert_bits!(t0, 26);
        debug_assert_bits!(c, 37);
        c = c.wrapping_add(((a.n[0] * 2) as u64) * (a.n[1] as u64));
        debug_assert_bits!(c, 62);
        d = d
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[3] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add(((a.n[4] * 2) as u64) * (a.n[7] as u64))
            .wrapping_add(((a.n[5] * 2) as u64) * (a.n[6] as u64));
        debug_assert_bits!(d, 63);
        v1 = d & M;
        d >>= 26;
        c += v1 * R0;
        debug_assert_bits!(v1, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        t1 = (c & M) as u32;
        c >>= 26;
        c += v1 * R1;
        debug_assert_bits!(t1, 26);
        debug_assert_bits!(c, 38);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[2] as u64))
            .wrapping_add((a.n[1] as u64) * (a.n[1] as u64));
        debug_assert_bits!(c, 62);
        d = d
            .wrapping_add(((a.n[3] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[4] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add(((a.n[5] * 2) as u64) * (a.n[7] as u64))
            .wrapping_add((a.n[6] as u64) * (a.n[6] as u64));
        debug_assert_bits!(d, 63);
        v2 = d & M;
        d >>= 26;
        c += v2 * R0;
        debug_assert_bits!(v2, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        t2 = (c & M) as u32;
        c >>= 26;
        c += v2 * R1;
        debug_assert_bits!(t2, 26);
        debug_assert_bits!(c, 38);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[3] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[2] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add(((a.n[4] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[5] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add(((a.n[6] * 2) as u64) * (a.n[7] as u64));
        debug_assert_bits!(d, 63);
        v3 = d & M;
        d >>= 26;
        c += v3 * R0;
        debug_assert_bits!(v3, 26);
        debug_assert_bits!(d, 37);
        t3 = (c & M) as u32;
        c >>= 26;
        c += v3 * R1;
        debug_assert_bits!(t3, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[4] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[3] as u64))
            .wrapping_add((a.n[2] as u64) * (a.n[2] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add(((a.n[5] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[6] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add((a.n[7] as u64) * (a.n[7] as u64));
        debug_assert_bits!(d, 62);
        v4 = d & M;
        d >>= 26;
        c += v4 * R0;
        debug_assert_bits!(v4, 26);
        debug_assert_bits!(d, 36);
        t4 = (c & M) as u32;
        c >>= 26;
        c += v4 * R1;
        debug_assert_bits!(t4, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[5] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[4] as u64))
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[3] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add(((a.n[6] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[7] * 2) as u64) * (a.n[8] as u64));
        debug_assert_bits!(d, 62);
        v5 = d & M;
        d >>= 26;
        c += v5 * R0;
        debug_assert_bits!(v5, 26);
        debug_assert_bits!(d, 36);
        t5 = (c & M) as u32;
        c >>= 26;
        c += v5 * R1;
        debug_assert_bits!(t5, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[6] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[5] as u64))
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[4] as u64))
            .wrapping_add((a.n[3] as u64) * (a.n[3] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add(((a.n[7] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add((a.n[8] as u64) * (a.n[8] as u64));
        debug_assert_bits!(d, 61);
        v6 = d & M;
        d >>= 26;
        c += v6 * R0;
        debug_assert_bits!(v6, 26);
        debug_assert_bits!(d, 35);
        t6 = (c & M) as u32;
        c >>= 26;
        c += v6 * R1;
        debug_assert_bits!(t6, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[7] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[6] as u64))
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[5] as u64))
            .wrapping_add(((a.n[3] * 2) as u64) * (a.n[4] as u64));
        debug_assert!(c <= 0x8000007c00000007);
        d = d.wrapping_add(((a.n[8] * 2) as u64) * (a.n[9] as u64));
        debug_assert_bits!(d, 58);
        v7 = d & M;
        d >>= 26;
        c += v7 * R0;
        debug_assert_bits!(v7, 26);
        debug_assert_bits!(d, 32);
        debug_assert!(c <= 0x800001703fffc2f7);
        t7 = (c & M) as u32;
        c >>= 26;
        c += v7 * R1;
        debug_assert_bits!(t7, 26);
        debug_assert_bits!(c, 38);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[7] as u64))
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[6] as u64))
            .wrapping_add(((a.n[3] * 2) as u64) * (a.n[5] as u64))
            .wrapping_add((a.n[4] as u64) * (a.n[4] as u64));
        debug_assert!(c <= 0x9000007b80000008);
        d = d.wrapping_add((a.n[9] as u64) * (a.n[9] as u64));
        debug_assert_bits!(d, 57);
        v8 = d & M;
        d >>= 26;
        c += v8 * R0;
        debug_assert_bits!(v8, 26);
        debug_assert_bits!(d, 31);
        debug_assert!(c <= 0x9000016fbfffc2f8);
        self.n[3] = t3;
        debug_assert_bits!(self.n[3], 26);
        self.n[4] = t4;
        debug_assert_bits!(self.n[4], 26);
        self.n[5] = t5;
        debug_assert_bits!(self.n[5], 26);
        self.n[6] = t6;
        debug_assert_bits!(self.n[6], 26);
        self.n[7] = t7;
        debug_assert_bits!(self.n[7], 26);
        self.n[8] = (c & M) as u32;
        c >>= 26;
        c += v8 * R1;
        debug_assert_bits!(self.n[8], 26);
        debug_assert_bits!(c, 39);
        c += d * R0 + t9 as u64;
        debug_assert_bits!(c, 45);
        self.n[9] = (c & (M >> 4)) as u32;
        c >>= 22;
        c += d * (R1 << 4);
        debug_assert_bits!(self.n[9], 22);
        debug_assert_bits!(c, 46);
        d = c * (R0 >> 4) + t0 as u64;
        debug_assert_bits!(d, 56);
        self.n[0] = (d & M) as u32;
        d >>= 26;
        debug_assert_bits!(self.n[0], 26);
        debug_assert_bits!(d, 30);
        d += c * (R1 >> 4) + t1 as u64;
        debug_assert_bits!(d, 53);
        debug_assert!(d <= 0x10000003ffffbf);
        self.n[1] = (d & M) as u32;
        d >>= 26;
        debug_assert_bits!(self.n[1], 26);
        debug_assert_bits!(d, 27);
        debug_assert!(d <= 0x4000000);
        d += t2 as u64;
        debug_assert_bits!(d, 27);
        self.n[2] = d as u32;
        debug_assert_bits!(self.n[2], 27);
    }

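    /// Set this element to `a * b`. Inputs may have magnitude up to 8; the
    /// result has magnitude 1 and is not normalized.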
    pub fn mul_in_place(&mut self, a: &Field, b: &Field) {
        debug_assert!(a.magnitude <= 8);
        debug_assert!(b.magnitude <= 8);
        debug_assert!(a.verify());
        debug_assert!(b.verify());
        self.mul_inner(a, b);
        self.magnitude = 1;
        self.normalized = false;
        debug_assert!(self.verify());
    }

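    /// Set this element to the square of `a`. The input may have magnitude
    /// up to 8; the result has magnitude 1 and is not normalized.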
    pub fn sqr_in_place(&mut self, a: &Field) {
        debug_assert!(a.magnitude <= 8);
        debug_assert!(a.verify());
        self.sqr_inner(a);
        self.magnitude = 1;
        self.normalized = false;
        // The post-condition should check the output, not the input.
        debug_assert!(self.verify());
    }

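    /// Return the square of the element.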
    pub fn sqr(&self) -> Field {
        let mut ret = Field::default();
        ret.sqr_in_place(self);
        ret
    }

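    /// Compute a square root by raising to (p + 1) / 4 with the fixed
    /// addition chain below (valid because the secp256k1 prime is 3 mod
    /// 4). Returns the candidate root together with a flag indicating
    /// whether it actually squares back to `self`.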
    pub fn sqrt(&self) -> (Field, bool) {
        let mut x2 = self.sqr();
        x2 *= self;

        let mut x3 = x2.sqr();
        x3 *= self;

        let mut x6 = x3;
        for _ in 0..3 {
            x6 = x6.sqr();
        }
        x6 *= &x3;

        let mut x9 = x6;
        for _ in 0..3 {
            x9 = x9.sqr();
        }
        x9 *= &x3;

        let mut x11 = x9;
        for _ in 0..2 {
            x11 = x11.sqr();
        }
        x11 *= &x2;

        let mut x22 = x11;
        for _ in 0..11 {
            x22 = x22.sqr();
        }
        x22 *= &x11;

        let mut x44 = x22;
        for _ in 0..22 {
            x44 = x44.sqr();
        }
        x44 *= &x22;

        let mut x88 = x44;
        for _ in 0..44 {
            x88 = x88.sqr();
        }
        x88 *= &x44;

        let mut x176 = x88;
        for _ in 0..88 {
            x176 = x176.sqr();
        }
        x176 *= &x88;

        let mut x220 = x176;
        for _ in 0..44 {
            x220 = x220.sqr();
        }
        x220 *= &x44;

        let mut x223 = x220;
        for _ in 0..3 {
            x223 = x223.sqr();
        }
        x223 *= &x3;

        let mut t1 = x223;
        for _ in 0..23 {
            t1 = t1.sqr();
        }
        t1 *= &x22;
        for _ in 0..6 {
            t1 = t1.sqr();
        }
        t1 *= &x2;
        t1 = t1.sqr();
        let r = t1.sqr();

        t1 = r.sqr();
        (r, &t1 == self)
    }

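    /// Compute the multiplicative inverse by raising to p - 2 (Fermat's
    /// little theorem) with a fixed addition chain.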
    pub fn inv(&self) -> Field {
        let mut x2 = self.sqr();
        x2 *= self;

        let mut x3 = x2.sqr();
        x3 *= self;

        let mut x6 = x3;
        for _ in 0..3 {
            x6 = x6.sqr();
        }
        x6 *= &x3;

        let mut x9 = x6;
        for _ in 0..3 {
            x9 = x9.sqr();
        }
        x9 *= &x3;

        let mut x11 = x9;
        for _ in 0..2 {
            x11 = x11.sqr();
        }
        x11 *= &x2;

        let mut x22 = x11;
        for _ in 0..11 {
            x22 = x22.sqr();
        }
        x22 *= &x11;

        let mut x44 = x22;
        for _ in 0..22 {
            x44 = x44.sqr();
        }
        x44 *= &x22;

        let mut x88 = x44;
        for _ in 0..44 {
            x88 = x88.sqr();
        }
        x88 *= &x44;

        let mut x176 = x88;
        for _ in 0..88 {
            x176 = x176.sqr();
        }
        x176 *= &x88;

        let mut x220 = x176;
        for _ in 0..44 {
            x220 = x220.sqr();
        }
        x220 *= &x44;

        let mut x223 = x220;
        for _ in 0..3 {
            x223 = x223.sqr();
        }
        x223 *= &x3;

        let mut t1 = x223;
        for _ in 0..23 {
            t1 = t1.sqr();
        }
        t1 *= &x22;
        for _ in 0..5 {
            t1 = t1.sqr();
        }
        t1 *= self;
        for _ in 0..3 {
            t1 = t1.sqr();
        }
        t1 *= &x2;
        for _ in 0..2 {
            t1 = t1.sqr();
        }
        self * &t1
    }

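    /// Variable-time inverse; currently just delegates to the
    /// constant-time [`Field::inv`].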
    pub fn inv_var(&self) -> Field {
        self.inv()
    }

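    /// Return whether the element is a quadratic residue, in variable
    /// time.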
    pub fn is_quad_var(&self) -> bool {
        let (_, ret) = self.sqrt();
        ret
    }

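    /// Conditional move: overwrite `self` with `other` when `flag` is set.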
    pub fn cmov(&mut self, other: &Field, flag: bool) {
        self.n[0] = if flag { other.n[0] } else { self.n[0] };
        self.n[1] = if flag { other.n[1] } else { self.n[1] };
        self.n[2] = if flag { other.n[2] } else { self.n[2] };
        self.n[3] = if flag { other.n[3] } else { self.n[3] };
        self.n[4] = if flag { other.n[4] } else { self.n[4] };
        self.n[5] = if flag { other.n[5] } else { self.n[5] };
        self.n[6] = if flag { other.n[6] } else { self.n[6] };
        self.n[7] = if flag { other.n[7] } else { self.n[7] };
        self.n[8] = if flag { other.n[8] } else { self.n[8] };
        self.n[9] = if flag { other.n[9] } else { self.n[9] };
        self.magnitude = if flag {
            other.magnitude
        } else {
            self.magnitude
        };
        self.normalized = if flag {
            other.normalized
        } else {
            self.normalized
        };
    }
}

impl Default for Field {
    fn default() -> Field {
        Self {
            n: [0u32; 10],
            magnitude: 0,
            normalized: true,
        }
    }
}

impl Add<Field> for Field {
    type Output = Field;
    fn add(self, other: Field) -> Field {
        let mut ret = self;
        ret.add_assign(&other);
        ret
    }
}

impl<'a, 'b> Add<&'a Field> for &'b Field {
    type Output = Field;
    fn add(self, other: &'a Field) -> Field {
        let mut ret = *self;
        ret.add_assign(other);
        ret
    }
}

impl<'a> AddAssign<&'a Field> for Field {
    fn add_assign(&mut self, other: &'a Field) {
        self.n[0] += other.n[0];
        self.n[1] += other.n[1];
        self.n[2] += other.n[2];
        self.n[3] += other.n[3];
        self.n[4] += other.n[4];
        self.n[5] += other.n[5];
        self.n[6] += other.n[6];
        self.n[7] += other.n[7];
        self.n[8] += other.n[8];
        self.n[9] += other.n[9];

        self.magnitude += other.magnitude;
        self.normalized = false;
        debug_assert!(self.verify());
    }
}

impl AddAssign<Field> for Field {
    fn add_assign(&mut self, other: Field) {
        self.add_assign(&other)
    }
}

impl Mul<Field> for Field {
    type Output = Field;
    fn mul(self, other: Field) -> Field {
        let mut ret = Field::default();
        ret.mul_in_place(&self, &other);
        ret
    }
}

impl<'a, 'b> Mul<&'a Field> for &'b Field {
    type Output = Field;
    fn mul(self, other: &'a Field) -> Field {
        let mut ret = Field::default();
        ret.mul_in_place(self, other);
        ret
    }
}

impl<'a> MulAssign<&'a Field> for Field {
    fn mul_assign(&mut self, other: &'a Field) {
        let mut ret = Field::default();
        ret.mul_in_place(self, other);
        *self = ret;
    }
}

impl MulAssign<Field> for Field {
    fn mul_assign(&mut self, other: Field) {
        self.mul_assign(&other)
    }
}

impl PartialEq for Field {
    fn eq(&self, other: &Field) -> bool {
        let mut na = self.neg(self.magnitude);
        na += other;
        na.normalizes_to_zero()
    }
}

impl Eq for Field {}

impl Ord for Field {
    fn cmp(&self, other: &Field) -> Ordering {
        self.cmp_var(other)
    }
}

impl PartialOrd for Field {
    fn partial_cmp(&self, other: &Field) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

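/// Compact storage form of a field element: eight 32-bit limbs of the
/// fully normalized value, in little-endian order.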
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct FieldStorage(pub [u32; 8]);

impl Default for FieldStorage {
    fn default() -> FieldStorage {
        FieldStorage([0; 8])
    }
}

impl FieldStorage {
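    /// Build a storage element from eight 32-bit words given in big-endian
    /// word order (`d7` is the most significant word).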
    pub const fn new(
        d7: u32,
        d6: u32,
        d5: u32,
        d4: u32,
        d3: u32,
        d2: u32,
        d1: u32,
        d0: u32,
    ) -> Self {
        Self([d0, d1, d2, d3, d4, d5, d6, d7])
    }

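    /// Conditional move: overwrite `self` with `other` when `flag` is set.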
    pub fn cmov(&mut self, other: &FieldStorage, flag: bool) {
        self.0[0] = if flag { other.0[0] } else { self.0[0] };
        self.0[1] = if flag { other.0[1] } else { self.0[1] };
        self.0[2] = if flag { other.0[2] } else { self.0[2] };
        self.0[3] = if flag { other.0[3] } else { self.0[3] };
        self.0[4] = if flag { other.0[4] } else { self.0[4] };
        self.0[5] = if flag { other.0[5] } else { self.0[5] };
        self.0[6] = if flag { other.0[6] } else { self.0[6] };
        self.0[7] = if flag { other.0[7] } else { self.0[7] };
    }
}

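/// Unpack a stored element into the 10x26 working representation; the
/// result is normalized with magnitude 1.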
impl From<FieldStorage> for Field {
    fn from(a: FieldStorage) -> Field {
        let mut r = Field::default();

        r.n[0] = a.0[0] & 0x3ffffff;
        r.n[1] = a.0[0] >> 26 | ((a.0[1] << 6) & 0x3ffffff);
        r.n[2] = a.0[1] >> 20 | ((a.0[2] << 12) & 0x3ffffff);
        r.n[3] = a.0[2] >> 14 | ((a.0[3] << 18) & 0x3ffffff);
        r.n[4] = a.0[3] >> 8 | ((a.0[4] << 24) & 0x3ffffff);
        r.n[5] = (a.0[4] >> 2) & 0x3ffffff;
        r.n[6] = a.0[4] >> 28 | ((a.0[5] << 4) & 0x3ffffff);
        r.n[7] = a.0[5] >> 22 | ((a.0[6] << 10) & 0x3ffffff);
        r.n[8] = a.0[6] >> 16 | ((a.0[7] << 16) & 0x3ffffff);
        r.n[9] = a.0[7] >> 10;

        r.magnitude = 1;
        r.normalized = true;

        r
    }
}

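/// Pack a normalized element into its compact storage form.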
impl From<Field> for FieldStorage {
    fn from(v: Field) -> FieldStorage {
        debug_assert!(v.normalized);
        let mut r = FieldStorage::default();

        r.0[0] = v.n[0] | v.n[1] << 26;
        r.0[1] = v.n[1] >> 6 | v.n[2] << 20;
        r.0[2] = v.n[2] >> 12 | v.n[3] << 14;
        r.0[3] = v.n[3] >> 18 | v.n[4] << 8;
        r.0[4] = v.n[4] >> 24 | v.n[5] << 2 | v.n[6] << 28;
        r.0[5] = v.n[6] >> 4 | v.n[7] << 22;
        r.0[6] = v.n[7] >> 10 | v.n[8] << 16;
        r.0[7] = v.n[8] >> 16 | v.n[9] << 10;

        r
    }
}