1use crate::{
2 AdditiveGroup, BigInt, BigInteger, FftField, Field, LegendreSymbol, One, PrimeField,
3 SqrtPrecomputation, Zero,
4};
5use ark_serialize::{
6 buffer_byte_size, CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize,
7 CanonicalSerializeWithFlags, Compress, EmptyFlags, Flags, SerializationError, Valid, Validate,
8};
9use ark_std::{
10 cmp::*,
11 fmt::{Display, Formatter, Result as FmtResult},
12 marker::PhantomData,
13 ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign},
14 str::FromStr,
15 string::*,
16};
17use core::iter;
18
19#[macro_use]
20mod montgomery_backend;
21pub use montgomery_backend::*;
22
/// Configuration and arithmetic backend for a prime field `Fp<Self, N>`
/// whose modulus fits in `N` 64-bit limbs. Implementations (e.g. the
/// Montgomery backend in this module) supply both the field constants and
/// the low-level modular arithmetic routines.
pub trait FpConfig<const N: usize>: Send + Sync + 'static + Sized {
    /// The modulus `p` of the field.
    const MODULUS: crate::BigInt<N>;

    /// A multiplicative generator of the field.
    const GENERATOR: Fp<Self, N>;

    /// The additive identity, `0`.
    const ZERO: Fp<Self, N>;

    /// The multiplicative identity, `1`.
    const ONE: Fp<Self, N>;

    /// `-1`, i.e. `MODULUS - 1` as a field element.
    const NEG_ONE: Fp<Self, N>;

    /// The two-adicity of the field (see `TWO_ADIC_ROOT_OF_UNITY`).
    const TWO_ADICITY: u32;

    /// A `2^TWO_ADICITY`-th root of unity, used for radix-2 FFTs.
    const TWO_ADIC_ROOT_OF_UNITY: Fp<Self, N>;

    /// Optional small odd subgroup base for mixed-radix FFTs; `None` by default.
    const SMALL_SUBGROUP_BASE: Option<u32> = None;

    /// Adicity of `SMALL_SUBGROUP_BASE`, if one is configured.
    const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = None;

    /// Root of unity for the large mixed-radix subgroup, if configured.
    const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Fp<Self, N>> = None;

    /// Optional precomputation used to accelerate square-root extraction.
    const SQRT_PRECOMP: Option<SqrtPrecomputation<Fp<Self, N>>>;

    /// Sets `a = a + b` (mod p).
    fn add_assign(a: &mut Fp<Self, N>, b: &Fp<Self, N>);

    /// Sets `a = a - b` (mod p).
    fn sub_assign(a: &mut Fp<Self, N>, b: &Fp<Self, N>);

    /// Sets `a = 2 * a` (mod p).
    fn double_in_place(a: &mut Fp<Self, N>);

    /// Sets `a = -a` (mod p).
    fn neg_in_place(a: &mut Fp<Self, N>);

    /// Sets `a = a * b` (mod p).
    fn mul_assign(a: &mut Fp<Self, N>, b: &Fp<Self, N>);

    /// Computes the inner product `sum_i a[i] * b[i]`.
    fn sum_of_products<const T: usize>(a: &[Fp<Self, N>; T], b: &[Fp<Self, N>; T]) -> Fp<Self, N>;

    /// Sets `a = a * a` (mod p).
    fn square_in_place(a: &mut Fp<Self, N>);

    /// Returns the multiplicative inverse of `a`, or `None` if it has none
    /// (i.e. `a` is zero).
    fn inverse(a: &Fp<Self, N>) -> Option<Fp<Self, N>>;

    /// Builds an element from a big integer; `None` on failure
    /// (presumably when `other >= MODULUS` — backend-defined, confirm with impl).
    fn from_bigint(other: BigInt<N>) -> Option<Fp<Self, N>>;

    /// Converts an element back to its canonical big-integer representation.
    fn into_bigint(other: Fp<Self, N>) -> BigInt<N>;
}
104
/// An element of the prime field configured by `P`, stored as `N` 64-bit
/// limbs. The in-memory encoding is backend-defined (e.g. Montgomery form);
/// use `into_bigint` to obtain the canonical integer representative.
#[derive(educe::Educe)]
#[educe(Default, Hash, Clone, Copy, PartialEq, Eq)]
pub struct Fp<P: FpConfig<N>, const N: usize>(
    // Raw limb storage in the backend's internal encoding.
    #[doc(hidden)]
    pub BigInt<N>,
    // Zero-sized marker tying the element to its configuration `P`.
    #[doc(hidden)] pub PhantomData<P>,
);
116
// Convenience aliases: `FpK<P>` is a prime field whose modulus fits in `K`
// bits, i.e. `K / 64` limbs.
pub type Fp64<P> = Fp<P, 1>;
pub type Fp128<P> = Fp<P, 2>;
pub type Fp192<P> = Fp<P, 3>;
pub type Fp256<P> = Fp<P, 4>;
pub type Fp320<P> = Fp<P, 5>;
pub type Fp384<P> = Fp<P, 6>;
pub type Fp448<P> = Fp<P, 7>;
pub type Fp512<P> = Fp<P, 8>;
pub type Fp576<P> = Fp<P, 9>;
pub type Fp640<P> = Fp<P, 10>;
pub type Fp704<P> = Fp<P, 11>;
pub type Fp768<P> = Fp<P, 12>;
pub type Fp832<P> = Fp<P, 13>;
130
impl<P: FpConfig<N>, const N: usize> Fp<P, N> {
    /// Returns `true` if the raw limb value is not reduced, i.e. is greater
    /// than or equal to the modulus.
    #[doc(hidden)]
    #[inline]
    pub fn is_geq_modulus(&self) -> bool {
        self.0 >= P::MODULUS
    }

    /// Conditionally subtracts the modulus once to bring the value back into
    /// `[0, MODULUS)`. Assumes the excess is at most one modulus (callers
    /// from the arithmetic backend maintain this invariant).
    #[inline]
    fn subtract_modulus(&mut self) {
        if self.is_geq_modulus() {
            self.0.sub_with_borrow(&Self::MODULUS);
        }
    }

    /// Like `subtract_modulus`, but also subtracts when the preceding
    /// addition carried out of the top limb (the true value then exceeds
    /// `2^(64*N)` and is certainly >= MODULUS).
    #[inline]
    fn subtract_modulus_with_carry(&mut self, carry: bool) {
        if carry || self.is_geq_modulus() {
            self.0.sub_with_borrow(&Self::MODULUS);
        }
    }

    /// Number of unused high bits in the top limb: total limb bits minus the
    /// modulus bit-length.
    const fn num_bits_to_shave() -> usize {
        64 * N - (Self::MODULUS_BIT_SIZE as usize)
    }
}
156
157impl<P: FpConfig<N>, const N: usize> ark_std::fmt::Debug for Fp<P, N> {
158 fn fmt(&self, f: &mut Formatter<'_>) -> ark_std::fmt::Result {
159 ark_std::fmt::Debug::fmt(&self.into_bigint(), f)
160 }
161}
162
163impl<P: FpConfig<N>, const N: usize> Zero for Fp<P, N> {
164 #[inline]
165 fn zero() -> Self {
166 P::ZERO
167 }
168
169 #[inline]
170 fn is_zero(&self) -> bool {
171 *self == P::ZERO
172 }
173}
174
175impl<P: FpConfig<N>, const N: usize> One for Fp<P, N> {
176 #[inline]
177 fn one() -> Self {
178 P::ONE
179 }
180
181 #[inline]
182 fn is_one(&self) -> bool {
183 *self == P::ONE
184 }
185}
186
187impl<P: FpConfig<N>, const N: usize> AdditiveGroup for Fp<P, N> {
188 type Scalar = Self;
189 const ZERO: Self = P::ZERO;
190
191 #[inline]
192 fn double(&self) -> Self {
193 let mut temp = *self;
194 temp.double_in_place();
195 temp
196 }
197
198 #[inline]
199 fn double_in_place(&mut self) -> &mut Self {
200 P::double_in_place(self);
201 self
202 }
203
204 #[inline]
205 fn neg_in_place(&mut self) -> &mut Self {
206 P::neg_in_place(self);
207 self
208 }
209}
210
impl<P: FpConfig<N>, const N: usize> Field for Fp<P, N> {
    // A prime field is its own base prime field.
    type BasePrimeField = Self;

    const SQRT_PRECOMP: Option<SqrtPrecomputation<Self>> = P::SQRT_PRECOMP;
    const ONE: Self = P::ONE;
    const NEG_ONE: Self = P::NEG_ONE;

    /// A prime field is a degree-1 extension of itself.
    fn extension_degree() -> u64 {
        1
    }

    /// Identity conversion: the base prime field is `Self`.
    fn from_base_prime_field(elem: Self::BasePrimeField) -> Self {
        elem
    }

    /// Yields `self` as the single base-field coefficient.
    fn to_base_prime_field_elements(&self) -> impl Iterator<Item = Self::BasePrimeField> {
        iter::once(*self)
    }

    /// Succeeds only when the iterator yields exactly one element.
    fn from_base_prime_field_elems(
        elems: impl IntoIterator<Item = Self::BasePrimeField>,
    ) -> Option<Self> {
        let mut iter = elems.into_iter();
        let first = iter.next()?;
        if iter.next().is_some() {
            return None;
        }
        Some(first)
    }

    /// The characteristic is the modulus, exposed as little-endian limbs.
    #[inline]
    fn characteristic() -> &'static [u64] {
        P::MODULUS.as_ref()
    }

    /// Delegates the inner product to the (possibly optimized) backend.
    #[inline]
    fn sum_of_products<const T: usize>(a: &[Self; T], b: &[Self; T]) -> Self {
        P::sum_of_products(a, b)
    }

    /// Attempts to interpret `bytes` (little-endian) as a field element with
    /// serialization flags packed into the high bits of the flag byte.
    ///
    /// Returns `None` if the flag type needs more than one byte, if the flag
    /// bits are invalid, or if the masked integer fails deserialization.
    #[inline]
    fn from_random_bytes_with_flags<F: Flags>(bytes: &[u8]) -> Option<(Self, F)> {
        if F::BIT_SIZE > 8 {
            None
        } else {
            let shave_bits = Self::num_bits_to_shave();
            let mut result_bytes = crate::const_helpers::SerBuffer::<N>::zeroed();
            result_bytes.copy_from_u8_slice(bytes);
            // Mask clearing the top-limb bits that lie above the modulus bit-length.
            let last_limb_mask =
                (u64::MAX.checked_shr(shave_bits as u32).unwrap_or(0)).to_le_bytes();
            let mut last_bytes_mask = [0u8; 9];
            last_bytes_mask[..8].copy_from_slice(&last_limb_mask);

            // Index of the byte that carries the flag bits in serialized form.
            let output_byte_size = buffer_byte_size(Self::MODULUS_BIT_SIZE as usize + F::BIT_SIZE);
            let flag_location = output_byte_size - 1;

            // Position of that byte relative to the start of the last limb.
            let flag_location_in_last_limb = flag_location.saturating_sub(8 * (N - 1));

            let last_bytes = result_bytes.last_n_plus_1_bytes_mut();

            // Bitmask selecting the top `F::BIT_SIZE` bits of a byte.
            let flags_mask = u8::MAX.checked_shl(8 - (F::BIT_SIZE as u32)).unwrap_or(0);

            // Extract the flag bits, then clear every bit that is not part of
            // the element itself.
            let mut flags: u8 = 0;
            for (i, (b, m)) in last_bytes.zip(&last_bytes_mask).enumerate() {
                if i == flag_location_in_last_limb {
                    flags = *b & flags_mask
                }
                *b &= m;
            }
            Self::deserialize_compressed(&result_bytes.as_slice()[..(N * 8)])
                .ok()
                .and_then(|f| F::from_u8(flags).map(|flag| (f, flag)))
        }
    }

    /// Returns `self * self` without mutating `self`.
    #[inline]
    fn square(&self) -> Self {
        let mut temp = *self;
        temp.square_in_place();
        temp
    }

    fn square_in_place(&mut self) -> &mut Self {
        P::square_in_place(self);
        self
    }

    /// Multiplicative inverse; `None` for zero.
    #[inline]
    fn inverse(&self) -> Option<Self> {
        P::inverse(self)
    }

    /// In-place inverse; leaves `self` untouched and returns `None` for zero.
    fn inverse_in_place(&mut self) -> Option<&mut Self> {
        self.inverse().map(|inverse| {
            *self = inverse;
            self
        })
    }

    // The Frobenius map x -> x^p is the identity on a prime field.
    #[inline]
    fn frobenius_map_in_place(&mut self, _: usize) {}

    /// Computes the Legendre symbol via Euler's criterion:
    /// `self^((p - 1) / 2)` is `0`, `1`, or `-1`.
    #[inline]
    fn legendre(&self) -> LegendreSymbol {
        let s = self.pow(Self::MODULUS_MINUS_ONE_DIV_TWO);
        if s.is_zero() {
            LegendreSymbol::Zero
        } else if s.is_one() {
            LegendreSymbol::QuadraticResidue
        } else {
            LegendreSymbol::QuadraticNonResidue
        }
    }

    // Base-field scalar multiplication is plain field multiplication here.
    #[inline]
    fn mul_by_base_prime_field(&self, elem: &Self::BasePrimeField) -> Self {
        *self * elem
    }
}
344
impl<P: FpConfig<N>, const N: usize> PrimeField for Fp<P, N> {
    type BigInt = BigInt<N>;
    /// The prime modulus `p`.
    const MODULUS: Self::BigInt = P::MODULUS;
    /// `(p - 1) / 2`.
    const MODULUS_MINUS_ONE_DIV_TWO: Self::BigInt = P::MODULUS.divide_by_2_round_down();
    /// Number of bits needed to represent `p`.
    const MODULUS_BIT_SIZE: u32 = P::MODULUS.const_num_bits();
    /// The odd factor `t` in `p - 1 = 2^s * t` (the two-adic coefficient).
    const TRACE: Self::BigInt = P::MODULUS.two_adic_coefficient();
    /// `(t - 1) / 2`.
    const TRACE_MINUS_ONE_DIV_TWO: Self::BigInt = Self::TRACE.divide_by_2_round_down();

    /// Builds an element from its integer form; `None` when the backend
    /// rejects `r` (presumably when it is not reduced — backend-defined).
    #[inline]
    fn from_bigint(r: BigInt<N>) -> Option<Self> {
        P::from_bigint(r)
    }

    /// Returns the canonical integer representative of `self`.
    fn into_bigint(self) -> BigInt<N> {
        P::into_bigint(self)
    }
}
362
// FFT parameters are forwarded verbatim from the field configuration.
impl<P: FpConfig<N>, const N: usize> FftField for Fp<P, N> {
    const GENERATOR: Self = P::GENERATOR;
    const TWO_ADICITY: u32 = P::TWO_ADICITY;
    const TWO_ADIC_ROOT_OF_UNITY: Self = P::TWO_ADIC_ROOT_OF_UNITY;
    const SMALL_SUBGROUP_BASE: Option<u32> = P::SMALL_SUBGROUP_BASE;
    const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = P::SMALL_SUBGROUP_BASE_ADICITY;
    const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Self> = P::LARGE_SUBGROUP_ROOT_OF_UNITY;
}
371
372impl<P: FpConfig<N>, const N: usize> Ord for Fp<P, N> {
379 #[inline(always)]
380 fn cmp(&self, other: &Self) -> Ordering {
381 self.into_bigint().cmp(&other.into_bigint())
382 }
383}
384
385impl<P: FpConfig<N>, const N: usize> PartialOrd for Fp<P, N> {
392 #[inline(always)]
393 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
394 Some(self.cmp(other))
395 }
396}
397
398impl<P: FpConfig<N>, const N: usize> From<u128> for Fp<P, N> {
399 fn from(mut other: u128) -> Self {
400 let mut result = BigInt::default();
401 if N == 1 {
402 result.0[0] = (other % u128::from(P::MODULUS.0[0])) as u64;
403 } else if N == 2 || P::MODULUS.0[2..].iter().all(Zero::is_zero) {
404 let mod_as_u128 = P::MODULUS.0[0] as u128 + ((P::MODULUS.0[1] as u128) << 64);
405 other %= mod_as_u128;
406 result.0[0] = ((other << 64) >> 64) as u64;
407 result.0[1] = (other >> 64) as u64;
408 } else {
409 result.0[0] = ((other << 64) >> 64) as u64;
410 result.0[1] = (other >> 64) as u64;
411 }
412 result.into()
413 }
414}
415
416impl<P: FpConfig<N>, const N: usize> From<i128> for Fp<P, N> {
417 fn from(other: i128) -> Self {
418 let abs = other.unsigned_abs().into();
419 if other.is_positive() {
420 abs
421 } else {
422 -abs
423 }
424 }
425}
426
427impl<P: FpConfig<N>, const N: usize> From<bool> for Fp<P, N> {
428 fn from(other: bool) -> Self {
429 if N == 1 {
430 BigInt::from(u64::from(other) % P::MODULUS.0[0]).into()
431 } else {
432 BigInt::from(u64::from(other)).into()
433 }
434 }
435}
436
437impl<P: FpConfig<N>, const N: usize> From<u64> for Fp<P, N> {
438 fn from(other: u64) -> Self {
439 if N == 1 {
440 BigInt::from(other % P::MODULUS.0[0]).into()
441 } else {
442 BigInt::from(other).into()
443 }
444 }
445}
446
447impl<P: FpConfig<N>, const N: usize> From<i64> for Fp<P, N> {
448 fn from(other: i64) -> Self {
449 let abs = other.unsigned_abs().into();
450 if other.is_positive() {
451 abs
452 } else {
453 -abs
454 }
455 }
456}
457
458impl<P: FpConfig<N>, const N: usize> From<u32> for Fp<P, N> {
459 fn from(other: u32) -> Self {
460 if N == 1 {
461 BigInt::from(u64::from(other) % P::MODULUS.0[0]).into()
462 } else {
463 BigInt::from(other).into()
464 }
465 }
466}
467
468impl<P: FpConfig<N>, const N: usize> From<i32> for Fp<P, N> {
469 fn from(other: i32) -> Self {
470 let abs = other.unsigned_abs().into();
471 if other.is_positive() {
472 abs
473 } else {
474 -abs
475 }
476 }
477}
478
479impl<P: FpConfig<N>, const N: usize> From<u16> for Fp<P, N> {
480 fn from(other: u16) -> Self {
481 if N == 1 {
482 BigInt::from(u64::from(other) % P::MODULUS.0[0]).into()
483 } else {
484 BigInt::from(other).into()
485 }
486 }
487}
488
489impl<P: FpConfig<N>, const N: usize> From<i16> for Fp<P, N> {
490 fn from(other: i16) -> Self {
491 let abs = other.unsigned_abs().into();
492 if other.is_positive() {
493 abs
494 } else {
495 -abs
496 }
497 }
498}
499
500impl<P: FpConfig<N>, const N: usize> From<u8> for Fp<P, N> {
501 fn from(other: u8) -> Self {
502 if N == 1 {
503 BigInt::from(u64::from(other) % P::MODULUS.0[0]).into()
504 } else {
505 BigInt::from(other).into()
506 }
507 }
508}
509
510impl<P: FpConfig<N>, const N: usize> From<i8> for Fp<P, N> {
511 fn from(other: i8) -> Self {
512 let abs = other.unsigned_abs().into();
513 if other.is_positive() {
514 abs
515 } else {
516 -abs
517 }
518 }
519}
520
impl<P: FpConfig<N>, const N: usize> ark_std::rand::distributions::Distribution<Fp<P, N>>
    for ark_std::rand::distributions::Standard
{
    /// Samples a field element by rejection sampling: draw `N` random limbs,
    /// mask off the bits above the modulus bit-length, and retry until the
    /// candidate is strictly below the modulus.
    #[inline]
    fn sample<R: ark_std::rand::Rng + ?Sized>(&self, rng: &mut R) -> Fp<P, N> {
        loop {
            let mut tmp = Fp(
                rng.sample(ark_std::rand::distributions::Standard),
                PhantomData,
            );
            // Unused high bits of the top limb (beyond the modulus bit-length).
            let shave_bits = Fp::<P, N>::num_bits_to_shave();
            assert!(shave_bits <= 64);
            // Mask keeping only bits that can belong to a valid element.
            let mask = if shave_bits == 64 {
                0
            } else {
                u64::MAX >> shave_bits
            };

            if let Some(val) = tmp.0 .0.last_mut() {
                *val &= mask
            }

            // Accept only reduced candidates; otherwise resample.
            if !tmp.is_geq_modulus() {
                return tmp;
            }
        }
    }
}
550
impl<P: FpConfig<N>, const N: usize> CanonicalSerializeWithFlags for Fp<P, N> {
    /// Writes the canonical little-endian bytes of `self`, packing the flag
    /// bits into the high bits of the final serialized byte.
    fn serialize_with_flags<W: ark_std::io::Write, F: Flags>(
        &self,
        writer: W,
        flags: F,
    ) -> Result<(), SerializationError> {
        // Flags must fit in the single spare byte; larger flag types are
        // unsupported.
        if F::BIT_SIZE > 8 {
            return Err(SerializationError::NotEnoughSpace);
        }

        // Total bytes needed for the element bits plus the flag bits.
        let output_byte_size = buffer_byte_size(Self::MODULUS_BIT_SIZE as usize + F::BIT_SIZE);

        let mut bytes = crate::const_helpers::SerBuffer::zeroed();
        bytes.copy_from_u64_slice(&self.into_bigint().0);
        // OR the flag bitmask into the last byte that will be written.
        bytes[output_byte_size - 1] |= flags.u8_bitmask();

        bytes.write_up_to(writer, output_byte_size)?;
        Ok(())
    }

    /// Serialized size in bytes: modulus bit-length plus flag bits, rounded
    /// up to whole bytes.
    fn serialized_size_with_flags<F: Flags>(&self) -> usize {
        buffer_byte_size(Self::MODULUS_BIT_SIZE as usize + F::BIT_SIZE)
    }
}
588
589impl<P: FpConfig<N>, const N: usize> CanonicalSerialize for Fp<P, N> {
590 #[inline]
591 fn serialize_with_mode<W: ark_std::io::Write>(
592 &self,
593 writer: W,
594 _compress: Compress,
595 ) -> Result<(), SerializationError> {
596 self.serialize_with_flags(writer, EmptyFlags)
597 }
598
599 #[inline]
600 fn serialized_size(&self, _compress: Compress) -> usize {
601 self.serialized_size_with_flags::<EmptyFlags>()
602 }
603}
604
impl<P: FpConfig<N>, const N: usize> CanonicalDeserializeWithFlags for Fp<P, N> {
    /// Reads the serialized bytes, strips the flag bits from the final byte,
    /// and reconstructs the element from the remaining integer.
    fn deserialize_with_flags<R: ark_std::io::Read, F: Flags>(
        reader: R,
    ) -> Result<(Self, F), SerializationError> {
        // Flags must fit in the single spare byte; larger flag types are
        // unsupported.
        if F::BIT_SIZE > 8 {
            return Err(SerializationError::NotEnoughSpace);
        }
        // Number of bytes the matching serializer produced.
        let output_byte_size = Self::zero().serialized_size_with_flags::<F>();

        let mut masked_bytes = crate::const_helpers::SerBuffer::zeroed();
        masked_bytes.read_exact_up_to(reader, output_byte_size)?;
        // Extract and clear the flag bits so only element bits remain.
        let flags = F::from_u8_remove_flags(&mut masked_bytes[output_byte_size - 1])
            .ok_or(SerializationError::UnexpectedFlags)?;

        let self_integer = masked_bytes.to_bigint();
        // Rejecting non-canonical integers yields `InvalidData`.
        Self::from_bigint(self_integer)
            .map(|v| (v, flags))
            .ok_or(SerializationError::InvalidData)
    }
}
629
630impl<P: FpConfig<N>, const N: usize> Valid for Fp<P, N> {
631 const TRIVIAL_CHECK: bool = true;
632 #[inline]
633 fn check(&self) -> Result<(), SerializationError> {
634 Ok(())
635 }
636
637 #[inline]
638 fn batch_check<'a>(_: impl Iterator<Item = &'a Self> + Send) -> Result<(), SerializationError>
639 where
640 Self: 'a,
641 {
642 Ok(())
643 }
644}
645
646impl<P: FpConfig<N>, const N: usize> CanonicalDeserialize for Fp<P, N> {
647 fn deserialize_with_mode<R: ark_std::io::Read>(
648 reader: R,
649 _compress: Compress,
650 _validate: Validate,
651 ) -> Result<Self, SerializationError> {
652 Self::deserialize_with_flags::<R, EmptyFlags>(reader).map(|(r, _)| r)
653 }
654}
655
impl<P: FpConfig<N>, const N: usize> FromStr for Fp<P, N> {
    type Err = ();

    /// Parses a (possibly negative) decimal string, reducing the value
    /// modulo `P::MODULUS`. Returns `Err(())` on any parse or conversion
    /// failure.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        use num_bigint::{BigInt, BigUint};
        use num_traits::Signed;

        let modulus = BigInt::from(P::MODULUS);
        // Reduce first so arbitrarily large inputs are accepted.
        let mut a = BigInt::from_str(s).map_err(|_| ())? % &modulus;
        // Map negative representatives into [0, modulus).
        if a.is_negative() {
            a += modulus
        }
        // BigInt -> BigUint -> crate BigInt<N> -> Fp, failing with () anywhere.
        BigUint::try_from(a)
            .map_err(|_| ())
            .and_then(TryFrom::try_from)
            .ok()
            .and_then(Self::from_bigint)
            .ok_or(())
    }
}
678
679impl<P: FpConfig<N>, const N: usize> Display for Fp<P, N> {
682 #[inline]
683 fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
684 let string = self.into_bigint().to_string();
685 write!(f, "{string}")
686 }
687}
688
689impl<P: FpConfig<N>, const N: usize> Neg for Fp<P, N> {
690 type Output = Self;
691 #[inline]
692 fn neg(mut self) -> Self {
693 P::neg_in_place(&mut self);
694 self
695 }
696}
697
698impl<P: FpConfig<N>, const N: usize> Add<&Fp<P, N>> for Fp<P, N> {
699 type Output = Self;
700
701 #[inline]
702 fn add(mut self, other: &Self) -> Self {
703 self += other;
704 self
705 }
706}
707
708impl<P: FpConfig<N>, const N: usize> Sub<&Fp<P, N>> for Fp<P, N> {
709 type Output = Self;
710
711 #[inline]
712 fn sub(mut self, other: &Self) -> Self {
713 self -= other;
714 self
715 }
716}
717
718impl<P: FpConfig<N>, const N: usize> Mul<&Fp<P, N>> for Fp<P, N> {
719 type Output = Self;
720
721 #[inline]
722 fn mul(mut self, other: &Self) -> Self {
723 self *= other;
724 self
725 }
726}
727
728impl<P: FpConfig<N>, const N: usize> Div<&Fp<P, N>> for Fp<P, N> {
729 type Output = Self;
730
731 #[inline]
734 #[allow(clippy::suspicious_arithmetic_impl)]
735 fn div(mut self, other: &Self) -> Self {
736 self *= &other.inverse().unwrap();
737 self
738 }
739}
740
741impl<'b, P: FpConfig<N>, const N: usize> Add<&'b Fp<P, N>> for &Fp<P, N> {
742 type Output = Fp<P, N>;
743
744 #[inline]
745 fn add(self, other: &'b Fp<P, N>) -> Fp<P, N> {
746 let mut result = *self;
747 result += other;
748 result
749 }
750}
751
752impl<P: FpConfig<N>, const N: usize> Sub<&Fp<P, N>> for &Fp<P, N> {
753 type Output = Fp<P, N>;
754
755 #[inline]
756 fn sub(self, other: &Fp<P, N>) -> Fp<P, N> {
757 let mut result = *self;
758 result -= other;
759 result
760 }
761}
762
763impl<P: FpConfig<N>, const N: usize> Mul<&Fp<P, N>> for &Fp<P, N> {
764 type Output = Fp<P, N>;
765
766 #[inline]
767 fn mul(self, other: &Fp<P, N>) -> Fp<P, N> {
768 let mut result = *self;
769 result *= other;
770 result
771 }
772}
773
774impl<P: FpConfig<N>, const N: usize> Div<&Fp<P, N>> for &Fp<P, N> {
775 type Output = Fp<P, N>;
776
777 #[inline]
778 fn div(self, other: &Fp<P, N>) -> Fp<P, N> {
779 let mut result = *self;
780 result.div_assign(other);
781 result
782 }
783}
784
785impl<P: FpConfig<N>, const N: usize> AddAssign<&Self> for Fp<P, N> {
786 #[inline]
787 fn add_assign(&mut self, other: &Self) {
788 P::add_assign(self, other)
789 }
790}
791
792impl<P: FpConfig<N>, const N: usize> SubAssign<&Self> for Fp<P, N> {
793 #[inline]
794 fn sub_assign(&mut self, other: &Self) {
795 P::sub_assign(self, other);
796 }
797}
798
799impl<P: FpConfig<N>, const N: usize> core::ops::Add<Self> for Fp<P, N> {
800 type Output = Self;
801
802 #[inline]
803 fn add(mut self, other: Self) -> Self {
804 self += &other;
805 self
806 }
807}
808
809impl<'a, P: FpConfig<N>, const N: usize> core::ops::Add<&'a mut Self> for Fp<P, N> {
810 type Output = Self;
811
812 #[inline]
813 fn add(mut self, other: &'a mut Self) -> Self {
814 self += &*other;
815 self
816 }
817}
818
819impl<P: FpConfig<N>, const N: usize> core::ops::Sub<Self> for Fp<P, N> {
820 type Output = Self;
821
822 #[inline]
823 fn sub(mut self, other: Self) -> Self {
824 self -= &other;
825 self
826 }
827}
828
829impl<'a, P: FpConfig<N>, const N: usize> core::ops::Sub<&'a mut Self> for Fp<P, N> {
830 type Output = Self;
831
832 #[inline]
833 fn sub(mut self, other: &'a mut Self) -> Self {
834 self -= &*other;
835 self
836 }
837}
838
839impl<P: FpConfig<N>, const N: usize> core::iter::Sum<Self> for Fp<P, N> {
840 fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
841 iter.fold(Self::zero(), core::ops::Add::add)
842 }
843}
844
845impl<'a, P: FpConfig<N>, const N: usize> core::iter::Sum<&'a Self> for Fp<P, N> {
846 fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
847 iter.fold(Self::zero(), core::ops::Add::add)
848 }
849}
850
851impl<P: FpConfig<N>, const N: usize> core::ops::AddAssign<Self> for Fp<P, N> {
852 #[inline(always)]
853 fn add_assign(&mut self, other: Self) {
854 *self += &other
855 }
856}
857
858impl<P: FpConfig<N>, const N: usize> core::ops::SubAssign<Self> for Fp<P, N> {
859 #[inline(always)]
860 fn sub_assign(&mut self, other: Self) {
861 *self -= &other
862 }
863}
864
865impl<'a, P: FpConfig<N>, const N: usize> core::ops::AddAssign<&'a mut Self> for Fp<P, N> {
866 #[inline(always)]
867 fn add_assign(&mut self, other: &'a mut Self) {
868 *self += &*other
869 }
870}
871
872impl<'a, P: FpConfig<N>, const N: usize> core::ops::SubAssign<&'a mut Self> for Fp<P, N> {
873 #[inline(always)]
874 fn sub_assign(&mut self, other: &'a mut Self) {
875 *self -= &*other
876 }
877}
878
879impl<P: FpConfig<N>, const N: usize> MulAssign<&Self> for Fp<P, N> {
880 fn mul_assign(&mut self, other: &Self) {
881 P::mul_assign(self, other)
882 }
883}
884
885impl<P: FpConfig<N>, const N: usize> DivAssign<&Self> for Fp<P, N> {
888 #[inline(always)]
889 fn div_assign(&mut self, other: &Self) {
890 *self *= &other.inverse().unwrap();
891 }
892}
893
894impl<P: FpConfig<N>, const N: usize> core::ops::Mul<Self> for Fp<P, N> {
895 type Output = Self;
896
897 #[inline(always)]
898 fn mul(mut self, other: Self) -> Self {
899 self *= &other;
900 self
901 }
902}
903
904impl<P: FpConfig<N>, const N: usize> core::ops::Div<Self> for Fp<P, N> {
905 type Output = Self;
906
907 #[inline(always)]
908 fn div(mut self, other: Self) -> Self {
909 self.div_assign(&other);
910 self
911 }
912}
913
914impl<'a, P: FpConfig<N>, const N: usize> core::ops::Mul<&'a mut Self> for Fp<P, N> {
915 type Output = Self;
916
917 #[inline(always)]
918 fn mul(mut self, other: &'a mut Self) -> Self {
919 self *= &*other;
920 self
921 }
922}
923
924impl<'a, P: FpConfig<N>, const N: usize> core::ops::Div<&'a mut Self> for Fp<P, N> {
925 type Output = Self;
926
927 #[inline(always)]
928 fn div(mut self, other: &'a mut Self) -> Self {
929 self.div_assign(&*other);
930 self
931 }
932}
933
934impl<P: FpConfig<N>, const N: usize> core::iter::Product<Self> for Fp<P, N> {
935 fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
936 iter.fold(Self::one(), core::ops::Mul::mul)
937 }
938}
939
940impl<'a, P: FpConfig<N>, const N: usize> core::iter::Product<&'a Self> for Fp<P, N> {
941 fn product<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
942 iter.fold(Self::one(), Mul::mul)
943 }
944}
945
946impl<P: FpConfig<N>, const N: usize> core::ops::MulAssign<Self> for Fp<P, N> {
947 #[inline(always)]
948 fn mul_assign(&mut self, other: Self) {
949 *self *= &other
950 }
951}
952
953impl<'a, P: FpConfig<N>, const N: usize> core::ops::DivAssign<&'a mut Self> for Fp<P, N> {
954 #[inline(always)]
955 fn div_assign(&mut self, other: &'a mut Self) {
956 self.div_assign(&*other)
957 }
958}
959
960impl<'a, P: FpConfig<N>, const N: usize> core::ops::MulAssign<&'a mut Self> for Fp<P, N> {
961 #[inline(always)]
962 fn mul_assign(&mut self, other: &'a mut Self) {
963 *self *= &*other
964 }
965}
966
967impl<P: FpConfig<N>, const N: usize> core::ops::DivAssign<Self> for Fp<P, N> {
968 #[inline(always)]
969 fn div_assign(&mut self, other: Self) {
970 self.div_assign(&other)
971 }
972}
973
impl<P: FpConfig<N>, const N: usize> zeroize::Zeroize for Fp<P, N> {
    // The phantom marker carries no data, so zeroizing the limbs suffices.
    fn zeroize(&mut self) {
        self.0.zeroize();
    }
}
981
982impl<P: FpConfig<N>, const N: usize> From<num_bigint::BigUint> for Fp<P, N> {
983 #[inline]
984 fn from(val: num_bigint::BigUint) -> Fp<P, N> {
985 Fp::from_le_bytes_mod_order(&val.to_bytes_le())
986 }
987}
988
989impl<P: FpConfig<N>, const N: usize> From<Fp<P, N>> for num_bigint::BigUint {
990 #[inline(always)]
991 fn from(other: Fp<P, N>) -> Self {
992 other.into_bigint().into()
993 }
994}
995
996impl<P: FpConfig<N>, const N: usize> From<Fp<P, N>> for BigInt<N> {
997 #[inline(always)]
998 fn from(fp: Fp<P, N>) -> Self {
999 fp.into_bigint()
1000 }
1001}
1002
impl<P: FpConfig<N>, const N: usize> From<BigInt<N>> for Fp<P, N> {
    /// Infallible conversion wrapper around `from_bigint`.
    ///
    /// # Panics
    /// Panics if the backend rejects `int` (presumably when it is not
    /// reduced modulo the field modulus — confirm with the backend).
    #[inline(always)]
    fn from(int: BigInt<N>) -> Self {
        Self::from_bigint(int).unwrap()
    }
}