use ark_ff::{fields::Fp64, BigInt, PrimeField, SqrtPrecomputation, Zero};
use std::marker::PhantomData;

// A field defined through arkworks' generic `MontConfig` derive (62-bit
// modulus), usable as a baseline alongside the hand-written `FpConfig`
// implementation below.
#[derive(ark_ff::MontConfig)]
#[modulus = "4611624995532046337"]
#[generator = "3"]
pub struct Fp7Params;
pub type Generic = ark_ff::Fp64<ark_ff::MontBackend<Fp7Params, 1>>;

/// The modulus p = 2^64 - 2^32 + 1 (the "Goldilocks" prime).
const MODULUS: u64 = 18_446_744_069_414_584_321;
/// R^2 mod p, where R = 2^64 is the Montgomery radix.
const R2: u64 = 18_446_744_065_119_617_025;

/// Hand-written `FpConfig` for the Goldilocks field. Elements are stored in
/// Montgomery form, i.e. `a` is represented by `a * R mod p` with `R = 2^64`.
pub struct FpParams;

impl ark_ff::FpConfig<1> for FpParams {
    const MODULUS: ark_ff::BigInt<1> = BigInt([MODULUS]);
    const GENERATOR: Fp64<Self> = into_mont(7);
    const ZERO: Fp64<Self> = into_mont(0);
    const ONE: Fp64<Self> = into_mont(1);
    // p - 1 = 2^32 * (2^32 - 1), so the field has 2-adicity 32.
    const TWO_ADICITY: u32 = 32;
    const TWO_ADIC_ROOT_OF_UNITY: Fp64<Self> = into_mont(1_753_635_133_440_165_772);
    const SQRT_PRECOMP: Option<ark_ff::SqrtPrecomputation<Fp64<Self>>> =
        Some(SqrtPrecomputation::TonelliShanks {
            two_adicity: Self::TWO_ADICITY,
            quadratic_nonresidue_to_trace: Self::TWO_ADIC_ROOT_OF_UNITY,
            trace_of_modulus_minus_one_div_two: &<Fp64<Self>>::TRACE_MINUS_ONE_DIV_TWO.0,
        });

    fn add_assign(a: &mut Fp64<Self>, b: &Fp64<Self>) {
        // a + b computed as a - (p - b); if that borrows, the wrap-around by
        // 2^64 is corrected by subtracting 2^64 - p = 2^32 - 1.
        let (x1, c1) = (a.0).0[0].overflowing_sub(MODULUS - (b.0).0[0]);
        let adj = 0u32.wrapping_sub(u32::from(c1));
        (a.0).0[0] = x1.wrapping_sub(u64::from(adj));
    }

    fn sub_assign(a: &mut Fp64<Self>, b: &Fp64<Self>) {
        // a - b; on borrow, subtract 2^32 - 1 to add p back modulo 2^64.
        let (x1, c1) = (a.0).0[0].overflowing_sub((b.0).0[0]);
        let adj = 0u32.wrapping_sub(u32::from(c1));
        (a.0).0[0] = x1.wrapping_sub(u64::from(adj));
    }

    fn double_in_place(a: &mut Fp64<Self>) {
        Self::add_assign(a, &a.clone());
    }

    fn mul_assign(a: &mut Fp64<Self>, b: &Fp64<Self>) {
        // Montgomery multiplication: the reduction strips one factor of R.
        (a.0).0[0] = mont_red(u128::from((a.0).0[0]) * u128::from((b.0).0[0]));
    }

    fn sum_of_products<const T: usize>(a: &[Fp64<Self>; T], b: &[Fp64<Self>; T]) -> Fp64<Self> {
        a.iter().zip(b).map(|(&a, b)| a * b).sum()
    }

    fn square_in_place(a: &mut Fp64<Self>) {
        let temp = *a;
        Self::mul_assign(a, &temp);
    }

    fn inverse(a: &Fp64<Self>) -> Option<Fp64<Self>> {
        if a.is_zero() {
            None
        } else {
            // a^(p - 2) via a fixed addition chain for the exponent
            // p - 2 = 0xFFFFFFFE_FFFFFFFF (Fermat's little theorem).
            let a = (a.0).0[0];
            let t2 = exp_acc::<1>(a, a);
            let t3 = exp_acc::<1>(t2, a);
            let t6 = exp_acc::<3>(t3, t3);
            let t12 = exp_acc::<6>(t6, t6);
            let t24 = exp_acc::<12>(t12, t12);
            let t30 = exp_acc::<6>(t24, t6);
            let t31 = exp_acc::<1>(t30, a);
            let t63 = exp_acc::<32>(t31, t31);
            let inv = exp_acc::<1>(t63, a);
            Some(ark_ff::Fp(BigInt([inv]), PhantomData))
        }
    }

    fn from_bigint(other: ark_ff::BigInt<1>) -> Option<Fp64<Self>> {
        // Only canonical inputs (< p) are accepted; they are converted into
        // Montgomery form.
        let inner = other.0[0];
        if inner.is_zero() {
            Some(Self::ZERO)
        } else if inner < MODULUS {
            Some(into_mont(inner))
        } else {
            None
        }
    }

    fn into_bigint(other: Fp64<Self>) -> ark_ff::BigInt<1> {
        // Reducing `a * R` once more strips the Montgomery factor.
        BigInt([mont_red(u128::from((other.0).0[0]))])
    }

    fn neg_in_place(a: &mut Fp64<Self>) {
        // -a = 0 - a.
        let mut tmp = Self::ZERO;
        Self::sub_assign(&mut tmp, a);
        a.0 = tmp.0;
    }
}

/// The optimized Goldilocks field.
pub type Fp = Fp64<FpParams>;

/// Converts `value` into Montgomery form, returning `value * R mod p` by
/// Montgomery-reducing `value * R^2`.
#[inline]
const fn into_mont(value: u64) -> Fp {
    ark_ff::Fp(BigInt([mont_red(value as u128 * R2 as u128)]), PhantomData)
}

/// Montgomery reduction specialized to p = 2^64 - 2^32 + 1: returns
/// `x * 2^(-64) mod p` for a 128-bit input `x`.
#[inline]
const fn mont_red(x: u128) -> u64 {
    #[allow(clippy::cast_possible_truncation)]
    let xl = x as u64;
    let xh = (x >> 64) as u64;
    let (a, overflow) = xl.overflowing_add(xl << 32);
    let b = a.wrapping_sub(a >> 32).wrapping_sub(overflow as u64);
    let (r, underflow) = xh.overflowing_sub(b);
    r.wrapping_sub(0u32.wrapping_sub(underflow as u32) as u64)
}

/// Squares `base` N times, then multiplies by `tail`; i.e. computes
/// `base^(2^N) * tail` on Montgomery-form operands.
#[inline]
const fn exp_acc<const N: usize>(base: u64, tail: u64) -> u64 {
    let mut result = base;
    let mut i = 0;
    while i < N {
        result = mont_red(result as u128 * result as u128);
        i += 1;
    }
    mont_red(result as u128 * tail as u128)
}

#[cfg(test)]
mod tests {
    use super::Fp as TestField;
    use ark_algebra_test_templates::test_field;

    test_field!(generated; TestField; prime);
}
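
// A few extra illustrative sanity checks, sketched on top of the arkworks
// field test suite above. They only exercise the `FpConfig` entry points
// defined in this file together with the standard `ark_ff::Field` trait; the
// module name and the `fp` helper are local conveniences, not upstream API.
#[cfg(test)]
mod sanity_checks {
    use super::{Fp, FpParams, MODULUS};
    use ark_ff::{BigInt, Field, FpConfig};

    // Builds a canonical field element from a u64 smaller than the modulus.
    fn fp(x: u64) -> Fp {
        FpParams::from_bigint(BigInt([x])).expect("value must be canonical")
    }

    #[test]
    fn bigint_round_trip() {
        // from_bigint followed by into_bigint is the identity on canonical values.
        let x = 123_456_789u64;
        assert_eq!(FpParams::into_bigint(fp(x)), BigInt([x]));
    }

    #[test]
    fn addition_wraps_at_the_modulus() {
        // (p - 1) + 1 == 0 in the field.
        assert_eq!(fp(MODULUS - 1) + FpParams::ONE, FpParams::ZERO);
    }

    #[test]
    fn inverse_matches_fermat() {
        // a * a^(-1) == 1 for a nonzero element.
        let a = fp(7);
        assert_eq!(a * a.inverse().unwrap(), FpParams::ONE);
    }
}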