Struct secret_integers::U64
source · pub struct U64(pub u64);
Tuple Fields§
§0: u64
Implementations§
source§impl U64
impl U64
sourcepub fn classify<T: Into<u64>>(x: T) -> Self
pub fn classify<T: Into<u64>>(x: T) -> Self
Examples found in repository?
(Example excerpt, lines 28–139 of the repository source file:)
// Schoolbook multiplication of two field elements stored as five 51-bit
// limbs. The repeated *19 terms fold the high limbs back down, i.e. the
// reduction uses 2^255 ≡ 19 — the shape of Curve25519-style field
// arithmetic (TODO confirm against the enclosing impl, which is outside
// this excerpt). All limb arithmetic stays in the secret-integer types
// U64/U128 so the multiplication keeps their constant-time guarantee.
fn mul(self, _rhs: &'b FieldElement64) -> FieldElement64 {
/// Helper function to multiply two 64-bit integers with 128
/// bits of output.
#[inline(always)]
fn m(x: U64, y: U64) -> U128 {
U128::from(x) * y.into()
}
// Alias self, _rhs for more readable formulas
let a: &[Limb; 5] = &self.0;
let b: &[Limb; 5] = &_rhs.0;
// Precondition: assume input limbs a[i], b[i] are bounded as
//
// a[i], b[i] < 2^(51 + b)
//
// where b is a real parameter measuring the "bit excess" of the limbs.
// 64-bit precomputations to avoid 128-bit multiplications.
//
// This fits into a u64 whenever 51 + b + lg(19) < 64.
//
// Since 51 + b + lg(19) < 51 + 4.25 + b
// = 55.25 + b,
// this fits if b < 8.75.
// Lift the public constant 19 into the secret-integer domain (via the
// From/classify conversion) so it can be multiplied against secret limbs.
let nineteen = 19u64.into();
let b1_19 = b[1] * nineteen;
let b2_19 = b[2] * nineteen;
let b3_19 = b[3] * nineteen;
let b4_19 = b[4] * nineteen;
// Multiply to get 128-bit coefficients of output
let c0: U128 =
m(a[0], b[0]) + m(a[4], b1_19) + m(a[3], b2_19) + m(a[2], b3_19) + m(a[1], b4_19);
let mut c1: U128 =
m(a[1], b[0]) + m(a[0], b[1]) + m(a[4], b2_19) + m(a[3], b3_19) + m(a[2], b4_19);
let mut c2: U128 =
m(a[2], b[0]) + m(a[1], b[1]) + m(a[0], b[2]) + m(a[4], b3_19) + m(a[3], b4_19);
let mut c3: U128 =
m(a[3], b[0]) + m(a[2], b[1]) + m(a[1], b[2]) + m(a[0], b[3]) + m(a[4], b4_19);
let mut c4: U128 =
m(a[4], b[0]) + m(a[3], b[1]) + m(a[2], b[2]) + m(a[1], b[3]) + m(a[0], b[4]);
// How big are the c[i]? We have
//
// c[i] < 2^(102 + 2*b) * (1+i + (4-i)*19)
// < 2^(102 + lg(1 + 4*19) + 2*b)
// < 2^(108.27 + 2*b)
//
// The carry (c[i] >> 51) fits into a u64 when
// 108.27 + 2*b - 51 < 64
// 2*b < 6.73
// b < 3.365.
//
// So we require b < 3 to ensure this fits.
// Debug-build-only bound checks: declassify() reveals the secret value
// and breaks the constant-time guarantee (see its warning), which is
// tolerable only because debug_assert! compiles away in release builds.
debug_assert!(U64::declassify(a[0]) < (1 << 54));
debug_assert!(U64::declassify(b[0]) < (1 << 54));
debug_assert!(U64::declassify(a[1]) < (1 << 54));
debug_assert!(U64::declassify(b[1]) < (1 << 54));
debug_assert!(U64::declassify(a[2]) < (1 << 54));
debug_assert!(U64::declassify(b[2]) < (1 << 54));
debug_assert!(U64::declassify(a[3]) < (1 << 54));
debug_assert!(U64::declassify(b[3]) < (1 << 54));
debug_assert!(U64::declassify(a[4]) < (1 << 54));
debug_assert!(U64::declassify(b[4]) < (1 << 54));
// Casting to u64 and back tells the compiler that the carry is
// bounded by 2^64, so that the addition is a u128 + u64 rather
// than u128 + u128.
const LOW_51_BIT_MASK: u64 = (1u64 << 51) - 1;
let mut out = [U64::classify(0u64); 5];
// Carry-propagate: fold the bits above position 51 of each coefficient
// into the next one, keeping only the low 51 bits as the output limb.
c1 += U64::from(c0 >> 51).into();
out[0] = U64::from(c0) & LOW_51_BIT_MASK.into();
c2 += U64::from(c1 >> 51).into();
out[1] = U64::from(c1) & LOW_51_BIT_MASK.into();
c3 += U64::from(c2 >> 51).into();
out[2] = U64::from(c2) & LOW_51_BIT_MASK.into();
c4 += U64::from(c3 >> 51).into();
out[3] = U64::from(c3) & LOW_51_BIT_MASK.into();
let carry: U64 = (c4 >> 51).into();
out[4] = U64::from(c4) & LOW_51_BIT_MASK.into();
// To see that this does not overflow, we need out[0] + carry * 19 < 2^64.
//
// c4 < a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + (carry from c3)
// < 5*(2^(51 + b) * 2^(51 + b)) + (carry from c3)
// < 2^(102 + 2*b + lg(5)) + 2^64.
//
// When b < 3 we get
//
// c4 < 2^110.33 so that carry < 2^59.33
//
// so that
//
// out[0] + carry * 19 < 2^51 + 19 * 2^59.33 < 2^63.58
//
// and there is no overflow.
out[0] = out[0] + carry * nineteen;
// Now out[1] < 2^51 + 2^(64 -51) = 2^51 + 2^13 < 2^(51 + epsilon).
out[1] += out[0] >> 51;
out[0] &= LOW_51_BIT_MASK.into();
// Now out[i] < 2^(51 + epsilon) for all i.
FieldElement64(out)
}
sourcepub fn declassify(self) -> u64
pub fn declassify(self) -> u64
Warning: use with caution, breaks the constant-time guarantee.
Examples found in repository?
(Example excerpt, lines 28–139 of the repository source file:)
// Schoolbook multiplication of two field elements stored as five 51-bit
// limbs. The repeated *19 terms fold the high limbs back down, i.e. the
// reduction uses 2^255 ≡ 19 — the shape of Curve25519-style field
// arithmetic (TODO confirm against the enclosing impl, which is outside
// this excerpt). All limb arithmetic stays in the secret-integer types
// U64/U128 so the multiplication keeps their constant-time guarantee.
fn mul(self, _rhs: &'b FieldElement64) -> FieldElement64 {
/// Helper function to multiply two 64-bit integers with 128
/// bits of output.
#[inline(always)]
fn m(x: U64, y: U64) -> U128 {
U128::from(x) * y.into()
}
// Alias self, _rhs for more readable formulas
let a: &[Limb; 5] = &self.0;
let b: &[Limb; 5] = &_rhs.0;
// Precondition: assume input limbs a[i], b[i] are bounded as
//
// a[i], b[i] < 2^(51 + b)
//
// where b is a real parameter measuring the "bit excess" of the limbs.
// 64-bit precomputations to avoid 128-bit multiplications.
//
// This fits into a u64 whenever 51 + b + lg(19) < 64.
//
// Since 51 + b + lg(19) < 51 + 4.25 + b
// = 55.25 + b,
// this fits if b < 8.75.
// Lift the public constant 19 into the secret-integer domain (via the
// From/classify conversion) so it can be multiplied against secret limbs.
let nineteen = 19u64.into();
let b1_19 = b[1] * nineteen;
let b2_19 = b[2] * nineteen;
let b3_19 = b[3] * nineteen;
let b4_19 = b[4] * nineteen;
// Multiply to get 128-bit coefficients of output
let c0: U128 =
m(a[0], b[0]) + m(a[4], b1_19) + m(a[3], b2_19) + m(a[2], b3_19) + m(a[1], b4_19);
let mut c1: U128 =
m(a[1], b[0]) + m(a[0], b[1]) + m(a[4], b2_19) + m(a[3], b3_19) + m(a[2], b4_19);
let mut c2: U128 =
m(a[2], b[0]) + m(a[1], b[1]) + m(a[0], b[2]) + m(a[4], b3_19) + m(a[3], b4_19);
let mut c3: U128 =
m(a[3], b[0]) + m(a[2], b[1]) + m(a[1], b[2]) + m(a[0], b[3]) + m(a[4], b4_19);
let mut c4: U128 =
m(a[4], b[0]) + m(a[3], b[1]) + m(a[2], b[2]) + m(a[1], b[3]) + m(a[0], b[4]);
// How big are the c[i]? We have
//
// c[i] < 2^(102 + 2*b) * (1+i + (4-i)*19)
// < 2^(102 + lg(1 + 4*19) + 2*b)
// < 2^(108.27 + 2*b)
//
// The carry (c[i] >> 51) fits into a u64 when
// 108.27 + 2*b - 51 < 64
// 2*b < 6.73
// b < 3.365.
//
// So we require b < 3 to ensure this fits.
// Debug-build-only bound checks: declassify() reveals the secret value
// and breaks the constant-time guarantee (see its warning), which is
// tolerable only because debug_assert! compiles away in release builds.
debug_assert!(U64::declassify(a[0]) < (1 << 54));
debug_assert!(U64::declassify(b[0]) < (1 << 54));
debug_assert!(U64::declassify(a[1]) < (1 << 54));
debug_assert!(U64::declassify(b[1]) < (1 << 54));
debug_assert!(U64::declassify(a[2]) < (1 << 54));
debug_assert!(U64::declassify(b[2]) < (1 << 54));
debug_assert!(U64::declassify(a[3]) < (1 << 54));
debug_assert!(U64::declassify(b[3]) < (1 << 54));
debug_assert!(U64::declassify(a[4]) < (1 << 54));
debug_assert!(U64::declassify(b[4]) < (1 << 54));
// Casting to u64 and back tells the compiler that the carry is
// bounded by 2^64, so that the addition is a u128 + u64 rather
// than u128 + u128.
const LOW_51_BIT_MASK: u64 = (1u64 << 51) - 1;
let mut out = [U64::classify(0u64); 5];
// Carry-propagate: fold the bits above position 51 of each coefficient
// into the next one, keeping only the low 51 bits as the output limb.
c1 += U64::from(c0 >> 51).into();
out[0] = U64::from(c0) & LOW_51_BIT_MASK.into();
c2 += U64::from(c1 >> 51).into();
out[1] = U64::from(c1) & LOW_51_BIT_MASK.into();
c3 += U64::from(c2 >> 51).into();
out[2] = U64::from(c2) & LOW_51_BIT_MASK.into();
c4 += U64::from(c3 >> 51).into();
out[3] = U64::from(c3) & LOW_51_BIT_MASK.into();
let carry: U64 = (c4 >> 51).into();
out[4] = U64::from(c4) & LOW_51_BIT_MASK.into();
// To see that this does not overflow, we need out[0] + carry * 19 < 2^64.
//
// c4 < a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + (carry from c3)
// < 5*(2^(51 + b) * 2^(51 + b)) + (carry from c3)
// < 2^(102 + 2*b + lg(5)) + 2^64.
//
// When b < 3 we get
//
// c4 < 2^110.33 so that carry < 2^59.33
//
// so that
//
// out[0] + carry * 19 < 2^51 + 19 * 2^59.33 < 2^63.58
//
// and there is no overflow.
out[0] = out[0] + carry * nineteen;
// Now out[1] < 2^51 + 2^(64 -51) = 2^51 + 2^13 < 2^(51 + epsilon).
out[1] += out[0] >> 51;
out[0] &= LOW_51_BIT_MASK.into();
// Now out[i] < 2^(51 + epsilon) for all i.
FieldElement64(out)
}
pub fn zero() -> Self
pub fn one() -> Self
pub fn ones() -> Self
pub fn from_le_bytes(bytes: &[U8]) -> Vec<U64>
pub fn to_le_bytes(ints: &[U64]) -> Vec<U8>
pub fn from_be_bytes(bytes: &[U8]) -> Vec<U64>
pub fn to_be_bytes(ints: &[U64]) -> Vec<U8>
pub fn max_value() -> U64
source§impl U64
impl U64
sourcepub fn checked_add(self, rhs: Self) -> Self
pub fn checked_add(self, rhs: Self) -> Self
Warning: panics on overflow.
source§impl U64
impl U64
sourcepub fn checked_sub(self, rhs: Self) -> Self
pub fn checked_sub(self, rhs: Self) -> Self
Warning: panics on overflow.
source§impl U64
impl U64
sourcepub fn checked_mul(self, rhs: Self) -> Self
pub fn checked_mul(self, rhs: Self) -> Self
Warning: panics on overflow.
source§impl U64
impl U64
pub fn rotate_left(self, rotval: usize) -> Self
pub fn rotate_right(self, rotval: usize) -> Self
source§impl U64
impl U64
sourcepub fn comp_eq(self, rhs: Self) -> Self
pub fn comp_eq(self, rhs: Self) -> Self
Produces a new integer which is all ones if the two arguments are equal and all zeroes otherwise. With inspiration from WireGuard.
sourcepub fn comp_ne(self, rhs: Self) -> Self
pub fn comp_ne(self, rhs: Self) -> Self
Produces a new integer which is all ones if the first argument is different from the second argument, and all zeroes otherwise.
sourcepub fn comp_gte(self, rhs: Self) -> Self
pub fn comp_gte(self, rhs: Self) -> Self
Produces a new integer which is all ones if the first argument is greater than or equal to the second argument, and all zeroes otherwise. With inspiration from WireGuard.
sourcepub fn comp_gt(self, rhs: Self) -> Self
pub fn comp_gt(self, rhs: Self) -> Self
Produces a new integer which is all ones if the first argument is strictly greater than the second argument, and all zeroes otherwise.
Trait Implementations§
source§impl AddAssign<U64> for U64
impl AddAssign<U64> for U64
Warning: has wrapping semantics.
source§fn add_assign(&mut self, rhs: Self)
fn add_assign(&mut self, rhs: Self)
+=
operation. Read more
source§impl BitAndAssign&lt;U64&gt; for U64
impl BitAndAssign<U64> for U64
source§fn bitand_assign(&mut self, rhs: Self)
fn bitand_assign(&mut self, rhs: Self)
&=
operation. Read more
source§impl BitOrAssign&lt;U64&gt; for U64
impl BitOrAssign<U64> for U64
source§fn bitor_assign(&mut self, rhs: Self)
fn bitor_assign(&mut self, rhs: Self)
|=
operation. Read more
source§impl BitXorAssign&lt;U64&gt; for U64
impl BitXorAssign<U64> for U64
source§fn bitxor_assign(&mut self, rhs: Self)
fn bitxor_assign(&mut self, rhs: Self)
^=
operation. Read more
source§impl MulAssign&lt;U64&gt; for U64
impl MulAssign<U64> for U64
Warning: has wrapping semantics.
source§fn mul_assign(&mut self, rhs: Self)
fn mul_assign(&mut self, rhs: Self)
*=
operation. Read more
source§impl ShlAssign&lt;usize&gt; for U64
impl ShlAssign<usize> for U64
source§fn shl_assign(&mut self, rhs: usize)
fn shl_assign(&mut self, rhs: usize)
<<=
operation. Read more
source§impl ShrAssign&lt;usize&gt; for U64
impl ShrAssign<usize> for U64
source§fn shr_assign(&mut self, rhs: usize)
fn shr_assign(&mut self, rhs: usize)
>>=
operation. Read more
source§impl SubAssign&lt;U64&gt; for U64
impl SubAssign<U64> for U64
Warning: has wrapping semantics.
source§fn sub_assign(&mut self, rhs: Self)
fn sub_assign(&mut self, rhs: Self)
-=
operation. Read more