pub mod blinding;
use crate::{FieldBytes, NistP256};
use core::{
convert::TryInto,
ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign},
};
use elliptic_curve::{
ff::{Field, PrimeField},
generic_array::arr,
rand_core::RngCore,
subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption},
util::{adc64, mac64, sbb64},
};
#[cfg(feature = "digest")]
use elliptic_curve::{consts::U32, Digest, FromDigest};
#[cfg(feature = "zeroize")]
use crate::SecretKey;
#[cfg(feature = "zeroize")]
use elliptic_curve::zeroize::Zeroize;
/// Number of 64-bit limbs in a scalar.
const LIMBS: usize = 4;

/// A 256-bit unsigned integer, stored as little-endian 64-bit limbs.
type U256 = [u64; LIMBS];

/// The order `n` of the NIST P-256 base point, i.e. the number of elements
/// in the scalar field (little-endian limbs).
pub const MODULUS: U256 = [
    0xf3b9_cac2_fc63_2551,
    0xbce6_faad_a717_9e84,
    0xffff_ffff_ffff_ffff,
    0xffff_ffff_0000_0000,
];

/// `MODULUS >> 1`. Used by `invert_vartime` to halve odd values modulo `n`:
/// for odd `a`, `(a + n) / 2 == (a >> 1) + (n >> 1) + 1`.
const MODULUS_SHR1: U256 = [
    0x79dc_e561_7e31_92a8,
    0xde73_7d56_d38b_cf42,
    0x7fff_ffff_ffff_ffff,
    0x7fff_ffff_8000_0000,
];

/// Barrett reduction constant `mu = floor(2^512 / n)` (little-endian limbs),
/// used by `Scalar::barrett_reduce`.
pub const MU: [u64; 5] = [
    0x012f_fd85_eedf_9bfe,
    0x4319_0552_df1a_6c21,
    0xffff_fffe_ffff_ffff,
    0x0000_0000_ffff_ffff,
    0x0000_0000_0000_0001,
];

/// A P-256 scalar guaranteed to be non-zero.
pub type NonZeroScalar = elliptic_curve::scalar::NonZeroScalar<NistP256>;

/// Little-endian bit representation of a P-256 scalar.
pub type ScalarBits = elliptic_curve::scalar::ScalarBits<NistP256>;
/// An element of the P-256 scalar field: an integer modulo the curve
/// order `n`.
///
/// Stored as four 64-bit limbs in little-endian order; the arithmetic in
/// this module keeps the value fully reduced (i.e. `< n`).
#[derive(Clone, Copy, Debug, Default)]
#[cfg_attr(docsrs, doc(cfg(feature = "arithmetic")))]
pub struct Scalar(pub(crate) [u64; LIMBS]);
/// `ff::Field` implementation, delegating to the inherent constant-time
/// arithmetic defined on `Scalar`.
impl Field for Scalar {
    fn random(mut rng: impl RngCore) -> Self {
        // Rejection sampling: draw 32 random bytes until they decode to a
        // canonical (fully reduced) scalar, which happens on the first try
        // with overwhelming probability for P-256.
        let mut candidate = FieldBytes::default();
        loop {
            rng.fill_bytes(&mut candidate);
            if let Some(scalar) = Scalar::from_repr(candidate) {
                break scalar;
            }
        }
    }

    fn zero() -> Self {
        Scalar::zero()
    }

    fn one() -> Self {
        Scalar::one()
    }

    fn is_zero(&self) -> bool {
        // Resolve explicitly to the inherent, `Choice`-returning method.
        bool::from(Scalar::is_zero(self))
    }

    #[must_use]
    fn square(&self) -> Self {
        Scalar::square(self)
    }

    #[must_use]
    fn double(&self) -> Self {
        // Doubling is just modular self-addition.
        Scalar::add(self, self)
    }

    fn invert(&self) -> CtOption<Self> {
        Scalar::invert(self)
    }

    fn sqrt(&self) -> CtOption<Self> {
        todo!("see RustCrypto/elliptic-curves#170");
    }
}
/// `ff::PrimeField` implementation.
impl PrimeField for Scalar {
    type Repr = FieldBytes;
    #[cfg(target_pointer_width = "32")]
    type ReprBits = [u32; 8];
    #[cfg(target_pointer_width = "64")]
    type ReprBits = [u64; 4];

    const NUM_BITS: u32 = 256;
    const CAPACITY: u32 = 255;
    // 2-adicity: `n - 1` has exactly four trailing zero bits.
    const S: u32 = 4;

    /// Decodes 32 big-endian bytes into a scalar, returning `None` when the
    /// encoded value is not canonical (i.e. `>= n`).
    fn from_repr(bytes: FieldBytes) -> Option<Self> {
        let mut w = [0u64; LIMBS];
        // Interpret the byte string as a big-endian integer.
        w[3] = u64::from_be_bytes(bytes[0..8].try_into().unwrap());
        w[2] = u64::from_be_bytes(bytes[8..16].try_into().unwrap());
        w[1] = u64::from_be_bytes(bytes[16..24].try_into().unwrap());
        w[0] = u64::from_be_bytes(bytes[24..32].try_into().unwrap());
        // `w - n` borrows exactly when `w < n`, i.e. when `w` is canonical.
        let (_, borrow) = sbb64(w[0], MODULUS[0], 0);
        let (_, borrow) = sbb64(w[1], MODULUS[1], borrow);
        let (_, borrow) = sbb64(w[2], MODULUS[2], borrow);
        let (_, borrow) = sbb64(w[3], MODULUS[3], borrow);
        let is_some = (borrow as u8) & 1;
        CtOption::new(Scalar(w), Choice::from(is_some)).into()
    }

    fn to_repr(&self) -> FieldBytes {
        self.to_bytes()
    }

    fn to_le_bits(&self) -> ScalarBits {
        self.into()
    }

    fn is_odd(&self) -> bool {
        // Test the least-significant *bit*. The previous implementation
        // compared the entire low byte against 1 (`self.0[0] as u8 == 1`),
        // which misreported every odd scalar whose low byte isn't exactly
        // 0x01 (e.g. 3). This now matches the inherent `Scalar::is_odd`.
        self.0[0] & 1 == 1
    }

    /// The modulus `n` as little-endian bits (32-bit limb layout).
    #[cfg(target_pointer_width = "32")]
    fn char_le_bits() -> ScalarBits {
        [
            0xfc63_2551,
            0xf3b9_cac2,
            0xa717_9e84,
            0xbce6_faad,
            0xffff_ffff,
            0xffff_ffff,
            0x0000_0000,
            0xffff_ffff,
        ]
        .into()
    }

    /// The modulus `n` as little-endian bits (64-bit limb layout).
    #[cfg(target_pointer_width = "64")]
    fn char_le_bits() -> ScalarBits {
        MODULUS.into()
    }

    fn multiplicative_generator() -> Self {
        7u64.into()
    }

    /// A fixed `2^S`-th root of unity, per the `PrimeField` contract.
    fn root_of_unity() -> Self {
        Scalar::from_repr(arr![u8;
            0xff, 0xc9, 0x7f, 0x06, 0x2a, 0x77, 0x09, 0x92, 0xba, 0x80, 0x7a, 0xce, 0x84, 0x2a,
            0x3d, 0xfc, 0x15, 0x46, 0xca, 0xd0, 0x04, 0x37, 0x8d, 0xaf, 0x05, 0x92, 0xd7, 0xfb,
            0xb4, 0x1e, 0x66, 0x00,
        ])
        .unwrap()
    }
}
impl From<u64> for Scalar {
fn from(k: u64) -> Self {
Scalar([k, 0, 0, 0])
}
}
impl PartialEq for Scalar {
    /// Equality via the constant-time limb comparison.
    fn eq(&self, other: &Self) -> bool {
        bool::from(self.ct_eq(other))
    }
}

impl Eq for Scalar {}
impl PartialOrd for Scalar {
    /// Scalars are totally ordered (see the `Ord` impl), so this never
    /// returns `None`.
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(Ord::cmp(self, other))
    }
}
/// Compares two little-endian limb arrays as integers, returning -1, 0 or 1.
///
/// Runs in variable time — only use on public values.
fn cmp_vartime(left: &U256, right: &U256) -> i32 {
    use core::cmp::Ordering::*;
    // Lexicographic comparison starting from the most significant limb.
    match left.iter().rev().cmp(right.iter().rev()) {
        Less => -1,
        Equal => 0,
        Greater => 1,
    }
}
/// Shifts a 256-bit little-endian integer right by one bit in place,
/// discarding the least significant bit.
fn shr1(u256: &mut U256) {
    let mut carry = 0u64;
    // Walk from the most significant limb down, threading each limb's
    // low bit into the top of the next limb.
    for limb in u256.iter_mut().rev() {
        let next_carry = *limb & 1;
        *limb = (*limb >> 1) | (carry << 63);
        carry = next_carry;
    }
}
impl Ord for Scalar {
    /// Variable-time integer comparison of the limb representations.
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        use core::cmp::Ordering::*;
        // `cmp_vartime` only ever yields -1, 0 or 1.
        match cmp_vartime(&self.0, &other.0) {
            n if n < 0 => Less,
            0 => Equal,
            _ => Greater,
        }
    }
}
#[cfg(feature = "digest")]
#[cfg_attr(docsrs, doc(cfg(feature = "digest")))]
impl FromDigest<NistP256> for Scalar {
    /// Converts a 32-byte digest output into a scalar, reducing the value
    /// modulo the curve order `n`.
    fn from_digest<D>(digest: D) -> Self
    where
        D: Digest<OutputSize = U32>,
    {
        Self::from_bytes_reduced(&digest.finalize())
    }
}
impl Scalar {
    /// Returns the zero scalar (additive identity).
    pub const fn zero() -> Scalar {
        Scalar([0, 0, 0, 0])
    }

    /// Returns the scalar `1` (multiplicative identity).
    pub const fn one() -> Scalar {
        Scalar([1, 0, 0, 0])
    }

    /// Parses 32 big-endian bytes and reduces the result modulo the curve
    /// order `n`.
    ///
    /// Any 256-bit integer is smaller than `2 * n`, so the single
    /// conditional subtraction of `n` performed by `sub_inner` fully
    /// reduces the value.
    pub fn from_bytes_reduced(bytes: &FieldBytes) -> Self {
        Self::sub_inner(
            u64::from_be_bytes(bytes[24..32].try_into().unwrap()),
            u64::from_be_bytes(bytes[16..24].try_into().unwrap()),
            u64::from_be_bytes(bytes[8..16].try_into().unwrap()),
            u64::from_be_bytes(bytes[0..8].try_into().unwrap()),
            0,
            MODULUS[0],
            MODULUS[1],
            MODULUS[2],
            MODULUS[3],
            0,
        )
    }

    /// Serializes this scalar as 32 big-endian bytes.
    pub fn to_bytes(&self) -> FieldBytes {
        let mut ret = FieldBytes::default();
        ret[0..8].copy_from_slice(&self.0[3].to_be_bytes());
        ret[8..16].copy_from_slice(&self.0[2].to_be_bytes());
        ret[16..24].copy_from_slice(&self.0[1].to_be_bytes());
        ret[24..32].copy_from_slice(&self.0[0].to_be_bytes());
        ret
    }

    /// Constant-time test for zero.
    pub fn is_zero(&self) -> Choice {
        self.ct_eq(&Scalar::zero())
    }

    /// Modular addition: `self + rhs mod n` (constant time).
    pub const fn add(&self, rhs: &Self) -> Self {
        // Limb-wise addition; `w4` holds the carry out of the top limb.
        let (w0, carry) = adc64(self.0[0], rhs.0[0], 0);
        let (w1, carry) = adc64(self.0[1], rhs.0[1], carry);
        let (w2, carry) = adc64(self.0[2], rhs.0[2], carry);
        let (w3, w4) = adc64(self.0[3], rhs.0[3], carry);
        // Subtract the modulus from the 5-limb sum, restoring it if that
        // underflows (handled branch-free inside `sub_inner`).
        Self::sub_inner(
            w0, w1, w2, w3, w4, MODULUS[0], MODULUS[1], MODULUS[2], MODULUS[3], 0,
        )
    }

    /// Modular doubling: `2 * self mod n`.
    pub const fn double(&self) -> Self {
        self.add(self)
    }

    /// Modular subtraction: `self - rhs mod n` (constant time).
    pub const fn subtract(&self, rhs: &Self) -> Self {
        Self::sub_inner(
            self.0[0], self.0[1], self.0[2], self.0[3], 0, rhs.0[0], rhs.0[1], rhs.0[2], rhs.0[3],
            0,
        )
    }

    /// Computes `l - r mod n` on 5-limb operands. The fifth limb carries
    /// the overflow produced by `add`; the result must fit in 256 bits.
    #[inline]
    #[allow(clippy::too_many_arguments)]
    const fn sub_inner(
        l0: u64,
        l1: u64,
        l2: u64,
        l3: u64,
        l4: u64,
        r0: u64,
        r1: u64,
        r2: u64,
        r3: u64,
        r4: u64,
    ) -> Self {
        // Limb-wise subtraction; afterwards `borrow` is all-ones iff l < r.
        let (w0, borrow) = sbb64(l0, r0, 0);
        let (w1, borrow) = sbb64(l1, r1, borrow);
        let (w2, borrow) = sbb64(l2, r2, borrow);
        let (w3, borrow) = sbb64(l3, r3, borrow);
        let (_, borrow) = sbb64(l4, r4, borrow);
        // Add `n` back, masked by the borrow, to wrap a negative result into
        // the field. The mask keeps this branch-free (constant time).
        let (w0, carry) = adc64(w0, MODULUS[0] & borrow, 0);
        let (w1, carry) = adc64(w1, MODULUS[1] & borrow, carry);
        let (w2, carry) = adc64(w2, MODULUS[2] & borrow, carry);
        let (w3, _) = adc64(w3, MODULUS[3] & borrow, carry);
        Scalar([w0, w1, w2, w3])
    }

    /// Barrett reduction: reduces a 512-bit value `a` (given as 8
    /// little-endian limbs, `a0` lowest) modulo `n`, using the precomputed
    /// constant `MU = floor(2^512 / n)`. See Handbook of Applied
    /// Cryptography, Algorithm 14.42 (with base `b = 2^64`, `k = 4`).
    #[inline]
    #[allow(clippy::too_many_arguments)]
    const fn barrett_reduce(
        a0: u64,
        a1: u64,
        a2: u64,
        a3: u64,
        a4: u64,
        a5: u64,
        a6: u64,
        a7: u64,
    ) -> Self {
        // q1 = floor(a / b^(k-1)) — the top five limbs of `a`.
        let q1: [u64; 5] = [a3, a4, a5, a6, a7];
        // Computes floor(q1 * MU / b^(k+1)): the low five limbs of the
        // product are dropped, keeping limbs 5..=9.
        const fn q1_times_mu_shift_five(q1: &[u64; 5]) -> [u64; 5] {
            let (_w0, carry) = mac64(0, q1[0], MU[0], 0);
            let (w1, carry) = mac64(0, q1[0], MU[1], carry);
            let (w2, carry) = mac64(0, q1[0], MU[2], carry);
            let (w3, carry) = mac64(0, q1[0], MU[3], carry);
            let (w4, w5) = mac64(0, q1[0], MU[4], carry);
            let (_w1, carry) = mac64(w1, q1[1], MU[0], 0);
            let (w2, carry) = mac64(w2, q1[1], MU[1], carry);
            let (w3, carry) = mac64(w3, q1[1], MU[2], carry);
            let (w4, carry) = mac64(w4, q1[1], MU[3], carry);
            let (w5, w6) = mac64(w5, q1[1], MU[4], carry);
            let (_w2, carry) = mac64(w2, q1[2], MU[0], 0);
            let (w3, carry) = mac64(w3, q1[2], MU[1], carry);
            let (w4, carry) = mac64(w4, q1[2], MU[2], carry);
            let (w5, carry) = mac64(w5, q1[2], MU[3], carry);
            let (w6, w7) = mac64(w6, q1[2], MU[4], carry);
            let (_w3, carry) = mac64(w3, q1[3], MU[0], 0);
            let (w4, carry) = mac64(w4, q1[3], MU[1], carry);
            let (w5, carry) = mac64(w5, q1[3], MU[2], carry);
            let (w6, carry) = mac64(w6, q1[3], MU[3], carry);
            let (w7, w8) = mac64(w7, q1[3], MU[4], carry);
            let (_w4, carry) = mac64(w4, q1[4], MU[0], 0);
            let (w5, carry) = mac64(w5, q1[4], MU[1], carry);
            let (w6, carry) = mac64(w6, q1[4], MU[2], carry);
            let (w7, carry) = mac64(w7, q1[4], MU[3], carry);
            let (w8, w9) = mac64(w8, q1[4], MU[4], carry);
            [w5, w6, w7, w8, w9]
        }
        let q3 = q1_times_mu_shift_five(&q1);
        // r1 = a mod b^(k+1) — the low five limbs of `a`.
        let r1: [u64; 5] = [a0, a1, a2, a3, a4];
        // Computes (q3 * n) mod b^(k+1); product limbs above the fifth
        // cannot influence the result and are discarded as they form.
        const fn q3_times_n_keep_five(q3: &[u64; 5]) -> [u64; 5] {
            let (w0, carry) = mac64(0, q3[0], MODULUS[0], 0);
            let (w1, carry) = mac64(0, q3[0], MODULUS[1], carry);
            let (w2, carry) = mac64(0, q3[0], MODULUS[2], carry);
            let (w3, carry) = mac64(0, q3[0], MODULUS[3], carry);
            let (w4, _) = mac64(0, q3[0], 0, carry);
            let (w1, carry) = mac64(w1, q3[1], MODULUS[0], 0);
            let (w2, carry) = mac64(w2, q3[1], MODULUS[1], carry);
            let (w3, carry) = mac64(w3, q3[1], MODULUS[2], carry);
            let (w4, _) = mac64(w4, q3[1], MODULUS[3], carry);
            let (w2, carry) = mac64(w2, q3[2], MODULUS[0], 0);
            let (w3, carry) = mac64(w3, q3[2], MODULUS[1], carry);
            let (w4, _) = mac64(w4, q3[2], MODULUS[2], carry);
            let (w3, carry) = mac64(w3, q3[3], MODULUS[0], 0);
            let (w4, _) = mac64(w4, q3[3], MODULUS[1], carry);
            let (w4, _) = mac64(w4, q3[4], MODULUS[0], 0);
            [w0, w1, w2, w3, w4]
        }
        let r2: [u64; 5] = q3_times_n_keep_five(&q3);
        // r = r1 - r2. The Barrett quotient estimate guarantees this is
        // non-negative, so the final borrow can be ignored.
        #[inline]
        #[allow(clippy::too_many_arguments)]
        const fn sub_inner_five(l: [u64; 5], r: [u64; 5]) -> [u64; 5] {
            let (w0, borrow) = sbb64(l[0], r[0], 0);
            let (w1, borrow) = sbb64(l[1], r[1], borrow);
            let (w2, borrow) = sbb64(l[2], r[2], borrow);
            let (w3, borrow) = sbb64(l[3], r[3], borrow);
            let (w4, _borrow) = sbb64(l[4], r[4], borrow);
            [w0, w1, w2, w3, w4]
        }
        let r: [u64; 5] = sub_inner_five(r1, r2);
        // Branch-free conditional subtraction of `n`: subtract, then add
        // `n` back masked by the borrow if the subtraction underflowed.
        #[inline]
        #[allow(clippy::too_many_arguments)]
        const fn subtract_n_if_necessary(r0: u64, r1: u64, r2: u64, r3: u64, r4: u64) -> [u64; 5] {
            let (w0, borrow) = sbb64(r0, MODULUS[0], 0);
            let (w1, borrow) = sbb64(r1, MODULUS[1], borrow);
            let (w2, borrow) = sbb64(r2, MODULUS[2], borrow);
            let (w3, borrow) = sbb64(r3, MODULUS[3], borrow);
            let (w4, borrow) = sbb64(r4, 0, borrow);
            let (w0, carry) = adc64(w0, MODULUS[0] & borrow, 0);
            let (w1, carry) = adc64(w1, MODULUS[1] & borrow, carry);
            let (w2, carry) = adc64(w2, MODULUS[2] & borrow, carry);
            let (w3, carry) = adc64(w3, MODULUS[3] & borrow, carry);
            let (w4, _carry) = adc64(w4, 0, carry);
            [w0, w1, w2, w3, w4]
        }
        // Barrett's estimate is off by at most 2, so two conditional
        // subtractions of `n` always suffice to reach [0, n).
        let r = subtract_n_if_necessary(r[0], r[1], r[2], r[3], r[4]);
        let r = subtract_n_if_necessary(r[0], r[1], r[2], r[3], r[4]);
        Scalar([r[0], r[1], r[2], r[3]])
    }

    /// Modular multiplication: schoolbook 4x4-limb product producing a
    /// 512-bit intermediate, followed by Barrett reduction.
    pub const fn mul(&self, rhs: &Self) -> Self {
        let (w0, carry) = mac64(0, self.0[0], rhs.0[0], 0);
        let (w1, carry) = mac64(0, self.0[0], rhs.0[1], carry);
        let (w2, carry) = mac64(0, self.0[0], rhs.0[2], carry);
        let (w3, w4) = mac64(0, self.0[0], rhs.0[3], carry);
        let (w1, carry) = mac64(w1, self.0[1], rhs.0[0], 0);
        let (w2, carry) = mac64(w2, self.0[1], rhs.0[1], carry);
        let (w3, carry) = mac64(w3, self.0[1], rhs.0[2], carry);
        let (w4, w5) = mac64(w4, self.0[1], rhs.0[3], carry);
        let (w2, carry) = mac64(w2, self.0[2], rhs.0[0], 0);
        let (w3, carry) = mac64(w3, self.0[2], rhs.0[1], carry);
        let (w4, carry) = mac64(w4, self.0[2], rhs.0[2], carry);
        let (w5, w6) = mac64(w5, self.0[2], rhs.0[3], carry);
        let (w3, carry) = mac64(w3, self.0[3], rhs.0[0], 0);
        let (w4, carry) = mac64(w4, self.0[3], rhs.0[1], carry);
        let (w5, carry) = mac64(w5, self.0[3], rhs.0[2], carry);
        let (w6, w7) = mac64(w6, self.0[3], rhs.0[3], carry);
        Scalar::barrett_reduce(w0, w1, w2, w3, w4, w5, w6, w7)
    }

    /// Modular squaring.
    pub const fn square(&self) -> Self {
        self.mul(self)
    }

    /// Exponentiation by repeated squaring, scanning the exponent from the
    /// most significant bit downwards.
    ///
    /// Variable time with respect to the exponent `by` — only use with
    /// public exponents.
    pub fn pow_vartime(&self, by: &[u64; 4]) -> Self {
        let mut res = Self::one();
        for e in by.iter().rev() {
            for i in (0..64).rev() {
                res = res.square();
                if ((*e >> i) & 1) == 1 {
                    res *= self;
                }
            }
        }
        res
    }

    /// Modular inversion via Fermat's little theorem: `self^(n - 2) mod n`.
    ///
    /// Returns `None` for zero, which has no inverse. The hard-coded
    /// exponent below is the public constant `n - 2`, so `pow_vartime`'s
    /// exponent-dependent timing reveals nothing about `self`.
    pub fn invert(&self) -> CtOption<Self> {
        let inverse = self.pow_vartime(&[
            0xf3b9_cac2_fc63_254f,
            0xbce6_faad_a717_9e84,
            0xffff_ffff_ffff_ffff,
            0xffff_ffff_0000_0000,
        ]);
        CtOption::new(inverse, !self.is_zero())
    }

    /// Returns 1 if the scalar is odd, 0 otherwise (constant time).
    pub fn is_odd(&self) -> Choice {
        ((self.0[0] & 1) as u8).into()
    }

    /// Returns 1 if the scalar is even, 0 otherwise (constant time).
    pub fn is_even(&self) -> Choice {
        !self.is_odd()
    }

    // In-place right shift of the raw limbs by one bit (integer halving,
    // dropping the least significant bit).
    fn shr1(&mut self) {
        shr1(&mut self.0);
    }

    /// Modular inversion using the binary extended Euclidean algorithm.
    ///
    /// Faster than `invert`, but runs in variable time and must never be
    /// used with secret inputs.
    pub fn invert_vartime(&self) -> CtOption<Self> {
        let mut u = *self;
        let mut v = Scalar(MODULUS);
        #[allow(non_snake_case)]
        let mut A = Self::one();
        #[allow(non_snake_case)]
        let mut C = Self::zero();
        while !bool::from(u.is_zero()) {
            // Strip factors of two from `u`, mirroring each halving on `A`
            // modulo `n`.
            while bool::from(u.is_even()) {
                u.shr1();
                if bool::from(A.is_even()) {
                    A.shr1();
                } else {
                    // Odd A: (A + n) / 2 == (A >> 1) + (n >> 1) + 1,
                    // since both A and n are odd.
                    A.shr1();
                    A += Scalar(MODULUS_SHR1);
                    A += Self::one();
                }
            }
            // Same halving procedure for `v` / `C`.
            while bool::from(v.is_even()) {
                v.shr1();
                if bool::from(C.is_even()) {
                    C.shr1();
                } else {
                    C.shr1();
                    C += Scalar(MODULUS_SHR1);
                    C += Self::one();
                }
            }
            // Subtract the smaller of (u, v) from the larger, keeping the
            // Bezout coefficients in sync.
            if u >= v {
                u -= &v;
                A -= &C;
            } else {
                v -= &u;
                C -= &A;
            }
        }
        // On exit, C holds the inverse (for non-zero input).
        CtOption::new(C, !self.is_zero())
    }
}
// `core::ops` addition operators: every variant delegates to the
// constant-time modular addition `Scalar::add`.

impl Add<Scalar> for Scalar {
    type Output = Scalar;
    fn add(self, other: Scalar) -> Scalar {
        Scalar::add(&self, &other)
    }
}

impl Add<&Scalar> for &Scalar {
    type Output = Scalar;
    fn add(self, other: &Scalar) -> Scalar {
        Scalar::add(self, other)
    }
}

impl Add<&Scalar> for Scalar {
    type Output = Scalar;
    fn add(self, other: &Scalar) -> Scalar {
        Scalar::add(&self, other)
    }
}

impl AddAssign<Scalar> for Scalar {
    fn add_assign(&mut self, rhs: Scalar) {
        *self = Scalar::add(self, &rhs);
    }
}

impl AddAssign<&Scalar> for Scalar {
    fn add_assign(&mut self, rhs: &Scalar) {
        *self = Scalar::add(self, rhs);
    }
}
// `core::ops` subtraction operators: every variant delegates to the
// constant-time modular subtraction `Scalar::subtract`.

impl Sub<Scalar> for Scalar {
    type Output = Scalar;
    fn sub(self, other: Scalar) -> Scalar {
        Scalar::subtract(&self, &other)
    }
}

impl Sub<&Scalar> for &Scalar {
    type Output = Scalar;
    fn sub(self, other: &Scalar) -> Scalar {
        Scalar::subtract(self, other)
    }
}

impl Sub<&Scalar> for Scalar {
    type Output = Scalar;
    fn sub(self, other: &Scalar) -> Scalar {
        Scalar::subtract(&self, other)
    }
}

impl SubAssign<Scalar> for Scalar {
    fn sub_assign(&mut self, rhs: Scalar) {
        *self = Scalar::subtract(self, &rhs);
    }
}

impl SubAssign<&Scalar> for Scalar {
    fn sub_assign(&mut self, rhs: &Scalar) {
        *self = Scalar::subtract(self, rhs);
    }
}
// `core::ops` multiplication operators: every variant delegates to the
// constant-time modular multiplication `Scalar::mul`.

impl Mul<Scalar> for Scalar {
    type Output = Scalar;
    fn mul(self, other: Scalar) -> Scalar {
        Scalar::mul(&self, &other)
    }
}

impl Mul<&Scalar> for &Scalar {
    type Output = Scalar;
    fn mul(self, other: &Scalar) -> Scalar {
        Scalar::mul(self, other)
    }
}

impl Mul<&Scalar> for Scalar {
    type Output = Scalar;
    fn mul(self, other: &Scalar) -> Scalar {
        Scalar::mul(&self, other)
    }
}

impl MulAssign<Scalar> for Scalar {
    fn mul_assign(&mut self, rhs: Scalar) {
        *self = Scalar::mul(self, &rhs);
    }
}

impl MulAssign<&Scalar> for Scalar {
    fn mul_assign(&mut self, rhs: &Scalar) {
        *self = Scalar::mul(self, rhs);
    }
}
// Negation: `-x` is computed as `0 - x (mod n)`.

impl Neg for Scalar {
    type Output = Scalar;
    fn neg(self) -> Scalar {
        Scalar::zero() - self
    }
}

impl<'a> Neg for &'a Scalar {
    type Output = Scalar;
    fn neg(self) -> Scalar {
        Scalar::zero() - self
    }
}
impl ConditionallySelectable for Scalar {
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
Scalar([
u64::conditional_select(&a.0[0], &b.0[0], choice),
u64::conditional_select(&a.0[1], &b.0[1], choice),
u64::conditional_select(&a.0[2], &b.0[2], choice),
u64::conditional_select(&a.0[3], &b.0[3], choice),
])
}
}
impl ConstantTimeEq for Scalar {
fn ct_eq(&self, other: &Self) -> Choice {
self.0[0].ct_eq(&other.0[0])
& self.0[1].ct_eq(&other.0[1])
& self.0[2].ct_eq(&other.0[2])
& self.0[3].ct_eq(&other.0[3])
}
}
#[cfg(target_pointer_width = "32")]
impl From<&Scalar> for ScalarBits {
    /// Splits each little-endian 64-bit limb into two little-endian 32-bit
    /// words.
    ///
    /// Fixes two bugs in the previous version: the low word was masked with
    /// `0xFFFF` (16 bits instead of 32), and the high/low words were stored
    /// in the wrong order for a little-endian representation. The corrected
    /// layout matches the `scalar_into_scalarbits` test and the 32-bit
    /// `char_le_bits` constant.
    fn from(scalar: &Scalar) -> ScalarBits {
        let mut output = [0u32; 8];
        for (input, output) in scalar.0.iter().zip(output.chunks_mut(2)) {
            // Low 32 bits first, then the high 32 bits.
            output[0] = (input & 0xffff_ffff) as u32;
            output[1] = (input >> 32) as u32;
        }
        output.into()
    }
}
#[cfg(target_pointer_width = "64")]
impl From<&Scalar> for ScalarBits {
fn from(scalar: &Scalar) -> ScalarBits {
scalar.0.into()
}
}
impl From<Scalar> for FieldBytes {
fn from(scalar: Scalar) -> Self {
scalar.to_bytes()
}
}
impl From<&Scalar> for FieldBytes {
fn from(scalar: &Scalar) -> Self {
scalar.to_bytes()
}
}
#[cfg(feature = "zeroize")]
impl From<&SecretKey> for Scalar {
    /// Copies the secret scalar out of a `SecretKey`.
    fn from(secret_key: &SecretKey) -> Scalar {
        *secret_key.secret_scalar()
    }
}
#[cfg(feature = "zeroize")]
impl Zeroize for Scalar {
    fn zeroize(&mut self) {
        // Clears the limb array in place. Note that `Scalar` is `Copy`, so
        // this only wipes this particular instance — other copies of the
        // value may still exist elsewhere.
        self.0.as_mut().zeroize()
    }
}
#[cfg(test)]
mod tests {
    use super::Scalar;
    use crate::FieldBytes;
    use elliptic_curve::ff::PrimeField;
    #[cfg(feature = "zeroize")]
    use crate::SecretKey;

    /// Round-trips a small scalar through its big-endian byte encoding.
    #[test]
    fn from_to_bytes_roundtrip() {
        let k: u64 = 42;
        let mut bytes = FieldBytes::default();
        // Place `k` in the least significant 8 bytes of the 32-byte
        // big-endian encoding.
        bytes[24..].copy_from_slice(k.to_be_bytes().as_ref());
        let scalar = Scalar::from_repr(bytes).unwrap();
        assert_eq!(bytes, scalar.to_bytes());
    }

    /// Basic multiplication identities, including negative operands.
    #[test]
    fn multiply() {
        let one = Scalar::one();
        let two = one + &one;
        let three = two + &one;
        let six = three + &three;
        assert_eq!(six, two * &three);
        let minus_two = -two;
        let minus_three = -three;
        assert_eq!(two, -minus_two);
        assert_eq!(minus_three * &minus_two, minus_two * &minus_three);
        assert_eq!(six, minus_two * &minus_three);
    }

    /// Checks `invert` against known relations: a * a^-1 == 1 and
    /// (-a)^-1 == -(a^-1).
    #[test]
    fn invert() {
        let one = Scalar::one();
        let three = one + &one + &one;
        let inv_three = three.invert().unwrap();
        assert_eq!(three * &inv_three, one);
        let minus_three = -three;
        let inv_minus_three = minus_three.invert().unwrap();
        assert_eq!(inv_minus_three, -inv_three);
        assert_eq!(three * &inv_minus_three, -one);
    }

    /// Round-trips a scalar through `SecretKey`.
    #[test]
    #[cfg(feature = "zeroize")]
    fn from_ec_secret() {
        let scalar = Scalar::one();
        let secret = SecretKey::from_bytes(scalar.to_bytes()).unwrap();
        let rederived_scalar = Scalar::from(&secret);
        assert_eq!(scalar.0, rederived_scalar.0);
    }

    /// Verifies the 32-bit limb layout of the `ScalarBits` conversion
    /// against the known little-endian words of `n - 1`.
    #[test]
    #[cfg(target_pointer_width = "32")]
    fn scalar_into_scalarbits() {
        use super::ScalarBits;
        let minus_one = ScalarBits::from([
            0xfc63_2550,
            0xf3b9_cac2,
            0xa717_9e84,
            0xbce6_faad,
            0xffff_ffff,
            0xffff_ffff,
            0x0000_0000,
            0xffff_ffff,
        ]);
        let scalar_bits = ScalarBits::from(&-Scalar::from(1));
        assert_eq!(minus_one, scalar_bits);
    }
}