use blstrs::{Fp, G1Affine};
use ff::PrimeField;
use group::prime::PrimeCurveAffine;
use crate::gpu::curve::GpuCurve;
use crate::gpu::curve::bls12_381::{fq_13bit_to_bytes, fq_bytes_to_13bit};
// Byte sizes of the GPU wire format for BLS12-381 elements, pulled from the
// GpuCurve implementation so they stay in sync with the serializer.
const FQ_GPU_BYTES: usize = <blstrs::Bls12 as GpuCurve>::FQ_GPU_BYTES;
// Fq size padded to the GPU alignment; coordinates are laid out at this
// stride (x at offset 0, y at FQ_GPU_PADDED_BYTES, z at 2·FQ_GPU_PADDED_BYTES).
const FQ_GPU_PADDED_BYTES: usize =
    <blstrs::Bls12 as GpuCurve>::FQ_GPU_PADDED_BYTES;
// Total size of one serialized G1 point in the GPU format.
const G1_GPU_BYTES: usize = <blstrs::Bls12 as GpuCurve>::G1_GPU_BYTES;
// β: a non-trivial cube root of unity in Fp, little-endian bytes. The GLV
// endomorphism is φ(x, y) = (β·x, y); the tests below verify β³ = 1, β ≠ 1
// and β² + β + 1 = 0.
const BETA_LE_BYTES: [u8; 48] = [
    0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0x01, 0x2e, 0x02, 0x00, 0x0a, 0x62,
    0x13, 0xd8, 0x17, 0xde, 0x88, 0x96, 0xf8, 0xe6, 0x3b, 0xa9, 0xb3, 0xdd,
    0xea, 0x77, 0x0f, 0x6a, 0x07, 0xc6, 0x69, 0xba, 0x51, 0xce, 0x76, 0xdf,
    0x2f, 0x67, 0x19, 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
];
// n11 / n22: 128-bit GLV decomposition constants, stored as 64-bit halves.
// NOTE(review): presumably entries of the reduced lattice basis for (r, λ)
// used by compute_k1 / compute_k2 — confirm against their derivation.
const N11_LO: u64 = 0x0000000100000000;
const N11_HI: u64 = 0xac45a4010001a402;
const N22_LO: u64 = 0x00000000ffffffff;
const N22_HI: u64 = 0xac45a4010001a402;
// g: 192-bit rounding constant; mul_u256_u192_high(k, g) yields
// ⌊k·g / 2^256⌋ (truncated to 128 bits), the rounded lattice coefficient c2.
const G_LIMBS: [u64; 3] =
    [0x63f6e522f6cfee2e, 0x7c6becf1e01faadd, 0x0000000000000001];
// q: the BLS12-381 base-field modulus, little-endian bytes; used by
// negate_g1_bytes to compute q − y.
const Q_MODULUS_LE: [u8; 48] = [
    0xab, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xb9, 0xff, 0xff, 0x53, 0xb1,
    0xfe, 0xff, 0xab, 0x1e, 0x24, 0xf6, 0xb0, 0xf6, 0xa0, 0xd2, 0x30, 0x67,
    0xbf, 0x12, 0x85, 0xf3, 0x84, 0x4b, 0x77, 0x64, 0xd7, 0xac, 0x4b, 0x43,
    0xb6, 0xa7, 0x1b, 0x4b, 0x9a, 0xe6, 0x7f, 0x39, 0xea, 0x11, 0x01, 0x1a,
];
// Threshold compared against k in glv_decompose to choose the rounding
// correction c1. NOTE(review): name suggests ⌊r/2⌋ (half the scalar-field
// order) — TODO confirm.
const R_HALF: [u64; 4] = [
    0x7fffffff80000000,
    0x9defd2017fff2dff,
    0x9cec0404d0ec0402,
    0x39f6d3a994cebea2,
];
/// Splits a scalar `k` into a GLV pair so that k ≡ k1 + k2·λ (mod r), where
/// λ is the scalar eigenvalue of the curve endomorphism φ. Each half is
/// returned as (absolute value, is_negative) and fits in 128 bits, letting
/// the MSM work on half-width scalars.
///
/// NOTE(review): assumes `F::to_repr()` is little-endian (true for
/// `blstrs::Scalar`) — confirm before using with other fields.
pub fn glv_decompose<F: PrimeField>(k: &F) -> (u128, bool, u128, bool) {
    let k_repr = k.to_repr();
    let k_bytes = k_repr.as_ref();
    // k as four little-endian u64 limbs.
    let k_limbs = bytes_to_u64x4(k_bytes);
    // c1: rounding correction — 1 when k lies above R_HALF, else 0.
    let c1: u64 = if gt_u256(&k_limbs, &R_HALF) { 1 } else { 0 };
    // c2 ≈ ⌊k·g / 2^256⌋: high limbs of the 256×192-bit product, used as
    // the rounded lattice coefficient.
    let c2 = mul_u256_u192_high(&k_limbs, &G_LIMBS);
    let (k1_abs, k1_neg) = compute_k1(&k_limbs, c1, c2);
    let (k2_abs, k2_neg) = compute_k2(c1, c2);
    (k1_abs, k1_neg, k2_abs, k2_neg)
}
/// Computes k1 = (k − c1) − c2·n11 as a signed 256-bit value, returned as
/// (|k1| truncated to 128 bits, is_negative). Debug-asserts that the
/// magnitude actually fits in 128 bits (upper two limbs are zero).
fn compute_k1(k: &[u64; 4], c1: u64, c2: u128) -> (u128, bool) {
    // c2 · n11 as a full 256-bit product (n11 reassembled from its halves).
    let prod = mul_u128_u128(c2, (N11_LO as u128) | ((N11_HI as u128) << 64));
    // (k − c1) with the borrow rippled through all four limbs.
    let (d0, b0) = k[0].overflowing_sub(c1);
    let (d1, b1) = k[1].overflowing_sub(b0 as u64);
    let (d2, b2) = k[2].overflowing_sub(b1 as u64);
    let d3 = k[3].wrapping_sub(b2 as u64);
    let k_minus_c1 = [d0, d1, d2, d3];
    let (diff, borrow) = sub_u256(&k_minus_c1, &prod);
    if borrow {
        // Subtraction went negative: magnitude is the two's-complement
        // negation of the wrapped difference.
        let neg = negate_u256(&diff);
        let val = neg[0] as u128 | ((neg[1] as u128) << 64);
        debug_assert!(
            neg[2] == 0 && neg[3] == 0,
            "k1 overflow: doesn't fit in 128 bits"
        );
        (val, true)
    } else {
        let val = diff[0] as u128 | ((diff[1] as u128) << 64);
        debug_assert!(
            diff[2] == 0 && diff[3] == 0,
            "k1 overflow: doesn't fit in 128 bits"
        );
        (val, false)
    }
}
/// Computes k2 = c1·n22 − c2 and returns it as (|k2|, is_negative).
fn compute_k2(c1: u64, c2: u128) -> (u128, bool) {
    // n22 reassembled from its 64-bit halves.
    let n22 = ((N22_HI as u128) << 64) | N22_LO as u128;
    // c1 is either 0 or 1, so c1·n22 is just a select.
    let minuend = match c1 {
        1 => n22,
        _ => 0u128,
    };
    if c2 > minuend {
        (c2 - minuend, true)
    } else {
        (minuend - c2, false)
    }
}
/// Applies the GLV endomorphism φ(x, y) = (β·x, y) to an affine G1 point.
/// The identity maps to itself.
pub fn endomorphism_g1(p: &G1Affine) -> G1Affine {
    if bool::from(p.is_identity()) {
        return G1Affine::identity();
    }
    let beta: Fp = Fp::from_bytes_le(&BETA_LE_BYTES)
        .expect("BETA constant is a valid Fp element");
    // Re-encode as uncompressed big-endian x ‖ y with x scaled by β.
    let mut raw = [0u8; 96];
    raw[..48].copy_from_slice(&(beta * p.x()).to_bytes_be());
    raw[48..].copy_from_slice(&p.y().to_bytes_be());
    G1Affine::from_uncompressed_unchecked(&raw)
        .expect("endomorphism produces a valid curve point")
}
/// Applies the GLV endomorphism φ(x, y) = (β·x, y) directly on a
/// GPU-serialized G1 point. Coordinates are 13-bit-packed and laid out at a
/// FQ_GPU_PADDED_BYTES stride (x, then y, then z). A point whose z limbs are
/// all zero is treated as the identity and returned unchanged.
pub fn endomorphism_g1_bytes(point_bytes: &[u8]) -> [u8; G1_GPU_BYTES] {
    debug_assert_eq!(point_bytes.len(), G1_GPU_BYTES);
    let mut result = [0u8; G1_GPU_BYTES];
    result.copy_from_slice(point_bytes);
    // z == 0 encodes the point at infinity; φ fixes it.
    let z_start = 2 * FQ_GPU_PADDED_BYTES;
    if point_bytes[z_start..z_start + FQ_GPU_BYTES]
        .iter()
        .all(|&b| b == 0)
    {
        return result;
    }
    // Unpack x from the 13-bit GPU encoding to canonical little-endian.
    let x_le = fq_13bit_to_bytes(&point_bytes[0..FQ_GPU_BYTES]);
    let x_le_arr: [u8; 48] = x_le.try_into().unwrap();
    let x = Fp::from_bytes_le(&x_le_arr).expect("valid Fp x-coordinate");
    let beta = Fp::from_bytes_le(&BETA_LE_BYTES).expect("valid BETA constant");
    // β·x back to little-endian: reverse the big-endian encoding in place
    // instead of copying byte-by-byte.
    let mut beta_x_le = (beta * x).to_bytes_be();
    beta_x_le.reverse();
    let beta_x_13bit = fq_bytes_to_13bit(&beta_x_le);
    result[0..FQ_GPU_BYTES].copy_from_slice(&beta_x_13bit);
    result
}
/// Negates a GPU-serialized G1 point in place by replacing its y-coordinate
/// with q − y over the base field. The all-zero identity encoding is left
/// untouched.
pub fn negate_g1_bytes(point_bytes: &mut [u8]) {
    debug_assert!(point_bytes.len() == G1_GPU_BYTES);
    // The identity is encoded as all zeroes; −0 = 0, nothing to do.
    if point_bytes.iter().all(|&b| b == 0) {
        return;
    }
    let y_start = FQ_GPU_PADDED_BYTES;
    let y_le = fq_13bit_to_bytes(&point_bytes[y_start..y_start + FQ_GPU_BYTES]);
    // Byte-wise schoolbook subtraction q − y in little-endian order.
    let mut neg_y_le = [0u8; 48];
    let mut borrow: u16 = 0;
    for (i, out) in neg_y_le.iter_mut().enumerate() {
        let minuend = Q_MODULUS_LE[i] as u16;
        let subtrahend = y_le[i] as u16 + borrow;
        if minuend >= subtrahend {
            *out = (minuend - subtrahend) as u8;
            borrow = 0;
        } else {
            *out = (minuend + 0x100 - subtrahend) as u8;
            borrow = 1;
        }
    }
    debug_assert_eq!(borrow, 0, "q - y underflow: y >= q");
    let neg_y_13bit = fq_bytes_to_13bit(&neg_y_le);
    point_bytes[y_start..y_start + FQ_GPU_BYTES].copy_from_slice(&neg_y_13bit);
}
/// Splits a 128-bit scalar into ⌈128/c⌉ unsigned windows of `c` bits each,
/// least-significant window first. Windows wider than 32 bits are truncated
/// to their low 32 bits by the `u32` return type.
///
/// Bug fix: the previous byte-gathering implementation capped the gather at
/// 8 bytes, silently dropping the top bits of a window for widths 58–63.
/// Extracting straight from the u128 is both correct for all widths and
/// simpler.
pub fn u128_to_windows(k: u128, c: usize) -> Vec<u32> {
    let num_windows = 128_usize.div_ceil(c);
    let mask: u128 = if c >= 128 { u128::MAX } else { (1u128 << c) - 1 };
    // (num_windows − 1)·c < 128, so every shift below is in range.
    (0..num_windows)
        .map(|i| ((k >> (i * c)) & mask) as u32)
        .collect()
}
/// Converts k into signed windows of width c: each window is
/// (magnitude, is_negative) with magnitude ≤ 2^(c−1), which halves the
/// bucket table size in a signed-digit MSM. One extra window absorbs a
/// carry out of the top window; trailing zero windows are trimmed, but at
/// least one window is always returned.
pub fn u128_to_signed_windows(k: u128, c: usize) -> Vec<(u32, bool)> {
    let mut unsigned = u128_to_windows(k, c);
    // Slot for a carry out of the highest window.
    unsigned.push(0);
    let half = 1u32 << (c - 1);
    let full = 1u32 << c;
    let mut result = Vec::with_capacity(unsigned.len());
    for i in 0..unsigned.len() {
        let w = unsigned[i];
        if w >= half {
            // Upper-half window: rewrite w as −(2^c − w) and carry +1 into
            // the next window (since w = 2^c − (2^c − w)).
            let abs_val = full - w;
            result.push((abs_val, true));
            if i + 1 < unsigned.len() {
                unsigned[i + 1] += 1;
            }
        } else {
            result.push((w, false));
        }
    }
    // Trim trailing zero windows but keep at least one.
    while result.len() > 1 && result.last() == Some(&(0, false)) {
        result.pop();
    }
    result
}
/// Parses up to 32 little-endian bytes into four little-endian u64 limbs.
/// Inputs shorter than 32 bytes are zero-extended; bytes past the 32nd are
/// ignored.
///
/// Bug fix: the previous version computed `end - offset` with `end < offset`
/// for inputs shorter than 25 bytes, panicking on a usize underflow instead
/// of zero-filling the out-of-range limbs.
fn bytes_to_u64x4(bytes: &[u8]) -> [u64; 4] {
    let mut limbs = [0u64; 4];
    // chunks(8) yields at most ⌈len/8⌉ chunks; zip stops at four limbs, and
    // any limb without a chunk stays zero.
    for (limb, chunk) in limbs.iter_mut().zip(bytes.chunks(8)) {
        let mut buf = [0u8; 8];
        buf[..chunk.len()].copy_from_slice(chunk);
        *limb = u64::from_le_bytes(buf);
    }
    limbs
}
/// Strict 256-bit unsigned comparison: true iff a > b, where both values
/// are given as little-endian limb arrays.
fn gt_u256(a: &[u64; 4], b: &[u64; 4]) -> bool {
    // Comparing the limbs most-significant-first lexicographically is
    // exactly the numeric comparison.
    matches!(a.iter().rev().cmp(b.iter().rev()), std::cmp::Ordering::Greater)
}
/// Returns limbs 4 and 5 (bits 256..384) of the full 256×192-bit product
/// k·g, i.e. ⌊k·g / 2^256⌋ truncated to 128 bits.
fn mul_u256_u192_high(k: &[u64; 4], g: &[u64; 3]) -> u128 {
    // 4×3-limb schoolbook multiplication into a 7-limb accumulator.
    let mut result = [0u64; 7];
    for i in 0..4 {
        let mut carry = 0u64;
        for (j, &gj) in g.iter().enumerate() {
            let idx = i + j;
            let (lo, hi) = mul_u64(k[i], gj);
            let (sum1, c1) = result[idx].overflowing_add(lo);
            let (sum2, c2) = sum1.overflowing_add(carry);
            result[idx] = sum2;
            // hi + c1 + c2 fits in u64: the running column total never
            // exceeds 2^128 − 1, so the carry out of a column is < 2^64.
            carry = hi + c1 as u64 + c2 as u64;
        }
        // result[i + 3] is still zero here — the inner loop only wrote
        // indices i..=i+2 this round, and the previous round's final carry
        // landed at (i-1)+3 = i+2 — so this add can never wrap.
        result[i + 3] = result[i + 3].wrapping_add(carry);
    }
    result[4] as u128 | ((result[5] as u128) << 64)
}
/// Full 64×64 → 128-bit multiplication, returned as (low, high) halves.
#[inline(always)]
fn mul_u64(a: u64, b: u64) -> (u64, u64) {
    let wide = u128::from(a) * u128::from(b);
    let lo = wide as u64;
    let hi = (wide >> 64) as u64;
    (lo, hi)
}
/// Full 128×128 → 256-bit multiplication, returned as four little-endian
/// u64 limbs.
///
/// Bug fix: the previous version summed the two cross-term high halves
/// (`c1a + c1b`) in u64, which can wrap (each may be up to 2^64 − 2),
/// dropping a carry out of limb 2 — e.g. `mul_u128_u128(u128::MAX,
/// u128::MAX)` produced a top limb of u64::MAX − 1 instead of u64::MAX.
/// Columns are now accumulated in u128, where the bounds below show no
/// intermediate sum can overflow.
fn mul_u128_u128(a: u128, b: u128) -> [u64; 4] {
    let a_lo = a as u64 as u128;
    let a_hi = (a >> 64) as u64 as u128;
    let b_lo = b as u64 as u128;
    let b_hi = (b >> 64) as u64 as u128;
    // Four partial products, each < 2^128.
    let ll = a_lo * b_lo;
    let lh = a_lo * b_hi;
    let hl = a_hi * b_lo;
    let hh = a_hi * b_hi;
    let r0 = ll as u64;
    // Column 1: carry from ll plus the low halves of both cross terms;
    // ≤ (2^64 − 2) + 2·(2^64 − 1) < 2^128, no overflow.
    let mid = (ll >> 64) + (lh as u64 as u128) + (hl as u64 as u128);
    let r1 = mid as u64;
    // Column 2: carry from column 1, high halves of the cross terms, and
    // the low half of hh; ≤ 2 + 2·(2^64 − 2) + (2^64 − 1) < 2^128.
    let high = (mid >> 64) + (lh >> 64) + (hl >> 64) + (hh as u64 as u128);
    let r2 = high as u64;
    let r3 = ((high >> 64) + (hh >> 64)) as u64;
    [r0, r1, r2, r3]
}
/// 256-bit limbwise subtraction a − b; returns the wrapped difference and
/// whether a final borrow occurred (i.e. a < b).
fn sub_u256(a: &[u64; 4], b: &[u64; 4]) -> ([u64; 4], bool) {
    let mut out = [0u64; 4];
    let mut borrow = false;
    for (i, (&x, &y)) in a.iter().zip(b.iter()).enumerate() {
        let (t, under1) = x.overflowing_sub(y);
        let (t, under2) = t.overflowing_sub(borrow as u64);
        out[i] = t;
        // At most one of the two subtractions can underflow per limb.
        borrow = under1 || under2;
    }
    (out, borrow)
}
/// Two's-complement negation of a 256-bit value (¬a + 1), little-endian
/// limbs; negate_u256(0) == 0.
fn negate_u256(a: &[u64; 4]) -> [u64; 4] {
    let mut out = [0u64; 4];
    // Start with +1 for the two's complement, then ripple the carry.
    let mut carry = true;
    for (i, &limb) in a.iter().enumerate() {
        let (sum, overflowed) = (!limb).overflowing_add(carry as u64);
        out[i] = sum;
        carry = overflowed;
    }
    out
}
#[cfg(test)]
mod tests {
    use blstrs::Scalar;
    use ff::Field;
    use group::Curve;
    use super::*;

    /// Little-endian repr of λ, the scalar-field eigenvalue of the GLV
    /// endomorphism (λ³ ≡ 1 mod r). Centralized so the magic bytes appear
    /// once instead of being copy-pasted into every test that needs λ.
    const LAMBDA_REPR: [u8; 32] = [
        0x01, 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0xff, 0xfc, 0xb7, 0xfc,
        0xff, 0x01, 0x00, 0x78, 0xa7, 0x04, 0xd8, 0xa1, 0x09, 0x08, 0xd8,
        0x39, 0x33, 0x48, 0x7d, 0x9d, 0x29, 0x53, 0xa7, 0xed, 0x73,
    ];

    /// Decodes [`LAMBDA_REPR`] into a `Scalar`.
    fn lambda_scalar() -> Scalar {
        Scalar::from_repr_vartime(LAMBDA_REPR)
            .expect("LAMBDA is a valid scalar")
    }

    #[test]
    fn beta_is_cube_root_of_unity() {
        let beta: Fp = Fp::from_bytes_le(&BETA_LE_BYTES).unwrap();
        let one: Fp = Fp::ONE;
        let beta_cubed = beta * beta * beta;
        assert_eq!(beta_cubed, one, "β³ should equal 1");
        assert_ne!(beta, one, "β should not be 1");
    }

    #[test]
    fn glv_decompose_roundtrip() {
        let lambda = lambda_scalar();
        let lambda_cubed = lambda * lambda * lambda;
        assert_eq!(lambda_cubed, Scalar::ONE, "λ³ should equal 1");
        let test_scalars = [
            Scalar::ZERO,
            Scalar::ONE,
            Scalar::from(2u64),
            Scalar::from(0xd201000000010000u64),
            -Scalar::ONE,
            lambda,
            Scalar::ROOT_OF_UNITY,
        ];
        for k in &test_scalars {
            let (k1_abs, k1_neg, k2_abs, k2_neg) = glv_decompose(k);
            let k1_scalar = scalar_from_u128(k1_abs, k1_neg);
            let k2_scalar = scalar_from_u128(k2_abs, k2_neg);
            let reconstructed = k1_scalar + k2_scalar * lambda;
            assert_eq!(reconstructed, *k, "decomposition failed for k={:?}", k);
        }
    }

    #[test]
    fn glv_decompose_bounds() {
        use rand_core::OsRng;
        for _ in 0..1000 {
            let k = Scalar::random(&mut OsRng);
            let (k1, _, k2, _) = glv_decompose(&k);
            assert!(
                k1 < (1u128 << 127) + (1u128 << 126),
                "k1 too large: {} bits",
                128 - k1.leading_zeros()
            );
            assert!(
                k2 < (1u128 << 127) + (1u128 << 126),
                "k2 too large: {} bits",
                128 - k2.leading_zeros()
            );
        }
    }

    #[test]
    fn endomorphism_matches_scalar_mul() {
        let lambda = lambda_scalar();
        let g = G1Affine::generator();
        let phi_g = endomorphism_g1(&g);
        let lambda_g: G1Affine =
            (blstrs::G1Projective::from(g) * lambda).to_affine();
        assert_eq!(phi_g, lambda_g, "φ(G) should equal [λ]·G");
    }

    #[test]
    fn endomorphism_identity() {
        let inf = G1Affine::identity();
        assert_eq!(endomorphism_g1(&inf), inf);
    }

    #[test]
    fn endomorphism_g1_bytes_matches_affine() {
        use crate::gpu::curve::GpuCurve;
        let g = G1Affine::generator();
        let phi_g = endomorphism_g1(&g);
        let g_bytes = <blstrs::Bls12 as GpuCurve>::serialize_g1(&g);
        let phi_bytes = endomorphism_g1_bytes(&g_bytes);
        let phi_from_bytes =
            <blstrs::Bls12 as GpuCurve>::deserialize_g1(&phi_bytes)
                .expect("endomorphism bytes should deserialize");
        let phi_affine = blstrs::G1Projective::from(phi_g);
        assert_eq!(
            phi_from_bytes, phi_affine,
            "byte-level endomorphism should match affine-level"
        );
    }

    #[test]
    fn negate_g1_bytes_roundtrip() {
        use crate::gpu::curve::GpuCurve;
        let p = G1Affine::generator();
        let mut bytes = <blstrs::Bls12 as GpuCurve>::serialize_g1(&p);
        let original = bytes.clone();
        negate_g1_bytes(&mut bytes);
        assert_ne!(bytes, original, "negation should change the point");
        negate_g1_bytes(&mut bytes);
        assert_eq!(bytes, original, "double negation should return original");
    }

    #[test]
    fn negate_g1_bytes_matches_group_negation() {
        use crate::gpu::curve::GpuCurve;
        let p = G1Affine::generator();
        let mut bytes = <blstrs::Bls12 as GpuCurve>::serialize_g1(&p);
        negate_g1_bytes(&mut bytes);
        let deserialized = <blstrs::Bls12 as GpuCurve>::deserialize_g1(&bytes)
            .expect("negated point should deserialize");
        let neg_p: blstrs::G1Projective =
            (-blstrs::G1Projective::from(p)).into();
        assert_eq!(deserialized, neg_p, "negated bytes should match -P");
    }

    #[test]
    fn u128_to_windows_known_values() {
        let windows = u128_to_windows(0, 15);
        assert_eq!(windows.len(), 9);
        assert!(windows.iter().all(|&w| w == 0));
        let windows = u128_to_windows(1, 15);
        assert_eq!(windows[0], 1);
        assert!(windows[1..].iter().all(|&w| w == 0));
        let windows = u128_to_windows(32767, 15);
        assert_eq!(windows[0], 32767);
        assert!(windows[1..].iter().all(|&w| w == 0));
        let windows = u128_to_windows(1 << 15, 15);
        assert_eq!(windows[0], 0);
        assert_eq!(windows[1], 1);
    }

    #[test]
    fn u128_to_windows_matches_bit_extraction() {
        let vals = [
            0u128,
            1,
            0xdeadbeef,
            u128::MAX,
            1u128 << 127,
            (1u128 << 127) - 1,
        ];
        for val in vals {
            let windows = u128_to_windows(val, 15);
            let expected = reference_u128_windows(val, 15);
            assert_eq!(windows, expected, "mismatch for val={}", val);
        }
    }

    /// Bit-by-bit reference implementation used to cross-check
    /// u128_to_windows.
    fn reference_u128_windows(k: u128, c: usize) -> Vec<u32> {
        let num_windows = 128_usize.div_ceil(c);
        let mut out = Vec::with_capacity(num_windows);
        for i in 0..num_windows {
            let bit_offset = i * c;
            let mut w = 0u32;
            for j in 0..c {
                let idx = bit_offset + j;
                if idx < 128 && (k >> idx) & 1 == 1 {
                    w |= 1u32 << j;
                }
            }
            out.push(w);
        }
        out
    }

    #[test]
    fn beta_satisfies_minimal_polynomial() {
        let beta: Fp = Fp::from_bytes_le(&BETA_LE_BYTES).unwrap();
        let beta_sq = beta * beta;
        let sum = beta_sq + beta + Fp::ONE;
        assert_eq!(sum, Fp::ZERO, "β should satisfy β² + β + 1 = 0");
    }

    #[test]
    fn glv_decompose_one() {
        let lambda = lambda_scalar();
        let (k1, k1_neg, k2, k2_neg) = glv_decompose(&Scalar::ONE);
        let k1_s = scalar_from_u128(k1, k1_neg);
        let k2_s = scalar_from_u128(k2, k2_neg);
        let reconstructed = k1_s + k2_s * lambda;
        assert_eq!(reconstructed, Scalar::ONE, "decomposition of 1 failed");
    }

    #[test]
    fn glv_decompose_r_minus_one() {
        let lambda = lambda_scalar();
        let k = -Scalar::ONE;
        let (k1, k1_neg, k2, k2_neg) = glv_decompose(&k);
        let k1_s = scalar_from_u128(k1, k1_neg);
        let k2_s = scalar_from_u128(k2, k2_neg);
        let reconstructed = k1_s + k2_s * lambda;
        assert_eq!(reconstructed, k, "decomposition of r-1 failed");
        assert!(k1 < (1u128 << 127) + (1u128 << 126), "k1 too large for r-1");
        assert!(k2 < (1u128 << 127) + (1u128 << 126), "k2 too large for r-1");
    }

    #[test]
    fn glv_decompose_lambda() {
        let lambda = lambda_scalar();
        let (k1, k1_neg, k2, k2_neg) = glv_decompose(&lambda);
        let k1_s = scalar_from_u128(k1, k1_neg);
        let k2_s = scalar_from_u128(k2, k2_neg);
        let reconstructed = k1_s + k2_s * lambda;
        assert_eq!(reconstructed, lambda, "decomposition of λ failed");
    }

    #[test]
    fn mul_u128_u128_known_values() {
        let r = mul_u128_u128(0, 0);
        assert_eq!(r, [0, 0, 0, 0]);
        let r = mul_u128_u128(1, 1);
        assert_eq!(r, [1, 0, 0, 0]);
        let a: u128 = 1u128 << 64;
        let b: u128 = 1u128 << 64;
        let r = mul_u128_u128(a, b);
        assert_eq!(r, [0, 0, 1, 0]);
        let max = u128::MAX;
        let r = mul_u128_u128(max, 2);
        assert_eq!(r[0], u64::MAX - 1);
        assert_eq!(r[1], u64::MAX);
        assert_eq!(r[2], 1);
        assert_eq!(r[3], 0);
    }

    #[test]
    fn sub_u256_known_values() {
        let a = [42u64, 0, 0, 0];
        let (r, borrow) = sub_u256(&a, &[0, 0, 0, 0]);
        assert_eq!(r, a);
        assert!(!borrow);
        let (r, borrow) = sub_u256(&[0, 0, 0, 0], &[1, 0, 0, 0]);
        assert!(borrow);
        assert_eq!(r[0], u64::MAX);
        let a = [0x1234, 0x5678, 0x9abc, 0xdef0];
        let (r, borrow) = sub_u256(&a, &a);
        assert_eq!(r, [0, 0, 0, 0]);
        assert!(!borrow);
    }

    #[test]
    fn negate_u256_known_values() {
        let r = negate_u256(&[0, 0, 0, 0]);
        assert_eq!(r, [0, 0, 0, 0]);
        let r = negate_u256(&[1, 0, 0, 0]);
        assert_eq!(r, [u64::MAX, u64::MAX, u64::MAX, u64::MAX]);
        let x = [0xdeadbeef, 0xcafebabe, 0x12345678, 0x9abcdef0];
        let neg = negate_u256(&x);
        let double_neg = negate_u256(&neg);
        assert_eq!(double_neg, x);
    }

    #[test]
    fn gt_u256_comparison() {
        let a = [1, 2, 3, 4];
        assert!(!gt_u256(&a, &a));
        assert!(gt_u256(&[0, 0, 0, 5], &[0, 0, 0, 4]));
        assert!(!gt_u256(&[0, 0, 0, 4], &[0, 0, 0, 5]));
        assert!(gt_u256(&[2, 0, 0, 0], &[1, 0, 0, 0]));
        assert!(!gt_u256(&[1, 0, 0, 0], &[2, 0, 0, 0]));
        assert!(gt_u256(&[0, 0, 0, 1], &[u64::MAX, u64::MAX, u64::MAX, 0]));
    }

    #[test]
    fn bytes_to_u64x4_roundtrip() {
        let limbs = [
            0x0102030405060708u64,
            0x090a0b0c0d0e0f10,
            0x1112131415161718,
            0x191a1b1c1d1e1f20,
        ];
        let mut bytes = Vec::new();
        for l in &limbs {
            bytes.extend_from_slice(&l.to_le_bytes());
        }
        let parsed = bytes_to_u64x4(&bytes);
        assert_eq!(parsed, limbs);
    }

    #[test]
    fn u128_to_windows_various_widths() {
        for c in [8, 13, 15, 16] {
            let vals = [0u128, 1, u128::MAX, 1u128 << 127, (1u128 << 64) - 1];
            for val in vals {
                let windows = u128_to_windows(val, c);
                let expected = reference_u128_windows(val, c);
                assert_eq!(
                    windows, expected,
                    "mismatch for val={val:#x} c={c}"
                );
            }
        }
    }

    #[test]
    fn endomorphism_g1_order_three() {
        let g = G1Affine::generator();
        let phi1 = endomorphism_g1(&g);
        let phi2 = endomorphism_g1(&phi1);
        let phi3 = endomorphism_g1(&phi2);
        assert_ne!(phi1, g, "φ(G) should not equal G");
        assert_ne!(phi2, g, "φ²(G) should not equal G");
        assert_eq!(phi3, g, "φ³(G) should equal G (order 3)");
    }

    #[test]
    fn negate_g1_bytes_identity() {
        let mut zero_bytes = vec![0u8; G1_GPU_BYTES];
        let original = zero_bytes.clone();
        negate_g1_bytes(&mut zero_bytes);
        assert_eq!(zero_bytes, original, "negating identity should be a no-op");
    }

    #[test]
    fn endomorphism_negation_commute() {
        use crate::gpu::curve::GpuCurve;
        let g = G1Affine::generator();
        let g_bytes = <blstrs::Bls12 as GpuCurve>::serialize_g1(&g);
        let mut neg_bytes = g_bytes.clone();
        negate_g1_bytes(&mut neg_bytes);
        let phi_neg = endomorphism_g1_bytes(&neg_bytes);
        let phi_bytes = endomorphism_g1_bytes(&g_bytes);
        let mut neg_phi = phi_bytes.clone();
        negate_g1_bytes(&mut neg_phi);
        assert_eq!(
            phi_neg.to_vec(),
            neg_phi.to_vec(),
            "φ(-P) should equal -φ(P)"
        );
    }

    #[test]
    fn glv_decompose_zero() {
        let (k1, k1_neg, k2, k2_neg) = glv_decompose(&Scalar::ZERO);
        assert_eq!(k1, 0);
        assert_eq!(k2, 0);
        assert!(!k1_neg);
        assert!(!k2_neg);
    }

    /// Lifts a signed 128-bit magnitude into the scalar field.
    fn scalar_from_u128(val: u128, negative: bool) -> Scalar {
        let lo = val as u64;
        let hi = (val >> 64) as u64;
        let mut bytes = [0u8; 32];
        bytes[0..8].copy_from_slice(&lo.to_le_bytes());
        bytes[8..16].copy_from_slice(&hi.to_le_bytes());
        let s = Scalar::from_repr_vartime(bytes)
            .expect("128-bit value is a valid scalar");
        if negative { -s } else { s }
    }
}