#![no_std]
#![allow(clippy::missing_safety_doc)]
#[cfg(target_arch = "arm")]
core::arch::global_asm!(include_str!("./asm.s"));
// Raw bindings to the architecture-specific (assembly) implementations of the
// P-256 field/scalar/point primitives pulled in via global_asm! above.
// Unless noted otherwise, `*const u32` / `*mut u32` arguments point to 8-word
// (256-bit) little-endian limb arrays, and "mont" denotes Montgomery form.
// Exact contracts live in the assembly and are presumed from the call sites.
extern "C" {
// One batch of divsteps for the modular inverse; fills a 2x2 transition
// matrix (4 words) and returns the updated delta.
fn P256_divsteps2_31(delta: i32, f: u32, g: u32, res_matrix: *mut u32) -> i32;
// Apply one row of a divsteps transition matrix to the (f, g) pair /
// the (x, y) coefficient pair — used only by mod_n_inv.
fn P256_matrix_mul_fg_9(a: u32, b: u32, fg: *const FGInteger, res: *mut FGInteger) -> i32;
fn P256_matrix_mul_mod_n(a: u32, b: u32, xy: *const XYInteger, res: *mut XYInteger) -> i32;
// Montgomery-domain conversions for field elements.
fn P256_to_montgomery(aR: *mut u32, a: *const u32);
fn P256_from_montgomery(a: *mut u32, aR: *const u32);
// Scalar arithmetic modulo the group order n.
fn P256_mul_mod_n(res: *mut u32, a: *const u32, b: *const u32);
fn P256_add_mod_n(res: *mut u32, a: *const u32, b: *const u32);
fn P256_reduce_mod_n_32bytes(res: *mut u32, a: *const u32);
// Point operations; points are Jacobian [x, y, z] triples in Montgomery form.
fn P256_jacobian_to_affine(
affine_mont_x: *mut u32,
affine_mont_y: *mut u32,
jacobian_mont: *const [u32; 8],
);
fn P256_point_is_on_curve(x_mont: *const u32, y_mont: *const u32) -> bool;
fn P256_decompress_point(y: *mut u32, x: *const u32, y_parity: u32) -> bool;
fn P256_double_j(jacobian_point_out: *mut [u32; 8], jacobian_point_in: *const [u32; 8]);
// Adds (or subtracts, when is_sub) point2 into jacobian_point1; when
// p2_is_affine, point2 is an affine [x, y] pair instead of a triple.
fn P256_add_sub_j(
jacobian_point1: *mut [u32; 8],
point2: *const [u32; 8],
is_sub: bool,
p2_is_affine: bool,
);
// Final ECDSA check: does the computed point match the signature's r?
fn P256_verify_last_step(r: *const u32, jacobian_point: *const [u32; 8]) -> bool;
// Constant-time conditional negations (no-op when should_negate == 0).
fn P256_negate_mod_p_if(out: *mut u32, inn: *const u32, should_negate: u32);
fn P256_negate_mod_n_if(out: *mut u32, inn: *const u32, should_negate: u32);
// Range checks against the group order n / field prime p — presumably
// 0 < a < n and 0 <= a < p respectively; verify against the asm.
fn P256_check_range_n(a: &[u32; 8]) -> bool;
fn P256_check_range_p(a: &[u32; 8]) -> bool;
// The group order n; 9 limbs because mod_n_inv works on 9-limb values.
static P256_order: [u32; 9];
}
// The field element 1 in Montgomery representation, i.e. R mod p where
// R = 2^256 and p is the P-256 prime (little-endian limbs).
const ONE_MONTGOMERY: [u32; 8] = [1, 0, 0, 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0];
// Precomputed affine basepoint multiples in Montgomery form, used by the
// sliding-window loop in `verify`: an odd digit d indexes entry d/2, so
// entry k presumably holds (2k+1)*G. Generated offline — do not hand-edit.
#[rustfmt::skip]
const P256_BASEPOINT_PRECOMP: [[[u32; 8]; 2]; 8]= [
[[0x18a9143c, 0x79e730d4, 0x5fedb601, 0x75ba95fc, 0x77622510, 0x79fb732b, 0xa53755c6, 0x18905f76],
[0xce95560a, 0xddf25357, 0xba19e45c, 0x8b4ab8e4, 0xdd21f325, 0xd2e88688, 0x25885d85, 0x8571ff18]],
[[0x4eebc127, 0xffac3f90, 0x87d81fb, 0xb027f84a, 0x87cbbc98, 0x66ad77dd, 0xb6ff747e, 0x26936a3f],
[0xc983a7eb, 0xb04c5c1f, 0x861fe1a, 0x583e47ad, 0x1a2ee98e, 0x78820831, 0xe587cc07, 0xd5f06a29]],
[[0xc45c61f5, 0xbe1b8aae, 0x94b9537d, 0x90ec649a, 0xd076c20c, 0x941cb5aa, 0x890523c8, 0xc9079605],
[0xe7ba4f10, 0xeb309b4a, 0xe5eb882b, 0x73c568ef, 0x7e7a1f68, 0x3540a987, 0x2dd1e916, 0x73a076bb]],
[[0xa0173b4f, 0x746354e, 0xd23c00f7, 0x2bd20213, 0xc23bb08, 0xf43eaab5, 0xc3123e03, 0x13ba5119],
[0x3f5b9d4d, 0x2847d030, 0x5da67bdd, 0x6742f2f2, 0x77c94195, 0xef933bdc, 0x6e240867, 0xeaedd915]],
[[0x264e20e8, 0x75c96e8f, 0x59a7a841, 0xabe6bfed, 0x44c8eb00, 0x2cc09c04, 0xf0c4e16b, 0xe05b3080],
[0xa45f3314, 0x1eb7777a, 0xce5d45e3, 0x56af7bed, 0x88b12f1a, 0x2b6e019a, 0xfd835f9b, 0x86659cd]],
[[0x6245e404, 0xea7d260a, 0x6e7fdfe0, 0x9de40795, 0x8dac1ab5, 0x1ff3a415, 0x649c9073, 0x3e7090f1],
[0x2b944e88, 0x1a768561, 0xe57f61c8, 0x250f939e, 0x1ead643d, 0xc0daa89, 0xe125b88e, 0x68930023]],
[[0x4b2ed709, 0xccc42563, 0x856fd30d, 0xe356769, 0x559e9811, 0xbcbcd43f, 0x5395b759, 0x738477ac],
[0xc00ee17f, 0x35752b90, 0x742ed2e3, 0x68748390, 0xbd1f5bc1, 0x7cd06422, 0xc9e7b797, 0xfbc08769]],
[[0xbc60055b, 0x72bcd8b7, 0x56e27e4b, 0x3cc23ee, 0xe4819370, 0xee337424, 0xad3da09, 0xe2aa0e43],
[0x6383c45d, 0x40b8524f, 0x42a41b25, 0xd7663554, 0x778a4797, 0x64efa6de, 0x7079adf4, 0x2042170a]]
];
// Second precomputed basepoint table (affine, Montgomery form) used by the
// constant-time comb method in `scalarmult_fixed_base`; indexed by a 3-bit
// mask of scalar bits plus a half-table selector. Presumably each entry is
// a fixed linear combination of 2^64-spaced multiples of G — see the bit
// positions used by get_bit! in scalarmult_fixed_base. Generated offline.
#[rustfmt::skip]
const P256_BASEPOINT_PRECOMP2: [[[[u32; 8]; 2]; 8]; 2] =
[
[
[[0x670844e0, 0x52d8a7c9, 0xef68a29d, 0xe33bdc, 0x4bdb7361, 0xf3d2848, 0x91c5304d, 0x5222c821],
[0xdf73fc25, 0xea6d2944, 0x255c81b, 0xa04c0f55, 0xefe488a8, 0x29acdc97, 0x80a560de, 0xbe2e158f]],
[[0x2b13e673, 0xfc8511ee, 0xd103ed24, 0xffc58dee, 0xea7e99b8, 0x1022523a, 0x4afc8a17, 0x8f43ea39],
[0xc5f33d0b, 0x8f4e2dbc, 0xd0aa1681, 0x3bc099fa, 0x79ff9df1, 0xffbb7b41, 0xd58b57c4, 0x180de09d]],
[[0x8bd1cda5, 0x56430752, 0x8e05eda5, 0x1807577f, 0x956896e9, 0x99c699b, 0xf1f0efb5, 0x83d6093d],
[0xed97061c, 0xef5af17e, 0x30d4c3c, 0x35b977b8, 0x49229439, 0x81fa75a2, 0xa0b6d35d, 0xf5a22070]],
[[0x74f81cf1, 0x814c5365, 0x120065b, 0xe30baff7, 0x15132621, 0x80ae1256, 0x36a80788, 0x16d2b8cb],
[0xecc50bca, 0x33d14697, 0x17aedd21, 0x19a9dfb0, 0xedc3f766, 0x523fbcc7, 0xb2cf5afd, 0x9c4de6dd]],
[[0xcf0d9f6d, 0x5305a9e6, 0x81a9b021, 0x5839172f, 0x75c687cf, 0xcca7a4dd, 0x844be22f, 0x36d59b3e],
[0x111a53e9, 0xcace7e62, 0xf063f3a1, 0x91c843d4, 0xda812da, 0xbf77e5f0, 0x437f3176, 0xe64af9c]],
[[0xcf07517d, 0xdbd568bb, 0xba6830b9, 0x2f1afba2, 0xe6c4c2a6, 0x15b6807c, 0xe4966aef, 0x91c7eabc],
[0xd6b2b6e6, 0x716dea1b, 0x19f85b4b, 0x248c43d1, 0x4a315e2a, 0x16dcfd60, 0xc72b3d0b, 0x15fdd303]],
[[0x42b7dfd5, 0xe40bf9f4, 0x2d934f2a, 0x673689f3, 0x30a6f50b, 0x8314beb4, 0x976ec64e, 0xd17af2bc],
[0x1ee7ddf1, 0x39f66c4f, 0x68ea373c, 0x7f68e18b, 0x53d0b186, 0x5166c1f2, 0x7be58f14, 0x95dda601]],
[[0x42913074, 0xd5ae356, 0x48a542b1, 0x55491b27, 0xb310732a, 0x469ca665, 0x5f1a4cc1, 0x29591d52],
[0xb84f983f, 0xe76f5b6b, 0x9f5f84e1, 0xbe7eef41, 0x80baa189, 0x1200d496, 0x18ef332c, 0x6376551f]]
],
[
[[0x7c4e54f5, 0xb9e5cbc0, 0xe1410e34, 0xc53a1a17, 0xec454425, 0x3e199130, 0x1700902e, 0xb029c97e],
[0x786423b6, 0x2de66e11, 0xb41a95be, 0x262dc914, 0x451b683, 0x51766abd, 0x85bb6fb1, 0x55ad5f34]],
[[0x9066cb79, 0x74f4f1c, 0x30c8b94e, 0x1ab31bd6, 0xd74275b3, 0x6d3f012f, 0x9ddcce40, 0xa214d0b1],
[0xd165050a, 0x24aedf74, 0xe0e5dc3e, 0x95f17ece, 0xd9224456, 0x6ada9cda, 0x2dd60eea, 0x1fadb2d1]],
[[0xe20cfb9b, 0xa3d83091, 0xba76e0cb, 0xae79c975, 0xc8858a6e, 0xa5f2a588, 0x874a3168, 0xe897a5f4],
[0x7d48f096, 0xf6c1ef40, 0xc35b132c, 0x1f9c516b, 0x53c479fd, 0xe1040f91, 0x9df06743, 0x60e881f]],
[[0x52a90e51, 0x9e0ad72, 0x38c50a96, 0xb7e66ea3, 0x7d997770, 0xab32ad05, 0x445671cb, 0xceaffe2],
[0x5d37cc99, 0xdfbe753c, 0xe0fea2d5, 0x95d068cc, 0x4dd77cb6, 0x1e37cdda, 0x55530688, 0x88c5a4bb]],
[[0xc7744f1, 0x3413f033, 0xbc816702, 0x23c05c89, 0x1192b5ac, 0x2322ee9a, 0x373180bb, 0xc1636a0],
[0xbdde0207, 0xfe2f3d4, 0xc23578d8, 0xe1a093a, 0xc888ead, 0x6e5f0d1, 0x52a2b660, 0x9ca285a5]],
[[0xce923964, 0xdae76995, 0xa34c7993, 0xcc96493a, 0xea73d9e7, 0xd19b5144, 0x311e6e34, 0x4a5c263],
[0xd9a2a443, 0x7db5b32b, 0x2cfd960c, 0x3754bd33, 0xa430f15, 0xc5bcc98, 0xd9a94574, 0x5651201f]],
[[0xfc0418fe, 0xebdd8921, 0x34e20036, 0x37015b39, 0xdf03a353, 0xcf4fcd8f, 0xf12cab16, 0xdc2de6e1],
[0xd071df14, 0x9c17cc1a, 0x63415530, 0xd7c5e6a3, 0x68f3fb1e, 0xb5301660, 0x18269301, 0xb5f70bc9]],
[[0x79ec1a0f, 0x2d8daefd, 0xceb39c97, 0x3bbcd6fd, 0x58f61a95, 0xf5575ffc, 0xadf7b420, 0xdbd986c4],
[0x15f39eb7, 0x81aa8814, 0xb98d976c, 0x6ee2fcf5, 0xcf2f717d, 0x5465475d, 0x6860bbd0, 0x8e24d3c4]]
]
];
#[inline(always)]
fn abs_int(a: i8) -> u32 {
    // Branchless absolute value for small signed window digits (|a| <= 15).
    // Avoiding a data-dependent branch keeps the sign of the digit from
    // leaking through control flow during table selection.
    let widened: u32 = a as i32 as u32;
    // Smear the sign bit into a 4-bit mask: 0xF when negative, 0 otherwise.
    let mut sign_mask: u32 = widened >> 31;
    sign_mask |= sign_mask << 1;
    sign_mask |= sign_mask << 2;
    // Select -a under the mask and a under its 4-bit complement.
    (((-a) as u32) & sign_mask) | ((a as u32) & (sign_mask ^ 0xF))
}
#[inline]
#[must_use]
/// Returns true iff `a` (little-endian words) is a valid P-256 scalar —
/// presumably 0 < a < n, the group order; the check itself is implemented
/// in assembly.
pub fn check_range_n(a: &[u32; 8]) -> bool {
unsafe { P256_check_range_n(a) }
}
#[inline]
#[must_use]
/// Returns true iff `a` (little-endian words) is a valid field element —
/// presumably 0 <= a < p, the P-256 prime; the check itself is implemented
/// in assembly.
pub fn check_range_p(a: &[u32; 8]) -> bool {
unsafe { P256_check_range_p(a) }
}
pub fn convert_endianness(output: &mut [u8; 32], input: &[u8; 32]) {
    // Byte-reverse `input` into `output` (big-endian <-> little-endian view
    // of a 256-bit value). `output` and `input` are distinct references, so
    // a straight reversed copy is equivalent to the swap-based original.
    for (dst, src) in output.iter_mut().zip(input.iter().rev()) {
        *dst = *src;
    }
}
fn u32x8_to_u8x32(input: &[u32; 8]) -> &[u8; 32] {
    // Reinterpret a 256-bit word array as raw bytes (native endianness).
    // SAFETY: [u32; 8] and [u8; 32] have identical size, u8 has the weaker
    // alignment requirement, and the lifetime of the borrow is preserved.
    unsafe { &*(input as *const [u32; 8] as *const [u8; 32]) }
}
fn u32x8_to_u8x32_mut(input: &mut [u32; 8]) -> &mut [u8; 32] {
    // Mutable counterpart of u32x8_to_u8x32 (native endianness).
    // SAFETY: same size, weaker alignment for u8, unique borrow carried
    // through unchanged — reinterpreting the reference is sound.
    unsafe { &mut *(input as *mut [u32; 8] as *mut [u8; 32]) }
}
pub fn point_to_octet_string_uncompressed(out: &mut [u8; 65], x: &[u32; 8], y: &[u32; 8]) {
    // SEC 1 uncompressed form: 0x04 || big-endian X || big-endian Y.
    out[0] = 4;
    let (x_dst, y_dst) = out[1..].split_at_mut(32);
    convert_endianness(x_dst.try_into().unwrap(), u32x8_to_u8x32(x));
    convert_endianness(y_dst.try_into().unwrap(), u32x8_to_u8x32(y));
}
pub fn point_to_octet_string_compressed(out: &mut [u8; 33], x: &[u32; 8], y: &[u32; 8]) {
    // SEC 1 compressed form: 0x02 or 0x03 (parity of Y) || big-endian X.
    let y_parity = (y[0] & 1) as u8;
    out[0] = 2_u8.wrapping_add(y_parity);
    convert_endianness((&mut out[1..]).try_into().unwrap(), u32x8_to_u8x32(x));
}
pub fn point_to_octet_string_hybrid(out: &mut [u8; 65], x: &[u32; 8], y: &[u32; 8]) {
    // SEC 1 hybrid form: 0x06 or 0x07 (parity of Y) || big-endian X || big-endian Y.
    out[0] = 6_u8.wrapping_add((y[0] & 1) as u8);
    let (x_dst, y_dst) = out[1..].split_at_mut(32);
    convert_endianness(x_dst.try_into().unwrap(), u32x8_to_u8x32(x));
    convert_endianness(y_dst.try_into().unwrap(), u32x8_to_u8x32(y));
}
#[must_use]
/// Parse a SEC 1 octet string (compressed, uncompressed or hybrid) into
/// affine little-endian coordinates `x`, `y`, validating field ranges and
/// curve membership. Returns false for any malformed or invalid encoding.
///
/// `x` (and possibly `y`) may hold partial data when false is returned.
pub fn octet_string_to_point(x: &mut [u32; 8], y: &mut [u32; 8], input: &[u8]) -> bool {
    // Reject inputs too short for the header byte plus an X coordinate up
    // front: the previous code sliced `input[1..33]` unconditionally and
    // panicked on short (attacker-supplied) inputs instead of failing.
    if input.len() < 33 {
        return false;
    }
    convert_endianness(u32x8_to_u8x32_mut(x), input[1..33].try_into().unwrap());
    if unsafe { !P256_check_range_p(x) } {
        return false;
    }
    if (input[0] == 4 || ((input[0] >> 1) == 3)) && input.len() == 65 {
        // Uncompressed (0x04) or hybrid (0x06/0x07): Y is given explicitly.
        convert_endianness(u32x8_to_u8x32_mut(y), input[33..65].try_into().unwrap());
        if unsafe { !P256_check_range_p(y) } {
            return false;
        }
        // Hybrid form also encodes the parity of Y; it must match.
        if (input[0] >> 1) == 3 && u32::from(input[0] & 1) != (y[0] & 1) {
            return false;
        }
        let mut x_mont: [u32; 8] = [0; 8];
        let mut y_mont: [u32; 8] = [0; 8];
        unsafe { P256_to_montgomery(x_mont.as_mut_ptr(), x.as_ptr()) };
        unsafe { P256_to_montgomery(y_mont.as_mut_ptr(), y.as_ptr()) };
        unsafe { P256_point_is_on_curve(x_mont.as_ptr(), y_mont.as_ptr()) }
    } else if (input[0] >> 1) == 1 && input.len() == 33 {
        // Compressed (0x02/0x03): recover Y from X and the parity bit.
        unsafe { P256_decompress_point(y.as_mut_ptr(), x.as_ptr(), u32::from(input[0] & 1)) }
    } else {
        false
    }
}
// Constant-time scalar multiplication of an arbitrary point. On entry
// output_mont_x/y hold the point's affine coordinates in Montgomery form;
// on exit they hold scalar * point, also affine Montgomery.
fn scalarmult_variable_base(
output_mont_x: &mut [u32; 8],
output_mont_y: &mut [u32; 8],
scalar: &[u32; 8],
) {
// Force the scalar odd: negate it mod n when even and compensate at the
// end by negating the resulting y (negating the scalar negates the point).
let mut scalar2: [u32; 8] = [0; 8];
let even: u32 = ((scalar[0]) & 1) ^ 1;
unsafe { P256_negate_mod_n_if(scalar2.as_mut_ptr(), scalar.as_ptr(), even) };
// Recode the odd scalar into 64 signed odd 4-bit windows: each nibble is
// forced odd (|= 1) and the borrow is pushed into the previous window,
// so every digit selects an odd table entry.
let mut e: [i8; 64] = [0; 64];
e[0] = (scalar2[0] & 0xf) as i8;
(1..64).for_each(|i| {
e[i] = ((scalar2[i / 8] >> ((i % 8) * 4)) & 0xf) as u8 as i8;
e[i - 1] -= ((e[i] & 1) ^ 1) << 4;
e[i] |= 1;
});
// Build the odd-multiple table table[k] = (2k+1) * P in Jacobian form;
// table[7] first serves as scratch holding 2P and is overwritten last.
let mut table: [[[u32; 8]; 3]; 8] = [[[0; 8]; 3]; 8];
table[0][0].copy_from_slice(output_mont_x);
table[0][1].copy_from_slice(output_mont_y);
table[0][2].copy_from_slice(&ONE_MONTGOMERY);
unsafe { P256_double_j(table[7].as_mut_ptr(), table[0].as_ptr()) };
(1..8).for_each(|i| {
table.copy_within(7..8, i);
unsafe { P256_add_sub_j(table[i].as_mut_ptr(), table[i - 1].as_ptr(), false, false) };
});
// Start from the top window (always positive for scalars < n), then for
// each lower window: four doublings and one signed table addition. The
// sign is applied by conditionally negating the selected y coordinate,
// keeping the lookup itself independent of the digit's sign bit.
let mut current_point: [[u32; 8]; 3] = [[0; 8]; 3];
current_point.copy_from_slice(&table[(e[63] >> 1) as u8 as usize]);
(0..63).rev().for_each(|i| {
(0..4).for_each(|_| unsafe {
P256_double_j(current_point.as_mut_ptr(), current_point.as_ptr())
});
let mut selected_point: [[u32; 8]; 3] = [[0; 8]; 3];
selected_point.copy_from_slice(&table[(abs_int(e[i]) >> 1) as u8 as usize]);
unsafe {
P256_negate_mod_p_if(
selected_point[1].as_mut_ptr(),
selected_point[1].as_ptr(),
((e[i] as u8) >> 7) as u32,
)
};
unsafe {
P256_add_sub_j(
current_point.as_mut_ptr(),
selected_point.as_ptr(),
false,
false,
)
};
});
// Convert back to affine and undo the initial scalar negation, if any.
unsafe {
P256_jacobian_to_affine(
output_mont_x.as_mut_ptr(),
output_mont_y.as_mut_ptr(),
current_point.as_ptr(),
)
};
unsafe { P256_negate_mod_p_if(output_mont_y.as_mut_ptr(), output_mont_y.as_mut_ptr(), even) };
}
#[must_use]
/// Validate the input point, convert it to Montgomery form in the output
/// buffers, and multiply by `scalar`. The scalar itself is NOT range
/// checked here; results stay in Montgomery form.
fn scalarmult_generic_no_scalar_check(
    output_mont_x: &mut [u32; 8],
    output_mont_y: &mut [u32; 8],
    scalar: &[u32; 8],
    in_x: &[u32; 8],
    in_y: &[u32; 8],
) -> bool {
    // Coordinates must be valid field elements.
    if unsafe { !P256_check_range_p(in_x) } || unsafe { !P256_check_range_p(in_y) } {
        return false;
    }
    unsafe {
        P256_to_montgomery(output_mont_x.as_mut_ptr(), in_x.as_ptr());
        P256_to_montgomery(output_mont_y.as_mut_ptr(), in_y.as_ptr());
    }
    // The point must lie on the curve before any scalar work is done.
    let on_curve =
        unsafe { P256_point_is_on_curve(output_mont_x.as_mut_ptr(), output_mont_y.as_mut_ptr()) };
    if !on_curve {
        return false;
    }
    scalarmult_variable_base(output_mont_x, output_mont_y, scalar);
    true
}
#[must_use]
/// Full scalar multiplication scalar * (in_x, in_y) with scalar range
/// checking; on success the affine result is converted out of Montgomery
/// form into result_x/result_y. Returns false for an invalid scalar or point.
pub fn scalarmult_generic(
    result_x: &mut [u32; 8],
    result_y: &mut [u32; 8],
    scalar: &[u32; 8],
    in_x: &[u32; 8],
    in_y: &[u32; 8],
) -> bool {
    // Short-circuits: the multiplication only runs for an in-range scalar.
    let ok = unsafe { P256_check_range_n(scalar) }
        && scalarmult_generic_no_scalar_check(result_x, result_y, scalar, in_x, in_y);
    if ok {
        unsafe {
            P256_from_montgomery(result_x.as_mut_ptr(), result_x.as_ptr());
            P256_from_montgomery(result_y.as_mut_ptr(), result_y.as_ptr());
        }
    }
    ok
}
#[must_use]
/// ECDH: writes the big-endian X coordinate of private_key * peer_point
/// into `shared_secret`. Returns false when the peer's point is invalid
/// (out of range or not on the curve).
pub fn ecdh_calc_shared_secret(
    shared_secret: &mut [u8; 32],
    private_key: &[u32; 8],
    others_public_key_x: &[u32; 8],
    others_public_key_y: &[u32; 8],
) -> bool {
    let mut point_x: [u32; 8] = [0; 8];
    let mut point_y: [u32; 8] = [0; 8];
    let valid = scalarmult_generic_no_scalar_check(
        &mut point_x,
        &mut point_y,
        private_key,
        others_public_key_x,
        others_public_key_y,
    );
    if valid {
        // Only X is used; convert it out of Montgomery form and serialize
        // big-endian.
        unsafe { P256_from_montgomery(point_x.as_mut_ptr(), point_x.as_ptr()) };
        convert_endianness(shared_secret, u32x8_to_u8x32(&point_x));
    }
    valid
}
#[must_use]
/// Derive the public key for `private_key`, i.e. private_key * G in affine
/// little-endian coordinates. Returns false when the private key fails the
/// scalar range check performed by `scalarmult_base`.
pub fn keygen(
public_key_x: &mut [u32; 8],
public_key_y: &mut [u32; 8],
private_key: &[u32; 8],
) -> bool {
scalarmult_base(public_key_x, public_key_y, private_key)
}
// Extract bit `$i` (0-based, little-endian word order) from a `[u32]` array.
macro_rules! get_bit {
($arr:ident, $i:expr) => {
(($arr[$i / 32] >> ($i % 32)) & 1)
};
}
// Constant-time multiplication of the fixed basepoint G by `scalar`, using
// the two precomputed comb tables in P256_BASEPOINT_PRECOMP2. Outputs are
// affine Montgomery-form coordinates.
fn scalarmult_fixed_base(
output_mont_x: &mut [u32; 8],
output_mont_y: &mut [u32; 8],
scalar: &[u32; 8],
) {
// Force the scalar odd (negate mod n if even); compensated at the end by
// negating the resulting y coordinate.
let mut scalar2: [u32; 8] = [0; 8];
let even: u32 = ((scalar[0]) & 1) ^ 1;
unsafe { P256_negate_mod_n_if(scalar2.as_mut_ptr(), scalar.as_ptr(), even) };
let mut current_point: [[u32; 8]; 3] = [[0; 8]; 3];
let mut selected_point: [[u32; 8]; 2] = [[0; 8]; 2];
// Comb loop: per iteration, one table entry from each half-table is
// combined; bits are sampled at 64-bit strides (see get_bit! offsets).
for i in (0..32).rev() {
{
// Upper comb: bits at offsets i+33, i+97, i+161 select the entry;
// the bit at i+225 supplies the sign.
let mut mask: u32 = get_bit!(scalar2, i + 32 + 1)
| (get_bit!(scalar2, i + 64 + 32 + 1) << 1)
| (get_bit!(scalar2, i + 2 * 64 + 32 + 1) << 2);
if i == 31 {
// First iteration: initialize the accumulator directly from the
// table entry (z = 1 in Montgomery form).
current_point[..2].copy_from_slice(&P256_BASEPOINT_PRECOMP2[1][mask as usize]);
current_point[2].copy_from_slice(&ONE_MONTGOMERY);
} else {
unsafe { P256_double_j(current_point.as_mut_ptr(), current_point.as_ptr()) };
// sign is all-ones when the sign bit is 0 (wrapping_sub trick);
// the XOR/AND folds it into the 3-bit index without branching.
let sign: u32 = get_bit!(scalar2, i + 3 * 64 + 32 + 1).wrapping_sub(1); mask = (mask ^ sign) & 7;
selected_point.copy_from_slice(&P256_BASEPOINT_PRECOMP2[1][mask as usize]);
// Apply the sign by conditionally negating y; the table lookup
// itself does not depend on the sign.
unsafe {
P256_negate_mod_p_if(
selected_point[1].as_mut_ptr(),
selected_point[1].as_ptr(),
sign & 1,
)
};
unsafe {
P256_add_sub_j(
current_point.as_mut_ptr(),
selected_point.as_mut_ptr(),
false,
true,
)
};
}
}
{
// Lower comb: same scheme with bits at offsets i+1, i+65, i+129 and
// sign bit at i+193, using the other half-table.
let mut mask: u32 = get_bit!(scalar2, i + 1)
| (get_bit!(scalar2, i + 64 + 1) << 1)
| (get_bit!(scalar2, i + 2 * 64 + 1) << 2);
let sign: u32 = get_bit!(scalar2, i + 3 * 64 + 1).wrapping_sub(1); mask = (mask ^ sign) & 7;
selected_point.copy_from_slice(&P256_BASEPOINT_PRECOMP2[0][mask as usize]);
unsafe {
P256_negate_mod_p_if(
selected_point[1].as_mut_ptr(),
selected_point[1].as_ptr(),
sign & 1,
)
};
unsafe {
P256_add_sub_j(
current_point.as_mut_ptr(),
selected_point.as_mut_ptr(),
false,
true,
)
};
}
}
// Convert to affine and undo the initial scalar negation, if any.
unsafe {
P256_jacobian_to_affine(
output_mont_x.as_mut_ptr(),
output_mont_y.as_mut_ptr(),
current_point.as_ptr(),
)
};
unsafe { P256_negate_mod_p_if(output_mont_y.as_mut_ptr(), output_mont_y.as_mut_ptr(), even) };
}
#[must_use]
/// Multiply the standard basepoint G by `scalar` (key generation and the
/// point half of signing). On success writes affine, non-Montgomery
/// coordinates; fails only when the scalar is out of range.
pub fn scalarmult_base(
    result_x: &mut [u32; 8],
    result_y: &mut [u32; 8],
    scalar: &[u32; 8],
) -> bool {
    if unsafe { !P256_check_range_n(scalar) } {
        return false;
    }
    scalarmult_fixed_base(result_x, result_y, scalar);
    unsafe {
        P256_from_montgomery(result_x.as_mut_ptr(), result_x.as_ptr());
        P256_from_montgomery(result_y.as_mut_ptr(), result_y.as_ptr());
    }
    true
}
#[repr(C)]
#[derive(Default, Debug, Copy, Clone)]
/// Precomputed signing state produced by `sign_step1` for a nonce k:
/// the signature's `r` component and k^-1 mod n. `sign_step2` consumes
/// and zeroizes it, so each value can be used for exactly one signature.
pub struct SignPrecomp {
pub r: [u32; 8],
pub k_inv: [u32; 8],
}
#[must_use]
/// One-shot ECDSA signing: precompute from the nonce `k`, then finish with
/// the message hash and private key. `r` and `s` are zeroed and false is
/// returned when `k` is invalid or the signature would be degenerate.
pub fn sign(
    r: &mut [u32; 8],
    s: &mut [u32; 8],
    hash: &[u8],
    private_key: &[u32; 8],
    k: &[u32; 8],
) -> bool {
    let mut precomp: SignPrecomp = Default::default();
    if sign_step1(&mut precomp, k) {
        sign_step2(r, s, hash, private_key, &mut precomp)
    } else {
        r.fill(0);
        s.fill(0);
        false
    }
}
#[must_use]
/// First half of ECDSA signing: for nonce `k`, compute r = (k*G).x mod n
/// and k^-1 mod n into `result`. Returns false — with `result` zeroed —
/// when `k` is out of range or r turns out to be zero.
pub fn sign_step1(result: &mut SignPrecomp, k: &[u32; 8]) -> bool {
    if unsafe { P256_check_range_n(k) } {
        let mut point_x: [u32; 8] = [0; 8];
        let mut point_y: [u32; 8] = [0; 8];
        scalarmult_fixed_base(&mut point_x, &mut point_y, k);
        mod_n_inv(&mut result.k_inv, k);
        // r = x coordinate of k*G, taken out of Montgomery form and
        // reduced into the scalar field.
        unsafe { P256_from_montgomery(result.r.as_mut_ptr(), point_x.as_ptr()) };
        unsafe { P256_reduce_mod_n_32bytes(result.r.as_mut_ptr(), result.r.as_ptr()) };
        // r must be non-zero (r is public, so scanning it is fine).
        if result.r.iter().any(|&word| word != 0) {
            return true;
        }
    }
    // Failure: leave no partial secrets behind.
    result.r.fill(0);
    result.k_inv.fill(0);
    false
}
fn hash_to_z(z: &mut [u8], hash: &[u8]) {
    // Copy up to the first 32 bytes of the big-endian `hash` into `z` in
    // reversed (little-endian) order, zero-filling the rest of `z`.
    let n = core::cmp::min(hash.len(), 32);
    for (dst, src) in z[..n].iter_mut().zip(hash[..n].iter().rev()) {
        *dst = *src;
    }
    z[n..].fill(0);
}
#[must_use]
/// Second half of ECDSA signing: s = k^-1 * (z + r*d) mod n, built from the
/// precomputed (r, k^-1), the message hash and the private key `d`. On
/// success the precomp is zeroized (one-shot) and true is returned; on
/// failure (bad precomp or s == 0) `r` and `s` are zeroed instead.
pub fn sign_step2(
    r: &mut [u32; 8],
    s: &mut [u32; 8],
    hash: &[u8],
    private_key: &[u32; 8],
    sign_precomp: &mut SignPrecomp,
) -> bool {
    let precomp_ok = unsafe {
        P256_check_range_n(&sign_precomp.k_inv) && P256_check_range_n(&sign_precomp.r)
    };
    if precomp_ok {
        // `r` serves as scratch space for z (the truncated hash) until the
        // real r is copied in below.
        hash_to_z(u32x8_to_u8x32_mut(r), hash);
        unsafe {
            P256_mul_mod_n(
                s.as_mut_ptr(),
                sign_precomp.r.as_ptr(),
                private_key.as_ptr(),
            )
        };
        unsafe { P256_add_mod_n(s.as_mut_ptr(), r.as_ptr(), s.as_ptr()) };
        unsafe { P256_mul_mod_n(s.as_mut_ptr(), sign_precomp.k_inv.as_ptr(), s.as_ptr()) };
        r.copy_from_slice(&sign_precomp.r);
        if s.iter().any(|&word| word != 0) {
            // Wipe the one-shot precomp so the nonce cannot be reused.
            sign_precomp.r.fill(0);
            sign_precomp.k_inv.fill(0);
            return true;
        }
    }
    r.fill(0);
    s.fill(0);
    false
}
fn slide_257(a: &[u8; 32]) -> [i8; 257] {
    // Recode the 256-bit little-endian integer `a` into a signed sliding
    // window form: every non-zero digit is odd and in [-15, 15], and
    // sum(out[i] * 2^i) == a. Index 256 absorbs a possible final carry.
    // Variable-time: used only on public values (verification).
    let mut out: [i8; 257] = [0; 257];
    for bit in 0..256 {
        out[bit] = ((a[bit >> 3] >> (bit & 7)) & 1) as i8;
    }
    out[256] = 0;
    for i in 0..256 {
        if out[i] == 0 {
            continue;
        }
        let mut b: usize = 1;
        while b <= 4 && i + b < 256 {
            if out[i + b] != 0 {
                if out[i] + (out[i + b] << b) <= 15 {
                    // Fold the higher bit down into this digit.
                    out[i] += out[i + b] << b;
                    out[i + b] = 0;
                } else if out[i] - (out[i + b] << b) >= -15 {
                    // Subtract instead and ripple a carry upwards until a
                    // zero slot absorbs it (out[256] guarantees termination).
                    out[i] -= out[i + b] << b;
                    loop {
                        out[i + b] = 0;
                        b += 1;
                        if out[i + b] == 0 {
                            out[i + b] = 1;
                            b -= 1; break;
                        }
                    }
                } else {
                    break;
                }
            }
            b += 1;
        }
    }
    out
}
#[must_use = "The return value indicates if the message is authentic"]
/// ECDSA verification: checks that (r, s) is a valid signature over `hash`
/// for the given affine public key. All [u32; 8] values are little-endian;
/// the hash is big-endian bytes and is truncated to 32 bytes if longer.
pub fn verify(
public_key_x: &[u32; 8],
public_key_y: &[u32; 8],
hash: &[u8],
r: &[u32; 8],
s: &[u32; 8],
) -> bool {
// r and s must pass the scalar range check.
if unsafe { !P256_check_range_n(r) || !P256_check_range_n(s) } {
return false;
}
// Public key coordinates must be valid field elements...
if unsafe { !P256_check_range_p(public_key_x) || !P256_check_range_p(public_key_y) } {
return false;
}
// Build odd multiples of the public key: pk_table[k] = (2k+1) * PK in
// Jacobian/Montgomery form. pk_table[7] temporarily holds 2*PK and is
// overwritten in the final loop iteration.
let mut pk_table: [[[u32; 8]; 3]; 8] = [[[0; 8]; 3]; 8];
unsafe {
P256_to_montgomery(pk_table[0][0].as_mut_ptr(), public_key_x.as_ptr());
P256_to_montgomery(pk_table[0][1].as_mut_ptr(), public_key_y.as_ptr());
}
pk_table[0][2].copy_from_slice(&ONE_MONTGOMERY);
// ...and the key must actually be a point on the curve.
if unsafe { !P256_point_is_on_curve(pk_table[0][0].as_ptr(), pk_table[0][1].as_ptr()) } {
return false;
}
unsafe { P256_double_j(pk_table[7].as_mut_ptr(), pk_table[0].as_ptr()) };
(1..8).for_each(|i| {
pk_table.copy_within(7..8, i);
unsafe {
P256_add_sub_j(
pk_table[i].as_mut_ptr(),
pk_table[i - 1].as_ptr(),
false,
false,
)
};
});
// u1 = z * s^-1 mod n, u2 = r * s^-1 mod n, where z is the truncated hash.
let mut w: [u32; 8] = [0; 8];
let mut u1: [u32; 8] = [0; 8];
let mut u2: [u32; 8] = [0; 8];
let mut z: [u32; 8] = [0; 8];
hash_to_z(u32x8_to_u8x32_mut(&mut z), hash);
mod_n_inv(&mut w, s);
unsafe { P256_mul_mod_n(u1.as_mut_ptr(), z.as_ptr(), w.as_ptr()) };
unsafe { P256_mul_mod_n(u2.as_mut_ptr(), r.as_ptr(), w.as_ptr()) };
// Compute u1*G + u2*PK with one shared double-and-add pass over the two
// signed sliding-window recodings (Strauss–Shamir). All inputs here are
// public, so the variable-time windowing leaks nothing secret.
let slide_bp: [i8; 257] = slide_257(u32x8_to_u8x32(&u1));
let slide_pk: [i8; 257] = slide_257(u32x8_to_u8x32(&u2));
let mut cp: [[u32; 8]; 3] = [[0; 8]; 3];
#[allow(clippy::comparison_chain)]
slide_bp
.iter()
.rev()
.zip(slide_pk.iter().rev())
.for_each(|(&bp, &pk)| {
unsafe { P256_double_j(cp.as_mut_ptr(), cp.as_ptr()) };
// Odd digit d indexes the affine basepoint table at d/2; negative
// digits subtract instead of add.
if bp > 0 {
unsafe {
P256_add_sub_j(
cp.as_mut_ptr(),
P256_BASEPOINT_PRECOMP[(bp / 2) as usize].as_ptr(),
false,
true,
)
};
} else if bp < 0 {
unsafe {
P256_add_sub_j(
cp.as_mut_ptr(),
P256_BASEPOINT_PRECOMP[((-bp) / 2) as usize].as_ptr(),
true,
true,
)
};
}
// Same scheme for the public-key table (Jacobian entries).
if pk > 0 {
unsafe {
P256_add_sub_j(
cp.as_mut_ptr(),
pk_table[(pk / 2) as usize].as_ptr(),
false,
false,
)
};
} else if pk < 0 {
unsafe {
P256_add_sub_j(
cp.as_mut_ptr(),
pk_table[((-pk) / 2) as usize].as_ptr(),
true,
false,
)
};
}
});
// Accept iff the computed point matches the signature's r (final
// comparison implemented in assembly).
unsafe { P256_verify_last_step(r.as_ptr(), cp.as_ptr()) }
}
#[repr(C)]
#[derive(Default)]
// One of the f/g accumulators used by the divsteps inverse (mod_n_inv):
// 9 little-endian limbs plus a pending-negation flag. Layout is shared
// with the assembly routines — do not reorder fields.
struct FGInteger {
flip_sign: i32, signed_value: [u32; 9],
}
#[repr(C)]
#[derive(Default)]
// One of the x/y Bezout coefficients (mod n) tracked alongside f/g during
// the inverse. Layout shared with the assembly — do not reorder fields.
struct XYInteger {
flip_sign: i32, value: [u32; 8], }
#[repr(C)]
#[derive(Default)]
// Full divsteps state for one iteration of mod_n_inv: the (f, g) pair and
// the corresponding (x, y) coefficients mod n. Two of these are ping-ponged
// between iterations.
struct State {
fg: [FGInteger; 2],
xy: [XYInteger; 2],
}
// Compute res = a^-1 mod n (n = group order) via 24 batches of 31 divsteps
// — presumably a constant-time Bernstein–Yang style inversion; the divstep
// kernel and matrix applications live in the assembly.
fn mod_n_inv(res: &mut [u32; 8], a: &[u32; 8]) {
// Ping-pong: state[i % 2] is the input of iteration i, the other entry
// receives the outputs.
let mut state: [State; 2] = Default::default();
// f starts as the modulus n (9 limbs), g as the value to invert.
state[0].fg[0].flip_sign = 0; state[0].fg[0]
.signed_value
.copy_from_slice(unsafe { &P256_order }); state[0].fg[1].flip_sign = 0; state[0].fg[1].signed_value[..8].copy_from_slice(a); state[0].fg[1].signed_value[8] = 0;
// Initial y coefficient 2^24 — presumably pre-cancels the scaling the
// divstep batches introduce; verify against the asm.
state[0].xy[1].value[0] = 1 << 24;
let mut delta: i32 = 1;
(0..24).for_each(|i| {
let mut matrix: [u32; 4] = [0; 4];
// Feed the low limbs into the divstep batch with any pending sign
// flip applied (conditional two's complement via XOR/subtract).
let negate_f: u32 = state[i % 2].fg[0].flip_sign as u32;
let negate_g: u32 = state[i % 2].fg[1].flip_sign as u32;
delta = unsafe {
P256_divsteps2_31(
delta,
(state[i % 2].fg[0].signed_value[0] ^ negate_f).wrapping_sub(negate_f),
(state[i % 2].fg[1].signed_value[0] ^ negate_g).wrapping_sub(negate_g),
matrix.as_mut_ptr(),
)
};
// Apply the 2x2 transition matrix to (f, g)...
unsafe {
P256_matrix_mul_fg_9(
matrix[0],
matrix[1],
state[i % 2].fg.as_ptr(),
&mut state[(i + 1) % 2].fg[0],
)
};
unsafe {
P256_matrix_mul_fg_9(
matrix[2],
matrix[3],
state[i % 2].fg.as_ptr(),
&mut state[(i + 1) % 2].fg[1],
)
};
// ...and the same matrix to the (x, y) coefficients mod n.
unsafe {
P256_matrix_mul_mod_n(
matrix[0],
matrix[1],
state[i % 2].xy.as_ptr(),
&mut state[(i + 1) % 2].xy[0],
)
};
unsafe {
P256_matrix_mul_mod_n(
matrix[2],
matrix[3],
state[i % 2].xy.as_ptr(),
&mut state[(i + 1) % 2].xy[1],
)
};
});
// After an even number (24) of iterations the result is back in
// state[0].xy[0]; fix the overall sign from the accumulated flip bits
// and the top (sign) limb of f.
unsafe {
P256_negate_mod_n_if(
res.as_mut_ptr(),
&state[0].xy[0].value[0],
((state[0].xy[0].flip_sign
^ state[0].fg[0].flip_sign
^ (state[0].fg[0].signed_value[8] as i32))
& 1) as u32,
)
};
}