// dcrypt_algorithms/ec/p256/scalar.rs
//! P-256 scalar arithmetic operations

3use crate::ec::p256::constants::P256_SCALAR_SIZE;
4use crate::error::{validate, Error, Result};
5use dcrypt_common::security::SecretBuffer;
6use dcrypt_params::traditional::ecdsa::NIST_P256;
7use zeroize::{Zeroize, ZeroizeOnDrop};
8
/// P-256 scalar value for use in elliptic curve operations
///
/// Represents integers modulo the curve order n. Used for private keys
/// and scalar multiplication. Automatically zeroized on drop for security.
///
/// The inner [`SecretBuffer`] holds the value as 32 big-endian bytes
/// (the same representation produced/consumed by `serialize`/`deserialize`).
#[derive(Clone, Zeroize, ZeroizeOnDrop, Debug)]
pub struct Scalar(SecretBuffer<P256_SCALAR_SIZE>);
15
16impl Scalar {
17 /// Create a scalar from raw bytes with modular reduction
18 ///
19 /// Ensures the scalar is in the valid range [1, n-1] where n is the curve order.
20 /// Performs modular reduction if the input is >= n.
21 /// Returns an error if the result would be zero (invalid for cryptographic use).
22 pub fn new(mut data: [u8; P256_SCALAR_SIZE]) -> Result<Self> {
23 Self::reduce_scalar_bytes(&mut data)?;
24 Ok(Scalar(SecretBuffer::new(data)))
25 }
26
27 /// Internal constructor that allows zero values
28 ///
29 /// Used for intermediate arithmetic operations where zero is a valid result.
30 /// Should NOT be used for secret keys, nonces, or final signature components.
31 fn from_bytes_unchecked(bytes: [u8; P256_SCALAR_SIZE]) -> Self {
32 Scalar(SecretBuffer::new(bytes))
33 }
34
35 /// Create a scalar from an existing SecretBuffer
36 ///
37 /// Performs the same validation and reduction as `new()` but starts
38 /// from a SecretBuffer instead of a raw byte array.
39 pub fn from_secret_buffer(buffer: SecretBuffer<P256_SCALAR_SIZE>) -> Result<Self> {
40 let mut bytes = [0u8; P256_SCALAR_SIZE];
41 bytes.copy_from_slice(buffer.as_ref());
42
43 Self::reduce_scalar_bytes(&mut bytes)?;
44 Ok(Scalar(SecretBuffer::new(bytes)))
45 }
46
47 /// Access the underlying SecretBuffer containing the scalar value
48 pub fn as_secret_buffer(&self) -> &SecretBuffer<P256_SCALAR_SIZE> {
49 &self.0
50 }
51
52 /// Serialize the scalar to a byte array
53 ///
54 /// Returns the scalar in big-endian byte representation.
55 /// The output is suitable for storage or transmission.
56 pub fn serialize(&self) -> [u8; P256_SCALAR_SIZE] {
57 let mut result = [0u8; P256_SCALAR_SIZE];
58 result.copy_from_slice(self.0.as_ref());
59 result
60 }
61
62 /// Deserialize a scalar from bytes with validation
63 ///
64 /// Parses bytes as a big-endian scalar value and ensures it's
65 /// in the valid range for P-256 operations.
66 pub fn deserialize(bytes: &[u8]) -> Result<Self> {
67 validate::length("P-256 Scalar", bytes.len(), P256_SCALAR_SIZE)?;
68
69 let mut scalar_bytes = [0u8; P256_SCALAR_SIZE];
70 scalar_bytes.copy_from_slice(bytes);
71
72 Self::new(scalar_bytes)
73 }
74
75 /// Check if the scalar represents zero
76 ///
77 /// Constant-time check to determine if the scalar is the
78 /// additive identity (which is invalid for most cryptographic operations).
79 pub fn is_zero(&self) -> bool {
80 self.0.as_ref().iter().all(|&b| b == 0)
81 }
82
83 /// Convert big-endian bytes to little-endian limbs
84 /// Properly extracts 4-byte chunks from BE array and converts to LE limbs
85 #[inline(always)]
86 fn to_le_limbs(bytes_be: &[u8; 32]) -> [u32; 8] {
87 let mut limbs = [0u32; 8];
88
89 // limb-0 must hold the 4 least-significant bytes, limb-7 the 4 most-significant
90 #[allow(clippy::needless_range_loop)] // Index used for offset calculation
91 for i in 0..8 {
92 let start = 28 - i * 4; // index of the MS-byte of this limb
93 limbs[i] = u32::from_le_bytes([
94 bytes_be[start + 3],
95 bytes_be[start + 2],
96 bytes_be[start + 1],
97 bytes_be[start],
98 ]);
99 }
100 limbs
101 }
102
103 /// Add two scalars modulo the curve order n
104 pub fn add_mod_n(&self, other: &Self) -> Result<Self> {
105 let self_limbs = Self::to_le_limbs(&self.serialize());
106 let other_limbs = Self::to_le_limbs(&other.serialize());
107
108 let mut r = [0u32; 8];
109 let mut carry = 0u64;
110
111 // Plain 256-bit add
112 #[allow(clippy::needless_range_loop)] // Index used for multiple arrays
113 for i in 0..8 {
114 let tmp = self_limbs[i] as u64 + other_limbs[i] as u64 + carry;
115 r[i] = tmp as u32;
116 carry = tmp >> 32;
117 }
118
119 // If we overflowed OR r >= n, subtract n once
120 if carry == 1 || Self::geq(&r, &Self::N_LIMBS) {
121 Self::sub_in_place(&mut r, &Self::N_LIMBS);
122 }
123
124 // Use unchecked constructor to allow zero in intermediate arithmetic
125 Ok(Self::from_bytes_unchecked(Self::limbs_to_be(&r)))
126 }
127
128 /// Subtract two scalars modulo the curve order n
129 pub fn sub_mod_n(&self, other: &Self) -> Result<Self> {
130 let self_limbs = Self::to_le_limbs(&self.serialize());
131 let other_limbs = Self::to_le_limbs(&other.serialize());
132
133 let mut r = [0u32; 8];
134 let mut borrow = 0i64;
135
136 #[allow(clippy::needless_range_loop)] // Index used for multiple arrays
137 for i in 0..8 {
138 let tmp = self_limbs[i] as i64 - other_limbs[i] as i64 - borrow;
139 if tmp < 0 {
140 r[i] = (tmp + (1i64 << 32)) as u32;
141 borrow = 1;
142 } else {
143 r[i] = tmp as u32;
144 borrow = 0;
145 }
146 }
147
148 if borrow == 1 {
149 // Result was negative → add n back
150 let mut c = 0u64;
151 #[allow(clippy::needless_range_loop)] // Index used for multiple arrays
152 for i in 0..8 {
153 let tmp = r[i] as u64 + Self::N_LIMBS[i] as u64 + c;
154 r[i] = tmp as u32;
155 c = tmp >> 32;
156 }
157 }
158
159 // Use unchecked constructor to allow zero in intermediate arithmetic
160 Ok(Self::from_bytes_unchecked(Self::limbs_to_be(&r)))
161 }
162
163 /// Multiply two scalars modulo the curve order n
164 ///
165 /// Uses constant-time double-and-add algorithm for correctness and security.
166 /// Processes bits from MSB to LSB to ensure correct powers of 2.
167 pub fn mul_mod_n(&self, other: &Self) -> Result<Self> {
168 // Start with zero (additive identity)
169 let mut acc = Self::from_bytes_unchecked([0u8; P256_SCALAR_SIZE]);
170
171 // Process each bit from MSB to LSB
172 for byte in other.serialize() {
173 for i in (0..8).rev() {
174 // MSB first within each byte
175 // Double the accumulator: acc = acc * 2 (mod n)
176 acc = acc.add_mod_n(&acc)?;
177
178 // If bit is set, add self: acc = acc + self (mod n)
179 if (byte >> i) & 1 == 1 {
180 acc = acc.add_mod_n(self)?;
181 }
182 }
183 }
184
185 Ok(acc)
186 }
187
188 /// Compute multiplicative inverse modulo n using Fermat's little theorem
189 /// a^(-1) ≡ a^(n-2) (mod n). Left-to-right binary exponentiation.
190 pub fn inv_mod_n(&self) -> Result<Self> {
191 // zero has no inverse
192 if self.is_zero() {
193 return Err(Error::param("P-256 Scalar", "Cannot invert zero scalar"));
194 }
195
196 // Step 1: form exponent = n-2
197 let mut exp = NIST_P256.n; // big-endian [u8;32]
198 // subtract 2 with borrow
199 let mut borrow = 2u16;
200 for i in (0..P256_SCALAR_SIZE).rev() {
201 let v = exp[i] as i16 - (borrow as i16);
202 if v < 0 {
203 exp[i] = (v + 256) as u8;
204 borrow = 1;
205 } else {
206 exp[i] = v as u8;
207 borrow = 0;
208 }
209 }
210
211 // Step 2: binary exponentiation, left-to-right:
212 // result = 1
213 // for each bit of exp from MSB to LSB:
214 // result = result^2 mod n
215 // if bit == 1 { result = result * a mod n }
216 let mut result = {
217 let mut one = [0u8; P256_SCALAR_SIZE];
218 one[P256_SCALAR_SIZE - 1] = 1;
219 // from_bytes_unchecked is fine here because 1 < n
220 Self::from_bytes_unchecked(one)
221 };
222 let base = self.clone();
223
224 for byte in exp {
225 for bit in (0..8).rev() {
226 // square
227 result = result.mul_mod_n(&result)?;
228 // multiply if this exp-bit is 1
229 if (byte >> bit) & 1 == 1 {
230 result = result.mul_mod_n(&base)?;
231 }
232 }
233 }
234
235 Ok(result)
236 }
237
238 /// Compute the additive inverse (negation) modulo n
239 ///
240 /// Returns -self mod n, which is equivalent to n - self when self != 0
241 /// Returns 0 when self is 0
242 pub fn negate(&self) -> Self {
243 // If self is zero, return zero
244 if self.is_zero() {
245 return Self::from_bytes_unchecked([0u8; P256_SCALAR_SIZE]);
246 }
247
248 // Otherwise compute n - self
249 let n_limbs = Self::N_LIMBS;
250 let self_limbs = Self::to_le_limbs(&self.serialize());
251 let mut res = [0u32; 8];
252
253 // Subtract self from n
254 let mut borrow = 0i64;
255 #[allow(clippy::needless_range_loop)] // Index used for multiple arrays
256 for i in 0..8 {
257 let tmp = n_limbs[i] as i64 - self_limbs[i] as i64 - borrow;
258 if tmp < 0 {
259 res[i] = (tmp + (1i64 << 32)) as u32;
260 borrow = 1;
261 } else {
262 res[i] = tmp as u32;
263 borrow = 0;
264 }
265 }
266
267 // No borrow should occur since self < n
268 debug_assert_eq!(borrow, 0);
269
270 Self::from_bytes_unchecked(Self::limbs_to_be(&res))
271 }
272
273 // Private helper methods
274
275 /// Reduce scalar modulo the curve order n using constant-time arithmetic
276 ///
277 /// The curve order n for P-256 is:
278 /// n = 0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551
279 ///
280 /// Algorithm:
281 /// 1. Check if input is zero (invalid)
282 /// 2. Compare with curve order using constant-time comparison
283 /// 3. Conditionally subtract n if input >= n
284 /// 4. Verify result is still non-zero
285 fn reduce_scalar_bytes(bytes: &mut [u8; P256_SCALAR_SIZE]) -> Result<()> {
286 let order = &NIST_P256.n;
287
288 // Reject zero scalars immediately
289 if bytes.iter().all(|&b| b == 0) {
290 return Err(Error::param("P-256 Scalar", "Scalar cannot be zero"));
291 }
292
293 // Constant-time comparison with curve order
294 // We want to check: is bytes >= order?
295 let mut gt = 0u8; // set if bytes > order
296 let mut lt = 0u8; // set if bytes < order
297
298 for i in 0..P256_SCALAR_SIZE {
299 let x = bytes[i];
300 let y = order[i];
301 gt |= ((x > y) as u8) & (!lt);
302 lt |= ((x < y) as u8) & (!gt);
303 }
304 let ge = gt | ((!lt) & 1); // ge = gt || eq (if not less, then greater or equal)
305
306 if ge == 1 {
307 // If scalar >= order, perform modular reduction
308 let mut borrow = 0u16;
309 let mut temp_bytes = *bytes;
310
311 for i in (0..P256_SCALAR_SIZE).rev() {
312 let diff = (temp_bytes[i] as i16) - (order[i] as i16) - (borrow as i16);
313 if diff < 0 {
314 temp_bytes[i] = (diff + 256) as u8;
315 borrow = 1;
316 } else {
317 temp_bytes[i] = diff as u8;
318 borrow = 0;
319 }
320 }
321
322 *bytes = temp_bytes;
323 }
324
325 // Check for zero after reduction
326 if bytes.iter().all(|&b| b == 0) {
327 return Err(Error::param(
328 "P-256 Scalar",
329 "Reduction resulted in zero scalar",
330 ));
331 }
332
333 Ok(())
334 }
335
336 // Helper constants - stored in little-endian limb order
337 const N_LIMBS: [u32; 8] = [
338 0xFC63_2551,
339 0xF3B9_CAC2,
340 0xA717_9E84,
341 0xBCE6_FAAD,
342 0xFFFF_FFFF,
343 0xFFFF_FFFF,
344 0x0000_0000,
345 0xFFFF_FFFF,
346 ];
347
348 /// Compare two limb arrays for greater-than-or-equal
349 #[inline(always)]
350 fn geq(a: &[u32; 8], b: &[u32; 8]) -> bool {
351 for i in (0..8).rev() {
352 if a[i] > b[i] {
353 return true;
354 }
355 if a[i] < b[i] {
356 return false;
357 }
358 }
359 true // equal
360 }
361
362 /// Subtract b from a in-place
363 #[inline(always)]
364 fn sub_in_place(a: &mut [u32; 8], b: &[u32; 8]) {
365 let mut borrow = 0u64;
366 #[allow(clippy::needless_range_loop)] // Index used for multiple arrays
367 for i in 0..8 {
368 let tmp = (a[i] as u64).wrapping_sub(b[i] as u64).wrapping_sub(borrow);
369 a[i] = tmp as u32;
370 borrow = (tmp >> 63) & 1; // 1 if we wrapped
371 }
372 }
373
374 /// Convert little-endian limbs to big-endian bytes
375 /// The inverse of to_le_limbs
376 #[inline(always)]
377 fn limbs_to_be(limbs: &[u32; 8]) -> [u8; 32] {
378 let mut out = [0u8; 32];
379 for (i, &w) in limbs.iter().enumerate() {
380 let be = w.to_le_bytes(); // limb itself is little-endian
381 let start = 28 - i * 4;
382 out[start] = be[3];
383 out[start + 1] = be[2];
384 out[start + 2] = be[1];
385 out[start + 3] = be[0];
386 }
387 out
388 }
389}