dcrypt_algorithms/ec/p224/scalar.rs
//! P-224 scalar arithmetic operations

use crate::ec::p224::constants::P224_SCALAR_SIZE;
use crate::error::{validate, Error, Result};
use dcrypt_common::security::SecretBuffer;
use dcrypt_params::traditional::ecdsa::NIST_P224;
use zeroize::{Zeroize, ZeroizeOnDrop};

/// P-224 scalar value for use in elliptic curve operations
///
/// Represents integers modulo the curve order n. Used for private keys
/// and scalar multiplication. Automatically zeroized on drop for security.
#[derive(Clone, Zeroize, ZeroizeOnDrop, Debug)]
pub struct Scalar(SecretBuffer<P224_SCALAR_SIZE>);

impl Scalar {
    /// Create a scalar from raw bytes with modular reduction
    ///
    /// Ensures the scalar is in the valid range [1, n-1] where n is the curve order.
    /// Performs modular reduction if the input is >= n.
    /// Returns an error if the result would be zero (invalid for cryptographic use).
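    ///
    /// # Example
    ///
    /// A minimal usage sketch (marked `ignore`: the public re-export path of this
    /// type is not assumed here):
    ///
    /// ```ignore
    /// // An all-0xFF input is >= n, so it is reduced modulo the curve order.
    /// let scalar = Scalar::new([0xFF; P224_SCALAR_SIZE])?;
    /// assert!(!scalar.is_zero());
    ///
    /// // An all-zero input is rejected outright.
    /// assert!(Scalar::new([0u8; P224_SCALAR_SIZE]).is_err());
    /// ```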
    pub fn new(mut data: [u8; P224_SCALAR_SIZE]) -> Result<Self> {
        Self::reduce_scalar_bytes(&mut data)?;
        Ok(Scalar(SecretBuffer::new(data)))
    }

    /// Internal constructor that allows zero values
    ///
    /// Used for intermediate arithmetic operations where zero is a valid result.
    /// Should NOT be used for secret keys, nonces, or final signature components.
    fn from_bytes_unchecked(bytes: [u8; P224_SCALAR_SIZE]) -> Self {
        Scalar(SecretBuffer::new(bytes))
    }

    /// Create a scalar from an existing SecretBuffer
    ///
    /// Performs the same validation and reduction as `new()` but starts
    /// from a SecretBuffer instead of a raw byte array.
    pub fn from_secret_buffer(buffer: SecretBuffer<P224_SCALAR_SIZE>) -> Result<Self> {
        let mut bytes = [0u8; P224_SCALAR_SIZE];
        bytes.copy_from_slice(buffer.as_ref());

        Self::reduce_scalar_bytes(&mut bytes)?;
        Ok(Scalar(SecretBuffer::new(bytes)))
    }

    /// Access the underlying SecretBuffer containing the scalar value
    pub fn as_secret_buffer(&self) -> &SecretBuffer<P224_SCALAR_SIZE> {
        &self.0
    }

    /// Serialize the scalar to a byte array
    ///
    /// Returns the scalar in big-endian byte representation.
    /// The output is suitable for storage or transmission.
    pub fn serialize(&self) -> [u8; P224_SCALAR_SIZE] {
        let mut result = [0u8; P224_SCALAR_SIZE];
        result.copy_from_slice(self.0.as_ref());
        result
    }

    /// Deserialize a scalar from bytes with validation
    ///
    /// Parses bytes as a big-endian scalar value and ensures it's
    /// in the valid range for P-224 operations.
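    ///
    /// # Example
    ///
    /// A round-trip sketch (marked `ignore` because the public import path of
    /// this type is not assumed here):
    ///
    /// ```ignore
    /// let scalar = Scalar::new([0x42; P224_SCALAR_SIZE])?;
    /// let bytes = scalar.serialize();
    /// let restored = Scalar::deserialize(&bytes)?;
    /// assert_eq!(restored.serialize(), bytes);
    /// ```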
    pub fn deserialize(bytes: &[u8]) -> Result<Self> {
        validate::length("P-224 Scalar", bytes.len(), P224_SCALAR_SIZE)?;

        let mut scalar_bytes = [0u8; P224_SCALAR_SIZE];
        scalar_bytes.copy_from_slice(bytes);

        Self::new(scalar_bytes)
    }

    /// Check if the scalar represents zero
    ///
    /// Constant-time check to determine if the scalar is the
    /// additive identity (which is invalid for most cryptographic operations).
    pub fn is_zero(&self) -> bool {
        // OR all bytes together so the check does not short-circuit on the
        // first non-zero byte.
        self.0.as_ref().iter().fold(0u8, |acc, &b| acc | b) == 0
    }

    /// Convert big-endian bytes to little-endian limbs
    /// Input bytes are already big-endian from parameter tables
    #[inline(always)]
    fn to_le_limbs(bytes_be: &[u8; 28]) -> [u32; 7] {
        let mut limbs = [0u32; 7];

        // Read big-endian bytes directly into little-endian limbs
        // bytes[0..4] is most significant, goes to limbs[6]
        // bytes[24..28] is least significant, goes to limbs[0]
        for i in 0..7 {
            let offset = i * 4;
            limbs[6 - i] = u32::from_be_bytes([
                bytes_be[offset],
                bytes_be[offset + 1],
                bytes_be[offset + 2],
                bytes_be[offset + 3],
            ]);
        }
        limbs
    }

    /// Convert little-endian limbs to big-endian bytes
    /// The inverse of to_le_limbs
    #[inline(always)]
    fn limbs_to_be(limbs: &[u32; 7]) -> [u8; 28] {
        let mut out = [0u8; 28];

        // Write little-endian limbs to big-endian bytes
        // limbs[6] is most significant, goes to bytes[0..4]
        // limbs[0] is least significant, goes to bytes[24..28]
        for i in 0..7 {
            let bytes = limbs[6 - i].to_be_bytes();
            let offset = i * 4;
            out[offset..offset + 4].copy_from_slice(&bytes);
        }
        out
    }

    /// Add two scalars modulo the curve order n
    pub fn add_mod_n(&self, other: &Self) -> Result<Self> {
        let self_limbs = Self::to_le_limbs(&self.serialize());
        let other_limbs = Self::to_le_limbs(&other.serialize());

        let mut r = [0u32; 7];
        let mut carry = 0u64;

        // Plain 224-bit add
        for (i, result) in r.iter_mut().enumerate() {
            let tmp = self_limbs[i] as u64 + other_limbs[i] as u64 + carry;
            *result = tmp as u32;
            carry = tmp >> 32;
        }

        // If we overflowed OR r >= n, subtract n once
        if carry == 1 || Self::geq(&r, &Self::N_LIMBS) {
            Self::sub_in_place(&mut r, &Self::N_LIMBS);
        }

        // Use unchecked constructor to allow zero in intermediate arithmetic
        Ok(Self::from_bytes_unchecked(Self::limbs_to_be(&r)))
    }

    /// Subtract two scalars modulo the curve order n
    pub fn sub_mod_n(&self, other: &Self) -> Result<Self> {
        let self_limbs = Self::to_le_limbs(&self.serialize());
        let other_limbs = Self::to_le_limbs(&other.serialize());

        let mut r = [0u32; 7];
        let mut borrow = 0i64;

        for (i, result) in r.iter_mut().enumerate() {
            let tmp = self_limbs[i] as i64 - other_limbs[i] as i64 - borrow;
            if tmp < 0 {
                *result = (tmp + (1i64 << 32)) as u32;
                borrow = 1;
            } else {
                *result = tmp as u32;
                borrow = 0;
            }
        }

        if borrow == 1 {
            // Result was negative → add n back
            let mut c = 0u64;
            for (i, result) in r.iter_mut().enumerate() {
                let tmp = *result as u64 + Self::N_LIMBS[i] as u64 + c;
                *result = tmp as u32;
                c = tmp >> 32;
            }
        }

        // Use unchecked constructor to allow zero in intermediate arithmetic
        Ok(Self::from_bytes_unchecked(Self::limbs_to_be(&r)))
    }

    /// Multiply two scalars modulo the curve order n
    ///
    /// Uses a straightforward double-and-add algorithm.
    /// Processes bits from MSB to LSB to ensure correct powers of 2.
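    ///
    /// Informally, after the top k bits of `other` have been processed, the
    /// accumulator holds `self` times the integer formed by those k bits,
    /// reduced mod n; each doubling shifts the partial product left by one bit
    /// before the next bit is conditionally added.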
    pub fn mul_mod_n(&self, other: &Self) -> Result<Self> {
        // Start with zero (additive identity)
        let mut acc = Self::from_bytes_unchecked([0u8; P224_SCALAR_SIZE]);

        // Process each bit from MSB to LSB
        for byte in other.serialize() {
            for i in (0..8).rev() {
                // MSB first within each byte
                // Double the accumulator: acc = acc * 2 (mod n)
                acc = acc.add_mod_n(&acc)?;

                // If bit is set, add self: acc = acc + self (mod n)
                if (byte >> i) & 1 == 1 {
                    acc = acc.add_mod_n(self)?;
                }
            }
        }

        Ok(acc)
    }

    /// Compute multiplicative inverse modulo n using Fermat's little theorem
    /// a^(-1) ≡ a^(n-2) (mod n). Left-to-right binary exponentiation.
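    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore`; the public import path of this type
    /// is not assumed here):
    ///
    /// ```ignore
    /// let mut two = [0u8; P224_SCALAR_SIZE];
    /// two[P224_SCALAR_SIZE - 1] = 2;
    /// let a = Scalar::new(two)?;
    /// let a_inv = a.inv_mod_n()?;
    ///
    /// // a * a^(-1) ≡ 1 (mod n)
    /// let product = a.mul_mod_n(&a_inv)?;
    /// assert_eq!(product.serialize()[P224_SCALAR_SIZE - 1], 1);
    /// ```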
    pub fn inv_mod_n(&self) -> Result<Self> {
        // zero has no inverse
        if self.is_zero() {
            return Err(Error::param("P-224 Scalar", "Cannot invert zero scalar"));
        }

        // Step 1: form exponent = n - 2
        let mut exp = NIST_P224.n; // big-endian [u8; 28]
        // subtract 2 with borrow
        let mut borrow = 2u16;
        for i in (0..P224_SCALAR_SIZE).rev() {
            let v = exp[i] as i16 - (borrow as i16);
            if v < 0 {
                exp[i] = (v + 256) as u8;
                borrow = 1;
            } else {
                exp[i] = v as u8;
                borrow = 0;
            }
        }

        // Step 2: binary exponentiation, left-to-right:
        //   result = 1
        //   for each bit of exp from MSB to LSB:
        //       result = result^2 mod n
        //       if bit == 1 { result = result * a mod n }
        let mut result = {
            let mut one = [0u8; P224_SCALAR_SIZE];
            one[P224_SCALAR_SIZE - 1] = 1;
            // from_bytes_unchecked is fine here because 1 < n
            Self::from_bytes_unchecked(one)
        };
        let base = self.clone();

        for byte in exp {
            for bit in (0..8).rev() {
                // square
                result = result.mul_mod_n(&result)?;
                // multiply if this exp-bit is 1
                if (byte >> bit) & 1 == 1 {
                    result = result.mul_mod_n(&base)?;
                }
            }
        }

        Ok(result)
    }

    /// Compute the additive inverse (negation) modulo n
    ///
    /// Returns -self mod n, which is equivalent to n - self when self != 0
    /// Returns 0 when self is 0
    pub fn negate(&self) -> Self {
        // If self is zero, return zero
        if self.is_zero() {
            return Self::from_bytes_unchecked([0u8; P224_SCALAR_SIZE]);
        }

        // Otherwise compute n - self
        let n_limbs = Self::N_LIMBS;
        let self_limbs = Self::to_le_limbs(&self.serialize());
        let mut res = [0u32; 7];

        // Subtract self from n
        let mut borrow = 0i64;
        for (i, result) in res.iter_mut().enumerate() {
            let tmp = n_limbs[i] as i64 - self_limbs[i] as i64 - borrow;
            if tmp < 0 {
                *result = (tmp + (1i64 << 32)) as u32;
                borrow = 1;
            } else {
                *result = tmp as u32;
                borrow = 0;
            }
        }

        // No borrow should occur since self < n
        debug_assert_eq!(borrow, 0);

        Self::from_bytes_unchecked(Self::limbs_to_be(&res))
    }

    // Private helper methods

    /// Reduce scalar modulo the curve order n
    ///
    /// The curve order n for P-224 is:
    /// n = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFF16A2E0B8F03E13DD29455C5C2A3D
    ///
    /// Algorithm:
    /// 1. Check if input is zero (invalid)
    /// 2. Compare with the curve order using a branchless byte-wise comparison
    /// 3. Conditionally subtract n if input >= n
    /// 4. Verify the result is still non-zero
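    ///
    /// For example, an input equal to n + 1 (which is >= n) is reduced to 1,
    /// while an input equal to n itself reduces to zero and is rejected.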
    fn reduce_scalar_bytes(bytes: &mut [u8; P224_SCALAR_SIZE]) -> Result<()> {
        let order = &NIST_P224.n;

        // Reject zero scalars immediately
        if bytes.iter().all(|&b| b == 0) {
            return Err(Error::param("P-224 Scalar", "Scalar cannot be zero"));
        }

        // Constant-time comparison with curve order
        // We want to check: is bytes >= order?
        let mut gt = 0u8; // set if bytes > order
        let mut lt = 0u8; // set if bytes < order

        for i in 0..P224_SCALAR_SIZE {
            let x = bytes[i];
            let y = order[i];
            gt |= ((x > y) as u8) & (!lt);
            lt |= ((x < y) as u8) & (!gt);
        }
        let ge = gt | ((!lt) & 1); // ge = gt || eq (if not less, then greater or equal)

        if ge == 1 {
            // If scalar >= order, perform modular reduction
            let mut borrow = 0u16;
            let mut temp_bytes = *bytes;

            for i in (0..P224_SCALAR_SIZE).rev() {
                let diff = (temp_bytes[i] as i16) - (order[i] as i16) - (borrow as i16);
                if diff < 0 {
                    temp_bytes[i] = (diff + 256) as u8;
                    borrow = 1;
                } else {
                    temp_bytes[i] = diff as u8;
                    borrow = 0;
                }
            }

            *bytes = temp_bytes;
        }

        // Check for zero after reduction
        if bytes.iter().all(|&b| b == 0) {
            return Err(Error::param(
                "P-224 Scalar",
                "Reduction resulted in zero scalar",
            ));
        }

        Ok(())
    }

    // Helper constants - stored in little-endian limb order
    const N_LIMBS: [u32; 7] = [
        0x5C5C_2A3D,
        0x13DD_2945,
        0xE0B8_F03E,
        0xFFFF_16A2,
        0xFFFF_FFFF,
        0xFFFF_FFFF,
        0xFFFF_FFFF,
    ];

    /// Compare two limb arrays for greater-than-or-equal
    #[inline(always)]
    fn geq(a: &[u32; 7], b: &[u32; 7]) -> bool {
        for i in (0..7).rev() {
            if a[i] > b[i] {
                return true;
            }
            if a[i] < b[i] {
                return false;
            }
        }
        true // equal
    }

    /// Subtract b from a in-place
    #[inline(always)]
    fn sub_in_place(a: &mut [u32; 7], b: &[u32; 7]) {
        let mut borrow = 0u64;
        for (i, elem) in a.iter_mut().enumerate() {
            let tmp = (*elem as u64)
                .wrapping_sub(b[i] as u64)
                .wrapping_sub(borrow);
            *elem = tmp as u32;
            borrow = (tmp >> 63) & 1; // 1 if we wrapped
        }
    }
}
387}