commonware_cryptography/bls12381/primitives/
group.rs

//! Group operations over the BLS12-381 scalar field.
//!
//! This module implements basic group operations over BLS12-381 elements,
//! including point addition, scalar multiplication, and pairing operations.
//!
//! # Warning
//!
//! Ensure that points are checked to belong to the correct subgroup
//! (G1 or G2) to prevent small subgroup attacks. This is particularly important
//! when handling deserialized points or points received from untrusted sources. This
//! is already taken care of for you if you use the provided [Read] implementations,
//! which reject invalid encodings, the point at infinity, and points outside the
//! prime-order subgroup.
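//!
//! For example (a sketch; paths assume the `commonware_cryptography` crate):
//!
//! ```
//! use commonware_codec::DecodeExt;
//! use commonware_cryptography::bls12381::primitives::group::G1;
//!
//! // An all-zero buffer is not a valid compressed encoding.
//! let mut buf = &[0u8; 48][..];
//! assert!(G1::decode(&mut buf).is_err());
//!
//! // The canonical compressed encoding of the point at infinity
//! // (0xc0 followed by 47 zero bytes) is also rejected.
//! let mut inf = [0u8; 48];
//! inf[0] = 0xc0;
//! assert!(G1::decode(&mut &inf[..]).is_err());
//! ```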

use super::variant::Variant;
use crate::Secret;
#[cfg(not(feature = "std"))]
use alloc::{vec, vec::Vec};
use blst::{
    blst_bendian_from_fp12, blst_bendian_from_scalar, blst_expand_message_xmd, blst_fp12, blst_fr,
    blst_fr_add, blst_fr_cneg, blst_fr_from_scalar, blst_fr_from_uint64, blst_fr_inverse,
    blst_fr_mul, blst_fr_sub, blst_hash_to_g1, blst_hash_to_g2, blst_keygen, blst_p1,
    blst_p1_add_or_double, blst_p1_affine, blst_p1_cneg, blst_p1_compress, blst_p1_from_affine,
    blst_p1_in_g1, blst_p1_is_inf, blst_p1_mult, blst_p1_to_affine, blst_p1_uncompress,
    blst_p1s_mult_pippenger, blst_p1s_mult_pippenger_scratch_sizeof, blst_p1s_to_affine, blst_p2,
    blst_p2_add_or_double, blst_p2_affine, blst_p2_cneg, blst_p2_compress, blst_p2_from_affine,
    blst_p2_in_g2, blst_p2_is_inf, blst_p2_mult, blst_p2_to_affine, blst_p2_uncompress,
    blst_p2s_mult_pippenger, blst_p2s_mult_pippenger_scratch_sizeof, blst_p2s_to_affine,
    blst_scalar, blst_scalar_from_be_bytes, blst_scalar_from_bendian, blst_scalar_from_fr,
    blst_sk_check, Pairing, BLS12_381_G1, BLS12_381_G2, BLST_ERROR,
};
use bytes::{Buf, BufMut};
use commonware_codec::{
    EncodeSize,
    Error::{self, Invalid},
    FixedSize, Read, ReadExt, Write,
};
use commonware_math::algebra::{
    Additive, CryptoGroup, Field, HashToGroup, Multiplicative, Object, Random, Ring, Space,
};
use commonware_parallel::Strategy;
use commonware_utils::{hex, Participant};
use core::{
    fmt::{Debug, Display, Formatter},
    hash::{Hash, Hasher},
    iter,
    mem::MaybeUninit,
    ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign},
    ptr,
};
use ctutils::{Choice, CtEq};
use rand_core::CryptoRngCore;
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};

fn all_zero(bytes: &[u8]) -> Choice {
    bytes
        .iter()
        .fold(Choice::TRUE, |acc, b| acc & b.ct_eq(&0u8))
}

/// Domain separation tag used when hashing a message to a curve (G1 or G2).
///
/// Reference: <https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-05#name-ciphersuites>
pub type DST = &'static [u8];

/// Wrapper around [blst_fr] that represents an element of the BLS12‑381
/// scalar field `F_r`.
///
/// The new‑type is marked `#[repr(transparent)]`, so it has exactly the same
/// memory layout as the underlying `blst_fr`, allowing safe passage across
/// the C FFI boundary without additional transmutation.
///
/// All arithmetic is performed modulo the prime
/// `r = 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001`,
/// the order of the BLS12‑381 G1/G2 groups.
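///
/// # Example
///
/// A sketch of field arithmetic (all operations reduce modulo `r`; `test_rng`
/// is the deterministic RNG helper also used by this module's tests):
///
/// ```
/// use commonware_cryptography::bls12381::primitives::group::Scalar;
/// use commonware_math::algebra::{Additive, Field, Random, Ring};
/// use commonware_utils::test_rng;
///
/// let x = Scalar::random(&mut test_rng());
/// // Multiplying by the inverse yields the multiplicative identity.
/// assert_eq!(x.clone() * &x.inv(), Scalar::one());
/// // Adding the negation yields the additive identity.
/// assert_eq!(x.clone() + &(-x), Scalar::zero());
/// ```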
#[derive(Clone, Eq, PartialEq)]
#[repr(transparent)]
pub struct Scalar(blst_fr);

#[cfg(feature = "arbitrary")]
impl arbitrary::Arbitrary<'_> for Scalar {
    fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
        let ikm = u.arbitrary::<[u8; IKM_LENGTH]>()?;
        Ok(Self::from_ikm(&ikm))
    }
}

/// Number of bytes required to encode a scalar in its canonical
/// big-endian form (`32 × 8 = 256 bits`).
///
/// Because `r` is only 255 bits wide, the most-significant byte is always in
/// the range `0x00..=0x7f`, leaving the top bit clear.
pub const SCALAR_LENGTH: usize = 32;

/// Effective bit-length of the field modulus `r` (`ceil(log_2 r) = 255`).
///
/// Useful for constant-time exponentiation loops and for validating that a
/// decoded integer lies in the range `0 <= x < r`.
const SCALAR_BITS: usize = 255;

/// Number of scalar bits for [SmallScalar] (128 bits).
///
/// 128 bits provides sufficient security (a forgery survives a random
/// challenge with probability ~2^-128) while roughly halving MSM computation
/// time compared to full 255-bit scalars.
const SMALL_SCALAR_BITS: usize = 128;

/// Number of bytes for [SmallScalar] (16 bytes = 128 bits).
const SMALL_SCALAR_LENGTH: usize = 16;

/// Number of bytes of input key material for BLS key generation.
const IKM_LENGTH: usize = 64;

/// A 128-bit scalar for use in batch verification random challenges.
///
/// This provides 128-bit security, which is sufficient for preventing
/// forgery attacks in batch verification while reducing computational cost
/// compared to full 255-bit scalars.
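///
/// # Example
///
/// A sketch of scaling a point by a random 128-bit challenge:
///
/// ```
/// use commonware_cryptography::bls12381::primitives::group::{SmallScalar, G1};
/// use commonware_math::algebra::CryptoGroup;
/// use commonware_utils::test_rng;
///
/// let challenge = SmallScalar::random(&mut test_rng());
/// let _scaled = G1::generator() * &challenge;
/// ```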
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct SmallScalar {
    /// Stored as blst_scalar with only lower 128 bits populated.
    inner: blst_scalar,
}

impl SmallScalar {
    /// Generates a random 128-bit scalar.
    pub fn random(mut rng: impl CryptoRngCore) -> Self {
        // blst_scalar is 32 bytes
        let mut bytes = [0u8; 32];
        // Fill the last 16 bytes (128 bits) with entropy.
        // In big-endian, bytes[16..32] are the least significant.
        // Leaving bytes[0..16] as zero ensures the scalar is < 2^128.
        rng.fill_bytes(&mut bytes[SMALL_SCALAR_LENGTH..]);

        let mut scalar = blst_scalar::default();
        // SAFETY: bytes is a valid 32-byte array.
        unsafe {
            blst_scalar_from_bendian(&mut scalar, bytes.as_ptr());
        }
        Self { inner: scalar }
    }

    /// Returns the scalar's underlying bytes (in the little-endian layout
    /// consumed by `blst` multiplication).
    pub const fn as_bytes(&self) -> &[u8] {
        self.inner.b.as_slice()
    }

    /// Returns the zero scalar.
    pub fn zero() -> Self {
        Self {
            inner: blst_scalar::default(),
        }
    }
}

/// This constant serves as the multiplicative identity (i.e., "one") in the
/// BLS12-381 scalar field, ensuring that arithmetic is carried out modulo the
/// correct prime.
///
/// `R = 2^256 mod r` in little-endian Montgomery form, which is equivalent to 1 in little-endian
/// non-Montgomery form:
///
/// ```txt
/// mod(2^256, 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001) = 0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe
/// ```
///
/// Reference: <https://github.com/filecoin-project/blstrs/blob/ffbb41d1495d84e40a712583346439924603b49a/src/scalar.rs#L77-L89>
const BLST_FR_ONE: Scalar = Scalar(blst_fr {
    l: [
        0x0000_0001_ffff_fffe,
        0x5884_b7fa_0003_4802,
        0x998c_4fef_ecbc_4ff5,
        0x1824_b159_acc5_056f,
    ],
});

/// A point on the BLS12-381 G1 curve.
#[derive(Clone, Copy, Eq, PartialEq)]
#[repr(transparent)]
pub struct G1(blst_p1);

/// The size in bytes of an encoded G1 element.
pub const G1_ELEMENT_BYTE_LENGTH: usize = 48;

/// Domain separation tag for hashing a proof of possession (compressed G2) to G1.
pub const G1_PROOF_OF_POSSESSION: DST = b"BLS_POP_BLS12381G1_XMD:SHA-256_SSWU_RO_POP_";

/// Domain separation tag for hashing a message to G1.
///
/// We use the `POP` scheme for hashing all messages because this crate is expected to be
/// used in a Byzantine environment (where any player may attempt a rogue key attack) and
/// any message could be aggregated into a multi-signature (which requires a proof-of-possession
/// to be safely deployed in this environment).
pub const G1_MESSAGE: DST = b"BLS_SIG_BLS12381G1_XMD:SHA-256_SSWU_RO_POP_";

#[cfg(feature = "arbitrary")]
impl arbitrary::Arbitrary<'_> for G1 {
    fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
        Ok(Self::generator() * &u.arbitrary::<Scalar>()?)
    }
}

/// A point on the BLS12-381 G2 curve.
#[derive(Clone, Copy, Eq, PartialEq)]
#[repr(transparent)]
pub struct G2(blst_p2);

/// The size in bytes of an encoded G2 element.
pub const G2_ELEMENT_BYTE_LENGTH: usize = 96;

/// Domain separation tag for hashing a proof of possession (compressed G1) to G2.
pub const G2_PROOF_OF_POSSESSION: DST = b"BLS_POP_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_";

/// Domain separation tag for hashing a message to G2.
///
/// We use the `POP` scheme for hashing all messages because this crate is expected to be
/// used in a Byzantine environment (where any player may attempt a rogue key attack) and
/// any message could be aggregated into a multi-signature (which requires a proof-of-possession
/// to be safely deployed in this environment).
pub const G2_MESSAGE: DST = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_";

#[cfg(feature = "arbitrary")]
impl arbitrary::Arbitrary<'_> for G2 {
    fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
        Ok(Self::generator() * &u.arbitrary::<Scalar>()?)
    }
}

/// The target group of the BLS12-381 pairing.
///
/// This is an element in the extension field `F_p^12` and is
/// produced as the result of a pairing operation.
#[derive(Debug, Clone, Eq, PartialEq, Copy)]
#[repr(transparent)]
pub struct GT(blst_fp12);

/// The size in bytes of an encoded GT element.
///
/// GT is a 12-tuple of Fp elements, each 48 bytes.
pub const GT_ELEMENT_BYTE_LENGTH: usize = 576;

impl GT {
    /// Create GT from blst_fp12.
    pub(crate) const fn from_blst_fp12(fp12: blst_fp12) -> Self {
        Self(fp12)
    }

    /// Converts the GT element to its canonical big-endian byte representation.
    pub fn as_slice(&self) -> [u8; GT_ELEMENT_BYTE_LENGTH] {
        let mut slice = [0u8; GT_ELEMENT_BYTE_LENGTH];
        // SAFETY: blst_bendian_from_fp12 writes exactly 576 bytes to a valid buffer.
        // Using the proper serialization function ensures portable, canonical encoding.
        unsafe {
            blst_bendian_from_fp12(slice.as_mut_ptr(), &self.0);
        }
        slice
    }
}

/// The private key type.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Private {
    scalar: Secret<Scalar>,
}

impl Private {
    /// Creates a new private key from a scalar.
    pub const fn new(private: Scalar) -> Self {
        Self {
            scalar: Secret::new(private),
        }
    }

    /// Temporarily exposes the inner scalar to a closure.
    ///
    /// See [`Secret::expose`](crate::Secret::expose) for more details.
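    ///
    /// For example, deriving a public key without moving the secret out
    /// (a sketch):
    ///
    /// ```
    /// use commonware_cryptography::bls12381::primitives::group::{Private, G2};
    /// use commonware_math::algebra::{CryptoGroup, Random};
    /// use commonware_utils::test_rng;
    ///
    /// let private = Private::random(&mut test_rng());
    /// let _public = private.expose(|scalar| G2::generator() * scalar);
    /// ```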
    pub fn expose<R>(&self, f: impl for<'a> FnOnce(&'a Scalar) -> R) -> R {
        self.scalar.expose(f)
    }

    /// Consumes the private key and returns the inner scalar.
    ///
    /// See [`Secret::expose_unwrap`](crate::Secret::expose_unwrap) for more details.
    pub fn expose_unwrap(self) -> Scalar {
        self.scalar.expose_unwrap()
    }
}

impl Write for Private {
    fn write(&self, buf: &mut impl BufMut) {
        self.expose(|scalar| scalar.write(buf));
    }
}

impl Read for Private {
    type Cfg = ();

    fn read_cfg(buf: &mut impl Buf, _: &()) -> Result<Self, Error> {
        let scalar = Scalar::read(buf)?;
        Ok(Self::new(scalar))
    }
}

impl FixedSize for Private {
    const SIZE: usize = PRIVATE_KEY_LENGTH;
}

impl Random for Private {
    fn random(rng: impl CryptoRngCore) -> Self {
        Self::new(Scalar::random(rng))
    }
}

#[cfg(feature = "arbitrary")]
impl arbitrary::Arbitrary<'_> for Private {
    fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
        Ok(Self::new(u.arbitrary::<Scalar>()?))
    }
}

/// The private key length.
pub const PRIVATE_KEY_LENGTH: usize = SCALAR_LENGTH;

impl Scalar {
    /// Creates a scalar from input key material.
    /// Uses IETF BLS KeyGen which loops internally until a non-zero value is produced.
    fn from_ikm(ikm: &[u8; IKM_LENGTH]) -> Self {
        let mut sc = blst_scalar::default();
        let mut ret = blst_fr::default();
        // SAFETY: ikm is a valid 64-byte buffer; blst_keygen handles null key_info.
        unsafe {
            blst_keygen(&mut sc, ikm.as_ptr(), ikm.len(), ptr::null(), 0);
            blst_fr_from_scalar(&mut ret, &sc);
        }
        Self(ret)
    }

    /// Maps arbitrary bytes to a scalar using RFC9380 hash-to-field.
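    ///
    /// The mapping is deterministic: the same `(dst, msg)` pair always yields
    /// the same scalar. A sketch (with a hypothetical domain separation tag):
    ///
    /// ```
    /// use commonware_cryptography::bls12381::primitives::group::Scalar;
    ///
    /// // Hypothetical DST, for illustration only.
    /// const DST: &[u8] = b"EXAMPLE_DST_";
    ///
    /// let a = Scalar::map(DST, b"message");
    /// assert_eq!(a, Scalar::map(DST, b"message"));
    /// assert_ne!(a, Scalar::map(DST, b"other message"));
    /// ```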
    pub fn map(dst: DST, msg: &[u8]) -> Self {
        // The BLS12-381 scalar field has a modulus of approximately 255 bits.
        // According to RFC9380, when mapping to a field element, we need to
        // generate uniform bytes with length L = ceil((ceil(log2(p)) + k) / 8),
        // where p is the field modulus and k is the security parameter.
        //
        // For BLS12-381's scalar field:
        // - log2(p) ≈ 255 bits
        // - k = 128 bits (for 128-bit security)
        // - L = ceil((255 + 128) / 8) = ceil(383 / 8) = 48 bytes
        //
        // These 48 bytes provide sufficient entropy to ensure uniform distribution
        // in the scalar field after modular reduction, maintaining the security
        // properties required by the hash-to-field construction.
        const L: usize = 48;
        let mut uniform_bytes = Zeroizing::new([0u8; L]);
        // SAFETY: All buffers are valid with correct lengths; blst handles empty inputs.
        unsafe {
            blst_expand_message_xmd(
                uniform_bytes.as_mut_ptr(),
                L,
                msg.as_ptr(),
                msg.len(),
                dst.as_ptr(),
                dst.len(),
            );
        }

        // Transform expanded bytes with modular reduction
        let mut fr = blst_fr::default();
        // SAFETY: uniform_bytes is a valid 48-byte buffer.
        unsafe {
            let mut scalar = blst_scalar::default();
            blst_scalar_from_be_bytes(&mut scalar, uniform_bytes.as_ptr(), L);
            blst_fr_from_scalar(&mut fr, &scalar);
        }

        Self(fr)
    }

    /// Creates a new scalar from the provided integer.
    pub(crate) fn from_u64(i: u64) -> Self {
        // Create a new scalar
        let mut ret = blst_fr::default();
        let buffer = [i, 0, 0, 0];

        // SAFETY: blst_fr_from_uint64 reads exactly 4 u64 values from the buffer.
        //
        // Reference: https://github.com/supranational/blst/blob/415d4f0e2347a794091836a3065206edfd9c72f3/bindings/blst.h#L102
        unsafe { blst_fr_from_uint64(&mut ret, buffer.as_ptr()) };
        Self(ret)
    }

    /// Encodes the scalar into a byte array.
    fn as_slice(&self) -> Zeroizing<[u8; Self::SIZE]> {
        let mut slice = Zeroizing::new([0u8; Self::SIZE]);
        // SAFETY: All pointers valid; blst_bendian_from_scalar writes exactly 32 bytes.
        unsafe {
            let mut scalar = blst_scalar::default();
            blst_scalar_from_fr(&mut scalar, &self.0);
            blst_bendian_from_scalar(slice.as_mut_ptr(), &scalar);
        }
        slice
    }

    /// Converts the scalar to the raw `blst_scalar` type.
    pub(crate) fn as_blst_scalar(&self) -> blst_scalar {
        let mut scalar = blst_scalar::default();
        // SAFETY: Both pointers are valid and properly aligned.
        unsafe { blst_scalar_from_fr(&mut scalar, &self.0) };
        scalar
    }
}

impl Write for Scalar {
    fn write(&self, buf: &mut impl BufMut) {
        let slice = self.as_slice();
        buf.put_slice(slice.as_ref());
    }
}

impl Read for Scalar {
    type Cfg = ();

    fn read_cfg(buf: &mut impl Buf, _: &()) -> Result<Self, Error> {
        let bytes = Zeroizing::new(<[u8; Self::SIZE]>::read(buf)?);
        let mut ret = blst_fr::default();
        // SAFETY: bytes is a valid 32-byte array. blst_sk_check validates non-zero and in-range.
        // We use blst_sk_check instead of blst_scalar_fr_check because it also checks non-zero
        // per IETF BLS12-381 spec (Draft 4+).
        //
        // References:
        // * https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-03#section-2.3
        // * https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-2.3
        unsafe {
            let mut scalar = blst_scalar::default();
            blst_scalar_from_bendian(&mut scalar, bytes.as_ptr());
            if !blst_sk_check(&scalar) {
                return Err(Invalid("Scalar", "Invalid"));
            }
            blst_fr_from_scalar(&mut ret, &scalar);
        }
        Ok(Self(ret))
    }
}

impl FixedSize for Scalar {
    const SIZE: usize = SCALAR_LENGTH;
}

impl Hash for Scalar {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let slice = self.as_slice();
        state.write(slice.as_ref());
    }
}

impl CtEq for Scalar {
    fn ct_eq(&self, other: &Self) -> ctutils::Choice {
        self.0.l.ct_eq(&other.0.l)
    }
}

impl PartialOrd for Scalar {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Scalar {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.as_slice().cmp(&other.as_slice())
    }
}

impl Debug for Scalar {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        write!(f, "Scalar([REDACTED])")
    }
}

impl Display for Scalar {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        write!(f, "[REDACTED]")
    }
}

impl Zeroize for Scalar {
    fn zeroize(&mut self) {
        self.0.l.zeroize();
    }
}

impl Drop for Scalar {
    fn drop(&mut self) {
        self.zeroize();
    }
}

impl ZeroizeOnDrop for Scalar {}

impl Object for Scalar {}

impl<'a> AddAssign<&'a Self> for Scalar {
    fn add_assign(&mut self, rhs: &'a Self) {
        let ptr = &raw mut self.0;
        // SAFETY: blst_fr_add supports in-place (ret==a). Raw pointer avoids aliased refs.
        unsafe {
            blst_fr_add(ptr, ptr, &rhs.0);
        }
    }
}

impl<'a> Add<&'a Self> for Scalar {
    type Output = Self;

    fn add(mut self, rhs: &'a Self) -> Self::Output {
        self += rhs;
        self
    }
}

impl<'a> SubAssign<&'a Self> for Scalar {
    fn sub_assign(&mut self, rhs: &'a Self) {
        let ptr = &raw mut self.0;
        // SAFETY: blst_fr_sub supports in-place (ret==a). Raw pointer avoids aliased refs.
        unsafe { blst_fr_sub(ptr, ptr, &rhs.0) }
    }
}

impl<'a> Sub<&'a Self> for Scalar {
    type Output = Self;

    fn sub(mut self, rhs: &'a Self) -> Self::Output {
        self -= rhs;
        self
    }
}

impl Neg for Scalar {
    type Output = Self;

    fn neg(mut self) -> Self::Output {
        let ptr = &raw mut self.0;
        // SAFETY: blst_fr_cneg supports in-place (ret==a). Raw pointer avoids aliased refs.
        unsafe {
            blst_fr_cneg(ptr, ptr, true);
        }
        self
    }
}

impl Additive for Scalar {
    fn zero() -> Self {
        Self(blst_fr::default())
    }
}

impl<'a> MulAssign<&'a Self> for Scalar {
    fn mul_assign(&mut self, rhs: &'a Self) {
        let ptr = &raw mut self.0;
        // SAFETY: blst_fr_mul supports in-place (ret==a). Raw pointer avoids aliased refs.
        unsafe {
            blst_fr_mul(ptr, ptr, &rhs.0);
        }
    }
}

impl<'a> Mul<&'a Self> for Scalar {
    type Output = Self;

    fn mul(mut self, rhs: &'a Self) -> Self::Output {
        self *= rhs;
        self
    }
}

impl Multiplicative for Scalar {}

impl Ring for Scalar {
    fn one() -> Self {
        BLST_FR_ONE
    }
}

impl Field for Scalar {
    fn inv(&self) -> Self {
        if *self == Self::zero() {
            return Self::zero();
        }
        let mut ret = blst_fr::default();
        // SAFETY: Input is non-zero (checked above); blst_fr_inverse is defined for non-zero.
        unsafe { blst_fr_inverse(&mut ret, &self.0) };
        Self(ret)
    }
}

impl Random for Scalar {
    /// Returns a random non-zero scalar.
    fn random(mut rng: impl CryptoRngCore) -> Self {
        let mut ikm = Zeroizing::new([0u8; IKM_LENGTH]);
        rng.fill_bytes(ikm.as_mut());
        Self::from_ikm(&ikm)
    }
}

/// A share of a threshold signing key.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Share {
    /// The share's index in the polynomial.
    pub index: Participant,
    /// The scalar corresponding to the share's secret.
    pub private: Private,
}

impl Share {
    /// Creates a new `Share` with the given index and private key.
    pub const fn new(index: Participant, private: Private) -> Self {
        Self { index, private }
    }

    /// Returns the public key corresponding to the share.
    ///
    /// This can be verified against the public polynomial.
    pub fn public<V: Variant>(&self) -> V::Public {
        self.private
            .expose(|private| V::Public::generator() * private)
    }
}

impl Write for Share {
    fn write(&self, buf: &mut impl BufMut) {
        self.index.write(buf);
        self.private.expose(|private| private.write(buf));
    }
}

impl Read for Share {
    type Cfg = ();

    fn read_cfg(buf: &mut impl Buf, _: &()) -> Result<Self, Error> {
        let index = Participant::read(buf)?;
        let private = Private::read(buf)?;
        Ok(Self { index, private })
    }
}

impl EncodeSize for Share {
    fn encode_size(&self) -> usize {
        self.index.encode_size() + self.private.expose(|private| private.encode_size())
    }
}

impl Display for Share {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        write!(f, "{:?}", self)
    }
}

#[cfg(feature = "arbitrary")]
impl arbitrary::Arbitrary<'_> for Share {
    fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
        let index = u.arbitrary()?;
        let private = u.arbitrary::<Private>()?;
        Ok(Self { index, private })
    }
}

impl G1 {
    /// Encodes the G1 element into a slice.
    fn as_slice(&self) -> [u8; Self::SIZE] {
        let mut slice = [0u8; Self::SIZE];
        // SAFETY: blst_p1_compress writes exactly 48 bytes to a valid buffer.
        unsafe {
            blst_p1_compress(slice.as_mut_ptr(), &self.0);
        }
        slice
    }

    /// Like [`core::ops::Neg::neg`], except operating in place.
    ///
    /// This function exists to avoid an extra copy when implementing
    /// subtraction. The compiler (including LLVM) isn't smart enough to
    /// eliminate the copy that happens if you implement subtraction as
    /// `x += &-*rhs`. So, instead, we copy `rhs` once, negate it in place,
    /// and then add it.
    fn neg_in_place(&mut self) {
        let ptr = &raw mut self.0;
        // SAFETY: ptr is valid.
        unsafe {
            blst_p1_cneg(ptr, true);
        }
    }

    /// Converts the G1 point to its affine representation.
    pub(crate) fn as_blst_p1_affine(&self) -> blst_p1_affine {
        let mut affine = blst_p1_affine::default();
        // SAFETY: Both pointers are valid and properly aligned.
        unsafe { blst_p1_to_affine(&mut affine, &self.0) };
        affine
    }

    /// Creates a G1 point from a raw `blst_p1`.
    pub(crate) const fn from_blst_p1(p: blst_p1) -> Self {
        Self(p)
    }

    /// Batch converts projective G1 points to affine.
    ///
    /// This uses Montgomery's trick to reduce n field inversions to 1,
    /// providing significant speedup over converting points individually.
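    ///
    /// A sketch of the expected output shape:
    ///
    /// ```
    /// use commonware_cryptography::bls12381::primitives::group::{Scalar, G1};
    /// use commonware_math::algebra::{CryptoGroup, Random};
    /// use commonware_utils::test_rng;
    ///
    /// let mut rng = test_rng();
    /// let points: Vec<G1> = (0..4)
    ///     .map(|_| G1::generator() * &Scalar::random(&mut rng))
    ///     .collect();
    /// // One affine point per input, in the same order.
    /// assert_eq!(G1::batch_to_affine(&points).len(), points.len());
    /// ```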
    pub fn batch_to_affine(points: &[Self]) -> Vec<blst_p1_affine> {
        if points.is_empty() {
            return Vec::new();
        }

        let n = points.len();
        let mut out = vec![blst_p1_affine::default(); n];

        // SAFETY: blst_p1s_to_affine batch converts projective points to affine.
        // The function uses Montgomery's trick internally for efficiency.
        // All pointers are valid and point to properly sized arrays.
        unsafe {
            let points_ptr: Vec<*const blst_p1> = points.iter().map(|p| &p.0 as *const _).collect();
            blst_p1s_to_affine(out.as_mut_ptr(), points_ptr.as_ptr(), n);
        }

        out
    }

    /// Checks that `sum_i (p1[i] ⊙ p2[i]) + t1 ⊙ t2 == 0`.
    ///
    /// `p1` and `p2` MUST have the same length.
    #[must_use]
    pub(crate) fn multi_pairing_check(p1: &[Self], p2: &[G2], t1: &Self, t2: &G2) -> bool {
        assert_eq!(p1.len(), p2.len());
        // We deal with group elements directly, so there's no need for hashing,
        // or a domain separation tag, hence `false`, `&[]`.
        let mut pairing = Pairing::new(false, &[]);
        let p1_affine = Self::batch_to_affine(p1);
        let p2_affine = G2::batch_to_affine(p2);
        for (p1, p2) in iter::once((&t1.as_blst_p1_affine(), &t2.as_blst_p2_affine()))
            .chain(p1_affine.iter().zip(p2_affine.iter()))
        {
            pairing.raw_aggregate(p2, p1);
        }

        // These final two steps check that the sum of the pairings is equal to 0.
        pairing.commit();
        // Passing `None` here indicates that our target is 0.
        pairing.finalverify(None)
    }

    fn msm_inner<'a>(iter: impl Iterator<Item = (&'a Self, &'a [u8])>, nbits: usize) -> Self {
        // Filter out zero points/scalars and convert to blst types.
        // `blst` does not filter out infinity, so we must ensure it is impossible.
        //
        // Sources:
        // * https://github.com/supranational/blst/blob/cbc7e166a10d7286b91a3a7bea341e708962db13/src/multi_scalar.c#L10-L12
        // * https://github.com/MystenLabs/fastcrypto/blob/0acf0ff1a163c60e0dec1e16e4fbad4a4cf853bd/fastcrypto/src/groups/bls12381.rs#L160-L194
        let (points_filtered, scalars_filtered): (Vec<_>, Vec<_>) = iter
            .filter_map(|(point, scalar)| {
                if *point == Self::zero() || all_zero(scalar).into() {
                    return None;
                }
                Some((point, scalar))
            })
            .unzip();

        if points_filtered.is_empty() {
            return Self::zero();
        }

        let affine_points = Self::batch_to_affine(&points_filtered);
        let points: Vec<*const blst_p1_affine> =
            affine_points.iter().map(|p| p as *const _).collect();
        let scalars: Vec<*const u8> = scalars_filtered.iter().map(|s| s.as_ptr()).collect();

        // SAFETY: blst_p1s_mult_pippenger_scratch_sizeof returns size in bytes for valid input.
        let scratch_size = unsafe { blst_p1s_mult_pippenger_scratch_sizeof(points.len()) };
        assert_eq!(scratch_size % 8, 0, "scratch_size must be multiple of 8");
        let mut scratch = vec![MaybeUninit::<u64>::uninit(); scratch_size / 8];

        let mut msm_result = blst_p1::default();
        // SAFETY: All pointer arrays are valid and point to data that outlives this call.
        unsafe {
            blst_p1s_mult_pippenger(
                &mut msm_result,
                points.as_ptr(),
                points.len(),
                scalars.as_ptr(),
                nbits,
                scratch.as_mut_ptr() as *mut _,
            );
        }

        Self::from_blst_p1(msm_result)
    }
}

impl Write for G1 {
    fn write(&self, buf: &mut impl BufMut) {
        let slice = self.as_slice();
        buf.put_slice(&slice);
    }
}

impl Read for G1 {
    type Cfg = ();

    fn read_cfg(buf: &mut impl Buf, _: &()) -> Result<Self, Error> {
        let bytes = <[u8; Self::SIZE]>::read(buf)?;
        let mut ret = blst_p1::default();
        // SAFETY: bytes is a valid 48-byte array. blst_p1_uncompress validates encoding.
        // Additional checks for infinity and subgroup membership prevent small subgroup attacks.
        unsafe {
            let mut affine = blst_p1_affine::default();
            match blst_p1_uncompress(&mut affine, bytes.as_ptr()) {
                BLST_ERROR::BLST_SUCCESS => {}
                BLST_ERROR::BLST_BAD_ENCODING => return Err(Invalid("G1", "Bad encoding")),
                BLST_ERROR::BLST_POINT_NOT_ON_CURVE => return Err(Invalid("G1", "Not on curve")),
                BLST_ERROR::BLST_POINT_NOT_IN_GROUP => return Err(Invalid("G1", "Not in group")),
                BLST_ERROR::BLST_AGGR_TYPE_MISMATCH => return Err(Invalid("G1", "Type mismatch")),
                BLST_ERROR::BLST_VERIFY_FAIL => return Err(Invalid("G1", "Verify fail")),
                BLST_ERROR::BLST_PK_IS_INFINITY => return Err(Invalid("G1", "PK is Infinity")),
                BLST_ERROR::BLST_BAD_SCALAR => return Err(Invalid("G1", "Bad scalar")),
            }
            blst_p1_from_affine(&mut ret, &affine);

            // Verify that deserialized element isn't infinite
            if blst_p1_is_inf(&ret) {
                return Err(Invalid("G1", "Infinity"));
            }

            // Verify that the deserialized element is in G1
            if !blst_p1_in_g1(&ret) {
                return Err(Invalid("G1", "Outside G1"));
            }
        }
        Ok(Self(ret))
    }
}

impl FixedSize for G1 {
    const SIZE: usize = G1_ELEMENT_BYTE_LENGTH;
}

impl Hash for G1 {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let slice = self.as_slice();
        state.write(&slice);
    }
}

impl PartialOrd for G1 {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for G1 {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.as_slice().cmp(&other.as_slice())
    }
}

impl Debug for G1 {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        write!(f, "{}", hex(&self.as_slice()))
    }
}

impl Display for G1 {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        write!(f, "{}", hex(&self.as_slice()))
    }
}

impl Object for G1 {}

impl<'a> AddAssign<&'a Self> for G1 {
    fn add_assign(&mut self, rhs: &'a Self) {
        let ptr = &raw mut self.0;
        // SAFETY: blst_p1_add_or_double supports in-place (ret==a). Raw pointer avoids aliased refs.
        unsafe {
            blst_p1_add_or_double(ptr, ptr, &rhs.0);
        }
    }
}

impl<'a> Add<&'a Self> for G1 {
    type Output = Self;

    fn add(mut self, rhs: &'a Self) -> Self::Output {
        self += rhs;
        self
    }
}

impl Neg for G1 {
    type Output = Self;

    fn neg(mut self) -> Self::Output {
        self.neg_in_place();
        self
    }
}

impl<'a> SubAssign<&'a Self> for G1 {
    fn sub_assign(&mut self, rhs: &'a Self) {
        let mut rhs_cp = *rhs;
        rhs_cp.neg_in_place();
        *self += &rhs_cp;
    }
}

impl<'a> Sub<&'a Self> for G1 {
    type Output = Self;

    fn sub(mut self, rhs: &'a Self) -> Self::Output {
        self -= rhs;
        self
    }
}

impl Additive for G1 {
    fn zero() -> Self {
        Self(blst_p1::default())
    }
}

impl<'a> MulAssign<&'a Scalar> for G1 {
    fn mul_assign(&mut self, rhs: &'a Scalar) {
        let ptr = &raw mut self.0;
        let mut scalar: blst_scalar = blst_scalar::default();
        // SAFETY: blst_p1_mult supports in-place (ret==a). Using SCALAR_BITS (255) ensures
        // constant-time execution. Raw pointer avoids aliased refs.
        unsafe {
            blst_scalar_from_fr(&mut scalar, &rhs.0);
            blst_p1_mult(ptr, ptr, scalar.b.as_ptr(), SCALAR_BITS);
        }
    }
}

impl<'a> Mul<&'a Scalar> for G1 {
    type Output = Self;

    fn mul(mut self, rhs: &'a Scalar) -> Self::Output {
        self *= rhs;
        self
    }
}

impl<'a> MulAssign<&'a SmallScalar> for G1 {
    fn mul_assign(&mut self, rhs: &'a SmallScalar) {
        let ptr = &raw mut self.0;
        // SAFETY: blst_p1_mult supports in-place (ret==a). Using SMALL_SCALAR_BITS (128)
        // processes only the lower 128 bits of the scalar, halving computation time.
        unsafe {
            blst_p1_mult(ptr, ptr, rhs.inner.b.as_ptr(), SMALL_SCALAR_BITS);
        }
    }
}

impl<'a> Mul<&'a SmallScalar> for G1 {
    type Output = Self;

    fn mul(mut self, rhs: &'a SmallScalar) -> Self::Output {
        self *= rhs;
        self
    }
}

impl Space<Scalar> for G1 {
    fn msm(points: &[Self], scalars: &[Scalar], _strategy: &impl Strategy) -> Self {
        assert_eq!(points.len(), scalars.len(), "mismatched lengths");
        let scalar_bytes: Vec<_> = scalars.iter().map(|s| s.as_blst_scalar()).collect();
        Self::msm_inner(
            points
                .iter()
                .zip(scalar_bytes.iter().map(|s| s.b.as_slice())),
            SCALAR_BITS,
        )
    }
}

impl Space<SmallScalar> for G1 {
    fn msm(points: &[Self], scalars: &[SmallScalar], _strategy: &impl Strategy) -> Self {
        assert_eq!(points.len(), scalars.len(), "mismatched lengths");
        Self::msm_inner(
            points.iter().zip(scalars.iter().map(|s| s.as_bytes())),
            SMALL_SCALAR_BITS,
        )
    }
}

impl CryptoGroup for G1 {
    type Scalar = Scalar;

    fn generator() -> Self {
        let mut ret = blst_p1::default();
        // SAFETY: BLS12_381_G1 is a valid generator point constant.
        unsafe {
            blst_p1_from_affine(&mut ret, &BLS12_381_G1);
        }
        Self(ret)
    }
}

impl HashToGroup for G1 {
    fn hash_to_group(domain_separator: &[u8], message: &[u8]) -> Self {
        let mut out = blst_p1::default();
        // SAFETY: All pointers valid; blst_hash_to_g1 handles empty data. Aug is null/0 (unused).
        unsafe {
            blst_hash_to_g1(
                &mut out,
                message.as_ptr(),
                message.len(),
                domain_separator.as_ptr(),
                domain_separator.len(),
                ptr::null(),
                0,
            );
        }
        Self(out)
    }
}

impl G2 {
    /// Encodes the G2 element into a slice.
    fn as_slice(&self) -> [u8; Self::SIZE] {
        let mut slice = [0u8; Self::SIZE];
        // SAFETY: blst_p2_compress writes exactly 96 bytes to a valid buffer.
        unsafe {
            blst_p2_compress(slice.as_mut_ptr(), &self.0);
        }
        slice
    }

    /// Cf. [G1::neg_in_place].
    fn neg_in_place(&mut self) {
        let ptr = &raw mut self.0;
        // SAFETY: ptr is valid.
        unsafe {
            blst_p2_cneg(ptr, true);
        }
    }

    /// Converts the G2 point to its affine representation.
    pub(crate) fn as_blst_p2_affine(&self) -> blst_p2_affine {
        let mut affine = blst_p2_affine::default();
        // SAFETY: Both pointers are valid and properly aligned.
        unsafe { blst_p2_to_affine(&mut affine, &self.0) };
        affine
    }

    /// Creates a G2 point from a raw `blst_p2`.
    pub(crate) const fn from_blst_p2(p: blst_p2) -> Self {
        Self(p)
    }

    /// Batch converts projective G2 points to affine.
    ///
    /// This uses Montgomery's trick to reduce n field inversions to 1,
    /// providing significant speedup over converting points individually.
    pub fn batch_to_affine(points: &[Self]) -> Vec<blst_p2_affine> {
        if points.is_empty() {
            return Vec::new();
        }

        let n = points.len();
        let mut out = vec![blst_p2_affine::default(); n];

        // SAFETY: blst_p2s_to_affine batch converts projective points to affine.
        // The function uses Montgomery's trick internally for efficiency.
        // All pointers are valid and point to properly sized arrays.
        unsafe {
            let points_ptr: Vec<*const blst_p2> = points.iter().map(|p| &p.0 as *const _).collect();
            blst_p2s_to_affine(out.as_mut_ptr(), points_ptr.as_ptr(), n);
        }

        out
    }

    /// Checks that `sum_i (p1[i] ⊙ p2[i]) + t1 ⊙ t2 == 0`.
    ///
    /// `p1` and `p2` MUST have the same length.
    #[must_use]
    pub(crate) fn multi_pairing_check(p1: &[Self], p2: &[G1], t1: &Self, t2: &G1) -> bool {
        G1::multi_pairing_check(p2, p1, t2, t1)
    }

    fn msm_inner<'a>(iter: impl Iterator<Item = (&'a Self, &'a [u8])>, nbits: usize) -> Self {
        // Filter out zero points/scalars and convert to blst types.
        // `blst` does not filter out infinity, so we must ensure it is impossible.
        //
        // Sources:
        // * https://github.com/supranational/blst/blob/cbc7e166a10d7286b91a3a7bea341e708962db13/src/multi_scalar.c#L10-L12
        // * https://github.com/MystenLabs/fastcrypto/blob/0acf0ff1a163c60e0dec1e16e4fbad4a4cf853bd/fastcrypto/src/groups/bls12381.rs#L160-L194
        let (points_filtered, scalars_filtered): (Vec<_>, Vec<_>) = iter
            .filter_map(|(point, scalar)| {
                if *point == Self::zero() || all_zero(scalar).into() {
                    return None;
                }
                Some((point, scalar))
            })
            .unzip();

        if points_filtered.is_empty() {
            return Self::zero();
        }

        let affine_points = Self::batch_to_affine(&points_filtered);
        let points: Vec<*const blst_p2_affine> =
            affine_points.iter().map(|p| p as *const _).collect();
        let scalars: Vec<*const u8> = scalars_filtered.iter().map(|s| s.as_ptr()).collect();

        // SAFETY: blst_p2s_mult_pippenger_scratch_sizeof returns size in bytes for valid input.
        let scratch_size = unsafe { blst_p2s_mult_pippenger_scratch_sizeof(points.len()) };
        assert_eq!(scratch_size % 8, 0, "scratch_size must be multiple of 8");
        let mut scratch = vec![MaybeUninit::<u64>::uninit(); scratch_size / 8];

        let mut msm_result = blst_p2::default();
        // SAFETY: All pointer arrays are valid and point to data that outlives this call.
        unsafe {
            blst_p2s_mult_pippenger(
                &mut msm_result,
                points.as_ptr(),
                points.len(),
                scalars.as_ptr(),
                nbits,
                scratch.as_mut_ptr() as *mut _,
            );
        }

        Self::from_blst_p2(msm_result)
    }
}

impl Write for G2 {
    fn write(&self, buf: &mut impl BufMut) {
        let slice = self.as_slice();
        buf.put_slice(&slice);
    }
}

impl Read for G2 {
    type Cfg = ();

    fn read_cfg(buf: &mut impl Buf, _: &()) -> Result<Self, Error> {
        let bytes = <[u8; Self::SIZE]>::read(buf)?;
        let mut ret = blst_p2::default();
        // SAFETY: bytes is a valid 96-byte array. blst_p2_uncompress validates encoding.
        // Additional checks for infinity and subgroup membership prevent small subgroup attacks.
        unsafe {
            let mut affine = blst_p2_affine::default();
            match blst_p2_uncompress(&mut affine, bytes.as_ptr()) {
                BLST_ERROR::BLST_SUCCESS => {}
                BLST_ERROR::BLST_BAD_ENCODING => return Err(Invalid("G2", "Bad encoding")),
                BLST_ERROR::BLST_POINT_NOT_ON_CURVE => return Err(Invalid("G2", "Not on curve")),
                BLST_ERROR::BLST_POINT_NOT_IN_GROUP => return Err(Invalid("G2", "Not in group")),
                BLST_ERROR::BLST_AGGR_TYPE_MISMATCH => return Err(Invalid("G2", "Type mismatch")),
                BLST_ERROR::BLST_VERIFY_FAIL => return Err(Invalid("G2", "Verify fail")),
                BLST_ERROR::BLST_PK_IS_INFINITY => return Err(Invalid("G2", "PK is Infinity")),
                BLST_ERROR::BLST_BAD_SCALAR => return Err(Invalid("G2", "Bad scalar")),
            }
            blst_p2_from_affine(&mut ret, &affine);

            // Verify that deserialized element isn't infinite
            if blst_p2_is_inf(&ret) {
                return Err(Invalid("G2", "Infinity"));
            }

            // Verify that the deserialized element is in G2
            if !blst_p2_in_g2(&ret) {
                return Err(Invalid("G2", "Outside G2"));
            }
        }
        Ok(Self(ret))
    }
}

impl FixedSize for G2 {
    const SIZE: usize = G2_ELEMENT_BYTE_LENGTH;
}

impl Hash for G2 {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let slice = self.as_slice();
        state.write(&slice);
    }
}

impl PartialOrd for G2 {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for G2 {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.as_slice().cmp(&other.as_slice())
    }
}

impl Debug for G2 {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        write!(f, "{}", hex(&self.as_slice()))
    }
}

impl Display for G2 {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        write!(f, "{}", hex(&self.as_slice()))
    }
}

impl Object for G2 {}

impl<'a> AddAssign<&'a Self> for G2 {
    fn add_assign(&mut self, rhs: &'a Self) {
        let ptr = &raw mut self.0;
        // SAFETY: blst_p2_add_or_double supports in-place (ret==a). Raw pointer avoids aliased refs.
        unsafe {
            blst_p2_add_or_double(ptr, ptr, &rhs.0);
        }
    }
}

impl<'a> Add<&'a Self> for G2 {
    type Output = Self;

    fn add(mut self, rhs: &'a Self) -> Self::Output {
        self += rhs;
        self
    }
}

impl Neg for G2 {
    type Output = Self;

    fn neg(mut self) -> Self::Output {
        self.neg_in_place();
        self
    }
}

impl<'a> SubAssign<&'a Self> for G2 {
    fn sub_assign(&mut self, rhs: &'a Self) {
        let mut rhs_cp = *rhs;
        rhs_cp.neg_in_place();
        *self += &rhs_cp;
    }
}

impl<'a> Sub<&'a Self> for G2 {
    type Output = Self;

    fn sub(mut self, rhs: &'a Self) -> Self::Output {
        self -= rhs;
        self
    }
}

impl Additive for G2 {
    fn zero() -> Self {
        Self(blst_p2::default())
    }
}

impl<'a> MulAssign<&'a Scalar> for G2 {
    fn mul_assign(&mut self, rhs: &'a Scalar) {
        let mut scalar = blst_scalar::default();
        let ptr = &raw mut self.0;
        // SAFETY: blst_p2_mult supports in-place (ret==a). Using SCALAR_BITS (255) ensures
        // constant-time execution. Raw pointer avoids aliased refs.
        unsafe {
            blst_scalar_from_fr(&mut scalar, &rhs.0);
            blst_p2_mult(ptr, ptr, scalar.b.as_ptr(), SCALAR_BITS);
        }
    }
}

impl<'a> Mul<&'a Scalar> for G2 {
    type Output = Self;

    fn mul(mut self, rhs: &'a Scalar) -> Self::Output {
        self *= rhs;
        self
    }
}

impl<'a> MulAssign<&'a SmallScalar> for G2 {
    fn mul_assign(&mut self, rhs: &'a SmallScalar) {
        let ptr = &raw mut self.0;
        // SAFETY: blst_p2_mult supports in-place (ret==a). Using SMALL_SCALAR_BITS (128)
        // processes only the lower 128 bits of the scalar, halving computation time.
        unsafe {
            blst_p2_mult(ptr, ptr, rhs.inner.b.as_ptr(), SMALL_SCALAR_BITS);
        }
    }
}

impl<'a> Mul<&'a SmallScalar> for G2 {
    type Output = Self;

    fn mul(mut self, rhs: &'a SmallScalar) -> Self::Output {
        self *= rhs;
        self
    }
}

impl Space<Scalar> for G2 {
    fn msm(points: &[Self], scalars: &[Scalar], _strategy: &impl Strategy) -> Self {
        assert_eq!(points.len(), scalars.len(), "mismatched lengths");
        let scalar_bytes: Vec<_> = scalars.iter().map(|s| s.as_blst_scalar()).collect();
        Self::msm_inner(
            points
                .iter()
                .zip(scalar_bytes.iter().map(|s| s.b.as_slice())),
            SCALAR_BITS,
        )
    }
}

impl Space<SmallScalar> for G2 {
    fn msm(points: &[Self], scalars: &[SmallScalar], _strategy: &impl Strategy) -> Self {
        assert_eq!(points.len(), scalars.len(), "mismatched lengths");
        Self::msm_inner(
            points.iter().zip(scalars.iter().map(|s| s.as_bytes())),
            SMALL_SCALAR_BITS,
        )
    }
}

impl CryptoGroup for G2 {
    type Scalar = Scalar;

    fn generator() -> Self {
        let mut ret = blst_p2::default();
        // SAFETY: BLS12_381_G2 is a valid generator point constant.
        unsafe {
            blst_p2_from_affine(&mut ret, &BLS12_381_G2);
        }
        Self(ret)
    }
}

impl HashToGroup for G2 {
    fn hash_to_group(domain_separator: &[u8], message: &[u8]) -> Self {
        let mut out = blst_p2::default();
        // SAFETY: All pointers valid; blst_hash_to_g2 handles empty data. Aug is null/0 (unused).
        unsafe {
            blst_hash_to_g2(
                &mut out,
                message.as_ptr(),
                message.len(),
                domain_separator.as_ptr(),
                domain_separator.len(),
                ptr::null(),
                0,
            );
        }
        Self(out)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::bls12381::primitives::group::Scalar;
    use commonware_codec::{DecodeExt, Encode};
    use commonware_math::algebra::{test_suites, Random};
    use commonware_parallel::Sequential;
    use commonware_utils::test_rng;
    use proptest::{prelude::*, strategy::Strategy};
    use rand::{rngs::StdRng, SeedableRng};
    use std::collections::{BTreeSet, HashMap};

    impl Arbitrary for Scalar {
        type Parameters = ();
        type Strategy = BoxedStrategy<Self>;

        fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
            any::<[u8; 32]>()
                .prop_map(|seed| Self::random(&mut StdRng::from_seed(seed)))
                .boxed()
        }
    }

    impl Arbitrary for G1 {
        type Parameters = ();
        type Strategy = BoxedStrategy<Self>;

        fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
            prop_oneof![
                Just(Self::zero()),
                Just(Self::generator()),
                any::<Scalar>().prop_map(|s| Self::generator() * &s)
            ]
            .boxed()
        }
    }

    impl Arbitrary for G2 {
        type Parameters = ();
        type Strategy = BoxedStrategy<Self>;

        fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
            prop_oneof![
                Just(Self::zero()),
                Just(Self::generator()),
                any::<Scalar>().prop_map(|s| Self::generator() * &s)
            ]
            .boxed()
        }
    }

    #[test]
    fn test_scalar_as_field() {
        test_suites::test_field(file!(), &any::<Scalar>());
    }
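
    // In addition to the field suite above, check directly that `Ring::one()`
    // (the Montgomery-form constant `BLST_FR_ONE`) acts as the multiplicative
    // identity.
    #[test]
    fn test_one_is_multiplicative_identity() {
        let x = Scalar::random(&mut test_rng());
        assert_eq!(x.clone() * &Scalar::one(), x);
        assert_ne!(Scalar::one(), Scalar::zero());
    }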

    #[test]
    fn test_g1_as_space() {
        test_suites::test_space_ring(file!(), &any::<Scalar>(), &any::<G1>());
    }

    #[test]
    fn test_g2_as_space() {
        test_suites::test_space_ring(file!(), &any::<Scalar>(), &any::<G2>());
    }

    #[test]
    fn test_hash_to_g1() {
        test_suites::test_hash_to_group::<G1>(file!());
    }

    #[test]
    fn test_hash_to_g2() {
        test_suites::test_hash_to_group::<G2>(file!());
    }

    #[test]
    fn basic_group() {
        // Reference: https://github.com/celo-org/celo-threshold-bls-rs/blob/b0ef82ff79769d085a5a7d3f4fe690b1c8fe6dc9/crates/threshold-bls/src/curve/bls12381.rs#L200-L220
        let s = Scalar::random(&mut test_rng());
        let mut s2 = s.clone();
        s2.double();

        // p1 = s2 * G = (s+s)G
        let p1 = G1::generator() * &s2;

        // p2 = sG + sG = s2 * G
        let mut p2 = G1::generator() * &s;
        p2.double();
        assert_eq!(p1, p2);
    }

    #[test]
    fn test_scalar_codec() {
        let original = Scalar::random(&mut test_rng());
        let mut encoded = original.encode();
        assert_eq!(encoded.len(), Scalar::SIZE);
        let decoded = Scalar::decode(&mut encoded).unwrap();
        assert_eq!(original, decoded);
    }
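
    // Decoding validates scalars with `blst_sk_check`, which enforces both
    // range and non-zero-ness, so the all-zero encoding must be rejected.
    #[test]
    fn test_scalar_decode_rejects_zero() {
        assert!(Scalar::decode(&mut &[0u8; SCALAR_LENGTH][..]).is_err());
    }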

    #[test]
    fn test_g1_codec() {
        let original = G1::generator() * &Scalar::random(&mut test_rng());
        let mut encoded = original.encode();
        assert_eq!(encoded.len(), G1::SIZE);
        let decoded = G1::decode(&mut encoded).unwrap();
        assert_eq!(original, decoded);
    }

    #[test]
    fn test_g2_codec() {
        let original = G2::generator() * &Scalar::random(&mut test_rng());
        let mut encoded = original.encode();
        assert_eq!(encoded.len(), G2::SIZE);
        let decoded = G2::decode(&mut encoded).unwrap();
        assert_eq!(original, decoded);
    }
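
    // Sketch of the `multi_pairing_check` relation: by bilinearity,
    // e(P, Q) + e(-P, Q) == 0, so the check passes with t1 = -P and must
    // fail when the negation is dropped.
    #[test]
    fn test_multi_pairing_check() {
        let mut rng = test_rng();
        let p = G1::generator() * &Scalar::random(&mut rng);
        let q = G2::generator() * &Scalar::random(&mut rng);
        assert!(G1::multi_pairing_check(&[p], &[q], &(-p), &q));
        assert!(!G1::multi_pairing_check(&[p], &[q], &p, &q));
    }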
1480
1481    /// Naive calculation of Multi-Scalar Multiplication: sum(scalar * point)
1482    fn naive_msm<P: Space<Scalar>>(points: &[P], scalars: &[Scalar]) -> P {
1483        assert_eq!(points.len(), scalars.len());
1484        let mut total = P::zero();
1485        for (point, scalar) in points.iter().zip(scalars.iter()) {
1486            // Skip identity points or zero scalars, similar to the optimized MSM
1487            if *point == P::zero() || *scalar == Scalar::zero() {
1488                continue;
1489            }
1490            let term = point.clone() * scalar;
1491            total += &term;
1492        }
1493        total
1494    }
1495
1496    #[test]
1497    fn test_g1_msm() {
1498        let mut rng = test_rng();
1499        let n = 10; // Number of points/scalars
1500
1501        // Case 1: Random points and scalars
1502        let points_g1: Vec<G1> = (0..n)
1503            .map(|_| G1::generator() * &Scalar::random(&mut rng))
1504            .collect();
1505        let scalars: Vec<Scalar> = (0..n).map(|_| Scalar::random(&mut rng)).collect();
        let expected_g1 = naive_msm(&points_g1, &scalars);
        let result_g1 = G1::msm(&points_g1, &scalars, &Sequential);
        assert_eq!(expected_g1, result_g1, "G1 MSM basic case failed");

        // Case 2: Include identity point
        let mut points_with_zero_g1 = points_g1.clone();
        points_with_zero_g1[n / 2] = G1::zero();
        let expected_zero_pt_g1 = naive_msm(&points_with_zero_g1, &scalars);
        let result_zero_pt_g1 = G1::msm(&points_with_zero_g1, &scalars, &Sequential);
        assert_eq!(
            expected_zero_pt_g1, result_zero_pt_g1,
            "G1 MSM with identity point failed"
        );

        // Case 3: Include zero scalar
        let mut scalars_with_zero = scalars.clone();
        scalars_with_zero[n / 2] = Scalar::zero();
        let expected_zero_sc_g1 = naive_msm(&points_g1, &scalars_with_zero);
        let result_zero_sc_g1 = G1::msm(&points_g1, &scalars_with_zero, &Sequential);
        assert_eq!(
            expected_zero_sc_g1, result_zero_sc_g1,
            "G1 MSM with zero scalar failed"
        );

        // Case 4: All points identity
        let zero_points_g1 = vec![G1::zero(); n];
        let expected_all_zero_pt_g1 = naive_msm(&zero_points_g1, &scalars);
        let result_all_zero_pt_g1 = G1::msm(&zero_points_g1, &scalars, &Sequential);
        assert_eq!(
            expected_all_zero_pt_g1,
            G1::zero(),
            "G1 MSM all identity points (naive) failed"
        );
        assert_eq!(
            result_all_zero_pt_g1,
            G1::zero(),
            "G1 MSM all identity points failed"
        );

        // Case 5: All scalars zero
        let zero_scalars = vec![Scalar::zero(); n];
        let expected_all_zero_sc_g1 = naive_msm(&points_g1, &zero_scalars);
        let result_all_zero_sc_g1 = G1::msm(&points_g1, &zero_scalars, &Sequential);
        assert_eq!(
            expected_all_zero_sc_g1,
            G1::zero(),
            "G1 MSM all zero scalars (naive) failed"
        );
        assert_eq!(
            result_all_zero_sc_g1,
            G1::zero(),
            "G1 MSM all zero scalars failed"
        );

        // Case 6: Single element
        let single_point_g1 = [points_g1[0]];
        let single_scalar = [scalars[0].clone()];
        let expected_single_g1 = naive_msm(&single_point_g1, &single_scalar);
        let result_single_g1 = G1::msm(&single_point_g1, &single_scalar, &Sequential);
        assert_eq!(
            expected_single_g1, result_single_g1,
            "G1 MSM single element failed"
        );

        // Case 7: Empty input
        let empty_points_g1: [G1; 0] = [];
        let empty_scalars: [Scalar; 0] = [];
        let expected_empty_g1 = naive_msm(&empty_points_g1, &empty_scalars);
        let result_empty_g1 = G1::msm(&empty_points_g1, &empty_scalars, &Sequential);
        assert_eq!(expected_empty_g1, G1::zero(), "G1 MSM empty (naive) failed");
        assert_eq!(result_empty_g1, G1::zero(), "G1 MSM empty failed");

        // Case 8: Random points and scalars (big)
        let points_g1: Vec<G1> = (0..50_000)
            .map(|_| G1::generator() * &Scalar::random(&mut rng))
            .collect();
        let scalars: Vec<Scalar> = (0..50_000).map(|_| Scalar::random(&mut rng)).collect();
        let expected_g1 = naive_msm(&points_g1, &scalars);
        let result_g1 = G1::msm(&points_g1, &scalars, &Sequential);
        assert_eq!(expected_g1, result_g1, "G1 MSM large case failed");
    }
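
    // MSM should also be linear in its scalar argument:
    // msm(P, a) + msm(P, b) == msm(P, a + b). The sketch below checks this
    // property on a small random instance; it assumes the `Add`
    // implementations for `Scalar` and `G1` accept the operand forms used
    // here (value + reference), mirroring the `Mul` usage above.
    #[test]
    fn test_g1_msm_linearity_sketch() {
        let mut rng = test_rng();
        let n = 8;
        let points: Vec<G1> = (0..n)
            .map(|_| G1::generator() * &Scalar::random(&mut rng))
            .collect();
        let a: Vec<Scalar> = (0..n).map(|_| Scalar::random(&mut rng)).collect();
        let b: Vec<Scalar> = (0..n).map(|_| Scalar::random(&mut rng)).collect();
        // Component-wise sum of the two scalar vectors.
        let sum: Vec<Scalar> = a.iter().zip(&b).map(|(x, y)| x.clone() + y).collect();
        // Evaluating on the summed scalars must match the sum of the two
        // individual MSM results.
        let lhs = G1::msm(&points, &a, &Sequential) + &G1::msm(&points, &b, &Sequential);
        let rhs = G1::msm(&points, &sum, &Sequential);
        assert_eq!(lhs, rhs, "G1 MSM linearity sketch failed");
    }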

    #[test]
    fn test_g2_msm() {
        let mut rng = test_rng();
        let n = 10; // Number of points/scalars

        // Case 1: Random points and scalars
        let points_g2: Vec<G2> = (0..n)
            .map(|_| G2::generator() * &Scalar::random(&mut rng))
            .collect();
        let scalars: Vec<Scalar> = (0..n).map(|_| Scalar::random(&mut rng)).collect();
        let expected_g2 = naive_msm(&points_g2, &scalars);
        let result_g2 = G2::msm(&points_g2, &scalars, &Sequential);
        assert_eq!(expected_g2, result_g2, "G2 MSM basic case failed");

        // Case 2: Include identity point
        let mut points_with_zero_g2 = points_g2.clone();
        points_with_zero_g2[n / 2] = G2::zero();
        let expected_zero_pt_g2 = naive_msm(&points_with_zero_g2, &scalars);
        let result_zero_pt_g2 = G2::msm(&points_with_zero_g2, &scalars, &Sequential);
        assert_eq!(
            expected_zero_pt_g2, result_zero_pt_g2,
            "G2 MSM with identity point failed"
        );

        // Case 3: Include zero scalar
        let mut scalars_with_zero = scalars.clone();
        scalars_with_zero[n / 2] = Scalar::zero();
        let expected_zero_sc_g2 = naive_msm(&points_g2, &scalars_with_zero);
        let result_zero_sc_g2 = G2::msm(&points_g2, &scalars_with_zero, &Sequential);
        assert_eq!(
            expected_zero_sc_g2, result_zero_sc_g2,
            "G2 MSM with zero scalar failed"
        );

        // Case 4: All points identity
        let zero_points_g2 = vec![G2::zero(); n];
        let expected_all_zero_pt_g2 = naive_msm(&zero_points_g2, &scalars);
        let result_all_zero_pt_g2 = G2::msm(&zero_points_g2, &scalars, &Sequential);
        assert_eq!(
            expected_all_zero_pt_g2,
            G2::zero(),
            "G2 MSM all identity points (naive) failed"
        );
        assert_eq!(
            result_all_zero_pt_g2,
            G2::zero(),
            "G2 MSM all identity points failed"
        );

        // Case 5: All scalars zero
        let zero_scalars = vec![Scalar::zero(); n];
        let expected_all_zero_sc_g2 = naive_msm(&points_g2, &zero_scalars);
        let result_all_zero_sc_g2 = G2::msm(&points_g2, &zero_scalars, &Sequential);
        assert_eq!(
            expected_all_zero_sc_g2,
            G2::zero(),
            "G2 MSM all zero scalars (naive) failed"
        );
        assert_eq!(
            result_all_zero_sc_g2,
            G2::zero(),
            "G2 MSM all zero scalars failed"
        );

        // Case 6: Single element
        let single_point_g2 = [points_g2[0]];
        let single_scalar = [scalars[0].clone()];
        let expected_single_g2 = naive_msm(&single_point_g2, &single_scalar);
        let result_single_g2 = G2::msm(&single_point_g2, &single_scalar, &Sequential);
        assert_eq!(
            expected_single_g2, result_single_g2,
            "G2 MSM single element failed"
        );

        // Case 7: Empty input
        let empty_points_g2: [G2; 0] = [];
        let empty_scalars: [Scalar; 0] = [];
        let expected_empty_g2 = naive_msm(&empty_points_g2, &empty_scalars);
        let result_empty_g2 = G2::msm(&empty_points_g2, &empty_scalars, &Sequential);
        assert_eq!(expected_empty_g2, G2::zero(), "G2 MSM empty (naive) failed");
        assert_eq!(result_empty_g2, G2::zero(), "G2 MSM empty failed");

        // Case 8: Random points and scalars (big)
        let points_g2: Vec<G2> = (0..50_000)
            .map(|_| G2::generator() * &Scalar::random(&mut rng))
            .collect();
        let scalars: Vec<Scalar> = (0..50_000).map(|_| Scalar::random(&mut rng)).collect();
        let expected_g2 = naive_msm(&points_g2, &scalars);
        let result_g2 = G2::msm(&points_g2, &scalars, &Sequential);
        assert_eq!(expected_g2, result_g2, "G2 MSM large case failed");
    }

    #[test]
    fn test_trait_implementations() {
        // Generate a set of unique items to test.
        let mut rng = test_rng();
        const NUM_ITEMS: usize = 10;
        let mut scalar_set = BTreeSet::new();
        let mut g1_set = BTreeSet::new();
        let mut g2_set = BTreeSet::new();
        while scalar_set.len() < NUM_ITEMS {
            let scalar = Scalar::random(&mut rng);
            let g1 = G1::generator() * &scalar;
            let g2 = G2::generator() * &scalar;

            scalar_set.insert(scalar);
            g1_set.insert(g1);
            g2_set.insert(g2);
        }

        // Verify that the sets contain the expected number of unique items.
        assert_eq!(scalar_set.len(), NUM_ITEMS);
        assert_eq!(g1_set.len(), NUM_ITEMS);
        assert_eq!(g2_set.len(), NUM_ITEMS);

        // Verify that `BTreeSet` iteration is sorted, which relies on `Ord`.
        let scalars: Vec<_> = scalar_set.iter().collect();
        assert!(scalars.windows(2).all(|w| w[0] <= w[1]));
        let g1s: Vec<_> = g1_set.iter().collect();
        assert!(g1s.windows(2).all(|w| w[0] <= w[1]));
        let g2s: Vec<_> = g2_set.iter().collect();
        assert!(g2s.windows(2).all(|w| w[0] <= w[1]));

        // Test that we can use these types as keys in hash maps, which relies on `Hash` and `Eq`.
        let scalar_map: HashMap<_, _> = scalar_set.iter().cloned().zip(0..).collect();
        let g1_map: HashMap<_, _> = g1_set.iter().cloned().zip(0..).collect();
        let g2_map: HashMap<_, _> = g2_set.iter().cloned().zip(0..).collect();

        // Verify that the maps contain the expected number of unique items.
        assert_eq!(scalar_map.len(), NUM_ITEMS);
        assert_eq!(g1_map.len(), NUM_ITEMS);
        assert_eq!(g2_map.len(), NUM_ITEMS);
    }

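    // `Scalar::map` implements hash-to-field under a domain separation tag
    // (DST): the same (dst, msg) pair always yields the same scalar, while
    // distinct DSTs keep outputs from different protocols independent. The
    // cases below exercise determinism, message separation, and DST
    // separation; they do not pin down a specific ciphersuite.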
    #[test]
    fn test_scalar_map() {
        // Test 1: Determinism (same input, same output)
        let msg = b"test message";
        let dst = b"TEST_DST";
        let scalar1 = Scalar::map(dst, msg);
        let scalar2 = Scalar::map(dst, msg);
        assert_eq!(scalar1, scalar2, "Same input should produce same output");

        // Test 2: Different messages produce different scalars
        let msg2 = b"different message";
        let scalar3 = Scalar::map(dst, msg2);
        assert_ne!(
            scalar1, scalar3,
            "Different messages should produce different scalars"
        );

        // Test 3: Different DSTs produce different scalars
        let dst2 = b"DIFFERENT_DST";
        let scalar4 = Scalar::map(dst2, msg);
        assert_ne!(
            scalar1, scalar4,
            "Different DSTs should produce different scalars"
        );

        // Test 4: Empty message
        let empty_msg = b"";
        let scalar_empty = Scalar::map(dst, empty_msg);
        assert_ne!(
            scalar_empty,
            Scalar::zero(),
            "Empty message should not produce zero"
        );

        // Test 5: Large message
        let large_msg = vec![0x42u8; 1000];
        let scalar_large = Scalar::map(dst, &large_msg);
        assert_ne!(
            scalar_large,
            Scalar::zero(),
            "Large message should not produce zero"
        );

        // Test 6: Verify the scalar is valid (not zero)
        assert_ne!(
            scalar1,
            Scalar::zero(),
            "Hash should not produce zero scalar"
        );
    }

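    // A further domain-separation sketch: exchanging the roles of DST and
    // message should also change the output, since the DST is encoded
    // separately from the message input. This relies only on `Scalar::map`
    // as exercised above, assuming it accepts arbitrary byte strings for
    // both arguments, as the calls above suggest.
    #[test]
    fn test_scalar_map_swap_sketch() {
        let msg = b"test message";
        let dst = b"TEST_DST";
        let forward = Scalar::map(dst, msg);
        let swapped = Scalar::map(msg, dst);
        assert_ne!(
            forward, swapped,
            "Swapping DST and message should produce a different scalar"
        );
    }
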
    #[test]
    fn test_secret_scalar_equality() {
        let mut rng = test_rng();
        let scalar1 = Scalar::random(&mut rng);
        let scalar2 = scalar1.clone();
        let scalar3 = Scalar::random(&mut rng);

        let s1 = Secret::new(scalar1);
        let s2 = Secret::new(scalar2);
        let s3 = Secret::new(scalar3);

        // Secrets wrapping the same scalar should be equal
        assert_eq!(s1, s2);
        // Independently sampled scalars should (with overwhelming probability) differ
        assert_ne!(s1, s3);
    }

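    // `Share` contains private key material, so its `Debug` and `Display`
    // implementations must never print the secret; both are expected to emit
    // a "REDACTED" placeholder instead.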
    #[test]
    fn test_share_redacted() {
        let mut rng = test_rng();
        let share = Share::new(Participant::new(1), Private::random(&mut rng));
        let debug = format!("{:?}", share);
        let display = format!("{}", share);
        assert!(debug.contains("REDACTED"));
        assert!(display.contains("REDACTED"));
    }

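    // Codec conformance: exercises encode/decode round-trips for each listed
    // type over `Arbitrary`-generated values, which is why this module is
    // gated on the `arbitrary` feature.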
    #[cfg(feature = "arbitrary")]
    mod conformance {
        use super::*;
        use commonware_codec::conformance::CodecConformance;

        commonware_conformance::conformance_tests! {
            CodecConformance<G1>,
            CodecConformance<G2>,
            CodecConformance<Private>,
            CodecConformance<Scalar>,
            CodecConformance<Share>
        }
    }
}