// dleq_mirror/lib.rs

1#![cfg_attr(docsrs, feature(doc_auto_cfg))]
2#![cfg_attr(not(feature = "std"), no_std)]
3#![doc = include_str!("../README.md")]
4
5use core::ops::Deref;
6
7use rand_core::{RngCore, CryptoRng};
8
9use zeroize::{Zeroize, Zeroizing};
10
11use transcript::Transcript;
12
13use ff::{Field, PrimeField};
14use group::prime::PrimeGroup;
15
16#[cfg(feature = "serialize")]
17use std::io::{self, Error, Read, Write};
18
19/// A cross-group DLEq proof capable of proving that two public keys, across two different curves,
20/// share a private key.
21#[cfg(feature = "experimental")]
22pub mod cross_group;
23
24#[cfg(test)]
25mod tests;
26
// Produce a non-biased challenge from the transcript in the specified field
//
// Requests 2x the field's byte length of transcript output and folds it into the field as a
// big-endian integer, making the bias from the final modular reduction negligible.
pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
  // From here, there are three ways to get a scalar under the ff/group API
  // 1: Scalar::random(ChaCha20Rng::from_seed(self.transcript.rng_seed(b"challenge")))
  // 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianness
  //    and loading it in
  // 3: Iterating over each byte and manually doubling/adding. This is simplest

  // Accumulator for the reduced value of the concatenated challenge bytes
  let mut challenge = F::ZERO;

  // Get a wide amount of bytes to safely reduce without bias
  // In most cases, <=1.5x bytes is enough. 2x is still standard and there's some theoretical
  // groups which may technically require more than 1.5x bytes for this to work as intended
  let target_bytes = ((usize::try_from(F::NUM_BITS).unwrap() + 7) / 8) * 2;
  let mut challenge_bytes = transcript.challenge(b"challenge");
  let challenge_bytes_len = challenge_bytes.as_ref().len();
  // If the challenge is 32 bytes, and we need 64, we need two challenges
  // (ceiling division of target_bytes by challenge_bytes_len)
  let needed_challenges = (target_bytes + (challenge_bytes_len - 1)) / challenge_bytes_len;

  // The following algorithm should be equivalent to a wide reduction of the challenges,
  // interpreted as concatenated, big-endian byte string
  let mut handled_bytes = 0;
  'outer: for _ in 0 ..= needed_challenges {
    // Cursor of which byte of the challenge to use next
    let mut b = 0;
    while b < challenge_bytes_len {
      // Get the next amount of bytes to attempt
      // Only grabs the needed amount of bytes, up to 8 at a time (u64), so long as they're
      // available in the challenge
      let chunk_bytes = (target_bytes - handled_bytes).min(8).min(challenge_bytes_len - b);

      // Pack the chunk's bytes into a u64, big-endian
      let mut chunk = 0;
      for _ in 0 .. chunk_bytes {
        chunk <<= 8;
        chunk |= u64::from(challenge_bytes.as_ref()[b]);
        b += 1;
      }
      // Add this chunk
      challenge += F::from(chunk);

      handled_bytes += chunk_bytes;
      // If we've reached the target amount of bytes, break
      // (also skips requesting an unnecessary extension challenge below)
      if handled_bytes == target_bytes {
        break 'outer;
      }

      // Shift over by however many bits will be in the next chunk
      // (equivalent to multiplying the accumulator by 2^(next_chunk_bytes * 8))
      let next_chunk_bytes = (target_bytes - handled_bytes).min(8).min(challenge_bytes_len);
      for _ in 0 .. (next_chunk_bytes * 8) {
        challenge = challenge.double();
      }
    }

    // Secure thanks to the Transcript trait having a bound of updating on challenge
    challenge_bytes = transcript.challenge(b"challenge_extension");
  }

  challenge
}
86
// Helper function to read a scalar
//
// Reads the scalar's fixed-size canonical representation from the reader and decodes it,
// rejecting any encoding which isn't a canonical scalar.
#[cfg(feature = "serialize")]
fn read_scalar<R: Read, F: PrimeField>(r: &mut R) -> io::Result<F> {
  let mut repr = F::Repr::default();
  r.read_exact(repr.as_mut())?;
  // from_repr returns a CtOption; convert to Option and map a failed decode to an io::Error
  Option::<F>::from(F::from_repr(repr)).ok_or_else(|| Error::other("invalid scalar"))
}
98
/// Error for DLEq proofs.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum DLEqError {
  /// The proof was invalid.
  ///
  /// Also returned when the lengths of the passed-in slices (generators/points) don't match the
  /// proof's expectations.
  InvalidProof,
}
105
/// A proof that points have the same discrete logarithm across generators.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct DLEqProof<G: PrimeGroup<Scalar: Zeroize>> {
  // Fiat-Shamir challenge, re-derived and compared during verification
  c: G::Scalar,
  // Solution s = r + c * a, where r is the nonce and a the proven discrete logarithm
  s: G::Scalar,
}
112
#[allow(non_snake_case)]
impl<G: PrimeGroup<Scalar: Zeroize>> DLEqProof<G> {
  // Append one statement (generator G, nonce commitment R, point A) to the transcript, binding
  // all three into the eventual challenge
  fn transcript<T: Transcript>(transcript: &mut T, generator: G, nonce: G, point: G) {
    transcript.append_message(b"generator", generator.to_bytes());
    transcript.append_message(b"nonce", nonce.to_bytes());
    transcript.append_message(b"point", point.to_bytes());
  }

  /// Prove that the points created by `scalar * G`, for each specified generator, share a discrete
  /// logarithm.
  ///
  /// The transcript passed to `verify` must be identically initialized for the proof to verify.
  pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
    rng: &mut R,
    transcript: &mut T,
    generators: &[G],
    scalar: &Zeroizing<G::Scalar>,
  ) -> DLEqProof<G> {
    // A single nonce suffices as a single discrete logarithm is proven for
    let r = Zeroizing::new(G::Scalar::random(rng));

    transcript.domain_separate(b"dleq");
    for generator in generators {
      // R, A
      Self::transcript(transcript, *generator, *generator * r.deref(), *generator * scalar.deref());
    }

    // Fiat-Shamir challenge over every transcripted statement
    let c = challenge(transcript);
    // r + ca
    let s = (c * scalar.deref()) + r.deref();

    DLEqProof { c, s }
  }

  // Transcript a specific generator/nonce/point (G/R/A), as used when verifying a proof.
  // This takes in the generator/point, and then the challenge and solution to calculate the nonce.
  fn verify_statement<T: Transcript>(
    transcript: &mut T,
    generator: G,
    point: G,
    c: G::Scalar,
    s: G::Scalar,
  ) {
    // s = r + ca
    // sG - cA = R
    // R, A
    Self::transcript(transcript, generator, (generator * s) - (point * c), point);
  }

  /// Verify the specified points share a discrete logarithm across the specified generators.
  ///
  /// Returns `DLEqError::InvalidProof` if the slice lengths differ or the reconstructed
  /// challenge doesn't match the proof's.
  pub fn verify<T: Transcript>(
    &self,
    transcript: &mut T,
    generators: &[G],
    points: &[G],
  ) -> Result<(), DLEqError> {
    if generators.len() != points.len() {
      Err(DLEqError::InvalidProof)?;
    }

    // Mirrors the transcript operations performed by prove, with the nonces reconstructed from
    // (c, s); a matching challenge implies matching nonces, and accordingly a valid proof
    transcript.domain_separate(b"dleq");
    for (generator, point) in generators.iter().zip(points) {
      Self::verify_statement(transcript, *generator, *point, self.c, self.s);
    }

    if self.c != challenge(transcript) {
      Err(DLEqError::InvalidProof)?;
    }

    Ok(())
  }

  /// Write a DLEq proof to something implementing Write.
  ///
  /// Serializes `c` then `s`, each as the scalar's canonical representation.
  #[cfg(feature = "serialize")]
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    w.write_all(self.c.to_repr().as_ref())?;
    w.write_all(self.s.to_repr().as_ref())
  }

  /// Read a DLEq proof from something implementing Read.
  ///
  /// Note: this validates the scalars are canonical, not that the proof itself is valid.
  #[cfg(feature = "serialize")]
  pub fn read<R: Read>(r: &mut R) -> io::Result<DLEqProof<G>> {
    Ok(DLEqProof { c: read_scalar(r)?, s: read_scalar(r)? })
  }

  /// Serialize a DLEq proof to a `Vec<u8>`.
  #[cfg(feature = "serialize")]
  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    // Writing to a Vec is infallible, justifying the unwrap
    self.write(&mut res).unwrap();
    res
  }
}
203
/// A proof that multiple series of points each have a single discrete logarithm across generators.
///
/// This is effectively n distinct DLEq proofs, one for each discrete logarithm and its points
/// across some generators, yet with a smaller overall proof size.
#[cfg(feature = "std")]
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct MultiDLEqProof<G: PrimeGroup<Scalar: Zeroize>> {
  // Single Fiat-Shamir challenge shared by every statement
  c: G::Scalar,
  // One solution (s = r + c * scalar) per discrete logarithm proven for
  s: Vec<G::Scalar>,
}
214
#[cfg(feature = "std")]
#[allow(non_snake_case)]
impl<G: PrimeGroup<Scalar: Zeroize>> MultiDLEqProof<G> {
  /// Prove for each scalar that the series of points created by multiplying it against its
  /// matching generators share a discrete logarithm.
  /// This function panics if `generators.len() != scalars.len()`.
  pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
    rng: &mut R,
    transcript: &mut T,
    generators: &[Vec<G>],
    scalars: &[Zeroizing<G::Scalar>],
  ) -> MultiDLEqProof<G> {
    assert_eq!(
      generators.len(),
      scalars.len(),
      "amount of series of generators doesn't match the amount of scalars"
    );

    transcript.domain_separate(b"multi_dleq");

    // One nonce per discrete logarithm, retained until the challenge is derived
    let mut nonces = vec![];
    for (i, (scalar, generators)) in scalars.iter().zip(generators).enumerate() {
      // Delineate between discrete logarithms
      // NOTE(review): usize::to_le_bytes is pointer-width-dependent, so transcripts differ
      // between 32-bit and 64-bit platforms — confirm this is acceptable for its use cases
      transcript.append_message(b"discrete_logarithm", i.to_le_bytes());

      let nonce = Zeroizing::new(G::Scalar::random(&mut *rng));
      for generator in generators {
        // Transcript each (G, R, A) statement for this discrete logarithm
        DLEqProof::transcript(
          transcript,
          *generator,
          *generator * nonce.deref(),
          *generator * scalar.deref(),
        );
      }
      nonces.push(nonce);
    }

    // Single challenge binding every statement across every discrete logarithm
    let c = challenge(transcript);

    // s_i = r_i + c * scalar_i
    let mut s = vec![];
    for (scalar, nonce) in scalars.iter().zip(nonces) {
      s.push((c * scalar.deref()) + nonce.deref());
    }

    MultiDLEqProof { c, s }
  }

  /// Verify each series of points share a discrete logarithm against their matching series of
  /// generators.
  ///
  /// Returns `DLEqError::InvalidProof` if any lengths mismatch or the reconstructed challenge
  /// doesn't match the proof's.
  pub fn verify<T: Transcript>(
    &self,
    transcript: &mut T,
    generators: &[Vec<G>],
    points: &[Vec<G>],
  ) -> Result<(), DLEqError> {
    if points.len() != generators.len() {
      Err(DLEqError::InvalidProof)?;
    }
    if self.s.len() != generators.len() {
      Err(DLEqError::InvalidProof)?;
    }

    // Mirrors prove's transcript operations, reconstructing each nonce from (c, s_i)
    transcript.domain_separate(b"multi_dleq");
    for (i, (generators, points)) in generators.iter().zip(points).enumerate() {
      if points.len() != generators.len() {
        Err(DLEqError::InvalidProof)?;
      }

      transcript.append_message(b"discrete_logarithm", i.to_le_bytes());
      for (generator, point) in generators.iter().zip(points) {
        DLEqProof::verify_statement(transcript, *generator, *point, self.c, self.s[i]);
      }
    }

    if self.c != challenge(transcript) {
      Err(DLEqError::InvalidProof)?;
    }

    Ok(())
  }

  /// Write a multi-DLEq proof to something implementing Write.
  ///
  /// Serializes `c` then each `s`, each as the scalar's canonical representation.
  #[cfg(feature = "serialize")]
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    w.write_all(self.c.to_repr().as_ref())?;
    for s in &self.s {
      w.write_all(s.to_repr().as_ref())?;
    }
    Ok(())
  }

  /// Read a multi-DLEq proof from something implementing Read.
  ///
  /// `discrete_logs` is the amount of discrete logarithms the proof covers, as the count isn't
  /// serialized. Validates scalars are canonical, not that the proof itself is valid.
  #[cfg(feature = "serialize")]
  pub fn read<R: Read>(r: &mut R, discrete_logs: usize) -> io::Result<MultiDLEqProof<G>> {
    let c = read_scalar(r)?;
    let mut s = vec![];
    for _ in 0 .. discrete_logs {
      s.push(read_scalar(r)?);
    }
    Ok(MultiDLEqProof { c, s })
  }

  /// Serialize a multi-DLEq proof to a `Vec<u8>`.
  #[cfg(feature = "serialize")]
  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    // Writing to a Vec is infallible, justifying the unwrap
    self.write(&mut res).unwrap();
    res
  }
}