use bellman_ce::pairing::ff::{Field, PrimeField};
use bellman_ce::pairing::*;
use log::{error, info};
use generic_array::GenericArray;
use itertools::Itertools;
use memmap::{Mmap, MmapMut};
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use typenum::consts::U64;
use super::keypair::{PrivateKey, PublicKey};
use super::parameters::{
CeremonyParams, CheckForCorrectness, DeserializationError, ElementType, UseCompression,
};
use super::utils::{blank_hash, compute_g2_s, power_pairs, same_ratio};
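/// Lifecycle states of an accumulator. Not referenced within this module;
/// presumably kept for callers that track ceremony progress.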
pub enum AccumulatorState {
Empty,
NonEmpty,
Transformed,
}
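/// Accumulator for the powers-of-tau ceremony, processed in `batch_size` chunks.
///
/// The full accumulator lives in a memory-mapped file; during streaming
/// operations the vectors below hold only one batch at a time. `tau_powers_g1`
/// holds tau^0, tau^1, ... in G1 (up to `powers_g1_length` of them),
/// `tau_powers_g2` holds the first `powers_length` powers in G2, and the
/// alpha/beta vectors hold the G1 powers scaled by alpha and beta. `beta_g2` is
/// beta in G2, and `hash` is the 64-byte hash-chain digest.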
pub struct BatchedAccumulator<'a, E: Engine> {
pub tau_powers_g1: Vec<E::G1Affine>,
pub tau_powers_g2: Vec<E::G2Affine>,
pub alpha_tau_powers_g1: Vec<E::G1Affine>,
pub beta_tau_powers_g1: Vec<E::G1Affine>,
pub beta_g2: E::G2Affine,
pub hash: GenericArray<u8, U64>,
pub parameters: &'a CeremonyParams<E>,
}
impl<'a, E: Engine> BatchedAccumulator<'a, E> {
pub fn empty(parameters: &'a CeremonyParams<E>) -> Self {
Self {
tau_powers_g1: vec![],
tau_powers_g2: vec![],
alpha_tau_powers_g1: vec![],
beta_tau_powers_g1: vec![],
beta_g2: E::G2Affine::zero(),
hash: blank_hash(),
parameters,
}
}
fn g1_size(&self, compression: UseCompression) -> usize {
match compression {
UseCompression::Yes => self.parameters.curve.g1_compressed,
UseCompression::No => self.parameters.curve.g1,
}
}
fn g2_size(&self, compression: UseCompression) -> usize {
match compression {
UseCompression::Yes => self.parameters.curve.g2_compressed,
UseCompression::No => self.parameters.curve.g2,
}
}
fn get_size(&self, element_type: ElementType, compression: UseCompression) -> usize {
match element_type {
ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::TauG1 => {
self.g1_size(compression)
}
ElementType::BetaG2 | ElementType::TauG2 => self.g2_size(compression),
}
}
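    /// Byte offset of element `index` of `element_type` within the memory-mapped
    /// file. The layout, after the `hash_size`-byte hash prefix, is: all TauG1
    /// points, then TauG2, AlphaG1, BetaG1, and finally the single BetaG2 point.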
fn calculate_mmap_position(
&self,
index: usize,
element_type: ElementType,
compression: UseCompression,
) -> usize {
let g1_size = self.g1_size(compression);
let g2_size = self.g2_size(compression);
let required_tau_g1_power = self.parameters.powers_g1_length;
let required_power = self.parameters.powers_length;
let parameters = &self.parameters;
let position = match element_type {
ElementType::TauG1 => {
let mut position = 0;
position += g1_size * index;
                assert!(
                    index < parameters.powers_g1_length,
                    "Index of TauG1 element written must not exceed {}, while it's {}",
                    parameters.powers_g1_length,
                    index
                );
position
}
ElementType::TauG2 => {
let mut position = 0;
position += g1_size * required_tau_g1_power;
                assert!(
                    index < required_power,
                    "Index of TauG2 element written must not exceed {}, while it's {}",
                    required_power,
                    index
                );
position += g2_size * index;
position
}
ElementType::AlphaG1 => {
let mut position = 0;
position += g1_size * required_tau_g1_power;
position += g2_size * required_power;
                assert!(
                    index < required_power,
                    "Index of AlphaG1 element written must not exceed {}, while it's {}",
                    required_power,
                    index
                );
position += g1_size * index;
position
}
ElementType::BetaG1 => {
let mut position = 0;
position += g1_size * required_tau_g1_power;
position += g2_size * required_power;
position += g1_size * required_power;
                assert!(
                    index < required_power,
                    "Index of BetaG1 element written must not exceed {}, while it's {}",
                    required_power,
                    index
                );
position += g1_size * index;
position
}
ElementType::BetaG2 => {
let mut position = 0;
position += g1_size * required_tau_g1_power;
position += g2_size * required_power;
position += g1_size * required_power;
position += g1_size * required_power;
position
}
};
position + self.parameters.hash_size
}
}
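/// Verifies a contribution between two accumulators that are fully resident in
/// memory. For mmap-backed files use `BatchedAccumulator::verify_transformation`.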
pub fn verify_transform<E: Engine>(
before: &BatchedAccumulator<E>,
after: &BatchedAccumulator<E>,
key: &PublicKey<E>,
digest: &[u8],
) -> bool {
assert_eq!(digest.len(), 64);
let tau_g2_s = compute_g2_s::<E>(&digest, &key.tau_g1.0, &key.tau_g1.1, 0);
let alpha_g2_s = compute_g2_s::<E>(&digest, &key.alpha_g1.0, &key.alpha_g1.1, 1);
let beta_g2_s = compute_g2_s::<E>(&digest, &key.beta_g1.0, &key.beta_g1.1, 2);
if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
return false;
}
if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
return false;
}
if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
return false;
}
if after.tau_powers_g1[0] != E::G1Affine::one() {
return false;
}
if after.tau_powers_g2[0] != E::G2Affine::one() {
return false;
}
if !same_ratio(
(before.tau_powers_g1[1], after.tau_powers_g1[1]),
(tau_g2_s, key.tau_g2),
) {
return false;
}
if !same_ratio(
(before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]),
(alpha_g2_s, key.alpha_g2),
) {
return false;
}
if !same_ratio(
(before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]),
(beta_g2_s, key.beta_g2),
) {
return false;
}
if !same_ratio(
(before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]),
(before.beta_g2, after.beta_g2),
) {
return false;
}
if !same_ratio(
power_pairs(&after.tau_powers_g1),
(after.tau_powers_g2[0], after.tau_powers_g2[1]),
) {
return false;
}
if !same_ratio(
power_pairs(&after.tau_powers_g2),
(after.tau_powers_g1[0], after.tau_powers_g1[1]),
) {
return false;
}
if !same_ratio(
power_pairs(&after.alpha_tau_powers_g1),
(after.tau_powers_g2[0], after.tau_powers_g2[1]),
) {
return false;
}
if !same_ratio(
power_pairs(&after.beta_tau_powers_g1),
(after.tau_powers_g2[0], after.tau_powers_g2[1]),
) {
return false;
}
true
}
impl<'a, E: Engine> BatchedAccumulator<'a, E> {
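    /// Verifies a contribution directly against the memory-mapped `challenge`
    /// (input) and `response` (output) files: the public-key ratios are checked
    /// first, then every batch of powers, so the full accumulator never needs
    /// to fit in memory.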
#[allow(clippy::too_many_arguments, clippy::cognitive_complexity)]
pub fn verify_transformation(
input_map: &Mmap,
output_map: &Mmap,
key: &PublicKey<E>,
digest: &[u8],
input_is_compressed: UseCompression,
output_is_compressed: UseCompression,
check_input_for_correctness: CheckForCorrectness,
check_output_for_correctness: CheckForCorrectness,
parameters: &'a CeremonyParams<E>,
) -> bool {
use itertools::MinMaxResult::MinMax;
assert_eq!(digest.len(), 64);
let tau_g2_s = compute_g2_s::<E>(&digest, &key.tau_g1.0, &key.tau_g1.1, 0);
let alpha_g2_s = compute_g2_s::<E>(&digest, &key.alpha_g1.0, &key.alpha_g1.1, 1);
let beta_g2_s = compute_g2_s::<E>(&digest, &key.beta_g1.0, &key.beta_g1.1, 2);
if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
error!("Invalid ratio key.tau_g1, (tau_g2_s, key.tau_g2)");
return false;
}
if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
error!("Invalid ratio key.alpha_g1, (alpha_g2_s, key.alpha_g2)");
return false;
}
if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
error!("Invalid ratio key.beta_g1, (beta_g2_s, key.beta_g2)");
return false;
}
let mut before = Self::empty(parameters);
let mut after = Self::empty(parameters);
{
let chunk_size = 2;
before
.read_chunk(
0,
chunk_size,
input_is_compressed,
check_input_for_correctness,
&input_map,
)
.expect("must read a first chunk from `challenge`");
after
.read_chunk(
0,
chunk_size,
output_is_compressed,
check_output_for_correctness,
&output_map,
)
.expect("must read a first chunk from `response`");
if after.tau_powers_g1[0] != E::G1Affine::one() {
error!("tau_powers_g1[0] != 1");
return false;
}
if after.tau_powers_g2[0] != E::G2Affine::one() {
error!("tau_powers_g2[0] != 1");
return false;
}
if !same_ratio(
(before.tau_powers_g1[1], after.tau_powers_g1[1]),
(tau_g2_s, key.tau_g2),
) {
error!("Invalid ratio (before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)");
return false;
}
if !same_ratio(
(before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]),
(alpha_g2_s, key.alpha_g2),
) {
error!("Invalid ratio (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)");
return false;
}
if !same_ratio(
(before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]),
(beta_g2_s, key.beta_g2),
) {
error!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)");
return false;
}
if !same_ratio(
(before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]),
(before.beta_g2, after.beta_g2),
) {
error!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)");
return false;
}
}
let tau_powers_g2_0 = after.tau_powers_g2[0];
let tau_powers_g2_1 = after.tau_powers_g2[1];
let tau_powers_g1_0 = after.tau_powers_g1[0];
let tau_powers_g1_1 = after.tau_powers_g1[1];
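        // Holds the last TauG1 point of the G2-sized range and the first point
        // of the extra TauG1 range, so the tau ratio can also be checked across
        // the boundary between the two loops below.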
let mut tau_powers_last_first_chunks = vec![E::G1Affine::zero(); 2];
let tau_powers_length = parameters.powers_length;
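        // Each chunk (except the last) reads one extra element so that
        // `power_pairs` can check the ratio across chunk boundaries.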
for chunk in &(0..tau_powers_length).chunks(parameters.batch_size) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1 + if end == tau_powers_length - 1 { 0 } else { 1 };
before
.read_chunk(
start,
size,
input_is_compressed,
check_input_for_correctness,
&input_map,
)
                    .unwrap_or_else(|_| {
                        panic!(
                            "must read a chunk from {} to {} from `challenge`",
                            start, end
                        )
                    });
after
.read_chunk(
start,
size,
output_is_compressed,
check_output_for_correctness,
&output_map,
)
                    .unwrap_or_else(|_| {
                        panic!(
                            "must read a chunk from {} to {} from `response`",
                            start, end
                        )
                    });
if !same_ratio(
power_pairs(&after.tau_powers_g1),
(tau_powers_g2_0, tau_powers_g2_1),
) {
error!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
return false;
}
if !same_ratio(
power_pairs(&after.tau_powers_g2),
(tau_powers_g1_0, tau_powers_g1_1),
) {
error!("Invalid ratio power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)");
return false;
}
if !same_ratio(
power_pairs(&after.alpha_tau_powers_g1),
(tau_powers_g2_0, tau_powers_g2_1),
) {
error!("Invalid ratio power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
return false;
}
if !same_ratio(
power_pairs(&after.beta_tau_powers_g1),
(tau_powers_g2_0, tau_powers_g2_1),
) {
error!("Invalid ratio power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
return false;
}
if end == tau_powers_length - 1 {
tau_powers_last_first_chunks[0] = after.tau_powers_g1[size - 1];
}
info!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
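        // The remaining TauG1 powers (beyond `powers_length`) exist only in G1;
        // TauG2, AlphaG1, and BetaG1 must be empty over this range.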
for chunk in &(tau_powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
{
if let MinMax(start, end) = chunk.minmax() {
let size = end - start
+ 1
+ if end == parameters.powers_g1_length - 1 {
0
} else {
1
};
before
.read_chunk(
start,
size,
input_is_compressed,
check_input_for_correctness,
&input_map,
)
                    .unwrap_or_else(|_| {
                        panic!(
                            "must read a chunk from {} to {} from `challenge`",
                            start, end
                        )
                    });
after
.read_chunk(
start,
size,
output_is_compressed,
check_output_for_correctness,
&output_map,
)
                    .unwrap_or_else(|_| {
                        panic!(
                            "must read a chunk from {} to {} from `response`",
                            start, end
                        )
                    });
assert_eq!(
before.tau_powers_g2.len(),
0,
"during rest of tau g1 generation tau g2 must be empty"
);
assert_eq!(
after.tau_powers_g2.len(),
0,
"during rest of tau g1 generation tau g2 must be empty"
);
if !same_ratio(
power_pairs(&after.tau_powers_g1),
(tau_powers_g2_0, tau_powers_g2_1),
) {
error!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in extra TauG1 contribution");
return false;
}
if start == parameters.powers_length {
tau_powers_last_first_chunks[1] = after.tau_powers_g1[0];
}
info!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
if !same_ratio(
power_pairs(&tau_powers_last_first_chunks),
(tau_powers_g2_0, tau_powers_g2_1),
) {
error!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in TauG1 contribution intersection");
return false;
}
true
}
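    /// Streams a compressed accumulator from `input_map` and rewrites it
    /// uncompressed into `output_map`, batch by batch.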
pub fn decompress(
input_map: &Mmap,
output_map: &mut MmapMut,
check_input_for_correctness: CheckForCorrectness,
parameters: &'a CeremonyParams<E>,
) -> io::Result<()> {
use itertools::MinMaxResult::MinMax;
let mut accumulator = Self::empty(parameters);
for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator
.read_chunk(
start,
size,
UseCompression::Yes,
check_input_for_correctness,
&input_map,
)
                    .unwrap_or_else(|_| {
                        panic!(
                            "must read a chunk from {} to {} from source of decompression",
                            start, end
                        )
                    });
accumulator.write_chunk(start, UseCompression::No, output_map)?;
} else {
panic!("Chunk does not have a min and max");
}
}
for chunk in
&(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
{
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator
.read_chunk(
start,
size,
UseCompression::Yes,
check_input_for_correctness,
&input_map,
)
                    .unwrap_or_else(|_| {
                        panic!(
                            "must read a chunk from {} to {} from source of decompression",
                            start, end
                        )
                    });
assert_eq!(
accumulator.tau_powers_g2.len(),
0,
"during rest of tau g1 generation tau g2 must be empty"
);
assert_eq!(
accumulator.alpha_tau_powers_g1.len(),
0,
"during rest of tau g1 generation alpha*tau in g1 must be empty"
);
assert_eq!(
accumulator.beta_tau_powers_g1.len(),
0,
"during rest of tau g1 generation beta*tau in g1 must be empty"
);
accumulator.write_chunk(start, UseCompression::No, output_map)?;
} else {
panic!("Chunk does not have a min and max");
}
}
Ok(())
}
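    /// Reads the entire accumulator from `input_map` into memory. Note that the
    /// resulting `hash` field is a blank hash, not the file's actual digest.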
pub fn deserialize(
input_map: &Mmap,
check_input_for_correctness: CheckForCorrectness,
compression: UseCompression,
parameters: &'a CeremonyParams<E>,
) -> io::Result<BatchedAccumulator<'a, E>> {
use itertools::MinMaxResult::MinMax;
let mut accumulator = Self::empty(parameters);
let mut tau_powers_g1 = vec![];
let mut tau_powers_g2 = vec![];
let mut alpha_tau_powers_g1 = vec![];
let mut beta_tau_powers_g1 = vec![];
let mut beta_g2 = vec![];
for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator
.read_chunk(
start,
size,
compression,
check_input_for_correctness,
&input_map,
)
                    .unwrap_or_else(|_| {
                        panic!(
                            "must read a chunk from {} to {} from source of decompression",
                            start, end
                        )
                    });
tau_powers_g1.extend_from_slice(&accumulator.tau_powers_g1);
tau_powers_g2.extend_from_slice(&accumulator.tau_powers_g2);
alpha_tau_powers_g1.extend_from_slice(&accumulator.alpha_tau_powers_g1);
beta_tau_powers_g1.extend_from_slice(&accumulator.beta_tau_powers_g1);
if start == 0 {
                    beta_g2.push(accumulator.beta_g2);
}
} else {
panic!("Chunk does not have a min and max");
}
}
for chunk in
&(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
{
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator
.read_chunk(
start,
size,
compression,
check_input_for_correctness,
&input_map,
)
                    .unwrap_or_else(|_| {
                        panic!(
                            "must read a chunk from {} to {} from source of decompression",
                            start, end
                        )
                    });
assert_eq!(
accumulator.tau_powers_g2.len(),
0,
"during rest of tau g1 generation tau g2 must be empty"
);
assert_eq!(
accumulator.alpha_tau_powers_g1.len(),
0,
"during rest of tau g1 generation alpha*tau in g1 must be empty"
);
assert_eq!(
accumulator.beta_tau_powers_g1.len(),
0,
"during rest of tau g1 generation beta*tau in g1 must be empty"
);
tau_powers_g1.extend_from_slice(&accumulator.tau_powers_g1);
tau_powers_g2.extend_from_slice(&accumulator.tau_powers_g2);
alpha_tau_powers_g1.extend_from_slice(&accumulator.alpha_tau_powers_g1);
beta_tau_powers_g1.extend_from_slice(&accumulator.beta_tau_powers_g1);
} else {
panic!("Chunk does not have a min and max");
}
}
Ok(BatchedAccumulator {
tau_powers_g1,
tau_powers_g2,
alpha_tau_powers_g1,
beta_tau_powers_g1,
beta_g2: beta_g2[0],
hash: blank_hash(),
parameters,
})
}
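    /// Writes the whole in-memory accumulator into `output_map`, batch by batch.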
pub fn serialize(
&mut self,
output_map: &mut MmapMut,
compression: UseCompression,
parameters: &CeremonyParams<E>,
) -> io::Result<()> {
use itertools::MinMaxResult::MinMax;
for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
if let MinMax(start, end) = chunk.minmax() {
let mut tmp_acc = BatchedAccumulator::<E> {
tau_powers_g1: (&self.tau_powers_g1[start..=end]).to_vec(),
tau_powers_g2: (&self.tau_powers_g2[start..=end]).to_vec(),
alpha_tau_powers_g1: (&self.alpha_tau_powers_g1[start..=end]).to_vec(),
beta_tau_powers_g1: (&self.beta_tau_powers_g1[start..=end]).to_vec(),
beta_g2: self.beta_g2,
hash: self.hash,
parameters,
};
tmp_acc.write_chunk(start, compression, output_map)?;
} else {
panic!("Chunk does not have a min and max");
}
}
for chunk in
&(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
{
if let MinMax(start, end) = chunk.minmax() {
let mut tmp_acc = BatchedAccumulator::<E> {
tau_powers_g1: (&self.tau_powers_g1[start..=end]).to_vec(),
tau_powers_g2: vec![],
alpha_tau_powers_g1: vec![],
beta_tau_powers_g1: vec![],
beta_g2: self.beta_g2,
hash: self.hash,
parameters,
};
tmp_acc.write_chunk(start, compression, output_map)?;
} else {
panic!("Chunk does not have a min and max");
}
}
Ok(())
}
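    /// Reads `size` elements of every vector starting at index `from`, plus the
    /// single `beta_g2` point, from the memory map into this accumulator.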
pub fn read_chunk(
&mut self,
from: usize,
size: usize,
compression: UseCompression,
checked: CheckForCorrectness,
input_map: &Mmap,
) -> Result<(), DeserializationError> {
self.tau_powers_g1 = match compression {
UseCompression::Yes => self
.read_points_chunk::<<E::G1Affine as CurveAffine>::Compressed>(
from,
size,
ElementType::TauG1,
compression,
checked,
&input_map,
)?,
UseCompression::No => self
.read_points_chunk::<<E::G1Affine as CurveAffine>::Uncompressed>(
from,
size,
ElementType::TauG1,
compression,
checked,
&input_map,
)?,
};
self.tau_powers_g2 = match compression {
UseCompression::Yes => self
.read_points_chunk::<<E::G2Affine as CurveAffine>::Compressed>(
from,
size,
ElementType::TauG2,
compression,
checked,
&input_map,
)?,
UseCompression::No => self
.read_points_chunk::<<E::G2Affine as CurveAffine>::Uncompressed>(
from,
size,
ElementType::TauG2,
compression,
checked,
&input_map,
)?,
};
self.alpha_tau_powers_g1 = match compression {
UseCompression::Yes => self
.read_points_chunk::<<E::G1Affine as CurveAffine>::Compressed>(
from,
size,
ElementType::AlphaG1,
compression,
checked,
&input_map,
)?,
UseCompression::No => self
.read_points_chunk::<<E::G1Affine as CurveAffine>::Uncompressed>(
from,
size,
ElementType::AlphaG1,
compression,
checked,
&input_map,
)?,
};
self.beta_tau_powers_g1 = match compression {
UseCompression::Yes => self
.read_points_chunk::<<E::G1Affine as CurveAffine>::Compressed>(
from,
size,
ElementType::BetaG1,
compression,
checked,
&input_map,
)?,
UseCompression::No => self
.read_points_chunk::<<E::G1Affine as CurveAffine>::Uncompressed>(
from,
size,
ElementType::BetaG1,
compression,
checked,
&input_map,
)?,
};
self.beta_g2 = match compression {
UseCompression::Yes => {
let points = self.read_points_chunk::<<E::G2Affine as CurveAffine>::Compressed>(
0,
1,
ElementType::BetaG2,
compression,
checked,
&input_map,
)?;
points[0]
}
UseCompression::No => {
let points = self.read_points_chunk::<<E::G2Affine as CurveAffine>::Uncompressed>(
0,
1,
ElementType::BetaG2,
compression,
checked,
&input_map,
)?;
points[0]
}
};
Ok(())
}
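    /// Reads and decodes `size` points of `element_type` starting at `from`.
    /// Returns an empty vector as soon as any index falls outside the relevant
    /// section, which is how callers detect the tail of the file.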
fn read_points_chunk<ENC: EncodedPoint>(
&mut self,
from: usize,
size: usize,
element_type: ElementType,
compression: UseCompression,
checked: CheckForCorrectness,
input_map: &Mmap,
) -> Result<Vec<ENC::Affine>, DeserializationError> {
let mut res = vec![ENC::empty(); size];
for (i, encoded) in res.iter_mut().enumerate() {
let index = from + i;
match element_type {
ElementType::TauG1 => {
if index >= self.parameters.powers_g1_length {
return Ok(vec![]);
}
}
ElementType::AlphaG1
| ElementType::BetaG1
| ElementType::BetaG2
| ElementType::TauG2 => {
if index >= self.parameters.powers_length {
return Ok(vec![]);
}
}
};
let position = self.calculate_mmap_position(index, element_type, compression);
let element_size = self.get_size(element_type, compression);
let mut memory_slice = input_map
.get(position..position + element_size)
.expect("must read point data from file");
memory_slice.read_exact(encoded.as_mut())?;
}
let mut res_affine = vec![ENC::Affine::zero(); size];
let mut chunk_size = res.len() / num_cpus::get();
if chunk_size == 0 {
chunk_size = 1;
}
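        // Decode the batch in parallel; any thread that hits a bad point stashes
        // the error behind the mutex and the whole read fails afterwards.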
let decoding_error = Arc::new(Mutex::new(None));
crossbeam::scope(|scope| {
for (source, target) in res
.chunks(chunk_size)
.zip(res_affine.chunks_mut(chunk_size))
{
let decoding_error = decoding_error.clone();
scope.spawn(move |_| {
assert_eq!(source.len(), target.len());
for (source, target) in source.iter().zip(target.iter_mut()) {
                        let decoded: Result<ENC::Affine, DeserializationError> =
                            match checked {
                                CheckForCorrectness::Yes => source
                                    .into_affine()
                                    .map_err(|e| e.into())
                                    .and_then(|source| {
                                        if source.is_zero() {
                                            Err(DeserializationError::PointAtInfinity)
                                        } else {
                                            Ok(source)
                                        }
                                    }),
                                CheckForCorrectness::No => {
                                    source.into_affine_unchecked().map_err(|e| e.into())
                                }
                            };
                        match decoded {
                            Ok(source) => *target = source,
                            Err(e) => *decoding_error.lock().unwrap() = Some(e),
                        }
}
});
}
}).unwrap();
for decoded in res_affine.iter() {
if decoded.is_zero() {
return Err(DeserializationError::PointAtInfinity);
}
}
match Arc::try_unwrap(decoding_error)
.unwrap()
.into_inner()
.unwrap()
{
Some(e) => Err(e),
None => Ok(res_affine),
}
}
fn write_all(
&mut self,
chunk_start: usize,
compression: UseCompression,
element_type: ElementType,
output_map: &mut MmapMut,
) -> io::Result<()> {
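        // The `clone()` calls below sidestep borrowing `self` immutably while
        // `write_point` takes `&mut self`.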
match element_type {
ElementType::TauG1 => {
for (i, c) in self.tau_powers_g1.clone().iter().enumerate() {
let index = chunk_start + i;
self.write_point(index, c, compression, element_type.clone(), output_map)?;
}
}
ElementType::TauG2 => {
for (i, c) in self.tau_powers_g2.clone().iter().enumerate() {
let index = chunk_start + i;
self.write_point(index, c, compression, element_type.clone(), output_map)?;
}
}
ElementType::AlphaG1 => {
for (i, c) in self.alpha_tau_powers_g1.clone().iter().enumerate() {
let index = chunk_start + i;
self.write_point(index, c, compression, element_type.clone(), output_map)?;
}
}
ElementType::BetaG1 => {
for (i, c) in self.beta_tau_powers_g1.clone().iter().enumerate() {
let index = chunk_start + i;
self.write_point(index, c, compression, element_type.clone(), output_map)?;
}
}
ElementType::BetaG2 => {
let index = chunk_start;
self.write_point(
index,
&self.beta_g2.clone(),
compression,
element_type.clone(),
output_map,
)?
}
};
output_map.flush()?;
Ok(())
}
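    /// Writes a single point at logical `index`, silently skipping indices past
    /// the end of the relevant section.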
fn write_point<C>(
&mut self,
index: usize,
p: &C,
compression: UseCompression,
element_type: ElementType,
output_map: &mut MmapMut,
) -> io::Result<()>
where
C: CurveAffine<Engine = E, Scalar = E::Fr>,
{
match element_type {
ElementType::TauG1 => {
if index >= self.parameters.powers_g1_length {
return Ok(());
}
}
ElementType::AlphaG1
| ElementType::BetaG1
| ElementType::BetaG2
| ElementType::TauG2 => {
if index >= self.parameters.powers_length {
return Ok(());
}
}
};
match compression {
UseCompression::Yes => {
let position = self.calculate_mmap_position(index, element_type, compression);
(&mut output_map[position..]).write_all(p.into_compressed().as_ref())?;
}
UseCompression::No => {
let position = self.calculate_mmap_position(index, element_type, compression);
(&mut output_map[position..]).write_all(p.into_uncompressed().as_ref())?;
}
};
Ok(())
}
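    /// Writes one batch of all element types starting at `chunk_start`. Past
    /// `powers_length` only TauG1 points remain, so the other types are skipped.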
pub fn write_chunk(
&mut self,
chunk_start: usize,
compression: UseCompression,
output_map: &mut MmapMut,
) -> io::Result<()> {
self.write_all(chunk_start, compression, ElementType::TauG1, output_map)?;
if chunk_start < self.parameters.powers_length {
self.write_all(chunk_start, compression, ElementType::TauG2, output_map)?;
self.write_all(chunk_start, compression, ElementType::AlphaG1, output_map)?;
self.write_all(chunk_start, compression, ElementType::BetaG1, output_map)?;
self.write_all(chunk_start, compression, ElementType::BetaG2, output_map)?;
}
Ok(())
}
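    /// Applies a participant's private key to every element, reading batches
    /// from `input_map` and writing the transformed accumulator to `output_map`.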
pub fn transform(
input_map: &Mmap,
output_map: &mut MmapMut,
input_is_compressed: UseCompression,
compress_the_output: UseCompression,
check_input_for_correctness: CheckForCorrectness,
key: &PrivateKey<E>,
parameters: &'a CeremonyParams<E>,
) -> io::Result<()> {
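        // Exponentiates each base by the matching scalar (optionally premultiplied
        // by `coeff`), using wNAF across per-thread chunks, then batch-normalizes
        // the results back to affine form.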
fn batch_exp<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(
bases: &mut [C],
exp: &[C::Scalar],
coeff: Option<&C::Scalar>,
) {
assert_eq!(bases.len(), exp.len());
let mut projective = vec![C::Projective::zero(); bases.len()];
            // `chunks_mut(0)` panics, so fall back to a single chunk when there
            // are fewer bases than CPUs.
            let mut chunk_size = bases.len() / num_cpus::get();
            if chunk_size == 0 {
                chunk_size = 1;
            }
crossbeam::scope(|scope| {
for ((bases, exp), projective) in bases
.chunks_mut(chunk_size)
.zip(exp.chunks(chunk_size))
.zip(projective.chunks_mut(chunk_size))
{
scope.spawn(move |_| {
let mut wnaf = Wnaf::new();
for ((base, exp), projective) in
bases.iter_mut().zip(exp.iter()).zip(projective.iter_mut())
{
let mut exp = *exp;
if let Some(coeff) = coeff {
exp.mul_assign(coeff);
}
*projective =
wnaf.base(base.into_projective(), 1).scalar(exp.into_repr());
}
});
}
}).unwrap();
crossbeam::scope(|scope| {
for projective in projective.chunks_mut(chunk_size) {
scope.spawn(move |_| {
C::Projective::batch_normalization(projective);
});
}
}).unwrap();
for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
*affine = projective.into_affine();
assert!(
!affine.is_zero(),
"your contribution happened to produce a point at infinity, please re-run"
);
}
}
let mut accumulator = Self::empty(parameters);
use itertools::MinMaxResult::MinMax;
for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator
.read_chunk(
start,
size,
input_is_compressed,
check_input_for_correctness,
&input_map,
)
.expect("must read a first chunk");
let mut taupowers = vec![E::Fr::zero(); size];
                // Avoid a zero chunk size (which would panic in `chunks_mut`)
                // when `size` is smaller than the CPU count.
                let mut chunk_size = size / num_cpus::get();
                if chunk_size == 0 {
                    chunk_size = 1;
                }
crossbeam::scope(|scope| {
for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
scope.spawn(move |_| {
let mut acc = key.tau.pow(&[(start + i * chunk_size) as u64]);
for t in taupowers {
*t = acc;
acc.mul_assign(&key.tau);
}
});
}
}).unwrap();
batch_exp::<E, _>(&mut accumulator.tau_powers_g1, &taupowers[0..], None);
batch_exp::<E, _>(&mut accumulator.tau_powers_g2, &taupowers[0..], None);
batch_exp::<E, _>(
&mut accumulator.alpha_tau_powers_g1,
&taupowers[0..],
Some(&key.alpha),
);
batch_exp::<E, _>(
&mut accumulator.beta_tau_powers_g1,
&taupowers[0..],
Some(&key.beta),
);
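                // `beta_g2` is re-read from the input on every `read_chunk`, so
                // this assignment writes the same transformed point each batch.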
accumulator.beta_g2 = accumulator.beta_g2.mul(key.beta).into_affine();
assert!(
!accumulator.beta_g2.is_zero(),
"your contribution happened to produce a point at infinity, please re-run"
);
accumulator.write_chunk(start, compress_the_output, output_map)?;
info!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
for chunk in
&(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
{
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator
.read_chunk(
start,
size,
input_is_compressed,
check_input_for_correctness,
&input_map,
)
.expect("must read a first chunk");
assert_eq!(
accumulator.tau_powers_g2.len(),
0,
"during rest of tau g1 generation tau g2 must be empty"
);
let mut taupowers = vec![E::Fr::zero(); size];
                // Avoid a zero chunk size (which would panic in `chunks_mut`)
                // when `size` is smaller than the CPU count.
                let mut chunk_size = size / num_cpus::get();
                if chunk_size == 0 {
                    chunk_size = 1;
                }
crossbeam::scope(|scope| {
for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
scope.spawn(move |_| {
let mut acc = key.tau.pow(&[(start + i * chunk_size) as u64]);
for t in taupowers {
*t = acc;
acc.mul_assign(&key.tau);
}
});
}
}).unwrap();
batch_exp::<E, _>(&mut accumulator.tau_powers_g1, &taupowers[0..], None);
accumulator.write_chunk(start, compress_the_output, output_map)?;
info!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
Ok(())
}
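    /// Writes the initial accumulator (all points set to the group generators)
    /// into `output_map`.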
pub fn generate_initial(
output_map: &mut MmapMut,
compress_the_output: UseCompression,
parameters: &'a CeremonyParams<E>,
) -> io::Result<()> {
use itertools::MinMaxResult::MinMax;
for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
let mut accumulator = Self {
tau_powers_g1: vec![E::G1Affine::one(); size],
tau_powers_g2: vec![E::G2Affine::one(); size],
alpha_tau_powers_g1: vec![E::G1Affine::one(); size],
beta_tau_powers_g1: vec![E::G1Affine::one(); size],
beta_g2: E::G2Affine::one(),
hash: blank_hash(),
parameters,
};
accumulator.write_chunk(start, compress_the_output, output_map)?;
info!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
for chunk in
&(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
{
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
let mut accumulator = Self {
tau_powers_g1: vec![E::G1Affine::one(); size],
tau_powers_g2: vec![],
alpha_tau_powers_g1: vec![],
beta_tau_powers_g1: vec![],
beta_g2: E::G2Affine::one(),
hash: blank_hash(),
parameters,
};
accumulator.write_chunk(start, compress_the_output, output_map)?;
info!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
Ok(())
}
}