mod batched;
mod standard;

#[cfg(all(feature = "cuda", target_arch = "x86_64"))]
mod cuda;

#[cfg(target_arch = "x86_64")]
pub mod prefetch;

use snarkvm_curves::{bls12_377::G1Affine, traits::AffineCurve};
use snarkvm_fields::PrimeField;

use core::any::TypeId;
#[cfg(all(feature = "cuda", target_arch = "x86_64"))]
use core::sync::atomic::{AtomicBool, Ordering};

/// Set to `true` once a CUDA call fails, so that subsequent MSMs skip the GPU
/// path and fall back to the CPU implementation without retrying.
#[cfg(all(feature = "cuda", target_arch = "x86_64"))]
static HAS_CUDA_FAILED: AtomicBool = AtomicBool::new(false);
/// A dispatcher for variable-base multi-scalar multiplication (MSM),
/// which computes `sum_i scalars[i] * bases[i]`.
pub struct VariableBase;

impl VariableBase {
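    /// Computes `sum_i scalars[i] * bases[i]`, dispatching to the fastest
    /// backend available for the curve and platform at hand.
    ///
    /// A minimal usage sketch; it is marked `ignore` because it mirrors the
    /// test helpers below rather than a pinned public API:
    ///
    /// ```ignore
    /// use snarkvm_curves::{bls12_377::{Fr, G1Affine}, traits::{AffineCurve, ProjectiveCurve}};
    /// use snarkvm_fields::PrimeField;
    /// use snarkvm_utilities::rand::test_rng;
    ///
    /// let mut rng = test_rng();
    /// let bases = (0..8).map(|_| G1Affine::rand(&mut rng)).collect::<Vec<_>>();
    /// let scalars = (0..8).map(|_| Fr::rand(&mut rng).to_repr()).collect::<Vec<_>>();
    /// let sum = VariableBase::msm(&bases, &scalars).to_affine();
    /// ```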
    pub fn msm<G: AffineCurve>(bases: &[G], scalars: &[<G::ScalarField as PrimeField>::BigInteger]) -> G::Projective {
        // For BLS12-377 G1, use the batched-addition MSM, with a CUDA fast path when available.
        if TypeId::of::<G>() == TypeId::of::<G1Affine>() {
            #[cfg(all(feature = "cuda", target_arch = "x86_64"))]
            if !HAS_CUDA_FAILED.load(Ordering::SeqCst) {
                match cuda::msm_cuda(bases, scalars) {
                    Ok(x) => return x,
                    Err(_e) => {
                        // Latch the failure so subsequent calls skip the GPU entirely.
                        HAS_CUDA_FAILED.store(true, Ordering::SeqCst);
                        eprintln!("CUDA failed, moving to the next MSM method");
                    }
                }
            }
            batched::msm(bases, scalars)
        }
        // For all other curves, use the standard Pippenger MSM.
        else {
            standard::msm(bases, scalars)
        }
    }
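    /// A naive reference MSM for tests: multiplies each base by its scalar
    /// via double-and-add and sums the partial results.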
    #[cfg(test)]
    fn msm_naive<G: AffineCurve>(bases: &[G], scalars: &[<G::ScalarField as PrimeField>::BigInteger]) -> G::Projective {
        use itertools::Itertools;
        use snarkvm_utilities::BitIteratorBE;

        bases.iter().zip_eq(scalars).map(|(base, scalar)| base.mul_bits(BitIteratorBE::new(*scalar))).sum()
    }
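    /// The same naive reference MSM, parallelized across inputs with rayon.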
    #[cfg(test)]
    fn msm_naive_parallel<G: AffineCurve>(
        bases: &[G],
        scalars: &[<G::ScalarField as PrimeField>::BigInteger],
    ) -> G::Projective {
        use rayon::prelude::*;
        use snarkvm_utilities::BitIteratorBE;

        bases.par_iter().zip_eq(scalars).map(|(base, scalar)| base.mul_bits(BitIteratorBE::new(*scalar))).sum()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use snarkvm_curves::bls12_377::{Fr, G1Affine};
    use snarkvm_fields::PrimeField;
    use snarkvm_utilities::rand::test_rng;

    use rand_xorshift::XorShiftRng;
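    /// Generates `size` random affine bases and scalar big integers for the MSM tests.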
    fn create_scalar_bases<G: AffineCurve<ScalarField = F>, F: PrimeField>(
        rng: &mut XorShiftRng,
        size: usize,
    ) -> (Vec<G>, Vec<F::BigInteger>) {
        let bases = (0..size).map(|_| G::rand(rng)).collect::<Vec<_>>();
        let scalars = (0..size).map(|_| F::rand(rng).to_repr()).collect::<Vec<_>>();
        (bases, scalars)
    }
    #[test]
    fn test_msm() {
        use snarkvm_curves::ProjectiveCurve;

        for msm_size in [1, 5, 10, 50, 100, 500, 1000] {
            let mut rng = test_rng();
            let (bases, scalars) = create_scalar_bases::<G1Affine, Fr>(&mut rng, msm_size);

            // The serial and parallel naive implementations must agree with each other...
            let naive_a = VariableBase::msm_naive(bases.as_slice(), scalars.as_slice()).to_affine();
            let naive_b = VariableBase::msm_naive_parallel(bases.as_slice(), scalars.as_slice()).to_affine();
            assert_eq!(naive_a, naive_b, "MSM size: {msm_size}");

            // ...and with both optimized CPU backends.
            let candidate = standard::msm(bases.as_slice(), scalars.as_slice()).to_affine();
            assert_eq!(naive_a, candidate, "MSM size: {msm_size}");

            let candidate = batched::msm(bases.as_slice(), scalars.as_slice()).to_affine();
            assert_eq!(naive_a, candidate, "MSM size: {msm_size}");
        }
    }
    #[cfg(all(feature = "cuda", target_arch = "x86_64"))]
    #[test]
    fn test_msm_cuda() {
        let mut rng = test_rng();
        for _ in 0..100 {
            let (bases, scalars) = create_scalar_bases::<G1Affine, Fr>(&mut rng, 1 << 10);

            // The CUDA backend must produce the same result as the standard CPU backend.
            let rust = standard::msm(bases.as_slice(), scalars.as_slice());
            let cuda = cuda::msm_cuda(bases.as_slice(), scalars.as_slice()).unwrap();
            assert_eq!(rust, cuda);
        }
    }
}