use std::rc::Rc;
use std::str::FromStr;
use crate::bigint::nat_err::NatError;
use crate::parse_err::ParseErrKind::{BeginWithIllegalChar, IllegalCharEncounter};
use std::fmt::{Debug, Binary, LowerHex, UpperHex, Octal, Formatter, Display};
use std::cell::Cell;
use std::cmp::Ordering;
use std::ops::{Add, AddAssign, SubAssign, Sub, ShrAssign, Shr, Shl, ShlAssign,
Div, DivAssign, Mul, MulAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign,
BitXor, BitXorAssign, Not, Rem, RemAssign};
use crate::rand::IterSource;
use crate::bigint::arith::{add_mul_vvw, sub_vv_inner, add_vv_inner, add_vw_inner, sub_vw_inner, add_mul_vvw_inner, mul_ww, shl_vu_inner, mul_add_vww, add_vv};
use crate::bigint::BigInt;
use crate::parse_err::ParseErrKind;
// Tuning thresholds, in 32-bit limbs, for switching between the schoolbook
// and Karatsuba multiplication/squaring routines further below.
const KARATSUBA_THRESHOLD: usize = 40;
// NOTE(review): "HRESHOLD" looks like a typo for "THRESHOLD"; the names are
// used as-is throughout this file, so they are kept unchanged here.
const BASIC_SQRT_HRESHOLD: usize = 20;
const KARATSUBA_SQRT_HRESHOLD: usize = 260;
#[cfg(test)]
mod tests;
/// An arbitrary-precision natural number stored as little-endian 32-bit limbs
/// (`nat[0]` is the least significant word). An empty limb vector is the NaN
/// sentinel (see `is_nan`). Because the buffer lives in `Rc<Cell<...>>`,
/// `clone` is a shallow, buffer-sharing copy; use `deep_clone` for an
/// independent value.
#[derive(Clone)]
pub struct Nat {
nat: Rc<Cell<Vec<u32>>>,
}
impl Nat {
// --- raw limb-buffer accessors ---------------------------------------------
// All of these reach through `Rc<Cell<Vec<u32>>>::as_ptr` and hand out plain
// references to the shared buffer. NOTE(review): `as_mut_vec`/`as_mut_slice`
// mint `&mut` from `&self` on shared data, which is not sound in general;
// the code in this module relies on never holding two conflicting references
// at once — confirm before refactoring.
pub(super) fn as_vec(&self) -> &Vec<u32> {
unsafe {self.nat.as_ptr().as_ref().unwrap()}
}
pub(super) fn as_mut_vec(&self) -> &mut Vec<u32> {
unsafe {self.nat.as_ptr().as_mut().unwrap()}
}
pub(super) fn as_slice(&self) -> &[u32] {
self.as_vec().as_slice()
}
#[allow(unused)]
pub(super) fn to_vec(&self) -> Vec<u32> {
// owned copy of the limbs
self.as_vec().clone()
}
#[allow(unused)]
pub(super) fn into_vec(self) -> Vec<u32> {
// takes the buffer out of the Cell, leaving an empty (NaN) vector
// behind for any other Rc holders
self.nat.take()
}
#[allow(unused)]
pub(super) fn as_mut_slice(&self) -> &mut [u32] {
self.as_mut_vec().as_mut_slice()
}
pub(super) fn iter(&self) -> std::slice::Iter<'_, u32> {
self.as_slice().iter()
}
pub(super) fn iter_mut(&self) -> std::slice::IterMut<'_, u32> {
self.as_mut_slice().iter_mut()
}
pub(super) fn clear(&mut self) {
// empties the limb vector, i.e. sets the value to NaN
self.as_mut_vec().clear();
}
/// Independent copy of this number (unlike `clone`, which shares the buffer).
pub fn deep_clone(&self) -> Self {
Nat::from(self.as_slice())
}
/// An empty limb vector is the NaN sentinel.
pub fn is_nan(&self) -> bool {
self.as_vec().is_empty()
}
#[inline]
pub(super) fn num(&self) -> usize {
// number of 32-bit limbs
self.as_vec().len()
}
/// Pack a little-endian byte stream into 32-bit limbs (4 bytes per limb,
/// least significant byte first inside each limb). The caller chooses the
/// byte order by iterating forwards (LE) or in reverse (BE).
fn from_le_or_be_bytes<'a>(data: impl DoubleEndedIterator<Item=&'a u8> + ExactSizeIterator<Item=&'a u8>) -> Nat {
    let mut limbs = Vec::with_capacity(data.len());
    let (mut word, mut filled) = (0u32, 0usize);
    for &byte in data {
        word |= (byte as u32) << (filled << 3);
        filled += 1;
        if filled == 4 {
            // a full limb has been assembled
            limbs.push(word);
            word = 0;
            filled = 0;
        }
    }
    if word > 0 {
        // non-zero partial limb at the top
        limbs.push(word);
    } else if limbs.is_empty() {
        // input was all zero bytes shorter than one limb: the value is 0
        limbs.push(0);
    }
    Self::trim_head_zero_(&mut limbs);
    Self {
        nat: Rc::new(Cell::new(limbs)),
    }
}
/// Build a `Nat` from little-endian bytes; empty input yields NaN.
pub fn from_le_bytes(data: &[u8]) -> Nat {
if data.is_empty() {
Self::nan()
} else {
Self::from_le_or_be_bytes(data.iter())
}
}
/// Build a `Nat` from big-endian bytes; empty input yields NaN.
pub fn from_be_bytes(data: &[u8]) -> Nat {
if data.is_empty() {
Self::nan()
} else {
// reversing the byte order reduces BE to the LE packing routine
Self::from_le_or_be_bytes(data.iter().rev())
}
}
/// Serialize to little-endian bytes into `dst` (cleared first); NaN writes
/// nothing. The top limb is emitted with only as many bytes as it needs.
pub fn copy_to_le(&self, dst: &mut Vec<u8>) {
dst.clear();
if self.is_nan() {
return;
}
// every limb except the most significant contributes exactly 4 bytes
self.iter().take(self.num().saturating_sub(1)).for_each(|&x| {
for &y in x.to_le_bytes().iter() {
dst.push(y);
};
});
match self.iter().last() {
Some(&x) => {
// 1..=4 significant bytes of the top limb
let i = if x > 0xffffffu32 {4} else if x > 0xffffu32 {3} else if x > 0xffu32 {2} else {1};
for &y in x.to_le_bytes().iter().take(i) {
dst.push(y);
}
},
None => {},
}
}
/// Convenience wrapper around [`Nat::copy_to_le`].
pub fn to_le_bytes(&self) -> Vec<u8> {
let mut v = Vec::with_capacity(self.num() << 2);
self.copy_to_le(&mut v);
v
}
/// Serialize to big-endian bytes into `dst` (cleared first); NaN writes
/// nothing. Leading zero bytes of the top limb are skipped.
pub fn copy_to_be(&self, dst: &mut Vec<u8>) {
dst.clear();
if self.is_nan() {
return;
}
match self.iter().last() {
Some(&x) => {
// number of leading zero bytes to skip in the top limb
let i = if x > 0xffffffu32 {0} else if x > 0xffffu32 {1} else if x > 0xffu32 {2} else {3};
for &y in x.to_be_bytes().iter().skip(i) {
dst.push(y);
}
},
None => {},
}
// remaining limbs, most significant first, 4 bytes each
self.iter().rev().skip(1).for_each(|&x| {
x.to_be_bytes().iter().for_each(|&y| {dst.push(y);});
});
}
/// Convenience wrapper around [`Nat::copy_to_be`].
pub fn to_be_bytes(&self) -> Vec<u8> {
let mut v = Vec::with_capacity(self.num() << 2);
self.copy_to_be(&mut v);
v
}
/// Number of significant bits. A lone zero limb counts as 1 bit; an
/// (untrimmed) zero top limb contributes nothing beyond the limbs below it;
/// NaN reports 0.
pub fn bits_len(&self) -> usize {
    let limbs = self.as_vec();
    match limbs.last() {
        None => 0,
        Some(&0) => {
            if limbs.len() == 1 { 1 } else { (limbs.len() - 1) << 5 }
        }
        Some(&top) => ((32 - top.leading_zeros()) as usize) + ((limbs.len() - 1) << 5),
    }
}
/// Drop most-significant zero limbs from `v`, always keeping at least the
/// least significant limb (so `[0]` stays `[0]`).
pub(super) fn trim_head_zero_(v: &mut Vec<u32>) {
    while v.len() > 1 {
        match v.last() {
            Some(&0) => {
                v.pop();
            }
            _ => break,
        }
    }
}
/// In-place normalization of this value's limb buffer.
pub(super) fn trim_head_zero(&mut self) {
    Self::trim_head_zero_(self.as_mut_vec());
}
/// Order two references: `(smaller, larger, swapped)` where `swapped` is
/// true when `self` is the larger-or-equal operand.
pub(super) fn min_max<'a>(&'a self, other: &'a Self) -> (&'a Self, &'a Self, bool) {
if self < other {
(self, other, false)
} else {
(other, self, true)
}
}
/// `self & !rhs`, returned as a new value.
pub fn and_not(self, rhs: Nat) -> Nat {
let mut nat = self.deep_clone();
nat.and_not_assign(rhs);
nat
}
/// In-place `self &= !rhs`. Limbs of `self` beyond `rhs`'s length stay
/// untouched, which matches `!0 == all ones` for the missing limbs.
pub fn and_not_assign(&mut self, rhs: Nat) {
self.iter_mut().zip(rhs.iter()).for_each(|(x, &y) | {
*x = (*x) & (!y);
});
self.trim_head_zero();
}
/// Scan the leading `+` / space characters and the radix prefix of `s`.
/// Returns `(offset_of_first_digit, base)` with base 2, 8, 10 or 16, or an
/// error when the first meaningful character is not a digit. Note that a
/// leading `0` must be followed by `b`/`B`, `o`, `x`/`X` or end-of-string;
/// `0` followed by another digit is rejected.
fn check_base(s: &str) -> Result<(usize, usize), NatError> {
let (mut i, mut itr) = (0, s.chars());
while let Some(c) = itr.next() {
if c != '+' {
if c != ' ' {
return if c >= '0' && c <= '9' {
match itr.next() {
Some(x) => {
if c == '0' {
if x == 'o' {
Ok((i+2,8))
} else if x == 'b' || x == 'B' {
Ok((i+2, 2))
} else if x == 'x' || x == 'X' {
Ok((i+2, 16))
} else {
Err(NatError::new(BeginWithIllegalChar, format!("begin with illegal char {}{}", c,x)))
}
} else {
Ok((i, 10))
}
},
None => Ok((i, 10)),
}
} else {
Err(NatError::new(BeginWithIllegalChar, format!("begin with illegal char {}", c)))
};
}
}
// '+' and ' ' prefix characters are skipped
i += 1;
}
if s.len() > 0 {
Err(NatError::new(BeginWithIllegalChar, "all char is +"))
} else {
Err(NatError::new(BeginWithIllegalChar, "empty string"))
}
}
/// Parse a base-10 digit string (radix prefix already stripped by
/// `check_base`). Empty input yields NaN; all-zero input yields 0.
fn from_dec_str(s: &str) -> Result<Self, NatError> {
if s.is_empty() {return Ok(Nat::from(Vec::<u32>::new()));}
let s = s.trim_start_matches('0');
if s.is_empty() { return Ok(Nat::from(0u32));}
const ZERO: u32 = '0' as u32;
let mut v = Vec::with_capacity(s.len());
for c in s.chars() {
if !c.is_ascii_digit() {
return Err(NatError::new(IllegalCharEncounter, format!("illegal char {}", c)));
}
v.push((c as u32) - ZERO);
}
let mut nat = Nat::from(0u32);
v.iter().for_each(|&ele| {
// nat = nat * 10 + digit, computed as nat*2 + nat*8
// NOTE(review): relies on `<<` producing an independent value even
// though `clone()` shares the buffer — confirm against the Shl impl.
let tmp = nat.clone() << 1;
nat <<= 3;
nat += tmp;
nat += Nat::from(ele);
});
Ok(nat)
}
/// Parse a binary digit string into limbs, least significant bit first.
fn from_bin_str(s: &str) -> Result<Self, NatError> {
let mut tmp = 0u32;
let mut v = Vec::with_capacity(s.len() >> 5);
for (i, c) in s.chars().rev().enumerate() {
let x = if c == '0' {0} else if c == '1' {1} else {
return Err(NatError::new(IllegalCharEncounter, format!("illegal char {}", c)));
};
tmp += x << (i & 31);
if (i & 31) == 31 {
// a full 32-bit limb has been assembled
v.push(tmp);
tmp = 0;
}
}
// push the final (possibly zero) partial limb; non-empty input always
// produces at least one limb
if tmp > 0{v.push(tmp);} else {if !s.is_empty() {v.push(tmp);}}
Ok(Nat::from(v))
}
/// Parse an octal digit string; each digit is 3 bits, so digits can straddle
/// a limb boundary and are split between two limbs when they do.
fn from_oct_str(s: &str) -> Result<Self, NatError> {
const ZERO: u32 = '0' as u32;
let (mut i, mut tmp) = (0, 0);
let mut v = Vec::with_capacity(s.len() / 10);
for c in s.chars().rev() {
if c >= '0' && c <= '7' {
let n = (c as u32) - ZERO;
if (i + 3) >= 32 {
// low bits of the digit finish this limb, high bits seed the next
tmp += (((1 << (32 - i)) - 1) & n) << i;
v.push(tmp);
tmp = n >> (32 - i);
i = (i + 3) - 32;
} else {
tmp += n << i;
i += 3;
}
} else {
return Err(NatError::new(IllegalCharEncounter, format!("illegal char {}", c)));
}
}
if tmp > 0 {v.push(tmp);} else {if !s.is_empty() {v.push(tmp);}}
Ok(Nat::from(v))
}
/// Parse a hexadecimal digit string (both letter cases); 8 digits per limb.
fn from_hex_str(s: &str) -> Result<Self, NatError> {
const ZERO: u32 = '0' as u32;
const LA: u32 = 'a' as u32;
const BA: u32 = 'A' as u32;
let mut tmp = 0;
let mut v = Vec::with_capacity(s.len() >> 3);
for (i, c) in s.chars().rev().enumerate() {
let n = if c >= '0' && c <= '9' {(c as u32) - ZERO}
else if c >= 'a' && c <= 'f' {((c as u32) - LA) + 10}
else if c >= 'A' && c <= 'F' {((c as u32) - BA) + 10}
else {return Err(NatError::new(IllegalCharEncounter, format!("illegal char {}", c)))};
tmp += n << ((i & 7) << 2);
if (i & 7) == 7 {
v.push(tmp);
tmp = 0;
}
}
if tmp > 0 {v.push(tmp);} else {if !s.is_empty() {v.push(tmp);}}
Ok(Nat::from(v))
}
/// Subtraction that also reports a sign flag from `sub_inner_with_sign`
/// (defined elsewhere in this module — presumably "borrow occurred", i.e.
/// `rhs > self`; confirm against that implementation). NaN operands yield
/// `(Nat::default(), false)`.
pub fn sub_with_sign(&self, rhs: Self) -> (Nat, bool) {
if self.is_nan() || rhs.is_nan() {
(Nat::default(), false)
} else {
let mut nat = self.deep_clone();
let x = nat.sub_inner_with_sign(&rhs);
(nat, x)
}
}
/// In-place variant of [`Nat::sub_with_sign`]; NaN operands clear `self`
/// to NaN and return false.
pub fn sub_assign_with_sign(&mut self, rhs: Self) -> bool {
if self.is_nan() || rhs.is_nan() {
self.clear();
false
} else {
self.sub_inner_with_sign(&rhs)
}
}
/// Magnitude comparison assuming both operands are trimmed: more limbs wins,
/// otherwise compare limbs lexicographically from the most significant end.
pub(super) fn partial_cmp_inner(&self, rhs: &Self) -> Ordering {
    match self.num().cmp(&rhs.num()) {
        Ordering::Equal => {
            for (a, b) in self.iter().rev().zip(rhs.iter().rev()) {
                match a.cmp(b) {
                    Ordering::Equal => continue,
                    non_eq => return non_eq,
                }
            }
            Ordering::Equal
        }
        non_eq => non_eq,
    }
}
/// In-place left shift by `rhs` bits: first the sub-limb shift (carrying
/// bits upward), then whole-limb insertion via resize + rotate.
pub(super) fn shl_inner(&mut self, rhs: &usize) {
let rhs = *rhs;
let (num, nom) = (rhs >> 5, rhs & 31);
// NOTE(review): `v` and `self.iter_mut()` below alias the same buffer
// through the Cell pointer; they are never used simultaneously.
let v = self.as_mut_vec();
if nom > 0 {
let (nom_c, mut pre) = (32 - nom, 0);
self.iter_mut().for_each(|a| {
let tmp = pre;
pre = (*a) >> nom_c;
*a = ((*a) << nom) | tmp;
});
// bits shifted out of the top limb become a new limb
if pre > 0 {v.push(pre);}
}
if num > 0 {
// append `num` zero limbs and rotate them down to the bottom
v.resize(v.len() + num, 0);
v.rotate_right(num);
}
}
/// In-place logical right shift by `rhs` bits. Shifting out everything
/// leaves the canonical zero `[0]`; otherwise at most one zeroed top limb
/// is trimmed, keeping at least one limb.
///
/// Fixes two defects in the previous version:
/// 1. For whole-limb shifts (`rhs` a multiple of 32) it truncated the HIGH
///    limbs and kept the low ones (e.g. `[a0,a1,a2] >> 32` gave `[a0,a1]`
///    instead of `[a1,a2]`).
/// 2. For combined limb+bit shifts whose new top limb was zero, a stale limb
///    was left in place (e.g. `[7,5,1] >> 33` gave `[0x80000002, 5]` instead
///    of `[0x80000002]`).
/// Call sites using `shr_inner(&1)` were unaffected; larger shifts
/// (e.g. `s.shr_inner(&r)` in the Lucas test) hit the broken paths.
pub(super) fn shr_inner(&mut self, rhs: &usize) {
    let rhs = *rhs;
    let v = self.as_mut_vec();
    let (num, nom) = (rhs >> 5, (rhs & 31) as u32);
    if num >= v.len() {
        // the entire value is shifted out
        v.clear();
        v.push(0);
        return;
    }
    // drop the `num` least significant limbs, moving the high limbs down
    v.drain(..num);
    if nom > 0 {
        let nom_c = 32 - nom; // in 1..=31, so the shifts below never overflow
        let mut i = 0;
        while i + 1 < v.len() {
            // each limb takes its own high bits plus the next limb's low bits
            v[i] = (v[i] >> nom) | (v[i + 1] << nom_c);
            i += 1;
        }
        if let Some(last) = v.last_mut() {
            *last >>= nom;
        }
        // drop a zeroed top limb, but keep at least one limb
        if v.len() > 1 && *v.last().unwrap() == 0 {
            v.pop();
        }
    }
}
#[inline]
/// Overwrite `self` with the single limb `lhs` shifted left by `rhs` bits,
/// i.e. `rhs >> 5` zero limbs followed by `lhs << (rhs & 31)`.
pub(super) fn u32_shl(&mut self, lhs: u32, rhs: usize) {
    let (words, bits) = (rhs >> 5, rhs & 31);
    let v = self.as_mut_vec();
    v.clear();
    v.extend(std::iter::repeat(0).take(words));
    v.push(lhs << bits);
}
/// In-place truncating division `self = self / rhs` via shift-and-subtract.
/// Panics if `rhs` is zero.
pub(super) fn div_inner(&mut self, rhs: &Self) {
assert_ne!(rhs, &0u32, "The divisor must not be 0");
// same shared buffer => dividing a value by itself
if self.nat.as_ptr() == rhs.nat.as_ptr() {
self.clear();
self.as_mut_vec().push(1);
return;
}
let mut sc = self.deep_clone();
self.clear();
if sc.partial_cmp_inner(rhs) == Ordering::Less {
self.as_mut_vec().push(0);
return;
}
let (dividend_len, divisor_len) = (sc.bits_len(), rhs.bits_len());
// equal bit lengths with dividend >= divisor means the quotient is exactly 1
if dividend_len == divisor_len {
self.as_mut_vec().push(1);
return;
}
let mut one = Nat::from(1u32);
let mut den = Nat::with_capacity(rhs.num());
self.as_mut_vec().push(0);
loop {
if !(sc.partial_cmp_inner(rhs) == Ordering::Less) {
den.clear();
// align the divisor to the dividend's top bit, backing off one
// bit at a time if the shifted divisor overshoots
let mut shift = sc.bits_len() - divisor_len;
den.as_mut_vec().extend_from_slice(rhs.as_slice());
den.shl_inner(&shift);
while den.partial_cmp_inner(&sc) == Ordering::Greater {
den.shr_inner(&1);
shift -= 1;
}
sc.sub_inner(&den);
// accumulate quotient bit 2^shift
one.u32_shl(1, shift);
self.add_inner(&one);
} else {
break;
}
}
}
/// In-place remainder `self = self % rhs` via shift-and-subtract.
/// Panics if `rhs` is zero.
pub(super) fn rem_inner(&mut self, rhs: &Self) {
assert_ne!(rhs, &0u32, "The modulus must not be 0");
if self.partial_cmp_inner(rhs) == Ordering::Less {
return;
}
let divisor_len = rhs.bits_len();
let mut den = Nat::with_capacity(rhs.num());
loop {
if self.partial_cmp_inner(rhs) == Ordering::Less {
break;
} else {
den.clear();
// align the divisor just below self's top bit, then subtract
let shift = self.bits_len() - divisor_len;
den.as_mut_vec().extend_from_slice(rhs.as_slice());
den.shl_inner(&shift);
if self.partial_cmp_inner(&den) == Ordering::Less {
den.shr_inner(&1);
}
self.sub_inner(&den);
}
}
self.trim_head_zero();
}
/// In-place `self &= rhs`; the result is truncated to the shorter operand.
pub(super) fn bitand_inner(&mut self, rhs: &Self) {
self.iter_mut().zip(rhs.iter()).for_each(|(a,&b)| {
*a &= b;
});
self.as_mut_vec().truncate(std::cmp::min(self.num(), rhs.num()));
self.trim_head_zero();
}
/// In-place `self |= rhs`; extra limbs of `rhs` are appended verbatim.
pub(super) fn bitor_inner(&mut self, rhs: &Self) {
self.iter_mut().zip(rhs.iter()).for_each(|(a, &b)| {
*a |= b;
});
let (mi, ma) = (self.num(), rhs.num());
if mi < ma {
let v = self.as_mut_vec();
rhs.iter().skip(mi).for_each(|&b| {
v.push(b);
});
}
}
/// In-place `self ^= rhs`; extra limbs of `rhs` are appended verbatim,
/// then the result is normalized (XOR can zero the top limbs).
pub(super) fn bitxor_inner(&mut self, rhs: &Self) {
self.iter_mut().zip(rhs.iter()).for_each(|(a, &b)| {
*a ^= b;
});
let (mi, ma) = (self.num(), rhs.num());
if mi < ma {
let v = self.as_mut_vec();
rhs.iter().skip(mi).for_each(|&b| {
v.push(b);
});
}
self.trim_head_zero();
}
/// Multiply `self` in place by a single 32-bit limb, carrying between limbs.
pub(super) fn mul_inner_basic(&mut self, rhs: &u32) {
    let mut pre = 0;
    const MASK: u64 = u32::MAX as u64;
    let rhs = *rhs as u64;
    self.iter_mut().for_each(|a| {
        let x = (*a as u64) * rhs;
        let val = (x & MASK) + pre;
        *a = (val & MASK) as u32;
        // carry <= 1 + (2^32 - 2), so it always fits a single limb
        pre = (val >> 32) + (x >> 32);
    });
    if pre > MASK {
        // defensive: unreachable given the bound above, kept for safety
        self.as_mut_vec().push((pre & MASK) as u32);
        self.as_mut_vec().push((pre >> 32) as u32);
    } else {
        self.as_mut_vec().push(pre as u32);
    }
    // Fix: the unconditional carry push above left a most-significant zero
    // limb whenever there was no final carry (e.g. 1 * 1 -> [1, 0]), which
    // breaks length-based comparisons such as `partial_cmp_inner`; normalize
    // here like the other multiplication paths do.
    self.trim_head_zero();
}
/// In-place division of `self` by a single non-zero 32-bit limb, using
/// schoolbook long division from the most significant limb down.
/// Panics if `rhs` is zero; NaN stays NaN.
pub(super) fn div_inner_basic(&mut self, rhs: &u32) {
    assert_ne!(rhs, &0, "divisor cannot be the 0");
    if self.num() <= 1 {
        // zero or one limb: divide it directly (no-op for NaN)
        if let Some(x) = self.as_mut_vec().last_mut() {
            *x = *x / *rhs;
        }
        return;
    }
    let rhs = (*rhs) as u64;
    let mut pre = 0u64; // running remainder, always < rhs <= u32::MAX
    self.iter_mut().rev().for_each(|x| {
        let val = (pre << 32) + (*x as u64);
        *x = (val / rhs) as u32;
        pre = val % rhs;
    });
    // The previous version also counted iterations and performed
    // rotate_right(0)/truncate(len) afterwards — both no-ops, since the
    // loop always visits every limb; only normalization is needed.
    self.trim_head_zero();
}
/// In-place remainder of `self` by a single non-zero 32-bit limb.
/// Panics if `rhs` is zero; NaN stays NaN.
pub(super) fn rem_inner_basic(&mut self, rhs: &u32) {
    assert_ne!(rhs, &0, "modulus cannot be the 0");
    if self.num() <= 1 {
        // zero or one limb: reduce it directly (no-op for NaN)
        if let Some(x) = self.as_mut_vec().last_mut() {
            *x %= *rhs;
        }
        return;
    }
    let d = (*rhs) as u64;
    // classic Horner scan from the most significant limb down; the running
    // remainder is always < d <= u32::MAX, so (acc << 32) never overflows
    let r = self
        .iter()
        .rev()
        .fold(0u64, |acc, &limb| ((acc << 32) + (limb as u64)) % d);
    self.clear();
    self.as_mut_vec().push(r as u32);
}
/// Bitwise NOT of the limbs, masked to `bits_len()` bits, as a raw vector.
pub(super) fn not_inner(&self) -> Vec<u32> {
let mut v = Vec::with_capacity(self.num());
// significant bit count of the top limb (0 means the top limb is full)
let bitlen = self.bits_len() & 31;
self.iter().for_each(|&a| {
v.push(!a);
});
if bitlen > 0 {
match v.last_mut() {
Some(x) => {
// keep only the bits inside the original bit length
*x = (*x) & ((1 << bitlen) - 1);
},
None => {},
}
}
Self::trim_head_zero_(&mut v);
v
}
#[inline]
pub(super) fn nan() -> Nat {
// the NaN sentinel: an empty limb vector
Nat::from(Vec::<u32>::new())
}
#[inline]
pub(super) fn with_capacity(cap: usize) -> Nat {
Nat {
nat: Rc::new(Cell::new(Vec::with_capacity(cap))),
}
}
/// Unchecked bit test; hits `unreachable!()` (panics) if `idx` is outside
/// the limb range, so callers must validate against `bits_len()` first.
pub(super) fn is_set_bit_(&self, idx: usize) -> bool {
let (num, rem) = (idx >> 5, (idx & 31));
match self.iter().nth(num) {
Some(&x) => {
(x & (1 << rem)) != 0
},
None => unreachable!(),
}
}
/// Checked bit test: `None` when `bit_idx` is beyond the bit length.
pub fn is_set_bit(&self, bit_idx: usize) -> Option<bool> {
if bit_idx >= self.bits_len() {None}
else {Some(self.is_set_bit_(bit_idx))}
}
/// `self ** b` by binary exponentiation; NaN if either operand is NaN.
pub fn pow(&self, b: Nat) -> Nat {
if self.is_nan() || b.is_nan() {return Nat::nan();}
let mut lhs = self.deep_clone();
lhs.pow_inner(b);
lhs
}
fn pow_inner(&mut self, b: Nat) {
if &*self == &0u32 {
// 0**0 == 1 by convention; 0**b == 0 otherwise
if b == 0u32 {self.clear(); self.as_mut_vec().push(1);}
else {self.clear(); self.as_mut_vec().push(0);}
}
else if b.clone() == 0u32 {
self.clear();
self.as_mut_vec().push(1);
}
else {
// guard against `b` sharing `self`'s buffer
let b = if b.as_vec().as_ptr() == self.as_vec().as_ptr() {
b.deep_clone()
} else {
b
};
let blen = b.bits_len();
// pre holds self^(2^i); self accumulates the product over b's set bits
let mut pre = self.deep_clone();
if !b.is_set_bit_(0) {
self.clear();
self.as_mut_vec().push(1u32);
}
(1..blen).for_each(|i| {
pre.mul_inner(&pre.clone());
if b.is_set_bit_(i) {
self.mul_inner(&pre);
}
});
}
}
/// `self ** b mod n`; `n == 0` degenerates to plain `pow`, `n == 1` to 0.
pub fn pow_mod(&self, b: Nat, n: Nat) -> Nat {
if self.is_nan() || b.is_nan() || n.is_nan() { return Nat::nan(); }
let mut a = self.deep_clone();
a.pow_mod_inner(b, n);
a
}
fn pow_mod_inner(&mut self, b: Nat, n: Nat) {
if n == 0u32 {
self.pow_inner(b);
} else if n == 1u32 {
self.clear();
self.as_mut_vec().push(0);
} else {
// guard against argument buffers sharing `self`'s buffer
let n = if n.as_vec().as_ptr() == self.as_vec().as_ptr() {n.deep_clone()} else {n};
let b = if b.as_vec().as_ptr() == self.as_vec().as_ptr() {b.deep_clone()} else {b};
// left-to-right square-and-multiply, reducing mod n at every step
let mut sm = self.deep_clone();
self.clear();
self.as_mut_vec().push(1);
sm.rem_inner(&n);
(0..b.bits_len()).rev().for_each(|i| {
self.mul_inner(&self.clone());
self.rem_inner(&n);
if b.is_set_bit_(i) {
self.mul_inner(&sm);
self.rem_inner(&n);
}
});
}
}
/// Fill `n` with a uniformly random value in `[0, self)` by rejection
/// sampling: draw `num()` limbs, mask the top limb down to `bits_len()`
/// bits, and retry until the sample is below `self`.
fn random_inner<Rng: IterSource<u32>>(&self, n: &mut Nat, rng: &mut Rng) {
let bits_len = self.bits_len();
let (num, rem) = (self.num(), bits_len & 31);
let mask = if rem == 0 {u32::MAX} else {(1u32 << rem) - 1};
let mut itr = rng.iter_mut();
loop {
let v = n.as_mut_vec();
v.clear();
while v.len() < num {
match itr.next() {
Some(x) => v.push(x),
// on a source error, reset it and keep drawing
None => itr.clear_error(),
}
}
match v.last_mut() {
Some(x) => *x &= mask,
None => {},
}
if &*n< self {
break;
}
}
}
/// Random value in `[0, self)`; a NaN or zero bound yields NaN.
pub fn random<Rng: IterSource<u32>>(&self, rng: &mut Rng) -> Nat {
if self.is_nan() || self == &0u32 {
return Nat::nan();
}
let mut nat = Nat::with_capacity(self.num());
self.random_inner(&mut nat, rng);
nat
}
/// Count of trailing zero bits (for the value `[0]` this reports 32).
/// NOTE(review): "trailling" is a typo for "trailing", but the name is
/// public API and used throughout this file, so it is kept.
pub fn trailling_zeros(&self) -> usize {
    let mut total = 0usize;
    for &limb in self.iter() {
        if limb != 0 {
            // first non-zero limb decides the final partial count
            total += limb.trailing_zeros() as usize;
            return total;
        }
        total += 32;
    }
    total
}
/// One Miller-Rabin witness round: starting from `self = a^u mod n`, square
/// `t` times looking for a non-trivial square root of 1 mod n.
/// Returns true when this base proves `n` composite.
fn miller_rabin_witness(&mut self, n: &Nat, t: usize, n_m1: &Nat, buf: &mut Nat) -> bool {
let (mut xi, mut xi_m1) = (buf, self);
for _ in 1..=t {
Self::sqr_v(xi.as_mut_vec(), xi_m1.as_slice());
xi.rem_inner(n);
// xi == 1 while xi_m1 is neither 1 nor n-1: non-trivial root => composite
if *xi == 1u32 && *xi_m1 != 1u32 && (&*xi_m1) != n_m1 {
return true;
}
// swap roles so xi_m1 always holds the latest value
let tmp = xi;
xi = xi_m1;
xi_m1 = tmp;
}
// composite unless the chain ended in 1 (Fermat condition)
*xi_m1 != 1u32
}
/// `s` Miller-Rabin rounds with random bases; false => surely composite.
fn prime_validate_by_miller_rabin<Rng: IterSource<u32>>(&self, s: usize, rng: &mut Rng) -> bool {
let mut a = Nat::with_capacity(self.num());
// write self - 1 = u * 2^t with u odd
let n_m1 = self.clone() - 1u32;
let t = n_m1.trailling_zeros();
let u = n_m1.clone() >> t;
let mut buf = Nat::with_capacity(self.num());
for _ in 0..s {
self.random_inner(&mut a, rng);
let mut tmp = a.exp(&u, self);
if tmp.miller_rabin_witness(self, t, &n_m1, &mut buf) {
return false;
}
}
true
}
/// Lucas probable-prime test (companion to Miller-Rabin): search for a
/// parameter P with Jacobi(P*P - 4, n) == -1, then check the Lucas
/// V-sequence conditions for n + 1 = s * 2^r.
/// NOTE(review): structure closely mirrors the classic extra-strong Lucas
/// test (as in Go's math/big) — confirm parameter conventions there.
fn prime_validate_by_lucas(&self) -> bool {
if self.is_nan() || self == &1u32 {
return false;
}
// even numbers: only 2 is prime
if (self.as_vec()[0] & 1) == 0 {
return self == &2u32;
}
let mut p = 3;
let mut t1 = Nat::nan();
let (int_d, int_n) = (BigInt::from(1u32), BigInt::from(self.clone()));
// search for P with Jacobi(P*P - 4, n) == -1
while p <= 10000 {
int_d.get_nat().as_mut_vec()[0] = (p * p) - 4;
let j = if let Some(x) = int_d.jacobi(&int_n) {x} else {return false};
if j == -1 {
break;
}
if j == 0 {
// D and n share a factor: prime only if n is exactly p + 2
return self.as_vec().len() == 1 && self.as_vec()[0] == (p + 2);
}
if p == 40 {
// a long search suggests n may be a perfect square, which the
// Jacobi scan can never reject — test for that directly
t1 = self.sqrt();
t1 = t1.sqr();
if &t1 == self {
return false;
}
}
p += 1;
}
// n + 1 = s * 2^r with s odd
let mut s = self.clone() + 1u32;
let r = s.trailling_zeros();
s.shr_inner(&r);
let nm2 = self.clone() - 2u32;
let nat_p = Nat::from(p);
// Lucas V-sequence: vk = V(k), vk1 = V(k+1), advanced by the bits of s
let mut vk = Nat::from(2u32);
let mut vk1 = Nat::from(p);
for i in (0..=s.bits_len()).rev() {
// t1 = vk * vk1 + n - P, the shared V(2k+1) term
Self::mul_by_karatsuba(t1.as_mut_vec(), vk.as_slice(), vk1.as_slice());
t1.add_inner(self);
t1.sub_inner(&nat_p);
match s.is_set_bit(i) {
Some(true) => {
vk = t1.clone() % self.clone();
Self::sqr_v(t1.as_mut_vec(), vk1.as_slice());
t1.add_inner(&nm2);
vk1 = t1.clone() % self.clone();
},
_ => {
vk1 = t1.clone() % self.clone();
Self::sqr_v(t1.as_mut_vec(), vk.as_slice());
t1.add_inner(&nm2);
vk = t1.clone() % self.clone();
}
}
}
// U(s) == 0 congruence check, derived from the V values
if vk == 2u32 || vk == nm2 {
Self::mul_by_karatsuba(t1.as_mut_vec(), vk.as_slice(), nat_p.as_slice());
t1 -= vk1 << 1;
vk1 = t1.clone() % self.clone();
if vk1 == 0u32 {
return true;
}
}
// check V(s * 2^t) == 0 for some 0 <= t < r
for _ in 0..r.saturating_sub(1) {
if vk == 0u32 {
return true;
}
// V hitting 2 means the doubling sequence can no longer reach 0
if vk.as_vec().len() == 1 && vk.as_vec()[0] == 2 {
return false;
}
Self::sqr_v(t1.as_mut_vec(), vk.as_slice());
t1 -= 2u32;
vk = t1.clone() % self.clone();
}
return false;
}
/// Remainder of `self` modulo a single 32-bit divisor, without mutating
/// `self` (Horner scan from the most significant limb down).
fn rem_inner_u32(&self, rhs: u32) -> u32 {
    let d = rhs as u64;
    // running remainder stays < d <= u32::MAX, so the shift cannot overflow
    self.iter()
        .rev()
        .fold(0u64, |r, &limb| ((r << 32) | (limb as u64)) % d) as u32
}
/// Probabilistic primality test: exact lookup for values below 128, trial
/// division by small primes, then `n + 1` Miller-Rabin rounds plus a Lucas
/// test. False means surely composite; true means probably prime.
pub fn probably_prime_test<Rng: IterSource<u32>>(&self, n: usize, rng: &mut Rng) -> bool {
if self.is_nan() || (self == &0u32) {
return false;
}
// bit i is set exactly for the primes below 128
const PRIME_BIT_MASK: u128 = 1<<2 | 1<<3 | 1<<5 | 1<<7 |
1<<11 | 1<<13 | 1<<17 | 1<<19 | 1<<23 | 1<<29 | 1<<31 |
1<<37 | 1<<41 | 1<<43 | 1<<47 | 1<<53 | 1<<59 | 1<<61 | 1<<67 |
1<<71 | 1<<73 | 1<<79 | 1<<83 | 1<<89 | 1<<97 | 1<<101 |
1<<103 | 1<<107 | 1<<109 | 1<< 113 | 1<<127;
let x = self.as_vec()[0] as u128;
if (self.num() == 1) && (x < 128) {
return ((1<<x) & PRIME_BIT_MASK) != 0;
}
// even numbers greater than 2 are composite
if x & 0x1 == 0 {
return false;
}
// products of small primes: one multi-precision remainder per product,
// then cheap u32 mods test divisibility by each factor
const PRIMES_A: u32 = 3 * 5 * 7 * 11 * 13 * 17 * 19 * 23 * 37;
const PRIMES_B: u32 = 29 * 31 * 41 * 43 * 47 * 53;
let (ra, rb) = (self.rem_inner_u32(PRIMES_A), self.rem_inner_u32(PRIMES_B));
if ra%3u32 == 0u32 || ra%5u32 == 0u32 || ra%7u32 == 0u32 || ra%11u32 == 0u32 || ra%13u32 == 0u32
|| ra%17u32 == 0u32 || ra%19u32 == 0u32 || ra%23u32 == 0u32 || ra%37u32 == 0u32 ||
rb%29u32 == 0u32 || rb%31u32 == 0u32 || rb%41u32 == 0u32 || rb%43u32 == 0u32 || rb%47u32 == 0u32 || rb%53u32 == 0u32 {
return false
}
self.prime_validate_by_miller_rabin(n+1, rng) && self.prime_validate_by_lucas()
}
/// Integer square root by Newton's method. Values <= 1 (and NaN) are their
/// own square root; the iteration starts from 2^ceil(bits/2), which is
/// always an over-estimate, and stops when the estimate stops decreasing.
pub fn sqrt(&self) -> Nat {
    if self.is_nan() || self <= &1u32 {
        return self.deep_clone();
    }
    let mut guess = Nat::from(1u32);
    guess <<= (self.bits_len() + 1) >> 1;
    loop {
        // next = (self / guess + guess) / 2
        let mut next = self.clone() / guess.clone();
        next += guess.clone();
        next >>= 1;
        if next >= guess {
            // converged: guess is floor(sqrt(self))
            break guess;
        }
        guess = next;
    }
}
// Low-level helpers for the Karatsuba routines. All pointer arithmetic
// assumes the caller allocated the lengths documented at each call site.
/// z[0..n] += x[0..n], propagating a final carry into z[n..n + n/2].
unsafe fn karatsuba_add(z: *mut u32, x: *const u32, n: usize) {
let c = add_vv_inner(z, z, x, n);
if c != 0 {
add_vw_inner(z.add(n), z.add(n), c, n >> 1);
}
}
/// z[0..n] -= x[0..n], propagating a final borrow into z[n..n + n/2].
unsafe fn karatsuba_sub(z: *mut u32, x: *const u32, n: usize) {
let c = sub_vv_inner(z, z, x, n);
if c != 0 {
sub_vw_inner(z.add(n), z.add(n), c, n >> 1);
}
}
/// Schoolbook multiplication z[0..xlen+ylen] = x * y. `z` must hold
/// xlen + ylen limbs and must not overlap `x` or `y`.
unsafe fn basic_mul(z: *mut u32, x: *const u32, y: *const u32, xlen: usize, ylen: usize) {
(0..(xlen + ylen)).for_each(|i| {z.add(i).write(0)});
(0..ylen).for_each(|i| {
let d = y.add(i).read();
if d != 0 {
z.add(xlen + i).write(add_mul_vvw_inner(z.add(i), x, d, xlen));
}
});
}
/// z = x * y + r for a single-limb multiplier y and addend r.
fn mul_add_ww(z: &mut Vec<u32>, x: &[u32], y: u32, r: u32) {
let m = x.len();
z.clear();
if m == 0 || y == 0 {
z.push(r);
return;
} else {
z.resize(m + 1, 0);
z[m] = mul_add_vww(&mut z[0..m], x, y, r);
Self::trim_head_zero_(z);
}
}
/// Public entry point for Karatsuba multiplication: returns `self * b`.
pub fn mul_karatsuba(&self, b: &Nat) -> Nat {
let mut v = Vec::with_capacity(1);
Self::mul_by_karatsuba(&mut v, self.as_slice(), b.as_slice());
Nat::from(v)
}
/// z = x * y. Dispatches on size: single-limb multiplier -> `mul_add_ww`,
/// small operands -> schoolbook, otherwise Karatsuba on the aligned k-limb
/// prefixes with the remaining strips folded in via recursion + `add_at`.
fn mul_by_karatsuba(z: &mut Vec<u32>, x: &[u32], y: &[u32]) {
let (m, n) = (x.len(), y.len());
z.clear();
if m < n {
// keep the longer operand first
Self::mul_by_karatsuba(z, y, x);
return;
} else if m == 0 || n == 0 {
return;
} else if n == 1 {
Self::mul_add_ww(z, x, y[0], 0);
return;
}
if n < KARATSUBA_THRESHOLD {
z.resize(m + n, 0);
unsafe {
Self::basic_mul(z.as_mut_ptr(), x.as_ptr(), y.as_ptr(), x.len(), y.len());
}
Self::trim_head_zero_(z);
return;
}
// k: the largest Karatsuba-friendly chunk size <= n
let k = Self::karatsuba_len(n, KARATSUBA_THRESHOLD);
let (x0, y0) = (&x[0..k], &y[0..k]);
// the recursion needs up to 6*k limbs of product + scratch space
z.resize(std::cmp::max(6*k, m+n), 0);
unsafe {
Self::karatsuba(z.as_mut_ptr(), x0.as_ptr(), y0.as_ptr(), x0.len(), y0.len(), z.len());
}
z.truncate(m+n);
// everything above the x0*y0 product is leftover scratch; zero it
z.iter_mut().skip(k<<1).for_each(|e| {*e = 0});
if k < n || m != n {
let mut t = Vec::with_capacity(k * 3);
// strip trailing zero limbs so the recursive products stay minimal
let mut zero_count = 0;
for &ele in x0.iter().rev() { if ele != 0 {break;} zero_count += 1};
let x0 = &x0[..(x0.len() - zero_count)];
let y1 = &y[k..];
// cross term x0 * y1 lands at limb offset k
Self::mul_by_karatsuba(&mut t, x0, y1);
unsafe {
Self::add_at(z.as_mut_ptr(), t.as_ptr(), k, t.len(), z.len());
}
zero_count = 0;
for &ele in y0.iter().rev() { if ele != 0 {break;} zero_count += 1};
let y0 = &y0[..(y0.len() - zero_count)];
// remaining k-wide strips of x against y0 (offset i) and y1 (offset i+k)
for i in (k..x.len()).step_by(k) {
let xi = &x[i..];
let xi = if xi.len() > k {
&xi[..k]
} else {
xi
};
zero_count = 0;
for &ele in xi.iter().rev() { if ele != 0 {break;} zero_count += 1};
let xi = &xi[..(xi.len() - zero_count)];
Self::mul_by_karatsuba(&mut t, xi, y0);
unsafe {
Self::add_at(z.as_mut_ptr(), t.as_ptr(), i, t.len(), z.len());
}
Self::mul_by_karatsuba(&mut t, xi, y1);
unsafe {
Self::add_at(z.as_mut_ptr(), t.as_ptr(), i + k, t.len(), z.len());
}
}
}
Self::trim_head_zero_(z);
}
/// Recursive Karatsuba core on raw limb pointers. Requires `ylen` even and
/// above the threshold (otherwise it falls back to `basic_mul`); `z` must
/// provide `zlen` limbs covering both the product and the scratch regions
/// used at offsets 2n, 2n + n/2, 3n and 4n.
unsafe fn karatsuba(z: *mut u32, x: *const u32, y: *const u32, xlen: usize, ylen: usize, zlen: usize) {
let n = ylen;
if (n & 1) != 0 || n < KARATSUBA_THRESHOLD || n < 2 {
Self::basic_mul(z, x, y, xlen, ylen);
return;
}
// split both operands at n/2; z receives x0*y0 and x1*y1 back-to-back
let n2 = n >> 1; let (x1, x0, x0len, x1len) = (x.add(n2), x, n2, xlen - n2); let (y1, y0, y0len, y1len) = (y.add(n2), y, n2, ylen - n2);
Self::karatsuba(z, x0, y0, x0len, y0len, zlen); Self::karatsuba(z.add(n), x1, y1, x1len, y1len, zlen.saturating_sub(n));
// xd = |x1 - x0| and yd = |y0 - y1| in scratch, s tracking the joint sign
let mut s = 1; let (xd, xdlen) = (z.add(n << 1), n2);
let tmp = std::cmp::min(xdlen, x1len);
if sub_vv_inner(xd, x1, x0, tmp) != 0 {
s = -s;
sub_vv_inner(xd, x0, x1, tmp);
}
let (yd, ydlen) = (z.add((n<<1) + n2), n.saturating_sub(n2));
let tmp = std::cmp::min(ydlen, std::cmp::min(y0len, y1len));
if sub_vv_inner(yd, y0, y1, tmp) != 0 {
s = -s;
sub_vv_inner(yd, y1, y0, tmp);
}
// p = xd * yd; r = a saved copy of x0*y0 and x1*y1 for the middle term
let (p, plen) = (z.add((n<<1)+n), zlen - ((n<<1)+n));
Self::karatsuba(p, xd, yd, xdlen, ydlen, plen);
let (r, rlen) = (z.add(n<<2), zlen - (n<<2));
let tmp = std::cmp::min(n<<1, rlen);
for i in 0..tmp {
r.add(i).write(z.add(i).read());
}
// middle term x0*y0 + x1*y1 +/- xd*yd accumulated at offset n/2
Self::karatsuba_add(z.add(n2), r, n);
Self::karatsuba_add(z.add(n2), r.add(n), n);
if s > 0 {
Self::karatsuba_add(z.add(n2), p, n);
} else {
Self::karatsuba_sub(z.add(n2), p, n);
}
}
/// Largest "Karatsuba-friendly" length not exceeding `n`: repeatedly halve
/// `n` until it fits under `threshold`, then scale back up by the same
/// power of two. The result therefore divides evenly through that many
/// halvings in the recursion.
fn karatsuba_len(n: usize, threshold: usize) -> usize {
    let mut base = n;
    let mut halvings = 0u32;
    while base > threshold {
        base >>= 1;
        halvings += 1;
    }
    base << halvings
}
/// Schoolbook squaring z[0..2*xlen] = x^2, exploiting symmetry: diagonal
/// terms x[i]^2 go straight into z, off-diagonal products accumulate in a
/// scratch vector which is then doubled and added. Requires `xlen >= 1`.
unsafe fn basic_sqr(z: *mut u32, x: *const u32, xlen: usize, zlen: usize) {
let mut t = Vec::with_capacity(xlen << 1);
t.resize(xlen << 1, 0);
let (z1, z0) = mul_ww(x.read(), x.read());
z.write(z0);
z.add(1).write(z1);
let t_ptr = t.as_mut_ptr();
for i in 1..xlen {
let d = x.add(i).read();
// diagonal term x[i]^2
let (z1, z0) = mul_ww(d, d);
let idx = i << 1;
z.add(idx).write(z0);
z.add(idx + 1).write(z1);
// off-diagonal terms x[0..i] * x[i] accumulate in t at offset i
let z0 = add_mul_vvw_inner(t.as_mut_ptr().add(i), x, d, i);
t_ptr.add(idx).write(z0);
}
// double the off-diagonal sum, then fold it into the diagonal
let tmp = (xlen << 1) - 1;
t[tmp] = shl_vu_inner(t.as_mut_ptr().add(1), t.as_mut_ptr().add(1), 1, tmp-1);
add_vv_inner(z, z, t.as_ptr(), std::cmp::min(zlen, xlen << 1));
}
/// Karatsuba squaring analogue of `karatsuba`; requires `xlen` even and
/// above the threshold (otherwise falls back to `basic_sqr`), with scratch
/// space in `z` past offset 2n like the multiplication version.
unsafe fn karatsuba_sqr(z: *mut u32, x: *const u32, xlen: usize, zlen: usize) {
let n = xlen;
if (n & 1) != 0 || n < KARATSUBA_THRESHOLD || n < 2 {
Self::basic_sqr(z, x, xlen, zlen);
return;
}
let n2 = n >> 1;
let (x1, x0, x1len, x0len) = (x.add(n2), x, n - n2, n2);
Self::karatsuba_sqr(z, x0, x0len, zlen);
Self::karatsuba_sqr(z.add(n), x1, x1len, zlen - n);
// xd = |x1 - x0|; its square is always subtracted, so no sign tracking
let (xd, xdlen) = (z.add(n<<1), n2);
let tmp = std::cmp::min(xdlen, std::cmp::min(x0len, x1len));
if sub_vv_inner(xd, x1, x0, tmp) != 0 {
sub_vv_inner(xd, x0, x1, tmp);
}
let (p, plen) = (z.add((n<<1) + n), zlen - ((n<<1) + n));
Self::karatsuba_sqr(p, xd, xdlen, plen);
let (r, rlen) = (z.add(n<<2), zlen - (n<<2));
let tmp = std::cmp::min(rlen, n<<1);
(0..tmp).for_each(|i| {
r.add(i).write(z.add(i).read());
});
// middle term x0^2 + x1^2 - xd^2 accumulated at offset n/2
Self::karatsuba_add(z.add(n2), r, n);
Self::karatsuba_add(z.add(n2), r.add(n), n);
Self::karatsuba_sub(z.add(n2), p, n); }
/// z[i..] += x[0..xlen], carrying into the limbs above the added window.
unsafe fn add_at(z: *mut u32, x: *const u32, i: usize, xlen: usize, zlen: usize) {
if xlen > 0 {
let c = add_vv_inner(z.add(i), z.add(i), x, xlen);
if c != 0 {
let j = i + xlen;
if j < zlen {
add_vw_inner(z.add(j), z.add(j), c, zlen - j);
}
}
}
}
/// `self * self` as a new value.
pub fn sqr(&self) -> Nat {
let mut z = Vec::new();
Self::sqr_v(&mut z, self.as_slice());
Nat::from(z)
}
/// z = x^2, dispatching on operand size like `mul_by_karatsuba`: single
/// limb, schoolbook multiply, dedicated squaring, then Karatsuba squaring
/// on the aligned k-limb prefix with cross terms folded in via `add_at`.
pub(super) fn sqr_v(z: &mut Vec<u32>, x: &[u32]) {
z.clear();
if x.len() == 0 {
return;
} else if x.len() == 1 {
let d = x[0];
let (z1, z0) = mul_ww(d, d);
z.push(z0);
z.push(z1);
Self::trim_head_zero_(z);
return;
}
if x.len() < BASIC_SQRT_HRESHOLD {
z.resize(x.len() << 1, 0);
unsafe {
Self::basic_mul(z.as_mut_ptr(), x.as_ptr(), x.as_ptr(), x.len(), x.len());
}
Self::trim_head_zero_(z);
return;
}
if x.len() < KARATSUBA_SQRT_HRESHOLD {
z.resize(x.len() << 1, 0);
unsafe {
Self::basic_sqr(z.as_mut_ptr(), x.as_ptr(), x.len(), z.len());
}
Self::trim_head_zero_(z);
return;
}
let k = Self::karatsuba_len(x.len(), KARATSUBA_SQRT_HRESHOLD);
let x0 = &x[0..k];
// the recursion needs up to 6*k limbs of product + scratch space
z.resize(std::cmp::max(k*6, x.len() * 2), 0u32);
unsafe {
Self::karatsuba_sqr(z.as_mut_ptr(), x0.as_ptr(), x0.len(), z.len());
}
z.truncate(x.len() << 1);
// limbs above x0^2 are leftover scratch from the recursion; zero them
z.iter_mut().skip(k<<1).for_each(|e| {*e = 0});
if k < x.len() {
let mut t = Vec::with_capacity(k << 1);
// strip trailing zero limbs so the recursive products stay minimal
let mut zero_count = 0;
for &ele in x0.iter().rev() {if ele != 0 {break;} zero_count += 1;}
let x0 = &x0[..(x0.len() - zero_count)];
let x1 = &x[k..];
// cross term 2 * x0 * x1, added twice at limb offset k
Self::mul_by_karatsuba(&mut t, x0, x1);
unsafe {
Self::add_at(z.as_mut_ptr(), t.as_ptr(), k, t.len(), z.len());
Self::add_at(z.as_mut_ptr(), t.as_ptr(), k, t.len(), z.len()); }
// high square x1^2 at limb offset 2k
Self::sqr_v(&mut t, x1);
unsafe {
Self::add_at(z.as_mut_ptr(), t.as_ptr(), k << 1, t.len(), z.len());
}
}
Self::trim_head_zero_(z);
}
/// Montgomery product: z = x * y * R^{-1} mod m with R = 2^(32 n) and
/// k = -m^{-1} mod 2^32. All operands must be exactly `n` limbs (panics
/// otherwise); `z` is left as exactly `n` limbs.
fn montgomery(z: &mut Vec<u32>, x: &Vec<u32>, y: &Vec<u32>, m: &Vec<u32>, k: u32, n: usize) {
if x.len() != n || y.len() != n || m.len() != n {
panic!("mismatched montgomery number lengths");
}
z.clear();
z.resize(n<<1, 0);
let mut c = 0u32;
for i in 0..n {
let d = y[i];
// add x*d, then t*m (chosen so limb i becomes zero), tracking carries
let c2 = add_mul_vvw(&mut z[i..(n+i)], x, d);
let t = z[i].wrapping_mul(k);
let c3 = add_mul_vvw(&mut z[i..(n+i)], m, t);
let cx = c.wrapping_add(c2);
let cy = cx.wrapping_add(c3);
z[n + i] = cy;
// wrapping adds overflowed iff the result went below an addend
c = if cx < c2 || cy < c3 {1} else {0};
}
if c != 0 {
// result overflowed n limbs: subtract m once while shifting down
unsafe {
let z0 = z.as_mut_ptr();
let z1 = z0.add(n);
sub_vv_inner(z0, z1, m.as_ptr(), n);
}
} else {
// shift the upper half down into place
for i in 0..n {
z[i] = z[n+i];
}
}
z.truncate(n);
}
/// Modular exponentiation `z = x^y mod m` for odd `m`, using Montgomery
/// representation with fixed 4-bit windows.
/// NOTE(review): "montogomery" is a typo for "montgomery"; the name is
/// referenced elsewhere in this file, so it is kept unchanged.
fn exp_montogomery(z: &mut Nat, x: &Nat, y: &Nat, m: &Nat) {
let num_words = m.as_vec().len();
// reduce (or pad) x to exactly num_words limbs
let x = if x.as_vec().len() > num_words {
x.clone() % m.clone()
} else {
x.deep_clone()
};
if x.as_vec().len() < num_words {
x.as_mut_vec().resize(num_words, 0);
}
// k0 = -m^{-1} mod 2^32, computed by Newton-style doubling on the low limb
let (mut i, mut k0, mut t) = (1, 2u32.wrapping_sub(m.as_vec()[0]), m.as_vec()[0].wrapping_sub(1));
while i < 32 {
t = t.wrapping_mul(t);
k0 = k0.wrapping_mul(t.wrapping_add(1));
i <<= 1;
}
k0 = k0.wrapping_neg();
// rr = R^2 mod m, used to convert operands into Montgomery form
let mut rr = Nat::from(1u32);
rr <<= num_words << 6;
rr %= m.clone();
if rr.as_vec().len() < num_words {
rr.as_mut_vec().resize(num_words, 0);
}
let one = Nat::from(1u32);
one.as_mut_vec().resize(num_words, 0);
let n:usize = 4;
// powers[i] = x^i in Montgomery form, for the 4-bit windows
let powers = [
Nat::from(0u32), Nat::from(0u32),Nat::from(0u32),Nat::from(0u32),
Nat::from(0u32), Nat::from(0u32),Nat::from(0u32),Nat::from(0u32),
Nat::from(0u32), Nat::from(0u32),Nat::from(0u32),Nat::from(0u32),
Nat::from(0u32), Nat::from(0u32),Nat::from(0u32),Nat::from(0u32),
];
Self::montgomery(powers[0].as_mut_vec(), one.as_vec(), rr.as_vec(), m.as_vec(), k0, num_words);
Self::montgomery(powers[1].as_mut_vec(), x.as_vec(), rr.as_vec(), m.as_vec(), k0, num_words);
(2..(1 << n)).for_each(|i| {
Self::montgomery(powers[i].as_mut_vec(), powers[i-1].as_vec(), powers[1].as_vec(), m.as_vec(), k0, num_words);
});
let zz = &mut rr;
z.clear();
z.as_mut_vec().extend(powers[0].as_vec().iter());
zz.clear();
zz.as_mut_vec().resize(num_words, 0);
// scan y from the most significant limb, 4 bits at a time: four
// squarings then one window multiply, ping-ponging tmpz/tmpzz
let (mut tmpz, mut tmpzz, mut is_switch) = (z, zz, false);
for (i, &ye) in y.iter().enumerate().rev() {
let mut yi = ye;
for j in (0..32).step_by(n) {
// skip the squarings before the very first window
if i != (y.as_vec().len() - 1) || j != 0 {
Self::montgomery(tmpzz.as_mut_vec(), tmpz.as_vec(), tmpz.as_vec(), m.as_vec(), k0, num_words);
Self::montgomery(tmpz.as_mut_vec(), tmpzz.as_vec(), tmpzz.as_vec(), m.as_vec(), k0, num_words);
Self::montgomery(tmpzz.as_mut_vec(), tmpz.as_vec(), tmpz.as_vec(), m.as_vec(), k0, num_words);
Self::montgomery(tmpz.as_mut_vec(), tmpzz.as_vec(), tmpzz.as_vec(), m.as_vec(), k0, num_words);
}
Self::montgomery(tmpzz.as_mut_vec(), tmpz.as_vec(), powers[((yi as usize) >> (32 - n))].as_vec(), m.as_vec(), k0, num_words);
let tmp = tmpz;
tmpz = tmpzz;
tmpzz = tmp;
is_switch = !is_switch;
yi <<= n;
}
}
// convert back out of Montgomery form and fully reduce below m
Self::montgomery(tmpzz.as_mut_vec(), tmpz.as_vec(), one.as_vec(), m.as_vec(), k0, num_words);
if *tmpzz >= *m {
*tmpzz -= m.clone();
if *tmpzz >= *m {
*tmpzz %= m.clone();
}
}
tmpzz.trim_head_zero();
// make sure the final value ends up in `z` (tmpz aliases z iff !is_switch)
if !is_switch {
tmpz.clear();
tmpz.as_mut_vec().extend(tmpzz.iter());
}
}
/// Modular exponentiation `o = x^y mod m` with 4-bit windows and plain
/// (non-Montgomery) reduction — used when `m` is even.
fn exp_windowed(o: &mut Nat, x: &Nat, y: &Nat, m: &Nat) {
let n = 4;
// powers[i] = x^i mod m for the 16 window values
let powers = [
Nat::from(1u32), x.deep_clone(), Nat::from(0u32),Nat::from(0u32),
Nat::from(0u32), Nat::from(0u32),Nat::from(0u32),Nat::from(0u32),
Nat::from(0u32), Nat::from(0u32),Nat::from(0u32),Nat::from(0u32),
Nat::from(0u32), Nat::from(0u32),Nat::from(0u32),Nat::from(0u32),
];
// fill even entries by squaring, odd entries by multiplying in x
for i in (2..(1<<4)).step_by(2) {
let (p2, p, p1) = (&powers[i>>1], &powers[i], &powers[i+1]);
Self::sqr_v(p.as_mut_vec(), p2.as_slice());
let r = p.clone() % m.clone();
p.as_mut_vec().clear();
p.as_mut_vec().extend(r.iter());
Self::mul_by_karatsuba(p1.as_mut_vec(), p.as_slice(), x.as_slice());
let r = p1.clone() % m.clone();
p1.as_mut_vec().clear();
p1.as_mut_vec().extend(r.iter());
}
let mut z = Nat::from(1u32);
let mut buf = Nat::with_capacity(32);
let (mut z, mut zz) = (&mut z, &mut buf);
let ylen = y.as_vec().len();
// scan y from the most significant limb, 4 bits at a time: four
// square-reduce steps, then one window multiply, ping-ponging z/zz
for (i, &ele) in y.iter().enumerate().rev() {
let mut yi = ele;
for j in (0..32).step_by(n) {
// skip the squarings before the very first window
if i != (ylen - 1) || j != 0 {
Self::sqr_v(zz.as_mut_vec(), z.as_slice());
let tmp = z; z = zz; zz = tmp;
*z %= m.clone();
Self::sqr_v(zz.as_mut_vec(), z.as_slice());
let tmp = z; z = zz; zz = tmp;
*z %= m.clone();
Self::sqr_v(zz.as_mut_vec(), z.as_slice());
let tmp = z; z = zz; zz = tmp;
*z %= m.clone();
Self::sqr_v(zz.as_mut_vec(), z.as_slice());
let tmp = z; z = zz; zz = tmp;
*z %= m.clone();
}
Self::mul_by_karatsuba(zz.as_mut_vec(), z.as_slice(), powers[(yi as usize) >> (32 - n)].as_slice());
let tmp = z; z = zz; zz = tmp;
*z %= m.clone();
yi <<= n;
}
}
z.trim_head_zero();
// copy the result out (z may point at either local buffer)
o.clear();
o.as_mut_vec().extend(z.iter());
}
/// Modular exponentiation: returns `self ^ b (mod n)`.
///
/// Special cases:
/// * any NaN operand -> NaN;
/// * n == 1 -> 0;
/// * b == 0 -> 1;
/// * b == 1 -> self mod n (a plain copy of self when n == 0).
///
/// For base > 1, a multi-limb exponent and n > 0 it dispatches to
/// Montgomery multiplication (odd modulus) or the 4-bit-window
/// algorithm (even modulus); otherwise it falls back to bitwise
/// square-and-multiply over the exponent's bits.
pub fn exp(&self, b: &Nat, n: &Nat) -> Nat {
    if self.is_nan() || b.is_nan() || n.is_nan() { return Nat::nan(); }
    // x mod 1 == 0 for every x.
    if n.as_vec().len() == 1 && n.as_vec()[0] == 1 {
        return Nat::from(0u32);
    }
    if b.as_vec().len() == 1 {
        if b.as_vec()[0] == 0 {
            // x^0 == 1.
            return Nat::from(1u32);
        } else if b.as_vec()[0] == 1 {
            // x^1: with n == 0 meaning "no reduction".
            if n.as_vec().len() == 1 && n.as_vec()[0] == 0 {
                return self.deep_clone();
            } else {
                return self.clone() % n.clone();
            }
        }
    }
    // z starts as a copy of the base's limbs.
    let mut z = Nat::with_capacity(n.as_vec().len());
    z.as_mut_vec().extend(self.iter());
    // Fast paths require base > 1, a multi-limb exponent and modulus > 0.
    if (self.as_vec().len() > 1 || self.as_vec()[0] > 1) && b.as_vec().len() > 1 && (n.as_vec().len() > 1 || n.as_vec()[0] > 0) {
        if (n.as_vec()[0] & 1) == 1 {
            // Odd modulus: Montgomery form applies.
            Self::exp_montogomery(&mut z, self, b, n);
        } else {
            Self::exp_windowed(&mut z, self, b, n);
        }
        return z;
    }
    // Generic square-and-multiply. The top set bit of b's leading limb is
    // skipped (shift = leading_zeros + 1) because z already equals self.
    let mut v = *b.as_vec().last().unwrap();
    let shift = v.leading_zeros() + 1;
    v <<= shift;
    let mask = 1u32 << (32 - 1);
    let w = 32 - shift;
    let mut zz = Nat::with_capacity(32);
    // z/zz are ping-pong buffers swapped after each operation.
    let (mut z, mut zz) = (&mut z, &mut zz);
    for _ in 0..w {
        Self::sqr_v(zz.as_mut_vec(), z.as_slice());
        let tmp = z; z = zz; zz = tmp;
        if (v & mask) != 0 {
            Self::mul_by_karatsuba(zz.as_mut_vec(), z.as_slice(), self.as_slice());
            let tmp = z; z = zz; zz = tmp;
        }
        // Reduce mod n unless n == 0 ("no reduction" convention).
        if !(n.as_vec().len() == 1 && n.as_vec()[0] == 0) {
            let r = z.clone() % n.clone();
            z.as_mut_vec().clear();
            z.as_mut_vec().extend(r.iter());
        }
        v <<= 1;
    }
    // Remaining limbs of the exponent, most significant bit first.
    for &be in b.iter().rev().skip(1) {
        let mut v = be;
        for _ in 0..32 {
            Self::sqr_v(zz.as_mut_vec(), z.as_slice());
            let tmp = z; z = zz; zz = tmp;
            if (v & mask) != 0 {
                Self::mul_by_karatsuba(zz.as_mut_vec(), z.as_slice(), self.as_slice());
                let tmp = z; z = zz; zz = tmp;
            }
            if !(n.as_vec().len() == 1 && n.as_vec()[0] == 0) {
                let r = z.clone() % n.clone();
                z.as_mut_vec().clear();
                z.as_mut_vec().extend(r.iter());
            }
            v <<= 1;
        }
    }
    z.trim_head_zero();
    // Detach from the locally shared Rc buffers before returning.
    z.deep_clone()
}
/// Product of every integer in the inclusive range `[a, b]`.
///
/// * a == 0 -> 0 (the range contains zero);
/// * a > b  -> 1 (empty product);
/// * a == b -> a;
/// * otherwise a * (a+1) * ... * b.
pub(super) fn mul_range(a: u64, b: u64) -> Nat {
    if a == 0 {
        Nat::from(0u32)
    } else if a > b {
        Nat::from(1u32)
    } else if a == b {
        Nat::from(a)
    } else {
        // Start from b, then fold in each factor e in [a, b).
        let mut res = Nat::from(b);
        let mut tmp_high = Nat::with_capacity(2);
        // NOTE(review): not declared `mut`, but mutated below through the
        // interior-mutability accessors — confirm this is intentional.
        let tmp_low = Nat::with_capacity(2);
        for e in a..b {
            // Split the 64-bit factor into 32-bit halves:
            // res * e = ((res * high) << 32) + res * low.
            let (high, lower) = ((e >> 32) as u32, (e & (u32::MAX) as u64) as u32);
            Self::mul_add_ww(tmp_high.as_mut_vec(), res.as_slice(), high, 0);
            Self::mul_add_ww(tmp_low.as_mut_vec(), res.as_slice(), lower, 0);
            tmp_high.shl_inner(&32);
            // Equalize limb counts before the limb-wise addition.
            let len = std::cmp::max(tmp_high.as_vec().len(), tmp_low.as_vec().len());
            tmp_high.as_mut_vec().resize(len, 0);
            tmp_low.as_mut_vec().resize(len, 0);
            res.as_mut_vec().resize(len, 0);
            let c = add_vv(res.as_mut_slice(), tmp_high.as_slice(), tmp_low.as_slice());
            // Append the final carry, then drop high-order zero limbs.
            res.as_mut_vec().push(c);
            res.trim_head_zero();
        }
        res
    }
}
/// Sets (`is_set == true`) or clears bit `i` (counted from the least
/// significant bit). Setting a bit beyond the current limb count grows
/// the vector; clearing one out of range is a no-op.
pub(super) fn set_bits(&mut self, i: usize, is_set: bool) {
    let word = i / 32;
    let mask = 1u32 << (i % 32);
    let len = self.as_vec().len();
    if is_set {
        if word >= len {
            // Grow with zero limbs so the target limb exists.
            self.as_mut_vec().resize(word + 1, 0);
        }
        self.as_mut_vec()[word] |= mask;
    } else if word < len {
        self.as_mut_vec()[word] &= !mask;
    }
}
/// Rounding "sticky bit": 1 if any bit strictly below position
/// `bits_num` is set, 0 otherwise (and 0 for NaN).
pub(super) fn sticky(&self, bits_num: usize) -> usize {
    if self.is_nan() {
        return 0;
    }
    let word_idx = bits_num >> 5;
    if word_idx >= self.as_vec().len() {
        // All limbs lie below the cutoff: sticky iff the value is non-zero.
        return if self == &0u32 { 0 } else { 1 };
    }
    // Any fully-covered limb with a set bit makes the result sticky.
    if self.iter().take(word_idx).any(|&w| w != 0) {
        return 1;
    }
    // Check the partial limb: shift away the bits at/above the cutoff.
    let rem_bits = bits_num & 31;
    if rem_bits > 0 && (self.as_vec()[word_idx] << (32 - rem_bits)) != 0 {
        1
    } else {
        0
    }
}
/// Remainder of `self` modulo a u64, by folding the limbs from most to
/// least significant in a u128 accumulator (acc < 2^64 after each
/// reduction, so `(acc << 32) | limb` cannot overflow). Returns 0 for NaN.
fn rem_inner_u64(&self, rhs: u64) -> u64 {
    let modulus = rhs as u128;
    let r = self
        .iter()
        .rev()
        .fold(0u128, |acc, &limb| ((acc << 32) | limb as u128) % modulus);
    r as u64
}
/// Generates a random prime of exactly `bits_len` bits, running
/// `test_round_num` rounds of `probably_prime_test` on each candidate.
///
/// # Errors
/// `bits_len < 2` is rejected: there is no 1-bit prime.
pub fn generate_prime<Rng: IterSource<u32>>(bits_len: usize, test_round_num: usize, rng: &mut Rng) -> Result<Nat, NatError> {
    if bits_len < 2 {
        return Err(NatError::new(ParseErrKind::InvalidParameters, "prime size must at least 2-bits"));
    }
    // Small primes used to sieve candidates cheaply; their product fits
    // in a u64, so a single big-int remainder yields all 15 residues.
    const SMALL_PRIMES: [u8; 15] = [
        3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53,
    ];
    // Product of the primes above.
    const SMALL_PRIMES_PRODUCT: u64 = 16294579238595022365u64;
    // b = number of significant bits in the top limb (1..=32).
    let b = if (bits_len & 31) == 0 {32} else {bits_len & 31};
    let len = (bits_len + 31) >> 5;
    let mut p = Nat::with_capacity(len);
    loop {
        // Draw a fresh random candidate of `len` limbs.
        p.as_mut_vec().clear();
        for x in rng.iter_mut().take(len) {
            p.as_mut_vec().push(x);
        }
        // Mask off bits above bits_len in the top limb.
        if b != 32 {
            *p.iter_mut().last().unwrap() &= (1u32 << b) - 1;
        }
        // Force the top two bits on; when the top limb holds a single
        // bit (b == 1), the second-highest bit lives in the next limb.
        if b >= 2 {
            *p.iter_mut().last().unwrap() |= 3 << (b - 2);
        } else {
            for (i, x) in p.iter_mut().rev().enumerate() {
                if i == 0 {
                    *x |= 1;
                } else if i == 1 {
                    *x |= 0x80000000;
                    break;
                }
            }
        }
        // Make the candidate odd.
        *p.iter_mut().next().unwrap() |= 0x1;
        // Sieve: walk p, p+2, p+4, ... until no small prime divides the
        // candidate, testing divisibility on the cheap u64 residue.
        let modulus = p.rem_inner_u64(SMALL_PRIMES_PRODUCT);
        'next_delta: for delta in (0u64..(1u64 << 20)).step_by(2) {
            let m = modulus + delta;
            for &prime in SMALL_PRIMES.iter() {
                let prime = prime as u64;
                // `m != prime` keeps the small primes themselves
                // acceptable when bits_len <= 6.
                if (m % prime) == 0 && (bits_len > 6 || m != prime) {
                    continue 'next_delta;
                }
            }
            if delta > 0 {
                p += (delta & 0xffff_ffff) as u32;
            }
            break;
        }
        // The sieve may have pushed p past bits_len bits; retry if so,
        // otherwise accept once the probabilistic test passes.
        if p.bits_len() == bits_len && p.probably_prime_test(test_round_num, rng) {
            return Ok(p);
        }
    }
}
}
impl Default for Nat {
fn default() -> Self {
Nat {nat: Rc::new(Cell::new(Vec::new()))}
}
}
// Macro-generated `From<$ty>` conversions for the primitive unsigned types.
nat_from_basic!(u8, u16, u32, u64, u128, usize);
impl From<Vec<u32>> for Nat {
    /// Takes ownership of the little-endian limb vector, strips
    /// high-order zero limbs, and wraps it.
    fn from(mut x: Vec<u32>) -> Self {
        Self::trim_head_zero_(&mut x);
        Nat { nat: Rc::new(Cell::new(x)) }
    }
}
impl From<&Vec<u32>> for Nat {
fn from(x: &Vec<u32>) -> Self {
Self::from(x.clone())
}
}
impl From<&[u32]> for Nat {
    /// Copies the borrowed limb slice.
    fn from(x: &[u32]) -> Self {
        Self::from(Vec::from(x))
    }
}
impl From<&[u8]> for Nat {
    /// Builds a Nat from little-endian bytes: `x[0]` is the least
    /// significant byte, and every 4 bytes form one 32-bit limb
    /// (low byte first). A trailing partial chunk fills the top limb.
    ///
    /// Rewritten over the original manual accumulator: `chunks(4)`
    /// expresses the grouping directly and no longer pushes the spurious
    /// zero limb that the old tail-handling emitted when the length was a
    /// multiple of 4 (previously cleaned up by `trim_head_zero_`).
    fn from(x: &[u8]) -> Self {
        let mut v: Vec<u32> = x
            .chunks(4)
            // Fold high byte first so the chunk's first byte lands in
            // the limb's low bits.
            .map(|chunk| chunk.iter().rev().fold(0u32, |w, &b| (w << 8) | b as u32))
            .collect();
        Self::trim_head_zero_(&mut v);
        Nat { nat: Rc::new(Cell::new(v)) }
    }
}
impl From<Vec<u8>> for Nat {
    /// Consumes the byte vector; delegates to the borrowed conversion.
    fn from(x: Vec<u8>) -> Self {
        Nat::from(&x)
    }
}
impl From<&Vec<u8>> for Nat {
    /// Borrowed byte vector; delegates to the slice conversion.
    fn from(x: &Vec<u8>) -> Self {
        Nat::from(&x[..])
    }
}
impl FromStr for Nat {
    type Err = NatError;
    /// Parses a string with an optional radix prefix detected by
    /// `check_base` (binary, octal, decimal or hexadecimal).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let trimmed = s.trim();
        let (prefix_len, radix) = Self::check_base(trimmed)?;
        let digits = &trimmed[prefix_len..];
        match radix {
            2 => Self::from_bin_str(digits),
            8 => Self::from_oct_str(digits),
            10 => Self::from_dec_str(digits),
            16 => Self::from_hex_str(digits),
            // check_base only ever reports the four radixes above.
            _ => unreachable!(),
        }
    }
}
// Macro-generated formatting impls: per-limb format string plus the prefix
// used when `{:#...}` (alternate form) is requested.
nat_fmt!((Binary, "{:032b}", "0b"), (LowerHex, "{:08x}", "0x"), (Debug, "{:08x}", "0x"), (UpperHex, "{:08X}", "0x"));
impl Octal for Nat {
    /// Formats the number in base 8. Octal digits are 3 bits wide while
    /// limbs are 32 bits, so limbs are processed in groups of three
    /// (96 bits = exactly 32 octal digits), carrying 2 leftover bits
    /// after the first limb and 1 after the second.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        if self.is_nan() {
            return write!(f, "{}", "NaN");
        }
        // At most 11 octal digits are produced per limb.
        let mut nat_str = String::with_capacity(self.num() * 11);
        // `pre` holds the 0-2 bits carried over from the previous limb.
        let mut pre = 0;
        for (i, &ele) in self.iter().enumerate() {
            let tmp = match i % 3 {
                0 => {
                    // 30 bits are consumed below; keep the top 2 as carry.
                    (ele, (ele >> 30) as u8)
                },
                1 => {
                    // 1 low bit + the 2 carried bits form one digit;
                    // the limb's top bit becomes the next carry.
                    let val = ((((ele & 0x1) << 2) as u8) | pre) + b'0';
                    nat_str.push(val as char);
                    (ele >> 1, (ele >> 31) as u8)
                },
                _ => {
                    // 2 low bits + the 1 carried bit; the remaining 30
                    // bits divide evenly, so no carry survives.
                    let val = ((((ele & 0x3) << 1) as u8) | pre) + b'0';
                    nat_str.push(val as char);
                    (ele >> 2, 0)
                },
            };
            pre = tmp.1;
            // Emit ten 3-bit digits from the (pre-shifted) limb value.
            for idx in 0..10u32 {
                let val = (((tmp.0 >> (idx * 3)) & 0x7u32) as u8) + b'0';
                nat_str.push(val as char);
            }
        }
        if pre > 0 { nat_str.push((pre + b'0') as char);}
        // Digits were pushed least-significant first; reverse for output.
        let nat_str: String = nat_str.chars().rev().collect();
        let (nat, prefix) = (nat_str.trim_start_matches('0'), if f.alternate() {"0o"} else {""});
        // A value of zero trims to an empty string; print a single 0.
        if nat.is_empty() && self.num() > 0 {
            write!(f, "{}{}", prefix, 0)
        } else {
            write!(f, "{}{}", prefix, nat)
        }
    }
}
impl Display for Nat {
    /// Decimal formatting by repeated division: each round peels one
    /// digit via `% 10` and `/= 10`, then the digits are reversed.
    /// NaN prints as "NaN".
    /// NOTE(review): quadratic in the number of digits — acceptable for
    /// display, but avoid in hot paths.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        if self.is_nan() {
            return write!(f, "{}", "NaN");
        }
        let (zero, ten) = (Nat::from(0u8), Nat::from(10u8));
        // deep_clone: Nat::clone shares the Rc buffer, and we mutate below.
        let mut nat_str = Vec::with_capacity(self.bits_len() >> 3);
        let mut nat = self.deep_clone();
        while nat != zero {
            let rem = nat.clone() % ten.clone();
            nat /= ten.clone();
            // The remainder is a single limb in 0..=9.
            let val = rem.as_vec().first().unwrap();
            nat_str.push(format!("{}", val));
        }
        if nat_str.is_empty() {
            // The loop never ran: the value is zero.
            write!(f, "{}", 0)
        } else {
            nat_str.reverse();
            write!(f, "{}", nat_str.join(""))
        }
    }
}
impl PartialEq for Nat {
    /// NaN compares unequal to everything, including itself; otherwise
    /// equality is limb-vector equality.
    fn eq(&self, other: &Self) -> bool {
        !self.is_nan() && self.as_vec() == other.as_vec()
    }
}
// Macro-generated `PartialEq<$ty>` against the primitive unsigned types.
nat_eq_basic!(u8, u16, u32, u64, u128, usize);
impl PartialOrd for Nat {
    /// Ordering is undefined (`None`) when either side is NaN.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        match (self.is_nan(), other.is_nan()) {
            (false, false) => Some(self.partial_cmp_inner(other)),
            _ => None,
        }
    }
}
// Macro-generated `PartialOrd<$ty>` against the primitive unsigned types.
nat_ord_basic!(u8, u16, u32, u64, u128, usize);
// Macro-generated binary operators and their `*Assign` forms. Each tuple
// names the rhs type, the operator traits, the `_inner` implementation to
// delegate to, and a closure deciding when the rhs makes the result NaN.
nat_arith_ops1!(
(Nat, Mul, MulAssign, mul, mul_assign, mul_inner, |rhs: &Nat| {rhs.is_nan()}),
(u32, Mul, MulAssign, mul, mul_assign, mul_inner_basic, |_rhs: &u32| {false}),
(u32, Rem, RemAssign, rem, rem_assign, rem_inner_basic, |_rhs: &u32| {false}),
(u32, Div, DivAssign, div, div_assign, div_inner_basic, |_rhs: &u32| {false}),
(Nat, BitXor, BitXorAssign, bitxor, bitxor_assign, bitxor_inner, |rhs: &Nat| {rhs.is_nan()}),
(Nat, BitOr, BitOrAssign, bitor, bitor_assign, bitor_inner, |rhs: &Nat| {rhs.is_nan()}),
(Nat, BitAnd, BitAndAssign, bitand, bitand_assign, bitand_inner, |rhs: &Nat| {rhs.is_nan()}),
(Nat, Rem, RemAssign, rem, rem_assign, rem_inner, |rhs: &Nat| {rhs.is_nan()}),
(Nat, Div, DivAssign, div, div_assign, div_inner, |rhs: &Nat| {rhs.is_nan()}),
(usize, Shr, ShrAssign, shr, shr_assign, shr_inner, |_rhs: &usize| {false}),
(usize, Shl, ShlAssign, shl, shl_assign, shl_inner, |_rhs: &usize| {false}),
(Nat, Sub, SubAssign, sub, sub_assign, sub_inner, |rhs: &Nat| {rhs.is_nan()}),
(u32, Sub, SubAssign, sub, sub_assign, sub_inner_basic, |_rhs: &u32| {false}),
(u32, Add, AddAssign, add, add_assign, add_inner_basic, |_rhs: &u32| {false}),
(Nat, Add, AddAssign, add, add_assign, add_inner, |rhs: &Nat| {rhs.is_nan()})
);
impl Not for Nat {
    type Output = Nat;
    /// Complement of every limb; NaN maps to NaN (the default Nat).
    fn not(self) -> Self::Output {
        match self.is_nan() {
            true => Nat::default(),
            false => Nat::from(self.not_inner()),
        }
    }
}
impl AsRef<[u32]> for Nat {
    /// Borrows the little-endian limb slice.
    fn as_ref(&self) -> &[u32] {
        self.as_vec().as_slice()
    }
}