#![doc(hidden)]
#[cfg(feature = "alloc")]
use crate::heapvec::HeapVec;
use crate::num::Float;
#[cfg(not(feature = "alloc"))]
use crate::stackvec::StackVec;
#[cfg(not(feature = "compact"))]
use crate::table::{LARGE_POW5, LARGE_POW5_STEP};
use core::{cmp, ops, ptr};
/// Total number of bits of storage for the big integer.
// NOTE(review): presumably sized for the worst-case significant digits needed
// by the parsing algorithm — confirm against the caller.
pub const BIGINT_BITS: usize = 4000;
/// Number of limbs required to hold `BIGINT_BITS` bits.
pub const BIGINT_LIMBS: usize = BIGINT_BITS / LIMB_BITS;
/// Backing storage for the limbs: heap-allocated when `alloc` is enabled.
#[cfg(feature = "alloc")]
pub type VecType = HeapVec;
/// Backing storage for the limbs: fixed-capacity stack vector without `alloc`.
#[cfg(not(feature = "alloc"))]
pub type VecType = StackVec;
/// An arbitrary-precision unsigned integer, stored as little-endian limbs
/// (least-significant limb first; see `from_u64` and `hi64`).
#[derive(Clone, PartialEq, Eq)]
pub struct Bigint {
    // Significant digits of the value, little-endian.
    pub data: VecType,
}
#[allow(clippy::new_without_default)]
impl Bigint {
    /// Construct an empty big integer (value zero).
    #[inline(always)]
    pub fn new() -> Self {
        Self {
            data: VecType::new(),
        }
    }

    /// Construct a big integer from a `u64` value.
    #[inline(always)]
    pub fn from_u64(value: u64) -> Self {
        Self {
            data: VecType::from_u64(value),
        }
    }

    /// Get the highest 64 bits of the value, plus a flag signalling whether
    /// any lower, truncated bits were non-zero.
    #[inline(always)]
    pub fn hi64(&self) -> (u64, bool) {
        self.data.hi64()
    }

    /// Multiply in place by `base^exp`, returning `None` on overflow.
    ///
    /// The base is decomposed into its power-of-5 and power-of-2 factors:
    /// 10 gets both, 5 only the former, 2 only the latter.
    #[inline]
    pub fn pow(&mut self, base: u32, exp: u32) -> Option<()> {
        debug_assert!(base == 2 || base == 5 || base == 10);
        let has_pow5 = base % 5 == 0;
        let has_pow2 = base % 2 == 0;
        if has_pow5 {
            pow(&mut self.data, exp)?;
        }
        if has_pow2 {
            // A power of 2 is just a left shift.
            shl(&mut self.data, exp as usize)?;
        }
        Some(())
    }

    /// Number of significant bits in the stored value.
    #[inline]
    pub fn bit_length(&self) -> u32 {
        bit_length(&self.data)
    }
}
impl ops::MulAssign<&Bigint> for Bigint {
    // Multiply in place by another big integer; delegates to the limb
    // vector's own `MulAssign` implementation.
    fn mul_assign(&mut self, rhs: &Bigint) {
        self.data *= &rhs.data;
    }
}
/// A read-only view of a slice with reversed indexing: index 0 refers to the
/// last (most-significant) element.
pub struct ReverseView<'a, T: 'a> {
    // The underlying forward-order slice.
    inner: &'a [T],
}
impl<'a, T> ops::Index<usize> for ReverseView<'a, T> {
    type Output = T;

    /// Index from the back of the slice: `view[0]` is the final element.
    /// Panics on out-of-range indices, just like normal slice indexing.
    #[inline]
    fn index(&self, index: usize) -> &T {
        let offset = self.inner.len() - index - 1;
        &self.inner[offset]
    }
}
/// Create a reversed view over a limb slice, so index 0 is the
/// most-significant limb.
#[inline]
pub fn rview(x: &[Limb]) -> ReverseView<Limb> {
    ReverseView { inner: x }
}
/// Compare two big integers stored as little-endian limb slices.
///
/// Both inputs are assumed normalized (no trailing zero limbs), so a longer
/// slice always represents the larger value; equal lengths are resolved by
/// scanning limbs from most-significant downward.
#[inline]
pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering {
    x.len().cmp(&y.len()).then_with(|| {
        x.iter()
            .rev()
            .zip(y.iter().rev())
            .map(|(xi, yi)| xi.cmp(yi))
            .find(|&ord| ord != cmp::Ordering::Equal)
            .unwrap_or(cmp::Ordering::Equal)
    })
}
/// Strip trailing (most-significant) zero limbs so the representation is
/// canonical.
#[inline]
pub fn normalize(x: &mut VecType) {
    // `len().wrapping_sub(1)` underflows to `usize::MAX` for an empty vector,
    // making `get` return `None` and ending the loop.
    loop {
        match x.get(x.len().wrapping_sub(1)) {
            // SAFETY: safe, since we just observed a limb at `len - 1`, so the
            // length is non-zero and shrinking by one stays in bounds.
            Some(&0) => unsafe { x.set_len(x.len() - 1) },
            _ => break,
        }
    }
}
/// Check whether the limb slice is in canonical form, that is, its
/// most-significant stored limb is non-zero (an empty slice is canonical).
#[inline]
pub fn is_normalized(x: &[Limb]) -> bool {
    x.last() != Some(&0)
}
/// Build a limb vector holding the value of a `u64`.
///
/// With 32-bit limbs the value is split into a low and a high limb
/// (little-endian order); with 64-bit limbs it fits in a single limb.
#[inline(always)]
pub fn from_u64(x: u64) -> VecType {
    let mut vec = VecType::new();
    debug_assert!(vec.capacity() >= 2);
    // The low bits always form the first limb.
    vec.try_push(x as Limb).unwrap();
    if LIMB_BITS == 32 {
        // The high 32 bits go in a second limb.
        vec.try_push((x >> 32) as Limb).unwrap();
    }
    // Drop trailing zero limbs so the result is canonical.
    vec.normalize();
    vec
}
/// Check if any limb below the top `rindex` limbs is non-zero, i.e. whether
/// truncating to the top `rindex` limbs would lose information.
#[inline]
pub fn nonzero(x: &[Limb], rindex: usize) -> bool {
    debug_assert!(rindex <= x.len());
    let limit = x.len() - rindex;
    x[..limit].iter().rev().any(|&limb| limb != 0)
}
/// Normalize a single 32-bit limb into the high 64 bits.
#[inline]
pub fn u32_to_hi64_1(r0: u32) -> (u64, bool) {
    u64_to_hi64_1(u64::from(r0))
}
/// Normalize two 32-bit limbs (`r0` more significant) into the high 64 bits.
#[inline]
pub fn u32_to_hi64_2(r0: u32, r1: u32) -> (u64, bool) {
    // Pack both limbs into one 64-bit word, r0 in the upper half.
    let packed = (u64::from(r0) << 32) | u64::from(r1);
    u64_to_hi64_1(packed)
}
/// Normalize three 32-bit limbs (`r0` most significant) into the high 64 bits.
#[inline]
pub fn u32_to_hi64_3(r0: u32, r1: u32, r2: u32) -> (u64, bool) {
    // The top limb stands alone; the next two pack into a second 64-bit word.
    let hi = u64::from(r0);
    let lo = (u64::from(r1) << 32) | u64::from(r2);
    u64_to_hi64_2(hi, lo)
}
/// Normalize a non-zero 64-bit word so its most-significant bit is set.
///
/// Returns the shifted value and `false`: no bits can be truncated from a
/// single word.
///
/// `r0` must be non-zero: for zero, `leading_zeros()` is 64 and `r0 << 64`
/// is an overflowing shift amount (a panic in debug builds, and a masked
/// no-op shift in release builds), so assert the precondition explicitly.
#[inline]
pub fn u64_to_hi64_1(r0: u64) -> (u64, bool) {
    debug_assert!(r0 != 0);
    let ls = r0.leading_zeros();
    (r0 << ls, false)
}
/// Normalize two 64-bit words (`r0` more significant) into the high 64 bits.
///
/// Returns the top 64 bits after shifting out `r0`'s leading zeros, plus a
/// flag that is `true` when any non-zero bits were truncated.
///
/// `r0` must be non-zero: for zero, `ls` is 64 and `r1 << ls` is an
/// overflowing shift amount, so assert the precondition explicitly.
#[inline]
pub fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) {
    debug_assert!(r0 != 0);
    let ls = r0.leading_zeros();
    let rs = 64 - ls;
    // When `ls` is 0, `r1 >> 64` would itself overflow; `r0` is already
    // normalized, so no bits of `r1` are merged in.
    let v = match ls {
        0 => r0,
        _ => (r0 << ls) | (r1 >> rs),
    };
    // Any remaining bits of `r1` not merged into the result mean truncation.
    let n = r1 << ls != 0;
    (v, n)
}
/// Extract the high bits from the most-significant limbs of a big integer.
///
/// Each arm reads limbs through a reversed view (`$rview[0]` is the
/// most-significant limb), casts them to `$t`, and forwards them to the
/// conversion function `$fn`. The `@nonzero*` arms additionally OR in
/// whether any limb below the consumed ones is non-zero, so the caller
/// learns about truncated bits.
macro_rules! hi {
    // One limb available.
    (@1 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        $fn($rview[0] as $t)
    }};
    // Two limbs available.
    (@2 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        let r0 = $rview[0] as $t;
        let r1 = $rview[1] as $t;
        $fn(r0, r1)
    }};
    // Two limbs consumed, with more limbs below them to test.
    (@nonzero2 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        let (v, n) = hi!(@2 $self, $rview, $t, $fn);
        (v, n || nonzero($self, 2 ))
    }};
    // Three limbs available.
    (@3 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        let r0 = $rview[0] as $t;
        let r1 = $rview[1] as $t;
        let r2 = $rview[2] as $t;
        $fn(r0, r1, r2)
    }};
    // Three limbs consumed, with more limbs below them to test.
    (@nonzero3 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        let (v, n) = hi!(@3 $self, $rview, $t, $fn);
        (v, n || nonzero($self, 3))
    }};
}
/// Get the high 64 bits of a limb slice, plus a flag for whether any
/// truncated lower bits were non-zero.
///
/// Enough limbs are consumed to fill 64 bits (one or two 64-bit limbs,
/// up to three 32-bit limbs); everything below feeds the truncation flag.
#[inline(always)]
pub fn hi64(x: &[Limb]) -> (u64, bool) {
    let rslc = rview(x);
    let is_32 = LIMB_BITS == 32;
    match x.len() {
        0 => (0, false),
        1 => {
            if is_32 {
                hi!(@1 x, rslc, u32, u32_to_hi64_1)
            } else {
                hi!(@1 x, rslc, u64, u64_to_hi64_1)
            }
        },
        2 => {
            if is_32 {
                hi!(@2 x, rslc, u32, u32_to_hi64_2)
            } else {
                hi!(@2 x, rslc, u64, u64_to_hi64_2)
            }
        },
        _ => {
            if is_32 {
                hi!(@nonzero3 x, rslc, u32, u32_to_hi64_3)
            } else {
                hi!(@nonzero2 x, rslc, u64, u64_to_hi64_2)
            }
        },
    }
}
/// Multiply the limb vector in place by `5^exp`, returning `None` on
/// overflow of the backing storage.
pub fn pow(x: &mut VecType, mut exp: u32) -> Option<()> {
    // First knock out large chunks of the exponent with the precomputed
    // multi-limb power (not available in compact builds).
    #[cfg(not(feature = "compact"))]
    {
        while exp >= LARGE_POW5_STEP {
            large_mul(x, &LARGE_POW5)?;
            exp -= LARGE_POW5_STEP;
        }
    }
    // Largest power of 5 that fits in one limb: 5^13 < 2^32, 5^27 < 2^64.
    let small_step = if LIMB_BITS == 32 { 13 } else { 27 };
    let max_native = (5 as Limb).pow(small_step);
    // Apply as many full single-limb powers as the exponent allows.
    for _ in 0..exp / small_step {
        small_mul(x, max_native)?;
    }
    exp %= small_step;
    // Finish with the remaining partial power, if any.
    if exp != 0 {
        // SAFETY: presumably safe since `exp < small_step` keeps the power
        // exactly representable in the fast path — matches the original's
        // unchecked call.
        let small_power = unsafe { f64::int_pow_fast_path(exp as usize, 5) };
        small_mul(x, small_power as Limb)?;
    }
    Some(())
}
/// Add two limbs, returning the wrapped sum and whether the addition
/// overflowed (the carry bit).
#[inline(always)]
pub fn scalar_add(x: Limb, y: Limb) -> (Limb, bool) {
    x.overflowing_add(y)
}
/// Multiply two limbs and add an incoming carry using a double-wide
/// intermediate. Returns `(low limb, new carry)`.
#[inline(always)]
pub fn scalar_mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) {
    // Cannot overflow: MAX * MAX + MAX == 2^(2*LIMB_BITS) - 2^LIMB_BITS,
    // which still fits in the double-wide type.
    let wide = (x as Wide) * (y as Wide) + (carry as Wide);
    let lo = wide as Limb;
    let hi = (wide >> LIMB_BITS) as Limb;
    (lo, hi)
}
/// Add a scalar limb to the big integer, starting at limb offset `start`.
/// Returns `None` if pushing a final carry limb overflows the storage.
#[inline]
pub fn small_add_from(x: &mut VecType, y: Limb, start: usize) -> Option<()> {
    let mut carry = y;
    let mut index = start;
    // Propagate the carry upward until it dies out or we run off the end.
    while carry != 0 && index < x.len() {
        let (sum, overflowed) = scalar_add(x[index], carry);
        x[index] = sum;
        carry = overflowed as Limb;
        index += 1;
    }
    // A carry past the top requires a new limb.
    if carry != 0 {
        x.try_push(carry)?;
    }
    Some(())
}
/// Add a scalar limb to the big integer, starting from the
/// least-significant limb.
#[inline(always)]
pub fn small_add(x: &mut VecType, y: Limb) -> Option<()> {
    small_add_from(x, y, 0)
}
/// Multiply the big integer in place by a scalar limb. Returns `None` if a
/// final carry limb overflows the storage.
#[inline]
pub fn small_mul(x: &mut VecType, y: Limb) -> Option<()> {
    let mut carry: Limb = 0;
    // Schoolbook scalar multiply: each limb's high product feeds the next.
    for limb in x.iter_mut() {
        let (lo, hi) = scalar_mul(*limb, y, carry);
        *limb = lo;
        carry = hi;
    }
    if carry != 0 {
        x.try_push(carry)?;
    }
    Some(())
}
/// Add a big integer `y` into `x`, with `y` offset upward by `start` limbs
/// (i.e. `x += y << (start * LIMB_BITS)`). Returns `None` on overflow of
/// the backing storage.
pub fn large_add_from(x: &mut VecType, y: &[Limb], start: usize) -> Option<()> {
    // Grow x (zero-filled) so every limb of y has a partner at its offset.
    if y.len() > x.len().saturating_sub(start) {
        x.try_resize(y.len() + start, 0)?;
    }
    let mut carry = false;
    for (index, &yi) in y.iter().enumerate() {
        let xi = &mut x[start + index];
        let (sum, mut overflowed) = scalar_add(*xi, yi);
        *xi = sum;
        // Fold in the carry from the previous limb; it may overflow again.
        if carry {
            let (sum, second) = scalar_add(*xi, 1);
            *xi = sum;
            overflowed |= second;
        }
        carry = overflowed;
    }
    // A leftover carry propagates through the limbs above y's span.
    if carry {
        small_add_from(x, 1, y.len() + start)?;
    }
    Some(())
}
/// Add a big integer `y` into `x`, starting from the least-significant limb.
#[inline(always)]
pub fn large_add(x: &mut VecType, y: &[Limb]) -> Option<()> {
    large_add_from(x, y, 0)
}
/// Grade-school long multiplication of two limb slices, returning the
/// normalized product or `None` on overflow of the backing storage.
pub fn long_mul(x: &[Limb], y: &[Limb]) -> Option<VecType> {
    let mut result = VecType::try_from(x)?;
    if let Some((&y0, rest)) = y.split_first() {
        // Scale by the least-significant limb in place.
        small_mul(&mut result, y0)?;
        // For each higher limb, add x * yi shifted up by its position.
        for (offset, &yi) in rest.iter().enumerate() {
            if yi != 0 {
                let mut scaled = VecType::try_from(x)?;
                small_mul(&mut scaled, yi)?;
                large_add_from(&mut result, &scaled, offset + 1)?;
            }
        }
    }
    result.normalize();
    Some(result)
}
/// Multiply the big integer in place by another big integer, returning
/// `None` on overflow of the backing storage.
#[inline(always)]
pub fn large_mul(x: &mut VecType, y: &[Limb]) -> Option<()> {
    // A single-limb multiplier avoids the full long multiplication.
    if let [single] = y {
        small_mul(x, *single)?;
    } else {
        *x = long_mul(y, x)?;
    }
    Some(())
}
/// Shift the big integer left by `n` bits, where `0 < n < LIMB_BITS`.
/// Returns `None` if pushing a carry limb overflows the storage.
#[inline]
pub fn shl_bits(x: &mut VecType, n: usize) -> Option<()> {
    debug_assert!(n != 0);
    debug_assert!(n < LIMB_BITS);
    let lshift = n;
    let rshift = LIMB_BITS - n;
    // Each limb keeps its shifted bits and receives the bits that spilled
    // out of the limb below it.
    let mut spilled: Limb = 0;
    for limb in x.iter_mut() {
        let current = *limb;
        *limb = (current << lshift) | (spilled >> rshift);
        spilled = current;
    }
    // Bits shifted out of the top limb become a new most-significant limb.
    let carry = spilled >> rshift;
    if carry != 0 {
        x.try_push(carry)?;
    }
    Some(())
}
/// Shift the big integer left by `n` whole limbs (multiply by
/// `2^(n * LIMB_BITS)`), returning `None` if the fixed capacity would be
/// exceeded.
#[inline]
pub fn shl_limbs(x: &mut VecType, n: usize) -> Option<()> {
    debug_assert!(n != 0);
    if n + x.len() > x.capacity() {
        // Not enough room for the shifted value.
        None
    } else if !x.is_empty() {
        let len = n + x.len();
        // SAFETY: the capacity check above guarantees room for `len` limbs.
        // `ptr::copy` handles the overlapping move of the existing limbs up
        // by `n` slots, after which the vacated low limbs are zero-filled
        // before the length is extended over the now-initialized range.
        unsafe {
            let src = x.as_ptr();
            let dst = x.as_mut_ptr().add(n);
            ptr::copy(src, dst, x.len());
            ptr::write_bytes(x.as_mut_ptr(), 0, n);
            x.set_len(len);
        }
        Some(())
    } else {
        // Shifting an empty (zero) value is a no-op.
        Some(())
    }
}
/// Shift the big integer left by an arbitrary number of bits, returning
/// `None` on overflow of the backing storage.
#[inline]
pub fn shl(x: &mut VecType, n: usize) -> Option<()> {
    // Split the shift into a sub-limb bit shift and a whole-limb move.
    let bits = n % LIMB_BITS;
    let limbs = n / LIMB_BITS;
    if bits != 0 {
        shl_bits(x, bits)?;
    }
    if limbs != 0 {
        shl_limbs(x, limbs)?;
    }
    Some(())
}
/// Leading zero bits of the most-significant limb, or 0 for an empty slice.
#[inline]
pub fn leading_zeros(x: &[Limb]) -> u32 {
    x.last().map_or(0, |limb| limb.leading_zeros())
}
/// Number of significant bits in the limb slice: total stored bits minus the
/// leading zeros of the top limb.
#[inline]
pub fn bit_length(x: &[Limb]) -> u32 {
    let stored_bits = LIMB_BITS as u32 * x.len() as u32;
    stored_bits - leading_zeros(x)
}
// 64-bit limbs on 64-bit targets, with a 128-bit wide type for products.
// NOTE(review): SPARC is excluded even when 64-bit — presumably because it
// lacks efficient 128-bit multiplication; confirm against upstream rationale.
/// A single digit ("limb") of the big integer.
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub type Limb = u64;
/// Double-wide integer used for limb multiplication intermediates.
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub type Wide = u128;
/// Number of bits per limb.
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub const LIMB_BITS: usize = 64;
// 32-bit limbs everywhere else, with a 64-bit wide type.
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type Limb = u32;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type Wide = u64;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub const LIMB_BITS: usize = 32;