#![doc(hidden)]
#[cfg(feature = "radix")]
use crate::float::ExtendedFloat80;
use crate::float::RawFloat;
use crate::limits::{u32_power_limit, u64_power_limit};
#[cfg(not(feature = "compact"))]
use crate::table::get_large_int_power;
use core::{cmp, mem, ops, ptr, slice};
/// Total storage size, in bits, for the `Bigint` significand.
/// Larger when the `radix` feature is enabled.
#[cfg(feature = "radix")]
const BIGINT_BITS: usize = 6000;
#[cfg(not(feature = "radix"))]
const BIGINT_BITS: usize = 4000;
/// Number of limbs backing `Bigint`'s fixed-size storage.
const BIGINT_LIMBS: usize = BIGINT_BITS / LIMB_BITS;
/// An arbitrary-precision unsigned integer backed by fixed, stack-allocated
/// storage of `BIGINT_LIMBS` limbs, stored least-significant limb first.
#[derive(Clone, PartialEq, Eq)]
pub struct Bigint {
    // Limb storage; limb 0 is the least significant.
    pub data: StackVec<BIGINT_LIMBS>,
}
impl Bigint {
    /// Create a new big integer with a value of 0 (no limbs stored).
    #[inline(always)]
    pub const fn new() -> Self {
        Self {
            data: StackVec::new(),
        }
    }
    /// Create a big integer from an unsigned 32-bit value.
    #[inline(always)]
    pub fn from_u32(value: u32) -> Self {
        Self {
            data: StackVec::from_u32(value),
        }
    }
    /// Create a big integer from an unsigned 64-bit value.
    #[inline(always)]
    pub fn from_u64(value: u64) -> Self {
        Self {
            data: StackVec::from_u64(value),
        }
    }
    /// Get the high 64 bits of the value, plus a flag reporting whether
    /// any truncated lower bits were nonzero (see `StackVec::hi64`).
    #[inline(always)]
    pub fn hi64(&self) -> (u64, bool) {
        self.data.hi64()
    }
    /// Multiply the big integer in place by `base^exp`.
    ///
    /// The radix is split into a multiplicative component (handled by
    /// big-integer multiplication) and a power-of-two component (handled
    /// by a left shift). Returns `None` on capacity overflow.
    #[inline]
    pub fn pow(&mut self, base: u32, exp: u32) -> Option<()> {
        let (odd, shift) = split_radix(base);
        if odd != 0 {
            pow::<BIGINT_LIMBS>(&mut self.data, odd, exp)?;
        }
        if shift != 0 {
            shl(&mut self.data, (exp * shift) as usize)?;
        }
        Some(())
    }
    /// Get the number of significant bits in the value.
    #[inline]
    pub fn bit_length(&self) -> u32 {
        bit_length(&self.data)
    }
}
impl ops::MulAssign<&Bigint> for Bigint {
    /// Multiply in place by another big integer.
    /// Panics (via the `unwrap` inside `MulAssign<&[Limb]>`) if the
    /// product overflows the fixed capacity.
    fn mul_assign(&mut self, rhs: &Bigint) {
        self.data *= &rhs.data;
    }
}
/// Storage size, in bits, for the `Bigfloat` significand (`radix` only).
#[cfg(feature = "radix")]
const BIGFLOAT_BITS: usize = 1200;
/// Number of limbs backing `Bigfloat`'s significand.
#[cfg(feature = "radix")]
const BIGFLOAT_LIMBS: usize = BIGFLOAT_BITS / LIMB_BITS;
/// An arbitrary-precision binary float, representing `data * 2^exp`.
/// Only compiled with the `radix` feature.
#[cfg(feature = "radix")]
#[derive(Clone, PartialEq, Eq)]
pub struct Bigfloat {
    // Significand limbs, least-significant limb first.
    pub data: StackVec<BIGFLOAT_LIMBS>,
    // Binary exponent applied to the significand.
    pub exp: i32,
}
#[cfg(feature = "radix")]
impl Bigfloat {
    /// Create a new big float with a value of 0 (empty significand).
    #[inline(always)]
    pub const fn new() -> Self {
        Self {
            data: StackVec::new(),
            exp: 0,
        }
    }
    /// Create a big float from an extended float's mantissa and binary
    /// exponent.
    #[inline(always)]
    pub fn from_float(fp: ExtendedFloat80) -> Self {
        Self {
            data: StackVec::from_u64(fp.mant),
            exp: fp.exp,
        }
    }
    /// Create a big float from an unsigned 32-bit value (exponent 0).
    #[inline(always)]
    pub fn from_u32(value: u32) -> Self {
        Self {
            data: StackVec::from_u32(value),
            exp: 0,
        }
    }
    /// Create a big float from an unsigned 64-bit value (exponent 0).
    #[inline(always)]
    pub fn from_u64(value: u64) -> Self {
        Self {
            data: StackVec::from_u64(value),
            exp: 0,
        }
    }
    /// Multiply in place by `base^exp`: the multiplicative component
    /// scales the significand, while the power-of-two component merely
    /// bumps the binary exponent. Returns `None` on capacity overflow.
    #[inline]
    pub fn pow(&mut self, base: u32, exp: u32) -> Option<()> {
        let (odd, shift) = split_radix(base);
        if odd != 0 {
            pow::<BIGFLOAT_LIMBS>(&mut self.data, odd, exp)?;
        }
        if shift != 0 {
            self.exp += (exp * shift) as i32;
        }
        Some(())
    }
    /// Shift the significand left by `n` bits (`0 < n < LIMB_BITS`).
    #[inline]
    pub fn shl_bits(&mut self, n: usize) -> Option<()> {
        shl_bits(&mut self.data, n)
    }
    /// Shift the significand left by `n` whole limbs.
    #[inline]
    pub fn shl_limbs(&mut self, n: usize) -> Option<()> {
        shl_limbs(&mut self.data, n)
    }
    /// Shift the significand left by an arbitrary `n` bits.
    #[inline]
    pub fn shl(&mut self, n: usize) -> Option<()> {
        shl(&mut self.data, n)
    }
    /// Leading-zero count of the most-significant limb (0 when empty).
    #[inline]
    pub fn leading_zeros(&self) -> u32 {
        leading_zeros(&self.data)
    }
}
#[cfg(feature = "radix")]
impl ops::MulAssign<&Bigfloat> for Bigfloat {
    /// Multiply two big floats: significands multiply, exponents add —
    /// hence the clippy allow for a `+=` inside a `*=` impl.
    /// Panics (`unwrap`) if the product overflows the fixed capacity.
    #[inline]
    #[allow(clippy::suspicious_op_assign_impl)]
    fn mul_assign(&mut self, rhs: &Bigfloat) {
        large_mul(&mut self.data, &rhs.data).unwrap();
        self.exp += rhs.exp;
    }
}
/// A fixed-capacity vector of limbs with inline (stack) storage.
///
/// Invariant: only the first `length` entries of `data` are initialized,
/// and `length <= SIZE` at all times.
#[derive(Clone)]
pub struct StackVec<const SIZE: usize> {
    // Raw, possibly-uninitialized limb storage.
    data: [mem::MaybeUninit<Limb>; SIZE],
    // Number of initialized limbs; `u16` is enough for all capacities used.
    length: u16,
}
/// Helper to extract the high bits of a limb vector.
///
/// `$rview` is a most-significant-first view of `$self`'s limbs. The `@N`
/// variants read the top `N` limbs (as type `$t`) and pass them to the
/// conversion function `$fn`; the `@nonzeroN` variants additionally OR in
/// whether any limb below the top `N` is nonzero (the truncation flag).
/// The callers guarantee at least `N` limbs are present.
macro_rules! hi {
    // High bits from the top limb only.
    (@1 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        $fn(unsafe { index_unchecked!($rview[0]) as $t })
    }};
    // High bits from the top 2 limbs.
    (@2 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        let r0 = unsafe { index_unchecked!($rview[0]) as $t };
        let r1 = unsafe { index_unchecked!($rview[1]) as $t };
        $fn(r0, r1)
    }};
    // As `@2`, plus the truncation flag for limbs below the top 2.
    (@nonzero2 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        let (v, n) = hi!(@2 $self, $rview, $t, $fn);
        (v, n || unsafe { nonzero($self, 2 ) })
    }};
    // High bits from the top 3 limbs.
    (@3 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        let r0 = unsafe { index_unchecked!($rview[0]) as $t };
        let r1 = unsafe { index_unchecked!($rview[1]) as $t };
        let r2 = unsafe { index_unchecked!($rview[2]) as $t };
        $fn(r0, r1, r2)
    }};
    // As `@3`, plus the truncation flag for limbs below the top 3.
    (@nonzero3 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        let (v, n) = hi!(@3 $self, $rview, $t, $fn);
        (v, n || unsafe { nonzero($self, 3 ) })
    }};
}
impl<const SIZE: usize> StackVec<SIZE> {
    /// Construct an empty vector.
    #[inline]
    pub const fn new() -> Self {
        Self {
            length: 0,
            data: [mem::MaybeUninit::uninit(); SIZE],
        }
    }
    /// Get a mutable pointer to the start of the limb storage.
    #[inline]
    pub fn as_mut_ptr(&mut self) -> *mut Limb {
        self.data.as_mut_ptr().cast::<Limb>()
    }
    /// Get a const pointer to the start of the limb storage.
    #[inline]
    pub fn as_ptr(&self) -> *const Limb {
        self.data.as_ptr().cast::<Limb>()
    }
    /// Construct a vector by copying an existing limb slice.
    /// Returns `None` if the slice exceeds the fixed capacity.
    #[inline]
    pub fn try_from(x: &[Limb]) -> Option<Self> {
        let mut vec = Self::new();
        vec.try_extend(x)?;
        Some(vec)
    }
    /// Set the vector's length directly.
    ///
    /// # Safety
    ///
    /// The first `len` limbs must be initialized and `len <= SIZE`;
    /// these are only `debug_assert!`-checked.
    #[inline]
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len <= u16::MAX as usize);
        debug_assert!(len <= SIZE);
        self.length = len as u16;
    }
    /// Number of initialized limbs.
    #[inline]
    pub const fn len(&self) -> usize {
        self.length as usize
    }
    /// `true` if no limbs are stored (the value 0).
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// The fixed capacity, in limbs.
    #[inline]
    pub const fn capacity(&self) -> usize {
        // NOTE(review): `SIZE` is already `usize`; the cast is redundant.
        SIZE as usize
    }
    /// Append a limb without checking capacity.
    ///
    /// # Safety
    ///
    /// Safe only if `len() < capacity()` (debug-checked only).
    #[inline]
    pub unsafe fn push_unchecked(&mut self, value: Limb) {
        debug_assert!(self.len() < self.capacity());
        // SAFETY: the slot at `len` is in bounds per the caller's contract;
        // the length is bumped only after the write.
        unsafe {
            let len = self.len();
            let ptr = self.as_mut_ptr().add(len);
            ptr.write(value);
            self.length += 1;
        }
    }
    /// Append a limb, failing with `None` when the vector is full.
    #[inline]
    pub fn try_push(&mut self, value: Limb) -> Option<()> {
        if self.len() < self.capacity() {
            // SAFETY: spare capacity was just checked.
            unsafe { self.push_unchecked(value) };
            Some(())
        } else {
            None
        }
    }
    /// Remove and return the last limb without checking emptiness.
    ///
    /// # Safety
    ///
    /// Safe only if the vector is non-empty (debug-checked only).
    #[inline]
    pub unsafe fn pop_unchecked(&mut self) -> Limb {
        debug_assert!(!self.is_empty());
        self.length -= 1;
        // SAFETY: after the decrement, `len()` indexes the previously-last,
        // initialized limb.
        unsafe { ptr::read(self.as_mut_ptr().add(self.len())) }
    }
    /// Remove and return the last limb, if any.
    #[inline]
    pub fn pop(&mut self) -> Option<Limb> {
        if self.is_empty() {
            None
        } else {
            // SAFETY: non-empty, so a last limb exists.
            unsafe { Some(self.pop_unchecked()) }
        }
    }
    /// Copy a slice of limbs onto the end without checking capacity.
    ///
    /// # Safety
    ///
    /// Safe only if `len() + slc.len() <= capacity()` (debug-checked only).
    #[inline]
    pub unsafe fn extend_unchecked(&mut self, slc: &[Limb]) {
        let index = self.len();
        let new_len = index + slc.len();
        debug_assert!(self.len() + slc.len() <= self.capacity());
        let src = slc.as_ptr();
        // SAFETY: the destination fits per the caller's contract, and the
        // borrow rules guarantee `slc` cannot alias `self`.
        unsafe {
            let dst = self.as_mut_ptr().add(index);
            ptr::copy_nonoverlapping(src, dst, slc.len());
            self.set_len(new_len);
        }
    }
    /// Copy a slice of limbs onto the end, failing on capacity overflow.
    #[inline]
    pub fn try_extend(&mut self, slc: &[Limb]) -> Option<()> {
        if self.len() + slc.len() <= self.capacity() {
            // SAFETY: the combined length was just checked.
            unsafe { self.extend_unchecked(slc) };
            Some(())
        } else {
            None
        }
    }
    /// Set the length directly (limbs are `Copy`, so no drops are needed).
    ///
    /// # Safety
    ///
    /// Safe only if `len <= capacity()` and, when growing, the new limbs
    /// are already initialized (debug-checked only).
    unsafe fn truncate_unchecked(&mut self, len: usize) {
        debug_assert!(len <= self.capacity());
        self.length = len as u16;
    }
    /// Resize to `len` limbs, filling any new slots with `value`.
    ///
    /// # Safety
    ///
    /// Safe only if `len <= capacity()` (debug-checked only).
    #[inline]
    pub unsafe fn resize_unchecked(&mut self, len: usize, value: Limb) {
        debug_assert!(len <= self.capacity());
        let old_len = self.len();
        if len > old_len {
            // Grow: initialize each new slot before bumping the length.
            let count = len - old_len;
            for index in 0..count {
                // SAFETY: `old_len + index < len <= capacity()`.
                unsafe {
                    let dst = self.as_mut_ptr().add(old_len + index);
                    ptr::write(dst, value);
                }
            }
            self.length = len as u16;
        } else {
            // SAFETY: `len <= capacity()` per the caller's contract.
            unsafe { self.truncate_unchecked(len) };
        }
    }
    /// Resize to `len` limbs, failing when `len` exceeds capacity.
    #[inline]
    pub fn try_resize(&mut self, len: usize, value: Limb) -> Option<()> {
        if len > self.capacity() {
            None
        } else {
            // SAFETY: `len <= capacity()` was just checked.
            unsafe { self.resize_unchecked(len, value) };
            Some(())
        }
    }
    /// High 16 bits of the value, plus whether lower bits were truncated.
    #[inline(always)]
    pub fn hi16(&self) -> (u16, bool) {
        let rview = self.rview();
        match self.len() {
            0 => (0, false),
            1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi16_1),
            1 => hi!(@1 self, rview, u64, u64_to_hi16_1),
            _ if LIMB_BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi16_2),
            _ => hi!(@nonzero2 self, rview, u64, u64_to_hi16_2),
        }
    }
    /// High 32 bits of the value, plus whether lower bits were truncated.
    #[inline(always)]
    pub fn hi32(&self) -> (u32, bool) {
        let rview = self.rview();
        match self.len() {
            0 => (0, false),
            1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi32_1),
            1 => hi!(@1 self, rview, u64, u64_to_hi32_1),
            _ if LIMB_BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi32_2),
            _ => hi!(@nonzero2 self, rview, u64, u64_to_hi32_2),
        }
    }
    /// High 64 bits of the value, plus whether lower bits were truncated.
    /// With 32-bit limbs, up to 3 limbs feed the high bits; with 64-bit
    /// limbs, up to 2.
    #[inline(always)]
    pub fn hi64(&self) -> (u64, bool) {
        let rview = self.rview();
        match self.len() {
            0 => (0, false),
            1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi64_1),
            1 => hi!(@1 self, rview, u64, u64_to_hi64_1),
            2 if LIMB_BITS == 32 => hi!(@2 self, rview, u32, u32_to_hi64_2),
            2 => hi!(@2 self, rview, u64, u64_to_hi64_2),
            _ if LIMB_BITS == 32 => hi!(@nonzero3 self, rview, u32, u32_to_hi64_3),
            _ => hi!(@nonzero2 self, rview, u64, u64_to_hi64_2),
        }
    }
    /// Construct from a `u16`, normalized (zero becomes an empty vector).
    #[inline(always)]
    pub fn from_u16(x: u16) -> Self {
        let mut vec = Self::new();
        assert!(1 <= vec.capacity());
        // SAFETY: capacity for one limb was just asserted.
        unsafe { vec.push_unchecked(x as Limb) };
        vec.normalize();
        vec
    }
    /// Construct from a `u32`, normalized (zero becomes an empty vector).
    #[inline(always)]
    pub fn from_u32(x: u32) -> Self {
        let mut vec = Self::new();
        assert!(1 <= vec.capacity());
        // SAFETY: capacity for one limb was just asserted.
        unsafe { vec.push_unchecked(x as Limb) };
        vec.normalize();
        vec
    }
    /// Construct from a `u64`, normalized. On 32-bit limbs the low word
    /// is pushed first (little-endian limb order).
    #[inline(always)]
    pub fn from_u64(x: u64) -> Self {
        let mut vec = Self::new();
        assert!(2 <= vec.capacity());
        if LIMB_BITS == 32 {
            // SAFETY: capacity for two limbs was just asserted.
            unsafe {
                vec.push_unchecked(x as Limb);
                vec.push_unchecked((x >> 32) as Limb);
            }
        } else {
            // SAFETY: capacity for one limb was just asserted.
            unsafe { vec.push_unchecked(x as Limb) };
        }
        vec.normalize();
        vec
    }
    /// A most-significant-first (reversed) view of the limbs.
    #[inline]
    pub fn rview(&self) -> ReverseView<Limb> {
        ReverseView {
            inner: &*self,
        }
    }
    /// Strip trailing (most-significant) zero limbs.
    #[inline]
    pub fn normalize(&mut self) {
        // For the empty vector, `wrapping_sub` yields a huge index, so
        // `get` returns `None` and the loop terminates immediately.
        while let Some(&value) = self.get(self.len().wrapping_sub(1)) {
            if value == 0 {
                self.length -= 1;
            } else {
                break;
            }
        }
    }
    /// `true` when the top limb is nonzero (an empty vector counts as
    /// normalized).
    #[inline]
    #[allow(clippy::match_like_matches_macro)]
    pub fn is_normalized(&self) -> bool {
        match self.get(self.len().wrapping_sub(1)) {
            Some(&0) => false,
            _ => true,
        }
    }
    /// Divide in place by `y`, returning the single-limb quotient and
    /// leaving the remainder in `self` (`radix` feature only).
    #[inline]
    #[cfg(feature = "radix")]
    pub fn quorem(&mut self, y: &Self) -> Limb {
        large_quorem(self, y)
    }
    /// Add a single limb, propagating carries; `None` on overflow.
    #[inline]
    pub fn add_small(&mut self, y: Limb) -> Option<()> {
        small_add(self, y)
    }
    /// Multiply by a single limb; `None` on overflow.
    #[inline]
    pub fn mul_small(&mut self, y: Limb) -> Option<()> {
        small_mul(self, y)
    }
}
impl<const SIZE: usize> PartialEq for StackVec<SIZE> {
    /// Equality over the initialized limb data. The explicit length check
    /// is a cheap early-out; slice equality also compares lengths.
    #[inline]
    #[allow(clippy::op_ref)]
    fn eq(&self, other: &Self) -> bool {
        use core::ops::Deref;
        self.len() == other.len() && self.deref() == other.deref()
    }
}
// Limb-wise equality is reflexive, so the `Eq` marker is sound.
impl<const SIZE: usize> Eq for StackVec<SIZE> {
}
impl<const SIZE: usize> cmp::PartialOrd for StackVec<SIZE> {
    /// Total comparison via `compare`, consistent with `Ord` below.
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        Some(compare(self, other))
    }
}
impl<const SIZE: usize> cmp::Ord for StackVec<SIZE> {
    /// Numerical comparison. NOTE(review): `compare` trusts the limb
    /// counts, so both operands should be normalized — confirm callers.
    #[inline]
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        compare(self, other)
    }
}
impl<const SIZE: usize> ops::Deref for StackVec<SIZE> {
    type Target = [Limb];
    #[inline]
    fn deref(&self) -> &[Limb] {
        // SAFETY: the first `len()` limbs are always initialized — every
        // write path stores the limb before bumping `length`.
        unsafe {
            let ptr = self.data.as_ptr() as *const Limb;
            slice::from_raw_parts(ptr, self.len())
        }
    }
}
impl<const SIZE: usize> ops::DerefMut for StackVec<SIZE> {
    #[inline]
    fn deref_mut(&mut self) -> &mut [Limb] {
        // SAFETY: the first `len()` limbs are always initialized (same
        // invariant as `Deref`).
        unsafe {
            let ptr = self.data.as_mut_ptr() as *mut Limb;
            slice::from_raw_parts_mut(ptr, self.len())
        }
    }
}
impl<const SIZE: usize> ops::MulAssign<&[Limb]> for StackVec<SIZE> {
    /// Multiply in place by a limb slice; panics on capacity overflow.
    #[inline]
    fn mul_assign(&mut self, rhs: &[Limb]) {
        large_mul(self, rhs).unwrap();
    }
}
/// A borrowed view over a slice that indexes from the back:
/// `view[0]` is the underlying slice's last element.
pub struct ReverseView<'a, T: 'a> {
    inner: &'a [T],
}
impl<'a, T: 'a> ReverseView<'a, T> {
    /// Get the element `index` places from the end, without bounds checks.
    ///
    /// # Safety
    ///
    /// Safe only if `index < inner.len()` (debug-checked only).
    #[inline(always)]
    pub unsafe fn get_unchecked(&self, index: usize) -> &T {
        debug_assert!(index < self.inner.len());
        let len = self.inner.len();
        // SAFETY: `len - index - 1` is in bounds per the caller's contract.
        unsafe { self.inner.get_unchecked(len - index - 1) }
    }
    /// Get the element `index` places from the end, if it exists.
    #[inline(always)]
    pub fn get(&self, index: usize) -> Option<&T> {
        let len = self.inner.len();
        // `wrapping_sub` turns an out-of-range index into a huge value,
        // which `get` then rejects with `None`.
        self.inner.get(len.wrapping_sub(index + 1))
    }
}
impl<'a, T> ops::Index<usize> for ReverseView<'a, T> {
    type Output = T;
    /// Index from the back; panics when out of bounds (via slice indexing,
    /// or the `len - index - 1` underflow for an empty slice).
    #[inline]
    fn index(&self, index: usize) -> &T {
        let len = self.inner.len();
        &(*self.inner)[len - index - 1]
    }
}
/// Check whether any limb below the top `rindex` limbs is nonzero.
///
/// # Safety
///
/// Safe only if `rindex <= x.len()`; this is `debug_assert!`-checked, so
/// release builds rely on the caller upholding it.
#[inline]
pub unsafe fn nonzero(x: &[Limb], rindex: usize) -> bool {
    debug_assert!(rindex <= x.len());
    let len = x.len();
    // Slice off the top `rindex` limbs, then scan the rest for set bits.
    let slc = unsafe { &index_unchecked!(x[..len - rindex]) };
    slc.iter().rev().any(|&x| x != 0)
}
/// High 16 bits of a single `u32` limb, normalized so the top bit of the
/// intermediate 32-bit value is set; the flag reports nonzero bits in the
/// low 16 bits of that normalized value. Requires `r0 != 0` (the
/// normalizing shift would otherwise overflow in debug builds).
#[inline]
pub const fn u32_to_hi16_1(r0: u32) -> (u16, bool) {
    let (shifted, _) = u32_to_hi32_1(r0);
    ((shifted >> 16) as u16, shifted as u16 != 0)
}
/// High 16 bits from two `u32` limbs (`r0` most significant), plus the
/// truncation flag.
#[inline]
pub const fn u32_to_hi16_2(r0: u32, r1: u32) -> (u16, bool) {
    let (hi, truncated) = u32_to_hi32_2(r0, r1);
    ((hi >> 16) as u16, truncated || hi as u16 != 0)
}
/// Normalize a single `u32` limb so its most-significant bit is set; a
/// lone limb never truncates anything. Requires `r0 != 0`.
#[inline]
pub const fn u32_to_hi32_1(r0: u32) -> (u32, bool) {
    (r0 << r0.leading_zeros(), false)
}
/// High 32 bits from two `u32` limbs (`r0` most significant), normalized;
/// the flag reports any nonzero bits shifted out of `r1`.
#[inline]
pub const fn u32_to_hi32_2(r0: u32, r1: u32) -> (u32, bool) {
    let ls = r0.leading_zeros();
    let hi = if ls == 0 {
        // Already normalized: nothing borrowed from the lower limb.
        r0
    } else {
        (r0 << ls) | (r1 >> (32 - ls))
    };
    (hi, r1 << ls != 0)
}
/// High 64 bits from a single `u32` limb, widened then normalized.
#[inline]
pub const fn u32_to_hi64_1(r0: u32) -> (u64, bool) {
    u64_to_hi64_1(r0 as u64)
}
/// High 64 bits from two `u32` limbs packed into one word (`r0` high).
#[inline]
pub const fn u32_to_hi64_2(r0: u32, r1: u32) -> (u64, bool) {
    let merged = ((r0 as u64) << 32) | (r1 as u64);
    u64_to_hi64_1(merged)
}
/// High 64 bits from three `u32` limbs (`r0` most significant); `r1` and
/// `r2` pack into the low 64-bit word.
#[inline]
pub const fn u32_to_hi64_3(r0: u32, r1: u32, r2: u32) -> (u64, bool) {
    let lo = ((r1 as u64) << 32) | (r2 as u64);
    u64_to_hi64_2(r0 as u64, lo)
}
/// High 16 bits of a single `u64` limb, normalized; the flag reports
/// nonzero bits in the low 16 bits of the normalized value.
#[inline]
pub const fn u64_to_hi16_1(r0: u64) -> (u16, bool) {
    let (shifted, _) = u64_to_hi64_1(r0);
    ((shifted >> 48) as u16, shifted as u16 != 0)
}
/// High 16 bits from two `u64` limbs (`r0` most significant).
#[inline]
pub const fn u64_to_hi16_2(r0: u64, r1: u64) -> (u16, bool) {
    let (hi, truncated) = u64_to_hi64_2(r0, r1);
    ((hi >> 48) as u16, truncated || hi as u16 != 0)
}
/// High 32 bits of a single `u64` limb, normalized.
#[inline]
pub const fn u64_to_hi32_1(r0: u64) -> (u32, bool) {
    let (shifted, _) = u64_to_hi64_1(r0);
    ((shifted >> 32) as u32, shifted as u32 != 0)
}
/// High 32 bits from two `u64` limbs (`r0` most significant).
#[inline]
pub const fn u64_to_hi32_2(r0: u64, r1: u64) -> (u32, bool) {
    let (hi, truncated) = u64_to_hi64_2(r0, r1);
    ((hi >> 32) as u32, truncated || hi as u32 != 0)
}
/// Normalize a single `u64` limb so its most-significant bit is set; a
/// lone limb never truncates anything. Requires `r0 != 0`.
#[inline]
pub const fn u64_to_hi64_1(r0: u64) -> (u64, bool) {
    (r0 << r0.leading_zeros(), false)
}
/// High 64 bits from two `u64` limbs (`r0` most significant), normalized;
/// the flag reports any nonzero bits shifted out of `r1`.
#[inline]
pub const fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) {
    let ls = r0.leading_zeros();
    let hi = if ls == 0 {
        // Already normalized: nothing borrowed from the lower limb.
        r0
    } else {
        (r0 << ls) | (r1 >> (64 - ls))
    };
    (hi, r1 << ls != 0)
}
/// Multiply the limb vector in place by `base^exp`.
///
/// Consumes the exponent in precomputed large-power chunks first (unless
/// in compact mode), then in the largest power of `base` that fits a
/// native limb, and finally by one partial power. Returns `None` on
/// capacity overflow.
pub fn pow<const SIZE: usize>(x: &mut StackVec<SIZE>, base: u32, mut exp: u32) -> Option<()> {
    // Big steps: multiply by a precomputed `base^step` limb slice.
    #[cfg(not(feature = "compact"))]
    {
        let (large, step) = get_large_int_power(base);
        while exp >= step {
            large_mul(x, large)?;
            exp -= step;
        }
    }
    // Largest exponent such that `base^small_step` fits in one limb.
    let small_step = if LIMB_BITS == 32 {
        u32_power_limit(base)
    } else {
        u64_power_limit(base)
    };
    let max_native = (base as Limb).pow(small_step);
    while exp >= small_step {
        small_mul(x, max_native)?;
        exp -= small_step;
    }
    // Remaining partial power, computed via the float fast path.
    if exp != 0 {
        // SAFETY: assumes `exp < small_step` keeps the fast-path power
        // exact — NOTE(review): confirm against `int_pow_fast_path`'s
        // documented contract.
        let small_power = unsafe { f64::int_pow_fast_path(exp as usize, base) };
        small_mul(x, small_power as Limb)?;
    }
    Some(())
}
/// Add two limbs, returning the wrapped sum and whether it overflowed.
#[inline(always)]
pub fn scalar_add(x: Limb, y: Limb) -> (Limb, bool) {
    x.overflowing_add(y)
}
/// Multiply two limbs with a carry-in, returning `(low limb, carry-out)`.
/// Computed in the double-width `Wide` type, so the product plus carry
/// cannot overflow.
#[inline(always)]
pub fn scalar_mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) {
    let z: Wide = (x as Wide) * (y as Wide) + (carry as Wide);
    (z as Limb, (z >> LIMB_BITS) as Limb)
}
/// Add a single limb `y` to `x`, starting the carry chain at limb `start`.
///
/// Returns `None` if a leftover carry cannot be pushed (capacity
/// overflow).
#[inline]
pub fn small_add_from<const SIZE: usize>(
    x: &mut StackVec<SIZE>,
    y: Limb,
    start: usize,
) -> Option<()> {
    let mut index = start;
    let mut carry = y;
    // Propagate the carry limb-by-limb until fully absorbed.
    while carry != 0 && index < x.len() {
        // SAFETY: `index < x.len()` per the loop condition.
        let result = scalar_add(unsafe { index_unchecked!(x[index]) }, carry);
        unsafe { index_unchecked_mut!(x[index]) = result.0 };
        carry = result.1 as Limb;
        index += 1;
    }
    // A carry past the top becomes a new most-significant limb.
    if carry != 0 {
        x.try_push(carry)?;
    }
    Some(())
}
/// Add a single limb to the big integer (carry chain starts at limb 0).
#[inline(always)]
pub fn small_add<const SIZE: usize>(x: &mut StackVec<SIZE>, y: Limb) -> Option<()> {
    small_add_from(x, y, 0)
}
/// Multiply the big integer in place by a single limb `y`.
///
/// Returns `None` if pushing the final carry would exceed the capacity.
#[inline]
pub fn small_mul<const SIZE: usize>(x: &mut StackVec<SIZE>, y: Limb) -> Option<()> {
    let mut carry: Limb = 0;
    // Multiply limb-by-limb, threading the carry upward.
    for limb in x.iter_mut() {
        let (lo, hi) = scalar_mul(*limb, y, carry);
        *limb = lo;
        carry = hi;
    }
    // A leftover carry becomes a new most-significant limb.
    if carry == 0 {
        Some(())
    } else {
        x.try_push(carry)
    }
}
/// Add a limb slice `y` into `x` at limb offset `start`, i.e.
/// `x += y * 2^(LIMB_BITS * start)`.
///
/// Returns `None` if the result overflows the fixed capacity.
pub fn large_add_from<const SIZE: usize>(
    x: &mut StackVec<SIZE>,
    y: &[Limb],
    start: usize,
) -> Option<()> {
    // Ensure `x` has a slot for every limb of `y` at the offset.
    if y.len() > x.len().saturating_sub(start) {
        x.try_resize(y.len() + start, 0)?;
    }
    let mut carry = false;
    for index in 0..y.len() {
        // SAFETY: in bounds — `x` was resized to at least `start + y.len()`.
        let xi = unsafe { &mut index_unchecked_mut!(x[start + index]) };
        let yi = unsafe { index_unchecked!(y[index]) };
        // Add the limb, then fold in the carry from the previous limb;
        // both additions can each produce at most one carry bit.
        let result = scalar_add(*xi, yi);
        *xi = result.0;
        let mut tmp = result.1;
        if carry {
            let result = scalar_add(*xi, 1);
            *xi = result.0;
            tmp |= result.1;
        }
        carry = tmp;
    }
    // Propagate any final carry into the limbs above the added range.
    if carry {
        small_add_from(x, 1, y.len() + start)?;
    }
    Some(())
}
/// Add a limb slice to the big integer (at offset 0).
#[inline(always)]
pub fn large_add<const SIZE: usize>(x: &mut StackVec<SIZE>, y: &[Limb]) -> Option<()> {
    large_add_from(x, y, 0)
}
/// Grade-school (long) multiplication of two limb slices into a new
/// vector: multiply `x` by each limb of `y` and accumulate each partial
/// product at the matching limb offset. Returns `None` on capacity
/// overflow.
#[allow(clippy::needless_range_loop)]
pub fn long_mul<const SIZE: usize>(x: &[Limb], y: &[Limb]) -> Option<StackVec<SIZE>> {
    let mut z = StackVec::<SIZE>::try_from(x)?;
    if !y.is_empty() {
        // The first partial product can be computed in place.
        // SAFETY: `y` is non-empty, so index 0 is in bounds.
        let y0 = unsafe { index_unchecked!(y[0]) };
        small_mul(&mut z, y0)?;
        for index in 1..y.len() {
            let yi = unsafe { index_unchecked!(y[index]) };
            if yi != 0 {
                // z += (x * yi) << (LIMB_BITS * index)
                let mut zi = StackVec::<SIZE>::try_from(x)?;
                small_mul(&mut zi, yi)?;
                large_add_from(&mut z, &zi, index)?;
            }
        }
    }
    // Strip any trailing zero limbs.
    z.normalize();
    Some(z)
}
/// Multiply the big integer in place by a limb slice.
///
/// Uses the single-limb scalar path when possible, otherwise long
/// multiplication into a fresh vector. Returns `None` on capacity
/// overflow.
#[inline(always)]
pub fn large_mul<const SIZE: usize>(x: &mut StackVec<SIZE>, y: &[Limb]) -> Option<()> {
    if y.len() == 1 {
        // SAFETY: `y.len() == 1`, so index 0 is in bounds.
        small_mul(x, unsafe { index_unchecked!(y[0]) })?;
    } else {
        *x = long_mul(y, x)?;
    }
    Some(())
}
/// Single-limb division step: divide `x` by `y` in place, returning the
/// quotient limb and leaving the remainder in `x`.
///
/// The quotient is first under-estimated as `x[m-1] / (y[n-1] + 1)`, the
/// scaled divisor is subtracted, and the estimate is then corrected by at
/// most one extra subtraction of `y`. NOTE(review): correctness relies on
/// the true quotient fitting in one limb, which the length asserts
/// suggest callers guarantee — confirm against call sites.
#[cfg(feature = "radix")]
#[allow(clippy::many_single_char_names)]
pub fn large_quorem<const SIZE: usize>(x: &mut StackVec<SIZE>, y: &[Limb]) -> Limb {
    assert!(!y.is_empty(), "large_quorem:: division by zero error.");
    assert!(x.len() <= y.len(), "large_quorem:: oversized numerator.");
    let mask = Limb::max_value() as Wide;
    let m = x.len();
    let n = y.len();
    // Numerator shorter than the divisor: quotient 0, remainder is `x`.
    if m < n {
        return 0;
    }
    // SAFETY: `m >= n >= 1`, so both top indices are in bounds.
    let xm_1 = unsafe { index_unchecked!(x[m - 1]) };
    let yn_1 = unsafe { index_unchecked!(y[n - 1]) };
    // Initial under-estimate of the quotient (the `+ 1` guarantees it
    // never overshoots).
    let mut q = xm_1 / (yn_1 + 1);
    if q != 0 {
        // x -= q * y, limb by limb: `carry` tracks the running product,
        // `borrow` tracks the subtraction.
        let mut borrow: Wide = 0;
        let mut carry: Wide = 0;
        for j in 0..m {
            let yj = unsafe { index_unchecked!(y[j]) } as Wide;
            let p = yj * q as Wide + carry;
            carry = p >> LIMB_BITS;
            let xj = unsafe { index_unchecked!(x[j]) } as Wide;
            let t = xj.wrapping_sub(p & mask).wrapping_sub(borrow);
            borrow = (t >> LIMB_BITS) & 1;
            unsafe { index_unchecked_mut!(x[j]) = t as Limb };
        }
        x.normalize();
    }
    // The estimate can be low by one: correct with a final subtraction.
    if compare(x, y) != cmp::Ordering::Less {
        q += 1;
        let mut borrow: Wide = 0;
        let mut carry: Wide = 0;
        for j in 0..m {
            let yj = unsafe { index_unchecked!(y[j]) } as Wide;
            let p = yj + carry;
            carry = p >> LIMB_BITS;
            let xj = unsafe { index_unchecked!(x[j]) } as Wide;
            let t = xj.wrapping_sub(p & mask).wrapping_sub(borrow);
            borrow = (t >> LIMB_BITS) & 1;
            unsafe { index_unchecked_mut!(x[j]) = t as Limb };
        }
        x.normalize();
    }
    q
}
/// Compare two big-integer limb slices (least-significant limb first).
///
/// Lengths are compared first — meaningful only for normalized inputs,
/// where a longer slice is strictly larger — and equal-length slices are
/// compared limb-by-limb from the most-significant end.
#[inline]
pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering {
    x.len().cmp(&y.len()).then_with(|| {
        for (xi, yi) in x.iter().rev().zip(y.iter().rev()) {
            let ord = xi.cmp(yi);
            if ord != cmp::Ordering::Equal {
                return ord;
            }
        }
        cmp::Ordering::Equal
    })
}
/// Shift the limb vector left by `n` bits, where `0 < n < LIMB_BITS`
/// (debug-checked only).
///
/// Bits shifted out of the top limb are pushed as a new limb; returns
/// `None` if that push overflows the capacity.
#[inline]
pub fn shl_bits<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<()> {
    debug_assert!(n != 0);
    debug_assert!(n < LIMB_BITS);
    // Each limb keeps its own bits shifted up and receives the high bits
    // of the limb below it.
    let rshift = LIMB_BITS - n;
    let lshift = n;
    let mut prev: Limb = 0;
    for xi in x.iter_mut() {
        let tmp = *xi;
        *xi <<= lshift;
        *xi |= prev >> rshift;
        prev = tmp;
    }
    // The overflow out of the old top limb becomes a new limb.
    let carry = prev >> rshift;
    if carry != 0 {
        x.try_push(carry)?;
    }
    Some(())
}
/// Shift the vector left by `n` whole limbs, i.e. multiply by
/// `2^(LIMB_BITS * n)`. `n` must be nonzero (debug-checked only).
///
/// Returns `None` if the widened vector would exceed the capacity.
#[inline]
pub fn shl_limbs<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<()> {
    debug_assert!(n != 0);
    if n + x.len() > x.capacity() {
        None
    } else if !x.is_empty() {
        let len = n + x.len();
        // SAFETY: the moved range stays within capacity (checked above);
        // `ptr::copy` permits the overlapping source/destination.
        unsafe {
            let x_len = x.len();
            let ptr = x.as_mut_ptr();
            let src = ptr;
            let dst = ptr.add(n);
            ptr::copy(src, dst, x_len);
            // Zero-fill the vacated low limbs.
            ptr::write_bytes(ptr, 0, n);
            x.set_len(len);
        }
        Some(())
    } else {
        // Shifting zero is still zero: nothing to do.
        Some(())
    }
}
/// Shift the big integer left by an arbitrary `n` bits, growing the
/// vector as needed.
///
/// Returns `None` if the shifted value would overflow the fixed capacity.
#[inline]
pub fn shl<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<()> {
    // Split into a sub-limb bit shift and a whole-limb shift.
    let (limbs, bits) = (n / LIMB_BITS, n % LIMB_BITS);
    if bits != 0 {
        shl_bits(x, bits)?;
    }
    if limbs != 0 {
        shl_limbs(x, limbs)?;
    }
    Some(())
}
/// Number of leading zero bits in the most-significant limb, or 0 for an
/// empty slice.
#[inline]
pub fn leading_zeros(x: &[Limb]) -> u32 {
    match x.last() {
        Some(limb) => limb.leading_zeros(),
        None => 0,
    }
}
/// Total number of significant bits in the big integer
/// (limb count times limb width, minus the top limb's leading zeros).
#[inline]
pub fn bit_length(x: &[Limb]) -> u32 {
    let total_bits = LIMB_BITS as u32 * x.len() as u32;
    total_bits - leading_zeros(x)
}
/// Split a radix into a multiplicative component `c` and a binary shift
/// `s` such that `radix == c * 2^s` (e.g. `10 => (5, 1)`; note the
/// component is not always odd, e.g. `12 => (6, 1)`).
///
/// A component of 0 marks a pure power of two (only the shift applies),
/// and `(0, 0)` marks a radix unsupported under the enabled features:
/// non-decimal composite radixes need `radix`, and non-trivial powers of
/// two need `power-of-two`.
#[inline(always)]
pub const fn split_radix(radix: u32) -> (u32, u32) {
    match radix {
        2 => (0, 1),
        3 if cfg!(feature = "radix") => (3, 0),
        4 if cfg!(feature = "power-of-two") => (0, 2),
        5 => (5, 0),
        6 if cfg!(feature = "radix") => (3, 1),
        7 if cfg!(feature = "radix") => (7, 0),
        8 if cfg!(feature = "power-of-two") => (0, 3),
        9 if cfg!(feature = "radix") => (9, 0),
        10 => (5, 1),
        11 if cfg!(feature = "radix") => (11, 0),
        12 if cfg!(feature = "radix") => (6, 1),
        13 if cfg!(feature = "radix") => (13, 0),
        14 if cfg!(feature = "radix") => (7, 1),
        15 if cfg!(feature = "radix") => (15, 0),
        16 if cfg!(feature = "power-of-two") => (0, 4),
        17 if cfg!(feature = "radix") => (17, 0),
        18 if cfg!(feature = "radix") => (9, 1),
        19 if cfg!(feature = "radix") => (19, 0),
        20 if cfg!(feature = "radix") => (5, 2),
        21 if cfg!(feature = "radix") => (21, 0),
        22 if cfg!(feature = "radix") => (11, 1),
        23 if cfg!(feature = "radix") => (23, 0),
        24 if cfg!(feature = "radix") => (3, 3),
        25 if cfg!(feature = "radix") => (25, 0),
        26 if cfg!(feature = "radix") => (13, 1),
        27 if cfg!(feature = "radix") => (27, 0),
        28 if cfg!(feature = "radix") => (7, 2),
        29 if cfg!(feature = "radix") => (29, 0),
        30 if cfg!(feature = "radix") => (15, 1),
        31 if cfg!(feature = "radix") => (31, 0),
        32 if cfg!(feature = "power-of-two") => (0, 5),
        33 if cfg!(feature = "radix") => (33, 0),
        34 if cfg!(feature = "radix") => (17, 1),
        35 if cfg!(feature = "radix") => (35, 0),
        36 if cfg!(feature = "radix") => (9, 2),
        // Unsupported radix for the enabled feature set.
        _ => (0, 0),
    }
}
// 64-bit targets (except SPARC) use 64-bit limbs with 128-bit wide
// arithmetic. NOTE(review): SPARC is excluded from the 64-bit path —
// presumably its 128-bit math is slow or emulated; confirm.
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub type Limb = u64;
/// Double-width type holding a full limb product plus carry.
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub type Wide = u128;
/// Signed counterpart of `Wide`.
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub type SignedWide = i128;
/// Number of bits in a limb.
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub const LIMB_BITS: usize = 64;
// All other targets use 32-bit limbs with 64-bit wide arithmetic.
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type Limb = u32;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type Wide = u64;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type SignedWide = i64;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub const LIMB_BITS: usize = 32;