#[doc(hidden)]
pub use {faster_hex, malachite_base, malachite_nz, serde};
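/// Constructs a fixed-width unsigned integer type backed by `$n_words` little-endian `u64`
/// limbs, with arithmetic, formatting, serde, and WASM conversions.
///
/// Illustrative usage (the `Uint256` name here is only an example, not a type defined by this
/// crate):
///
/// ```ignore
/// construct_uint!(Uint256, 4); // 4 limbs * 64 bits = 256 bits
/// let x = Uint256::from_u64(42) + Uint256::from_u64(1);
/// assert_eq!(x, 43u64);
/// ```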
#[macro_export]
macro_rules! construct_uint {
    ($name:ident, $n_words:literal $(, $derive_trait:ty)*) => {
        #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug$(, $derive_trait )*)]
        pub struct $name(pub [u64; $n_words]);
        #[allow(unused)]
        impl $name {
            pub const ZERO: Self = $name([0; $n_words]);
            pub const MIN: Self = Self::ZERO;
            pub const MAX: Self = $name([u64::MAX; $n_words]);
            pub const BITS: u32 = $n_words * u64::BITS;
            pub const BYTES: usize = $n_words * size_of::<u64>();
            pub const LIMBS: usize = $n_words;

            #[inline]
            pub fn from_u64(n: u64) -> Self {
                let mut ret = Self::ZERO;
                ret.0[0] = n;
                ret
            }
            #[inline]
            pub fn from_u128(n: u128) -> Self {
                let mut ret = Self::ZERO;
                ret.0[0] = n as u64;
                ret.0[1] = (n >> 64) as u64;
                ret
            }

            #[inline]
            pub fn as_u128(self) -> u128 {
                self.0[0] as u128 | ((self.0[1] as u128) << 64)
            }

            #[inline]
            pub fn as_u64(self) -> u64 {
                self.0[0]
            }

            #[inline(always)]
            pub fn is_zero(self) -> bool {
                self.0.iter().all(|&a| a == 0)
            }

            #[inline(always)]
            pub fn bits(&self) -> u32 {
                for (i, &word) in self.0.iter().enumerate().rev() {
                    if word != 0 {
                        return u64::BITS * (i as u32 + 1) - word.leading_zeros();
                    }
                }
                0
            }

            #[inline(always)]
            pub fn leading_zeros(&self) -> u32 {
                Self::BITS - self.bits()
            }

            #[inline]
            pub fn overflowing_shl(self, mut s: u32) -> (Self, bool) {
                let overflows = s >= Self::BITS;
                s %= Self::BITS;
                let mut ret = [0u64; $n_words];
                let left_words = (s / 64) as usize;
                let left_shifts = s % 64;

                for i in left_words..$n_words {
                    ret[i] = self.0[i - left_words] << left_shifts;
                }
                if left_shifts > 0 {
                    let left_over = 64 - left_shifts;
                    for i in left_words + 1..$n_words {
                        ret[i] |= self.0[i - 1 - left_words] >> left_over;
                    }
                }
                (Self(ret), overflows)
            }

            #[inline]
            pub fn wrapping_shl(self, s: u32) -> Self {
                self.overflowing_shl(s).0
            }

            #[inline]
            pub fn overflowing_shr(self, mut s: u32) -> (Self, bool) {
                let overflows = s >= Self::BITS;
                s %= Self::BITS;
                let mut ret = [0u64; Self::LIMBS];
                let left_words = (s / 64) as usize;
                let left_shifts = s % 64;

                for i in left_words..Self::LIMBS {
                    ret[i - left_words] = self.0[i] >> left_shifts;
                }
                if left_shifts > 0 {
                    let left_over = 64 - left_shifts;
                    for i in left_words + 1..Self::LIMBS {
                        ret[i - left_words - 1] |= self.0[i] << left_over;
                    }
                }
                (Self(ret), overflows)
            }

            #[inline]
            pub fn overflowing_add(mut self, other: Self) -> (Self, bool) {
                #[inline(always)]
                pub const fn carrying_add_u64(lhs: u64, rhs: u64, carry: bool) -> (u64, bool) {
                    let (a, b) = lhs.overflowing_add(rhs);
                    let (c, d) = a.overflowing_add(carry as u64);
                    (c, b != d)
                }
                let mut carry = false;
                let mut carry_out;
                for i in 0..Self::LIMBS {
                    (self.0[i], carry_out) = carrying_add_u64(self.0[i], other.0[i], carry);
                    carry = carry_out;
                }
                (self, carry)
            }

            #[inline]
            pub fn overflowing_add_u64(mut self, other: u64) -> (Self, bool) {
                let mut carry: bool;
                (self.0[0], carry) = self.0[0].overflowing_add(other);
                for i in 1..Self::LIMBS {
                    if !carry {
                        break;
                    }
                    (self.0[i], carry) = self.0[i].overflowing_add(1);
                }
                (self, carry)
            }

            #[inline]
            pub fn overflowing_sub(mut self, other: Self) -> (Self, bool) {
                #[inline(always)]
                pub const fn borrowing_sub_u64(lhs: u64, rhs: u64, borrow: bool) -> (u64, bool) {
                    let (a, b) = lhs.overflowing_sub(rhs);
                    let (c, d) = a.overflowing_sub(borrow as u64);
                    (c, b != d)
                }

                let mut carry = false;
                let mut carry_out;
                for i in 0..Self::LIMBS {
                    (self.0[i], carry_out) = borrowing_sub_u64(self.0[i], other.0[i], carry);
                    carry = carry_out;
                }
                (self, carry)
            }

            #[inline]
            pub fn overflowing_mul_u64(self, other: u64) -> (Self, bool) {
                let (this, carry) = self.carrying_mul_u64(other);
                (this, carry != 0)
            }

            #[inline]
            pub fn carrying_mul_u64(mut self, other: u64) -> (Self, u64) {
                let mut carry: u128 = 0;
                for i in 0..Self::LIMBS {
                    let n = carry + (other as u128) * (self.0[i] as u128);
                    self.0[i] = n as u64;
                    carry = (n >> 64) & u64::MAX as u128;
                }
                (self, carry as u64)
            }

            #[inline]
            pub fn overflowing_mul(self, other: Self) -> (Self, bool) {
                let mut result = Self::ZERO;
                let mut carry_out = false;
                for j in 0..Self::LIMBS {
                    let mut carry = 0;
                    let mut i = 0;
                    while i + j < Self::LIMBS {
                        let n = (self.0[i] as u128) * (other.0[j] as u128) + (result.0[i + j] as u128) + (carry as u128);
                        result.0[i + j] = n as u64;
                        carry = (n >> 64) as u64;
                        i += 1;
                    }
                    carry_out |= carry != 0;
                }
                (result, carry_out)
            }
            #[inline(always)]
            pub fn from_le_bytes(bytes: [u8; Self::BYTES]) -> Self {
                let mut out = [0u64; Self::LIMBS];
                out.iter_mut()
                    .zip(bytes.chunks_exact(8))
                    .for_each(|(word, bytes)| *word = u64::from_le_bytes(bytes.try_into().unwrap()));
                Self(out)
            }

            #[inline(always)]
            pub fn from_be_bytes(bytes: [u8; Self::BYTES]) -> Self {
                let mut out = [0u64; Self::LIMBS];
                out.iter_mut()
                    .rev()
                    .zip(bytes.chunks_exact(8))
                    .for_each(|(word, bytes)| *word = u64::from_be_bytes(bytes.try_into().unwrap()));
                Self(out)
            }

            #[inline(always)]
            pub fn to_le_bytes(self) -> [u8; Self::BYTES] {
                let mut out = [0u8; Self::BYTES];
                out.chunks_exact_mut(8).zip(self.0).for_each(|(bytes, word)| bytes.copy_from_slice(&word.to_le_bytes()));
                out
            }

            #[inline(always)]
            pub fn to_be_bytes(self) -> [u8; Self::BYTES] {
                let mut out = [0u8; Self::BYTES];
                out.chunks_exact_mut(8)
                    .zip(self.0.into_iter().rev())
                    .for_each(|(bytes, word)| bytes.copy_from_slice(&word.to_be_bytes()));
                out
            }

            #[inline(always)]
            pub fn to_be_bytes_var(self) -> Vec<u8> {
                let bytes = self.to_be_bytes();
                let start = bytes.iter().copied().position(|b| b != 0).unwrap_or(bytes.len());
                Vec::from(&bytes[start..])
            }

            #[inline]
            pub fn div_rem_u64(mut self, other: u64) -> (Self, u64) {
                let mut rem = 0u64;
                self.0.iter_mut().rev().for_each(|d| {
                    let n = (rem as u128) << 64 | (*d as u128);
                    *d = (n / other as u128) as u64;
                    rem = (n % other as u128) as u64;
                });
                (self, rem)
            }

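            // The f64 conversion below left-aligns the value so the top limb carries the 53
            // most significant bits (the mantissa including the implicit leading one), rounds
            // to nearest (ties to even) using the dropped low bits, and rebuilds the exponent
            // from the bit length (Self::BITS - leading_zeros).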
            #[inline]
            pub fn as_f64(&self) -> f64 {
                let leading_zeroes = self.leading_zeros();
                let left_aligned = self.wrapping_shl(leading_zeroes);
                let mut mantissa = left_aligned.0[Self::LIMBS - 1] >> 11;
                let highest_dropped_bits = left_aligned.0[Self::LIMBS - 1] << 53;
                let highest_dropped_bit = highest_dropped_bits >> 63 != 0;
                let mut rest_dropped_bits = highest_dropped_bits << 1;
                for &word in &left_aligned.0[..Self::LIMBS - 1] {
                    rest_dropped_bits |= word;
                }
                let higher_than_half = highest_dropped_bit & (rest_dropped_bits != 0);
                let exactly_half_but_mantissa_odd = highest_dropped_bit & (rest_dropped_bits == 0) & (mantissa & 1 == 1);
                mantissa += (higher_than_half | exactly_half_but_mantissa_odd) as u64;
                let exponent = if self.is_zero() { 0 } else { u64::from(Self::BITS) + 1021 - u64::from(leading_zeroes) };
                f64::from_bits((exponent << 52) + mantissa)
            }

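            // Division below is plain shift-and-subtract long division: the divisor is shifted
            // left to align with the dividend's most significant bit, then repeatedly compared,
            // subtracted, and shifted right one bit at a time while the matching quotient bits
            // are set.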
            #[inline]
            pub fn div_rem(self, other: Self) -> (Self, Self) {
                let mut sub_copy = self;
                let mut shift_copy = other;
                let mut ret = [0u64; Self::LIMBS];

                let my_bits = self.bits();
                let your_bits = other.bits();

                assert_ne!(your_bits, 0, "attempted to divide {} by zero", self);

                if my_bits < your_bits {
                    return (Self(ret), sub_copy);
                }

                let mut shift = my_bits - your_bits;
                shift_copy = shift_copy << shift;
                loop {
                    if sub_copy >= shift_copy {
                        let (shift_index, shift_val) = ((shift / 64) as usize, shift % 64);
                        ret[shift_index] |= 1 << shift_val;
                        sub_copy = sub_copy - shift_copy;
                    }
                    shift_copy = shift_copy >> 1;
                    if shift == 0 {
                        break;
                    }
                    shift -= 1;
                }

                (Self(ret), sub_copy)
            }

            #[inline]
            pub fn mod_inverse(self, prime: Self) -> Option<Self> {
                use $crate::uint::malachite_nz::natural::Natural;
                use $crate::uint::malachite_base::num::arithmetic::traits::ModInverse;

                let x = Natural::from_limbs_asc(&self.0);
                let p = Natural::from_limbs_asc(&prime.0);
                let mod_inv = x.mod_inverse(p);

                mod_inv.map(|n| {
                    let mut res = [0u64; Self::LIMBS];
                    let limbs = n.into_limbs_asc();
                    res[..limbs.len()].copy_from_slice(&limbs);
                    Self(res)
                })
            }

            #[inline]
            pub fn iter_be_bits(self) -> impl ExactSizeIterator<Item = bool> + core::iter::FusedIterator {
                struct BinaryIterator {
                    array: [u64; $n_words],
                    bit: usize,
                }

                impl Iterator for BinaryIterator {
                    type Item = bool;

                    #[inline]
                    fn next(&mut self) -> Option<Self::Item> {
                        if self.bit >= 64 * $n_words {
                            return None;
                        }
                        let (word, subbit) = (self.bit / 64, self.bit % 64);
                        let current_bit = self.array[$n_words - word - 1] & (1 << (64 - subbit - 1));
                        self.bit += 1;
                        Some(current_bit != 0)
                    }

                    #[inline]
                    fn nth(&mut self, n: usize) -> Option<Self::Item> {
                        match self.bit.checked_add(n) {
                            Some(bit) => {
                                self.bit = bit;
                                self.next()
                            }
                            None => {
                                self.bit = usize::MAX;
                                None
                            }
                        }
                    }
                    #[inline]
                    fn size_hint(&self) -> (usize, Option<usize>) {
                        let remaining_bits = $n_words * (u64::BITS as usize) - self.bit;
                        (remaining_bits, Some(remaining_bits))
                    }
                }
                impl ExactSizeIterator for BinaryIterator {}
                impl core::iter::FusedIterator for BinaryIterator {}

                BinaryIterator { array: self.0, bit: 0 }
            }

            #[inline]
            pub fn from_hex(hex: &str) -> Result<Self, $crate::uint::faster_hex::Error> {
                if hex.len() > Self::BYTES * 2 {
                    return Err($crate::uint::faster_hex::Error::InvalidLength(hex.len()));
                }
                let mut out = [0u8; Self::BYTES];
                let mut input = [b'0'; Self::BYTES * 2];
                let start = input.len() - hex.len();
                input[start..].copy_from_slice(hex.as_bytes());
                $crate::uint::faster_hex::hex_decode(&input, &mut out)?;
                Ok(Self::from_be_bytes(out))
            }

            #[inline]
            pub fn from_be_bytes_var(bytes: &[u8]) -> Result<Self, $crate::uint::TryFromSliceError> {
                if bytes.len() > Self::BYTES {
                    return Err($crate::uint::TryFromSliceError);
                }
                let mut out = [0u8; Self::BYTES];
                let start = Self::BYTES - bytes.len();
                out[start..].copy_from_slice(bytes);
                Ok(Self::from_be_bytes(out))
            }

            #[inline]
            pub fn as_bigint(&self) -> Result<js_sys::BigInt, $crate::Error> {
                self.try_into()
            }

            #[inline]
            pub fn to_bigint(self) -> Result<js_sys::BigInt, $crate::Error> {
                self.try_into()
            }
        }

        impl kaspa_utils::mem_size::MemSizeEstimator for $name {
            fn estimate_mem_units(&self) -> usize {
                1
            }
        }

        impl kaspa_utils::hex::ToHex for $name {
            fn to_hex(&self) -> String {
                self.to_be_bytes().as_slice().to_hex()
            }
        }

        impl kaspa_utils::hex::ToHex for &$name {
            fn to_hex(&self) -> String {
                self.to_be_bytes().as_slice().to_hex()
            }
        }

        impl kaspa_utils::hex::FromHex for $name {
            type Error = $crate::Error;
            fn from_hex(hex: &str) -> Result<$name, Self::Error> {
                Ok($name::from_hex(hex)?)
            }
        }

        impl PartialEq<u64> for $name {
            #[inline]
            fn eq(&self, other: &u64) -> bool {
                let bigger = self.0[1..].iter().any(|&x| x != 0);
                !bigger && self.0[0] == *other
            }
        }
        impl PartialOrd<u64> for $name {
            #[inline]
            fn partial_cmp(&self, other: &u64) -> Option<core::cmp::Ordering> {
                let bigger = self.0[1..].iter().any(|&x| x != 0);
                if bigger {
                    Some(core::cmp::Ordering::Greater)
                } else {
                    self.0[0].partial_cmp(other)
                }
            }
        }

        impl PartialEq<u128> for $name {
            #[inline]
            fn eq(&self, other: &u128) -> bool {
                let bigger = self.0[2..].iter().any(|&x| x != 0);
                !bigger && self.0[0] == (*other as u64) && self.0[1] == ((*other >> 64) as u64)
            }
        }
        impl PartialOrd<u128> for $name {
            #[inline]
            fn partial_cmp(&self, other: &u128) -> Option<core::cmp::Ordering> {
                let bigger = self.0[2..].iter().any(|&x| x != 0);
                if bigger {
                    Some(core::cmp::Ordering::Greater)
                } else {
                    self.as_u128().partial_cmp(other)
                }
            }
        }

        impl PartialOrd for $name {
            #[inline]
            fn partial_cmp(&self, other: &$name) -> Option<core::cmp::Ordering> {
                Some(self.cmp(other))
            }
        }

        impl Ord for $name {
            #[inline]
            fn cmp(&self, other: &$name) -> core::cmp::Ordering {
                Iterator::cmp(self.0.iter().rev(), other.0.iter().rev())
            }
        }

        impl core::ops::Add<$name> for $name {
            type Output = $name;

            #[inline]
            #[track_caller]
            fn add(self, other: $name) -> $name {
                let (sum, carry) = self.overflowing_add(other);
                debug_assert!(!carry, "attempt to add with overflow");
                sum
            }
        }

        impl core::ops::Add<u64> for $name {
            type Output = $name;

            #[inline]
            #[track_caller]
            fn add(self, other: u64) -> $name {
                let (sum, carry) = self.overflowing_add_u64(other);
                debug_assert!(!carry, "attempt to add with overflow");
                sum
            }
        }

        impl core::ops::Sub<$name> for $name {
            type Output = $name;

            #[inline]
            #[track_caller]
            fn sub(self, other: $name) -> $name {
                let (sum, carry) = self.overflowing_sub(other);
                debug_assert!(!carry, "attempt to subtract with overflow");
                sum
            }
        }

        impl core::ops::Mul<$name> for $name {
            type Output = $name;

            #[inline]
            #[track_caller]
            fn mul(self, other: $name) -> $name {
                let (product, carry) = self.overflowing_mul(other);
                debug_assert!(!carry, "attempt to multiply with overflow");
                product
            }
        }

        impl core::ops::Mul<u64> for $name {
            type Output = $name;

            #[inline]
            #[track_caller]
            fn mul(self, other: u64) -> $name {
                let (product, carry) = self.overflowing_mul_u64(other);
                debug_assert!(!carry, "attempt to multiply with overflow");
                product
            }
        }

        impl core::ops::Div<$name> for $name {
            type Output = $name;

            #[inline]
            fn div(self, other: $name) -> $name {
                self.div_rem(other).0
            }
        }

        impl core::ops::Rem<$name> for $name {
            type Output = $name;

            #[inline]
            fn rem(self, other: $name) -> $name {
                self.div_rem(other).1
            }
        }

        impl core::ops::Div<u64> for $name {
            type Output = $name;

            #[inline]
            fn div(self, other: u64) -> $name {
                self.div_rem_u64(other).0
            }
        }

        impl core::ops::Rem<u64> for $name {
            type Output = u64;

            fn rem(self, other: u64) -> u64 {
                self.div_rem_u64(other).1
            }
        }

        impl core::ops::BitAnd<$name> for $name {
            type Output = $name;

            #[inline]
            fn bitand(mut self, other: $name) -> $name {
                self.0.iter_mut().zip(other.0.iter()).for_each(|(a, b)| *a &= *b);
                self
            }
        }

        impl core::ops::BitXor<$name> for $name {
            type Output = $name;

            #[inline]
            fn bitxor(mut self, other: $name) -> $name {
                self.0.iter_mut().zip(other.0.iter()).for_each(|(a, b)| *a ^= *b);
                self
            }
        }

        impl core::ops::BitOr<$name> for $name {
            type Output = $name;

            #[inline]
            fn bitor(mut self, other: $name) -> $name {
                self.0.iter_mut().zip(other.0.iter()).for_each(|(a, b)| *a |= *b);
                self
            }
        }

        impl core::ops::Not for $name {
            type Output = $name;

            #[inline]
            fn not(mut self) -> $name {
                self.0.iter_mut().for_each(|a| *a = !*a);
                self
            }
        }

        impl core::ops::Shl<u32> for $name {
            type Output = $name;

            #[inline]
            #[track_caller]
            fn shl(self, shift: u32) -> $name {
                let (res, carry) = self.overflowing_shl(shift);
                debug_assert!(!carry, "attempt to shift left with overflow");
                res
            }
        }

        impl core::ops::Shr<u32> for $name {
            type Output = $name;

            #[inline]
            #[track_caller]
            fn shr(self, shift: u32) -> $name {
                let (res, carry) = self.overflowing_shr(shift);
                debug_assert!(!carry, "attempt to shift right with overflow");
                res
            }
        }

        impl core::iter::Sum for $name {
            #[inline]
            #[track_caller]
            fn sum<I: Iterator<Item = Self>>(mut iter: I) -> Self {
                let first = iter.next().unwrap_or_else(|| Self::ZERO);
                iter.fold(first, |a, b| a + b)
            }
        }

        impl core::iter::Product for $name {
            #[inline]
            #[track_caller]
            fn product<I: Iterator<Item = Self>>(mut iter: I) -> Self {
                let first = iter.next().unwrap_or_else(|| Self::from_u64(1));
                iter.fold(first, |a, b| a * b)
            }
        }

        impl<'a> core::iter::Sum<&'a $name> for $name {
            #[inline]
            #[track_caller]
            fn sum<I: Iterator<Item = &'a Self>>(mut iter: I) -> Self {
                let first = iter.next().copied().unwrap_or_else(|| Self::ZERO);
                iter.fold(first, |a, &b| a + b)
            }
        }

        impl<'a> core::iter::Product<&'a $name> for $name {
            #[inline]
            #[track_caller]
            fn product<I: Iterator<Item = &'a Self>>(mut iter: I) -> Self {
                let first = iter.next().copied().unwrap_or_else(|| Self::from_u64(1));
                iter.fold(first, |a, &b| a * b)
            }
        }

        impl Default for $name {
            #[inline]
            fn default() -> Self {
                Self::ZERO
            }
        }

        impl From<u64> for $name {
            #[inline]
            fn from(x: u64) -> Self {
                Self::from_u64(x)
            }
        }

        impl core::convert::TryFrom<$name> for u128 {
            type Error = $crate::uint::TryFromIntError;

            #[inline]
            fn try_from(value: $name) -> Result<Self, Self::Error> {
                if value.0[2..].iter().any(|&x| x != 0) {
                    Err($crate::uint::TryFromIntError)
                } else {
                    Ok(value.as_u128())
                }
            }
        }

        impl core::fmt::LowerHex for $name {
            #[inline]
            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
                let mut hex = [0u8; Self::BYTES * 2];
                let bytes = self.to_be_bytes();
                $crate::uint::faster_hex::hex_encode(&bytes, &mut hex).expect("The output is exactly twice the size of the input");
                let first_non_zero = hex.iter().position(|&x| x != b'0').unwrap_or(hex.len() - 1);
                let str = unsafe { core::str::from_utf8_unchecked(&hex[first_non_zero..]) };
                f.pad_integral(true, "0x", str)
            }
        }

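        // Decimal formatting mirrors the usual LUT approach for primitive integers: digits are
        // peeled off four at a time with `div_rem_u64(10_000)`, looked up in a two-digit table,
        // and written into a fixed stack buffer from the end, so no heap allocation is needed.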
        impl core::fmt::Display for $name {
            #[inline]
            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
                static DEC_DIGITS_LUT: &[u8; 200] = b"0001020304050607080910111213141516171819\
                    2021222324252627282930313233343536373839\
                    4041424344454647484950515253545556575859\
                    6061626364656667686970717273747576777879\
                    8081828384858687888990919293949596979899";

                let mut buf = [0u8; $name::LIMBS * 20];
                let mut n = *self;
                let mut curr = buf.len();

                const STEP: u64 = 10_000;
                while n >= STEP {
                    let rem: u64;
                    (n, rem) = n.div_rem_u64(STEP);
                    let rem = rem as usize;
                    let d1 = (rem / 100) << 1;
                    let d2 = (rem % 100) << 1;
                    curr -= 4;

                    buf[curr] = DEC_DIGITS_LUT[d1];
                    buf[curr + 1] = DEC_DIGITS_LUT[d1 + 1];
                    buf[curr + 2] = DEC_DIGITS_LUT[d2];
                    buf[curr + 3] = DEC_DIGITS_LUT[d2 + 1];
                }
                let mut n = n.as_u64() as usize;
                if n >= 100 {
                    let d1 = (n % 100) << 1;
                    n /= 100;
                    curr -= 2;
                    buf[curr] = DEC_DIGITS_LUT[d1];
                    buf[curr + 1] = DEC_DIGITS_LUT[d1 + 1];
                }

                if n < 10 {
                    curr -= 1;
                    buf[curr] = (n as u8) + b'0';
                } else {
                    let d1 = n << 1;
                    curr -= 2;
                    buf[curr] = DEC_DIGITS_LUT[d1];
                    buf[curr + 1] = DEC_DIGITS_LUT[d1 + 1];
                }

                let buf_str = unsafe { std::str::from_utf8_unchecked(&buf[curr..]) };
                f.pad_integral(true, "", buf_str)
            }
        }

        impl core::fmt::Binary for $name {
            #[inline]
            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
                const BIN_LEN: usize = $name::BITS as usize;
                let mut buf = [0u8; BIN_LEN];
                let mut first_one = BIN_LEN - 1;
                for (index, (bit, char)) in self.iter_be_bits().zip(buf.iter_mut()).enumerate() {
                    *char = bit as u8 + b'0';
                    if first_one == BIN_LEN - 1 && bit {
                        first_one = index;
                    }
                }
                let buf_str = unsafe { std::str::from_utf8_unchecked(&buf[first_one..]) };
                f.pad_integral(true, "0b", buf_str)
            }
        }

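        // Serde representation: human-readable formats (e.g. JSON) get a fixed-width,
        // zero-padded big-endian hex string, while binary formats get the raw limbs
        // (least-significant first) as a fixed-size tuple. `deserialize` and
        // `deserialize_in_place` below accept both representations accordingly.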
        impl $crate::uint::serde::Serialize for $name {
            #[inline]
            fn serialize<S: $crate::uint::serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
                if serializer.is_human_readable() {
                    let mut hex = [0u8; Self::BYTES * 2];
                    let bytes = self.to_be_bytes();
                    $crate::uint::faster_hex::hex_encode(&bytes, &mut hex).expect("The output is exactly twice the size of the input");
                    let hex_str = unsafe { std::str::from_utf8_unchecked(&hex) };
                    serializer.serialize_str(hex_str)
                } else {
                    use $crate::uint::serde::ser::SerializeTuple;
                    let mut seq = serializer.serialize_tuple(Self::LIMBS)?;
                    for limb in &self.0 {
                        seq.serialize_element(limb)?;
                    }
                    seq.end()
                }
            }
        }

        impl<'de> $crate::uint::serde::Deserialize<'de> for $name {
            #[inline]
            fn deserialize<D: $crate::uint::serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
                if deserializer.is_human_readable() {
                    let hex = <std::string::String as serde::Deserialize>::deserialize(deserializer)?;
                    Ok(Self::from_hex(&hex).map_err($crate::uint::serde::de::Error::custom)?)
                } else {
                    use core::{fmt, marker::PhantomData};
                    use $crate::uint::serde::de::{Error, SeqAccess, Visitor};
                    struct EmptyVisitor(PhantomData<$name>);
                    impl<'de> Visitor<'de> for EmptyVisitor {
                        type Value = $name;

                        #[inline]
                        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                            formatter.write_str(concat!("an integer with ", $n_words, " limbs"))
                        }

                        #[inline]
                        fn visit_seq<A: SeqAccess<'de>>(self, mut seq: A) -> Result<Self::Value, A::Error> {
                            let mut ret = $name::ZERO;
                            for (i, limb) in ret.0.iter_mut().enumerate() {
                                *limb = seq.next_element()?.ok_or_else(|| Error::invalid_length(i, &self))?;
                            }
                            Ok(ret)
                        }
                    }
                    deserializer.deserialize_tuple(Self::LIMBS, EmptyVisitor(PhantomData))
                }
            }

            #[inline]
            fn deserialize_in_place<D: $crate::uint::serde::Deserializer<'de>>(
                deserializer: D,
                place: &mut Self,
            ) -> Result<(), D::Error> {
                if deserializer.is_human_readable() {
                    use core::fmt;
                    use $crate::uint::serde::de::{Error, Visitor};
                    struct InPlaceVisitor<'a>(&'a mut $name);

                    impl<'de, 'a> Visitor<'de> for InPlaceVisitor<'a> {
                        type Value = ();
                        #[inline]
                        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                            formatter.write_str("a hex string")
                        }
                        #[inline]
                        fn visit_str<E>(self, hex: &str) -> Result<Self::Value, E>
                        where
                            E: Error,
                        {
                            if hex.len() > $name::BYTES * 2 {
                                return Err($crate::uint::serde::de::Error::custom("invalid hex string length"));
                            }
                            let mut bytes = [0u8; $name::BYTES];
                            let mut input = [b'0'; $name::BYTES * 2];
                            let start = input.len() - hex.len();
                            input[start..].copy_from_slice(hex.as_bytes());
                            $crate::uint::faster_hex::hex_decode(&input, &mut bytes).map_err(Error::custom)?;

                            self.0.0.iter_mut()
                                .rev()
                                .zip(bytes.chunks_exact(8))
                                .for_each(|(word, bytes)| *word = u64::from_be_bytes(bytes.try_into().unwrap()));

                            Ok(())
                        }
                    }
                    deserializer.deserialize_str(InPlaceVisitor(place))
                } else {
                    use core::fmt;
                    use $crate::uint::serde::de::{Error, SeqAccess, Visitor};
                    struct InPlaceVisitor<'a>(&'a mut $name);

                    impl<'de, 'a> Visitor<'de> for InPlaceVisitor<'a> {
                        type Value = ();
                        #[inline]
                        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                            formatter.write_str(concat!("an integer with ", $n_words, " limbs"))
                        }
                        #[inline]
                        fn visit_seq<A: SeqAccess<'de>>(self, mut seq: A) -> Result<Self::Value, A::Error> {
                            for (idx, dest) in self.0.0[..].iter_mut().enumerate() {
                                match seq.next_element()? {
                                    Some(elem) => *dest = elem,
                                    None => {
                                        return Err(Error::invalid_length(idx, &self));
                                    }
                                }
                            }
                            Ok(())
                        }
                    }
                    deserializer.deserialize_tuple(Self::LIMBS, InPlaceVisitor(place))
                }
            }
        }

        impl TryFrom<&$name> for js_sys::BigInt {
            type Error = $crate::Error;
            #[inline]
            fn try_from(value: &$name) -> Result<js_sys::BigInt, Self::Error> {
                use $crate::wasm::*;
                BigInt::new(&JsValue::from_str(&format!("0x{value:x}"))).map_err(|err| $crate::Error::JsSys(Sendable(err)))
            }
        }

        impl TryFrom<$name> for js_sys::BigInt {
            type Error = $crate::Error;
            #[inline]
            fn try_from(value: $name) -> Result<js_sys::BigInt, Self::Error> {
                use $crate::wasm::*;
                BigInt::try_from(&value)
            }
        }

        impl TryFrom<wasm_bindgen::JsValue> for $name {
            type Error = $crate::Error;
            fn try_from(js_value: wasm_bindgen::JsValue) -> Result<Self, Self::Error> {
                use $crate::wasm::*;

                if js_value.is_string() || js_value.is_array() {
                    let bytes = js_value.try_as_vec_u8()?;
                    Ok(Self::from_be_bytes_var(&bytes)?)
                } else if js_value.is_bigint() {
                    let v: &BigInt = js_value.dyn_ref().unwrap();
                    let hex = String::from(v.to_string(16)?);
                    Ok(Self::from_hex(hex.as_str())?)
                } else {
                    return Err(Self::Error::NotCompatible);
                }
            }
        }
    };
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct TryFromIntError;

impl std::error::Error for TryFromIntError {}

impl core::fmt::Display for TryFromIntError {
    fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        "out of range integral type conversion attempted".fmt(fmt)
    }
}

impl From<core::convert::Infallible> for TryFromIntError {
    fn from(x: core::convert::Infallible) -> TryFromIntError {
        match x {}
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct TryFromSliceError;

impl std::error::Error for TryFromSliceError {}

impl core::fmt::Display for TryFromSliceError {
    fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        "conversion attempted from a slice that is too large".fmt(fmt)
    }
}

impl From<core::convert::Infallible> for TryFromSliceError {
    fn from(x: core::convert::Infallible) -> TryFromSliceError {
        match x {}
    }
}

#[cfg(test)]
mod tests {
    use rand_chacha::{
        rand_core::{RngCore, SeedableRng},
        ChaCha8Rng,
    };
    use std::fmt::Write;
    construct_uint!(Uint128, 2);

    #[test]
    fn test_u128() {
        use core::fmt::Arguments;
        let mut fmt_buf = String::with_capacity(256);
        let mut fmt_buf2 = String::with_capacity(256);
        let mut assert_equal_args = |arg1: Arguments, arg2: Arguments| {
            fmt_buf.clear();
            fmt_buf2.clear();
            fmt_buf.write_fmt(arg1).unwrap();
            fmt_buf2.write_fmt(arg2).unwrap();
            assert_eq!(fmt_buf, fmt_buf2);
        };
        let mut assert_equal = |a: Uint128, b: u128, check_fmt: bool| {
            assert_eq!(a, b);
            assert_eq!(a.to_le_bytes(), b.to_le_bytes());
            if !check_fmt {
                return;
            }

            assert_equal_args(format_args!("{a:}"), format_args!("{b:}"));
            assert_equal_args(format_args!("{a:b}"), format_args!("{b:b}"));
            assert_equal_args(format_args!("{a:#b}"), format_args!("{b:#b}"));
            assert_equal_args(format_args!("{a:0128b}"), format_args!("{b:0128b}"));
            assert_equal_args(format_args!("{a:x}"), format_args!("{b:x}"));
            assert_equal_args(format_args!("{a:#x}"), format_args!("{b:#x}"));
            assert_equal_args(format_args!("{a:032x}"), format_args!("{b:032x}"));
        };
        let mut rng = ChaCha8Rng::from_seed([0; 32]);
        let mut buf = [0u8; 16];
        let mut str_buf = String::with_capacity(32);
        for i in 0..80_000 {
            let check_fmt = i % 8 == 1;
            rng.fill_bytes(&mut buf);
            let mine = Uint128::from_le_bytes(buf);
            let default = u128::from_le_bytes(buf);
            rng.fill_bytes(&mut buf);
            let mine2 = Uint128::from_le_bytes(buf);
            let default2 = u128::from_le_bytes(buf);
            assert_equal(mine, default, check_fmt);
            assert_equal(mine2, default2, check_fmt);

            let mine = mine.overflowing_add(mine2).0.overflowing_mul(mine2).0;
            let default = default.overflowing_add(default2).0.overflowing_mul(default2).0;
            assert_equal(mine, default, check_fmt);
            let shift = rng.next_u32() % 4096;
            {
                let mine_overflow_shl = mine.overflowing_shl(shift);
                let default_overflow_shl = default.overflowing_shl(shift);
                assert_equal(mine_overflow_shl.0, default_overflow_shl.0, check_fmt);
                assert_eq!(mine_overflow_shl.1, default_overflow_shl.1);
            }
            {
                let mine_overflow_shr = mine.overflowing_shr(shift);
                let default_overflow_shr = default.overflowing_shr(shift);
                assert_equal(mine_overflow_shr.0, default_overflow_shr.0, check_fmt);
                assert_eq!(mine_overflow_shr.1, default_overflow_shr.1);
            }
            {
                let mine_divrem = mine.div_rem(mine2);
                let default_divrem = (default / default2, default % default2);
                assert_equal(mine_divrem.0, default_divrem.0, check_fmt);
                assert_equal(mine_divrem.1, default_divrem.1, check_fmt);
            }
            {
                let mine_f64 = mine.as_f64();
                let default_f64 = default as f64;
                assert_eq!(mine_f64, default_f64);
            }
            {
                let rand_u64 = rng.next_u64();
                let mine_divrem = mine.div_rem_u64(rand_u64);
                let default_divrem = (default / u128::from(rand_u64), default % u128::from(rand_u64));
                assert_equal(mine_divrem.0, default_divrem.0, check_fmt);
                assert_eq!(mine_divrem.1, u64::try_from(default_divrem.1).unwrap());
            }
            {
                let rand_u64 = rng.next_u64();
                let mine_mult = mine.overflowing_mul_u64(rand_u64);
                let default_mult = default.overflowing_mul(rand_u64 as u128);
                assert_equal(mine_mult.0, default_mult.0, check_fmt);
                assert_eq!(mine_mult.1, default_mult.1);
            }
            {
                let rand_u64 = rng.next_u64();
                let mine_add = mine.overflowing_add_u64(rand_u64);
                let default_add = default.overflowing_add(rand_u64 as u128);
                assert_equal(mine_add.0, default_add.0, check_fmt);
                assert_eq!(mine_add.1, default_add.1);
            }
            {
                let mine_le = mine.to_le_bytes();
                let default_le = default.to_le_bytes();
                assert_eq!(mine_le, default_le);
                assert_eq!(mine, Uint128::from_le_bytes(mine_le));
            }
            {
                let mine_be = mine.to_be_bytes();
                let default_be = default.to_be_bytes();
                assert_eq!(mine_be, default_be);
                assert_eq!(mine, Uint128::from_be_bytes(mine_be));
            }
            if check_fmt {
                str_buf.clear();
                str_buf.write_fmt(format_args!("{mine:032x}")).unwrap();
                assert_eq!(mine, Uint128::from_hex(&str_buf).unwrap());
            }
        }
    }

    #[test]
    fn test_mod_inv() {
        use core::cmp::Ordering;
        let mut rng = ChaCha8Rng::from_seed([0; 32]);
        let mut buf = [0u8; 16];
        for _ in 0..50_000 {
            rng.fill_bytes(&mut buf);
            let uint1 = Uint128::from_le_bytes(buf);
            rng.fill_bytes(&mut buf);
            let uint2 = Uint128::from_le_bytes(buf);
            let (bigger, smaller) = match uint1.cmp(&uint2) {
                Ordering::Greater => (uint1, uint2),
                Ordering::Less => (uint2, uint1),
                Ordering::Equal => continue,
            };
            let inv = smaller.mod_inverse(bigger);
            if let Some(inv) = inv {
                assert_eq!(prod_bin(inv, smaller, bigger), 1u64);
            }
        }

        fn sum(x: Uint128, y: Uint128, m: Uint128) -> Uint128 {
            let res = x.overflowing_add(y).0;
            if res < x || res >= m {
                res.overflowing_sub(m).0
            } else {
                res
            }
        }
        fn prod_bin(x: Uint128, y: Uint128, m: Uint128) -> Uint128 {
            if y == 1u64 {
                return x;
            } else if y == 0u64 {
                return Uint128::ZERO;
            }
            let mut res = prod_bin(x, y >> 1, m);
            res = sum(res, res, m);
            if (y.as_u64() & 1) == 1 {
                res = sum(res, x, m);
            }
            res
        }
    }
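    // Small deterministic sanity check (illustrative, self-contained): round-trips a single
    // known value through the hex, variable-length big-endian byte, decimal, and f64
    // representations, complementing the randomized tests above.
    #[test]
    fn test_small_value_roundtrips() {
        let x = Uint128::from_hex("ff").unwrap();
        assert_eq!(x, 255u64);
        assert_eq!(x.bits(), 8);
        assert_eq!(x.leading_zeros(), 120);
        assert_eq!(x.to_be_bytes_var(), vec![0xff]);
        assert_eq!(Uint128::from_be_bytes_var(&[0xff]).unwrap(), x);
        assert_eq!(format!("{x}"), "255");
        assert_eq!(format!("{x:x}"), "ff");
        assert_eq!(x.as_f64(), 255.0);
    }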
}