#![no_std]

pub trait EintWideningMulU: Sized {
    fn _widening_mul_u(self, other: Self) -> (Self, Self);
}

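// Widening multiplication for the single-word wrappers: the full product is
// computed in the next wider primitive and then split in half. For example,
// treating two E8 values 0xff and 0xff as unsigned, 0xff * 0xff = 0xfe01 in
// u16, which splits into a low half of 0x01 and a high half of 0xfe.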
macro_rules! impl_widening_mul_u_wrap {
    ($eint:ty, $wint:ty) => {
        impl EintWideningMulU for $eint {
            fn _widening_mul_u(self, other: Self) -> (Self, Self) {
                let lh = (self.0 as $wint) * (other.0 as $wint);
                let l = Self::from(lh);
                let h = Self::from(lh >> Self::BITS);
                (l, h)
            }
        }
    };
}

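// Widening multiplication for the multi-limb types: a schoolbook (long)
// multiplication over 64-bit limbs, accumulating each 64x64 -> 128 bit partial
// product into a double-width limb buffer and propagating the carry, then
// splitting the buffer into the low and high halves of the result.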
macro_rules! impl_widening_mul_u_twin {
    ($eint:ty, $size:expr) => {
        impl EintWideningMulU for $eint {
            fn _widening_mul_u(self, other: Self) -> (Self, Self) {
                let mut lh = [0u64; $size * 2];
                for i in 0..$size {
                    let mut c = 0u64;
                    for j in 0..$size {
                        let uv = self.0[j] as u128 * other.0[i] as u128 + lh[i + j] as u128 + c as u128;
                        lh[i + j] = uv as u64;
                        c = (uv >> 64) as u64;
                    }
                    lh[i + $size] = c;
                }

                let mut lo = [0u64; $size];
                lo.copy_from_slice(&lh[0..$size]);
                let mut hi = [0u64; $size];
                hi.copy_from_slice(&lh[$size..$size * 2]);
                (Self(lo), Self(hi))
            }
        }
    };
}

pub trait Eint:
    Clone
    + Copy
    + Default
    + Eq
    + From<bool>
    + From<i8>
    + From<i16>
    + From<i32>
    + From<i64>
    + From<i128>
    + From<u8>
    + From<u16>
    + From<u32>
    + From<u64>
    + From<u128>
    + From<Self>
    + PartialEq
    + core::cmp::Ord
    + core::cmp::PartialOrd
    + core::fmt::Debug
    + core::fmt::Display
    + core::fmt::LowerHex
    + core::ops::Add<Output = Self>
    + core::ops::AddAssign
    + core::ops::BitAnd<Output = Self>
    + core::ops::BitAndAssign
    + core::ops::BitOr<Output = Self>
    + core::ops::BitOrAssign
    + core::ops::BitXor<Output = Self>
    + core::ops::BitXorAssign
    + core::ops::Div<Output = Self>
    + core::ops::DivAssign
    + core::ops::Mul<Output = Self>
    + core::ops::MulAssign
    + core::ops::Neg<Output = Self>
    + core::ops::Not
    + core::ops::Rem<Output = Self>
    + core::ops::RemAssign
    + core::ops::Sub<Output = Self>
    + core::ops::SubAssign
    + core::ops::Shl<u32, Output = Self>
    + core::ops::Shr<u32, Output = Self>
    + EintWideningMulU
{
    const BITS: u32;
    const MAX_S: Self;
    const MAX_U: Self;
    const MIN_S: Self;
    const MIN_U: Self;
    const ONE: Self;
    const ZERO: Self;

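    // The averaging helpers below rely on the carry-free identity
    // (a & b) + ((a ^ b) >> 1) == (a + b) >> 1, so the mean is computed without
    // a wider intermediate. For example, with the 8-bit values 200 and 100:
    // (200 & 100) + ((200 ^ 100) >> 1) = 64 + 86 = 150 = (200 + 100) / 2.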
    fn average_add_s(self, other: Self) -> Self {
        (self & other).wrapping_add((self ^ other).wrapping_sra(1))
    }

    fn average_add_u(self, other: Self) -> Self {
        (self & other).wrapping_add((self ^ other).wrapping_shr(1))
    }

    fn average_sub_s(self, other: Self) -> Self {
        let (lo, borrow) = self.overflowing_sub_u(other);
        let hi_0 = if !self.is_negative() { Self::MIN_U } else { Self::MAX_U };
        let hi_1 = if !other.is_negative() { Self::MIN_U } else { Self::MAX_U };
        let hi = hi_0.wrapping_sub(hi_1).wrapping_sub(Self::from(borrow));
        lo.wrapping_shr(1) | hi.wrapping_shl(Self::BITS - 1)
    }

    fn average_sub_u(self, other: Self) -> Self {
        let (lo, borrow) = self.overflowing_sub_u(other);
        if borrow {
            (lo >> 1) | (Self::ONE << (Self::BITS - 1))
        } else {
            lo >> 1
        }
    }

    /// Returns whether the bit at position `n` is set.
    fn bit(&self, n: u32) -> bool;

    /// Clears the bit at position `n`.
    fn bit_clr(&mut self, n: u32);

    /// Sets the bit at position `n`.
    fn bit_set(&mut self, n: u32);

    /// Counts the number of leading zero bits.
    fn clz(&self) -> u32;

    /// Compares two numbers as signed integers.
    fn cmp_s(&self, other: &Self) -> core::cmp::Ordering;

    /// Compares two numbers as unsigned integers.
    fn cmp_u(&self, other: &Self) -> core::cmp::Ordering;

    /// Counts the number of set bits (population count).
    fn cpop(&self) -> u32;

    /// Counts the number of trailing zero bits.
    fn ctz(&self) -> u32;

    /// Loads a value from a byte slice.
    fn get(mem: &[u8]) -> Self {
        unsafe { core::ptr::read_unaligned(mem.as_ptr() as *const _) }
    }

    /// Returns the high half of the number, shifted down into the low half.
    fn hi(self) -> Self;

    fn is_negative(&self) -> bool;

    fn is_positive(&self) -> bool;

    /// Returns the low half of the number, zero extended.
    fn lo(self) -> Self;

    /// Returns the low half of the number, sign extended.
    fn lo_sext(self) -> Self;

    fn overflowing_add_s(self, other: Self) -> (Self, bool);

    fn overflowing_add_u(self, other: Self) -> (Self, bool);

    fn overflowing_mul_s(self, other: Self) -> (Self, bool);

    fn overflowing_mul_u(self, other: Self) -> (Self, bool);

    fn overflowing_sub_s(self, other: Self) -> (Self, bool);

    fn overflowing_sub_u(self, other: Self) -> (Self, bool);

    /// Stores the value into a byte slice in little-endian byte order.
    fn put(&self, mem: &mut [u8]);

    /// Stores only the low half of the value into a byte slice in little-endian byte order.
    fn put_lo(&self, mem: &mut [u8]);

    fn saturating_add_s(self, other: Self) -> (Self, bool) {
        let r = self.wrapping_add(other);
        if !(self ^ other).is_negative() {
            if (r ^ self).is_negative() {
                let r = if self.is_negative() { Self::MIN_S } else { Self::MAX_S };
                return (r, true);
            }
        }
        (r, false)
    }

    fn saturating_add_u(self, other: Self) -> (Self, bool) {
        let (r, overflow) = self.overflowing_add_u(other);
        if overflow {
            (Self::MAX_U, overflow)
        } else {
            (r, overflow)
        }
    }

    fn saturating_sub_s(self, other: Self) -> (Self, bool) {
        let r = self.wrapping_sub(other);
        if (self ^ other).is_negative() {
            if (r ^ self).is_negative() {
                let r = if self.is_negative() { Self::MIN_S } else { Self::MAX_S };
                return (r, true);
            }
        }
        (r, false)
    }

    fn saturating_sub_u(self, other: Self) -> (Self, bool) {
        if self >= other {
            (self.wrapping_sub(other), false)
        } else {
            (Self::MIN_U, true)
        }
    }

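    // `sext` and `zext` keep bits 0..=n and extend from bit n: the value is
    // shifted all the way left and then back right, either arithmetically
    // (sign extension) or logically (zero extension). For example, sext(7) of
    // 0x80 on a 32-bit type yields 0xffff_ff80, while zext(7) yields 0x0000_0080.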
    fn sext(self, other: u32) -> Self {
        self.wrapping_shl(Self::BITS - other - 1).wrapping_sra(Self::BITS - other - 1)
    }

    fn u8(self) -> u8;

    fn u16(self) -> u16;

    fn u32(self) -> u32;

    fn u64(self) -> u64;

    fn widening_add_s(self, other: Self) -> (Self, Self) {
        let hi_0 = if self.is_negative() { Self::MAX_U } else { Self::MIN_U };
        let hi_1 = if other.is_negative() { Self::MAX_U } else { Self::MIN_U };
        let (lo, carry) = self.overflowing_add_u(other);
        let hi = hi_0.wrapping_add(hi_1).wrapping_add(Self::from(carry));
        (lo, hi)
    }

    fn widening_add_u(self, other: Self) -> (Self, Self) {
        let (lo, carry) = self.overflowing_add_u(other);
        (lo, Self::from(carry))
    }

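    // The signed and signed-unsigned widening multiplies are derived from the
    // unsigned one. When an operand is negative, its unsigned interpretation is
    // the signed value plus 2^BITS, so the spurious `operand * 2^BITS` term is
    // subtracted from the high half (and, in `widening_mul_su`, added back when
    // `other` is to be read as a large unsigned value).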
    fn widening_mul_s(self, other: Self) -> (Self, Self) {
        let (lo, hi) = self.widening_mul_u(other);
        let hi = hi
            - if self.is_negative() { other } else { Self::MIN_U }
            - if other.is_negative() { self } else { Self::MIN_U };
        (lo, hi)
    }

    fn widening_mul_su(self, other: Self) -> (Self, Self) {
        if !other.is_negative() {
            self.widening_mul_s(other)
        } else {
            let (lo, hi) = self.widening_mul_s(other);
            let hi = hi + self;
            (lo, hi)
        }
    }

    fn widening_mul_u(self, other: Self) -> (Self, Self) {
        self._widening_mul_u(other)
    }

    fn widening_sub_s(self, other: Self) -> (Self, Self) {
        let hi_0 = if self.is_negative() { Self::MAX_U } else { Self::MIN_U };
        let hi_1 = if other.is_negative() { Self::MAX_U } else { Self::MIN_U };
        let (lo, borrow) = self.overflowing_sub_u(other);
        let hi = hi_0.wrapping_sub(hi_1).wrapping_sub(Self::from(borrow));
        (lo, hi)
    }

    fn widening_sub_u(self, other: Self) -> (Self, Self) {
        let (lo, borrow) = self.overflowing_sub_u(other);
        (lo, if borrow { Self::MAX_U } else { Self::MIN_U })
    }

    fn wrapping_add(self, other: Self) -> Self;

    fn wrapping_div_u(self, other: Self) -> Self;

    fn wrapping_div_s(self, other: Self) -> Self;

    fn wrapping_mul(self, other: Self) -> Self;

    fn wrapping_rem_s(self, other: Self) -> Self;

    fn wrapping_rem_u(self, other: Self) -> Self;

    fn wrapping_shl(self, other: u32) -> Self;

    fn wrapping_shr(self, other: u32) -> Self;

    fn wrapping_sra(self, other: u32) -> Self;

    fn wrapping_sub(self, other: Self) -> Self;

    fn zext(self, other: u32) -> Self {
        self.wrapping_shl(Self::BITS - other - 1).wrapping_shr(Self::BITS - other - 1)
    }
}

macro_rules! construct_eint_wrap_from_uint {
    ($name:ident, $uint:ty, $from:ty) => {
        impl core::convert::From<$from> for $name {
            fn from(small: $from) -> Self {
                Self(small as $uint)
            }
        }
    };
}

macro_rules! construct_eint_wrap {
    ($name:ident, $uint:ty, $sint:ty, $fstring:expr) => {
        #[derive(Copy, Clone, Default, PartialEq, Eq)]
        pub struct $name(pub $uint);

        construct_eint_wrap_from_uint!($name, $uint, bool);
        construct_eint_wrap_from_uint!($name, $uint, i8);
        construct_eint_wrap_from_uint!($name, $uint, i16);
        construct_eint_wrap_from_uint!($name, $uint, i32);
        construct_eint_wrap_from_uint!($name, $uint, i64);
        construct_eint_wrap_from_uint!($name, $uint, i128);
        construct_eint_wrap_from_uint!($name, $uint, u8);
        construct_eint_wrap_from_uint!($name, $uint, u16);
        construct_eint_wrap_from_uint!($name, $uint, u32);
        construct_eint_wrap_from_uint!($name, $uint, u64);
        construct_eint_wrap_from_uint!($name, $uint, u128);

        impl core::cmp::Ord for $name {
            fn cmp(&self, other: &Self) -> core::cmp::Ordering {
                self.0.cmp(&other.0)
            }
        }

        impl core::cmp::PartialOrd for $name {
            fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
                self.0.partial_cmp(&other.0)
            }
        }

        impl core::fmt::Debug for $name {
            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                write!(f, "{:x}", self)
            }
        }

        impl core::fmt::Display for $name {
            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                write!(f, "{:x}", self)
            }
        }

        impl core::fmt::LowerHex for $name {
            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                write!(f, $fstring, self.0)
            }
        }

        impl core::ops::Add for $name {
            type Output = Self;
            fn add(self, other: Self) -> Self {
                Self(self.0.wrapping_add(other.0))
            }
        }

        impl core::ops::AddAssign for $name {
            fn add_assign(&mut self, other: Self) {
                self.0 = self.0.wrapping_add(other.0)
            }
        }

        impl core::ops::Div for $name {
            type Output = Self;
            fn div(self, other: Self) -> Self::Output {
                if other.0 == 0 {
                    Self::MAX_U
                } else {
                    Self(self.0.wrapping_div(other.0))
                }
            }
        }

        impl core::ops::DivAssign for $name {
            fn div_assign(&mut self, other: Self) {
                self.0 = if other.0 == 0 { <$uint>::MAX } else { self.0.wrapping_div(other.0) }
            }
        }

        impl core::ops::BitAnd for $name {
            type Output = Self;
            fn bitand(self, other: Self) -> Self::Output {
                Self(self.0 & other.0)
            }
        }

        impl core::ops::BitAndAssign for $name {
            fn bitand_assign(&mut self, other: Self) {
                self.0 &= other.0
            }
        }

        impl core::ops::BitOr for $name {
            type Output = Self;
            fn bitor(self, other: Self) -> Self::Output {
                Self(self.0 | other.0)
            }
        }

        impl core::ops::BitOrAssign for $name {
            fn bitor_assign(&mut self, other: Self) {
                self.0 |= other.0
            }
        }

        impl core::ops::BitXor for $name {
            type Output = Self;
            fn bitxor(self, other: Self) -> Self::Output {
                Self(self.0 ^ other.0)
            }
        }

        impl core::ops::BitXorAssign for $name {
            fn bitxor_assign(&mut self, other: Self) {
                self.0 ^= other.0
            }
        }

        impl core::ops::Mul for $name {
            type Output = Self;
            fn mul(self, other: Self) -> Self::Output {
                Self(self.0.wrapping_mul(other.0))
            }
        }

        impl core::ops::MulAssign for $name {
            fn mul_assign(&mut self, other: Self) {
                self.0 = self.0.wrapping_mul(other.0)
            }
        }

        impl core::ops::Neg for $name {
            type Output = Self;
            fn neg(self) -> Self::Output {
                Self((!self.0).wrapping_add(1))
            }
        }

        impl core::ops::Not for $name {
            type Output = Self;
            fn not(self) -> Self::Output {
                Self(!self.0)
            }
        }

        impl core::ops::Rem for $name {
            type Output = Self;
            fn rem(self, other: Self) -> Self::Output {
                if other.0 == 0 {
                    self
                } else {
                    Self(self.0.wrapping_rem(other.0))
                }
            }
        }

        impl core::ops::RemAssign for $name {
            fn rem_assign(&mut self, other: Self) {
                self.0 = if other.0 == 0 { self.0 } else { self.0.wrapping_rem(other.0) }
            }
        }

        impl core::ops::Shl<u32> for $name {
            type Output = Self;
            fn shl(self, other: u32) -> Self::Output {
                Self(self.0.wrapping_shl(other))
            }
        }

        impl core::ops::Shr<u32> for $name {
            type Output = Self;
            fn shr(self, other: u32) -> Self::Output {
                Self(self.0.wrapping_shr(other))
            }
        }

        impl core::ops::Sub for $name {
            type Output = Self;
            fn sub(self, other: Self) -> Self::Output {
                Self(self.0.wrapping_sub(other.0))
            }
        }

        impl core::ops::SubAssign for $name {
            fn sub_assign(&mut self, other: Self) {
                self.0 = self.0.wrapping_sub(other.0)
            }
        }

        impl Eint for $name {
            const BITS: u32 = <$uint>::MIN.leading_zeros();
            const MIN_U: Self = Self(0);
            const MAX_U: Self = Self(<$uint>::MAX);
            const MIN_S: Self = Self(<$sint>::MIN as $uint);
            const MAX_S: Self = Self(<$sint>::MAX as $uint);
            const ONE: Self = Self(1);
            const ZERO: Self = Self(0);

            fn bit(&self, n: u32) -> bool {
                self.0.wrapping_shr(n) & 1 != 0
            }

            fn bit_clr(&mut self, n: u32) {
                self.0 &= !<$name>::ONE.0.wrapping_shl(n)
            }

            fn bit_set(&mut self, n: u32) {
                self.0 |= <$name>::ONE.0.wrapping_shl(n)
            }

            fn clz(&self) -> u32 {
                self.0.leading_zeros()
            }

            fn cmp_s(&self, other: &Self) -> core::cmp::Ordering {
                (self.0 as $sint).cmp(&(other.0 as $sint))
            }

            fn cmp_u(&self, other: &Self) -> core::cmp::Ordering {
                self.0.cmp(&other.0)
            }

            fn cpop(&self) -> u32 {
                self.0.count_ones()
            }

            fn ctz(&self) -> u32 {
                self.0.trailing_zeros()
            }

            fn get(mem: &[u8]) -> Self {
                let mut buf = [0u8; Self::BITS as usize >> 3];
                buf.copy_from_slice(&mem[0..Self::BITS as usize >> 3]);
                Self(<$uint>::from_le_bytes(buf))
            }

            fn hi(self) -> Self {
                self >> (Self::BITS >> 1)
            }

            fn is_negative(&self) -> bool {
                (self.0 as $sint).is_negative()
            }

            fn is_positive(&self) -> bool {
                (self.0 as $sint).is_positive()
            }

            fn lo(self) -> Self {
                self & (Self::MAX_U >> (Self::BITS >> 1))
            }

            fn lo_sext(self) -> Self {
                self.sext((Self::BITS >> 1) - 1)
            }

            fn overflowing_add_s(self, other: Self) -> (Self, bool) {
                let (r, carry) = (self.0 as $sint).overflowing_add(other.0 as $sint);
                (Self(r as $uint), carry)
            }

            fn overflowing_add_u(self, other: Self) -> (Self, bool) {
                let (r, carry) = self.0.overflowing_add(other.0);
                (Self(r), carry)
            }

            fn overflowing_mul_s(self, other: Self) -> (Self, bool) {
                let (r, carry) = (self.0 as $sint).overflowing_mul(other.0 as $sint);
                (Self(r as $uint), carry)
            }

            fn overflowing_mul_u(self, other: Self) -> (Self, bool) {
                let (r, carry) = self.0.overflowing_mul(other.0);
                (Self(r), carry)
            }

            fn overflowing_sub_s(self, other: Self) -> (Self, bool) {
                let (r, borrow) = (self.0 as $sint).overflowing_sub(other.0 as $sint);
                (Self(r as $uint), borrow)
            }

            fn overflowing_sub_u(self, other: Self) -> (Self, bool) {
                let (r, borrow) = self.0.overflowing_sub(other.0);
                (Self(r), borrow)
            }

            fn put(&self, mem: &mut [u8]) {
                let buf = self.0.to_le_bytes();
                mem.copy_from_slice(&buf);
            }

            fn put_lo(&self, mem: &mut [u8]) {
                let buf = self.0.to_le_bytes();
                if Self::BITS == 8 {
                    mem[0] = buf[0] & 0x0f
                } else {
                    mem.copy_from_slice(&buf[0..buf.len() >> 1]);
                }
            }

            fn u8(self) -> u8 {
                self.0 as u8
            }

            fn u16(self) -> u16 {
                self.0 as u16
            }

            fn u32(self) -> u32 {
                self.0 as u32
            }

            fn u64(self) -> u64 {
                self.0 as u64
            }

            fn wrapping_add(self, other: Self) -> Self {
                Self(self.0.wrapping_add(other.0))
            }

            fn wrapping_div_s(self, other: Self) -> Self {
                if other.0 == 0 {
                    Self::MAX_U
                } else if self == Self::MIN_S && other == Self::MAX_U {
                    Self::MIN_S
                } else {
                    Self((self.0 as $sint).wrapping_div(other.0 as $sint) as $uint)
                }
            }

            fn wrapping_div_u(self, other: Self) -> Self {
                if other.0 == 0 {
                    Self::MAX_U
                } else {
                    Self(self.0.wrapping_div(other.0))
                }
            }

            fn wrapping_mul(self, other: Self) -> Self {
                Self(self.0.wrapping_mul(other.0))
            }

            fn wrapping_rem_s(self, other: Self) -> Self {
                if other.0 == 0 {
                    self
                } else if self == Self::MIN_S && other == Self::MAX_U {
                    Self::MIN_U
                } else {
                    Self((self.0 as $sint).wrapping_rem(other.0 as $sint) as $uint)
                }
            }

            fn wrapping_rem_u(self, other: Self) -> Self {
                if other.0 == 0 {
                    self
                } else {
                    Self(self.0.wrapping_rem(other.0))
                }
            }

            fn wrapping_shl(self, other: u32) -> Self {
                Self(self.0.wrapping_shl(other))
            }

            fn wrapping_shr(self, other: u32) -> Self {
                Self(self.0.wrapping_shr(other))
            }

            fn wrapping_sra(self, other: u32) -> Self {
                Self((self.0 as $sint).wrapping_shr(other) as $uint)
            }

            fn wrapping_sub(self, other: Self) -> Self {
                Self(self.0.wrapping_sub(other.0))
            }
        }
    };
}

macro_rules! uint_wrap_from_impl {
    ($name:ty, $from:ty) => {
        impl From<$from> for $name {
            fn from(small: $from) -> Self {
                Self::from(small.0)
            }
        }
    };
}

construct_eint_wrap!(E8, u8, i8, "{:02x}");
construct_eint_wrap!(E16, u16, i16, "{:04x}");
construct_eint_wrap!(E32, u32, i32, "{:08x}");
construct_eint_wrap!(E64, u64, i64, "{:016x}");
construct_eint_wrap!(E128, u128, i128, "{:032x}");
impl_widening_mul_u_wrap!(E8, u16);
impl_widening_mul_u_wrap!(E16, u32);
impl_widening_mul_u_wrap!(E32, u64);
impl_widening_mul_u_wrap!(E64, u128);
uint_wrap_from_impl!(E16, E8);
uint_wrap_from_impl!(E32, E8);
uint_wrap_from_impl!(E32, E16);
uint_wrap_from_impl!(E64, E8);
uint_wrap_from_impl!(E64, E16);
uint_wrap_from_impl!(E64, E32);
uint_wrap_from_impl!(E128, E8);
uint_wrap_from_impl!(E128, E16);
uint_wrap_from_impl!(E128, E32);
uint_wrap_from_impl!(E128, E64);

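// E128 has no wider primitive to widen into, so its unsigned widening multiply
// splits both operands into 64-bit halves and recombines the four partial
// products (the classic "mulhu" decomposition); the low half of the result is
// simply the wrapping product.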
impl EintWideningMulU for E128 {
    fn _widening_mul_u(self, other: Self) -> (Self, Self) {
        let x0 = self.lo();
        let x1 = self.hi();
        let y0 = other.lo();
        let y1 = other.hi();
        let w0 = x0.wrapping_mul(y0);
        let t = x1.wrapping_mul(y0).wrapping_add(w0.hi());
        let w1 = t.lo();
        let w2 = t.hi();
        let w1 = x0.wrapping_mul(y1).wrapping_add(w1);
        let hi = x1.wrapping_mul(y1).wrapping_add(w2).wrapping_add(w1.hi());
        let lo = self.wrapping_mul(other);
        (lo, hi)
    }
}

macro_rules! construct_eint_twin_from_uint {
    ($name:ident, $from:ty) => {
        impl core::convert::From<$from> for $name {
            fn from(small: $from) -> Self {
                let mut b = [u64::MIN; Self::BITS as usize >> 6];
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        &small as *const $from as *const u8,
                        b.as_mut_ptr() as *mut u8,
                        core::mem::size_of::<$from>(),
                    );
                }
                Self(b)
            }
        }
    };
}

macro_rules! construct_eint_twin_from_sint {
    ($name:ident, $from:ty) => {
        impl core::convert::From<$from> for $name {
            fn from(small: $from) -> Self {
                let mut b = if small >= 0 {
                    [u64::MIN; Self::BITS as usize >> 6]
                } else {
                    [u64::MAX; Self::BITS as usize >> 6]
                };
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        &small as *const $from as *const u8,
                        b.as_mut_ptr() as *mut u8,
                        core::mem::size_of::<$from>(),
                    );
                }
                Self(b)
            }
        }
    };
}

macro_rules! construct_eint_twin {
    ($name:ident, $size:expr) => {
        #[derive(Copy, Clone, Default, PartialEq, Eq)]
        pub struct $name(pub [u64; $size]);

        impl core::convert::From<bool> for $name {
            fn from(small: bool) -> Self {
                if small {
                    Self::ONE
                } else {
                    Self::MIN_U
                }
            }
        }

        construct_eint_twin_from_sint!($name, i8);
        construct_eint_twin_from_sint!($name, i16);
        construct_eint_twin_from_sint!($name, i32);
        construct_eint_twin_from_sint!($name, i64);
        construct_eint_twin_from_sint!($name, i128);
        construct_eint_twin_from_uint!($name, u8);
        construct_eint_twin_from_uint!($name, u16);
        construct_eint_twin_from_uint!($name, u32);
        construct_eint_twin_from_uint!($name, u64);
        construct_eint_twin_from_uint!($name, u128);

        impl core::cmp::PartialOrd for $name {
            fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
                Some(self.cmp(other))
            }
        }

        impl core::cmp::Ord for $name {
            fn cmp(&self, other: &Self) -> core::cmp::Ordering {
                self.cmp_u(other)
            }
        }

        impl core::fmt::Debug for $name {
            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                write!(f, "{:x}", self)
            }
        }

        impl core::fmt::Display for $name {
            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                write!(f, "{:x}", self)
            }
        }

        impl core::fmt::LowerHex for $name {
            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                for i in self.0.iter().rev() {
                    write!(f, "{:016x}", i)?
                }
                Ok(())
            }
        }

        impl core::ops::Add for $name {
            type Output = Self;
            fn add(self, other: Self) -> Self {
                self.wrapping_add(other)
            }
        }

        impl core::ops::AddAssign for $name {
            fn add_assign(&mut self, other: Self) {
                *self = self.wrapping_add(other)
            }
        }

        impl core::ops::BitAnd for $name {
            type Output = Self;
            fn bitand(self, other: Self) -> Self::Output {
                let mut b = [0u64; $size];
                for i in 0..$size {
                    b[i] = self.0[i] & other.0[i];
                }
                Self(b)
            }
        }

        impl core::ops::BitAndAssign for $name {
            fn bitand_assign(&mut self, other: Self) {
                for i in 0..$size {
                    self.0[i] &= other.0[i];
                }
            }
        }

        impl core::ops::BitOr for $name {
            type Output = Self;
            fn bitor(self, other: Self) -> Self::Output {
                let mut b = [0u64; $size];
                for i in 0..$size {
                    b[i] = self.0[i] | other.0[i];
                }
                Self(b)
            }
        }

        impl core::ops::BitOrAssign for $name {
            fn bitor_assign(&mut self, other: Self) {
                for i in 0..$size {
                    self.0[i] |= other.0[i];
                }
            }
        }

        impl core::ops::BitXor for $name {
            type Output = Self;
            fn bitxor(self, other: Self) -> Self::Output {
                let mut b = [0u64; $size];
                for i in 0..$size {
                    b[i] = self.0[i] ^ other.0[i];
                }
                Self(b)
            }
        }

        impl core::ops::BitXorAssign for $name {
            fn bitxor_assign(&mut self, other: Self) {
                for i in 0..$size {
                    self.0[i] ^= other.0[i];
                }
            }
        }

        impl core::ops::Div for $name {
            type Output = Self;
            fn div(self, other: Self) -> Self::Output {
                self.wrapping_div_u(other)
            }
        }

        impl core::ops::DivAssign for $name {
            fn div_assign(&mut self, other: Self) {
                *self = self.wrapping_div_u(other)
            }
        }

        impl core::ops::Mul for $name {
            type Output = Self;
            fn mul(self, other: Self) -> Self::Output {
                self.wrapping_mul(other)
            }
        }

        impl core::ops::MulAssign for $name {
            fn mul_assign(&mut self, other: Self) {
                *self = self.wrapping_mul(other)
            }
        }

        impl core::ops::Neg for $name {
            type Output = Self;
            fn neg(self) -> Self::Output {
                (!self).wrapping_add(<$name>::ONE)
            }
        }

        impl core::ops::Not for $name {
            type Output = Self;
            fn not(self) -> Self::Output {
                let mut b = [0u64; $size];
                for i in 0..$size {
                    b[i] = !self.0[i];
                }
                Self(b)
            }
        }

        impl core::ops::Rem for $name {
            type Output = Self;
            fn rem(self, other: Self) -> Self::Output {
                self.wrapping_rem_u(other)
            }
        }

        impl core::ops::RemAssign for $name {
            fn rem_assign(&mut self, other: Self) {
                *self = self.wrapping_rem_u(other);
            }
        }

        impl core::ops::Shl<u32> for $name {
            type Output = Self;
            fn shl(self, other: u32) -> Self::Output {
                self.wrapping_shl(other)
            }
        }

        impl core::ops::Shr<u32> for $name {
            type Output = Self;
            fn shr(self, other: u32) -> Self::Output {
                self.wrapping_shr(other)
            }
        }

        impl core::ops::Sub for $name {
            type Output = Self;
            fn sub(self, other: Self) -> Self::Output {
                self.wrapping_sub(other)
            }
        }

        impl core::ops::SubAssign for $name {
            fn sub_assign(&mut self, other: Self) {
                *self = self.wrapping_sub(other)
            }
        }

        impl Eint for $name {
            const BITS: u32 = $size * 64;
            const MAX_S: Self = {
                let mut b = [u64::MAX; $size as usize];
                b[$size as usize - 1] = i64::MAX as u64;
                Self(b)
            };
            const MAX_U: Self = Self([u64::MAX; $size]);
            const MIN_S: Self = {
                let mut b = [u64::MIN; $size as usize];
                b[$size as usize - 1] = i64::MIN as u64;
                Self(b)
            };
            const MIN_U: Self = Self([u64::MIN; $size]);
            const ONE: Self = {
                let mut b = [u64::MIN; $size as usize];
                b[0] = 1;
                Self(b)
            };
            const ZERO: Self = Self([u64::MIN; $size]);

            fn bit(&self, n: u32) -> bool {
                let n = n % Self::BITS;
                self.0[n as usize / 64] & (1 << (n % 64)) != 0
            }

            fn bit_clr(&mut self, n: u32) {
                let n = n % Self::BITS;
                self.0[n as usize / 64] &= !(1 << (n % 64))
            }

            fn bit_set(&mut self, n: u32) {
                let n = n % Self::BITS;
                self.0[n as usize / 64] |= 1 << (n % 64)
            }

            fn clz(&self) -> u32 {
                let mut r = 0;
                for i in 0..$size {
                    let w = self.0[$size - i - 1];
                    if w == 0 {
                        r += 64;
                    } else {
                        r += w.leading_zeros();
                        break;
                    }
                }
                r
            }

            fn cmp_s(&self, other: &Self) -> core::cmp::Ordering {
                let l_sign = self.is_negative();
                let r_sign = other.is_negative();
                match (l_sign, r_sign) {
                    (false, false) => self.cmp(other),
                    (false, true) => core::cmp::Ordering::Greater,
                    (true, false) => core::cmp::Ordering::Less,
                    (true, true) => self.cmp(other),
                }
            }

            fn cmp_u(&self, other: &Self) -> core::cmp::Ordering {
                self.0.iter().rev().cmp(other.0.iter().rev())
            }

            fn cpop(&self) -> u32 {
                let mut r = 0;
                for i in 0..$size {
                    r += self.0[i].count_ones();
                }
                r
            }

            fn ctz(&self) -> u32 {
                let mut r = 0;
                for i in 0..$size {
                    let w = self.0[i];
                    if w == 0 {
                        r += 64;
                    } else {
                        r += w.trailing_zeros();
                        break;
                    }
                }
                r
            }

            fn get(mem: &[u8]) -> Self {
                let mut b = [0u64; $size];
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        mem.as_ptr() as *const u8,
                        b.as_mut_ptr() as *mut u8,
                        Self::BITS as usize >> 3,
                    );
                }
                Self(b)
            }

            fn hi(self) -> Self {
                let mut b = [0u64; $size];
                b[0..$size / 2].copy_from_slice(&self.0[$size / 2..$size]);
                Self(b)
            }

            fn is_negative(&self) -> bool {
                (self.0[$size - 1] as i64).is_negative()
            }

            fn is_positive(&self) -> bool {
                (self.0[$size - 1] as i64).is_positive()
            }

            fn lo(self) -> Self {
                let mut b = [0u64; $size];
                b[0..$size / 2].copy_from_slice(&self.0[0..$size / 2]);
                Self(b)
            }

            fn lo_sext(self) -> Self {
                if (self.0[$size / 2 - 1] as i64).is_negative() {
                    let mut b = Self::MAX_U.0;
                    b[0..$size / 2].copy_from_slice(&self.0[0..$size / 2]);
                    Self(b)
                } else {
                    self
                }
            }

            fn overflowing_add_s(self, other: Self) -> (Self, bool) {
                let r = self.wrapping_add(other);
                if self.is_negative() == other.is_negative() {
                    (r, r.is_negative() != self.is_negative())
                } else {
                    (r, false)
                }
            }

            fn overflowing_add_u(self, other: Self) -> (Self, bool) {
                let mut b = [0u64; $size];
                let mut carry = false;
                for i in 0..$size {
                    let (r0, carry0) = self.0[i].overflowing_add(other.0[i]);
                    let (r1, carry1) = r0.overflowing_add(carry as u64);
                    b[i] = r1;
                    carry = carry0 | carry1
                }
                (Self(b), carry)
            }

            fn overflowing_mul_s(self, other: Self) -> (Self, bool) {
                let (lo, hi) = self.widening_mul_s(other);
                if !hi.is_negative() {
                    (lo, hi != Self::MIN_U || lo.is_negative())
                } else {
                    (lo, hi != Self::MAX_U || lo < Self::MIN_S)
                }
            }

            fn overflowing_mul_u(self, other: Self) -> (Self, bool) {
                let (lo, hi) = self.widening_mul_u(other);
                (lo, hi != Self::ZERO)
            }

            fn overflowing_sub_s(self, other: Self) -> (Self, bool) {
                let r = self.wrapping_sub(other);
                if self.is_negative() == other.is_negative() {
                    (r, false)
                } else {
                    (r, r.is_negative() != self.is_negative())
                }
            }

            fn overflowing_sub_u(self, other: Self) -> (Self, bool) {
                let mut b = [0u64; $size];
                let mut borrow = false;
                for i in 0..$size {
                    let (r0, borrow0) = self.0[i].overflowing_sub(other.0[i]);
                    let (r1, borrow1) = r0.overflowing_sub(borrow as u64);
                    b[i] = r1;
                    borrow = borrow0 | borrow1
                }
                (Self(b), borrow)
            }

            fn put(&self, mem: &mut [u8]) {
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        self.0.as_ptr() as *const u8,
                        mem.as_mut_ptr(),
                        Self::BITS as usize >> 3,
                    );
                }
            }

            fn put_lo(&self, mem: &mut [u8]) {
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        self.0.as_ptr() as *const u8,
                        mem.as_mut_ptr(),
                        Self::BITS as usize >> 4,
                    );
                }
            }

            fn u8(self) -> u8 {
                self.0[0] as u8
            }

            fn u16(self) -> u16 {
                self.0[0] as u16
            }

            fn u32(self) -> u32 {
                self.0[0] as u32
            }

            fn u64(self) -> u64 {
                self.0[0]
            }

            fn wrapping_add(self, other: Self) -> Self {
                let mut b = [0u64; $size];
                let mut carry = false;
                for i in 0..$size {
                    let (r0, carry0) = self.0[i].overflowing_add(other.0[i]);
                    let (r1, carry1) = r0.overflowing_add(carry as u64);
                    b[i] = r1;
                    carry = carry0 | carry1
                }
                Self(b)
            }

            fn wrapping_div_s(self, other: Self) -> Self {
                if other == Self::MIN_U {
                    Self::MAX_U
                } else if self == Self::MIN_S && other == Self::MAX_U {
                    Self::MIN_S
                } else {
                    self.div_s(other).0
                }
            }

            fn wrapping_div_u(self, other: Self) -> Self {
                if other == Self::MIN_U {
                    Self::MAX_U
                } else {
                    self.div_u(other).0
                }
            }

            fn wrapping_mul(self, other: Self) -> Self {
                let mut b = [0u64; $size];
                for i in 0..$size {
                    let mut c = 0u64;
                    for j in 0..$size - i {
                        let uv: u128 = (self.0[j] as u128) * other.0[i] as u128 + b[i + j] as u128 + c as u128;
                        b[i + j] = uv as u64;
                        c = (uv >> 64) as u64;
                    }
                }
                Self(b)
            }

            fn wrapping_rem_s(self, other: Self) -> Self {
                let minus_min = Self::ONE << (Self::BITS - 1);
                let minus_one = Self::MAX_U;
                if other == Self::MIN_U {
                    self
                } else if self == minus_min && other == minus_one {
                    Self::MIN_U
                } else {
                    self.div_s(other).1
                }
            }

            fn wrapping_rem_u(self, other: Self) -> Self {
                if other == Self::MIN_U {
                    self
                } else {
                    self.div_u(other).1
                }
            }

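            // Shifts on the multi-limb types are decomposed into a whole-limb
            // shift (`shamt / 64`) plus a bit shift (`shamt % 64`); when the
            // bit shift is non-zero, the bits that cross a limb boundary are
            // carried over from the neighbouring limb in a second pass.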
            fn wrapping_shl(self, other: u32) -> Self {
                let shamt = other % Self::BITS;
                let mut b = [0u64; $size];
                let elem_shift = shamt as usize / 64;
                let bits_shift = shamt as usize % 64;
                for i in elem_shift..$size {
                    b[i] = self.0[i - elem_shift] << bits_shift;
                }
                if bits_shift != 0 {
                    for i in elem_shift + 1..$size {
                        b[i] += self.0[i - 1 - elem_shift] >> (64 - bits_shift);
                    }
                }
                Self(b)
            }

            fn wrapping_shr(self, other: u32) -> Self {
                let shamt = other % Self::BITS;
                let mut b = [0u64; $size];
                let elem_shift = shamt as usize / 64;
                let bits_shift = shamt as usize % 64;
                for i in elem_shift..$size {
                    b[i - elem_shift] = self.0[i] >> bits_shift;
                }
                if bits_shift != 0 {
                    for i in elem_shift + 1..$size {
                        b[i - elem_shift - 1] += self.0[i] << (64 - bits_shift);
                    }
                }
                Self(b)
            }

            fn wrapping_sra(self, other: u32) -> Self {
                let shamt = other % Self::BITS;
                let hi = if self.is_negative() && shamt != 0 {
                    Self::MAX_U << (Self::BITS - shamt)
                } else {
                    Self::MIN_U
                };
                let lo = self.wrapping_shr(shamt);
                hi | lo
            }

            fn wrapping_sub(self, other: Self) -> Self {
                let mut b = [0u64; $size];
                let mut borrow = false;
                for i in 0..$size {
                    let (r0, borrow0) = self.0[i].overflowing_sub(other.0[i]);
                    let (r1, borrow1) = r0.overflowing_sub(borrow as u64);
                    b[i] = r1;
                    borrow = borrow0 | borrow1
                }
                Self(b)
            }
        }

        impl $name {
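            // Signed division is built on top of the unsigned one: take
            // absolute values, divide, then restore the signs (the quotient is
            // negative when the operand signs differ, the remainder follows
            // the sign of the dividend).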
            fn div_s(self, other: Self) -> (Self, Self) {
                let x = self;
                let y = other;
                let x_is_neg = x.is_negative();
                let y_is_neg = y.is_negative();
                let x_abs = if x_is_neg { -x } else { x };
                let y_abs = if y_is_neg { -y } else { y };
                let q_is_neg = x_is_neg ^ y_is_neg;
                let r = x_abs.div_u(y_abs);
                let quo = r.0;
                let rem = r.1;
                let quo = if q_is_neg { -quo } else { quo };
                let rem = if x_is_neg { -rem } else { rem };
                (quo, rem)
            }
        }
    };
}

macro_rules! uint_twin_from_impl {
    ($name:ident, $from:ty) => {
        impl core::convert::From<$from> for $name {
            fn from(small: $from) -> Self {
                let mut b = [0u64; Self::BITS as usize >> 6];
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        &small as *const $from as *const u8,
                        b.as_mut_ptr() as *mut u8,
                        <$from>::BITS as usize >> 3,
                    );
                }
                Self(b)
            }
        }
    };
}

construct_eint_twin!(E256, 4);
construct_eint_twin!(E512, 8);
construct_eint_twin!(E1024, 16);
construct_eint_twin!(E2048, 32);
impl_widening_mul_u_twin!(E256, 4);
impl_widening_mul_u_twin!(E512, 8);
impl_widening_mul_u_twin!(E1024, 16);
impl_widening_mul_u_twin!(E2048, 32);
uint_twin_from_impl!(E256, E8);
uint_twin_from_impl!(E256, E16);
uint_twin_from_impl!(E256, E32);
uint_twin_from_impl!(E256, E64);
uint_twin_from_impl!(E256, E128);
uint_twin_from_impl!(E512, E8);
uint_twin_from_impl!(E512, E16);
uint_twin_from_impl!(E512, E32);
uint_twin_from_impl!(E512, E64);
uint_twin_from_impl!(E512, E128);
uint_twin_from_impl!(E512, E256);
uint_twin_from_impl!(E1024, E8);
uint_twin_from_impl!(E1024, E16);
uint_twin_from_impl!(E1024, E32);
uint_twin_from_impl!(E1024, E64);
uint_twin_from_impl!(E1024, E128);
uint_twin_from_impl!(E1024, E256);
uint_twin_from_impl!(E1024, E512);
uint_twin_from_impl!(E2048, E8);
uint_twin_from_impl!(E2048, E16);
uint_twin_from_impl!(E2048, E32);
uint_twin_from_impl!(E2048, E64);
uint_twin_from_impl!(E2048, E128);
uint_twin_from_impl!(E2048, E256);
uint_twin_from_impl!(E2048, E512);
uint_twin_from_impl!(E2048, E1024);

use uint::construct_uint;

construct_uint! { struct U256(4); }
construct_uint! { struct U512(8); }
construct_uint! { struct U1024(16); }
construct_uint! { struct U2048(32); }

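// Unsigned division and remainder for the multi-limb types are delegated to the
// `uint` crate: U256/U512/U1024/U2048 share the [u64; N] limb layout of
// E256/E512/E1024/E2048, so the arrays can be passed back and forth directly
// and `div_mod` yields both quotient and remainder in one call.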
impl E256 {
    fn div_u(self, other: Self) -> (Self, Self) {
        let (quo, rem) = U256(self.0).div_mod(U256(other.0));
        (Self(quo.0), Self(rem.0))
    }
}

impl E512 {
    fn div_u(self, other: Self) -> (Self, Self) {
        let (quo, rem) = U512(self.0).div_mod(U512(other.0));
        (Self(quo.0), Self(rem.0))
    }
}

impl E1024 {
    fn div_u(self, other: Self) -> (Self, Self) {
        let (quo, rem) = U1024(self.0).div_mod(U1024(other.0));
        (Self(quo.0), Self(rem.0))
    }
}

impl E2048 {
    fn div_u(self, other: Self) -> (Self, Self) {
        let (quo, rem) = U2048(self.0).div_mod(U2048(other.0));
        (Self(quo.0), Self(rem.0))
    }
}
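
// Sanity checks for the items defined above. These are illustrative only and
// assume `cargo test` runs on a hosted target where the standard test harness
// is available (the library itself stays `no_std`).
#[cfg(test)]
mod sanity_tests {
    use super::*;

    #[test]
    fn widening_mul_u_e64() {
        // (2^64 - 1)^2 = 2^128 - 2^65 + 1, i.e. high half 2^64 - 2, low half 1.
        let a = E64::from(u64::MAX);
        let (lo, hi) = a.widening_mul_u(a);
        assert_eq!(lo, E64::from(1u64));
        assert_eq!(hi, E64::from(u64::MAX - 1));
    }

    #[test]
    fn widening_mul_s_e64() {
        // -2 * 3 = -6, so the high half is the sign extension (all ones).
        let (lo, hi) = E64::from(-2i64).widening_mul_s(E64::from(3i64));
        assert_eq!(lo, E64::from(-6i64));
        assert_eq!(hi, E64::MAX_U);
    }

    #[test]
    fn average_add_u_e8() {
        // (200 + 100) / 2 = 150, computed without a wider intermediate.
        let r = E8::from(200u32).average_add_u(E8::from(100u32));
        assert_eq!(r, E8::from(150u32));
    }

    #[test]
    fn twin_sign_extension_and_bits() {
        // From<i8> sign extends across all limbs.
        assert_eq!(E256::from(-1i8), E256::MAX_U);
        // Shifting a single bit up and back down is lossless below BITS.
        let x = E256::ONE << 200;
        assert!(x.bit(200));
        assert_eq!(x.clz(), 55);
        assert_eq!(x >> 200, E256::ONE);
    }

    #[test]
    fn twin_signed_div_rem() {
        // The quotient takes the combined sign, the remainder follows the dividend.
        let q = E256::from(-6i64).wrapping_div_s(E256::from(2i64));
        assert_eq!(q, E256::from(-3i64));
        let r = E256::from(-7i64).wrapping_rem_s(E256::from(3i64));
        assert_eq!(r, E256::from(-1i64));
    }
}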