1use crate::logic::*;
2use crate::{CLog2, SafeDivCeil};
3use itertools::izip;
4use std::num::NonZeroU8;
5use std::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign};
6
7#[derive(Debug, Clone, Copy, PartialEq, Eq)]
8pub(super) enum OpResult {
9 Unchanged,
10 Changed,
11}
12
13impl BitAnd for OpResult {
14 type Output = Self;
15
16 #[inline]
17 fn bitand(self, rhs: Self) -> Self::Output {
18 match (self, rhs) {
19 (Self::Changed, Self::Changed) => Self::Changed,
20 _ => Self::Unchanged,
21 }
22 }
23}
24
25impl BitAndAssign for OpResult {
26 #[inline]
27 fn bitand_assign(&mut self, rhs: Self) {
28 *self = *self & rhs;
29 }
30}
31
32impl BitOr for OpResult {
33 type Output = Self;
34
35 #[inline]
36 fn bitor(self, rhs: Self) -> Self::Output {
37 match (self, rhs) {
38 (Self::Unchanged, Self::Unchanged) => Self::Unchanged,
39 _ => Self::Changed,
40 }
41 }
42}
43
44impl BitOrAssign for OpResult {
45 #[inline]
46 fn bitor_assign(&mut self, rhs: Self) {
47 *self = *self | rhs;
48 }
49}
50
// Indexing helpers: checked in debug builds, unchecked in release builds.
// Callers must guarantee the index is in bounds (the `perform_*` helpers
// debug_assert the slice lengths up front).
#[cfg(not(debug_assertions))]
macro_rules! get {
    ($slice:expr, $i:expr) => {
        // SAFETY: callers are responsible for `$i < $slice.len()`.
        unsafe { *$slice.get_unchecked($i) }
    };
}

// Debug builds: normal checked indexing so bugs panic instead of UB.
#[cfg(debug_assertions)]
macro_rules! get {
    ($slice:expr, $i:expr) => {
        $slice[$i]
    };
}

#[cfg(not(debug_assertions))]
macro_rules! get_mut {
    ($slice:expr, $i:expr) => {
        // SAFETY: callers are responsible for `$i` being in bounds.
        unsafe { $slice.get_unchecked_mut($i) }
    };
}

// Debug builds: checked mutable indexing.
#[cfg(debug_assertions)]
macro_rules! get_mut {
    ($slice:expr, $i:expr) => {
        &mut $slice[$i]
    };
}
85
#[inline]
fn perform_3<Op>(
    width: NonZeroU8,
    out: &mut [Atom],
    lhs: &[Atom],
    rhs: &[Atom],
    mut op: Op,
) -> OpResult
where
    Op: FnMut(AtomWidth, Atom, Atom) -> Atom,
{
    // Applies `op` atom-by-atom to `lhs`/`rhs`, storing into `out`, for a
    // value that is `width` bits wide. Full atoms are passed with
    // `AtomWidth::MAX`; the trailing partial atom (when `width` is not a
    // multiple of `Atom::BITS`) is passed with its exact remaining width.
    // Returns `Changed` if any `out` atom differs from its previous value,
    // compared only over the bits covered by the width.
    debug_assert_eq!(out.len(), width.safe_div_ceil(Atom::BITS).get() as usize);
    debug_assert_eq!(out.len(), lhs.len());
    debug_assert_eq!(out.len(), rhs.len());

    let mut result = OpResult::Unchanged;

    let mut i = 0;
    let mut total_width = width.get();
    while total_width >= Atom::BITS.get() {
        // In-bounds by the length debug_asserts above.
        let out = get_mut!(out, i);
        let lhs = get!(lhs, i);
        let rhs = get!(rhs, i);

        let new = op(AtomWidth::MAX, lhs, rhs);
        if !out.eq(new, AtomWidth::MAX) {
            result = OpResult::Changed;
        }
        *out = new;

        i += 1;
        total_width -= Atom::BITS.get();
    }

    if total_width > 0 {
        let last_out = get_mut!(out, i);
        let last_lhs = get!(lhs, i);
        let last_rhs = get!(rhs, i);

        let last_width = unsafe {
            // SAFETY: here `0 < total_width < Atom::BITS`, a valid AtomWidth.
            AtomWidth::new_unchecked(total_width)
        };

        let last_new = op(last_width, last_lhs, last_rhs);
        if !last_out.eq(last_new, last_width) {
            result = OpResult::Changed;
        }
        *last_out = last_new;
    }

    result
}
139
#[inline]
fn perform_2<Op>(width: NonZeroU8, out: &mut [Atom], rhs: &[Atom], mut op: Op) -> OpResult
where
    Op: FnMut(AtomWidth, Atom, Atom) -> Atom,
{
    // Two-operand variant of `perform_3`: `op` receives the current value of
    // the `out` atom as its second argument and the new result overwrites it.
    // Returns `Changed` if any `out` atom differs from its previous value.
    debug_assert_eq!(out.len(), width.safe_div_ceil(Atom::BITS).get() as usize);
    debug_assert_eq!(out.len(), rhs.len());

    let mut result = OpResult::Unchanged;

    let mut i = 0;
    let mut total_width = width.get();
    while total_width >= Atom::BITS.get() {
        // In-bounds by the length debug_asserts above.
        let out = get_mut!(out, i);
        let rhs = get!(rhs, i);

        let new = op(AtomWidth::MAX, *out, rhs);
        if !out.eq(new, AtomWidth::MAX) {
            result = OpResult::Changed;
        }
        *out = new;

        i += 1;
        total_width -= Atom::BITS.get();
    }

    if total_width > 0 {
        // Trailing partial atom processed at its exact remaining width.
        let last_out = get_mut!(out, i);
        let last_rhs = get!(rhs, i);

        let last_width = unsafe {
            // SAFETY: here `0 < total_width < Atom::BITS`, a valid AtomWidth.
            AtomWidth::new_unchecked(total_width)
        };

        let last_new = op(last_width, *last_out, last_rhs);
        if !last_out.eq(last_new, last_width) {
            result = OpResult::Changed;
        }
        *last_out = last_new;
    }

    result
}
184
#[inline]
fn perform_1<Op>(width: NonZeroU8, out: &mut [Atom], mut op: Op) -> OpResult
where
    Op: FnMut(AtomWidth, Atom) -> Atom,
{
    // One-operand variant of `perform_3`: `op` maps the current `out` atom to
    // its new value in place. Returns `Changed` if any `out` atom differs
    // from its previous value over the covered bits.
    debug_assert_eq!(out.len(), width.safe_div_ceil(Atom::BITS).get() as usize);

    let mut result = OpResult::Unchanged;

    let mut i = 0;
    let mut total_width = width.get();
    while total_width >= Atom::BITS.get() {
        // In-bounds by the length debug_assert above.
        let out = get_mut!(out, i);

        let new = op(AtomWidth::MAX, *out);
        if !out.eq(new, AtomWidth::MAX) {
            result = OpResult::Changed;
        }
        *out = new;

        i += 1;
        total_width -= Atom::BITS.get();
    }

    if total_width > 0 {
        // Trailing partial atom processed at its exact remaining width.
        let last_out = get_mut!(out, i);

        let last_width = unsafe {
            // SAFETY: here `0 < total_width < Atom::BITS`, a valid AtomWidth.
            AtomWidth::new_unchecked(total_width)
        };

        let last_new = op(last_width, *last_out);
        if !last_out.eq(last_new, last_width) {
            result = OpResult::Changed;
        }
        *last_out = last_new;
    }

    result
}
226
227#[inline]
228fn logic_and_impl(a: Atom, b: Atom) -> Atom {
229 Atom {
249 state: (a.state & b.state)
250 | (!a.valid & !b.valid)
251 | (a.state & !b.valid)
252 | (b.state & !a.valid),
253 valid: (a.valid & b.valid) | (!a.state & a.valid) | (!b.state & b.valid),
254 }
255}
256
257#[inline]
258fn logic_or_impl(a: Atom, b: Atom) -> Atom {
259 Atom {
279 state: a.state | !a.valid | b.state | !b.valid,
280 valid: (a.state & a.valid) | (b.state & b.valid) | (a.valid & b.valid),
281 }
282}
283
284#[inline]
285fn logic_xor_impl(a: Atom, b: Atom) -> Atom {
286 Atom {
306 state: (a.state ^ b.state) | !a.valid | !b.valid,
307 valid: a.valid & b.valid,
308 }
309}
310
311#[inline]
312fn logic_nand_impl(a: Atom, b: Atom) -> Atom {
313 Atom {
333 state: !a.state | !a.valid | !b.state | !b.valid,
334 valid: (a.valid & b.valid) | (!a.state & a.valid) | (!b.state & b.valid),
335 }
336}
337
338#[inline]
339fn logic_nor_impl(a: Atom, b: Atom) -> Atom {
340 Atom {
360 state: (!a.state & !b.state)
361 | (!a.valid & !b.valid)
362 | (!a.state & !b.valid)
363 | (!b.state & !a.valid),
364 valid: (a.state & a.valid) | (b.state & b.valid) | (a.valid & b.valid),
365 }
366}
367
368#[inline]
369fn logic_xnor_impl(a: Atom, b: Atom) -> Atom {
370 Atom {
390 state: !(a.state ^ b.state) | !a.valid | !b.valid,
391 valid: a.valid & b.valid,
392 }
393}
394
/// Generates the three-operand (`out = lhs <op> rhs`) and two-operand
/// (`out = out <op> rhs`) wrappers around a bitwise logic primitive.
macro_rules! def_binary_op {
    ($op_impl:ident -> $name3:ident, $name2:ident) => {
        pub(super) fn $name3(
            width: NonZeroU8,
            out: &mut [Atom],
            lhs: &[Atom],
            rhs: &[Atom],
        ) -> OpResult {
            perform_3(width, out, lhs, rhs, |_, a, b| $op_impl(a, b))
        }

        #[allow(dead_code)]
        pub(super) fn $name2(width: NonZeroU8, out: &mut [Atom], rhs: &[Atom]) -> OpResult {
            perform_2(width, out, rhs, |_, a, b| $op_impl(a, b))
        }
    };
}

def_binary_op!(logic_and_impl -> logic_and_3, logic_and_2);
def_binary_op!(logic_or_impl -> logic_or_3, logic_or_2);
def_binary_op!(logic_xor_impl -> logic_xor_3, logic_xor_2);
def_binary_op!(logic_nand_impl -> logic_nand_3, logic_nand_2);
def_binary_op!(logic_nor_impl -> logic_nor_3, logic_nor_2);
def_binary_op!(logic_xnor_impl -> logic_xnor_3, logic_xnor_2);
419
420#[inline]
421fn logic_not_impl(v: Atom) -> Atom {
422 Atom {
430 state: !v.state | !v.valid,
431 valid: v.valid,
432 }
433}
434
/// Generates the copying (`out = <op> val`) and in-place (`out = <op> out`)
/// wrappers around a unary logic primitive.
macro_rules! def_unary_op {
    ($op_impl:ident -> $name2:ident, $name1:ident) => {
        pub(super) fn $name2(width: NonZeroU8, out: &mut [Atom], val: &[Atom]) -> OpResult {
            perform_2(width, out, val, |_, _, v| $op_impl(v))
        }

        pub(super) fn $name1(width: NonZeroU8, out: &mut [Atom]) -> OpResult {
            perform_1(width, out, |_, v| $op_impl(v))
        }
    };
}

def_unary_op!(logic_not_impl -> logic_not_2, logic_not_1);
448
449pub(super) fn buffer(
450 width: NonZeroU8,
451 out: &mut [Atom],
452 val: &[Atom],
453 en: LogicBitState,
454) -> OpResult {
455 match en {
456 LogicBitState::Undefined => perform_1(width, out, |_, _| Atom::UNDEFINED),
457 LogicBitState::Logic1 => perform_2(width, out, val, |_, _, v| v.high_z_to_undefined()),
458 _ => perform_1(width, out, |_, _| Atom::HIGH_Z),
459 }
460}
461
#[inline]
fn add_impl(width: AtomWidth, a: Atom, b: Atom, c: LogicBitState) -> (Atom, LogicBitState) {
    // One `width`-bit step of a ripple-carry addition; returns the sum atom
    // and the carry-out bit.
    let (c_in, c_valid) = c.to_bits();
    // 64-bit arithmetic so the carry out of bit `width` is observable.
    let sum = (a.state.get() as u64) + (b.state.get() as u64) + (c_in as u64);

    // Addition propagates carries upward, so a result bit can only be valid
    // while every lower input bit is valid: keep only the trailing run of
    // valid bits in each operand.
    let valid_mask_a = a.valid.keep_trailing_ones();
    let valid_mask_b = b.valid.keep_trailing_ones();
    let valid_mask_c = match c_valid {
        false => LogicStorage::ALL_ZERO,
        true => LogicStorage::ALL_ONE,
    };
    let valid_mask = (valid_mask_a & valid_mask_b & valid_mask_c).get();

    // Carry-out is valid only if the top bit of the result is valid; an
    // invalid carry has its state bit forced high.
    let c_valid = (valid_mask >> (width.get() - 1)) > 0;
    let c_state = ((sum >> width.get()) > 0) | !c_valid;

    (
        Atom {
            // Invalid result bits read back as 1s in the state plane.
            state: LogicStorage::new((sum as u32) | !valid_mask),
            valid: LogicStorage::new(valid_mask),
        },
        LogicBitState::from_bits(c_state, c_valid),
    )
}
486
487pub(super) fn add(
488 width: NonZeroU8,
489 out: &mut [Atom],
490 carry_out: &mut LogicBitState,
491 lhs: &[Atom],
492 rhs: &[Atom],
493 carry_in: LogicBitState,
494) -> OpResult {
495 let mut carry = carry_in;
496 let result = perform_3(width, out, lhs, rhs, |width, a, b| {
497 let sum;
498 (sum, carry) = add_impl(width, a, b, carry);
499 sum
500 });
501
502 *carry_out = carry;
503 result
504}
505
506pub(super) fn sub(
507 width: NonZeroU8,
508 out: &mut [Atom],
509 carry_out: &mut LogicBitState,
510 lhs: &[Atom],
511 rhs: &[Atom],
512 carry_in: LogicBitState,
513) -> OpResult {
514 let mut carry = carry_in;
515 let result = perform_3(width, out, lhs, rhs, |width, a, mut b| {
516 let sum;
517 b.state = !b.state;
518 (sum, carry) = add_impl(width, a, b, carry);
519 sum
520 });
521
522 *carry_out = carry;
523 result
524}
525
526pub(super) fn neg(
527 width: NonZeroU8,
528 out: &mut [Atom],
529 carry_out: &mut LogicBitState,
530 val: &[Atom],
531 carry_in: LogicBitState,
532) -> OpResult {
533 let mut carry = carry_in;
534 let result = perform_2(width, out, val, |width, _, mut v| {
535 let sum;
536 v.state = !v.state;
537 (sum, carry) = add_impl(width, Atom::LOGIC_0, v, carry);
538 sum
539 });
540
541 *carry_out = carry;
542 result
543}
544
545#[inline]
546fn mul_impl(width: AtomWidth, prev: Atom, a: Atom, b: Atom, c: Atom) -> (Atom, Atom) {
547 let mask = LogicStorage::mask(width);
548
549 let prev_valid = prev.valid | !mask;
550 let a_valid = a.valid | !mask;
551 let b_valid = b.valid | !mask;
552 let c_valid = c.valid | !mask;
553
554 if (prev_valid != LogicStorage::ALL_ONE)
555 | (a_valid != LogicStorage::ALL_ONE)
556 | (b_valid != LogicStorage::ALL_ONE)
557 | (c_valid != LogicStorage::ALL_ONE)
558 {
559 return (Atom::UNDEFINED, Atom::UNDEFINED);
560 }
561
562 let prev_state = prev.state & mask;
563 let a_state = a.state & mask;
564 let b_state = b.state & mask;
565 let c_state = c.state & mask;
566
567 let product = (prev_state.get() as u64)
568 + (a_state.get() as u64) * (b_state.get() as u64)
569 + (c_state.get() as u64);
570
571 (
572 Atom::from_int(product as u32),
573 Atom::from_int((product >> 32) as u32),
574 )
575}
576
pub(super) fn mul(width: NonZeroU8, out: &mut [Atom], lhs: &[Atom], rhs: &[Atom]) -> OpResult {
    // Schoolbook multiplication with 32-bit atoms as limbs, truncated to
    // `width` bits: partial products accumulate into `tmp_state`, and any
    // product atoms above `out.len()` are discarded.
    debug_assert_eq!(out.len(), width.safe_div_ceil(Atom::BITS).get() as usize);
    debug_assert_eq!(out.len(), lhs.len());
    debug_assert_eq!(out.len(), rhs.len());

    let mut tmp_state = [Atom::LOGIC_0; MAX_ATOM_COUNT];
    let tmp_state = get_mut!(tmp_state, ..out.len());

    let mut total_width = width.get();
    for (i, lhs) in lhs.iter().copied().enumerate() {
        // Width of this `lhs` atom (the last atom may be partial).
        let width = AtomWidth::new(total_width).unwrap_or(AtomWidth::MAX);
        total_width -= width.get();

        let mut carry = Atom::LOGIC_0;
        for (j, rhs) in rhs.iter().copied().enumerate() {
            if let Some(dst) = tmp_state.get_mut(i + j) {
                (*dst, carry) = mul_impl(width, *dst, lhs, rhs, carry);
            } else {
                // This partial product lies entirely above the output width.
                break;
            }
        }
    }

    // Copy the accumulated product into `out`, reporting whether it changed.
    let mut iter = tmp_state.iter().copied();
    perform_1(width, out, |_, _| iter.next().unwrap())
}
603
604#[inline]
647fn perform_shift<'a, Iter, F>(
648 width: NonZeroU8,
649 shamnt_width: NonZeroU8,
650 out: &mut [Atom],
651 val: &'a [Atom],
652 shamnt: Atom,
653 make_iter: F,
654) -> OpResult
655where
656 Iter: Iterator<Item = Atom>,
657 F: FnOnce(NonZeroU8, &'a [Atom], usize) -> Iter + 'a,
658{
659 debug_assert_eq!(width.clog2(), shamnt_width.get());
660 debug_assert!(shamnt_width <= Atom::BITS);
661
662 let shamnt_width = unsafe { AtomWidth::new_unchecked(shamnt_width.get()) };
663 if shamnt.is_valid(shamnt_width) {
664 debug_assert_eq!(out.len(), width.safe_div_ceil(Atom::BITS).get() as usize);
665 debug_assert_eq!(out.len(), val.len());
666
667 let shamnt_mask = LogicStorage::mask(shamnt_width);
668 let shamnt = (shamnt.state & shamnt_mask).get() as usize;
669
670 let mut result = OpResult::Unchanged;
671
672 let mut i = 0;
673 let mut total_width = width.get();
674 let mut val_iter = make_iter(width, val, shamnt);
675 while total_width >= Atom::BITS.get() {
676 let out = get_mut!(out, i);
677 let val = val_iter.next().unwrap();
678
679 if !out.eq(val, AtomWidth::MAX) {
680 result = OpResult::Changed;
681 }
682 *out = val;
683
684 i += 1;
685 total_width -= Atom::BITS.get();
686 }
687
688 if total_width > 0 {
689 let last_out = get_mut!(out, i);
690 let last_val = val_iter.next().unwrap();
691
692 let last_width = unsafe {
693 AtomWidth::new_unchecked(total_width)
695 };
696
697 if !last_out.eq(last_val, last_width) {
698 result = OpResult::Changed;
699 }
700 *last_out = last_val;
701 }
702
703 result
704 } else {
705 perform_1(width, out, |_, _| Atom::UNDEFINED)
706 }
707}
708
/// Iterator producing the atoms of `val` shifted left by a fixed amount.
/// Emits zero atoms first for whole-atom shifts, then the bit-shifted data
/// atoms, then the final carry, then zeros; it never returns `None`.
struct ShlIter<'a> {
    iter: std::slice::Iter<'a, Atom>,
    // Number of whole zero atoms still to emit.
    atom_shift: usize,
    // Residual shift within an atom.
    bit_shift: AtomOffset,
    // Bits carried over from the previously consumed atom, if any.
    carry: Option<Atom>,
}

impl<'a> ShlIter<'a> {
    #[inline]
    fn new(val: &'a [Atom], shamnt: usize) -> Self {
        // Split the shift into whole atoms and a residual bit shift.
        let atom_shift = shamnt / (Atom::BITS.get() as usize);
        let bit_shift = unsafe {
            // SAFETY: `x % Atom::BITS` is always less than `Atom::BITS`.
            AtomOffset::new_unchecked((shamnt % (Atom::BITS.get() as usize)) as u8)
        };

        Self {
            iter: val.iter(),
            atom_shift,
            bit_shift,
            carry: None,
        }
    }
}

impl Iterator for ShlIter<'_> {
    type Item = Atom;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if self.atom_shift > 0 {
            // Still emitting whole atoms of shifted-in zeros.
            self.atom_shift -= 1;
            Some(Atom::LOGIC_0)
        } else if let Some(next) = self.iter.next().copied().map(Atom::high_z_to_undefined) {
            // Split the atom at the bit shift; the part shifted out the top
            // becomes the carry into the next emitted atom.
            let (low_state, high_state) = next.state.widening_shl(self.bit_shift);
            let (low_valid, high_valid) = next.valid.widening_shl(self.bit_shift);
            // First data atom: seed the carry by shifting `LOGIC_0`, so the
            // shifted-in low bits take LOGIC_0's plane values.
            let carry = self.carry.unwrap_or(Atom {
                state: Atom::LOGIC_0.state.widening_shl(self.bit_shift).1,
                valid: Atom::LOGIC_0.valid.widening_shl(self.bit_shift).1,
            });

            self.carry = Some(Atom {
                state: high_state,
                valid: high_valid,
            });

            Some(Atom {
                state: low_state | carry.state,
                valid: low_valid | carry.valid,
            })
        } else if let Some(carry) = self.carry.take() {
            // Data exhausted: flush the final carry once...
            Some(carry)
        } else {
            // ...then pad with zeros indefinitely.
            Some(Atom::LOGIC_0)
        }
    }
}
766
767pub(super) fn shl(
768 width: NonZeroU8,
769 shamnt_width: NonZeroU8,
770 out: &mut [Atom],
771 val: &[Atom],
772 shamnt: Atom,
773) -> OpResult {
774 perform_shift(width, shamnt_width, out, val, shamnt, |_, val, shamnt| {
775 ShlIter::new(val, shamnt)
776 })
777}
778
779struct MaskingIter<Iter: Iterator<Item = Atom>> {
780 width: u8,
781 fill: Atom,
782 iter: Iter,
783}
784
785impl<Iter: Iterator<Item = Atom>> MaskingIter<Iter> {
786 #[inline]
787 fn new(width: NonZeroU8, fill: Atom, iter: Iter) -> Self {
788 Self {
789 width: width.get(),
790 fill,
791 iter,
792 }
793 }
794}
795
796impl<Iter: Iterator<Item = Atom>> Iterator for MaskingIter<Iter> {
797 type Item = Atom;
798
799 #[inline]
800 fn next(&mut self) -> Option<Self::Item> {
801 if self.width > 0 {
802 let next = self.iter.next().unwrap_or(self.fill);
803
804 let width = AtomWidth::new(self.width).unwrap_or(AtomWidth::MAX);
805 self.width -= width.get();
806
807 let mask = LogicStorage::mask(width);
808 let inv_mask = !mask;
809
810 Some(Atom {
811 state: (next.state & mask) | (self.fill.state & inv_mask),
812 valid: (next.valid & mask) | (self.fill.valid & inv_mask),
813 })
814 } else {
815 Some(self.fill)
816 }
817 }
818}
819
/// Iterator producing the atoms of a value shifted right by a fixed amount.
/// The fill beyond the value comes from the wrapped `MaskingIter` (zeros for
/// logical shifts, the splatted sign bit for arithmetic ones); it never
/// returns `None`.
struct ShrIter<'a> {
    iter: MaskingIter<std::iter::Copied<std::iter::Skip<std::slice::Iter<'a, Atom>>>>,
    bit_shift: AtomOffset,
    // Already-shifted low portion of the next output atom.
    current: Atom,
}
825
826impl<'a> ShrIter<'a> {
827 #[inline]
828 fn new_logical(width: NonZeroU8, val: &'a [Atom], shamnt: usize) -> Self {
829 let atom_shift = shamnt / (Atom::BITS.get() as usize);
830 let bit_shift = unsafe {
831 AtomOffset::new_unchecked((shamnt % (Atom::BITS.get() as usize)) as u8)
833 };
834
835 let mut iter = MaskingIter::new(width, Atom::LOGIC_0, val.iter().skip(atom_shift).copied());
836 let current = iter.next().map(Atom::high_z_to_undefined).unwrap();
837 let current = Atom {
838 state: current.state >> bit_shift,
839 valid: current.valid >> bit_shift,
840 };
841
842 Self {
843 iter,
844 bit_shift,
845 current,
846 }
847 }
848
849 #[inline]
850 fn new_arithmetic(width: NonZeroU8, val: &'a [Atom], shamnt: usize) -> Self {
851 let atom_index = ((width.get() - 1) / Atom::BITS.get()) as usize;
852 let bit_index = unsafe {
853 AtomOffset::new_unchecked((width.get() - 1) % Atom::BITS.get())
855 };
856
857 let atom_shift = shamnt / (Atom::BITS.get() as usize);
858 let bit_shift = unsafe {
859 AtomOffset::new_unchecked((shamnt % (Atom::BITS.get() as usize)) as u8)
861 };
862
863 let fill = val[atom_index].get_bit_state(bit_index).splat();
864 let mut iter = MaskingIter::new(width, fill, val.iter().skip(atom_shift).copied());
865 let current = iter.next().map(Atom::high_z_to_undefined).unwrap();
866 let current = Atom {
867 state: current.state >> bit_shift,
868 valid: current.valid >> bit_shift,
869 };
870
871 Self {
872 iter,
873 bit_shift,
874 current,
875 }
876 }
877}
878
impl Iterator for ShrIter<'_> {
    type Item = Atom;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Never returns `None`: the wrapped `MaskingIter` is itself endless.
        let current = self.current;
        let next = self.iter.next().map(Atom::high_z_to_undefined).unwrap();

        // Split the following atom at the bit shift: the bits shifted out of
        // its bottom complete the atom emitted now, the rest is kept as the
        // low part of the next output atom.
        let (high_state, low_state) = next.state.widening_shr(self.bit_shift);
        let (high_valid, low_valid) = next.valid.widening_shr(self.bit_shift);

        self.current = Atom {
            state: high_state,
            valid: high_valid,
        };

        Some(Atom {
            state: current.state | low_state,
            valid: current.valid | low_valid,
        })
    }
}
901
902pub(super) fn lshr(
903 width: NonZeroU8,
904 shamnt_width: NonZeroU8,
905 out: &mut [Atom],
906 val: &[Atom],
907 shamnt: Atom,
908) -> OpResult {
909 perform_shift(width, shamnt_width, out, val, shamnt, ShrIter::new_logical)
910}
911
912pub(super) fn ashr(
913 width: NonZeroU8,
914 shamnt_width: NonZeroU8,
915 out: &mut [Atom],
916 val: &[Atom],
917 shamnt: Atom,
918) -> OpResult {
919 perform_shift(
920 width,
921 shamnt_width,
922 out,
923 val,
924 shamnt,
925 ShrIter::new_arithmetic,
926 )
927}
928
/// Iterator over the atoms of `val` shifted right by a fixed bit offset,
/// used to extract a bit slice starting at that offset. Atoms past the end
/// of `val` read as `Atom::UNDEFINED`; it never returns `None`.
struct SliceShiftIter<'a> {
    iter: std::iter::Skip<std::slice::Iter<'a, Atom>>,
    bit_shift: AtomOffset,
    // Already-shifted low portion of the next output atom.
    current: Atom,
}

impl<'a> SliceShiftIter<'a> {
    #[inline]
    fn new(val: &'a [Atom], shamnt: usize) -> Self {
        // Split the shift into whole skipped atoms and a residual bit shift.
        let atom_shift = shamnt / (Atom::BITS.get() as usize);
        let bit_shift = unsafe {
            // SAFETY: `x % Atom::BITS` is always less than `Atom::BITS`.
            AtomOffset::new_unchecked((shamnt % (Atom::BITS.get() as usize)) as u8)
        };

        let mut iter = val.iter().skip(atom_shift);
        let current = iter.next().copied().unwrap_or(Atom::UNDEFINED);
        let current = Atom {
            state: current.state >> bit_shift,
            valid: current.valid >> bit_shift,
        };

        Self {
            iter,
            bit_shift,
            current,
        }
    }
}

impl Iterator for SliceShiftIter<'_> {
    type Item = Atom;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        let current = self.current;
        // Unlike `ShrIter`, high-Z bits pass through unchanged here.
        let next = self.iter.next().copied().unwrap_or(Atom::UNDEFINED);

        // Bits shifted out of the bottom of `next` complete the atom emitted
        // now; the rest becomes the low part of the next output atom.
        let (high_state, low_state) = next.state.widening_shr(self.bit_shift);
        let (high_valid, low_valid) = next.valid.widening_shr(self.bit_shift);

        self.current = Atom {
            state: high_state,
            valid: high_valid,
        };

        Some(Atom {
            state: current.state | low_state,
            valid: current.valid | low_valid,
        })
    }
}
981
pub(super) fn slice(width: NonZeroU8, out: &mut [Atom], val: &[Atom], offset: u8) -> OpResult {
    // Extracts a `width`-bit slice of `val` starting at bit `offset` into
    // `out`. Returns `Changed` if any covered `out` bits changed.
    debug_assert_eq!(out.len(), width.safe_div_ceil(Atom::BITS).get() as usize);
    debug_assert!(out.len() <= val.len());

    let mut result = OpResult::Unchanged;

    let mut i = 0;
    let mut total_width = width.get();
    let mut val_iter = SliceShiftIter::new(val, offset as usize);
    while total_width >= Atom::BITS.get() {
        let out = get_mut!(out, i);
        // `SliceShiftIter` never returns `None`.
        let val = val_iter.next().unwrap();

        if !out.eq(val, AtomWidth::MAX) {
            result = OpResult::Changed;
        }
        *out = val;

        i += 1;
        total_width -= Atom::BITS.get();
    }

    if total_width > 0 {
        // Trailing partial atom compared at its exact remaining width.
        let last_out = get_mut!(out, i);
        let last_val = val_iter.next().unwrap();

        let last_width = unsafe {
            // SAFETY: here `0 < total_width < Atom::BITS`, a valid AtomWidth.
            AtomWidth::new_unchecked(total_width)
        };

        if !last_out.eq(last_val, last_width) {
            result = OpResult::Changed;
        }
        *last_out = last_val;
    }

    result
}
1021
/// Iterator over the atoms of a source value shifted left by a fixed amount,
/// padded with `Atom::HIGH_Z`; `merge_one` ORs the produced atoms into a
/// destination. It never returns `None`.
struct MergeShiftIter<'a> {
    iter: MaskingIter<std::iter::Copied<std::slice::Iter<'a, Atom>>>,
    // Number of whole high-Z atoms still to emit.
    atom_shift: usize,
    // Residual shift within an atom.
    bit_shift: AtomOffset,
    // Bits carried over from the previously consumed atom.
    carry: Atom,
}

impl<'a> MergeShiftIter<'a> {
    #[inline]
    fn new(width: NonZeroU8, val: &'a [Atom], shamnt: usize) -> Self {
        // Split the shift into whole atoms and a residual bit shift.
        let atom_shift = shamnt / (Atom::BITS.get() as usize);
        let bit_shift = unsafe {
            // SAFETY: `x % Atom::BITS` is always less than `Atom::BITS`.
            AtomOffset::new_unchecked((shamnt % (Atom::BITS.get() as usize)) as u8)
        };

        Self {
            // Mask the source to `width` bits; everything beyond is high-Z.
            iter: MaskingIter::new(width, Atom::HIGH_Z, val.iter().copied()),
            atom_shift,
            bit_shift,
            carry: Atom::HIGH_Z,
        }
    }
}

impl Iterator for MergeShiftIter<'_> {
    type Item = Atom;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if self.atom_shift > 0 {
            // Still emitting whole atoms below the shift amount.
            self.atom_shift -= 1;
            Some(Atom::HIGH_Z)
        } else {
            // `MaskingIter` is endless, so `next()` always yields an atom.
            let next = self.iter.next().unwrap();
            // Split at the bit shift; the high part carries into the next
            // emitted atom.
            let (low_state, high_state) = next.state.widening_shl(self.bit_shift);
            let (low_valid, high_valid) = next.valid.widening_shl(self.bit_shift);
            let carry = self.carry;

            self.carry = Atom {
                state: high_state,
                valid: high_valid,
            };

            Some(Atom {
                state: low_state | carry.state,
                valid: low_valid | carry.valid,
            })
        }
    }
}
1073
1074pub(super) fn merge_one(dst: &mut [Atom], src_width: NonZeroU8, src: &[Atom], shamnt: usize) {
1075 debug_assert_eq!(
1076 src.len(),
1077 src_width.safe_div_ceil(Atom::BITS).get() as usize
1078 );
1079 debug_assert!(dst.len() >= src.len());
1080
1081 let src_iter = MergeShiftIter::new(src_width, src, shamnt);
1082 for (dst, src) in izip!(dst, src_iter) {
1083 dst.state |= src.state;
1084 dst.valid |= src.valid;
1085 }
1086}
1087
pub(super) fn copy(width: NonZeroU8, dst: &mut [Atom], src: &[Atom]) -> OpResult {
    // Copies a `width`-bit value from `src` into `dst`, reporting whether
    // any covered bits changed.
    debug_assert_eq!(src.len(), width.safe_div_ceil(Atom::BITS).get() as usize);
    debug_assert_eq!(dst.len(), src.len());

    let mut result = OpResult::Unchanged;
    let mut i = 0;
    let mut total_width = width.get();
    while total_width >= Atom::BITS.get() {
        // In-bounds by the length debug_asserts above.
        let dst = get_mut!(dst, i);
        let src = get!(src, i);

        if !dst.eq(src, AtomWidth::MAX) {
            result = OpResult::Changed;
        }
        *dst = src;

        i += 1;
        total_width -= Atom::BITS.get();
    }

    if total_width > 0 {
        // Trailing partial atom compared at its exact remaining width.
        let dst = get_mut!(dst, i);
        let src = get!(src, i);

        let last_width = unsafe {
            // SAFETY: here `0 < total_width < Atom::BITS`, a valid AtomWidth.
            AtomWidth::new_unchecked(total_width)
        };

        if !dst.eq(src, last_width) {
            result = OpResult::Changed;
        }
        *dst = src;
    }

    result
}
1125
1126#[inline]
1127fn reduce_atom<Op>(mut val: Atom, mut op: Op) -> Atom
1128where
1129 Op: FnMut(Atom, Atom) -> Atom,
1130{
1131 const O16: AtomOffset = unsafe { AtomOffset::new_unchecked(16) };
1132 const O8: AtomOffset = unsafe { AtomOffset::new_unchecked(8) };
1133 const O4: AtomOffset = unsafe { AtomOffset::new_unchecked(4) };
1134 const O2: AtomOffset = unsafe { AtomOffset::new_unchecked(2) };
1135 const O1: AtomOffset = unsafe { AtomOffset::new_unchecked(1) };
1136
1137 let sh16 = Atom {
1138 state: val.state >> O16,
1139 valid: val.valid >> O16,
1140 };
1141 val = op(val, sh16);
1142
1143 let sh8 = Atom {
1144 state: val.state >> O8,
1145 valid: val.valid >> O8,
1146 };
1147 val = op(val, sh8);
1148
1149 let sh4 = Atom {
1150 state: val.state >> O4,
1151 valid: val.valid >> O4,
1152 };
1153 val = op(val, sh4);
1154
1155 let sh2 = Atom {
1156 state: val.state >> O2,
1157 valid: val.valid >> O2,
1158 };
1159 val = op(val, sh2);
1160
1161 let sh1 = Atom {
1162 state: val.state >> O1,
1163 valid: val.valid >> O1,
1164 };
1165 val = op(val, sh1);
1166
1167 val
1168}
1169
#[inline]
pub(super) fn horizontal_logic_and_impl(width: NonZeroU8, val: &[Atom]) -> Atom {
    // Reduces all `width` bits of `val` with AND; callers read the result
    // from bit 0 of the returned atom.
    let mut new = Atom::LOGIC_1;

    let mut total_width = width.get();
    for &(mut val) in val {
        let width = AtomWidth::new(total_width).unwrap_or(AtomWidth::MAX);
        total_width -= width.get();

        if width < AtomWidth::MAX {
            // Force bits beyond the value width to valid 1s — the AND
            // identity — so they cannot affect the reduction.
            let mask = LogicStorage::mask(width);
            val.state |= !mask;
            val.valid |= !mask;
        }

        new = logic_and_impl(new, val);
    }

    // Fold the 32 lanes of the combined atom down to bit 0.
    reduce_atom(new, logic_and_impl)
}
1195
#[inline]
pub(super) fn horizontal_logic_or_impl(width: NonZeroU8, val: &[Atom]) -> Atom {
    // Reduces all `width` bits of `val` with OR; callers read the result
    // from bit 0 of the returned atom.
    let mut new = Atom::LOGIC_0;

    let mut total_width = width.get();
    for &(mut val) in val {
        let width = AtomWidth::new(total_width).unwrap_or(AtomWidth::MAX);
        total_width -= width.get();

        if width < AtomWidth::MAX {
            // Force bits beyond the value width to valid 0s — the OR
            // identity — so they cannot affect the reduction.
            let mask = LogicStorage::mask(width);
            val.state &= mask;
            val.valid |= !mask;
        }

        new = logic_or_impl(new, val);
    }

    // Fold the 32 lanes of the combined atom down to bit 0.
    reduce_atom(new, logic_or_impl)
}
1217
1218pub(super) fn horizontal_logic_xor_impl(width: NonZeroU8, val: &[Atom]) -> Atom {
1219 let mut new = Atom::LOGIC_0;
1220
1221 let mut total_width = width.get();
1222 for &(mut val) in val {
1223 let width = AtomWidth::new(total_width).unwrap_or(AtomWidth::MAX);
1224 total_width -= width.get();
1225
1226 if width < AtomWidth::MAX {
1227 let mask = LogicStorage::mask(width);
1229 val.state &= mask;
1230 val.valid |= !mask;
1231 }
1232
1233 new = logic_xor_impl(new, val);
1234 }
1235
1236 reduce_atom(new, logic_xor_impl)
1237}
1238
1239pub(super) fn horizontal_logic_and(width: NonZeroU8, out: &mut Atom, val: &[Atom]) -> OpResult {
1240 let new = horizontal_logic_and_impl(width, val);
1241
1242 if !out.eq(new, AtomWidth::MIN) {
1243 *out = new;
1244 OpResult::Changed
1245 } else {
1246 OpResult::Unchanged
1247 }
1248}
1249
1250pub(super) fn horizontal_logic_or(width: NonZeroU8, out: &mut Atom, val: &[Atom]) -> OpResult {
1251 let new = horizontal_logic_or_impl(width, val);
1252
1253 if !out.eq(new, AtomWidth::MIN) {
1254 *out = new;
1255 OpResult::Changed
1256 } else {
1257 OpResult::Unchanged
1258 }
1259}
1260
1261pub(super) fn horizontal_logic_xor(width: NonZeroU8, out: &mut Atom, val: &[Atom]) -> OpResult {
1262 let new = horizontal_logic_xor_impl(width, val);
1263
1264 if !out.eq(new, AtomWidth::MIN) {
1265 *out = new;
1266 OpResult::Changed
1267 } else {
1268 OpResult::Unchanged
1269 }
1270}
1271
1272pub(super) fn horizontal_logic_nand(width: NonZeroU8, out: &mut Atom, val: &[Atom]) -> OpResult {
1273 let new = horizontal_logic_and_impl(width, val);
1274 let new = logic_not_impl(new);
1275
1276 if !out.eq(new, AtomWidth::MIN) {
1277 *out = new;
1278 OpResult::Changed
1279 } else {
1280 OpResult::Unchanged
1281 }
1282}
1283
1284pub(super) fn horizontal_logic_nor(width: NonZeroU8, out: &mut Atom, val: &[Atom]) -> OpResult {
1285 let new = horizontal_logic_or_impl(width, val);
1286 let new = logic_not_impl(new);
1287
1288 if !out.eq(new, AtomWidth::MIN) {
1289 *out = new;
1290 OpResult::Changed
1291 } else {
1292 OpResult::Unchanged
1293 }
1294}
1295
1296pub(super) fn horizontal_logic_xnor(width: NonZeroU8, out: &mut Atom, val: &[Atom]) -> OpResult {
1297 let new = horizontal_logic_xor_impl(width, val);
1298 let new = logic_not_impl(new);
1299
1300 if !out.eq(new, AtomWidth::MIN) {
1301 *out = new;
1302 OpResult::Changed
1303 } else {
1304 OpResult::Unchanged
1305 }
1306}
1307
1308pub(super) fn equal(width: NonZeroU8, out: &mut Atom, lhs: &[Atom], rhs: &[Atom]) -> OpResult {
1309 let mut new = Atom::LOGIC_1;
1310
1311 let mut total_width = width.get();
1312 for (&lhs, &rhs) in izip!(lhs, rhs) {
1313 let width = AtomWidth::new(total_width).unwrap_or(AtomWidth::MAX);
1314 total_width -= width.get();
1315
1316 if !lhs.is_valid(width) || !rhs.is_valid(width) {
1317 new = Atom::UNDEFINED;
1318 break;
1319 }
1320
1321 if !lhs.eq(rhs, width) {
1322 new = Atom::LOGIC_0;
1323 }
1324 }
1325
1326 if !out.eq(new, AtomWidth::MIN) {
1327 *out = new;
1328 OpResult::Changed
1329 } else {
1330 OpResult::Unchanged
1331 }
1332}
1333
1334pub(super) fn not_equal(width: NonZeroU8, out: &mut Atom, lhs: &[Atom], rhs: &[Atom]) -> OpResult {
1335 let mut new = Atom::LOGIC_0;
1336
1337 let mut total_width = width.get();
1338 for (&lhs, &rhs) in izip!(lhs, rhs) {
1339 let width = AtomWidth::new(total_width).unwrap_or(AtomWidth::MAX);
1340 total_width -= width.get();
1341
1342 if !lhs.is_valid(width) || !rhs.is_valid(width) {
1343 new = Atom::UNDEFINED;
1344 break;
1345 }
1346
1347 if !lhs.eq(rhs, width) {
1348 new = Atom::LOGIC_1;
1349 }
1350 }
1351
1352 if !out.eq(new, AtomWidth::MIN) {
1353 *out = new;
1354 OpResult::Changed
1355 } else {
1356 OpResult::Unchanged
1357 }
1358}
1359
1360#[inline]
1361fn cmp<Cmp>(width: NonZeroU8, out: &mut Atom, lhs: &[Atom], rhs: &[Atom], inv_cmp: Cmp) -> OpResult
1362where
1363 Cmp: Fn(LogicStorage, LogicStorage) -> bool,
1364{
1365 let mut new = Atom::LOGIC_1;
1366
1367 'valid: {
1368 let mut iter = izip!(lhs, rhs).rev();
1369
1370 let head_width = AtomWidth::new(width.get() % Atom::BITS.get()).unwrap_or(AtomWidth::MAX);
1371 if let Some((&lhs, &rhs)) = iter.next() {
1372 if !lhs.is_valid(head_width) || !rhs.is_valid(head_width) {
1373 new = Atom::UNDEFINED;
1374 break 'valid;
1375 }
1376
1377 let mask = LogicStorage::mask(head_width);
1378 if inv_cmp(lhs.state & mask, rhs.state & mask) {
1379 new = Atom::LOGIC_0;
1380 }
1381 }
1382
1383 for (&lhs, &rhs) in iter {
1384 if !lhs.is_valid(AtomWidth::MAX) || !rhs.is_valid(AtomWidth::MAX) {
1385 new = Atom::UNDEFINED;
1386 break 'valid;
1387 }
1388
1389 if inv_cmp(lhs.state, rhs.state) {
1390 new = Atom::LOGIC_0;
1391 }
1392 }
1393 }
1394
1395 if !out.eq(new, AtomWidth::MIN) {
1396 *out = new;
1397 OpResult::Changed
1398 } else {
1399 OpResult::Unchanged
1400 }
1401}
1402
1403pub(super) fn less_than(width: NonZeroU8, out: &mut Atom, lhs: &[Atom], rhs: &[Atom]) -> OpResult {
1404 cmp(width, out, lhs, rhs, |a, b| a >= b)
1405}
1406
1407pub(super) fn greater_than(
1408 width: NonZeroU8,
1409 out: &mut Atom,
1410 lhs: &[Atom],
1411 rhs: &[Atom],
1412) -> OpResult {
1413 cmp(width, out, lhs, rhs, |a, b| a <= b)
1414}
1415
1416pub(super) fn less_than_or_equal(
1417 width: NonZeroU8,
1418 out: &mut Atom,
1419 lhs: &[Atom],
1420 rhs: &[Atom],
1421) -> OpResult {
1422 cmp(width, out, lhs, rhs, |a, b| a > b)
1423}
1424
1425pub(super) fn greater_than_or_equal(
1426 width: NonZeroU8,
1427 out: &mut Atom,
1428 lhs: &[Atom],
1429 rhs: &[Atom],
1430) -> OpResult {
1431 cmp(width, out, lhs, rhs, |a, b| a < b)
1432}
1433
/// Shared driver for the signed comparison operations.
///
/// `inv_cmp` is the *inverse* predicate of the comparison being computed: the
/// single-bit result in `out` starts as logic 1 and is forced to logic 0
/// whenever `inv_cmp` holds for a pair of atoms. Unlike the unsigned variant,
/// the predicate also receives the width of the atom pair so implementations
/// can account for the position of the sign bit.
///
/// If any examined atom of either operand is invalid (contains Z/X bits within
/// its live width), the result becomes `Atom::UNDEFINED` instead.
///
/// Returns whether the stored result bit actually changed.
///
/// NOTE(review): `inv_cmp` is applied to each atom pair independently and the
/// hits are effectively OR-combined; for operands wider than one atom this is
/// not a lexicographic most-significant-first comparison. Presumably callers
/// only use widths of at most `Atom::BITS` here — confirm against call sites.
#[inline]
fn cmp_signed<Cmp>(
    width: NonZeroU8,
    out: &mut Atom,
    lhs: &[Atom],
    rhs: &[Atom],
    inv_cmp: Cmp,
) -> OpResult
where
    Cmp: Fn(LogicStorage, LogicStorage, AtomWidth) -> bool,
{
    // Tentative result; downgraded to LOGIC_0 or UNDEFINED below.
    let mut new = Atom::LOGIC_1;

    'valid: {
        // Walk the operands from the most significant atom downwards.
        let mut iter = izip!(lhs, rhs).rev();

        // Live bit count of the most significant atom (a full atom when the
        // width is an exact multiple of `Atom::BITS`).
        let head_width = AtomWidth::new(width.get() % Atom::BITS.get()).unwrap_or(AtomWidth::MAX);
        if let Some((&lhs, &rhs)) = iter.next() {
            if !lhs.is_valid(head_width) || !rhs.is_valid(head_width) {
                new = Atom::UNDEFINED;
                break 'valid;
            }

            // Mask off dead bits above the live width before comparing.
            let mask = LogicStorage::mask(head_width);
            if inv_cmp(lhs.state & mask, rhs.state & mask, head_width) {
                new = Atom::LOGIC_0;
            }
        }

        // Remaining (lower) atoms are always fully live.
        for (&lhs, &rhs) in iter {
            if !lhs.is_valid(AtomWidth::MAX) || !rhs.is_valid(AtomWidth::MAX) {
                new = Atom::UNDEFINED;
                break 'valid;
            }

            if inv_cmp(lhs.state, rhs.state, AtomWidth::MAX) {
                new = Atom::LOGIC_0;
            }
        }
    }

    // The result is a single bit, so compare only the lowest bit position.
    if !out.eq(new, AtomWidth::MIN) {
        *out = new;
        OpResult::Changed
    } else {
        OpResult::Unchanged
    }
}
1482
1483pub(super) fn less_than_signed(
1484 width: NonZeroU8,
1485 out: &mut Atom,
1486 lhs: &[Atom],
1487 rhs: &[Atom],
1488) -> OpResult {
1489 cmp_signed(width, out, lhs, rhs, LogicStorage::ges)
1490}
1491
1492pub(super) fn greater_than_signed(
1493 width: NonZeroU8,
1494 out: &mut Atom,
1495 lhs: &[Atom],
1496 rhs: &[Atom],
1497) -> OpResult {
1498 cmp_signed(width, out, lhs, rhs, LogicStorage::les)
1499}
1500
1501pub(super) fn less_than_or_equal_signed(
1502 width: NonZeroU8,
1503 out: &mut Atom,
1504 lhs: &[Atom],
1505 rhs: &[Atom],
1506) -> OpResult {
1507 cmp_signed(width, out, lhs, rhs, LogicStorage::gts)
1508}
1509
1510pub(super) fn greater_than_or_equal_signed(
1511 width: NonZeroU8,
1512 out: &mut Atom,
1513 lhs: &[Atom],
1514 rhs: &[Atom],
1515) -> OpResult {
1516 cmp_signed(width, out, lhs, rhs, LogicStorage::lts)
1517}
1518
1519pub(super) fn zero_extend(
1520 val_width: NonZeroU8,
1521 out_width: NonZeroU8,
1522 val: &[Atom],
1523 out: &mut [Atom],
1524) -> OpResult {
1525 let val_tail_width =
1526 AtomWidth::new(val_width.get() % Atom::BITS.get()).unwrap_or(AtomWidth::MAX);
1527 let out_tail_width =
1528 AtomWidth::new(out_width.get() % Atom::BITS.get()).unwrap_or(AtomWidth::MAX);
1529
1530 let mut val_iter = val.iter();
1531 let mut out_iter = out.iter_mut();
1532
1533 let mut result = OpResult::Unchanged;
1534
1535 while val_iter.len() > 1 {
1536 let val = *val_iter.next().unwrap();
1537 let out = out_iter.next().unwrap();
1538
1539 if !out.eq(val, AtomWidth::MAX) {
1540 *out = val;
1541 result = OpResult::Changed;
1542 }
1543 }
1544
1545 let val = *val_iter.next().unwrap();
1546 let out = out_iter.next().unwrap();
1547
1548 let mask = LogicStorage::mask(val_tail_width);
1549 let extend = Atom::LOGIC_0;
1550 let val = Atom {
1551 state: (val.state & mask) | (extend.state & !mask),
1552 valid: (val.valid & mask) | (extend.valid & !mask),
1553 };
1554
1555 let tail_width = if out_iter.len() == 0 {
1556 out_tail_width
1557 } else {
1558 AtomWidth::MAX
1559 };
1560
1561 if !out.eq(val, tail_width) {
1562 *out = val;
1563 result = OpResult::Changed;
1564 }
1565
1566 if out_iter.len() > 0 {
1567 while out_iter.len() > 1 {
1568 let out = out_iter.next().unwrap();
1569
1570 if !out.eq(extend, AtomWidth::MAX) {
1571 *out = extend;
1572 result = OpResult::Changed;
1573 }
1574 }
1575
1576 let out = out_iter.next().unwrap();
1577
1578 if !out.eq(extend, out_tail_width) {
1579 *out = extend;
1580 result = OpResult::Changed;
1581 }
1582 }
1583
1584 result
1585}
1586
1587pub(super) fn sign_extend(
1588 val_width: NonZeroU8,
1589 out_width: NonZeroU8,
1590 val: &[Atom],
1591 out: &mut [Atom],
1592) -> OpResult {
1593 let val_tail_width =
1594 AtomWidth::new(val_width.get() % Atom::BITS.get()).unwrap_or(AtomWidth::MAX);
1595 let out_tail_width =
1596 AtomWidth::new(out_width.get() % Atom::BITS.get()).unwrap_or(AtomWidth::MAX);
1597
1598 let mut val_iter = val.iter();
1599 let mut out_iter = out.iter_mut();
1600
1601 let mut result = OpResult::Unchanged;
1602
1603 while val_iter.len() > 1 {
1604 let val = *val_iter.next().unwrap();
1605 let out = out_iter.next().unwrap();
1606
1607 if !out.eq(val, AtomWidth::MAX) {
1608 *out = val;
1609 result = OpResult::Changed;
1610 }
1611 }
1612
1613 let val = *val_iter.next().unwrap();
1614 let out = out_iter.next().unwrap();
1615
1616 let mask = LogicStorage::mask(val_tail_width);
1617 let extend = val
1618 .get_bit_state(AtomOffset::new(val_tail_width.get() - 1).unwrap())
1619 .splat();
1620 let val = Atom {
1621 state: (val.state & mask) | (extend.state & !mask),
1622 valid: (val.valid & mask) | (extend.valid & !mask),
1623 };
1624
1625 let tail_width = if out_iter.len() == 0 {
1626 out_tail_width
1627 } else {
1628 AtomWidth::MAX
1629 };
1630
1631 if !out.eq(val, tail_width) {
1632 *out = val;
1633 result = OpResult::Changed;
1634 }
1635
1636 if out_iter.len() > 0 {
1637 while out_iter.len() > 1 {
1638 let out = out_iter.next().unwrap();
1639
1640 if !out.eq(extend, AtomWidth::MAX) {
1641 *out = extend;
1642 result = OpResult::Changed;
1643 }
1644 }
1645
1646 let out = out_iter.next().unwrap();
1647
1648 if !out.eq(extend, out_tail_width) {
1649 *out = extend;
1650 result = OpResult::Changed;
1651 }
1652 }
1653
1654 result
1655}