// bva/fixed.rs

use std::cmp::Ordering;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::iter::repeat;
use std::mem::size_of;
use std::ops::{
    Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign,
    Mul, MulAssign, Not, Range, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign,
};

use crate::auto::Bv;
use crate::dynamic::Bvd;
use crate::iter::BitIterator;
use crate::utils::{IArray, IArrayMut, Integer, StaticCast};
use crate::{Bit, BitVector, ConvertionError, Endianness};

/// Type alias for an 8-bit capacity [`Bvf`].
pub type Bv8 = Bvf<u8, 1>;
/// Type alias for a 16-bit capacity [`Bvf`].
pub type Bv16 = Bvf<u16, 1>;
/// Type alias for a 32-bit capacity [`Bvf`].
pub type Bv32 = Bvf<u32, 1>;
/// Type alias for a 64-bit capacity [`Bvf`].
pub type Bv64 = Bvf<u64, 1>;
/// Type alias for a 128-bit capacity [`Bvf`].
pub type Bv128 = Bvf<u64, 2>;
/// Type alias for a 192-bit capacity [`Bvf`].
pub type Bv192 = Bvf<u64, 3>;
/// Type alias for a 256-bit capacity [`Bvf`].
pub type Bv256 = Bvf<u64, 4>;
/// Type alias for a 320-bit capacity [`Bvf`].
pub type Bv320 = Bvf<u64, 5>;
/// Type alias for a 384-bit capacity [`Bvf`].
pub type Bv384 = Bvf<u64, 6>;
/// Type alias for a 448-bit capacity [`Bvf`].
pub type Bv448 = Bvf<u64, 7>;
/// Type alias for a 512-bit capacity [`Bvf`].
pub type Bv512 = Bvf<u64, 8>;

// ------------------------------------------------------------------------------------------------
// Bit Vector Fixed allocation implementation
// ------------------------------------------------------------------------------------------------

/// A bit vector using a statically allocated (stack allocated) memory implementation.
///
/// As the capacity is static, performing operations exceeding the capacity will result in
/// an error or panic.
///
/// The integer types over which [`Bvf`] can be instantiated are as follows:
/// `u8`, `u16`, `u32`, `u64`, `u128`, `usize`.
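///
/// Illustrative example, a minimal sketch using only constructors defined in this module:
///
/// ```
/// use bva::{Bit, BitVector, Bvf};
///
/// let mut bv = Bvf::<u8, 4>::zeros(7);
/// assert_eq!(bv.len(), 7);
/// bv.push(Bit::One);
/// assert_eq!(bv.len(), 8);
/// ```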
#[derive(Copy, Clone, Debug)]
pub struct Bvf<I: Integer, const N: usize> {
    data: [I; N],
    length: usize,
}

impl<I: Integer, const N: usize> Bvf<I, N> {
    const BYTE_UNIT: usize = size_of::<I>();
    const NIBBLE_UNIT: usize = size_of::<I>() * 2;
    const BIT_UNIT: usize = size_of::<I>() * 8;

    /// Construct a new [`Bvf`] with the given data and length.
    /// The least significant bit will be the least significant bit of the first integer,
    /// and the most significant bit will be the most significant bit of the last integer.
    /// This is a low-level function and should be used with care; prefer the
    /// functions of the [`BitVector`] trait.
    ///
    /// ```
    /// use bva::Bvf;
    ///
    /// let data = [0x0001u16, 0x7000u16];
    /// let bv = Bvf::new(data, 32);
    /// assert_eq!(bv, Bvf::<u16, 2>::try_from(0x7000_0001u32).unwrap());
    /// ```
    pub const fn new(data: [I; N], length: usize) -> Self {
        Self { data, length }
    }

    /// Deconstruct a [`Bvf`] into its inner data and length.
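    ///
    /// Illustrative example, a sketch of the round trip through [`Bvf::new`]:
    ///
    /// ```
    /// use bva::Bvf;
    ///
    /// let bv = Bvf::new([0xABu8, 0xCD], 16);
    /// assert_eq!(bv.into_inner(), ([0xAB, 0xCD], 16));
    /// ```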
    pub const fn into_inner(self) -> ([I; N], usize) {
        (self.data, self.length)
    }

    /// Return this [`Bvf`]'s capacity. As the capacity is fixed, this is a `const` associated function.
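    ///
    /// Illustrative example, assuming the [`Bv32`] alias defined above:
    ///
    /// ```
    /// use bva::Bv32;
    ///
    /// assert_eq!(Bv32::capacity(), 32);
    /// ```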
    pub const fn capacity() -> usize {
        size_of::<I>() * 8 * N
    }

    const fn capacity_from_bit_len(bit_length: usize) -> usize {
        (bit_length + Self::BIT_UNIT - 1) / Self::BIT_UNIT
    }

    fn mod2n(&mut self, n: usize) {
        for i in 0..N {
            self.data[i] &= I::mask(usize::min(
                n - usize::min(n, i * Self::BIT_UNIT),
                Self::BIT_UNIT,
            ));
        }
    }
}

// ------------------------------------------------------------------------------------------------
// Bvf - Integer Array traits
// ------------------------------------------------------------------------------------------------

impl<I: Integer, const N: usize> IArray for Bvf<I, N> {
    type I = I;

    fn int_len<J: Integer>(&self) -> usize
    where
        I: StaticCast<J>,
    {
        (self.length + size_of::<J>() * 8 - 1) / (size_of::<J>() * 8)
    }

    fn get_int<J: Integer>(&self, idx: usize) -> Option<J>
    where
        I: StaticCast<J>,
    {
        if idx * J::BITS < self.length {
            IArray::get_int::<J>(self.data.as_ref(), idx)
                .map(|v| v & J::mask(self.length - idx * J::BITS))
        } else {
            None
        }
    }
}

impl<I: Integer, const N: usize> IArrayMut for Bvf<I, N> {
    type I = I;

    fn set_int<J: Integer>(&mut self, idx: usize, v: J) -> Option<J>
    where
        I: StaticCast<J>,
    {
        if idx * J::BITS < self.length {
            IArrayMut::set_int::<J>(
                self.data.as_mut(),
                idx,
                v & J::mask(self.length - idx * J::BITS),
            )
        } else {
            None
        }
    }
}

// ------------------------------------------------------------------------------------------------
// Bvf - Bit Vector core trait
// ------------------------------------------------------------------------------------------------

impl<I: Integer, const N: usize> BitVector for Bvf<I, N>
where
    I: StaticCast<I>,
{
    fn with_capacity(_length: usize) -> Self {
        Self::zeros(0)
    }

    fn zeros(length: usize) -> Self {
        assert!(length <= Self::capacity());
        Self {
            data: [I::ZERO; N],
            length,
        }
    }

    fn ones(length: usize) -> Self {
        assert!(length <= Self::capacity());
        let mut ones = Self {
            data: [I::MAX; N],
            length,
        };
        ones.mod2n(length);
        ones
    }

    fn capacity(&self) -> usize {
        Bvf::<I, N>::capacity()
    }

    fn len(&self) -> usize {
        self.length
    }

    fn from_binary<S: AsRef<str>>(string: S) -> Result<Self, ConvertionError> {
        let length = string.as_ref().chars().count();
        if length > Self::capacity() {
            return Err(ConvertionError::NotEnoughCapacity);
        }
        let mut data = [I::ZERO; N];

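        // Characters are consumed most-significant bit first: character i lands in word
        // (length - 1 - i) / BIT_UNIT, and each word is filled by shifting left by one.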
        for (i, c) in string.as_ref().chars().enumerate() {
            let j = (length - 1 - i) / Self::BIT_UNIT;
            data[j] = (data[j] << 1)
                | match c {
                    '0' => I::ZERO,
                    '1' => I::ONE,
                    _ => return Err(ConvertionError::InvalidFormat(i)),
                };
        }
        Ok(Self { data, length })
    }

    fn from_hex<S: AsRef<str>>(string: S) -> Result<Self, ConvertionError> {
        let length = string.as_ref().chars().count();
        if length * 4 > Self::capacity() {
            return Err(ConvertionError::NotEnoughCapacity);
        }
        let mut data = [I::ZERO; N];

        for (i, c) in string.as_ref().chars().enumerate() {
            let j = (length - 1 - i) / Self::NIBBLE_UNIT;
            data[j] = (data[j] << 4)
                | match c.to_digit(16) {
                    Some(d) => I::cast_from(d as u8),
                    None => return Err(ConvertionError::InvalidFormat(i)),
                };
        }
        Ok(Self {
            data,
            length: length * 4,
        })
    }

    fn from_bytes<B: AsRef<[u8]>>(
        bytes: B,
        endianness: Endianness,
    ) -> Result<Self, ConvertionError> {
        let byte_length = bytes.as_ref().len();
        if byte_length * 8 > Self::capacity() {
            return Err(ConvertionError::NotEnoughCapacity);
        }
        let mut data = [I::ZERO; N];

        match endianness {
            Endianness::Little => {
                if size_of::<I>() == 1 {
                    for (i, b) in bytes.as_ref().iter().enumerate().rev() {
                        data[i] = I::cast_from(*b);
                    }
                } else {
                    for (i, b) in bytes.as_ref().iter().enumerate().rev() {
                        let j = i / Self::BYTE_UNIT;
                        data[j] = (data[j] << 8) | I::cast_from(*b);
                    }
                }
            }
            Endianness::Big => {
                if size_of::<I>() == 1 {
                    for (i, b) in bytes.as_ref().iter().enumerate() {
                        data[byte_length - 1 - i] = I::cast_from(*b);
                    }
                } else {
                    for (i, b) in bytes.as_ref().iter().enumerate() {
                        let j = (byte_length - 1 - i) / Self::BYTE_UNIT;
                        data[j] = (data[j] << 8) | I::cast_from(*b);
                    }
                }
            }
        }
        Ok(Self {
            data,
            length: byte_length * 8,
        })
    }

    fn to_vec(&self, endianness: Endianness) -> Vec<u8> {
        let num_bytes = (self.length + 7) / 8;
        let mut buf: Vec<u8> = repeat(0u8).take(num_bytes).collect();
        match endianness {
            Endianness::Little => {
                for i in 0..num_bytes {
                    buf[i] =
                        (self.data[i / Self::BYTE_UNIT] >> ((i % Self::BYTE_UNIT) * 8)).cast_to();
                }
            }
            Endianness::Big => {
                for i in 0..num_bytes {
                    buf[num_bytes - i - 1] =
                        (self.data[i / Self::BYTE_UNIT] >> ((i % Self::BYTE_UNIT) * 8)).cast_to();
                }
            }
        }
        buf
    }

    fn read<R: std::io::Read>(
        reader: &mut R,
        length: usize,
        endianness: Endianness,
    ) -> std::io::Result<Self> {
        if length > Self::capacity() {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                ConvertionError::NotEnoughCapacity,
            ));
        }
        let num_bytes = (length + 7) / 8;
        let mut buf: Vec<u8> = repeat(0u8).take(num_bytes).collect();
        reader.read_exact(&mut buf[..])?;
        let mut bv = Self::from_bytes(&buf[..], endianness)
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
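        // The last byte read may carry more bits than requested, so clear any bits
        // above `length` in the top data word before fixing up the length.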
        if let Some(l) = bv.data.last_mut() {
            *l &= I::mask(length.wrapping_sub(1) % Self::BIT_UNIT + 1);
        }
        bv.length = length;
        Ok(bv)
    }

    fn write<W: std::io::Write>(
        &self,
        writer: &mut W,
        endianness: Endianness,
    ) -> std::io::Result<()> {
        writer.write_all(self.to_vec(endianness).as_slice())
    }

    fn get(&self, index: usize) -> Bit {
        debug_assert!(index < self.length);
        ((self.data[index / Self::BIT_UNIT] >> (index % Self::BIT_UNIT)) & I::ONE).into()
    }

    fn set(&mut self, index: usize, bit: Bit) {
        debug_assert!(index < self.length);
        let b: I = (I::from(bit)) << (index % Self::BIT_UNIT);
        let mask: I = !(I::ONE << (index % Self::BIT_UNIT));
        self.data[index / Self::BIT_UNIT] = (self.data[index / Self::BIT_UNIT] & mask) | b;
    }

    fn copy_range(&self, range: Range<usize>) -> Self {
        debug_assert!(range.start <= self.len() && range.end <= self.len());
        let length = range.end - usize::min(range.start, range.end);
        let mut data = [I::ZERO; N];
        let offset = range.start / Self::BIT_UNIT;
        let slide = range.start % Self::BIT_UNIT;

        // If slide is 0, the left shift amount below would be the full BIT_UNIT, which is UB.
        // Since we don't have a checked variant, we have to duplicate the implementation.
        if slide > 0 {
            for i in 0..Self::capacity_from_bit_len(length) {
                data[i] = (self.data[i + offset] >> slide)
                    | (*self.data.get(i + offset + 1).unwrap_or(&I::ZERO)
                        << (Self::BIT_UNIT - slide));
            }
        } else {
            data[..Self::capacity_from_bit_len(length)].copy_from_slice(
                &self.data[offset..(Self::capacity_from_bit_len(length) + offset)],
            );
        }

        if let Some(last) = data.get_mut(length / Self::BIT_UNIT) {
            *last &= I::mask(length.wrapping_sub(1) % Self::BIT_UNIT + 1);
        }

        Bvf::<I, N> { data, length }
    }

    fn push(&mut self, bit: Bit) {
        debug_assert!(self.length < Self::capacity());
        self.length += 1;
        self.set(self.length - 1, bit);
    }

    fn pop(&mut self) -> Option<Bit> {
        let mut b = None;
        if self.length > 0 {
            b = Some(self.get(self.length - 1));
            self.set(self.length - 1, Bit::Zero);
            self.length -= 1;
        }
        b
    }

    fn resize(&mut self, new_length: usize, bit: Bit) {
        if new_length < self.length {
            for i in (new_length / Self::BIT_UNIT + 1)..Self::capacity_from_bit_len(self.length) {
                self.data[i] = I::ZERO;
            }
            if let Some(l) = self.data.get_mut(new_length / Self::BIT_UNIT) {
                *l &= I::mask(new_length % Self::BIT_UNIT);
            }
            self.length = new_length;
        } else if new_length > self.length {
            debug_assert!(new_length <= Self::capacity());
            let sign_pattern = match bit {
                Bit::Zero => I::MIN,
                Bit::One => I::MAX,
            };
            if let Some(l) = self.data.get_mut(self.length / Self::BIT_UNIT) {
                *l |= sign_pattern & !I::mask(self.length % Self::BIT_UNIT);
            }
            for i in (self.length / Self::BIT_UNIT + 1)..Self::capacity_from_bit_len(new_length) {
                self.data[i] = sign_pattern;
            }
            if let Some(l) = self.data.get_mut(new_length / Self::BIT_UNIT) {
                *l &= I::mask(new_length % Self::BIT_UNIT);
            }
            self.length = new_length;
        }
    }

    fn append<B: BitVector>(&mut self, suffix: &B) {
        let offset = self.length % u8::BITS as usize;
        let slide = self.length / u8::BITS as usize;
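        // `slide` is the byte index where the suffix starts and `offset` the bit
        // misalignment inside that byte; a misaligned suffix is stitched in byte by byte.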
        self.resize(self.length + suffix.len(), Bit::Zero);
        if offset == 0 {
            let mut i = 0;
            while let Some(b) = suffix.get_int::<u8>(i) {
                self.set_int::<u8>(i + slide, b);
                i += 1;
            }
        } else if let Some(b) = suffix.get_int::<u8>(0) {
            self.set_int::<u8>(
                slide,
                self.get_int::<u8>(slide).unwrap_or(0) | (b << offset),
            );

            let rev_offset = u8::BITS as usize - offset;
            let mut i = 1;
            let mut prev = b;

            while let Some(b) = suffix.get_int::<u8>(i) {
                self.set_int::<u8>(i + slide, (prev >> rev_offset) | (b << offset));
                prev = b;
                i += 1;
            }

            self.set_int::<u8>(i + slide, prev >> rev_offset);
        }
    }

    fn prepend<B: BitVector>(&mut self, prefix: &B) {
        self.resize(self.length + prefix.len(), Bit::Zero);
        *self <<= prefix.len();
        let last = prefix.int_len::<u8>() - 1;

        for i in 0..last {
            self.set_int::<u8>(i, prefix.get_int::<u8>(i).unwrap());
        }

        self.set_int::<u8>(
            last,
            self.get_int::<u8>(last).unwrap() | prefix.get_int::<u8>(last).unwrap(),
        );
    }

    fn shl_in(&mut self, bit: Bit) -> Bit {
        let mut carry = bit;
        for i in 0..(self.length / Self::BIT_UNIT) {
            let b = (self.data[i] >> (Self::BIT_UNIT - 1)) & I::ONE;
            self.data[i] = (self.data[i] << 1) | carry.into();
            carry = b.into();
        }
        if self.length % Self::BIT_UNIT != 0 {
            let i = self.length / Self::BIT_UNIT;
            let b = (self.data[i] >> (self.length % Self::BIT_UNIT - 1)) & I::ONE;
            self.data[i] =
                ((self.data[i] << 1) | carry.into()) & I::mask(self.length % Self::BIT_UNIT);
            carry = b.into();
        }
        carry
    }

    fn shr_in(&mut self, bit: Bit) -> Bit {
        let mut carry = bit;
        if self.length % Self::BIT_UNIT != 0 {
            let i = self.length / Self::BIT_UNIT;
            let b = self.data[i] & I::ONE;
            self.data[i] =
                (self.data[i] >> 1) | (I::from(carry) << (self.length % Self::BIT_UNIT - 1));
            carry = b.into();
        }
        for i in (0..(self.length / Self::BIT_UNIT)).rev() {
            let b = self.data[i] & I::ONE;
            self.data[i] = (self.data[i] >> 1) | (I::from(carry) << (Self::BIT_UNIT - 1));
            carry = b.into();
        }
        carry
    }

    fn rotl(&mut self, rot: usize) {
        // TODO: optimize to do it in place
        let mut new_data = [I::ZERO; N];
        let mut old_idx = 0;
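        // Move the bits chunk by chunk: each iteration copies the longest run of bits
        // that stays within a single data word on both the source and destination side.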
        while old_idx < self.length {
            let new_idx = (old_idx + rot) % self.length;
            let l = (Self::BIT_UNIT - new_idx % Self::BIT_UNIT)
                .min(Self::BIT_UNIT - old_idx % Self::BIT_UNIT)
                .min(self.length - new_idx)
                .min(self.length - old_idx);
            new_data[new_idx / Self::BIT_UNIT] |=
                ((self.data[old_idx / Self::BIT_UNIT] >> (old_idx % Self::BIT_UNIT)) & I::mask(l))
                    << (new_idx % Self::BIT_UNIT);
            old_idx += l;
        }
        self.data = new_data;
    }

    fn rotr(&mut self, rot: usize) {
        // TODO: optimize to do it in place
        let mut new_data = [I::ZERO; N];
        let mut new_idx = 0;
        while new_idx < self.length {
            let old_idx = (new_idx + rot) % self.length;
            let l = (Self::BIT_UNIT - new_idx % Self::BIT_UNIT)
                .min(Self::BIT_UNIT - old_idx % Self::BIT_UNIT)
                .min(self.length - new_idx)
                .min(self.length - old_idx);
            new_data[new_idx / Self::BIT_UNIT] |=
                ((self.data[old_idx / Self::BIT_UNIT] >> (old_idx % Self::BIT_UNIT)) & I::mask(l))
                    << (new_idx % Self::BIT_UNIT);
            new_idx += l;
        }
        self.data = new_data;
    }

    fn leading_zeros(&self) -> usize {
        let mut count = 0;
        let mut i = Self::capacity_from_bit_len(self.length);
        if i > 0 {
            let lastbit = (self.length - 1) % Self::BIT_UNIT + 1;
            let mut v = self.data[i - 1] & I::mask(lastbit);
            count = v.leading_zeros() - (Self::BIT_UNIT - lastbit);
            i -= 1;
            while v == I::ZERO && i > 0 {
                v = self.data[i - 1];
                count += v.leading_zeros();
                i -= 1;
            }
        }
        count
    }

    fn leading_ones(&self) -> usize {
        let mut count = 0;
        let mut i = Self::capacity_from_bit_len(self.length);
        if i > 0 {
            let lastbit = (self.length - 1) % Self::BIT_UNIT + 1;
            let mut v = self.data[i - 1] | !I::mask(lastbit);
            count = v.leading_ones() - (Self::BIT_UNIT - lastbit);
            i -= 1;
            while v == I::MAX && i > 0 {
                v = self.data[i - 1];
                count += v.leading_ones();
                i -= 1;
            }
        }
        count
    }

    fn trailing_zeros(&self) -> usize {
        let mut count = 0;
        let mut i = 0;
        if i < Self::capacity_from_bit_len(self.length) {
            let mut v = I::ZERO;
            while v == I::ZERO && i < Self::capacity_from_bit_len(self.length) - 1 {
                v = self.data[i];
                count += v.trailing_zeros();
                i += 1;
            }
            if v == I::ZERO {
                let lastbit = (self.length - 1) % Self::BIT_UNIT + 1;
                count += usize::min(self.data[i].trailing_zeros(), lastbit);
            }
        }
        count
    }

    fn trailing_ones(&self) -> usize {
        let mut count = 0;
        let mut i = 0;
        if i < Self::capacity_from_bit_len(self.length) {
            let mut v = I::MAX;
            while v == I::MAX && i < Self::capacity_from_bit_len(self.length) - 1 {
                v = self.data[i];
                count += v.trailing_ones();
                i += 1;
            }
            if v == I::MAX {
                let lastbit = (self.length - 1) % Self::BIT_UNIT + 1;
                count += usize::min(self.data[i].trailing_ones(), lastbit);
            }
        }
        count
    }

    fn is_zero(&self) -> bool {
        self.data.iter().all(|&v| v == I::ZERO)
    }

    fn div_rem<B: BitVector>(&self, divisor: &B) -> (Self, Self)
    where
        Self: for<'a> TryFrom<&'a B, Error: fmt::Debug>,
    {
        assert!(!divisor.is_zero(), "Division by zero");
        let mut rem = *self;
        let mut quotient = Bvf::<I, N>::zeros(self.length);
        if divisor.significant_bits() > self.significant_bits() {
            return (quotient, rem);
        }

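        // Classic shift-and-subtract (restoring) long division: align the divisor with the
        // dividend's most significant bit, then walk it back down one bit at a time,
        // setting a quotient bit whenever the shifted divisor still fits in the remainder.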
        let shift = self.significant_bits() - divisor.significant_bits();
        let mut divisor: Bvf<I, N> = divisor.try_into().expect("divisor should fit in Self");
        divisor.resize(self.length, Bit::Zero);
        divisor <<= shift;

        for i in (0..shift + 1).rev() {
            if rem >= divisor {
                rem -= &divisor;
                quotient.set(i, Bit::One);
            }
            divisor >>= 1u32;
        }

        (quotient, rem)
    }

    fn iter(&self) -> BitIterator<'_, Self> {
        self.into_iter()
    }
}

// ------------------------------------------------------------------------------------------------
// Bvf - Hasher Implementation
// ------------------------------------------------------------------------------------------------

impl<I: Integer, const N: usize> Hash for Bvf<I, N> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.length.hash(state);
        for i in 0..Self::capacity_from_bit_len(self.length) {
            self.data[i].hash(state);
        }
    }
}

// ------------------------------------------------------------------------------------------------
// Bvf - Bit iterator trait
// ------------------------------------------------------------------------------------------------

impl<'a, I: Integer, const N: usize> IntoIterator for &'a Bvf<I, N> {
    type Item = Bit;
    type IntoIter = BitIterator<'a, Bvf<I, N>>;

    fn into_iter(self) -> Self::IntoIter {
        BitIterator::new(self)
    }
}

impl<I: Integer, const N: usize> FromIterator<Bit> for Bvf<I, N>
where
    I: StaticCast<I>,
{
    fn from_iter<T: IntoIterator<Item = Bit>>(iter: T) -> Self {
        let iter = iter.into_iter();
        let mut bv = Self::with_capacity(iter.size_hint().0);
        iter.for_each(|b| bv.push(b));
        bv
    }
}

impl<I: Integer, const N: usize> Extend<Bit> for Bvf<I, N> {
    fn extend<T: IntoIterator<Item = Bit>>(&mut self, iter: T) {
        iter.into_iter().for_each(|b| self.push(b));
    }
}

// ------------------------------------------------------------------------------------------------
// Bvf - Formatting traits
// ------------------------------------------------------------------------------------------------

impl<I: Integer, const N: usize> fmt::Binary for Bvf<I, N>
where
    I: StaticCast<I>,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut i = self.length;
        let mut s = String::with_capacity(self.length);

        while i > 0 && self.get(i - 1) == Bit::Zero {
            i -= 1;
        }
        while i > 0 {
            match self.get(i - 1) {
                Bit::Zero => s.push('0'),
                Bit::One => s.push('1'),
            }
            i -= 1;
        }
        if s.is_empty() {
            s.push('0');
        }

        f.pad_integral(true, "0b", &s)
    }
}

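/// Decimal formatting via repeated division by ten. Illustrative sketch:
///
/// ```
/// use bva::Bv32;
///
/// let bv = Bv32::try_from(42u32).unwrap();
/// assert_eq!(format!("{}", bv), "42");
/// ```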
impl<I: Integer, const N: usize> fmt::Display for Bvf<I, N> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let base = Self::try_from(10u8).expect("Should fit in any Bvf type");
        let mut s = Vec::<char>::new();
        let mut quotient = *self;
        let mut remainder;

        while !quotient.is_zero() {
            (quotient, remainder) = quotient.div_rem::<Bvf<I, N>>(&base);
            // Remainder of division by 10 will be a single digit
            s.push(char::from_digit(u32::try_from(&remainder).unwrap(), 10).unwrap());
        }
        if s.is_empty() {
            s.push('0');
        }

        f.pad_integral(true, "", s.iter().rev().collect::<String>().as_str())
    }
}

impl<I: Integer, const N: usize> fmt::Octal for Bvf<I, N> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        const SEMI_NIBBLE: [char; 8] = ['0', '1', '2', '3', '4', '5', '6', '7'];
        let length = (self.length + 2) / 3;
        let mut s = Vec::<char>::with_capacity(length);
        let mut it = self.iter();
        let mut last_nz = 0;

        while let Some(b0) = it.next() {
            let b1 = it.next().unwrap_or(Bit::Zero);
            let b2 = it.next().unwrap_or(Bit::Zero);
            let octet = ((b2 as u8) << 2) | ((b1 as u8) << 1) | b0 as u8;
            if octet != 0 {
                last_nz = s.len();
            }
            s.push(SEMI_NIBBLE[octet as usize]);
        }
        if s.is_empty() {
            s.push('0');
        }
        s.truncate(last_nz + 1);

        f.pad_integral(true, "0o", s.iter().rev().collect::<String>().as_str())
    }
}

impl<I: Integer, const N: usize> fmt::LowerHex for Bvf<I, N> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        const NIBBLE: [char; 16] = [
            '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f',
        ];
        let mut i = (self.length + 3) / 4;
        let mut s = String::with_capacity(i);

        while i > 0
            && StaticCast::<u8>::cast_to(
                self.data[(i - 1) / Self::NIBBLE_UNIT] >> (((i - 1) % Self::NIBBLE_UNIT) * 4),
            ) & 0xf
                == 0
        {
            i -= 1;
        }
        while i > 0 {
            let nibble = StaticCast::<u8>::cast_to(
                self.data[(i - 1) / Self::NIBBLE_UNIT] >> (((i - 1) % Self::NIBBLE_UNIT) * 4),
            ) & 0xf;
            s.push(NIBBLE[nibble as usize]);
            i -= 1;
        }
        if s.is_empty() {
            s.push('0');
        }

        f.pad_integral(true, "0x", &s)
    }
}

impl<I: Integer, const N: usize> fmt::UpperHex for Bvf<I, N> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        const NIBBLE: [char; 16] = [
            '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F',
        ];
        let mut i = (self.length + 3) / 4;
        let mut s = String::with_capacity(i);

        while i > 0
            && StaticCast::<u8>::cast_to(
                self.data[(i - 1) / Self::NIBBLE_UNIT] >> (((i - 1) % Self::NIBBLE_UNIT) * 4),
            ) & 0xf
                == 0
        {
            i -= 1;
        }
        while i > 0 {
            let nibble = StaticCast::<u8>::cast_to(
                self.data[(i - 1) / Self::NIBBLE_UNIT] >> (((i - 1) % Self::NIBBLE_UNIT) * 4),
            ) & 0xf;
            s.push(NIBBLE[nibble as usize]);
            i -= 1;
        }
        if s.is_empty() {
            s.push('0');
        }

        f.pad_integral(true, "0x", &s)
    }
}

// ------------------------------------------------------------------------------------------------
// Bvf - Comparison traits
// ------------------------------------------------------------------------------------------------

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> PartialEq<Bvf<I1, N1>>
    for Bvf<I2, N2>
where
    I1: StaticCast<I1>,
    I2: StaticCast<I1>,
{
    fn eq(&self, other: &Bvf<I1, N1>) -> bool {
        for i in 0..usize::max(IArray::int_len::<I1>(self), IArray::int_len::<I1>(other)) {
            if IArray::get_int(self, i).unwrap_or(I1::ZERO)
                != IArray::get_int(other, i).unwrap_or(I1::ZERO)
            {
                return false;
            }
        }
        true
    }
}

impl<I: Integer, const N: usize> PartialEq<Bvd> for Bvf<I, N> {
    fn eq(&self, other: &Bvd) -> bool {
        other.eq(self)
    }
}

impl<I: Integer, const N: usize> PartialEq<Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn eq(&self, other: &Bv) -> bool {
        other.eq(self)
    }
}

impl<I: Integer, const N: usize> Eq for Bvf<I, N> where I: StaticCast<I> {}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> PartialOrd<Bvf<I1, N1>>
    for Bvf<I2, N2>
where
    I1: StaticCast<I1>,
    I2: StaticCast<I1>,
{
    fn partial_cmp(&self, other: &Bvf<I1, N1>) -> Option<std::cmp::Ordering> {
        for i in (0..usize::max(IArray::int_len::<I1>(self), IArray::int_len::<I1>(other))).rev() {
            match IArray::get_int(self, i)
                .unwrap_or(I1::ZERO)
                .cmp(&IArray::get_int(other, i).unwrap_or(I1::ZERO))
            {
                Ordering::Equal => continue,
                ord => return Some(ord),
            }
        }
        Some(Ordering::Equal)
    }
}

impl<I: Integer, const N: usize> PartialOrd<Bvd> for Bvf<I, N> {
    fn partial_cmp(&self, other: &Bvd) -> Option<Ordering> {
        other.partial_cmp(self).map(|o| o.reverse())
    }
}

impl<I: Integer, const N: usize> PartialOrd<Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn partial_cmp(&self, other: &Bv) -> Option<Ordering> {
        other.partial_cmp(self).map(|o| o.reverse())
    }
}

impl<I: Integer, const N: usize> Ord for Bvf<I, N>
where
    I: StaticCast<I>,
{
    fn cmp(&self, other: &Self) -> Ordering {
        // Partial Cmp never returns None
        self.partial_cmp(other).unwrap()
    }
}

// ------------------------------------------------------------------------------------------------
// Bvf - Conversion traits
// ------------------------------------------------------------------------------------------------

macro_rules! impl_tryfrom { ($($type:ty),+) => {
    $(
        impl<I: Integer, const N: usize> TryFrom<$type> for Bvf<I, N>
            where I: StaticCast<$type>
        {
            type Error = ConvertionError;

            fn try_from(int: $type) -> Result<Self, Self::Error> {
                // Branch should be optimized at compile time
                if size_of::<I>() >= size_of::<$type>() {
                    let mut data = [I::ZERO; N];
                    data[0] = I::cast_from(int);
                    return Ok(Bvf {
                        data,
                        length: <$type>::BITS as usize
                    });
                }
                else {
                    // Check if the value overflows the bit vector
                    if (<$type>::BITS - int.leading_zeros()) as usize > Self::capacity() {
                        return Err(ConvertionError::NotEnoughCapacity);
                    }
                    let mut data = [I::ZERO; N];
                    for i in 0..N {
                        data[i] = I::cast_from(int.checked_shr((i * Self::BIT_UNIT) as u32).unwrap_or(0));
                    }
                    return Ok(Bvf {
                        data,
                        length: usize::min(<$type>::BITS as usize, Self::capacity())
                    });
                }
            }
        }

        impl<I: Integer, const N: usize> TryFrom<&$type> for Bvf<I, N>
            where I: StaticCast<$type>
        {
            type Error = ConvertionError;

            fn try_from(int: &$type) -> Result<Self, Self::Error> {
                Self::try_from(*int)
            }
        }

        impl<I: Integer, const N: usize> TryFrom<&Bvf<I, N>> for $type
            where I: StaticCast<$type>
        {
            type Error = ConvertionError;
            fn try_from(bv: &Bvf<I, N>) -> Result<Self, Self::Error> {
                // Check if the bit vector overflows the target type
                if bv.significant_bits() > <$type>::BITS as usize {
                    Err(ConvertionError::NotEnoughCapacity)
                }
                else {
                    Ok(IArray::get_int(bv, 0).unwrap())
                }
            }
        }

        impl<I: Integer, const N: usize> TryFrom<Bvf<I, N>> for $type
            where I: StaticCast<$type>
        {
            type Error = ConvertionError;
            fn try_from(bv: Bvf<I, N>) -> Result<Self, Self::Error> {
                Self::try_from(&bv)
            }
        }
    )+
}}

impl_tryfrom!(u8, u16, u32, u64, u128, usize);

impl<I: Integer + StaticCast<J>, J: Integer, const N: usize> TryFrom<&[J]> for Bvf<I, N> {
    type Error = ConvertionError;

    fn try_from(slice: &[J]) -> Result<Self, Self::Error> {
        if slice.len() * J::BITS <= Self::capacity() {
            let mut bvf = Bvf::<I, N>::zeros(slice.len() * J::BITS);
            for (i, v) in slice.iter().enumerate() {
                bvf.set_int(i, *v);
            }
            Ok(bvf)
        } else {
            Err(ConvertionError::NotEnoughCapacity)
        }
    }
}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> TryFrom<&Bvf<I1, N1>>
    for Bvf<I2, N2>
where
    I1: StaticCast<I2>,
{
    type Error = ConvertionError;

    fn try_from(bvf: &Bvf<I1, N1>) -> Result<Self, Self::Error> {
        if Self::capacity() < bvf.length {
            Err(ConvertionError::NotEnoughCapacity)
        } else {
            let mut data = [I2::ZERO; N2];
            for i in 0..usize::min(N2, IArray::int_len::<I2>(bvf)) {
                data[i] = IArray::get_int(bvf, i).unwrap();
            }
            Ok(Bvf::<I2, N2> {
                data,
                length: bvf.length,
            })
        }
    }
}

impl<I: Integer, const N: usize> TryFrom<&Bvd> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Error = ConvertionError;
    fn try_from(bvd: &Bvd) -> Result<Self, Self::Error> {
        if bvd.len() > Bvf::<I, N>::capacity() {
            Err(ConvertionError::NotEnoughCapacity)
        } else {
            let mut data = [I::ZERO; N];
            for i in 0..N {
                data[i] = bvd.get_int(i).unwrap_or(I::ZERO);
            }
            Ok(Bvf::<I, N> {
                data,
                length: bvd.len(),
            })
        }
    }
}

impl<I: Integer, const N: usize> TryFrom<Bvd> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Error = ConvertionError;
    fn try_from(bvd: Bvd) -> Result<Self, Self::Error> {
        Self::try_from(&bvd)
    }
}

impl<I: Integer, const N: usize> TryFrom<&Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Error = ConvertionError;
    fn try_from(bv: &Bv) -> Result<Self, Self::Error> {
        if bv.len() > Bvf::<I, N>::capacity() {
            Err(ConvertionError::NotEnoughCapacity)
        } else {
            let mut data = [I::ZERO; N];
            for i in 0..IArray::int_len::<I>(bv) {
                data[i] = IArray::get_int(bv, i).unwrap();
            }
            Ok(Bvf::<I, N> {
                data,
                length: bv.len(),
            })
        }
    }
}

impl<I: Integer, const N: usize> TryFrom<Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Error = ConvertionError;
    fn try_from(bv: Bv) -> Result<Self, Self::Error> {
        Self::try_from(&bv)
    }
}

// ------------------------------------------------------------------------------------------------
// Bvf - Unary operator & shifts
// ------------------------------------------------------------------------------------------------

impl<I: Integer, const N: usize> Not for Bvf<I, N> {
    type Output = Bvf<I, N>;

    fn not(mut self) -> Bvf<I, N> {
        for i in 0..N {
            self.data[i] = !self.data[i];
        }
        self.mod2n(self.length);
        self
    }
}

impl<I: Integer, const N: usize> Not for &Bvf<I, N> {
    type Output = Bvf<I, N>;

    fn not(self) -> Bvf<I, N> {
        (*self).not()
    }
}

macro_rules! impl_shifts {({$($rhs:ty),+}) => {
    $(
        impl<I: Integer, const N: usize> ShlAssign<$rhs> for Bvf<I, N> {
            fn shl_assign(&mut self, rhs: $rhs) {
                let shift = usize::try_from(rhs).map_or(0, |s| s);
                if shift == 0 {
                    return;
                }
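                // Shift upwards in place: walk from the top of the vector down, copying
                // word-aligned chunks of length `l` from `new_idx - shift` to `new_idx`
                // before those source bits are themselves overwritten, then zero the
                // bottom `shift` bits.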
                let mut new_idx = self.length;
                while new_idx > shift {
                    let l = (new_idx.wrapping_sub(1) % Self::BIT_UNIT + 1)
                            .min((new_idx - shift).wrapping_sub(1) % Self::BIT_UNIT + 1);
                    new_idx -= l;
                    let old_idx = new_idx - shift;
                    let d = (self.data[old_idx / Self::BIT_UNIT] >> (old_idx % Self::BIT_UNIT)) & I::mask(l);
                    self.data[new_idx / Self::BIT_UNIT] &= !(I::mask(l) << (new_idx % Self::BIT_UNIT));
                    self.data[new_idx / Self::BIT_UNIT] |= d << (new_idx % Self::BIT_UNIT);
                }
                while new_idx > 0 {
                    let l = (new_idx.wrapping_sub(1) % Self::BIT_UNIT) + 1;
                    new_idx -= l;
                    self.data[new_idx / Self::BIT_UNIT] &= !(I::mask(l) << (new_idx % Self::BIT_UNIT));
                }
            }
        }

        impl<I: Integer, const N: usize> ShlAssign<&$rhs> for Bvf<I, N> {
            fn shl_assign(&mut self, rhs: &$rhs) {
                self.shl_assign(*rhs);
            }
        }

        impl<I: Integer, const N: usize> Shl<$rhs> for Bvf<I, N> {
            type Output = Bvf<I, N>;
            fn shl(mut self, rhs: $rhs) -> Self {
                self.shl_assign(rhs);
                return self;
            }
        }

        impl<I: Integer, const N: usize> Shl<&$rhs> for Bvf<I, N> {
            type Output = Bvf<I, N>;
            fn shl(mut self, rhs: &$rhs) -> Self {
                self.shl_assign(rhs);
                return self;
            }
        }

        impl<I: Integer, const N: usize> Shl<$rhs> for &Bvf<I, N> {
            type Output = Bvf<I, N>;
            fn shl(self, rhs: $rhs) -> Bvf<I, N> {
                return (*self).shl(rhs);
            }
        }

        impl<I: Integer, const N: usize> Shl<&$rhs> for &Bvf<I, N> {
            type Output = Bvf<I, N>;
            fn shl(self, rhs: &$rhs) -> Bvf<I, N> {
                self.shl(*rhs)
            }
        }

        impl<I: Integer, const N: usize> ShrAssign<$rhs> for Bvf<I, N> {
            fn shr_assign(&mut self, rhs: $rhs) {
                let shift = usize::try_from(rhs).map_or(0, |s| s);
                if shift == 0 {
                    return;
                }
                let mut new_idx = 0;
                while new_idx + shift < self.length {
                    let old_idx = new_idx + shift;
                    let l = (Self::BIT_UNIT - new_idx % Self::BIT_UNIT)
                            .min(Self::BIT_UNIT - old_idx % Self::BIT_UNIT);
                    let d = (self.data[old_idx / Self::BIT_UNIT] >> (old_idx % Self::BIT_UNIT)) & I::mask(l);
                    self.data[new_idx / Self::BIT_UNIT] &= !(I::mask(l) << (new_idx % Self::BIT_UNIT));
                    self.data[new_idx / Self::BIT_UNIT] |= d << (new_idx % Self::BIT_UNIT);
                    new_idx += l;
                }
                while new_idx < self.length {
                    let l = Self::BIT_UNIT - new_idx % Self::BIT_UNIT;
                    self.data[new_idx / Self::BIT_UNIT] &= !(I::mask(l) << (new_idx % Self::BIT_UNIT));
                    new_idx += l;
                }
            }
        }

        impl<I: Integer, const N: usize> ShrAssign<&$rhs> for Bvf<I, N> {
            fn shr_assign(&mut self, rhs: &$rhs) {
                self.shr_assign(*rhs);
            }
        }

        impl<I: Integer, const N: usize> Shr<$rhs> for Bvf<I, N> {
            type Output = Bvf<I, N>;
            fn shr(mut self, rhs: $rhs) -> Self {
                self.shr_assign(rhs);
                return self;
            }
        }

        impl<I: Integer, const N: usize> Shr<&$rhs> for Bvf<I, N> {
            type Output = Bvf<I, N>;
            fn shr(mut self, rhs: &$rhs) -> Self {
                self.shr_assign(rhs);
                return self;
            }
        }

        impl<I: Integer, const N: usize> Shr<$rhs> for &Bvf<I, N> {
            type Output = Bvf<I, N>;
            fn shr(self, rhs: $rhs) -> Bvf<I, N> {
                return (*self).shr(rhs);
            }
        }

        impl<I: Integer, const N: usize> Shr<&$rhs> for &Bvf<I, N> {
            type Output = Bvf<I, N>;
            fn shr(self, rhs: &$rhs) -> Bvf<I, N> {
                self.shr(*rhs)
            }
        }
    )+
}}

impl_shifts!({u8, u16, u32, u64, u128, usize});

// ------------------------------------------------------------------------------------------------
// Uint helper macro
// ------------------------------------------------------------------------------------------------

macro_rules! impl_op_uint {
    ($trait:ident, $method:ident, {$($uint:ty),+}) => {
        $(
            impl<I: Integer, const N: usize> $trait<&$uint> for &Bvf<I, N>
            where
                u64: StaticCast<I>,
            {
                type Output = Bvf<I, N>;
                fn $method(self, rhs: &$uint) -> Self::Output {
                    // All primitive unsigned types fit in 128 bits
                    let temp = Bvf::<u64, 2>::try_from(*rhs).unwrap();
                    self.$method(temp)
                }
            }

            impl<I: Integer, const N: usize> $trait<$uint> for &Bvf<I, N>
            where
                u64: StaticCast<I>,
            {
                type Output = Bvf<I, N>;
                fn $method(self, rhs: $uint) -> Self::Output {
                    // All primitive unsigned types fit in 128 bits
                    let temp = Bvf::<u64, 2>::try_from(rhs).unwrap();
                    self.$method(temp)
                }
            }

            impl<I: Integer, const N: usize> $trait<&$uint> for Bvf<I, N>
            where
                u64: StaticCast<I>,
            {
                type Output = Bvf<I, N>;
                fn $method(self, rhs: &$uint) -> Self::Output {
                    // All primitive unsigned types fit in 128 bits
                    let temp = Bvf::<u64, 2>::try_from(*rhs).unwrap();
                    self.$method(temp)
                }
            }

            impl<I: Integer, const N: usize> $trait<$uint> for Bvf<I, N>
            where
                u64: StaticCast<I>,
            {
                type Output = Bvf<I, N>;
                fn $method(self, rhs: $uint) -> Self::Output {
                    // All primitive unsigned types fit in 128 bits
                    let temp = Bvf::<u64, 2>::try_from(rhs).unwrap();
                    self.$method(temp)
                }
            }
        )+
    };
}

macro_rules! impl_op_assign_uint {
    ($trait:ident, $method:ident, {$($uint:ty),+}) => {
        $(
            impl<I: Integer, const N: usize> $trait<$uint> for Bvf<I, N>
            where
                u64: StaticCast<I>
            {
                fn $method(&mut self, rhs: $uint) {
                    // All primitive unsigned types fit in 128 bits
                    let temp = Bvf::<u64, 2>::try_from(rhs).unwrap();
                    self.$method(&temp);
                }
            }

            impl<I: Integer, const N: usize> $trait<&$uint> for Bvf<I, N>
            where
                u64: StaticCast<I>
            {
                fn $method(&mut self, rhs: &$uint) {
                    // All primitive unsigned types fit in 128 bits
                    let temp = Bvf::<u64, 2>::try_from(*rhs).unwrap();
                    self.$method(&temp);
                }
            }
        )+
    };
}

// ------------------------------------------------------------------------------------------------
// Bvf - Arithmetic operators (assignment kind)
// ------------------------------------------------------------------------------------------------

macro_rules! impl_binop_assign {
    ($trait:ident, $method:ident, {$($uint:ty),+}) => {
        impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> $trait<&Bvf<I2, N2>>
            for Bvf<I1, N1>
        where
            I2: StaticCast<I1>,
        {
            fn $method(&mut self, rhs: &Bvf<I2, N2>) {
                if size_of::<I1>() == size_of::<I2>() {
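                    // Fast path: both vectors use the same word width, so each word of
                    // `rhs` can be cast directly without regrouping bits across words.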
                    for i in 0..usize::min(N1, N2) {
                        self.data[i].$method(StaticCast::<I1>::cast_to(rhs.data[i]));
                    }
                    for i in N2..N1 {
                        self.data[i].$method(I1::ZERO);
                    }
                } else {
                    for i in 0..N1 {
                        self.data[i].$method(IArray::get_int(rhs, i).unwrap_or(I1::ZERO));
                    }
                }
            }
        }

        impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> $trait<Bvf<I2, N2>>
            for Bvf<I1, N1>
        where
            I2: StaticCast<I1>,
        {
            fn $method(&mut self, rhs: Bvf<I2, N2>) {
                self.$method(&rhs);
            }
        }

        impl<I: Integer, const N: usize> $trait<&Bvd> for Bvf<I, N>
        where
            u64: StaticCast<I>,
        {
            fn $method(&mut self, rhs: &Bvd) {
                for i in 0..N {
                    self.data[i].$method(IArray::get_int(rhs, i).unwrap_or(I::ZERO));
                }
            }
        }

        impl<I: Integer, const N: usize> $trait<Bvd> for Bvf<I, N>
        where
            u64: StaticCast<I>,
        {
            fn $method(&mut self, rhs: Bvd) {
                self.$method(&rhs);
            }
        }

        impl<I: Integer, const N: usize> $trait<&Bv> for Bvf<I, N>
        where
            u64: StaticCast<I>,
        {
            fn $method(&mut self, rhs: &Bv) {
                match rhs {
                    Bv::Fixed(bvf) => self.$method(bvf),
                    Bv::Dynamic(bvd) => self.$method(bvd),
                }
            }
        }

        impl<I: Integer, const N: usize> $trait<Bv> for Bvf<I, N>
        where
            u64: StaticCast<I>,
        {
            fn $method(&mut self, rhs: Bv) {
                self.$method(&rhs);
            }
        }

        impl_op_assign_uint!($trait, $method, {$($uint),+});
    };
}

impl_binop_assign!(BitAndAssign, bitand_assign, {u8, u16, u32, u64, usize, u128});
impl_binop_assign!(BitOrAssign, bitor_assign, {u8, u16, u32, u64, usize, u128});
impl_binop_assign!(BitXorAssign, bitxor_assign, {u8, u16, u32, u64, usize, u128});

macro_rules! impl_addsub_assign {
    ($trait:ident, $method:ident, $carry_method:ident, {$($uint:ty),+}) => {
        impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> $trait<&Bvf<I2, N2>>
            for Bvf<I1, N1>
        where
            I2: StaticCast<I1>,
        {
            fn $method(&mut self, rhs: &Bvf<I2, N2>) {
                if size_of::<I1>() == size_of::<I2>() {
                    let mut carry = I1::ZERO;

                    for i in 0..usize::min(N1, N2) {
                        carry = self.data[i]
                            .$carry_method(StaticCast::<I1>::cast_to(rhs.data[i]), carry);
                    }
                    for i in N2..N1 {
                        carry = self.data[i].$carry_method(I1::ZERO, carry);
                    }
                    self.mod2n(self.length);
                } else {
                    let mut carry = I1::ZERO;
                    for i in 0..N1 {
                        carry = self.data[i]
                            .$carry_method(IArray::get_int(rhs, i).unwrap_or(I1::ZERO), carry);
                    }
                    self.mod2n(self.length);
                }
            }
        }

        impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> $trait<Bvf<I2, N2>>
            for Bvf<I1, N1>
        where
            I2: StaticCast<I1>,
        {
            fn $method(&mut self, rhs: Bvf<I2, N2>) {
                self.$method(&rhs);
            }
        }

        impl<I: Integer, const N: usize> $trait<&Bvd> for Bvf<I, N>
        where
            u64: StaticCast<I>,
        {
            fn $method(&mut self, rhs: &Bvd) {
                let mut carry = I::ZERO;
                for i in 0..N {
                    carry = self.data[i]
                        .$carry_method(IArray::get_int(rhs, i).unwrap_or(I::ZERO), carry);
                }
                self.mod2n(self.length);
            }
        }

        impl<I: Integer, const N: usize> $trait<Bvd> for Bvf<I, N>
        where
            u64: StaticCast<I>,
        {
            fn $method(&mut self, rhs: Bvd) {
                self.$method(&rhs);
            }
        }

        impl<I: Integer, const N: usize> $trait<&Bv> for Bvf<I, N>
        where
            u64: StaticCast<I>,
        {
            fn $method(&mut self, rhs: &Bv) {
                match rhs {
                    Bv::Fixed(bvf) => self.$method(bvf),
                    Bv::Dynamic(bvd) => self.$method(bvd),
                }
            }
        }

        impl<I: Integer, const N: usize> $trait<Bv> for Bvf<I, N>
        where
            u64: StaticCast<I>,
        {
            fn $method(&mut self, rhs: Bv) {
                self.$method(&rhs);
            }
        }

        impl_op_assign_uint!($trait, $method, {$($uint),+});
    };
}

impl_addsub_assign!(AddAssign, add_assign, cadd, {u8, u16, u32, u64, usize, u128});
impl_addsub_assign!(SubAssign, sub_assign, csub, {u8, u16, u32, u64, usize, u128});

// ------------------------------------------------------------------------------------------------
// Bvf - Arithmetic operators (general kind)
// ------------------------------------------------------------------------------------------------

macro_rules! impl_op {
    ($trait:ident, $method:ident, $assign_trait:ident, $assign_method:ident) => {
        impl<T, I: Integer, const N: usize> $trait<T> for Bvf<I, N>
        where
            Bvf<I, N>: $assign_trait<T>,
        {
            type Output = Bvf<I, N>;
            fn $method(mut self, rhs: T) -> Bvf<I, N> {
                self.$assign_method(rhs);
                return self;
            }
        }

        impl<T, I: Integer, const N: usize> $trait<T> for &Bvf<I, N>
        where
            Bvf<I, N>: $assign_trait<T>,
        {
            type Output = Bvf<I, N>;
            fn $method(self, rhs: T) -> Bvf<I, N> {
                let mut result = self.clone();
                result.$assign_method(rhs);
                return result;
            }
        }
    };
}

impl_op!(BitAnd, bitand, BitAndAssign, bitand_assign);
impl_op!(BitOr, bitor, BitOrAssign, bitor_assign);
impl_op!(BitXor, bitxor, BitXorAssign, bitxor_assign);
impl_op!(Add, add, AddAssign, add_assign);
impl_op!(Sub, sub, SubAssign, sub_assign);

// ------------------------------------------------------------------------------------------------
// Bvf - Multiplication
// ------------------------------------------------------------------------------------------------

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> Mul<&Bvf<I2, N2>> for &Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn mul(self, rhs: &Bvf<I2, N2>) -> Bvf<I1, N1> {
        let mut res = Bvf::<I1, N1>::zeros(self.length);
        let len = IArray::int_len::<I1>(&res);
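        // Schoolbook multiplication: accumulate the double-width partial products of
        // each pair of words, keeping only the low `len` words of the result.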
1530        for i in 0..len {
1531            let mut carry = I1::ZERO;
1532            for j in 0..(len - i) {
1533                let product = self.data[i].wmul(IArray::get_int(rhs, j).unwrap_or(I1::ZERO));
1534                carry = res.data[i + j].cadd(product.0, carry) + product.1;
1535            }
1536        }
1537
1538        res.mod2n(self.length);
1539        res
1540    }
1541}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> Mul<Bvf<I2, N2>> for &Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn mul(self, rhs: Bvf<I2, N2>) -> Bvf<I1, N1> {
        self.mul(&rhs)
    }
}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> Mul<&Bvf<I2, N2>> for Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn mul(self, rhs: &Bvf<I2, N2>) -> Bvf<I1, N1> {
        (&self).mul(rhs)
    }
}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> Mul<Bvf<I2, N2>> for Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn mul(self, rhs: Bvf<I2, N2>) -> Bvf<I1, N1> {
        (&self).mul(&rhs)
    }
}

impl<I: Integer, const N: usize> Mul<&Bvd> for &Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn mul(self, rhs: &Bvd) -> Bvf<I, N> {
        let mut res = Bvf::<I, N>::zeros(self.length);
        let len = IArray::int_len::<I>(&res);
        // Same schoolbook accumulation as the Bvf * Bvf implementation above,
        // reading the words of the dynamic operand through the IArray helper.
        for i in 0..len {
            let mut carry = I::ZERO;
            for j in 0..(len - i) {
                let product = self.data[i].wmul(IArray::get_int(rhs, j).unwrap_or(I::ZERO));
                carry = res.data[i + j].cadd(product.0, carry) + product.1;
            }
        }

        res.mod2n(self.length);
        res
    }
}

impl<I: Integer, const N: usize> Mul<Bvd> for &Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn mul(self, rhs: Bvd) -> Bvf<I, N> {
        self.mul(&rhs)
    }
}

impl<I: Integer, const N: usize> Mul<&Bvd> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn mul(self, rhs: &Bvd) -> Bvf<I, N> {
        (&self).mul(rhs)
    }
}

impl<I: Integer, const N: usize> Mul<Bvd> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn mul(self, rhs: Bvd) -> Bvf<I, N> {
        (&self).mul(&rhs)
    }
}

impl<I: Integer, const N: usize> Mul<&Bv> for &Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn mul(self, rhs: &Bv) -> Self::Output {
        match rhs {
            Bv::Fixed(bvf) => self.mul(bvf),
            Bv::Dynamic(bvd) => self.mul(bvd),
        }
    }
}

impl<I: Integer, const N: usize> Mul<&Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn mul(self, rhs: &Bv) -> Self::Output {
        (&self).mul(rhs)
    }
}

impl<I: Integer, const N: usize> Mul<Bv> for &Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn mul(self, rhs: Bv) -> Self::Output {
        self.mul(&rhs)
    }
}

impl<I: Integer, const N: usize> Mul<Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn mul(self, rhs: Bv) -> Self::Output {
        (&self).mul(&rhs)
    }
}

impl_op_uint!(Mul, mul, {u8, u16, u32, u64, usize, u128});
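// Illustrative sketch (not part of the original source): the multiplication
// above only keeps the words that fit in the left operand and then truncates
// with `mod2n`, so the product is reduced modulo 2^len of the left-hand side.
// Assumes `TryFrom<u8>` yields an 8-bit vector.
#[cfg(test)]
mod mul_truncation_examples {
    use super::*;
    use std::convert::TryFrom;

    #[test]
    fn product_wraps_to_lhs_width() {
        let a = Bv8::try_from(200u8).unwrap();
        let b = Bv8::try_from(3u8).unwrap();
        // 200 * 3 = 600, and 600 mod 2^8 = 88.
        assert_eq!(&a * &b, Bv8::try_from(88u8).unwrap());
    }
}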

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> MulAssign<&Bvf<I2, N2>>
    for Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    fn mul_assign(&mut self, rhs: &Bvf<I2, N2>) {
        *self = Mul::mul(&*self, rhs);
    }
}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> MulAssign<Bvf<I2, N2>>
    for Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    fn mul_assign(&mut self, rhs: Bvf<I2, N2>) {
        *self = Mul::mul(&*self, &rhs);
    }
}

impl<I: Integer, const N: usize> MulAssign<&Bvd> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn mul_assign(&mut self, rhs: &Bvd) {
        *self = Mul::mul(&*self, rhs);
    }
}

impl<I: Integer, const N: usize> MulAssign<Bvd> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn mul_assign(&mut self, rhs: Bvd) {
        *self = Mul::mul(&*self, &rhs);
    }
}

impl<I: Integer, const N: usize> MulAssign<&Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn mul_assign(&mut self, rhs: &Bv) {
        *self = Mul::mul(&*self, rhs);
    }
}

impl<I: Integer, const N: usize> MulAssign<Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn mul_assign(&mut self, rhs: Bv) {
        *self = Mul::mul(&*self, &rhs);
    }
}

impl_op_assign_uint!(MulAssign, mul_assign, {u8, u16, u32, u64, usize, u128});
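// Illustrative sketch (not part of the original source): `*=` delegates to the
// `Mul` impls above, replacing the receiver with the (width-truncated) product.
// The uint form is assumed to come from `impl_op_assign_uint!`, mirroring the
// bit-vector form; `TryFrom<u32>` is assumed to yield a 32-bit vector.
#[cfg(test)]
mod mul_assign_examples {
    use super::*;
    use std::convert::TryFrom;

    #[test]
    fn mul_assign_by_vector_and_uint() {
        let mut a = Bv32::try_from(6u32).unwrap();
        a *= Bv32::try_from(7u32).unwrap();
        assert_eq!(a, Bv32::try_from(42u32).unwrap());
        a *= 10u32;
        assert_eq!(a, Bv32::try_from(420u32).unwrap());
    }
}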

// ------------------------------------------------------------------------------------------------
// Bvf - Division
// ------------------------------------------------------------------------------------------------

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> Div<&Bvf<I2, N2>> for &Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn div(self, rhs: &Bvf<I2, N2>) -> Self::Output {
        self.div_rem::<Bvf<I2, N2>>(rhs).0
    }
}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> Div<Bvf<I2, N2>> for &Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn div(self, rhs: Bvf<I2, N2>) -> Self::Output {
        self.div_rem::<Bvf<I2, N2>>(&rhs).0
    }
}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> Div<&Bvf<I2, N2>> for Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn div(self, rhs: &Bvf<I2, N2>) -> Self::Output {
        self.div_rem::<Bvf<I2, N2>>(rhs).0
    }
}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> Div<Bvf<I2, N2>> for Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn div(self, rhs: Bvf<I2, N2>) -> Self::Output {
        self.div_rem::<Bvf<I2, N2>>(&rhs).0
    }
}

impl<I: Integer, const N: usize> Div<&Bvd> for &Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn div(self, rhs: &Bvd) -> Self::Output {
        self.div_rem::<Bvd>(rhs).0
    }
}

impl<I1: Integer, const N1: usize> Div<Bvd> for &Bvf<I1, N1>
where
    u64: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn div(self, rhs: Bvd) -> Self::Output {
        self.div_rem::<Bvd>(&rhs).0
    }
}

impl<I1: Integer, const N1: usize> Div<&Bvd> for Bvf<I1, N1>
where
    u64: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn div(self, rhs: &Bvd) -> Self::Output {
        self.div_rem::<Bvd>(rhs).0
    }
}

impl<I1: Integer, const N1: usize> Div<Bvd> for Bvf<I1, N1>
where
    u64: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn div(self, rhs: Bvd) -> Self::Output {
        self.div_rem::<Bvd>(&rhs).0
    }
}

impl<I: Integer, const N: usize> Div<&Bv> for &Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn div(self, rhs: &Bv) -> Self::Output {
        match rhs {
            Bv::Fixed(bvf) => self.div_rem::<crate::auto::Bvp>(bvf).0,
            Bv::Dynamic(bvd) => self.div_rem::<Bvd>(bvd).0,
        }
    }
}

impl<I: Integer, const N: usize> Div<&Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn div(self, rhs: &Bv) -> Self::Output {
        (&self).div(rhs)
    }
}

impl<I: Integer, const N: usize> Div<Bv> for &Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn div(self, rhs: Bv) -> Self::Output {
        self.div(&rhs)
    }
}

impl<I: Integer, const N: usize> Div<Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn div(self, rhs: Bv) -> Self::Output {
        (&self).div(&rhs)
    }
}

impl_op_uint!(Div, div, {u8, u16, u32, u64, usize, u128});

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> DivAssign<&Bvf<I2, N2>>
    for Bvf<I1, N1>
where
    I1: StaticCast<I2>,
    I2: StaticCast<I1>,
{
    fn div_assign(&mut self, rhs: &Bvf<I2, N2>) {
        *self = Div::div(&*self, rhs);
    }
}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> DivAssign<Bvf<I2, N2>>
    for Bvf<I1, N1>
where
    I1: StaticCast<I2>,
    I2: StaticCast<I1>,
{
    fn div_assign(&mut self, rhs: Bvf<I2, N2>) {
        *self = Div::div(&*self, &rhs);
    }
}

impl<I: Integer, const N: usize> DivAssign<&Bvd> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn div_assign(&mut self, rhs: &Bvd) {
        *self = Div::div(&*self, rhs);
    }
}

impl<I: Integer, const N: usize> DivAssign<Bvd> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn div_assign(&mut self, rhs: Bvd) {
        *self = Div::div(&*self, &rhs);
    }
}

impl<I: Integer, const N: usize> DivAssign<&Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn div_assign(&mut self, rhs: &Bv) {
        *self = Div::div(&*self, rhs);
    }
}

impl<I: Integer, const N: usize> DivAssign<Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn div_assign(&mut self, rhs: Bv) {
        *self = Div::div(&*self, &rhs);
    }
}

impl_op_assign_uint!(DivAssign, div_assign, {u8, u16, u32, u64, usize, u128});
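// Illustrative sketch (not part of the original source): `/` and `/=` return
// the quotient half of `div_rem`, keeping the width of the left operand.
// Assumes `TryFrom<u32>` yields a 32-bit vector.
#[cfg(test)]
mod div_examples {
    use super::*;
    use std::convert::TryFrom;

    #[test]
    fn quotient_of_div_rem() {
        let a = Bv32::try_from(17u32).unwrap();
        let b = Bv32::try_from(5u32).unwrap();
        assert_eq!(&a / &b, Bv32::try_from(3u32).unwrap());
        let mut c = a;
        c /= b;
        assert_eq!(c, Bv32::try_from(3u32).unwrap());
    }
}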

// ------------------------------------------------------------------------------------------------
// Bvf - Remainder
// ------------------------------------------------------------------------------------------------

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> Rem<&Bvf<I2, N2>> for &Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn rem(self, rhs: &Bvf<I2, N2>) -> Self::Output {
        self.div_rem::<Bvf<I2, N2>>(rhs).1
    }
}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> Rem<Bvf<I2, N2>> for &Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn rem(self, rhs: Bvf<I2, N2>) -> Self::Output {
        self.div_rem::<Bvf<I2, N2>>(&rhs).1
    }
}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> Rem<&Bvf<I2, N2>> for Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn rem(self, rhs: &Bvf<I2, N2>) -> Self::Output {
        self.div_rem::<Bvf<I2, N2>>(rhs).1
    }
}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> Rem<Bvf<I2, N2>> for Bvf<I1, N1>
where
    I2: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn rem(self, rhs: Bvf<I2, N2>) -> Self::Output {
        self.div_rem::<Bvf<I2, N2>>(&rhs).1
    }
}

impl<I: Integer, const N: usize> Rem<&Bvd> for &Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn rem(self, rhs: &Bvd) -> Self::Output {
        self.div_rem::<Bvd>(rhs).1
    }
}

impl<I1: Integer, const N1: usize> Rem<Bvd> for &Bvf<I1, N1>
where
    u64: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn rem(self, rhs: Bvd) -> Self::Output {
        self.div_rem::<Bvd>(&rhs).1
    }
}

impl<I1: Integer, const N1: usize> Rem<&Bvd> for Bvf<I1, N1>
where
    u64: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn rem(self, rhs: &Bvd) -> Self::Output {
        self.div_rem::<Bvd>(rhs).1
    }
}

impl<I1: Integer, const N1: usize> Rem<Bvd> for Bvf<I1, N1>
where
    u64: StaticCast<I1>,
{
    type Output = Bvf<I1, N1>;
    fn rem(self, rhs: Bvd) -> Self::Output {
        self.div_rem::<Bvd>(&rhs).1
    }
}

impl<I: Integer, const N: usize> Rem<&Bv> for &Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn rem(self, rhs: &Bv) -> Self::Output {
        match rhs {
            Bv::Fixed(bvf) => self.div_rem::<crate::auto::Bvp>(bvf).1,
            Bv::Dynamic(bvd) => self.div_rem::<Bvd>(bvd).1,
        }
    }
}

impl<I: Integer, const N: usize> Rem<&Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn rem(self, rhs: &Bv) -> Self::Output {
        (&self).rem(rhs)
    }
}

impl<I: Integer, const N: usize> Rem<Bv> for &Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn rem(self, rhs: Bv) -> Self::Output {
        self.rem(&rhs)
    }
}

impl<I: Integer, const N: usize> Rem<Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    type Output = Bvf<I, N>;
    fn rem(self, rhs: Bv) -> Self::Output {
        (&self).rem(&rhs)
    }
}

impl_op_uint!(Rem, rem, {u8, u16, u32, u64, usize, u128});

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> RemAssign<&Bvf<I2, N2>>
    for Bvf<I1, N1>
where
    I1: StaticCast<I2>,
    I2: StaticCast<I1>,
{
    fn rem_assign(&mut self, rhs: &Bvf<I2, N2>) {
        *self = Rem::rem(&*self, rhs);
    }
}

impl<I1: Integer, I2: Integer, const N1: usize, const N2: usize> RemAssign<Bvf<I2, N2>>
    for Bvf<I1, N1>
where
    I1: StaticCast<I2>,
    I2: StaticCast<I1>,
{
    fn rem_assign(&mut self, rhs: Bvf<I2, N2>) {
        *self = Rem::rem(&*self, &rhs);
    }
}

impl<I: Integer, const N: usize> RemAssign<&Bvd> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn rem_assign(&mut self, rhs: &Bvd) {
        *self = Rem::rem(&*self, rhs);
    }
}

impl<I: Integer, const N: usize> RemAssign<Bvd> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn rem_assign(&mut self, rhs: Bvd) {
        *self = Rem::rem(&*self, &rhs);
    }
}

impl<I: Integer, const N: usize> RemAssign<&Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn rem_assign(&mut self, rhs: &Bv) {
        *self = Rem::rem(&*self, rhs);
    }
}

impl<I: Integer, const N: usize> RemAssign<Bv> for Bvf<I, N>
where
    u64: StaticCast<I>,
{
    fn rem_assign(&mut self, rhs: Bv) {
        *self = Rem::rem(&*self, &rhs);
    }
}

impl_op_assign_uint!(RemAssign, rem_assign, {u8, u16, u32, u64, usize, u128});
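// Illustrative sketch (not part of the original source): `%` and `%=` return
// the remainder half of `div_rem`. The uint form is assumed to come from
// `impl_op_assign_uint!`; `TryFrom<u32>` is assumed to yield a 32-bit vector.
#[cfg(test)]
mod rem_examples {
    use super::*;
    use std::convert::TryFrom;

    #[test]
    fn remainder_of_div_rem() {
        let a = Bv32::try_from(17u32).unwrap();
        let b = Bv32::try_from(5u32).unwrap();
        assert_eq!(&a % &b, Bv32::try_from(2u32).unwrap());
        let mut c = a;
        c %= 5u32;
        assert_eq!(c, Bv32::try_from(2u32).unwrap());
    }
}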