1use std::cmp::Ordering;
2use std::fmt;
3use std::hash::{Hash, Hasher};
4use std::io::{Read, Write};
5use std::iter::repeat;
6use std::mem::size_of;
7use std::ops::{
8 Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign,
9 Mul, MulAssign, Not, Range, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign,
10};
11
12use crate::auto::Bv;
13use crate::fixed::Bvf;
14use crate::iter::BitIterator;
15use crate::utils::{IArray, IArrayMut, Integer, StaticCast};
16use crate::{Bit, BitVector, ConvertionError, Endianness};
17
/// A bit vector with dynamically allocated storage.
///
/// Bits are packed into 64-bit words, least significant word first; `data`
/// may hold more words than `length` requires (spare capacity). The mutating
/// operations in this file keep every bit past `length` zeroed.
#[derive(Clone, Debug)]
pub struct Bvd {
    // Backing storage, least significant word first.
    data: Box<[u64]>,
    // Length of the vector in bits; invariant: length <= data.len() * 64
    // (asserted in `Bvd::new`).
    length: usize,
}
31
32impl Bvd {
33 const BYTE_UNIT: usize = size_of::<u64>();
34 const NIBBLE_UNIT: usize = size_of::<u64>() * 2;
35 const BIT_UNIT: usize = u64::BITS as usize;
36
37 pub const fn new(data: Box<[u64]>, length: usize) -> Self {
51 assert!(length <= data.len() * Self::BIT_UNIT);
52 Self { data, length }
53 }
54
55 pub fn into_inner(self) -> (Box<[u64]>, usize) {
57 (self.data, self.length)
58 }
59
60 fn capacity_from_byte_len(byte_length: usize) -> usize {
61 (byte_length + size_of::<u64>() - 1) / size_of::<u64>()
62 }
63
64 fn capacity_from_bit_len(bit_length: usize) -> usize {
65 Self::capacity_from_byte_len((bit_length + 7) / 8)
66 }
67
68 pub fn reserve(&mut self, additional: usize) {
83 let new_capacity = self.length + additional;
84 if Self::capacity_from_bit_len(new_capacity) > self.data.len() {
85 let mut new_data: Vec<u64> = repeat(0)
87 .take(Self::capacity_from_bit_len(new_capacity))
88 .collect();
89 for i in 0..self.data.len() {
90 new_data[i] = self.data[i];
91 }
92 self.data = new_data.into_boxed_slice();
93 }
94 }
95
96 pub fn shrink_to_fit(&mut self) {
98 if Self::capacity_from_bit_len(self.length) < self.data.len() {
99 let mut new_data: Vec<u64> = repeat(0)
101 .take(Self::capacity_from_bit_len(self.length))
102 .collect();
103 for i in 0..new_data.len() {
104 new_data[i] = self.data[i];
105 }
106 self.data = new_data.into_boxed_slice();
107 }
108 }
109}
110
111impl IArray for Bvd {
116 type I = u64;
117
118 fn int_len<J: Integer>(&self) -> usize
119 where
120 u64: StaticCast<J>,
121 {
122 (self.len() + size_of::<J>() * 8 - 1) / (size_of::<J>() * 8)
123 }
124
125 fn get_int<J: Integer>(&self, idx: usize) -> Option<J>
126 where
127 u64: StaticCast<J>,
128 {
129 if idx * J::BITS < self.length {
130 IArray::get_int::<J>(self.data.as_ref(), idx)
131 .map(|v| v & J::mask(self.length - idx * J::BITS))
132 } else {
133 None
134 }
135 }
136}
137
138impl IArrayMut for Bvd {
139 type I = u64;
140
141 fn set_int<J: Integer>(&mut self, idx: usize, v: J) -> Option<J>
142 where
143 u64: StaticCast<J>,
144 {
145 if idx * J::BITS < self.length {
146 IArrayMut::set_int::<J>(
147 self.data.as_mut(),
148 idx,
149 v & J::mask(self.length - idx * J::BITS),
150 )
151 } else {
152 None
153 }
154 }
155}
156
impl BitVector for Bvd {
    /// Creates an empty (zero-length) bit vector with room for `capacity`
    /// bits preallocated.
    fn with_capacity(capacity: usize) -> Self {
        let data: Vec<u64> = repeat(0u64)
            .take(Self::capacity_from_bit_len(capacity))
            .collect();
        Bvd {
            data: data.into_boxed_slice(),
            length: 0,
        }
    }

    /// Creates a bit vector of `length` bits, all set to zero.
    fn zeros(length: usize) -> Self {
        let v: Vec<u64> = repeat(0)
            .take(Self::capacity_from_bit_len(length))
            .collect();
        Bvd {
            data: v.into_boxed_slice(),
            length,
        }
    }

    /// Creates a bit vector of `length` bits, all set to one.
    fn ones(length: usize) -> Self {
        let mut v: Vec<u64> = repeat(u64::MAX)
            .take(Self::capacity_from_bit_len(length))
            .collect();
        // Clear the unused high bits of the last word; `wrapping_sub` makes
        // the expression safe for `length == 0` (no word exists then anyway).
        if let Some(l) = v.last_mut() {
            *l &= u64::mask(length.wrapping_sub(1) % Self::BIT_UNIT + 1);
        }

        Bvd {
            data: v.into_boxed_slice(),
            length,
        }
    }

    /// Total number of bits the current storage can hold.
    fn capacity(&self) -> usize {
        self.data.len() * Self::BIT_UNIT
    }

    /// Length of the vector in bits.
    fn len(&self) -> usize {
        self.length
    }

    /// Parses a big-endian string of '0'/'1' characters.
    ///
    /// Returns `ConvertionError::InvalidFormat` carrying the offending
    /// character index on any other character.
    fn from_binary<S: AsRef<str>>(string: S) -> Result<Self, ConvertionError> {
        let length = string.as_ref().chars().count();
        // Left-padding (in bits) so the most significant character lands on a
        // word boundary when digits are shifted in.
        let offset = (Self::BIT_UNIT - length % Self::BIT_UNIT) % Self::BIT_UNIT;
        let mut data: Vec<u64> = repeat(0)
            .take(Self::capacity_from_bit_len(length))
            .collect();

        for (i, c) in string.as_ref().chars().enumerate() {
            // Characters arrive most significant first: fill from the top
            // word downward, shifting each word left one bit per digit.
            let j = data.len() - 1 - (i + offset) / Self::BIT_UNIT;
            data[j] = (data[j] << 1)
                | match c {
                    '0' => 0,
                    '1' => 1,
                    _ => return Err(ConvertionError::InvalidFormat(i)),
                };
        }
        Ok(Self {
            data: data.into_boxed_slice(),
            length,
        })
    }

    /// Parses a big-endian hexadecimal string (case-insensitive). The
    /// resulting length is 4 bits per character.
    ///
    /// Returns `ConvertionError::InvalidFormat` carrying the offending
    /// character index on any non-hex character.
    fn from_hex<S: AsRef<str>>(string: S) -> Result<Self, ConvertionError> {
        let length = string.as_ref().chars().count();
        // Left-padding (in nibbles) so the most significant digit lands on a
        // word boundary.
        let offset = (Self::NIBBLE_UNIT - length % Self::NIBBLE_UNIT) % Self::NIBBLE_UNIT;
        let mut data: Vec<u64> = repeat(0)
            .take(Self::capacity_from_byte_len((length + 1) / 2))
            .collect();

        for (i, c) in string.as_ref().chars().enumerate() {
            // Digits arrive most significant first: fill from the top word
            // downward, 4 bits per digit.
            let j = data.len() - 1 - (i + offset) / Self::NIBBLE_UNIT;
            data[j] = (data[j] << 4)
                | match c.to_digit(16) {
                    Some(d) => d as u64,
                    None => return Err(ConvertionError::InvalidFormat(i)),
                };
        }
        Ok(Self {
            data: data.into_boxed_slice(),
            length: length * 4,
        })
    }

    /// Builds a bit vector from raw bytes; the resulting length is 8 bits
    /// per input byte.
    fn from_bytes<B: AsRef<[u8]>>(
        bytes: B,
        endianness: Endianness,
    ) -> Result<Self, ConvertionError> {
        let byte_length = bytes.as_ref().len();
        let mut data: Vec<u64> = repeat(0)
            .take(Self::capacity_from_byte_len(byte_length))
            .collect();
        match endianness {
            Endianness::Little => {
                // Little-endian input: iterate in reverse so bytes are still
                // processed most significant first.
                let offset = (Self::BYTE_UNIT - byte_length % Self::BYTE_UNIT) % Self::BYTE_UNIT;
                for (i, b) in bytes.as_ref().iter().rev().enumerate() {
                    let j = data.len() - 1 - (i + offset) / Self::BYTE_UNIT;
                    data[j] = (data[j] << 8) | *b as u64;
                }
            }
            Endianness::Big => {
                // Big-endian input is already most significant byte first.
                let offset = (Self::BYTE_UNIT - byte_length % Self::BYTE_UNIT) % Self::BYTE_UNIT;
                for (i, b) in bytes.as_ref().iter().enumerate() {
                    let j = data.len() - 1 - (i + offset) / Self::BYTE_UNIT;
                    data[j] = (data[j] << 8) | *b as u64;
                }
            }
        }
        Ok(Self {
            data: data.into_boxed_slice(),
            length: byte_length * 8,
        })
    }

    /// Serializes the vector to `ceil(length / 8)` bytes in the requested
    /// byte order.
    fn to_vec(&self, endianness: Endianness) -> Vec<u8> {
        let num_bytes = (self.length + 7) / 8;
        let mut buf: Vec<u8> = repeat(0u8).take(num_bytes).collect();
        match endianness {
            Endianness::Little => {
                for i in 0..num_bytes {
                    buf[i] = ((self.data[i / Self::BYTE_UNIT] >> ((i % Self::BYTE_UNIT) * 8))
                        & 0xff) as u8;
                }
            }
            Endianness::Big => {
                // Same byte extraction, written into the buffer back to front.
                for i in 0..num_bytes {
                    buf[num_bytes - i - 1] = ((self.data[i / Self::BYTE_UNIT]
                        >> ((i % Self::BYTE_UNIT) * 8))
                        & 0xff) as u8;
                }
            }
        }
        buf
    }

    /// Reads exactly `ceil(length / 8)` bytes from `reader` and builds a
    /// vector of exactly `length` bits, discarding any surplus bits of the
    /// final byte.
    fn read<R: Read>(
        reader: &mut R,
        length: usize,
        endianness: Endianness,
    ) -> std::io::Result<Self> {
        let num_bytes = (length + 7) / 8;
        let mut buf: Vec<u8> = repeat(0u8).take(num_bytes).collect();
        reader.read_exact(&mut buf[..])?;
        let mut bv = Self::from_bytes(&buf[..], endianness)
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
        // Zero the bits beyond `length` so the storage invariant holds.
        if let Some(l) = bv.data.last_mut() {
            *l &= u64::mask(length.wrapping_sub(1) % Self::BIT_UNIT + 1);
        }
        bv.length = length;
        Ok(bv)
    }

    /// Writes the vector to `writer` using `to_vec`'s byte representation.
    fn write<W: Write>(&self, writer: &mut W, endianness: Endianness) -> std::io::Result<()> {
        writer.write_all(self.to_vec(endianness).as_slice())
    }

    /// Returns the bit at `index` (0 is the least significant bit).
    fn get(&self, index: usize) -> Bit {
        debug_assert!(index < self.length);
        ((self.data[index / Self::BIT_UNIT] >> (index % Self::BIT_UNIT)) & 1).into()
    }

    /// Sets the bit at `index` (0 is the least significant bit).
    fn set(&mut self, index: usize, bit: Bit) {
        debug_assert!(index < self.length);
        self.data[index / Self::BIT_UNIT] = (self.data[index / Self::BIT_UNIT]
            & !(1 << (index % Self::BIT_UNIT)))
            | ((bit as u64) << (index % Self::BIT_UNIT));
    }

    /// Extracts the bits in `range` into a new vector.
    fn copy_range(&self, range: Range<usize>) -> Self {
        debug_assert!(range.start <= self.len() && range.end <= self.len());
        // An inverted range (start > end) yields an empty vector.
        let length = range.end - usize::min(range.start, range.end);
        let mut data: Vec<u64> = repeat(0)
            .take(Self::capacity_from_bit_len(length))
            .collect();
        let offset = range.start / Self::BIT_UNIT;
        let slide = range.start % Self::BIT_UNIT;

        // Each destination word combines the high part of one source word
        // with the low part of the next; `checked_shl` covers `slide == 0`,
        // where the second shift amount would be 64.
        for i in 0..data.len() {
            data[i] = (self.data[i + offset] >> slide)
                | self
                    .data
                    .get(i + offset + 1)
                    .unwrap_or(&0)
                    .checked_shl((Self::BIT_UNIT - slide) as u32)
                    .unwrap_or(0);
        }
        // Clear the unused high bits of the last word.
        if let Some(l) = data.last_mut() {
            *l &= u64::mask(length.wrapping_sub(1) % Self::BIT_UNIT + 1);
        }

        Bvd {
            data: data.into_boxed_slice(),
            length,
        }
    }

    /// Appends one bit at the most significant end, growing storage if
    /// needed.
    fn push(&mut self, bit: Bit) {
        self.reserve(1);
        self.length += 1;
        self.set(self.length - 1, bit);
    }

    /// Removes and returns the most significant bit, or `None` when empty.
    fn pop(&mut self) -> Option<Bit> {
        if self.length == 0 {
            return None;
        }
        let bit = self.get(self.length - 1);
        // Clear the bit so storage beyond `length` stays zeroed.
        self.set(self.length - 1, Bit::Zero);
        self.length -= 1;
        Some(bit)
    }

    /// Shrinks or grows the vector to `new_length` bits. Bits created by
    /// growth take the value `bit`; truncated bits are zeroed in storage.
    fn resize(&mut self, new_length: usize, bit: Bit) {
        if new_length < self.length {
            // Zero every word fully above the new end...
            for i in (new_length / Self::BIT_UNIT + 1)..Self::capacity_from_bit_len(self.length) {
                self.data[i] = 0;
            }
            // ...and the surplus bits of the word at the new end.
            if let Some(l) = self.data.get_mut(new_length / Self::BIT_UNIT) {
                *l &= u64::mask(new_length % Self::BIT_UNIT);
            }
            self.length = new_length;
        } else if new_length > self.length {
            let sign_pattern = match bit {
                Bit::Zero => u64::MIN,
                Bit::One => u64::MAX,
            };
            self.reserve(new_length - self.length);
            // Fill the free bits of the current last word...
            if let Some(l) = self.data.get_mut(self.length / Self::BIT_UNIT) {
                *l |= sign_pattern & !u64::mask(self.length % Self::BIT_UNIT);
            }
            // ...then every whole word up to the new end...
            for i in (self.length / Self::BIT_UNIT + 1)..Self::capacity_from_bit_len(new_length) {
                self.data[i] = sign_pattern;
            }
            // ...and clear whatever lies beyond the new end in the last word.
            if let Some(l) = self.data.get_mut(new_length / Self::BIT_UNIT) {
                *l &= u64::mask(new_length % Self::BIT_UNIT);
            }
            self.length = new_length;
        }
    }

    /// Appends `suffix` at the most significant end of `self`.
    fn append<B: BitVector>(&mut self, suffix: &B) {
        // Where the suffix starts: bit offset within a word and word index
        // (computed before the resize changes `self.length`).
        let offset = self.length % Self::BIT_UNIT;
        let slide = self.length / Self::BIT_UNIT;
        self.resize(self.length + suffix.len(), Bit::Zero);
        if offset == 0 {
            // Word-aligned: copy the suffix words straight in.
            let mut i = 0;
            while let Some(b) = suffix.get_int::<u64>(i) {
                self.data[i + slide] = b;
                i += 1;
            }
        } else if let Some(b) = suffix.get_int::<u64>(0) {
            self.data[slide] |= b << offset;

            // Unaligned: every suffix word is split across two destination
            // words.
            let rev_offset = Self::BIT_UNIT - offset;
            let mut i = 1;
            let mut prev = b;

            while let Some(b) = suffix.get_int::<u64>(i) {
                self.data[i + slide] = (prev >> rev_offset) | (b << offset);
                prev = b;
                i += 1;
            }

            // Spill the remaining high bits of the final suffix word, when
            // the destination still has a word for them.
            if let Some(b) = self.data.get_mut(i + slide) {
                *b = prev >> rev_offset;
            }
        }
    }

    /// Prepends `prefix` at the least significant end of `self`.
    ///
    /// NOTE(review): an empty `prefix` makes `int_len::<u64>() == 0`, so
    /// `last` below underflows — confirm callers never pass a zero-length
    /// prefix here.
    fn prepend<B: BitVector>(&mut self, prefix: &B) {
        // Make room, shift the existing bits up, then OR the prefix words in.
        self.resize(self.length + prefix.len(), Bit::Zero);
        *self <<= prefix.len();
        let last = prefix.int_len::<u64>() - 1;

        for i in 0..last {
            self.data[i] = prefix.get_int::<u64>(i).unwrap();
        }

        // The final prefix word may share a storage word with the shifted
        // bits of `self`, hence OR instead of assignment.
        if let Some(b) = self.data.get_mut(last) {
            *b |= prefix.get_int::<u64>(last).unwrap();
        }
    }

    /// Shifts the vector left by one bit, inserting `bit` at the least
    /// significant end and returning the bit shifted out at the top.
    fn shl_in(&mut self, bit: Bit) -> Bit {
        let mut carry = bit;
        // Full words: shift each word left, propagating the carry bit.
        for i in 0..(self.length / Self::BIT_UNIT) {
            let b = (self.data[i] >> (Self::BIT_UNIT - 1)) & 1;
            self.data[i] = (self.data[i] << 1) | carry as u64;
            carry = b.into();
        }
        // Partial last word: the outgoing bit sits at `length % 64 - 1` and
        // the result is re-masked to the vector's length.
        if self.length % Self::BIT_UNIT != 0 {
            let i = self.length / Self::BIT_UNIT;
            let b = (self.data[i] >> (self.length % Self::BIT_UNIT - 1)) & 1;
            self.data[i] =
                ((self.data[i] << 1) | carry as u64) & u64::mask(self.length % Self::BIT_UNIT);
            carry = b.into();
        }
        carry
    }

    /// Shifts the vector right by one bit, inserting `bit` at the most
    /// significant end and returning the bit shifted out at the bottom.
    fn shr_in(&mut self, bit: Bit) -> Bit {
        let mut carry = bit;
        // Partial last word first: it receives the inserted bit.
        if self.length % Self::BIT_UNIT != 0 {
            let i = self.length / Self::BIT_UNIT;
            let b = self.data[i] & 1;
            self.data[i] =
                (self.data[i] >> 1) | ((carry as u64) << (self.length % Self::BIT_UNIT - 1));
            carry = b.into();
        }
        // Then the full words, from most significant to least.
        for i in (0..(self.length / Self::BIT_UNIT)).rev() {
            let b = self.data[i] & 1;
            self.data[i] = (self.data[i] >> 1) | ((carry as u64) << (Self::BIT_UNIT - 1));
            carry = b.into();
        }
        carry
    }

    /// Rotates the vector left by `rot` bits: bit `i` moves to
    /// `(i + rot) % length`.
    fn rotl(&mut self, rot: usize) {
        let mut new_data: Vec<u64> = repeat(0).take(self.data.len()).collect();
        let mut old_idx = 0;
        while old_idx < self.length {
            let new_idx = (old_idx + rot) % self.length;
            // Largest run of bits that stays within a single source word, a
            // single destination word, and the vector bounds.
            let l = (Self::BIT_UNIT - new_idx % Self::BIT_UNIT)
                .min(Self::BIT_UNIT - old_idx % Self::BIT_UNIT)
                .min(self.length - new_idx)
                .min(self.length - old_idx);
            new_data[new_idx / Self::BIT_UNIT] |= ((self.data[old_idx / Self::BIT_UNIT]
                >> (old_idx % Self::BIT_UNIT))
                & u64::mask(l))
                << (new_idx % Self::BIT_UNIT);
            old_idx += l;
        }
        self.data = new_data.into_boxed_slice();
    }

    /// Rotates the vector right by `rot` bits: bit `(i + rot) % length`
    /// moves to `i`.
    fn rotr(&mut self, rot: usize) {
        let mut new_data: Vec<u64> = repeat(0).take(self.data.len()).collect();
        let mut new_idx = 0;
        while new_idx < self.length {
            let old_idx = (new_idx + rot) % self.length;
            // Same run-at-a-time transfer as `rotl`, driven by the
            // destination index instead of the source index.
            let l = (Self::BIT_UNIT - new_idx % Self::BIT_UNIT)
                .min(Self::BIT_UNIT - old_idx % Self::BIT_UNIT)
                .min(self.length - new_idx)
                .min(self.length - old_idx);
            new_data[new_idx / Self::BIT_UNIT] |= ((self.data[old_idx / Self::BIT_UNIT]
                >> (old_idx % Self::BIT_UNIT))
                & u64::mask(l))
                << (new_idx % Self::BIT_UNIT);
            new_idx += l;
        }
        self.data = new_data.into_boxed_slice();
    }

    /// Counts consecutive zero bits starting at the most significant end.
    fn leading_zeros(&self) -> usize {
        let mut count = 0;
        let mut i = Self::capacity_from_bit_len(self.length);
        if i > 0 {
            // Number of bits actually used in the top word.
            let lastbit = (self.length - 1) % Self::BIT_UNIT + 1;
            let mut v = self.data[i - 1] & u64::mask(lastbit);
            // Discount the unused high bits of the top word.
            count += Integer::leading_zeros(&v) - (Self::BIT_UNIT - lastbit);
            i -= 1;
            while v == 0 && i > 0 {
                v = self.data[i - 1];
                count += Integer::leading_zeros(&v);
                i -= 1;
            }
        }
        count
    }

    /// Counts consecutive one bits starting at the most significant end.
    fn leading_ones(&self) -> usize {
        let mut count = 0;
        let mut i = Self::capacity_from_bit_len(self.length);
        if i > 0 {
            let lastbit = (self.length - 1) % Self::BIT_UNIT + 1;
            // Force the unused high bits to one so they do not break the
            // run, then discount them from the count.
            let mut v = self.data[i - 1] | !u64::mask(lastbit);
            count += Integer::leading_ones(&v) - (Self::BIT_UNIT - lastbit);
            i -= 1;
            while v == u64::MAX && i > 0 {
                v = self.data[i - 1];
                count += Integer::leading_ones(&v);
                i -= 1;
            }
        }
        count
    }

    /// Counts consecutive zero bits starting at the least significant end.
    fn trailing_zeros(&self) -> usize {
        let mut count = 0;
        let mut i = 0;
        if i < Self::capacity_from_bit_len(self.length) {
            let mut v = 0;
            // Scan whole words until a non-zero one (or the last word).
            while v == 0 && i < Self::capacity_from_bit_len(self.length) - 1 {
                v = self.data[i];
                count += Integer::trailing_zeros(&v);
                i += 1;
            }
            // If everything so far was zero, the top word's contribution is
            // capped at the number of bits it actually uses.
            if v == 0 {
                let lastbit = (self.length - 1) % Self::BIT_UNIT + 1;
                count += usize::min(Integer::trailing_zeros(&self.data[i]), lastbit);
            }
        }
        count
    }

    /// Counts consecutive one bits starting at the least significant end.
    fn trailing_ones(&self) -> usize {
        let mut count = 0;
        let mut i = 0;
        if i < Self::capacity_from_bit_len(self.length) {
            let mut v = u64::MAX;
            // Scan whole words while they are all ones (or until the last).
            while v == u64::MAX && i < Self::capacity_from_bit_len(self.length) - 1 {
                v = self.data[i];
                count += Integer::trailing_ones(&v);
                i += 1;
            }
            // Cap the top word's contribution at its used bit count.
            if v == u64::MAX {
                let lastbit = (self.length - 1) % Self::BIT_UNIT + 1;
                count += usize::min(Integer::trailing_ones(&self.data[i]), lastbit);
            }
        }
        count
    }

    /// Returns `true` when every bit is zero (also for the empty vector).
    fn is_zero(&self) -> bool {
        self.data[0..Self::capacity_from_bit_len(self.length)]
            .iter()
            .all(|&x| x == 0)
    }

    /// Computes quotient and remainder in one pass using binary long
    /// division (shift-and-subtract).
    ///
    /// # Panics
    /// Panics if `divisor` is zero.
    fn div_rem<B: BitVector>(&self, divisor: &B) -> (Self, Self)
    where
        Self: for<'a> TryFrom<&'a B, Error: std::fmt::Debug>,
    {
        assert!(!divisor.is_zero(), "Division by zero");
        let mut quotient = Bvd::zeros(self.length);
        let mut rem = self.clone();
        // Divisor larger than dividend: quotient 0, remainder = dividend.
        if divisor.significant_bits() > self.significant_bits() {
            return (quotient, rem);
        }

        // Align the divisor's top bit with the dividend's top bit.
        let shift = self.significant_bits() - divisor.significant_bits();
        let mut divisor: Bvd = divisor.try_into().expect("should never fail");
        divisor.resize(self.length, Bit::Zero);
        divisor <<= shift;

        // One quotient bit per alignment position, from high to low.
        for i in (0..shift + 1).rev() {
            if rem >= divisor {
                rem -= &divisor;
                quotient.set(i, Bit::One);
            }
            divisor >>= 1u32;
        }

        (quotient, rem)
    }

    /// Returns an iterator over the bits of the vector.
    fn iter(&self) -> BitIterator<'_, Self> {
        self.into_iter()
    }
}
625
626impl Hash for Bvd {
631 fn hash<H: Hasher>(&self, state: &mut H) {
632 self.length.hash(state);
633 for i in 0..Self::capacity_from_bit_len(self.length) {
634 self.data[i].hash(state);
635 }
636 }
637}
638
639impl<'a> IntoIterator for &'a Bvd {
644 type Item = Bit;
645 type IntoIter = BitIterator<'a, Bvd>;
646
647 fn into_iter(self) -> Self::IntoIter {
648 BitIterator::new(self)
649 }
650}
651
652impl FromIterator<Bit> for Bvd {
653 fn from_iter<T: IntoIterator<Item = Bit>>(iter: T) -> Self {
654 let iter = iter.into_iter();
655 let mut bv = Bvd::with_capacity(iter.size_hint().0);
656 iter.for_each(|b| bv.push(b));
657 bv
658 }
659}
660
661impl Extend<Bit> for Bvd {
662 fn extend<T: IntoIterator<Item = Bit>>(&mut self, iter: T) {
663 let iter = iter.into_iter();
664 self.reserve(iter.size_hint().0);
665 iter.for_each(|b| self.push(b));
666 }
667}
668
669impl fmt::Binary for Bvd {
674 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
675 let mut i = self.length;
676
677 while i > 0 && self.get(i - 1) == Bit::Zero {
679 i -= 1;
680 }
681
682 let mut s = String::with_capacity(self.length);
683 while i > 0 {
684 match self.get(i - 1) {
685 Bit::Zero => s.push('0'),
686 Bit::One => s.push('1'),
687 }
688 i -= 1;
689 }
690 if s.is_empty() {
691 s.push('0');
692 }
693
694 f.pad_integral(true, "0b", &s)
695 }
696}
697
698impl fmt::Display for Bvd {
699 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
700 let base = Self::from(10u8);
701 let mut s = Vec::<char>::new();
702 let mut quotient = self.clone();
703 let mut remainder;
704
705 while !quotient.is_zero() {
706 (quotient, remainder) = quotient.div_rem(&base);
707 s.push(char::from_digit(u32::try_from(&remainder).unwrap(), 10).unwrap());
709 }
710 if s.is_empty() {
711 s.push('0');
712 }
713
714 f.pad_integral(true, "", s.iter().rev().collect::<String>().as_str())
715 }
716}
717
718impl fmt::Octal for Bvd {
719 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
720 const SEMI_NIBBLE: [char; 8] = ['0', '1', '2', '3', '4', '5', '6', '7'];
721 let length = (self.length + 2) / 3;
722 let mut s = Vec::<char>::with_capacity(length);
723 let mut it = self.iter();
724 let mut last_nz = 0;
725
726 while let Some(b0) = it.next() {
727 let b1 = it.next().unwrap_or(Bit::Zero);
728 let b2 = it.next().unwrap_or(Bit::Zero);
729 let octet = ((b2 as u8) << 2) | ((b1 as u8) << 1) | b0 as u8;
730 if octet != 0 {
731 last_nz = s.len();
732 }
733 s.push(SEMI_NIBBLE[octet as usize]);
734 }
735 if s.is_empty() {
736 s.push('0');
737 }
738 s.truncate(last_nz + 1);
739
740 f.pad_integral(true, "0o", s.iter().rev().collect::<String>().as_str())
741 }
742}
743
744impl fmt::LowerHex for Bvd {
745 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
746 const NIBBLE: [char; 16] = [
747 '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f',
748 ];
749 let mut i = (self.length + 3) / 4;
750
751 while i > 0
753 && StaticCast::<u8>::cast_to(
754 self.data[(i - 1) / Self::NIBBLE_UNIT] >> (((i - 1) % Self::NIBBLE_UNIT) * 4),
755 ) & 0xf
756 == 0
757 {
758 i -= 1;
759 }
760
761 let mut s = String::with_capacity(i);
762 while i > 0 {
763 let nibble = StaticCast::<u8>::cast_to(
764 self.data[(i - 1) / Self::NIBBLE_UNIT] >> (((i - 1) % Self::NIBBLE_UNIT) * 4),
765 ) & 0xf;
766 s.push(NIBBLE[nibble as usize]);
767 i -= 1;
768 }
769 if s.is_empty() {
770 s.push('0');
771 }
772
773 f.pad_integral(true, "0x", &s)
774 }
775}
776
777impl fmt::UpperHex for Bvd {
778 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
779 const NIBBLE: [char; 16] = [
780 '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F',
781 ];
782 let mut i = (self.length + 3) / 4;
783 let mut s = String::with_capacity(i);
784
785 while i > 0
786 && StaticCast::<u8>::cast_to(
787 self.data[(i - 1) / Self::NIBBLE_UNIT] >> (((i - 1) % Self::NIBBLE_UNIT) * 4),
788 ) & 0xf
789 == 0
790 {
791 i -= 1;
792 }
793 while i > 0 {
794 let nibble = StaticCast::<u8>::cast_to(
795 self.data[(i - 1) / Self::NIBBLE_UNIT] >> (((i - 1) % Self::NIBBLE_UNIT) * 4),
796 ) & 0xf;
797 s.push(NIBBLE[nibble as usize]);
798 i -= 1;
799 }
800 if s.is_empty() {
801 s.push('0');
802 }
803
804 f.pad_integral(true, "0x", &s)
805 }
806}
807
808impl PartialEq for Bvd {
813 fn eq(&self, other: &Self) -> bool {
814 for i in 0..usize::max(self.data.len(), other.data.len()) {
815 if self.data.get(i).unwrap_or(&0) != other.data.get(i).unwrap_or(&0) {
816 return false;
817 }
818 }
819 true
820 }
821}
822
823impl<I: Integer, const N: usize> PartialEq<Bvf<I, N>> for Bvd {
824 fn eq(&self, other: &Bvf<I, N>) -> bool {
825 for i in 0..usize::max(self.len(), IArray::int_len::<u64>(other)) {
826 if *self.data.get(i).unwrap_or(&0) != IArray::get_int(other, i).unwrap_or(0) {
827 return false;
828 }
829 }
830 true
831 }
832}
833
impl PartialEq<Bv> for Bvd {
    // Delegate to `Bv`'s comparison against `Bvd` so both directions of the
    // cross-type comparison agree.
    fn eq(&self, other: &Bv) -> bool {
        other.eq(self)
    }
}

impl Eq for Bvd {}
841
impl PartialOrd for Bvd {
    // A total order exists (see the `Ord` impl), so this always returns
    // `Some`.
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
847
848impl<I: Integer, const N: usize> PartialOrd<Bvf<I, N>> for Bvd {
849 fn partial_cmp(&self, other: &Bvf<I, N>) -> Option<Ordering> {
850 for i in (0..usize::max(self.len(), IArray::int_len::<u64>(other))).rev() {
851 match self
852 .data
853 .get(i)
854 .unwrap_or(&0)
855 .cmp(&IArray::get_int(other, i).unwrap_or(0))
856 {
857 Ordering::Equal => continue,
858 ord => return Some(ord),
859 }
860 }
861 Some(Ordering::Equal)
862 }
863}
864
impl PartialOrd<Bv> for Bvd {
    // Delegate to `Bv`'s comparison against `Bvd` and reverse the result,
    // keeping both directions of the cross-type ordering consistent.
    fn partial_cmp(&self, other: &Bv) -> Option<Ordering> {
        other.partial_cmp(self).map(|o| o.reverse())
    }
}
870
871impl Ord for Bvd {
872 fn cmp(&self, other: &Self) -> Ordering {
873 for i in (0..usize::max(self.data.len(), other.data.len())).rev() {
874 match self
875 .data
876 .get(i)
877 .unwrap_or(&0)
878 .cmp(other.data.get(i).unwrap_or(&0))
879 {
880 Ordering::Equal => continue,
881 ord => return ord,
882 }
883 }
884 Ordering::Equal
885 }
886}
887
888impl From<&Bvd> for Bvd {
893 fn from(bvd: &Bvd) -> Self {
894 Bvd {
895 length: bvd.len(),
896 data: bvd.data.clone(),
897 }
898 }
899}
900
// Implements value conversions between `Bvd` and primitive unsigned integer
// types: infallible `From<uN>`/`From<&uN>` for `Bvd` (resulting length is
// `uN::BITS`) and fallible `TryFrom<Bvd>`/`TryFrom<&Bvd>` for `uN` (fails
// when the value's significant bits exceed the target width).
macro_rules! impl_from_ints {($($st:ty),+) => {
    $(
        impl From<$st> for Bvd {
            fn from(st: $st) -> Self {
                let array = [st];
                Bvd {
                    length: <$st>::BITS as usize,
                    // Repack the single integer into 64-bit words via IArray.
                    data: (0..IArray::int_len::<u64>(array.as_ref())).map(|i| IArray::get_int(array.as_ref(), i).unwrap()).collect(),
                }
            }
        }

        impl From<&$st> for Bvd {
            fn from(st: &$st) -> Self {
                Self::from(*st)
            }
        }

        impl TryFrom<&Bvd> for $st {
            type Error = ConvertionError;
            fn try_from(bvd: &Bvd) -> Result<Self, Self::Error> {
                // Reject values whose significant bits do not fit the target.
                if bvd.significant_bits() > <$st>::BITS as usize {
                    return Err(ConvertionError::NotEnoughCapacity);
                }
                else {
                    // Fold words together, most significant first;
                    // `checked_shl` degrades to 0 when the target type is
                    // narrower than one 64-bit word (high words are then
                    // zero anyway, per the check above).
                    let mut r: $st = 0;
                    for i in (0..Bvd::capacity_from_bit_len(bvd.length)).rev() {
                        r = r.checked_shl(Bvd::BIT_UNIT as u32).unwrap_or(0) | bvd.data[i] as $st;
                    }
                    return Ok(r);
                }
            }
        }

        impl TryFrom<Bvd> for $st {
            type Error = ConvertionError;
            fn try_from(bvd: Bvd) -> Result<Self, Self::Error> {
                <$st>::try_from(&bvd)
            }
        }
    )+
}}

impl_from_ints!(u8, u16, u32, u64, u128, usize);
945
946impl<I: Integer> From<&[I]> for Bvd
947where
948 u64: StaticCast<I>,
949{
950 fn from(slice: &[I]) -> Self {
951 let mut bvd = Bvd::zeros(slice.len() * I::BITS);
952 for (i, v) in slice.iter().enumerate() {
953 bvd.set_int(i, *v);
954 }
955 bvd
956 }
957}
958
959impl<I: Integer, const N: usize> From<&Bvf<I, N>> for Bvd {
960 fn from(rhs: &Bvf<I, N>) -> Bvd {
961 Bvd {
962 length: rhs.len(),
963 data: (0..IArray::int_len::<u64>(rhs))
964 .map(|i| IArray::get_int(rhs, i).unwrap())
965 .collect(),
966 }
967 }
968}
969
impl<I: Integer, const N: usize> From<Bvf<I, N>> for Bvd {
    // By-value conversion simply delegates to the by-reference one.
    fn from(rhs: Bvf<I, N>) -> Bvd {
        Bvd::from(&rhs)
    }
}
975
976impl From<&'_ Bv> for Bvd {
977 fn from(bv: &'_ Bv) -> Self {
978 match bv {
979 Bv::Fixed(b) => Bvd::from(b),
980 Bv::Dynamic(b) => b.clone(),
981 }
982 }
983}
984
985impl From<Bv> for Bvd {
986 fn from(bv: Bv) -> Self {
987 match bv {
988 Bv::Fixed(b) => Bvd::from(b),
989 Bv::Dynamic(b) => b,
990 }
991 }
992}
993
994impl Not for Bvd {
999 type Output = Bvd;
1000 fn not(mut self) -> Self::Output {
1001 for i in 0..Self::capacity_from_bit_len(self.length) {
1002 self.data[i] = !self.data[i];
1003 }
1004 if let Some(l) = self.data.get_mut(self.length / Self::BIT_UNIT) {
1005 *l &= u64::mask(self.length % Self::BIT_UNIT);
1006 }
1007 self
1008 }
1009}
1010
1011impl Not for &Bvd {
1012 type Output = Bvd;
1013 fn not(self) -> Self::Output {
1014 let mut new_data: Vec<u64> = self.data[0..Bvd::capacity_from_bit_len(self.length)]
1015 .iter()
1016 .map(|d| !d)
1017 .collect();
1018 if let Some(l) = new_data.get_mut(self.length / Bvd::BIT_UNIT) {
1019 *l &= u64::mask(self.length % Bvd::BIT_UNIT);
1020 }
1021 Bvd {
1022 data: new_data.into_boxed_slice(),
1023 length: self.length,
1024 }
1025 }
1026}
1027
// Implements the four shift traits (`Shl`, `ShlAssign`, `Shr`, `ShrAssign`)
// for every requested unsigned right-hand-side type. Shifted-in bits are
// zero; bits shifted past the vector's length are discarded. Bits are moved
// in word-bounded runs rather than one at a time.
macro_rules! impl_shifts {({$($rhs:ty),+}) => {
    $(
        impl ShlAssign<$rhs> for Bvd {
            fn shl_assign(&mut self, rhs: $rhs) {
                // A shift amount that does not fit in usize degrades to 0.
                let shift = usize::try_from(rhs).map_or(0, |s| s);
                if shift == 0 {
                    return;
                }
                // Move runs of bits from `old_idx` to `new_idx = old_idx +
                // shift`, walking from the top down so sources are read
                // before they are overwritten.
                let mut new_idx = self.length;
                while new_idx > shift {
                    // Longest run ending at `new_idx` that stays inside one
                    // source word and one destination word.
                    let l = (new_idx.wrapping_sub(1) % Self::BIT_UNIT + 1)
                        .min((new_idx - shift).wrapping_sub(1) % Self::BIT_UNIT + 1);
                    new_idx -= l;
                    let old_idx = new_idx - shift;
                    let d = (self.data[old_idx / Self::BIT_UNIT] >> (old_idx % Self::BIT_UNIT)) & u64::mask(l);
                    self.data[new_idx / Self::BIT_UNIT] &= !(u64::mask(l) << (new_idx % Self::BIT_UNIT));
                    self.data[new_idx / Self::BIT_UNIT] |= d << (new_idx % Self::BIT_UNIT);
                }
                // Zero-fill the vacated low `shift` bits.
                while new_idx > 0 {
                    let l = (new_idx.wrapping_sub(1) % Self::BIT_UNIT) + 1;
                    new_idx -= l;
                    self.data[new_idx / Self::BIT_UNIT] &= !(u64::mask(l) << (new_idx % Self::BIT_UNIT));
                }
            }
        }

        impl ShlAssign<&$rhs> for Bvd {
            fn shl_assign(&mut self, rhs: &$rhs) {
                self.shl_assign(*rhs);
            }
        }

        impl Shl<$rhs> for Bvd {
            type Output = Bvd;
            fn shl(mut self, rhs: $rhs) -> Bvd {
                self.shl_assign(rhs);
                return self;
            }
        }

        impl Shl<&$rhs> for Bvd {
            type Output = Bvd;
            fn shl(mut self, rhs: &$rhs) -> Bvd {
                self.shl_assign(rhs);
                return self;
            }
        }

        impl Shl<$rhs> for &Bvd {
            type Output = Bvd;
            fn shl(self, rhs: $rhs) -> Bvd {
                let shift = usize::try_from(rhs).map_or(0, |s| s);
                // Non-consuming variant: build the result in fresh zeroed
                // storage, so no explicit zero-fill pass is needed.
                let mut new_data: Vec<u64> = repeat(0).take(Bvd::capacity_from_bit_len(self.length)).collect();
                let mut new_idx = self.length;
                while new_idx > shift {
                    let l = (new_idx.wrapping_sub(1) % Bvd::BIT_UNIT + 1)
                        .min((new_idx - shift).wrapping_sub(1) % Bvd::BIT_UNIT + 1);
                    new_idx -= l;
                    let old_idx = new_idx - shift;
                    new_data[new_idx / Bvd::BIT_UNIT] |= ((self.data[old_idx / Bvd::BIT_UNIT] >> (old_idx % Bvd::BIT_UNIT)) & u64::mask(l)) << (new_idx % Bvd::BIT_UNIT);
                }
                Bvd {
                    data: new_data.into_boxed_slice(),
                    length: self.length
                }
            }
        }

        impl Shl<&$rhs> for &Bvd {
            type Output = Bvd;
            fn shl(self, rhs: &$rhs) -> Bvd {
                self.shl(*rhs)
            }
        }

        impl ShrAssign<$rhs> for Bvd {
            fn shr_assign(&mut self, rhs: $rhs) {
                // A shift amount that does not fit in usize degrades to 0.
                let shift = usize::try_from(rhs).map_or(0, |s| s);
                if shift == 0 {
                    return;
                }
                // Move runs of bits from `old_idx = new_idx + shift` down to
                // `new_idx`, walking bottom-up so sources are read before
                // they are overwritten.
                let mut new_idx = 0;
                while new_idx + shift < self.length {
                    let old_idx = new_idx + shift;
                    // Longest run starting at `new_idx` that stays inside
                    // one source word and one destination word.
                    let l = (Self::BIT_UNIT - new_idx % Self::BIT_UNIT)
                        .min(Self::BIT_UNIT - old_idx % Self::BIT_UNIT);
                    let d = (self.data[old_idx / Self::BIT_UNIT] >> (old_idx % Self::BIT_UNIT)) & u64::mask(l);
                    self.data[new_idx / Self::BIT_UNIT] &= !(u64::mask(l) << (new_idx % Self::BIT_UNIT));
                    self.data[new_idx / Self::BIT_UNIT] |= d << (new_idx % Self::BIT_UNIT);
                    new_idx += l;
                }
                // Zero-fill the vacated high bits.
                while new_idx < self.length {
                    let l = Self::BIT_UNIT - new_idx % Self::BIT_UNIT;
                    self.data[new_idx / Self::BIT_UNIT] &= !(u64::mask(l) << (new_idx % Self::BIT_UNIT));
                    new_idx += l;
                }
            }
        }

        impl ShrAssign<&$rhs> for Bvd {
            fn shr_assign(&mut self, rhs: &$rhs) {
                self.shr_assign(*rhs);
            }
        }

        impl Shr<$rhs> for Bvd {
            type Output = Bvd;
            fn shr(mut self, rhs: $rhs) -> Bvd {
                self.shr_assign(rhs);
                return self;
            }
        }

        impl Shr<&$rhs> for Bvd {
            type Output = Bvd;
            fn shr(mut self, rhs: &$rhs) -> Bvd {
                self.shr_assign(rhs);
                return self;
            }
        }

        impl Shr<$rhs> for &Bvd {
            type Output = Bvd;
            fn shr(self, rhs: $rhs) -> Bvd {
                let shift = usize::try_from(rhs).map_or(0, |s| s);
                // Non-consuming variant: build the result in fresh zeroed
                // storage, so no explicit zero-fill pass is needed.
                let mut new_data: Vec<u64> = repeat(0).take(Bvd::capacity_from_bit_len(self.length)).collect();
                let mut new_idx = 0;
                while new_idx + shift < self.length {
                    let old_idx = new_idx + shift;
                    let l = (Bvd::BIT_UNIT - new_idx % Bvd::BIT_UNIT)
                        .min(Bvd::BIT_UNIT - old_idx % Bvd::BIT_UNIT);
                    new_data[new_idx / Bvd::BIT_UNIT] |= ((self.data[old_idx / Bvd::BIT_UNIT] >> (old_idx % Bvd::BIT_UNIT)) & u64::mask(l)) << (new_idx % Bvd::BIT_UNIT);
                    new_idx += l;
                }
                Bvd {
                    data: new_data.into_boxed_slice(),
                    length: self.length
                }
            }
        }

        impl Shr<&$rhs> for &Bvd {
            type Output = Bvd;
            fn shr(self, rhs: &$rhs) -> Bvd {
                self.shr(*rhs)
            }
        }
    )+
}}

impl_shifts!({u8, u16, u32, u64, u128, usize});
1179
// Implements a binary operator between bit vectors and primitive unsigned
// integers (all four owned/borrowed combinations) by converting the integer
// into a temporary `Bvd` and delegating to the vector-vector operator.
macro_rules! impl_op_uint {
    ($trait:ident, $method:ident, {$($uint:ty),+}) => {
        $(
            impl $trait<&$uint> for &Bvd
            {
                type Output = Bvd;
                fn $method(self, rhs: &$uint) -> Self::Output {
                    let temp = Bvd::from(*rhs);
                    self.$method(temp)
                }
            }

            impl $trait<$uint> for &Bvd
            {
                type Output = Bvd;
                fn $method(self, rhs: $uint) -> Self::Output {
                    let temp = Bvd::from(rhs);
                    self.$method(temp)
                }
            }

            impl $trait<&$uint> for Bvd
            {
                type Output = Bvd;
                fn $method(self, rhs: &$uint) -> Self::Output {
                    let temp = Bvd::from(*rhs);
                    self.$method(temp)
                }
            }

            impl $trait<$uint> for Bvd
            {
                type Output = Bvd;
                fn $method(self, rhs: $uint) -> Self::Output {
                    let temp = Bvd::from(rhs);
                    self.$method(temp)
                }
            }
        )+
    };
}
1225
// Implements an in-place assignment operator between `Bvd` and primitive
// unsigned integers by converting the integer into a temporary `Bvd` and
// delegating to the vector-vector assignment operator.
macro_rules! impl_op_assign_uint {
    ($trait:ident, $method:ident, {$($uint:ty),+}) => {
        $(
            impl $trait<$uint> for Bvd
            {
                fn $method(&mut self, rhs: $uint) {
                    let temp = Bvd::from(rhs);
                    self.$method(&temp);
                }
            }

            impl $trait<&$uint> for Bvd
            {
                fn $method(&mut self, rhs: &$uint) {
                    let temp = Bvd::from(*rhs);
                    self.$method(&temp);
                }
            }
        )+
    };
}
1247
/// Generates a bitwise assignment trait (`$trait` = `BitAndAssign` /
/// `BitOrAssign` / `BitXorAssign`) for `Bvd` with `Bvd`, `Bvf`, `Bv` and
/// primitive unsigned integer right-hand sides. The rhs is treated as
/// zero-extended: words of `self` past the end of `rhs` are combined with 0,
/// which clears them for AND and leaves them unchanged for OR/XOR.
macro_rules! impl_binop_assign {
    ($trait:ident, $method:ident, {$($uint:ty),+}) => {
        impl $trait<&Bvd> for Bvd {
            fn $method(&mut self, rhs: &Bvd) {
                // Combine the overlapping words of the two vectors.
                for i in 0..usize::min(Self::capacity_from_bit_len(self.length), Self::capacity_from_bit_len(rhs.length)) {
                    self.data[i].$method(rhs.data[i]);
                }
                // Zero-extend rhs over self's remaining words (empty range
                // when rhs is at least as long as self).
                for i in Self::capacity_from_bit_len(rhs.length)
                    ..Self::capacity_from_bit_len(self.length)
                {
                    self.data[i].$method(0);
                }
            }
        }

        impl $trait<Bvd> for Bvd {
            fn $method(&mut self, rhs: Bvd) {
                self.$method(&rhs);
            }
        }

        impl<I: Integer, const N: usize> $trait<&Bvf<I, N>> for Bvd {
            fn $method(&mut self, rhs: &Bvf<I, N>) {
                // NOTE(review): these loops are bounded by self.data.len(),
                // while the Bvd impl above uses capacity_from_bit_len(
                // self.length); they only agree if words beyond the bit
                // capacity (spare capacity) are kept zero — confirm.
                for i in 0..usize::min(IArray::int_len::<u64>(rhs), self.data.len()) {
                    self.data[i].$method(IArray::get_int::<u64>(rhs, i).unwrap());
                }
                for i in usize::min(IArray::int_len::<u64>(rhs), self.data.len())..self.data.len() {
                    self.data[i].$method(0);
                }
            }
        }

        impl<I: Integer, const N: usize> $trait<Bvf<I, N>> for Bvd {
            fn $method(&mut self, rhs: Bvf<I, N>) {
                self.$method(&rhs);
            }
        }

        // Dispatch on the Bv variant and reuse the Bvf/Bvd impls above.
        impl $trait<&Bv> for Bvd {
            fn $method(&mut self, rhs: &Bv) {
                match rhs {
                    Bv::Fixed(bvf) => self.$method(bvf),
                    Bv::Dynamic(bvd) => self.$method(bvd),
                }
            }
        }

        impl $trait<Bv> for Bvd {
            fn $method(&mut self, rhs: Bv) {
                self.$method(&rhs);
            }
        }

        impl_op_assign_uint!($trait, $method, {$($uint),+});
    };
}
1308
// Bitwise assignment operators for every supported right-hand-side type.
impl_binop_assign!(BitAndAssign, bitand_assign, {u8, u16, u32, u64, usize, u128});
impl_binop_assign!(BitOrAssign, bitor_assign, {u8, u16, u32, u64, usize, u128});
impl_binop_assign!(BitXorAssign, bitxor_assign, {u8, u16, u32, u64, usize, u128});
1312
/// Generates a word-wise ripple-carry addition/subtraction assignment trait
/// (`$trait` = `AddAssign` / `SubAssign`, `$overflowing_method` =
/// `overflowing_add` / `overflowing_sub`) for `Bvd` with `Bvd`, `Bvf`, `Bv`
/// and primitive unsigned integer right-hand sides. The rhs is zero-extended
/// and the result wraps modulo 2^self.length: the final carry/borrow is
/// discarded and the last partial word is masked.
macro_rules! impl_addsub_assign {
    ($trait:ident, $method:ident, $overflowing_method:ident, {$($uint:ty),+}) => {
        impl $trait<&Bvd> for Bvd {
            fn $method(&mut self, rhs: &Bvd) {
                let mut carry = 0;
                // Combine the overlapping words, propagating carry/borrow.
                for i in 0..usize::min(Self::capacity_from_bit_len(self.length), Self::capacity_from_bit_len(rhs.length)) {
                    let (d1, c1) = self.data[i].$overflowing_method(carry);
                    let (d2, c2) = d1.$overflowing_method(rhs.data[i]);
                    self.data[i] = d2;
                    // At most one of the two steps can overflow, so the
                    // outgoing carry is always 0 or 1.
                    carry = (c1 | c2) as u64;
                }
                // Propagate the carry/borrow through self's words past the
                // end of rhs (rhs is treated as zero-extended).
                for i in Self::capacity_from_bit_len(rhs.length)
                    ..Self::capacity_from_bit_len(self.length)
                {
                    let (d, c) = self.data[i].$overflowing_method(carry);
                    self.data[i] = d;
                    carry = c as u64;
                }
                // Truncate modulo 2^length: mask the partial top word.
                // get_mut returns None when length is an exact multiple of
                // the word size and there is no extra word to mask.
                if let Some(l) = self.data.get_mut(self.length / Bvd::BIT_UNIT) {
                    *l &= u64::mask(self.length % Bvd::BIT_UNIT);
                }
            }
        }

        impl $trait<Bvd> for Bvd {
            fn $method(&mut self, rhs: Bvd) {
                self.$method(&rhs);
            }
        }

        impl<I: Integer, const N: usize> $trait<&Bvf<I, N>> for Bvd {
            fn $method(&mut self, rhs: &Bvf<I, N>) {
                let mut carry = 0;
                // NOTE(review): this loop is bounded by self.data.len(),
                // while the Bvd impl above is bounded by
                // capacity_from_bit_len(self.length); with spare capacity it
                // can write into words beyond the bit length — confirm those
                // spare words are allowed to become non-zero.
                for i in 0..usize::min(IArray::int_len::<u64>(rhs), self.data.len()) {
                    let (d1, c1) = self.data[i].$overflowing_method(carry);
                    let (d2, c2) = d1.$overflowing_method(IArray::get_int(rhs, i).unwrap());
                    self.data[i] = d2;
                    carry = (c1 | c2) as u64;
                }
                // Carry/borrow propagation over the remaining words.
                for i in IArray::int_len::<u64>(rhs)..Self::capacity_from_bit_len(self.length) {
                    let (d, c) = self.data[i].$overflowing_method(carry);
                    self.data[i] = d;
                    carry = c as u64;
                }
                // Truncate modulo 2^length (see the Bvd impl above).
                if let Some(l) = self.data.get_mut(self.length / Bvd::BIT_UNIT) {
                    *l &= u64::mask(self.length % Bvd::BIT_UNIT);
                }
            }
        }

        impl<I: Integer, const N: usize> $trait<Bvf<I, N>> for Bvd {
            fn $method(&mut self, rhs: Bvf<I, N>) {
                self.$method(&rhs);
            }
        }

        // Dispatch on the Bv variant and reuse the Bvf/Bvd impls above.
        impl $trait<&Bv> for Bvd {
            fn $method(&mut self, rhs: &Bv) {
                match rhs {
                    Bv::Fixed(bvf) => self.$method(bvf),
                    Bv::Dynamic(bvd) => self.$method(bvd),
                }
            }
        }

        impl $trait<Bv> for Bvd {
            fn $method(&mut self, rhs: Bv) {
                self.$method(&rhs);
            }
        }

        impl_op_assign_uint!($trait, $method, {$($uint),+});
    };
}
1387
// Wrapping arithmetic assignment operators, truncated to self's bit length.
impl_addsub_assign!(AddAssign, add_assign, overflowing_add, {u8, u16, u32, u64, usize, u128});
impl_addsub_assign!(SubAssign, sub_assign, overflowing_sub, {u8, u16, u32, u64, usize, u128});
1390
/// Generates a value-returning binary operator (`$trait`, e.g. `BitAnd`,
/// `Add`) for `Bvd` and `&Bvd` from the corresponding assignment trait
/// (`Bvd: $assign_trait<T>`): the owned form mutates `self` in place and
/// returns it, the borrowed form clones first.
macro_rules! impl_op {
    ($trait:ident, $method:ident, $assign_trait:ident, $assign_method:ident) => {
        impl<T> $trait<T> for Bvd
        where
            Bvd: $assign_trait<T>,
        {
            type Output = Bvd;
            fn $method(mut self, rhs: T) -> Bvd {
                // Reuse the in-place implementation and hand back the buffer.
                self.$assign_method(rhs);
                self
            }
        }

        impl<T> $trait<T> for &Bvd
        where
            Bvd: $assign_trait<T>,
        {
            type Output = Bvd;
            fn $method(self, rhs: T) -> Bvd {
                // Borrowed lhs: clone, then apply the assignment operator.
                let mut result = self.clone();
                result.$assign_method(rhs);
                result
            }
        }
    };
}
1421
// Value-returning operators derived from their assignment counterparts.
impl_op!(BitAnd, bitand, BitAndAssign, bitand_assign);
impl_op!(BitOr, bitor, BitOrAssign, bitor_assign);
impl_op!(BitXor, bitxor, BitXorAssign, bitxor_assign);
impl_op!(Add, add, AddAssign, add_assign);
impl_op!(Sub, sub, SubAssign, sub_assign);
1427
/// Truncated schoolbook multiplication: the product is accumulated word by
/// word and truncated to `self.length` bits (the lhs length). Words of `rhs`
/// past the end of its storage are read as 0, so a shorter rhs is implicitly
/// zero-extended.
impl Mul<&Bvd> for &Bvd {
    type Output = Bvd;
    fn mul(self, rhs: &Bvd) -> Bvd {
        let mut res = Bvd::zeros(self.length);
        let len = Bvd::capacity_from_bit_len(res.length);

        for i in 0..len {
            let mut carry = 0;
            // Only partial products landing below the truncation boundary
            // are accumulated (j < len - i); the final carry of each row is
            // discarded, which implements the modulo-2^length truncation.
            for j in 0..(len - i) {
                // wmul appears to be a widening word multiply returning
                // (low, high), and cadd an add-with-carry into res.data[i+j]
                // returning the outgoing carry — TODO confirm in crate::utils.
                let product = self.data[i].wmul(*rhs.data.get(j).unwrap_or(&0));
                carry = res.data[i + j].cadd(product.0, carry) + product.1;
            }
        }

        // Mask the partial top word; get_mut is None when length is an exact
        // multiple of the word size and there is no extra word.
        if let Some(l) = res.data.get_mut(res.length / Bvd::BIT_UNIT) {
            *l &= u64::mask(res.length % Bvd::BIT_UNIT);
        }

        res
    }
}
1453
1454impl Mul<Bvd> for &Bvd {
1455 type Output = Bvd;
1456 fn mul(self, rhs: Bvd) -> Bvd {
1457 self.mul(&rhs)
1458 }
1459}
1460
1461impl Mul<&Bvd> for Bvd {
1462 type Output = Bvd;
1463 fn mul(self, rhs: &Bvd) -> Bvd {
1464 (&self).mul(rhs)
1465 }
1466}
1467
1468impl Mul<Bvd> for Bvd {
1469 type Output = Bvd;
1470 fn mul(self, rhs: Bvd) -> Bvd {
1471 (&self).mul(&rhs)
1472 }
1473}
1474
/// Truncated schoolbook multiplication with a fixed-size rhs: the product is
/// truncated to `self.length` bits (the lhs length). Words of `rhs` past its
/// storage are read as 0 (implicit zero-extension).
impl<I: Integer, const N: usize> Mul<&Bvf<I, N>> for &Bvd {
    type Output = Bvd;
    fn mul(self, rhs: &Bvf<I, N>) -> Bvd {
        let mut res = Bvd::zeros(self.length);
        let len = IArray::int_len::<u64>(&res);

        for i in 0..len {
            let mut carry = 0;
            // Only partial products landing below the truncation boundary
            // are accumulated; each row's final carry is discarded, which
            // implements the modulo-2^length truncation.
            for j in 0..(len - i) {
                // wmul appears to be a widening word multiply returning
                // (low, high), and cadd an add-with-carry into res.data[i+j]
                // returning the outgoing carry — TODO confirm in crate::utils.
                let product = self.data[i].wmul(IArray::get_int(rhs, j).unwrap_or(0));
                carry = res.data[i + j].cadd(product.0, carry) + product.1;
            }
        }

        // Mask the partial top word; get_mut is None when length is an exact
        // multiple of the word size and there is no extra word.
        if let Some(l) = res.data.get_mut(res.length / Bvd::BIT_UNIT) {
            *l &= u64::mask(res.length % Bvd::BIT_UNIT);
        }

        res
    }
}
1496
1497impl<I: Integer, const N: usize> Mul<Bvf<I, N>> for &Bvd {
1498 type Output = Bvd;
1499 fn mul(self, rhs: Bvf<I, N>) -> Bvd {
1500 self.mul(&rhs)
1501 }
1502}
1503
1504impl<I: Integer, const N: usize> Mul<&Bvf<I, N>> for Bvd {
1505 type Output = Bvd;
1506 fn mul(self, rhs: &Bvf<I, N>) -> Bvd {
1507 (&self).mul(rhs)
1508 }
1509}
1510
1511impl<I: Integer, const N: usize> Mul<Bvf<I, N>> for Bvd {
1512 type Output = Bvd;
1513 fn mul(self, rhs: Bvf<I, N>) -> Bvd {
1514 (&self).mul(&rhs)
1515 }
1516}
1517
1518impl Mul<&Bv> for &Bvd {
1519 type Output = Bvd;
1520 fn mul(self, rhs: &Bv) -> Bvd {
1521 match rhs {
1522 Bv::Fixed(bvf) => self.mul(bvf),
1523 Bv::Dynamic(bvd) => self.mul(bvd),
1524 }
1525 }
1526}
1527
1528impl Mul<Bv> for &Bvd {
1529 type Output = Bvd;
1530 fn mul(self, rhs: Bv) -> Bvd {
1531 self.mul(&rhs)
1532 }
1533}
1534
1535impl Mul<&Bv> for Bvd {
1536 type Output = Bvd;
1537 fn mul(self, rhs: &Bv) -> Bvd {
1538 (&self).mul(rhs)
1539 }
1540}
1541
1542impl Mul<Bv> for Bvd {
1543 type Output = Bvd;
1544 fn mul(self, rhs: Bv) -> Bvd {
1545 (&self).mul(&rhs)
1546 }
1547}
1548
1549impl_op_uint!(Mul, mul, {u8, u16, u32, u64, usize, u128});
1550
1551impl MulAssign<&Bvd> for Bvd {
1552 fn mul_assign(&mut self, rhs: &Bvd) {
1553 *self = Mul::mul(&*self, rhs);
1554 }
1555}
1556
1557impl MulAssign<Bvd> for Bvd {
1558 fn mul_assign(&mut self, rhs: Bvd) {
1559 *self = Mul::mul(&*self, &rhs);
1560 }
1561}
1562
1563impl<I: Integer, const N: usize> MulAssign<&Bvf<I, N>> for Bvd {
1564 fn mul_assign(&mut self, rhs: &Bvf<I, N>) {
1565 *self = Mul::mul(&*self, rhs);
1566 }
1567}
1568
1569impl<I: Integer, const N: usize> MulAssign<Bvf<I, N>> for Bvd {
1570 fn mul_assign(&mut self, rhs: Bvf<I, N>) {
1571 *self = Mul::mul(&*self, &rhs);
1572 }
1573}
1574
1575impl MulAssign<&Bv> for Bvd {
1576 fn mul_assign(&mut self, rhs: &Bv) {
1577 *self = Mul::mul(&*self, rhs);
1578 }
1579}
1580
1581impl MulAssign<Bv> for Bvd {
1582 fn mul_assign(&mut self, rhs: Bv) {
1583 *self = Mul::mul(&*self, &rhs);
1584 }
1585}
1586
1587impl_op_assign_uint!(MulAssign, mul_assign, {u8, u16, u32, u64, usize, u128});
1588
1589impl Div<&Bvd> for &Bvd {
1594 type Output = Bvd;
1595
1596 fn div(self, rhs: &Bvd) -> Self::Output {
1597 self.div_rem(rhs).0
1598 }
1599}
1600
1601impl Div<Bvd> for &Bvd {
1602 type Output = Bvd;
1603 fn div(self, rhs: Bvd) -> Bvd {
1604 self.div_rem(&rhs).0
1605 }
1606}
1607
1608impl Div<&Bvd> for Bvd {
1609 type Output = Bvd;
1610 fn div(self, rhs: &Bvd) -> Bvd {
1611 self.div_rem(rhs).0
1612 }
1613}
1614
1615impl Div<Bvd> for Bvd {
1616 type Output = Bvd;
1617 fn div(self, rhs: Bvd) -> Bvd {
1618 self.div_rem(&rhs).0
1619 }
1620}
1621
1622impl<I: Integer, const N: usize> Div<&Bvf<I, N>> for &Bvd {
1623 type Output = Bvd;
1624
1625 fn div(self, rhs: &Bvf<I, N>) -> Bvd {
1626 self.div_rem(rhs).0
1627 }
1628}
1629
1630impl<I: Integer, const N: usize> Div<Bvf<I, N>> for &Bvd {
1631 type Output = Bvd;
1632 fn div(self, rhs: Bvf<I, N>) -> Bvd {
1633 self.div_rem(&rhs).0
1634 }
1635}
1636
1637impl<I: Integer, const N: usize> Div<&Bvf<I, N>> for Bvd {
1638 type Output = Bvd;
1639 fn div(self, rhs: &Bvf<I, N>) -> Bvd {
1640 self.div_rem(rhs).0
1641 }
1642}
1643
1644impl<I: Integer, const N: usize> Div<Bvf<I, N>> for Bvd {
1645 type Output = Bvd;
1646 fn div(self, rhs: Bvf<I, N>) -> Bvd {
1647 self.div_rem(&rhs).0
1648 }
1649}
1650
1651impl Div<&Bv> for &Bvd {
1652 type Output = Bvd;
1653 fn div(self, rhs: &Bv) -> Bvd {
1654 match rhs {
1655 Bv::Fixed(bvf) => self.div_rem(bvf).0,
1656 Bv::Dynamic(bvd) => self.div_rem(bvd).0,
1657 }
1658 }
1659}
1660
1661impl Div<Bv> for &Bvd {
1662 type Output = Bvd;
1663 fn div(self, rhs: Bv) -> Bvd {
1664 self.div(&rhs)
1665 }
1666}
1667
1668impl Div<&Bv> for Bvd {
1669 type Output = Bvd;
1670 fn div(self, rhs: &Bv) -> Bvd {
1671 (&self).div(rhs)
1672 }
1673}
1674
1675impl Div<Bv> for Bvd {
1676 type Output = Bvd;
1677 fn div(self, rhs: Bv) -> Bvd {
1678 (&self).div(&rhs)
1679 }
1680}
1681
1682impl_op_uint!(Div, div, {u8, u16, u32, u64, usize, u128});
1683
1684impl DivAssign<&Bvd> for Bvd {
1685 fn div_assign(&mut self, rhs: &Bvd) {
1686 *self = Div::div(&*self, rhs);
1687 }
1688}
1689
1690impl DivAssign<Bvd> for Bvd {
1691 fn div_assign(&mut self, rhs: Bvd) {
1692 *self = Div::div(&*self, &rhs);
1693 }
1694}
1695
1696impl<I: Integer, const N: usize> DivAssign<&Bvf<I, N>> for Bvd {
1697 fn div_assign(&mut self, rhs: &Bvf<I, N>) {
1698 *self = Div::div(&*self, rhs);
1699 }
1700}
1701
1702impl<I: Integer, const N: usize> DivAssign<Bvf<I, N>> for Bvd {
1703 fn div_assign(&mut self, rhs: Bvf<I, N>) {
1704 *self = Div::div(&*self, &rhs);
1705 }
1706}
1707
1708impl DivAssign<&Bv> for Bvd {
1709 fn div_assign(&mut self, rhs: &Bv) {
1710 *self = Div::div(&*self, rhs);
1711 }
1712}
1713
1714impl DivAssign<Bv> for Bvd {
1715 fn div_assign(&mut self, rhs: Bv) {
1716 *self = Div::div(&*self, &rhs);
1717 }
1718}
1719
1720impl_op_assign_uint!(DivAssign, div_assign, {u8, u16, u32, u64, usize, u128});
1721
1722impl Rem<&Bvd> for &Bvd {
1727 type Output = Bvd;
1728 fn rem(self, rhs: &Bvd) -> Self::Output {
1729 self.div_rem(rhs).1
1730 }
1731}
1732
1733impl Rem<Bvd> for &Bvd {
1734 type Output = Bvd;
1735 fn rem(self, rhs: Bvd) -> Bvd {
1736 self.div_rem(&rhs).1
1737 }
1738}
1739
1740impl Rem<&Bvd> for Bvd {
1741 type Output = Bvd;
1742 fn rem(self, rhs: &Bvd) -> Bvd {
1743 self.div_rem(rhs).1
1744 }
1745}
1746
1747impl Rem<Bvd> for Bvd {
1748 type Output = Bvd;
1749 fn rem(self, rhs: Bvd) -> Bvd {
1750 self.div_rem(&rhs).1
1751 }
1752}
1753
1754impl<I: Integer, const N: usize> Rem<&Bvf<I, N>> for &Bvd {
1755 type Output = Bvd;
1756 fn rem(self, rhs: &Bvf<I, N>) -> Bvd {
1757 self.div_rem(rhs).1
1758 }
1759}
1760
1761impl<I: Integer, const N: usize> Rem<Bvf<I, N>> for &Bvd {
1762 type Output = Bvd;
1763 fn rem(self, rhs: Bvf<I, N>) -> Bvd {
1764 self.div_rem(&rhs).1
1765 }
1766}
1767
1768impl<I: Integer, const N: usize> Rem<&Bvf<I, N>> for Bvd {
1769 type Output = Bvd;
1770 fn rem(self, rhs: &Bvf<I, N>) -> Bvd {
1771 self.div_rem(rhs).1
1772 }
1773}
1774
1775impl<I: Integer, const N: usize> Rem<Bvf<I, N>> for Bvd {
1776 type Output = Bvd;
1777 fn rem(self, rhs: Bvf<I, N>) -> Bvd {
1778 self.div_rem(&rhs).1
1779 }
1780}
1781
1782impl Rem<&Bv> for &Bvd {
1783 type Output = Bvd;
1784 fn rem(self, rhs: &Bv) -> Bvd {
1785 match rhs {
1786 Bv::Fixed(bvf) => self.div_rem(bvf).1,
1787 Bv::Dynamic(bvd) => self.div_rem(bvd).1,
1788 }
1789 }
1790}
1791
1792impl Rem<Bv> for &Bvd {
1793 type Output = Bvd;
1794 fn rem(self, rhs: Bv) -> Bvd {
1795 self.rem(&rhs)
1796 }
1797}
1798
1799impl Rem<&Bv> for Bvd {
1800 type Output = Bvd;
1801 fn rem(self, rhs: &Bv) -> Bvd {
1802 (&self).rem(rhs)
1803 }
1804}
1805
1806impl Rem<Bv> for Bvd {
1807 type Output = Bvd;
1808 fn rem(self, rhs: Bv) -> Bvd {
1809 (&self).rem(&rhs)
1810 }
1811}
1812
1813impl_op_uint!(Rem, rem, {u8, u16, u32, u64, usize, u128});
1814
1815impl RemAssign<&Bvd> for Bvd {
1816 fn rem_assign(&mut self, rhs: &Bvd) {
1817 *self = Rem::rem(&*self, rhs);
1818 }
1819}
1820
1821impl RemAssign<Bvd> for Bvd {
1822 fn rem_assign(&mut self, rhs: Bvd) {
1823 *self = Rem::rem(&*self, &rhs);
1824 }
1825}
1826
1827impl<I: Integer, const N: usize> RemAssign<&Bvf<I, N>> for Bvd {
1828 fn rem_assign(&mut self, rhs: &Bvf<I, N>) {
1829 *self = Rem::rem(&*self, rhs);
1830 }
1831}
1832
1833impl<I: Integer, const N: usize> RemAssign<Bvf<I, N>> for Bvd {
1834 fn rem_assign(&mut self, rhs: Bvf<I, N>) {
1835 *self = Rem::rem(&*self, &rhs);
1836 }
1837}
1838
1839impl RemAssign<&Bv> for Bvd {
1840 fn rem_assign(&mut self, rhs: &Bv) {
1841 *self = Rem::rem(&*self, rhs);
1842 }
1843}
1844
1845impl RemAssign<Bv> for Bvd {
1846 fn rem_assign(&mut self, rhs: Bv) {
1847 *self = Rem::rem(&*self, &rhs);
1848 }
1849}
1850
1851impl_op_assign_uint!(RemAssign, rem_assign, {u8, u16, u32, u64, usize, u128});