/// An unbounded ("native") mathematical integer as modeled by the Certora
/// Prover, carried in an opaque `u64` handle. All arithmetic and comparisons
/// are routed through the `CVT_nativeint_u64_*` extern hooks declared in
/// `rt_decls` rather than through plain machine arithmetic.
///
/// NOTE(review): `Eq` is derived while `PartialEq` is hand-written below to
/// call the CVT equality hook — confirm the derived `Eq` marker is intended.
#[derive(Eq, Debug, Copy, Clone)]
pub struct NativeIntU64(u64);
7
/// Extern declarations for the `CVT_nativeint_u64_*` intrinsics understood by
/// the Certora Prover. When the `rt` feature is enabled, the host-side mock in
/// `rt_impls` provides these symbols instead.
mod rt_decls {
    /// Booleans are encoded as `u64`; callers test the result with `!= 0`.
    type BoolU64 = u64;

    extern "C" {
        // Unsigned comparisons.
        pub fn CVT_nativeint_u64_eq(_: u64, _: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_lt(_: u64, _: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_le(_: u64, _: u64) -> BoolU64;

        // Signed (two's-complement) comparisons and bit-level helpers.
        pub fn CVT_nativeint_u64_slt(_: u64, _: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_sle(_: u64, _: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_neg(_: u64) -> u64;
        pub fn CVT_nativeint_u64_sext(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_mask(_: u64, _: u64) -> u64;

        // Arithmetic.
        pub fn CVT_nativeint_u64_add(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_sub(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_mul(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_div(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_div_ceil(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_muldiv(_: u64, _: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_muldiv_ceil(_: u64, _: u64, _: u64) -> u64;

        // Non-deterministic value (the rt mock returns a fixed 0).
        pub fn CVT_nativeint_u64_nondet() -> u64;

        // Conversions to/from wider integers, passed as little-endian 64-bit
        // words (`w0` is the least-significant word).
        pub fn CVT_nativeint_u64_from_u128(w0: u64, w1: u64) -> u64;
        pub fn CVT_nativeint_u64_into_u128(_: u64) -> u128;
        pub fn CVT_nativeint_u64_from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> u64;

        // Maximum values of the corresponding fixed-width unsigned types.
        pub fn CVT_nativeint_u64_u64_max() -> u64;
        pub fn CVT_nativeint_u64_u128_max() -> u64;
        pub fn CVT_nativeint_u64_u256_max() -> u64;
    }
}
45
/// Host-side ("rt" feature) implementations of the CVT intrinsics declared in
/// `rt_decls`, used when running natively (e.g. in unit tests).
///
/// The mock models the native integer as a plain `u64`: arithmetic is
/// overflow-checked (panics on overflow/underflow/division by zero), wide
/// conversions panic if any high word is non-zero, and `nondet` is a
/// deterministic stub that returns 0.
#[cfg(feature = "rt")]
mod rt_impls {
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_eq(a: u64, b: u64) -> u64 {
        u64::from(a == b)
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_lt(a: u64, b: u64) -> u64 {
        u64::from(a < b)
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_le(a: u64, b: u64) -> u64 {
        u64::from(a <= b)
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_add(a: u64, b: u64) -> u64 {
        a.checked_add(b).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_mul(a: u64, b: u64) -> u64 {
        a.checked_mul(b).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_sub(a: u64, b: u64) -> u64 {
        a.checked_sub(b).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_div(a: u64, b: u64) -> u64 {
        a.checked_div(b).unwrap()
    }

    /// Ceiling division; panics if `b == 0`.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_div_ceil(a: u64, b: u64) -> u64 {
        a.div_ceil(b)
    }

    /// `a * b / c`, overflow-checked at every step.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_muldiv(a: u64, b: u64, c: u64) -> u64 {
        a.checked_mul(b).unwrap().checked_div(c).unwrap()
    }

    /// `a * b / c`, rounded up; overflow-checked multiply.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_muldiv_ceil(a: u64, b: u64, c: u64) -> u64 {
        a.checked_mul(b).unwrap().div_ceil(c)
    }

    /// Deterministic stub: always 0 (the unit tests rely on this value).
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_nondet() -> u64 {
        0
    }

    /// Takes a `u128` split into little-endian 64-bit words; only values that
    /// fit in the low word are representable by the mock.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_from_u128(w0: u64, w1: u64) -> u64 {
        if w1 != 0 {
            panic!("u128 value does not fit in the u64-backed rt mock");
        }
        w0
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_into_u128(a: u64) -> u128 {
        a as u128
    }

    /// Takes a `u256` split into little-endian 64-bit words; only values that
    /// fit in the low word are representable by the mock.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> u64 {
        if w1 != 0 || w2 != 0 || w3 != 0 {
            panic!("u256 value does not fit in the u64-backed rt mock");
        }
        w0
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u64_max() -> u64 {
        u64::MAX
    }

    /// Unsupported: `u128::MAX` is not representable in the u64-backed mock.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u128_max() -> u64 {
        // Was `assert!(false, ..)` followed by an unreachable `todo!()`;
        // a single panic expresses the same thing without dead code.
        panic!("u128_max is not supported");
    }

    /// Unsupported: `u256::MAX` is not representable in the u64-backed mock.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u256_max() -> u64 {
        panic!("u256_max is not supported");
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_slt(a: u64, b: u64) -> u64 {
        u64::from((a as i64) < (b as i64))
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_sle(a: u64, b: u64) -> u64 {
        u64::from((a as i64) <= (b as i64))
    }

    /// Sign-extends the low `bits` bits of `a` to the full 64 bits.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_sext(a: u64, bits: u64) -> u64 {
        assert!(
            (bits > 0 && bits <= 64),
            "bits must be in 1..=64, got {}",
            bits
        );
        let s = 64 - bits;
        // Shift the field's sign bit up to bit 63, then arithmetic-shift
        // back down so it is replicated across the high bits.
        (((a << s) as i64) >> s) as u64
    }

    /// Two's-complement negation. Overflows (panics in debug builds) for
    /// `i64::MIN`, matching plain `i64` negation semantics.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_neg(a: u64) -> u64 {
        -(a as i64) as u64
    }

    /// Keeps only the low `bits` bits of `a`.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_mask(a: u64, bits: u64) -> u64 {
        assert!(
            (bits > 0 && bits <= 64),
            "bits must be in 1..=64, got {}",
            bits
        );

        // `1 << 64` would overflow, so the full-width case is special-cased.
        let mask = if bits == 64 {
            u64::MAX
        } else {
            (1u64 << bits) - 1
        };
        a & mask
    }
}
193
194use rt_decls::*;
195
impl NativeIntU64 {
    /// Constructs a value from anything convertible into `NativeIntU64`.
    pub fn new<T>(v: T) -> Self
    where
        T: Into<NativeIntU64>,
    {
        v.into()
    }

    /// Ceiling division (`self / rhs`, rounded up) via the CVT hook.
    pub fn div_ceil(self, rhs: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_div_ceil(self.0, rhs.0)) }
    }

    /// `self * num / den` via the CVT hook.
    pub fn muldiv(self, num: Self, den: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_muldiv(self.0, num.0, den.0)) }
    }

    /// `self * num / den`, rounded up, via the CVT hook.
    pub fn muldiv_ceil(self, num: Self, den: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_muldiv_ceil(self.0, num.0, den.0)) }
    }

    /// Builds a value from a `u128` given as two little-endian 64-bit words
    /// (`w0` is the least-significant word).
    pub fn from_u128(w0: u64, w1: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_from_u128(w0, w1)) }
    }

    /// Converts to `u128`, first assuming (to the prover) that the value
    /// fits in a `u128`.
    pub fn into_u128(self) -> u128 {
        cvlr_asserts::cvlr_assume!(self.is_u128());
        unsafe { CVT_nativeint_u64_into_u128(self.0) }
    }

    /// Builds a value from a `u256` given as four little-endian 64-bit words.
    pub fn from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_from_u256(w0, w1, w2, w3)) }
    }

    /// Largest `u64` value, via the CVT hook.
    pub fn u64_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u64_max()) }
    }

    /// Largest `u128` value. NOTE(review): unsupported by the rt mock, which
    /// panics here.
    pub fn u128_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u128_max()) }
    }

    /// Largest `u256` value. NOTE(review): unsupported by the rt mock, which
    /// panics here.
    pub fn u256_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u256_max()) }
    }

    /// True iff the value fits in a `u8`.
    pub fn is_u8(self) -> bool {
        self <= Self::new(u8::MAX as u64)
    }

    /// True iff the value fits in a `u16`.
    pub fn is_u16(self) -> bool {
        self <= Self::new(u16::MAX as u64)
    }

    /// True iff the value fits in a `u32`.
    pub fn is_u32(self) -> bool {
        self <= Self::new(u32::MAX as u64)
    }

    /// True iff the value fits in a `u64`.
    pub fn is_u64(self) -> bool {
        self <= Self::u64_max()
    }

    /// True iff the value fits in a `u128`. Compares against `u128_max`,
    /// which the rt mock does not support (it panics).
    pub fn is_u128(self) -> bool {
        self <= Self::u128_max()
    }

    /// Always true: every value of this type is taken to fit in a `u256`.
    pub fn is_u256(self) -> bool {
        true
    }

    /// An unconstrained (non-deterministic) value; the rt mock yields 0.
    pub fn nondet() -> Self {
        cvlr_nondet::nondet()
    }

    /// NOTE(review): despite the name, this returns `Self`, not `Option` —
    /// it simply delegates to `Sub` (which, in rt mode, panics on
    /// underflow). Kept as-is for interface compatibility.
    pub fn checked_sub(&self, v: NativeIntU64) -> Self {
        *self - v
    }

    /// Sign-extends the low `bits` bits of the value to 64 bits.
    pub fn sext(self, bits: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_sext(self.0, bits)) }
    }

    /// Signed less-than (two's-complement interpretation).
    pub fn slt(self, other: Self) -> bool {
        unsafe { CVT_nativeint_u64_slt(self.0, other.0) != 0 }
    }

    /// Signed less-than-or-equal.
    pub fn sle(self, other: Self) -> bool {
        unsafe { CVT_nativeint_u64_sle(self.0, other.0) != 0 }
    }

    /// Signed greater-than (arguments swapped into the `slt` hook).
    pub fn sgt(self, other: Self) -> bool {
        unsafe { CVT_nativeint_u64_slt(other.0, self.0) != 0 }
    }

    /// Signed greater-than-or-equal (arguments swapped into the `sle` hook).
    pub fn sge(self, other: Self) -> bool {
        unsafe { CVT_nativeint_u64_sle(other.0, self.0) != 0 }
    }

    /// Keeps only the low `bits` bits of the value.
    pub fn mask(self, bits: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_mask(self.0, bits)) }
    }

    /// The raw `u64` word backing this value.
    pub fn as_internal(&self) -> u64 {
        self.0
    }
}
303
impl PartialEq for NativeIntU64 {
    /// Equality via the `CVT_nativeint_u64_eq` hook (non-zero means equal).
    #[inline(always)]
    fn eq(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_eq(self.0, other.0) != 0 }
    }
}
310
#[allow(clippy::comparison_chain, clippy::non_canonical_partial_ord_impl)]
impl PartialOrd for NativeIntU64 {
    /// NOTE(review): `partial_cmp` compares the raw `u64` words directly,
    /// while `lt`/`le` below route through the CVT hooks — presumably
    /// intentional so the prover models `<`/`<=` precisely; confirm the raw
    /// comparison here is sound for consumers that call `partial_cmp`.
    #[inline(always)]
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        let ord = if self.0 == other.0 {
            core::cmp::Ordering::Equal
        } else if self.0 < other.0 {
            core::cmp::Ordering::Less
        } else {
            core::cmp::Ordering::Greater
        };
        Some(ord)
    }
    /// Unsigned less-than via the CVT hook.
    #[inline(always)]
    fn lt(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_lt(self.0, other.0) != 0 }
    }
    /// Unsigned less-than-or-equal via the CVT hook.
    #[inline(always)]
    fn le(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_le(self.0, other.0) != 0 }
    }
    /// Greater-than expressed as the flipped `lt`.
    #[inline(always)]
    fn gt(&self, other: &Self) -> bool {
        other.lt(self)
    }
    /// Greater-than-or-equal expressed as the flipped `le`.
    #[inline(always)]
    fn ge(&self, other: &Self) -> bool {
        other.le(self)
    }
}
343
344impl Ord for NativeIntU64 {
345 #[inline(always)]
346 fn cmp(&self, other: &Self) -> core::cmp::Ordering {
347 if self.lt(other) {
348 core::cmp::Ordering::Less
349 } else if self.gt(other) {
350 core::cmp::Ordering::Greater
351 } else {
352 core::cmp::Ordering::Equal
353 }
354 }
355
356 fn max(self, other: Self) -> Self {
357 if self.gt(&other) {
358 self
359 } else {
360 other
361 }
362 }
363
364 fn min(self, other: Self) -> Self {
365 if self.gt(&other) {
366 other
367 } else {
368 self
369 }
370 }
371
372 fn clamp(self, min: Self, max: Self) -> Self {
373 if self.gt(&max) {
374 max
375 } else if self.lt(&min) {
376 min
377 } else {
378 self
379 }
380 }
381}
382
383impl core::ops::Neg for NativeIntU64 {
384 type Output = Self;
385
386 fn neg(self) -> Self::Output {
387 unsafe { Self(CVT_nativeint_u64_neg(self.0)) }
388 }
389}
390
391impl core::ops::Add<NativeIntU64> for NativeIntU64 {
392 type Output = Self;
393
394 fn add(self, rhs: NativeIntU64) -> Self::Output {
395 unsafe { Self(CVT_nativeint_u64_add(self.0, rhs.0)) }
396 }
397}
398
399impl core::ops::Sub<NativeIntU64> for NativeIntU64 {
400 type Output = Self;
401
402 fn sub(self, rhs: NativeIntU64) -> Self::Output {
403 unsafe { Self(CVT_nativeint_u64_sub(self.0, rhs.0)) }
404 }
405}
406
407impl core::ops::Mul<NativeIntU64> for NativeIntU64 {
408 type Output = Self;
409
410 fn mul(self, rhs: NativeIntU64) -> Self::Output {
411 unsafe { Self(CVT_nativeint_u64_mul(self.0, rhs.0)) }
412 }
413}
414
415impl core::ops::Div<NativeIntU64> for NativeIntU64 {
416 type Output = Self;
417
418 fn div(self, rhs: NativeIntU64) -> Self::Output {
419 unsafe { Self(CVT_nativeint_u64_div(self.0, rhs.0)) }
420 }
421}
422
/// Generates a lossless `From<$uint>` conversion for unsigned integer types
/// that fit in a `u64`.
macro_rules! impl_from_for_small_uint {
    ($uint:ty) => {
        impl From<$uint> for NativeIntU64 {
            fn from(value: $uint) -> Self {
                Self(value as u64)
            }
        }
    };
}
432
/// Generates mixed-type operators (`Add`, `Mul`, `Div`) and comparisons
/// (`PartialEq`, `PartialOrd`) between `NativeIntU64` and a primitive numeric
/// type, by first converting the primitive operand with `Self::from`.
macro_rules! impl_core_traits_for_num {
    ($num:ty) => {
        impl core::ops::Add<$num> for NativeIntU64 {
            type Output = Self;

            fn add(self, rhs: $num) -> Self::Output {
                self + Self::from(rhs)
            }
        }

        impl core::ops::Mul<$num> for NativeIntU64 {
            type Output = Self;

            fn mul(self, rhs: $num) -> Self::Output {
                self * Self::from(rhs)
            }
        }

        impl core::ops::Div<$num> for NativeIntU64 {
            type Output = Self;

            fn div(self, rhs: $num) -> Self::Output {
                self / Self::from(rhs)
            }
        }

        impl PartialEq<$num> for NativeIntU64 {
            #[inline(always)]
            fn eq(&self, other: &$num) -> bool {
                *self == Self::from(*other)
            }
        }

        impl PartialOrd<$num> for NativeIntU64 {
            #[inline(always)]
            fn partial_cmp(&self, other: &$num) -> Option<core::cmp::Ordering> {
                self.partial_cmp(&Self::from(*other))
            }
            #[inline(always)]
            fn lt(&self, other: &$num) -> bool {
                *self < Self::from(*other)
            }
            #[inline(always)]
            fn le(&self, other: &$num) -> bool {
                *self <= Self::from(*other)
            }
            #[inline(always)]
            fn gt(&self, other: &$num) -> bool {
                *self > Self::from(*other)
            }
            #[inline(always)]
            fn ge(&self, other: &$num) -> bool {
                *self >= Self::from(*other)
            }
        }
    };
}
490
// Lossless conversions from the small unsigned integer types.
impl_from_for_small_uint!(u8);
impl_from_for_small_uint!(u16);
impl_from_for_small_uint!(u32);
impl_from_for_small_uint!(u64);
495
496impl From<u128> for NativeIntU64 {
497 fn from(value: u128) -> Self {
498 let w0: u64 = value as u64;
500 let w1: u64 = (value >> 64) as u64;
501
502 Self::from_u128(w0, w1)
503 }
504}
505
// Mixed-type operators and comparisons against the primitive unsigned types.
impl_core_traits_for_num!(u8);
impl_core_traits_for_num!(u16);
impl_core_traits_for_num!(u32);
impl_core_traits_for_num!(u64);
impl_core_traits_for_num!(u128);
511
512impl From<i32> for NativeIntU64 {
513 fn from(value: i32) -> Self {
514 if value.is_positive() {
515 Self::from(value as u64)
516 } else {
517 Self::from(0u64) - Self::from((value as i64).unsigned_abs())
518 }
519 }
520}
521impl_core_traits_for_num!(i32);
522
impl From<NativeIntU64> for u64 {
    /// Extracts the raw word, first assuming (to the prover) that the value
    /// fits in a `u64`.
    fn from(value: NativeIntU64) -> Self {
        cvlr_asserts::cvlr_assume!(value.is_u64());
        value.as_internal()
    }
}
529
impl From<NativeIntU64> for u128 {
    /// Delegates to `into_u128`, which assumes the value fits in a `u128`.
    fn from(value: NativeIntU64) -> Self {
        value.into_u128()
    }
}
535
536impl From<&[u64; 2]> for NativeIntU64 {
537 #[inline(always)]
538 fn from(value: &[u64; 2]) -> Self {
539 Self::from_u128(value[0], value[1])
540 }
541}
542
543impl From<&[u64; 4]> for NativeIntU64 {
544 #[inline(always)]
545 fn from(value: &[u64; 4]) -> Self {
546 Self::from_u256(value[0], value[1], value[2], value[3])
547 }
548}
549
550impl From<&[u8; 32]> for NativeIntU64 {
551 #[inline(always)]
552 fn from(value: &[u8; 32]) -> Self {
553 let (w0, rest) = value.split_at(8);
554 let w0 = u64::from_le_bytes(w0.try_into().unwrap());
555 let (w1, rest) = rest.split_at(8);
556 let w1 = u64::from_le_bytes(w1.try_into().unwrap());
557 let (w2, rest) = rest.split_at(8);
558 let w2 = u64::from_le_bytes(w2.try_into().unwrap());
559 let w3 = u64::from_le_bytes(rest.try_into().unwrap());
560 unsafe { Self(CVT_nativeint_u64_from_u256(w0, w1, w2, w3)) }
561 }
562}
563
impl From<&[u8]> for NativeIntU64 {
    /// Converts a byte slice holding a little-endian `u256`.
    ///
    /// # Panics
    /// Panics if `value.len() != 32`.
    #[inline(always)]
    fn from(value: &[u8]) -> Self {
        let v: &[u8; 32] = value.try_into().unwrap();
        Self::from(v)
    }
}
571
impl cvlr_nondet::Nondet for NativeIntU64 {
    /// Produces an unconstrained value via the `CVT_nativeint_u64_nondet`
    /// hook (the rt mock deterministically returns 0).
    fn nondet() -> NativeIntU64 {
        unsafe { Self(CVT_nativeint_u64_nondet()) }
    }
}
577
/// Generates a free function `$name(v: $uint) -> bool` that converts `v` to
/// `NativeIntU64` and applies the `$is_uint` range predicate.
macro_rules! impl_is_uint {
    ($name:ident, $uint:ty, $is_uint:ident) => {
        pub fn $name(v: $uint) -> bool {
            NativeIntU64::from(v).$is_uint()
        }
    };
}
585
// Free-function range predicates, e.g. `is_u8(v)`.
impl_is_uint! { is_u8, u8, is_u8 }
impl_is_uint! { is_u16, u16, is_u16 }
impl_is_uint! { is_u32, u32, is_u32 }
impl_is_uint! { is_u64, u64, is_u64 }
impl_is_uint! { is_u128, u128, is_u128 }
591
/// Unit tests; these require the `rt` feature so the mock intrinsics in
/// `rt_impls` are linked in. Fix: several statements had been jammed onto
/// single lines; restored one statement per line. All assertions unchanged.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn it_works() {
        let x: NativeIntU64 = 2.into();
        let y: NativeIntU64 = 4.into();
        assert_eq!(x + y, 6);
        assert!(x < 6);
    }

    #[test]
    fn nondet_test() {
        // The rt mock's nondet is a deterministic stub returning 0.
        let x: NativeIntU64 = cvlr_nondet::nondet();
        assert_eq!(x, 0);
    }

    #[test]
    fn test_arithmetic_operations() {
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();

        assert_eq!(a + b, 13);
        assert_eq!(a + 5, 15);

        assert_eq!(a - b, 7);
        assert_eq!(a * b, 30);
        assert_eq!(a * 2, 20);

        assert_eq!(a / b, 3);
        assert_eq!(a / 2, 5);
    }

    #[test]
    fn test_comparison_operations() {
        let a: NativeIntU64 = 5.into();
        let b: NativeIntU64 = 10.into();
        let c: NativeIntU64 = 5.into();

        assert_eq!(a, c);
        assert_ne!(a, b);

        assert!(a < b);
        assert!(!(b < a));
        assert!(!(a < c));

        assert!(a <= b);
        assert!(a <= c);
        assert!(!(b <= a));

        assert!(b > a);
        assert!(!(a > b));
        assert!(!(a > c));

        assert!(b >= a);
        assert!(a >= c);
        assert!(!(a >= b));
    }

    #[test]
    fn test_div_ceil() {
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();
        assert_eq!(a.div_ceil(b), 4);

        let c: NativeIntU64 = 9.into();
        let d: NativeIntU64 = 3.into();
        assert_eq!(c.div_ceil(d), 3);

        let e: NativeIntU64 = 11.into();
        let f: NativeIntU64 = 5.into();
        assert_eq!(e.div_ceil(f), 3);
    }

    #[test]
    fn test_muldiv() {
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();
        let c: NativeIntU64 = 2.into();
        assert_eq!(a.muldiv(b, c), 15);

        let d: NativeIntU64 = 100.into();
        let e: NativeIntU64 = 7.into();
        let f: NativeIntU64 = 4.into();
        assert_eq!(d.muldiv(e, f), 175);
    }

    #[test]
    fn test_muldiv_ceil() {
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();
        let c: NativeIntU64 = 4.into();
        assert_eq!(a.muldiv_ceil(b, c), 8);

        let d: NativeIntU64 = 10.into();
        let e: NativeIntU64 = 3.into();
        let f: NativeIntU64 = 5.into();
        assert_eq!(d.muldiv_ceil(e, f), 6);
    }

    #[test]
    fn test_from_u128() {
        let val = NativeIntU64::from_u128(42, 0);
        assert_eq!(val, 42);

        let val2 = NativeIntU64::from_u128(0x1234_5678_9abc_def0, 0);
        assert_eq!(val2, 0x1234_5678_9abc_def0u64);
    }

    #[test]
    fn test_from_u256() {
        let val = NativeIntU64::from_u256(100, 0, 0, 0);
        assert_eq!(val, 100);

        let val2 = NativeIntU64::from_u256(0xffff_ffff_ffff_ffff, 0, 0, 0);
        assert_eq!(val2, 0xffff_ffff_ffff_ffffu64);
    }

    #[test]
    fn test_from_primitive_types() {
        let val_u8: NativeIntU64 = 42u8.into();
        assert_eq!(val_u8, 42);

        let val_u16: NativeIntU64 = 1000u16.into();
        assert_eq!(val_u16, 1000);

        let val_u32: NativeIntU64 = 1_000_000u32.into();
        assert_eq!(val_u32, 1_000_000);

        let val_u64: NativeIntU64 = 1_000_000_000u64.into();
        assert_eq!(val_u64, 1_000_000_000);

        let val_u128: NativeIntU64 = 1_000_000_000_000u128.into();
        assert_eq!(val_u128, 1_000_000_000_000u64);
    }

    #[test]
    fn test_from_i32() {
        let val_pos: NativeIntU64 = 42i32.into();
        assert_eq!(val_pos, 42);

        let val_zero: NativeIntU64 = 0i32.into();
        assert_eq!(val_zero, 0);
    }

    #[test]
    fn test_from_array_u64_2() {
        let arr = [42u64, 0u64];
        let val: NativeIntU64 = (&arr).into();
        assert_eq!(val, 42);
    }

    #[test]
    fn test_from_array_u64_4() {
        let arr = [100u64, 0u64, 0u64, 0u64];
        let val: NativeIntU64 = (&arr).into();
        assert_eq!(val, 100);
    }

    #[test]
    fn test_from_array_u8_32() {
        let mut arr = [0u8; 32];
        arr[0..8].copy_from_slice(&0x1234567890abcdefu64.to_le_bytes());
        let val: NativeIntU64 = (&arr).into();
        assert_eq!(val, 0x1234567890abcdefu64);
    }

    #[test]
    fn test_max_functions() {
        let u64_max = NativeIntU64::u64_max();
        assert_eq!(u64_max, u64::MAX);
    }

    #[test]
    fn test_is_uint_functions() {
        assert!(NativeIntU64::from(0u64).is_u8());
        assert!(NativeIntU64::from(255u64).is_u8());
        assert!(!NativeIntU64::from(256u64).is_u8());

        assert!(NativeIntU64::from(0u64).is_u16());
        assert!(NativeIntU64::from(65535u64).is_u16());
        assert!(!NativeIntU64::from(65536u64).is_u16());

        assert!(NativeIntU64::from(0u64).is_u32());
        assert!(NativeIntU64::from(4294967295u32).is_u32());
        assert!(!NativeIntU64::from(4294967296u64).is_u32());

        assert!(NativeIntU64::from(0u64).is_u64());
        assert!(NativeIntU64::from(u64::MAX).is_u64());

        assert!(NativeIntU64::from(0u64).is_u256());
        assert!(NativeIntU64::from(u64::MAX).is_u256());
    }

    #[test]
    fn test_ord_trait_methods() {
        let a: NativeIntU64 = 5.into();
        let b: NativeIntU64 = 10.into();
        let c: NativeIntU64 = 7.into();

        assert_eq!(a.cmp(&b), core::cmp::Ordering::Less);
        assert_eq!(b.cmp(&a), core::cmp::Ordering::Greater);
        assert_eq!(a.cmp(&a), core::cmp::Ordering::Equal);

        assert_eq!(a.max(b), b);
        assert_eq!(b.max(a), b);
        assert_eq!(a.max(a), a);

        assert_eq!(a.min(b), a);
        assert_eq!(b.min(a), a);
        assert_eq!(a.min(a), a);

        assert_eq!(c.clamp(a, b), c);
        assert_eq!(a.clamp(c, b), c);
        assert_eq!(b.clamp(a, c), c);
        assert_eq!(a.clamp(a, b), a);
        assert_eq!(b.clamp(a, b), b);
    }

    #[test]
    fn test_edge_cases() {
        let zero: NativeIntU64 = 0.into();
        assert_eq!(zero + zero, zero);
        assert_eq!(zero * 100, zero);
        assert_eq!(zero / 1, zero);

        let one: NativeIntU64 = 1.into();
        assert_eq!(zero + one, one);
        assert_eq!(one * 100, 100);
        assert_eq!(one / one, one);

        let max: NativeIntU64 = u64::MAX.into();
        assert_eq!(max, NativeIntU64::u64_max());
        assert!(max.is_u64());
    }

    #[test]
    fn test_checked_sub() {
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();
        assert_eq!(a.checked_sub(b), 7);
    }

    #[test]
    fn test_new_method() {
        let val1 = NativeIntU64::from(42u8);
        assert_eq!(val1, 42u64);

        let val2 = NativeIntU64::from(1000u16);
        assert_eq!(val2, 1000u64);

        let val3 = NativeIntU64::from(1_000_000u32);
        assert_eq!(val3, 1_000_000u64);
    }

    #[test]
    fn test_as_internal() {
        let val = NativeIntU64::from(42);
        assert_eq!(val.as_internal(), 42u64);

        let max = NativeIntU64::from(u64::MAX);
        assert_eq!(max.as_internal(), u64::MAX);
    }

    #[test]
    fn test_arithmetic_with_primitives() {
        let a: NativeIntU64 = 10.into();

        assert_eq!(a + 5u8, 15);
        assert_eq!(a + 5u16, 15);
        assert_eq!(a + 5u32, 15);
        assert_eq!(a + 5u64, 15);
        assert_eq!(a + 5u128, 15);

        assert_eq!(a * 3u8, 30);
        assert_eq!(a * 3u16, 30);
        assert_eq!(a * 3u32, 30);
        assert_eq!(a * 3u64, 30);
        assert_eq!(a * 3u128, 30);

        assert_eq!(a / 2u8, 5);
        assert_eq!(a / 2u16, 5);
        assert_eq!(a / 2u32, 5);
        assert_eq!(a / 2u64, 5);
        assert_eq!(a / 2u128, 5);
    }

    #[test]
    fn test_sext() {
        let x: NativeIntU64 = 0x7Fu64.into();
        assert_eq!(x.sext(8), 0x7Fu64);

        let x: NativeIntU64 = 0xFFu64.into();
        assert_eq!(x.sext(8), 0xFFFF_FFFF_FFFF_FFFFu64);

        let x: NativeIntU64 = 0x80u64.into();
        assert_eq!(x.sext(8), 0xFFFF_FFFF_FFFF_FF80u64);

        let x: NativeIntU64 = 0xFFFFu64.into();
        assert_eq!(x.sext(16), 0xFFFF_FFFF_FFFF_FFFFu64);

        let x: NativeIntU64 = 0x7FFFu64.into();
        assert_eq!(x.sext(16), 0x7FFFu64);

        let x: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert_eq!(x.sext(64), 0xFFFF_FFFF_FFFF_FFFFu64);
    }

    #[test]
    fn test_slt() {
        let a: NativeIntU64 = 0u64.into();
        let b: NativeIntU64 = 1u64.into();
        assert!(a.slt(b));
        assert!(!b.slt(a));

        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        let zero: NativeIntU64 = 0u64.into();
        assert!(neg_one.slt(zero));
        assert!(!zero.slt(neg_one));

        let x: NativeIntU64 = 42u64.into();
        assert!(!x.slt(x));

        let small: NativeIntU64 = 10u64.into();
        let large: NativeIntU64 = 100u64.into();
        assert!(small.slt(large));
        assert!(!large.slt(small));
    }

    #[test]
    fn test_sle() {
        let a: NativeIntU64 = 0u64.into();
        let b: NativeIntU64 = 1u64.into();
        assert!(a.sle(b));
        assert!(!b.sle(a));

        let x: NativeIntU64 = 42u64.into();
        assert!(x.sle(x));

        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        let zero: NativeIntU64 = 0u64.into();
        assert!(neg_one.sle(zero));
        assert!(!zero.sle(neg_one));
    }

    #[test]
    fn test_sgt() {
        let a: NativeIntU64 = 0u64.into();
        let b: NativeIntU64 = 1u64.into();
        assert!(b.sgt(a));
        assert!(!a.sgt(b));

        let zero: NativeIntU64 = 0u64.into();
        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert!(zero.sgt(neg_one));
        assert!(!neg_one.sgt(zero));

        let x: NativeIntU64 = 42u64.into();
        assert!(!x.sgt(x));
    }

    #[test]
    fn test_sge() {
        let a: NativeIntU64 = 0u64.into();
        let b: NativeIntU64 = 1u64.into();
        assert!(b.sge(a));
        assert!(!a.sge(b));

        let x: NativeIntU64 = 42u64.into();
        assert!(x.sge(x));

        let zero: NativeIntU64 = 0u64.into();
        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert!(zero.sge(neg_one));
        assert!(!neg_one.sge(zero));
    }

    #[test]
    fn test_mask() {
        let x: NativeIntU64 = 0xFFu64.into();
        assert_eq!(x.mask(8), 0xFFu64);

        let x: NativeIntU64 = 0x1FFu64.into();
        assert_eq!(x.mask(8), 0xFFu64);

        let x: NativeIntU64 = 0x1234u64.into();
        assert_eq!(x.mask(4), 4u64);

        let x: NativeIntU64 = 3u64.into();
        assert_eq!(x.mask(1), 1u64);

        let x: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert_eq!(x.mask(64), 0xFFFF_FFFF_FFFF_FFFFu64);

        let x: NativeIntU64 = 0u64.into();
        assert_eq!(x.mask(8), 0u64);
    }

    #[test]
    fn test_neg() {
        let zero: NativeIntU64 = 0u64.into();
        assert_eq!(-zero, 0u64);

        let one: NativeIntU64 = 1u64.into();
        assert_eq!(-one, 0xFFFF_FFFF_FFFF_FFFFu64);

        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert_eq!(-neg_one, 1u64);

        let x: NativeIntU64 = 42u64.into();
        assert_eq!(-x, (-42i64 as u64));

        let x: NativeIntU64 = 100u64.into();
        assert_eq!(-(-x), 100u64);
    }
}
1073}