/// Newtype over a `u64` word whose arithmetic and comparisons are routed
/// through the external `CVT_nativeint_u64_*` hooks declared in `rt_decls`.
/// Presumably the verifier interprets the word as an unbounded integer —
/// the `rt` feature provides a concrete native model for testing.
#[derive(Eq, Debug, Copy, Clone)]
pub struct NativeIntU64(u64);
7
/// External hooks implementing `NativeIntU64` semantics. With the `rt`
/// feature enabled these resolve to the native definitions in `rt_impls`;
/// otherwise the verification environment is expected to provide them.
mod rt_decls {
    /// Booleans are encoded as `u64`: 0 is false, non-zero is true.
    type BoolU64 = u64;

    extern "C" {
        // Unsigned comparisons.
        pub fn CVT_nativeint_u64_eq(a: u64, b: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_lt(a: u64, b: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_le(a: u64, b: u64) -> BoolU64;

        // Signed comparisons and bit-level helpers.
        pub fn CVT_nativeint_u64_slt(a: u64, b: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_sle(a: u64, b: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_neg(a: u64) -> u64;
        pub fn CVT_nativeint_u64_sext(a: u64, bits: u64) -> u64;
        pub fn CVT_nativeint_u64_mask(a: u64, bits: u64) -> u64;

        // Arithmetic.
        pub fn CVT_nativeint_u64_add(a: u64, b: u64) -> u64;
        pub fn CVT_nativeint_u64_sub(a: u64, b: u64) -> u64;
        pub fn CVT_nativeint_u64_mul(a: u64, b: u64) -> u64;
        pub fn CVT_nativeint_u64_div(a: u64, b: u64) -> u64;
        pub fn CVT_nativeint_u64_div_ceil(a: u64, b: u64) -> u64;
        pub fn CVT_nativeint_u64_muldiv(a: u64, b: u64, c: u64) -> u64;
        pub fn CVT_nativeint_u64_muldiv_ceil(a: u64, b: u64, c: u64) -> u64;

        /// An arbitrary ("nondeterministic") value chosen by the verifier.
        pub fn CVT_nativeint_u64_nondet() -> u64;

        // Construction from little-endian 64-bit limbs.
        pub fn CVT_nativeint_u64_from_u128(w0: u64, w1: u64) -> u64;
        pub fn CVT_nativeint_u64_from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> u64;

        // Type-range maxima.
        pub fn CVT_nativeint_u64_u64_max() -> u64;
        pub fn CVT_nativeint_u64_u128_max() -> u64;
        pub fn CVT_nativeint_u64_u256_max() -> u64;
    }
}
44
#[cfg(feature = "rt")]
mod rt_impls {
    //! Concrete native implementations of the `CVT_nativeint_u64_*` hooks,
    //! used when running outside the verifier (e.g. in unit tests).
    //! Arithmetic overflow and division by zero panic via the `checked_*`
    //! helpers, mirroring the unbounded-integer model as closely as a real
    //! `u64` allows.

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_eq(a: u64, b: u64) -> u64 {
        (a == b).into()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_lt(a: u64, b: u64) -> u64 {
        (a < b).into()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_le(a: u64, b: u64) -> u64 {
        (a <= b).into()
    }

    /// Panics on overflow (both debug and release).
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_add(a: u64, b: u64) -> u64 {
        a.checked_add(b).unwrap()
    }

    /// Panics on overflow (both debug and release).
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_mul(a: u64, b: u64) -> u64 {
        a.checked_mul(b).unwrap()
    }

    /// Panics on underflow (both debug and release).
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_sub(a: u64, b: u64) -> u64 {
        a.checked_sub(b).unwrap()
    }

    /// Panics on division by zero.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_div(a: u64, b: u64) -> u64 {
        a.checked_div(b).unwrap()
    }

    /// Ceiling division; panics on division by zero.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_div_ceil(a: u64, b: u64) -> u64 {
        a.div_ceil(b)
    }

    /// Floor of `a * b / c`; panics if the product overflows or `c == 0`.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_muldiv(a: u64, b: u64, c: u64) -> u64 {
        a.checked_mul(b).unwrap().checked_div(c).unwrap()
    }

    /// Ceiling of `a * b / c`; panics if the product overflows or `c == 0`.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_muldiv_ceil(a: u64, b: u64, c: u64) -> u64 {
        a.checked_mul(b).unwrap().div_ceil(c)
    }

    /// The concrete model resolves nondeterminism to a fixed value (0).
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_nondet() -> u64 {
        0
    }

    /// Only values that fit in a single limb are representable natively.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_from_u128(w0: u64, w1: u64) -> u64 {
        if w1 != 0 {
            panic!();
        }
        w0
    }

    /// Only values that fit in a single limb are representable natively.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> u64 {
        if w1 != 0 || w2 != 0 || w3 != 0 {
            panic!();
        }
        w0
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u64_max() -> u64 {
        u64::MAX
    }

    /// `u128::MAX` cannot be represented in the native single-limb model.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u128_max() -> u64 {
        // Was `assert!(false, ...)` followed by an unreachable `todo!()`;
        // a single `panic!` with the same payload says the same thing.
        panic!("u128_max is not supported");
    }

    /// `u256::MAX` cannot be represented in the native single-limb model.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u256_max() -> u64 {
        panic!("u256_max is not supported");
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_slt(a: u64, b: u64) -> u64 {
        ((a as i64) < (b as i64)).into()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_sle(a: u64, b: u64) -> u64 {
        ((a as i64) <= (b as i64)).into()
    }

    /// Sign-extends the low `bits` bits of `a` to the full 64-bit word.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_sext(a: u64, bits: u64) -> u64 {
        assert!(
            (bits > 0 && bits <= 64),
            "bits must be in 1..=64, got {}",
            bits
        );
        // Shift the sign bit of the `bits`-wide field into bit 63, then
        // arithmetic-shift back to replicate it.
        let s = 64 - bits;
        (((a << s) as i64) >> s) as u64
    }

    /// Two's-complement negation. `wrapping_neg` keeps the release-mode
    /// behavior of the previous `(a as i64).neg()` for every input while
    /// avoiding its debug-mode overflow panic on `i64::MIN`.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_neg(a: u64) -> u64 {
        a.wrapping_neg()
    }

    /// Keeps only the low `bits` bits of `a`.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_mask(a: u64, bits: u64) -> u64 {
        assert!(
            (bits > 0 && bits <= 64),
            "bits must be in 1..=64, got {}",
            bits
        );

        let mask = if bits == 64 {
            u64::MAX
        } else {
            (1u64 << bits) - 1
        };
        a & mask
    }
}
187
188use rt_decls::*;
189
impl NativeIntU64 {
    /// Builds a `NativeIntU64` from any type with a `From`/`Into` conversion.
    pub fn new<T>(v: T) -> Self
    where
        T: Into<NativeIntU64>,
    {
        v.into()
    }

    /// Ceiling division via the solver hook.
    pub fn div_ceil(self, rhs: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_div_ceil(self.0, rhs.0)) }
    }

    /// Floor of `self * num / den` via the solver hook.
    pub fn muldiv(self, num: Self, den: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_muldiv(self.0, num.0, den.0)) }
    }

    /// Ceiling of `self * num / den` via the solver hook.
    pub fn muldiv_ceil(self, num: Self, den: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_muldiv_ceil(self.0, num.0, den.0)) }
    }

    /// Builds a value from two little-endian 64-bit limbs (low word first).
    pub fn from_u128(w0: u64, w1: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_from_u128(w0, w1)) }
    }

    /// Builds a value from four little-endian 64-bit limbs (low word first).
    pub fn from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_from_u256(w0, w1, w2, w3)) }
    }

    /// `u64::MAX` as a `NativeIntU64`.
    pub fn u64_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u64_max()) }
    }

    /// `u128::MAX` as a `NativeIntU64`. NOTE(review): the native `rt`
    /// model panics here ("u128_max is not supported").
    pub fn u128_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u128_max()) }
    }

    /// 2^256 - 1 as a `NativeIntU64`. NOTE(review): the native `rt`
    /// model panics here ("u256_max is not supported").
    pub fn u256_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u256_max()) }
    }

    /// True iff the value fits in a `u8`.
    pub fn is_u8(self) -> bool {
        self <= Self::new(u8::MAX as u64)
    }

    /// True iff the value fits in a `u16`.
    pub fn is_u16(self) -> bool {
        self <= Self::new(u16::MAX as u64)
    }

    /// True iff the value fits in a `u32`.
    pub fn is_u32(self) -> bool {
        self <= Self::new(u32::MAX as u64)
    }

    /// True iff the value fits in a `u64`.
    pub fn is_u64(self) -> bool {
        self <= Self::u64_max()
    }

    /// True iff the value fits in a `u128` (compares against the
    /// `u128_max` hook, which the native `rt` model does not support).
    pub fn is_u128(self) -> bool {
        self <= Self::u128_max()
    }

    /// Always true: anything representable here fits in 256 bits.
    pub fn is_u256(self) -> bool {
        true
    }

    /// An arbitrary value chosen by the verifier.
    pub fn nondet() -> Self {
        cvlr_nondet::nondet()
    }

    /// NOTE(review): despite the name this is plain subtraction — it
    /// returns `Self`, not `Option<Self>`; under the native `rt` model an
    /// underflow panics inside the `sub` hook. Confirm the name is
    /// intentional (likely kept for API parity with integer types).
    pub fn checked_sub(&self, v: NativeIntU64) -> Self {
        *self - v
    }

    /// Sign-extends the low `bits` bits to the full word.
    pub fn sext(self, bits: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_sext(self.0, bits)) }
    }

    /// Signed (two's-complement) `self < other`.
    pub fn slt(self, other: Self) -> bool {
        unsafe { CVT_nativeint_u64_slt(self.0, other.0) != 0 }
    }

    /// Signed (two's-complement) `self <= other`.
    pub fn sle(self, other: Self) -> bool {
        unsafe { CVT_nativeint_u64_sle(self.0, other.0) != 0 }
    }

    /// Signed `self > other`, expressed as `other < self`.
    pub fn sgt(self, other: Self) -> bool {
        unsafe { CVT_nativeint_u64_slt(other.0, self.0) != 0 }
    }

    /// Signed `self >= other`, expressed as `other <= self`.
    pub fn sge(self, other: Self) -> bool {
        unsafe { CVT_nativeint_u64_sle(other.0, self.0) != 0 }
    }

    /// Keeps only the low `bits` bits.
    pub fn mask(self, bits: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_mask(self.0, bits)) }
    }

    /// The raw underlying `u64` word, with no range assumption applied.
    pub fn as_internal(&self) -> u64 {
        self.0
    }
}
292
impl PartialEq for NativeIntU64 {
    /// Equality via the solver hook (not a raw word comparison).
    #[inline(always)]
    fn eq(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_eq(self.0, other.0) != 0 }
    }
}
299
300#[allow(clippy::comparison_chain, clippy::non_canonical_partial_ord_impl)]
303impl PartialOrd for NativeIntU64 {
304 #[inline(always)]
305 fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
306 let ord = if self.0 == other.0 {
307 core::cmp::Ordering::Equal
308 } else if self.0 < other.0 {
309 core::cmp::Ordering::Less
310 } else {
311 core::cmp::Ordering::Greater
312 };
313 Some(ord)
314 }
315 #[inline(always)]
316 fn lt(&self, other: &Self) -> bool {
317 unsafe { CVT_nativeint_u64_lt(self.0, other.0) != 0 }
318 }
319 #[inline(always)]
320 fn le(&self, other: &Self) -> bool {
321 unsafe { CVT_nativeint_u64_le(self.0, other.0) != 0 }
322 }
323 #[inline(always)]
324 fn gt(&self, other: &Self) -> bool {
325 other.lt(self)
326 }
327 #[inline(always)]
328 fn ge(&self, other: &Self) -> bool {
329 other.le(self)
330 }
331}
332
333impl Ord for NativeIntU64 {
334 #[inline(always)]
335 fn cmp(&self, other: &Self) -> core::cmp::Ordering {
336 if self.lt(other) {
337 core::cmp::Ordering::Less
338 } else if self.gt(other) {
339 core::cmp::Ordering::Greater
340 } else {
341 core::cmp::Ordering::Equal
342 }
343 }
344
345 fn max(self, other: Self) -> Self {
346 if self.gt(&other) {
347 self
348 } else {
349 other
350 }
351 }
352
353 fn min(self, other: Self) -> Self {
354 if self.gt(&other) {
355 other
356 } else {
357 self
358 }
359 }
360
361 fn clamp(self, min: Self, max: Self) -> Self {
362 if self.gt(&max) {
363 max
364 } else if self.lt(&min) {
365 min
366 } else {
367 self
368 }
369 }
370}
371
372impl core::ops::Neg for NativeIntU64 {
373 type Output = Self;
374
375 fn neg(self) -> Self::Output {
376 unsafe { Self(CVT_nativeint_u64_neg(self.0)) }
377 }
378}
379
380impl core::ops::Add<NativeIntU64> for NativeIntU64 {
381 type Output = Self;
382
383 fn add(self, rhs: NativeIntU64) -> Self::Output {
384 unsafe { Self(CVT_nativeint_u64_add(self.0, rhs.0)) }
385 }
386}
387
388impl core::ops::Sub<NativeIntU64> for NativeIntU64 {
389 type Output = Self;
390
391 fn sub(self, rhs: NativeIntU64) -> Self::Output {
392 unsafe { Self(CVT_nativeint_u64_sub(self.0, rhs.0)) }
393 }
394}
395
396impl core::ops::Mul<NativeIntU64> for NativeIntU64 {
397 type Output = Self;
398
399 fn mul(self, rhs: NativeIntU64) -> Self::Output {
400 unsafe { Self(CVT_nativeint_u64_mul(self.0, rhs.0)) }
401 }
402}
403
404impl core::ops::Div<NativeIntU64> for NativeIntU64 {
405 type Output = Self;
406
407 fn div(self, rhs: NativeIntU64) -> Self::Output {
408 unsafe { Self(CVT_nativeint_u64_div(self.0, rhs.0)) }
409 }
410}
411
/// Generates an infallible widening `From<$t>` conversion into
/// `NativeIntU64` for unsigned types no wider than 64 bits.
macro_rules! impl_from_for_small_uint {
    ($t:ty) => {
        impl From<$t> for NativeIntU64 {
            fn from(value: $t) -> Self {
                Self(value as u64)
            }
        }
    };
}
421
/// Generates mixed-type arithmetic (`+`, `*`, `/`) and comparison
/// (`==`, `<`, `<=`, `>`, `>=`) operators between `NativeIntU64` and a
/// primitive numeric type, by converting the primitive operand first.
macro_rules! impl_core_traits_for_num {
    ($t:ty) => {
        impl core::ops::Add<$t> for NativeIntU64 {
            type Output = Self;

            fn add(self, rhs: $t) -> Self::Output {
                self + Self::from(rhs)
            }
        }

        impl core::ops::Mul<$t> for NativeIntU64 {
            type Output = Self;

            fn mul(self, rhs: $t) -> Self::Output {
                self * Self::from(rhs)
            }
        }

        impl core::ops::Div<$t> for NativeIntU64 {
            type Output = Self;

            fn div(self, rhs: $t) -> Self::Output {
                self / Self::from(rhs)
            }
        }

        impl PartialEq<$t> for NativeIntU64 {
            #[inline(always)]
            fn eq(&self, other: &$t) -> bool {
                *self == Self::from(*other)
            }
        }

        impl PartialOrd<$t> for NativeIntU64 {
            #[inline(always)]
            fn partial_cmp(&self, other: &$t) -> Option<core::cmp::Ordering> {
                self.partial_cmp(&Self::from(*other))
            }
            #[inline(always)]
            fn lt(&self, other: &$t) -> bool {
                *self < Self::from(*other)
            }
            #[inline(always)]
            fn le(&self, other: &$t) -> bool {
                *self <= Self::from(*other)
            }
            #[inline(always)]
            fn gt(&self, other: &$t) -> bool {
                *self > Self::from(*other)
            }
            #[inline(always)]
            fn ge(&self, other: &$t) -> bool {
                *self >= Self::from(*other)
            }
        }
    };
}
479
// Lossless embeddings: every unsigned type up to `u64` fits in one limb.
impl_from_for_small_uint!(u8);
impl_from_for_small_uint!(u16);
impl_from_for_small_uint!(u32);
impl_from_for_small_uint!(u64);
484
485impl From<u128> for NativeIntU64 {
486 fn from(value: u128) -> Self {
487 let w0: u64 = value as u64;
489 let w1: u64 = (value >> 64) as u64;
490
491 Self::from_u128(w0, w1)
492 }
493}
494
// Mixed-type operators for the unsigned primitives (u128 operands go
// through the limb-splitting `From<u128>` embedding above).
impl_core_traits_for_num!(u8);
impl_core_traits_for_num!(u16);
impl_core_traits_for_num!(u32);
impl_core_traits_for_num!(u64);
impl_core_traits_for_num!(u128);
500
501impl From<i32> for NativeIntU64 {
502 fn from(value: i32) -> Self {
503 if value.is_positive() {
504 Self::from(value as u64)
505 } else {
506 Self::from(0u64) - Self::from((value as i64).unsigned_abs())
507 }
508 }
509}
510impl_core_traits_for_num!(i32);
511
impl From<NativeIntU64> for u64 {
    /// Extracts the value as a `u64`, instructing the verifier to assume
    /// it is in range before exposing the raw word.
    fn from(value: NativeIntU64) -> Self {
        cvlr_asserts::cvlr_assume!(value.is_u64());
        value.as_internal()
    }
}
518
impl From<NativeIntU64> for u128 {
    /// Extracts the value as a `u128` via a nondeterministic "witness":
    /// pick an arbitrary `u128`, then assume it equals `value` (using the
    /// mixed-type `PartialEq<u128>`), letting the verifier bind `res` to
    /// the actual value.
    fn from(value: NativeIntU64) -> Self {
        cvlr_asserts::cvlr_assume!(value.is_u128());
        let res: u128 = cvlr_nondet::nondet();
        cvlr_asserts::cvlr_assume!(value == res);
        res
    }
}
527
528impl From<&[u64; 2]> for NativeIntU64 {
529 #[inline(always)]
530 fn from(value: &[u64; 2]) -> Self {
531 Self::from_u128(value[0], value[1])
532 }
533}
534
535impl From<&[u64; 4]> for NativeIntU64 {
536 #[inline(always)]
537 fn from(value: &[u64; 4]) -> Self {
538 Self::from_u256(value[0], value[1], value[2], value[3])
539 }
540}
541
542impl From<&[u8; 32]> for NativeIntU64 {
543 #[inline(always)]
544 fn from(value: &[u8; 32]) -> Self {
545 let (w0, rest) = value.split_at(8);
546 let w0 = u64::from_le_bytes(w0.try_into().unwrap());
547 let (w1, rest) = rest.split_at(8);
548 let w1 = u64::from_le_bytes(w1.try_into().unwrap());
549 let (w2, rest) = rest.split_at(8);
550 let w2 = u64::from_le_bytes(w2.try_into().unwrap());
551 let w3 = u64::from_le_bytes(rest.try_into().unwrap());
552 unsafe { Self(CVT_nativeint_u64_from_u256(w0, w1, w2, w3)) }
553 }
554}
555
556impl From<&[u8]> for NativeIntU64 {
557 #[inline(always)]
558 fn from(value: &[u8]) -> Self {
559 let v: &[u8; 32] = value.try_into().unwrap();
560 Self::from(v)
561 }
562}
563
impl cvlr_nondet::Nondet for NativeIntU64 {
    /// An arbitrary value chosen by the verifier (the native `rt` model
    /// resolves this to 0).
    fn nondet() -> NativeIntU64 {
        unsafe { Self(CVT_nativeint_u64_nondet()) }
    }
}
569
/// Generates a free function `$fn_name(v: $t) -> bool` that forwards to
/// the `NativeIntU64` range-check method `$method`.
macro_rules! impl_is_uint {
    ($fn_name:ident, $t:ty, $method:ident) => {
        pub fn $fn_name(v: $t) -> bool {
            NativeIntU64::from(v).$method()
        }
    };
}
577
// Free-function range checks, e.g. `is_u8(v) == NativeIntU64::from(v).is_u8()`.
impl_is_uint! { is_u8, u8, is_u8 }
impl_is_uint! { is_u16, u16, is_u16 }
impl_is_uint! { is_u32, u32, is_u32 }
impl_is_uint! { is_u64, u64, is_u64 }
impl_is_uint! { is_u128, u128, is_u128 }
583
#[cfg(test)]
mod tests {
    //! Unit tests; these exercise the concrete `rt_impls` model, so they
    //! require the `rt` feature. Several statements had been fused onto
    //! single lines by an earlier edit; this restores one statement per
    //! line without changing any assertion.
    use super::*;

    #[test]
    fn it_works() {
        let x: NativeIntU64 = 2.into();
        let y: NativeIntU64 = 4.into();
        assert_eq!(x + y, 6);
        assert!(x < 6);
    }

    #[test]
    fn nondet_test() {
        // The concrete model resolves nondeterminism to 0.
        let x: NativeIntU64 = cvlr_nondet::nondet();
        assert_eq!(x, 0);
    }

    #[test]
    fn test_arithmetic_operations() {
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();

        assert_eq!(a + b, 13);
        assert_eq!(a + 5, 15);

        assert_eq!(a - b, 7);

        assert_eq!(a * b, 30);
        assert_eq!(a * 2, 20);

        assert_eq!(a / b, 3);
        assert_eq!(a / 2, 5);
    }

    #[test]
    fn test_comparison_operations() {
        let a: NativeIntU64 = 5.into();
        let b: NativeIntU64 = 10.into();
        let c: NativeIntU64 = 5.into();

        assert_eq!(a, c);
        assert_ne!(a, b);

        assert!(a < b);
        assert!(!(b < a));
        assert!(!(a < c));

        assert!(a <= b);
        assert!(a <= c);
        assert!(!(b <= a));

        assert!(b > a);
        assert!(!(a > b));
        assert!(!(a > c));

        assert!(b >= a);
        assert!(a >= c);
        assert!(!(a >= b));
    }

    #[test]
    fn test_div_ceil() {
        // 10 / 3 rounds up to 4.
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();
        assert_eq!(a.div_ceil(b), 4);

        // Exact division: no rounding.
        let c: NativeIntU64 = 9.into();
        let d: NativeIntU64 = 3.into();
        assert_eq!(c.div_ceil(d), 3);

        // 11 / 5 rounds up to 3.
        let e: NativeIntU64 = 11.into();
        let f: NativeIntU64 = 5.into();
        assert_eq!(e.div_ceil(f), 3);
    }

    #[test]
    fn test_muldiv() {
        // 10 * 3 / 2 = 15.
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();
        let c: NativeIntU64 = 2.into();
        assert_eq!(a.muldiv(b, c), 15);

        // 100 * 7 / 4 = 175.
        let d: NativeIntU64 = 100.into();
        let e: NativeIntU64 = 7.into();
        let f: NativeIntU64 = 4.into();
        assert_eq!(d.muldiv(e, f), 175);
    }

    #[test]
    fn test_muldiv_ceil() {
        // ceil(10 * 3 / 4) = ceil(7.5) = 8.
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();
        let c: NativeIntU64 = 4.into();
        assert_eq!(a.muldiv_ceil(b, c), 8);

        // ceil(10 * 3 / 5) = 6 (exact).
        let d: NativeIntU64 = 10.into();
        let e: NativeIntU64 = 3.into();
        let f: NativeIntU64 = 5.into();
        assert_eq!(d.muldiv_ceil(e, f), 6);
    }

    #[test]
    fn test_from_u128() {
        let val = NativeIntU64::from_u128(42, 0);
        assert_eq!(val, 42);

        let val2 = NativeIntU64::from_u128(0x1234_5678_9abc_def0, 0);
        assert_eq!(val2, 0x1234_5678_9abc_def0u64);
    }

    #[test]
    fn test_from_u256() {
        let val = NativeIntU64::from_u256(100, 0, 0, 0);
        assert_eq!(val, 100);

        let val2 = NativeIntU64::from_u256(0xffff_ffff_ffff_ffff, 0, 0, 0);
        assert_eq!(val2, 0xffff_ffff_ffff_ffffu64);
    }

    #[test]
    fn test_from_primitive_types() {
        let val_u8: NativeIntU64 = 42u8.into();
        assert_eq!(val_u8, 42);

        let val_u16: NativeIntU64 = 1000u16.into();
        assert_eq!(val_u16, 1000);

        let val_u32: NativeIntU64 = 1_000_000u32.into();
        assert_eq!(val_u32, 1_000_000);

        let val_u64: NativeIntU64 = 1_000_000_000u64.into();
        assert_eq!(val_u64, 1_000_000_000);

        let val_u128: NativeIntU64 = 1_000_000_000_000u128.into();
        assert_eq!(val_u128, 1_000_000_000_000u64);
    }

    #[test]
    fn test_from_i32() {
        // Only non-negative inputs: negative values would route through the
        // subtraction hook, which panics on underflow in the native model.
        let val_pos: NativeIntU64 = 42i32.into();
        assert_eq!(val_pos, 42);

        let val_zero: NativeIntU64 = 0i32.into();
        assert_eq!(val_zero, 0);
    }

    #[test]
    fn test_from_array_u64_2() {
        let arr = [42u64, 0u64];
        let val: NativeIntU64 = (&arr).into();
        assert_eq!(val, 42);
    }

    #[test]
    fn test_from_array_u64_4() {
        let arr = [100u64, 0u64, 0u64, 0u64];
        let val: NativeIntU64 = (&arr).into();
        assert_eq!(val, 100);
    }

    #[test]
    fn test_from_array_u8_32() {
        let mut arr = [0u8; 32];
        arr[0..8].copy_from_slice(&0x1234567890abcdefu64.to_le_bytes());
        let val: NativeIntU64 = (&arr).into();
        assert_eq!(val, 0x1234567890abcdefu64);
    }

    #[test]
    fn test_max_functions() {
        // Only u64_max is exercised: u128_max/u256_max panic in the native model.
        let u64_max = NativeIntU64::u64_max();
        assert_eq!(u64_max, u64::MAX);
    }

    #[test]
    fn test_is_uint_functions() {
        assert!(NativeIntU64::from(0u64).is_u8());
        assert!(NativeIntU64::from(255u64).is_u8());
        assert!(!NativeIntU64::from(256u64).is_u8());

        assert!(NativeIntU64::from(0u64).is_u16());
        assert!(NativeIntU64::from(65535u64).is_u16());
        assert!(!NativeIntU64::from(65536u64).is_u16());

        assert!(NativeIntU64::from(0u64).is_u32());
        assert!(NativeIntU64::from(4294967295u32).is_u32());
        assert!(!NativeIntU64::from(4294967296u64).is_u32());

        assert!(NativeIntU64::from(0u64).is_u64());
        assert!(NativeIntU64::from(u64::MAX).is_u64());

        assert!(NativeIntU64::from(0u64).is_u256());
        assert!(NativeIntU64::from(u64::MAX).is_u256());
    }

    #[test]
    fn test_ord_trait_methods() {
        let a: NativeIntU64 = 5.into();
        let b: NativeIntU64 = 10.into();
        let c: NativeIntU64 = 7.into();

        assert_eq!(a.cmp(&b), core::cmp::Ordering::Less);
        assert_eq!(b.cmp(&a), core::cmp::Ordering::Greater);
        assert_eq!(a.cmp(&a), core::cmp::Ordering::Equal);

        assert_eq!(a.max(b), b);
        assert_eq!(b.max(a), b);
        assert_eq!(a.max(a), a);

        assert_eq!(a.min(b), a);
        assert_eq!(b.min(a), a);
        assert_eq!(a.min(a), a);

        assert_eq!(c.clamp(a, b), c);
        assert_eq!(a.clamp(c, b), c);
        assert_eq!(b.clamp(a, c), c);
        assert_eq!(a.clamp(a, b), a);
        assert_eq!(b.clamp(a, b), b);
    }

    #[test]
    fn test_edge_cases() {
        let zero: NativeIntU64 = 0.into();
        assert_eq!(zero + zero, zero);
        assert_eq!(zero * 100, zero);
        assert_eq!(zero / 1, zero);

        let one: NativeIntU64 = 1.into();
        assert_eq!(zero + one, one);
        assert_eq!(one * 100, 100);
        assert_eq!(one / one, one);

        let max: NativeIntU64 = u64::MAX.into();
        assert_eq!(max, NativeIntU64::u64_max());
        assert!(max.is_u64());
    }

    #[test]
    fn test_checked_sub() {
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();
        assert_eq!(a.checked_sub(b), 7);
    }

    #[test]
    fn test_new_method() {
        let val1 = NativeIntU64::from(42u8);
        assert_eq!(val1, 42u64);

        let val2 = NativeIntU64::from(1000u16);
        assert_eq!(val2, 1000u64);

        let val3 = NativeIntU64::from(1_000_000u32);
        assert_eq!(val3, 1_000_000u64);
    }

    #[test]
    fn test_as_internal() {
        let val = NativeIntU64::from(42);
        assert_eq!(val.as_internal(), 42u64);

        let max = NativeIntU64::from(u64::MAX);
        assert_eq!(max.as_internal(), u64::MAX);
    }

    #[test]
    fn test_arithmetic_with_primitives() {
        let a: NativeIntU64 = 10.into();

        assert_eq!(a + 5u8, 15);
        assert_eq!(a + 5u16, 15);
        assert_eq!(a + 5u32, 15);
        assert_eq!(a + 5u64, 15);
        assert_eq!(a + 5u128, 15);

        assert_eq!(a * 3u8, 30);
        assert_eq!(a * 3u16, 30);
        assert_eq!(a * 3u32, 30);
        assert_eq!(a * 3u64, 30);
        assert_eq!(a * 3u128, 30);

        assert_eq!(a / 2u8, 5);
        assert_eq!(a / 2u16, 5);
        assert_eq!(a / 2u32, 5);
        assert_eq!(a / 2u64, 5);
        assert_eq!(a / 2u128, 5);
    }

    #[test]
    fn test_sext() {
        // Positive in 8 bits: unchanged.
        let x: NativeIntU64 = 0x7Fu64.into();
        assert_eq!(x.sext(8), 0x7Fu64);

        // 0xFF is -1 as an 8-bit value: extends to all ones.
        let x: NativeIntU64 = 0xFFu64.into();
        assert_eq!(x.sext(8), 0xFFFF_FFFF_FFFF_FFFFu64);

        // 0x80 is -128 as an 8-bit value.
        let x: NativeIntU64 = 0x80u64.into();
        assert_eq!(x.sext(8), 0xFFFF_FFFF_FFFF_FF80u64);

        let x: NativeIntU64 = 0xFFFFu64.into();
        assert_eq!(x.sext(16), 0xFFFF_FFFF_FFFF_FFFFu64);

        let x: NativeIntU64 = 0x7FFFu64.into();
        assert_eq!(x.sext(16), 0x7FFFu64);

        // Full-width extension is the identity.
        let x: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert_eq!(x.sext(64), 0xFFFF_FFFF_FFFF_FFFFu64);
    }

    #[test]
    fn test_slt() {
        let a: NativeIntU64 = 0u64.into();
        let b: NativeIntU64 = 1u64.into();
        assert!(a.slt(b));
        assert!(!b.slt(a));

        // All-ones is -1 under signed interpretation, so it is below 0.
        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        let zero: NativeIntU64 = 0u64.into();
        assert!(neg_one.slt(zero));
        assert!(!zero.slt(neg_one));

        let x: NativeIntU64 = 42u64.into();
        assert!(!x.slt(x));

        let small: NativeIntU64 = 10u64.into();
        let large: NativeIntU64 = 100u64.into();
        assert!(small.slt(large));
        assert!(!large.slt(small));
    }

    #[test]
    fn test_sle() {
        let a: NativeIntU64 = 0u64.into();
        let b: NativeIntU64 = 1u64.into();
        assert!(a.sle(b));
        assert!(!b.sle(a));

        let x: NativeIntU64 = 42u64.into();
        assert!(x.sle(x));

        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        let zero: NativeIntU64 = 0u64.into();
        assert!(neg_one.sle(zero));
        assert!(!zero.sle(neg_one));
    }

    #[test]
    fn test_sgt() {
        let a: NativeIntU64 = 0u64.into();
        let b: NativeIntU64 = 1u64.into();
        assert!(b.sgt(a));
        assert!(!a.sgt(b));

        let zero: NativeIntU64 = 0u64.into();
        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert!(zero.sgt(neg_one));
        assert!(!neg_one.sgt(zero));

        let x: NativeIntU64 = 42u64.into();
        assert!(!x.sgt(x));
    }

    #[test]
    fn test_sge() {
        let a: NativeIntU64 = 0u64.into();
        let b: NativeIntU64 = 1u64.into();
        assert!(b.sge(a));
        assert!(!a.sge(b));

        let x: NativeIntU64 = 42u64.into();
        assert!(x.sge(x));

        let zero: NativeIntU64 = 0u64.into();
        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert!(zero.sge(neg_one));
        assert!(!neg_one.sge(zero));
    }

    #[test]
    fn test_mask() {
        // Value already within the mask width: unchanged.
        let x: NativeIntU64 = 0xFFu64.into();
        assert_eq!(x.mask(8), 0xFFu64);

        // High bit above the mask width is dropped.
        let x: NativeIntU64 = 0x1FFu64.into();
        assert_eq!(x.mask(8), 0xFFu64);

        let x: NativeIntU64 = 0x1234u64.into();
        assert_eq!(x.mask(4), 4u64);

        let x: NativeIntU64 = 3u64.into();
        assert_eq!(x.mask(1), 1u64);

        // Full-width mask is the identity.
        let x: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert_eq!(x.mask(64), 0xFFFF_FFFF_FFFF_FFFFu64);

        let x: NativeIntU64 = 0u64.into();
        assert_eq!(x.mask(8), 0u64);
    }

    #[test]
    fn test_neg() {
        // Negation is two's complement on the underlying word.
        let zero: NativeIntU64 = 0u64.into();
        assert_eq!(-zero, 0u64);

        let one: NativeIntU64 = 1u64.into();
        assert_eq!(-one, 0xFFFF_FFFF_FFFF_FFFFu64);

        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert_eq!(-neg_one, 1u64);

        let x: NativeIntU64 = 42u64.into();
        assert_eq!(-x, (-42i64 as u64));

        // Double negation round-trips.
        let x: NativeIntU64 = 100u64.into();
        assert_eq!(-(-x), 100u64);
    }
}
1065}