//! cvlr_mathint — `nativeint_u64.rs`
//!
//! Native mathematical integers backed by a `u64` handle. Symbolically, the
//! Certora Prover maps each handle to a 256-bit integer; a run-time
//! under-approximation is available behind the `rt` feature.

1#[derive(Eq, Debug, Copy, Clone)]
2/// Native Mathematical Integer (represented by u64 number)
3///
4/// The magic is that symbolically an SBF word is mapped to 256 bit symbolic
5/// integer.
6pub struct NativeIntU64(u64);
7
/// Declaration for external library for mathematical integers
///
/// This library is implemented symbolically by Certora Prover
/// Run-time under-approximation is provided in [rt_impls] module
mod rt_decls {
    /// Boolean result encoded as a `u64` (0 = false, non-zero = true).
    type BoolU64 = u64;

    extern "C" {
        // -- unsigned comparisons --
        pub fn CVT_nativeint_u64_eq(_: u64, _: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_lt(_: u64, _: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_le(_: u64, _: u64) -> BoolU64;

        // -- signed comparisons, negation, sign-extension, bit masking --
        pub fn CVT_nativeint_u64_slt(_: u64, _: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_sle(_: u64, _: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_neg(_: u64) -> u64;
        pub fn CVT_nativeint_u64_sext(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_mask(_: u64, _: u64) -> u64;

        // -- arithmetic (exact mathematical semantics, no wrapping) --
        pub fn CVT_nativeint_u64_add(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_sub(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_mul(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_div(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_div_ceil(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_muldiv(_: u64, _: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_muldiv_ceil(_: u64, _: u64, _: u64) -> u64;

        // -- non-deterministic value (symbolically: any integer) --
        pub fn CVT_nativeint_u64_nondet() -> u64;

        // -- conversions from/to wide integers, little-endian u64 limbs --
        pub fn CVT_nativeint_u64_from_u128(w0: u64, w1: u64) -> u64;
        pub fn CVT_nativeint_u64_into_u128(_: u64) -> u128;
        pub fn CVT_nativeint_u64_from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> u64;

        // -- maximum values of the fixed-width unsigned types --
        pub fn CVT_nativeint_u64_u64_max() -> u64;
        pub fn CVT_nativeint_u64_u128_max() -> u64;
        pub fn CVT_nativeint_u64_u256_max() -> u64;
    }
}
45
/// Run-time implementation of the external library
///
/// This implementation is intended as an under-approximation of the symbolic
/// behavior. It is intended to be used for testing.
///
/// All arithmetic uses checked operations: overflow/underflow/division-by-zero
/// panic rather than silently wrapping, since wrapping would diverge from the
/// exact mathematical semantics of the symbolic model.
#[cfg(feature = "rt")]
mod rt_impls {
    /// Equality; returns 1 for true, 0 for false.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_eq(a: u64, b: u64) -> u64 {
        (a == b).into()
    }

    /// Unsigned less-than; returns 1 for true, 0 for false.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_lt(a: u64, b: u64) -> u64 {
        (a < b).into()
    }

    /// Unsigned less-than-or-equal; returns 1 for true, 0 for false.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_le(a: u64, b: u64) -> u64 {
        (a <= b).into()
    }

    /// Addition; panics on overflow.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_add(a: u64, b: u64) -> u64 {
        a.checked_add(b).unwrap()
    }

    /// Multiplication; panics on overflow.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_mul(a: u64, b: u64) -> u64 {
        a.checked_mul(b).unwrap()
    }

    /// Subtraction; panics on underflow.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_sub(a: u64, b: u64) -> u64 {
        a.checked_sub(b).unwrap()
    }

    /// Floor division; panics on division by zero.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_div(a: u64, b: u64) -> u64 {
        a.checked_div(b).unwrap()
    }

    /// Ceiling division; panics on division by zero.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_div_ceil(a: u64, b: u64) -> u64 {
        a.div_ceil(b)
    }

    /// `(a * b) / c` with floor division; panics on overflow of the
    /// intermediate product or division by zero.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_muldiv(a: u64, b: u64, c: u64) -> u64 {
        a.checked_mul(b).unwrap().checked_div(c).unwrap()
    }

    /// `(a * b) / c` with ceiling division; panics on overflow of the
    /// intermediate product or division by zero.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_muldiv_ceil(a: u64, b: u64, c: u64) -> u64 {
        a.checked_mul(b).unwrap().div_ceil(c)
    }

    /// Non-deterministic value; the concrete under-approximation picks 0.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_nondet() -> u64 {
        // -- concrete implementation returns some specific number
        // -- it can, potentially, return a random number instead, or depend on
        // -- run-time of nondet
        0
    }

    /// Builds a value from two little-endian u64 limbs of a u128.
    /// The under-approximation only supports values that fit in a u64.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_from_u128(w0: u64, w1: u64) -> u64 {
        if w1 != 0 {
            panic!("from_u128: value does not fit in u64 (high limb is non-zero)");
        }
        w0
    }

    /// Widens the value to u128 (always exact in the under-approximation).
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_into_u128(a: u64) -> u128 {
        a as u128
    }

    /// Builds a value from four little-endian u64 limbs of a u256.
    /// The under-approximation only supports values that fit in a u64.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> u64 {
        if w1 != 0 || w2 != 0 || w3 != 0 {
            panic!("from_u256: value does not fit in u64 (high limbs are non-zero)");
        }
        w0
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u64_max() -> u64 {
        u64::MAX
    }

    /// Not representable in the u64 under-approximation; always panics.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u128_max() -> u64 {
        unimplemented!("u128_max is not supported");
    }

    /// Not representable in the u64 under-approximation; always panics.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u256_max() -> u64 {
        unimplemented!("u256_max is not supported");
    }

    /// Signed less-than (two's complement); returns 1 for true, 0 for false.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_slt(a: u64, b: u64) -> u64 {
        ((a as i64) < (b as i64)) as u64
    }

    /// Signed less-than-or-equal; returns 1 for true, 0 for false.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_sle(a: u64, b: u64) -> u64 {
        ((a as i64) <= (b as i64)) as u64
    }

    /// Sign-extends the low `bits` bits of `a` to the full 64 bits.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_sext(a: u64, bits: u64) -> u64 {
        // Handle edge case bits==0 to avoid shifting by 64 (UB)
        assert!(
            (bits > 0 && bits <= 64),
            "bits must be in 1..=64, got {}",
            bits
        );
        let s = 64 - bits;
        (((a << s) as i64) >> s) as u64
    }

    /// Two's-complement negation.
    /// NOTE(review): panics in debug builds for `a == i64::MIN as u64`
    /// (signed negation overflow) — acceptable for an under-approximation.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_neg(a: u64) -> u64 {
        use core::ops::Neg;
        (a as i64).neg() as u64
    }

    /// Keeps the low `bits` bits of `a`, zeroing the rest.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_mask(a: u64, bits: u64) -> u64 {
        // Handle edge case bits==0 to avoid shifting by 64 (UB)
        assert!(
            (bits > 0 && bits <= 64),
            "bits must be in 1..=64, got {}",
            bits
        );

        let mask = if bits == 64 {
            u64::MAX
        } else {
            (1 << bits) - 1
        };
        a & mask
    }
}
193
194use rt_decls::*;
195
impl NativeIntU64 {
    /// Constructs a native integer from anything convertible into one.
    pub fn new<T>(v: T) -> Self
    where
        T: Into<NativeIntU64>,
    {
        v.into()
    }

    /// Ceiling division `ceil(self / rhs)`, delegated to the symbolic library.
    pub fn div_ceil(self, rhs: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_div_ceil(self.0, rhs.0)) }
    }

    /// `(self * num) / den` with floor division.
    pub fn muldiv(self, num: Self, den: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_muldiv(self.0, num.0, den.0)) }
    }

    /// `(self * num) / den` with ceiling division.
    pub fn muldiv_ceil(self, num: Self, den: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_muldiv_ceil(self.0, num.0, den.0)) }
    }

    /// Builds a value from the two little-endian u64 limbs of a u128.
    pub fn from_u128(w0: u64, w1: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_from_u128(w0, w1)) }
    }

    /// Converts to u128, assuming (symbolically) that the value fits.
    pub fn into_u128(self) -> u128 {
        cvlr_asserts::cvlr_assume!(self.is_u128());
        unsafe { CVT_nativeint_u64_into_u128(self.0) }
    }

    /// Builds a value from the four little-endian u64 limbs of a u256.
    pub fn from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_from_u256(w0, w1, w2, w3)) }
    }

    /// Largest u64 value as a native integer.
    pub fn u64_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u64_max()) }
    }

    /// Largest u128 value. NOTE: the `rt` under-approximation panics here.
    pub fn u128_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u128_max()) }
    }

    /// Largest u256 value. NOTE: the `rt` under-approximation panics here.
    pub fn u256_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u256_max()) }
    }

    /// True iff the value fits in a u8.
    pub fn is_u8(self) -> bool {
        self <= Self::new(u8::MAX as u64)
    }

    /// True iff the value fits in a u16.
    pub fn is_u16(self) -> bool {
        self <= Self::new(u16::MAX as u64)
    }

    /// True iff the value fits in a u32.
    pub fn is_u32(self) -> bool {
        self <= Self::new(u32::MAX as u64)
    }

    /// True iff the value fits in a u64.
    pub fn is_u64(self) -> bool {
        self <= Self::u64_max()
    }

    /// True iff the value fits in a u128 (calls [`Self::u128_max`]).
    pub fn is_u128(self) -> bool {
        self <= Self::u128_max()
    }

    /// Always true: the symbolic representation is 256 bits wide.
    pub fn is_u256(self) -> bool {
        // native ints are 256 bits
        true
    }

    /// A non-deterministic native integer (symbolically unconstrained).
    pub fn nondet() -> Self {
        cvlr_nondet::nondet()
    }

    /// Subtraction. NOTE(review): despite the name this does NOT return an
    /// `Option` — it is a plain alias for `self - v`.
    pub fn checked_sub(&self, v: NativeIntU64) -> Self {
        *self - v
    }

    /// Sign-extends the low `bits` bits; `bits` is expected in 1..=64
    /// (enforced by the run-time implementation).
    pub fn sext(self, bits: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_sext(self.0, bits)) }
    }

    /// Signed (two's complement) less-than.
    pub fn slt(self, other: Self) -> bool {
        unsafe { CVT_nativeint_u64_slt(self.0, other.0) != 0 }
    }

    /// Signed less-than-or-equal.
    pub fn sle(self, other: Self) -> bool {
        unsafe { CVT_nativeint_u64_sle(self.0, other.0) != 0 }
    }

    /// Signed greater-than (arguments swapped into `slt`).
    pub fn sgt(self, other: Self) -> bool {
        unsafe { CVT_nativeint_u64_slt(other.0, self.0) != 0 }
    }

    /// Signed greater-than-or-equal (arguments swapped into `sle`).
    pub fn sge(self, other: Self) -> bool {
        unsafe { CVT_nativeint_u64_sle(other.0, self.0) != 0 }
    }

    /// Keeps the low `bits` bits; `bits` is expected in 1..=64
    /// (enforced by the run-time implementation).
    pub fn mask(self, bits: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_mask(self.0, bits)) }
    }

    // Expose internal representation. Internal use only.
    pub fn as_internal(&self) -> u64 {
        self.0
    }
}
303
impl PartialEq for NativeIntU64 {
    /// Equality is delegated to the symbolic library, not derived, so the
    /// prover sees a dedicated equality operation.
    #[inline(always)]
    fn eq(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_eq(self.0, other.0) != 0 }
    }
}
310
// We silence these two warnings from clippy: this code should be left as-is
// for the Certora Prover TAC slicer.
#[allow(clippy::comparison_chain, clippy::non_canonical_partial_ord_impl)]
impl PartialOrd for NativeIntU64 {
    /// Total comparison on the concrete u64 handle. `lt`/`le`/`gt`/`ge`
    /// below are overridden to go through the symbolic library instead.
    #[inline(always)]
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        let ord = if self.0 == other.0 {
            core::cmp::Ordering::Equal
        } else if self.0 < other.0 {
            core::cmp::Ordering::Less
        } else {
            core::cmp::Ordering::Greater
        };
        Some(ord)
    }
    #[inline(always)]
    fn lt(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_lt(self.0, other.0) != 0 }
    }
    #[inline(always)]
    fn le(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_le(self.0, other.0) != 0 }
    }
    // gt/ge are expressed by swapping the operands of lt/le.
    #[inline(always)]
    fn gt(&self, other: &Self) -> bool {
        other.lt(self)
    }
    #[inline(always)]
    fn ge(&self, other: &Self) -> bool {
        other.le(self)
    }
}
343
impl Ord for NativeIntU64 {
    /// Comparison built from the symbolic `lt`/`gt` primitives.
    #[inline(always)]
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        if self.lt(other) {
            core::cmp::Ordering::Less
        } else if self.gt(other) {
            core::cmp::Ordering::Greater
        } else {
            core::cmp::Ordering::Equal
        }
    }

    // Tie-breaking matches the std defaults: `max` returns `other` on equal,
    // `min` returns `self` on equal.
    fn max(self, other: Self) -> Self {
        if self.gt(&other) {
            self
        } else {
            other
        }
    }

    fn min(self, other: Self) -> Self {
        if self.gt(&other) {
            other
        } else {
            self
        }
    }

    // NOTE(review): unlike std's default, this does not assert `min <= max`;
    // with `min > max` the result differs from std — confirm this is intended.
    fn clamp(self, min: Self, max: Self) -> Self {
        if self.gt(&max) {
            max
        } else if self.lt(&min) {
            min
        } else {
            self
        }
    }
}
382
impl core::ops::Neg for NativeIntU64 {
    type Output = Self;

    /// Negation, delegated to the symbolic library.
    fn neg(self) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_neg(self.0)) }
    }
}

impl core::ops::Add<NativeIntU64> for NativeIntU64 {
    type Output = Self;

    /// Addition, delegated to the symbolic library
    /// (the `rt` under-approximation panics on u64 overflow).
    fn add(self, rhs: NativeIntU64) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_add(self.0, rhs.0)) }
    }
}

impl core::ops::Sub<NativeIntU64> for NativeIntU64 {
    type Output = Self;

    /// Subtraction (the `rt` under-approximation panics on underflow).
    fn sub(self, rhs: NativeIntU64) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_sub(self.0, rhs.0)) }
    }
}

impl core::ops::Mul<NativeIntU64> for NativeIntU64 {
    type Output = Self;

    /// Multiplication (the `rt` under-approximation panics on overflow).
    fn mul(self, rhs: NativeIntU64) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_mul(self.0, rhs.0)) }
    }
}

impl core::ops::Div<NativeIntU64> for NativeIntU64 {
    type Output = Self;

    /// Floor division (the `rt` under-approximation panics on div-by-zero).
    fn div(self, rhs: NativeIntU64) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_div(self.0, rhs.0)) }
    }
}
422
/// Implements a lossless `From<$uint>` conversion for unsigned integer types
/// that fit in a u64.
macro_rules! impl_from_for_small_uint {
    ($uint:ty) => {
        impl From<$uint> for NativeIntU64 {
            fn from(value: $uint) -> Self {
                Self(value as u64)
            }
        }
    };
}
432
/// Implements mixed-type arithmetic (`+`, `*`, `/`) and comparisons between
/// `NativeIntU64` and a primitive numeric type, by converting the primitive
/// operand with `Self::from` first.
macro_rules! impl_core_traits_for_num {
    ($num:ty) => {
        impl core::ops::Add<$num> for NativeIntU64 {
            type Output = Self;

            fn add(self, rhs: $num) -> Self::Output {
                self + Self::from(rhs)
            }
        }

        impl core::ops::Mul<$num> for NativeIntU64 {
            type Output = Self;

            fn mul(self, rhs: $num) -> Self::Output {
                self * Self::from(rhs)
            }
        }

        impl core::ops::Div<$num> for NativeIntU64 {
            type Output = Self;

            fn div(self, rhs: $num) -> Self::Output {
                self / Self::from(rhs)
            }
        }

        impl PartialEq<$num> for NativeIntU64 {
            #[inline(always)]
            fn eq(&self, other: &$num) -> bool {
                *self == Self::from(*other)
            }
        }

        impl PartialOrd<$num> for NativeIntU64 {
            #[inline(always)]
            fn partial_cmp(&self, other: &$num) -> Option<core::cmp::Ordering> {
                self.partial_cmp(&Self::from(*other))
            }
            #[inline(always)]
            fn lt(&self, other: &$num) -> bool {
                *self < Self::from(*other)
            }
            #[inline(always)]
            fn le(&self, other: &$num) -> bool {
                *self <= Self::from(*other)
            }
            #[inline(always)]
            fn gt(&self, other: &$num) -> bool {
                *self > Self::from(*other)
            }
            #[inline(always)]
            fn ge(&self, other: &$num) -> bool {
                *self >= Self::from(*other)
            }
        }
    };
}
490
// Lossless conversions from the unsigned types that fit in a u64.
impl_from_for_small_uint!(u8);
impl_from_for_small_uint!(u16);
impl_from_for_small_uint!(u32);
impl_from_for_small_uint!(u64);
495
496impl From<u128> for NativeIntU64 {
497    fn from(value: u128) -> Self {
498        // let w0: u64 = (value & 0xffff_ffff_ffff_ffff) as u64;
499        let w0: u64 = value as u64;
500        let w1: u64 = (value >> 64) as u64;
501
502        Self::from_u128(w0, w1)
503    }
504}
505
// Mixed-type arithmetic and comparisons for the unsigned primitive types.
impl_core_traits_for_num!(u8);
impl_core_traits_for_num!(u16);
impl_core_traits_for_num!(u32);
impl_core_traits_for_num!(u64);
impl_core_traits_for_num!(u128);
511
512impl From<i32> for NativeIntU64 {
513    fn from(value: i32) -> Self {
514        if value.is_positive() {
515            Self::from(value as u64)
516        } else {
517            Self::from(0u64) - Self::from((value as i64).unsigned_abs())
518        }
519    }
520}
521impl_core_traits_for_num!(i32);
522
impl From<NativeIntU64> for u64 {
    /// Extracts the raw u64 handle, symbolically assuming the value fits.
    fn from(value: NativeIntU64) -> Self {
        cvlr_asserts::cvlr_assume!(value.is_u64());
        value.as_internal()
    }
}
529
impl From<NativeIntU64> for u128 {
    /// Converts via [`NativeIntU64::into_u128`] (assumes the value fits).
    fn from(value: NativeIntU64) -> Self {
        value.into_u128()
    }
}
535
impl From<&[u64; 2]> for NativeIntU64 {
    /// Builds a value from two little-endian u64 limbs (a u128).
    #[inline(always)]
    fn from(value: &[u64; 2]) -> Self {
        Self::from_u128(value[0], value[1])
    }
}
542
impl From<&[u64; 4]> for NativeIntU64 {
    /// Builds a value from four little-endian u64 limbs (a u256).
    #[inline(always)]
    fn from(value: &[u64; 4]) -> Self {
        Self::from_u256(value[0], value[1], value[2], value[3])
    }
}
549
550impl From<&[u8; 32]> for NativeIntU64 {
551    #[inline(always)]
552    fn from(value: &[u8; 32]) -> Self {
553        let (w0, rest) = value.split_at(8);
554        let w0 = u64::from_le_bytes(w0.try_into().unwrap());
555        let (w1, rest) = rest.split_at(8);
556        let w1 = u64::from_le_bytes(w1.try_into().unwrap());
557        let (w2, rest) = rest.split_at(8);
558        let w2 = u64::from_le_bytes(w2.try_into().unwrap());
559        let w3 = u64::from_le_bytes(rest.try_into().unwrap());
560        unsafe { Self(CVT_nativeint_u64_from_u256(w0, w1, w2, w3)) }
561    }
562}
563
impl From<&[u8]> for NativeIntU64 {
    /// Builds a value from a byte slice.
    ///
    /// Panics if the slice is not exactly 32 bytes long.
    #[inline(always)]
    fn from(value: &[u8]) -> Self {
        let v: &[u8; 32] = value.try_into().unwrap();
        Self::from(v)
    }
}
571
impl cvlr_nondet::Nondet for NativeIntU64 {
    /// Symbolically: an unconstrained integer. The `rt` implementation
    /// returns a fixed concrete value.
    fn nondet() -> NativeIntU64 {
        unsafe { Self(CVT_nativeint_u64_nondet()) }
    }
}
577
/// Generates a free function `$name(v: $uint) -> bool` that checks whether
/// `v`, viewed as a native integer, fits in the width tested by `$is_uint`.
macro_rules! impl_is_uint {
    ($name:ident, $uint:ty, $is_uint:ident) => {
        pub fn $name(v: $uint) -> bool {
            NativeIntU64::from(v).$is_uint()
        }
    };
}

impl_is_uint! { is_u8, u8, is_u8 }
impl_is_uint! { is_u16, u16, is_u16 }
impl_is_uint! { is_u32, u32, is_u32 }
impl_is_uint! { is_u64, u64, is_u64 }
impl_is_uint! { is_u128, u128, is_u128 }
591
#[cfg(test)]
mod tests {
    //! Unit tests against the run-time under-approximation.
    //! NOTE(review): these call the `CVT_*` symbols, so they presumably need
    //! the `rt` feature enabled to link — confirm in CI configuration.
    use super::*;

    #[test]
    fn it_works() {
        let x: NativeIntU64 = 2.into();
        let y: NativeIntU64 = 4.into();
        assert_eq!(x + y, 6);
        assert!(x < 6);
    }

    #[test]
    fn nondet_test() {
        // The rt implementation of nondet deterministically returns 0.
        let x: NativeIntU64 = cvlr_nondet::nondet();
        assert_eq!(x, 0);
    }

    #[test]
    fn test_arithmetic_operations() {
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();

        // Addition
        assert_eq!(a + b, 13);
        assert_eq!(a + 5, 15);

        // Subtraction
        assert_eq!(a - b, 7);
        // Note: b - a would underflow and panic in rt mode

        // Multiplication
        assert_eq!(a * b, 30);
        assert_eq!(a * 2, 20);

        // Division
        assert_eq!(a / b, 3);
        assert_eq!(a / 2, 5);
    }

    #[test]
    fn test_comparison_operations() {
        let a: NativeIntU64 = 5.into();
        let b: NativeIntU64 = 10.into();
        let c: NativeIntU64 = 5.into();

        // Equality
        assert_eq!(a, c);
        assert_ne!(a, b);

        // Less than
        assert!(a < b);
        assert!(!(b < a));
        assert!(!(a < c));

        // Less than or equal
        assert!(a <= b);
        assert!(a <= c);
        assert!(!(b <= a));

        // Greater than
        assert!(b > a);
        assert!(!(a > b));
        assert!(!(a > c));

        // Greater than or equal
        assert!(b >= a);
        assert!(a >= c);
        assert!(!(a >= b));
    }

    #[test]
    fn test_div_ceil() {
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();
        assert_eq!(a.div_ceil(b), 4); // 10/3 = 3.33... -> 4

        let c: NativeIntU64 = 9.into();
        let d: NativeIntU64 = 3.into();
        assert_eq!(c.div_ceil(d), 3); // 9/3 = 3 -> 3

        let e: NativeIntU64 = 11.into();
        let f: NativeIntU64 = 5.into();
        assert_eq!(e.div_ceil(f), 3); // 11/5 = 2.2 -> 3
    }

    #[test]
    fn test_muldiv() {
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();
        let c: NativeIntU64 = 2.into();
        // (10 * 3) / 2 = 30 / 2 = 15
        assert_eq!(a.muldiv(b, c), 15);

        let d: NativeIntU64 = 100.into();
        let e: NativeIntU64 = 7.into();
        let f: NativeIntU64 = 4.into();
        // (100 * 7) / 4 = 700 / 4 = 175
        assert_eq!(d.muldiv(e, f), 175);
    }

    #[test]
    fn test_muldiv_ceil() {
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();
        let c: NativeIntU64 = 4.into();
        // (10 * 3) / 4 = 30 / 4 = 7.5 -> 8
        assert_eq!(a.muldiv_ceil(b, c), 8);

        let d: NativeIntU64 = 10.into();
        let e: NativeIntU64 = 3.into();
        let f: NativeIntU64 = 5.into();
        // (10 * 3) / 5 = 30 / 5 = 6 -> 6
        assert_eq!(d.muldiv_ceil(e, f), 6);
    }

    #[test]
    fn test_from_u128() {
        let val = NativeIntU64::from_u128(42, 0);
        assert_eq!(val, 42);

        let val2 = NativeIntU64::from_u128(0x1234_5678_9abc_def0, 0);
        assert_eq!(val2, 0x1234_5678_9abc_def0u64);
    }

    #[test]
    fn test_from_u256() {
        let val = NativeIntU64::from_u256(100, 0, 0, 0);
        assert_eq!(val, 100);

        let val2 = NativeIntU64::from_u256(0xffff_ffff_ffff_ffff, 0, 0, 0);
        assert_eq!(val2, 0xffff_ffff_ffff_ffffu64);
    }

    #[test]
    fn test_from_primitive_types() {
        // From u8
        let val_u8: NativeIntU64 = 42u8.into();
        assert_eq!(val_u8, 42);

        // From u16
        let val_u16: NativeIntU64 = 1000u16.into();
        assert_eq!(val_u16, 1000);

        // From u32
        let val_u32: NativeIntU64 = 1_000_000u32.into();
        assert_eq!(val_u32, 1_000_000);

        // From u64
        let val_u64: NativeIntU64 = 1_000_000_000u64.into();
        assert_eq!(val_u64, 1_000_000_000);

        // From u128
        let val_u128: NativeIntU64 = 1_000_000_000_000u128.into();
        assert_eq!(val_u128, 1_000_000_000_000u64);
    }

    #[test]
    fn test_from_i32() {
        let val_pos: NativeIntU64 = 42i32.into();
        assert_eq!(val_pos, 42);

        let val_zero: NativeIntU64 = 0i32.into();
        assert_eq!(val_zero, 0);

        // Note: Negative i32 values cause underflow in rt mode when converting
        // (0 - abs(value)), so we only test positive values here
    }

    #[test]
    fn test_from_array_u64_2() {
        let arr = [42u64, 0u64];
        let val: NativeIntU64 = (&arr).into();
        assert_eq!(val, 42);
    }

    #[test]
    fn test_from_array_u64_4() {
        let arr = [100u64, 0u64, 0u64, 0u64];
        let val: NativeIntU64 = (&arr).into();
        assert_eq!(val, 100);
    }

    #[test]
    fn test_from_array_u8_32() {
        let mut arr = [0u8; 32];
        // Set first 8 bytes to represent 0x1234567890abcdef in little-endian
        arr[0..8].copy_from_slice(&0x1234567890abcdefu64.to_le_bytes());
        let val: NativeIntU64 = (&arr).into();
        assert_eq!(val, 0x1234567890abcdefu64);
    }

    #[test]
    fn test_max_functions() {
        let u64_max = NativeIntU64::u64_max();
        assert_eq!(u64_max, u64::MAX);
    }

    #[test]
    fn test_is_uint_functions() {
        // Test is_u8
        assert!(NativeIntU64::from(0u64).is_u8());
        assert!(NativeIntU64::from(255u64).is_u8());
        assert!(!NativeIntU64::from(256u64).is_u8());

        // Test is_u16
        assert!(NativeIntU64::from(0u64).is_u16());
        assert!(NativeIntU64::from(65535u64).is_u16());
        assert!(!NativeIntU64::from(65536u64).is_u16());

        // Test is_u32
        assert!(NativeIntU64::from(0u64).is_u32());
        assert!(NativeIntU64::from(4294967295u32).is_u32());
        assert!(!NativeIntU64::from(4294967296u64).is_u32());

        // Test is_u64
        assert!(NativeIntU64::from(0u64).is_u64());
        assert!(NativeIntU64::from(u64::MAX).is_u64());

        // Test is_u128
        // Note: u128_max() panics in rt mode, so we skip testing is_u128()
        // as it would call u128_max() which panics

        // Test is_u256
        assert!(NativeIntU64::from(0u64).is_u256());
        assert!(NativeIntU64::from(u64::MAX).is_u256());
    }

    #[test]
    fn test_ord_trait_methods() {
        let a: NativeIntU64 = 5.into();
        let b: NativeIntU64 = 10.into();
        let c: NativeIntU64 = 7.into();

        // Test cmp
        assert_eq!(a.cmp(&b), core::cmp::Ordering::Less);
        assert_eq!(b.cmp(&a), core::cmp::Ordering::Greater);
        assert_eq!(a.cmp(&a), core::cmp::Ordering::Equal);

        // Test max
        assert_eq!(a.max(b), b);
        assert_eq!(b.max(a), b);
        assert_eq!(a.max(a), a);

        // Test min
        assert_eq!(a.min(b), a);
        assert_eq!(b.min(a), a);
        assert_eq!(a.min(a), a);

        // Test clamp
        assert_eq!(c.clamp(a, b), c); // c is between a and b
        assert_eq!(a.clamp(c, b), c); // a is less than min
        assert_eq!(b.clamp(a, c), c); // b is greater than max
        assert_eq!(a.clamp(a, b), a); // a is at min
        assert_eq!(b.clamp(a, b), b); // b is at max
    }

    #[test]
    fn test_edge_cases() {
        // Zero
        let zero: NativeIntU64 = 0.into();
        assert_eq!(zero + zero, zero);
        assert_eq!(zero * 100, zero);
        assert_eq!(zero / 1, zero);

        // One
        let one: NativeIntU64 = 1.into();
        assert_eq!(zero + one, one);
        assert_eq!(one * 100, 100);
        assert_eq!(one / one, one);

        // Maximum u64
        let max: NativeIntU64 = u64::MAX.into();
        assert_eq!(max, NativeIntU64::u64_max());
        assert!(max.is_u64());
    }

    #[test]
    fn test_checked_sub() {
        let a: NativeIntU64 = 10.into();
        let b: NativeIntU64 = 3.into();
        assert_eq!(a.checked_sub(b), 7);

        // Note: checked_sub is just an alias for sub, so underflow will panic in rt mode
        // We test the normal case only
    }

    #[test]
    fn test_new_method() {
        // Exercises the From conversions that back NativeIntU64::new.
        let val1 = NativeIntU64::from(42u8);
        assert_eq!(val1, 42u64);

        let val2 = NativeIntU64::from(1000u16);
        assert_eq!(val2, 1000u64);

        let val3 = NativeIntU64::from(1_000_000u32);
        assert_eq!(val3, 1_000_000u64);
    }

    #[test]
    fn test_as_internal() {
        let val = NativeIntU64::from(42);
        assert_eq!(val.as_internal(), 42u64);

        let max = NativeIntU64::from(u64::MAX);
        assert_eq!(max.as_internal(), u64::MAX);
    }

    #[test]
    fn test_arithmetic_with_primitives() {
        let a: NativeIntU64 = 10.into();

        // Addition with primitives
        assert_eq!(a + 5u8, 15);
        assert_eq!(a + 5u16, 15);
        assert_eq!(a + 5u32, 15);
        assert_eq!(a + 5u64, 15);
        assert_eq!(a + 5u128, 15);

        // Multiplication with primitives
        assert_eq!(a * 3u8, 30);
        assert_eq!(a * 3u16, 30);
        assert_eq!(a * 3u32, 30);
        assert_eq!(a * 3u64, 30);
        assert_eq!(a * 3u128, 30);

        // Division with primitives
        assert_eq!(a / 2u8, 5);
        assert_eq!(a / 2u16, 5);
        assert_eq!(a / 2u32, 5);
        assert_eq!(a / 2u64, 5);
        assert_eq!(a / 2u128, 5);
    }

    #[test]
    fn test_sext() {
        // Positive value in 8 bits stays positive
        let x: NativeIntU64 = 0x7Fu64.into();
        assert_eq!(x.sext(8), 0x7Fu64);

        // Negative value in 8 bits: 0xFF is -1, sign-extends to full u64
        let x: NativeIntU64 = 0xFFu64.into();
        assert_eq!(x.sext(8), 0xFFFF_FFFF_FFFF_FFFFu64);

        // 0x80 in 8 bits is -128
        let x: NativeIntU64 = 0x80u64.into();
        assert_eq!(x.sext(8), 0xFFFF_FFFF_FFFF_FF80u64);

        // 16-bit sign extension
        let x: NativeIntU64 = 0xFFFFu64.into();
        assert_eq!(x.sext(16), 0xFFFF_FFFF_FFFF_FFFFu64);

        let x: NativeIntU64 = 0x7FFFu64.into();
        assert_eq!(x.sext(16), 0x7FFFu64);

        // 64 bits: no change
        let x: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert_eq!(x.sext(64), 0xFFFF_FFFF_FFFF_FFFFu64);
    }

    #[test]
    fn test_slt() {
        // Unsigned 0 < 1
        let a: NativeIntU64 = 0u64.into();
        let b: NativeIntU64 = 1u64.into();
        assert!(a.slt(b));
        assert!(!b.slt(a));

        // Signed: -1 (0xFF...FF) < 0
        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        let zero: NativeIntU64 = 0u64.into();
        assert!(neg_one.slt(zero));
        assert!(!zero.slt(neg_one));

        // Equal values
        let x: NativeIntU64 = 42u64.into();
        assert!(!x.slt(x));

        // Positive comparison
        let small: NativeIntU64 = 10u64.into();
        let large: NativeIntU64 = 100u64.into();
        assert!(small.slt(large));
        assert!(!large.slt(small));
    }

    #[test]
    fn test_sle() {
        let a: NativeIntU64 = 0u64.into();
        let b: NativeIntU64 = 1u64.into();
        assert!(a.sle(b));
        assert!(!b.sle(a));

        let x: NativeIntU64 = 42u64.into();
        assert!(x.sle(x));

        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        let zero: NativeIntU64 = 0u64.into();
        assert!(neg_one.sle(zero));
        assert!(!zero.sle(neg_one));
    }

    #[test]
    fn test_sgt() {
        let a: NativeIntU64 = 0u64.into();
        let b: NativeIntU64 = 1u64.into();
        assert!(b.sgt(a));
        assert!(!a.sgt(b));

        let zero: NativeIntU64 = 0u64.into();
        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert!(zero.sgt(neg_one));
        assert!(!neg_one.sgt(zero));

        let x: NativeIntU64 = 42u64.into();
        assert!(!x.sgt(x));
    }

    #[test]
    fn test_sge() {
        let a: NativeIntU64 = 0u64.into();
        let b: NativeIntU64 = 1u64.into();
        assert!(b.sge(a));
        assert!(!a.sge(b));

        let x: NativeIntU64 = 42u64.into();
        assert!(x.sge(x));

        let zero: NativeIntU64 = 0u64.into();
        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert!(zero.sge(neg_one));
        assert!(!neg_one.sge(zero));
    }

    #[test]
    fn test_mask() {
        // Mask 8 bits: keep low 8
        let x: NativeIntU64 = 0xFFu64.into();
        assert_eq!(x.mask(8), 0xFFu64);

        let x: NativeIntU64 = 0x1FFu64.into();
        assert_eq!(x.mask(8), 0xFFu64);

        // Mask 4 bits
        let x: NativeIntU64 = 0x1234u64.into();
        assert_eq!(x.mask(4), 4u64); // 0x1234 & 0xF = 4

        // Mask 1 bit
        let x: NativeIntU64 = 3u64.into();
        assert_eq!(x.mask(1), 1u64);

        // Mask 64 bits: full value
        let x: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert_eq!(x.mask(64), 0xFFFF_FFFF_FFFF_FFFFu64);

        // Zero masked
        let x: NativeIntU64 = 0u64.into();
        assert_eq!(x.mask(8), 0u64);
    }

    #[test]
    fn test_neg() {
        // neg(0) = 0
        let zero: NativeIntU64 = 0u64.into();
        assert_eq!(-zero, 0u64);

        // neg(1) = -1 as u64 (two's complement)
        let one: NativeIntU64 = 1u64.into();
        assert_eq!(-one, 0xFFFF_FFFF_FFFF_FFFFu64);

        // neg(-1) = 1
        let neg_one: NativeIntU64 = 0xFFFF_FFFF_FFFF_FFFFu64.into();
        assert_eq!(-neg_one, 1u64);

        // neg(42) = -42
        let x: NativeIntU64 = 42u64.into();
        assert_eq!(-x, (-42i64 as u64));

        // neg(neg(x)) = x
        let x: NativeIntU64 = 100u64.into();
        assert_eq!(-(-x), 100u64);
    }
}