cvlr_mathint/
nativeint_u64.rs

/// Native Mathematical Integer (represented by a u64)
///
/// The magic is that, symbolically, an SBF word is mapped to a 256-bit
/// symbolic integer.
#[derive(Eq, Debug, Copy, Clone)]
pub struct NativeIntU64(u64);
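
// A minimal usage sketch (hedged: like the tests at the bottom of this file,
// it assumes test builds enable the `rt` feature, so that the extern
// declarations below are backed by the concrete run-time implementations).
#[cfg(test)]
#[test]
fn native_int_usage_sketch() {
    let a = NativeIntU64::new(10u64);
    let b = NativeIntU64::new(4u64);
    assert_eq!(a + b, NativeIntU64::new(14u64));
    assert!(b < a);
}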

/// Declarations of the external library for mathematical integers
///
/// This library is implemented symbolically by the Certora Prover.
/// A run-time under-approximation is provided in the [rt_impls] module.
mod rt_decls {
    type BoolU64 = u64;

    extern "C" {
        pub fn CVT_nativeint_u64_eq(_: u64, _: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_lt(_: u64, _: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_le(_: u64, _: u64) -> BoolU64;

        pub fn CVT_nativeint_u64_add(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_sub(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_mul(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_div(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_div_ceil(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_muldiv(_: u64, _: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_muldiv_ceil(_: u64, _: u64, _: u64) -> u64;

        pub fn CVT_nativeint_u64_nondet() -> u64;

        pub fn CVT_nativeint_u64_from_u128(w0: u64, w1: u64) -> u64;
        pub fn CVT_nativeint_u64_from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> u64;

        pub fn CVT_nativeint_u64_u64_max() -> u64;
        pub fn CVT_nativeint_u64_u128_max() -> u64;
        pub fn CVT_nativeint_u64_u256_max() -> u64;
    }
}

/// Run-time implementation of the external library
///
/// This implementation is an under-approximation of the symbolic behavior
/// and is intended for testing.
#[cfg(feature = "rt")]
mod rt_impls {
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_eq(a: u64, b: u64) -> u64 {
        (a == b).into()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_lt(a: u64, b: u64) -> u64 {
        (a < b).into()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_le(a: u64, b: u64) -> u64 {
        (a <= b).into()
    }

    // The checked arithmetic below panics whenever a result does not fit in
    // u64; this is where the run-time under-approximates the unbounded
    // symbolic integers.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_add(a: u64, b: u64) -> u64 {
        a.checked_add(b).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_mul(a: u64, b: u64) -> u64 {
        a.checked_mul(b).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_sub(a: u64, b: u64) -> u64 {
        a.checked_sub(b).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_div(a: u64, b: u64) -> u64 {
        a.checked_div(b).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_div_ceil(a: u64, b: u64) -> u64 {
        a.div_ceil(b)
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_muldiv(a: u64, b: u64, c: u64) -> u64 {
        a.checked_mul(b).unwrap().checked_div(c).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_muldiv_ceil(a: u64, b: u64, c: u64) -> u64 {
        a.checked_mul(b).unwrap().div_ceil(c)
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_nondet() -> u64 {
        // -- the concrete implementation returns one specific number
        // -- it could, potentially, return a random number instead, or
        // -- delegate to a run-time source of nondeterminism
        0
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_from_u128(w0: u64, w1: u64) -> u64 {
        // the u64 under-approximation cannot represent a non-zero high limb
        if w1 != 0 {
            panic!();
        }
        w0
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> u64 {
        // the u64 under-approximation cannot represent non-zero high limbs
        if w1 != 0 || w2 != 0 || w3 != 0 {
            panic!();
        }
        w0
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u64_max() -> u64 {
        u64::MAX
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u128_max() -> u64 {
        // u128::MAX does not fit in the u64 under-approximation
        panic!();
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u256_max() -> u64 {
        // u256::MAX does not fit in the u64 under-approximation
        panic!();
    }
}

use rt_decls::*;

impl NativeIntU64 {
    pub fn new<T>(v: T) -> Self
    where
        T: Into<NativeIntU64>,
    {
        v.into()
    }

    // The division helpers below are exercised by a hedged test sketch that
    // follows this impl block.
    pub fn div_ceil(self, rhs: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_div_ceil(self.0, rhs.0)) }
    }

    /// `self * num / den`, rounding the division down
    pub fn muldiv(self, num: Self, den: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_muldiv(self.0, num.0, den.0)) }
    }

    /// `self * num / den`, rounding the division up
    pub fn muldiv_ceil(self, num: Self, den: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_muldiv_ceil(self.0, num.0, den.0)) }
    }

    pub fn from_u128(w0: u64, w1: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_from_u128(w0, w1)) }
    }

    pub fn from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_from_u256(w0, w1, w2, w3)) }
    }

    pub fn u64_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u64_max()) }
    }

    pub fn u128_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u128_max()) }
    }

    pub fn u256_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u256_max()) }
    }

    pub fn is_u8(self) -> bool {
        self <= Self::new(u8::MAX as u64)
    }

    pub fn is_u16(self) -> bool {
        self <= Self::new(u16::MAX as u64)
    }

    pub fn is_u32(self) -> bool {
        self <= Self::new(u32::MAX as u64)
    }

    pub fn is_u64(self) -> bool {
        self <= Self::u64_max()
    }

    pub fn is_u128(self) -> bool {
        self <= Self::u128_max()
    }

    pub fn is_u256(self) -> bool {
        // native ints are 256 bits
        true
    }

    pub fn nondet() -> Self {
        cvlr_nondet::nondet()
    }

    pub fn checked_sub(&self, v: NativeIntU64) -> Self {
        *self - v
    }

    /// Exposes the internal representation. Internal use only.
    pub fn as_internal(&self) -> u64 {
        self.0
    }
}
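
// A hedged sketch of the division helpers above (it assumes the `rt` feature
// in test builds; symbolically these operate on unbounded integers, while the
// concrete run-time panics on u64 overflow).
#[cfg(test)]
#[test]
fn muldiv_sketch() {
    let x = NativeIntU64::new(7u64);
    let num = NativeIntU64::new(3u64);
    let den = NativeIntU64::new(2u64);
    // 7 * 3 = 21; 21 / 2 rounds down to 10 and up to 11.
    assert_eq!(x.muldiv(num, den), NativeIntU64::new(10u64));
    assert_eq!(x.muldiv_ceil(num, den), NativeIntU64::new(11u64));
    assert_eq!(x.div_ceil(den), NativeIntU64::new(4u64));
}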

impl PartialEq for NativeIntU64 {
    fn eq(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_eq(self.0, other.0) != 0 }
    }
}

// We silence these two clippy warnings: this code should be left as-is
// for the Certora Prover TAC slicer.
#[allow(clippy::comparison_chain, clippy::non_canonical_partial_ord_impl)]
impl PartialOrd for NativeIntU64 {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        let ord = if self.0 == other.0 {
            core::cmp::Ordering::Equal
        } else if self.0 < other.0 {
            core::cmp::Ordering::Less
        } else {
            core::cmp::Ordering::Greater
        };
        Some(ord)
    }
    fn lt(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_lt(self.0, other.0) != 0 }
    }
    fn le(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_le(self.0, other.0) != 0 }
    }
    fn gt(&self, other: &Self) -> bool {
        other.lt(self)
    }
    fn ge(&self, other: &Self) -> bool {
        other.le(self)
    }
}

impl Ord for NativeIntU64 {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        if self.lt(other) {
            core::cmp::Ordering::Less
        } else if self.gt(other) {
            core::cmp::Ordering::Greater
        } else {
            core::cmp::Ordering::Equal
        }
    }

    fn max(self, other: Self) -> Self {
        if self.gt(&other) {
            self
        } else {
            other
        }
    }

    fn min(self, other: Self) -> Self {
        if self.gt(&other) {
            other
        } else {
            self
        }
    }

    fn clamp(self, min: Self, max: Self) -> Self {
        if self.gt(&max) {
            max
        } else if self.lt(&min) {
            min
        } else {
            self
        }
    }
}
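
// A hedged sketch of the comparison plumbing above (assumes the `rt` feature
// in test builds): `<`/`<=` route to the extern comparisons, and `min`,
// `max`, and `clamp` are built on top of them.
#[cfg(test)]
#[test]
fn ordering_sketch() {
    let two = NativeIntU64::new(2u64);
    let three = NativeIntU64::new(3u64);
    assert!(two < three && three > two);
    assert_eq!(two.min(three), two);
    assert_eq!(two.max(three), three);
    assert_eq!(NativeIntU64::new(5u64).clamp(two, three), three);
}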

impl core::ops::Add<NativeIntU64> for NativeIntU64 {
    type Output = Self;

    fn add(self, rhs: NativeIntU64) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_add(self.0, rhs.0)) }
    }
}

impl core::ops::Sub<NativeIntU64> for NativeIntU64 {
    type Output = Self;

    fn sub(self, rhs: NativeIntU64) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_sub(self.0, rhs.0)) }
    }
}

impl core::ops::Mul<NativeIntU64> for NativeIntU64 {
    type Output = Self;

    fn mul(self, rhs: NativeIntU64) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_mul(self.0, rhs.0)) }
    }
}

impl core::ops::Div<NativeIntU64> for NativeIntU64 {
    type Output = Self;

    fn div(self, rhs: NativeIntU64) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_div(self.0, rhs.0)) }
    }
}

impl core::ops::Add<u64> for NativeIntU64 {
    type Output = Self;

    fn add(self, rhs: u64) -> Self::Output {
        self + Self(rhs)
    }
}

impl core::ops::Mul<u64> for NativeIntU64 {
    type Output = Self;

    fn mul(self, rhs: u64) -> Self::Output {
        self * Self(rhs)
    }
}

impl core::ops::Div<u64> for NativeIntU64 {
    type Output = Self;

    fn div(self, rhs: u64) -> Self::Output {
        self / Self(rhs)
    }
}
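
// A hedged sketch of the operator overloads above (assumes the `rt` feature
// in test builds): a bare u64 on the right-hand side is lifted into
// NativeIntU64 before the operation.
#[cfg(test)]
#[test]
fn mixed_operand_sketch() {
    let x = NativeIntU64::new(6u64);
    assert_eq!(x + 4u64, NativeIntU64::new(10u64));
    assert_eq!(x * 2u64, NativeIntU64::new(12u64));
    assert_eq!(x / 3u64, NativeIntU64::new(2u64));
}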

macro_rules! impl_from_for_uint {
    ($t:ty) => {
        impl From<$t> for NativeIntU64 {
            fn from(value: $t) -> Self {
                Self(value as u64)
            }
        }
    };
}

impl_from_for_uint!(u8);
impl_from_for_uint!(u16);
impl_from_for_uint!(u32);
impl_from_for_uint!(u64);

impl From<NativeIntU64> for u64 {
    fn from(value: NativeIntU64) -> Self {
        // exposing the internal u64 is sound only once the value is assumed
        // to fit in 64 bits
        cvlr_asserts::cvlr_assume!(value.is_u64());
        value.as_internal()
    }
}

impl From<NativeIntU64> for u128 {
    fn from(value: NativeIntU64) -> Self {
        cvlr_asserts::cvlr_assume!(value.is_u128());
        // pick a nondeterministic u128 and constrain it to equal `value`;
        // symbolically this is the unique u128 with the same numeric value
        let res: u128 = cvlr_nondet::nondet();
        cvlr_asserts::cvlr_assume!(value == res.into());
        res
    }
}

impl From<u128> for NativeIntU64 {
    fn from(value: u128) -> Self {
        // split into two little-endian 64-bit limbs
        let w0: u64 = value as u64;
        let w1: u64 = (value >> 64) as u64;

        Self::from_u128(w0, w1)
    }
}
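
// A hedged sketch of the u128 limb decomposition above (assumes the `rt`
// feature in test builds; the concrete from_u128 accepts only values whose
// high limb is zero, i.e. values that also fit in a u64).
#[cfg(test)]
#[test]
fn from_u128_sketch() {
    let v = NativeIntU64::from(42u128);
    assert_eq!(v, NativeIntU64::new(42u64));
}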

impl From<&[u64; 2]> for NativeIntU64 {
    fn from(value: &[u64; 2]) -> Self {
        Self::from_u128(value[0], value[1])
    }
}

impl From<&[u64; 4]> for NativeIntU64 {
    fn from(value: &[u64; 4]) -> Self {
        Self::from_u256(value[0], value[1], value[2], value[3])
    }
}

impl From<&[u8; 32]> for NativeIntU64 {
    fn from(value: &[u8; 32]) -> Self {
        // interpret the 32 bytes as four little-endian u64 limbs
        let (w0, rest) = value.split_at(8);
        let w0 = u64::from_le_bytes(w0.try_into().unwrap());
        let (w1, rest) = rest.split_at(8);
        let w1 = u64::from_le_bytes(w1.try_into().unwrap());
        let (w2, rest) = rest.split_at(8);
        let w2 = u64::from_le_bytes(w2.try_into().unwrap());
        let w3 = u64::from_le_bytes(rest.try_into().unwrap());
        Self::from_u256(w0, w1, w2, w3)
    }
}
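
// A hedged sketch of the little-endian byte decoding above (assumes the `rt`
// feature in test builds; the concrete from_u256 accepts only values whose
// upper three limbs are zero).
#[cfg(test)]
#[test]
fn from_le_bytes_sketch() {
    let mut bytes = [0u8; 32];
    bytes[0] = 7; // little-endian: the first byte is the least significant
    assert_eq!(NativeIntU64::from(&bytes), NativeIntU64::new(7u64));
}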

impl From<&[u8]> for NativeIntU64 {
    fn from(value: &[u8]) -> Self {
        let v: &[u8; 32] = value.try_into().unwrap();
        Self::from(v)
    }
}

impl cvlr_nondet::Nondet for NativeIntU64 {
    fn nondet() -> NativeIntU64 {
        unsafe { Self(CVT_nativeint_u64_nondet()) }
    }
}

macro_rules! impl_is_uint {
    ($name:ident, $uint:ty, $is_uint:ident) => {
        pub fn $name(v: $uint) -> bool {
            NativeIntU64::from(v).$is_uint()
        }
    };
}

impl_is_uint! { is_u8, u8, is_u8 }
impl_is_uint! { is_u16, u16, is_u16 }
impl_is_uint! { is_u32, u32, is_u32 }
impl_is_uint! { is_u64, u64, is_u64 }
impl_is_uint! { is_u128, u128, is_u128 }
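
// A hedged sketch of the width predicates (assumes the `rt` feature in test
// builds; is_u128 is avoided here because the concrete u128_max panics).
#[cfg(test)]
#[test]
fn is_uint_sketch() {
    assert!(NativeIntU64::new(255u64).is_u8());
    assert!(!NativeIntU64::new(256u64).is_u8());
    // the free-function form lifts the argument first, so it always holds
    // for a value of the matching width
    assert!(is_u32(u32::MAX));
}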

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn it_works() {
        let x = NativeIntU64(2);
        let y = NativeIntU64(4);
        assert_eq!(x + y, NativeIntU64(6));
        assert_eq!(x + y, 6u64.into());
        assert!(x < 6u64.into());
    }

    #[test]
    fn nondet_test() {
        let x: NativeIntU64 = cvlr_nondet::nondet();
        assert_eq!(x, 0u64.into());
    }
}