cvlr_mathint/
nativeint_u64.rs

/// Native mathematical integer (represented by a u64 number)
///
/// The magic is that, symbolically, an SBF word is mapped to a 256-bit
/// symbolic integer.
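///
/// # Example
///
/// A minimal usage sketch (marked `ignore`: the backing `CVT_*` symbols only
/// link when the "rt" feature provides the run-time implementations):
///
/// ```ignore
/// let a = NativeIntU64::new(10u64);
/// let b = NativeIntU64::new(3u64);
/// assert_eq!(a + b, NativeIntU64::new(13u64));
/// ```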
#[derive(Eq, Debug, Copy, Clone)]
pub struct NativeIntU64(u64);

/// Declarations for the external library of mathematical integers
///
/// This library is implemented symbolically by the Certora Prover.
/// A run-time under-approximation is provided in the [rt_impls] module.
mod rt_decls {
    type BoolU64 = u64;

    extern "C" {
        pub fn CVT_nativeint_u64_eq(_: u64, _: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_lt(_: u64, _: u64) -> BoolU64;
        pub fn CVT_nativeint_u64_le(_: u64, _: u64) -> BoolU64;

        pub fn CVT_nativeint_u64_add(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_sub(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_mul(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_div(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_div_ceil(_: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_muldiv(_: u64, _: u64, _: u64) -> u64;
        pub fn CVT_nativeint_u64_muldiv_ceil(_: u64, _: u64, _: u64) -> u64;

        pub fn CVT_nativeint_u64_nondet() -> u64;

        pub fn CVT_nativeint_u64_from_u128(w0: u64, w1: u64) -> u64;
        pub fn CVT_nativeint_u64_from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> u64;

        pub fn CVT_nativeint_u64_u64_max() -> u64;
        pub fn CVT_nativeint_u64_u128_max() -> u64;
        pub fn CVT_nativeint_u64_u256_max() -> u64;
    }
}

/// Run-time implementation of the external library
///
/// This implementation is an under-approximation of the symbolic behavior,
/// intended for testing: arithmetic panics on `u64` overflow, whereas the
/// symbolic 256-bit semantics would not.
#[cfg(feature = "rt")]
mod rt_impls {
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_eq(a: u64, b: u64) -> u64 {
        (a == b).into()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_lt(a: u64, b: u64) -> u64 {
        (a < b).into()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_le(a: u64, b: u64) -> u64 {
        (a <= b).into()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_add(a: u64, b: u64) -> u64 {
        a.checked_add(b).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_mul(a: u64, b: u64) -> u64 {
        a.checked_mul(b).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_sub(a: u64, b: u64) -> u64 {
        a.checked_sub(b).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_div(a: u64, b: u64) -> u64 {
        a.checked_div(b).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_div_ceil(a: u64, b: u64) -> u64 {
        a.div_ceil(b)
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_muldiv(a: u64, b: u64, c: u64) -> u64 {
        a.checked_mul(b).unwrap().checked_div(c).unwrap()
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_muldiv_ceil(a: u64, b: u64, c: u64) -> u64 {
        a.checked_mul(b).unwrap().div_ceil(c)
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_nondet() -> u64 {
        // The concrete implementation returns a fixed number. It could,
        // alternatively, return a random number or defer to a run-time
        // source of nondeterminism.
        0
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_from_u128(w0: u64, w1: u64) -> u64 {
        // Only values that fit in the u64 run-time representation are
        // supported.
        if w1 != 0 {
            panic!();
        }
        w0
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> u64 {
        if w1 != 0 || w2 != 0 || w3 != 0 {
            panic!();
        }
        w0
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u64_max() -> u64 {
        u64::MAX
    }

    // u128::MAX and u256::MAX are not representable in the u64 run-time
    // representation.
    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u128_max() -> u64 {
        panic!();
    }

    #[no_mangle]
    pub extern "C" fn CVT_nativeint_u64_u256_max() -> u64 {
        panic!();
    }
}

use rt_decls::*;

impl NativeIntU64 {
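    /// Creates a new `NativeIntU64` from any type convertible into it.
    ///
    /// A minimal sketch (marked `ignore`; links only with the "rt" feature):
    ///
    /// ```ignore
    /// let x = NativeIntU64::new(42u8);
    /// let y = NativeIntU64::new(42u64);
    /// assert_eq!(x, y);
    /// ```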
    pub fn new<T>(v: T) -> Self
    where
        T: Into<NativeIntU64>,
    {
        v.into()
    }

    /// Ceiling division: `self / rhs`, rounded up.
    pub fn div_ceil(self, rhs: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_div_ceil(self.0, rhs.0)) }
    }

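    /// Full-precision `self * num / den` with floor division.
    ///
    /// A minimal sketch of the intent (marked `ignore`; `amount` and `rate`
    /// are hypothetical values, and linking requires the "rt" feature):
    ///
    /// ```ignore
    /// let fee = amount.muldiv(rate, NativeIntU64::new(10_000u64));
    /// ```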
    pub fn muldiv(self, num: Self, den: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_muldiv(self.0, num.0, den.0)) }
    }

    /// Full-precision `self * num / den` with ceiling division.
    pub fn muldiv_ceil(self, num: Self, den: Self) -> Self {
        unsafe { Self(CVT_nativeint_u64_muldiv_ceil(self.0, num.0, den.0)) }
    }

    /// Builds a value from two little-endian u64 words (`w0` is least
    /// significant).
    pub fn from_u128(w0: u64, w1: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_from_u128(w0, w1)) }
    }

    /// Builds a value from four little-endian u64 words (`w0` is least
    /// significant).
    pub fn from_u256(w0: u64, w1: u64, w2: u64, w3: u64) -> Self {
        unsafe { Self(CVT_nativeint_u64_from_u256(w0, w1, w2, w3)) }
    }

    pub fn u64_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u64_max()) }
    }

    pub fn u128_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u128_max()) }
    }

    pub fn u256_max() -> Self {
        unsafe { Self(CVT_nativeint_u64_u256_max()) }
    }

    pub fn is_u8(&self) -> bool {
        *self <= Self::new(u8::MAX as u64)
    }

    pub fn is_u16(&self) -> bool {
        *self <= Self::new(u16::MAX as u64)
    }

    pub fn is_u32(&self) -> bool {
        *self <= Self::new(u32::MAX as u64)
    }

    pub fn is_u64(&self) -> bool {
        *self <= Self::u64_max()
    }

    pub fn is_u128(&self) -> bool {
        *self <= Self::u128_max()
    }

    pub fn is_u256(&self) -> bool {
        // native ints are 256 bits
        true
    }

    pub fn nondet() -> Self {
        cvlr_nondet::nondet()
    }

    /// Subtraction; delegates to `Sub`. The run-time implementation panics
    /// on underflow.
    pub fn checked_sub(&self, v: NativeIntU64) -> Self {
        *self - v
    }

    /// Exposes the internal representation. Internal use only.
    pub fn as_internal(&self) -> u64 {
        self.0
    }
}

impl PartialEq for NativeIntU64 {
    fn eq(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_eq(self.0, other.0) != 0 }
    }
}

impl PartialOrd for NativeIntU64 {
    // We silence these two warnings from clippy: this code should be left as-is
    // for the Certora Prover TAC slicer.
    #[allow(clippy::non_canonical_partial_ord_impl)]
    #[allow(clippy::comparison_chain)]
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        let ord = if self.0 == other.0 {
            core::cmp::Ordering::Equal
        } else if self.0 < other.0 {
            core::cmp::Ordering::Less
        } else {
            core::cmp::Ordering::Greater
        };
        Some(ord)
    }
    fn lt(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_lt(self.0, other.0) != 0 }
    }
    fn le(&self, other: &Self) -> bool {
        unsafe { CVT_nativeint_u64_le(self.0, other.0) != 0 }
    }
    fn gt(&self, other: &Self) -> bool {
        other.lt(self)
    }
    fn ge(&self, other: &Self) -> bool {
        other.le(self)
    }
}

impl Ord for NativeIntU64 {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        if self.lt(other) {
            core::cmp::Ordering::Less
        } else if self.gt(other) {
            core::cmp::Ordering::Greater
        } else {
            core::cmp::Ordering::Equal
        }
    }

    fn max(self, other: Self) -> Self {
        if self.gt(&other) {
            self
        } else {
            other
        }
    }

    fn min(self, other: Self) -> Self {
        if self.gt(&other) {
            other
        } else {
            self
        }
    }

    fn clamp(self, min: Self, max: Self) -> Self {
        if self.gt(&max) {
            max
        } else if self.lt(&min) {
            min
        } else {
            self
        }
    }
}

impl core::ops::Add<NativeIntU64> for NativeIntU64 {
    type Output = Self;

    fn add(self, rhs: NativeIntU64) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_add(self.0, rhs.0)) }
    }
}

impl core::ops::Sub<NativeIntU64> for NativeIntU64 {
    type Output = Self;

    fn sub(self, rhs: NativeIntU64) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_sub(self.0, rhs.0)) }
    }
}

impl core::ops::Mul<NativeIntU64> for NativeIntU64 {
    type Output = Self;

    fn mul(self, rhs: NativeIntU64) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_mul(self.0, rhs.0)) }
    }
}

impl core::ops::Div<NativeIntU64> for NativeIntU64 {
    type Output = Self;

    fn div(self, rhs: NativeIntU64) -> Self::Output {
        unsafe { Self(CVT_nativeint_u64_div(self.0, rhs.0)) }
    }
}

impl core::ops::Add<u64> for NativeIntU64 {
    type Output = Self;

    fn add(self, rhs: u64) -> Self::Output {
        self + Self(rhs)
    }
}

impl core::ops::Mul<u64> for NativeIntU64 {
    type Output = Self;

    fn mul(self, rhs: u64) -> Self::Output {
        self * Self(rhs)
    }
}

impl core::ops::Div<u64> for NativeIntU64 {
    type Output = Self;

    fn div(self, rhs: u64) -> Self::Output {
        self / Self(rhs)
    }
}

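// Mixed-operand usage sketch (hypothetical values): plain u64 operands are
// lifted on the right-hand side, e.g.:
//
//     let total = NativeIntU64::new(10u64);
//     let half = total * 1u64 / 2u64; // == NativeIntU64::new(5u64)
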
macro_rules! impl_from_for_uint {
    ($t:ty) => {
        impl From<$t> for NativeIntU64 {
            fn from(value: $t) -> Self {
                Self(value as u64)
            }
        }
    };
}

impl_from_for_uint!(u8);
impl_from_for_uint!(u16);
impl_from_for_uint!(u32);
impl_from_for_uint!(u64);

impl From<NativeIntU64> for u64 {
    fn from(value: NativeIntU64) -> Self {
        cvlr_asserts::cvlr_assume!(value.is_u64());
        value.as_internal()
    }
}

impl From<NativeIntU64> for u128 {
    fn from(value: NativeIntU64) -> Self {
        cvlr_asserts::cvlr_assume!(value.is_u128());
        // Havoc a u128 and constrain it to equal `value`; the prover picks
        // the unique satisfying assignment.
        let res: u128 = cvlr_nondet::nondet();
        cvlr_asserts::cvlr_assume!(value == res.into());
        res
    }
}

impl From<u128> for NativeIntU64 {
    fn from(value: u128) -> Self {
        // Split into two little-endian 64-bit words.
        let w0: u64 = (value & 0xffff_ffff_ffff_ffff) as u64;
        let w1: u64 = (value >> 64) as u64;

        Self::from_u128(w0, w1)
    }
}

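// Worked instance of the split above (hypothetical value): for
// value = 2^64 + 5, the low word is w0 = 5 and the high word is w1 = 1.
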
impl From<&[u64; 2]> for NativeIntU64 {
    fn from(value: &[u64; 2]) -> Self {
        Self::from_u128(value[0], value[1])
    }
}

impl From<&[u64; 4]> for NativeIntU64 {
    fn from(value: &[u64; 4]) -> Self {
        Self::from_u256(value[0], value[1], value[2], value[3])
    }
}

impl From<&[u8; 32]> for NativeIntU64 {
    fn from(value: &[u8; 32]) -> Self {
        // Parse 32 little-endian bytes as four little-endian u64 words.
        let (w0, rest) = value.split_at(8);
        let w0 = u64::from_le_bytes(w0.try_into().unwrap());
        let (w1, rest) = rest.split_at(8);
        let w1 = u64::from_le_bytes(w1.try_into().unwrap());
        let (w2, rest) = rest.split_at(8);
        let w2 = u64::from_le_bytes(w2.try_into().unwrap());
        let w3 = u64::from_le_bytes(rest.try_into().unwrap());
        Self::from_u256(w0, w1, w2, w3)
    }
}

impl From<&[u8]> for NativeIntU64 {
    fn from(value: &[u8]) -> Self {
        // Panics if the slice is not exactly 32 bytes long.
        let v: &[u8; 32] = value.try_into().unwrap();
        Self::from(v)
    }
}

impl cvlr_nondet::Nondet for NativeIntU64 {
    fn nondet() -> NativeIntU64 {
        unsafe { Self(CVT_nativeint_u64_nondet()) }
    }
}

/// Generates free-function wrappers over the `is_u*` predicates.
macro_rules! impl_is_uint {
    ($name:ident, $uint:ty, $is_uint:ident) => {
        pub fn $name(v: $uint) -> bool {
            NativeIntU64::from(v).$is_uint()
        }
    };
}

impl_is_uint! { is_u8, u8, is_u8 }
impl_is_uint! { is_u16, u16, is_u16 }
impl_is_uint! { is_u32, u32, is_u32 }
impl_is_uint! { is_u64, u64, is_u64 }
impl_is_uint! { is_u128, u128, is_u128 }

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn it_works() {
        let x = NativeIntU64(2);
        let y = NativeIntU64(4);
        assert_eq!(x + y, NativeIntU64(6));
        assert_eq!(x + y, 6u64.into());
        assert!(x < 6u64.into());
    }

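    // A hedged sketch of the run-time semantics: these operations are exact
    // as long as intermediates fit in u64 (the "rt" implementations panic
    // on overflow), so the expected values below are ordinary integer
    // arithmetic.
    #[test]
    fn muldiv_test() {
        let a = NativeIntU64::new(10u64);
        assert_eq!(a.muldiv(3u64.into(), 4u64.into()), 7u64.into()); // floor(30 / 4)
        assert_eq!(a.muldiv_ceil(3u64.into(), 4u64.into()), 8u64.into()); // ceil(30 / 4)
        assert_eq!(a.div_ceil(4u64.into()), 3u64.into()); // ceil(10 / 4)
    }
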
    #[test]
    fn nondet_test() {
        // The run-time implementation of nondet always returns 0.
        let x: NativeIntU64 = cvlr_nondet::nondet();
        assert_eq!(x, 0u64.into());
    }
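
    // A minimal sketch of the byte-level conversion: the first 8 bytes form
    // the least-significant little-endian word.
    #[test]
    fn from_bytes_test() {
        let mut bytes = [0u8; 32];
        bytes[0] = 42;
        assert_eq!(NativeIntU64::from(&bytes), 42u64.into());
    }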
}