// cvlr_fixed/native_fixed.rs

use cvlr_asserts::cvlr_assume;
use cvlr_mathint::NativeInt;
use cvlr_nondet::nondet;
/// Generates a fixed-point numeric type with `F` fractional bits, backed by
/// `NativeInt` and bounded by the given unsigned primitive.
///
/// - `$NativeFixed`: name of the generated struct,
/// - `$uint`: backing unsigned primitive (`u64` / `u128`),
/// - `$is_uint`: `NativeInt` range predicate for that primitive.
macro_rules! native_fixed {
    ($NativeFixed:ident, $uint:ty, $is_uint:ident) => {
        /// Fixed-point number with `F` fractional bits; the logical value is
        /// `val / 2^F`.
        #[derive(Copy, Clone, Eq, Debug)]
        pub struct $NativeFixed<const F: u32> {
            // Raw backing value, scaled by 2^F.
            val: NativeInt,
        }
12
13 impl<const F: u32> $NativeFixed<F> {
14 const FRAC: u32 = F;
15 const BASE: u64 = 2u64.pow(Self::FRAC);
16
17 pub fn new(v: NativeInt) -> Self {
18 let val = v * Self::BASE;
19 cvlr_assume!(val.$is_uint());
20 Self { val }
21 }
22
23 #[inline(always)]
24 fn from_val(val: NativeInt) -> Self {
25 cvlr_assume!(val.$is_uint());
26 Self { val }
27 }
28
29 pub fn one() -> Self {
30 Self::from_bits(Self::BASE as $uint)
31 }
32
33 pub fn to_bits(&self) -> $uint {
34 cvlr_assume!(self.val.$is_uint());
35 self.val.into()
36 }
37
38 pub fn from_bits(bits: $uint) -> Self {
39 Self { val: bits.into() }
40 }
41
42 pub fn mul_by_int(&self, v: NativeInt) -> Self {
43 Self::from_val(self.val * v)
44 }
45
46 pub fn div_by_int(&self, v: NativeInt) -> Self {
47 Self::from_val(self.val / v)
48 }
49
50 pub fn checked_mul(&self, v: Self) -> Self {
51 Self::from_val((self.val * v.val) / Self::BASE)
52 }
53
54 pub fn checked_add(&self, v: Self) -> Self {
55 Self::from_val(self.val + v.val)
56 }
57
58 pub fn checked_div(&self, v: Self) -> Self {
59 cvlr_assume!(v.val > 0u64.into());
60 Self::from_val(self.val * Self::BASE / v.val)
61 }
62
63 pub fn saturating_sub(&self, v: Self) -> Self {
64 let val = if self.val <= v.val {
65 0u64.into()
66 } else {
67 self.val - v.val
68 };
69 Self::from_val(val)
70 }
71
72 pub fn checked_sub(&self, v: Self) -> Self {
73 cvlr_assume!(self.val >= v.val);
74 let val = self.val - v.val;
75 Self { val }
76 }
77
78 pub fn ge(&self, v: NativeInt) -> bool {
79 self.to_floor() >= v
80 }
81
82 pub fn gt(&self, v: NativeInt) -> bool {
83 self.to_floor() > v
84 }
85
86 pub fn le(&self, v: NativeInt) -> bool {
87 self.to_floor() <= v
88 }
89
90 pub fn lt(&self, v: NativeInt) -> bool {
91 self.to_floor() < v
92 }
93
94 pub fn to_floor(&self) -> NativeInt {
95 self.val / Self::BASE
96 }
97
98 pub fn floor(&self) -> Self {
99 self.to_floor().into()
100 }
101
102 pub fn to_ceil(&self) -> NativeInt {
103 let floor = self.to_floor();
104 let rem = *self - Self::new(floor);
105
106 if rem.val > 0u64.into() {
107 floor + 1
108 } else {
109 floor
110 }
111 }
112
113 pub fn ceil(&self) -> Self {
114 self.to_ceil().into()
115 }
116 }
117
118 impl<const F: u32> cvlr_nondet::Nondet for $NativeFixed<F> {
119 fn nondet() -> Self {
120 Self::from_val(nondet())
121 }
122 }
123
124 impl<const F: u32, T: Into<NativeInt>> From<T> for $NativeFixed<F> {
125 fn from(value: T) -> Self {
126 Self::new(value.into())
127 }
128 }
129
130 impl<const F: u32> cvlr_log::CvlrLog for $NativeFixed<F> {
131 #[inline(always)]
132 fn log(&self, tag: &str, logger: &mut cvlr_log::CvlrLogger) {
133 logger.log_u64_as_fp(tag, self.val.as_internal(), F as u64);
134 }
135 }
136
137 impl<const F: u32> core::ops::Add<$NativeFixed<F>> for $NativeFixed<F> {
138 type Output = Self;
139
140 fn add(self, v: Self) -> Self::Output {
141 self.checked_add(v)
142 }
143 }
144
145 impl<const F: u32> core::ops::Sub<$NativeFixed<F>> for $NativeFixed<F> {
146 type Output = Self;
147
148 fn sub(self, v: Self) -> Self::Output {
149 self.checked_sub(v)
150 }
151 }
152
153 impl<const F: u32> core::ops::Mul<$NativeFixed<F>> for $NativeFixed<F> {
154 type Output = Self;
155
156 fn mul(self, v: Self) -> Self::Output {
157 self.checked_mul(v)
158 }
159 }
160
161 impl<const F: u32, T: Into<NativeInt>> core::ops::Mul<T> for $NativeFixed<F> {
162 type Output = Self;
163
164 fn mul(self, v: T) -> Self::Output {
165 self.mul_by_int(v.into())
166 }
167 }
168
169 impl<const F: u32> core::ops::Div<$NativeFixed<F>> for $NativeFixed<F> {
170 type Output = Self;
171
172 fn div(self, v: Self) -> Self::Output {
173 self.checked_div(v)
174 }
175 }
176
177 impl<const F: u32, T: Into<NativeInt>> core::ops::Div<T> for $NativeFixed<F> {
178 type Output = Self;
179
180 fn div(self, v: T) -> Self::Output {
181 self.div_by_int(v.into())
182 }
183 }
184
185 impl<const F: u32> core::cmp::PartialEq for $NativeFixed<F> {
186 fn eq(&self, other: &Self) -> bool {
187 self.val == other.val
188 }
189 }
190
191 #[allow(clippy::non_canonical_partial_ord_impl)]
192 impl<const F: u32> core::cmp::PartialOrd for $NativeFixed<F> {
193 fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
194 self.val.partial_cmp(&other.val)
195 }
196 fn lt(&self, other: &Self) -> bool {
197 self.val.lt(&other.val)
198 }
199 fn le(&self, other: &Self) -> bool {
200 self.val.le(&other.val)
201 }
202 fn gt(&self, other: &Self) -> bool {
203 self.val.gt(&other.val)
204 }
205 fn ge(&self, other: &Self) -> bool {
206 self.val.ge(&other.val)
207 }
208 }
209
210 impl<const F: u32> core::cmp::Ord for $NativeFixed<F> {
211 fn cmp(&self, other: &Self) -> core::cmp::Ordering {
212 self.val.cmp(&other.val)
213 }
214
215 fn max(self, other: Self) -> Self {
216 if self > other {
217 self
218 } else {
219 other
220 }
221 }
222
223 fn min(self, other: Self) -> Self {
224 if self > other {
225 other
226 } else {
227 self
228 }
229 }
230
231 fn clamp(self, min: Self, max: Self) -> Self {
232 if self > max {
233 max
234 } else if self < min {
235 min
236 } else {
237 self
238 }
239 }
240 }
241 };
242}
243
// Concrete instantiations: fixed-point types bounded by u64 and u128.
native_fixed! { NativeFixedU64, u64, is_u64 }
native_fixed! { NativeFixedU128, u128, is_u128 }