1use crate::{
17 helpers::{FloatHelper, FloatKind, IntHelper, Widest},
18 traits::Fixed,
19 types::extra::{LeEqU128, LeEqU16, LeEqU32, LeEqU64, LeEqU8},
20 FixedI128, FixedI16, FixedI32, FixedI64, FixedI8, FixedU128, FixedU16, FixedU32, FixedU64,
21 FixedU8,
22};
23use core::cmp::Ordering;
24#[cfg(feature = "f16")]
25use half::{bf16, f16};
26
// Generates `PartialEq`/`PartialOrd` between two fixed-point types that may
// differ both in the underlying integer type and in the number of fractional
// bits.
//
// `to_fixed_helper` re-expresses the right-hand side in the left-hand side's
// format and reports:
//   * `bits`: the converted bit pattern (as a `Widest` value),
//   * `dir`: how the converted value compares to the exact original value
//     (`Equal` means the conversion was lossless; `Less` means it rounded
//     down — inferred from its use as a tie-breaker below),
//   * `overflow`: whether the original value lies outside the left-hand
//     type's representable range.
macro_rules! fixed_cmp_fixed {
    ($Lhs:ident($LhsLeEqU:ident), $Rhs:ident($RhsLeEqU:ident)) => {
        impl<FracLhs: $LhsLeEqU, FracRhs: $RhsLeEqU> PartialEq<$Rhs<FracRhs>> for $Lhs<FracLhs> {
            #[inline]
            fn eq(&self, rhs: &$Rhs<FracRhs>) -> bool {
                let conv = rhs.to_bits().to_fixed_helper(
                    <$Rhs<FracRhs>>::FRAC_NBITS as i32,
                    Self::FRAC_NBITS,
                    Self::INT_NBITS,
                );
                // Both arms perform the same cast; the `Widest` variant only
                // records whether the wide value is negative.
                let rhs_bits = match conv.bits {
                    Widest::Unsigned(bits) => bits as <Self as Fixed>::Bits,
                    Widest::Negative(bits) => bits as <Self as Fixed>::Bits,
                };
                // Equal only when the conversion was exact, in range, and the
                // converted bits match ours.
                conv.dir == Ordering::Equal && !conv.overflow && rhs_bits == self.to_bits()
            }
        }

        impl<FracLhs: $LhsLeEqU, FracRhs: $RhsLeEqU> PartialOrd<$Rhs<FracRhs>> for $Lhs<FracLhs> {
            #[inline]
            fn partial_cmp(&self, rhs: &$Rhs<FracRhs>) -> Option<Ordering> {
                // Fast path: differing signs decide the ordering outright and
                // sidestep signed/unsigned conversion subtleties.
                match (self.to_bits().is_negative(), rhs.to_bits().is_negative()) {
                    (false, true) => return Some(Ordering::Greater),
                    (true, false) => return Some(Ordering::Less),
                    _ => {}
                }
                let conv = rhs.to_bits().to_fixed_helper(
                    <$Rhs<FracRhs>>::FRAC_NBITS as i32,
                    Self::FRAC_NBITS,
                    Self::INT_NBITS,
                );
                if conv.overflow {
                    // `rhs` is outside our representable range. Both operands
                    // have the same sign here, so the sign of `rhs` tells us
                    // on which side of our range it fell off.
                    return if rhs.to_bits().is_negative() {
                        Some(Ordering::Greater)
                    } else {
                        Some(Ordering::Less)
                    };
                }
                let rhs_bits = match conv.bits {
                    Widest::Unsigned(bits) => bits as <Self as Fixed>::Bits,
                    Widest::Negative(bits) => bits as <Self as Fixed>::Bits,
                };
                // Compare converted bits; when equal, the rounding direction
                // of the conversion breaks the tie.
                Some(self.to_bits().cmp(&rhs_bits).then(conv.dir))
            }

            // Specialized `lt` mirroring `partial_cmp` without constructing an
            // `Ordering`; `le`/`gt`/`ge` below are all routed through `lt`.
            #[inline]
            fn lt(&self, rhs: &$Rhs<FracRhs>) -> bool {
                match (self.to_bits().is_negative(), rhs.to_bits().is_negative()) {
                    (false, true) => return false,
                    (true, false) => return true,
                    _ => {}
                }
                let conv = rhs.to_bits().to_fixed_helper(
                    <$Rhs<FracRhs>>::FRAC_NBITS as i32,
                    Self::FRAC_NBITS,
                    Self::INT_NBITS,
                );
                if conv.overflow {
                    // An out-of-range positive `rhs` is above us; an
                    // out-of-range negative `rhs` is below us.
                    return !rhs.to_bits().is_negative();
                }
                let rhs_bits = match conv.bits {
                    Widest::Unsigned(bits) => bits as <Self as Fixed>::Bits,
                    Widest::Negative(bits) => bits as <Self as Fixed>::Bits,
                };
                // Strictly less on bits, or bit-equal while `rhs` was rounded
                // down during conversion (so the exact `rhs` is larger).
                self.to_bits() < rhs_bits
                    || (self.to_bits() == rhs_bits && conv.dir == Ordering::Less)
            }

            // Valid because fixed-to-fixed comparison is a total order (no
            // NaN-like values). `rhs.lt(self)` relies on the mirror impl
            // generated when this macro is instantiated for the swapped pair.
            #[inline]
            fn le(&self, rhs: &$Rhs<FracRhs>) -> bool {
                !rhs.lt(self)
            }

            #[inline]
            fn gt(&self, rhs: &$Rhs<FracRhs>) -> bool {
                rhs.lt(self)
            }

            #[inline]
            fn ge(&self, rhs: &$Rhs<FracRhs>) -> bool {
                !self.lt(rhs)
            }
        }
    };
}
112
// Generates `PartialEq`/`PartialOrd` between a fixed-point type and a
// primitive integer. The integer is first viewed as a fixed-point number with
// zero fractional bits (`to_repr_fixed`), after which everything delegates to
// the fixed-vs-fixed implementations produced by `fixed_cmp_fixed!`.
macro_rules! fixed_cmp_int {
    ($Fixed:ident($LeEq:ident), $Num:ident) => {
        impl<Frac: $LeEq> PartialEq<$Num> for $Fixed<Frac> {
            #[inline]
            fn eq(&self, other: &$Num) -> bool {
                let as_fixed = other.to_repr_fixed();
                self.eq(&as_fixed)
            }
        }

        impl<Frac: $LeEq> PartialEq<$Fixed<Frac>> for $Num {
            #[inline]
            fn eq(&self, other: &$Fixed<Frac>) -> bool {
                let as_fixed = self.to_repr_fixed();
                as_fixed.eq(other)
            }
        }

        impl<Frac: $LeEq> PartialOrd<$Num> for $Fixed<Frac> {
            #[inline]
            fn partial_cmp(&self, other: &$Num) -> Option<Ordering> {
                let as_fixed = other.to_repr_fixed();
                self.partial_cmp(&as_fixed)
            }

            #[inline]
            fn lt(&self, other: &$Num) -> bool {
                let as_fixed = other.to_repr_fixed();
                self.lt(&as_fixed)
            }

            // `a <= b` is exactly `!(b < a)` for this total order; routing
            // every operator through the hand-written `lt` implementations
            // keeps all four comparisons mutually consistent.
            #[inline]
            fn le(&self, other: &$Num) -> bool {
                !other.lt(self)
            }

            #[inline]
            fn gt(&self, other: &$Num) -> bool {
                other.lt(self)
            }

            #[inline]
            fn ge(&self, other: &$Num) -> bool {
                !self.lt(other)
            }
        }

        impl<Frac: $LeEq> PartialOrd<$Fixed<Frac>> for $Num {
            #[inline]
            fn partial_cmp(&self, other: &$Fixed<Frac>) -> Option<Ordering> {
                let as_fixed = self.to_repr_fixed();
                as_fixed.partial_cmp(other)
            }

            #[inline]
            fn lt(&self, other: &$Fixed<Frac>) -> bool {
                let as_fixed = self.to_repr_fixed();
                as_fixed.lt(other)
            }

            #[inline]
            fn le(&self, other: &$Fixed<Frac>) -> bool {
                !other.lt(self)
            }

            #[inline]
            fn gt(&self, other: &$Fixed<Frac>) -> bool {
                other.lt(self)
            }

            #[inline]
            fn ge(&self, other: &$Fixed<Frac>) -> bool {
                !self.lt(other)
            }
        }
    };
}
184
// Generates `PartialEq`/`PartialOrd` between a fixed-point type and a
// primitive float.
//
// `to_float_kind` classifies the float relative to this fixed-point format as
// NaN, ±infinity, or finite. The finite case carries the float's sign and a
// conversion (`conv`) of the float into this format, with:
//   * `bits`: the converted bit pattern (as a `Widest` value),
//   * `dir`: how the converted value compares to the exact original float
//     (`Equal` means lossless),
//   * `overflow`: whether the float lies outside the representable range.
macro_rules! fixed_cmp_float {
    ($Fix:ident($LeEqU:ident), $Float:ident) => {
        impl<Frac: $LeEqU> PartialEq<$Float> for $Fix<Frac> {
            #[inline]
            fn eq(&self, rhs: &$Float) -> bool {
                // NaN and ±infinity can never equal a finite fixed-point value.
                let conv = match rhs.to_float_kind(Self::FRAC_NBITS, Self::INT_NBITS) {
                    FloatKind::Finite { conv, .. } => conv,
                    _ => return false,
                };
                // Both arms perform the same cast; the `Widest` variant only
                // records signedness.
                let rhs_bits = match conv.bits {
                    Widest::Unsigned(bits) => bits as <Self as Fixed>::Bits,
                    Widest::Negative(bits) => bits as <Self as Fixed>::Bits,
                };
                // Equal only when the conversion was exact, in range, and the
                // converted bits match ours.
                conv.dir == Ordering::Equal && !conv.overflow && rhs_bits == self.to_bits()
            }
        }

        impl<Frac: $LeEqU> PartialEq<$Fix<Frac>> for $Float {
            #[inline]
            fn eq(&self, rhs: &$Fix<Frac>) -> bool {
                // Equality is symmetric; reuse the impl above.
                rhs.eq(self)
            }
        }

        impl<Frac: $LeEqU> PartialOrd<$Float> for $Fix<Frac> {
            #[inline]
            fn partial_cmp(&self, rhs: &$Float) -> Option<Ordering> {
                let (rhs_is_neg, conv) = match rhs.to_float_kind(Self::FRAC_NBITS, Self::INT_NBITS)
                {
                    // NaN is unordered with everything.
                    FloatKind::NaN => return None,
                    // -∞ is below and +∞ is above every finite value.
                    FloatKind::Infinite { neg } => {
                        return if neg {
                            Some(Ordering::Greater)
                        } else {
                            Some(Ordering::Less)
                        };
                    }
                    FloatKind::Finite { neg, conv } => (neg, conv),
                };
                // Differing signs decide the ordering outright.
                match (self.to_bits().is_negative(), rhs_is_neg) {
                    (false, true) => return Some(Ordering::Greater),
                    (true, false) => return Some(Ordering::Less),
                    _ => {}
                }
                if conv.overflow {
                    // The float is outside our representable range; with equal
                    // signs its own sign says on which side it fell off.
                    return if rhs_is_neg {
                        Some(Ordering::Greater)
                    } else {
                        Some(Ordering::Less)
                    };
                }
                let rhs_bits = match conv.bits {
                    Widest::Unsigned(bits) => bits as <Self as Fixed>::Bits,
                    Widest::Negative(bits) => bits as <Self as Fixed>::Bits,
                };
                // Compare converted bits; when equal, the rounding direction
                // of the float's conversion breaks the tie.
                Some(self.to_bits().cmp(&rhs_bits).then(conv.dir))
            }

            // Specialized `lt` mirroring `partial_cmp` without constructing an
            // `Ordering`.
            #[inline]
            fn lt(&self, rhs: &$Float) -> bool {
                let (rhs_is_neg, conv) = match rhs.to_float_kind(Self::FRAC_NBITS, Self::INT_NBITS)
                {
                    // Nothing is less than NaN.
                    FloatKind::NaN => return false,
                    FloatKind::Infinite { neg } => return !neg,
                    FloatKind::Finite { neg, conv } => (neg, conv),
                };

                match (self.to_bits().is_negative(), rhs_is_neg) {
                    (false, true) => return false,
                    (true, false) => return true,
                    _ => {}
                }
                if conv.overflow {
                    return !rhs_is_neg;
                }
                let rhs_bits = match conv.bits {
                    Widest::Unsigned(bits) => bits as <Self as Fixed>::Bits,
                    Widest::Negative(bits) => bits as <Self as Fixed>::Bits,
                };
                let lhs_bits = self.to_bits();
                // Bit-equal with `dir == Less` means the float was rounded
                // down during conversion, so the exact float is larger.
                lhs_bits < rhs_bits || (lhs_bits == rhs_bits && conv.dir == Ordering::Less)
            }

            // `le`/`ge` cannot simply negate `lt`: a NaN operand must make
            // every comparison false, so it is filtered out explicitly.
            #[inline]
            fn le(&self, rhs: &$Float) -> bool {
                !FloatHelper::is_nan(*rhs) && !rhs.lt(self)
            }

            #[inline]
            fn gt(&self, rhs: &$Float) -> bool {
                rhs.lt(self)
            }

            #[inline]
            fn ge(&self, rhs: &$Float) -> bool {
                !FloatHelper::is_nan(*rhs) && !self.lt(rhs)
            }
        }

        impl<Frac: $LeEqU> PartialOrd<$Fix<Frac>> for $Float {
            #[inline]
            fn partial_cmp(&self, rhs: &$Fix<Frac>) -> Option<Ordering> {
                // Reuse the impl above with the result flipped; `map` keeps
                // `None` (NaN) as `None`.
                rhs.partial_cmp(self).map(Ordering::reverse)
            }

            #[inline]
            fn lt(&self, rhs: &$Fix<Frac>) -> bool {
                let (lhs_is_neg, conv) =
                    match self.to_float_kind(<$Fix<Frac>>::FRAC_NBITS, <$Fix<Frac>>::INT_NBITS) {
                        // NaN is less than nothing.
                        FloatKind::NaN => return false,
                        FloatKind::Infinite { neg } => return neg,
                        FloatKind::Finite { neg, conv } => (neg, conv),
                    };

                match (lhs_is_neg, rhs.to_bits().is_negative()) {
                    (false, true) => return false,
                    (true, false) => return true,
                    _ => {}
                }
                if conv.overflow {
                    return lhs_is_neg;
                }
                let lhs_bits = match conv.bits {
                    Widest::Unsigned(bits) => bits as <$Fix<Frac> as Fixed>::Bits,
                    Widest::Negative(bits) => bits as <$Fix<Frac> as Fixed>::Bits,
                };
                let rhs_bits = rhs.to_bits();
                // Note the tie-break flips to `Greater` here: now it is the
                // left-hand float that was converted, so bit-equal with the
                // conversion rounded up means the exact float is smaller.
                lhs_bits < rhs_bits || (lhs_bits == rhs_bits && conv.dir == Ordering::Greater)
            }

            #[inline]
            fn le(&self, rhs: &$Fix<Frac>) -> bool {
                !FloatHelper::is_nan(*self) && !rhs.lt(self)
            }

            #[inline]
            fn gt(&self, rhs: &$Fix<Frac>) -> bool {
                rhs.lt(self)
            }

            #[inline]
            fn ge(&self, rhs: &$Fix<Frac>) -> bool {
                !FloatHelper::is_nan(*self) && !self.lt(rhs)
            }
        }
    };
}
332
// Generates every comparison impl for one fixed-point type: `Eq`/`Ord` against
// itself, and `PartialEq`/`PartialOrd` against all fixed-point types, all
// primitive integers, and the supported float types.
macro_rules! fixed_cmp_all {
    ($Fixed:ident($LeEq:ident)) => {
        // Same-type comparison: a fixed-point value and its underlying bit
        // representation order identically, so `Ord` falls straight through
        // to the bits.
        impl<Frac: $LeEq> Eq for $Fixed<Frac> {}

        impl<Frac: $LeEq> Ord for $Fixed<Frac> {
            #[inline]
            fn cmp(&self, other: &$Fixed<Frac>) -> Ordering {
                let lhs_bits = self.to_bits();
                let rhs_bits = other.to_bits();
                lhs_bits.cmp(&rhs_bits)
            }
        }

        // Cross-type comparisons with every fixed-point type.
        fixed_cmp_fixed! { $Fixed($LeEq), FixedI8(LeEqU8) }
        fixed_cmp_fixed! { $Fixed($LeEq), FixedI16(LeEqU16) }
        fixed_cmp_fixed! { $Fixed($LeEq), FixedI32(LeEqU32) }
        fixed_cmp_fixed! { $Fixed($LeEq), FixedI64(LeEqU64) }
        fixed_cmp_fixed! { $Fixed($LeEq), FixedI128(LeEqU128) }
        fixed_cmp_fixed! { $Fixed($LeEq), FixedU8(LeEqU8) }
        fixed_cmp_fixed! { $Fixed($LeEq), FixedU16(LeEqU16) }
        fixed_cmp_fixed! { $Fixed($LeEq), FixedU32(LeEqU32) }
        fixed_cmp_fixed! { $Fixed($LeEq), FixedU64(LeEqU64) }
        fixed_cmp_fixed! { $Fixed($LeEq), FixedU128(LeEqU128) }
        // Comparisons with every primitive integer type.
        fixed_cmp_int! { $Fixed($LeEq), i8 }
        fixed_cmp_int! { $Fixed($LeEq), i16 }
        fixed_cmp_int! { $Fixed($LeEq), i32 }
        fixed_cmp_int! { $Fixed($LeEq), i64 }
        fixed_cmp_int! { $Fixed($LeEq), i128 }
        fixed_cmp_int! { $Fixed($LeEq), isize }
        fixed_cmp_int! { $Fixed($LeEq), u8 }
        fixed_cmp_int! { $Fixed($LeEq), u16 }
        fixed_cmp_int! { $Fixed($LeEq), u32 }
        fixed_cmp_int! { $Fixed($LeEq), u64 }
        fixed_cmp_int! { $Fixed($LeEq), u128 }
        fixed_cmp_int! { $Fixed($LeEq), usize }
        // Comparisons with the primitive float types, plus the `half` crate
        // types when the `f16` feature is enabled.
        #[cfg(feature = "f16")]
        fixed_cmp_float! { $Fixed($LeEq), f16 }
        #[cfg(feature = "f16")]
        fixed_cmp_float! { $Fixed($LeEq), bf16 }
        fixed_cmp_float! { $Fixed($LeEq), f32 }
        fixed_cmp_float! { $Fixed($LeEq), f64 }
    };
}
374
// Instantiate the full set of comparison impls for each of the ten
// fixed-point types.
fixed_cmp_all! { FixedI8(LeEqU8) }
fixed_cmp_all! { FixedI16(LeEqU16) }
fixed_cmp_all! { FixedI32(LeEqU32) }
fixed_cmp_all! { FixedI64(LeEqU64) }
fixed_cmp_all! { FixedI128(LeEqU128) }
fixed_cmp_all! { FixedU8(LeEqU8) }
fixed_cmp_all! { FixedU16(LeEqU16) }
fixed_cmp_all! { FixedU32(LeEqU32) }
fixed_cmp_all! { FixedU64(LeEqU64) }
fixed_cmp_all! { FixedU128(LeEqU128) }
385
// NOTE(review): this macro expands to nothing, so it generates no code. It
// looks like a vestigial remnant of an earlier implementation — TODO confirm
// that nothing elsewhere in the crate invokes `fixed_cmp!` and remove it
// together with its no-op invocations.
macro_rules! fixed_cmp {
    ($Fixed:ident($Inner:ty, $Len:ty, $bits_count:expr)) => {};
}
389
// NOTE(review): `fixed_cmp!` has an empty expansion, so these invocations are
// no-ops — TODO confirm nothing depends on them and remove.
fixed_cmp! { FixedU8(u8, LeEqU8, 8) }
fixed_cmp! { FixedU16(u16, LeEqU16, 16) }
fixed_cmp! { FixedU32(u32, LeEqU32, 32) }
fixed_cmp! { FixedU64(u64, LeEqU64, 64) }
fixed_cmp! { FixedU128(u128, LeEqU128, 128) }
fixed_cmp! { FixedI8(i8, LeEqU8, 8) }
fixed_cmp! { FixedI16(i16, LeEqU16, 16) }
fixed_cmp! { FixedI32(i32, LeEqU32, 32) }
fixed_cmp! { FixedI64(i64, LeEqU64, 64) }
fixed_cmp! { FixedI128(i128, LeEqU128, 128) }
400
#[cfg(test)]
#[allow(clippy::cognitive_complexity, clippy::float_cmp)]
mod tests {
    use crate::*;

    /// Exercises comparisons of a signed fixed-point value against another
    /// fixed-point type with a different number of fractional bits, and
    /// against primitive integers and floats.
    #[test]
    fn cmp_signed() {
        use core::cmp::Ordering::*;
        // -1 represented with 16 and with 20 fractional bits respectively.
        let minus_one_f16 = FixedI32::<types::extra::U16>::from_num(-1);
        let minus_one_f20 = FixedI32::<types::extra::U20>::from_num(-1);
        let mut x = minus_one_f16;
        let mut y = minus_one_f20;
        assert!(x.eq(&y) && y.eq(&x));
        assert_eq!(x.partial_cmp(&y), Some(Equal));
        assert_eq!(y.partial_cmp(&x), Some(Equal));
        assert_eq!(x, -1i8);
        assert_eq!(y, -1i128);
        // Scale down to -2^-16, the smallest step of the 16-frac-bit format.
        x >>= 16;
        y >>= 16;
        assert!(x.eq(&y) && y.eq(&x));
        assert_eq!(x.partial_cmp(&y), Some(Equal));
        assert_eq!(y.partial_cmp(&x), Some(Equal));
        assert!(x < 0.0);
        assert_eq!(x, -(-16f32).exp2());
        assert!(x <= -(-16f32).exp2());
        assert!(x >= -(-16f32).exp2());
        assert!(x < (-16f32).exp2());
        assert_ne!(x, -0.75 * (-16f32).exp2());
        assert!(x < -0.75 * (-16f32).exp2());
        assert!(x <= -0.75 * (-16f32).exp2());
        assert!(x > -1.25 * (-16f32).exp2());
        assert!(x >= -1.25 * (-16f32).exp2());
        // Another halving: the coarser format can no longer follow the finer
        // one, so the two values diverge (asserted by the orderings below).
        x >>= 1;
        y >>= 1;
        assert!(x.ne(&y) && y.ne(&x));
        assert_eq!(x.partial_cmp(&y), Some(Less));
        assert_eq!(y.partial_cmp(&x), Some(Greater));
        // Large magnitudes: -2^11 fits both formats exactly.
        x = minus_one_f16 << 11;
        y = minus_one_f20 << 11;
        assert!(x.eq(&y) && y.eq(&x));
        assert_eq!(x.partial_cmp(&y), Some(Equal));
        assert_eq!(y.partial_cmp(&x), Some(Equal));
        assert_eq!(x, -1i16 << 11);
        assert_eq!(y, -1i64 << 11);
        // One more doubling overflows the 20-frac-bit format (wrapping to 0,
        // asserted below), so the two values diverge again.
        x <<= 1;
        y <<= 1;
        assert!(x.ne(&y) && y.ne(&x));
        assert_eq!(x.partial_cmp(&y), Some(Less));
        assert_eq!(y.partial_cmp(&x), Some(Greater));
        assert!(x < 1u8);
        assert_eq!(y, 0);
    }

    /// Same exercise as `cmp_signed`, but for an unsigned fixed-point type.
    #[test]
    fn cmp_unsigned() {
        use core::cmp::Ordering::*;
        // 1 represented with 16 and with 20 fractional bits respectively.
        let one_f16 = FixedU32::<types::extra::U16>::from_num(1);
        let one_f20 = FixedU32::<types::extra::U20>::from_num(1);
        let mut x = one_f16;
        let mut y = one_f20;
        assert!(x.eq(&y) && y.eq(&x));
        assert_eq!(x.partial_cmp(&y), Some(Equal));
        assert_eq!(y.partial_cmp(&x), Some(Equal));
        assert_eq!(x, 1u8);
        assert_eq!(y, 1i128);
        // Scale down to 2^-16, the smallest step of the 16-frac-bit format.
        x >>= 16;
        y >>= 16;
        assert!(x.eq(&y) && y.eq(&x));
        assert_eq!(x.partial_cmp(&y), Some(Equal));
        assert_eq!(y.partial_cmp(&x), Some(Equal));
        assert!(x > 0.0);
        assert_eq!(x, (-16f64).exp2());
        assert!(x <= (-16f64).exp2());
        assert!(x >= (-16f64).exp2());
        assert!(x > -(-16f64).exp2());
        assert_ne!(x, 0.75 * (-16f64).exp2());
        assert!(x > 0.75 * (-16f64).exp2());
        assert!(x >= 0.75 * (-16f64).exp2());
        assert!(x < 1.25 * (-16f64).exp2());
        assert!(x <= 1.25 * (-16f64).exp2());
        // Another halving: the coarser format drops below the finer one
        // (asserted by the orderings below).
        x >>= 1;
        y >>= 1;
        assert!(x.ne(&y) && y.ne(&x));
        assert_eq!(x.partial_cmp(&y), Some(Less));
        assert_eq!(y.partial_cmp(&x), Some(Greater));
        // Large magnitudes: 2^11 fits both formats exactly.
        x = one_f16 << 11;
        y = one_f20 << 11;
        assert!(x.eq(&y) && y.eq(&x));
        assert_eq!(x.partial_cmp(&y), Some(Equal));
        assert_eq!(y.partial_cmp(&x), Some(Equal));
        assert_eq!(x, 1i16 << 11);
        assert_eq!(y, 1u64 << 11);
        // One more doubling overflows the 20-frac-bit format (wrapping to 0,
        // asserted below).
        x <<= 1;
        y <<= 1;
        assert!(x.ne(&y) && y.ne(&x));
        assert_eq!(x.partial_cmp(&y), Some(Greater));
        assert_eq!(y.partial_cmp(&x), Some(Less));
        assert!(x > -1i8);
        assert_eq!(x, 1i32 << 12);
        assert_eq!(y, 0);
    }
}