fast_posit/underlying/int.rs

1use super::{Int, Sealed};
2
/// Implementation of almost all functions, a couple nasty ones need handwritten impls!
///
/// Expands to the shared `Sealed` items for a signed integer type:
/// `$int` is the signed type itself, `$uint` its same-width unsigned twin,
/// `$double` the double-width type used for widening operations, and
/// `$nonzero` the matching `core::num::NonZeroI*` identifier.
macro_rules! impl_common {
  ($int:ty, $uint:ty, $double:ty, $nonzero:ident) => {
    type Unsigned = $uint;
    type Double = $double;

    const ZERO: Self = 0;
    const ONE: Self = 1;
    const MIN: Self = <$int>::MIN;
    const MAX: Self = <$int>::MAX;
    const BITS: u32 = <$int>::BITS;

    // Bit-preserving reinterpret casts between the signed type and its
    // unsigned counterpart (same width, so `as` never loses bits here).
    #[inline]
    fn as_unsigned(self) -> $uint { self as $uint }

    #[inline]
    fn of_unsigned(x: $uint) -> Self { x as $int }

    // Truncating cast in release builds; the debug_assert documents the
    // caller contract that the value actually fits in the target type.
    #[inline]
    fn as_u32(self) -> u32 {
      debug_assert!(u32::try_from(self).is_ok());
      self as u32
    }

    #[inline]
    fn of_u32(x: u32) -> Self {
      debug_assert!(Self::try_from(x).is_ok());
      x as $int
    }

    // Byte-order conversions, delegated to the std intrinsics.
    #[inline]
    fn to_be(self) -> Self { self.to_be() }

    #[inline]
    fn from_be(self) -> Self { <$int>::from_be(self) }

    // NOTE(review): unlike std's `i32::is_positive`, this returns true for
    // zero as well — "positive" here means non-negative.
    #[inline]
    fn is_positive(self) -> bool {
      self >= 0
      // let mask = self >> (Self::BITS - 1);
      // unsafe { core::mem::transmute::<u8, bool>((mask & 1) as u8) }
    }

    #[inline]
    fn abs(self) -> Self {
      self.abs()
    }

    // Logical (zero-filling) right shift, via a round-trip through the
    // unsigned type to defeat the arithmetic-shift default on $int.
    #[inline]
    fn lshr(self, n: u32) -> Self { ((self as $uint) >> n) as $int }

    // Keeps only the `n` least-significant bits.
    // NOTE(review): `n == BITS` overflows the shift (panic in debug builds);
    // presumably callers guarantee n < BITS — TODO confirm.
    #[inline]
    fn mask_lsb(self, n: u32) -> Self {
      let mask = (1 as $int << n).wrapping_sub(1);
      self & mask
    }

    // Keeps only the `n` most-significant bits.
    // NOTE(review): `n == 0` overflows the shift (panic in debug builds);
    // presumably callers guarantee 1 <= n <= BITS — TODO confirm.
    #[inline]
    fn mask_msb(self, n: u32) -> Self {
      let mask = (1 as $int << (Self::BITS - n)).wrapping_sub(1);
      self & !mask
    }

    // True iff the lowest bit is set.
    #[inline]
    fn get_lsb(self) -> bool {
      self & 1 == 1
    }

    // NOTE(review): the const assert presumably guards that bit counts fit
    // in a u8 with headroom somewhere else in the crate — TODO confirm.
    fn leading_zeros(self) -> u32 {
      const { assert!(Self::BITS < u8::MAX as u32 - 1) }
      self.leading_zeros()
    }

    // Like `leading_zeros`, but tells the optimizer the value is nonzero so
    // the count can compile to a bare CLZ/LZCNT without a zero-input branch.
    // SAFETY (caller contract): `self != 0`, or `new_unchecked` is UB.
    #[inline]
    unsafe fn leading_zeros_nonzero(self) -> u32 {
      const { assert!(Self::BITS < u8::MAX as u32 - 1) }
      unsafe{core::num::$nonzero::new_unchecked(self)}.leading_zeros()
    }

    // Length minus one of the leading run of identical bits: `self ^ (self
    // << 1)` sets a bit at every position where adjacent bits differ, so the
    // leading zeros of that derivative count the run. E.g. 0b00010101 -> 2.
    // SAFETY (caller contract): `self != 0` (that is the only input whose
    // derivative `y` is zero, which would make `new_unchecked` UB).
    unsafe fn leading_run_minus_one(self) -> u32 {
      let y = self ^ (self << 1);
      let z = unsafe { core::num::$nonzero::new_unchecked(y) };
      z.leading_zeros()
    }

    // Branchless conditional NOT: returns `!self` when `control >= 0`, and
    // `self` unchanged when `control < 0` (sign-extended mask trick).
    #[inline]
    fn not_if_positive(self, control: Self) -> Self {
      // !self.not_if_negative(control)
      // Slightly more ILP
      let mask = control >> (Self::BITS - 1);
      !self ^ mask
    }

    // Branchless conditional NOT: returns `self` when `control >= 0`, and
    // `!self` when `control < 0`.
    #[inline]
    fn not_if_negative(self, control: Self) -> Self {
      let mask = control >> (Self::BITS - 1);
      self ^ mask
    }

    #[inline]
    fn wrapping_add(self, other: Self) -> Self { self.wrapping_add(other) }

    #[inline]
    fn wrapping_sub(self, other: Self) -> Self { self.wrapping_sub(other) }

    #[inline]
    fn wrapping_neg(self) -> Self { self.wrapping_neg() }

    #[inline]
    fn wrapping_abs(self) -> Self { self.wrapping_abs() }

    #[inline]
    fn overflowing_add(self, other: Self) -> (Self, bool) { self.overflowing_add(other) }

    // Full-adder step: add with carry-in, returning (sum, carry-out). Done
    // in the unsigned domain so the carry-out has unsigned-overflow meaning.
    #[inline]
    fn carrying_add(self, other: Self, carry: bool) -> (Self, bool) {
      let (a, b) = (self as $uint).overflowing_add(other as $uint);
      let (c, d) = a.overflowing_add(carry as $uint);
      (c as $int, b | d)
    }

    // Shift left by `n` across word boundaries: decomposes `n` into a
    // whole-word byte offset plus an in-word bit shift, returning
    // (high word, low word, byte offset). `unbounded_shl`/`unbounded_shr`
    // saturate instead of panicking when `bits == 0` makes the complementary
    // shift equal BITS (shr then sign-extends, matching the tests below).
    fn multiword_shl(self, n: u32) -> (Self, Self, usize) {
      // Codegen seems pretty great when looking in godbolt!
      let bytes = n / Self::BITS * Self::BITS / 8;
      let bits = n % Self::BITS;
      let lo = self.unbounded_shl(bits);
      let hi = self.unbounded_shr(Self::BITS - bits);
      (hi, lo, bytes as usize)
    }
  }
}
134
/// Implementations of the widening operations for types that have a
/// primitive double-width counterpart (`$double`) — i.e. everything but
/// i128, which needs handwritten versions over `Pair<i128>`.
macro_rules! impl_common_doubling {
  ($int:ty, $double:ty) => {
    // Widening multiply: the full product of two $int values always fits in
    // the double-width type, so no overflow is possible.
    #[inline]
    fn doubling_mul(self, other: Self) -> Self::Double {
      self as $double * other as $double
    }

    // Computes floor((self << precision) / other) and the matching
    // remainder, in double width so the shifted dividend cannot overflow.
    // SAFETY (caller contract): `other` must be neither 0 (division by
    // zero) nor -1 (MIN / -1 overflow); the hints below are UB otherwise.
    unsafe fn shift_div_rem(self, other: Self, precision: u32) -> (Self, Self) {
      // SAFETY: forwarded from the caller's contract; lets the optimizer
      // drop the div-by-zero and overflow checks around the hardware divide.
      unsafe { core::hint::assert_unchecked(other != Self::ZERO); }
      unsafe { core::hint::assert_unchecked(other != -Self::ONE); }
      let a = self as $double << precision;
      let b = other as $double;
      let mut div = a / b;
      let rem = a % b;
      // PDP/C/Rust division rounds towards 0, not towards -∞. For positive numbers this is the
      // same. For negative numbers, we need to subtract 1 if the division is inexact.
      div -= ((div < 0) & (rem != 0)) as $double;
      (div as $int, rem as $int)
    }    
  }
}
156
157impl Int for i128 {}
158impl Sealed for i128 {
159  impl_common!{i128, u128, super::double::Pair<i128>, NonZeroI128}
160
161  fn doubling_mul(self, _other: Self) -> Self::Double {
162    todo!()
163  }
164
165  unsafe fn shift_div_rem(self, _other: Self, _precision: u32) -> (Self, Self) {
166    todo!()
167  }
168
169  fn overflowing_add_shift(self, rhs: Self) -> (Self, bool) {
170    let (mut result, carry) = self.overflowing_add(rhs);
171    result >>= u32::from(carry);
172    result ^= Self::from(carry) << (Self::BITS - 1);
173    (result, carry)
174  }
175}
176
177impl Int for i64 {}
178impl Sealed for i64 {
179  impl_common!{i64, u64, i128, NonZeroI64}
180  impl_common_doubling!{i64, i128}
181
182  fn overflowing_add_shift(self, rhs: Self) -> (Self, bool) {
183    let (mut result, carry) = self.overflowing_add(rhs);
184    result >>= u32::from(carry);
185    result ^= Self::from(carry) << (Self::BITS - 1);
186    (result, carry)
187  }
188}
189
190impl Int for i32 {}
191impl Sealed for i32 {
192  impl_common!{i32, u32, i64, NonZeroI32}
193  impl_common_doubling!{i32, i64}
194
195  fn overflowing_add_shift(self, rhs: Self) -> (Self, bool) {
196    let (mut result, carry) = self.overflowing_add(rhs);
197    result >>= u32::from(carry);
198    result ^= Self::from(carry) << (Self::BITS - 1);
199    (result, carry)
200  }
201}
202
203impl Int for i16 {}
204impl Sealed for i16 {
205  impl_common!{i16, u16, i32, NonZeroI16}
206  impl_common_doubling!{i16, i32}
207
208  fn overflowing_add_shift(self, rhs: Self) -> (Self, bool) {
209    let (mut result, carry) = self.overflowing_add(rhs);
210    result >>= u32::from(carry);
211    result ^= Self::from(carry) << (Self::BITS - 1);
212    (result, carry)
213  }
214}
215
216impl Int for i8 {}
217impl Sealed for i8 {
218  impl_common!{i8, u8, i16, NonZeroI8}
219  impl_common_doubling!{i8, i16}
220
221  fn overflowing_add_shift(self, rhs: Self) -> (Self, bool) {
222    let (mut result, carry) = self.overflowing_add(rhs);
223    result >>= u32::from(carry);
224    result ^= Self::from(carry) << (Self::BITS - 1);
225    (result, carry)
226  }
227}
228
// Unit tests pinning the exact bit-level semantics of the macro-generated
// helpers above. `overflowing_literals` is allowed so hex/binary patterns
// with the sign bit set can be written directly as signed literals.
#[cfg(test)]
#[allow(overflowing_literals)]
mod tests {
  use super::*;

  // mask_lsb(n) keeps only the n least-significant bits.
  #[test]
  fn mask_lsb() {
    assert_eq!(0b01111110_i8.mask_lsb(3), 0b00000110_i8);
    assert_eq!(0xabcd_i16.mask_lsb(4), 0x000d_i16);
    assert_eq!(0xabcdabcd_i32.mask_lsb(4), 0x0000000d_i32);
    assert_eq!(0xdeadbeefdeadbeef_i64.mask_lsb(6), 0x2f_i64);
  }

  // mask_msb(n) keeps only the n most-significant bits.
  #[test]
  fn mask_msb() {
    assert_eq!(0b01111110_i8.mask_msb(3), 0b01100000_i8);
    assert_eq!(0xabcd_i16.mask_msb(4), 0xa000_i16);
    assert_eq!(0xabcdabcd_i32.mask_msb(4), 0xa0000000_i32);
    assert_eq!(0xdeadbeefdeadbeef_i64.mask_msb(12), 0xdea_i64 << 52);
  }

  // Leading run of 0-bits: the same i8 pattern sign-extends with zeros into
  // wider types, lengthening the run by the width difference.
  #[test]
  fn leading_run_minus_one_zeroes() {
    unsafe {
      assert_eq!((0b00010101i8 as i8).leading_run_minus_one(), 2);
      assert_eq!((0b00010101i8 as i16).leading_run_minus_one(), 8 + 2);
      assert_eq!((0b00010101i8 as i32).leading_run_minus_one(), 24 + 2);
      assert_eq!((0b00010101i8 as i64).leading_run_minus_one(), 56 + 2);
    }
  }

  // Leading run of 1-bits: sign extension of a negative i8 lengthens the run.
  #[test]
  fn leading_run_minus_one_ones() {
    unsafe {
      assert_eq!((0b11111000i8 as i8).leading_run_minus_one(), 4);
      assert_eq!((0b11111000i8 as i16).leading_run_minus_one(), 8 + 4);
      assert_eq!((0b11111000i8 as i32).leading_run_minus_one(), 24 + 4);
      assert_eq!((0b11111000i8 as i64).leading_run_minus_one(), 56 + 4);
    }
  }

  // Identity when the control is non-negative, bitwise NOT when negative.
  #[test]
  fn not_if_negative() {
    assert_eq!((0b01110110i8 as i8).not_if_negative(1),  0b01110110i8 as i8);
    assert_eq!((0b01110110i8 as i8).not_if_negative(-1), 0b10001001i8 as i8);
    assert_eq!((0b01110110i8 as i16).not_if_negative(1),  0b01110110i8 as i16);
    assert_eq!((0b01110110i8 as i16).not_if_negative(-1), 0b10001001i8 as i16);
    assert_eq!((0b01110110i8 as i32).not_if_negative(1),  0b01110110i8 as i32);
    assert_eq!((0b01110110i8 as i32).not_if_negative(-1), 0b10001001i8 as i32);
    assert_eq!((0b01110110i8 as i64).not_if_negative(1),  0b01110110i8 as i64);
    assert_eq!((0b01110110i8 as i64).not_if_negative(-1), 0b10001001i8 as i64);
  }

  // Bitwise NOT when the control is non-negative, identity when negative.
  #[test]
  fn not_if_positive() {
    assert_eq!((0b11100110i8 as i8).not_if_positive(1),  0b00011001i8 as i8);
    assert_eq!((0b11100110i8 as i8).not_if_positive(-1), 0b11100110i8 as i8);
    assert_eq!((0b11100110i8 as i16).not_if_positive(1),  0b00011001i8 as i16);
    assert_eq!((0b11100110i8 as i16).not_if_positive(-1), 0b11100110i8 as i16);
    assert_eq!((0b11100110i8 as i32).not_if_positive(1),  0b00011001i8 as i32);
    assert_eq!((0b11100110i8 as i32).not_if_positive(-1), 0b11100110i8 as i32);
    assert_eq!((0b11100110i8 as i64).not_if_positive(1),  0b00011001i8 as i64);
    assert_eq!((0b11100110i8 as i64).not_if_positive(-1), 0b11100110i8 as i64);
  }

  // Without overflow the sum passes through; with overflow the wrapped sum
  // is halved and its sign bit corrected (the `_` groups the top two bits).
  #[test]
  fn overflowing_add_shift() {
    assert_eq!(
      (0b01_000000i8).overflowing_add_shift(0b00_100000i8),
      (0b01_100000i8, false)
    );
    assert_eq!(
      (0b01_000000i8).overflowing_add_shift(0b01_000000i8),
      (0b01_000000i8, true)
    );
    assert_eq!(
      (0b10_000000i8).overflowing_add_shift(0b01_011000i8),
      (0b11_011000i8, false)
    );
    assert_eq!(
      (0b10_000000i8).overflowing_add_shift(0b10_011000i8),
      (0b10_001100i8, true)
    );
  }

  // n < BITS: no byte offset; the high word carries the (sign-extended)
  // spill-over bits.
  #[test]
  fn multiword_shl_small() {
    assert_eq!(
      (0x1234abcd_i32).multiword_shl(4),
      (0x00000001, 0x234abcd0, 0),
    );
    assert_eq!(
      (0xa234abcd_i32).multiword_shl(4),
      (0xfffffffa, 0x234abcd0, 0),
    );
  }

  // Whole-word multiples of BITS only move the byte offset, not the words.
  #[test]
  fn multiword_shl_exact() {
    assert_eq!(
      (0x1234abcd_i32).multiword_shl(32 + 4),
      (0x00000001, 0x234abcd0, 4),
    );
    assert_eq!(
      (0xa234abcd_i32).multiword_shl(32 + 4),
      (0xfffffffa, 0x234abcd0, 4),
    );

    assert_eq!(
      (0x1234abcd_i32).multiword_shl(64 + 4),
      (0x00000001, 0x234abcd0, 8),
    );
    assert_eq!(
      (0xa234abcd_i32).multiword_shl(64 + 4),
      (0xfffffffa, 0x234abcd0, 8),
    );
  }

  // Shifts that straddle a word boundary but are not byte-aligned to BITS:
  // the byte offset only advances in whole-word (4-byte) steps.
  #[test]
  fn multiword_shl_inexact() {
    assert_eq!(
      (0x1234abcd_i32).multiword_shl(16 + 4),
      (0x0001234a, 0xbcd00000, 0),
    );
    assert_eq!(
      (0xa234abcd_i32).multiword_shl(16 + 4),
      (0xfffa234a, 0xbcd00000, 0),
    );

    assert_eq!(
      (0x1234abcd_i32).multiword_shl(48 + 4),
      (0x0001234a, 0xbcd00000, 4),
    );
    assert_eq!(
      (0xa234abcd_i32).multiword_shl(48 + 4),
      (0xfffa234a, 0xbcd00000, 4),
    );
  }
}
367}