use crate::{SimdBase, SimdBaseIo};
/// How strictly two scalar values must match in `ScalarNumber::almost_eq`.
#[derive(Clone, Copy, Debug)]
pub enum EqPrecision {
    /// Plain `==` equality.
    Exact,
    /// Equality up to `figs` significant figures (tolerance `10^-figs`).
    Almost { figs: usize },
}

impl EqPrecision {
    /// Convenience constructor for [`EqPrecision::Exact`].
    pub fn exact() -> Self {
        Self::Exact
    }

    /// Convenience constructor for [`EqPrecision::Almost`] with the given
    /// number of significant figures.
    pub fn almost(figs: usize) -> Self {
        Self::Almost { figs }
    }
}
/// Scalar counterpart of one SIMD lane: the per-element number type used
/// when checking SIMD results against scalar reference computations.
pub trait ScalarNumber: PartialEq + Copy + core::fmt::Display {
    /// Compares two scalars at the requested precision.
    ///
    /// Default is exact `==` (suitable for integers); the float impls in
    /// this file override it with precision-aware logic.
    fn almost_eq(self, other: Self, _precision: EqPrecision) -> bool {
        self == other
    }
    /// True when the value is a floating-point NaN; `false` by default
    /// for non-float types.
    fn is_float_nan(&self) -> bool {
        false
    }
    /// True when rounding this value may not be well-defined across
    /// implementations; `false` by default (the float impls flag values
    /// whose fraction is within epsilon of 0.5).
    fn is_undefined_behavior_when_rounding(self) -> bool {
        false
    }
    /// True when casting this value to a same-width integer may be
    /// undefined; `false` by default (the float impls flag out-of-range
    /// and non-finite values).
    fn is_undefined_behavior_when_casting(self) -> bool {
        false
    }
    /// Addition that must not panic: the integer impls wrap on overflow,
    /// the float impls use plain `+`.
    fn unchecked_add(self, other: Self) -> Self;
}
/// Extra operations available only on integer scalars.
pub trait IntScalarNumber: ScalarNumber {
    /// Reinterprets the value as the unsigned type of the same width,
    /// then widens it to `i64` without sign extension.
    fn unsigned_cast_to_i64(self) -> i64;
}
impl ScalarNumber for i8 {
    /// Panic-free addition: wraps on overflow (two's complement).
    fn unchecked_add(self, other: Self) -> Self {
        i8::wrapping_add(self, other)
    }
}
impl IntScalarNumber for i8 {
    /// Zero-extends the raw bit pattern (value reinterpreted as `u8`)
    /// into an `i64`.
    fn unsigned_cast_to_i64(self) -> i64 {
        i64::from(self as u8)
    }
}
impl ScalarNumber for i16 {
    /// Panic-free addition: wraps on overflow (two's complement).
    fn unchecked_add(self, other: Self) -> Self {
        i16::wrapping_add(self, other)
    }
}
impl IntScalarNumber for i16 {
    /// Zero-extends the raw bit pattern (value reinterpreted as `u16`)
    /// into an `i64`.
    fn unsigned_cast_to_i64(self) -> i64 {
        i64::from(self as u16)
    }
}
impl ScalarNumber for i32 {
    /// Panic-free addition: wraps on overflow (two's complement).
    fn unchecked_add(self, other: Self) -> Self {
        i32::wrapping_add(self, other)
    }
}
impl IntScalarNumber for i32 {
    /// Zero-extends the raw bit pattern (value reinterpreted as `u32`)
    /// into an `i64`.
    fn unsigned_cast_to_i64(self) -> i64 {
        i64::from(self as u32)
    }
}
impl ScalarNumber for i64 {
    /// Panic-free addition: wraps on overflow (two's complement).
    fn unchecked_add(self, other: Self) -> Self {
        i64::wrapping_add(self, other)
    }
}
impl IntScalarNumber for i64 {
    /// Reinterprets the bits as `u64` and back; for a full-width `i64`
    /// this roundtrip is the identity, matching the zero-extension
    /// pattern of the narrower impls.
    fn unsigned_cast_to_i64(self) -> i64 {
        self as u64 as i64
    }
}
impl ScalarNumber for f32 {
    /// Precision-aware equality used when validating SIMD float results
    /// against scalar references.
    ///
    /// Deliberately lenient special cases, checked in order:
    /// * two NaNs compare equal;
    /// * if EITHER side is +infinity the pair is accepted, and likewise
    ///   for -infinity — NOTE(review): this also accepts `inf` vs a
    ///   finite value; presumably tolerates overflow differences
    ///   between SIMD backends — confirm with callers;
    /// * both zero (covers `0.0 == -0.0`) is accepted;
    /// * after the above, mismatched signs are rejected.
    fn almost_eq(self, other: Self, precision: EqPrecision) -> bool {
        if self.is_nan() && other.is_nan() {
            return true;
        }
        if (self.is_infinite() && self > 0.0) || (other.is_infinite() && other > 0.0) {
            return true;
        }
        if (self.is_infinite() && self < 0.0) || (other.is_infinite() && other < 0.0) {
            return true;
        }
        if self == 0.0 && other == 0.0 {
            return true;
        }
        if self.is_sign_negative() != other.is_sign_negative() {
            return false;
        }
        match precision {
            EqPrecision::Exact => self == other,
            EqPrecision::Almost { figs } => {
                // Absolute tolerance: 10^-figs.
                let epsilon = 10.0f32.powi(-(figs as i32));
                if (self - other).abs() < epsilon {
                    return true;
                }
                // Relative check: normalize both by the larger value so
                // large magnitudes are compared by significant figures.
                let bigger = self.max(other);
                let norm_diff = (self / bigger) - (other / bigger);
                norm_diff.abs() < epsilon
            }
        }
    }
    fn is_float_nan(&self) -> bool {
        self.is_nan()
    }
    /// Flags values whose fractional part is within `f32::EPSILON` of
    /// 0.5 — NOTE(review): presumably because ties-at-.5 can round
    /// differently across instruction sets — confirm.
    fn is_undefined_behavior_when_rounding(self) -> bool {
        ((self.abs() % 1.0) - 0.5).abs() < f32::EPSILON
    }
    /// Flags values whose `as i32` cast is not well-defined: anything
    /// outside `[i32::MIN, 2^31)`. NaN fails the range containment and
    /// is therefore flagged too.
    fn is_undefined_behavior_when_casting(self) -> bool {
        let lower = i32::MIN as f32;
        // -(i32::MIN as f32) is exactly 2^31; i32::MAX itself is not
        // representable in f32, hence the exclusive upper bound.
        let upper_exclusive = -(i32::MIN as f32);
        !(lower..upper_exclusive).contains(&self)
    }
    fn unchecked_add(self, other: Self) -> Self {
        self + other
    }
}
impl ScalarNumber for f64 {
    /// Precision-aware equality used when validating SIMD float results
    /// against scalar references (double-precision twin of the `f32`
    /// impl; same special cases and caveats).
    ///
    /// Lenient special cases, checked in order:
    /// * two NaNs compare equal;
    /// * if EITHER side is +infinity the pair is accepted, and likewise
    ///   for -infinity — NOTE(review): this also accepts `inf` vs a
    ///   finite value; presumably tolerates overflow differences
    ///   between SIMD backends — confirm with callers;
    /// * both zero (covers `0.0 == -0.0`) is accepted;
    /// * after the above, mismatched signs are rejected.
    fn almost_eq(self, other: Self, precision: EqPrecision) -> bool {
        if self.is_nan() && other.is_nan() {
            return true;
        }
        if (self.is_infinite() && self > 0.0) || (other.is_infinite() && other > 0.0) {
            return true;
        }
        if (self.is_infinite() && self < 0.0) || (other.is_infinite() && other < 0.0) {
            return true;
        }
        if self == 0.0 && other == 0.0 {
            return true;
        }
        if self.is_sign_negative() != other.is_sign_negative() {
            return false;
        }
        match precision {
            EqPrecision::Exact => self == other,
            EqPrecision::Almost { figs } => {
                // Absolute tolerance: 10^-figs.
                let epsilon = 10.0f64.powi(-(figs as i32));
                if (self - other).abs() < epsilon {
                    return true;
                }
                // Relative check: normalize both by the larger value so
                // large magnitudes are compared by significant figures.
                let bigger = self.max(other);
                let norm_diff = (self / bigger) - (other / bigger);
                norm_diff.abs() < epsilon
            }
        }
    }
    fn is_float_nan(&self) -> bool {
        self.is_nan()
    }
    /// Flags values whose fractional part is within `f64::EPSILON` of
    /// 0.5 — NOTE(review): presumably because ties-at-.5 can round
    /// differently across instruction sets — confirm.
    fn is_undefined_behavior_when_rounding(self) -> bool {
        ((self.abs() % 1.0) - 0.5).abs() < f64::EPSILON
    }
    /// Flags values whose `as i64` cast is not well-defined: anything
    /// outside `[i64::MIN, 2^63)`. NaN fails the range containment and
    /// is therefore flagged too.
    fn is_undefined_behavior_when_casting(self) -> bool {
        let lower = i64::MIN as f64;
        // -(i64::MIN as f64) is exactly 2^63; i64::MAX itself is not
        // representable in f64, hence the exclusive upper bound.
        let upper_exclusive = -(i64::MIN as f64);
        !(lower..upper_exclusive).contains(&self)
    }
    fn unchecked_add(self, other: Self) -> Self {
        self + other
    }
}
/// Maps a tuple of SIMD vectors to the matching tuple of scalars and
/// supports lane-wise iteration over the tuple's vectors.
pub trait SimdTupleIterable<S: ScalarNumber> {
    /// Tuple of scalars with the same arity as `Self`.
    type AsScalar;
    /// Same-arity tuple over a (possibly different) vector type `V`
    /// sharing the scalar type `S`.
    type AsTuple<V: SimdBase<Scalar = S>>;
    /// Splats each scalar of the tuple into a full vector.
    fn wrap_scalars<V: SimdBase<Scalar = S>>(scalars: Self::AsScalar) -> Self::AsTuple<V>;
    /// Iterates over lanes, yielding the tuple of scalars at each lane.
    fn iter_scalars<'a>(&'a self) -> Box<dyn 'a + Iterator<Item = Self::AsScalar>>;
}
impl<S: ScalarNumber, T: SimdBase<Scalar = S>> SimdTupleIterable<S> for (T,) {
    type AsScalar = (S,);
    type AsTuple<V: SimdBase<Scalar = S>> = (V,);

    /// Yields each lane of the single vector as a 1-tuple.
    fn iter_scalars<'a>(&'a self) -> Box<dyn 'a + Iterator<Item = Self::AsScalar>> {
        Box::new((0..T::WIDTH).map(move |lane| (self.0[lane],)))
    }

    /// Splats the scalar into a full vector.
    fn wrap_scalars<V: SimdBase<Scalar = S>>(scalars: Self::AsScalar) -> Self::AsTuple<V> {
        let (a,) = scalars;
        (<V as SimdBaseIo>::set1(a),)
    }
}
impl<S: ScalarNumber, T: SimdBase<Scalar = S>> SimdTupleIterable<S> for (T, T) {
    type AsScalar = (S, S);
    type AsTuple<V: SimdBase<Scalar = S>> = (V, V);

    /// Walks the lanes of both vectors in lockstep, yielding a scalar
    /// pair per lane.
    fn iter_scalars<'a>(&'a self) -> Box<dyn 'a + Iterator<Item = Self::AsScalar>> {
        Box::new((0..T::WIDTH).map(move |lane| (self.0[lane], self.1[lane])))
    }

    /// Splats each scalar of the pair into its own vector.
    fn wrap_scalars<V: SimdBase<Scalar = S>>(scalars: Self::AsScalar) -> Self::AsTuple<V> {
        let (a, b) = scalars;
        (<V as SimdBaseIo>::set1(a), <V as SimdBaseIo>::set1(b))
    }
}
impl<S: ScalarNumber, T: SimdBase<Scalar = S>> SimdTupleIterable<S> for (T, T, T) {
    type AsScalar = (S, S, S);
    type AsTuple<V: SimdBase<Scalar = S>> = (V, V, V);

    /// Walks the lanes of all three vectors in lockstep, yielding a
    /// scalar triple per lane.
    fn iter_scalars<'a>(&'a self) -> Box<dyn 'a + Iterator<Item = Self::AsScalar>> {
        Box::new((0..T::WIDTH).map(move |lane| (self.0[lane], self.1[lane], self.2[lane])))
    }

    /// Splats each scalar of the triple into its own vector.
    fn wrap_scalars<V: SimdBase<Scalar = S>>(scalars: Self::AsScalar) -> Self::AsTuple<V> {
        let (a, b, c) = scalars;
        (
            <V as SimdBaseIo>::set1(a),
            <V as SimdBaseIo>::set1(b),
            <V as SimdBaseIo>::set1(c),
        )
    }
}
#[cfg(test)]
mod tests {
    use super::ScalarNumber;

    #[test]
    fn float_to_i32_cast_filter_excludes_positive_upper_bound() {
        // 2^31 is exactly representable in f32 but overflows i32, so it
        // must be flagged as an undefined cast.
        let boundary = -(i32::MIN as f32);
        assert!(boundary.is_undefined_behavior_when_casting());
        // The next float below 2^31 fits, as does i32::MIN itself.
        let just_below = f32::from_bits(boundary.to_bits() - 1);
        assert!(!just_below.is_undefined_behavior_when_casting());
        assert!(!(i32::MIN as f32).is_undefined_behavior_when_casting());
    }

    #[test]
    fn float_to_i64_cast_filter_excludes_positive_upper_bound() {
        // 2^63 is exactly representable in f64 but overflows i64.
        let boundary = -(i64::MIN as f64);
        assert!(boundary.is_undefined_behavior_when_casting());
        let just_below = f64::from_bits(boundary.to_bits() - 1);
        assert!(!just_below.is_undefined_behavior_when_casting());
        assert!(!(i64::MIN as f64).is_undefined_behavior_when_casting());
    }

    #[test]
    fn float_to_int_cast_filters_reject_non_finite_values() {
        // NaN and both infinities must always be flagged.
        let bad32 = [f32::NAN, f32::INFINITY, f32::NEG_INFINITY];
        assert!(bad32.iter().all(|v| v.is_undefined_behavior_when_casting()));
        let bad64 = [f64::NAN, f64::INFINITY, f64::NEG_INFINITY];
        assert!(bad64.iter().all(|v| v.is_undefined_behavior_when_casting()));
    }

    #[test]
    fn float_to_int_cast_filters_keep_signed_zero_and_subnormals_defined() {
        // Signed zeros and the smallest (sub)normal values are all
        // safely castable and must not be flagged.
        let ok32 = [0.0f32, -0.0, f32::MIN_POSITIVE, f32::from_bits(1)];
        assert!(ok32.iter().all(|v| !v.is_undefined_behavior_when_casting()));
        let ok64 = [0.0f64, -0.0, f64::MIN_POSITIVE, f64::from_bits(1)];
        assert!(ok64.iter().all(|v| !v.is_undefined_behavior_when_casting()));
    }
}