#[cfg(feature = "bytemuck")]
use bytemuck::{Pod, Zeroable};
use core::{
cmp::Ordering,
iter::{Product, Sum},
num::FpCategory,
ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Rem, RemAssign, Sub, SubAssign},
};
#[cfg(not(target_arch = "spirv"))]
use core::{
fmt::{
Binary, Debug, Display, Error, Formatter, LowerExp, LowerHex, Octal, UpperExp, UpperHex,
},
num::ParseFloatError,
str::FromStr,
};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "zerocopy")]
use zerocopy::{AsBytes, FromBytes};
pub(crate) mod convert;
/// A 16-bit "brain" floating point (`bfloat16`) value.
///
/// Stored as a transparent wrapper over the raw `u16` bit pattern:
/// 1 sign bit, 8 exponent bits (same range as `f32`), 7 mantissa bits.
/// Lowercase name mirrors the primitive float types, hence the allow.
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Default)]
#[repr(transparent)]
#[cfg_attr(feature = "serde", derive(Serialize))]
#[cfg_attr(feature = "bytemuck", derive(Zeroable, Pod))]
#[cfg_attr(feature = "zerocopy", derive(AsBytes, FromBytes))]
pub struct bf16(u16);
impl bf16 {
    /// Reinterprets the given bits as a `bf16` (no numeric conversion).
    #[inline]
    #[must_use]
    pub const fn from_bits(bits: u16) -> bf16 {
        bf16(bits)
    }
    /// Converts an `f32` to `bf16`, rounding to the nearest representable
    /// value (ties to even — see the `round_to_even_f32` test).
    #[inline]
    #[must_use]
    pub fn from_f32(value: f32) -> bf16 {
        Self::from_f32_const(value)
    }
    /// `const` version of [`from_f32`](Self::from_f32).
    #[inline]
    #[must_use]
    pub const fn from_f32_const(value: f32) -> bf16 {
        bf16(convert::f32_to_bf16(value))
    }
    /// Converts an `f64` to `bf16`, rounding to the nearest representable
    /// value (ties to even — see the `round_to_even_f64` test).
    #[inline]
    #[must_use]
    pub fn from_f64(value: f64) -> bf16 {
        Self::from_f64_const(value)
    }
    /// `const` version of [`from_f64`](Self::from_f64).
    #[inline]
    #[must_use]
    pub const fn from_f64_const(value: f64) -> bf16 {
        bf16(convert::f64_to_bf16(value))
    }
    /// Returns the raw bit representation.
    #[inline]
    #[must_use]
    pub const fn to_bits(self) -> u16 {
        self.0
    }
    /// Returns the bit pattern as bytes in little-endian order.
    #[inline]
    #[must_use]
    pub const fn to_le_bytes(self) -> [u8; 2] {
        self.0.to_le_bytes()
    }
    /// Returns the bit pattern as bytes in big-endian order.
    #[inline]
    #[must_use]
    pub const fn to_be_bytes(self) -> [u8; 2] {
        self.0.to_be_bytes()
    }
    /// Returns the bit pattern as bytes in native-endian order.
    #[inline]
    #[must_use]
    pub const fn to_ne_bytes(self) -> [u8; 2] {
        self.0.to_ne_bytes()
    }
    /// Creates a `bf16` from bytes in little-endian order.
    #[inline]
    #[must_use]
    pub const fn from_le_bytes(bytes: [u8; 2]) -> bf16 {
        bf16::from_bits(u16::from_le_bytes(bytes))
    }
    /// Creates a `bf16` from bytes in big-endian order.
    #[inline]
    #[must_use]
    pub const fn from_be_bytes(bytes: [u8; 2]) -> bf16 {
        bf16::from_bits(u16::from_be_bytes(bytes))
    }
    /// Creates a `bf16` from bytes in native-endian order.
    #[inline]
    #[must_use]
    pub const fn from_ne_bytes(bytes: [u8; 2]) -> bf16 {
        bf16::from_bits(u16::from_ne_bytes(bytes))
    }
    /// Widens to `f32`. Lossless: every `bf16` is exactly representable.
    #[inline]
    #[must_use]
    pub fn to_f32(self) -> f32 {
        self.to_f32_const()
    }
    /// `const` version of [`to_f32`](Self::to_f32).
    #[inline]
    #[must_use]
    pub const fn to_f32_const(self) -> f32 {
        convert::bf16_to_f32(self.0)
    }
    /// Widens to `f64`. Lossless: every `bf16` is exactly representable.
    #[inline]
    #[must_use]
    pub fn to_f64(self) -> f64 {
        self.to_f64_const()
    }
    /// `const` version of [`to_f64`](Self::to_f64).
    #[inline]
    #[must_use]
    pub const fn to_f64_const(self) -> f64 {
        convert::bf16_to_f64(self.0)
    }
    /// `true` if the value is NaN: exponent all ones (0x7F80) with a
    /// non-zero mantissa, i.e. magnitude bits strictly above infinity's.
    #[inline]
    #[must_use]
    pub const fn is_nan(self) -> bool {
        self.0 & 0x7FFFu16 > 0x7F80u16
    }
    /// `true` for +inf or -inf: magnitude bits exactly 0x7F80.
    #[inline]
    #[must_use]
    pub const fn is_infinite(self) -> bool {
        self.0 & 0x7FFFu16 == 0x7F80u16
    }
    /// `true` when neither infinite nor NaN (exponent not all ones).
    #[inline]
    #[must_use]
    pub const fn is_finite(self) -> bool {
        self.0 & 0x7F80u16 != 0x7F80u16
    }
    /// `true` for normal numbers: exponent neither all ones (inf/NaN)
    /// nor all zeros (zero/subnormal).
    #[inline]
    #[must_use]
    pub const fn is_normal(self) -> bool {
        let exp = self.0 & 0x7F80u16;
        exp != 0x7F80u16 && exp != 0
    }
    /// Classifies the value by inspecting exponent and mantissa fields.
    #[must_use]
    pub const fn classify(self) -> FpCategory {
        let exp = self.0 & 0x7F80u16;
        let man = self.0 & 0x007Fu16;
        match (exp, man) {
            (0, 0) => FpCategory::Zero,
            (0, _) => FpCategory::Subnormal,
            (0x7F80u16, 0) => FpCategory::Infinite,
            (0x7F80u16, _) => FpCategory::Nan,
            _ => FpCategory::Normal,
        }
    }
    /// Returns NaN unchanged, otherwise -1.0 or 1.0 by sign bit.
    /// Note: ±0.0 report their sign (+0.0 → 1.0, -0.0 → -1.0).
    #[must_use]
    pub const fn signum(self) -> bf16 {
        if self.is_nan() {
            self
        } else if self.0 & 0x8000u16 != 0 {
            Self::NEG_ONE
        } else {
            Self::ONE
        }
    }
    /// `true` when the sign bit is clear (includes +0.0 and positive NaNs).
    #[inline]
    #[must_use]
    pub const fn is_sign_positive(self) -> bool {
        self.0 & 0x8000u16 == 0
    }
    /// `true` when the sign bit is set (includes -0.0 and negative NaNs).
    #[inline]
    #[must_use]
    pub const fn is_sign_negative(self) -> bool {
        self.0 & 0x8000u16 != 0
    }
    /// Returns `self` with the sign bit taken from `sign`.
    #[inline]
    #[must_use]
    pub const fn copysign(self, sign: bf16) -> bf16 {
        bf16((sign.0 & 0x8000u16) | (self.0 & 0x7FFFu16))
    }
    /// Returns the larger of the two values.
    ///
    /// NOTE(review): returns `other` only when it is strictly greater and
    /// not NaN; if `self` is NaN, `self` (NaN) is returned even when
    /// `other` is a number — this is asymmetric with `f32::max`, which
    /// returns the non-NaN operand. Confirm this is intended.
    #[inline]
    #[must_use]
    pub fn max(self, other: bf16) -> bf16 {
        if other > self && !other.is_nan() {
            other
        } else {
            self
        }
    }
    /// Returns the smaller of the two values.
    ///
    /// NOTE(review): same NaN asymmetry as [`max`](Self::max) — a NaN
    /// `self` propagates, a NaN `other` does not.
    #[inline]
    #[must_use]
    pub fn min(self, other: bf16) -> bf16 {
        if other < self && !other.is_nan() {
            other
        } else {
            self
        }
    }
    /// Clamps `self` to `[min, max]`.
    ///
    /// # Panics
    /// Panics when `min <= max` does not hold (including when either
    /// bound is NaN, since NaN comparisons are false).
    #[inline]
    #[must_use]
    pub fn clamp(self, min: bf16, max: bf16) -> bf16 {
        assert!(min <= max);
        let mut x = self;
        if x < min {
            x = min;
        }
        if x > max {
            x = max;
        }
        x
    }
    /// Total ordering over all values, including NaNs and signed zeros
    /// (same bit-trick scheme as `f32::total_cmp`).
    #[inline]
    #[must_use]
    pub fn total_cmp(&self, other: &Self) -> Ordering {
        let mut left = self.to_bits() as i16;
        let mut right = other.to_bits() as i16;
        // If the sign bit is set, flip all the remaining bits so the
        // resulting signed integers compare in total order.
        left ^= (((left >> 15) as u16) >> 1) as i16;
        right ^= (((right >> 15) as u16) >> 1) as i16;
        left.cmp(&right)
    }
    /// Serializes as a plain `f32` instead of the default newtype form;
    /// for use with `#[serde(serialize_with = ...)]`.
    #[cfg(feature = "serde")]
    pub fn serialize_as_f32<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_f32(self.to_f32())
    }
    /// Serializes as the `Display` string instead of the default newtype
    /// form; for use with `#[serde(serialize_with = ...)]`.
    #[cfg(feature = "serde")]
    pub fn serialize_as_string<S: serde::Serializer>(
        &self,
        serializer: S,
    ) -> Result<S::Ok, S::Error> {
        serializer.serialize_str(&self.to_string())
    }
    /// Approximate number of significant decimal digits.
    pub const DIGITS: u32 = 2;
    /// Machine epsilon: difference between 1.0 and the next larger
    /// representable value (encodes 2^-7).
    pub const EPSILON: bf16 = bf16(0x3C00u16);
    /// Positive infinity (+∞).
    pub const INFINITY: bf16 = bf16(0x7F80u16);
    /// Significand bits including the implicit leading one (7 stored + 1).
    pub const MANTISSA_DIGITS: u32 = 8;
    /// Largest finite value.
    pub const MAX: bf16 = bf16(0x7F7F);
    /// Largest base-10 exponent such that 10^x is finite.
    pub const MAX_10_EXP: i32 = 38;
    /// Maximum binary exponent.
    pub const MAX_EXP: i32 = 128;
    /// Smallest (most negative) finite value.
    pub const MIN: bf16 = bf16(0xFF7F);
    /// Smallest base-10 exponent such that 10^x is normal.
    pub const MIN_10_EXP: i32 = -37;
    /// Minimum binary exponent of a normal value.
    pub const MIN_EXP: i32 = -125;
    /// Smallest positive normal value (2^-126).
    pub const MIN_POSITIVE: bf16 = bf16(0x0080u16);
    /// A quiet NaN.
    pub const NAN: bf16 = bf16(0x7FC0u16);
    /// Negative infinity (-∞).
    pub const NEG_INFINITY: bf16 = bf16(0xFF80u16);
    /// Radix of the internal representation.
    pub const RADIX: u32 = 2;
    /// Smallest positive subnormal value.
    pub const MIN_POSITIVE_SUBNORMAL: bf16 = bf16(0x0001u16);
    /// Largest subnormal value.
    pub const MAX_SUBNORMAL: bf16 = bf16(0x007Fu16);
    /// 1.0
    pub const ONE: bf16 = bf16(0x3F80u16);
    /// +0.0
    pub const ZERO: bf16 = bf16(0x0000u16);
    /// -0.0
    pub const NEG_ZERO: bf16 = bf16(0x8000u16);
    /// -1.0
    pub const NEG_ONE: bf16 = bf16(0xBF80u16);
    // Mathematical constants, each rounded to nearest bf16
    // (verified against f32 in `test_bf16_consts_from_f32`).
    /// Euler's number (e).
    pub const E: bf16 = bf16(0x402Eu16);
    /// Archimedes' constant (π).
    pub const PI: bf16 = bf16(0x4049u16);
    /// 1/π
    pub const FRAC_1_PI: bf16 = bf16(0x3EA3u16);
    /// 1/√2
    pub const FRAC_1_SQRT_2: bf16 = bf16(0x3F35u16);
    /// 2/π
    pub const FRAC_2_PI: bf16 = bf16(0x3F23u16);
    /// 2/√π
    pub const FRAC_2_SQRT_PI: bf16 = bf16(0x3F90u16);
    /// π/2
    pub const FRAC_PI_2: bf16 = bf16(0x3FC9u16);
    /// π/3
    pub const FRAC_PI_3: bf16 = bf16(0x3F86u16);
    /// π/4
    pub const FRAC_PI_4: bf16 = bf16(0x3F49u16);
    /// π/6
    pub const FRAC_PI_6: bf16 = bf16(0x3F06u16);
    /// π/8
    pub const FRAC_PI_8: bf16 = bf16(0x3EC9u16);
    /// ln(10)
    pub const LN_10: bf16 = bf16(0x4013u16);
    /// ln(2)
    pub const LN_2: bf16 = bf16(0x3F31u16);
    /// log10(e)
    pub const LOG10_E: bf16 = bf16(0x3EDEu16);
    /// log10(2)
    pub const LOG10_2: bf16 = bf16(0x3E9Au16);
    /// log2(e)
    pub const LOG2_E: bf16 = bf16(0x3FB9u16);
    /// log2(10)
    pub const LOG2_10: bf16 = bf16(0x4055u16);
    /// √2
    pub const SQRT_2: bf16 = bf16(0x3FB5u16);
}
impl From<bf16> for f32 {
#[inline]
fn from(x: bf16) -> f32 {
x.to_f32()
}
}
impl From<bf16> for f64 {
#[inline]
fn from(x: bf16) -> f64 {
x.to_f64()
}
}
impl From<i8> for bf16 {
    /// Converts by widening through `f32` (exact: |i8| fits in 8
    /// significand bits).
    #[inline]
    fn from(x: i8) -> bf16 {
        bf16::from_f32(x as f32)
    }
}
impl From<u8> for bf16 {
    /// Converts by widening through `f32` (exact: u8 fits in 8
    /// significand bits).
    #[inline]
    fn from(x: u8) -> bf16 {
        bf16::from_f32(x as f32)
    }
}
impl PartialEq for bf16 {
    /// IEEE 754 equality on the bit representation.
    fn eq(&self, other: &bf16) -> bool {
        // NaN is unequal to everything, including itself.
        if self.is_nan() || other.is_nan() {
            return false;
        }
        // Identical bits, or +0.0 == -0.0 (no magnitude bits set in either).
        self.0 == other.0 || (self.0 | other.0) & 0x7FFFu16 == 0
    }
}
impl PartialOrd for bf16 {
    // bf16 is a sign–magnitude format: raw u16 payloads order correctly
    // only when both operands have the same sign, and negative values order
    // in reverse of their bit patterns. Each method below therefore splits
    // on the two sign bits; the `(self.0 | other.0) & 0x7FFF == 0` checks
    // make +0.0 and -0.0 compare equal despite differing sign bits.
    /// IEEE 754 partial order: `None` when either operand is NaN.
    fn partial_cmp(&self, other: &bf16) -> Option<Ordering> {
        if self.is_nan() || other.is_nan() {
            None
        } else {
            let neg = self.0 & 0x8000u16 != 0;
            let other_neg = other.0 & 0x8000u16 != 0;
            match (neg, other_neg) {
                // Both non-negative: bits order directly.
                (false, false) => Some(self.0.cmp(&other.0)),
                (false, true) => {
                    // Positive vs negative, except both may be zero.
                    if (self.0 | other.0) & 0x7FFFu16 == 0 {
                        Some(Ordering::Equal)
                    } else {
                        Some(Ordering::Greater)
                    }
                }
                (true, false) => {
                    if (self.0 | other.0) & 0x7FFFu16 == 0 {
                        Some(Ordering::Equal)
                    } else {
                        Some(Ordering::Less)
                    }
                }
                // Both negative: bits order in reverse.
                (true, true) => Some(other.0.cmp(&self.0)),
            }
        }
    }
    fn lt(&self, other: &bf16) -> bool {
        if self.is_nan() || other.is_nan() {
            false
        } else {
            let neg = self.0 & 0x8000u16 != 0;
            let other_neg = other.0 & 0x8000u16 != 0;
            match (neg, other_neg) {
                (false, false) => self.0 < other.0,
                (false, true) => false,
                // Negative < non-negative unless both are zeros.
                (true, false) => (self.0 | other.0) & 0x7FFFu16 != 0,
                (true, true) => self.0 > other.0,
            }
        }
    }
    fn le(&self, other: &bf16) -> bool {
        if self.is_nan() || other.is_nan() {
            false
        } else {
            let neg = self.0 & 0x8000u16 != 0;
            let other_neg = other.0 & 0x8000u16 != 0;
            match (neg, other_neg) {
                (false, false) => self.0 <= other.0,
                // Non-negative <= negative only when both are zeros.
                (false, true) => (self.0 | other.0) & 0x7FFFu16 == 0,
                (true, false) => true,
                (true, true) => self.0 >= other.0,
            }
        }
    }
    fn gt(&self, other: &bf16) -> bool {
        if self.is_nan() || other.is_nan() {
            false
        } else {
            let neg = self.0 & 0x8000u16 != 0;
            let other_neg = other.0 & 0x8000u16 != 0;
            match (neg, other_neg) {
                (false, false) => self.0 > other.0,
                // Non-negative > negative unless both are zeros.
                (false, true) => (self.0 | other.0) & 0x7FFFu16 != 0,
                (true, false) => false,
                (true, true) => self.0 < other.0,
            }
        }
    }
    fn ge(&self, other: &bf16) -> bool {
        if self.is_nan() || other.is_nan() {
            false
        } else {
            let neg = self.0 & 0x8000u16 != 0;
            let other_neg = other.0 & 0x8000u16 != 0;
            match (neg, other_neg) {
                (false, false) => self.0 >= other.0,
                (false, true) => true,
                // Negative >= non-negative only when both are zeros.
                (true, false) => (self.0 | other.0) & 0x7FFFu16 == 0,
                (true, true) => self.0 <= other.0,
            }
        }
    }
}
#[cfg(not(target_arch = "spirv"))]
impl FromStr for bf16 {
    type Err = ParseFloatError;
    /// Parses by delegating to `f32` parsing, then narrowing to bf16.
    fn from_str(src: &str) -> Result<bf16, ParseFloatError> {
        let wide = f32::from_str(src)?;
        Ok(bf16::from_f32(wide))
    }
}
// Numeric formatting goes through the widened f32 value; the radix
// formatters (Binary/Octal/Hex) expose the raw bit pattern instead.
#[cfg(not(target_arch = "spirv"))]
impl Debug for bf16 {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        let wide = self.to_f32();
        write!(f, "{:?}", wide)
    }
}
#[cfg(not(target_arch = "spirv"))]
impl Display for bf16 {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        let wide = self.to_f32();
        write!(f, "{}", wide)
    }
}
#[cfg(not(target_arch = "spirv"))]
impl LowerExp for bf16 {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        let wide = self.to_f32();
        write!(f, "{:e}", wide)
    }
}
#[cfg(not(target_arch = "spirv"))]
impl UpperExp for bf16 {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        let wide = self.to_f32();
        write!(f, "{:E}", wide)
    }
}
#[cfg(not(target_arch = "spirv"))]
impl Binary for bf16 {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        let bits = self.0;
        write!(f, "{:b}", bits)
    }
}
#[cfg(not(target_arch = "spirv"))]
impl Octal for bf16 {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        let bits = self.0;
        write!(f, "{:o}", bits)
    }
}
#[cfg(not(target_arch = "spirv"))]
impl LowerHex for bf16 {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        let bits = self.0;
        write!(f, "{:x}", bits)
    }
}
#[cfg(not(target_arch = "spirv"))]
impl UpperHex for bf16 {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        let bits = self.0;
        write!(f, "{:X}", bits)
    }
}
impl Neg for bf16 {
type Output = Self;
fn neg(self) -> Self::Output {
Self(self.0 ^ 0x8000)
}
}
impl Neg for &bf16 {
type Output = <bf16 as Neg>::Output;
#[inline]
fn neg(self) -> Self::Output {
Neg::neg(*self)
}
}
impl Add for bf16 {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Self::from_f32(Self::to_f32(self) + Self::to_f32(rhs))
}
}
impl Add<&bf16> for bf16 {
type Output = <bf16 as Add<bf16>>::Output;
#[inline]
fn add(self, rhs: &bf16) -> Self::Output {
self.add(*rhs)
}
}
impl Add<&bf16> for &bf16 {
type Output = <bf16 as Add<bf16>>::Output;
#[inline]
fn add(self, rhs: &bf16) -> Self::Output {
(*self).add(*rhs)
}
}
impl Add<bf16> for &bf16 {
type Output = <bf16 as Add<bf16>>::Output;
#[inline]
fn add(self, rhs: bf16) -> Self::Output {
(*self).add(rhs)
}
}
impl AddAssign for bf16 {
#[inline]
fn add_assign(&mut self, rhs: Self) {
*self = (*self).add(rhs);
}
}
impl AddAssign<&bf16> for bf16 {
#[inline]
fn add_assign(&mut self, rhs: &bf16) {
*self = (*self).add(rhs);
}
}
impl Sub for bf16 {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
Self::from_f32(Self::to_f32(self) - Self::to_f32(rhs))
}
}
impl Sub<&bf16> for bf16 {
type Output = <bf16 as Sub<bf16>>::Output;
#[inline]
fn sub(self, rhs: &bf16) -> Self::Output {
self.sub(*rhs)
}
}
impl Sub<&bf16> for &bf16 {
type Output = <bf16 as Sub<bf16>>::Output;
#[inline]
fn sub(self, rhs: &bf16) -> Self::Output {
(*self).sub(*rhs)
}
}
impl Sub<bf16> for &bf16 {
type Output = <bf16 as Sub<bf16>>::Output;
#[inline]
fn sub(self, rhs: bf16) -> Self::Output {
(*self).sub(rhs)
}
}
impl SubAssign for bf16 {
#[inline]
fn sub_assign(&mut self, rhs: Self) {
*self = (*self).sub(rhs);
}
}
impl SubAssign<&bf16> for bf16 {
#[inline]
fn sub_assign(&mut self, rhs: &bf16) {
*self = (*self).sub(rhs);
}
}
impl Mul for bf16 {
type Output = Self;
fn mul(self, rhs: Self) -> Self::Output {
Self::from_f32(Self::to_f32(self) * Self::to_f32(rhs))
}
}
impl Mul<&bf16> for bf16 {
type Output = <bf16 as Mul<bf16>>::Output;
#[inline]
fn mul(self, rhs: &bf16) -> Self::Output {
self.mul(*rhs)
}
}
impl Mul<&bf16> for &bf16 {
type Output = <bf16 as Mul<bf16>>::Output;
#[inline]
fn mul(self, rhs: &bf16) -> Self::Output {
(*self).mul(*rhs)
}
}
impl Mul<bf16> for &bf16 {
type Output = <bf16 as Mul<bf16>>::Output;
#[inline]
fn mul(self, rhs: bf16) -> Self::Output {
(*self).mul(rhs)
}
}
impl MulAssign for bf16 {
#[inline]
fn mul_assign(&mut self, rhs: Self) {
*self = (*self).mul(rhs);
}
}
impl MulAssign<&bf16> for bf16 {
#[inline]
fn mul_assign(&mut self, rhs: &bf16) {
*self = (*self).mul(rhs);
}
}
impl Div for bf16 {
type Output = Self;
fn div(self, rhs: Self) -> Self::Output {
Self::from_f32(Self::to_f32(self) / Self::to_f32(rhs))
}
}
impl Div<&bf16> for bf16 {
type Output = <bf16 as Div<bf16>>::Output;
#[inline]
fn div(self, rhs: &bf16) -> Self::Output {
self.div(*rhs)
}
}
impl Div<&bf16> for &bf16 {
type Output = <bf16 as Div<bf16>>::Output;
#[inline]
fn div(self, rhs: &bf16) -> Self::Output {
(*self).div(*rhs)
}
}
impl Div<bf16> for &bf16 {
type Output = <bf16 as Div<bf16>>::Output;
#[inline]
fn div(self, rhs: bf16) -> Self::Output {
(*self).div(rhs)
}
}
impl DivAssign for bf16 {
#[inline]
fn div_assign(&mut self, rhs: Self) {
*self = (*self).div(rhs);
}
}
impl DivAssign<&bf16> for bf16 {
#[inline]
fn div_assign(&mut self, rhs: &bf16) {
*self = (*self).div(rhs);
}
}
impl Rem for bf16 {
type Output = Self;
fn rem(self, rhs: Self) -> Self::Output {
Self::from_f32(Self::to_f32(self) % Self::to_f32(rhs))
}
}
impl Rem<&bf16> for bf16 {
type Output = <bf16 as Rem<bf16>>::Output;
#[inline]
fn rem(self, rhs: &bf16) -> Self::Output {
self.rem(*rhs)
}
}
impl Rem<&bf16> for &bf16 {
type Output = <bf16 as Rem<bf16>>::Output;
#[inline]
fn rem(self, rhs: &bf16) -> Self::Output {
(*self).rem(*rhs)
}
}
impl Rem<bf16> for &bf16 {
type Output = <bf16 as Rem<bf16>>::Output;
#[inline]
fn rem(self, rhs: bf16) -> Self::Output {
(*self).rem(rhs)
}
}
impl RemAssign for bf16 {
#[inline]
fn rem_assign(&mut self, rhs: Self) {
*self = (*self).rem(rhs);
}
}
impl RemAssign<&bf16> for bf16 {
#[inline]
fn rem_assign(&mut self, rhs: &bf16) {
*self = (*self).rem(rhs);
}
}
impl Product for bf16 {
    /// Accumulates the product in `f32` precision and rounds to bf16 once
    /// at the end.
    #[inline]
    fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
        bf16::from_f32(iter.fold(1.0f32, |acc, x| acc * x.to_f32()))
    }
}
impl<'a> Product<&'a bf16> for bf16 {
    /// Accumulates the product in `f32` precision and rounds to bf16 once
    /// at the end.
    #[inline]
    fn product<I: Iterator<Item = &'a bf16>>(iter: I) -> Self {
        bf16::from_f32(iter.fold(1.0f32, |acc, x| acc * x.to_f32()))
    }
}
impl Sum for bf16 {
    /// Sums in `f32` precision and rounds to bf16 once at the end.
    #[inline]
    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
        bf16::from_f32(iter.map(|f| f.to_f32()).sum())
    }
}
impl<'a> Sum<&'a bf16> for bf16 {
    /// Sums in `f32` precision and rounds to bf16 once at the end.
    ///
    /// Bug fix: this impl previously called `.product()` instead of
    /// `.sum()`, so summing an iterator of `&bf16` returned the product
    /// of the values (and 1.0 instead of 0.0 for an empty iterator).
    #[inline]
    fn sum<I: Iterator<Item = &'a bf16>>(iter: I) -> Self {
        bf16::from_f32(iter.map(|f| f.to_f32()).sum())
    }
}
// Private visitor used by the `Deserialize` impl below.
#[cfg(feature = "serde")]
struct Visitor;
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for bf16 {
    // Mirrors the derived `Serialize`: the canonical wire form is a newtype
    // struct wrapping the raw u16 bits, but the visitor also accepts floats
    // and numeric strings (for use with `serialize_as_f32` /
    // `serialize_as_string`).
    fn deserialize<D>(deserializer: D) -> Result<bf16, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        deserializer.deserialize_newtype_struct("bf16", Visitor)
    }
}
#[cfg(feature = "serde")]
impl<'de> serde::de::Visitor<'de> for Visitor {
    type Value = bf16;
    fn expecting(&self, formatter: &mut alloc::fmt::Formatter) -> alloc::fmt::Result {
        write!(formatter, "tuple struct bf16")
    }
    // Canonical form: the raw bit pattern as a u16.
    fn visit_newtype_struct<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        Ok(bf16(<u16 as Deserialize>::deserialize(deserializer)?))
    }
    // String form: parse via the `FromStr` impl (f32 parse, then narrow).
    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        v.parse().map_err(|_| {
            serde::de::Error::invalid_value(serde::de::Unexpected::Str(v), &"a float string")
        })
    }
    // Float forms: narrow with rounding.
    fn visit_f32<E>(self, v: f32) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        Ok(bf16::from_f32(v))
    }
    fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        Ok(bf16::from_f64(v))
    }
}
#[allow(
    clippy::cognitive_complexity,
    clippy::float_cmp,
    clippy::neg_cmp_op_on_partial_ord
)]
#[cfg(test)]
mod test {
    use super::*;
    use core::cmp::Ordering;
    #[cfg(feature = "num-traits")]
    use num_traits::{AsPrimitive, FromPrimitive, ToPrimitive};
    use quickcheck_macros::quickcheck;

    // num-traits `AsPrimitive` conversions round-trip through bf16.
    #[cfg(feature = "num-traits")]
    #[test]
    fn as_primitive() {
        let two = bf16::from_f32(2.0);
        assert_eq!(<i32 as AsPrimitive<bf16>>::as_(2), two);
        assert_eq!(<bf16 as AsPrimitive<i32>>::as_(two), 2);
        assert_eq!(<f32 as AsPrimitive<bf16>>::as_(2.0), two);
        assert_eq!(<bf16 as AsPrimitive<f32>>::as_(two), 2.0);
        assert_eq!(<f64 as AsPrimitive<bf16>>::as_(2.0), two);
        assert_eq!(<bf16 as AsPrimitive<f64>>::as_(two), 2.0);
    }
    // num-traits `ToPrimitive` conversions from bf16.
    #[cfg(feature = "num-traits")]
    #[test]
    fn to_primitive() {
        let two = bf16::from_f32(2.0);
        assert_eq!(ToPrimitive::to_i32(&two).unwrap(), 2i32);
        assert_eq!(ToPrimitive::to_f32(&two).unwrap(), 2.0f32);
        assert_eq!(ToPrimitive::to_f64(&two).unwrap(), 2.0f64);
    }
    // num-traits `FromPrimitive` conversions into bf16.
    #[cfg(feature = "num-traits")]
    #[test]
    fn from_primitive() {
        let two = bf16::from_f32(2.0);
        assert_eq!(<bf16 as FromPrimitive>::from_i32(2).unwrap(), two);
        assert_eq!(<bf16 as FromPrimitive>::from_f32(2.0).unwrap(), two);
        assert_eq!(<bf16 as FromPrimitive>::from_f64(2.0).unwrap(), two);
    }
    // Every associated constant must equal the value obtained by rounding
    // the corresponding f32 constant — pins the hard-coded bit patterns.
    #[test]
    fn test_bf16_consts_from_f32() {
        let one = bf16::from_f32(1.0);
        let zero = bf16::from_f32(0.0);
        let neg_zero = bf16::from_f32(-0.0);
        let neg_one = bf16::from_f32(-1.0);
        let inf = bf16::from_f32(core::f32::INFINITY);
        let neg_inf = bf16::from_f32(core::f32::NEG_INFINITY);
        let nan = bf16::from_f32(core::f32::NAN);
        assert_eq!(bf16::ONE, one);
        assert_eq!(bf16::ZERO, zero);
        assert!(zero.is_sign_positive());
        assert_eq!(bf16::NEG_ZERO, neg_zero);
        assert!(neg_zero.is_sign_negative());
        assert_eq!(bf16::NEG_ONE, neg_one);
        assert!(neg_one.is_sign_negative());
        assert_eq!(bf16::INFINITY, inf);
        assert_eq!(bf16::NEG_INFINITY, neg_inf);
        assert!(nan.is_nan());
        assert!(bf16::NAN.is_nan());
        let e = bf16::from_f32(core::f32::consts::E);
        let pi = bf16::from_f32(core::f32::consts::PI);
        let frac_1_pi = bf16::from_f32(core::f32::consts::FRAC_1_PI);
        let frac_1_sqrt_2 = bf16::from_f32(core::f32::consts::FRAC_1_SQRT_2);
        let frac_2_pi = bf16::from_f32(core::f32::consts::FRAC_2_PI);
        let frac_2_sqrt_pi = bf16::from_f32(core::f32::consts::FRAC_2_SQRT_PI);
        let frac_pi_2 = bf16::from_f32(core::f32::consts::FRAC_PI_2);
        let frac_pi_3 = bf16::from_f32(core::f32::consts::FRAC_PI_3);
        let frac_pi_4 = bf16::from_f32(core::f32::consts::FRAC_PI_4);
        let frac_pi_6 = bf16::from_f32(core::f32::consts::FRAC_PI_6);
        let frac_pi_8 = bf16::from_f32(core::f32::consts::FRAC_PI_8);
        let ln_10 = bf16::from_f32(core::f32::consts::LN_10);
        let ln_2 = bf16::from_f32(core::f32::consts::LN_2);
        let log10_e = bf16::from_f32(core::f32::consts::LOG10_E);
        // core::f32::consts::LOG10_2 may not exist on older toolchains;
        // computed instead.
        let log10_2 = bf16::from_f32(2f32.log10());
        let log2_e = bf16::from_f32(core::f32::consts::LOG2_E);
        let log2_10 = bf16::from_f32(10f32.log2());
        let sqrt_2 = bf16::from_f32(core::f32::consts::SQRT_2);
        assert_eq!(bf16::E, e);
        assert_eq!(bf16::PI, pi);
        assert_eq!(bf16::FRAC_1_PI, frac_1_pi);
        assert_eq!(bf16::FRAC_1_SQRT_2, frac_1_sqrt_2);
        assert_eq!(bf16::FRAC_2_PI, frac_2_pi);
        assert_eq!(bf16::FRAC_2_SQRT_PI, frac_2_sqrt_pi);
        assert_eq!(bf16::FRAC_PI_2, frac_pi_2);
        assert_eq!(bf16::FRAC_PI_3, frac_pi_3);
        assert_eq!(bf16::FRAC_PI_4, frac_pi_4);
        assert_eq!(bf16::FRAC_PI_6, frac_pi_6);
        assert_eq!(bf16::FRAC_PI_8, frac_pi_8);
        assert_eq!(bf16::LN_10, ln_10);
        assert_eq!(bf16::LN_2, ln_2);
        assert_eq!(bf16::LOG10_E, log10_e);
        assert_eq!(bf16::LOG10_2, log10_2);
        assert_eq!(bf16::LOG2_E, log2_e);
        assert_eq!(bf16::LOG2_10, log2_10);
        assert_eq!(bf16::SQRT_2, sqrt_2);
    }
    // Same as above, but narrowing from f64: both paths must agree on the
    // constant bit patterns.
    #[test]
    fn test_bf16_consts_from_f64() {
        let one = bf16::from_f64(1.0);
        let zero = bf16::from_f64(0.0);
        let neg_zero = bf16::from_f64(-0.0);
        let inf = bf16::from_f64(core::f64::INFINITY);
        let neg_inf = bf16::from_f64(core::f64::NEG_INFINITY);
        let nan = bf16::from_f64(core::f64::NAN);
        assert_eq!(bf16::ONE, one);
        assert_eq!(bf16::ZERO, zero);
        assert_eq!(bf16::NEG_ZERO, neg_zero);
        assert_eq!(bf16::INFINITY, inf);
        assert_eq!(bf16::NEG_INFINITY, neg_inf);
        assert!(nan.is_nan());
        assert!(bf16::NAN.is_nan());
        let e = bf16::from_f64(core::f64::consts::E);
        let pi = bf16::from_f64(core::f64::consts::PI);
        let frac_1_pi = bf16::from_f64(core::f64::consts::FRAC_1_PI);
        let frac_1_sqrt_2 = bf16::from_f64(core::f64::consts::FRAC_1_SQRT_2);
        let frac_2_pi = bf16::from_f64(core::f64::consts::FRAC_2_PI);
        let frac_2_sqrt_pi = bf16::from_f64(core::f64::consts::FRAC_2_SQRT_PI);
        let frac_pi_2 = bf16::from_f64(core::f64::consts::FRAC_PI_2);
        let frac_pi_3 = bf16::from_f64(core::f64::consts::FRAC_PI_3);
        let frac_pi_4 = bf16::from_f64(core::f64::consts::FRAC_PI_4);
        let frac_pi_6 = bf16::from_f64(core::f64::consts::FRAC_PI_6);
        let frac_pi_8 = bf16::from_f64(core::f64::consts::FRAC_PI_8);
        let ln_10 = bf16::from_f64(core::f64::consts::LN_10);
        let ln_2 = bf16::from_f64(core::f64::consts::LN_2);
        let log10_e = bf16::from_f64(core::f64::consts::LOG10_E);
        let log10_2 = bf16::from_f64(2f64.log10());
        let log2_e = bf16::from_f64(core::f64::consts::LOG2_E);
        let log2_10 = bf16::from_f64(10f64.log2());
        let sqrt_2 = bf16::from_f64(core::f64::consts::SQRT_2);
        assert_eq!(bf16::E, e);
        assert_eq!(bf16::PI, pi);
        assert_eq!(bf16::FRAC_1_PI, frac_1_pi);
        assert_eq!(bf16::FRAC_1_SQRT_2, frac_1_sqrt_2);
        assert_eq!(bf16::FRAC_2_PI, frac_2_pi);
        assert_eq!(bf16::FRAC_2_SQRT_PI, frac_2_sqrt_pi);
        assert_eq!(bf16::FRAC_PI_2, frac_pi_2);
        assert_eq!(bf16::FRAC_PI_3, frac_pi_3);
        assert_eq!(bf16::FRAC_PI_4, frac_pi_4);
        assert_eq!(bf16::FRAC_PI_6, frac_pi_6);
        assert_eq!(bf16::FRAC_PI_8, frac_pi_8);
        assert_eq!(bf16::LN_10, ln_10);
        assert_eq!(bf16::LN_2, ln_2);
        assert_eq!(bf16::LOG10_E, log10_e);
        assert_eq!(bf16::LOG10_2, log10_2);
        assert_eq!(bf16::LOG2_E, log2_e);
        assert_eq!(bf16::LOG2_10, log2_10);
        assert_eq!(bf16::SQRT_2, sqrt_2);
    }
    // Narrowing conversions must keep NaN-ness and the sign of NaNs.
    #[test]
    fn test_nan_conversion_to_smaller() {
        let nan64 = f64::from_bits(0x7FF0_0000_0000_0001u64);
        let neg_nan64 = f64::from_bits(0xFFF0_0000_0000_0001u64);
        let nan32 = f32::from_bits(0x7F80_0001u32);
        let neg_nan32 = f32::from_bits(0xFF80_0001u32);
        let nan32_from_64 = nan64 as f32;
        let neg_nan32_from_64 = neg_nan64 as f32;
        let nan16_from_64 = bf16::from_f64(nan64);
        let neg_nan16_from_64 = bf16::from_f64(neg_nan64);
        let nan16_from_32 = bf16::from_f32(nan32);
        let neg_nan16_from_32 = bf16::from_f32(neg_nan32);
        assert!(nan64.is_nan() && nan64.is_sign_positive());
        assert!(neg_nan64.is_nan() && neg_nan64.is_sign_negative());
        assert!(nan32.is_nan() && nan32.is_sign_positive());
        assert!(neg_nan32.is_nan() && neg_nan32.is_sign_negative());
        assert!(nan32_from_64.is_nan() && nan32_from_64.is_sign_positive());
        assert!(neg_nan32_from_64.is_nan() && neg_nan32_from_64.is_sign_negative());
        assert!(nan16_from_64.is_nan() && nan16_from_64.is_sign_positive());
        assert!(neg_nan16_from_64.is_nan() && neg_nan16_from_64.is_sign_negative());
        assert!(nan16_from_32.is_nan() && nan16_from_32.is_sign_positive());
        assert!(neg_nan16_from_32.is_nan() && neg_nan16_from_32.is_sign_negative());
    }
    // Widening conversions must keep NaN-ness and the sign of NaNs.
    #[test]
    fn test_nan_conversion_to_larger() {
        let nan16 = bf16::from_bits(0x7F81u16);
        let neg_nan16 = bf16::from_bits(0xFF81u16);
        let nan32 = f32::from_bits(0x7F80_0001u32);
        let neg_nan32 = f32::from_bits(0xFF80_0001u32);
        let nan32_from_16 = f32::from(nan16);
        let neg_nan32_from_16 = f32::from(neg_nan16);
        let nan64_from_16 = f64::from(nan16);
        let neg_nan64_from_16 = f64::from(neg_nan16);
        let nan64_from_32 = f64::from(nan32);
        let neg_nan64_from_32 = f64::from(neg_nan32);
        assert!(nan16.is_nan() && nan16.is_sign_positive());
        assert!(neg_nan16.is_nan() && neg_nan16.is_sign_negative());
        assert!(nan32.is_nan() && nan32.is_sign_positive());
        assert!(neg_nan32.is_nan() && neg_nan32.is_sign_negative());
        assert!(nan32_from_16.is_nan() && nan32_from_16.is_sign_positive());
        assert!(neg_nan32_from_16.is_nan() && neg_nan32_from_16.is_sign_negative());
        assert!(nan64_from_16.is_nan() && nan64_from_16.is_sign_positive());
        assert!(neg_nan64_from_16.is_nan() && neg_nan64_from_16.is_sign_negative());
        assert!(nan64_from_32.is_nan() && nan64_from_32.is_sign_positive());
        assert!(neg_nan64_from_32.is_nan() && neg_nan64_from_32.is_sign_negative());
    }
    // f32 round-trips, including the smallest subnormal (2^-133, whose f32
    // bit pattern is 0x0001_0000).
    #[test]
    fn test_bf16_to_f32() {
        let f = bf16::from_f32(7.0);
        assert_eq!(f.to_f32(), 7.0f32);
        let f = bf16::from_f32(7.1);
        let diff = (f.to_f32() - 7.1f32).abs();
        assert!(diff <= 4.0 * bf16::EPSILON.to_f32());
        let tiny32 = f32::from_bits(0x0001_0000u32);
        assert_eq!(bf16::from_bits(0x0001).to_f32(), tiny32);
        assert_eq!(bf16::from_bits(0x0005).to_f32(), 5.0 * tiny32);
        assert_eq!(bf16::from_bits(0x0001), bf16::from_f32(tiny32));
        assert_eq!(bf16::from_bits(0x0005), bf16::from_f32(5.0 * tiny32));
    }
    // f64 round-trips, including subnormals.
    #[test]
    fn test_bf16_to_f64() {
        let f = bf16::from_f64(7.0);
        assert_eq!(f.to_f64(), 7.0f64);
        let f = bf16::from_f64(7.1);
        let diff = (f.to_f64() - 7.1f64).abs();
        assert!(diff <= 4.0 * bf16::EPSILON.to_f64());
        let tiny64 = 2.0f64.powi(-133);
        assert_eq!(bf16::from_bits(0x0001).to_f64(), tiny64);
        assert_eq!(bf16::from_bits(0x0005).to_f64(), 5.0 * tiny64);
        assert_eq!(bf16::from_bits(0x0001), bf16::from_f64(tiny64));
        assert_eq!(bf16::from_bits(0x0005), bf16::from_f64(5.0 * tiny64));
    }
    // Exercises the hand-written PartialEq/PartialOrd impls, in particular
    // the +0.0 == -0.0 special case and mixed-sign comparisons.
    #[test]
    fn test_comparisons() {
        let zero = bf16::from_f64(0.0);
        let one = bf16::from_f64(1.0);
        let neg_zero = bf16::from_f64(-0.0);
        let neg_one = bf16::from_f64(-1.0);
        assert_eq!(zero.partial_cmp(&neg_zero), Some(Ordering::Equal));
        assert_eq!(neg_zero.partial_cmp(&zero), Some(Ordering::Equal));
        assert!(zero == neg_zero);
        assert!(neg_zero == zero);
        assert!(!(zero != neg_zero));
        assert!(!(neg_zero != zero));
        assert!(!(zero < neg_zero));
        assert!(!(neg_zero < zero));
        assert!(zero <= neg_zero);
        assert!(neg_zero <= zero);
        assert!(!(zero > neg_zero));
        assert!(!(neg_zero > zero));
        assert!(zero >= neg_zero);
        assert!(neg_zero >= zero);
        assert_eq!(one.partial_cmp(&neg_zero), Some(Ordering::Greater));
        assert_eq!(neg_zero.partial_cmp(&one), Some(Ordering::Less));
        assert!(!(one == neg_zero));
        assert!(!(neg_zero == one));
        assert!(one != neg_zero);
        assert!(neg_zero != one);
        assert!(!(one < neg_zero));
        assert!(neg_zero < one);
        assert!(!(one <= neg_zero));
        assert!(neg_zero <= one);
        assert!(one > neg_zero);
        assert!(!(neg_zero > one));
        assert!(one >= neg_zero);
        assert!(!(neg_zero >= one));
        assert_eq!(one.partial_cmp(&neg_one), Some(Ordering::Greater));
        assert_eq!(neg_one.partial_cmp(&one), Some(Ordering::Less));
        assert!(!(one == neg_one));
        assert!(!(neg_one == one));
        assert!(one != neg_one);
        assert!(neg_one != one);
        assert!(!(one < neg_one));
        assert!(neg_one < one);
        assert!(!(one <= neg_one));
        assert!(neg_one <= one);
        assert!(one > neg_one);
        assert!(!(neg_one > one));
        assert!(one >= neg_one);
        assert!(!(neg_one >= one));
    }
    // Narrowing from f32 must round to nearest, ties to even — checked at
    // the subnormal boundary and around 250..253 (where bf16 spacing is 2,
    // but 251 is still representable).
    #[test]
    #[allow(clippy::erasing_op, clippy::identity_op)]
    fn round_to_even_f32() {
        let min_sub = bf16::from_bits(1);
        let min_sub_f = (-133f32).exp2();
        assert_eq!(bf16::from_f32(min_sub_f).to_bits(), min_sub.to_bits());
        assert_eq!(f32::from(min_sub).to_bits(), min_sub_f.to_bits());
        assert_eq!(
            bf16::from_f32(min_sub_f * 0.49).to_bits(),
            min_sub.to_bits() * 0
        );
        assert_eq!(
            bf16::from_f32(min_sub_f * 0.50).to_bits(),
            min_sub.to_bits() * 0
        );
        assert_eq!(
            bf16::from_f32(min_sub_f * 0.51).to_bits(),
            min_sub.to_bits() * 1
        );
        assert_eq!(
            bf16::from_f32(min_sub_f * 1.49).to_bits(),
            min_sub.to_bits() * 1
        );
        assert_eq!(
            bf16::from_f32(min_sub_f * 1.50).to_bits(),
            min_sub.to_bits() * 2
        );
        assert_eq!(
            bf16::from_f32(min_sub_f * 1.51).to_bits(),
            min_sub.to_bits() * 2
        );
        assert_eq!(
            bf16::from_f32(min_sub_f * 2.49).to_bits(),
            min_sub.to_bits() * 2
        );
        assert_eq!(
            bf16::from_f32(min_sub_f * 2.50).to_bits(),
            min_sub.to_bits() * 2
        );
        assert_eq!(
            bf16::from_f32(min_sub_f * 2.51).to_bits(),
            min_sub.to_bits() * 3
        );
        assert_eq!(
            bf16::from_f32(250.49f32).to_bits(),
            bf16::from_f32(250.0).to_bits()
        );
        assert_eq!(
            bf16::from_f32(250.50f32).to_bits(),
            bf16::from_f32(250.0).to_bits()
        );
        assert_eq!(
            bf16::from_f32(250.51f32).to_bits(),
            bf16::from_f32(251.0).to_bits()
        );
        assert_eq!(
            bf16::from_f32(251.49f32).to_bits(),
            bf16::from_f32(251.0).to_bits()
        );
        assert_eq!(
            bf16::from_f32(251.50f32).to_bits(),
            bf16::from_f32(252.0).to_bits()
        );
        assert_eq!(
            bf16::from_f32(251.51f32).to_bits(),
            bf16::from_f32(252.0).to_bits()
        );
        assert_eq!(
            bf16::from_f32(252.49f32).to_bits(),
            bf16::from_f32(252.0).to_bits()
        );
        assert_eq!(
            bf16::from_f32(252.50f32).to_bits(),
            bf16::from_f32(252.0).to_bits()
        );
        assert_eq!(
            bf16::from_f32(252.51f32).to_bits(),
            bf16::from_f32(253.0).to_bits()
        );
    }
    // Same round-to-nearest-even checks for the f64 narrowing path.
    #[test]
    #[allow(clippy::erasing_op, clippy::identity_op)]
    fn round_to_even_f64() {
        let min_sub = bf16::from_bits(1);
        let min_sub_f = (-133f64).exp2();
        assert_eq!(bf16::from_f64(min_sub_f).to_bits(), min_sub.to_bits());
        assert_eq!(f64::from(min_sub).to_bits(), min_sub_f.to_bits());
        assert_eq!(
            bf16::from_f64(min_sub_f * 0.49).to_bits(),
            min_sub.to_bits() * 0
        );
        assert_eq!(
            bf16::from_f64(min_sub_f * 0.50).to_bits(),
            min_sub.to_bits() * 0
        );
        assert_eq!(
            bf16::from_f64(min_sub_f * 0.51).to_bits(),
            min_sub.to_bits() * 1
        );
        assert_eq!(
            bf16::from_f64(min_sub_f * 1.49).to_bits(),
            min_sub.to_bits() * 1
        );
        assert_eq!(
            bf16::from_f64(min_sub_f * 1.50).to_bits(),
            min_sub.to_bits() * 2
        );
        assert_eq!(
            bf16::from_f64(min_sub_f * 1.51).to_bits(),
            min_sub.to_bits() * 2
        );
        assert_eq!(
            bf16::from_f64(min_sub_f * 2.49).to_bits(),
            min_sub.to_bits() * 2
        );
        assert_eq!(
            bf16::from_f64(min_sub_f * 2.50).to_bits(),
            min_sub.to_bits() * 2
        );
        assert_eq!(
            bf16::from_f64(min_sub_f * 2.51).to_bits(),
            min_sub.to_bits() * 3
        );
        assert_eq!(
            bf16::from_f64(250.49f64).to_bits(),
            bf16::from_f64(250.0).to_bits()
        );
        assert_eq!(
            bf16::from_f64(250.50f64).to_bits(),
            bf16::from_f64(250.0).to_bits()
        );
        assert_eq!(
            bf16::from_f64(250.51f64).to_bits(),
            bf16::from_f64(251.0).to_bits()
        );
        assert_eq!(
            bf16::from_f64(251.49f64).to_bits(),
            bf16::from_f64(251.0).to_bits()
        );
        assert_eq!(
            bf16::from_f64(251.50f64).to_bits(),
            bf16::from_f64(252.0).to_bits()
        );
        assert_eq!(
            bf16::from_f64(251.51f64).to_bits(),
            bf16::from_f64(252.0).to_bits()
        );
        assert_eq!(
            bf16::from_f64(252.49f64).to_bits(),
            bf16::from_f64(252.0).to_bits()
        );
        assert_eq!(
            bf16::from_f64(252.50f64).to_bits(),
            bf16::from_f64(252.0).to_bits()
        );
        assert_eq!(
            bf16::from_f64(252.51f64).to_bits(),
            bf16::from_f64(253.0).to_bits()
        );
    }
    // Arbitrary bf16 = arbitrary bit pattern (covers NaNs/subnormals too).
    impl quickcheck::Arbitrary for bf16 {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            bf16(u16::arbitrary(g))
        }
    }
    // bf16 -> f32 -> bf16 must be the identity (modulo NaN payload, where
    // only NaN-ness and sign are required to survive).
    #[quickcheck]
    fn qc_roundtrip_bf16_f32_is_identity(f: bf16) -> bool {
        let roundtrip = bf16::from_f32(f.to_f32());
        if f.is_nan() {
            roundtrip.is_nan() && f.is_sign_negative() == roundtrip.is_sign_negative()
        } else {
            f.0 == roundtrip.0
        }
    }
    // bf16 -> f64 -> bf16 must be the identity (same NaN caveat).
    #[quickcheck]
    fn qc_roundtrip_bf16_f64_is_identity(f: bf16) -> bool {
        let roundtrip = bf16::from_f64(f.to_f64());
        if f.is_nan() {
            roundtrip.is_nan() && f.is_sign_negative() == roundtrip.is_sign_negative()
        } else {
            f.0 == roundtrip.0
        }
    }
}