use crate::common::{dd_fmla, dyad_fmla, f_fmla};
use crate::double_double::DoubleDouble;
use crate::shared_eval::poly_dd_3;
use crate::tangent::atan::{ATAN_CIRCLE, ATAN_REDUCE};
/// 1/π in double-double precision. `DoubleDouble::new` takes (lo, hi):
/// hi = 0x1.45f306dc9c883p-2 ≈ 0.3183098861837907, lo is the rounding tail
/// of 1/π beyond the hi word.
const ONE_OVER_PI_DD: DoubleDouble = DoubleDouble::new(
    f64::from_bits(0xbc76b01ec5417056),
    f64::from_bits(0x3fd45f306dc9c883),
);
/// 1/(3π) ≈ 0.106103295394596890; coefficient of the cubic term in the
/// Taylor/asymptotic series of atanpi used by the tiny- and huge-x paths.
const ONE_OVER_3PI: f64 = f64::from_bits(0x3fbb2995e7b7b604);
/// Small-argument path (|x| < 2^-54): to double precision atanpi(x) = x/π,
/// so the result is the double-double product x·(1/π), computed with a
/// 2^106 scaling so the correction term does not vanish to zero when the
/// result is (near-)subnormal.
fn atanpi_small(x: f64) -> f64 {
    // atanpi(±0) = ±0: preserve the sign of zero.
    if x == 0. {
        return x;
    }
    // Hard-to-round subnormal input: return a stored result nudged by
    // ±(2^-600 · 2^-600) — far below one ulp — to select the correctly
    // rounded value and raise the inexact flag.
    // NOTE(review): constants look precomputed by an exhaustive worst-case
    // search — confirm against the table generator.
    if x.abs() == f64::from_bits(0x0015cba89af1f855) {
        return if x > 0. {
            f_fmla(
                f64::from_bits(0x9a70000000000000),
                f64::from_bits(0x1a70000000000000),
                f64::from_bits(0x0006f00f7cd3a40b),
            )
        } else {
            f_fmla(
                f64::from_bits(0x1a70000000000000),
                f64::from_bits(0x1a70000000000000),
                f64::from_bits(0x8006f00f7cd3a40b),
            )
        };
    }
    let mut v = x.to_bits();
    // A whole family of hard cases shares this significand: x/π scales
    // linearly with the exponent, so for every exponent e > 2 the result
    // keeps a fixed significand with exponent e - 2.
    if (v & 0xfffffffffffff) == 0x59af9a1194efe
    {
        let e = v >> 52;
        if (e & 0x7ff) > 2 {
            // Rebuild the correctly-rounded result's bits, then apply the
            // same sub-ulp ±2^-1200 nudge as above.
            v = ((e - 2) << 52) | 0xb824198b94a89;
            return if x > 0. {
                dd_fmla(
                    f64::from_bits(0x9a70000000000000),
                    f64::from_bits(0x1a70000000000000),
                    f64::from_bits(v),
                )
            } else {
                dd_fmla(
                    f64::from_bits(0x1a70000000000000),
                    f64::from_bits(0x1a70000000000000),
                    f64::from_bits(v),
                )
            };
        }
    }
    // h ≈ x/π (high part of the product).
    let h = x * ONE_OVER_PI_DD.hi;
    // corr = 2^106 · (x·hi − h): the FMA error of the product, scaled up by
    // 2^106 (bits 0x4690...) so it stays normal even when h is subnormal.
    let mut corr = dd_fmla(
        x * f64::from_bits(0x4690000000000000),
        ONE_OVER_PI_DD.hi,
        -h * f64::from_bits(0x4690000000000000),
    );
    // Fold in the low word of 1/π at the same 2^106 scale.
    corr = dd_fmla(
        x * f64::from_bits(0x4690000000000000),
        ONE_OVER_PI_DD.lo,
        corr,
    );
    // Scale the correction back down by 2^-106 (bits 0x3950...) and add to h.
    dyad_fmla(corr, f64::from_bits(0x3950000000000000), h)
}
/// Hardware-FMA twin of [`atanpi_small`] for x86/x86_64: identical algorithm,
/// with the software `f_fmla`/`dd_fmla`/`dyad_fmla` helpers replaced by
/// `f64::mul_add`, which compiles to a fused multiply-add under the enabled
/// target features. See `atanpi_small` for the algorithm commentary.
///
/// # Safety
/// Caller must ensure the CPU supports AVX and FMA (enforced by the runtime
/// dispatch in `f_atanpi`).
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[target_feature(enable = "avx", enable = "fma")]
unsafe fn atanpi_small_fma(x: f64) -> f64 {
    // atanpi(±0) = ±0: preserve the sign of zero.
    if x == 0. {
        return x;
    }
    // Hard-to-round subnormal input: stored result with a sub-ulp nudge to
    // force correct rounding (same constants as the portable path).
    if x.abs() == f64::from_bits(0x0015cba89af1f855) {
        return if x > 0. {
            f64::mul_add(
                f64::from_bits(0x9a70000000000000),
                f64::from_bits(0x1a70000000000000),
                f64::from_bits(0x0006f00f7cd3a40b),
            )
        } else {
            f64::mul_add(
                f64::from_bits(0x1a70000000000000),
                f64::from_bits(0x1a70000000000000),
                f64::from_bits(0x8006f00f7cd3a40b),
            )
        };
    }
    let mut v = x.to_bits();
    // Family of hard cases sharing one significand across exponents; the
    // result keeps a fixed significand with exponent e - 2.
    if (v & 0xfffffffffffff) == 0x59af9a1194efe
    {
        let e = v >> 52;
        if (e & 0x7ff) > 2 {
            v = ((e - 2) << 52) | 0xb824198b94a89;
            return if x > 0. {
                f64::mul_add(
                    f64::from_bits(0x9a70000000000000),
                    f64::from_bits(0x1a70000000000000),
                    f64::from_bits(v),
                )
            } else {
                f64::mul_add(
                    f64::from_bits(0x1a70000000000000),
                    f64::from_bits(0x1a70000000000000),
                    f64::from_bits(v),
                )
            };
        }
    }
    // Double-double product x·(1/π) with a 2^106 / 2^-106 scaling so the
    // correction term survives in the subnormal range.
    let h = x * ONE_OVER_PI_DD.hi;
    let mut corr = f64::mul_add(
        x * f64::from_bits(0x4690000000000000),
        ONE_OVER_PI_DD.hi,
        -h * f64::from_bits(0x4690000000000000),
    );
    corr = f64::mul_add(
        x * f64::from_bits(0x4690000000000000),
        ONE_OVER_PI_DD.lo,
        corr,
    );
    f64::mul_add(corr, f64::from_bits(0x3950000000000000), h)
}
/// Asymptotic path for huge |x|: uses
/// atanpi(x) ≈ ±1/2 − 1/(πx) + 1/(3πx³),
/// evaluated in double-double arithmetic.
///
/// Fix: the original contained a dead store `m.hi = vh.hi;` between the two
/// `from_exact_add` calls — `m` was fully reassigned on the very next line
/// (which reads only `m.lo`), so the store had no effect. It has been removed;
/// behavior is unchanged.
fn atanpi_asympt(x: f64) -> f64 {
    // The limit value ±1/2, carrying the sign of x.
    let h = f64::copysign(0.5, x);
    // rcy ≈ 1/x as a double-double.
    let rcy = DoubleDouble::from_quick_recip(x);
    // m ≈ 1/(πx); negate hi for the −1/(πx) term, and fold the
    // +1/(3πx³) series term into (the negated) low word.
    let mut m = DoubleDouble::quick_mult(rcy, ONE_OVER_PI_DD);
    m.hi = -m.hi;
    m.lo = dd_fmla(ONE_OVER_3PI * rcy.hi, rcy.hi * rcy.hi, -m.lo);
    // Error-free renormalization of h + m.hi, then of the leftover tails.
    let vh = DoubleDouble::from_exact_add(h, m.hi);
    m = DoubleDouble::from_exact_add(vh.lo, m.lo);
    // If the tail lands exactly on a rounding boundary (|m.hi| == 2^-55),
    // nudge it off the boundary in the direction indicated by m.lo so the
    // final addition rounds correctly.
    if m.hi.abs() == f64::from_bits(0x3c80000000000000) {
        m.hi = if m.hi * m.lo > 0. {
            f64::copysign(f64::from_bits(0x3c80000000000001), m.hi)
        } else {
            f64::copysign(f64::from_bits(0x3c7fffffffffffff), m.hi)
        };
    }
    vh.hi + m.hi
}
/// Tiny-argument refinement: atanpi(x) ≈ x/π − x³/(3π), evaluated as a
/// double-double product with the cubic term folded into the low word.
#[inline]
fn atanpi_tiny(x: f64) -> f64 {
    // dx ≈ x/π in double-double.
    let mut dx = DoubleDouble::quick_mult_f64(ONE_OVER_PI_DD, x);
    // Subtract the x³/(3π) series term from the low word.
    dx.lo = dd_fmla(-ONE_OVER_3PI * x, x * x, dx.lo);
    dx.to_f64()
}
/// Cold refinement path, taken when the fast path's upper/lower error bounds
/// straddle a rounding boundary. `a` is the fast path's estimate of atanpi(x)
/// (scaled back to atan units by the caller's perturbed `ub`), used only to
/// pick the argument-reduction index.
#[cold]
fn as_atanpi_refine2(x: f64, a: f64) -> f64 {
    // Huge |x|: asymptotic expansion around ±1/2.
    if x.abs() > f64::from_bits(0x413be00000000000) {
        return atanpi_asympt(x);
    }
    // Tiny |x|: two-term Taylor series.
    if x.abs() < f64::from_bits(0x3e4c700000000000) {
        return atanpi_tiny(x);
    }
    // atan Taylor coefficients in double-double: −1/3, 1/5, −1/7 …
    const CH: [(u64, u64); 3] = [
        (0xbfd5555555555555, 0xbc75555555555555),
        (0x3fc999999999999a, 0xbc6999999999bcb8),
        (0xbfc2492492492492, 0xbc6249242093c016),
    ];
    // … and the higher-order tail ≈ 1/9, −1/11, 1/13, −1/15
    // (minimax-adjusted), evaluated in plain double.
    const CL: [u64; 4] = [
        0x3fbc71c71c71c71c,
        0xbfb745d1745d1265,
        0x3fb3b13b115bcbc4,
        0xbfb1107c41ad3253,
    ];
    // Reduction index i ≈ round(|a|·256/π), extracted from the mantissa bits
    // of |a|·256/π + 256.5 (0x40545f306dc9c883 ≈ 256/π).
    let phi = ((a.abs()) * f64::from_bits(0x40545f306dc9c883) + 256.5).to_bits();
    let i: i64 = ((phi >> (52 - 8)) & 0xff) as i64;
    let dh: DoubleDouble = if i == 128 {
        // a near ±π/2 (large |x|): use atan(x) = ±π/2 − atan(1/x),
        // so reduce via dh = −1/x.
        DoubleDouble::from_quick_recip(-x)
    } else {
        // dh = (x − ta) / (1 + x·ta) with ta ≈ tan(i·π/256), so that
        // atan(x) = i·π/256·sign(x) + atan(dh).
        let ta = f64::copysign(f64::from_bits(ATAN_REDUCE[i as usize].0), x);
        let dzta = DoubleDouble::from_exact_mult(x, ta);
        let zmta = x - ta;
        // v + ev = 1 + x·ta exactly (Fast2Sum-style error recovery).
        let v = 1. + dzta.hi;
        let d = 1. - v;
        let ev = (d + dzta.hi) - ((d + v) - 1.) + dzta.lo;
        // (r, rl) ≈ 1/(1 + x·ta): reciprocal with a Newton-style correction
        // folding in both the division error and ev.
        let r = 1.0 / v;
        let rl = f_fmla(-ev, r, dd_fmla(r, -v, 1.0)) * r;
        DoubleDouble::quick_mult_f64(DoubleDouble::new(rl, r), zmta)
    };
    // Odd polynomial: f = dh³ · (CH(dh²) + dh⁶-weighted CL tail).
    let d2 = DoubleDouble::quick_mult(dh, dh);
    let h4 = d2.hi * d2.hi;
    let h3 = DoubleDouble::quick_mult(dh, d2);
    let fl0 = f_fmla(d2.hi, f64::from_bits(CL[1]), f64::from_bits(CL[0]));
    let fl1 = f_fmla(d2.hi, f64::from_bits(CL[3]), f64::from_bits(CL[2]));
    let fl = d2.hi * f_fmla(h4, fl1, fl0);
    let mut f = poly_dd_3(d2, CH, fl);
    f = DoubleDouble::quick_mult(h3, f);
    // Recombine: (ah, al, at) is a triple-word atan(x).
    let (ah, mut al, mut at);
    if i == 0 {
        // No reduction: atan(x) ≈ dh + f.
        ah = dh.hi;
        al = f.hi;
        at = f.lo;
    } else {
        // Table low word (only defined for i < 128; i == 128 has none).
        let mut df = 0.;
        if i < 128 {
            df = f64::copysign(1.0, x) * f64::from_bits(ATAN_REDUCE[i as usize].1);
        }
        // i·π/256 as a three-word constant (0x3f8921fb54442d00 ≈ π/256).
        let id = f64::copysign(i as f64, x);
        ah = f64::from_bits(0x3f8921fb54442d00) * id;
        al = f64::from_bits(0x3c88469898cc5180) * id;
        at = f64::from_bits(0xb97fc8f8cbb5bf80) * id;
        let v0 = DoubleDouble::add(DoubleDouble::new(at, al), DoubleDouble::new(0., df));
        let v1 = DoubleDouble::add(v0, dh);
        let v2 = DoubleDouble::add(v1, f);
        al = v2.hi;
        at = v2.lo;
    }
    // Renormalize and scale by 1/π.
    let v2 = DoubleDouble::from_exact_add(ah, al);
    let v1 = DoubleDouble::from_exact_add(v2.lo, at);
    let z0 = DoubleDouble::quick_mult(DoubleDouble::new(v1.hi, v2.hi), ONE_OVER_PI_DD);
    z0.to_f64()
}
/// Generic fast-path implementation of atanpi, parameterized over the FMA
/// primitives so the same body serves both the portable build (software
/// `f_fmla`/`dd_fmla`) and the x86 AVX+FMA build (`f64::mul_add`).
///
/// Strategy: compute an estimate plus an error bound; if rounding the upper
/// and lower bounds gives the same double, return it, otherwise fall into the
/// cold [`as_atanpi_refine2`] path.
#[inline(always)]
fn atanpi_gen_impl<
    Q: Fn(f64, f64, f64) -> f64,
    F: Fn(f64, f64, f64) -> f64,
    DdMul: Fn(DoubleDouble, DoubleDouble) -> DoubleDouble,
    AtanpiSmall: Fn(f64) -> f64,
>(
    x: f64,
    fma: Q,
    dd_fma: F,
    dd_mul: DdMul,
    atanpi_small: AtanpiSmall,
) -> f64 {
    // atan series in even powers of h: 1, ≈ −1/3, ≈ 1/5, ≈ −1/7 (minimax).
    const CH: [u64; 4] = [
        0x3ff0000000000000,
        0xbfd555555555552b,
        0x3fc9999999069c20,
        0xbfc248d2c8444ac6,
    ];
    let t = x.to_bits();
    // |x| bits.
    let at: u64 = t & 0x7fff_ffff_ffff_ffff;
    // Coarse table index from the exponent bits; only used in the
    // medium-range branch below.
    let mut i = (at >> 51).wrapping_sub(2030u64);
    if at < 0x3f7b21c475e6362au64 {
        // Small |x|: no argument reduction needed.
        if at < 0x3c90000000000000u64 {
            // |x| < 2^-54: result is x/π to double precision.
            return atanpi_small(x);
        }
        if x == 0. {
            return x;
        }
        // Odd Taylor/minimax tail: ≈ −1/3, 1/5, −1/7, 1/9.
        const CH2: [u64; 4] = [
            0xbfd5555555555555,
            0x3fc99999999998c1,
            0xbfc249249176aec0,
            0x3fbc711fd121ae80,
        ];
        let x2 = x * x;
        let x3 = x * x2;
        let x4 = x2 * x2;
        // f ≈ atan(x) − x, then (x + f)·(1/π) in double-double.
        let f0 = fma(x2, f64::from_bits(CH2[3]), f64::from_bits(CH2[2]));
        let f1 = fma(x2, f64::from_bits(CH2[1]), f64::from_bits(CH2[0]));
        let f = x3 * dd_fma(x4, f0, f1);
        let hy = dd_mul(DoubleDouble::new(f, x), ONE_OVER_PI_DD);
        // Round with ± error bound (0x3bb6f...·x); equal bounds ⇒ the
        // rounding is unambiguous.
        let mut ub = hy.hi + dd_fma(f64::from_bits(0x3bb6f00000000000), x, hy.lo);
        let lb = hy.hi + dd_fma(f64::from_bits(0xbbb6f00000000000), x, hy.lo);
        if ub == lb {
            return ub;
        }
        // Ambiguous: hand a slightly perturbed atan estimate (in atan units,
        // not divided by π) to the refinement step for index selection.
        ub = (f + f * f64::from_bits(0x3cd2000000000000)) + x; return as_atanpi_refine2(x, ub);
    }
    let h;
    let mut a: DoubleDouble;
    if at > 0x4062ded8e34a9035u64 {
        // Large |x|: reduce via atan(x) = ±π/2 − atan(1/x).
        if at >= 0x43445f306dc9c883u64 {
            if at >= (0x7ffu64 << 52) {
                if at == 0x7ffu64 << 52 {
                    // atanpi(±inf) = ±1/2.
                    return f64::copysign(0.5, x);
                } return x + x; } // NaN: propagate (quieting).
            // So large that atanpi rounds to just below ±1/2.
            return f64::copysign(0.5, x) - f64::copysign(f64::from_bits(0x3c70000000000000), x);
        }
        h = -1.0 / x;
        // a = ±π/2 in double-double.
        a = DoubleDouble::new(
            f64::copysign(f64::from_bits(0x3c91a62633145c07), x),
            f64::copysign(f64::from_bits(0x3ff921fb54442d18), x),
        );
    } else {
        // Medium |x|: table-based reduction.
        if at == 0x3ff0000000000000 {
            // Exact case: atanpi(±1) = ±1/4.
            return f64::copysign(f64::from_bits(0x3fd0000000000000), x);
        }
        // Refine the reduction index with a quadratic fit on the top
        // mantissa bits (ATAN_CIRCLE holds per-octave fit coefficients).
        let u: u64 = t & 0x0007ffffffffffff;
        let ut = u >> (51 - 16);
        let ut2 = (ut * ut) >> 16;
        let vc = ATAN_CIRCLE[i as usize];
        i = (((vc[0] as u64).wrapping_shl(16)) + ut * (vc[1] as u64) - ut2 * (vc[2] as u64))
            >> (16 + 9);
        // h = (x − ta)/(1 + x·ta) with ta ≈ tan(i·π/256);
        // a = i·π/256 + table low word, sign of x.
        let va = ATAN_REDUCE[i as usize];
        let ta = f64::copysign(1.0, x) * f64::from_bits(va.0);
        let id = f64::copysign(1.0, x) * i as f64;
        h = (x - ta) / fma(x, ta, 1.);
        a = DoubleDouble::new(
            fma(
                f64::copysign(1.0, x),
                f64::from_bits(va.1),
                f64::from_bits(0x3c88469898cc5170) * id,
            ),
            f64::from_bits(0x3f8921fb54442d00) * id,
        );
    }
    // Odd polynomial in h, accumulated into a's low word: a += h·P(h²).
    let h2 = h * h;
    let h4 = h2 * h2;
    let f0 = fma(h2, f64::from_bits(CH[3]), f64::from_bits(CH[2]));
    let f1 = fma(h2, f64::from_bits(CH[1]), f64::from_bits(CH[0]));
    let f = fma(h4, f0, f1);
    a.lo = dd_fma(h, f, a.lo);
    // ub0: upper-bound estimate in atan units, kept for the refinement call.
    let e0 = h * f64::from_bits(0x3ccf800000000000); let ub0 = (a.lo + e0) + a.hi; a = DoubleDouble::from_exact_add(a.hi, a.lo);
    a = dd_mul(a, ONE_OVER_PI_DD);
    // Error bound (scaled by h) after the 1/π multiply; equal rounded
    // bounds ⇒ done, otherwise refine.
    let e = h * f64::from_bits(0x3cb4100000000000); let ub = (a.lo + e) + a.hi;
    let lb = (a.lo - e) + a.hi;
    if ub == lb {
        return ub;
    }
    as_atanpi_refine2(x, ub0)
}
/// x86/x86_64 specialization wiring the generic implementation to hardware
/// FMA (`f64::mul_add`) and the FMA variants of the double-double helpers.
///
/// # Safety
/// Caller must ensure the CPU supports AVX and FMA (checked at runtime by
/// `f_atanpi` before this is selected).
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[target_feature(enable = "avx", enable = "fma")]
unsafe fn atanpi_fma_impl(x: f64) -> f64 {
    atanpi_gen_impl(
        x,
        f64::mul_add,
        f64::mul_add,
        DoubleDouble::quick_mult_fma,
        |x| unsafe { atanpi_small_fma(x) },
    )
}
/// Computes atan(x)/π.
///
/// Intended to be correctly rounded: the fast path checks an error bound and
/// falls back to a cold refinement step when rounding is ambiguous.
///
/// On x86/x86_64 the implementation is chosen once at first call (cached in a
/// `OnceLock`): the AVX+FMA specialization when the CPU supports it,
/// otherwise the portable software-FMA version. Other architectures always
/// use the portable version.
pub fn f_atanpi(x: f64) -> f64 {
    #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
    {
        atanpi_gen_impl(x, f_fmla, dd_fmla, DoubleDouble::quick_mult, atanpi_small)
    }
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    {
        use std::sync::OnceLock;
        // Function pointer resolved on first call; `unsafe fn` because the
        // FMA variant carries a target-feature contract.
        static EXECUTOR: OnceLock<unsafe fn(f64) -> f64> = OnceLock::new();
        let q = EXECUTOR.get_or_init(|| {
            if std::arch::is_x86_feature_detected!("avx")
                && std::arch::is_x86_feature_detected!("fma")
            {
                atanpi_fma_impl
            } else {
                fn def_atanpi(x: f64) -> f64 {
                    atanpi_gen_impl(x, f_fmla, dd_fmla, DoubleDouble::quick_mult, atanpi_small)
                }
                def_atanpi
            }
        });
        // SAFETY: the stored pointer is either the safe portable wrapper or
        // the FMA variant, selected only after the feature check succeeded.
        unsafe { q(x) }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Spot-checks covering several code paths: a huge argument (asymptotic
    /// range near 1/2), a subnormal-adjacent tiny argument, small positive
    /// and negative arguments (sign symmetry), and medium-range values.
    #[test]
    fn atanpi_test() {
        assert_eq!(f_atanpi(119895888825197.97), 0.49999999999999734);
        assert_eq!(f_atanpi(1.5610674235815122E-307), 4.969031939254545e-308);
        assert_eq!(f_atanpi(0.00004577636903266291), 0.000014571070806516354);
        assert_eq!(f_atanpi(-0.00004577636903266291), -0.000014571070806516354);
        assert_eq!(f_atanpi(-0.4577636903266291), -0.13664770469904508);
        assert_eq!(f_atanpi(0.9876636903266291), 0.24802445507819193);
    }
}