//! pxfm 0.1.29
//!
//! Fast and accurate math.
/*
 * // Copyright (c) Radzivon Bartoshyk 7/2025. All rights reserved.
 * //
 * // Redistribution and use in source and binary forms, with or without modification,
 * // are permitted provided that the following conditions are met:
 * //
 * // 1.  Redistributions of source code must retain the above copyright notice, this
 * // list of conditions and the following disclaimer.
 * //
 * // 2.  Redistributions in binary form must reproduce the above copyright notice,
 * // this list of conditions and the following disclaimer in the documentation
 * // and/or other materials provided with the distribution.
 * //
 * // 3.  Neither the name of the copyright holder nor the names of its
 * // contributors may be used to endorse or promote products derived from
 * // this software without specific prior written permission.
 * //
 * // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
use crate::double_double::DoubleDouble;
use crate::sincospi::{GenSinCosPiBackend, SinCosPiBackend};
use crate::tangent::tanpi_table::TANPI_K_PI_OVER_64;

/// Evaluates tan(pi*x) for a small reduced argument as a double-double.
///
/// Intended for |x| inside the Sollya fit domain d = [0, 0.0078128]
/// (roughly 1/128, i.e. the residue of a pi/64-style reduction — see the
/// caller `tanpi_gen_impl`). The `backend` supplies FMA and double-double
/// primitives so the same code runs on scalar and hardware-FMA paths.
#[inline]
pub(crate) fn tanpi_eval<B: SinCosPiBackend>(x: f64, backend: &B) -> DoubleDouble {
    // x^2 as an exact double-double product (no rounding loss).
    let x2 = DoubleDouble::from_exact_mult(x, x);
    // tan(pi*x) generated by Sollya:
    // d = [0, 0.0078128];
    // f_tan = tan(y*pi)/y;
    // Q = fpminimax(f_tan, [|0, 2, 4, 6, 8|], [|107, D...|], d, relative, floating);
    // C holds the even-degree (x^2..x^8) coefficients as f64 bit patterns;
    // the leading coefficient (~pi) is kept separately in C0 at 107-bit
    // (double-double) precision because it dominates the result.
    const C: [u64; 4] = [
        0x4024abbce625be51,
        0x404466bc677698e5,
        0x40645fff70379ae3,
        0x4084626b091b7fd0,
    ];
    const C0: DoubleDouble = DoubleDouble::from_bit_pair((0x3ca1a6444aa5b996, 0x400921fb54442d18));

    // polyeval 4, estrin scheme (lower-order terms need only f64 precision)
    let u0 = backend.fma(x2.hi, f64::from_bits(C[1]), f64::from_bits(C[0]));
    let u1 = backend.fma(x2.hi, f64::from_bits(C[3]), f64::from_bits(C[2]));
    let tan_poly_lo = backend.fma(x2.hi * x2.hi, u1, u0);

    // We're splitting polynomial in two parts, since first term dominates
    // we compute: (a0_lo + a0_hi) * x + x * (a1 * x^2 + a2 * x^4) ...
    let r_lo = backend.quick_mult_f64(x2, tan_poly_lo);
    let tan_lo = backend.fma(r_lo.lo, x, r_lo.hi * x);
    let tan_hi = backend.quick_mult_f64(C0, x);
    DoubleDouble::full_add_f64(tan_hi, tan_lo)
}

/// Cold fallback path: recomputes tan(pi*x) for the reduced argument `x`
/// with a longer polynomial evaluated entirely in double-double precision,
/// then combines it with the table value `tan_k` via the tangent addition
/// formula. Only invoked when the fast path's rounding test in
/// `tanpi_gen_impl` is inconclusive.
#[cold]
fn tanpi_hard<B: SinCosPiBackend>(x: f64, tan_k: DoubleDouble, backend: &B) -> DoubleDouble {
    // Double-double (lo, hi) bit-pair coefficients of the odd polynomial
    // tan(pi*y)/y in powers of y^2; C[0] is ~pi.
    const C: [(u64, u64); 6] = [
        (0x3ca1a62632712fc8, 0x400921fb54442d18),
        (0xbcc052338fbb4528, 0x4024abbce625be53),
        (0x3ced42454c5f85b3, 0x404466bc6775aad9),
        (0xbd00c7d6a971a560, 0x40645fff9b4b244d),
        (0x3d205970eff53274, 0x40845f46e96c3a0b),
        (0xbd3589489ad24fc4, 0x40a4630551cd123d),
    ];
    let x2 = backend.exact_mult(x, x);
    // Horner evaluation in x^2, every step in double-double arithmetic.
    let mut tan_y = backend.quick_mul_add(
        x2,
        DoubleDouble::from_bit_pair(C[5]),
        DoubleDouble::from_bit_pair(C[4]),
    );
    tan_y = backend.quick_mul_add(x2, tan_y, DoubleDouble::from_bit_pair(C[3]));
    tan_y = backend.quick_mul_add(x2, tan_y, DoubleDouble::from_bit_pair(C[2]));
    tan_y = backend.quick_mul_add(x2, tan_y, DoubleDouble::from_bit_pair(C[1]));
    tan_y = backend.quick_mul_add(x2, tan_y, DoubleDouble::from_bit_pair(C[0]));
    // Multiply back the factored-out odd power: tan_y ~ tan(pi*x).
    tan_y = backend.quick_mult_f64(tan_y, x);

    // tan(a+b) = (tan(a) + tan(b)) / (1 - tan(a)*tan(b)):
    // num = tan(y*pi/64) + tan(k*pi/64)
    let num = DoubleDouble::full_dd_add(tan_y, tan_k);
    // den = 1 - tan(y*pi/64)*tan(k*pi/64)
    let den = backend.mul_add_f64(tan_y, -tan_k, 1.);
    // tan = num / den
    backend.div(num, den)
}

/// Generic tan(pi*x) implementation, parameterized over the arithmetic
/// backend (portable vs. hardware-FMA primitives).
///
/// Handles the special cases in order — signed zero, NaN/Inf, large
/// integers, exact poles at half-integers, tiny arguments — then performs
/// an argument reduction and evaluates via the tangent addition formula,
/// falling back to `tanpi_hard` when the rounding test is inconclusive.
#[inline(always)]
fn tanpi_gen_impl<B: SinCosPiBackend>(x: f64, backend: B) -> f64 {
    // tan(pi*0) = 0, preserving the sign of zero.
    if x == 0. {
        return x;
    }
    // |x| as raw bits (sign cleared).
    let ax = x.to_bits() & 0x7fff_ffff_ffff_ffff;
    if ax >= (0x7ffu64 << 52) {
        // NaN, Inf
        if ax > (0x7ffu64 << 52) {
            return x + x;
        } // NaN
        return f64::NAN; // x=Inf
    }
    // Unbiased binary exponent of |x|.
    let e: i32 = (ax >> 52) as i32 - 1023;
    if e > 0 {
        if e >= 52 {
            // |x| >= 2^52: the spacing between doubles is >= 1, so x is
            // always an integer and tan(pi*x) is an exact signed zero.
            return f64::copysign(0., x);
        }
        // |x| > 1 and |x| < 2^53
        let m = (ax & ((1u64 << 52) - 1)) | (1u64 << 52); // mantissa with hidden 1
        let shift = 52 - e;

        let frac = m & ((1u64 << shift) - 1);
        if frac == (1u64 << (shift - 1)) {
            // fractional part is exactly 0.5, i.e. x = n + 0.5: a pole of tan.
            // NOTE(review): this fast path returns +Inf for all |x| > 1
            // half-integers regardless of sign; the reduced path below uses
            // copysign — confirm this sign convention is intended.
            return f64::INFINITY;
        }
    }

    if ax <= 0x3cb0000000000000 {
        // for tiny x ( |x| <= 2^-52 ) just small taylor expansion
        // tan(PI*x) ~ PI*x + PI^3*x^3/3 + O(x^5)
        const PI: DoubleDouble =
            DoubleDouble::from_bit_pair((0x3ca1a62633145c07, 0x400921fb54442d18));
        if ax <= 0x3ca0000000000000 {
            // |x| <= 2^-53, renormalize value:
            // scale x up by sc = 2^(1022 - e) so the product PI*x is computed
            // away from the underflow region, then scale back with isc, where
            // sc * isc == 1 exactly (both are powers of two).
            let e: i32 = (ax >> 52) as i32;
            let sc = f64::from_bits((2045i64 - e as i64).wrapping_shl(52) as u64);
            let isc = f64::from_bits(1i64.wrapping_add(e as i64).wrapping_shl(52) as u64);
            let dx = x * sc;
            let q0 = backend.quick_mult_f64(PI, dx);
            let r = q0.to_f64() * isc;
            return r;
        }
        let q0 = backend.quick_mult_f64(PI, x);
        let r = q0.to_f64();
        return r;
    }

    // argument reduction: split x into an integer multiple k of 1/64 plus a
    // small residue y (presumably |y| <= 1/128 — see arg_reduce_pi_64).
    let (y, k) = backend.arg_reduce_pi_64(x);

    if y == 0.0 {
        // x is an exact multiple of 1/64: return the exactly-representable
        // values of tan at these points directly.
        let km = (k.abs() & 63) as i32; // k mod 64

        match km {
            0 => return f64::copysign(0f64, x),           // tanpi(n) = 0
            32 => return f64::copysign(f64::INFINITY, x), // tanpi(n+0.5) = ±∞
            16 => return f64::copysign(1.0, x),           // tanpi(n+0.25) = ±1
            48 => return -f64::copysign(1.0, x),          // tanpi(n+0.75) = ∓1
            _ => {}
        }
    }

    // Table lookup of tan(k*pi/64) as a double-double (128-entry table).
    let tan_k = DoubleDouble::from_bit_pair(TANPI_K_PI_OVER_64[((k as u64) & 127) as usize]);

    // Computes tan(pi*x) through identities.
    // tan(a+b) = (tan(a) + tan(b)) / (1 - tan(a)tan(b)) = (tan(y*pi/64) + tan(k*pi/64)) / (1 - tan(y*pi/64)*tan(k*pi/64))
    let tan_y = tanpi_eval(y, &backend);
    // num = tan(y*pi/64) + tan(k*pi/64)
    let num = DoubleDouble::add(tan_k, tan_y);
    // den = 1 - tan(y*pi/64)*tan(k*pi/64)
    let den = backend.mul_add_f64(tan_y, -tan_k, 1.);
    // tan = num / den
    let tan_value = backend.div(num, den);
    // Ziv-style rounding test: err bounds the fast path's evaluation error
    // (~2^-64 relative plus 2^-73 absolute). If adding and subtracting err
    // round to the same double, the fast result is correctly rounded.
    let err = backend.fma(
        tan_value.hi,
        f64::from_bits(0x3bf0000000000000), // 2^-64
        f64::from_bits(0x3b60000000000000), // 2^-73
    );
    let ub = tan_value.hi + (tan_value.lo + err);
    let lb = tan_value.hi + (tan_value.lo - err);
    if ub == lb {
        return tan_value.to_f64();
    }
    // Ambiguous rounding: take the slower, higher-precision path.
    tanpi_hard(y, tan_k, &backend).to_f64()
}

/// x86/x86_64 specialization of `tanpi_gen_impl` built with the AVX/FMA
/// backend and compiled with those target features enabled.
///
/// # Safety
///
/// The caller must ensure the running CPU supports both AVX and FMA
/// (see the runtime feature detection in `f_tanpi`).
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[target_feature(enable = "avx", enable = "fma")]
unsafe fn tanpi_fma_impl(x: f64) -> f64 {
    use crate::sincospi::FmaSinCosPiBackend;
    tanpi_gen_impl(x, FmaSinCosPiBackend {})
}

/// Computes tan(PI*x)
///
/// Max found ULP 0.5
pub fn f_tanpi(x: f64) -> f64 {
    #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
    {
        tanpi_gen_impl(x, GenSinCosPiBackend {})
    }
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    {
        use std::sync::OnceLock;

        // Portable fallback used when AVX/FMA are unavailable at runtime.
        fn tanpi_portable(x: f64) -> f64 {
            tanpi_gen_impl(x, GenSinCosPiBackend {})
        }

        // The dispatch target is resolved once on first call and cached
        // for the lifetime of the process.
        static DISPATCH: OnceLock<unsafe fn(f64) -> f64> = OnceLock::new();
        let selected = DISPATCH.get_or_init(|| {
            let has_fma = std::arch::is_x86_feature_detected!("avx")
                && std::arch::is_x86_feature_detected!("fma");
            if has_fma { tanpi_fma_impl } else { tanpi_portable }
        });
        // SAFETY: `tanpi_fma_impl` is only selected after runtime detection
        // confirmed AVX and FMA; the portable fallback is a safe fn.
        unsafe { selected(x) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Regression tests pinning exact f64 outputs across the special-case
    /// and evaluation paths of `f_tanpi`.
    #[test]
    fn test_tanpi() {
        // Just below the pole at x = 0.5: very large positive result.
        assert_eq!(f_tanpi(0.4999999999119535), 3615246871.564404);
        // Large integer input collapses to an exact zero.
        assert_eq!(f_tanpi(7119681148991743.0), 0.);
        // Exact half-integer > 1: pole detected by the bit-level check.
        assert_eq!(f_tanpi(63.5), f64::INFINITY);
        assert_eq!(f_tanpi(63.99935913085936), -0.0020133525045719896);
        // Tiny inputs exercise the renormalized Taylor path.
        assert_eq!(f_tanpi(3.3821122649309461E-306), 1.0625219045122997E-305);
        assert_eq!(f_tanpi(1.8010707049867402E-255), 5.6582304953821333E-255);
        assert_eq!(f_tanpi(1.001000000061801), 0.0031416031832113213);
        // Inputs just past ±0.5 land on either side of the pole.
        assert_eq!(f_tanpi(-0.5000000000000226), 14054316517702.594);
        assert_eq!(f_tanpi(0.5000000000000001), -2867080569611329.5);
        assert_eq!(f_tanpi(0.02131), 0.06704753721009375);
        // Non-finite inputs all map to NaN.
        assert!(f_tanpi(f64::INFINITY).is_nan());
        assert!(f_tanpi(f64::NAN).is_nan());
        assert!(f_tanpi(f64::NEG_INFINITY).is_nan());
    }
}
}