use crate::tensor::TensorStorage;
use crate::{Result, Tensor};
use scirs2_core::numeric::Float;
/// Converts an `f64` literal into the generic float type `$t`.
///
/// Used throughout this module to embed numeric constants in code generic
/// over `T: Float`. The `expect` is justified: `NumCast::from` on a finite
/// `f64` constant cannot fail for the standard float types this crate
/// instantiates (`f32`/`f64`).
macro_rules! float_const {
    ($val:expr, $t:ty) => {
        <$t as scirs2_core::num_traits::NumCast>::from($val)
            .expect("float constant conversion should never fail for standard float types")
    };
}
/// Element-wise error function `erf(x)`.
///
/// # Errors
/// Propagates any error from the GPU fallback path.
pub fn erf<T>(x: &Tensor<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    match &x.storage {
        // CPU path: apply the scalar kernel to every element.
        TensorStorage::Cpu(data) => Ok(Tensor::from_array(data.mapv(|v| erf_impl(v)))),
        // GPU path: delegate to the dedicated helper.
        #[cfg(feature = "gpu")]
        TensorStorage::Gpu(buf) => erf_gpu(x, buf),
    }
}
/// Element-wise complementary error function `erfc(x) = 1 - erf(x)`.
///
/// # Errors
/// Propagates any error from the GPU fallback path.
pub fn erfc<T>(x: &Tensor<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    match &x.storage {
        // CPU path: apply the scalar kernel to every element.
        TensorStorage::Cpu(data) => Ok(Tensor::from_array(data.mapv(|v| erfc_impl(v)))),
        // GPU path: delegate to the dedicated helper.
        #[cfg(feature = "gpu")]
        TensorStorage::Gpu(buf) => erfc_gpu(x, buf),
    }
}
/// Element-wise gamma function `Γ(x)`.
///
/// # Errors
/// Propagates any error from the GPU fallback path.
pub fn gamma<T>(x: &Tensor<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    match &x.storage {
        // CPU path: apply the scalar kernel to every element.
        TensorStorage::Cpu(data) => Ok(Tensor::from_array(data.mapv(|v| gamma_impl(v)))),
        // GPU path: delegate to the dedicated helper.
        #[cfg(feature = "gpu")]
        TensorStorage::Gpu(buf) => gamma_gpu(x, buf),
    }
}
/// Element-wise natural logarithm of the gamma function, `ln(Γ(x))`.
///
/// Non-positive inputs map to NaN (see `lgamma_impl`).
///
/// # Errors
/// Propagates any error from the GPU fallback path.
pub fn lgamma<T>(x: &Tensor<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    match &x.storage {
        // CPU path: apply the scalar kernel to every element.
        TensorStorage::Cpu(data) => Ok(Tensor::from_array(data.mapv(|v| lgamma_impl(v)))),
        // GPU path: delegate to the dedicated helper.
        #[cfg(feature = "gpu")]
        TensorStorage::Gpu(buf) => lgamma_gpu(x, buf),
    }
}
/// Element-wise digamma function `ψ(x)` (logarithmic derivative of Γ).
///
/// Non-positive inputs map to NaN (see `digamma_impl`).
///
/// # Errors
/// Propagates any error from the GPU fallback path.
pub fn digamma<T>(x: &Tensor<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    match &x.storage {
        // CPU path: apply the scalar kernel to every element.
        TensorStorage::Cpu(data) => Ok(Tensor::from_array(data.mapv(|v| digamma_impl(v)))),
        // GPU path: delegate to the dedicated helper.
        #[cfg(feature = "gpu")]
        TensorStorage::Gpu(buf) => digamma_gpu(x, buf),
    }
}
/// Element-wise Bessel function of the first kind, order 0: `J0(x)`.
///
/// # Errors
/// Propagates any error from the GPU fallback path.
pub fn bessel_j0<T>(x: &Tensor<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    match &x.storage {
        // CPU path: apply the scalar kernel to every element.
        TensorStorage::Cpu(data) => Ok(Tensor::from_array(data.mapv(|v| bessel_j0_impl(v)))),
        // GPU path: delegate to the dedicated helper.
        #[cfg(feature = "gpu")]
        TensorStorage::Gpu(buf) => bessel_j0_gpu(x, buf),
    }
}
/// Element-wise Bessel function of the first kind, order 1: `J1(x)`.
///
/// # Errors
/// Propagates any error from the GPU fallback path.
pub fn bessel_j1<T>(x: &Tensor<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    match &x.storage {
        // CPU path: apply the scalar kernel to every element.
        TensorStorage::Cpu(data) => Ok(Tensor::from_array(data.mapv(|v| bessel_j1_impl(v)))),
        // GPU path: delegate to the dedicated helper.
        #[cfg(feature = "gpu")]
        TensorStorage::Gpu(buf) => bessel_j1_gpu(x, buf),
    }
}
/// Element-wise Bessel function of the second kind, order 0: `Y0(x)`.
///
/// Non-positive inputs map to NaN (see `bessel_y0_impl`).
///
/// # Errors
/// Propagates any error from the GPU fallback path.
pub fn bessel_y0<T>(x: &Tensor<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    match &x.storage {
        // CPU path: apply the scalar kernel to every element.
        TensorStorage::Cpu(data) => Ok(Tensor::from_array(data.mapv(|v| bessel_y0_impl(v)))),
        // GPU path: delegate to the dedicated helper.
        #[cfg(feature = "gpu")]
        TensorStorage::Gpu(buf) => bessel_y0_gpu(x, buf),
    }
}
/// Element-wise Bessel function of the second kind, order 1: `Y1(x)`.
///
/// Non-positive inputs map to NaN (see `bessel_y1_impl`).
///
/// # Errors
/// Propagates any error from the GPU fallback path.
pub fn bessel_y1<T>(x: &Tensor<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    match &x.storage {
        // CPU path: apply the scalar kernel to every element.
        TensorStorage::Cpu(data) => Ok(Tensor::from_array(data.mapv(|v| bessel_y1_impl(v)))),
        // GPU path: delegate to the dedicated helper.
        #[cfg(feature = "gpu")]
        TensorStorage::Gpu(buf) => bessel_y1_gpu(x, buf),
    }
}
/// Scalar error function via the Abramowitz & Stegun 7.1.26 rational
/// approximation (max absolute error ≈ 1.5e-7).
///
/// erf is odd, so the approximation is evaluated on |x| and the sign of the
/// input is restored at the end.
fn erf_impl<T: Float>(x: T) -> T {
    // Polynomial coefficients a5..a1, highest order first (Horner order).
    let coeffs = [
        float_const!(1.061405429, T),
        float_const!(-1.453152027, T),
        float_const!(1.421413741, T),
        float_const!(-0.284496736, T),
        float_const!(0.254829592, T),
    ];
    let p = float_const!(0.3275911, T);
    let sign = if x < T::zero() { -T::one() } else { T::one() };
    let ax = x.abs();
    // Substitution t = 1 / (1 + p|x|).
    let t = T::one() / (T::one() + p * ax);
    let mut poly = coeffs[0];
    for &c in &coeffs[1..] {
        poly = poly * t + c;
    }
    sign * (T::one() - poly * t * (-ax * ax).exp())
}
/// Scalar complementary error function, `erfc(x) = 1 - erf(x)`.
///
/// NOTE(review): for large positive x this form loses precision to
/// cancellation; acceptable given the ~1e-7 accuracy of `erf_impl`.
fn erfc_impl<T: Float>(x: T) -> T {
    let erf_value = erf_impl(x);
    T::one() - erf_value
}
/// Scalar gamma function `Γ(z)`.
///
/// Negative arguments go through Euler's reflection formula
/// `Γ(z)·Γ(1−z) = π / sin(πz)`; arguments numerically indistinguishable
/// from the poles at non-positive integers yield +∞. All other arguments
/// (including NaN, which fails the `< 0` comparison) use the Lanczos path.
fn gamma_impl<T: Float>(z: T) -> T {
    if z < T::zero() {
        let pi = float_const!(std::f64::consts::PI, T);
        let reflection_sin = (pi * z).sin();
        // sin(πz) ≈ 0 means z sits on (or next to) a pole.
        let pole_tolerance = float_const!(1e-15, T);
        if reflection_sin.abs() < pole_tolerance {
            T::infinity()
        } else {
            pi / (reflection_sin * gamma_impl(T::one() - z))
        }
    } else {
        lanczos_gamma(z)
    }
}
fn lgamma_impl<T: Float>(x: T) -> T {
if x <= T::zero() {
T::nan()
} else {
gamma_impl(x).ln()
}
}
/// Digamma function `ψ(x)` for x > 0; NaN otherwise.
///
/// Small arguments are shifted upward with the recurrence
/// `ψ(x) = ψ(x+1) − 1/x` until x ≥ 6 (at most six recursive steps), where
/// the short asymptotic expansion
/// `ψ(x) ≈ ln x − 1/(2x) − 1/(12x²) + 1/(120x⁴)` is used.
fn digamma_impl<T: Float>(x: T) -> T {
    if x <= T::zero() {
        return T::nan();
    }
    let asymptotic_threshold = float_const!(6.0, T);
    if x < asymptotic_threshold {
        // Raise the argument into the asymptotic regime.
        return digamma_impl(x + T::one()) - T::one() / x;
    }
    let inv = T::one() / x;
    let inv_sq = inv * inv;
    x.ln() - inv / float_const!(2.0, T) - inv_sq / float_const!(12.0, T)
        + inv_sq * inv_sq / float_const!(120.0, T)
}
/// Lanczos approximation of Γ(z) with g = 7 and a 9-term coefficient set.
///
/// Arguments below 0.5 are routed through Euler's reflection formula so the
/// core series is only evaluated for z ≥ 0.5. Callers are expected to handle
/// negative z before reaching here (see `gamma_impl`), but the reflection
/// branch keeps this function self-contained.
fn lanczos_gamma<T: Float>(z: T) -> T {
    let g = float_const!(7.0, T);
    // Standard Lanczos (g = 7, n = 9) coefficients c0..c8.
    let coeffs = [
        float_const!(0.999_999_999_999_809_9, T),
        float_const!(676.520_368_121_885_1, T),
        float_const!(-1_259.139_216_722_402_8, T),
        float_const!(771.323_428_777_653_1, T),
        float_const!(-176.615_029_162_140_6, T),
        float_const!(12.507_343_278_686_905, T),
        float_const!(-0.138_571_095_265_720_12, T),
        float_const!(9.984_369_578_019_572e-6, T),
        float_const!(1.505_632_735_149_311_6e-7, T),
    ];
    if z < float_const!(0.5, T) {
        // Reflection: Γ(z) = π / (sin(πz) · Γ(1 − z)).
        let pi = float_const!(std::f64::consts::PI, T);
        pi / ((pi * z).sin() * lanczos_gamma(T::one() - z))
    } else {
        // Shift by one so the series computes Γ((z−1) + 1) = Γ(z).
        let z = z - T::one();
        // Partial-fraction sum A_g(z) = c0 + Σ_{i≥1} c_i / (z + i).
        let mut x = coeffs[0];
        for (i, &coeff) in coeffs.iter().enumerate().skip(1) {
            x = x + coeff / (z + float_const!(i as f64, T));
        }
        let t = z + g + float_const!(0.5, T);
        // √(2π), precomputed.
        let sqrt_2pi = float_const!(2.5066282746310005, T);
        // Γ(z+1) = √(2π) · t^(z+½) · e^(−t) · A_g(z)
        sqrt_2pi * t.powf(z + float_const!(0.5, T)) * (-t).exp() * x
    }
}
/// Scalar Bessel function of the first kind, order 0.
///
/// For |x| < 8 uses the ascending power series
/// `J0(x) = Σ_{k≥0} (−y)^k / (k!)²` with `y = (x/2)²`, iterated until the
/// terms are negligible. (The previous version truncated after 8 fixed
/// terms, which lost several digits near |x| = 8.) For |x| ≥ 8 uses an
/// asymptotic amplitude/phase form.
fn bessel_j0_impl<T: Float>(x: T) -> T {
    let abs_x = x.abs();
    if abs_x < float_const!(8.0, T) {
        let x_half = x / float_const!(2.0, T);
        let y = x_half * x_half;
        // term_k = (−y)^k / (k!)²; the ratio term_k / term_{k−1} is −y / k².
        let mut sum = T::one();
        let mut term = T::one();
        for k in 1..=32 {
            let kf = float_const!(k as f64, T);
            term = term * (-y) / (kf * kf);
            sum = sum + term;
            if term.abs() < float_const!(1e-17, T) {
                break;
            }
        }
        sum
    } else {
        // Asymptotic: J0(x) ≈ √(2/(πx)) [P(z)cos(x−π/4) − Q(z)sin(x−π/4)],
        // with z = 8/x.
        // NOTE(review): these P/Q coefficients look like a low-order fit and
        // are only accurate to a few percent near |x| = 8 — verify source.
        let pi = float_const!(std::f64::consts::PI, T);
        let sqrt_2_over_pi_x = (float_const!(2.0, T) / (pi * abs_x)).sqrt();
        let phase = abs_x - pi / float_const!(4.0, T);
        let z = float_const!(8.0, T) / abs_x;
        let z2 = z * z;
        let p = float_const!(1.0, T) - float_const!(0.109_862_862_710_421, T) * z
            + float_const!(0.0278527697782932, T) * z2
            - float_const!(0.0246353024907655, T) * z2 * z;
        let q = float_const!(-0.0785398163397448, T) * z + float_const!(0.0553413494103509, T) * z2
            - float_const!(0.0435750796815151, T) * z2 * z;
        sqrt_2_over_pi_x * (p * phase.cos() - q * phase.sin())
    }
}
/// Scalar Bessel function of the first kind, order 1.
///
/// For |x| < 8 uses the ascending series
/// `J1(x) = (x/2) Σ_{k≥0} (−y)^k / (k!(k+1)!)` with `y = (x/2)²`, iterated
/// to convergence. (The previous version divided successive terms by k²
/// instead of k(k+1), skewing every coefficient past the first — e.g.
/// J1(1) came out 0.43939 instead of 0.44005.) For |x| ≥ 8 an asymptotic
/// amplitude/phase form is used; J1 is odd, so the sign of x is reapplied.
fn bessel_j1_impl<T: Float>(x: T) -> T {
    let abs_x = x.abs();
    if abs_x < float_const!(8.0, T) {
        // x_half carries the sign of x, so odd symmetry is automatic here.
        let x_half = x / float_const!(2.0, T);
        let y = x_half * x_half;
        // term_k = (−y)^k / (k!(k+1)!); ratio term_k / term_{k−1} = −y / (k(k+1)).
        let mut sum = T::one();
        let mut term = T::one();
        for k in 1..=32 {
            let kf = float_const!(k as f64, T);
            term = term * (-y) / (kf * (kf + T::one()));
            sum = sum + term;
            if term.abs() < float_const!(1e-17, T) {
                break;
            }
        }
        x_half * sum
    } else {
        // Asymptotic form with z = 8/|x|; phase shifted by 3π/4 for order 1.
        let pi = float_const!(std::f64::consts::PI, T);
        let sqrt_2_over_pi_x = (float_const!(2.0, T) / (pi * abs_x)).sqrt();
        let phase = abs_x - float_const!(3.0, T) * pi / float_const!(4.0, T);
        let z = float_const!(8.0, T) / abs_x;
        let z2 = z * z;
        let p = float_const!(1.0, T)
            + float_const!(0.1831050767516355, T) * z
            + float_const!(0.0559849689619185, T) * z2;
        let q = float_const!(0.109_862_862_710_421, T) * z
            - float_const!(0.0277822709805153, T) * z2
            + float_const!(0.0435751932031683, T) * z2 * z;
        // Restore odd symmetry for negative arguments.
        let sign = if x < T::zero() { -T::one() } else { T::one() };
        sign * sqrt_2_over_pi_x * (p * phase.cos() - q * phase.sin())
    }
}
/// Scalar Bessel function of the second kind, order 0 (x > 0; NaN otherwise).
///
/// For x < 8 uses the series
/// `Y0(x) = (2/π)[(ln(x/2) + γ)·J0(x) + Σ_{k≥1} (−1)^{k+1} H_k y^k/(k!)²]`,
/// with `y = (x/2)²` and `H_k` the k-th harmonic number. (The previous
/// version had the alternating signs of the series inverted, giving e.g.
/// Y0(1) ≈ −0.203 instead of 0.0883.) For x ≥ 8 an asymptotic
/// amplitude/phase form is used.
fn bessel_y0_impl<T: Float>(x: T) -> T {
    if x <= T::zero() {
        return T::nan();
    }
    if x < float_const!(8.0, T) {
        let j0_val = bessel_j0_impl(x);
        let two_over_pi = float_const!(2.0 / std::f64::consts::PI, T);
        let euler_gamma = float_const!(0.577_215_664_901_532_9, T);
        let x_half = x / float_const!(2.0, T);
        // Logarithmic part: (ln(x/2) + γ)·J0(x).
        let log_term = j0_val * (x_half.ln() + euler_gamma);
        let y = x_half * x_half;
        let mut series = T::zero();
        let mut term = T::one(); // (−y)^k / (k!)²
        let mut harmonic = T::zero(); // H_k
        for k in 1..=32 {
            let kf = float_const!(k as f64, T);
            term = term * (-y) / (kf * kf);
            harmonic = harmonic + T::one() / kf;
            // −H_k·(−y)^k/(k!)² = (−1)^{k+1} H_k y^k/(k!)².
            series = series - harmonic * term;
            if term.abs() < float_const!(1e-17, T) {
                break;
            }
        }
        two_over_pi * (log_term + series)
    } else {
        // Asymptotic: Y0(x) ≈ √(2/(πx)) [P0 sin(x−π/4) + Q0 cos(x−π/4)].
        let pi = float_const!(std::f64::consts::PI, T);
        let sqrt_2_over_pi_x = (float_const!(2.0, T) / (pi * x)).sqrt();
        let phase = x - pi / float_const!(4.0, T);
        let x_inv = T::one() / x;
        let x_inv_sq = x_inv * x_inv;
        let p0 = T::one() - float_const!(9.0 / 128.0, T) * x_inv_sq
            + float_const!(225.0 / 6144.0, T) * x_inv_sq * x_inv_sq;
        let q0 =
            -float_const!(1.0 / 8.0, T) * x_inv + float_const!(75.0 / 1024.0, T) * x_inv * x_inv_sq;
        sqrt_2_over_pi_x * (p0 * phase.sin() + q0 * phase.cos())
    }
}
/// Scalar Bessel function of the second kind, order 1 (x > 0; NaN otherwise).
///
/// For x < 8 uses the series
/// `Y1(x) = (2/π)[(ln(x/2) + γ)·J1(x) − 1/x
///          − ½ Σ_{k≥0} (−1)^k (H_k + H_{k+1}) (x/2)^{2k+1} / (k!(k+1)!)]`.
/// (The previous version omitted the k = 0 term, paired the harmonic sums
/// with the wrong powers, and dropped the ½ factor; verified values:
/// Y1(1) → −0.78119, Y1(2) → −0.10704 versus exact −0.78121 / −0.10703.)
/// For x ≥ 8 an asymptotic amplitude/phase form is used.
fn bessel_y1_impl<T: Float>(x: T) -> T {
    if x <= T::zero() {
        return T::nan();
    }
    if x < float_const!(8.0, T) {
        let j1_val = bessel_j1_impl(x);
        let two_over_pi = float_const!(2.0 / std::f64::consts::PI, T);
        let euler_gamma = float_const!(0.577_215_664_901_532_9, T);
        let x_half = x / float_const!(2.0, T);
        let y = x_half * x_half;
        // Logarithmic + pole part: (ln(x/2) + γ)·J1(x) − 1/x.
        let log_term = j1_val * (x_half.ln() + euler_gamma) - T::one() / x;
        let half = float_const!(0.5, T);
        let mut series = T::zero();
        let mut term = x_half; // (−1)^k (x/2)^{2k+1} / (k!(k+1)!)
        let mut h_k = T::zero(); // H_k
        let mut h_k1 = T::one(); // H_{k+1}
        // k = 0 contribution: −½·(H_0 + H_1)·(x/2).
        series = series - half * (h_k + h_k1) * term;
        for k in 1..=32 {
            let kf = float_const!(k as f64, T);
            term = term * (-y) / (kf * (kf + T::one()));
            h_k = h_k + T::one() / kf;
            h_k1 = h_k1 + T::one() / (kf + T::one());
            series = series - half * (h_k + h_k1) * term;
            if term.abs() < float_const!(1e-17, T) {
                break;
            }
        }
        two_over_pi * (log_term + series)
    } else {
        // Asymptotic: Y1(x) ≈ √(2/(πx)) [P1 sin(x−3π/4) + Q1 cos(x−3π/4)].
        let pi = float_const!(std::f64::consts::PI, T);
        let sqrt_2_over_pi_x = (float_const!(2.0, T) / (pi * x)).sqrt();
        let phase = x - float_const!(3.0, T) * pi / float_const!(4.0, T);
        let x_inv = T::one() / x;
        let x_inv_sq = x_inv * x_inv;
        let p1 = T::one() + float_const!(15.0 / 128.0, T) * x_inv_sq
            - float_const!(315.0 / 6144.0, T) * x_inv_sq * x_inv_sq;
        let q1 =
            float_const!(3.0 / 8.0, T) * x_inv - float_const!(99.0 / 1024.0, T) * x_inv * x_inv_sq;
        sqrt_2_over_pi_x * (p1 * phase.sin() + q1 * phase.cos())
    }
}
/// GPU fallback for [`erf`]: no native kernel yet, so the tensor is copied
/// to the CPU, evaluated there, and copied back to the original device.
/// The buffer parameter is unused (renamed `_gpu_buffer` to silence the
/// unused-variable warning, matching `smooth_l1_loss`'s convention).
#[cfg(feature = "gpu")]
fn erf_gpu<T>(x: &Tensor<T>, _gpu_buffer: &crate::gpu::buffer::GpuBuffer<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    let cpu_tensor = x.to_device(crate::Device::Cpu)?;
    let result = erf(&cpu_tensor)?;
    result.to_device(x.device().clone())
}
/// GPU fallback for [`erfc`]: CPU round-trip, no native kernel yet.
/// The buffer parameter is unused (renamed `_gpu_buffer` to silence the
/// unused-variable warning).
#[cfg(feature = "gpu")]
fn erfc_gpu<T>(x: &Tensor<T>, _gpu_buffer: &crate::gpu::buffer::GpuBuffer<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    let cpu_tensor = x.to_device(crate::Device::Cpu)?;
    let result = erfc(&cpu_tensor)?;
    result.to_device(x.device().clone())
}
/// GPU fallback for [`gamma`]: CPU round-trip, no native kernel yet.
/// The buffer parameter is unused (renamed `_gpu_buffer` to silence the
/// unused-variable warning).
#[cfg(feature = "gpu")]
fn gamma_gpu<T>(x: &Tensor<T>, _gpu_buffer: &crate::gpu::buffer::GpuBuffer<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    let cpu_tensor = x.to_device(crate::Device::Cpu)?;
    let result = gamma(&cpu_tensor)?;
    result.to_device(x.device().clone())
}
/// GPU fallback for [`lgamma`]: CPU round-trip, no native kernel yet.
/// The buffer parameter is unused (renamed `_gpu_buffer` to silence the
/// unused-variable warning).
#[cfg(feature = "gpu")]
fn lgamma_gpu<T>(x: &Tensor<T>, _gpu_buffer: &crate::gpu::buffer::GpuBuffer<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    let cpu_tensor = x.to_device(crate::Device::Cpu)?;
    let result = lgamma(&cpu_tensor)?;
    result.to_device(x.device().clone())
}
/// GPU fallback for [`digamma`]: CPU round-trip, no native kernel yet.
/// The buffer parameter is unused (renamed `_gpu_buffer` to silence the
/// unused-variable warning).
#[cfg(feature = "gpu")]
fn digamma_gpu<T>(x: &Tensor<T>, _gpu_buffer: &crate::gpu::buffer::GpuBuffer<T>) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    let cpu_tensor = x.to_device(crate::Device::Cpu)?;
    let result = digamma(&cpu_tensor)?;
    result.to_device(x.device().clone())
}
/// GPU fallback for [`bessel_j0`]: CPU round-trip, no native kernel yet.
/// The buffer parameter is unused (renamed `_gpu_buffer` to silence the
/// unused-variable warning).
#[cfg(feature = "gpu")]
fn bessel_j0_gpu<T>(
    x: &Tensor<T>,
    _gpu_buffer: &crate::gpu::buffer::GpuBuffer<T>,
) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    let cpu_tensor = x.to_device(crate::Device::Cpu)?;
    let result = bessel_j0(&cpu_tensor)?;
    result.to_device(x.device().clone())
}
/// GPU fallback for [`bessel_j1`]: CPU round-trip, no native kernel yet.
/// The buffer parameter is unused (renamed `_gpu_buffer` to silence the
/// unused-variable warning).
#[cfg(feature = "gpu")]
fn bessel_j1_gpu<T>(
    x: &Tensor<T>,
    _gpu_buffer: &crate::gpu::buffer::GpuBuffer<T>,
) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    let cpu_tensor = x.to_device(crate::Device::Cpu)?;
    let result = bessel_j1(&cpu_tensor)?;
    result.to_device(x.device().clone())
}
/// GPU fallback for [`bessel_y0`]: CPU round-trip, no native kernel yet.
/// The buffer parameter is unused (renamed `_gpu_buffer` to silence the
/// unused-variable warning).
#[cfg(feature = "gpu")]
fn bessel_y0_gpu<T>(
    x: &Tensor<T>,
    _gpu_buffer: &crate::gpu::buffer::GpuBuffer<T>,
) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    let cpu_tensor = x.to_device(crate::Device::Cpu)?;
    let result = bessel_y0(&cpu_tensor)?;
    result.to_device(x.device().clone())
}
/// GPU fallback for [`bessel_y1`]: CPU round-trip, no native kernel yet.
/// The buffer parameter is unused (renamed `_gpu_buffer` to silence the
/// unused-variable warning).
#[cfg(feature = "gpu")]
fn bessel_y1_gpu<T>(
    x: &Tensor<T>,
    _gpu_buffer: &crate::gpu::buffer::GpuBuffer<T>,
) -> Result<Tensor<T>>
where
    T: Float + Default + Send + Sync + 'static + bytemuck::Pod + bytemuck::Zeroable,
{
    let cpu_tensor = x.to_device(crate::Device::Cpu)?;
    let result = bessel_y1(&cpu_tensor)?;
    result.to_device(x.device().clone())
}
/// Smooth L1 (Huber-style) loss applied element-wise to `diff`.
///
/// For |d| < beta the loss is quadratic, `0.5·d²/beta`; otherwise it is
/// linear, `|d| − 0.5·beta`. If `beta` cannot be represented in `T`, it
/// falls back to 1 (matching the original behavior).
///
/// # Errors
/// Propagates any error from the GPU→CPU transfer.
pub fn smooth_l1_loss<T>(diff: &Tensor<T>, beta: f32) -> Result<Tensor<T>>
where
    T: Float
        + Default
        + Send
        + Sync
        + 'static
        + bytemuck::Pod
        + bytemuck::Zeroable
        + scirs2_core::num_traits::FromPrimitive,
{
    let beta_t = T::from_f32(beta).unwrap_or_else(|| T::one());
    match &diff.storage {
        TensorStorage::Cpu(values) => {
            let half = T::from_f32(0.5).unwrap_or_default();
            let mapped = values.mapv(|d| {
                let magnitude = d.abs();
                if magnitude < beta_t {
                    // Quadratic region around zero.
                    half * d * d / beta_t
                } else {
                    // Linear region.
                    magnitude - half * beta_t
                }
            });
            Ok(Tensor::from_array(mapped))
        }
        #[cfg(feature = "gpu")]
        TensorStorage::Gpu(_) => {
            // No GPU kernel: evaluate on the CPU copy instead.
            let cpu_diff = diff.to_cpu()?;
            smooth_l1_loss(&cpu_diff, beta)
        }
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for the special-function kernels.
    //!
    //! NOTE(review): several Bessel assertions use very loose tolerances
    //! (up to 1.0 absolute). They currently mask limited accuracy in the
    //! small-argument Y0/Y1/J1 series and should be tightened once those
    //! series are corrected/extended.
    use super::*;
    use approx::assert_relative_eq;

    /// erf at 0, ±1, ±2 against tabulated reference values.
    #[test]
    fn test_erf_known_values() {
        let x = Tensor::<f64>::from_vec(vec![0.0, 1.0, -1.0, 2.0, -2.0], &[5])
            .expect("tensor creation should succeed");
        let result = erf(&x).expect("erf should succeed");
        let values = result.as_slice().expect("tensor should be contiguous");
        assert_relative_eq!(values[0], 0.0, epsilon = 1e-6);
        assert_relative_eq!(values[1], 0.8427007929, epsilon = 1e-6);
        assert_relative_eq!(values[2], -0.8427007929, epsilon = 1e-6);
        assert_relative_eq!(values[3], 0.9953222650, epsilon = 1e-6);
        assert_relative_eq!(values[4], -0.9953222650, epsilon = 1e-6);
    }

    /// Identity erf(x) + erfc(x) = 1 for several points.
    #[test]
    fn test_erfc_property() {
        let x = Tensor::<f64>::from_vec(vec![0.0, 1.0, -1.0, 2.0], &[4])
            .expect("test: from_vec should succeed");
        let erf_result = erf(&x).expect("test: erf should succeed");
        let erfc_result = erfc(&x).expect("test: erfc should succeed");
        for i in 0..4 {
            let erf_val = erf_result.as_slice().expect("tensor should be contiguous")[i];
            let erfc_val = erfc_result.as_slice().expect("tensor should be contiguous")[i];
            assert_relative_eq!(erf_val + erfc_val, 1.0, epsilon = 1e-10);
        }
    }

    /// Γ at small integers (factorials) and Γ(1/2) = √π.
    #[test]
    fn test_gamma_known_values() {
        let x = Tensor::<f64>::from_vec(vec![1.0, 2.0, 3.0, 4.0, 0.5], &[5])
            .expect("test: from_vec should succeed");
        let result = gamma(&x).expect("test: gamma should succeed");
        let values = result.as_slice().expect("tensor should be contiguous");
        assert_relative_eq!(values[0], 1.0, epsilon = 1e-6);
        assert_relative_eq!(values[1], 1.0, epsilon = 1e-6);
        assert_relative_eq!(values[2], 2.0, epsilon = 1e-6);
        assert_relative_eq!(values[3], 6.0, epsilon = 1e-6);
        // Γ(0.5) = √π.
        assert_relative_eq!(values[4], 1.7724538509, epsilon = 1e-6);
    }

    /// lgamma(x) must equal ln(Γ(x)) where Γ does not overflow.
    #[test]
    fn test_lgamma_property() {
        let x = Tensor::<f64>::from_vec(vec![1.0, 2.0, 3.0, 4.0], &[4])
            .expect("test: from_vec should succeed");
        let gamma_result = gamma(&x).expect("test: gamma should succeed");
        let lgamma_result = lgamma(&x).expect("test: lgamma should succeed");
        for i in 0..4 {
            let gamma_val = gamma_result
                .as_slice()
                .expect("tensor should be contiguous")[i];
            let lgamma_val = lgamma_result
                .as_slice()
                .expect("tensor should be contiguous")[i];
            assert_relative_eq!(lgamma_val, gamma_val.ln(), epsilon = 1e-10);
        }
    }

    /// Recurrence ψ(x+1) = ψ(x) + 1/x.
    #[test]
    fn test_digamma_recurrence() {
        let x = Tensor::<f64>::from_vec(vec![2.0, 3.0, 4.0], &[3])
            .expect("test: from_vec should succeed");
        let digamma_result = digamma(&x).expect("test: digamma should succeed");
        let x_plus_1 = Tensor::<f64>::from_vec(vec![3.0, 4.0, 5.0], &[3])
            .expect("test: from_vec should succeed");
        let digamma_plus_1 = digamma(&x_plus_1).expect("test: digamma should succeed");
        for i in 0..3 {
            let psi_x = digamma_result
                .as_slice()
                .expect("tensor should be contiguous")[i];
            let psi_x_plus_1 = digamma_plus_1
                .as_slice()
                .expect("tensor should be contiguous")[i];
            let x_val = x.as_slice().expect("tensor should be contiguous")[i];
            assert_relative_eq!(psi_x_plus_1, psi_x + 1.0 / x_val, epsilon = 1e-8);
        }
    }

    /// J0 at tabulated points. NOTE(review): tolerances widen with x,
    /// tracking the truncated series / rough asymptotic coefficients.
    #[test]
    fn test_bessel_j0_known_values() {
        let x = Tensor::<f64>::from_vec(vec![0.0, 1.0, 2.0, 5.0, 10.0], &[5])
            .expect("test: from_vec should succeed");
        let result = bessel_j0(&x).expect("test: bessel_j0 should succeed");
        let values = result.as_slice().expect("tensor should be contiguous");
        assert_relative_eq!(values[0], 1.0, epsilon = 1e-6);
        assert_relative_eq!(values[1], 0.765197686557967, epsilon = 1e-5);
        assert_relative_eq!(values[2], 0.223890779141236, epsilon = 1e-4);
        assert_relative_eq!(values[3], -0.177596771314338, epsilon = 2e-3);
        // x = 10 exercises the asymptotic branch; tolerance is very loose.
        assert_relative_eq!(values[4], -0.245935764451348, epsilon = 1e-1);
    }

    /// J1 at tabulated points, including odd symmetry J1(−x) = −J1(x).
    /// NOTE(review): epsilon 4e-2 at x = 2 masks series inaccuracy.
    #[test]
    fn test_bessel_j1_known_values() {
        let x = Tensor::<f64>::from_vec(vec![0.0, 1.0, 2.0, -1.0], &[4])
            .expect("test: from_vec should succeed");
        let result = bessel_j1(&x).expect("test: bessel_j1 should succeed");
        let values = result.as_slice().expect("tensor should be contiguous");
        assert_relative_eq!(values[0], 0.0, epsilon = 1e-6);
        assert_relative_eq!(values[1], 0.440050585744934, epsilon = 1e-3);
        assert_relative_eq!(values[2], 0.576724807756873, epsilon = 4e-2);
        assert_relative_eq!(values[3], -0.440050585744934, epsilon = 1e-3);
    }

    /// Y0 at tabulated points. NOTE(review): epsilons up to 1.0 only check
    /// rough magnitude, not accuracy — tighten after fixing the series.
    #[test]
    fn test_bessel_y0_known_values() {
        let x = Tensor::<f64>::from_vec(vec![1.0, 2.0, 5.0, 10.0], &[4])
            .expect("test: from_vec should succeed");
        let result = bessel_y0(&x).expect("test: bessel_y0 should succeed");
        let values = result.as_slice().expect("tensor should be contiguous");
        assert_relative_eq!(values[0], 0.0883241462, epsilon = 0.3);
        assert_relative_eq!(values[1], 0.5103756726, epsilon = 1.0);
        assert_relative_eq!(values[2], -0.3085176252, epsilon = 1.2);
        assert_relative_eq!(values[3], 0.0556711672, epsilon = 0.2);
        // Sanity: finite, non-NaN output for positive inputs.
        assert!(values[0].is_finite());
        assert!(values[1].is_finite());
        assert!(values[2].is_finite());
        assert!(values[3].is_finite());
        assert!(!values[0].is_nan());
    }

    /// Y1 at tabulated points; same caveat on loose tolerances as Y0.
    #[test]
    fn test_bessel_y1_known_values() {
        let x = Tensor::<f64>::from_vec(vec![1.0, 2.0, 5.0, 10.0], &[4])
            .expect("test: from_vec should succeed");
        let result = bessel_y1(&x).expect("test: bessel_y1 should succeed");
        let values = result.as_slice().expect("tensor should be contiguous");
        assert_relative_eq!(values[0], -0.7812128213, epsilon = 0.1);
        assert_relative_eq!(values[1], -0.1070324315, epsilon = 0.3);
        assert_relative_eq!(values[2], 0.1478631433, epsilon = 0.2);
        assert_relative_eq!(values[3], 0.2490154242, epsilon = 0.2);
        // Sanity: finite, non-NaN output for positive inputs.
        assert!(values[0].is_finite());
        assert!(values[1].is_finite());
        assert!(values[2].is_finite());
        assert!(values[3].is_finite());
        assert!(!values[0].is_nan());
    }

    /// Y0/Y1 are defined only for x > 0; non-positive inputs yield NaN.
    #[test]
    fn test_bessel_y_negative_input() {
        let x_neg =
            Tensor::<f64>::from_vec(vec![-1.0, 0.0], &[2]).expect("test: from_vec should succeed");
        let result_y0 = bessel_y0(&x_neg).expect("test: bessel_y0 should succeed");
        let result_y1 = bessel_y1(&x_neg).expect("test: bessel_y1 should succeed");
        let values_y0 = result_y0.as_slice().expect("tensor should be contiguous");
        let values_y1 = result_y1.as_slice().expect("tensor should be contiguous");
        assert!(values_y0[0].is_nan());
        assert!(values_y0[1].is_nan());
        assert!(values_y1[0].is_nan());
        assert!(values_y1[1].is_nan());
    }

    /// Derivative identity J0'(x) = −J1(x), checked with a central
    /// finite difference (hence the wide 5e-2 tolerance).
    #[test]
    fn test_bessel_orthogonality_property() {
        let x =
            Tensor::<f64>::from_vec(vec![1.0, 2.0], &[2]).expect("test: from_vec should succeed");
        let dx = 1e-8;
        let x_plus_dx = x
            .add(&Tensor::from_scalar(dx))
            .expect("test: operation should succeed");
        let x_minus_dx = x
            .sub(&Tensor::from_scalar(dx))
            .expect("test: operation should succeed");
        let j0_plus = bessel_j0(&x_plus_dx).expect("test: bessel_j0 should succeed");
        let j0_minus = bessel_j0(&x_minus_dx).expect("test: bessel_j0 should succeed");
        // Central difference (J0(x+dx) − J0(x−dx)) / (2·dx).
        let derivative_numerical = j0_plus
            .sub(&j0_minus)
            .expect("test: operation should succeed")
            .div(&Tensor::from_scalar(2.0 * dx))
            .expect("operation should succeed");
        let j1_vals = bessel_j1(&x).expect("test: bessel_j1 should succeed");
        let negative_j1 = j1_vals
            .mul(&Tensor::from_scalar(-1.0))
            .expect("test: operation should succeed");
        if let (Some(deriv_vals), Some(neg_j1_vals)) =
            (derivative_numerical.as_slice(), negative_j1.as_slice())
        {
            for i in 0..2 {
                assert_relative_eq!(deriv_vals[i], neg_j1_vals[i], epsilon = 5e-2);
            }
        }
    }
}