use std::f64::consts::PI;
/// Hard ceiling on the polynomial degree; higher degrees return NaN instead
/// of running a long recurrence.
const MAX_ORDER: i32 = 100;

/// Evaluates the Legendre polynomial of the first kind P_n(x) via the
/// Bonnet three-term recurrence:
///
///     (k + 1) P_{k+1}(x) = (2k + 1) x P_k(x) - k P_{k-1}(x)
///
/// Negative degrees are folded onto non-negative ones with the identity
/// P_{-n}(x) = P_{n-1}(x).
///
/// Returns NaN when the (folded) degree exceeds `MAX_ORDER` or `x` is NaN.
/// `x` itself is unrestricted: P_n is a polynomial on all of R.
pub fn legendre_p_scalar(n: i32, x: f64) -> f64 {
    if n < 0 {
        // P_{-n}(x) = P_{n-1}(x), i.e. degree n maps to -n - 1. Written as
        // -(n + 1) rather than -n - 1 so that n == i32::MIN cannot overflow:
        // negating i32::MIN panics in debug builds / wraps in release, while
        // -(i32::MIN + 1) == i32::MAX is representable (and is then rejected
        // by the MAX_ORDER guard below).
        return legendre_p_scalar(-(n + 1), x);
    }
    if n > MAX_ORDER {
        return f64::NAN;
    }
    if x.is_nan() {
        return f64::NAN;
    }
    // Closed forms that also seed the recurrence.
    if n == 0 {
        return 1.0;
    }
    if n == 1 {
        return x;
    }
    // Upward recurrence: after the iteration with index k, p_curr holds
    // P_{k+1}(x).
    let mut p_prev = 1.0; // P_0(x)
    let mut p_curr = x; // P_1(x)
    for k in 1..n {
        let k_f = f64::from(k);
        let p_next = ((2.0 * k_f + 1.0) * x * p_curr - k_f * p_prev) / (k_f + 1.0);
        p_prev = p_curr;
        p_curr = p_next;
    }
    p_curr
}
/// Associated Legendre function P_n^m(x), Condon-Shortley sign included
/// (the seed carries a factor (-1)^m).
///
/// Strategy: start from the closed forms on and just above the diagonal,
///
///     P_m^m(x)     = (-1)^m (2m - 1)!! (1 - x^2)^{m/2}
///     P_{m+1}^m(x) = x (2m + 1) P_m^m(x)
///
/// then march the degree upward with
///
///     (k - m + 1) P_{k+1}^m = (2k + 1) x P_k^m - (k + m) P_{k-1}^m.
///
/// Returns NaN for negative `n` or `m`, `n > MAX_ORDER`, NaN `x`, or
/// |x| > 1 (the square-root factor needs x in [-1, 1]); returns 0 when
/// m > n.
pub fn legendre_p_assoc_scalar(n: i32, m: i32, x: f64) -> f64 {
    if n < 0 || m < 0 {
        return f64::NAN;
    }
    if m > n {
        return 0.0;
    }
    if n > MAX_ORDER || x.is_nan() || x.abs() > 1.0 {
        return f64::NAN;
    }
    // Order zero reduces to the plain Legendre polynomial.
    if m == 0 {
        return legendre_p_scalar(n, x);
    }
    let sin_theta = (1.0 - x * x).sqrt();
    // Build the diagonal value P_m^m by folding in one factor of
    // -(2j - 1) sin(theta) per step j = 1..=m.
    let mut diag = 1.0;
    let mut odd = 1.0;
    for _ in 0..m {
        diag *= -odd * sin_theta;
        odd += 2.0;
    }
    if n == m {
        return diag;
    }
    let above = x * f64::from(2 * m + 1) * diag;
    if n == m + 1 {
        return above;
    }
    // Two-element sliding window over degrees (P_{k-1}^m, P_k^m).
    let m_f = f64::from(m);
    let mut window = (diag, above);
    for k in (m + 1)..n {
        let (older, newer) = window;
        let k_f = f64::from(k);
        let next = ((2.0 * k_f + 1.0) * x * newer - (k_f + m_f) * older) / (k_f - m_f + 1.0);
        window = (newer, next);
    }
    window.1
}
/// Real-valued spherical harmonic using the semi-normalized convention
/// implemented by `sph_harm_norm` (factor sqrt(2 (n-m)!/(n+m)!) for m > 0,
/// 1 for m = 0 — note: no (2n+1)/(4π) term):
///
///     m > 0  -> N(n, m)  P_n^m(cosθ)  cos(mφ)
///     m == 0 -> N(n, 0)  P_n(cosθ)
///     m < 0  -> N(n,|m|) P_n^|m|(cosθ) sin(|m|φ)
///
/// Returns NaN for n < 0 or NaN angles, and 0 when |m| > n.
pub fn sph_harm_scalar(n: i32, m: i32, theta: f64, phi: f64) -> f64 {
    if n < 0 {
        return f64::NAN;
    }
    if m.abs() > n {
        return 0.0;
    }
    if theta.is_nan() || phi.is_nan() {
        return f64::NAN;
    }
    let abs_m = m.abs();
    let p_nm = legendre_p_assoc_scalar(n, abs_m, theta.cos());
    // Azimuthal factor keyed off the sign of m: cosine branch for positive
    // orders, sine branch for negative ones, constant 1 for m == 0.
    let angular = match m.cmp(&0) {
        std::cmp::Ordering::Greater => (f64::from(m) * phi).cos(),
        std::cmp::Ordering::Equal => 1.0,
        std::cmp::Ordering::Less => (f64::from(abs_m) * phi).sin(),
    };
    sph_harm_norm(n, abs_m) * p_nm * angular
}
/// Semi-normalization factor for the real harmonics: 1 for m == 0,
/// otherwise sqrt(2 (n - m)! / (n + m)!).
///
/// The factorial quotient is accumulated as a running product of
/// reciprocals, (n-m)!/(n+m)! = 1 / [(n-m+1) * ... * (n+m)], which stays
/// in floating point and avoids overflowing an integer factorial.
fn sph_harm_norm(n: i32, m: i32) -> f64 {
    if m == 0 {
        return 1.0;
    }
    let ratio = ((n - m + 1)..=(n + m)).fold(1.0_f64, |acc, k| acc / f64::from(k));
    (2.0 * ratio).sqrt()
}
/// Fully normalized complex spherical harmonic Y_n^m(θ, φ), returned as a
/// (real, imaginary) pair.
///
/// Negative orders are handled through Y_n^{-m} = (-1)^m conj(Y_n^m): the
/// Legendre part and normalization are evaluated at |m|, the sign of m
/// enters only through the (-1)^|m| sign flip and the signed angle m·φ.
///
/// Returns (NaN, NaN) for n < 0, |m| > n, or NaN angles.
pub fn sph_harm_complex_scalar(n: i32, m: i32, theta: f64, phi: f64) -> (f64, f64) {
    if n < 0 || m.abs() > n || theta.is_nan() || phi.is_nan() {
        return (f64::NAN, f64::NAN);
    }
    let abs_m = m.abs();
    let p_nm = legendre_p_assoc_scalar(n, abs_m, theta.cos());
    let norm = sph_harm_full_norm(n, abs_m);
    // Sign flip applies only for negative odd orders.
    let signed_norm = if m < 0 && abs_m % 2 == 1 { -norm } else { norm };
    let amplitude = signed_norm * p_nm;
    let (sin_mphi, cos_mphi) = (f64::from(m) * phi).sin_cos();
    (amplitude * cos_mphi, amplitude * sin_mphi)
}
/// Normalization for the fully (4π-)normalized harmonic:
/// sqrt((2n + 1) / (4π) * (n - m)! / (n + m)!).
///
/// The factorial quotient is built as a product of reciprocals over
/// (n - m + 1)..=(n + m) to keep everything in floating point.
fn sph_harm_full_norm(n: i32, m: i32) -> f64 {
    let mut ratio = 1.0_f64;
    let mut k = n - m + 1;
    while k <= n + m {
        ratio /= f64::from(k);
        k += 1;
    }
    let leading = (2.0 * f64::from(n) + 1.0) / (4.0 * PI);
    (leading * ratio).sqrt()
}
#[cfg(test)]
mod tests {
    use super::*;

    // Absolute tolerance for comparisons against analytically exact values.
    const TOL: f64 = 1e-10;

    // Assert |a - b| < tol, additionally treating NaN == NaN as a match so
    // NaN-propagation cases can reuse the same helper.
    fn assert_close(a: f64, b: f64, tol: f64, msg: &str) {
        let diff = (a - b).abs();
        assert!(
            diff < tol || (a.is_nan() && b.is_nan()),
            "{}: expected {}, got {}, diff {}",
            msg,
            b,
            a,
            diff
        );
    }

    // Endpoint and parity identities: P_n(1) = 1, P_n(-1) = (-1)^n, and
    // P_n(0) = 0 for every odd degree n.
    #[test]
    fn test_legendre_p_special_values() {
        for n in 0..=10 {
            assert_close(legendre_p_scalar(n, 1.0), 1.0, TOL, &format!("P_{}(1)", n));
        }
        for n in 0..=10 {
            let expected = if n % 2 == 0 { 1.0 } else { -1.0 };
            assert_close(
                legendre_p_scalar(n, -1.0),
                expected,
                TOL,
                &format!("P_{}(-1)", n),
            );
        }
        for n in [1, 3, 5, 7, 9] {
            assert_close(legendre_p_scalar(n, 0.0), 0.0, TOL, &format!("P_{}(0)", n));
        }
    }

    // Low-degree polynomials against their closed forms at x = 0.5.
    #[test]
    fn test_legendre_p_explicit() {
        assert_close(legendre_p_scalar(0, 0.5), 1.0, TOL, "P_0(0.5)");
        assert_close(legendre_p_scalar(1, 0.5), 0.5, TOL, "P_1(0.5)");
        let x = 0.5;
        assert_close(
            legendre_p_scalar(2, x),
            (3.0 * x * x - 1.0) / 2.0,
            TOL,
            "P_2(0.5)",
        );
        assert_close(
            legendre_p_scalar(3, x),
            (5.0 * x * x * x - 3.0 * x) / 2.0,
            TOL,
            "P_3(0.5)",
        );
    }

    // With order m = 0 the associated function must reduce to plain P_n.
    #[test]
    fn test_legendre_p_assoc_m0() {
        for n in 0..=5 {
            for &x in &[-0.5, 0.0, 0.5, 0.9] {
                assert_close(
                    legendre_p_assoc_scalar(n, 0, x),
                    legendre_p_scalar(n, x),
                    TOL,
                    &format!("P_{}^0({}) = P_{}({})", n, x, n, x),
                );
            }
        }
    }

    // Closed forms with the Condon-Shortley sign, writing x = cosθ:
    // P_1^1 = -sinθ, P_2^1 = -3x sinθ, P_2^2 = 3(1 - x²).
    #[test]
    fn test_legendre_p_assoc_explicit() {
        let x: f64 = 0.6;
        let sin_theta = (1.0 - x * x).sqrt();
        assert_close(legendre_p_assoc_scalar(1, 1, x), -sin_theta, TOL, "P_1^1");
        assert_close(
            legendre_p_assoc_scalar(2, 1, x),
            -3.0 * x * sin_theta,
            TOL,
            "P_2^1",
        );
        assert_close(
            legendre_p_assoc_scalar(2, 2, x),
            3.0 * (1.0 - x * x),
            TOL,
            "P_2^2",
        );
    }

    // Orders above the degree are identically zero.
    #[test]
    fn test_legendre_p_assoc_m_greater_n() {
        assert_close(legendre_p_assoc_scalar(2, 3, 0.5), 0.0, TOL, "P_2^3");
        assert_close(legendre_p_assoc_scalar(1, 5, 0.5), 0.0, TOL, "P_1^5");
    }

    // Y_0^0 is constant over the whole sphere, so its value must not depend
    // on the sample point.
    #[test]
    fn test_sph_harm_orthogonality() {
        let y00_1 = sph_harm_scalar(0, 0, 0.5, 0.0);
        let y00_2 = sph_harm_scalar(0, 0, 1.0, 1.0);
        let y00_3 = sph_harm_scalar(0, 0, 2.0, 3.0);
        assert_close(y00_1, y00_2, TOL, "Y_0^0 constant");
        assert_close(y00_2, y00_3, TOL, "Y_0^0 constant");
    }

    // The azimuthal factor is cos(mφ)/sin(|m|φ), so every harmonic must be
    // 2π-periodic in φ (looser tolerance: cos/sin of shifted angles are not
    // bit-identical).
    #[test]
    fn test_sph_harm_symmetry() {
        let theta = 0.8;
        let phi = 1.2;
        for n in 0..=3 {
            for m in -n..=n {
                let y1 = sph_harm_scalar(n, m, theta, phi);
                let y2 = sph_harm_scalar(n, m, theta, phi + 2.0 * PI);
                assert_close(y1, y2, 1e-9, &format!("Y_{}^{} 2π periodicity", n, m));
            }
        }
    }

    // At the pole, Y_1^0(0, 0) = sqrt(3/(4π)) * P_1(1) = sqrt(3/(4π)), with
    // zero imaginary part since m = 0.
    #[test]
    fn test_sph_harm_complex_normalization() {
        let (re, im) = sph_harm_complex_scalar(1, 0, 0.0, 0.0); let expected = (3.0 / (4.0 * PI)).sqrt();
        assert_close(re, expected, 1e-9, "Y_1^0(0,0) normalization");
        assert_close(im, 0.0, 1e-9, "Y_1^0(0,0) imaginary part");
    }

    // NaN inputs propagate through every entry point.
    #[test]
    fn test_legendre_nan() {
        assert!(legendre_p_scalar(5, f64::NAN).is_nan());
        assert!(legendre_p_assoc_scalar(5, 2, f64::NAN).is_nan());
        assert!(sph_harm_scalar(5, 2, f64::NAN, 0.0).is_nan());
    }
}