//! darkmatter 0.0.2
//!
//! Dark matter simulation engine — gravitational fields, particle dynamics,
//! halo stability, and cosmological constants.
use std::f64::consts::PI;

/// Chi-square statistic: Σ (oᵢ − eᵢ)² / σᵢ².
///
/// The three slices are walked in lockstep; if their lengths differ,
/// trailing elements of the longer slices are ignored (zip semantics).
pub fn chi_square(observed: &[f64], expected: &[f64], errors: &[f64]) -> f64 {
    let mut total = 0.0;
    for ((obs, exp), sigma) in observed.iter().zip(expected).zip(errors) {
        let resid = obs - exp;
        total += resid * resid / (sigma * sigma);
    }
    total
}

/// Reduced chi-square: χ² divided by the degrees of freedom
/// (data points minus fitted parameters).
///
/// Returns `+∞` when the degrees of freedom are zero (or would be
/// negative — `saturating_sub` clamps at zero), since χ²/ν is undefined.
pub fn reduced_chi_square(
    observed: &[f64],
    expected: &[f64],
    errors: &[f64],
    n_params: usize,
) -> f64 {
    let dof = observed.len().saturating_sub(n_params);
    if dof == 0 {
        return f64::INFINITY;
    }
    // Same sum as `chi_square`, inlined so the computation is local.
    let chi2: f64 = observed
        .iter()
        .zip(expected)
        .zip(errors)
        .map(|((obs, exp), sigma)| {
            let resid = obs - exp;
            resid * resid / (sigma * sigma)
        })
        .sum();
    chi2 / dof as f64
}

/// Gaussian log-likelihood with per-point uncertainties:
/// Σ −½ [ (oᵢ − eᵢ)²/σᵢ² + ln(2π σᵢ²) ].
///
/// Includes the normalisation term, so values are comparable across
/// different error vectors. Slices are zipped (shortest length wins).
pub fn log_likelihood_gaussian(observed: &[f64], expected: &[f64], errors: &[f64]) -> f64 {
    let mut ll = 0.0;
    for ((obs, exp), sigma) in observed.iter().zip(expected).zip(errors) {
        let resid = obs - exp;
        ll += -0.5 * (resid * resid / (sigma * sigma) + (2.0 * PI * sigma * sigma).ln());
    }
    ll
}

/// Poisson log-likelihood: Σ [ kᵢ ln λᵢ − λᵢ − ln Γ(kᵢ + 1) ].
///
/// A non-positive expected rate λ contributes 0 when the observed count
/// is also ≤ 0 (vacuously consistent) and −∞ otherwise (impossible
/// observation), driving the total to −∞.
pub fn log_likelihood_poisson(observed: &[f64], expected: &[f64]) -> f64 {
    let mut ll = 0.0;
    for (count, rate) in observed.iter().zip(expected) {
        if *rate <= 0.0 {
            ll += if *count <= 0.0 { 0.0 } else { f64::NEG_INFINITY };
            continue;
        }
        ll += count * rate.ln() - rate - ln_gamma(*count + 1.0);
    }
    ll
}

/// Natural log of the gamma function, ln Γ(x), via the Lanczos
/// approximation (the classic Numerical Recipes `gammln`, ~1e-10 accurate
/// for x > 0).
///
/// Non-positive inputs return 0.0 as a defensive fallback; callers here
/// only pass x = k + 1 with k ≥ 0.
fn ln_gamma(x: f64) -> f64 {
    if x <= 0.0 {
        return 0.0;
    }
    // Lanczos coefficients (g = 5, 6 terms).
    const COEFFS: [f64; 6] = [
        76.180_091_729_471_46,
        -86.505_320_329_416_77,
        24.014_098_240_830_91,
        -1.231_739_572_450_155,
        0.001_208_650_973_866_179,
        -5.395_239_384_953e-6,
    ];
    let mut lead = x + 5.5;
    lead -= (x + 0.5) * lead.ln();
    let mut denom = x;
    let mut series = 1.000_000_000_190_015;
    for &c in &COEFFS {
        denom += 1.0;
        series += c / denom;
    }
    // sqrt(2*pi) = 2.5066282746...
    -lead + (2.506_628_274_631 * series / x).ln()
}

/// Fisher information for a single parameter: Σ (∂mᵢ/∂θ)² / σᵢ²,
/// with the derivative estimated by a central finite difference of
/// half-width `delta`.
pub fn fisher_matrix_1d(
    param: f64,
    delta: f64,
    model_fn: &dyn Fn(f64) -> Vec<f64>,
    errors: &[f64],
) -> f64 {
    let m_hi = model_fn(param + delta);
    let m_lo = model_fn(param - delta);
    let mut info = 0.0;
    for ((hi, lo), sigma) in m_hi.iter().zip(m_lo.iter()).zip(errors.iter()) {
        let slope = (hi - lo) / (2.0 * delta);
        info += slope * slope / (sigma * sigma);
    }
    info
}

/// 2×2 Fisher information matrix F[a][b] = Σ (∂mᵢ/∂θₐ)(∂mᵢ/∂θᵦ) / σᵢ²,
/// with partial derivatives taken by central finite differences of
/// half-widths `deltas`. The result is symmetric by construction.
pub fn fisher_matrix_2d(
    params: [f64; 2],
    deltas: [f64; 2],
    model_fn: &dyn Fn(f64, f64) -> Vec<f64>,
    errors: &[f64],
) -> [[f64; 2]; 2] {
    // Central-difference partial derivative of the model along `axis`.
    let partial = |axis: usize| -> Vec<f64> {
        let mut hi = params;
        let mut lo = params;
        hi[axis] += deltas[axis];
        lo[axis] -= deltas[axis];
        let m_hi = model_fn(hi[0], hi[1]);
        let m_lo = model_fn(lo[0], lo[1]);
        m_hi.iter()
            .zip(m_lo.iter())
            .map(|(a, b)| (a - b) / (2.0 * deltas[axis]))
            .collect()
    };

    let grad0 = partial(0);
    let grad1 = partial(1);

    let mut fisher = [[0.0f64; 2]; 2];
    for k in 0..errors.len() {
        let var = errors[k] * errors[k];
        fisher[0][0] += grad0[k] * grad0[k] / var;
        // Off-diagonal terms are identical; fill both for symmetry.
        let cross = grad0[k] * grad1[k] / var;
        fisher[0][1] += cross;
        fisher[1][0] += cross;
        fisher[1][1] += grad1[k] * grad1[k] / var;
    }
    fisher
}

/// 1σ parameter uncertainty from a scalar Fisher information: 1/√F.
///
/// Non-positive information carries no constraint, so the uncertainty
/// is `+∞`.
pub fn parameter_uncertainty_1d(fisher_element: f64) -> f64 {
    match fisher_element {
        f if f <= 0.0 => f64::INFINITY,
        f => f.sqrt().recip(),
    }
}

/// One Metropolis update for a scalar parameter with a symmetric Gaussian
/// proposal.
///
/// Randomness is injected by the caller: `random_uniform` must be drawn
/// from U(0, 1) and `random_normal` from N(0, 1), keeping the step itself
/// deterministic and testable.
///
/// Returns `(state, log_likelihood, accepted)` — the proposal and its
/// log-likelihood on acceptance, otherwise the unchanged current state.
pub fn metropolis_step(
    current: f64,
    current_log_likelihood: f64,
    proposal_sigma: f64,
    log_likelihood_fn: &dyn Fn(f64) -> f64,
    random_uniform: f64,
    random_normal: f64,
) -> (f64, f64, bool) {
    let proposal = current + proposal_sigma * random_normal;
    let proposal_ll = log_likelihood_fn(proposal);
    // Log of the acceptance ratio (symmetric proposal, so no Hastings term).
    let log_alpha = proposal_ll - current_log_likelihood;
    // Accept with probability min(1, exp(log_alpha)).
    //
    // BUG FIX: the previous condition `log_alpha.ln_1p().is_nan()` is true
    // exactly when log_alpha < -1 (ln_1p is NaN there), so every proposal
    // with acceptance ratio below e^-1 — the *worst* proposals — was
    // accepted unconditionally, corrupting the chain's stationary
    // distribution. The correct rule accepts uphill moves always and
    // downhill moves with probability exp(log_alpha).
    if log_alpha >= 0.0 || random_uniform.ln() < log_alpha {
        (proposal, proposal_ll, true)
    } else {
        (current, current_log_likelihood, false)
    }
}

/// Bayesian information criterion: −2 ln L + k ln N. Lower is better;
/// the ln N factor penalises parameters more harshly than AIC for N ≥ 8.
pub fn bayesian_information_criterion(log_likelihood: f64, n_params: usize, n_data: usize) -> f64 {
    let complexity_penalty = (n_params as f64) * (n_data as f64).ln();
    complexity_penalty - 2.0 * log_likelihood
}

/// Akaike information criterion: −2 ln L + 2k. Lower is better.
pub fn akaike_information_criterion(log_likelihood: f64, n_params: usize) -> f64 {
    2.0 * (n_params as f64) - 2.0 * log_likelihood
}

/// Δχ² threshold enclosing the n-sigma joint confidence region for
/// `n_params` simultaneously estimated parameters (Gaussian likelihood).
///
/// * 1 parameter: Δχ² = n².
/// * 2 parameters: Δχ² = −2 ln(1 − p), where p = erf(n/√2) is the
///   probability content of the ±n-sigma interval of a 1-D Gaussian.
///   This yields the standard joint-confidence values 2.30 (1σ),
///   6.18 (2σ), 11.83 (3σ).
///
///   BUG FIX: the old branch computed p = 1 − exp(−n²/2), for which
///   −2 ln(1 − p) collapses algebraically back to n² — identical to the
///   1-parameter case, making the branch a no-op and the returned
///   thresholds wrong (1.0 instead of 2.30 at 1σ, etc.).
/// * >2 parameters: the crude n²·k heuristic is retained unchanged for
///   backward compatibility; an exact value would require the inverse
///   χ² CDF for k degrees of freedom.
pub fn delta_chi2_for_confidence(n_sigma: f64, n_params: usize) -> f64 {
    match n_params {
        1 => n_sigma * n_sigma,
        2 => {
            // 1 − p = erfc(n/√2); using erfc directly avoids the
            // catastrophic cancellation of 1 − erf(x) at large n.
            let tail = erfc(n_sigma / std::f64::consts::SQRT_2);
            -2.0 * tail.ln()
        }
        _ => n_sigma * n_sigma * n_params as f64,
    }
}

/// Complementary error function, Abramowitz & Stegun formula 7.1.26
/// (rational approximation, |error| ≤ 1.5e−7 for x ≥ 0), extended to
/// negative arguments via erfc(−x) = 2 − erfc(x).
fn erfc(x: f64) -> f64 {
    let z = x.abs();
    let t = 1.0 / (1.0 + 0.327_591_1 * z);
    let poly = t
        * (0.254_829_592
            + t * (-0.284_496_736
                + t * (1.421_413_741 + t * (-1.453_152_027 + t * 1.061_405_429))));
    let tail = poly * (-z * z).exp();
    if x < 0.0 { 2.0 - tail } else { tail }
}

/// Inverse-variance weighted mean and its 1σ uncertainty:
/// mean = Σ wᵢ xᵢ / Σ wᵢ, σ = 1/√(Σ wᵢ), with wᵢ = 1/σᵢ².
///
/// Returns `(0.0, +∞)` when the weight sum is non-positive (e.g. empty
/// input), signalling "no information".
pub fn weighted_mean(values: &[f64], errors: &[f64]) -> (f64, f64) {
    let (weight_sum, weighted_sum) = values.iter().zip(errors.iter()).fold(
        (0.0_f64, 0.0_f64),
        |(w_acc, wx_acc), (v, e)| {
            let w = 1.0 / (e * e);
            (w_acc + w, wx_acc + w * v)
        },
    );
    if weight_sum <= 0.0 {
        (0.0, f64::INFINITY)
    } else {
        (weighted_sum / weight_sum, 1.0 / weight_sum.sqrt())
    }
}

/// Sample covariance of paired slices with Bessel's correction
/// (n − 1 denominator). Returns 0.0 for fewer than two points.
///
/// Means are taken over `x.len()` elements; pairs beyond the shorter
/// slice are dropped by the zip.
pub fn covariance(x: &[f64], y: &[f64]) -> f64 {
    let n = x.len() as f64;
    if n < 2.0 {
        return 0.0;
    }
    let mean_x = x.iter().sum::<f64>() / n;
    let mean_y = y.iter().sum::<f64>() / n;
    let mut cross_sum = 0.0;
    for (xi, yi) in x.iter().zip(y.iter()) {
        cross_sum += (xi - mean_x) * (yi - mean_y);
    }
    cross_sum / (n - 1.0)
}

/// Pearson correlation coefficient r = cov(x, y) / (σₓ σᵧ), built from
/// the sample covariance above (cov(x, x) is the sample variance).
///
/// Returns 0.0 when either variance vanishes (constant input or fewer
/// than two points), where r is undefined.
pub fn correlation_coefficient(x: &[f64], y: &[f64]) -> f64 {
    let cross = covariance(x, y);
    let spread = (covariance(x, x) * covariance(y, y)).sqrt();
    if spread <= 0.0 {
        0.0
    } else {
        cross / spread
    }
}