use crate::OaxacaError;
use nalgebra::{DMatrix, DVector};
/// Result of fitting a logistic regression via [`logit`].
#[derive(Debug)]
pub struct LogitResult {
/// Estimated coefficient vector (length = number of columns in the design matrix).
pub coefficients: DVector<f64>,
/// Fitted probabilities `sigmoid(X * beta)` at the final coefficients (length = rows of X).
pub predicted_probs: DVector<f64>,
/// `true` if the Newton step norm fell below the tolerance before `max_iter` was hit.
pub converged: bool,
/// Number of Newton-Raphson iterations actually performed.
pub iterations: usize,
}
/// Standard logistic function: maps any real `z` into the open interval (0, 1).
///
/// Computed as 1 / (1 + e^(-z)); for large |z| the f64 result saturates
/// smoothly toward 0.0 or 1.0.
fn sigmoid(z: f64) -> f64 {
    (1.0 + (-z).exp()).recip()
}
pub fn logit(
y: &DVector<f64>,
x: &DMatrix<f64>,
max_iter: usize,
tol: f64,
) -> Result<LogitResult, OaxacaError> {
let n = x.nrows();
let k = x.ncols();
let mut beta = DVector::zeros(k);
for iter in 0..max_iter {
let xb = x * β
let probs: DVector<f64> = xb.map(sigmoid);
let error = y - &probs;
let gradient = x.transpose() * &error;
let w_diag: DVector<f64> = probs.map(|p| p * (1.0 - p));
let sqrt_w: DVector<f64> = w_diag.map(|w| w.sqrt());
let mut x_tilde = x.clone();
for i in 0..n {
let w_i = sqrt_w[i];
for j in 0..k {
x_tilde[(i, j)] *= w_i;
}
}
let hessian = -(x_tilde.transpose() * x_tilde);
let information_matrix = -hessian;
let inv_info = information_matrix.try_inverse().ok_or_else(|| {
OaxacaError::NalgebraError("Failed to invert Information Matrix in Logit. Perfect separation?".to_string())
})?;
let step = &inv_info * &gradient;
beta += &step;
if step.norm() < tol {
return Ok(LogitResult {
coefficients: beta,
predicted_probs: probs, converged: true,
iterations: iter + 1,
});
}
}
let xb = x * β
let probs = xb.map(sigmoid);
Ok(LogitResult {
coefficients: beta,
predicted_probs: probs,
converged: false,
iterations: max_iter,
})
}