use anyhow::Result;
use num::Float;
use std::cmp;
use std::convert::From;
use std::ops::{Add, AddAssign, Div};
pub fn acf<T: Float + From<u32> + From<f64> + Copy + Add + AddAssign + Div>(
x: &[T],
max_lag: Option<usize>,
covariance: bool,
) -> Result<Vec<T>> {
let max_lag = match max_lag {
Some(max_lag) => cmp::min(max_lag, x.len() - 1),
None => x.len() - 1,
};
let m = max_lag + 1;
let len_x_usize = x.len();
let len_x: T = From::from(len_x_usize as u32);
let sum: T = From::from(0.0);
let sum_x: T = x.iter().fold(sum, |sum, &xi| sum + xi);
let mean_x: T = sum_x / len_x;
let mut y: Vec<T> = vec![From::from(0.0); m];
for t in 0..m {
for i in 0..len_x_usize - t {
let xi = x[i] - mean_x;
let xi_t = x[i + t] - mean_x;
y[t] += (xi * xi_t) / len_x;
}
if !covariance && t > 0 {
y[t] = y[t] / y[0];
}
}
if !covariance {
y[0] = From::from(1.0);
}
Ok(y)
}
/// Fits an autoregressive model to `x` via the Durbin-Levinson recursion.
///
/// Returns `(phi, var)`: the AR coefficients and the innovation variance.
/// `order` defaults to the maximum supported by the series length.
/// (The previous `Into<f64>` bound was unused by the implementation and has
/// been dropped — a backward-compatible relaxation.)
///
/// # Errors
///
/// Propagates any error from the underlying `acf` computation.
pub fn ar<T: Float + From<u32> + From<f64> + Copy + AddAssign>(
    x: &[T],
    order: Option<usize>,
) -> Result<(Vec<T>, T)> {
    // One extra lag so that rho covers indices 0..=order.
    let max_lag = order.map(|order| order + 1);
    let rho = acf(x, max_lag, false)?;
    let cov0 = acf(x, Some(0), true)?[0];
    ar_dl_rho_cov(&rho, cov0, order)
}
/// Solves the Yule-Walker equations for AR coefficients with LAPACK's
/// Cholesky solver (`dposv`) instead of the Durbin-Levinson recursion.
///
/// `rho` is the autocorrelation sequence with lag 0 at index 0; the
/// effective order `n` is `min(order, rho.len() - 1)` and defaults to
/// `rho.len() - 1`. Returns the `n` AR coefficients.
#[cfg(feature = "lapack")]
pub fn ar_lapack_rho<T: Float + From<f64> + Into<f64> + Copy>(
    rho: &[T],
    order: Option<usize>,
) -> Result<Vec<T>> {
    let n = match order {
        Some(order) => cmp::min(order, rho.len() - 1),
        None => rho.len() - 1,
    };
    // Toeplitz autocorrelation matrix, pre-filled with 1.0 so the diagonal
    // (rho[0] == 1) is already in place. Only the upper triangle in
    // row-major order is written, which LAPACK's column-major view reads
    // as the LOWER triangle — consistent with the b'L' argument below.
    let mut mr: Vec<f64> = vec![1.0; n * n];
    for i in 0..n {
        for j in i + 1..n {
            mr[i * n + j] = std::convert::Into::into(rho[j - i]);
        }
    }
    // Right-hand side rho[1..=n]; dposv overwrites it with the solution.
    let mut b: Vec<f64> = vec![0.0; n];
    for i in 0..n {
        b[i] = std::convert::Into::into(rho[i + 1]);
    }
    let mut info: i32 = 0;
    let ni = n as i32;
    // SAFETY: mr has n*n elements and b has n, matching the dimensions and
    // leading dimensions passed here; failures are reported through `info`
    // rather than causing UB.
    unsafe {
        lapack::dposv(b'L', ni, 1, &mut mr, ni, &mut b, ni, &mut info);
    }
    if info != 0 {
        anyhow::bail!("Matrix is not positive-definite");
    }
    // Copy the f64 solution back into the generic element type.
    let mut phi: Vec<T> = vec![From::from(0.0); n];
    for i in 0..n {
        phi[i] = std::convert::Into::into(b[i]);
    }
    Ok(phi)
}
pub fn ar_dl_rho_cov<T: Float + From<u32> + From<f64> + Copy + Add + AddAssign + Div>(
rho: &[T],
cov0: T,
order: Option<usize>,
) -> Result<(Vec<T>, T)> {
let order = match order {
Some(order) => cmp::min(order, rho.len() - 1),
None => rho.len() - 1,
};
let zero = From::from(0.0);
let one = From::from(1.0);
let mut phi: Vec<Vec<T>> = vec![Vec::new(); order + 1];
let mut var: Vec<T> = Vec::new();
phi[0].push(zero);
var.push(cov0);
for i in 1..order + 1 {
for _ in 0..i {
phi[i].push(zero);
}
let mut num_sum = zero; let mut den_sum = one;
for k in 1..i {
let p = phi[i - 1][k - 1];
num_sum += p * rho[i - k];
den_sum += -p * rho[k];
}
let phi_ii = (rho[i] - num_sum) / den_sum;
phi[i][i - 1] = phi_ii;
var.push(var[i - 1] * (one - phi_ii * phi_ii));
for k in 1..i {
phi[i][k - 1] = phi[i - 1][k - 1] - phi[i][i - 1] * phi[i - 1][i - k - 1];
}
}
Ok((phi[order].clone(), var[order]))
}
/// Innovation variance of an AR model fitted to `x`.
///
/// Equivalent to [`ar`] but discards the coefficients.
///
/// # Errors
///
/// Propagates errors from `acf` and `ar_dl_rho_cov` (the latter was
/// previously `unwrap`ped, turning recoverable errors into panics even
/// though this function already returns `Result`).
pub fn var<T: Float + From<u32> + From<f64> + Into<f64> + Copy + Add + AddAssign + Div>(
    x: &[T],
    order: Option<usize>,
) -> Result<T> {
    // One extra lag so that rho covers indices 0..=order.
    let max_lag = order.map(|order| order + 1);
    let rho = acf(x, max_lag, false)?;
    let cov0 = acf(x, Some(0), true)?[0];
    let (_phi, var) = ar_dl_rho_cov(&rho, cov0, order)?;
    Ok(var)
}
pub fn var_phi_rho_cov<T: Float + From<u32> + From<f64> + Copy + Add + AddAssign + Div>(
phi: &[T],
rho: &[T],
cov0: T,
) -> Result<T> {
assert!(rho.len() > phi.len());
let mut sum: T = From::from(0.0);
for i in 0..phi.len() {
sum += phi[i] * rho[i + 1];
}
let one: T = From::from(1.0);
Ok(cov0 * (one - sum))
}
/// Partial autocorrelation function of `x` up to `max_lag`.
///
/// Computes the autocorrelation sequence and the lag-0 covariance of `x`,
/// then delegates to [`pacf_rho_cov0`].
pub fn pacf<T: Float + From<u32> + From<f64> + Into<f64> + Copy + AddAssign>(
    x: &[T],
    max_lag: Option<usize>,
) -> Result<Vec<T>> {
    let correlations = acf(x, max_lag, false)?;
    let lag0_cov = acf(x, Some(0), true)?[0];
    pacf_rho_cov0(&correlations, lag0_cov, max_lag)
}
/// Partial autocorrelation function from an autocorrelation sequence `rho`
/// and the lag-0 covariance `cov0`.
///
/// Entry `lag - 1` of the result is the last Durbin-Levinson coefficient
/// of the order-`lag` AR fit, for each `lag` in `1..=max_lag`. `max_lag`
/// is clamped to `rho.len() - 1` and defaults to that value.
pub fn pacf_rho_cov0<T: Float + From<u32> + From<f64> + Into<f64> + Copy + AddAssign>(
    rho: &[T],
    cov0: T,
    max_lag: Option<usize>,
) -> Result<Vec<T>> {
    let limit = rho.len() - 1;
    let max_lag = match max_lag {
        Some(lag) => cmp::min(lag, limit),
        None => limit,
    };
    // Collecting into Result short-circuits on the first failing fit,
    // exactly like the original early-return via `?`.
    (1..=max_lag)
        .map(|lag| {
            let (coef, _var) = ar_dl_rho_cov(rho, cov0, Some(lag))?;
            Ok(coef[lag - 1])
        })
        .collect()
}