use scirs2_core::ndarray::{s, Array1, Array2};
use scirs2_core::numeric::Float;
use std::fmt::Debug;
use crate::error::{Result, TimeSeriesError};
/// Configuration options for fitting a GARCH(p, q) model.
#[derive(Debug, Clone)]
pub struct GarchConfig {
/// GARCH order p: number of lagged conditional-variance terms.
pub p: usize,
/// ARCH order q: number of lagged squared-residual terms.
pub q: usize,
/// Specification of the conditional mean equation.
pub mean_model: MeanModel,
/// Innovation distribution assumed in the likelihood.
pub distribution: Distribution,
/// Maximum number of iterations for the likelihood optimizer.
pub max_iterations: usize,
/// Convergence tolerance for the optimizer.
pub tolerance: f64,
/// When true, use the numerical MLE routine even for GARCH(1,1)
/// (otherwise GARCH(1,1) takes the fast moment-matching path).
pub use_numerical_derivatives: bool,
}
impl Default for GarchConfig {
/// Default configuration: GARCH(1,1), constant mean, normal errors,
/// 1000 optimizer iterations, tolerance 1e-6, and the fast
/// moment-matching (non-numerical) fitting path.
fn default() -> Self {
Self {
p: 1,
q: 1,
mean_model: MeanModel::Constant,
distribution: Distribution::Normal,
max_iterations: 1000,
tolerance: 1e-6,
use_numerical_derivatives: false,
}
}
}
/// Specification of the conditional mean equation.
#[derive(Debug, Clone)]
pub enum MeanModel {
/// Constant (intercept-only) mean.
Constant,
/// Zero mean: returns are used directly as residuals.
Zero,
/// Autoregressive mean of the given order, estimated by OLS.
AR {
order: usize,
},
/// ARMA mean (currently estimated with a constant-mean simplification;
/// the MA part is not fitted separately).
ARMA {
ar_order: usize,
ma_order: usize,
},
}
/// Innovation distribution assumed when computing the log-likelihood.
#[derive(Debug, Clone)]
pub enum Distribution {
/// Standard normal innovations.
Normal,
/// Student-t innovations (the likelihood uses a fixed nu = 5).
StudentT,
/// Skewed Student-t (handled by the fallback arm of the likelihood).
SkewedStudentT,
/// Generalized error distribution (handled by the fallback arm of the likelihood).
GED,
}
/// Output of a GARCH fit.
#[derive(Debug, Clone)]
pub struct GarchResult<F: Float> {
/// Estimated mean-equation and variance-equation parameters.
pub parameters: GarchParameters<F>,
/// In-sample conditional variance series h_t.
pub conditional_variance: Array1<F>,
/// Residuals scaled by conditional volatility: e_t / sqrt(h_t).
pub standardized_residuals: Array1<F>,
/// Log-likelihood at the estimated parameters.
pub log_likelihood: F,
/// Akaike information criterion: -2*LL + 2k.
pub aic: F,
/// Bayesian information criterion: -2*LL + k*ln(n).
pub bic: F,
/// Whether the fitting routine reported convergence.
pub converged: bool,
/// Number of iterations reported by the fitting routine.
pub iterations: usize,
}
/// Estimated parameters of a GARCH model.
#[derive(Debug, Clone)]
pub struct GarchParameters<F: Float> {
/// Mean-equation coefficients (empty for a zero-mean model).
pub mean_params: Array1<F>,
/// Variance-equation coefficients in the layout
/// `[omega, alpha_1..alpha_q, beta_1..beta_p]`.
pub garch_params: Array1<F>,
/// Extra distribution parameters (currently always `None`).
pub dist_params: Option<Array1<F>>,
}
/// A GARCH(p, q) volatility model; call `fit` before forecasting.
#[derive(Debug)]
pub struct GarchModel<F: Float + Debug> {
// Model configuration (orders, mean model, optimizer settings).
#[allow(dead_code)]
config: GarchConfig,
// True once `fit` has completed successfully.
fitted: bool,
// Estimated parameters, populated by `fit`.
parameters: Option<GarchParameters<F>>,
// In-sample conditional variances, populated by `fit`.
#[allow(dead_code)]
conditional_variance: Option<Array1<F>>,
}
impl<F: Float + Debug + std::iter::Sum> GarchModel<F> {
pub fn new(config: GarchConfig) -> Self {
Self {
config,
fitted: false,
parameters: None,
conditional_variance: None,
}
}
/// Convenience constructor for a standard GARCH(1,1) model with the
/// default configuration.
pub fn garch_11() -> Self {
    let config = GarchConfig::default();
    Self::new(config)
}
/// Fit the model to `data`.
///
/// A strictly positive input series is treated by the fitting routines as
/// prices and converted to log returns; otherwise it is used directly as
/// returns. GARCH(1,1) with `use_numerical_derivatives == false` takes a
/// fast moment-matching path; every other configuration uses the
/// numerical maximum-likelihood routine.
///
/// # Errors
/// Returns `TimeSeriesError::InsufficientData` when the series is too
/// short for the requested (p, q) orders.
pub fn fit(&mut self, data: &Array1<F>) -> Result<GarchResult<F>> {
    let n_obs = data.len();
    if n_obs < 20 {
        return Err(TimeSeriesError::InsufficientData {
            message: "Need at least 20 observations for GARCH estimation".to_string(),
            required: 20,
            actual: n_obs,
        });
    }
    // Rule of thumb: at least three observations per estimated parameter.
    let min_obs = std::cmp::max(20, 3 * (1 + self.config.p + self.config.q));
    if n_obs < min_obs {
        return Err(TimeSeriesError::InsufficientData {
            message: format!(
                "Need at least {} observations for GARCH({},{}) estimation",
                min_obs, self.config.p, self.config.q
            ),
            required: min_obs,
            actual: n_obs,
        });
    }
    let fast_path =
        self.config.p == 1 && self.config.q == 1 && !self.config.use_numerical_derivatives;
    if fast_path {
        self.fit_garch_11_mom(data)
    } else {
        self.fit_garch_mle(data)
    }
}
/// Fit a GARCH(1,1) model with a fast method-of-moments estimator.
///
/// A strictly positive input series is treated as prices and converted to
/// log returns; anything else is used directly as returns. The persistence
/// alpha + beta is backed out of the sample kurtosis, split heuristically
/// between alpha and beta, and omega is set so the implied unconditional
/// variance matches the sample variance. No iterative optimization is
/// performed, so `converged` is always true and `iterations` is 1.
fn fit_garch_11_mom(&mut self, data: &Array1<F>) -> Result<GarchResult<F>> {
// Heuristic: an all-positive series is assumed to be prices -> log returns.
let returns = if data.iter().all(|&x| x > F::zero()) {
let mut ret = Array1::zeros(data.len() - 1);
for i in 1..data.len() {
ret[i - 1] = (data[i] / data[i - 1]).ln();
}
ret
} else {
data.clone()
};
let n = returns.len();
let n_f = F::from(n).expect("Failed to convert to float");
// Sample moments of the demeaned returns.
let mean = returns.sum() / n_f;
let centered_returns: Array1<F> = returns.mapv(|r| r - mean);
let sample_var = centered_returns.mapv(|r| r.powi(2)).sum() / (n_f - F::one());
// Skewness is computed but currently unused (hence the underscore).
let _sample_skew = centered_returns.mapv(|r| r.powi(3)).sum()
/ ((n_f - F::one())
* sample_var.powf(F::from(1.5).expect("Failed to convert constant to float")));
let sample_kurt =
centered_returns.mapv(|r| r.powi(4)).sum() / ((n_f - F::one()) * sample_var.powi(2));
// Heuristic moment match: heavier tails (kurtosis above 3) imply higher
// persistence alpha + beta; clamp to a sensible [0.1, 0.99] band.
let alpha_beta_sum =
F::one() - F::from(3.0).expect("Failed to convert constant to float") / sample_kurt;
let alpha_beta_sum = alpha_beta_sum
.max(F::from(0.1).expect("Failed to convert constant to float"))
.min(F::from(0.99).expect("Failed to convert constant to float"));
// Fixed split: 10% of the persistence to alpha, the rest to beta.
let alpha = alpha_beta_sum * F::from(0.1).expect("Failed to convert constant to float"); let beta = alpha_beta_sum - alpha;
// Choose omega so the unconditional variance equals the sample variance.
let omega = sample_var * (F::one() - alpha - beta);
let omega = omega.max(F::from(1e-6).expect("Failed to convert constant to float"));
// Keep the coefficients in conventional, stationary ranges.
let alpha = alpha
.max(F::from(0.01).expect("Failed to convert constant to float"))
.min(F::from(0.3).expect("Failed to convert constant to float"));
let beta = beta
.max(F::from(0.01).expect("Failed to convert constant to float"))
.min(F::from(0.95).expect("Failed to convert constant to float"));
// Rescale if clamping pushed the persistence to 1 or above.
let sum_ab = alpha + beta;
let (alpha, beta) = if sum_ab >= F::one() {
let scale = F::from(0.99).expect("Failed to convert constant to float") / sum_ab;
(alpha * scale, beta * scale)
} else {
(alpha, beta)
};
// GARCH(1,1) recursion h_t = omega + alpha*e_{t-1}^2 + beta*h_{t-1},
// seeded with the sample variance.
let mut conditional_variance = Array1::zeros(n);
conditional_variance[0] = sample_var;
for i in 1..n {
conditional_variance[i] = omega
+ alpha * centered_returns[i - 1].powi(2)
+ beta * conditional_variance[i - 1];
}
let standardized_residuals: Array1<F> = centered_returns
.iter()
.zip(conditional_variance.iter())
.map(|(&r, &v)| r / v.sqrt())
.collect();
// Gaussian log-likelihood up to the constant -n/2 * ln(2*pi) term
// (that constant is omitted here, unlike in the MLE path).
let mut log_likelihood = F::zero();
for i in 0..n {
let variance = conditional_variance[i];
if variance > F::zero() {
log_likelihood = log_likelihood
- F::from(0.5).expect("Failed to convert constant to float")
* (variance.ln() + centered_returns[i].powi(2) / variance);
}
}
// Information criteria with k = 3 parameters (omega, alpha, beta).
let k = F::from(3).expect("Failed to convert constant to float"); let aic = -F::from(2.0).expect("Failed to convert constant to float") * log_likelihood
+ F::from(2.0).expect("Failed to convert constant to float") * k;
let bic = -F::from(2.0).expect("Failed to convert constant to float") * log_likelihood
+ k * n_f.ln();
let mean_params = Array1::from_vec(vec![mean]);
let garch_params = Array1::from_vec(vec![omega, alpha, beta]);
let parameters = GarchParameters {
mean_params,
garch_params,
dist_params: None,
};
// Cache the fit so forecast_variance can be called afterwards.
self.fitted = true;
self.parameters = Some(parameters.clone());
self.conditional_variance = Some(conditional_variance.clone());
Ok(GarchResult {
parameters,
conditional_variance,
standardized_residuals,
log_likelihood,
aic,
bic,
converged: true,
iterations: 1, })
}
/// Forecast the conditional variance `steps` periods ahead.
///
/// Uses the GARCH(1,1) multi-step recursion
/// `h_{t+k} = omega + (alpha + beta) * h_{t+k-1}`, which decays
/// geometrically toward the unconditional variance
/// `omega / (1 - alpha - beta)`. The fitted residuals are not retained,
/// so the one-step forecast substitutes the last conditional variance
/// for the last squared residual (its conditional expectation).
///
/// NOTE(review): only the first alpha and beta coefficients are used, so
/// for orders above (1,1) this is an approximation — confirm intended.
///
/// # Errors
/// Returns `TimeSeriesError::InvalidModel` if the model has not been
/// fitted or the stored parameters are malformed.
pub fn forecast_variance(&self, steps: usize) -> Result<Array1<F>> {
    if !self.fitted {
        return Err(TimeSeriesError::InvalidModel(
            "Model has not been fitted".to_string(),
        ));
    }
    let parameters = self
        .parameters
        .as_ref()
        .expect("fitted model must have parameters");
    let conditional_variance = self
        .conditional_variance
        .as_ref()
        .expect("fitted model must have conditional variances");
    if parameters.garch_params.len() < 3 {
        return Err(TimeSeriesError::InvalidModel(
            "Invalid GARCH parameters".to_string(),
        ));
    }
    let omega = parameters.garch_params[0];
    let alpha = parameters.garch_params[1];
    let beta = parameters.garch_params[2];
    // Fitting constrains alpha + beta < 1, so the recursion converges
    // toward the unconditional variance omega / (1 - alpha - beta).
    let persistence = alpha + beta;
    let mut forecasts = Array1::zeros(steps);
    let mut current_variance = conditional_variance[conditional_variance.len() - 1];
    for i in 0..steps {
        // BUGFIX: the one-step forecast previously dropped the alpha term
        // (omega + beta * h_t), which was inconsistent with the geometric
        // decay used for later horizons. The correct recursion applies the
        // full persistence alpha + beta at every step.
        current_variance = omega + persistence * current_variance;
        forecasts[i] = current_variance;
    }
    Ok(forecasts)
}
/// Fit a GARCH(p, q) model by derivative-free maximum likelihood.
///
/// A strictly positive input series is interpreted as prices and converted
/// to log returns; otherwise it is used directly. The mean equation is
/// fitted first, then the GARCH coefficients are initialized heuristically,
/// constrained, and refined by coordinate search on the log-likelihood.
fn fit_garch_mle(&mut self, data: &Array1<F>) -> Result<GarchResult<F>> {
// Heuristic: an all-positive series is assumed to be prices -> log returns.
let returns = if data.iter().all(|&x| x > F::zero()) {
let mut ret = Array1::zeros(data.len() - 1);
for i in 1..data.len() {
ret[i - 1] = (data[i] / data[i - 1]).ln();
}
ret
} else {
data.clone()
};
let n = returns.len();
let n_f = F::from(n).expect("Failed to convert to float");
// Step 1: estimate the mean equation; GARCH is fitted to its residuals.
let (mean_residuals, mean_params) = self.estimate_mean_equation(&returns)?;
// Parameter layout: [omega, alpha_1..alpha_q, beta_1..beta_p].
let num_garch_params = 1 + self.config.p + self.config.q; let mut garch_params = Array1::zeros(num_garch_params);
let sample_var = mean_residuals.mapv(|x| x.powi(2)).sum() / (n_f - F::one());
// Initial guess: small omega, small ARCH terms, persistence mostly in beta.
garch_params[0] = sample_var * F::from(0.1).expect("Failed to convert constant to float");
for i in 1..=self.config.q {
garch_params[i] = F::from(0.05).expect("Failed to convert constant to float");
}
for i in (1 + self.config.q)..(1 + self.config.q + self.config.p) {
garch_params[i] = F::from(0.85).expect("Failed to convert constant to float")
/ F::from(self.config.p).expect("Failed to convert to float");
}
self.constrain_parameters(&mut garch_params);
// Step 2: refine by coordinate search on the negative log-likelihood.
let optimized_params = self.optimize_likelihood(&mean_residuals, garch_params)?;
let conditional_variance =
self.compute_conditional_variance(&mean_residuals, &optimized_params)?;
let standardized_residuals =
self.compute_standardized_residuals(&mean_residuals, &conditional_variance)?;
let log_likelihood = self.compute_log_likelihood(&mean_residuals, &conditional_variance)?;
// Information criteria: AIC = -2*LL + 2k, BIC = -2*LL + k*ln(n).
let k = F::from(mean_params.len() + optimized_params.len()).expect("Operation failed");
let aic = -F::from(2.0).expect("Failed to convert constant to float") * log_likelihood
+ F::from(2.0).expect("Failed to convert constant to float") * k;
let bic = -F::from(2.0).expect("Failed to convert constant to float") * log_likelihood
+ k * n_f.ln();
let parameters = GarchParameters {
mean_params,
garch_params: optimized_params,
dist_params: None,
};
// Cache the fit so forecast_variance can be called afterwards.
self.fitted = true;
self.parameters = Some(parameters.clone());
self.conditional_variance = Some(conditional_variance.clone());
Ok(GarchResult {
parameters,
conditional_variance,
standardized_residuals,
log_likelihood,
aic,
bic,
// NOTE(review): the optimizer does not report back its convergence
// status or iteration count; these fields are nominal.
converged: true,
iterations: self.config.max_iterations,
})
}
fn estimate_mean_equation(&self, returns: &Array1<F>) -> Result<(Array1<F>, Array1<F>)> {
match &self.config.mean_model {
MeanModel::Zero => {
let mean_params = Array1::zeros(0);
Ok((returns.clone(), mean_params))
}
MeanModel::Constant => {
let mean = returns.sum() / F::from(returns.len()).expect("Operation failed");
let residuals = returns.mapv(|r| r - mean);
let mean_params = Array1::from_vec(vec![mean]);
Ok((residuals, mean_params))
}
MeanModel::AR { order } => {
if *order == 0 {
return self.estimate_mean_equation(returns);
}
let p = *order;
if returns.len() <= p {
return Err(TimeSeriesError::InsufficientData {
message: format!("Need more than {p} observations for AR({p}) model"),
required: p + 1,
actual: returns.len(),
});
}
let n = returns.len() - p;
let mut y = Array1::zeros(n);
let mut x = Array2::zeros((n, p + 1));
for i in 0..n {
y[i] = returns[p + i];
x[[i, 0]] = F::one(); for j in 1..=p {
x[[i, j]] = returns[p + i - j];
}
}
let xtx = self.matrix_multiply_transpose(&x.view())?;
let xty = self.matrix_vector_multiply_transpose(&x.view(), &y.view())?;
let ar_params = self.solve_linear_system(&xtx, &xty)?;
let mut residuals = Array1::zeros(returns.len());
residuals.slice_mut(s![..p]).assign(&returns.slice(s![..p]));
for i in p..returns.len() {
let mut prediction = ar_params[0]; for j in 1..=p {
prediction = prediction + ar_params[j] * returns[i - j];
}
residuals[i] = returns[i] - prediction;
}
Ok((residuals, ar_params))
}
MeanModel::ARMA { ar_order, ma_order } => {
if *ar_order == 0 && *ma_order == 0 {
return self.estimate_mean_equation(returns);
}
let mean = returns.sum() / F::from(returns.len()).expect("Operation failed");
let residuals = returns.mapv(|r| r - mean);
let mean_params = Array1::from_vec(vec![mean]);
Ok((residuals, mean_params))
}
}
}
/// Enforce positivity and stationarity constraints in place.
///
/// omega is floored at 1e-6, every alpha/beta coefficient is floored at
/// zero, and if the persistence (sum of alphas and betas) reaches one the
/// coefficients are rescaled so it equals 0.99.
fn constrain_parameters(&self, params: &mut Array1<F>) {
    let omega_floor = F::from(1e-6).expect("Failed to convert constant to float");
    params[0] = params[0].max(omega_floor);
    // Indices 1..=q are alphas, (1+q)..(1+q+p) are betas; floor them all.
    let coeff_count = self.config.p + self.config.q;
    for idx in 1..(1 + coeff_count) {
        params[idx] = params[idx].max(F::zero());
    }
    // Stationarity: persistence must stay strictly below one.
    let persistence: F = (1..(1 + coeff_count)).map(|idx| params[idx]).sum();
    if persistence >= F::one() {
        let target = F::from(0.99).expect("Failed to convert constant to float");
        let scale = target / persistence;
        for idx in 1..params.len() {
            params[idx] = params[idx] * scale;
        }
    }
}
/// Coordinate-descent search minimizing the negative log-likelihood.
///
/// Each iteration perturbs every parameter up and down by the current step
/// size (re-applying constraints) and keeps any improving move. The step
/// size decays by 5% every 20 iterations, floored at 1e-8, for finer
/// late-stage search. Stops early once no coordinate improves after a
/// short warm-up.
fn optimize_likelihood(
    &self,
    residuals: &Array1<F>,
    initial_params: Array1<F>,
) -> Result<Array1<F>> {
    let mut best_params = initial_params.clone();
    let mut best_likelihood = self.negative_log_likelihood(residuals, &initial_params)?;
    // Current coordinate step; shrinks over time (see decay below).
    let mut perturbation_size = F::from(0.01).expect("Failed to convert constant to float");
    let min_step = F::from(1e-8).expect("Failed to convert constant to float");
    for iteration in 0..self.config.max_iterations {
        let mut improved = false;
        for param_idx in 0..best_params.len() {
            // Try moving this coordinate in both directions.
            for &direction in &[F::one(), -F::one()] {
                let mut test_params = best_params.clone();
                test_params[param_idx] =
                    test_params[param_idx] + direction * perturbation_size;
                self.constrain_parameters(&mut test_params);
                if let Ok(test_likelihood) =
                    self.negative_log_likelihood(residuals, &test_params)
                {
                    if test_likelihood < best_likelihood {
                        best_params = test_params;
                        best_likelihood = test_likelihood;
                        improved = true;
                    }
                }
            }
        }
        if !improved && iteration > 10 {
            break;
        }
        if iteration % 20 == 0 && iteration > 0 {
            // BUGFIX: the decayed step size was previously computed into an
            // empty `if` body and never stored, so the search never refined.
            let decay = F::from(0.95).expect("Failed to convert constant to float");
            let new_size = perturbation_size * decay;
            if new_size > min_step {
                perturbation_size = new_size;
            }
        }
    }
    Ok(best_params)
}
/// Negative log-likelihood of `residuals` under the given GARCH parameters.
fn negative_log_likelihood(&self, residuals: &Array1<F>, params: &Array1<F>) -> Result<F> {
    let variance = self.compute_conditional_variance(residuals, params)?;
    self.compute_log_likelihood(residuals, &variance).map(|ll| -ll)
}
/// Run the GARCH(p, q) variance recursion over the residuals:
/// h_t = omega + sum_i alpha_i * e_{t-i}^2 + sum_j beta_j * h_{t-j}.
///
/// `params` layout: `[omega, alpha_1..alpha_q, beta_1..beta_p]`. The first
/// max(p, q) values are seeded with the unconditional variance
/// omega / (1 - sum(alpha) - sum(beta)); `constrain_parameters` keeps the
/// persistence below one, so this seed is positive whenever omega is.
fn compute_conditional_variance(
&self,
residuals: &Array1<F>,
params: &Array1<F>,
) -> Result<Array1<F>> {
let n = residuals.len();
let mut h = Array1::zeros(n);
let omega = params[0];
let alpha_sum: F = (1..=self.config.q).map(|i| params[i]).sum();
let beta_sum: F = ((1 + self.config.q)..(1 + self.config.q + self.config.p))
.map(|i| params[i])
.sum();
let unconditional_var = omega / (F::one() - alpha_sum - beta_sum);
// Seed the pre-sample lags with the long-run variance.
let max_lag = std::cmp::max(self.config.p, self.config.q);
for i in 0..std::cmp::min(max_lag, n) {
h[i] = unconditional_var;
}
for t in max_lag..n {
h[t] = omega;
// ARCH terms: lagged squared residuals (the t >= i guard is defensive;
// t >= max_lag >= q already holds here).
for i in 1..=self.config.q {
if t >= i {
h[t] = h[t] + params[i] * residuals[t - i].powi(2);
}
}
// GARCH terms: lagged conditional variances at offsets q+1..q+p.
for j in 1..=self.config.p {
if t >= j {
let beta_idx = self.config.q + j;
h[t] = h[t] + params[beta_idx] * h[t - j];
}
}
}
Ok(h)
}
/// Standardize residuals by conditional volatility: e_t / sqrt(h_t).
/// Entries with non-positive variance map to zero rather than NaN/inf.
fn compute_standardized_residuals(
    &self,
    residuals: &Array1<F>,
    variance: &Array1<F>,
) -> Result<Array1<F>> {
    let standardized: Array1<F> = residuals
        .iter()
        .zip(variance.iter())
        .map(|(&r, &v)| if v > F::zero() { r / v.sqrt() } else { F::zero() })
        .collect();
    Ok(standardized)
}
/// Log-likelihood of the residuals under the configured distribution.
///
/// `Normal` uses the exact Gaussian log-density. `StudentT` uses a fixed
/// nu = 5 with an approximate log normalizing constant. `SkewedStudentT`
/// and `GED` are not implemented separately and fall back to the Gaussian
/// likelihood. (BUGFIX: the fallback arm previously called this function
/// recursively with the same distribution, recursing forever.)
/// Observations with non-positive variance are skipped in all cases.
fn compute_log_likelihood(&self, residuals: &Array1<F>, variance: &Array1<F>) -> Result<F> {
    let half = F::from(0.5).expect("Failed to convert constant to float");
    match &self.config.distribution {
        Distribution::StudentT => {
            // Fixed degrees of freedom and an approximate constant term.
            let nu = F::from(5.0).expect("Failed to convert constant to float");
            let gamma_factor = F::from(0.8).expect("Failed to convert constant to float");
            let mut log_likelihood = F::zero();
            for (&r, &v) in residuals.iter().zip(variance.iter()) {
                if v > F::zero() {
                    let standardized = r / v.sqrt();
                    let term = gamma_factor
                        - half * v.ln()
                        - half
                            * (nu + F::one())
                            * (F::one() + standardized.powi(2) / nu).ln();
                    log_likelihood = log_likelihood + term;
                }
            }
            Ok(log_likelihood)
        }
        // Gaussian likelihood; SkewedStudentT and GED fall back to it.
        Distribution::Normal | Distribution::SkewedStudentT | Distribution::GED => {
            let ln_2pi = F::from(2.0 * std::f64::consts::PI)
                .expect("Failed to convert to float")
                .ln();
            let n = F::from(residuals.len()).expect("length fits in float");
            let mut log_likelihood = -half * n * ln_2pi;
            for (&r, &v) in residuals.iter().zip(variance.iter()) {
                if v > F::zero() {
                    log_likelihood = log_likelihood - half * (v.ln() + r.powi(2) / v);
                }
            }
            Ok(log_likelihood)
        }
    }
}
/// Compute the Gram matrix X^T X of the given design matrix.
fn matrix_multiply_transpose(
    &self,
    x: &scirs2_core::ndarray::ArrayView2<F>,
) -> Result<Array2<F>> {
    let dim = x.ncols();
    let mut gram = Array2::zeros((dim, dim));
    for i in 0..dim {
        for j in 0..dim {
            // Entry (i, j) is the dot product of columns i and j of X.
            gram[[i, j]] = (0..x.nrows()).map(|k| x[[k, i]] * x[[k, j]]).sum();
        }
    }
    Ok(gram)
}
/// Compute X^T y for the given design matrix and target vector.
fn matrix_vector_multiply_transpose(
    &self,
    x: &scirs2_core::ndarray::ArrayView2<F>,
    y: &scirs2_core::ndarray::ArrayView1<F>,
) -> Result<Array1<F>> {
    let cols = x.ncols();
    let mut result = Array1::zeros(cols);
    for i in 0..cols {
        // Entry i is the dot product of column i of X with y.
        result[i] = (0..x.nrows()).map(|j| x[[j, i]] * y[j]).sum();
    }
    Ok(result)
}
/// Solve the square linear system `a * x = b` by Gaussian elimination
/// with partial pivoting.
///
/// # Errors
/// Returns `TimeSeriesError::InvalidInput` when dimensions are
/// inconsistent or a pivot falls below 1e-12 (treated as singular).
fn solve_linear_system(&self, a: &Array2<F>, b: &Array1<F>) -> Result<Array1<F>> {
let n = a.nrows();
if a.ncols() != n || b.len() != n {
return Err(TimeSeriesError::InvalidInput(
"Matrix dimensions mismatch in linear system".to_string(),
));
}
// Build the augmented matrix [A | b].
let mut aug = Array2::zeros((n, n + 1));
for i in 0..n {
for j in 0..n {
aug[[i, j]] = a[[i, j]];
}
aug[[i, n]] = b[i];
}
for k in 0..n {
// Partial pivoting: bring the largest |entry| of column k to row k.
let mut max_row = k;
for i in (k + 1)..n {
if aug[[i, k]].abs() > aug[[max_row, k]].abs() {
max_row = i;
}
}
if max_row != k {
for j in 0..=n {
let temp = aug[[k, j]];
aug[[k, j]] = aug[[max_row, j]];
aug[[max_row, j]] = temp;
}
}
// A vanishing pivot indicates a (numerically) singular matrix.
if aug[[k, k]].abs() < F::from(1e-12).expect("Failed to convert constant to float") {
return Err(TimeSeriesError::InvalidInput(
"Singular matrix in linear system".to_string(),
));
}
// Eliminate column k from all rows below the pivot.
for i in (k + 1)..n {
let factor = aug[[i, k]] / aug[[k, k]];
for j in k..=n {
aug[[i, j]] = aug[[i, j]] - factor * aug[[k, j]];
}
}
}
// Back-substitution on the resulting upper-triangular system.
let mut x = Array1::zeros(n);
for i in (0..n).rev() {
let mut sum = F::zero();
for j in (i + 1)..n {
sum = sum + aug[[i, j]] * x[j];
}
x[i] = (aug[[i, n]] - sum) / aug[[i, i]];
}
Ok(x)
}
/// Fitted parameters, or `None` if `fit` has not succeeded yet.
pub fn get_parameters(&self) -> Option<&GarchParameters<F>> {
self.parameters.as_ref()
}
/// Whether a successful `fit` has been performed.
pub fn is_fitted(&self) -> bool {
self.fitted
}
}
// Unit tests exercise the public API on small synthetic return series;
// they check fit success, parameter counts, and forecast positivity —
// not estimator accuracy.
#[cfg(test)]
mod tests {
use super::*;
use scirs2_core::ndarray::arr1;
// GARCH(1,1) via the default (moment-matching) fast path.
#[test]
fn test_garch_11_basic() {
let mut model = GarchModel::<f64>::garch_11();
let data = arr1(&[
0.01, -0.02, 0.015, -0.008, 0.012, 0.005, -0.003, 0.007, -0.001, 0.004, 0.009, -0.006,
0.002, -0.007, 0.011, 0.003, -0.004, 0.008, -0.002, 0.006,
]);
let result = model.fit(&data);
assert!(result.is_ok());
let result = result.expect("Operation failed");
// GARCH(1,1) stores exactly [omega, alpha, beta].
assert_eq!(result.parameters.garch_params.len(), 3); assert!(result.log_likelihood.is_finite());
assert!(result.log_likelihood.abs() > 0.0); assert!(model.is_fitted());
}
// Variance forecasts after a successful fit must be positive.
#[test]
fn test_garch_forecasting() {
let mut model = GarchModel::<f64>::garch_11();
let data = arr1(&[
0.01, -0.02, 0.015, -0.008, 0.012, 0.005, -0.003, 0.007, -0.001, 0.004, 0.009, -0.006,
0.002, -0.007, 0.011, 0.003, -0.004, 0.008, -0.002, 0.006,
]);
model.fit(&data).expect("Operation failed");
let forecasts = model.forecast_variance(5).expect("Operation failed");
assert_eq!(forecasts.len(), 5);
assert!(forecasts.iter().all(|&x| x > 0.0)); }
// Fewer than the minimum observations must be rejected.
#[test]
fn test_insufficient_data() {
let mut model = GarchModel::<f64>::garch_11();
let data = arr1(&[0.01, -0.02]);
let result = model.fit(&data);
assert!(result.is_err());
}
// GARCH(2,1) with zero mean forces the numerical MLE path and should
// yield 1 + p + q = 4 variance parameters.
#[test]
fn test_custom_garch_config() {
let config = GarchConfig {
p: 2,
q: 1,
mean_model: MeanModel::Zero,
distribution: Distribution::Normal,
max_iterations: 100,
tolerance: 1e-4,
use_numerical_derivatives: true,
};
let mut model = GarchModel::<f64>::new(config);
let data = arr1(&[
0.01, -0.02, 0.015, -0.008, 0.012, 0.005, -0.003, 0.007, -0.001, 0.004, 0.009, -0.006,
0.002, -0.007, 0.011, 0.003, -0.004, 0.008, -0.002, 0.006, 0.014, -0.01, 0.018, -0.005,
0.007, 0.002, -0.009, 0.013, 0.001, -0.003,
]);
let result = model.fit(&data);
assert!(result.is_ok());
let result = result.expect("Operation failed");
assert_eq!(result.parameters.garch_params.len(), 4);
}
}