1#![allow(non_snake_case)] use super::timeseries::TimeSeries;
28use ndarray::Array1;
29use serde::{Deserialize, Serialize};
30use so_core::error::{Error, Result};
31use statrs::function::gamma;
32
/// Innovation distribution assumed for the standardized residuals of a
/// GARCH model.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum GARCHDistribution {
    /// Standard normal innovations.
    Normal,
    /// Student's-t innovations; the payload is the degrees of freedom
    /// (clamped above 2 during likelihood evaluation).
    StudentsT(f64),
    /// Generalized error distribution; the payload is the shape parameter
    /// (1 = Laplace, 2 = normal).
    GED(f64),
}
43
/// Lag orders of a GARCH(p, q) model.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub struct GARCHOrder {
    /// Number of GARCH terms (lagged conditional variances).
    pub p: usize,
    /// Number of ARCH terms (lagged squared residuals).
    pub q: usize,
}
52
/// Configuration for fitting a GARCH model.
#[derive(Debug, Clone)]
pub struct GARCHConfig {
    /// Lag orders (p, q) of the model.
    pub order: GARCHOrder,
    /// Assumed innovation distribution.
    pub distribution: GARCHDistribution,
    /// Whether to estimate a constant mean and demean the series before
    /// fitting the variance equation.
    pub with_constant: bool,
    /// Maximum number of optimizer iterations.
    pub max_iter: usize,
    /// Convergence tolerance on the change in log-likelihood between
    /// iterations.
    pub tol: f64,
}
67
68impl Default for GARCHConfig {
69 fn default() -> Self {
70 Self {
71 order: GARCHOrder { p: 1, q: 1 },
72 distribution: GARCHDistribution::Normal,
73 with_constant: false,
74 max_iter: 100,
75 tol: 1e-6,
76 }
77 }
78}
79
/// Estimation output of a fitted GARCH model.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GARCHResults {
    /// Constant term of the variance equation.
    pub omega: f64,
    /// ARCH coefficients (alpha_1 .. alpha_q).
    pub arch_coef: Array1<f64>,
    /// GARCH coefficients (beta_1 .. beta_p).
    pub garch_coef: Array1<f64>,
    /// Estimated constant mean; `None` when fitted without a mean term.
    pub mu: Option<f64>,
    /// Distribution shape parameter (Student's-t df or GED shape);
    /// `None` for normal innovations.
    pub df: Option<f64>,
    /// Maximized log-likelihood.
    pub log_likelihood: f64,
    /// Akaike information criterion.
    pub aic: f64,
    /// Bayesian information criterion.
    pub bic: f64,
    /// Number of observations used in the fit.
    pub n_obs: usize,
    /// (Demeaned) residual series the variance equation was fitted to.
    pub residuals: Array1<f64>,
    /// Fitted conditional variances, one per observation.
    pub conditional_variances: Array1<f64>,
    /// Residuals divided by the fitted conditional standard deviations.
    pub standardized_residuals: Array1<f64>,
}
108
/// Fluent builder for configuring and fitting a [`GARCH`] model.
pub struct GARCHBuilder {
    // Accumulated configuration, consumed by `fit`.
    config: GARCHConfig,
}
113
114impl GARCHBuilder {
115 pub fn new(p: usize, q: usize) -> Self {
117 Self {
118 config: GARCHConfig {
119 order: GARCHOrder { p, q },
120 ..Default::default()
121 },
122 }
123 }
124
125 pub fn arch(q: usize) -> Self {
127 Self::new(0, q)
128 }
129
130 pub fn distribution(mut self, distribution: GARCHDistribution) -> Self {
132 self.config.distribution = distribution;
133 self
134 }
135
136 pub fn with_constant(mut self, include: bool) -> Self {
138 self.config.with_constant = include;
139 self
140 }
141
142 pub fn max_iter(mut self, max_iter: usize) -> Self {
144 self.config.max_iter = max_iter;
145 self
146 }
147
148 pub fn tol(mut self, tol: f64) -> Self {
150 self.config.tol = tol;
151 self
152 }
153
154 pub fn fit(self, ts: &TimeSeries) -> Result<GARCHResults> {
156 let mut garch = GARCH::new(self.config);
157 garch.fit(ts)
158 }
159}
160
/// Generalized autoregressive conditional heteroskedasticity (GARCH) model
/// estimator.
pub struct GARCH {
    // Model specification and optimizer settings.
    config: GARCHConfig,
}
165
166impl GARCH {
167 pub fn new(config: GARCHConfig) -> Self {
169 Self { config }
170 }
171
172 pub fn builder(p: usize, q: usize) -> GARCHBuilder {
174 GARCHBuilder::new(p, q)
175 }
176
177 pub fn arch(q: usize) -> GARCHBuilder {
179 GARCHBuilder::arch(q)
180 }
181
182 pub fn fit(&mut self, ts: &TimeSeries) -> Result<GARCHResults> {
184 let y = ts.values();
185 let n = y.len();
186 let order = self.config.order;
187
188 if n < order.p.max(order.q) + 10 {
189 return Err(Error::DataError(format!(
190 "Not enough observations for GARCH({},{}), need at least {}, got {}",
191 order.p,
192 order.q,
193 order.p.max(order.q) + 10,
194 n
195 )));
196 }
197
198 let (residuals, mu) = if self.config.with_constant {
200 let mean = y.mean().unwrap_or(0.0);
201 let residuals = y - mean;
202 (residuals, Some(mean))
203 } else {
204 (y.clone(), None)
205 };
206
207 let mut params = self.initial_parameters(&residuals);
209
210 let (final_params, log_lik) = self.maximize_likelihood(&residuals, &mut params)?;
212
213 let (omega, arch_coef, garch_coef, df) = self.extract_parameters(&final_params);
215
216 let (conditional_variances, standardized_residuals) =
218 self.calculate_conditional_variances(&residuals, omega, &arch_coef, &garch_coef);
219
220 let n_params = 1
222 + order.q
223 + order.p
224 + if self.config.with_constant { 1 } else { 0 }
225 + match self.config.distribution {
226 GARCHDistribution::Normal => 0,
227 GARCHDistribution::StudentsT(_) => 1,
228 GARCHDistribution::GED(_) => 1,
229 };
230
231 let aic = 2.0 * n_params as f64 - 2.0 * log_lik;
232 let bic = (n as f64).ln() * n_params as f64 - 2.0 * log_lik;
233
234 Ok(GARCHResults {
235 omega,
236 arch_coef,
237 garch_coef,
238 mu,
239 df,
240 log_likelihood: log_lik,
241 aic,
242 bic,
243 n_obs: n,
244 residuals: residuals.clone(),
245 conditional_variances,
246 standardized_residuals,
247 })
248 }
249
250 fn initial_parameters(&self, residuals: &Array1<f64>) -> Array1<f64> {
252 let order = self.config.order;
253 let n_params = 1 + order.q + order.p; let mut params = Array1::zeros(n_params);
256
257 let variance = residuals.var(1.0);
259 params[0] = variance * 0.1;
260
261 if order.q > 0 {
263 let alpha_sum = 0.1;
264 for i in 0..order.q {
265 params[1 + i] = alpha_sum / order.q as f64;
266 }
267 }
268
269 if order.p > 0 {
271 let beta_sum = 0.8;
272 for i in 0..order.p {
273 params[1 + order.q + i] = beta_sum / order.p as f64;
274 }
275 }
276
277 match self.config.distribution {
279 GARCHDistribution::StudentsT(_) => {
280 let mut extended = Array1::zeros(n_params + 1);
281 extended.slice_mut(ndarray::s![..n_params]).assign(¶ms);
282 extended[n_params] = 8.0; extended
284 }
285 GARCHDistribution::GED(_) => {
286 let mut extended = Array1::zeros(n_params + 1);
287 extended.slice_mut(ndarray::s![..n_params]).assign(¶ms);
288 extended[n_params] = 1.5; extended
290 }
291 GARCHDistribution::Normal => params,
292 }
293 }
294
295 fn maximize_likelihood(
297 &self,
298 residuals: &Array1<f64>,
299 params: &mut Array1<f64>,
300 ) -> Result<(Array1<f64>, f64)> {
301 let _n = residuals.len();
302 let order = self.config.order;
303
304 let mut log_lik_old = f64::NEG_INFINITY;
305 let mut iteration = 0;
306
307 while iteration < self.config.max_iter {
308 let (log_lik, gradient) = self.log_likelihood_and_gradient(residuals, params);
310
311 if (log_lik - log_lik_old).abs() < self.config.tol {
313 return Ok((params.clone(), log_lik));
314 }
315
316 let step_size = 0.01;
318 for i in 0..params.len() {
319 params[i] += step_size * gradient[i];
320
321 if i == 0 {
323 params[i] = params[i].max(1e-8);
325 } else if i <= order.q {
326 params[i] = params[i].max(0.0);
328 } else if i <= order.q + order.p {
329 params[i] = params[i].max(0.0);
331 } else if let GARCHDistribution::StudentsT(_) = self.config.distribution {
332 params[i] = params[i].max(2.1);
334 } else if let GARCHDistribution::GED(_) = self.config.distribution {
335 params[i] = params[i].max(0.1);
337 }
338 }
339
340 let alpha_sum: f64 = (1..=order.q).map(|i| params[i]).sum();
342 let beta_sum: f64 = (1..=order.p).map(|i| params[order.q + i]).sum();
343
344 if alpha_sum + beta_sum >= 1.0 {
345 let scale = 0.99 / (alpha_sum + beta_sum);
347 for i in 1..=order.q {
348 params[i] *= scale;
349 }
350 for i in 1..=order.p {
351 params[order.q + i] *= scale;
352 }
353 }
354
355 log_lik_old = log_lik;
356 iteration += 1;
357 }
358
359 Err(Error::DataError(format!(
360 "GARCH optimization did not converge after {} iterations",
361 self.config.max_iter
362 )))
363 }
364
365 fn log_likelihood_and_gradient(
367 &self,
368 residuals: &Array1<f64>,
369 params: &Array1<f64>,
370 ) -> (f64, Array1<f64>) {
371 let n = residuals.len();
372 let order = self.config.order;
373
374 let (omega, arch_coef, garch_coef, _df) = self.extract_parameters(params);
376
377 let conditional_variances =
379 self.calculate_variances(residuals, omega, &arch_coef, &garch_coef);
380
381 let mut log_lik = 0.0;
383 let mut gradient = Array1::zeros(params.len());
384
385 for t in order.q.max(order.p)..n {
386 let sigma2 = conditional_variances[t];
387 let sigma = sigma2.sqrt();
388 let z = residuals[t] / sigma;
389
390 match self.config.distribution {
392 GARCHDistribution::Normal => {
393 log_lik += -0.5 * (2.0 * std::f64::consts::PI * sigma2).ln() - 0.5 * z.powi(2);
394 }
395 GARCHDistribution::StudentsT(nu) => {
396 let nu = nu.max(2.1);
398 let constant = gamma::ln_gamma((nu + 1.0) / 2.0)
399 - gamma::ln_gamma(nu / 2.0)
400 - 0.5 * (std::f64::consts::PI * (nu - 2.0)).ln();
401 log_lik += constant
402 - 0.5 * sigma2.ln()
403 - ((nu + 1.0) / 2.0) * (1.0 + z.powi(2) / (nu - 2.0)).ln();
404 }
405 GARCHDistribution::GED(nu) => {
406 let nu = nu.max(0.1);
408 let lambda = (2.0f64.powf(-2.0 / nu) * gamma::gamma(1.0 / nu)
409 / gamma::gamma(3.0 / nu))
410 .sqrt();
411 let constant = -0.5
412 * (lambda.powi(2) * gamma::gamma(1.0 / nu) / gamma::gamma(3.0 / nu)).ln()
413 - gamma::ln_gamma(1.0 + 1.0 / nu);
414 log_lik += constant - 0.5 * sigma2.ln() - 0.5 * (z.abs() / lambda).powf(nu);
415 }
416 }
417
418 let eps = 1e-8;
421 for i in 0..params.len() {
422 let mut params_plus = params.clone();
423 params_plus[i] += eps;
424 let (omega_p, arch_coef_p, garch_coef_p, _) = self.extract_parameters(¶ms_plus);
425 let sigma2_p =
426 self.calculate_variance_t(residuals, t, omega_p, &arch_coef_p, &garch_coef_p);
427
428 let mut params_minus = params.clone();
429 params_minus[i] -= eps;
430 let (omega_m, arch_coef_m, garch_coef_m, _) =
431 self.extract_parameters(¶ms_minus);
432 let sigma2_m =
433 self.calculate_variance_t(residuals, t, omega_m, &arch_coef_m, &garch_coef_m);
434
435 let deriv = (sigma2_p - sigma2_m) / (2.0 * eps);
436 gradient[i] += deriv * (-0.5 / sigma2 + 0.5 * z.powi(2) / sigma2.powi(2));
437 }
438 }
439
440 (log_lik, gradient)
441 }
442
443 fn extract_parameters(
445 &self,
446 params: &Array1<f64>,
447 ) -> (f64, Array1<f64>, Array1<f64>, Option<f64>) {
448 let order = self.config.order;
449
450 let omega = params[0];
451
452 let arch_coef = if order.q > 0 {
453 params.slice(ndarray::s![1..1 + order.q]).to_owned()
454 } else {
455 Array1::zeros(0)
456 };
457
458 let garch_coef = if order.p > 0 {
459 params
460 .slice(ndarray::s![1 + order.q..1 + order.q + order.p])
461 .to_owned()
462 } else {
463 Array1::zeros(0)
464 };
465
466 let df = match self.config.distribution {
467 GARCHDistribution::Normal => None,
468 GARCHDistribution::StudentsT(_) => Some(params[params.len() - 1]),
469 GARCHDistribution::GED(_) => Some(params[params.len() - 1]),
470 };
471
472 (omega, arch_coef, garch_coef, df)
473 }
474
475 fn calculate_variances(
477 &self,
478 residuals: &Array1<f64>,
479 omega: f64,
480 arch_coef: &Array1<f64>,
481 garch_coef: &Array1<f64>,
482 ) -> Array1<f64> {
483 let n = residuals.len();
484 let p = garch_coef.len();
485 let q = arch_coef.len();
486 let max_lag = p.max(q);
487
488 let mut variances = Array1::zeros(n);
489
490 let initial_variance = residuals.var(1.0).max(1e-8);
492
493 for t in 0..n {
494 if t < max_lag {
495 variances[t] = initial_variance;
496 } else {
497 let mut variance = omega;
498
499 for lag in 1..=q {
501 variance += arch_coef[lag - 1] * residuals[t - lag].powi(2);
502 }
503
504 for lag in 1..=p {
506 variance += garch_coef[lag - 1] * variances[t - lag];
507 }
508
509 variances[t] = variance.max(1e-8);
510 }
511 }
512
513 variances
514 }
515
516 fn calculate_variance_t(
518 &self,
519 residuals: &Array1<f64>,
520 t: usize,
521 omega: f64,
522 arch_coef: &Array1<f64>,
523 garch_coef: &Array1<f64>,
524 ) -> f64 {
525 let p = garch_coef.len();
526 let q = arch_coef.len();
527
528 let mut variance = omega;
529
530 for lag in 1..=q {
532 if t >= lag {
533 variance += arch_coef[lag - 1] * residuals[t - lag].powi(2);
534 }
535 }
536
537 for lag in 1..=p {
540 variance += garch_coef[lag - 1] * omega / (1.0 - arch_coef.sum() - garch_coef.sum());
541 }
542
543 variance.max(1e-8)
544 }
545
546 fn calculate_conditional_variances(
548 &self,
549 residuals: &Array1<f64>,
550 omega: f64,
551 arch_coef: &Array1<f64>,
552 garch_coef: &Array1<f64>,
553 ) -> (Array1<f64>, Array1<f64>) {
554 let variances = self.calculate_variances(residuals, omega, arch_coef, garch_coef);
555 let standardized = residuals / &variances.mapv(|v| v.sqrt());
556
557 (variances, standardized)
558 }
559
560 pub fn forecast_variances(&self, results: &GARCHResults, steps: usize) -> Array1<f64> {
562 let n = results.n_obs;
563 let order = self.config.order;
564
565 let mut forecasts = Array1::zeros(steps);
566 let mut past_variances = results.conditional_variances.clone();
567 let mut past_residuals = results.residuals.clone();
568
569 let unconditional_variance =
571 results.omega / (1.0 - results.arch_coef.sum() - results.garch_coef.sum());
572
573 for h in 0..steps {
574 let mut variance = results.omega;
575
576 for lag in 1..=order.q {
578 let idx = n + h - lag;
579 if idx < n {
580 variance += results.arch_coef[lag - 1] * past_residuals[idx].powi(2);
581 } else if idx < n + h {
582 variance += results.arch_coef[lag - 1] * unconditional_variance;
584 }
585 }
586
587 for lag in 1..=order.p {
589 let idx = n + h - lag;
590 if idx < n {
591 variance += results.garch_coef[lag - 1] * past_variances[idx];
592 } else if idx < n + h {
593 variance += results.garch_coef[lag - 1] * forecasts[idx - n];
594 }
595 }
596
597 forecasts[h] = variance.max(1e-8);
598
599 past_variances = ndarray::concatenate(
601 ndarray::Axis(0),
602 &[past_variances.view(), ndarray::array![variance].view()],
603 )
604 .unwrap();
605
606 past_residuals = ndarray::concatenate(
607 ndarray::Axis(0),
608 &[past_residuals.view(), ndarray::array![0.0].view()],
609 )
610 .unwrap();
611 }
612
613 forecasts
614 }
615}
616
/// An ARCH model is a GARCH model with p = 0; the same estimator serves both.
pub type ARCH = GARCH;
619
/// Extension methods for fitting volatility models directly on a time series.
pub trait GARCHExt {
    /// Fits a GARCH(p, q) model with default settings.
    fn garch(&self, p: usize, q: usize) -> Result<GARCHResults>;

    /// Fits an ARCH(q) model with default settings.
    fn arch(&self, q: usize) -> Result<GARCHResults>;
}
628
629impl GARCHExt for TimeSeries {
630 fn garch(&self, p: usize, q: usize) -> Result<GARCHResults> {
631 GARCH::builder(p, q).fit(self)
632 }
633
634 fn arch(&self, q: usize) -> Result<GARCHResults> {
635 GARCH::arch(q).fit(self)
636 }
637}