use std::fmt::Display;
use ndarray::Array1;
use ndarray::ArrayView1;
use stochastic_rs_distributions::special::ndtri;
use crate::traits::FloatExt;
/// Sign convention of an input sample series.
///
/// Internally all estimators work on *losses* (positive = money lost);
/// PnL-oriented samples are negated first (see `losses_from_samples`).
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PnlOrLoss {
    /// Samples are profit-and-loss values (gains positive); negated internally.
    #[default]
    Pnl,
    /// Samples are already losses (losses positive); used as-is.
    Loss,
}
impl Display for PnlOrLoss {
    /// Writes the human-readable label for the orientation.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            Self::Pnl => "PnL",
            Self::Loss => "Loss",
        };
        f.write_str(label)
    }
}
/// Estimation technique used by `value_at_risk`.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum VarMethod {
    /// Parametric VaR assuming normally distributed losses.
    #[default]
    Gaussian,
    /// Empirical quantile of the observed sample (historical simulation).
    Historical,
    /// Empirical quantile of simulated samples; shares the historical code path.
    MonteCarlo,
}
impl Display for VarMethod {
    /// Writes the human-readable name of the estimation method.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            Self::Gaussian => "Gaussian",
            Self::Historical => "Historical simulation",
            Self::MonteCarlo => "Monte Carlo",
        };
        f.write_str(name)
    }
}
/// Computes Value-at-Risk at `confidence` with the requested `method`.
///
/// `Historical` and `MonteCarlo` share the empirical-quantile implementation;
/// they differ only in how the caller produced `samples`.
///
/// # Panics
/// Panics when `confidence` is outside (0, 1), or when `samples` is smaller
/// than the chosen estimator's minimum (two for Gaussian, one otherwise).
pub fn value_at_risk<T: FloatExt>(
    samples: ArrayView1<T>,
    confidence: T,
    orientation: PnlOrLoss,
    method: VarMethod,
) -> T {
    assert_confidence(confidence);
    match method {
        VarMethod::Historical | VarMethod::MonteCarlo => {
            historical_var(samples, confidence, orientation)
        }
        VarMethod::Gaussian => gaussian_var(samples, confidence, orientation),
    }
}
/// Parametric (variance–covariance) VaR under a normal loss assumption.
///
/// Fits the sample mean and Bessel-corrected (n-1) standard deviation of the
/// loss series and returns `mean + sigma * z`, where `z` is the standard
/// normal quantile at `confidence`.
///
/// # Panics
/// Panics when `confidence` is outside (0, 1) or fewer than two observations
/// are supplied.
pub fn gaussian_var<T: FloatExt>(
    samples: ArrayView1<T>,
    confidence: T,
    orientation: PnlOrLoss,
) -> T {
    assert_confidence(confidence);
    let losses = losses_from_samples(samples, orientation);
    let count = losses.len();
    assert!(count >= 2, "need at least two observations for Gaussian VaR");

    // Left-to-right accumulation, matching a plain fold over the series.
    let mut total = T::zero();
    for &x in losses.iter() {
        total = total + x;
    }
    let mean = total / T::from_usize_(count);

    let mut sq_dev = T::zero();
    for &x in losses.iter() {
        sq_dev = sq_dev + (x - mean).powi(2);
    }
    let sigma = (sq_dev / T::from_usize_(count - 1)).sqrt();

    let z = T::from_f64_fast(standard_normal_quantile(confidence.to_f64().unwrap()));
    mean + sigma * z
}
/// Historical-simulation VaR: the empirical `confidence`-quantile of losses.
///
/// Losses are sorted ascending and the quantile is taken with linear
/// interpolation (see `sample_quantile`).
///
/// # Panics
/// Panics when `confidence` is outside (0, 1), when the sample is empty, or
/// when the data contains NaN (unordered comparisons).
pub fn historical_var<T: FloatExt>(
    samples: ArrayView1<T>,
    confidence: T,
    orientation: PnlOrLoss,
) -> T {
    assert_confidence(confidence);
    let losses = losses_from_samples(samples, orientation);
    assert!(
        !losses.is_empty(),
        "need at least one observation for historical VaR"
    );
    let mut ordered: Vec<T> = losses.iter().copied().collect();
    ordered.sort_by(|lhs, rhs| lhs.partial_cmp(rhs).unwrap());
    sample_quantile(&ordered, confidence)
}
/// Monte Carlo VaR over a pre-simulated sample path.
///
/// The caller supplies `simulated_samples` drawn from a model of their choice;
/// the estimate itself is the empirical quantile, so this delegates directly
/// to `historical_var` (same panics and orientation handling apply).
pub fn monte_carlo_var<T: FloatExt>(
simulated_samples: ArrayView1<T>,
confidence: T,
orientation: PnlOrLoss,
) -> T {
historical_var(simulated_samples, confidence, orientation)
}
/// Draws `n_samples` from `sampler` and computes Monte Carlo VaR on the draw.
///
/// Convenience wrapper: sampling and quantile estimation in one call. The
/// result inherits the panics of `historical_var` (e.g. `n_samples == 0`).
pub fn monte_carlo_var_with_sampler<T, D>(
    sampler: &D,
    n_samples: usize,
    confidence: T,
    orientation: PnlOrLoss,
) -> T
where
    T: FloatExt,
    D: stochastic_rs_distributions::traits::DistributionSampler<T>,
{
    // The sampled array is a temporary that outlives the borrowed view for
    // the duration of this expression.
    monte_carlo_var(sampler.sample_n(n_samples).view(), confidence, orientation)
}
/// Normalizes a sample series to the loss convention (positive = loss).
///
/// PnL-oriented data is negated element-wise; loss-oriented data is simply
/// copied into an owned array.
pub(crate) fn losses_from_samples<T: FloatExt>(
    samples: ArrayView1<T>,
    orientation: PnlOrLoss,
) -> Array1<T> {
    if let PnlOrLoss::Loss = orientation {
        samples.to_owned()
    } else {
        samples.mapv(|v| -v)
    }
}
/// Linear-interpolation quantile of an ascending pre-sorted slice.
///
/// Evaluates position `confidence * (n - 1)`: when the position is integral
/// the exact order statistic is returned, otherwise the two neighboring
/// order statistics are blended by the fractional part.
///
/// # Panics
/// Panics on an empty slice.
pub(crate) fn sample_quantile<T: FloatExt>(sorted: &[T], confidence: T) -> T {
    let count = sorted.len();
    assert!(count >= 1, "empty sample");
    // Fractional index into the order statistics; an unrepresentable generic
    // value degrades to position 0 (the minimum).
    let pos = (confidence * T::from_usize_(count - 1)).to_f64().unwrap_or(0.0);
    let below = pos.floor() as usize;
    let above = pos.ceil() as usize;
    if below == above {
        return sorted[below];
    }
    let frac = T::from_f64_fast(pos - pos.floor());
    sorted[below] * (T::one() - frac) + sorted[above] * frac
}
/// Panics unless the confidence level lies strictly inside (0, 1).
pub(crate) fn assert_confidence<T: FloatExt>(c: T) {
    // A value that cannot be represented as f64 maps to 0.0, which the
    // open-interval check then rejects.
    let v = c.to_f64().unwrap_or(0.0);
    let in_open_interval = v > 0.0 && v < 1.0;
    assert!(in_open_interval, "confidence must lie in (0, 1); got {v}");
}
/// Standard normal quantile (inverse CDF) at probability `p`.
///
/// Thin wrapper over `ndtri` from `stochastic_rs_distributions::special`,
/// kept so the rest of this module has a single, named entry point.
fn standard_normal_quantile(p: f64) -> f64 {
ndtri(p)
}