use ndarray::ArrayView1;
use stochastic_rs_distributions::special::ndtri;
use stochastic_rs_distributions::special::norm_pdf;
use super::var::PnlOrLoss;
use super::var::VarMethod;
use super::var::assert_confidence;
use super::var::gaussian_var;
use super::var::losses_from_samples;
use super::var::sample_quantile;
use crate::traits::FloatExt;
/// Computes expected shortfall (ES / CVaR) for a sample, dispatching on the
/// requested estimation method.
///
/// `Historical` and `MonteCarlo` share the same empirical tail-average
/// estimator; `Gaussian` uses the closed-form normal approximation.
///
/// # Panics
/// Panics if `confidence` fails `assert_confidence` (also re-checked by the
/// delegated estimator) or if the sample is too small for the chosen method.
pub fn expected_shortfall<T: FloatExt>(
    samples: ArrayView1<T>,
    confidence: T,
    orientation: PnlOrLoss,
    method: VarMethod,
) -> T {
    assert_confidence(confidence);
    match method {
        // Empirical estimators: both operate on realized/simulated samples.
        VarMethod::Historical | VarMethod::MonteCarlo => {
            historical_es(samples, confidence, orientation)
        }
        // Parametric estimator: fits a normal to the losses.
        VarMethod::Gaussian => gaussian_es(samples, confidence, orientation),
    }
}
/// Parametric (variance–covariance) expected shortfall under a normal model.
///
/// Fits a normal distribution to the loss sample — mean and unbiased
/// (`n - 1`) standard deviation — and returns the closed-form Gaussian ES
/// `mu + sigma * phi(Phi^{-1}(c)) / (1 - c)`, where `phi`/`Phi^{-1}` are the
/// standard normal pdf and quantile function.
///
/// # Panics
/// Panics if `confidence` is invalid, fewer than two observations are
/// supplied, or `confidence` cannot be converted to `f64`.
pub fn gaussian_es<T: FloatExt>(
    samples: ArrayView1<T>,
    confidence: T,
    orientation: PnlOrLoss,
) -> T {
    assert_confidence(confidence);
    let losses = losses_from_samples(samples, orientation);
    let count = losses.len();
    assert!(count >= 2, "need at least two observations for Gaussian ES");

    // Sample mean of the losses.
    let mut total = T::zero();
    for &loss in losses.iter() {
        total += loss;
    }
    let mu = total / T::from_usize_(count);

    // Unbiased sample variance (Bessel's correction: divide by n - 1).
    let mut sq_dev = T::zero();
    for &loss in losses.iter() {
        sq_dev += (loss - mu).powi(2);
    }
    let sigma = (sq_dev / T::from_usize_(count - 1)).sqrt();

    // Tail multiplier phi(z_c) / (1 - c), evaluated in f64 via the special
    // functions and lifted back into T.
    let conf_f64 = confidence.to_f64().unwrap();
    let z = ndtri(conf_f64);
    let tail_factor = T::from_f64_fast(norm_pdf(z) / (1.0 - conf_f64));

    mu + sigma * tail_factor
}
/// Historical (empirical) expected shortfall via the finite-sample
/// tail-average estimator (Rockafellar–Uryasev style).
///
/// ES averages the worst `n * (1 - confidence)` losses: observations
/// strictly above the empirical VaR contribute with full weight, and the VaR
/// itself carries whatever fractional weight is needed so the tail mass sums
/// to exactly `n * (1 - confidence)`.
///
/// # Panics
/// Panics if `confidence` is invalid, the sample is empty, or `confidence`
/// cannot be represented as `f64`.
pub fn historical_es<T: FloatExt>(
    samples: ArrayView1<T>,
    confidence: T,
    orientation: PnlOrLoss,
) -> T {
    assert_confidence(confidence);
    let losses = losses_from_samples(samples, orientation);
    let n = losses.len();
    assert!(n >= 1, "need at least one observation for historical ES");
    let mut sorted = losses.to_vec();
    // NaNs (if any) compare Equal so the sort cannot panic.
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    let var = sample_quantile(&sorted, confidence);
    // Fail loudly rather than silently falling back to confidence = 0, which
    // would make `target = n` and collapse ES to the full-sample mean.
    let confidence_f64 = confidence
        .to_f64()
        .expect("confidence must be representable as f64");
    // Expected number of tail observations beyond the VaR threshold.
    let target = (n as f64) * (1.0 - confidence_f64);
    if target <= 0.0 {
        // Degenerate tail (confidence -> 1): ES collapses to VaR.
        return var;
    }
    // Sum and count of losses strictly exceeding VaR.
    let mut strict_sum = T::zero();
    let mut strict_count = 0usize;
    for &l in sorted.iter() {
        if l > var {
            strict_sum += l;
            strict_count += 1;
        }
    }
    // VaR absorbs the residual fractional weight of the tail mass.
    let weight_var = (target - strict_count as f64).max(0.0);
    let total = strict_sum + var * T::from_f64_fast(weight_var);
    total / T::from_f64_fast(target)
}
/// Monte Carlo expected shortfall.
///
/// Thin convenience alias: once scenarios have been simulated, the ES of the
/// simulated P&L/loss sample is exactly the empirical (historical) ES, so
/// this delegates to [`historical_es`] unchanged.
///
/// # Panics
/// Propagates the panics of [`historical_es`] (invalid confidence, empty
/// sample).
pub fn monte_carlo_es<T: FloatExt>(
    simulated_samples: ArrayView1<T>,
    confidence: T,
    orientation: PnlOrLoss,
) -> T {
    historical_es(simulated_samples, confidence, orientation)
}
/// Convenience wrapper returning the Gaussian `(VaR, ES)` pair computed from
/// the same sample, confidence level, and orientation.
///
/// # Panics
/// Propagates the panics of [`gaussian_var`] and [`gaussian_es`].
pub fn gaussian_var_es<T: FloatExt>(
    samples: ArrayView1<T>,
    confidence: T,
    orientation: PnlOrLoss,
) -> (T, T) {
    let value_at_risk = gaussian_var(samples, confidence, orientation);
    let shortfall = gaussian_es(samples, confidence, orientation);
    (value_at_risk, shortfall)
}
#[cfg(test)]
mod tests {
    use ndarray::Array1;
    use super::*;

    /// The finite-sample tail-average must interpolate strictly between the
    /// second-worst and the worst observation at this confidence level.
    #[test]
    fn historical_es_ru_finite_sample_correction() {
        let observations = vec![
            -3.0_f64, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 10.0,
        ];
        let losses = Array1::from_vec(observations);
        let es = historical_es(losses.view(), 0.85, PnlOrLoss::Loss);
        assert!(es > 5.0, "ES = {es} must exceed second-worst loss");
        assert!(es < 10.0, "ES = {es} must be below worst loss");
    }

    /// For a constant sample every tail statistic equals that constant.
    #[test]
    fn historical_es_constant_losses() {
        let constant = 1.5_f64;
        let losses = Array1::from_vec(vec![constant; 50]);
        let es = historical_es(losses.view(), 0.95, PnlOrLoss::Loss);
        assert!((es - 1.5).abs() < 1e-12, "ES = {es}, expected 1.5");
    }
}