use nalgebra::{DMatrix, DVector, SymmetricEigen};
use crate::expected_returns::ReturnsKind;
use crate::prelude::{
column_means, log_returns_from_prices, returns_from_prices, sample_covariance, symmetrise,
LabeledMatrix,
};
use crate::{PortfolioError, Result, TRADING_DAYS_PER_YEAR};
fn returns_with_kind(prices: &DMatrix<f64>, kind: ReturnsKind) -> Result<DMatrix<f64>> {
match kind {
ReturnsKind::Simple => returns_from_prices(prices),
ReturnsKind::Log => log_returns_from_prices(prices),
}
}
/// Default per-period benchmark return for semicovariance — approximately a
/// 2% annual rate spread over 252 trading days (0.02 / 252 ≈ 0.000079).
pub const DEFAULT_SEMICOV_BENCHMARK: f64 = 0.000079;
/// Strategy used by [`fix_nonpositive_semidefinite`] to repair a matrix
/// whose smallest eigenvalue is negative.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum FixMethod {
/// Clip negative eigenvalues to zero and rebuild from the eigenvectors.
#[default]
Spectral,
/// Add a uniform positive shift to the diagonal.
Diag,
}
/// Repair a covariance-like matrix that is not positive semidefinite.
///
/// The input is symmetrised first; if its smallest eigenvalue is already
/// within numerical tolerance of zero (>= -1e-12) it is returned unchanged.
/// Otherwise the matrix is repaired using `method` and symmetrised again.
///
/// # Errors
/// Returns an error if `matrix` is not square.
pub fn fix_nonpositive_semidefinite(
    matrix: &DMatrix<f64>,
    method: FixMethod,
) -> Result<DMatrix<f64>> {
    let n = crate::prelude::assert_square(matrix, "fix_nonpositive_semidefinite")?;
    let mut sym = matrix.clone();
    symmetrise(&mut sym);
    let eig = SymmetricEigen::new(sym.clone());
    let min_eig = eig
        .eigenvalues
        .iter()
        .copied()
        .fold(f64::INFINITY, f64::min);
    // Numerically PSD already — nothing to repair.
    if min_eig >= -1e-12 {
        return Ok(sym);
    }
    let mut repaired = match method {
        FixMethod::Spectral => {
            // Zero out the negative eigenvalues and reconstruct
            // V * diag(clipped) * V^T.
            let clipped = eig.eigenvalues.map(|v| if v < 0.0 { 0.0 } else { v });
            let vectors = &eig.eigenvectors;
            vectors * DMatrix::from_diagonal(&clipped) * vectors.transpose()
        }
        FixMethod::Diag => {
            // Shift the spectrum up by slightly more than |min_eig|.
            let shift = -1.1 * min_eig;
            let mut shifted = sym;
            for i in 0..n {
                shifted[(i, i)] += shift;
            }
            shifted
        }
    };
    symmetrise(&mut repaired);
    Ok(repaired)
}
/// Annualised sample covariance computed from a price matrix.
///
/// Prices are converted to returns with `kind`; the covariance is scaled by
/// `frequency` (defaults to `TRADING_DAYS_PER_YEAR`).
pub fn sample_cov(
    prices: &DMatrix<f64>,
    kind: ReturnsKind,
    frequency: Option<usize>,
) -> Result<DMatrix<f64>> {
    sample_cov_from_returns(&returns_with_kind(prices, kind)?, frequency)
}
/// Annualised sample covariance computed directly from a returns matrix.
pub fn sample_cov_from_returns(
    returns: &DMatrix<f64>,
    frequency: Option<usize>,
) -> Result<DMatrix<f64>> {
    let annualisation = frequency.unwrap_or(TRADING_DAYS_PER_YEAR) as f64;
    Ok(sample_covariance(returns)? * annualisation)
}
/// Annualised semicovariance computed from a price matrix.
///
/// See [`semicovariance_from_returns`] for the benchmark semantics.
pub fn semicovariance(
    prices: &DMatrix<f64>,
    benchmark: Option<f64>,
    kind: ReturnsKind,
    frequency: Option<usize>,
) -> Result<DMatrix<f64>> {
    semicovariance_from_returns(&returns_with_kind(prices, kind)?, benchmark, frequency)
}
/// Annualised semicovariance computed from a returns matrix.
///
/// Only returns below `benchmark` (default [`DEFAULT_SEMICOV_BENCHMARK`])
/// contribute: each entry is clipped to `min(r - benchmark, 0)` before the
/// outer-product average `D^T D / T` is taken.
///
/// # Errors
/// Errors if fewer than two observations are supplied.
pub fn semicovariance_from_returns(
    returns: &DMatrix<f64>,
    benchmark: Option<f64>,
    frequency: Option<usize>,
) -> Result<DMatrix<f64>> {
    let bench = benchmark.unwrap_or(DEFAULT_SEMICOV_BENCHMARK);
    let rows = returns.nrows();
    if rows < 2 {
        return Err(PortfolioError::InvalidArgument(
            "need at least two observations for semicovariance".into(),
        ));
    }
    // Keep only the downside deviations from the benchmark.
    let downside = returns.map(|r| (r - bench).min(0.0));
    let cov = downside.transpose() * &downside / (rows as f64);
    let annualisation = frequency.unwrap_or(TRADING_DAYS_PER_YEAR) as f64;
    Ok(cov * annualisation)
}
/// Default EWMA span for [`exp_cov`]; the decay is alpha = 2 / (span + 1).
pub const DEFAULT_EXP_COV_SPAN: usize = 180;
/// Annualised exponentially-weighted covariance computed from prices.
///
/// See [`exp_cov_from_returns`] for the weighting scheme.
pub fn exp_cov(
    prices: &DMatrix<f64>,
    kind: ReturnsKind,
    span: Option<usize>,
    frequency: Option<usize>,
) -> Result<DMatrix<f64>> {
    exp_cov_from_returns(&returns_with_kind(prices, kind)?, span, frequency)
}
/// Annualised exponentially-weighted covariance from a returns matrix.
///
/// The newest observation receives weight 1 and each older row is damped by
/// `(1 - alpha)` with `alpha = 2 / (span + 1)`. Weighted column means are
/// removed before the weighted cross-products are averaged.
///
/// # Errors
/// Errors if `span < 1` or fewer than two observations are supplied.
pub fn exp_cov_from_returns(
    returns: &DMatrix<f64>,
    span: Option<usize>,
    frequency: Option<usize>,
) -> Result<DMatrix<f64>> {
    let span = span.unwrap_or(DEFAULT_EXP_COV_SPAN);
    if span < 1 {
        return Err(PortfolioError::InvalidArgument("span must be >= 1".into()));
    }
    let (rows, cols) = returns.shape();
    if rows < 2 {
        return Err(PortfolioError::InvalidArgument(
            "need at least two observations for exp_cov".into(),
        ));
    }
    let alpha = 2.0 / (span as f64 + 1.0);
    let decay = 1.0 - alpha;
    // Fill weights newest-to-oldest: 1, decay, decay^2, ...
    let mut weights = vec![0.0_f64; rows];
    let mut current = 1.0;
    for slot in weights.iter_mut().rev() {
        *slot = current;
        current *= decay;
    }
    let total_weight: f64 = weights.iter().sum();
    // Weighted column means.
    let mut means = DVector::<f64>::zeros(cols);
    for j in 0..cols {
        let mut acc = 0.0;
        for i in 0..rows {
            acc += weights[i] * returns[(i, j)];
        }
        means[j] = acc / total_weight;
    }
    // Weighted covariance: compute the lower triangle, mirror to the upper.
    let mut cov = DMatrix::<f64>::zeros(cols, cols);
    for j in 0..cols {
        for k in 0..=j {
            let mut acc = 0.0;
            for i in 0..rows {
                acc += weights[i] * (returns[(i, j)] - means[j]) * (returns[(i, k)] - means[k]);
            }
            let value = acc / total_weight;
            cov[(j, k)] = value;
            cov[(k, j)] = value;
        }
    }
    let annualisation = frequency.unwrap_or(TRADING_DAYS_PER_YEAR) as f64;
    Ok(cov * annualisation)
}
/// Convert a covariance matrix to a correlation matrix.
///
/// The diagonal is forced to exactly 1.0; any entry involving a
/// zero-variance asset is set to 0.0 off the diagonal.
///
/// # Errors
/// Errors if the matrix is not square or any diagonal entry is negative.
pub fn cov_to_corr(cov: &DMatrix<f64>) -> Result<DMatrix<f64>> {
    let n = crate::prelude::assert_square(cov, "cov_to_corr")?;
    let mut std_devs = Vec::with_capacity(n);
    for i in 0..n {
        let var = cov[(i, i)];
        if var < 0.0 {
            return Err(PortfolioError::InvalidArgument(format!(
                "negative variance at index {i}"
            )));
        }
        std_devs.push(var.sqrt());
    }
    let mut corr = DMatrix::<f64>::zeros(n, n);
    for i in 0..n {
        for j in 0..n {
            corr[(i, j)] = if i == j {
                // Unit diagonal by construction, even for zero variance.
                1.0
            } else if std_devs[i] == 0.0 || std_devs[j] == 0.0 {
                0.0
            } else {
                cov[(i, j)] / (std_devs[i] * std_devs[j])
            };
        }
    }
    Ok(corr)
}
/// Rebuild a covariance matrix from a correlation matrix and a vector of
/// standard deviations: `cov[i][j] = corr[i][j] * std[i] * std[j]`.
///
/// # Errors
/// Errors if `corr` is not square or `std` has the wrong length.
pub fn corr_to_cov(corr: &DMatrix<f64>, std: &DVector<f64>) -> Result<DMatrix<f64>> {
    let n = crate::prelude::assert_square(corr, "corr_to_cov")?;
    if std.len() != n {
        return Err(PortfolioError::DimensionMismatch(format!(
            "std has length {}, expected {n}",
            std.len()
        )));
    }
    Ok(DMatrix::from_fn(n, n, |i, j| corr[(i, j)] * std[i] * std[j]))
}
/// Dispatch to a covariance estimator by name.
///
/// The method string is trimmed and lower-cased before matching, so
/// `" Sample_Cov "` resolves to `sample_cov`.
///
/// # Errors
/// Errors if the method name is unknown or the chosen estimator fails.
pub fn risk_matrix(
    prices: &DMatrix<f64>,
    method: &str,
    kind: ReturnsKind,
    frequency: Option<usize>,
) -> Result<DMatrix<f64>> {
    let normalized = method.trim().to_ascii_lowercase();
    // Shared constructor for all shrinkage-based estimators.
    let shrinkage = || CovarianceShrinkage::new(prices, kind, frequency);
    match normalized.as_str() {
        "sample_cov" => sample_cov(prices, kind, frequency),
        "semicovariance" | "semivariance" => semicovariance(prices, None, kind, frequency),
        "exp_cov" => exp_cov(prices, kind, None, frequency),
        "ledoit_wolf" | "ledoit_wolf_constant_variance" => {
            shrinkage()?.ledoit_wolf(LedoitWolfTarget::ConstantVariance)
        }
        "ledoit_wolf_single_factor" => shrinkage()?.ledoit_wolf(LedoitWolfTarget::SingleFactor),
        "ledoit_wolf_constant_correlation" => {
            shrinkage()?.ledoit_wolf(LedoitWolfTarget::ConstantCorrelation)
        }
        "oracle_approximating" => shrinkage()?.oracle_approximating(),
        other => Err(PortfolioError::InvalidArgument(format!(
            "unknown risk_matrix method '{other}'"
        ))),
    }
}
/// Check that the number of tickers matches the number of price columns.
fn validate_labels(prices: &DMatrix<f64>, tickers: &[String]) -> Result<()> {
    let cols = prices.ncols();
    if cols == tickers.len() {
        Ok(())
    } else {
        Err(PortfolioError::DimensionMismatch(format!(
            "prices has {} columns but {} tickers were supplied",
            cols,
            tickers.len()
        )))
    }
}
/// [`sample_cov`] with ticker labels attached to the result.
pub fn sample_cov_labeled(
    prices: &DMatrix<f64>,
    tickers: &[String],
    kind: ReturnsKind,
    frequency: Option<usize>,
) -> Result<LabeledMatrix> {
    validate_labels(prices, tickers)?;
    LabeledMatrix::new(sample_cov(prices, kind, frequency)?, tickers.to_vec())
}
/// [`semicovariance`] with ticker labels attached to the result.
pub fn semicovariance_labeled(
    prices: &DMatrix<f64>,
    tickers: &[String],
    benchmark: Option<f64>,
    kind: ReturnsKind,
    frequency: Option<usize>,
) -> Result<LabeledMatrix> {
    validate_labels(prices, tickers)?;
    LabeledMatrix::new(
        semicovariance(prices, benchmark, kind, frequency)?,
        tickers.to_vec(),
    )
}
/// [`exp_cov`] with ticker labels attached to the result.
pub fn exp_cov_labeled(
    prices: &DMatrix<f64>,
    tickers: &[String],
    kind: ReturnsKind,
    span: Option<usize>,
    frequency: Option<usize>,
) -> Result<LabeledMatrix> {
    validate_labels(prices, tickers)?;
    LabeledMatrix::new(exp_cov(prices, kind, span, frequency)?, tickers.to_vec())
}
/// [`risk_matrix`] with ticker labels attached to the result.
pub fn risk_matrix_labeled(
    prices: &DMatrix<f64>,
    tickers: &[String],
    method: &str,
    kind: ReturnsKind,
    frequency: Option<usize>,
) -> Result<LabeledMatrix> {
    validate_labels(prices, tickers)?;
    LabeledMatrix::new(risk_matrix(prices, method, kind, frequency)?, tickers.to_vec())
}
/// Shrinkage target selected by [`CovarianceShrinkage::ledoit_wolf`].
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum LedoitWolfTarget {
/// Shrink toward `mu * I` where `mu` is the mean sample variance.
#[default]
ConstantVariance,
/// Shrink toward an equal-weighted single-factor (market) model.
SingleFactor,
/// Shrink toward the constant-correlation matrix.
ConstantCorrelation,
}
/// Covariance shrinkage estimator over a returns matrix
/// (rows = observations, columns = assets).
///
/// Derives `Debug`/`Clone` for consistency with the other public types in
/// this module ([`FixMethod`], [`LedoitWolfTarget`]).
#[derive(Debug, Clone)]
pub struct CovarianceShrinkage {
    /// Per-period returns the estimators operate on.
    pub returns: DMatrix<f64>,
    /// Annualisation factor applied to every returned covariance (e.g. 252).
    pub frequency: f64,
    /// Shrinkage intensity chosen by the most recent estimation call
    /// (0.0 until one has run).
    pub delta: f64,
}
impl CovarianceShrinkage {
/// Build an estimator from prices; returns are derived with `kind` and all
/// outputs are annualised by `frequency` (default `TRADING_DAYS_PER_YEAR`).
pub fn new(prices: &DMatrix<f64>, kind: ReturnsKind, frequency: Option<usize>) -> Result<Self> {
let returns = returns_with_kind(prices, kind)?;
Ok(Self {
returns,
frequency: frequency.unwrap_or(TRADING_DAYS_PER_YEAR) as f64,
delta: 0.0,
})
}
/// Build an estimator from an already-computed returns matrix.
pub fn from_returns(returns: DMatrix<f64>, frequency: Option<usize>) -> Self {
Self {
returns,
frequency: frequency.unwrap_or(TRADING_DAYS_PER_YEAR) as f64,
delta: 0.0,
}
}
// Number of observations (rows of the returns matrix).
fn t(&self) -> usize {
self.returns.nrows()
}
// Number of assets (columns of the returns matrix).
fn n(&self) -> usize {
self.returns.ncols()
}
/// Constant-correlation target: sample variances on the diagonal and
/// `avg_corr * std_i * std_j` off the diagonal, where `avg_corr` is the
/// mean of the upper-triangle correlations (pairs involving a zero std
/// are excluded from the average).
fn constant_correlation_target(&self, sample: &DMatrix<f64>) -> DMatrix<f64> {
let n = self.n();
let mut std = DVector::<f64>::zeros(n);
for i in 0..n {
std[i] = sample[(i, i)].sqrt();
}
let mut sum_corr = 0.0;
let mut count = 0;
for i in 0..n {
for j in (i + 1)..n {
if std[i] > 0.0 && std[j] > 0.0 {
sum_corr += sample[(i, j)] / (std[i] * std[j]);
count += 1;
}
}
}
let avg_corr = if count > 0 {
sum_corr / count as f64
} else {
0.0
};
let mut target = DMatrix::<f64>::zeros(n, n);
for i in 0..n {
for j in 0..n {
if i == j {
target[(i, j)] = sample[(i, i)];
} else {
target[(i, j)] = avg_corr * std[i] * std[j];
}
}
}
target
}
/// Convex combination `(1 - delta) * sample + delta * target`, then
/// symmetrised to remove floating-point asymmetry.
fn shrink_toward(
&self,
sample: &DMatrix<f64>,
target: &DMatrix<f64>,
delta: f64,
) -> DMatrix<f64> {
let mut shrunk = (1.0 - delta) * sample + delta * target;
symmetrise(&mut shrunk);
shrunk
}
/// Un-annualised sample covariance of the stored returns.
fn sample_cov_raw(&self) -> Result<DMatrix<f64>> {
sample_covariance(&self.returns)
}
/// De-meaned returns matrix together with the column means.
fn centered(&self) -> (DMatrix<f64>, DVector<f64>) {
let means = column_means(&self.returns);
let (rows, cols) = self.returns.shape();
let mut centered = self.returns.clone();
for j in 0..cols {
for i in 0..rows {
centered[(i, j)] -= means[j];
}
}
(centered, means)
}
/// Shrink the sample covariance toward the constant-variance target with a
/// caller-chosen intensity `delta` in [0, 1]; the result is annualised and
/// `self.delta` records the intensity used.
///
/// # Errors
/// Errors if `delta` is outside [0, 1] or the sample covariance fails.
pub fn shrunk_covariance(&mut self, delta: f64) -> Result<DMatrix<f64>> {
if !(0.0..=1.0).contains(&delta) {
return Err(PortfolioError::InvalidArgument(
"delta must be in [0, 1]".into(),
));
}
let sample = self.sample_cov_raw()?;
let target = self.constant_variance_target(&sample);
self.delta = delta;
let shrunk = self.shrink_toward(&sample, &target, delta);
Ok(shrunk * self.frequency)
}
/// Constant-variance target `mu * I`, `mu` = mean sample variance.
fn constant_variance_target(&self, sample: &DMatrix<f64>) -> DMatrix<f64> {
let n = self.n();
let mu: f64 = (0..n).map(|i| sample[(i, i)]).sum::<f64>() / n as f64;
DMatrix::<f64>::identity(n, n) * mu
}
/// Ledoit-Wolf shrinkage with an automatically estimated intensity,
/// dispatching on the requested shrinkage target.
pub fn ledoit_wolf(&mut self, target: LedoitWolfTarget) -> Result<DMatrix<f64>> {
match target {
LedoitWolfTarget::ConstantVariance => self.lw_constant_variance(),
LedoitWolfTarget::SingleFactor => self.lw_single_factor(),
LedoitWolfTarget::ConstantCorrelation => self.lw_constant_correlation(),
}
}
/// Shrinkage toward `mu * I`, matching the formulas of the Ledoit & Wolf
/// (2004) "well-conditioned estimator" (as in sklearn's `ledoit_wolf`).
fn lw_constant_variance(&mut self) -> Result<DMatrix<f64>> {
let t = self.t();
let n = self.n();
if t < 2 {
return Err(PortfolioError::InvalidArgument(
"need at least two observations for Ledoit-Wolf".into(),
));
}
let tt = t as f64;
let nn = n as f64;
let (centered, _) = self.centered();
// Biased (divide-by-T) covariance of the centered returns.
let mut s = DMatrix::<f64>::zeros(n, n);
for i in 0..n {
for j in 0..n {
let mut acc = 0.0;
for k in 0..t {
acc += centered[(k, i)] * centered[(k, j)];
}
s[(i, j)] = acc / tt;
}
}
// mu = mean variance; delta_sq = ||S - mu*I||_F^2 = ||S||_F^2 - n*mu^2.
let mu: f64 = (0..n).map(|i| s[(i, i)]).sum::<f64>() / nn;
let mut s_frob_sq = 0.0;
for v in s.iter() {
s_frob_sq += v * v;
}
let delta_sq = s_frob_sq - nn * mu * mu;
// beta_bar_sq estimates the sampling error of S around the truth.
let mut sum_x4 = 0.0;
for k in 0..t {
let mut nm2 = 0.0;
for i in 0..n {
nm2 += centered[(k, i)] * centered[(k, i)];
}
sum_x4 += nm2 * nm2;
}
let beta_bar_sq = sum_x4 / (tt * tt) - s_frob_sq / tt;
let beta_sq = beta_bar_sq.min(delta_sq);
// Intensity = beta^2 / delta^2, clamped into [0, 1]; 0 if S == mu*I.
let delta = if delta_sq > 0.0 {
(beta_sq / delta_sq).clamp(0.0, 1.0)
} else {
0.0
};
let target = DMatrix::<f64>::identity(n, n) * mu;
let shrunk = self.shrink_toward(&s, &target, delta);
self.delta = delta;
Ok(shrunk * self.frequency)
}
/// Shrinkage toward a single-factor (equal-weighted market) target,
/// following the structure of Ledoit & Wolf (2003) "shrink to market".
fn lw_single_factor(&mut self) -> Result<DMatrix<f64>> {
let t = self.t();
let n = self.n();
if t < 2 {
return Err(PortfolioError::InvalidArgument(
"need at least two observations for Ledoit-Wolf".into(),
));
}
let tt = t as f64;
let (centered, _) = self.centered();
// Biased sample covariance S (divide by T).
let mut s = DMatrix::<f64>::zeros(n, n);
for i in 0..n {
for j in 0..n {
let mut acc = 0.0;
for k in 0..t {
acc += centered[(k, i)] * centered[(k, j)];
}
s[(i, j)] = acc / tt;
}
}
// Equal-weighted "market" factor: row means of the centered returns.
let mut xmkt = DVector::<f64>::zeros(t);
for i in 0..t {
let mut sum = 0.0;
for j in 0..n {
sum += centered[(i, j)];
}
xmkt[i] = sum / n as f64;
}
let var_mkt: f64 = xmkt.iter().map(|v| v * v).sum::<f64>() / tt;
if var_mkt <= 0.0 {
return Err(PortfolioError::InvalidArgument(
"single-factor variance is zero or negative".into(),
));
}
// Regression betas of each asset on the market factor.
let mut betas = DVector::<f64>::zeros(n);
for j in 0..n {
let mut acc = 0.0;
for i in 0..t {
acc += centered[(i, j)] * xmkt[i];
}
betas[j] = (acc / tt) / var_mkt;
}
// Target F: sample variances on the diagonal, factor-implied covariances
// beta_i * beta_j * var_mkt elsewhere.
let mut f = DMatrix::<f64>::zeros(n, n);
for i in 0..n {
for j in 0..n {
f[(i, j)] = if i == j {
s[(i, i)]
} else {
betas[i] * betas[j] * var_mkt
};
}
}
// c = ||S - F||_F^2 (squared distance between sample and target).
let c = {
let mut acc = 0.0;
for i in 0..n {
for j in 0..n {
let d = s[(i, j)] - f[(i, j)];
acc += d * d;
}
}
acc
};
// y holds squared centered returns; feeds the pi-hat estimate below.
let y = {
let mut m = DMatrix::<f64>::zeros(t, n);
for i in 0..t {
for j in 0..n {
let v = centered[(i, j)];
m[(i, j)] = v * v;
}
}
m
};
let yty = y.transpose() * &y;
let sum_yty: f64 = yty.iter().sum();
let sum_s_sq: f64 = s.iter().map(|v| v * v).sum();
// p: total asymptotic variance of the sample covariance entries (pi-hat).
let p = sum_yty / tt - sum_s_sq;
let sum_y_sq: f64 = y.iter().map(|v| v * v).sum();
let sum_diag_s_sq: f64 = (0..n).map(|i| s[(i, i)] * s[(i, i)]).sum::<f64>();
// Diagonal contribution to rho-hat.
let r_diag = sum_y_sq / tt - sum_diag_s_sq;
// z: centered returns scaled by the market factor, used for the
// off-diagonal rho-hat terms.
let mut z = DMatrix::<f64>::zeros(t, n);
for i in 0..t {
for j in 0..n {
z[(i, j)] = centered[(i, j)] * xmkt[i];
}
}
let ytz = y.transpose() * &z;
let mut v1 = DMatrix::<f64>::zeros(n, n);
for i in 0..n {
for j in 0..n {
v1[(i, j)] = ytz[(i, j)] / tt - betas[i] * s[(i, j)];
}
}
// First off-diagonal rho term (diagonal entries excluded below).
let mut roff1 = 0.0;
for i in 0..n {
for j in 0..n {
roff1 += v1[(i, j)] * betas[j];
}
}
roff1 /= var_mkt;
let mut diag_term = 0.0;
for i in 0..n {
diag_term += v1[(i, i)] * betas[i];
}
roff1 -= diag_term / var_mkt;
let ztz = z.transpose() * &z;
let mut v3 = DMatrix::<f64>::zeros(n, n);
for i in 0..n {
for j in 0..n {
v3[(i, j)] = ztz[(i, j)] / tt - var_mkt * s[(i, j)];
}
}
// Second off-diagonal rho term (again excluding the diagonal).
let mut roff3 = 0.0;
for i in 0..n {
for j in 0..n {
roff3 += v3[(i, j)] * betas[i] * betas[j];
}
}
roff3 /= var_mkt * var_mkt;
let mut roff3b = 0.0;
for i in 0..n {
roff3b += v3[(i, i)] * betas[i] * betas[i];
}
roff3 -= roff3b / (var_mkt * var_mkt);
let roff = 2.0 * roff1 - roff3;
let r = r_diag + roff;
// Optimal intensity kappa / T, clamped into [0, 1].
let k = if c > 0.0 { (p - r) / c } else { 0.0 };
let delta = (k / tt).clamp(0.0, 1.0);
let shrunk = self.shrink_toward(&s, &f, delta);
self.delta = delta;
Ok(shrunk * self.frequency)
}
/// Shrinkage toward the constant-correlation target, mirroring the
/// estimator of Ledoit & Wolf (2004) "Honey, I Shrunk the Sample
/// Covariance Matrix".
fn lw_constant_correlation(&mut self) -> Result<DMatrix<f64>> {
let t = self.t() as f64;
if t < 2.0 {
return Err(PortfolioError::InvalidArgument(
"need at least two observations for Ledoit-Wolf".into(),
));
}
let sample = self.sample_cov_raw()?;
let target = self.constant_correlation_target(&sample);
let n = self.n();
let (centered, _) = self.centered();
// pi_mat[(i, j)]: asymptotic variance estimate of sample[(i, j)].
let mut pi_mat = DMatrix::<f64>::zeros(n, n);
for i in 0..n {
for j in 0..n {
let mut acc = 0.0;
for tt in 0..self.t() {
let prod = centered[(tt, i)] * centered[(tt, j)];
let dev = prod - sample[(i, j)];
acc += dev * dev;
}
pi_mat[(i, j)] = acc / t;
}
}
let pi: f64 = pi_mat.iter().sum();
// gamma = ||sample - target||_F^2 (target misspecification).
let mut gamma = 0.0;
for i in 0..n {
for j in 0..n {
let d = sample[(i, j)] - target[(i, j)];
gamma += d * d;
}
}
let mut std = DVector::<f64>::zeros(n);
for i in 0..n {
std[i] = sample[(i, i)].sqrt();
}
// Average upper-triangle correlation (same scheme as the target).
let mut sum_corr = 0.0;
let mut count = 0;
for i in 0..n {
for j in (i + 1)..n {
if std[i] > 0.0 && std[j] > 0.0 {
sum_corr += sample[(i, j)] / (std[i] * std[j]);
count += 1;
}
}
}
let avg_corr = if count > 0 {
sum_corr / count as f64
} else {
0.0
};
// theta(i, j, k, l): asymptotic covariance estimate between
// sample[(i, j)] and sample[(k, l)].
let theta = |i: usize, j: usize, k: usize, l: usize| -> f64 {
let mut acc = 0.0;
for tt in 0..self.t() {
let a = centered[(tt, i)] * centered[(tt, j)] - sample[(i, j)];
let b = centered[(tt, k)] * centered[(tt, l)] - sample[(k, l)];
acc += a * b;
}
acc / t
};
// rho = diagonal pi terms + off-diagonal theta correction.
let mut rho_diag = 0.0;
for i in 0..n {
rho_diag += pi_mat[(i, i)];
}
let mut rho_off = 0.0;
for i in 0..n {
for j in 0..n {
if i == j {
continue;
}
if std[i] == 0.0 || std[j] == 0.0 {
continue;
}
let term =
(std[j] / std[i]) * theta(i, i, i, j) + (std[i] / std[j]) * theta(j, j, i, j);
rho_off += 0.5 * avg_corr * term;
}
}
let rho = rho_diag + rho_off;
// Optimal intensity kappa / T, clamped into [0, 1].
let kappa = if gamma > 0.0 { (pi - rho) / gamma } else { 0.0 };
let delta = (kappa / t).clamp(0.0, 1.0);
let shrunk = self.shrink_toward(&sample, &target, delta);
self.delta = delta;
Ok(shrunk * self.frequency)
}
/// Oracle Approximating Shrinkage toward `mu * I`, matching the OAS
/// formula of Chen et al. (2010) as used by sklearn's `OAS`.
/// Note: does not update `self.delta` (takes `&self`).
pub fn oracle_approximating(&self) -> Result<DMatrix<f64>> {
let t = self.t() as f64;
let n = self.n() as f64;
if t < 2.0 {
return Err(PortfolioError::InvalidArgument(
"need at least two observations for oracle shrinkage".into(),
));
}
let sample = self.sample_cov_raw()?;
let trace_s: f64 = (0..self.n()).map(|i| sample[(i, i)]).sum();
let mu = trace_s / n;
let target = DMatrix::<f64>::identity(self.n(), self.n()) * mu;
// For symmetric S the sum of squared entries equals tr(S^2).
let trace_s2: f64 = sample.iter().map(|x| x * x).sum();
let trace_s_sq = trace_s.powi(2);
// OAS intensity:
// ((1 - 2/n) tr(S^2) + tr(S)^2) / ((t + 1 - 2/n)(tr(S^2) - tr(S)^2 / n)).
let num = (1.0 - 2.0 / n) * trace_s2 + trace_s_sq;
let denom = (t + 1.0 - 2.0 / n) * (trace_s2 - trace_s_sq / n);
// Degenerate denominator (S already proportional to I): shrink fully.
let rho = if denom > 0.0 {
(num / denom).clamp(0.0, 1.0)
} else {
1.0
};
let shrunk = self.shrink_toward(&sample, &target, rho);
Ok(shrunk * self.frequency)
}
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_relative_eq;
// Deterministic three-asset geometric price paths driven by an LCG, so the
// tests are reproducible without a rand dependency.
fn make_prices(seed: u64) -> DMatrix<f64> {
let mut state = seed;
let next = |s: &mut u64| -> f64 {
*s = s
.wrapping_mul(6364136223846793005)
.wrapping_add(1442695040888963407);
((*s >> 33) as u32 as f64) / (u32::MAX as f64) - 0.5
};
let rows = 200;
let mut p = DMatrix::<f64>::zeros(rows, 3);
p[(0, 0)] = 100.0;
p[(0, 1)] = 50.0;
p[(0, 2)] = 20.0;
for i in 1..rows {
let shock = next(&mut state) * 0.02;
p[(i, 0)] = p[(i - 1, 0)] * (1.0 + shock + 0.0005);
// Asset 1 is partially correlated with asset 0; asset 2 is independent.
p[(i, 1)] = p[(i - 1, 1)] * (1.0 + 0.7 * shock + next(&mut state) * 0.005);
p[(i, 2)] = p[(i - 1, 2)] * (1.0 + next(&mut state) * 0.01);
}
p
}
// Sample covariance must be symmetric with strictly positive variances.
#[test]
fn sample_cov_is_symmetric_and_psd_ish() {
let p = make_prices(7);
let cov = sample_cov(&p, ReturnsKind::Simple, Some(252)).unwrap();
for i in 0..3 {
for j in 0..3 {
assert_relative_eq!(cov[(i, j)], cov[(j, i)], max_relative = 1e-12);
}
assert!(cov[(i, i)] > 0.0);
}
}
// Diagonal of the semicovariance must equal the hand-computed downside
// formula (divide by T, annualise by 252).
#[test]
fn semicov_matches_pypfopt_formula() {
let p = make_prices(11);
let semi = semicovariance(&p, Some(0.0), ReturnsKind::Simple, Some(252)).unwrap();
let returns = returns_from_prices(&p).unwrap();
let (t, n) = returns.shape();
for i in 0..n {
let mut acc = 0.0;
for k in 0..t {
let d = (returns[(k, i)] - 0.0).min(0.0);
acc += d * d;
}
let expected = acc / (t as f64) * 252.0;
assert_relative_eq!(semi[(i, i)], expected, max_relative = 1e-12);
assert!(semi[(i, i)] >= 0.0);
}
}
// With a huge span the EWMA weights are nearly flat, so exp_cov should be
// close to the plain sample covariance.
#[test]
fn exp_cov_recovers_sample_for_huge_span() {
let p = make_prices(3);
let cov = sample_cov(&p, ReturnsKind::Simple, Some(252)).unwrap();
let ewma = exp_cov(&p, ReturnsKind::Simple, Some(10_000), Some(252)).unwrap();
for i in 0..3 {
assert!((ewma[(i, i)] - cov[(i, i)]).abs() / cov[(i, i)] < 0.05);
}
}
// cov -> corr -> cov must round-trip given the original standard deviations.
#[test]
fn cov_corr_round_trip() {
let p = make_prices(13);
let cov = sample_cov(&p, ReturnsKind::Simple, Some(252)).unwrap();
let corr = cov_to_corr(&cov).unwrap();
let std = DVector::from_iterator(3, (0..3).map(|i| cov[(i, i)].sqrt()));
let cov2 = corr_to_cov(&corr, &std).unwrap();
for i in 0..3 {
for j in 0..3 {
assert_relative_eq!(cov[(i, j)], cov2[(i, j)], max_relative = 1e-9);
}
}
}
// Correlation diagonal is forced to exactly 1.0.
#[test]
fn corr_diagonal_is_one() {
let p = make_prices(17);
let cov = sample_cov(&p, ReturnsKind::Simple, Some(252)).unwrap();
let corr = cov_to_corr(&cov).unwrap();
for i in 0..3 {
assert_relative_eq!(corr[(i, i)], 1.0, max_relative = 1e-12);
}
}
// Constant-variance LW: symmetric output, positive variances, delta in [0,1].
#[test]
fn ledoit_wolf_runs_and_is_symmetric() {
let p = make_prices(19);
let mut cs = CovarianceShrinkage::new(&p, ReturnsKind::Simple, Some(252)).unwrap();
let shrunk = cs.ledoit_wolf(LedoitWolfTarget::ConstantVariance).unwrap();
for i in 0..3 {
for j in 0..3 {
assert_relative_eq!(shrunk[(i, j)], shrunk[(j, i)], max_relative = 1e-9);
}
assert!(shrunk[(i, i)] > 0.0);
}
assert!((0.0..=1.0).contains(&cs.delta));
}
// Every LW target should produce a symmetric matrix with positive variances.
#[test]
fn ledoit_wolf_all_targets_produce_psd() {
let p = make_prices(31);
for target in [
LedoitWolfTarget::ConstantVariance,
LedoitWolfTarget::SingleFactor,
LedoitWolfTarget::ConstantCorrelation,
] {
let mut cs = CovarianceShrinkage::new(&p, ReturnsKind::Simple, Some(252)).unwrap();
let shrunk = cs.ledoit_wolf(target).unwrap();
for i in 0..3 {
for j in 0..3 {
assert_relative_eq!(shrunk[(i, j)], shrunk[(j, i)], max_relative = 1e-9);
}
assert!(shrunk[(i, i)] > 0.0);
}
}
}
// shrunk_covariance must reject intensities outside [0, 1] and record delta.
#[test]
fn shrunk_covariance_intensity_validation() {
let p = make_prices(29);
let mut cs = CovarianceShrinkage::new(&p, ReturnsKind::Simple, Some(252)).unwrap();
assert!(cs.shrunk_covariance(-0.1).is_err());
assert!(cs.shrunk_covariance(1.5).is_err());
let shrunk = cs.shrunk_covariance(0.2).unwrap();
assert_relative_eq!(cs.delta, 0.2, max_relative = 1e-12);
for i in 0..3 {
assert!(shrunk[(i, i)] > 0.0);
}
}
// Spectral repair of an indefinite matrix must leave no negative eigenvalues.
#[test]
fn fix_nonpositive_semidefinite_repairs_negatives() {
let m = DMatrix::from_row_slice(3, 3, &[1.0, 0.9, 0.9, 0.9, 1.0, -0.95, 0.9, -0.95, 1.0]);
let fixed = fix_nonpositive_semidefinite(&m, FixMethod::Spectral).unwrap();
let eig = SymmetricEigen::new(fixed);
for v in eig.eigenvalues.iter() {
assert!(*v >= -1e-10, "eigenvalue {v} not >= 0 after repair");
}
}
// Labeled wrapper must carry tickers, match the unlabeled values, and
// support labeled lookups.
#[test]
fn labeled_sample_cov_carries_tickers() {
let p = make_prices(7);
let tickers = vec!["AAPL".into(), "MSFT".into(), "TSLA".into()];
let lc = sample_cov_labeled(&p, &tickers, ReturnsKind::Simple, Some(252)).unwrap();
assert_eq!(lc.tickers, tickers);
let unlabeled = sample_cov(&p, ReturnsKind::Simple, Some(252)).unwrap();
for i in 0..3 {
for j in 0..3 {
assert_relative_eq!(lc.values[(i, j)], unlabeled[(i, j)], max_relative = 1e-12);
}
}
assert!(lc.get("AAPL", "MSFT").is_some());
assert!(lc.get("AAPL", "GOOG").is_none());
}
// Two tickers for three price columns must be rejected.
#[test]
fn labeled_rejects_mismatched_count() {
let p = make_prices(7);
let tickers = vec!["AAPL".into(), "MSFT".into()];
assert!(sample_cov_labeled(&p, &tickers, ReturnsKind::Simple, None).is_err());
}
// OAS estimator: symmetric output with positive variances.
#[test]
fn oracle_runs_and_is_symmetric() {
let p = make_prices(23);
let shrunk = CovarianceShrinkage::new(&p, ReturnsKind::Simple, Some(252))
.unwrap()
.oracle_approximating()
.unwrap();
for i in 0..3 {
for j in 0..3 {
assert_relative_eq!(shrunk[(i, j)], shrunk[(j, i)], max_relative = 1e-9);
}
assert!(shrunk[(i, i)] > 0.0);
}
}
}