use crate::error::{IntegrateError, IntegrateResult};
use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2, Axis};
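/// Common interface for data-driven portfolio optimizers.
///
/// Implementors estimate model parameters from a `(n_observations, n_assets)`
/// matrix of historical returns via `fit`, then expose the usual Markowitz
/// quantities: `optimal_weights` (the global minimum-variance portfolio when
/// `target_return` is `None`, otherwise the minimum-variance portfolio hitting
/// the requested mean return), `expected_return` (wᵀμ), `portfolio_variance`
/// (wᵀΣw), and `sharpe_ratio` ((wᵀμ − r_f) / √(wᵀΣw)).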
pub trait MLPortfolioOptimizer: Send + Sync {
fn fit(&mut self, returns: ArrayView2<f64>) -> IntegrateResult<()>;
fn optimal_weights(&self, target_return: Option<f64>) -> IntegrateResult<Array1<f64>>;
fn sharpe_ratio(&self, weights: ArrayView1<f64>, risk_free: f64) -> IntegrateResult<f64>;
fn expected_return(&self, weights: ArrayView1<f64>) -> IntegrateResult<f64>;
fn portfolio_variance(&self, weights: ArrayView1<f64>) -> IntegrateResult<f64>;
}
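/// Inverts a square matrix via Gauss-Jordan elimination with partial pivoting.
///
/// The matrix is augmented with the identity, reduced to the identity in place,
/// and the right half of the augmented matrix is returned as the inverse.
/// A pivot smaller than `1e-12` in absolute value is treated as singular.
/// Runs in O(n³), which is adequate for the small covariance matrices used here.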
pub(crate) fn invert_matrix(m: &Array2<f64>) -> IntegrateResult<Array2<f64>> {
let n = m.nrows();
if n != m.ncols() {
return Err(IntegrateError::ValueError(format!(
"Matrix must be square, got {}×{}",
n,
m.ncols()
)));
}
let mut aug = Array2::<f64>::zeros((n, 2 * n));
for i in 0..n {
for j in 0..n {
aug[[i, j]] = m[[i, j]];
}
aug[[i, n + i]] = 1.0;
}
for col in 0..n {
let mut pivot_row = col;
let mut max_val = aug[[col, col]].abs();
for row in (col + 1)..n {
let v = aug[[row, col]].abs();
if v > max_val {
max_val = v;
pivot_row = row;
}
}
if max_val < 1e-12 {
return Err(IntegrateError::LinearSolveError(
"Matrix is singular or nearly singular; cannot invert".to_string(),
));
}
if pivot_row != col {
for k in 0..(2 * n) {
let tmp = aug[[col, k]];
aug[[col, k]] = aug[[pivot_row, k]];
aug[[pivot_row, k]] = tmp;
}
}
let pivot = aug[[col, col]];
for k in 0..(2 * n) {
aug[[col, k]] /= pivot;
}
for row in 0..n {
if row != col {
let factor = aug[[row, col]];
for k in 0..(2 * n) {
let sub = factor * aug[[col, k]];
aug[[row, k]] -= sub;
}
}
}
}
let mut inv = Array2::<f64>::zeros((n, n));
for i in 0..n {
for j in 0..n {
inv[[i, j]] = aug[[i, n + j]];
}
}
Ok(inv)
}
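/// Computes the per-asset sample mean and the unbiased sample covariance
/// (divisor `n - 1`) of a `(n_observations, n_assets)` return matrix.
/// Requires at least two observations.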
fn sample_covariance(returns: ArrayView2<f64>) -> IntegrateResult<(Array1<f64>, Array2<f64>)> {
let n = returns.nrows();
let p = returns.ncols();
if n < 2 {
return Err(IntegrateError::ValueError(
"At least 2 observations are required to compute covariance".to_string(),
));
}
let mu = returns
.mean_axis(Axis(0))
.ok_or_else(|| IntegrateError::ComputationError("Failed to compute mean".to_string()))?;
let centered = {
let mut c = returns.to_owned();
for mut row in c.rows_mut() {
for j in 0..p {
row[j] -= mu[j];
}
}
c
};
let sigma_raw = centered.t().dot(&centered);
let sigma = sigma_raw.mapv(|v| v / (n as f64 - 1.0));
Ok((mu, sigma))
}
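/// Classical Markowitz mean-variance optimizer.
///
/// `fit` estimates the mean vector μ and covariance matrix Σ from historical
/// returns (adding a small ridge term to the diagonal for numerical stability)
/// and caches Σ⁻¹ for the closed-form weight formulas.
///
/// Minimal usage sketch (the `returns` array here stands in for any
/// `(n_observations, n_assets)` matrix of historical returns):
///
/// ```ignore
/// let mut portfolio = MeanVariancePortfolio::new(3);
/// portfolio.fit(returns.view())?;
/// let w = portfolio.optimal_weights(None)?;        // global minimum-variance weights
/// let sharpe = portfolio.sharpe_ratio(w.view(), 0.0)?;
/// ```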
#[derive(Debug, Clone)]
pub struct MeanVariancePortfolio {
mu: Option<Array1<f64>>,
sigma: Option<Array2<f64>>,
sigma_inv: Option<Array2<f64>>,
n_assets: usize,
}
impl MeanVariancePortfolio {
pub fn new(n_assets: usize) -> Self {
Self {
mu: None,
sigma: None,
sigma_inv: None,
n_assets,
}
}
pub fn mu(&self) -> IntegrateResult<&Array1<f64>> {
self.mu.as_ref().ok_or_else(|| {
IntegrateError::ValueError("Portfolio has not been fitted yet".to_string())
})
}
pub fn sigma(&self) -> IntegrateResult<&Array2<f64>> {
self.sigma.as_ref().ok_or_else(|| {
IntegrateError::ValueError("Portfolio has not been fitted yet".to_string())
})
}
fn sigma_inv(&self) -> IntegrateResult<&Array2<f64>> {
self.sigma_inv.as_ref().ok_or_else(|| {
IntegrateError::ValueError("Portfolio has not been fitted yet".to_string())
})
}
fn check_weights(&self, weights: ArrayView1<f64>) -> IntegrateResult<()> {
if weights.len() != self.n_assets {
return Err(IntegrateError::DimensionMismatch(format!(
"Expected {} weights, got {}",
self.n_assets,
weights.len()
)));
}
Ok(())
}
}
impl MLPortfolioOptimizer for MeanVariancePortfolio {
fn fit(&mut self, returns: ArrayView2<f64>) -> IntegrateResult<()> {
if returns.ncols() != self.n_assets {
return Err(IntegrateError::DimensionMismatch(format!(
"Expected {} assets, got {}",
self.n_assets,
returns.ncols()
)));
}
let (mu, mut sigma) = sample_covariance(returns)?;
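// Ridge (Tikhonov) regularization: add a small constant to the diagonal of Σ
// so the subsequent inversion stays well-conditioned even when the sample
// covariance is near-singular.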
let lambda = 1e-6_f64;
for i in 0..self.n_assets {
sigma[[i, i]] += lambda;
}
let sigma_inv = invert_matrix(&sigma)?;
self.mu = Some(mu);
self.sigma = Some(sigma);
self.sigma_inv = Some(sigma_inv);
Ok(())
}
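// Closed-form Markowitz solution. With A = μᵀΣ⁻¹μ, B = 1ᵀΣ⁻¹μ, C = 1ᵀΣ⁻¹1,
// and Δ = AC − B²:
// * no target return: global minimum-variance portfolio w = Σ⁻¹1 / C;
// * target return μ*:  w = Σ⁻¹(λμ + γ1) with λ = (Cμ* − B)/Δ and
//   γ = (A − Bμ*)/Δ, which satisfies wᵀμ = μ* and wᵀ1 = 1.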
fn optimal_weights(&self, target_return: Option<f64>) -> IntegrateResult<Array1<f64>> {
let mu = self.mu()?;
let sigma_inv = self.sigma_inv()?;
let n = self.n_assets;
let ones = Array1::<f64>::ones(n);
let si_ones = sigma_inv.dot(&ones);
let c = ones.dot(&si_ones);
match target_return {
None => {
if c.abs() < 1e-12 {
return Err(IntegrateError::LinearSolveError(
"Degenerate covariance (C ≈ 0); cannot solve for min-variance".to_string(),
));
}
Ok(si_ones.mapv(|v| v / c))
}
Some(mu_target) => {
let si_mu = sigma_inv.dot(mu);
let a = mu.dot(&si_mu);
let b = ones.dot(&si_mu);
let delta = a * c - b * b;
if delta.abs() < 1e-12 {
return Err(IntegrateError::LinearSolveError(
"Efficient frontier is degenerate (Δ ≈ 0)".to_string(),
));
}
let lambda_lag = (c * mu_target - b) / delta;
let gamma = (a - b * mu_target) / delta;
let rhs = mu.mapv(|v| lambda_lag * v) + ones.mapv(|v| gamma * v);
let weights = sigma_inv.dot(&rhs);
let sum_w = weights.sum();
if (sum_w - 1.0).abs() > 1e-4 {
return Err(IntegrateError::ComputationError(format!(
"Weight sum constraint violated: sum(w) = {:.6}",
sum_w
)));
}
Ok(weights)
}
}
}
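// Sharpe ratio: (wᵀμ − r_f) / √(wᵀΣw); undefined when the variance is ~0.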
fn sharpe_ratio(&self, weights: ArrayView1<f64>, risk_free: f64) -> IntegrateResult<f64> {
self.check_weights(weights)?;
let exp_ret = self.expected_return(weights)?;
let var = self.portfolio_variance(weights)?;
if var < 1e-16 {
return Err(IntegrateError::ComputationError(
"Portfolio variance is effectively zero; Sharpe ratio undefined".to_string(),
));
}
Ok((exp_ret - risk_free) / var.sqrt())
}
fn expected_return(&self, weights: ArrayView1<f64>) -> IntegrateResult<f64> {
self.check_weights(weights)?;
let mu = self.mu()?;
Ok(weights.dot(mu))
}
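// Portfolio variance: wᵀΣw.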
fn portfolio_variance(&self, weights: ArrayView1<f64>) -> IntegrateResult<f64> {
self.check_weights(weights)?;
let sigma = self.sigma()?;
let sw = sigma.dot(&weights);
Ok(weights.dot(&sw))
}
}
#[cfg(test)]
mod tests {
use super::*;
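// Deterministic synthetic return matrix: a sinusoid plus a per-asset offset,
// so the tests are reproducible without a random-number dependency.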
fn make_returns(n_samples: usize, n_assets: usize) -> Array2<f64> {
Array2::from_shape_fn((n_samples, n_assets), |(i, j)| {
let t = (i * 7 + j * 13) as f64 * 0.01;
t.sin() * 0.02 + (j as f64) * 0.001
})
}
#[test]
fn test_mean_variance_fit() {
let returns = make_returns(100, 3);
let mut p = MeanVariancePortfolio::new(3);
p.fit(returns.view()).expect("fit should succeed");
assert!(p.mu.is_some());
assert!(p.sigma.is_some());
assert!(p.sigma_inv.is_some());
}
#[test]
fn test_min_variance_weights_sum_to_one() {
let returns = make_returns(100, 3);
let mut p = MeanVariancePortfolio::new(3);
p.fit(returns.view()).expect("fit should succeed");
let w = p
.optimal_weights(None)
.expect("optimal_weights should succeed");
let sum = w.sum();
assert!(
(sum - 1.0).abs() < 1e-8,
"weights should sum to 1, got {:.10}",
sum
);
}
#[test]
fn test_sharpe_ratio_positive_for_good_portfolio() {
let returns = Array2::<f64>::from_shape_fn((100, 3), |(i, j)| {
((i * 7 + j * 13) as f64 * 0.01).sin() * 0.005 + 0.002 + j as f64 * 0.001
});
let mut p = MeanVariancePortfolio::new(3);
p.fit(returns.view()).expect("fit should succeed");
let w = p
.optimal_weights(None)
.expect("optimal_weights should succeed");
let sharpe = p
.sharpe_ratio(w.view(), 0.0)
.expect("sharpe_ratio should succeed");
assert!(sharpe > 0.0, "Expected positive Sharpe, got {:.4}", sharpe);
}
#[test]
fn test_target_return_feasibility() {
let returns = Array2::<f64>::from_shape_fn((100, 3), |(i, j)| {
((i * 11 + j * 7) as f64 * 0.01).cos() * 0.02 + j as f64 * 0.002
});
let mut p = MeanVariancePortfolio::new(3);
p.fit(returns.view()).expect("fit should succeed");
let mu = p.mu().expect("mu should be available");
let mean_ret = mu.mean().unwrap_or(0.0);
let w = p.optimal_weights(Some(mean_ret));
match w {
Ok(weights) => {
let s = weights.sum();
assert!(
(s - 1.0).abs() < 1e-3,
"Lagrangian weights should sum to 1, got {:.6}",
s
);
}
Err(e) => {
println!("Lagrangian returned error (acceptable for degenerate data): {e}");
}
}
}
#[test]
fn test_covariance_positive_definite() {
let returns = make_returns(100, 3);
let mut p = MeanVariancePortfolio::new(3);
p.fit(returns.view()).expect("fit should succeed");
let sigma = p.sigma().expect("sigma should be available");
let m11 = sigma[[0, 0]];
let m22 = sigma[[0, 0]] * sigma[[1, 1]] - sigma[[0, 1]] * sigma[[1, 0]];
let m33 = sigma[[0, 0]] * (sigma[[1, 1]] * sigma[[2, 2]] - sigma[[1, 2]] * sigma[[2, 1]])
- sigma[[0, 1]] * (sigma[[1, 0]] * sigma[[2, 2]] - sigma[[1, 2]] * sigma[[2, 0]])
+ sigma[[0, 2]] * (sigma[[1, 0]] * sigma[[2, 1]] - sigma[[1, 1]] * sigma[[2, 0]]);
assert!(
m11 > 0.0,
"1×1 leading minor should be positive: {:.6}",
m11
);
assert!(
m22 > 0.0,
"2×2 leading minor should be positive: {:.6}",
m22
);
assert!(
m33 > 0.0,
"3×3 leading minor (det) should be positive: {:.6}",
m33
);
}
#[test]
fn test_expected_return_linear() {
let returns = make_returns(50, 3);
let mut p = MeanVariancePortfolio::new(3);
p.fit(returns.view()).expect("fit should succeed");
let w: Array1<f64> = Array1::from_vec(vec![0.5, 0.3, 0.2]);
let er = p
.expected_return(w.view())
.expect("expected_return should succeed");
let mu = p.mu().expect("mu");
let manual = w[0] * mu[0] + w[1] * mu[1] + w[2] * mu[2];
assert!((er - manual).abs() < 1e-12, "expected_return mismatch");
}
#[test]
fn test_portfolio_variance_non_negative() {
let returns = make_returns(100, 3);
let mut p = MeanVariancePortfolio::new(3);
p.fit(returns.view()).expect("fit should succeed");
let w: Array1<f64> = Array1::from_vec(vec![1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0]);
let var = p
.portfolio_variance(w.view())
.expect("portfolio_variance should succeed");
assert!(
var >= 0.0,
"Portfolio variance must be non-negative, got {:.6}",
var
);
}
#[test]
fn test_dimension_mismatch_error() {
let returns = make_returns(100, 3);
let mut p = MeanVariancePortfolio::new(3);
p.fit(returns.view()).expect("fit should succeed");
let wrong_w: Array1<f64> = Array1::from_vec(vec![0.5, 0.5]);
assert!(p.expected_return(wrong_w.view()).is_err());
}
}