use std::cmp::Ordering;
/// One objective's sensitivity vector: one partial-derivative value per
/// design point (a single row of the sensitivity Jacobian).
pub type SensitivityRow = Vec<f64>;
/// Result of a redundancy analysis over a set of objective sensitivity rows.
///
/// Produced by `analyze_redundancy`. The Gram matrix of pairwise dot
/// products and its eigenvalues describe how many linearly independent
/// directions the objectives span; the pairwise cosines identify which
/// specific objectives are (anti-)aligned.
#[derive(Debug, Clone)]
pub struct RedundancyAnalysis {
    /// Number of objectives `m` (rows of the sensitivity matrix).
    pub num_objectives: usize,
    /// Number of design points `n` (columns of the sensitivity matrix).
    pub num_design_points: usize,
    /// Eigenvalues of the Gram matrix, clamped to `>= 0` and sorted in
    /// descending order when produced by `analyze_redundancy`.
    pub eigenvalues: Vec<f64>,
    /// Row-major `m x m` matrix of pairwise cosine similarities in `[-1, 1]`.
    pub cosines: Vec<f64>,
    /// Row-major `m x m` Gram matrix of sensitivity-row dot products.
    pub gram: Vec<f64>,
}

impl RedundancyAnalysis {
    /// Numerical rank of the sensitivity matrix at relative tolerance `eps`:
    /// the number of eigenvalues exceeding `eps` times the largest eigenvalue.
    ///
    /// Returns 0 when the largest eigenvalue is not positive.
    pub fn effective_dimension(&self, eps: f64) -> usize {
        // Use the true maximum rather than assuming the first element is
        // largest: the fields are public, so callers may hold a reordered
        // eigenvalue vector.
        let max_ev = self.eigenvalues.iter().copied().fold(0.0_f64, f64::max);
        if max_ev <= 0.0 {
            return 0;
        }
        let threshold = eps * max_ev;
        self.eigenvalues.iter().filter(|&&ev| ev > threshold).count()
    }

    /// Upper bound on the dimension of the Pareto front implied by the
    /// sensitivity rank: `min(rank - 1, m - 1)`, where `rank` counts
    /// eigenvalues above the absolute tolerance `tol`.
    ///
    /// Returns 0 when the rank is zero (all objectives are insensitive).
    pub fn pareto_dimension_bound(&self, tol: f64) -> usize {
        let rank = self.eigenvalues.iter().filter(|&&ev| ev > tol).count();
        if rank == 0 {
            return 0;
        }
        (rank - 1).min(self.num_objectives.saturating_sub(1))
    }

    /// Fraction of total spectral mass captured by the top `k` eigenvalues.
    ///
    /// Returns 0.0 when the eigenvalue sum is not positive; `k` larger than
    /// the number of eigenvalues simply yields 1.0.
    pub fn variance_fraction(&self, k: usize) -> f64 {
        let total: f64 = self.eigenvalues.iter().sum();
        if total <= 0.0 {
            return 0.0;
        }
        let top_k: f64 = self.eigenvalues.iter().take(k).sum();
        top_k / total
    }

    /// Cosine similarity between objectives `i` and `j`, or `None` when
    /// either index is out of range.
    pub fn cosine(&self, i: usize, j: usize) -> Option<f64> {
        let m = self.num_objectives;
        if i >= m || j >= m {
            return None;
        }
        // Checked access guards against an externally-constructed struct
        // whose `cosines` buffer is shorter than `m * m`.
        self.cosines.get(i * m + j).copied()
    }

    /// Trace of the Gram matrix: the sum of squared norms of all
    /// sensitivity rows (equals the eigenvalue sum).
    pub fn trace(&self) -> f64 {
        let m = self.num_objectives;
        (0..m).map(|i| self.gram[i * m + i]).sum()
    }

    /// All objective pairs `(i, j, cosine)` with `i < j` whose absolute
    /// cosine similarity exceeds `threshold` — i.e. near-redundant pairs.
    pub fn redundant_pairs(&self, threshold: f64) -> Vec<(usize, usize, f64)> {
        let m = self.num_objectives;
        let mut pairs = Vec::new();
        for i in 0..m {
            for j in (i + 1)..m {
                let c = self.cosines[i * m + j];
                if c.abs() > threshold {
                    pairs.push((i, j, c));
                }
            }
        }
        pairs
    }
}
/// Builds a [`RedundancyAnalysis`] from one sensitivity row per objective.
///
/// Returns `None` when the input is empty, when the rows are empty or have
/// inconsistent lengths, or when any entry is non-finite (NaN / infinity).
pub fn analyze_redundancy(sensitivities: &[SensitivityRow]) -> Option<RedundancyAnalysis> {
    let n = sensitivities.first()?.len();
    let m = sensitivities.len();
    if n == 0 {
        return None;
    }
    let rows_valid = sensitivities
        .iter()
        .all(|row| row.len() == n && row.iter().all(|v| v.is_finite()));
    if !rows_valid {
        return None;
    }
    // Gram matrix of pairwise dot products, filled symmetrically from the
    // upper triangle.
    let mut gram = vec![0.0; m * m];
    for (i, row_i) in sensitivities.iter().enumerate() {
        for (j, row_j) in sensitivities.iter().enumerate().skip(i) {
            let dot: f64 = row_i.iter().zip(row_j.iter()).map(|(&a, &b)| a * b).sum();
            gram[i * m + j] = dot;
            gram[j * m + i] = dot;
        }
    }
    // Row norms come straight off the Gram diagonal.
    let norms: Vec<f64> = (0..m).map(|i| gram[i * m + i].sqrt()).collect();
    let mut cosines = vec![0.0; m * m];
    for i in 0..m {
        for j in 0..m {
            if norms[i] > 0.0 && norms[j] > 0.0 {
                let raw = gram[i * m + j] / (norms[i] * norms[j]);
                // Clamp to guard against rounding pushing |cos| above 1.
                cosines[i * m + j] = raw.clamp(-1.0, 1.0);
            } else if i == j {
                // A zero row is defined to be perfectly aligned with itself.
                cosines[i * m + j] = 1.0;
            }
        }
    }
    Some(RedundancyAnalysis {
        num_objectives: m,
        num_design_points: n,
        eigenvalues: symmetric_eigenvalues(&gram, m),
        cosines,
        gram,
    })
}
/// Approximates the Jacobian of `objectives` at `mu` by central differences.
///
/// Row `i`, column `j` of the result holds the estimate of
/// `d objectives[i] / d mu[j]`, computed as
/// `(f(mu + eps * e_j) - f(mu - eps * e_j)) / (2 * eps)`.
///
/// # Panics
///
/// Panics if `eps` is not a positive finite number. (An infinite `eps`
/// would previously pass the `eps > 0.0` check and silently produce a
/// NaN-filled Jacobian.)
pub fn finite_difference_jacobian<F>(mu: &[f64], objectives: &[F], eps: f64) -> Vec<SensitivityRow>
where
    F: Fn(&[f64]) -> f64,
{
    assert!(
        eps > 0.0 && eps.is_finite(),
        "eps must be positive and finite, got {eps}"
    );
    let n = mu.len();
    let m = objectives.len();
    let mut jacobian = vec![vec![0.0; n]; m];
    // Reuse one perturbation buffer instead of cloning `mu` per coordinate;
    // the perturbed entry is restored before moving to the next column.
    let mut perturbed = mu.to_vec();
    for j in 0..n {
        let old = perturbed[j];
        perturbed[j] = old + eps;
        let fwd: Vec<f64> = objectives.iter().map(|f| f(&perturbed)).collect();
        perturbed[j] = old - eps;
        let bwd: Vec<f64> = objectives.iter().map(|f| f(&perturbed)).collect();
        perturbed[j] = old;
        for (row, (&f, &b)) in jacobian.iter_mut().zip(fwd.iter().zip(bwd.iter())) {
            row[j] = (f - b) / (2.0 * eps);
        }
    }
    jacobian
}
/// Eigenvalues of a symmetric `n x n` matrix `a` (row-major), computed with
/// the classical Jacobi rotation method (largest off-diagonal pivot).
///
/// Rounding-induced negative values are clamped to zero (callers pass
/// Gram matrices, which are positive semi-definite), and the result is
/// sorted in descending order.
fn symmetric_eigenvalues(a: &[f64], n: usize) -> Vec<f64> {
    debug_assert_eq!(a.len(), n * n);
    if n == 0 {
        return Vec::new();
    }
    if n == 1 {
        return vec![a[0].max(0.0)];
    }
    let mut mat = a.to_vec();
    let max_iter = 100 * n * n;
    let tol = 1e-12;
    for _ in 0..max_iter {
        // Classical Jacobi pivot: the largest off-diagonal element.
        let mut max_off = 0.0_f64;
        let (mut p, mut q) = (0, 1);
        for i in 0..n {
            for j in (i + 1)..n {
                let v = mat[i * n + j].abs();
                if v > max_off {
                    max_off = v;
                    p = i;
                    q = j;
                }
            }
        }
        if max_off < tol {
            break;
        }
        let app = mat[p * n + p];
        let aqq = mat[q * n + q];
        let apq = mat[p * n + q];
        // Rotation angle that zeroes mat[p][q]: tan(2*theta) = 2*apq / (app - aqq).
        // When the diagonal entries coincide, the limit is pi/4 (either sign
        // of apq is annihilated because cos^2 - sin^2 = 0 there).
        let theta = if (app - aqq).abs() < tol {
            core::f64::consts::FRAC_PI_4
        } else {
            0.5 * (2.0 * apq / (app - aqq)).atan()
        };
        let c = theta.cos();
        let s = theta.sin();
        // Apply the rotation in place: only rows/columns p and q change, so
        // cloning the whole matrix per pivot (as before) is wasted work.
        // Reads touch only (i,p)/(i,q) with i != p,q, which never alias the
        // entries written for a different i, so in-place updates are safe.
        for i in 0..n {
            if i != p && i != q {
                let ip = mat[i * n + p];
                let iq = mat[i * n + q];
                let new_ip = c * ip + s * iq;
                let new_iq = -s * ip + c * iq;
                mat[i * n + p] = new_ip;
                mat[p * n + i] = new_ip;
                mat[i * n + q] = new_iq;
                mat[q * n + i] = new_iq;
            }
        }
        mat[p * n + p] = c * c * app + 2.0 * c * s * apq + s * s * aqq;
        mat[q * n + q] = s * s * app - 2.0 * c * s * apq + c * c * aqq;
        mat[p * n + q] = 0.0;
        mat[q * n + p] = 0.0;
    }
    let mut eigenvalues: Vec<f64> = (0..n).map(|i| mat[i * n + i].max(0.0)).collect();
    // Descending order; NaN cannot arise from finite input, but compare safely.
    eigenvalues.sort_by(|a, b| b.partial_cmp(a).unwrap_or(Ordering::Equal));
    eigenvalues
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn proportional_sensitivities_have_rank_one() {
        let s = vec![
            vec![1.0, 2.0, 3.0, 4.0],
            vec![3.0, 6.0, 9.0, 12.0],
        ];
        let a = analyze_redundancy(&s).unwrap();
        assert_eq!(a.effective_dimension(0.01), 1);
        assert_eq!(a.pareto_dimension_bound(1e-6), 0);
        assert!((a.cosine(0, 1).unwrap() - 1.0).abs() < 1e-9);
    }

    #[test]
    fn anti_proportional_sensitivities_have_rank_one() {
        let s = vec![vec![1.0, 2.0, 3.0], vec![-2.0, -4.0, -6.0]];
        let a = analyze_redundancy(&s).unwrap();
        assert_eq!(a.pareto_dimension_bound(1e-6), 0);
        let cos = a.cosine(0, 1).unwrap();
        assert!(
            (cos - (-1.0)).abs() < 1e-9,
            "expected cosine = -1.0, got {cos}"
        );
    }

    #[test]
    fn orthogonal_sensitivities_have_full_rank() {
        let s = vec![vec![1.0, 0.0, 0.0], vec![0.0, 1.0, 0.0]];
        let a = analyze_redundancy(&s).unwrap();
        assert_eq!(a.effective_dimension(0.01), 2);
        assert_eq!(a.pareto_dimension_bound(1e-6), 1);
        assert!(a.cosine(0, 1).unwrap().abs() < 1e-9);
    }

    #[test]
    fn three_independent_objectives() {
        let s = vec![
            vec![1.0, 0.0, 0.0, 0.5],
            vec![0.0, 1.0, 0.0, 0.3],
            vec![0.0, 0.0, 1.0, 0.1],
        ];
        let a = analyze_redundancy(&s).unwrap();
        assert_eq!(a.effective_dimension(0.01), 3);
        assert_eq!(a.pareto_dimension_bound(1e-6), 2);
    }

    #[test]
    fn eigenvalue_sum_equals_gram_trace() {
        let s = vec![
            vec![1.0, 2.0, 3.0],
            vec![4.0, 5.0, 6.0],
            vec![7.0, 8.0, 9.0],
        ];
        let a = analyze_redundancy(&s).unwrap();
        let ev_sum: f64 = a.eigenvalues.iter().sum();
        let trace = a.trace();
        assert!(
            (ev_sum - trace).abs() < 1e-6,
            "eigenvalue sum = {ev_sum}, trace = {trace}"
        );
        let norm_sq_sum: f64 = s
            .iter()
            .map(|row| row.iter().map(|&x| x * x).sum::<f64>())
            .sum();
        assert!(
            (trace - norm_sq_sum).abs() < 1e-9,
            "trace = {trace}, norm_sq_sum = {norm_sq_sum}"
        );
    }

    #[test]
    fn permuting_rows_preserves_eigenvalues() {
        let s_orig = vec![
            vec![1.0, 2.0, 3.0],
            vec![0.0, 5.0, 0.0],
            vec![4.0, 0.0, 1.0],
        ];
        let s_perm = vec![s_orig[2].clone(), s_orig[0].clone(), s_orig[1].clone()];
        let a1 = analyze_redundancy(&s_orig).unwrap();
        let a2 = analyze_redundancy(&s_perm).unwrap();
        for (e1, e2) in a1.eigenvalues.iter().zip(a2.eigenvalues.iter()) {
            assert!(
                (e1 - e2).abs() < 1e-9,
                "eigenvalues differ after permutation: {e1} vs {e2}"
            );
        }
    }

    #[test]
    fn non_contextual_collapse_two_arms() {
        let delta = 0.5_f64;
        let delta_det = 0.3_f64;
        let n = 50.0_f64;
        let t = 1000.0_f64;
        let c = 1.0_f64;
        let s_r = delta;
        let s_mse = -1.0 / (n * n);
        let s_d = -c * t / (n * n * delta_det * delta_det);
        let sensitivities = vec![vec![s_r], vec![s_mse], vec![s_d]];
        let a = analyze_redundancy(&sensitivities).unwrap();
        let cos_mse_d = a.cosine(1, 2).unwrap();
        assert!(
            (cos_mse_d - 1.0).abs() < 1e-9,
            "MSE and Detection should be perfectly correlated, got {cos_mse_d}"
        );
        let cos_r_mse = a.cosine(0, 1).unwrap();
        assert!(
            (cos_r_mse - (-1.0)).abs() < 1e-9,
            "Regret and MSE should be anti-proportional, got {cos_r_mse}"
        );
        assert_eq!(a.pareto_dimension_bound(1e-6), 0);
    }

    #[test]
    fn contextual_revival_three_objectives() {
        let cells: [f64; 5] = [0.1, 0.3, 0.5, 0.7, 0.9];
        let s_regret: Vec<f64> = cells.iter().map(|&x| (2.0 * x - 1.0).abs()).collect();
        let s_estimation: Vec<f64> = cells
            .iter()
            .map(|&x| -(x * x + (1.0 - x) * (1.0 - x)))
            .collect();
        let mut s_detection = vec![0.0; 5];
        s_detection[0] = -1.0;
        let sensitivities = vec![s_regret, s_estimation, s_detection];
        let a = analyze_redundancy(&sensitivities).unwrap();
        assert_eq!(a.effective_dimension(0.01), 3);
        assert_eq!(a.pareto_dimension_bound(1e-6), 2);
        let cos_re = a.cosine(0, 1).unwrap();
        assert!(cos_re.abs() < 0.95, "regret-estimation cosine = {cos_re}");
    }

    #[test]
    fn average_detection_is_redundant_with_estimation_even_contextual() {
        let p2: [f64; 5] = [0.1, 0.2, 0.4, 0.2, 0.1];
        let s_imse: Vec<f64> = p2.iter().map(|&p| -1.0 / (p * p)).collect();
        let c: f64 = 3.5;
        let s_d_avg: Vec<f64> = p2.iter().map(|&p| -c / (p * p)).collect();
        let s_regret: Vec<f64> = p2.iter().map(|&p| 10.0 * p).collect();
        let sensitivities = vec![s_regret, s_imse, s_d_avg];
        let a = analyze_redundancy(&sensitivities).unwrap();
        let cos_imse_davg = a.cosine(1, 2).unwrap();
        assert!(
            (cos_imse_davg - 1.0).abs() < 1e-9,
            "IMSE and avg detection should be perfectly correlated, got {cos_imse_davg}"
        );
        assert_eq!(a.pareto_dimension_bound(1e-6), 1);
        let pairs = a.redundant_pairs(0.999);
        assert_eq!(pairs.len(), 1);
        assert_eq!((pairs[0].0, pairs[0].1), (1, 2));
    }

    #[test]
    fn variance_fraction_sums_correctly() {
        let s = vec![
            vec![1.0, 0.0],
            vec![0.0, 0.01],
        ];
        let a = analyze_redundancy(&s).unwrap();
        let frac = a.variance_fraction(1);
        assert!(frac > 0.99, "top eigenvalue should dominate, got {frac}");
        let frac_all = a.variance_fraction(2);
        assert!((frac_all - 1.0).abs() < 1e-9);
    }

    #[test]
    fn redundant_pairs_identifies_proportional_objectives() {
        let s = vec![
            vec![1.0, 2.0, 3.0],
            vec![2.0, 4.0, 6.0],
            vec![0.0, 0.0, 1.0],
        ];
        let a = analyze_redundancy(&s).unwrap();
        let pairs = a.redundant_pairs(0.99);
        assert_eq!(pairs.len(), 1);
        assert_eq!(pairs[0].0, 0);
        assert_eq!(pairs[0].1, 1);
    }

    #[test]
    fn trace_matches_gram_diagonal() {
        let s = vec![
            vec![3.0, 4.0],
            vec![1.0, 0.0],
        ];
        let a = analyze_redundancy(&s).unwrap();
        assert!((a.trace() - 26.0).abs() < 1e-9, "trace = {}", a.trace());
    }

    #[test]
    fn empty_input_returns_none() {
        assert!(analyze_redundancy(&[]).is_none());
        assert!(analyze_redundancy(&[vec![]]).is_none());
    }

    #[test]
    fn mismatched_row_lengths_returns_none() {
        let s = vec![vec![1.0, 2.0], vec![3.0]];
        assert!(analyze_redundancy(&s).is_none());
    }

    #[test]
    fn single_objective_single_point() {
        let s = vec![vec![5.0]];
        let a = analyze_redundancy(&s).unwrap();
        assert_eq!(a.effective_dimension(0.01), 1);
        assert_eq!(a.pareto_dimension_bound(1e-6), 0);
    }

    #[test]
    fn zero_sensitivity_vector() {
        let s = vec![
            vec![1.0, 2.0, 3.0],
            vec![0.0, 0.0, 0.0],
        ];
        let a = analyze_redundancy(&s).unwrap();
        assert_eq!(a.effective_dimension(0.01), 1);
    }

    #[test]
    fn jacobi_eigenvalues_identity() {
        let id = vec![1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0];
        let ev = symmetric_eigenvalues(&id, 3);
        assert_eq!(ev.len(), 3);
        for &e in &ev {
            assert!((e - 1.0).abs() < 1e-9, "eigenvalue = {e}");
        }
    }

    #[test]
    fn jacobi_eigenvalues_known_2x2() {
        let a = vec![2.0, 1.0, 1.0, 2.0];
        let ev = symmetric_eigenvalues(&a, 2);
        assert!((ev[0] - 3.0).abs() < 1e-9, "ev[0] = {}", ev[0]);
        assert!((ev[1] - 1.0).abs() < 1e-9, "ev[1] = {}", ev[1]);
    }

    #[test]
    fn jacobi_eigenvalues_5x5_trace_preserved() {
        let mut mat = vec![0.0; 25];
        let rows: Vec<Vec<f64>> = vec![
            vec![1.0, 2.0, 3.0],
            vec![4.0, 5.0, 6.0],
            vec![7.0, 8.0, 9.0],
            vec![0.5, 1.5, 2.5],
            vec![3.0, 1.0, 4.0],
        ];
        for i in 0..5 {
            for j in 0..5 {
                let dot: f64 = (0..3).map(|k| rows[i][k] * rows[j][k]).sum();
                mat[i * 5 + j] = dot;
            }
        }
        let trace: f64 = (0..5).map(|i| mat[i * 5 + i]).sum();
        let ev = symmetric_eigenvalues(&mat, 5);
        let ev_sum: f64 = ev.iter().sum();
        assert!(
            (ev_sum - trace).abs() < 1e-6,
            "eigenvalue sum = {ev_sum}, trace = {trace}"
        );
    }

    #[test]
    fn finite_difference_jacobian_linear() {
        let f0 = |x: &[f64]| x[0] + 2.0 * x[1];
        let f1 = |x: &[f64]| 3.0 * x[0] - x[1];
        let objectives = [f0, f1];
        let mu = vec![1.0, 1.0];
        let jac = finite_difference_jacobian(&mu, &objectives, 1e-7);
        assert!((jac[0][0] - 1.0).abs() < 1e-4);
        assert!((jac[0][1] - 2.0).abs() < 1e-4);
        assert!((jac[1][0] - 3.0).abs() < 1e-4);
        assert!((jac[1][1] - (-1.0)).abs() < 1e-4);
    }

    #[test]
    fn finite_difference_jacobian_quadratic() {
        let f0 = |x: &[f64]| x[0] * x[0] + x[1] * x[1];
        let objectives = [f0];
        let mu = vec![3.0, 4.0];
        let jac = finite_difference_jacobian(&mu, &objectives, 1e-7);
        assert!((jac[0][0] - 6.0).abs() < 1e-3, "J[0][0] = {}", jac[0][0]);
        assert!((jac[0][1] - 8.0).abs() < 1e-3, "J[0][1] = {}", jac[0][1]);
    }

    #[test]
    fn nan_input_returns_none() {
        let s = vec![vec![1.0, f64::NAN, 3.0], vec![4.0, 5.0, 6.0]];
        assert!(analyze_redundancy(&s).is_none());
    }

    #[test]
    fn inf_input_returns_none() {
        let s = vec![vec![1.0, 2.0], vec![f64::INFINITY, 1.0]];
        assert!(analyze_redundancy(&s).is_none());
    }

    #[test]
    #[should_panic(expected = "eps must be positive")]
    fn finite_difference_jacobian_zero_eps_panics() {
        let f = |x: &[f64]| x[0];
        finite_difference_jacobian(&[1.0], &[f], 0.0);
    }
}