use crate::error::OptimizeError;
use scirs2_core::random::rngs::StdRng;
use scirs2_core::random::{Rng, SeedableRng};
/// Returns `true` if objective vector `a` Pareto-dominates `b` (minimization):
/// `a` is nowhere worse than `b` and strictly better in at least one objective.
///
/// Both slices must have the same length (checked in debug builds only).
pub fn dominates(a: &[f64], b: &[f64]) -> bool {
    debug_assert_eq!(a.len(), b.len(), "objective vectors must have equal length");
    // `a` loses immediately if it is strictly worse in any component...
    let worse_somewhere = a.iter().zip(b.iter()).any(|(ai, bi)| ai > bi);
    // ...and must improve on `b` in at least one component to dominate.
    let better_somewhere = a.iter().zip(b.iter()).any(|(ai, bi)| ai < bi);
    !worse_somewhere && better_somewhere
}
/// Fast non-dominated sorting (NSGA-II style).
///
/// Partitions `points` into successive Pareto fronts: front 0 holds the
/// indices of points dominated by nobody, front 1 the points dominated only
/// by front 0 members, and so on. Returns the fronts as index vectors into
/// `points`; an empty input yields an empty result.
///
/// Fix: the loop over the current front read `¤t_front` — an
/// HTML-entity-mangled `&current_front` (`&curren;` renders as `¤`) that does
/// not compile. Restored the intended borrow.
pub fn non_dominated_sort(points: &[Vec<f64>]) -> Vec<Vec<usize>> {
    let n = points.len();
    if n == 0 {
        return vec![];
    }
    // dominated_by_count[i]: how many points dominate point i.
    // dominates_list[i]: indices of the points that point i dominates.
    let mut dominated_by_count = vec![0usize; n];
    let mut dominates_list: Vec<Vec<usize>> = vec![vec![]; n];
    for i in 0..n {
        for j in 0..n {
            if i == j {
                continue;
            }
            if dominates(&points[i], &points[j]) {
                dominates_list[i].push(j);
            } else if dominates(&points[j], &points[i]) {
                dominated_by_count[i] += 1;
            }
        }
    }
    let mut fronts: Vec<Vec<usize>> = Vec::new();
    // The first front is every point with no dominators.
    let mut current_front: Vec<usize> = (0..n)
        .filter(|&i| dominated_by_count[i] == 0)
        .collect();
    while !current_front.is_empty() {
        let mut next_front: Vec<usize> = Vec::new();
        for &i in &current_front {
            // Conceptually remove front members; points whose dominator count
            // drops to zero form the next front.
            for &j in &dominates_list[i] {
                dominated_by_count[j] -= 1;
                if dominated_by_count[j] == 0 {
                    next_front.push(j);
                }
            }
        }
        fronts.push(current_front);
        current_front = next_front;
    }
    fronts
}
/// Exact hypervolume of a bi-objective (minimization) front with respect to
/// `reference_point` (must have exactly two components; checked in debug
/// builds). Points that do not strictly improve on the reference in both
/// objectives contribute nothing; an empty front yields 0.
pub fn hypervolume_2d(pareto_front: &[Vec<f64>], reference_point: &[f64]) -> f64 {
    if pareto_front.is_empty() {
        return 0.0;
    }
    debug_assert_eq!(reference_point.len(), 2);
    let ref_x = reference_point[0];
    let ref_y = reference_point[1];
    // Discard points outside the box defined by the reference point.
    let mut pts: Vec<(f64, f64)> = pareto_front
        .iter()
        .filter(|p| p[0] < ref_x && p[1] < ref_y)
        .map(|p| (p[0], p[1]))
        .collect();
    if pts.is_empty() {
        return 0.0;
    }
    // Sort by f1 ascending; a point is then on the 2-D staircase iff its f2
    // is a new running minimum.
    pts.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap_or(std::cmp::Ordering::Equal));
    let mut staircase: Vec<(f64, f64)> = Vec::new();
    let mut best_f2 = f64::INFINITY;
    for &(f1, f2) in &pts {
        if f2 < best_f2 {
            best_f2 = f2;
            staircase.push((f1, f2));
        }
    }
    // Sum the axis-aligned slab between each staircase point and its successor
    // (or the reference point for the last one), rightmost slab first.
    let mut volume = 0.0;
    for (i, &(x_start, y)) in staircase.iter().enumerate().rev() {
        let x_end = staircase.get(i + 1).map_or(ref_x, |&(next_x, _)| next_x);
        volume += (x_end - x_start) * (ref_y - y);
    }
    volume
}
/// Monte Carlo estimate of the hypervolume dominated by `pareto_front`
/// (minimization) relative to `reference_point`, for any number of objectives.
///
/// Samples `n_samples` points uniformly in the box spanned by the front's
/// component-wise minimum (ideal point) and the reference point, and returns
/// the box volume scaled by the fraction of samples that some front point
/// strictly dominates. `seed` makes the estimate reproducible.
pub fn hypervolume_mc(
    pareto_front: &[Vec<f64>],
    reference_point: &[f64],
    n_samples: usize,
    seed: u64,
) -> f64 {
    if pareto_front.is_empty() || n_samples == 0 {
        return 0.0;
    }
    let n_obj = reference_point.len();
    let mut rng = StdRng::seed_from_u64(seed);
    // Ideal point: component-wise minimum over the front.
    let mut ideal = vec![f64::INFINITY; n_obj];
    for pt in pareto_front {
        for i in 0..pt.len() {
            if pt[i] < ideal[i] {
                ideal[i] = pt[i];
            }
        }
    }
    let box_volume: f64 = (0..n_obj)
        .map(|i| (reference_point[i] - ideal[i]).max(0.0))
        .product();
    if box_volume == 0.0 {
        // Degenerate box (reference does not exceed the ideal point anywhere).
        return 0.0;
    }
    let mut dominated_count = 0usize;
    for _ in 0..n_samples {
        // Uniform sample inside the [ideal, reference] box.
        let sample: Vec<f64> = (0..n_obj)
            .map(|i| ideal[i] + rng.random::<f64>() * (reference_point[i] - ideal[i]))
            .collect();
        // A sample counts if any front point is strictly better in every objective.
        let hit = pareto_front
            .iter()
            .any(|front_pt| (0..n_obj).all(|j| front_pt[j] < sample[j]));
        if hit {
            dominated_count += 1;
        }
    }
    box_volume * (dominated_count as f64 / n_samples as f64)
}
/// Inverted generational distance: the mean Euclidean distance from each
/// point of `true_front` to its nearest neighbour in `approx_front`.
/// Lower is better (0 when the approximation covers the true front);
/// infinity if either front is empty.
pub fn igd(true_front: &[Vec<f64>], approx_front: &[Vec<f64>]) -> f64 {
    if true_front.is_empty() || approx_front.is_empty() {
        return f64::INFINITY;
    }
    let mut total = 0.0;
    for tp in true_front {
        let nearest = approx_front
            .iter()
            .map(|ap| euclidean_distance(tp, ap))
            .fold(f64::INFINITY, f64::min);
        total += nearest;
    }
    total / true_front.len() as f64
}
/// Generational distance: the mean Euclidean distance from each point of
/// `approx_front` to its nearest neighbour in `true_front` (the mirror image
/// of [`igd`]). Lower is better; infinity if either front is empty.
pub fn generational_distance(true_front: &[Vec<f64>], approx_front: &[Vec<f64>]) -> f64 {
    if true_front.is_empty() || approx_front.is_empty() {
        return f64::INFINITY;
    }
    let mut total = 0.0;
    for ap in approx_front {
        let nearest = true_front
            .iter()
            .map(|tp| euclidean_distance(ap, tp))
            .fold(f64::INFINITY, f64::min);
        total += nearest;
    }
    total / approx_front.len() as f64
}
/// Additive epsilon indicator: the smallest shift `eps` such that every true
/// front point is weakly dominated by some approximation point translated by
/// `-eps` in every objective. Zero for identical fronts, negative when the
/// approximation strictly improves on the true front, infinity if either
/// front is empty.
pub fn additive_epsilon_indicator(true_front: &[Vec<f64>], approx_front: &[Vec<f64>]) -> f64 {
    if true_front.is_empty() || approx_front.is_empty() {
        return f64::INFINITY;
    }
    let mut worst = f64::NEG_INFINITY;
    for tp in true_front {
        // Best achievable epsilon for this true point over all approx points.
        let mut best = f64::INFINITY;
        for ap in approx_front {
            // Epsilon needed for this particular pair: the largest per-objective gap.
            let mut eps = f64::NEG_INFINITY;
            for (a, t) in ap.iter().zip(tp.iter()) {
                eps = eps.max(a - t);
            }
            best = best.min(eps);
        }
        worst = worst.max(best);
    }
    worst
}
/// Spread metric: the coefficient of variation (std-dev / mean) of each front
/// point's nearest-neighbour distance. Near zero for uniformly distributed
/// fronts; returns 0 for fewer than two points or a degenerate (all-coincident)
/// front.
pub fn spread(pareto_front: &[Vec<f64>]) -> f64 {
    let n = pareto_front.len();
    if n < 2 {
        return 0.0;
    }
    // Distance from every point to its closest other point.
    let mut nn_dists = Vec::with_capacity(n);
    for (i, pi) in pareto_front.iter().enumerate() {
        let mut best = f64::INFINITY;
        for (j, pj) in pareto_front.iter().enumerate() {
            if j != i {
                best = best.min(euclidean_distance(pi, pj));
            }
        }
        nn_dists.push(best);
    }
    let mean = nn_dists.iter().sum::<f64>() / n as f64;
    if mean < f64::EPSILON {
        // All points (near-)coincident: avoid dividing by ~0.
        return 0.0;
    }
    let variance = nn_dists.iter().map(|d| (d - mean).powi(2)).sum::<f64>() / n as f64;
    variance.sqrt() / mean
}
/// Euclidean (L2) distance between two points given as slices; if the slices
/// differ in length, the extra components of the longer one are ignored.
fn euclidean_distance(a: &[f64], b: &[f64]) -> f64 {
    let mut sum_sq = 0.0;
    for (x, y) in a.iter().zip(b.iter()) {
        let diff = x - y;
        sum_sq += diff * diff;
    }
    sum_sq.sqrt()
}
/// Spacing metric: the standard deviation of each front point's
/// nearest-neighbour distance. Zero for a perfectly even front and for fronts
/// with fewer than two points; unlike [`spread`], not normalized by the mean.
pub fn spacing_metric(front: &[Vec<f64>]) -> f64 {
    let n = front.len();
    if n < 2 {
        return 0.0;
    }
    let dists: Vec<f64> = front
        .iter()
        .enumerate()
        .map(|(i, pi)| {
            front
                .iter()
                .enumerate()
                .filter(|&(j, _)| j != i)
                .map(|(_, pj)| euclidean_distance(pi, pj))
                .fold(f64::INFINITY, f64::min)
        })
        .collect();
    let mean = dists.iter().sum::<f64>() / n as f64;
    let variance = dists.iter().map(|d| (d - mean).powi(2)).sum::<f64>() / n as f64;
    variance.sqrt()
}
/// Delta (diversity) metric: combines the distances from the front's boundary
/// points to the supplied `true_extremes` with the dispersion of the
/// nearest-neighbour gaps. Lower is better (a well-spread front that reaches
/// the extremes scores near 0). Returns infinity for an empty front and 0 for
/// a single point; when `true_extremes` is empty the extreme terms drop out.
pub fn delta_metric(front: &[Vec<f64>], true_extremes: &[Vec<f64>]) -> f64 {
    let n = front.len();
    if n == 0 {
        return f64::INFINITY;
    }
    if n == 1 {
        return 0.0;
    }
    // Nearest-neighbour distance for every front member.
    let mut dists = Vec::with_capacity(n);
    for (i, pi) in front.iter().enumerate() {
        let mut nearest = f64::INFINITY;
        for (j, pj) in front.iter().enumerate() {
            if j != i {
                nearest = nearest.min(euclidean_distance(pi, pj));
            }
        }
        dists.push(nearest);
    }
    let d_bar = dists.iter().sum::<f64>() / n as f64;
    // d_f / d_l: distances from the first/last point (ordered by the first
    // objective) to the closest supplied true extreme.
    let (d_f, d_l) = if true_extremes.is_empty() {
        (0.0_f64, 0.0_f64)
    } else {
        let mut sorted_front = front.to_vec();
        sorted_front.sort_by(|a, b| {
            a[0].partial_cmp(&b[0])
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        let nearest_extreme = |p: &[f64]| {
            true_extremes
                .iter()
                .map(|e| euclidean_distance(p, e))
                .fold(f64::INFINITY, f64::min)
        };
        let df = nearest_extreme(&sorted_front[0]);
        let dl = nearest_extreme(&sorted_front[n - 1]);
        // Clamp so unreachable extremes cannot blow up the ratio.
        (df.min(1e10), dl.min(1e10))
    };
    let sum_diff: f64 = dists.iter().map(|d| (d - d_bar).abs()).sum();
    let denominator = d_f + d_l + (n as f64 - 1.0) * d_bar;
    if denominator < f64::EPSILON {
        // Fully degenerate front: every gap zero and extremes covered.
        0.0
    } else {
        (d_f + d_l + sum_diff) / denominator
    }
}
/// IGD+ indicator: like [`igd`], but each distance only counts the objectives
/// in which the approximation point is worse than the true point (see
/// [`igd_plus_dist`]), so an approximation that dominates the true front
/// scores exactly 0. Always `<=` plain IGD. Infinity if either front is empty.
pub fn igd_plus(true_front: &[Vec<f64>], approx_front: &[Vec<f64>]) -> f64 {
    if true_front.is_empty() || approx_front.is_empty() {
        return f64::INFINITY;
    }
    let mut total = 0.0;
    for tp in true_front {
        total += approx_front
            .iter()
            .map(|ap| igd_plus_dist(tp, ap))
            .fold(f64::INFINITY, f64::min);
    }
    total / true_front.len() as f64
}
/// One-sided (dominance-aware) distance used by IGD+: only the objectives
/// where the approximation point is worse (larger, under minimization) than
/// the true point contribute; improvements are clipped to zero.
fn igd_plus_dist(true_point: &[f64], approx_point: &[f64]) -> f64 {
    let mut acc = 0.0;
    for (p, q) in true_point.iter().zip(approx_point.iter()) {
        let shortfall = (q - p).max(0.0);
        acc += shortfall * shortfall;
    }
    acc.sqrt()
}
/// Scalarizing utility used by `r2_indicator` to reduce an objective vector
/// to a single value under a given weight vector and reference point.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum R2Utility {
/// Weighted Chebyshev scalarization: the largest weighted absolute
/// deviation from the reference point.
Tchebycheff,
/// Plain weighted sum of the reference-shifted objectives.
WeightedSum,
/// Penalty-based boundary intersection: distance along the weight
/// direction plus a theta-weighted perpendicular distance.
Pbi,
}
/// R2 indicator: for each weight vector, find the best (lowest) scalarized
/// utility any member of `approx_front` achieves, then average those best
/// values over all weight vectors. Lower means a better front; returns
/// infinity when the front or the weight set is empty.
pub fn r2_indicator(
    approx_front: &[Vec<f64>],
    weight_vectors: &[Vec<f64>],
    reference_point: &[f64],
    utility: R2Utility,
) -> f64 {
    if approx_front.is_empty() || weight_vectors.is_empty() {
        return f64::INFINITY;
    }
    let mut total = 0.0;
    for w in weight_vectors {
        let best = approx_front
            .iter()
            .map(|a| r2_utility_value(a, w, reference_point, utility))
            .fold(f64::INFINITY, f64::min);
        total += best;
    }
    total / weight_vectors.len() as f64
}
/// Scalarizes objective vector `f` under `weights`, relative to the reference
/// point, using the requested utility function.
fn r2_utility_value(
    f: &[f64],
    weights: &[f64],
    reference: &[f64],
    utility: R2Utility,
) -> f64 {
    match utility {
        // Weighted Chebyshev: the worst (largest) weighted absolute deviation.
        R2Utility::Tchebycheff => {
            let mut worst = f64::NEG_INFINITY;
            for ((fi, wi), zi) in f.iter().zip(weights.iter()).zip(reference.iter()) {
                worst = worst.max(wi * (fi - zi).abs());
            }
            worst
        }
        // Linear aggregation of the reference-shifted objectives.
        R2Utility::WeightedSum => {
            let mut total = 0.0;
            for ((fi, wi), zi) in f.iter().zip(weights.iter()).zip(reference.iter()) {
                total += wi * (fi - zi);
            }
            total
        }
        // Penalty-based boundary intersection: d1 is the length of the
        // projection of the shifted objectives onto the weight direction,
        // d2 the perpendicular residual, combined as d1 + theta * d2.
        R2Utility::Pbi => {
            let theta = 5.0_f64;
            let f_shifted: Vec<f64> = f
                .iter()
                .zip(reference.iter())
                .map(|(fi, zi)| fi - zi)
                .collect();
            let w_norm_sq: f64 = weights.iter().map(|w| w * w).sum();
            let dot: f64 = f_shifted
                .iter()
                .zip(weights.iter())
                .map(|(a, b)| a * b)
                .sum();
            // Guard against a degenerate (all-zero) weight vector.
            let d1 = if w_norm_sq < 1e-14 {
                0.0
            } else {
                (dot / w_norm_sq.sqrt()).abs()
            };
            let w_proj: Vec<f64> = if w_norm_sq < 1e-14 {
                vec![0.0; weights.len()]
            } else {
                weights.iter().map(|w| dot * w / w_norm_sq).collect()
            };
            let d2 = f_shifted
                .iter()
                .zip(w_proj.iter())
                .map(|(fi, wp)| (fi - wp).powi(2))
                .sum::<f64>()
                .sqrt();
            d1 + theta * d2
        }
    }
}
/// Per-point hypervolume contribution: for each member of `front`, the total
/// hypervolume lost if that point were removed (clamped at zero).
///
/// Uses the exact 2-D routine when the reference point has two components and
/// the Monte Carlo estimator (`mc_samples`, `seed`) otherwise. Returns an
/// empty vector for an empty front.
pub fn hypervolume_contribution(
    front: &[Vec<f64>],
    reference_point: &[f64],
    mc_samples: usize,
    seed: u64,
) -> Vec<f64> {
    if front.is_empty() {
        return vec![];
    }
    let n_obj = reference_point.len();
    // One helper so the total and all leave-one-out volumes use the same estimator.
    let hv = |pts: &[Vec<f64>]| -> f64 {
        if pts.is_empty() {
            0.0
        } else if n_obj == 2 {
            hypervolume_2d(pts, reference_point)
        } else {
            hypervolume_mc(pts, reference_point, mc_samples, seed)
        }
    };
    let total_hv = hv(front);
    (0..front.len())
        .map(|i| {
            // Front with point i removed.
            let reduced: Vec<Vec<f64>> = front
                .iter()
                .enumerate()
                .filter(|&(j, _)| j != i)
                .map(|(_, p)| p.clone())
                .collect();
            // MC noise can make the difference slightly negative; clamp to 0.
            (total_hv - hv(&reduced)).max(0.0)
        })
        .collect()
}
#[cfg(test)]
mod tests {
    use super::*;

    // --- Pareto dominance ---

    #[test]
    fn test_dominates_basic_2d() {
        assert!(dominates(&[1.0, 1.0], &[2.0, 2.0]));
        assert!(dominates(&[1.0, 2.0], &[2.0, 2.0]));
        assert!(!dominates(&[1.0, 3.0], &[2.0, 2.0]));
        assert!(!dominates(&[2.0, 2.0], &[2.0, 2.0]));
    }

    #[test]
    fn test_dominates_identical_points() {
        assert!(!dominates(&[1.0, 2.0, 3.0], &[1.0, 2.0, 3.0]));
    }

    #[test]
    fn test_dominates_three_objectives() {
        assert!(dominates(&[1.0, 1.0, 1.0], &[2.0, 2.0, 2.0]));
        assert!(!dominates(&[1.0, 2.0, 1.0], &[1.0, 1.0, 2.0]));
    }

    // --- Non-dominated sorting ---

    #[test]
    fn test_non_dominated_sort_trivial() {
        let pts = vec![vec![1.0, 2.0], vec![2.0, 1.0], vec![3.0, 3.0]];
        let fronts = non_dominated_sort(&pts);
        assert_eq!(fronts.len(), 2);
        assert_eq!(fronts[0].len(), 2);
        assert_eq!(fronts[1].len(), 1);
        assert!(fronts[1].contains(&2));
    }

    #[test]
    fn test_non_dominated_sort_all_non_dominated() {
        let pts = vec![vec![1.0, 3.0], vec![2.0, 2.0], vec![3.0, 1.0]];
        let fronts = non_dominated_sort(&pts);
        assert_eq!(fronts.len(), 1);
        assert_eq!(fronts[0].len(), 3);
    }

    #[test]
    fn test_non_dominated_sort_chain() {
        let pts = vec![vec![1.0, 1.0], vec![2.0, 2.0], vec![3.0, 3.0]];
        let fronts = non_dominated_sort(&pts);
        assert_eq!(fronts.len(), 3);
        assert_eq!(fronts[0], vec![0]);
        assert_eq!(fronts[1], vec![1]);
        assert_eq!(fronts[2], vec![2]);
    }

    #[test]
    fn test_non_dominated_sort_empty() {
        let fronts = non_dominated_sort(&[]);
        assert!(fronts.is_empty());
    }

    // --- Hypervolume (exact 2-D and Monte Carlo) ---

    #[test]
    fn test_hypervolume_2d_single_point() {
        let front = vec![vec![1.0, 1.0]];
        let hv = hypervolume_2d(&front, &[2.0, 2.0]);
        assert!((hv - 1.0).abs() < 1e-10, "Expected 1.0, got {hv}");
    }

    #[test]
    fn test_hypervolume_2d_two_points() {
        let front = vec![vec![0.0, 2.0], vec![2.0, 0.0]];
        let hv = hypervolume_2d(&front, &[3.0, 3.0]);
        assert!((hv - 5.0).abs() < 1e-10, "Expected 5.0, got {hv}");
    }

    #[test]
    fn test_hypervolume_2d_empty_front() {
        let hv = hypervolume_2d(&[], &[2.0, 2.0]);
        assert_eq!(hv, 0.0);
    }

    #[test]
    fn test_hypervolume_2d_point_outside_reference() {
        let front = vec![vec![5.0, 5.0]];
        let hv = hypervolume_2d(&front, &[2.0, 2.0]);
        assert_eq!(hv, 0.0);
    }

    #[test]
    fn test_hypervolume_2d_with_dominated_point() {
        let front = vec![vec![1.0, 1.0], vec![2.0, 2.0]];
        let hv = hypervolume_2d(&front, &[3.0, 3.0]);
        assert!((hv - 4.0).abs() < 1e-10, "Expected 4.0, got {hv}");
    }

    #[test]
    fn test_hypervolume_mc_approximation() {
        let front = vec![vec![1.0, 1.0]];
        let hv = hypervolume_mc(&front, &[2.0, 2.0], 100_000, 42);
        assert!((hv - 1.0).abs() < 0.05, "MC approx too far from 1.0: {hv}");
    }

    #[test]
    fn test_hypervolume_mc_empty() {
        let hv = hypervolume_mc(&[], &[2.0, 2.0], 1000, 42);
        assert_eq!(hv, 0.0);
    }

    // --- IGD / GD ---

    #[test]
    fn test_igd_perfect_coverage() {
        let tf = vec![vec![0.0, 1.0], vec![0.5, 0.5], vec![1.0, 0.0]];
        let af = tf.clone();
        let val = igd(&tf, &af);
        assert!(val < 1e-10, "IGD of identical fronts: {val}");
    }

    #[test]
    fn test_igd_offset_front() {
        let tf = vec![vec![0.0, 0.0]];
        let af = vec![vec![0.1, 0.1]];
        let val = igd(&tf, &af);
        let expected = (0.1f64.powi(2) + 0.1f64.powi(2)).sqrt();
        assert!((val - expected).abs() < 1e-10);
    }

    #[test]
    fn test_igd_empty_inputs() {
        assert_eq!(igd(&[], &[vec![1.0, 1.0]]), f64::INFINITY);
        assert_eq!(igd(&[vec![1.0, 1.0]], &[]), f64::INFINITY);
    }

    #[test]
    fn test_gd_perfect_coverage() {
        let tf = vec![vec![0.0, 1.0], vec![1.0, 0.0]];
        let af = tf.clone();
        let val = generational_distance(&tf, &af);
        assert!(val < 1e-10, "GD of identical fronts: {val}");
    }

    // --- Epsilon indicator ---

    #[test]
    fn test_epsilon_perfect_coverage() {
        let tf = vec![vec![0.0, 1.0], vec![1.0, 0.0]];
        let af = tf.clone();
        let eps = additive_epsilon_indicator(&tf, &af);
        assert!(eps.abs() < 1e-10, "Epsilon for identical fronts: {eps}");
    }

    #[test]
    fn test_epsilon_offset_front() {
        let tf = vec![vec![1.0, 1.0]];
        let af = vec![vec![0.5, 1.0]];
        let eps = additive_epsilon_indicator(&tf, &af);
        assert!(eps <= 0.0, "approx is better: eps should be <= 0, got {eps}");
    }

    #[test]
    fn test_epsilon_worse_front() {
        let tf = vec![vec![0.0, 0.0]];
        let af = vec![vec![1.0, 1.0]];
        let eps = additive_epsilon_indicator(&tf, &af);
        assert!((eps - 1.0).abs() < 1e-10, "Expected eps=1.0, got {eps}");
    }

    // --- Spread / spacing / delta ---

    #[test]
    fn test_spread_uniform_distribution() {
        let front: Vec<Vec<f64>> = (0..5)
            .map(|i| {
                let f1 = i as f64 * 0.25;
                vec![f1, 1.0 - f1]
            })
            .collect();
        let sp = spread(&front);
        assert!(sp < 0.01, "Uniform front should have near-zero spread: {sp}");
    }

    #[test]
    fn test_spread_single_point() {
        let front = vec![vec![0.5, 0.5]];
        assert_eq!(spread(&front), 0.0);
    }

    #[test]
    fn test_spacing_metric_uniform() {
        let front: Vec<Vec<f64>> = (0..5)
            .map(|i| vec![i as f64 * 0.25, 1.0 - i as f64 * 0.25])
            .collect();
        let sp = spacing_metric(&front);
        assert!(sp < 1e-10, "uniform front spacing should be 0, got {sp}");
    }

    #[test]
    fn test_spacing_metric_single_point() {
        assert_eq!(spacing_metric(&[vec![0.5, 0.5]]), 0.0);
    }

    #[test]
    fn test_spacing_metric_non_uniform() {
        let clustered = vec![
            vec![0.0, 1.0],
            vec![0.1, 0.9],
            vec![0.9, 0.1],
            vec![1.0, 0.0],
        ];
        let sp = spacing_metric(&clustered);
        assert!(sp > 0.0, "clustered front should have nonzero spacing");
    }

    #[test]
    fn test_delta_metric_uniform_no_extremes() {
        let front = vec![
            vec![0.0, 1.0],
            vec![0.5, 0.5],
            vec![1.0, 0.0],
        ];
        let d = delta_metric(&front, &[]);
        assert!(d >= 0.0, "delta metric should be non-negative, got {d}");
    }

    #[test]
    fn test_delta_metric_empty() {
        assert_eq!(delta_metric(&[], &[]), f64::INFINITY);
    }

    #[test]
    fn test_delta_metric_single_point() {
        assert_eq!(delta_metric(&[vec![0.5, 0.5]], &[]), 0.0);
    }

    #[test]
    fn test_delta_metric_with_true_extremes() {
        let front = vec![vec![0.0, 1.0], vec![0.5, 0.5], vec![1.0, 0.0]];
        let true_extremes = vec![vec![0.0, 1.0], vec![1.0, 0.0]];
        let d = delta_metric(&front, &true_extremes);
        assert!(d >= 0.0);
        assert!(d < 0.5, "well-spread front with covered extremes: {d}");
    }

    // --- IGD+ ---

    #[test]
    fn test_igd_plus_identical_fronts() {
        let tf = vec![vec![0.0, 1.0], vec![0.5, 0.5], vec![1.0, 0.0]];
        let af = tf.clone();
        let val = igd_plus(&tf, &af);
        assert!(val < 1e-10, "IGD+ of identical fronts should be 0, got {val}");
    }

    #[test]
    fn test_igd_plus_empty_inputs() {
        assert_eq!(igd_plus(&[], &[vec![1.0]]), f64::INFINITY);
        assert_eq!(igd_plus(&[vec![1.0]], &[]), f64::INFINITY);
    }

    #[test]
    fn test_igd_plus_dominating_approx() {
        let tf = vec![vec![1.0, 1.0]];
        let af = vec![vec![0.5, 0.5]];
        let val = igd_plus(&tf, &af);
        assert!(val < 1e-10, "dominating approx should give IGD+=0, got {val}");
    }

    #[test]
    fn test_igd_plus_worse_approx() {
        let tf = vec![vec![0.0, 0.0]];
        let af = vec![vec![1.0, 1.0]];
        let val = igd_plus(&tf, &af);
        let expected = (1.0f64.powi(2) + 1.0f64.powi(2)).sqrt();
        assert!((val - expected).abs() < 1e-10, "Expected {expected}, got {val}");
    }

    #[test]
    fn test_igd_plus_less_or_equal_to_igd() {
        let tf = vec![vec![0.0, 1.0], vec![0.5, 0.5], vec![1.0, 0.0]];
        let af = vec![vec![0.2, 0.8], vec![0.6, 0.4], vec![0.9, 0.1]];
        let igdp = igd_plus(&tf, &af);
        let igd_val = igd(&tf, &af);
        assert!(igdp <= igd_val + 1e-10, "IGD+={igdp} should be <= IGD={igd_val}");
    }

    // --- R2 indicator ---

    #[test]
    fn test_r2_tchebycheff_basic() {
        let front = vec![vec![0.0, 1.0], vec![0.5, 0.5], vec![1.0, 0.0]];
        let weights = vec![vec![1.0, 0.0], vec![0.5, 0.5], vec![0.0, 1.0]];
        let z_star = vec![0.0, 0.0];
        let r2 = r2_indicator(&front, &weights, &z_star, R2Utility::Tchebycheff);
        assert!(r2.is_finite(), "R2 should be finite");
        assert!(r2 >= 0.0, "R2 should be non-negative for this case");
    }

    #[test]
    fn test_r2_weighted_sum_basic() {
        let front = vec![vec![0.5, 0.5]];
        let weights = vec![vec![0.5, 0.5]];
        let z_star = vec![0.0, 0.0];
        let r2 = r2_indicator(&front, &weights, &z_star, R2Utility::WeightedSum);
        assert!((r2 - 0.5).abs() < 1e-10, "Expected 0.5, got {r2}");
    }

    #[test]
    fn test_r2_pbi_basic() {
        let front = vec![vec![0.5, 0.5], vec![0.0, 1.0], vec![1.0, 0.0]];
        let weights = vec![vec![0.5, 0.5]];
        let z_star = vec![0.0, 0.0];
        let r2 = r2_indicator(&front, &weights, &z_star, R2Utility::Pbi);
        assert!(r2.is_finite());
    }

    #[test]
    fn test_r2_empty_inputs() {
        assert_eq!(
            r2_indicator(&[], &[vec![0.5, 0.5]], &[0.0, 0.0], R2Utility::Tchebycheff),
            f64::INFINITY
        );
        assert_eq!(
            r2_indicator(&[vec![0.5, 0.5]], &[], &[0.0, 0.0], R2Utility::Tchebycheff),
            f64::INFINITY
        );
    }

    #[test]
    fn test_r2_better_front_lower_r2() {
        let weights = vec![vec![1.0, 0.0], vec![0.5, 0.5], vec![0.0, 1.0]];
        let z_star = vec![0.0, 0.0];
        let good_front = vec![vec![0.1, 0.9], vec![0.5, 0.5], vec![0.9, 0.1]];
        let bad_front = vec![vec![0.5, 2.0], vec![1.5, 1.5], vec![2.0, 0.5]];
        let r2_good = r2_indicator(&good_front, &weights, &z_star, R2Utility::Tchebycheff);
        let r2_bad = r2_indicator(&bad_front, &weights, &z_star, R2Utility::Tchebycheff);
        assert!(r2_good < r2_bad, "Good front R2={r2_good} should be < bad front R2={r2_bad}");
    }

    // --- Hypervolume contributions ---

    #[test]
    fn test_hv_contribution_2d_basic() {
        let front = vec![vec![0.0, 1.0], vec![0.5, 0.5], vec![1.0, 0.0]];
        let ref_pt = vec![2.0, 2.0];
        let hvc = hypervolume_contribution(&front, &ref_pt, 10_000, 42);
        assert_eq!(hvc.len(), 3);
        for &v in &hvc {
            assert!(v >= 0.0, "HVC must be non-negative");
        }
        let total_hvc: f64 = hvc.iter().sum();
        let total_hv = hypervolume_2d(&front, &ref_pt);
        assert!(total_hvc <= total_hv + 1e-9, "Sum of HVC should not exceed total HV");
    }

    #[test]
    fn test_hv_contribution_empty_front() {
        let hvc = hypervolume_contribution(&[], &[2.0, 2.0], 1000, 42);
        assert!(hvc.is_empty());
    }

    #[test]
    fn test_hv_contribution_extreme_points_higher() {
        let front = vec![vec![0.0, 2.0], vec![1.0, 1.0], vec![2.0, 0.0]];
        let ref_pt = vec![3.0, 3.0];
        let hvc = hypervolume_contribution(&front, &ref_pt, 10_000, 42);
        assert!(hvc[0] > 0.0, "Extreme point 0 should have positive HVC");
        assert!(hvc[2] > 0.0, "Extreme point 2 should have positive HVC");
    }
}