#![warn(missing_docs)]
use std::cmp::Ordering;
use thiserror::Error;
pub mod sensitivity;
/// Optimization sense for one objective dimension.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum Direction {
/// Larger values are better.
Maximize,
/// Smaller values are better.
Minimize,
}
/// A single frontier entry: objective values plus a caller-supplied payload.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Point<V> {
/// Objective values, one per dimension, in the frontier's direction order.
pub values: Vec<f64>,
/// Arbitrary payload carried alongside the values.
pub data: V,
}
/// Summary statistics (mean, population std, min, max) for one objective
/// dimension; all zero for an empty sample.
#[derive(Debug, Clone, Copy, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct NormalizationStats {
    /// Arithmetic mean of the observed values.
    pub mean: f64,
    /// Population standard deviation (divides by n, not n - 1).
    pub std: f64,
    /// Smallest observed value.
    pub min: f64,
    /// Largest observed value.
    pub max: f64,
}
impl NormalizationStats {
    /// Computes statistics over `values`; an empty slice yields the all-zero
    /// default.
    pub fn from_values(values: &[f64]) -> Self {
        if values.is_empty() {
            return Self::default();
        }
        let count = values.len() as f64;
        let mean = values.iter().sum::<f64>() / count;
        // Population variance: mean squared deviation from the mean.
        let variance = values.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / count;
        let (mut lo, mut hi) = (f64::INFINITY, f64::NEG_INFINITY);
        for &v in values {
            lo = lo.min(v);
            hi = hi.max(v);
        }
        Self {
            mean,
            std: variance.sqrt(),
            min: lo,
            max: hi,
        }
    }
}
/// A set of mutually non-dominated points under per-dimension optimization
/// directions, with an epsilon tolerance applied to dominance comparisons.
///
/// Per-dimension [`NormalizationStats`] are kept in sync as points are
/// inserted or removed.
#[derive(Debug, Clone)]
pub struct ParetoFrontier<V> {
// Current non-dominated points, in insertion order of survivors.
points: Vec<Point<V>>,
// Optimization sense per dimension; length fixes the dimensionality.
directions: Vec<Direction>,
// Per-dimension stats over `points`; same length as `directions`.
stats: Vec<NormalizationStats>,
// Per-dimension display labels; same length as `directions`.
labels: Vec<String>,
// Tolerance used in dominance and degenerate-range checks (default 1e-9).
eps: f64,
}
/// Errors returned by fallible frontier constructors such as
/// `ParetoFrontier::try_new`.
#[derive(Debug, Clone, PartialEq, Eq, Error)]
pub enum FrontierError {
/// The input point set was empty.
#[error("no points provided")]
Empty,
/// Points had zero dimensions or mismatched lengths.
#[error("points must be non-empty and have consistent dimensionality")]
InconsistentDimensions,
/// A coordinate was NaN or infinite.
#[error("point value at [{point_idx}][{dim_idx}] is not finite")]
NonFinite {
/// Index of the offending point.
point_idx: usize,
/// Index of the offending dimension within that point.
dim_idx: usize,
},
}
impl<V> ParetoFrontier<V> {
/// Creates an empty frontier with one optimization direction per objective.
///
/// Labels default to the dimension index rendered as a string; the dominance
/// tolerance defaults to `1e-9`.
pub fn new(directions: Vec<Direction>) -> Self {
    let dim = directions.len();
    let default_labels = (0..dim).map(|d| d.to_string()).collect();
    Self {
        points: Vec::new(),
        stats: vec![NormalizationStats::default(); dim],
        labels: default_labels,
        eps: 1e-9,
        directions,
    }
}
/// Replaces the per-dimension labels (builder style).
///
/// # Panics
/// Panics when `labels.len()` differs from the number of directions.
pub fn with_labels(mut self, labels: Vec<String>) -> Self {
    let expected = self.directions.len();
    assert_eq!(
        labels.len(),
        expected,
        "labels len ({}) must match directions len ({})",
        labels.len(),
        expected,
    );
    self.labels = labels;
    self
}
/// Per-dimension labels.
pub fn labels(&self) -> &[String] {
    self.labels.as_slice()
}
/// Per-dimension optimization directions.
pub fn directions(&self) -> &[Direction] {
    self.directions.as_slice()
}
/// Sets the dominance tolerance (builder style).
pub fn with_eps(mut self, eps: f64) -> Self {
    self.eps = eps;
    self
}
/// Current dominance tolerance.
pub fn eps(&self) -> f64 {
    self.eps
}
/// Per-dimension normalization statistics over the current points.
pub fn stats(&self) -> &[NormalizationStats] {
    self.stats.as_slice()
}
/// Number of points currently on the frontier.
pub fn len(&self) -> usize {
    self.points.len()
}
/// True when the frontier holds no points.
pub fn is_empty(&self) -> bool {
    self.points.is_empty()
}
/// Read-only view of the frontier's points.
pub fn points(&self) -> &[Point<V>] {
    self.points.as_slice()
}
/// Mutable view of the points. Mutating `values` can break the
/// non-domination invariant; callers are trusted not to do that.
pub fn points_mut(&mut self) -> &mut [Point<V>] {
    self.points.as_mut_slice()
}
/// Attempts to insert a candidate point.
///
/// Returns `false` (frontier unchanged) when an existing point eps-dominates
/// the candidate; otherwise evicts every point the candidate dominates,
/// stores it, refreshes the stats, and returns `true`.
///
/// # Panics
/// Panics when the value count differs from the dimension count or any value
/// is NaN/infinite.
pub fn push(&mut self, values: Vec<f64>, data: V) -> bool {
    assert_eq!(
        values.len(),
        self.directions.len(),
        "values len ({}) must match directions len ({})",
        values.len(),
        self.directions.len(),
    );
    assert!(
        values.iter().all(|v| v.is_finite()),
        "all values must be finite (no NaN or Inf)",
    );
    let dominated = self
        .points
        .iter()
        .any(|p| dominates(&self.directions, self.eps, &p.values, &values));
    if dominated {
        return false;
    }
    let (dirs, tol) = (&self.directions, self.eps);
    self.points
        .retain(|p| !dominates(dirs, tol, &values, &p.values));
    self.points.push(Point { values, data });
    self.update_stats();
    true
}
/// Recomputes the per-dimension normalization stats from the current points.
fn update_stats(&mut self) {
    let points = &self.points;
    for (dim, slot) in self.stats.iter_mut().enumerate() {
        let column: Vec<f64> = points.iter().map(|p| p.values[dim]).collect();
        *slot = NormalizationStats::from_values(&column);
    }
}
pub fn scalar_score(&self, point_idx: usize, weights: &[f64]) -> f64 {
assert!(
point_idx < self.points.len(),
"point_idx ({point_idx}) out of bounds (len={})",
self.points.len(),
);
assert!(
weights.len() <= self.directions.len(),
"weights len ({}) exceeds directions len ({})",
weights.len(),
self.directions.len(),
);
let p = &self.points[point_idx];
let mut score = 0.0;
let mut w_sum = 0.0;
for (i, &w) in weights.iter().enumerate() {
let val = p.values[i];
let stat = self.stats[i];
let range = stat.max - stat.min;
let norm = if range > self.eps {
(val - stat.min) / range
} else {
0.5
};
let oriented = match self.directions[i] {
Direction::Maximize => norm,
Direction::Minimize => 1.0 - norm,
};
score += oriented * w;
w_sum += w;
}
if w_sum > 0.0 {
score / w_sum
} else {
0.0
}
}
/// Weighted scalarization like [`Self::scalar_score`], but normalizing
/// against caller-supplied fixed `(lo, hi)` bounds instead of the frontier's
/// observed ranges, so scores stay stable as the frontier evolves.
///
/// Only the first `min(values, weights, bounds)` dimensions are scored.
/// Normalized values are clamped to `[0, 1]`. Returns 0.0 when the effective
/// weight sum is not positive.
///
/// # Panics
/// Panics when `point_idx` is out of bounds.
pub fn scalar_score_static(
    &self,
    point_idx: usize,
    weights: &[f64],
    bounds: &[(f64, f64)],
) -> f64 {
    assert!(
        point_idx < self.points.len(),
        "point_idx ({point_idx}) out of bounds (len={})",
        self.points.len(),
    );
    let point = &self.points[point_idx];
    let usable = point.values.len().min(weights.len()).min(bounds.len());
    let mut weighted_total = 0.0;
    let mut weight_total = 0.0;
    for dim in 0..usable {
        let (lo, hi) = bounds[dim];
        let span = hi - lo;
        // NOTE(review): a degenerate bound range maps to 0.0 here, while
        // `scalar_score` uses 0.5 for a degenerate observed range — confirm
        // the asymmetry is intended.
        let unit = if span.abs() > self.eps {
            ((point.values[dim] - lo) / span).clamp(0.0, 1.0)
        } else {
            0.0
        };
        let goodness = match self.directions[dim] {
            Direction::Maximize => unit,
            Direction::Minimize => 1.0 - unit,
        };
        weighted_total += goodness * weights[dim];
        weight_total += weights[dim];
    }
    if weight_total > 0.0 {
        weighted_total / weight_total
    } else {
        0.0
    }
}
/// Index of the point with the highest [`Self::scalar_score`] under
/// `weights`, or `None` when the frontier is empty.
///
/// Ties resolve to the last maximal point, matching `Iterator::max_by`.
pub fn best_index(&self, weights: &[f64]) -> Option<usize> {
    if self.is_empty() {
        return None;
    }
    // Score each point once up front; the previous code re-evaluated
    // `scalar_score` (O(d) plus asserts) inside every comparison.
    let scores: Vec<f64> = (0..self.points.len())
        .map(|i| self.scalar_score(i, weights))
        .collect();
    (0..self.points.len())
        .max_by(|&a, &b| scores[a].partial_cmp(&scores[b]).unwrap_or(Ordering::Equal))
}
/// Whether value vector `a` eps-dominates `b` under this frontier's
/// directions and tolerance. Slice lengths must match the dimension count
/// (checked only by `dominates`'s debug asserts).
pub fn point_dominates(&self, a: &[f64], b: &[f64]) -> bool {
dominates(&self.directions, self.eps, a, b)
}
/// NSGA-II style crowding distances for the current points, in point order.
///
/// For each dimension the points are sorted by value; every point tied with
/// that dimension's min or max (within `eps`) becomes infinite, and interior
/// points accumulate the normalized gap between their sorted neighbors.
/// Returns an empty vector for an empty frontier and `[inf]` for a single
/// point.
pub fn crowding_distances(&self) -> Vec<f64> {
let n = self.points.len();
if n == 0 {
return Vec::new();
}
if n == 1 {
return vec![f64::INFINITY];
}
let mut distances = vec![0.0; n];
let mut indices: Vec<usize> = (0..n).collect();
for i in 0..self.directions.len() {
// Sort point indices by this dimension's value. Values are finite by
// `push`'s assertion, so the Equal fallback is never hit in practice.
indices.sort_by(|&a, &b| {
let av = self.points[a].values[i];
let bv = self.points[b].values[i];
av.partial_cmp(&bv).unwrap_or(Ordering::Equal)
});
let min_val = self.points[indices[0]].values[i];
let max_val = self.points[indices[n - 1]].values[i];
let range = max_val - min_val;
// Boundary points — and anything tied with a boundary within eps — get
// infinite distance so diversity selection always prefers them.
let mut lo = 0;
while lo < n && (self.points[indices[lo]].values[i] - min_val).abs() <= self.eps {
distances[indices[lo]] = f64::INFINITY;
lo += 1;
}
let mut hi = n - 1;
while hi > 0 && (self.points[indices[hi]].values[i] - max_val).abs() <= self.eps {
distances[indices[hi]] = f64::INFINITY;
hi -= 1;
}
if range > self.eps {
// Interior points accumulate the gap between their sorted neighbors,
// normalized by this dimension's full range.
for window in indices.windows(3) {
let prev = window[0];
let curr = window[1];
let next = window[2];
if distances[curr].is_infinite() {
continue;
}
let val_prev = self.points[prev].values[i];
let val_next = self.points[next].values[i];
distances[curr] += (val_next - val_prev) / range;
}
}
}
distances
}
/// Component-wise best value over all points (respecting each dimension's
/// direction), or `None` for an empty frontier.
pub fn ideal_point(&self) -> Option<Vec<f64>> {
    if self.is_empty() {
        return None;
    }
    let best_of = |dim: usize| {
        let column = self.points.iter().map(|p| p.values[dim]);
        match self.directions[dim] {
            Direction::Maximize => column.fold(f64::NEG_INFINITY, f64::max),
            Direction::Minimize => column.fold(f64::INFINITY, f64::min),
        }
    };
    Some((0..self.directions.len()).map(best_of).collect())
}
/// Component-wise worst value over all points (respecting each dimension's
/// direction), or `None` for an empty frontier.
pub fn nadir_point(&self) -> Option<Vec<f64>> {
    if self.is_empty() {
        return None;
    }
    let worst_of = |dim: usize| {
        let column = self.points.iter().map(|p| p.values[dim]);
        match self.directions[dim] {
            Direction::Maximize => column.fold(f64::INFINITY, f64::min),
            Direction::Minimize => column.fold(f64::NEG_INFINITY, f64::max),
        }
    };
    Some((0..self.directions.len()).map(worst_of).collect())
}
/// The point's values rescaled so 0.0 sits at the nadir and 1.0 at the ideal
/// in every dimension; dimensions whose ideal-nadir range is below `eps` map
/// to 0.5. Returns `None` for an empty frontier.
///
/// # Panics
/// Panics when the frontier is non-empty and `point_idx` is out of bounds.
pub fn normalized_values(&self, point_idx: usize) -> Option<Vec<f64>> {
    if self.is_empty() {
        return None;
    }
    assert!(
        point_idx < self.points.len(),
        "point_idx ({point_idx}) out of bounds (len={})",
        self.points.len(),
    );
    let ideal = self.ideal_point()?;
    let nadir = self.nadir_point()?;
    let point = &self.points[point_idx];
    let normalized = (0..self.directions.len())
        .map(|dim| {
            let span = (ideal[dim] - nadir[dim]).abs();
            if span < self.eps {
                0.5
            } else {
                match self.directions[dim] {
                    Direction::Maximize => (point.values[dim] - nadir[dim]) / span,
                    Direction::Minimize => (nadir[dim] - point.values[dim]) / span,
                }
            }
        })
        .collect();
    Some(normalized)
}
/// Suggests a hypervolume reference point slightly worse than the nadir:
/// every dimension is pushed `margin * range` beyond the nadir in the losing
/// direction. Returns `None` for an empty frontier.
pub fn suggest_ref_point(&self, margin: f64) -> Option<Vec<f64>> {
    if self.is_empty() {
        return None;
    }
    let ideal = self.ideal_point()?;
    let nadir = self.nadir_point()?;
    let reference = (0..self.directions.len())
        .map(|dim| {
            let offset = margin * (ideal[dim] - nadir[dim]).abs();
            match self.directions[dim] {
                Direction::Maximize => nadir[dim] - offset,
                Direction::Minimize => nadir[dim] + offset,
            }
        })
        .collect();
    Some(reference)
}
/// Point indices sorted by descending [`Self::scalar_score`].
///
/// Ties keep their original relative order (stable sort).
pub fn ranked_indices(&self, weights: &[f64]) -> Vec<usize> {
    // Score each point once; the previous code re-ran `scalar_score` for
    // both sides of every comparison during the sort (O(n log n) extra
    // evaluations). Cached scores also keep the comparator cheap and
    // consistent.
    let scores: Vec<f64> = (0..self.points.len())
        .map(|i| self.scalar_score(i, weights))
        .collect();
    let mut indices: Vec<usize> = (0..self.points.len()).collect();
    indices.sort_by(|&a, &b| scores[b].partial_cmp(&scores[a]).unwrap_or(Ordering::Equal));
    indices
}
/// Achievement scalarizing function: the maximum weighted deviation from the
/// supplied ideal point. Smaller is better.
///
/// Values and ideal components are negated on `Maximize` dimensions so the
/// computation is a pure minimization form. Any weight with magnitude below
/// `1e-30` short-circuits to `f64::INFINITY` to avoid division by ~zero.
/// Only the first `min(values, weights, ideal)` dimensions participate.
///
/// # Panics
/// Panics when `point_idx` is out of bounds.
pub fn asf(&self, point_idx: usize, weights: &[f64], ideal: &[f64]) -> f64 {
    assert!(
        point_idx < self.points.len(),
        "point_idx ({point_idx}) out of bounds (len={})",
        self.points.len(),
    );
    let point = &self.points[point_idx];
    let dims = point.values.len().min(weights.len()).min(ideal.len());
    let mut worst = f64::NEG_INFINITY;
    for dim in 0..dims {
        let weight = weights[dim];
        if weight.abs() < 1e-30 {
            return f64::INFINITY;
        }
        let (value, target) = match self.directions[dim] {
            Direction::Maximize => (-point.values[dim], -ideal[dim]),
            Direction::Minimize => (point.values[dim], ideal[dim]),
        };
        worst = worst.max((value - target) / weight);
    }
    worst
}
/// Index of the point minimizing [`Self::asf`], or `None` when the frontier
/// is empty. Ties resolve to the first minimal point, matching
/// `Iterator::min_by`.
pub fn best_asf(&self, weights: &[f64], ideal: &[f64]) -> Option<usize> {
    if self.is_empty() {
        return None;
    }
    // Evaluate the ASF once per point; the previous code recomputed it for
    // both sides of every comparison inside `min_by`.
    let scores: Vec<f64> = (0..self.points.len())
        .map(|i| self.asf(i, weights, ideal))
        .collect();
    (0..self.points.len())
        .min_by(|&a, &b| scores[a].partial_cmp(&scores[b]).unwrap_or(Ordering::Equal))
}
/// Keeps only the points for which `f` returns `true`, then refreshes the
/// normalization stats. Relative order of retained points is preserved.
pub fn retain<F>(&mut self, f: F)
where
F: FnMut(&Point<V>) -> bool,
{
self.points.retain(f);
self.update_stats();
}
/// Exact hypervolume dominated by the frontier with respect to `ref_point`.
///
/// Each point is mapped to its per-dimension improvement over the reference
/// (clipped at zero); points that do not beat the reference by more than
/// `eps` in every dimension contribute nothing. Returns 0.0 for an empty
/// frontier or zero dimensions.
///
/// # Panics
/// Panics when the frontier is non-empty and `ref_point.len()` differs from
/// the dimension count.
pub fn hypervolume(&self, ref_point: &[f64]) -> f64 {
    let dim = self.directions.len();
    if dim == 0 || self.is_empty() {
        return 0.0;
    }
    assert_eq!(
        ref_point.len(),
        dim,
        "ref_point len ({}) must match directions len ({dim})",
        ref_point.len(),
    );
    let oriented = self.oriented_gains(ref_point, None);
    if oriented.is_empty() {
        return 0.0;
    }
    let oriented = nondominated_max(&oriented, self.eps);
    hypervolume_max_exact(&oriented, dim, self.eps)
}
/// Maps every point (except `skip`, when given) to its non-negative gain
/// over `ref_point` per dimension, dropping points that fail to beat the
/// reference by more than `eps` in every dimension.
///
/// Shared by `hypervolume` and `hypervolume_contributions`, which previously
/// duplicated this orientation logic verbatim.
fn oriented_gains(&self, ref_point: &[f64], skip: Option<usize>) -> Vec<Vec<f64>> {
    self.points
        .iter()
        .enumerate()
        .filter(|&(idx, _)| Some(idx) != skip)
        .map(|(_, p)| {
            p.values
                .iter()
                .enumerate()
                .map(|(i, &v)| match self.directions[i] {
                    Direction::Maximize => (v - ref_point[i]).max(0.0),
                    Direction::Minimize => (ref_point[i] - v).max(0.0),
                })
                .collect::<Vec<_>>()
        })
        .filter(|gains| gains.iter().all(|&g| g > self.eps))
        .collect()
}
/// Per-point exclusive hypervolume contributions: for each point, the total
/// hypervolume minus the hypervolume of the frontier without that point,
/// clamped at zero. Returns an empty vector for an empty frontier.
pub fn hypervolume_contributions(&self, ref_point: &[f64]) -> Vec<f64> {
    let n = self.points.len();
    if n == 0 {
        return Vec::new();
    }
    let dim = self.directions.len();
    if dim == 0 {
        // Zero-dimensional points dominate no volume. The previous code fell
        // through to `hypervolume_max_exact` with dim == 0 in this
        // (degenerate) case, which indexes out of bounds.
        return vec![0.0; n];
    }
    let total = self.hypervolume(ref_point);
    (0..n)
        .map(|skip| {
            let oriented = self.oriented_gains(ref_point, Some(skip));
            if oriented.is_empty() {
                return total;
            }
            let oriented = nondominated_max(&oriented, self.eps);
            let hv_without = hypervolume_max_exact(&oriented, dim, self.eps);
            (total - hv_without).max(0.0)
        })
        .collect()
}
/// Heuristic knee point: the point whose normalized image lies farthest on
/// the good side of the hyperplane `sum(normalized) = 1`.
///
/// Values are normalized per dimension to `[0, 1]` between nadir (0) and
/// ideal (1); degenerate ranges map to 0.5. Requires at least three points,
/// otherwise returns `None`. On ties the earliest point wins.
pub fn knee_index(&self) -> Option<usize> {
    let n = self.points.len();
    if n < 3 {
        return None;
    }
    let dim = self.directions.len();
    let ideal = self.ideal_point()?;
    let nadir = self.nadir_point()?;
    let normalize = |p: &Point<V>| -> Vec<f64> {
        (0..dim)
            .map(|i| {
                let range = (ideal[i] - nadir[i]).abs();
                if range < self.eps {
                    return 0.5;
                }
                match self.directions[i] {
                    Direction::Maximize => (p.values[i] - nadir[i]) / range,
                    Direction::Minimize => (nadir[i] - p.values[i]) / range,
                }
            })
            .collect()
    };
    // Signed distance of each normalized point to the plane sum(x) = 1;
    // positive means beyond the plane, toward the ideal corner.
    let plane_scale = 1.0 / (dim as f64).sqrt();
    let mut best: Option<(usize, f64)> = None;
    for (idx, coords) in self.points.iter().map(normalize).enumerate() {
        let total: f64 = coords.iter().sum();
        let magnitude = (total - 1.0).abs() * plane_scale;
        let signed = if total >= 1.0 { magnitude } else { -magnitude };
        match best {
            Some((_, best_signed)) if signed <= best_signed => {}
            _ => best = Some((idx, signed)),
        }
    }
    best.map(|(idx, _)| idx)
}
/// Builds a frontier by pushing every `(values, data)` pair in iteration
/// order; dominated candidates are filtered out as they arrive.
pub fn from_points(
    directions: Vec<Direction>,
    items: impl IntoIterator<Item = (Vec<f64>, V)>,
) -> Self {
    let mut frontier = Self::new(directions);
    frontier.extend(items);
    frontier
}
}
/// Feeds each `(values, data)` pair through [`ParetoFrontier::push`],
/// silently dropping dominated candidates.
impl<V> Extend<(Vec<f64>, V)> for ParetoFrontier<V> {
    fn extend<I: IntoIterator<Item = (Vec<f64>, V)>>(&mut self, iter: I) {
        iter.into_iter().for_each(|(values, data)| {
            self.push(values, data);
        });
    }
}
/// An epsilon-dominance archive: objective space is partitioned into grid
/// boxes of per-dimension size `grid_eps`, and at most one representative is
/// kept per non-dominated box. This bounds archive growth while keeping an
/// approximation of the Pareto front.
#[derive(Debug, Clone)]
pub struct EpsilonArchive<V> {
// Per-dimension box edge lengths (all positive and finite).
grid_eps: Vec<f64>,
// Optimization sense per dimension.
directions: Vec<Direction>,
// Current representatives; at most one per occupied grid cell.
archive: Vec<Point<V>>,
}
impl<V> EpsilonArchive<V> {
/// Creates an empty archive with one grid resolution per dimension.
///
/// # Panics
/// Panics when the lengths differ or any `grid_eps` entry is not a positive,
/// finite number.
pub fn new(directions: Vec<Direction>, grid_eps: Vec<f64>) -> Self {
    assert_eq!(
        directions.len(),
        grid_eps.len(),
        "directions len ({}) must match grid_eps len ({})",
        directions.len(),
        grid_eps.len(),
    );
    assert!(
        grid_eps.iter().all(|&e| e > 0.0 && e.is_finite()),
        "all grid_eps values must be positive and finite"
    );
    Self {
        archive: Vec::new(),
        grid_eps,
        directions,
    }
}
/// Creates an archive using the same grid resolution `eps` in every
/// dimension.
pub fn new_uniform(directions: Vec<Direction>, eps: f64) -> Self {
    let resolution = vec![eps; directions.len()];
    Self::new(directions, resolution)
}
/// Grid coordinate of `value` along dimension `dim`, oriented so that larger
/// cell indices are always better.
fn cell(&self, value: f64, dim: usize) -> i64 {
    let oriented = if matches!(self.directions[dim], Direction::Minimize) {
        -value
    } else {
        value
    };
    // Float-to-int `as` saturates, so extreme magnitudes collapse into the
    // outermost cells rather than wrapping.
    (oriented / self.grid_eps[dim]).floor() as i64
}
/// Grid coordinates of a full value vector (one cell index per dimension).
fn cell_vec(&self, values: &[f64]) -> Vec<i64> {
    self.directions
        .iter()
        .enumerate()
        .map(|(dim, _)| self.cell(values[dim], dim))
        .collect()
}
/// Whether cell-coordinate vector `a` dominates `b`: no component smaller
/// and at least one strictly larger (cells are oriented so larger is
/// better).
fn cell_dominates(a: &[i64], b: &[i64]) -> bool {
    if a.iter().zip(b).any(|(&ai, &bi)| ai < bi) {
        return false;
    }
    a.iter().zip(b).any(|(&ai, &bi)| ai > bi)
}
/// Attempts to insert a candidate into the archive.
///
/// Rejected (returns `false`) when an existing point's cell dominates the
/// candidate's cell, or when a point already occupies the same cell and the
/// candidate does not strictly dominate it in raw objective space.
/// Otherwise every point whose cell is dominated by — or equal to — the
/// candidate's cell is evicted and the candidate is stored.
///
/// # Panics
/// Panics when the value count differs from the dimension count or any value
/// is NaN/infinite.
pub fn push(&mut self, values: Vec<f64>, data: V) -> bool {
    assert_eq!(
        values.len(),
        self.directions.len(),
        "values len ({}) must match directions len ({})",
        values.len(),
        self.directions.len(),
    );
    assert!(
        values.iter().all(|v| v.is_finite()),
        "all values must be finite"
    );
    let new_cell = self.cell_vec(&values);
    // Compute each occupant's cell once; the previous code recomputed
    // `cell_vec` per point in both the rejection scan and the eviction pass.
    let cells: Vec<Vec<i64>> = self
        .archive
        .iter()
        .map(|p| self.cell_vec(&p.values))
        .collect();
    for (existing, cell) in self.archive.iter().zip(&cells) {
        if Self::cell_dominates(cell, &new_cell) {
            return false;
        }
        // Same box: keep the incumbent unless the candidate strictly
        // dominates it in raw objective space.
        if *cell == new_cell && !self.raw_dominates(&values, &existing.values) {
            return false;
        }
    }
    let mut keep = cells
        .iter()
        .map(|cell| !Self::cell_dominates(&new_cell, cell) && *cell != new_cell);
    self.archive
        .retain(|_| keep.next().expect("cells and archive have equal length"));
    self.archive.push(Point { values, data });
    true
}
/// Strict Pareto dominance of raw value vectors (no epsilon, no grid): `a`
/// dominates `b` when it is at least as good everywhere and strictly better
/// somewhere, under this archive's directions.
fn raw_dominates(&self, a: &[f64], b: &[f64]) -> bool {
    let mut any_strict = false;
    for (i, (&av, &bv)) in a.iter().zip(b.iter()).enumerate() {
        // Orient the pair so "larger is better" holds for both senses.
        let (score_a, score_b) = match self.directions[i] {
            Direction::Maximize => (av, bv),
            Direction::Minimize => (bv, av),
        };
        if score_a < score_b {
            return false;
        }
        if score_a > score_b {
            any_strict = true;
        }
    }
    any_strict
}
/// Number of stored representatives.
pub fn len(&self) -> usize {
    self.archive.len()
}
/// True when the archive holds no points.
pub fn is_empty(&self) -> bool {
    self.archive.is_empty()
}
/// Read-only view of the stored points.
pub fn points(&self) -> &[Point<V>] {
    self.archive.as_slice()
}
/// Per-dimension grid resolutions.
pub fn grid_eps(&self) -> &[f64] {
    self.grid_eps.as_slice()
}
/// Per-dimension optimization directions.
pub fn directions(&self) -> &[Direction] {
    self.directions.as_slice()
}
/// Consumes the archive and re-inserts its points into an exact
/// [`ParetoFrontier`], which re-applies epsilon-dominance filtering.
pub fn into_frontier(self) -> ParetoFrontier<V> {
    let mut frontier = ParetoFrontier::new(self.directions);
    frontier.extend(self.archive.into_iter().map(|p| (p.values, p.data)));
    frontier
}
}
impl ParetoFrontier<usize> {
    /// Builds a maximization frontier over `points`, storing each point's
    /// original index as its payload.
    ///
    /// # Errors
    /// Returns [`FrontierError::Empty`] for an empty slice,
    /// [`FrontierError::InconsistentDimensions`] for zero-width or ragged
    /// points, and [`FrontierError::NonFinite`] for NaN/infinite coordinates.
    pub fn try_new(points: &[Vec<f64>]) -> Result<Self, FrontierError> {
        if points.is_empty() {
            return Err(FrontierError::Empty);
        }
        let width = points[0].len();
        if width == 0 || points.iter().any(|p| p.len() != width) {
            return Err(FrontierError::InconsistentDimensions);
        }
        for (point_idx, point) in points.iter().enumerate() {
            if let Some(dim_idx) = point.iter().position(|v| !v.is_finite()) {
                return Err(FrontierError::NonFinite { point_idx, dim_idx });
            }
        }
        let mut frontier = ParetoFrontier::new(vec![Direction::Maximize; width]);
        for (idx, point) in points.iter().enumerate() {
            frontier.push(point.clone(), idx);
        }
        Ok(frontier)
    }
    /// Original input indices of the surviving (non-dominated) points.
    pub fn indices(&self) -> Vec<usize> {
        self.points.iter().map(|p| p.data).collect()
    }
}
/// Filters `points` down to the eps-non-dominated subset under pure
/// maximization, preserving first-come order among survivors.
fn nondominated_max(points: &[Vec<f64>], eps: f64) -> Vec<Vec<f64>> {
    let mut front: Vec<Vec<f64>> = Vec::new();
    for candidate in points {
        if front.iter().any(|kept| dominates_max(kept, candidate, eps)) {
            continue;
        }
        front.retain(|kept| !dominates_max(candidate, kept, eps));
        front.push(candidate.clone());
    }
    front
}
/// Eps-dominance under pure maximization: `a` dominates `b` when no
/// coordinate of `a` is worse by more than `eps` and at least one is better
/// by more than `eps`.
fn dominates_max(a: &[f64], b: &[f64], eps: f64) -> bool {
    if a.iter().zip(b).any(|(&av, &bv)| av + eps < bv) {
        return false;
    }
    a.iter().zip(b).any(|(&av, &bv)| av > bv + eps)
}
/// Exact hypervolume (w.r.t. the origin) of a maximization point set whose
/// coordinates are positive gains.
///
/// Dispatches to closed-form sweeps for 1-3 dimensions; for higher
/// dimensions it slices along the last axis and recurses on the
/// eps-deduplicated projections, so cost grows quickly with `dim`. Callers
/// pass eps-non-dominated sets (see `nondominated_max`).
fn hypervolume_max_exact(points: &[Vec<f64>], dim: usize, eps: f64) -> f64 {
debug_assert!(dim >= 1);
if points.is_empty() {
return 0.0;
}
if dim == 1 {
return points.iter().map(|p| p[0]).fold(0.0, f64::max);
}
if dim == 2 {
return hypervolume_max_2d(points, eps);
}
if dim == 3 {
return hypervolume_max_3d(points, eps);
}
// Slice along the last axis: each distinct level contributes a slab whose
// cross-section is the (dim-1)-volume of the points reaching that level.
let mut levels: Vec<f64> = points
.iter()
.map(|p| p[dim - 1])
.filter(|&v| v > eps)
.collect();
// Highest level first; merge levels that coincide within eps.
levels.sort_by(|a, b| b.partial_cmp(a).unwrap_or(Ordering::Equal)); levels.dedup_by(|a, b| (*a - *b).abs() <= eps);
let mut hv = 0.0;
for (idx, &level) in levels.iter().enumerate() {
let next = if idx + 1 < levels.len() {
levels[idx + 1]
} else {
0.0
};
let thickness = (level - next).max(0.0);
if thickness <= eps {
continue;
}
// Points whose last coordinate reaches this level (within eps) span the
// slab; project them one dimension down and recurse.
let mut projected: Vec<Vec<f64>> = points
.iter()
.filter(|p| p[dim - 1] + eps >= level)
.map(|p| p[0..dim - 1].to_vec())
.collect();
if projected.is_empty() {
continue;
}
projected = nondominated_max(&projected, eps);
let cross = hypervolume_max_exact(&projected, dim - 1, eps);
hv += thickness * cross;
}
hv
}
/// 2-D maximization hypervolume w.r.t. the origin, by a left-to-right sweep.
///
/// Points are visited in ascending x (ties broken by descending y); each
/// strip between consecutive x positions contributes `width * y` of the
/// point closing the strip. Exact only for non-dominated input, which the
/// callers guarantee.
fn hypervolume_max_2d(points: &[Vec<f64>], eps: f64) -> f64 {
    let mut order: Vec<usize> = (0..points.len()).collect();
    order.sort_by(|&i, &j| {
        points[i][0]
            .partial_cmp(&points[j][0])
            .unwrap_or(Ordering::Equal)
            .then_with(|| {
                points[j][1]
                    .partial_cmp(&points[i][1])
                    .unwrap_or(Ordering::Equal)
            })
    });
    let mut area = 0.0;
    let mut swept_x = 0.0;
    for idx in order {
        let x = points[idx][0].max(0.0);
        let y = points[idx][1].max(0.0);
        let width = (x - swept_x).max(0.0);
        if width > eps && y > eps {
            area += width * y;
        }
        swept_x = x;
    }
    area
}
/// 3-D maximization hypervolume w.r.t. the origin.
///
/// Sweeps points by descending z while maintaining the 2-D front of their
/// (x, y) projections: each z-slab contributes `thickness * area(front)`.
/// Exact up to the eps tolerances used by the 2-D front helpers.
fn hypervolume_max_3d(points: &[Vec<f64>], eps: f64) -> f64 {
if points.is_empty() {
return 0.0;
}
// Visit points from highest to lowest z.
let mut sorted: Vec<usize> = (0..points.len()).collect();
sorted.sort_by(|&i, &j| {
points[j][2]
.partial_cmp(&points[i][2])
.unwrap_or(Ordering::Equal)
});
let mut front_2d: Vec<(f64, f64)> = Vec::new();
let mut hv = 0.0;
let mut prev_z = points[sorted[0]][2];
front_2d.push((points[sorted[0]][0], points[sorted[0]][1]));
for &idx in sorted.iter().skip(1) {
let z = points[idx][2];
// Each slab between consecutive z levels contributes the area of the
// (x, y) front accumulated so far, times the slab thickness.
let thickness = (prev_z - z).max(0.0);
if thickness > eps {
let hv_2d = sweep_2d_front(&front_2d, eps);
hv += thickness * hv_2d;
}
prev_z = z;
let x = points[idx][0];
let y = points[idx][1];
insert_2d_front(&mut front_2d, x, y, eps);
}
// The final slab extends from the smallest z down to the origin plane.
if prev_z > eps {
let hv_2d = sweep_2d_front(&front_2d, eps);
hv += prev_z * hv_2d;
}
hv
}
/// Inserts `(x, y)` into a 2-D maximization front kept sorted by ascending
/// x. The candidate is discarded when an existing member eps-covers it;
/// otherwise members eps-covered by the candidate are evicted first.
fn insert_2d_front(front: &mut Vec<(f64, f64)>, x: f64, y: f64, eps: f64) {
    let covered = front.iter().any(|&(fx, fy)| fx + eps >= x && fy + eps >= y);
    if covered {
        return;
    }
    // De Morgan of the original predicate: keep members NOT eps-covered by
    // the candidate.
    front.retain(|&(fx, fy)| fx > x + eps || fy > y + eps);
    let insert_at = front.partition_point(|&(fx, _)| fx < x);
    front.insert(insert_at, (x, y));
}
/// Area under a sorted (ascending-x) 2-D maximization front, w.r.t. the
/// origin; coordinates are clipped at zero and sub-eps strips are skipped.
fn sweep_2d_front(front: &[(f64, f64)], eps: f64) -> f64 {
    front
        .iter()
        .fold((0.0_f64, 0.0_f64), |(area, swept_x), &(px, py)| {
            let x = px.max(0.0);
            let y = py.max(0.0);
            let width = (x - swept_x).max(0.0);
            let contribution = if width > eps && y > eps { width * y } else { 0.0 };
            (area + contribution, x)
        })
        .0
}
/// Epsilon Pareto dominance under mixed directions: `a` dominates `b` when
/// it is not worse by more than `eps` in any dimension and better by more
/// than `eps` in at least one.
///
/// Lengths of `a`, `b`, and `directions` must agree (checked in debug
/// builds only).
pub fn dominates(directions: &[Direction], eps: f64, a: &[f64], b: &[f64]) -> bool {
    debug_assert_eq!(
        a.len(),
        b.len(),
        "dominates: a.len() ({}) != b.len() ({})",
        a.len(),
        b.len()
    );
    debug_assert_eq!(
        a.len(),
        directions.len(),
        "dominates: a.len() ({}) != directions.len() ({})",
        a.len(),
        directions.len()
    );
    let mut any_strictly_better = false;
    for (i, (&av, &bv)) in a.iter().zip(b.iter()).enumerate() {
        // Orient the pair so "larger is better" holds for both senses.
        let (score_a, score_b) = match directions[i] {
            Direction::Maximize => (av, bv),
            Direction::Minimize => (bv, av),
        };
        if score_a + eps < score_b {
            return false;
        }
        if score_a > score_b + eps {
            any_strictly_better = true;
        }
    }
    any_strictly_better
}
/// Indices of the eps-non-dominated points under pure maximization
/// (eps = 1e-9), in ascending index order. O(n^2 * d).
///
/// Returns `None` for zero-width or ragged points, or any non-finite
/// coordinate; an empty input yields `Some(vec![])`.
pub fn pareto_indices(points: &[Vec<f32>]) -> Option<Vec<usize>> {
    if points.is_empty() {
        return Some(Vec::new());
    }
    let width = points[0].len();
    if width == 0 || points.iter().any(|p| p.len() != width) {
        return None;
    }
    if points.iter().flatten().any(|v| !v.is_finite()) {
        return None;
    }
    let directions = vec![Direction::Maximize; width];
    let eps = 1e-9;
    let lifted: Vec<Vec<f64>> = points
        .iter()
        .map(|p| p.iter().map(|&x| f64::from(x)).collect())
        .collect();
    let mut keep = vec![true; points.len()];
    for i in 0..lifted.len() {
        if !keep[i] {
            continue;
        }
        // Only still-kept rivals can eliminate i; dominance is transitive,
        // so skipping eliminated rivals loses nothing.
        keep[i] = !(0..lifted.len())
            .any(|j| i != j && keep[j] && dominates(&directions, eps, &lifted[j], &lifted[i]));
    }
    Some(
        keep.into_iter()
            .enumerate()
            .filter_map(|(i, ok)| ok.then_some(i))
            .collect(),
    )
}
/// Fast-path Pareto filter for 2-objective (maximization) points; delegates
/// to [`pareto_indices`] when any point is not exactly 2-dimensional.
///
/// Matches [`pareto_indices`] semantics on the fast path: exact duplicate
/// points are all kept (they do not strictly dominate one another), and the
/// result is returned in ascending index order — both of which the previous
/// sweep got wrong while the fallback path got right. The sweep compares
/// coordinates exactly rather than with the fallback's 1e-9 epsilon; f32
/// inputs make sub-epsilon gaps unrepresentable except very near zero.
///
/// Returns `None` for non-finite coordinates; empty input yields
/// `Some(vec![])`.
pub fn pareto_indices_2d(points: &[Vec<f32>]) -> Option<Vec<usize>> {
    if points.is_empty() {
        return Some(Vec::new());
    }
    if points.iter().any(|p| p.len() != 2) {
        return pareto_indices(points);
    }
    if points.iter().any(|p| p.iter().any(|v| !v.is_finite())) {
        return None;
    }
    // Sort by descending x, ties by descending y.
    let mut idxs: Vec<usize> = (0..points.len()).collect();
    idxs.sort_by(|&i, &j| {
        let xi = points[i][0];
        let xj = points[j][0];
        xj.partial_cmp(&xi)
            .unwrap_or(Ordering::Equal)
            .then_with(|| {
                points[j][1]
                    .partial_cmp(&points[i][1])
                    .unwrap_or(Ordering::Equal)
            })
    });
    let mut best_y = f32::NEG_INFINITY;
    let mut best_x = f32::NAN;
    let mut out = Vec::new();
    for i in idxs {
        let x = points[i][0];
        let y = points[i][1];
        // Keep strict improvements in y, plus exact duplicates of the last
        // kept point (mutually non-dominating, so pareto_indices keeps them).
        if y > best_y || (x == best_x && y == best_y) {
            out.push(i);
            best_x = x;
            best_y = y;
        }
    }
    out.sort_unstable();
    Some(out)
}
/// Successive non-dominated layers (NSGA-style fronts) under pure
/// maximization with eps = 1e-9: layer 0 is the Pareto front, layer 1 the
/// front of the remainder, and so on until every point is assigned.
///
/// Returns `None` for zero-width, ragged, or non-finite input; an empty
/// input yields `Some(vec![])`.
pub fn pareto_layers(points: &[Vec<f32>]) -> Option<Vec<Vec<usize>>> {
    if points.is_empty() {
        return Some(Vec::new());
    }
    let width = points[0].len();
    if width == 0 || points.iter().any(|p| p.len() != width) {
        return None;
    }
    if points.iter().any(|p| p.iter().any(|v| !v.is_finite())) {
        return None;
    }
    let directions = vec![Direction::Maximize; width];
    let eps = 1e-9_f64;
    let lifted: Vec<Vec<f64>> = points
        .iter()
        .map(|p| p.iter().map(|&x| f64::from(x)).collect())
        .collect();
    let mut remaining: Vec<usize> = (0..points.len()).collect();
    let mut layers = Vec::new();
    while !remaining.is_empty() {
        // Mark the non-dominated subset of what's left.
        let mut keep = vec![true; remaining.len()];
        for i in 0..remaining.len() {
            if !keep[i] {
                continue;
            }
            for j in 0..remaining.len() {
                if i == j || !keep[j] {
                    continue;
                }
                if dominates(
                    &directions,
                    eps,
                    &lifted[remaining[j]],
                    &lifted[remaining[i]],
                ) {
                    keep[i] = false;
                    break;
                }
            }
        }
        // Split the survivors into the next layer and the leftover pool.
        let mut front = Vec::new();
        let mut rest = Vec::new();
        for (idx, keep_it) in remaining.into_iter().zip(keep) {
            if keep_it {
                front.push(idx);
            } else {
                rest.push(idx);
            }
        }
        layers.push(front);
        remaining = rest;
    }
    Some(layers)
}
/// Indices of points not k-dominated by any other point (pure maximization,
/// eps = 1e-9).
///
/// A point `a` k-dominates `b` when it is not worse than `b` (beyond eps) in
/// any dimension and strictly better in at least `k` dimensions; `k` is
/// clamped to the dimensionality. Because k-dominance is not transitive,
/// every point is compared against all others — including already-eliminated
/// ones — exactly as before. The fix here: once a point is eliminated the
/// inner loop now `break`s; the old code kept iterating and skipped each
/// remaining comparison one at a time via a redundant `!keep[i]` check.
///
/// Returns `None` for zero-width, ragged, or non-finite input; an empty
/// input yields `Some(vec![])`.
pub fn pareto_indices_k_dominance(points: &[Vec<f32>], k: usize) -> Option<Vec<usize>> {
    if points.is_empty() {
        return Some(Vec::new());
    }
    let d = points[0].len();
    if d == 0 || points.iter().any(|p| p.len() != d) {
        return None;
    }
    if points.iter().any(|p| p.iter().any(|v| !v.is_finite())) {
        return None;
    }
    let k = k.min(d);
    let eps = 1e-9_f64;
    let as_f64: Vec<Vec<f64>> = points
        .iter()
        .map(|p| p.iter().map(|&x| f64::from(x)).collect())
        .collect();
    let mut keep = vec![true; points.len()];
    for i in 0..as_f64.len() {
        'rivals: for j in 0..as_f64.len() {
            if i == j {
                continue;
            }
            let mut strictly_better = 0usize;
            for (&rival, &own) in as_f64[j].iter().zip(as_f64[i].iter()) {
                if rival + eps < own {
                    // The rival is worse somewhere, so it cannot k-dominate.
                    continue 'rivals;
                }
                if rival > own + eps {
                    strictly_better += 1;
                }
            }
            if strictly_better >= k {
                keep[i] = false;
                break;
            }
        }
    }
    Some(
        keep.into_iter()
            .enumerate()
            .filter_map(|(i, ok)| ok.then_some(i))
            .collect(),
    )
}
/// Euclidean (L2) distance between two equal-length vectors.
fn euclidean_dist(a: &[f64], b: &[f64]) -> f64 {
    let squared: f64 = a.iter().zip(b).map(|(&x, &y)| (x - y).powi(2)).sum();
    squared.sqrt()
}
/// Generational distance: the mean, over points of `front`, of the distance
/// to the nearest point in `reference`. Lower is better; 0 when `front` is
/// a subset of `reference`.
///
/// Returns `None` when either set is empty, points are zero-width, or the
/// dimensionalities disagree.
pub fn generational_distance(front: &[Vec<f64>], reference: &[Vec<f64>]) -> Option<f64> {
    if front.is_empty() || reference.is_empty() {
        return None;
    }
    let width = front[0].len();
    let consistent = width != 0
        && front.iter().all(|p| p.len() == width)
        && reference.iter().all(|p| p.len() == width);
    if !consistent {
        return None;
    }
    let nearest = |p: &Vec<f64>| {
        reference
            .iter()
            .map(|r| euclidean_dist(p, r))
            .fold(f64::INFINITY, f64::min)
    };
    let total: f64 = front.iter().map(nearest).sum();
    Some(total / front.len() as f64)
}
/// Inverted generational distance: the mean distance from each reference
/// point to its nearest frontier point — i.e. [`generational_distance`] with
/// the arguments swapped. Penalizes fronts that fail to cover the reference
/// set.
pub fn inverted_generational_distance(front: &[Vec<f64>], reference: &[Vec<f64>]) -> Option<f64> {
generational_distance(reference, front)
}