/// Mean Reciprocal Rank: the average of `1.0 / rank` over all queries.
/// A rank of `0` means "not found" and contributes `0.0` to the sum,
/// while still counting toward the denominator.
pub fn mrr(ranks: &[usize]) -> f64 {
if ranks.is_empty() {
return 0.0;
}
let sum: f64 = ranks
.iter()
.filter(|&&r| r > 0)
.map(|&r| 1.0 / r as f64)
.sum();
sum / ranks.len() as f64
}
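/// Hits@K: the fraction of queries whose rank is at most `k`.
/// Ranks of `0` ("not found") never count as hits.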
pub fn hits_at_k(ranks: &[usize], k: usize) -> f64 {
if ranks.is_empty() || k == 0 {
return 0.0;
}
let hits = ranks.iter().filter(|&&r| r > 0 && r <= k).count();
hits as f64 / ranks.len() as f64
}
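/// Mean Rank: the arithmetic mean of the raw ranks. Unranked entries
/// (`0`) are averaged in as-is, so callers should filter them out first
/// if "not found" is possible.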
pub fn mean_rank(ranks: &[usize]) -> f64 {
if ranks.is_empty() {
return 0.0;
}
let sum: f64 = ranks.iter().map(|&r| r as f64).sum();
sum / ranks.len() as f64
}
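/// Normalized DCG: `dcg(relevance) / dcg(ideal)`, where `ideal` is the
/// relevance list sorted in descending order. Returns `0.0` when the
/// ideal DCG is zero (no relevant items).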
pub fn ndcg(relevance: &[f64], ideal: &[f64]) -> f64 {
let actual_dcg = dcg(relevance);
let ideal_dcg = dcg(ideal);
if ideal_dcg == 0.0 {
0.0
} else {
actual_dcg / ideal_dcg
}
}
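/// Discounted Cumulative Gain with linear gains: the gain at 1-based
/// position `p` is `rel / log2(p + 1)`.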
pub fn dcg(relevance: &[f64]) -> f64 {
relevance
.iter()
.enumerate()
.map(|(i, &rel)| {
let position = i + 1;
rel / (position as f64 + 1.0).log2()
})
.sum()
}
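/// NDCG@K: NDCG computed over only the top `k` entries of both lists.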
pub fn ndcg_at_k(relevance: &[f64], ideal: &[f64], k: usize) -> f64 {
let rel_k: Vec<f64> = relevance.iter().take(k).copied().collect();
let ideal_k: Vec<f64> = ideal.iter().take(k).copied().collect();
ndcg(&rel_k, &ideal_k)
}
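/// Computes the 1-based rank of `target_score` among `all_scores`:
/// one plus the number of strictly better scores. Ties do not worsen
/// the rank (optimistic tie-breaking).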
pub fn compute_rank(target_score: f64, all_scores: &[f64], higher_is_better: bool) -> usize {
let mut rank = 1;
for &score in all_scores {
if higher_is_better {
if score > target_score {
rank += 1;
}
} else if score < target_score {
rank += 1;
}
}
rank
}
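/// A bundle of the standard link-prediction ranking metrics, computed
/// once from a slice of ranks via [`RankingMetrics::from_ranks`].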
#[derive(Debug, Clone, Default)]
pub struct RankingMetrics {
pub mrr: f64,
pub hits_at_1: f64,
pub hits_at_3: f64,
pub hits_at_10: f64,
pub mean_rank: f64,
pub count: usize,
}
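/// Precision@K: the number of relevant items ranked in the top `k`,
/// divided by `k`. `ranks` holds the positions of the relevant items.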
pub fn precision_at_k(ranks: &[usize], k: usize) -> f64 {
if k == 0 {
return 0.0;
}
let hits = ranks.iter().filter(|&&r| r > 0 && r <= k).count();
hits as f64 / k as f64
}
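/// Recall@K: the number of relevant items ranked in the top `k`,
/// divided by the total number of relevant items `n_relevant`.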
pub fn recall_at_k(ranks: &[usize], n_relevant: usize, k: usize) -> f64 {
if n_relevant == 0 {
return 0.0;
}
let hits = ranks.iter().filter(|&&r| r > 0 && r <= k).count();
hits as f64 / n_relevant as f64
}
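/// Average Precision: the mean of the precision values at each rank
/// where a relevant item appears, normalized by `n_relevant`. Ranks of
/// `0` (not retrieved) contribute nothing.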
pub fn average_precision(ranks: &[usize], n_relevant: usize) -> f64 {
if n_relevant == 0 || ranks.is_empty() {
return 0.0;
}
let mut sorted_ranks = ranks.to_vec();
sorted_ranks.sort_unstable();
let mut sum = 0.0;
let mut hits = 0;
for &r in &sorted_ranks {
if r > 0 {
hits += 1;
sum += hits as f64 / r as f64;
}
}
sum / n_relevant as f64
}
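/// F-measure@K: the weighted harmonic mean of precision@k and recall@k.
/// `beta > 1.0` weights recall more heavily; `beta = 1.0` gives F1.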
pub fn f_measure_at_k(ranks: &[usize], n_relevant: usize, k: usize, beta: f64) -> f64 {
let p = precision_at_k(ranks, k);
let r = recall_at_k(ranks, n_relevant, k);
if p == 0.0 && r == 0.0 {
return 0.0;
}
let beta_sq = beta * beta;
(1.0 + beta_sq) * (p * r) / (beta_sq * p + r)
}
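/// R-Precision: precision at `k = n_relevant`, i.e. the fraction of the
/// top `R` positions occupied by relevant items.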
pub fn r_precision(ranks: &[usize], n_relevant: usize) -> f64 {
precision_at_k(ranks, n_relevant)
}
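/// Expected Reciprocal Rank at `k` (Chapelle et al.). With the fixed
/// relevance of `1.0` used here, the modeled user always stops at the
/// first relevant result, so this reduces to the reciprocal of the
/// earliest rank within the top `k` (or `0.0` if there is none).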
pub fn err_at_k(ranks: &[usize], k: usize) -> f64 {
if ranks.is_empty() {
return 0.0;
}
let mut p_stop = 1.0;
let mut err = 0.0;
let mut sorted_ranks = ranks.to_vec();
sorted_ranks.sort_unstable();
for &r in &sorted_ranks {
if r > 0 && r <= k {
            // Binary relevance: each hit is assigned the maximal stop
            // probability of 1.0, so the user always stops at the first hit.
            let r_val = 1.0;
            err += p_stop * r_val / r as f64;
p_stop *= 1.0 - r_val;
if p_stop <= 0.0 {
break;
}
}
}
err
}
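/// Rank-Biased Precision (Moffat & Zobel): each hit at rank `r`
/// contributes `persistence^(r - 1)`, scaled by `1 - persistence`.
/// Returns `0.0` for persistence values outside the open interval (0, 1).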
pub fn rbp_at_k(ranks: &[usize], k: usize, persistence: f64) -> f64 {
if persistence <= 0.0 || persistence >= 1.0 {
return 0.0;
}
let mut rbp = 0.0;
for &r in ranks {
if r > 0 && r <= k {
rbp += persistence.powi(r as i32 - 1);
}
}
(1.0 - persistence) * rbp
}
impl RankingMetrics {
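    /// Computes all of the bundled metrics from a single slice of ranks.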
pub fn from_ranks(ranks: &[usize]) -> Self {
Self {
mrr: mrr(ranks),
hits_at_1: hits_at_k(ranks, 1),
hits_at_3: hits_at_k(ranks, 3),
hits_at_10: hits_at_k(ranks, 10),
mean_rank: mean_rank(ranks),
count: ranks.len(),
}
}
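    /// Renders the metrics as a single human-readable line.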
pub fn summary(&self) -> String {
format!(
"MRR: {:.4}, Hits@1: {:.4}, Hits@3: {:.4}, Hits@10: {:.4}, MR: {:.2}",
self.mrr, self.hits_at_1, self.hits_at_3, self.hits_at_10, self.mean_rank
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mrr() {
let ranks = [1, 3, 2, 5];
let expected = (1.0 + 1.0 / 3.0 + 0.5 + 0.2) / 4.0;
assert!((mrr(&ranks) - expected).abs() < 1e-6);
}
#[test]
fn test_mrr_empty() {
assert_eq!(mrr(&[]), 0.0);
}
#[test]
fn test_mrr_all_rank_one() {
let ranks = [1, 1, 1, 1];
assert!((mrr(&ranks) - 1.0).abs() < 1e-6);
}
#[test]
fn test_hits_at_k() {
let ranks = [1, 3, 2, 5, 10, 15];
assert!((hits_at_k(&ranks, 1) - 1.0 / 6.0).abs() < 1e-6);
assert!((hits_at_k(&ranks, 3) - 3.0 / 6.0).abs() < 1e-6);
assert!((hits_at_k(&ranks, 10) - 5.0 / 6.0).abs() < 1e-6);
}
#[test]
fn test_hits_at_k_empty() {
assert_eq!(hits_at_k(&[], 10), 0.0);
}
#[test]
fn test_mean_rank() {
let ranks = [1, 3, 2, 5];
let expected = (1.0 + 3.0 + 2.0 + 5.0) / 4.0;
assert!((mean_rank(&ranks) - expected).abs() < 1e-6);
}
#[test]
fn test_ndcg_perfect() {
let relevance = [3.0, 2.0, 1.0, 0.0];
let ideal = [3.0, 2.0, 1.0, 0.0];
assert!((ndcg(&relevance, &ideal) - 1.0).abs() < 1e-6);
}
#[test]
fn test_ndcg_suboptimal() {
        let relevance = [0.0, 3.0, 2.0, 1.0];
        let ideal = [3.0, 2.0, 1.0, 0.0];
let score = ndcg(&relevance, &ideal);
assert!(score < 1.0);
assert!(score > 0.0);
}
#[test]
fn test_compute_rank() {
let scores = [0.1, 0.5, 0.3, 0.8];
assert_eq!(compute_rank(0.8, &scores, true), 1);
assert_eq!(compute_rank(0.5, &scores, true), 2);
assert_eq!(compute_rank(0.3, &scores, true), 3);
assert_eq!(compute_rank(0.1, &scores, true), 4);
assert_eq!(compute_rank(0.1, &scores, false), 1);
assert_eq!(compute_rank(0.8, &scores, false), 4);
}
#[test]
fn test_ranking_metrics_struct() {
let ranks = [1, 2, 3, 10];
let metrics = RankingMetrics::from_ranks(&ranks);
assert!((metrics.mrr - mrr(&ranks)).abs() < 1e-6);
assert!((metrics.hits_at_1 - hits_at_k(&ranks, 1)).abs() < 1e-6);
assert!((metrics.hits_at_10 - hits_at_k(&ranks, 10)).abs() < 1e-6);
assert!((metrics.mean_rank - mean_rank(&ranks)).abs() < 1e-6);
assert_eq!(metrics.count, 4);
}
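    // Added coverage for the set-based and user-model metrics; the
    // expected values below are worked out by hand from the formulas above.
    #[test]
    fn test_precision_recall_at_k() {
        // Relevant items sit at ranks 1, 3, and 7; two of them fall in the top 5.
        let ranks = [1, 3, 7];
        assert!((precision_at_k(&ranks, 5) - 2.0 / 5.0).abs() < 1e-6);
        assert!((recall_at_k(&ranks, 4, 5) - 2.0 / 4.0).abs() < 1e-6);
    }
    #[test]
    fn test_average_precision() {
        // Precision at the hit ranks 1, 3, 5 is 1/1, 2/3, 3/5.
        let ranks = [5, 1, 3];
        let expected = (1.0 + 2.0 / 3.0 + 3.0 / 5.0) / 3.0;
        assert!((average_precision(&ranks, 3) - expected).abs() < 1e-6);
    }
    #[test]
    fn test_f_measure_at_k() {
        // p = 2/5, r = 2/4; F1 is their harmonic mean.
        let ranks = [1, 3, 7];
        let p = 2.0 / 5.0;
        let r = 2.0 / 4.0;
        let expected = 2.0 * p * r / (p + r);
        assert!((f_measure_at_k(&ranks, 4, 5, 1.0) - expected).abs() < 1e-6);
    }
    #[test]
    fn test_err_at_k() {
        // With fixed relevance 1.0, ERR is the reciprocal of the earliest hit.
        assert!((err_at_k(&[3, 1, 7], 10) - 1.0).abs() < 1e-6);
        assert!((err_at_k(&[4], 10) - 0.25).abs() < 1e-6);
        assert_eq!(err_at_k(&[15], 10), 0.0);
    }
    #[test]
    fn test_rbp_at_k() {
        // (1 - 0.5) * (0.5^0 + 0.5^1) = 0.75.
        assert!((rbp_at_k(&[1, 2], 10, 0.5) - 0.75).abs() < 1e-6);
        // Out-of-range persistence hits the guard and returns 0.
        assert_eq!(rbp_at_k(&[1], 10, 1.5), 0.0);
    }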
}