rankops 0.1.6

Operations on ranked lists: fusion (RRF, Copeland, CombMNZ, DBSF, 14 methods), reranking (MaxSim/ColBERT, MMR, DPP), evaluation (NDCG, MAP, MRR), diagnostics, pipeline builder.

//! Ranking evaluation metrics.
//!
//! Standard metrics for evaluating ranking quality in information retrieval,
//! knowledge graph completion, and learning-to-rank tasks.
//!
//! # Metrics Overview
//!
//! | Metric | Range | Interpretation |
//! |--------|-------|----------------|
//! | MRR | [0, 1] | Average reciprocal of the correct answer's rank |
//! | Hits@k | [0, 1] | Fraction of queries with the correct answer in the top k |
//! | Mean Rank | [1, n] | Average rank of the correct answer |
//! | NDCG | [0, 1] | Position-weighted graded relevance |
//! | P@k / R@k / F@k | [0, 1] | Overlap between the top k and the relevant set |
//! | AP | [0, 1] | Mean precision at each relevant document |
//! | R-Precision | [0, 1] | Precision at R = number of relevant docs |
//! | ERR | [0, 1] | Cascade-model reciprocal rank |
//! | RBP | [0, 1] | Geometric position-discounted precision |
//!
//! # Example
//!
//! ```rust
//! use rankops::metrics::{mrr, hits_at_k, mean_rank, ndcg};
//!
//! // Ranks of correct answers for 4 queries (1-indexed)
//! let ranks = [1, 3, 2, 5];
//!
//! assert!((mrr(&ranks) - 0.508).abs() < 0.01);
//! assert!((hits_at_k(&ranks, 3) - 0.75).abs() < 0.01);
//! assert!((mean_rank(&ranks) - 2.75).abs() < 0.01);
//!
//! // Graded relevance in ranked order vs. the ideal ordering
//! let relevance = [3.0, 2.0, 1.0, 0.0];
//! assert!((ndcg(&relevance, &relevance) - 1.0).abs() < 0.01);
//! ```
//!
//! # Research Background
//!
//! These metrics are standard in:
//! - **Information Retrieval**: Evaluating search engines (Croft et al., 2010)
//! - **Knowledge Graphs**: Link prediction (Bordes et al., 2013)
//! - **Recommendation**: Ranking candidate items
//!
//! MRR was popularized for KG evaluation by Bordes et al. (2013) "Translating
//! Embeddings for Modeling Multi-relational Data".

/// Mean Reciprocal Rank (MRR).
///
/// Average of 1/rank for correct answers. Weights top positions far more
/// heavily than lower ones: rank 1 → 1.0, rank 2 → 0.5, rank 10 → 0.1.
///
/// # Formula
///
/// ```text
/// MRR = (1/|Q|) Σᵢ 1/rankᵢ
/// ```
///
/// # Parameters
///
/// - `ranks`: Slice of ranks (1-indexed; a rank of 0 is treated as "not found" and contributes 0)
///
/// # Returns
///
/// MRR in [0, 1]. Higher is better.
///
/// # Example
///
/// ```rust
/// use rankops::metrics::mrr;
///
/// let ranks = [1, 3, 2, 5];
/// let score = mrr(&ranks);
/// // (1/1 + 1/3 + 1/2 + 1/5) / 4 ≈ 0.508
/// assert!((score - 0.508).abs() < 0.01);
/// ```
pub fn mrr(ranks: &[usize]) -> f64 {
    if ranks.is_empty() {
        return 0.0;
    }

    let sum: f64 = ranks
        .iter()
        .filter(|&&r| r > 0)
        .map(|&r| 1.0 / r as f64)
        .sum();

    sum / ranks.len() as f64
}

/// Hits@k: fraction of queries with correct answer in top k.
///
/// Binary metric: either the answer is in top k (1) or not (0).
///
/// # Formula
///
/// ```text
/// Hits@k = (1/|Q|) Σᵢ 𝟙[rankᵢ ≤ k]
/// ```
///
/// # Parameters
///
/// - `ranks`: Slice of ranks (1-indexed; a rank of 0 counts as a miss)
/// - `k`: Cutoff (typically 1, 3, or 10)
///
/// # Returns
///
/// Hits@k in [0, 1]. Higher is better.
///
/// # Example
///
/// ```rust
/// use rankops::metrics::hits_at_k;
///
/// let ranks = [1, 3, 2, 5];
/// assert!((hits_at_k(&ranks, 3) - 0.75).abs() < 0.01);  // 3 of 4 in top 3
/// assert!((hits_at_k(&ranks, 1) - 0.25).abs() < 0.01);  // 1 of 4 at rank 1
/// ```
pub fn hits_at_k(ranks: &[usize], k: usize) -> f64 {
    if ranks.is_empty() || k == 0 {
        return 0.0;
    }

    let hits = ranks.iter().filter(|&&r| r > 0 && r <= k).count();
    hits as f64 / ranks.len() as f64
}

/// Mean Rank: average position of correct answers.
///
/// Unlike MRR, this is linear in rank. Lower is better.
///
/// # Formula
///
/// ```text
/// MR = (1/|Q|) Σᵢ rankᵢ
/// ```
///
/// # Returns
///
/// Mean Rank in [1, n]; 0.0 for an empty slice. Lower is better.
///
/// # Example
///
/// ```rust
/// use rankops::metrics::mean_rank;
///
/// let ranks = [1, 3, 2, 5];
/// assert!((mean_rank(&ranks) - 2.75).abs() < 0.01);
/// ```
pub fn mean_rank(ranks: &[usize]) -> f64 {
    if ranks.is_empty() {
        return 0.0;
    }

    let sum: f64 = ranks.iter().map(|&r| r as f64).sum();
    sum / ranks.len() as f64
}

/// Normalized Discounted Cumulative Gain (NDCG).
///
/// Measures ranking quality with position-weighted relevance.
/// Unlike Hits@k, NDCG considers graded relevance (not just binary).
///
/// Uses the Järvelin & Kekäläinen (2002) DCG formulation: `rel / log2(position + 1)`.
///
/// # Formula
///
/// ```text
/// DCG = Σᵢ relᵢ / log₂(i + 1)
/// NDCG = DCG / IDCG
/// ```
///
/// where IDCG is DCG of the ideal (perfectly sorted) ranking.
///
/// # Parameters
///
/// - `relevance`: Relevance scores in ranked order (position 1, 2, 3, ...)
/// - `ideal`: Ideal relevance scores (sorted descending)
///
/// # Returns
///
/// NDCG in [0, 1]. Higher is better.
///
/// # Example
///
/// ```rust
/// use rankops::metrics::ndcg;
///
/// // Actual ranking: relevance [3, 2, 1, 0]
/// // Ideal ranking:  relevance [3, 2, 1, 0] (already optimal)
/// let relevance = [3.0, 2.0, 1.0, 0.0];
/// let ideal = [3.0, 2.0, 1.0, 0.0];
/// assert!((ndcg(&relevance, &ideal) - 1.0).abs() < 0.01);
///
/// // Suboptimal ranking
/// let relevance = [1.0, 3.0, 2.0, 0.0];
/// let ideal = [3.0, 2.0, 1.0, 0.0];
/// assert!(ndcg(&relevance, &ideal) < 1.0);
/// ```
pub fn ndcg(relevance: &[f64], ideal: &[f64]) -> f64 {
    let actual_dcg = dcg(relevance);
    let ideal_dcg = dcg(ideal);

    if ideal_dcg == 0.0 {
        0.0
    } else {
        actual_dcg / ideal_dcg
    }
}

/// Discounted Cumulative Gain (DCG).
///
/// Helper for NDCG. Sums relevance discounted by log2(position + 1).
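///
/// # Example
///
/// A quick check of the discount schedule (values are illustrative):
///
/// ```rust
/// use rankops::metrics::dcg;
///
/// // Position 1 is discounted by log2(2) = 1, position 2 by log2(3).
/// let expected = 3.0 + 2.0 / 3f64.log2();
/// assert!((dcg(&[3.0, 2.0]) - expected).abs() < 1e-9);
/// ```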
pub fn dcg(relevance: &[f64]) -> f64 {
    relevance
        .iter()
        .enumerate()
        .map(|(i, &rel)| {
            let position = i + 1;
            rel / (position as f64 + 1.0).log2()
        })
        .sum()
}

/// NDCG@k: NDCG truncated at position k.
///
/// Only considers the top k positions in the ranking.
///
/// # Example
///
/// ```rust
/// use rankops::metrics::ndcg_at_k;
///
/// let relevance = [3.0, 1.0, 2.0, 0.0];
/// let ideal = [3.0, 2.0, 1.0, 0.0];
/// let score = ndcg_at_k(&relevance, &ideal, 3);
/// // Top 3 slightly out of order, so 0.9 < NDCG@3 < 1.0
/// assert!(score > 0.9 && score < 1.0);
/// ```
pub fn ndcg_at_k(relevance: &[f64], ideal: &[f64], k: usize) -> f64 {
    let rel_k: Vec<f64> = relevance.iter().take(k).copied().collect();
    let ideal_k: Vec<f64> = ideal.iter().take(k).copied().collect();
    ndcg(&rel_k, &ideal_k)
}

/// Compute rank of a target score among all scores.
///
/// Useful for link prediction: given scores for all entities,
/// find the rank of the correct entity.
///
/// # Parameters
///
/// - `target_score`: Score of the correct answer
/// - `all_scores`: Scores of all candidates (including target)
/// - `higher_is_better`: If true, higher scores rank higher
///
/// # Returns
///
/// Rank (1-indexed). Rank 1 = best. Ties with `target_score` do not increase the rank.
///
/// # Example
///
/// ```rust
/// use rankops::metrics::compute_rank;
///
/// let scores = [0.1, 0.5, 0.3, 0.8];
/// let rank = compute_rank(0.5, &scores, true);
/// // 0.8 > 0.5 > 0.3 > 0.1, so 0.5 is rank 2
/// assert_eq!(rank, 2);
/// ```
pub fn compute_rank(target_score: f64, all_scores: &[f64], higher_is_better: bool) -> usize {
    let mut rank = 1;
    for &score in all_scores {
        if higher_is_better {
            if score > target_score {
                rank += 1;
            }
        } else if score < target_score {
            rank += 1;
        }
    }
    rank
}

/// Aggregate ranking metrics.
///
/// Computes MRR, Hits@1/3/10, and Mean Rank in one call.
#[derive(Debug, Clone, Default)]
pub struct RankingMetrics {
    /// Mean Reciprocal Rank
    pub mrr: f64,
    /// Hits@1
    pub hits_at_1: f64,
    /// Hits@3
    pub hits_at_3: f64,
    /// Hits@10
    pub hits_at_10: f64,
    /// Mean Rank
    pub mean_rank: f64,
    /// Number of queries
    pub count: usize,
}

/// Precision at k: fraction of the top k results that are relevant.
///
/// P@k = |relevant ∩ top-k| / k
///
/// `ranks` holds the (1-indexed) ranks of the relevant documents; a document
/// counts as retrieved in the top k when its rank is ≤ k, and a rank of 0
/// means "not retrieved".
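///
/// # Example
///
/// With relevant documents at ranks 1, 2, 3, and 5 (illustrative values):
///
/// ```rust
/// use rankops::metrics::precision_at_k;
///
/// let ranks = [1, 3, 2, 5];
/// assert!((precision_at_k(&ranks, 3) - 1.0).abs() < 1e-6);  // top 3 all relevant
/// assert!((precision_at_k(&ranks, 5) - 0.8).abs() < 1e-6);  // 4 of top 5 relevant
/// ```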
pub fn precision_at_k(ranks: &[usize], k: usize) -> f64 {
    if k == 0 {
        return 0.0;
    }
    let hits = ranks.iter().filter(|&&r| r > 0 && r <= k).count();
    hits as f64 / k as f64
}

/// Recall at k: fraction of relevant docs in top-k.
///
/// R@k = |relevant ∩ top-k| / |relevant|
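///
/// # Example
///
/// Three of four relevant documents retrieved, at ranks 1, 3, and 5
/// (illustrative values):
///
/// ```rust
/// use rankops::metrics::recall_at_k;
///
/// let ranks = [1, 3, 5];
/// assert!((recall_at_k(&ranks, 4, 3) - 0.5).abs() < 1e-6);   // ranks 1 and 3 within top 3
/// assert!((recall_at_k(&ranks, 4, 10) - 0.75).abs() < 1e-6); // all three found by rank 10
/// ```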
pub fn recall_at_k(ranks: &[usize], n_relevant: usize, k: usize) -> f64 {
    if n_relevant == 0 {
        return 0.0;
    }
    let hits = ranks.iter().filter(|&&r| r > 0 && r <= k).count();
    hits as f64 / n_relevant as f64
}

/// Average Precision: mean of the precision at each relevant document's rank.
///
/// With the ranks of the retrieved relevant documents sorted ascending,
/// AP = (1/|R|) Σᵢ i / rankᵢ, where i is the 1-based index of the i-th
/// relevant document found and |R| = `n_relevant`. Ranks of 0 (not
/// retrieved) contribute nothing.
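///
/// # Example
///
/// Three relevant documents found at ranks 1, 3, and 5 (illustrative values):
///
/// ```rust
/// use rankops::metrics::average_precision;
///
/// let ranks = [1, 3, 5];
/// // P@1 = 1/1, P@3 = 2/3, P@5 = 3/5, averaged over |R| = 3
/// let expected = (1.0 / 1.0 + 2.0 / 3.0 + 3.0 / 5.0) / 3.0;
/// assert!((average_precision(&ranks, 3) - expected).abs() < 1e-9);
/// ```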
pub fn average_precision(ranks: &[usize], n_relevant: usize) -> f64 {
    if n_relevant == 0 || ranks.is_empty() {
        return 0.0;
    }

    let mut sorted_ranks = ranks.to_vec();
    sorted_ranks.sort_unstable();

    let mut sum = 0.0;
    let mut hits = 0;

    for &r in &sorted_ranks {
        if r > 0 {
            hits += 1;
            sum += hits as f64 / r as f64;
        }
    }

    sum / n_relevant as f64
}

/// F-measure at k: weighted harmonic mean of P@k and R@k (`beta` = 1 gives F1).
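///
/// # Example
///
/// With relevant documents at ranks 1, 2, 3, and 5 (illustrative values),
/// P@3 = 1.0 and R@3 = 0.75, so F1 = 2 · (1.0 · 0.75) / (1.0 + 0.75):
///
/// ```rust
/// use rankops::metrics::f_measure_at_k;
///
/// let ranks = [1, 3, 2, 5];
/// let expected = 2.0 * 0.75 / 1.75;
/// assert!((f_measure_at_k(&ranks, 4, 3, 1.0) - expected).abs() < 1e-9);
/// ```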
pub fn f_measure_at_k(ranks: &[usize], n_relevant: usize, k: usize, beta: f64) -> f64 {
    let p = precision_at_k(ranks, k);
    let r = recall_at_k(ranks, n_relevant, k);

    if p == 0.0 && r == 0.0 {
        return 0.0;
    }

    let beta_sq = beta * beta;
    (1.0 + beta_sq) * (p * r) / (beta_sq * p + r)
}

/// R-Precision: Precision at R, where R is the number of relevant documents.
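///
/// # Example
///
/// With 4 relevant documents at ranks 1, 2, 3, and 5 (illustrative values),
/// three fall within the top 4:
///
/// ```rust
/// use rankops::metrics::r_precision;
///
/// let ranks = [1, 3, 2, 5];
/// assert!((r_precision(&ranks, 4) - 0.75).abs() < 1e-6);
/// ```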
pub fn r_precision(ranks: &[usize], n_relevant: usize) -> f64 {
    precision_at_k(ranks, n_relevant)
}

/// Expected Reciprocal Rank (ERR), after Chapelle et al. (2009).
///
/// ERR models user behavior with a cascade model. This implementation uses
/// binary relevance with stop probability 1, so it reduces to the reciprocal
/// rank of the first relevant document within the top k.
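///
/// # Example
///
/// First relevant document at rank 3 (illustrative values), so ERR = 1/3:
///
/// ```rust
/// use rankops::metrics::err_at_k;
///
/// let ranks = [3, 7];
/// assert!((err_at_k(&ranks, 10) - 1.0 / 3.0).abs() < 1e-9);
/// ```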
pub fn err_at_k(ranks: &[usize], k: usize) -> f64 {
    if ranks.is_empty() {
        return 0.0;
    }

    let mut p_stop = 1.0;
    let mut err = 0.0;

    let mut sorted_ranks = ranks.to_vec();
    sorted_ranks.sort_unstable();

    for &r in &sorted_ranks {
        if r > 0 && r <= k {
            let r_val = 1.0; // Binary relevance
            err += p_stop * r_val / r as f64;
            p_stop *= 1.0 - r_val;
            if p_stop <= 0.0 {
                break;
            }
        }
    }

    err
}

/// Rank-Biased Precision (RBP), after Moffat & Zobel (2008).
///
/// RBP = (1 - p) Σᵢ p^(rankᵢ - 1), where `persistence` (p) is the probability
/// that the user continues from one rank to the next. Returns 0.0 if
/// `persistence` lies outside (0, 1).
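///
/// # Example
///
/// Relevant documents at ranks 1 and 2 with p = 0.8 (illustrative values):
/// RBP = (1 - 0.8) * (0.8^0 + 0.8^1) = 0.36.
///
/// ```rust
/// use rankops::metrics::rbp_at_k;
///
/// let ranks = [1, 2];
/// assert!((rbp_at_k(&ranks, 10, 0.8) - 0.36).abs() < 1e-9);
/// ```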
pub fn rbp_at_k(ranks: &[usize], k: usize, persistence: f64) -> f64 {
    if persistence <= 0.0 || persistence >= 1.0 {
        return 0.0;
    }

    let mut rbp = 0.0;
    for &r in ranks {
        if r > 0 && r <= k {
            rbp += persistence.powi(r as i32 - 1);
        }
    }

    (1.0 - persistence) * rbp
}

impl RankingMetrics {
    /// Compute all metrics from ranks.
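    ///
    /// # Example
    ///
    /// ```rust
    /// use rankops::metrics::RankingMetrics;
    ///
    /// let metrics = RankingMetrics::from_ranks(&[1, 2, 3, 10]);
    /// assert_eq!(metrics.count, 4);
    /// assert!((metrics.hits_at_10 - 1.0).abs() < 1e-6);  // every rank within the top 10
    /// ```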
    pub fn from_ranks(ranks: &[usize]) -> Self {
        Self {
            mrr: mrr(ranks),
            hits_at_1: hits_at_k(ranks, 1),
            hits_at_3: hits_at_k(ranks, 3),
            hits_at_10: hits_at_k(ranks, 10),
            mean_rank: mean_rank(ranks),
            count: ranks.len(),
        }
    }

    /// Pretty print metrics.
    pub fn summary(&self) -> String {
        format!(
            "MRR: {:.4}, Hits@1: {:.4}, Hits@3: {:.4}, Hits@10: {:.4}, MR: {:.2}",
            self.mrr, self.hits_at_1, self.hits_at_3, self.hits_at_10, self.mean_rank
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_mrr() {
        let ranks = [1, 3, 2, 5];
        let expected = (1.0 + 1.0 / 3.0 + 0.5 + 0.2) / 4.0;
        assert!((mrr(&ranks) - expected).abs() < 1e-6);
    }

    #[test]
    fn test_mrr_empty() {
        assert_eq!(mrr(&[]), 0.0);
    }

    #[test]
    fn test_mrr_all_rank_one() {
        let ranks = [1, 1, 1, 1];
        assert!((mrr(&ranks) - 1.0).abs() < 1e-6);
    }

    #[test]
    fn test_hits_at_k() {
        let ranks = [1, 3, 2, 5, 10, 15];

        assert!((hits_at_k(&ranks, 1) - 1.0 / 6.0).abs() < 1e-6);
        assert!((hits_at_k(&ranks, 3) - 3.0 / 6.0).abs() < 1e-6);
        assert!((hits_at_k(&ranks, 10) - 5.0 / 6.0).abs() < 1e-6);
    }

    #[test]
    fn test_hits_at_k_empty() {
        assert_eq!(hits_at_k(&[], 10), 0.0);
    }

    #[test]
    fn test_mean_rank() {
        let ranks = [1, 3, 2, 5];
        let expected = (1.0 + 3.0 + 2.0 + 5.0) / 4.0;
        assert!((mean_rank(&ranks) - expected).abs() < 1e-6);
    }

    #[test]
    fn test_ndcg_perfect() {
        let relevance = [3.0, 2.0, 1.0, 0.0];
        let ideal = [3.0, 2.0, 1.0, 0.0];
        assert!((ndcg(&relevance, &ideal) - 1.0).abs() < 1e-6);
    }

    #[test]
    fn test_ndcg_suboptimal() {
        let relevance = [0.0, 3.0, 2.0, 1.0]; // worst item first
        let ideal = [3.0, 2.0, 1.0, 0.0];
        let score = ndcg(&relevance, &ideal);
        assert!(score < 1.0);
        assert!(score > 0.0);
    }

    #[test]
    fn test_compute_rank() {
        let scores = [0.1, 0.5, 0.3, 0.8];

        // Higher is better: 0.8 > 0.5 > 0.3 > 0.1
        assert_eq!(compute_rank(0.8, &scores, true), 1);
        assert_eq!(compute_rank(0.5, &scores, true), 2);
        assert_eq!(compute_rank(0.3, &scores, true), 3);
        assert_eq!(compute_rank(0.1, &scores, true), 4);

        // Lower is better: 0.1 < 0.3 < 0.5 < 0.8
        assert_eq!(compute_rank(0.1, &scores, false), 1);
        assert_eq!(compute_rank(0.8, &scores, false), 4);
    }

    #[test]
    fn test_ranking_metrics_struct() {
        let ranks = [1, 2, 3, 10];
        let metrics = RankingMetrics::from_ranks(&ranks);

        assert!((metrics.mrr - mrr(&ranks)).abs() < 1e-6);
        assert!((metrics.hits_at_1 - hits_at_k(&ranks, 1)).abs() < 1e-6);
        assert!((metrics.hits_at_10 - hits_at_k(&ranks, 10)).abs() < 1e-6);
        assert!((metrics.mean_rank - mean_rank(&ranks)).abs() < 1e-6);
        assert_eq!(metrics.count, 4);
    }
}