//! Information retrieval evaluation metrics for ranking quality.
//!
//! ## Metrics Overview
//!
//! | Metric         | What it measures                                    | Range   |
//! |----------------|-----------------------------------------------------|---------|
//! | NDCG@k         | Quality of top-k ranking (graded relevance)         | 0.0-1.0 |
//! | Precision@k    | Fraction of top-k that are relevant (binary)        | 0.0-1.0 |
//! | Recall@k       | Fraction of relevant items in top-k                 | 0.0-1.0 |
//! | MRR            | Reciprocal rank of first relevant item              | 0.0-1.0 |
//! | MAP            | Mean average precision (all relevant items)         | 0.0-1.0 |
//!
//! ## Weighted vs Binary Relevance
//!
//! Git-derived ground truth has **graded relevance**:
//! - Files that always change together: relevance ~1.0
//! - Occasional co-changes: relevance ~0.3
//! - Never co-changed: relevance 0.0
//!
//! NDCG is the primary metric because it handles graded relevance.
//! Precision/Recall/MRR/MAP binarize relevance at a threshold (default 0.1).
//!
//! ## Aggregation
//!
//! When evaluating over multiple cases, we report:
//! - Mean across cases (primary)
//! - Std dev (for significance testing)
//! - Median (robust to outliers)
//! - Weighted mean (by case quality)
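//!
//! ## Example
//!
//! A minimal usage sketch (the `ripmap::eval::metrics` path is an assumption;
//! adjust to this crate's actual module layout):
//!
//! ```ignore
//! use ripmap::eval::metrics::{CaseMetrics, EvalMetrics};
//!
//! // One case: our ranking vs. git-derived ground truth (file, relevance).
//! let ranking = vec!["src/a.rs".to_string(), "src/b.rs".to_string()];
//! let truth = vec![("src/a.rs".to_string(), 1.0), ("src/c.rs".to_string(), 0.3)];
//!
//! let case = CaseMetrics::compute(&ranking, &truth, 0.1);
//! let report = EvalMetrics::aggregate(&[case]);
//! println!("NDCG@10 = {:.3}", report.ndcg_at_10);
//! ```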

use std::collections::HashMap;

use serde::{Deserialize, Serialize};

/// Aggregated evaluation metrics over a dataset.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct EvalMetrics {
    /// NDCG@10 (primary metric for graded relevance)
    pub ndcg_at_10: f64,
    /// NDCG@5 (stricter top ranking quality)
    pub ndcg_at_5: f64,

    /// Precision@5 (binary relevance at threshold 0.1)
    pub precision_at_5: f64,
    /// Precision@10 (binary relevance at threshold 0.1)
    pub precision_at_10: f64,

    /// Recall@5 (binary relevance at threshold 0.1)
    pub recall_at_5: f64,
    /// Recall@10 (binary relevance at threshold 0.1)
    pub recall_at_10: f64,

    /// Mean Reciprocal Rank
    pub mrr: f64,

    /// Mean Average Precision
    pub map: f64,

    /// Number of cases evaluated
    pub n_cases: usize,

    /// Standard deviations (for significance testing)
    pub ndcg_at_10_std: f64,
    pub mrr_std: f64,
}

impl EvalMetrics {
    /// Aggregate metrics from per-case results.
    pub fn aggregate(per_case: &[CaseMetrics]) -> Self {
        if per_case.is_empty() {
            return Self::default();
        }

        let n = per_case.len() as f64;

        let ndcg_10: Vec<_> = per_case.iter().map(|c| c.ndcg_at_10).collect();
        let ndcg_5: Vec<_> = per_case.iter().map(|c| c.ndcg_at_5).collect();
        let p_5: Vec<_> = per_case.iter().map(|c| c.precision_at_5).collect();
        let p_10: Vec<_> = per_case.iter().map(|c| c.precision_at_10).collect();
        let r_5: Vec<_> = per_case.iter().map(|c| c.recall_at_5).collect();
        let r_10: Vec<_> = per_case.iter().map(|c| c.recall_at_10).collect();
        let mrr: Vec<_> = per_case.iter().map(|c| c.mrr).collect();
        let map: Vec<_> = per_case.iter().map(|c| c.map).collect();

        Self {
            ndcg_at_10: mean(&ndcg_10),
            ndcg_at_5: mean(&ndcg_5),
            precision_at_5: mean(&p_5),
            precision_at_10: mean(&p_10),
            recall_at_5: mean(&r_5),
            recall_at_10: mean(&r_10),
            mrr: mean(&mrr),
            map: mean(&map),
            n_cases: per_case.len(),
            ndcg_at_10_std: std_dev(&ndcg_10),
            mrr_std: std_dev(&mrr),
        }
    }

    /// Aggregate with case weighting (weight by case quality).
    pub fn aggregate_weighted(per_case: &[(CaseMetrics, f64)]) -> Self {
        if per_case.is_empty() {
            return Self::default();
        }

        let total_weight: f64 = per_case.iter().map(|(_, w)| w).sum();
        if total_weight == 0.0 {
            return Self::default();
        }

        let weighted_mean = |f: fn(&CaseMetrics) -> f64| -> f64 {
            per_case.iter().map(|(c, w)| f(c) * w).sum::<f64>() / total_weight
        };

        Self {
            ndcg_at_10: weighted_mean(|c| c.ndcg_at_10),
            ndcg_at_5: weighted_mean(|c| c.ndcg_at_5),
            precision_at_5: weighted_mean(|c| c.precision_at_5),
            precision_at_10: weighted_mean(|c| c.precision_at_10),
            recall_at_5: weighted_mean(|c| c.recall_at_5),
            recall_at_10: weighted_mean(|c| c.recall_at_10),
            mrr: weighted_mean(|c| c.mrr),
            map: weighted_mean(|c| c.map),
            n_cases: per_case.len(),
            ndcg_at_10_std: 0.0, // TODO: weighted std dev
            mrr_std: 0.0,
        }
    }
}

/// Metrics for a single evaluation case.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct CaseMetrics {
    pub ndcg_at_5: f64,
    pub ndcg_at_10: f64,
    pub precision_at_5: f64,
    pub precision_at_10: f64,
    pub recall_at_5: f64,
    pub recall_at_10: f64,
    pub mrr: f64,
    pub map: f64,
}

impl CaseMetrics {
    /// Compute all metrics for a single case.
    ///
    /// # Arguments
    ///
    /// * `ranking` - Our output ranking (file paths in order, best first)
    /// * `ground_truth` - Expected relevant files with weights: (file, relevance)
    /// * `relevance_threshold` - Minimum weight to count as "relevant" for binary metrics
    pub fn compute(
        ranking: &[String],
        ground_truth: &[(String, f64)],
        relevance_threshold: f64,
    ) -> Self {
        Self {
            ndcg_at_5: weighted_ndcg(ranking, ground_truth, 5),
            ndcg_at_10: weighted_ndcg(ranking, ground_truth, 10),
            precision_at_5: precision_at_k(ranking, ground_truth, 5, relevance_threshold),
            precision_at_10: precision_at_k(ranking, ground_truth, 10, relevance_threshold),
            recall_at_5: recall_at_k(ranking, ground_truth, 5, relevance_threshold),
            recall_at_10: recall_at_k(ranking, ground_truth, 10, relevance_threshold),
            mrr: mean_reciprocal_rank(ranking, ground_truth, relevance_threshold),
            map: mean_average_precision(ranking, ground_truth, relevance_threshold),
        }
    }
}

/// Normalized Discounted Cumulative Gain at position k.
///
/// NDCG handles **graded relevance**: files with higher coupling weight
/// contribute more to the score, and rank position matters (early = better).
///
/// ```text
/// DCG@k = Σᵢ (rel[i] / log₂(i + 2))  for i in 0..k
/// NDCG@k = DCG@k / IDCG@k
/// ```
///
/// Where IDCG is the ideal DCG (perfect ranking by relevance).
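///
/// For example, with ground truth `{a: 1.0, b: 0.5}` and ranking `[b, a]`:
///
/// ```text
/// DCG@2  = 0.5/log₂(2) + 1.0/log₂(3) ≈ 0.500 + 0.631 = 1.131
/// IDCG@2 = 1.0/log₂(2) + 0.5/log₂(3) ≈ 1.000 + 0.315 = 1.315
/// NDCG@2 = 1.131 / 1.315 ≈ 0.86
/// ```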
///
/// # Arguments
///
/// * `ranking` - Our output (file paths in ranked order)
/// * `ground_truth` - (file, relevance_weight) pairs from git oracle
/// * `k` - Cutoff position
///
/// # Returns
///
/// NDCG score in range [0.0, 1.0]. Higher is better.
pub fn weighted_ndcg(ranking: &[String], ground_truth: &[(String, f64)], k: usize) -> f64 {
    if ground_truth.is_empty() {
        return 0.0;
    }

    let truth_map: HashMap<_, _> = ground_truth.iter().cloned().collect();

    // DCG: sum of relevance / log₂(rank + 2)
    let dcg: f64 = ranking
        .iter()
        .take(k)
        .enumerate()
        .map(|(rank, file)| {
            let relevance = truth_map.get(file).copied().unwrap_or(0.0);
            relevance / (rank as f64 + 2.0).log2()
        })
        .sum();

    // Ideal DCG: sort ground truth by relevance descending
    let mut ideal_weights: Vec<_> = ground_truth.iter().map(|(_, w)| *w).collect();
    ideal_weights.sort_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));

    let idcg: f64 = ideal_weights
        .iter()
        .take(k)
        .enumerate()
        .map(|(rank, &rel)| rel / (rank as f64 + 2.0).log2())
        .sum();

    if idcg == 0.0 { 0.0 } else { dcg / idcg }
}

/// Precision at position k.
///
/// Fraction of top-k results that are relevant (binary relevance).
/// Files with weight >= threshold count as relevant.
///
/// ```text
/// P@k = |relevant ∩ top-k| / k
/// ```
///
/// If the ranking holds fewer than k items, the denominator is the ranking
/// length instead of k, so short rankings are not penalized for padding.
pub fn precision_at_k(
    ranking: &[String],
    ground_truth: &[(String, f64)],
    k: usize,
    threshold: f64,
) -> f64 {
    let relevant: std::collections::HashSet<_> = ground_truth
        .iter()
        .filter(|(_, w)| *w >= threshold)
        .map(|(f, _)| f.as_str())
        .collect();

    // Denominator is min(k, ranking length) so short rankings are not
    // penalized; also guards an empty ranking (would otherwise divide by zero).
    let denom = k.min(ranking.len());
    if relevant.is_empty() || denom == 0 {
        return 0.0;
    }

    let hits = ranking
        .iter()
        .take(k)
        .filter(|f| relevant.contains(f.as_str()))
        .count();

    hits as f64 / denom as f64
}

/// Recall at position k.
///
/// Fraction of relevant items that appear in top-k.
///
/// ```text
/// R@k = |relevant ∩ top-k| / |relevant|
/// ```
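///
/// For example, if 2 of 3 relevant files appear in the top-k, R@k = 2/3.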
pub fn recall_at_k(
    ranking: &[String],
    ground_truth: &[(String, f64)],
    k: usize,
    threshold: f64,
) -> f64 {
    let relevant: std::collections::HashSet<_> = ground_truth
        .iter()
        .filter(|(_, w)| *w >= threshold)
        .map(|(f, _)| f.as_str())
        .collect();

    if relevant.is_empty() {
        return 0.0;
    }

    let top_k: std::collections::HashSet<_> = ranking.iter().take(k).map(|f| f.as_str()).collect();

    let hits = relevant.intersection(&top_k).count();

    hits as f64 / relevant.len() as f64
}

/// Mean Reciprocal Rank.
///
/// Reciprocal of the rank of the first relevant item; per case this is the
/// plain reciprocal rank, and the "mean" is taken when aggregating over cases.
/// Measures how quickly we surface ANY relevant result.
///
/// ```text
/// MRR = 1 / rank_of_first_relevant
/// ```
///
/// If no relevant item in ranking, returns 0.
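///
/// For example, if the first relevant file appears at rank 3, the score is 1/3.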
pub fn mean_reciprocal_rank(
    ranking: &[String],
    ground_truth: &[(String, f64)],
    threshold: f64,
) -> f64 {
    let relevant: std::collections::HashSet<_> = ground_truth
        .iter()
        .filter(|(_, w)| *w >= threshold)
        .map(|(f, _)| f.as_str())
        .collect();

    for (rank, file) in ranking.iter().enumerate() {
        if relevant.contains(file.as_str()) {
            return 1.0 / (rank as f64 + 1.0);
        }
    }

    0.0
}

/// Mean Average Precision.
///
/// Average of the precision values at each relevant item's rank; per case
/// this is average precision (AP), and the "mean" is taken when aggregating
/// over cases. Measures overall ranking quality across all relevant items.
///
/// ```text
/// AP = (1/|relevant|) × Σᵢ P@i × rel(i)
/// ```
///
/// Where the sum is over all positions and rel(i) = 1 if item i is relevant.
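///
/// For example, ranking `[a, x, b]` with relevant set `{a, b}` has hits at
/// ranks 1 and 3, so AP = (1/1 + 2/3) / 2 ≈ 0.833.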
pub fn mean_average_precision(
    ranking: &[String],
    ground_truth: &[(String, f64)],
    threshold: f64,
) -> f64 {
    let relevant: std::collections::HashSet<_> = ground_truth
        .iter()
        .filter(|(_, w)| *w >= threshold)
        .map(|(f, _)| f.as_str())
        .collect();

    if relevant.is_empty() {
        return 0.0;
    }

    let mut relevant_seen = 0;
    let mut precision_sum = 0.0;

    for (rank, file) in ranking.iter().enumerate() {
        if relevant.contains(file.as_str()) {
            relevant_seen += 1;
            // Precision at this rank
            let precision = relevant_seen as f64 / (rank as f64 + 1.0);
            precision_sum += precision;
        }
    }

    precision_sum / relevant.len() as f64
}

// === Utility functions ===

fn mean(values: &[f64]) -> f64 {
    if values.is_empty() {
        return 0.0;
    }
    values.iter().sum::<f64>() / values.len() as f64
}

fn std_dev(values: &[f64]) -> f64 {
    if values.len() < 2 {
        return 0.0;
    }
    let m = mean(values);
    let variance = values.iter().map(|x| (x - m).powi(2)).sum::<f64>() / (values.len() - 1) as f64;
    variance.sqrt()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_ndcg_perfect_ranking() {
        // Perfect ranking: items in order of relevance
        let ranking = vec!["a".to_string(), "b".to_string(), "c".to_string()];
        let truth = vec![
            ("a".to_string(), 1.0),
            ("b".to_string(), 0.5),
            ("c".to_string(), 0.25),
        ];

        let ndcg = weighted_ndcg(&ranking, &truth, 3);
        assert!(
            (ndcg - 1.0).abs() < 1e-6,
            "Perfect ranking should have NDCG=1.0, got {}",
            ndcg
        );
    }

    #[test]
    fn test_ndcg_reversed_ranking() {
        // Worst ranking: items in reverse order
        let ranking = vec!["c".to_string(), "b".to_string(), "a".to_string()];
        let truth = vec![
            ("a".to_string(), 1.0),
            ("b".to_string(), 0.5),
            ("c".to_string(), 0.25),
        ];

        let ndcg = weighted_ndcg(&ranking, &truth, 3);
        assert!(
            ndcg < 1.0,
            "Reversed ranking should have NDCG < 1.0, got {}",
            ndcg
        );
        assert!(ndcg > 0.0, "Reversed ranking should have NDCG > 0.0");
    }

    #[test]
    fn test_ndcg_partial_match() {
        // Only some items in ranking
        let ranking = vec!["a".to_string(), "x".to_string(), "y".to_string()];
        let truth = vec![("a".to_string(), 1.0), ("b".to_string(), 0.5)];

        let ndcg = weighted_ndcg(&ranking, &truth, 3);
        assert!(ndcg > 0.0, "Should get credit for 'a'");
        assert!(ndcg < 1.0, "Missing 'b' should hurt score");
    }

    #[test]
    fn test_precision_at_k() {
        let ranking = vec![
            "a".to_string(),
            "b".to_string(),
            "x".to_string(),
            "c".to_string(),
            "y".to_string(),
        ];
        let truth = vec![
            ("a".to_string(), 1.0),
            ("b".to_string(), 0.8),
            ("c".to_string(), 0.6),
        ];

        // P@2 = 2/2 (both a and b are relevant)
        let p2 = precision_at_k(&ranking, &truth, 2, 0.5);
        assert!((p2 - 1.0).abs() < 1e-6, "P@2 should be 1.0, got {}", p2);

        // P@3 = 2/3 (a, b relevant, x not)
        let p3 = precision_at_k(&ranking, &truth, 3, 0.5);
        assert!(
            (p3 - 2.0 / 3.0).abs() < 1e-6,
            "P@3 should be 0.667, got {}",
            p3
        );

        // P@5 = 3/5 (a, b, c relevant)
        let p5 = precision_at_k(&ranking, &truth, 5, 0.5);
        assert!((p5 - 0.6).abs() < 1e-6, "P@5 should be 0.6, got {}", p5);
    }

    #[test]
    fn test_recall_at_k() {
        let ranking = vec!["a".to_string(), "x".to_string(), "b".to_string()];
        let truth = vec![
            ("a".to_string(), 1.0),
            ("b".to_string(), 0.8),
            ("c".to_string(), 0.6), // not in ranking
        ];

        // R@1 = 1/3 (only a in top-1)
        let r1 = recall_at_k(&ranking, &truth, 1, 0.5);
        assert!(
            (r1 - 1.0 / 3.0).abs() < 1e-6,
            "R@1 should be 0.333, got {}",
            r1
        );

        // R@3 = 2/3 (a, b in top-3, c missing)
        let r3 = recall_at_k(&ranking, &truth, 3, 0.5);
        assert!(
            (r3 - 2.0 / 3.0).abs() < 1e-6,
            "R@3 should be 0.667, got {}",
            r3
        );
    }

    #[test]
    fn test_mrr() {
        // Relevant item at rank 3 (0-indexed: 2)
        let ranking = vec!["x".to_string(), "y".to_string(), "a".to_string()];
        let truth = vec![("a".to_string(), 1.0)];

        let mrr = mean_reciprocal_rank(&ranking, &truth, 0.5);
        assert!(
            (mrr - 1.0 / 3.0).abs() < 1e-6,
            "MRR should be 0.333, got {}",
            mrr
        );
    }

    #[test]
    fn test_mrr_first_position() {
        let ranking = vec!["a".to_string(), "x".to_string()];
        let truth = vec![("a".to_string(), 1.0)];

        let mrr = mean_reciprocal_rank(&ranking, &truth, 0.5);
        assert!(
            (mrr - 1.0).abs() < 1e-6,
            "MRR should be 1.0 when first is relevant"
        );
    }

    #[test]
    fn test_map() {
        // a at rank 1, b at rank 3
        let ranking = vec![
            "a".to_string(),
            "x".to_string(),
            "b".to_string(),
            "y".to_string(),
        ];
        let truth = vec![("a".to_string(), 1.0), ("b".to_string(), 0.8)];

        // At rank 1: P=1/1=1.0, At rank 3: P=2/3
        // MAP = (1.0 + 0.667) / 2 = 0.833
        let map = mean_average_precision(&ranking, &truth, 0.5);
        let expected = (1.0 + 2.0 / 3.0) / 2.0;
        assert!(
            (map - expected).abs() < 1e-6,
            "MAP should be {}, got {}",
            expected,
            map
        );
    }

    #[test]
    fn test_case_metrics() {
        let ranking = vec![
            "a".to_string(),
            "b".to_string(),
            "x".to_string(),
            "c".to_string(),
        ];
        let truth = vec![
            ("a".to_string(), 1.0),
            ("b".to_string(), 0.8),
            ("c".to_string(), 0.6),
        ];

        let metrics = CaseMetrics::compute(&ranking, &truth, 0.5);

        assert!(metrics.ndcg_at_5 > 0.9, "NDCG@5 should be high");
        assert!(metrics.precision_at_5 > 0.5, "P@5 should be decent");
        assert!(metrics.mrr > 0.9, "MRR should be ~1.0");
    }

    #[test]
    fn test_aggregate() {
        let cases = vec![
            CaseMetrics {
                ndcg_at_10: 0.8,
                ndcg_at_5: 0.9,
                precision_at_5: 0.6,
                precision_at_10: 0.5,
                recall_at_5: 0.4,
                recall_at_10: 0.6,
                mrr: 1.0,
                map: 0.7,
            },
            CaseMetrics {
                ndcg_at_10: 0.6,
                ndcg_at_5: 0.7,
                precision_at_5: 0.4,
                precision_at_10: 0.3,
                recall_at_5: 0.2,
                recall_at_10: 0.4,
                mrr: 0.5,
                map: 0.5,
            },
        ];

        let agg = EvalMetrics::aggregate(&cases);

        assert!((agg.ndcg_at_10 - 0.7).abs() < 1e-6);
        assert!((agg.mrr - 0.75).abs() < 1e-6);
        assert_eq!(agg.n_cases, 2);
    }

    #[test]
    fn test_std_dev() {
        // 0, 10 -> mean 5, std dev ~7.07
        let values = vec![0.0, 10.0];
        let sd = std_dev(&values);
        assert!(
            (sd - 7.071).abs() < 0.01,
            "Std dev should be ~7.07, got {}",
            sd
        );
    }
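
    // Weighted aggregation: a zero-weight case should not move the means.
    // Illustrative check with arbitrary values.
    #[test]
    fn test_aggregate_weighted_ignores_zero_weight() {
        let good = CaseMetrics {
            ndcg_at_10: 0.8,
            mrr: 1.0,
            ..Default::default()
        };
        let noise = CaseMetrics::default();

        let agg = EvalMetrics::aggregate_weighted(&[(good, 1.0), (noise, 0.0)]);

        assert!((agg.ndcg_at_10 - 0.8).abs() < 1e-6);
        assert!((agg.mrr - 1.0).abs() < 1e-6);
        assert_eq!(agg.n_cases, 2);
    }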
}