// nypc_perf/lib.rs
//! A library for calculating player performance ratings based on battle results.
//!
//! This library implements a Bradley-Terry model based performance system that estimates player
//! performance levels from head-to-head battle outcomes. The algorithm uses Newton-Raphson
//! iteration to find maximum likelihood estimates of player ratings that best explain the
//! observed win/loss patterns.
//!
//! # Usage
//!
//! ```rust
//! use nypc_perf::{BattleResult, PerfCalc, Rating};
//!
//! // Create battle results between players
//! let battles = vec![
//!     BattleResult {
//!         i: 0,     // Player 0
//!         j: 1,     // Player 1
//!         wij: 2.0, // Player 0 won twice against Player 1
//!         wji: 1.0, // Player 1 won once against Player 0
//!     },
//!     BattleResult {
//!         i: 0,
//!         j: 2,
//!         wij: 1.0,
//!         wji: 0.0,
//!     },
//! ];
//!
//! // Initialize performance ratings to 0
//! let mut perf = vec![Rating::new(0.0), Rating::new(0.0), Rating::new_fixed(0.0)];
//!
//! // Run the rating calculation
//! let result = PerfCalc::new()
//!     .max_iters(100) // Maximum iterations (default: 100)
//!     .epsilon(1e-6)  // Convergence threshold (default: 1e-6)
//!     .run(&mut perf, &battles);
//!
//! // Check if the calculation converged
//! match result {
//!     Ok(iters) => println!("Converged after {} iterations", iters),
//!     Err(err) => println!("Did not converge, final error: {}", err),
//! }
//!
//! // perf now contains the estimated log-performance ratings.
//! // Higher values indicate better performance.
//! ```
//!
//! # Mathematical Model
//!
//! The library uses the Bradley-Terry model. Under this model, the probability of
//! player i winning against player j is:
//!
//! P(i beats j) = 1 / (1 + exp(βⱼ - βᵢ))
//!
//! where βᵢ is the log-performance rating of player i.
//!
//! The algorithm finds the values of βᵢ that maximize the likelihood of the observed
//! battle outcomes, with a normal prior. For more details on the mathematical foundations
//! and implementation, see `docs/rating.pdf`.
//!
//! # Performance
//!
//! The algorithm typically converges within 10-20 iterations for moderately sized problems
//! (hundreds of players, tens of thousands of battles).

/// Represents the outcome of battles between two players.
/// `wij` is the number of wins of `i` over `j`; `wji` is the number of wins of `j` over `i`.
/// The scale matters, since we have the normal prior: a larger win count acts as a
/// stronger observation relative to the prior.
#[derive(Debug, Clone, Copy)]
pub struct BattleResult {
    /// Index of the first player.
    pub i: usize,
    /// Index of the second player.
    pub j: usize,
    /// Number of wins of i over j
    pub wij: f64,
    /// Number of wins of j over i
    pub wji: f64,
}
77
/// Represents a player's rating.
/// If fixed, the value is fixed and cannot be changed.
/// If not fixed, the value is a variable and can be changed.
/// The value is the log-performance rating of the player.
#[derive(Debug, Clone, Copy)]
pub struct Rating {
    /// When true, the solver must leave `value` untouched.
    pub fixed: bool,
    /// Log-performance rating (β) of the player.
    pub value: f64,
}
87
88impl Rating {
89 pub fn new(value: f64) -> Self {
90 Self {
91 fixed: false,
92 value,
93 }
94 }
95 pub fn new_fixed(value: f64) -> Self {
96 Self { fixed: true, value }
97 }
98}
99
100/// Calculates new beta values based on current beta values and battle results.
101///
102/// # Arguments
103/// * `beta` - Current log(performance π_i) values
104/// * `battles` - Iterator of battle results
105///
106/// # Returns
107/// Vector of new beta values after one Newton-Raphson iteration
108fn iterate(beta: &[Rating], battles: impl IntoIterator<Item = BattleResult>) -> Vec<Rating> {
109 let n = beta.len();
110 let mut f = vec![0.0; n];
111 let mut df = vec![0.0; n];
112
113 // Pre-calculate sums for each player
114 for BattleResult { i, j, wij, wji } in battles {
115 let eji = (beta[j].value - beta[i].value).exp();
116 let eij = (beta[i].value - beta[j].value).exp();
117 f[i] += (eji * wij - wji) / (1.0 + eji);
118 df[i] += (eji * (wij + wji)) / ((1.0 + eji) * (1.0 + eji));
119 f[j] += (eij * wji - wij) / (1.0 + eij);
120 df[j] += (eij * (wji + wij)) / ((1.0 + eij) * (1.0 + eij));
121 }
122
123 // Apply Newton-Raphson iteration for each player
124 beta.iter()
125 .zip(f.iter().zip(&df))
126 .map(|(b, (f, df))| Rating {
127 fixed: b.fixed,
128 value: if b.fixed {
129 b.value
130 } else {
131 b.value - (b.value - f) / (1.0 + df)
132 },
133 })
134 .collect()
135}
136
/// Builder for configuring and running the performance calculation algorithm.
/// This builder is used to configure the number of iterations and the convergence threshold.
/// The default settings are:
/// - Maximum iterations: 100
/// - Convergence threshold: 1e-6
///
/// # Example
///
/// ```rust
/// use nypc_perf::{BattleResult, PerfCalc, Rating};
/// let mut perf = vec![Rating::new(0.0); 2];
/// let battles = vec![
///     BattleResult {
///         i: 0,
///         j: 1,
///         wij: 2.0,
///         wji: 1.0,
///     }
/// ];
///
/// let calc = PerfCalc::new().max_iters(100).epsilon(1e-6);
/// calc.run(&mut perf, &battles);
/// ```
#[derive(Debug, Clone, Copy)]
pub struct PerfCalc {
    /// Maximum number of Newton-Raphson iterations to attempt.
    num_iters: usize,
    /// Convergence threshold on the largest per-player rating change.
    eps: f64,
}
165
impl Default for PerfCalc {
    /// Equivalent to [`PerfCalc::new`]: 100 iterations, threshold 1e-6.
    fn default() -> Self {
        Self::new()
    }
}
171
172impl PerfCalc {
173 /// Creates a new `PerfCalc` with default settings.
174 ///
175 /// The default settings are:
176 /// - Maximum iterations: 100
177 /// - Convergence threshold: 1e-6
178 pub fn new() -> Self {
179 Self {
180 num_iters: 100,
181 eps: 1e-6,
182 }
183 }
184
185 /// Sets the maximum number of iterations.
186 pub fn max_iters(mut self, max_iterations: usize) -> Self {
187 self.num_iters = max_iterations;
188 self
189 }
190
191 /// Sets the convergence threshold.
192 ///
193 /// ```rust
194 /// use nypc_perf::PerfCalc;
195 ///
196 /// let calc = PerfCalc::new().epsilon(1e-6);
197 /// ```
198 pub fn epsilon(mut self, epsilon: f64) -> Self {
199 self.eps = epsilon;
200 self
201 }
202
203 /// Runs the iterative algorithm to calculate performance ratings.
204 ///
205 /// # Arguments
206 /// * `perf` - Current performance rating values
207 /// * `battles` - Iterator of battle results
208 ///
209 /// # Returns
210 /// Ok(iterations) if convergence is reached within max_iterations,
211 /// or Err(final_error) if not converged.
212 pub fn run(self, perf: &mut [Rating], battles: &[BattleResult]) -> Result<usize, f64> {
213 let mut err = f64::NAN;
214 for i in 0..self.num_iters {
215 let new_perf = iterate(perf, battles.iter().copied());
216
217 err = new_perf
218 .iter()
219 .zip(perf.iter())
220 .map(|(a, b)| (a.value - b.value).abs())
221 .max_by(|a, b| a.partial_cmp(b).unwrap())
222 .unwrap();
223 perf.copy_from_slice(&new_perf);
224
225 if err < self.eps {
226 return Ok(i + 1);
227 }
228 }
229 Err(err)
230 }
231}
232
#[cfg(test)]
mod tests {
    use rand::{Rng, SeedableRng};

    use super::*;

    /// Small hand-checked instance with one fixed anchor player.
    #[test]
    fn test_simple() {
        let battles = vec![
            BattleResult {
                i: 0,
                j: 1,
                wij: 2.0,
                wji: 1.0,
            },
            BattleResult {
                i: 0,
                j: 2,
                wij: 1.0,
                wji: 0.0,
            },
        ];

        let mut perf = vec![Rating::new(0.0), Rating::new(0.0), Rating::new_fixed(0.0)];
        let res = PerfCalc::new().epsilon(1e-6).run(&mut perf, &battles);
        assert!(res.is_ok());
        assert!((perf[0].value - 0.473034477).abs() < 1e-6);
        assert!((perf[1].value - (-0.0891364)).abs() < 1e-6);
        // The fixed anchor must not move.
        assert!((perf[2].value - 0.0).abs() < 1e-6);
    }

    /// Splitting a pairing into many fractional battles must converge to the
    /// same ratings as a single merged battle with the summed win counts.
    #[test]
    fn test_coalesce() {
        let mut rng = rand::rngs::StdRng::seed_from_u64(2025);
        const N: usize = 10;
        let mut battles = vec![];
        let mut battles_merge = vec![];
        for i in 0..N {
            for j in 0..i {
                let mut wij = 0.0;
                let mut wji = 0.0;
                for _ in 0..5 {
                    let s = rng.random_bool(0.5);
                    let p = rng.random_range(0..=2) as f64 / 2.0;
                    if s {
                        wij += p;
                        wji += 1.0 - p;
                        battles.push(BattleResult {
                            i,
                            j,
                            wij: p,
                            wji: 1.0 - p,
                        });
                    } else {
                        wij += 1.0 - p;
                        wji += p;
                        // Same outcome recorded from j's perspective.
                        battles.push(BattleResult {
                            j,
                            i,
                            wij: 1.0 - p,
                            wji: p,
                        });
                    }
                }
                battles_merge.push(BattleResult { i, j, wij, wji });
            }
        }

        let mut perf = vec![Rating::new(0.0); N];
        eprintln!(
            "Convergence (Non-merged): {:?}",
            PerfCalc::new()
                .epsilon(1e-3)
                .run(&mut perf, &battles)
                .unwrap()
        );

        let mut perf_merge = vec![Rating::new(0.0); N];
        eprintln!(
            "Convergence (Merged): {:?}",
            PerfCalc::new()
                .epsilon(1e-3)
                .run(&mut perf_merge, &battles_merge)
                .unwrap()
        );

        for (p, q) in perf.iter().zip(perf_merge.iter()) {
            assert!((p.value - q.value).abs() < 1e-6);
        }
    }

    /// End-to-end recovery test: sample battles from known ratings and check
    /// that the estimates land close to the ground truth.
    #[test]
    fn test_full() {
        let mut rng = rand::rngs::StdRng::seed_from_u64(2025);
        const N: usize = 100;
        const B: usize = 100_000;
        const M: u64 = 10;
        let real_perf = (0..N)
            .map(|_| rng.sample(rand_distr::StandardNormal))
            .collect::<Vec<f64>>();
        let mut battles = vec![];
        for b in 0..B {
            let i = b % N;
            // Pick an opponent j != i uniformly.
            let mut j = rng.random_range(0..N - 1);
            if j >= i {
                j += 1;
            }
            // Bradley-Terry win probability of i over j (logistic of the gap).
            let p_win = 1.0 / (1.0 + (real_perf[j] - real_perf[i]).exp());
            let wij = rng.sample(rand_distr::Binomial::new(M, p_win).unwrap()) as f64;
            let wji = M as f64 - wij;
            battles.push(BattleResult { i, j, wij, wji });
        }

        let mut perf = vec![Rating::new(0.0); N];

        let start = std::time::Instant::now();
        let res = PerfCalc::new()
            .epsilon(1e-4)
            .run(&mut perf, &battles)
            .unwrap();
        let duration = start.elapsed();
        eprintln!("Convergence: {:?}", res);
        eprintln!("Time: {:?}", duration);

        // Root-mean-square error between real and estimated performance.
        let rmse = (real_perf
            .iter()
            .zip(perf.iter())
            .map(|(r, p)| (r - p.value).powi(2))
            .sum::<f64>()
            / N as f64)
            .sqrt();
        let mx_diff = real_perf
            .iter()
            .zip(perf.iter())
            .map(|(r, p)| (r - p.value).abs())
            .max_by(|a, b| a.total_cmp(b))
            .unwrap();
        eprintln!("RMSE: {}", rmse);
        eprintln!("Max diff: {}", mx_diff);
        assert!(rmse < 0.2 && mx_diff < 0.4);
    }
}
375}