mod erf;
mod factor_graph;
mod gaussian;
mod math;
mod matrix;
mod schedule;
mod weights;
use std::cell::RefCell;
use std::rc::Rc;
use factor_graph::Variable;
use matrix::Matrix;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use crate::trueskill::erf::{cdf, inverse_cdf};
use crate::trueskill::math::{new_rating, new_uncertainty, v_draw, v_non_draw, w_draw, w_non_draw};
use crate::trueskill::schedule::run_schedule;
use crate::trueskill::weights::get_weights;
use crate::{weng_lin::WengLinRating, Outcomes};
use crate::{
MultiTeamOutcome, MultiTeamRatingSystem, Rating, RatingPeriodSystem, RatingSystem,
TeamRatingSystem,
};
pub use crate::trueskill::weights::WeightError;
/// Convergence threshold for the factor-graph message-passing schedule in
/// [`trueskill_multi_team`]; passed to `run_schedule` as its stop criterion.
const MIN_DELTA: f64 = 0.0001;
/// A player's TrueSkill rating: a mean skill estimate together with its
/// standard deviation. New players start at 25.0 and 25/3
/// (see [`TrueSkillRating::new`]).
#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct TrueSkillRating {
    /// The mean skill estimate (commonly called mu).
    pub rating: f64,
    /// The standard deviation of the skill estimate (commonly called sigma).
    pub uncertainty: f64,
}
impl TrueSkillRating {
    /// Creates a rating at the conventional TrueSkill starting point:
    /// a mean of 25.0 with an uncertainty of 25/3.
    #[must_use]
    pub fn new() -> Self {
        let (rating, uncertainty) = (25.0, 25.0 / 3.0);
        Self { rating, uncertainty }
    }
}
impl Default for TrueSkillRating {
fn default() -> Self {
Self::new()
}
}
impl Rating for TrueSkillRating {
    /// The mean skill estimate.
    fn rating(&self) -> f64 {
        self.rating
    }
    /// The uncertainty; always present for TrueSkill.
    fn uncertainty(&self) -> Option<f64> {
        Some(self.uncertainty)
    }
    /// Builds a rating, substituting the defaults (25.0 and 25/3) for
    /// any component given as `None`.
    fn new(rating: Option<f64>, uncertainty: Option<f64>) -> Self {
        let rating = rating.unwrap_or(25.0);
        let uncertainty = uncertainty.unwrap_or(25.0 / 3.0);
        Self { rating, uncertainty }
    }
}
impl From<(f64, f64)> for TrueSkillRating {
    /// Interprets a tuple as `(rating, uncertainty)`.
    fn from((rating, uncertainty): (f64, f64)) -> Self {
        Self { rating, uncertainty }
    }
}
impl From<WengLinRating> for TrueSkillRating {
    /// Carries over a Weng-Lin rating's mean and uncertainty unchanged.
    fn from(weng_lin: WengLinRating) -> Self {
        Self {
            rating: weng_lin.rating,
            uncertainty: weng_lin.uncertainty,
        }
    }
}
/// Tunable constants used by every TrueSkill calculation in this module.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct TrueSkillConfig {
    /// The probability of a game ending in a draw (default 0.1); feeds the
    /// draw margin via `draw_margin`.
    pub draw_probability: f64,
    /// Scale of per-game performance variability around a player's skill;
    /// appears as `beta^2` throughout the formulas (default (25/3) * 0.5).
    pub beta: f64,
    /// Additive dynamics term (often called tau) passed to every
    /// `new_rating`/`new_uncertainty` call (default 25/300).
    pub dynamics_factor: f64,
}
impl TrueSkillConfig {
    /// Creates a config with the conventional defaults: draw probability
    /// 0.1, beta = (25/3) * 0.5, dynamics factor 25/300.
    #[must_use]
    pub fn new() -> Self {
        Self {
            dynamics_factor: 25.0 / 300.0,
            beta: (25.0 / 3.0) * 0.5,
            draw_probability: 0.1,
        }
    }
}
impl Default for TrueSkillConfig {
fn default() -> Self {
Self::new()
}
}
/// Trait-based entry point for the TrueSkill algorithm; holds the
/// [`TrueSkillConfig`] shared by all of its trait implementations below.
pub struct TrueSkill {
    config: TrueSkillConfig,
}
impl RatingSystem for TrueSkill {
    type RATING = TrueSkillRating;
    type CONFIG = TrueSkillConfig;
    fn new(config: Self::CONFIG) -> Self {
        Self { config }
    }
    /// Delegates to the free function [`trueskill`] for a 1-vs-1 game.
    fn rate(
        &self,
        player_one: &TrueSkillRating,
        player_two: &TrueSkillRating,
        outcome: &Outcomes,
    ) -> (TrueSkillRating, TrueSkillRating) {
        trueskill(player_one, player_two, outcome, &self.config)
    }
    /// Delegates to the free function [`expected_score`].
    fn expected_score(
        &self,
        player_one: &TrueSkillRating,
        player_two: &TrueSkillRating,
    ) -> (f64, f64) {
        expected_score(player_one, player_two, &self.config)
    }
}
impl RatingPeriodSystem for TrueSkill {
    type RATING = TrueSkillRating;
    type CONFIG = TrueSkillConfig;
    fn new(config: Self::CONFIG) -> Self {
        Self { config }
    }
    /// Delegates to [`trueskill_rating_period`]: applies the listed game
    /// results to `player` sequentially.
    fn rate(
        &self,
        player: &TrueSkillRating,
        results: &[(TrueSkillRating, Outcomes)],
    ) -> TrueSkillRating {
        trueskill_rating_period(player, results, &self.config)
    }
    /// Delegates to [`expected_score_rating_period`]: one win probability
    /// per opponent.
    fn expected_score(&self, player: &Self::RATING, opponents: &[Self::RATING]) -> Vec<f64> {
        expected_score_rating_period(player, opponents, &self.config)
    }
}
impl TeamRatingSystem for TrueSkill {
    type RATING = TrueSkillRating;
    type CONFIG = TrueSkillConfig;
    fn new(config: Self::CONFIG) -> Self {
        Self { config }
    }
    /// Delegates to [`trueskill_two_teams`]; `outcome` is from
    /// `team_one`'s perspective.
    fn rate(
        &self,
        team_one: &[TrueSkillRating],
        team_two: &[TrueSkillRating],
        outcome: &Outcomes,
    ) -> (Vec<TrueSkillRating>, Vec<TrueSkillRating>) {
        trueskill_two_teams(team_one, team_two, outcome, &self.config)
    }
    /// Delegates to [`expected_score_two_teams`].
    fn expected_score(&self, team_one: &[Self::RATING], team_two: &[Self::RATING]) -> (f64, f64) {
        expected_score_two_teams(team_one, team_two, &self.config)
    }
}
impl MultiTeamRatingSystem for TrueSkill {
    type RATING = TrueSkillRating;
    type CONFIG = TrueSkillConfig;
    fn new(config: Self::CONFIG) -> Self {
        Self { config }
    }
    /// Delegates to [`trueskill_multi_team`] with no per-player weights.
    /// On a [`WeightError`] the inputs are returned unchanged; with
    /// `weights = None` that fallback presumably can never trigger —
    /// NOTE(review): confirm against `weights::get_weights`.
    fn rate(
        &self,
        teams_and_ranks: &[(&[Self::RATING], MultiTeamOutcome)],
    ) -> Vec<Vec<TrueSkillRating>> {
        trueskill_multi_team(teams_and_ranks, &self.config, None)
            .unwrap_or_else(|_| teams_and_ranks.iter().map(|(t, _)| t.to_vec()).collect())
    }
    /// Delegates to [`expected_score_multi_team`].
    fn expected_score(&self, teams: &[&[Self::RATING]]) -> Vec<f64> {
        expected_score_multi_team(teams, &self.config)
    }
}
/// Calculates the new [`TrueSkillRating`]s of two players based on their
/// old ratings and the game's outcome, seen from `player_one`'s
/// perspective. Returns the updated `(player_one, player_two)` pair.
///
/// NOTE(review): the floating-point expressions (including the `mul_add`
/// chains) are deliberately left exactly as written; restructuring them
/// would change results at the last bit.
#[must_use]
pub fn trueskill(
    player_one: &TrueSkillRating,
    player_two: &TrueSkillRating,
    outcome: &Outcomes,
    config: &TrueSkillConfig,
) -> (TrueSkillRating, TrueSkillRating) {
    // Draw margin for a total of 2 players.
    let draw_margin = draw_margin(config.draw_probability, config.beta, 2.0);
    // c = sqrt(2*beta^2 + sigma1^2 + sigma2^2): combined standard
    // deviation of the performance difference.
    let c = 2.0f64
        .mul_add(
            config.beta.powi(2),
            player_one
                .uncertainty
                .mul_add(player_one.uncertainty, player_two.uncertainty.powi(2)),
        )
        .sqrt();
    // Oriented so the non-losing side's lead is positive (DRAW is grouped
    // with WIN here).
    let rating_delta = match outcome {
        Outcomes::WIN | Outcomes::DRAW => player_one.rating - player_two.rating,
        Outcomes::LOSS => player_two.rating - player_one.rating,
    };
    // v (mean-update factor) and w (variance-update factor); draws use the
    // draw-specific variants from the math module.
    let (v, w) = if outcome == &Outcomes::DRAW {
        (
            v_draw(rating_delta, draw_margin, c),
            w_draw(rating_delta, draw_margin, c),
        )
    } else {
        (
            v_non_draw(rating_delta, draw_margin, c),
            w_non_draw(rating_delta, draw_margin, c),
        )
    };
    // Winner's rating moves up (+1), loser's down (-1); a draw uses the
    // same signs as a player-one win.
    let (rank_multiplier1, rank_multiplier2) = match outcome {
        Outcomes::WIN | Outcomes::DRAW => (1.0, -1.0),
        Outcomes::LOSS => (-1.0, 1.0),
    };
    let new_rating1 = new_rating(
        player_one.rating,
        player_one.uncertainty,
        v,
        c,
        config.dynamics_factor,
        rank_multiplier1,
    );
    let new_rating2 = new_rating(
        player_two.rating,
        player_two.uncertainty,
        v,
        c,
        config.dynamics_factor,
        rank_multiplier2,
    );
    // Both uncertainties shrink via the shared w; see math::new_uncertainty.
    let new_uncertainty1 = new_uncertainty(player_one.uncertainty, c, w, config.dynamics_factor);
    let new_uncertainty2 = new_uncertainty(player_two.uncertainty, c, w, config.dynamics_factor);
    (
        TrueSkillRating {
            rating: new_rating1,
            uncertainty: new_uncertainty1,
        },
        TrueSkillRating {
            rating: new_rating2,
            uncertainty: new_uncertainty2,
        },
    )
}
/// Applies a whole rating period's worth of 1-vs-1 results to `player`,
/// updating sequentially: each game starts from the rating and uncertainty
/// produced by the previous one. Opponents' ratings are read but not
/// returned.
///
/// NOTE(review): floating-point expressions kept exactly as written to
/// preserve bit-identical results.
#[must_use]
pub fn trueskill_rating_period(
    player: &TrueSkillRating,
    results: &[(TrueSkillRating, Outcomes)],
    config: &TrueSkillConfig,
) -> TrueSkillRating {
    let mut player_rating = player.rating;
    let mut player_uncertainty = player.uncertainty;
    // Each game is 1-vs-1, so the draw margin is fixed at 2 players for
    // the entire period.
    let draw_margin = draw_margin(config.draw_probability, config.beta, 2.0);
    for (opponent, result) in results {
        // c recomputed per game: the player's uncertainty changes as
        // results are applied.
        let c = 2.0f64
            .mul_add(
                config.beta.powi(2),
                player_uncertainty.mul_add(player_uncertainty, opponent.uncertainty.powi(2)),
            )
            .sqrt();
        // Oriented so the non-losing side's lead is positive.
        let rating_delta = match result {
            Outcomes::WIN | Outcomes::DRAW => player_rating - opponent.rating,
            Outcomes::LOSS => opponent.rating - player_rating,
        };
        let (v, w) = if result == &Outcomes::DRAW {
            (
                v_draw(rating_delta, draw_margin, c),
                w_draw(rating_delta, draw_margin, c),
            )
        } else {
            (
                v_non_draw(rating_delta, draw_margin, c),
                w_non_draw(rating_delta, draw_margin, c),
            )
        };
        // Only the player's side of the update is applied here.
        let rank_multiplier = match result {
            Outcomes::WIN | Outcomes::DRAW => 1.0,
            Outcomes::LOSS => -1.0,
        };
        player_rating = new_rating(
            player_rating,
            player_uncertainty,
            v,
            c,
            config.dynamics_factor,
            rank_multiplier,
        );
        player_uncertainty = new_uncertainty(player_uncertainty, c, w, config.dynamics_factor);
    }
    TrueSkillRating {
        rating: player_rating,
        uncertainty: player_uncertainty,
    }
}
/// Calculates the new ratings of two teams based on their players' ratings
/// and the outcome of the game, seen from `team_one`'s perspective.
///
/// If either team is empty, both teams are returned unchanged.
///
/// NOTE(review): floating-point expressions kept exactly as written to
/// preserve bit-identical results.
#[must_use]
pub fn trueskill_two_teams(
    team_one: &[TrueSkillRating],
    team_two: &[TrueSkillRating],
    outcome: &Outcomes,
    config: &TrueSkillConfig,
) -> (Vec<TrueSkillRating>, Vec<TrueSkillRating>) {
    // Degenerate input: an empty team cannot be rated.
    if team_one.is_empty() || team_two.is_empty() {
        return (team_one.to_vec(), team_two.to_vec());
    }
    let total_players = (team_one.len() + team_two.len()) as f64;
    // The draw margin scales with the total player count.
    let draw_margin = draw_margin(config.draw_probability, config.beta, total_players);
    // Team skill = sum of member means; team variance = sum of member
    // variances.
    let rating_one_sum: f64 = team_one.iter().map(|p| p.rating).sum();
    let rating_two_sum: f64 = team_two.iter().map(|p| p.rating).sum();
    let uncertainty_one_sum: f64 = team_one.iter().map(|p| p.uncertainty.powi(2)).sum();
    let uncertainty_two_sum: f64 = team_two.iter().map(|p| p.uncertainty.powi(2)).sum();
    // c = sqrt(n*beta^2 + sum of all sigma^2).
    let c = total_players
        .mul_add(
            config.beta.powi(2),
            uncertainty_one_sum + uncertainty_two_sum,
        )
        .sqrt();
    // Oriented so the non-losing side's lead is positive (DRAW ~ WIN).
    let rating_delta = match outcome {
        Outcomes::WIN | Outcomes::DRAW => rating_one_sum - rating_two_sum,
        Outcomes::LOSS => rating_two_sum - rating_one_sum,
    };
    // The v/w factors are computed once per game and shared by every
    // member of both teams.
    let (v, w) = if outcome == &Outcomes::DRAW {
        (
            v_draw(rating_delta, draw_margin, c),
            w_draw(rating_delta, draw_margin, c),
        )
    } else {
        (
            v_non_draw(rating_delta, draw_margin, c),
            w_non_draw(rating_delta, draw_margin, c),
        )
    };
    let (rank_multiplier1, rank_multiplier2) = match outcome {
        Outcomes::WIN | Outcomes::DRAW => (1.0, -1.0),
        Outcomes::LOSS => (-1.0, 1.0),
    };
    // Apply the shared factors to each member individually; each player's
    // own uncertainty determines how far they move.
    let mut new_team_one = Vec::new();
    let mut new_team_two = Vec::new();
    for player in team_one {
        let new_rating = new_rating(
            player.rating,
            player.uncertainty,
            v,
            c,
            config.dynamics_factor,
            rank_multiplier1,
        );
        let new_uncertainty = new_uncertainty(player.uncertainty, c, w, config.dynamics_factor);
        new_team_one.push(TrueSkillRating {
            rating: new_rating,
            uncertainty: new_uncertainty,
        });
    }
    for player in team_two {
        let new_rating = new_rating(
            player.rating,
            player.uncertainty,
            v,
            c,
            config.dynamics_factor,
            rank_multiplier2,
        );
        let new_uncertainty = new_uncertainty(player.uncertainty, c, w, config.dynamics_factor);
        new_team_two.push(TrueSkillRating {
            rating: new_rating,
            uncertainty: new_uncertainty,
        });
    }
    (new_team_one, new_team_two)
}
/// Calculates new ratings for any number of teams, ranked by their
/// [`MultiTeamOutcome`], using a factor graph and message passing.
///
/// `weights` optionally supplies one weight per player (same shape as the
/// teams); `None` presumably means all players are weighted equally —
/// NOTE(review): confirm against `weights::get_weights`.
///
/// # Errors
/// Returns a [`WeightError`] if `get_weights` rejects the given weights.
#[allow(clippy::too_many_lines)]
pub fn trueskill_multi_team(
    teams_and_ranks: &[(&[TrueSkillRating], MultiTeamOutcome)],
    config: &TrueSkillConfig,
    weights: Option<&[&[f64]]>,
) -> Result<Vec<Vec<TrueSkillRating>>, WeightError> {
    // No teams: nothing to rate.
    if teams_and_ranks.is_empty() {
        return Ok(Vec::new());
    }
    // Any empty team: return every input unchanged.
    for (team, _) in teams_and_ranks {
        if team.is_empty() {
            return Ok(teams_and_ranks
                .iter()
                .map(|(team, _)| team.to_vec())
                .collect());
        }
    }
    // Validate / default the per-player weights.
    let weights = get_weights(
        &teams_and_ranks
            .iter()
            .map(|(t, _)| *t)
            .collect::<Vec<&[TrueSkillRating]>>(),
        weights,
    )?;
    // Sort teams by rank, remembering each team's original position so the
    // results can be restored to input order at the end.
    let mut sorted_teams_and_ranks_with_pos = Vec::new();
    for (pos, ((team, outcome), weights)) in teams_and_ranks.iter().zip(&weights).enumerate() {
        sorted_teams_and_ranks_with_pos.push((pos, (*team, *outcome, &weights[..])));
    }
    sorted_teams_and_ranks_with_pos.sort_by_key(|v| v.1 .1);
    let teams_and_ranks: Vec<(&[TrueSkillRating], MultiTeamOutcome)> =
        sorted_teams_and_ranks_with_pos
            .iter()
            .map(|v| (v.1 .0, v.1 .1))
            .collect();
    let weights: Vec<&[f64]> = sorted_teams_and_ranks_with_pos
        .iter()
        .map(|v| v.1 .2)
        .collect();
    // Flatten players and weights into single vectors in the sorted team
    // order; the factor graph works on this flat indexing.
    let mut flattened_ratings = Vec::new();
    for &(team, _) in &teams_and_ranks {
        for &player in team {
            flattened_ratings.push(player);
        }
    }
    let mut flattened_weights = Vec::new();
    for group in weights {
        for &weight in group {
            flattened_weights.push(weight);
        }
    }
    // Factor-graph variables: one rating and one performance variable per
    // player, one performance variable per team, and one difference
    // variable per adjacent pair of rank-sorted teams.
    let rating_vars = {
        let mut v = Vec::with_capacity(flattened_ratings.len());
        for _ in 0..flattened_ratings.len() {
            v.push(Rc::new(RefCell::new(Variable::new())));
        }
        v
    };
    let perf_vars = {
        let mut v = Vec::with_capacity(flattened_ratings.len());
        for _ in 0..flattened_ratings.len() {
            v.push(Rc::new(RefCell::new(Variable::new())));
        }
        v
    };
    let team_perf_vars = {
        let mut v = Vec::with_capacity(teams_and_ranks.len());
        for _ in 0..teams_and_ranks.len() {
            v.push(Rc::new(RefCell::new(Variable::new())));
        }
        v
    };
    let team_diff_vars = {
        let mut v = Vec::with_capacity(teams_and_ranks.len() - 1);
        for _ in 0..(teams_and_ranks.len() - 1) {
            v.push(Rc::new(RefCell::new(Variable::new())));
        }
        v
    };
    // Cumulative team sizes, used below to slice the flat list per team.
    let team_sizes = team_sizes(&teams_and_ranks);
    // Run message passing until the largest change drops below MIN_DELTA.
    let rating_layer = run_schedule(
        &rating_vars,
        &perf_vars,
        &team_perf_vars,
        &team_diff_vars,
        &team_sizes,
        &teams_and_ranks,
        &flattened_ratings,
        &flattened_weights,
        config.dynamics_factor,
        config.beta,
        config.draw_probability,
        MIN_DELTA,
    );
    // Read each player's posterior mean/sigma and regroup them team by
    // team using (0, cumulative sizes) as slice boundaries.
    let mut transformed_groups = Vec::new();
    let mut iter_team_sizes = vec![0];
    iter_team_sizes.extend_from_slice(&team_sizes[..(team_sizes.len() - 1)]);
    for (start, end) in iter_team_sizes.into_iter().zip(&team_sizes) {
        let mut group = Vec::new();
        for f in &rating_layer[start..*end] {
            let gaussian = f.variable.borrow().gaussian;
            let mu = gaussian.mu();
            let sigma = gaussian.sigma();
            group.push(TrueSkillRating {
                rating: mu,
                uncertainty: sigma,
            });
        }
        transformed_groups.push(group);
    }
    // Restore the caller's original team order.
    let mut unsorted_with_pos = sorted_teams_and_ranks_with_pos
        .iter()
        .map(|v| v.0)
        .zip(transformed_groups)
        .collect::<Vec<_>>();
    unsorted_with_pos.sort_by_key(|v| v.0);
    Ok(unsorted_with_pos.into_iter().map(|v| v.1).collect())
}
/// Calculates the match quality of a 1-vs-1 game; higher values mean a
/// more evenly matched pairing. The result is symmetric in its two
/// player arguments.
#[must_use]
pub fn match_quality(
    player_one: &TrueSkillRating,
    player_two: &TrueSkillRating,
    config: &TrueSkillConfig,
) -> f64 {
    let delta: f64 = player_one.rating - player_two.rating;
    // Shared term 2*beta^2 + sigma1^2 + sigma2^2, previously computed twice
    // with this exact mul_add chain; hoisted once, results bit-identical.
    let denom = player_two.uncertainty.mul_add(
        player_two.uncertainty,
        2.0f64.mul_add(config.beta.powi(2), player_one.uncertainty.powi(2)),
    );
    let scale = ((2.0 * config.beta.powi(2)) / denom).sqrt();
    let falloff = ((-delta.powi(2)) / (2.0 * denom)).exp();
    scale * falloff
}
/// Calculates the [`match_quality`] of `player` against each opponent in
/// `results`, returning one value per opponent in order.
#[must_use]
pub fn match_quality_rating_period(
    player: &TrueSkillRating,
    results: &[TrueSkillRating],
    config: &TrueSkillConfig,
) -> Vec<f64> {
    let mut qualities = Vec::with_capacity(results.len());
    for opponent in results {
        qualities.push(match_quality(player, opponent, config));
    }
    qualities
}
/// Calculates the match quality of a game between two teams; higher
/// values mean a more evenly matched game. Symmetric in its two team
/// arguments.
#[must_use]
pub fn match_quality_two_teams(
    team_one: &[TrueSkillRating],
    team_two: &[TrueSkillRating],
    config: &TrueSkillConfig,
) -> f64 {
    let total_players = (team_one.len() + team_two.len()) as f64;
    let rating_one_sum: f64 = team_one.iter().map(|p| p.rating).sum();
    let rating_two_sum: f64 = team_two.iter().map(|p| p.rating).sum();
    let uncertainty_one_sum: f64 = team_one.iter().map(|p| p.uncertainty.powi(2)).sum();
    let uncertainty_two_sum: f64 = team_two.iter().map(|p| p.uncertainty.powi(2)).sum();
    // Shared term n*beta^2 + sum of all sigma^2, previously computed twice
    // with this exact mul_add chain; hoisted once, results bit-identical.
    let denom =
        total_players.mul_add(config.beta.powi(2), uncertainty_one_sum) + uncertainty_two_sum;
    let scale = ((total_players * config.beta.powi(2)) / denom).sqrt();
    let falloff = ((-(rating_one_sum - rating_two_sum).powi(2)) / (2.0 * denom)).exp();
    scale * falloff
}
/// Calculates the match quality of a game between any number of teams,
/// optionally with per-player `weights` (same shape as `teams`).
/// Empty input (no teams, or any empty team) is defined as quality 0.0.
///
/// # Errors
/// Returns a [`WeightError`] if `Matrix::create_rotated_a_matrix` rejects
/// the weights.
pub fn match_quality_multi_team(
    teams: &[&[TrueSkillRating]],
    config: &TrueSkillConfig,
    weights: Option<&[&[f64]]>,
) -> Result<f64, WeightError> {
    if teams.is_empty() {
        return Ok(0.0);
    }
    for team in teams {
        if team.is_empty() {
            return Ok(0.0);
        }
    }
    let total_players = teams.iter().map(|t| t.len()).sum::<usize>();
    // Flattened per-player variances and means, in team order.
    let team_uncertainties_sq_flatten = teams
        .iter()
        .flat_map(|team| {
            team.iter()
                .map(|p| p.uncertainty.powi(2))
                .collect::<Vec<_>>()
        })
        .collect::<Vec<_>>();
    let team_ratings_flatten = teams
        .iter()
        .flat_map(|team| team.iter().map(|p| p.rating).collect::<Vec<_>>())
        .collect::<Vec<_>>();
    let mean_matrix = Matrix::new_from_data(&team_ratings_flatten, total_players, 1);
    let variance_matrix = Matrix::new_diagonal(&team_uncertainties_sq_flatten);
    // The (weighted) team-difference design matrix and its transpose; the
    // exact orientation is defined by Matrix::create_rotated_a_matrix.
    let rotated_a_matrix = Matrix::create_rotated_a_matrix(teams, weights)?;
    let a_matrix = rotated_a_matrix.transpose();
    // Building blocks of the multivariate quality formula:
    // beta^2 * A A^T and A * Sigma * A^T.
    let a_ta = rotated_a_matrix.clone() * a_matrix.clone() * config.beta.powi(2);
    let atsa = rotated_a_matrix.clone() * variance_matrix * a_matrix.clone();
    let start = a_matrix * mean_matrix.transpose();
    let middle = a_ta.clone() + atsa;
    let end = rotated_a_matrix * mean_matrix;
    // Quality = exp(exponent term) * sqrt(det ratio); NOTE(review): the
    // exact matrix orientation conventions live in the matrix module —
    // verify there before changing anything here.
    let e_arg = (start * middle.inverse() * end * -0.5).determinant();
    let s_arg = a_ta.determinant() / middle.determinant();
    Ok(e_arg.exp() * s_arg.sqrt())
}
/// Calculates the expected outcome of a 1-vs-1 game as a pair of win
/// probabilities that sum to 1.0 (the first is `player_one`'s).
#[must_use]
pub fn expected_score(
    player_one: &TrueSkillRating,
    player_two: &TrueSkillRating,
    config: &TrueSkillConfig,
) -> (f64, f64) {
    // c = sqrt(2*beta^2 + sigma1^2 + sigma2^2), same mul_add chain as
    // before so results are bit-identical.
    let c = player_two
        .uncertainty
        .mul_add(
            player_two.uncertainty,
            2.0f64.mul_add(config.beta.powi(2), player_one.uncertainty.powi(2)),
        )
        .sqrt();
    let delta = player_one.rating - player_two.rating;
    let win_probability = cdf(delta / c, 0.0, 1.0);
    (win_probability, 1.0 - win_probability)
}
/// Calculates the expected outcome of a two-team game as a pair of win
/// probabilities that sum to 1.0 (the first is `team_one`'s).
#[must_use]
pub fn expected_score_two_teams(
    team_one: &[TrueSkillRating],
    team_two: &[TrueSkillRating],
    config: &TrueSkillConfig,
) -> (f64, f64) {
    let player_count = (team_one.len() + team_two.len()) as f64;
    // Team skill is the sum of member means; team variance the sum of
    // member variances.
    let mu_one: f64 = team_one.iter().map(|p| p.rating).sum();
    let mu_two: f64 = team_two.iter().map(|p| p.rating).sum();
    let sigma_sq_one: f64 = team_one.iter().map(|p| p.uncertainty.powi(2)).sum();
    let sigma_sq_two: f64 = team_two.iter().map(|p| p.uncertainty.powi(2)).sum();
    // c = sqrt(n*beta^2 + sum of all sigma^2), same expression shape as
    // before so results are bit-identical.
    let c = (sigma_sq_two + player_count.mul_add(config.beta.powi(2), sigma_sq_one)).sqrt();
    let exp_one = cdf((mu_one - mu_two) / c, 0.0, 1.0);
    (exp_one, 1.0 - exp_one)
}
/// Calculates the expected outcome of a game between multiple teams,
/// returning one probability per team in input order; the values sum to
/// 1.0. Each team's score is its summed pairwise win probability against
/// every other team, normalised over all pairs.
///
/// Fix: with fewer than two teams there are no pairwise comparisons, so
/// `total_probability` stayed 0.0 and the final division produced `NaN`
/// for a single team. An empty slice still yields an empty vector (as
/// before); a single team now yields `vec![1.0]`.
#[must_use]
pub fn expected_score_multi_team(
    teams: &[&[TrueSkillRating]],
    config: &TrueSkillConfig,
) -> Vec<f64> {
    if teams.is_empty() {
        return Vec::new();
    }
    if teams.len() == 1 {
        // A lone team has no opponents and trivially takes the whole score.
        return vec![1.0];
    }
    let player_count = teams.iter().map(|t| t.len()).sum::<usize>() as f64;
    let mut win_probabilities = Vec::with_capacity(teams.len());
    // Grand total of all pairwise probabilities; used as the normaliser.
    let mut total_probability = 0.0;
    for (i, team_one) in teams.iter().enumerate() {
        let mut current_team_probabilities = Vec::with_capacity(teams.len() - 1);
        let team_one_ratings = team_one.iter().map(|p| p.rating).sum::<f64>();
        let team_one_uncertainties = team_one.iter().map(|p| p.uncertainty.powi(2)).sum::<f64>();
        for (j, team_two) in teams.iter().enumerate() {
            // Never compare a team against itself.
            if i == j {
                continue;
            }
            let team_two_ratings = team_two.iter().map(|p| p.rating).sum::<f64>();
            let team_two_uncertainties =
                team_two.iter().map(|p| p.uncertainty.powi(2)).sum::<f64>();
            let delta = team_one_ratings - team_two_ratings;
            // c = sqrt(n*beta^2 + both teams' summed variances).
            let denom = (team_two_uncertainties
                + player_count.mul_add(config.beta.powi(2), team_one_uncertainties))
            .sqrt();
            let result = cdf(delta / denom, 0.0, 1.0);
            current_team_probabilities.push(result);
            total_probability += result;
        }
        win_probabilities.push(current_team_probabilities);
    }
    // Normalise each team's summed pairwise wins over the grand total.
    let mut expected_scores = Vec::new();
    for probability in win_probabilities {
        expected_scores.push(probability.iter().sum::<f64>() / total_probability);
    }
    expected_scores
}
/// Calculates `player`'s expected score (win probability) against each
/// opponent individually, one value per opponent in order. The values do
/// NOT sum to 1.0: each is an independent 1-vs-1 prediction.
#[must_use]
pub fn expected_score_rating_period(
    player: &TrueSkillRating,
    opponents: &[TrueSkillRating],
    config: &TrueSkillConfig,
) -> Vec<f64> {
    let mut expectations = Vec::with_capacity(opponents.len());
    for opponent in opponents {
        // c = sqrt(2*beta^2 + sigma_p^2 + sigma_o^2), same mul_add chain
        // as before so results are bit-identical.
        let c = opponent
            .uncertainty
            .mul_add(
                opponent.uncertainty,
                2.0f64.mul_add(config.beta.powi(2), player.uncertainty.powi(2)),
            )
            .sqrt();
        expectations.push(cdf((player.rating - opponent.rating) / c, 0.0, 1.0));
    }
    expectations
}
/// Gets a conservative rank estimate for a player:
/// `rating - 3 * uncertainty`, computed via `mul_add`.
#[must_use]
pub fn get_rank(player: &TrueSkillRating) -> f64 {
    player.uncertainty.mul_add(-3.0, player.rating)
}
/// Computes the draw margin (epsilon): the standard-normal quantile at
/// (draw_probability + 1) / 2, scaled by sqrt(total_players) * beta.
fn draw_margin(draw_probability: f64, beta: f64, total_players: f64) -> f64 {
    inverse_cdf(f64::midpoint(draw_probability, 1.0), 0.0, 1.0) * total_players.sqrt() * beta
}
/// Returns the running (cumulative) team sizes, used to slice a flattened
/// player list back into per-team groups.
fn team_sizes(teams_and_ranks: &[(&[TrueSkillRating], MultiTeamOutcome)]) -> Vec<usize> {
    let mut running_total = 0;
    teams_and_ranks
        .iter()
        .map(|(team, _)| {
            running_total += team.len();
            running_total
        })
        .collect()
}
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use crate::MultiTeamOutcome;
use super::*;
const ERROR_MARGIN: f64 = 0.000_000_000_1;
// 1v1 updates: known values, WIN/LOSS symmetry under swapped arguments,
// and the equal-players case.
#[test]
fn test_trueskill() {
    let player_one = TrueSkillRating::new();
    let player_two = TrueSkillRating {
        rating: 30.0,
        uncertainty: 1.2,
    };
    let (p1, p2) = trueskill(
        &player_one,
        &player_two,
        &Outcomes::WIN,
        &TrueSkillConfig::new(),
    );
    assert!(((p1.rating * 100.0).round() - 3300.0).abs() < f64::EPSILON);
    assert!(((p1.uncertainty * 100.0).round() - 597.0).abs() < f64::EPSILON);
    assert!(((p2.rating * 100.0).round() - 2983.0).abs() < f64::EPSILON);
    assert!(((p2.uncertainty * 100.0).round() - 120.0).abs() < f64::EPSILON);
    // Mirrored game must produce the mirrored result.
    let (p1, p2) = trueskill(
        &player_two,
        &player_one,
        &Outcomes::LOSS,
        &TrueSkillConfig::new(),
    );
    assert!(((p2.rating * 100.0).round() - 3300.0).abs() < f64::EPSILON);
    assert!(((p2.uncertainty * 100.0).round() - 597.0).abs() < f64::EPSILON);
    assert!(((p1.rating * 100.0).round() - 2983.0).abs() < f64::EPSILON);
    assert!(((p1.uncertainty * 100.0).round() - 120.0).abs() < f64::EPSILON);
    // Two identical default players: symmetric update.
    let player_two = TrueSkillRating::new();
    let (p1, p2) = trueskill(
        &player_one,
        &player_two,
        &Outcomes::WIN,
        &TrueSkillConfig::new(),
    );
    assert!((p1.rating.round() - 29.0).abs() < f64::EPSILON);
    assert!(((p1.uncertainty * 100.0).round() - 717.0).abs() < f64::EPSILON);
    assert!((p2.rating.round() - 21.0).abs() < f64::EPSILON);
    assert!(((p2.uncertainty * 100.0).round() - 717.0).abs() < f64::EPSILON);
}
// A single-game period must match the 1v1 result; a multi-game period is
// checked against known sequential-update values.
#[test]
fn test_trueskill_rating_period() {
    let player_one = TrueSkillRating::new();
    let player_two = TrueSkillRating {
        rating: 30.0,
        uncertainty: 1.2,
    };
    let player_three = TrueSkillRating {
        rating: 12.0,
        uncertainty: 1.9,
    };
    let player_four = TrueSkillRating {
        rating: 49.0,
        uncertainty: 1.2,
    };
    let player = trueskill_rating_period(
        &player_one,
        &[(player_two, Outcomes::WIN)],
        &TrueSkillConfig::new(),
    );
    assert!(((player.rating * 100.0).round() - 3300.0).abs() < f64::EPSILON);
    assert!(((player.uncertainty * 100.0).round() - 597.0).abs() < f64::EPSILON);
    let player = trueskill_rating_period(
        &player_one,
        &[
            (player_two, Outcomes::WIN),
            (player_three, Outcomes::DRAW),
            (player_four, Outcomes::LOSS),
        ],
        &TrueSkillConfig::new(),
    );
    assert!(((player.rating * 100.0).round() - 2291.0).abs() < f64::EPSILON);
    assert!(((player.uncertainty * 100.0).round() - 430.0).abs() < f64::EPSILON);
}
// Draws: known values, symmetry under swapped arguments, and agreement
// between the 1v1 function and the rating-period function.
#[test]
fn test_draw() {
    let player_one = TrueSkillRating::new();
    let player_two = TrueSkillRating {
        rating: 30.0,
        uncertainty: 1.2,
    };
    let (p1, p2) = trueskill(
        &player_one,
        &player_two,
        &Outcomes::DRAW,
        &TrueSkillConfig::new(),
    );
    assert!((p1.rating - 28.282_523_394_245_658).abs() < f64::EPSILON);
    assert!(((p1.uncertainty * 100.0).round() - 488.0).abs() < f64::EPSILON);
    assert!((p2.rating - 29.931_612_181_339_364).abs() < f64::EPSILON);
    assert!(((p2.uncertainty * 100.0).round() - 119.0).abs() < f64::EPSILON);
    let (p2, p1) = trueskill(
        &player_two,
        &player_one,
        &Outcomes::DRAW,
        &TrueSkillConfig::new(),
    );
    assert!((p1.rating - 28.282_523_394_245_658).abs() < f64::EPSILON);
    assert!(((p1.uncertainty * 100.0).round() - 488.0).abs() < f64::EPSILON);
    assert!((p2.rating - 29.931_612_181_339_364).abs() < f64::EPSILON);
    assert!(((p2.uncertainty * 100.0).round() - 119.0).abs() < f64::EPSILON);
    let p1 = trueskill_rating_period(
        &player_one,
        &[(player_two, Outcomes::DRAW)],
        &TrueSkillConfig::new(),
    );
    assert!((p1.rating - 28.282_523_394_245_658).abs() < f64::EPSILON);
    assert!(((p1.uncertainty * 100.0).round() - 488.0).abs() < f64::EPSILON);
}
// Extreme/nonsensical inputs (negative uncertainty, huge ratings) must
// still produce finite, reproducible output rather than panic.
#[test]
fn test_unlikely_values() {
    let player_one = TrueSkillRating {
        rating: -9.0,
        uncertainty: -5.0,
    };
    let player_two = TrueSkillRating {
        rating: 7000.0,
        uncertainty: 6000.0,
    };
    let (p1, p2) = trueskill(
        &player_one,
        &player_two,
        &Outcomes::WIN,
        &TrueSkillConfig::new(),
    );
    assert!((p1.rating.round() - -9.0).abs() < f64::EPSILON);
    assert!((p1.uncertainty.round() - 5.0).abs() < f64::EPSILON);
    assert!((p2.rating.round() - -2969.0).abs() < f64::EPSILON);
    assert!((p2.uncertainty.round() - 2549.0).abs() < f64::EPSILON);
}
// Two-team updates: known values, WIN/LOSS symmetry under swapped teams,
// a draw, and the empty-team passthrough.
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_teams() {
    let player_one = TrueSkillRating {
        rating: 20.0,
        uncertainty: 8.0,
    };
    let player_two = TrueSkillRating {
        rating: 25.0,
        uncertainty: 6.0,
    };
    let player_three = TrueSkillRating {
        rating: 35.0,
        uncertainty: 7.0,
    };
    let player_four = TrueSkillRating {
        rating: 40.0,
        uncertainty: 5.0,
    };
    let (team_one, team_two) = trueskill_two_teams(
        &[player_one, player_two],
        &[player_three, player_four],
        &Outcomes::WIN,
        &TrueSkillConfig::new(),
    );
    assert!((team_one[0].rating - 29.698_800_676_796_665).abs() < f64::EPSILON);
    assert!((team_one[1].rating - 30.456_035_750_156_31).abs() < f64::EPSILON);
    assert!((team_two[0].rating - 27.574_109_105_332_1).abs() < f64::EPSILON);
    assert!((team_two[1].rating - 36.210_764_756_738_115).abs() < f64::EPSILON);
    assert!((team_one[0].uncertainty - 7.007_955_406_085_773).abs() < f64::EPSILON);
    assert!((team_one[1].uncertainty - 5.594_025_202_259_947).abs() < f64::EPSILON);
    assert!((team_two[0].uncertainty - 6.346_250_279_230_62).abs() < f64::EPSILON);
    assert!((team_two[1].uncertainty - 4.767_945_180_134_836).abs() < f64::EPSILON);
    // Same game seen from the losing side must give identical results.
    let (team_two, team_one) = trueskill_two_teams(
        &[player_three, player_four],
        &[player_one, player_two],
        &Outcomes::LOSS,
        &TrueSkillConfig::new(),
    );
    assert!((team_one[0].rating - 29.698_800_676_796_665).abs() < f64::EPSILON);
    assert!((team_one[1].rating - 30.456_035_750_156_31).abs() < f64::EPSILON);
    assert!((team_two[0].rating - 27.574_109_105_332_1).abs() < f64::EPSILON);
    assert!((team_two[1].rating - 36.210_764_756_738_115).abs() < f64::EPSILON);
    assert!((team_one[0].uncertainty - 7.007_955_406_085_773).abs() < f64::EPSILON);
    assert!((team_one[1].uncertainty - 5.594_025_202_259_947).abs() < f64::EPSILON);
    assert!((team_two[0].uncertainty - 6.346_250_279_230_62).abs() < f64::EPSILON);
    assert!((team_two[1].uncertainty - 4.767_945_180_134_836).abs() < f64::EPSILON);
    let player_one = TrueSkillRating {
        rating: 15.0,
        uncertainty: 8.0,
    };
    let player_two = TrueSkillRating {
        rating: 20.0,
        uncertainty: 6.0,
    };
    let player_three = TrueSkillRating {
        rating: 25.0,
        uncertainty: 4.0,
    };
    let player_four = TrueSkillRating {
        rating: 30.0,
        uncertainty: 3.0,
    };
    let (team_one, team_two) = trueskill_two_teams(
        &[player_one, player_two],
        &[player_three, player_four],
        &Outcomes::DRAW,
        &TrueSkillConfig::new(),
    );
    assert!((team_one[0].rating - 21.571_213_060_731_655).abs() < f64::EPSILON);
    assert!((team_one[1].rating - 23.696_619_260_051_385).abs() < f64::EPSILON);
    assert!((team_two[0].rating - 23.356_662_026_148_804).abs() < f64::EPSILON);
    assert!((team_two[1].rating - 29.075_310_476_318_872).abs() < f64::EPSILON);
    assert!((team_one[0].uncertainty - 6.555_663_733_192_403).abs() < f64::EPSILON);
    assert!((team_one[1].uncertainty - 5.417_723_612_401_869).abs() < f64::EPSILON);
    assert!((team_two[0].uncertainty - 3.832_975_356_683_128).abs() < f64::EPSILON);
    assert!((team_two[1].uncertainty - 2.930_957_525_591_959_5).abs() < f64::EPSILON);
    // An empty opponent team leaves ratings untouched.
    let (team_one, _) =
        trueskill_two_teams(&[player_one], &[], &Outcomes::WIN, &TrueSkillConfig::new());
    assert_eq!(team_one[0], player_one);
}
// With one player per team, the 1v1, two-team, and multi-team code paths
// must agree (multi-team only approximately, since it iterates).
#[test]
fn test_solo_team() {
    let player_one = TrueSkillRating::new();
    let player_two = TrueSkillRating {
        rating: 12.0,
        uncertainty: 3.2,
    };
    let (p1, p2) = trueskill(
        &player_one,
        &player_two,
        &Outcomes::WIN,
        &TrueSkillConfig::new(),
    );
    let (tp1, tp2) = trueskill_two_teams(
        &[player_one],
        &[player_two],
        &Outcomes::WIN,
        &TrueSkillConfig::new(),
    );
    let mp = trueskill_multi_team(
        &[
            (&[player_one], MultiTeamOutcome::new(1)),
            (&[player_two], MultiTeamOutcome::new(2)),
        ],
        &TrueSkillConfig::new(),
        None,
    )
    .unwrap();
    assert_eq!(p1, tp1[0]);
    assert_eq!(p2, tp2[0]);
    assert!((p1.rating - mp[0][0].rating).abs() < 0.001);
    assert!((p2.rating - mp[1][0].rating).abs() < 0.001);
    assert!((p1.uncertainty - mp[0][0].uncertainty).abs() < 0.001);
    assert!((p2.uncertainty - mp[1][0].uncertainty).abs() < 0.001);
}
// Two-team match quality: known value, and symmetry in the team order.
#[test]
fn test_match_quality_two_teams() {
    let player_one = TrueSkillRating {
        rating: 20.0,
        uncertainty: 8.0,
    };
    let player_two = TrueSkillRating {
        rating: 25.0,
        uncertainty: 6.0,
    };
    let player_three = TrueSkillRating {
        rating: 35.0,
        uncertainty: 7.0,
    };
    let player_four = TrueSkillRating {
        rating: 40.0,
        uncertainty: 5.0,
    };
    let quality = match_quality_two_teams(
        &[player_one, player_two],
        &[player_three, player_four],
        &TrueSkillConfig::new(),
    );
    let quality2 = match_quality_two_teams(
        &[player_three, player_four],
        &[player_one, player_two],
        &TrueSkillConfig::new(),
    );
    assert!((quality - 0.084_108_145_418_343_24).abs() < f64::EPSILON);
    assert!((quality - quality2).abs() < f64::EPSILON);
}
// Two-team expected score: probabilities sum to 1.0, known value for the
// weaker team.
#[test]
fn test_expected_score_two_teams() {
    let player_one = TrueSkillRating {
        rating: 38.0,
        uncertainty: 3.0,
    };
    let player_two = TrueSkillRating {
        rating: 38.0,
        uncertainty: 3.0,
    };
    let player_three = TrueSkillRating {
        rating: 44.0,
        uncertainty: 3.0,
    };
    let player_four = TrueSkillRating {
        rating: 44.0,
        uncertainty: 3.0,
    };
    let (exp1, exp2) = expected_score_two_teams(
        &[player_one, player_two],
        &[player_three, player_four],
        &TrueSkillConfig::new(),
    );
    assert!((exp1 + exp2 - 1.0).abs() < f64::EPSILON);
    assert!((exp1 - 0.121_280_517_547_482_7).abs() < f64::EPSILON);
}
// 1v1 match quality: equal players score ~0.447, a mismatched pair scores
// near zero, and quality is symmetric in player order.
#[test]
fn test_quality() {
    let player_one = TrueSkillRating::new();
    let player_two = TrueSkillRating::new();
    let quality = match_quality(&player_one, &player_two, &TrueSkillConfig::new());
    assert!(((quality * 1000.0).round() - 447.0).abs() < f64::EPSILON);
    let player_one = TrueSkillRating {
        rating: 48.0,
        uncertainty: 1.2,
    };
    let player_two = TrueSkillRating {
        rating: 12.0,
        ..Default::default()
    };
    let quality = match_quality(&player_one, &player_two, &TrueSkillConfig::new());
    assert!(((quality * 10000.0).round() - 12.0).abs() < f64::EPSILON);
    let quality2 = match_quality(&player_two, &player_one, &TrueSkillConfig::new());
    assert!((quality - quality2).abs() < f64::EPSILON);
}
// 1v1 expected scores: 50/50 for equal players, known split for unequal
// players, and agreement across the 1v1, two-team, and multi-team paths.
#[test]
fn test_expected_score() {
    let player_one = TrueSkillRating::new();
    let player_two = TrueSkillRating::new();
    let (exp1, exp2) = expected_score(&player_one, &player_two, &TrueSkillConfig::new());
    assert!(exp1.mul_add(100.0, -50.0).round().abs() < f64::EPSILON);
    assert!(exp2.mul_add(100.0, -50.0).round().abs() < f64::EPSILON);
    let better_player = TrueSkillRating {
        rating: 44.0,
        uncertainty: 3.0,
    };
    let worse_player = TrueSkillRating {
        rating: 38.0,
        uncertainty: 3.0,
    };
    let (exp1, exp2) =
        expected_score(&better_player, &worse_player, &TrueSkillConfig::default());
    assert!(exp1.mul_add(100.0, -80.0).round().abs() < f64::EPSILON);
    assert!(exp2.mul_add(100.0, -20.0).round().abs() < f64::EPSILON);
    assert!((exp1.mul_add(100.0, exp2 * 100.0).round() - 100.0).abs() < f64::EPSILON);
    let team_one = [TrueSkillRating::from((44.0, 3.0))];
    let team_two = [TrueSkillRating::from((38.0, 3.0))];
    let (e0, e1) = expected_score_two_teams(&team_one, &team_two, &TrueSkillConfig::new());
    let e = expected_score_multi_team(&[&team_one, &team_two], &TrueSkillConfig::new());
    assert!((e0 - e[0]).abs() < f64::EPSILON);
    assert!((e1 - e[1]).abs() < f64::EPSILON);
    assert!((exp1 - e[0]).abs() < f64::EPSILON);
    assert!((exp2 - e[1]).abs() < f64::EPSILON);
}
// Multi-team match quality: known value, plus the zero-quality degenerate
// cases (no teams, or an empty team).
#[test]
fn test_match_quality_multi_team() {
    let team_one = vec![TrueSkillRating::new(); 2];
    let team_two = vec![TrueSkillRating::from((30.0, 3.0)); 2];
    let team_three = vec![TrueSkillRating::from((40.0, 2.0)); 2];
    let exp = match_quality_multi_team(
        &[&team_one, &team_two, &team_three],
        &TrueSkillConfig::new(),
        None,
    )
    .unwrap();
    assert!((exp - 0.017_538_349_223_941_27).abs() < f64::EPSILON);
    let exp = match_quality_multi_team(&[], &TrueSkillConfig::default(), None).unwrap();
    assert!(exp < f64::EPSILON);
    let exp =
        match_quality_multi_team(&[&team_one, &[]], &TrueSkillConfig::default(), None).unwrap();
    assert!(exp < f64::EPSILON);
}
// Multi-team match quality with explicit per-player weights: known value.
#[test]
fn test_match_quality_weighted() {
    let team_one = vec![TrueSkillRating::new(); 2];
    let team_two = vec![TrueSkillRating::from((30.0, 3.0)); 3];
    let team_three = vec![TrueSkillRating::from((40.0, 2.0)); 2];
    let exp = match_quality_multi_team(
        &[&team_one, &team_two, &team_three],
        &TrueSkillConfig::new(),
        Some(&[&[1.0, 0.8], &[1.0, 0.1, 0.222], &[0.12, 0.99]]),
    )
    .unwrap();
    assert!((exp - 0.325_899_305_412_196_14).abs() < f64::EPSILON);
}
// Multi-team expected scores: values sum to 1.0, known splits for unequal
// teams, and a uniform split for four identical teams.
#[test]
fn test_multi_team_expected() {
    let team_one = vec![
        TrueSkillRating {
            rating: 38.0,
            uncertainty: 3.0,
        },
        TrueSkillRating {
            rating: 38.0,
            uncertainty: 3.0,
        },
    ];
    let team_two = vec![
        TrueSkillRating {
            rating: 44.0,
            uncertainty: 3.0,
        },
        TrueSkillRating {
            rating: 44.0,
            uncertainty: 3.0,
        },
    ];
    let team_three = vec![
        TrueSkillRating {
            rating: 50.0,
            uncertainty: 3.0,
        },
        TrueSkillRating {
            rating: 50.0,
            uncertainty: 3.0,
        },
    ];
    let exp = expected_score_multi_team(
        &[&team_one, &team_two, &team_three],
        &TrueSkillConfig::new(),
    );
    assert!((exp.iter().sum::<f64>() - 1.0).abs() < f64::EPSILON);
    assert_eq!(
        exp,
        vec![
            0.058_904_655_169_257_615,
            0.333_333_333_333_333_3,
            0.607_762_011_497_409
        ]
    );
    let team_one = vec![TrueSkillRating::new(); 10];
    let team_two = vec![TrueSkillRating::new(); 10];
    let team_three = vec![TrueSkillRating::new(); 10];
    let team_four = vec![TrueSkillRating::new(); 10];
    let exp = expected_score_multi_team(
        &[&team_one, &team_two, &team_three, &team_four],
        &TrueSkillConfig::new(),
    );
    assert!((exp.iter().sum::<f64>() - 1.0).abs() < f64::EPSILON);
    assert_eq!(
        exp,
        vec![
            0.249_999_999_999_999_97,
            0.249_999_999_999_999_97,
            0.249_999_999_999_999_97,
            0.249_999_999_999_999_97
        ]
    );
}
// Conservative rank (rating - 3 * uncertainty): a new player ranks 0, an
// established player ranks close to their rating.
#[test]
fn test_get_rank() {
    let new_player = TrueSkillRating::new();
    let older_player = TrueSkillRating {
        rating: 43.1,
        uncertainty: 1.92,
    };
    let new_rank = get_rank(&new_player);
    let older_rank = get_rank(&older_player);
    assert!((new_rank.round() - 0.0).abs() < f64::EPSILON);
    assert!((older_rank.round() - 37.0).abs() < f64::EPSILON);
}
#[test]
#[allow(clippy::clone_on_copy)]
fn test_misc_stuff() {
    // Smoke-test the derived trait impls: Clone, PartialEq, Debug and
    // the tuple From conversion. The clippy allow is intentional — the
    // explicit `.clone()` calls are the point of the test.
    let player_one = TrueSkillRating::new();
    let config = TrueSkillConfig::new();
    let copy_of_player = player_one.clone();
    assert_eq!(player_one, copy_of_player);
    assert!((config.beta - config.clone().beta).abs() < f64::EPSILON);
    assert!(!format!("{player_one:?}").is_empty());
    assert!(!format!("{config:?}").is_empty());
    assert_eq!(player_one, TrueSkillRating::from((25.0, 25.0 / 3.0)));
}
#[test]
fn test_traits() {
    // Exercises every trait entry point (`Rating`, `RatingSystem`,
    // `RatingPeriodSystem`, `TeamRatingSystem`, `MultiTeamRatingSystem`)
    // and checks they agree with each other for the symmetric 1v1 case.
    let player_one: TrueSkillRating = Rating::new(Some(24.0), Some(2.0));
    let player_two: TrueSkillRating = Rating::new(Some(24.0), Some(2.0));
    let rating_system: TrueSkill = RatingSystem::new(TrueSkillConfig::new());
    assert!((player_one.rating() - 24.0).abs() < f64::EPSILON);
    assert_eq!(player_one.uncertainty(), Some(2.0));
    let (new_player_one, new_player_two) =
        RatingSystem::rate(&rating_system, &player_one, &player_two, &Outcomes::WIN);
    let (exp1, exp2) = RatingSystem::expected_score(&rating_system, &player_one, &player_two);
    assert!((new_player_one.rating - 24.534_185_520_312_818).abs() < f64::EPSILON);
    assert!((new_player_two.rating - 23.465_814_479_687_182).abs() < f64::EPSILON);
    assert!((exp1 + exp2 - 1.0).abs() < f64::EPSILON);
    // The rating-period API must agree with the 1v1 expected score.
    let rating_period_system: TrueSkill = RatingPeriodSystem::new(TrueSkillConfig::new());
    let exp_rp =
        RatingPeriodSystem::expected_score(&rating_period_system, &player_one, &[player_two]);
    assert!((exp1 - exp_rp[0]).abs() < f64::EPSILON);
    let player_one: TrueSkillRating = Rating::new(Some(24.0), Some(2.0));
    let player_two: TrueSkillRating = Rating::new(Some(24.0), Some(2.0));
    let rating_period: TrueSkill = RatingPeriodSystem::new(TrueSkillConfig::new());
    let new_player_one =
        RatingPeriodSystem::rate(&rating_period, &player_one, &[(player_two, Outcomes::WIN)]);
    assert!((new_player_one.rating - 24.534_185_520_312_818).abs() < f64::EPSILON);
    let player_one: TrueSkillRating = Rating::new(Some(24.0), Some(2.0));
    let player_two: TrueSkillRating = Rating::new(Some(24.0), Some(2.0));
    let team_rating: TrueSkill = TeamRatingSystem::new(TrueSkillConfig::new());
    let (new_team_one, new_team_two) =
        TeamRatingSystem::rate(&team_rating, &[player_one], &[player_two], &Outcomes::WIN);
    assert!((new_team_one[0].rating - 24.534_185_520_312_818).abs() < f64::EPSILON);
    assert!((new_team_two[0].rating - 23.465_814_479_687_182).abs() < f64::EPSILON);
    // FIX: this previously passed `&rating_system` (the `RatingSystem::new`
    // instance) and only worked because both instances share the same config;
    // use the `TeamRatingSystem::new` instance that this section is testing.
    let (exp1, exp2) =
        TeamRatingSystem::expected_score(&team_rating, &[player_one], &[player_two]);
    assert!((exp1 + exp2 - 1.0).abs() < f64::EPSILON);
    let multi_team_rating: TrueSkill = MultiTeamRatingSystem::new(TrueSkillConfig::new());
    let mtr = MultiTeamRatingSystem::rate(
        &multi_team_rating,
        &[
            (&[player_one], MultiTeamOutcome::new(1)),
            (&[player_two], MultiTeamOutcome::new(2)),
        ],
    );
    assert!((mtr[0][0].rating - 24.534_091_256_161_39).abs() < ERROR_MARGIN);
    assert!((mtr[1][0].rating - 23.465_908_743_838_607).abs() < ERROR_MARGIN);
    let exp = MultiTeamRatingSystem::expected_score(
        &multi_team_rating,
        &[&[player_one], &[player_two]],
    );
    assert!(((exp.iter().sum::<f64>()) - 1.0).abs() < f64::EPSILON);
}
#[test]
fn test_trueskill_multi_team() {
    // Three teams of differing sizes and skill: team one places first,
    // teams two and three tie for second. All outputs are pinned against
    // precomputed reference values, player by player.
    let t1 = [
        TrueSkillRating { rating: 40.0, uncertainty: 4.0 },
        TrueSkillRating { rating: 45.0, uncertainty: 3.0 },
    ];
    let t2 = [
        TrueSkillRating { rating: 20.0, uncertainty: 7.0 },
        TrueSkillRating { rating: 19.0, uncertainty: 6.0 },
        TrueSkillRating { rating: 30.0, uncertainty: 9.0 },
        TrueSkillRating { rating: 10.0, uncertainty: 4.0 },
    ];
    let t3 = [
        TrueSkillRating { rating: 50.0, uncertainty: 5.0 },
        TrueSkillRating { rating: 30.0, uncertainty: 2.0 },
    ];
    let teams_and_ranks = [
        (&t1[..], MultiTeamOutcome::new(0)),
        (&t2[..], MultiTeamOutcome::new(1)),
        (&t3[..], MultiTeamOutcome::new(1)),
    ];
    let results =
        trueskill_multi_team(&teams_and_ranks, &TrueSkillConfig::new(), None).unwrap();
    // Expected (rating, uncertainty) per player, grouped by team.
    let expected: [&[(f64, f64)]; 3] = [
        &[
            (40.876_849_177_315_655, 3.839_527_589_355_369_8),
            (45.493_394_092_398_45, 2.933_671_613_522_051),
        ],
        &[
            (19.608_650_920_845_23, 6.396_044_310_523_896),
            (18.712_463_514_890_54, 5.624_556_429_622_889),
            (29.353_112_227_810_637, 7.673_456_361_986_593),
            (9.872_175_198_037_164, 3.891_408_425_994_520_3),
        ],
        &[
            (48.829_832_201_455_31, 4.590_018_525_151_379),
            (29.812_500_188_902_998, 1.976_314_792_712_798),
        ],
    ];
    for (team, exp_team) in results.iter().zip(expected) {
        for (player, (exp_rating, exp_uncertainty)) in team.iter().zip(exp_team) {
            assert!((player.rating - exp_rating).abs() < ERROR_MARGIN);
            assert!((player.uncertainty - exp_uncertainty).abs() < ERROR_MARGIN);
        }
    }
}
#[test]
fn test_trueskill_multi_team_weighted() {
    // Same match-up as `test_trueskill_multi_team`, but with per-player
    // weights; players with a low weight move much less. Pinned against
    // precomputed reference values, player by player.
    let t1 = [
        TrueSkillRating { rating: 40.0, uncertainty: 4.0 },
        TrueSkillRating { rating: 45.0, uncertainty: 3.0 },
    ];
    let t2 = [
        TrueSkillRating { rating: 20.0, uncertainty: 7.0 },
        TrueSkillRating { rating: 19.0, uncertainty: 6.0 },
        TrueSkillRating { rating: 30.0, uncertainty: 9.0 },
        TrueSkillRating { rating: 10.0, uncertainty: 4.0 },
    ];
    let t3 = [
        TrueSkillRating { rating: 50.0, uncertainty: 5.0 },
        TrueSkillRating { rating: 30.0, uncertainty: 2.0 },
    ];
    let teams_and_ranks = [
        (&t1[..], MultiTeamOutcome::new(0)),
        (&t2[..], MultiTeamOutcome::new(1)),
        (&t3[..], MultiTeamOutcome::new(1)),
    ];
    let weights: [&[f64]; 3] = [&[0.7, 1.0], &[0.33, 0.33, 0.9, 1.0], &[1.0, 0.0333]];
    let results =
        trueskill_multi_team(&teams_and_ranks, &TrueSkillConfig::new(), Some(&weights)).unwrap();
    // Expected (rating, uncertainty) per player, grouped by team.
    let expected: [&[(f64, f64)]; 3] = [
        &[
            (40.027_231_346_252_364, 3.990_045_287_325_199),
            (45.021_889_715_580_61, 2.991_832_600_124_326_7),
        ],
        &[
            (20.098_036_453_507_61, 6.888_248_560_920_697),
            (19.072_030_467_824_426, 5.930_038_936_969_937),
            (30.441_957_784_082_72, 7.023_102_950_896_653),
            (10.097_034_118_427_457, 3.805_217_937_147_945),
        ],
        &[
            (49.787_633_108_041_61, 4.589_940_271_249_083),
            (29.998_866_859_567_126, 2.001_707_343_315_781_7),
        ],
    ];
    for (team, exp_team) in results.iter().zip(expected) {
        for (player, (exp_rating, exp_uncertainty)) in team.iter().zip(exp_team) {
            assert!((player.rating - exp_rating).abs() < ERROR_MARGIN);
            assert!((player.uncertainty - exp_uncertainty).abs() < ERROR_MARGIN);
        }
    }
}
#[test]
fn test_ffa() {
    // Three-player free-for-all; the finishing ranks are deliberately
    // given out of list order (p1 first, p3 second, p2 last).
    let p1 = TrueSkillRating { rating: 41.023, uncertainty: 2.1333 };
    let p2 = TrueSkillRating { rating: 21.0, uncertainty: 1.87 };
    let p3 = TrueSkillRating { rating: 42.0, uncertainty: 1.223 };
    let teams = [[p1], [p2], [p3]];
    let teams_and_ranks: Vec<(&[TrueSkillRating], MultiTeamOutcome)> = teams
        .iter()
        .zip([1, 3, 2])
        .map(|(team, rank)| (&team[..], MultiTeamOutcome::new(rank)))
        .collect();
    let results =
        trueskill_multi_team(&teams_and_ranks, &TrueSkillConfig::new(), None).unwrap();
    // Expected (rating, uncertainty) per single-player team.
    let expected = [
        (41.720_925_460_665_01, 2.050_533_079_246_658_7),
        (20.997_268_045_415_94, 1.870_534_805_422_220_2),
        (41.771_076_420_914_83, 1.209_939_281_670_434_9),
    ];
    for (team, (exp_rating, exp_uncertainty)) in results.iter().zip(expected) {
        assert!((team[0].rating - exp_rating).abs() < ERROR_MARGIN);
        assert!((team[0].uncertainty - exp_uncertainty).abs() < ERROR_MARGIN);
    }
}
#[test]
fn test_unlikely_ffa() {
    // Deliberately extreme / degenerate inputs — huge rating gaps,
    // near-zero uncertainty, and even a negative uncertainty — to make
    // sure the algorithm stays numerically stable and does not panic.
    let p1 = TrueSkillRating { rating: 0.4, uncertainty: 8.1333 };
    let p2 = TrueSkillRating { rating: -21.0, uncertainty: 1.87 };
    let p3 = TrueSkillRating { rating: 122.0, uncertainty: 0.01 };
    let p4 = TrueSkillRating { rating: -1.0, uncertainty: -1.223 };
    let teams = [[p1], [p2], [p3], [p4]];
    let teams_and_ranks: Vec<(&[TrueSkillRating], MultiTeamOutcome)> = teams
        .iter()
        .zip([1, 3, 2, 2])
        .map(|(team, rank)| (&team[..], MultiTeamOutcome::new(rank)))
        .collect();
    let results =
        trueskill_multi_team(&teams_and_ranks, &TrueSkillConfig::new(), None).unwrap();
    // Expected (rating, uncertainty) per single-player team.
    let expected = [
        (46.844_398_641_974_97, 4.453_979_220_477_661),
        (-21.0, 1.871_855_882_391_709_3),
        (121.973_594_228_967_43, 0.083_922_196_135_183_55),
        (3.577_783_039_440_43, 1.197_926_990_096_302_3),
    ];
    for (team, (exp_rating, exp_uncertainty)) in results.iter().zip(expected) {
        assert!((team[0].rating - exp_rating).abs() < ERROR_MARGIN);
        assert!((team[0].uncertainty - exp_uncertainty).abs() < ERROR_MARGIN);
    }
}
#[test]
fn test_multi_teams_empty() {
    // No teams at all -> empty result, not an error.
    let res = trueskill_multi_team(&[], &TrueSkillConfig::new(), None);
    assert!(res.unwrap().is_empty());
    // An empty team stays empty, and the non-empty team is left at its
    // default rating (nothing to play against).
    let teams_and_ranks: [(&[TrueSkillRating], MultiTeamOutcome); 2] = [
        (&[TrueSkillRating::new()], MultiTeamOutcome::new(1)),
        (&[], MultiTeamOutcome::new(2)),
    ];
    let res = trueskill_multi_team(&teams_and_ranks, &TrueSkillConfig::new(), None).unwrap();
    assert!(res[1].is_empty());
    assert!((res[0][0].rating - 25.0).abs() < f64::EPSILON);
    assert!((res[0][0].uncertainty - 25.0 / 3.0).abs() < f64::EPSILON);
}
#[test]
fn test_weight_error() {
    // A weights slice with fewer entries than there are teams must be
    // rejected with `WeightError::TeamAmount`.
    let teams_and_ranks: [(&[TrueSkillRating], MultiTeamOutcome); 2] = [
        (&[TrueSkillRating::new()], MultiTeamOutcome::new(1)),
        (&[TrueSkillRating::new()], MultiTeamOutcome::new(2)),
    ];
    let weights: [&[f64]; 1] = [&[0.0]];
    let res = trueskill_multi_team(&teams_and_ranks, &TrueSkillConfig::new(), Some(&weights));
    assert!(matches!(res, Err(WeightError::TeamAmount)));
}
}