// deeprust/metrics/confusion_matrix.rs

// Task: evaluation metrics for deep-learning algorithm results
// Date: 4th Feb 2017
// Authors: @dvigneshwer
// Version: 0.0.1

// Modified by: @nifey
// Date: 6th Feb 2017
9use std::{i32, f32};
10
11// Sample function for assigning values to confusion matrix
12pub fn exec() {
13    // assigning random values to the confusion matrix
14    let sample = Confusionmatrix {
15        true_positive: 100,
16        true_negative: 50,
17        false_positive: 10,
18        false_negative: 5,
19    };
20
21    println!("The total predictions {}", sample.total());
22    // Calculating the accuracy of the model
23    println!("Accuracy of the model {:.2}", sample.accuracy());
24    // Calculating the precision of the model
25    println!("Precision of the model {:.2}", sample.precision());
26    // Calculating the true positive rate of the model
27    println!("True positive rate of the model {:.2}",
28             sample.true_poitive_rate());
29    // Calculating the false positive rate of the model
30    println!("False positive rate of the model {:.2}",
31             sample.false_positive_rate());
32    // Calculating the misclassification rate of the model
33    println!("Misclassification rate of the model {:.2}",
34             sample.misclassification_rate());
35    // Calculating the specificity of the model
36    println!("Specificity of the model {:.2}", sample.specificity());
37    // Calculating the prevalance of the model
38    println!("Prevalance of the model {:.2}", sample.prevalance());
39
40
41}
42
43//defining a struct to represent a confusion matrix for a binary classifier
/// Confusion matrix for a binary classifier: the four raw outcome counts
/// from which every derived metric (accuracy, precision, ...) is computed.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Confusionmatrix {
    // Correctly predicted positives.
    true_positive: i32,
    // Correctly predicted negatives.
    true_negative: i32,
    // Negatives wrongly predicted positive (type I error).
    false_positive: i32,
    // Positives wrongly predicted negative (type II error).
    false_negative: i32,
}
50
51impl Confusionmatrix {
52    //to find total number of predictions
53    fn total(&self) -> i32 {
54        self.true_positive + self.true_negative + self.false_positive + self.false_negative
55    }
56    //to find the accuracy of the model
57    fn accuracy(&self) -> f32 {
58        percentage((self.true_positive as f32 + self.true_negative as f32) / (self.total() as f32))
59    }
60    //to find the precision of the model
61    fn precision(&self) -> f32 {
62        percentage((self.true_positive as f32) /
63                   (self.true_positive as f32 + self.false_positive as f32))
64    }
65    //to find the true positive rate of the model
66    fn true_poitive_rate(&self) -> f32 {
67        percentage((self.true_positive as f32) /
68                   (self.true_positive as f32 + self.false_negative as f32))
69    }
70    //to find the false positive rate of the model
71    fn false_positive_rate(&self) -> f32 {
72        percentage((self.false_positive as f32) /
73                   (self.false_positive as f32 + self.true_negative as f32))
74    }
75    //to find the misclassification rate of the model
76    fn misclassification_rate(&self) -> f32 {
77        percentage((self.false_positive as f32 + self.false_negative as f32) /
78                   (self.total() as f32))
79    }
80    //to find the specificity of the model
81    fn specificity(&self) -> f32 {
82        percentage((self.true_negative as f32) /
83                   (self.false_positive as f32 + self.true_negative as f32))
84    }
85    //to find the prevalance of the model
86    fn prevalance(&self) -> f32 {
87        percentage((self.true_positive as f32 + self.false_negative as f32) / (self.total() as f32))
88    }
89}
90
91// Converting to percentage
/// Converts a fraction (e.g. 0.5) to a percentage (e.g. 50.0).
fn percentage(value: f32) -> f32 {
    // `value` is already an f32, so the original `as f32` cast was
    // redundant (clippy: unnecessary_cast).
    value * 100.0
}