sdl_parser/evaluation.rs

use std::collections::HashMap;

use anyhow::{anyhow, Result};
use serde::{Deserialize, Serialize};

use crate::{
    helpers::Connection,
    metric::{Metric, Metrics},
    Formalize,
};

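/// Minimum score an [`Evaluation`] must reach, given either as an absolute
/// value or as a percentage; `Evaluation::formalize` rejects configurations
/// where both are set at once.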
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)]
pub struct MinScore {
    pub absolute: Option<u32>,
    pub percentage: Option<u32>,
}

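/// Accepts both the longhand `min-score` map and the bare-number shorthand;
/// untagged so serde tries each representation in turn.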
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)]
#[serde(untagged)]
pub enum HelperScore {
    MinScore(MinScore),
    ShortMinScore(u32),
}

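// A bare shorthand number is interpreted as a percentage, not an absolute score.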
impl From<HelperScore> for MinScore {
    fn from(helper_score: HelperScore) -> Self {
        match helper_score {
            HelperScore::MinScore(score) => score,
            HelperScore::ShortMinScore(percentage) => MinScore {
                percentage: Some(percentage),
                absolute: None,
            },
        }
    }
}

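/// A named set of Metrics together with the minimum score required to pass.
/// `_helper_min_score` captures the `min-score` field (in any accepted
/// spelling) during deserialization and is converted into `min_score` by
/// `formalize`.
///
/// ```yaml
/// evaluation-1:
///     description: some description
///     metrics:
///         - metric-1
///         - metric-2
///     min-score: 50
/// ```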
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)]
pub struct Evaluation {
    #[serde(default, alias = "Name", alias = "NAME")]
    pub name: Option<String>,
    #[serde(alias = "Description", alias = "DESCRIPTION")]
    pub description: Option<String>,
    #[serde(alias = "Metrics", alias = "METRICS")]
    pub metrics: Vec<String>,
    #[serde(
        default,
        rename = "min-score",
        alias = "Min-score",
        alias = "MIN-SCORE",
        skip_serializing
    )]
    pub _helper_min_score: Option<HelperScore>,
    #[serde(default, skip_deserializing)]
    pub min_score: Option<MinScore>,
}

impl Evaluation {
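    /// Checks that an absolute min-score does not exceed the combined
    /// max score of the Scenario's Metrics.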
    pub fn validate_evaluation_metric_scores(
        &self,
        potential_metrics: Option<&Metrics>,
    ) -> Result<()> {
        if let Some(metrics) = potential_metrics {
            let metric_score_sum = metrics.iter().map(|(_, metric)| metric.max_score).sum();
            if let Some(min_score) = &self.min_score {
                if let Some(absolute_min_score) = min_score.absolute {
                    if absolute_min_score > metric_score_sum {
                        return Err(anyhow!(
                            "Evaluation min-score has to be smaller than or equal to the sum of metric scores"
                        ));
                    }
                }
            }
        } else {
            return Err(anyhow!(
                "Evaluation requires Metrics but none found under Scenario"
            ));
        }
        Ok(())
    }
}

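// Pairs an evaluation's name with the evaluation itself so errors can cite the
// name; verifies that every Metric the Evaluation references exists under the
// Scenario's Metrics.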
impl Connection<Metric> for (&String, &Evaluation) {
    fn validate_connections(&self, potential_metric_names: &Option<Vec<String>>) -> Result<()> {
        if let Some(metric_names) = potential_metric_names {
            for metric_name in &self.1.metrics {
                if !metric_names.contains(metric_name) {
                    return Err(anyhow!(
                        "Evaluation \"{evaluation_name}\" Metric \"{metric_name}\" not found under Scenario Metrics",
                        evaluation_name = self.0
                    ));
                }
            }
        } else {
            return Err(anyhow!(
                "Evaluation \"{evaluation_name}\" requires Metrics but none found under Scenario",
                evaluation_name = self.0
            ));
        }
        Ok(())
    }
}

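/// Evaluations as defined in the SDL, keyed by evaluation name.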
pub type Evaluations = HashMap<String, Evaluation>;

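// Formalizing converts the `min-score` deserialization helper into the
// canonical `min_score` field and validates the Evaluation's invariants.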
impl Formalize for Evaluation {
    fn formalize(&mut self) -> Result<()> {
        if let Some(helper_min_score) = &self._helper_min_score {
            self.min_score = Some(helper_min_score.to_owned().into());
        } else {
            return Err(anyhow!("An Evaluation is missing min-score"));
        }
        if let Some(score) = &self.min_score {
            if score.absolute.is_some() && score.percentage.is_some() {
                return Err(anyhow!(
                    "An Evaluation's min-score can only have either Absolute or Percentage defined, not both"
                ));
            }
        }
        if self.metrics.is_empty() {
            return Err(anyhow!("An Evaluation must have at least one Metric"));
        }
        if let Some(min_score) = &self.min_score {
            if let Some(percentage) = min_score.percentage {
                if percentage > 100 {
                    return Err(anyhow!("Min score percentage cannot be over 100%"));
                }
            }
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::parse_sdl;

    #[test]
    fn parses_sdl_with_evaluation() {
        let sdl = r#"
            name: test-scenario
            description: some description
            start: 2022-01-20T13:00:00Z
            end: 2022-01-20T23:00:00Z
            conditions:
                condition-1:
                    command: executable/path.sh
                    interval: 30
            metrics:
                metric-1:
                    type: MANUAL
                    artifact: true
                    max-score: 10
                metric-2:
                    type: CONDITIONAL
                    max-score: 10
                    condition: condition-1
            evaluations:
                evaluation-1:
                    description: some description
                    metrics:
                        - metric-1
                        - metric-2
                    min-score: 50
        "#;
        let evaluations = parse_sdl(sdl).unwrap().evaluations;
        insta::with_settings!({sort_maps => true}, {
            insta::assert_yaml_snapshot!(evaluations);
        });
    }

    #[test]
    #[should_panic(
        expected = "Evaluation \"evaluation-1\" Metric \"metric-2\" not found under Scenario Metrics"
    )]
    fn fails_with_missing_metric() {
        let sdl = r#"
            name: test-scenario
            description: some description
            conditions:
                condition-1:
                    command: executable/path.sh
                    interval: 30
            metrics:
                metric-1:
                    type: MANUAL
                    artifact: true
                    max-score: 10
            evaluations:
                evaluation-1:
                    description: some description
                    metrics:
                        - metric-1
                        - metric-2
                    min-score: 50
        "#;
        parse_sdl(sdl).unwrap();
    }

    #[test]
    fn parses_shorthand_evaluation() {
        let evaluation_string = r#"
            description: some-description
            metrics:
                - metric-1
                - metric-2
            min-score: 50
        "#;
        let mut evaluation: Evaluation = serde_yaml::from_str(evaluation_string).unwrap();
        assert!(evaluation.formalize().is_ok());
    }

    #[test]
    fn parses_longhand_evaluation() {
        let evaluation_string = r#"
            description: some-description
            metrics:
                - metric-1
                - metric-2
            min-score:
                absolute: 50
        "#;
        let mut evaluation: Evaluation = serde_yaml::from_str(evaluation_string).unwrap();
        assert!(evaluation.formalize().is_ok());
    }

    #[test]
    fn fails_to_parse_evaluation_with_both_scores() {
        let evaluation_string = r#"
            description: some-description
            metrics:
                - metric-1
                - metric-2
            min-score:
                absolute: 50
                percentage: 60
        "#;
        let mut evaluation: Evaluation = serde_yaml::from_str(evaluation_string).unwrap();
        assert!(evaluation.formalize().is_err());
    }

    #[test]
    #[should_panic(
        expected = "Evaluation min-score has to be smaller than or equal to the sum of metric scores"
    )]
    fn fails_to_parse_evaluation_with_too_large_min_score() {
        let sdl = r#"
            name: test-scenario
            description: some description
            conditions:
                condition-1:
                    command: executable/path.sh
                    interval: 30
            metrics:
                metric-1:
                    type: MANUAL
                    artifact: true
                    max-score: 10
                metric-2:
                    type: CONDITIONAL
                    max-score: 10
                    condition: condition-1
            evaluations:
                evaluation-1:
                    description: some description
                    metrics:
                        - metric-1
                        - metric-2
                    min-score:
                        absolute: 9999
        "#;
        parse_sdl(sdl).unwrap();
    }

    #[test]
    fn fails_to_parse_too_high_min_score() {
        let evaluation_string = r#"
            description: some description
            metrics:
                - metric-1
                - metric-2
            min-score: 101
        "#;
        let mut evaluation: Evaluation = serde_yaml::from_str(evaluation_string).unwrap();
        assert!(evaluation.formalize().is_err());
    }
}