iai_callgrind_runner/runner/tool/
regression.rs

1//! The module containing the basic elements for regression check configurations
2use std::fmt::Display;
3use std::hash::Hash;
4
5use crate::api;
6use crate::runner::cachegrind::regression::CachegrindRegressionConfig;
7use crate::runner::callgrind::regression::CallgrindRegressionConfig;
8use crate::runner::dhat::regression::DhatRegressionConfig;
9use crate::runner::format::print_regressions;
10use crate::runner::metrics::{Metric, MetricsSummary, Summarize};
11use crate::runner::summary::ToolRegression;
12use crate::util::EitherOrBoth;
13
14/// A short-lived utility enum used to hold the raw regressions until they can be transformed into a
15/// real [`ToolRegression`]
pub enum RegressionMetrics<T> {
    /// The result of a checked soft limit
    ///
    /// Fields in order: the metric kind, the new metric, the old metric, the percentage
    /// difference between new and old, and the configured soft limit in percent.
    Soft(T, Metric, Metric, f64, f64),
    /// The result of a checked hard limit
    ///
    /// Fields in order: the metric kind, the new metric, the amount by which the new metric
    /// exceeds the limit (new - limit), and the configured hard limit.
    Hard(T, Metric, Metric, Metric),
}
22
23/// The tool specific regression check configuration
#[derive(Debug, Clone, PartialEq)]
pub enum ToolRegressionConfig {
    /// The Callgrind configuration
    Callgrind(CallgrindRegressionConfig),
    /// The Cachegrind configuration
    Cachegrind(CachegrindRegressionConfig),
    /// The DHAT configuration
    Dhat(DhatRegressionConfig),
    /// If there is no configuration (no regression checks are performed for the tool)
    None,
}
35
36/// The trait which needs to be implemented in a tool specific regression check configuration
37pub trait RegressionConfig<T: Hash + Eq + Summarize + Display + Clone> {
38    /// Check the `MetricsSummary` for regressions.
39    ///
40    /// The limits for event kinds which are not present in the `MetricsSummary` are ignored.
41    fn check(&self, metrics_summary: &MetricsSummary<T>) -> Vec<ToolRegression>;
42
43    /// Check for regressions and print them if present
44    fn check_and_print(&self, metrics_summary: &MetricsSummary<T>) -> Vec<ToolRegression> {
45        let regressions = self.check(metrics_summary);
46        print_regressions(&regressions);
47        regressions
48    }
49
50    /// Check for regressions and return the [`RegressionMetrics`]
51    fn check_regressions(&self, metrics_summary: &MetricsSummary<T>) -> Vec<RegressionMetrics<T>> {
52        let mut regressions = vec![];
53        for (metric, new_cost, old_cost, pct, limit) in
54            self.get_soft_limits().iter().filter_map(|(kind, limit)| {
55                metrics_summary.diff_by_kind(kind).and_then(|d| {
56                    if let EitherOrBoth::Both(new, old) = d.metrics {
57                        // This unwrap is safe since the diffs are calculated if both costs are
58                        // present
59                        Some((kind, new, old, d.diffs.unwrap().diff_pct, limit))
60                    } else {
61                        None
62                    }
63                })
64            })
65        {
66            if limit.is_sign_positive() {
67                if pct > *limit {
68                    regressions.push(RegressionMetrics::Soft(
69                        metric.clone(),
70                        new_cost,
71                        old_cost,
72                        pct,
73                        *limit,
74                    ));
75                }
76            } else if pct < *limit {
77                regressions.push(RegressionMetrics::Soft(
78                    metric.clone(),
79                    new_cost,
80                    old_cost,
81                    pct,
82                    *limit,
83                ));
84            } else {
85                // no regression
86            }
87        }
88
89        for (metric, new_cost, limit) in
90            self.get_hard_limits().iter().filter_map(|(kind, limit)| {
91                metrics_summary
92                    .diff_by_kind(kind)
93                    .and_then(|d| d.metrics.left().map(|metric| (kind, metric, limit)))
94            })
95        {
96            if new_cost > limit {
97                regressions.push(RegressionMetrics::Hard(
98                    metric.clone(),
99                    *new_cost,
100                    *new_cost - *limit,
101                    *limit,
102                ));
103            }
104        }
105        regressions
106    }
107
108    /// Return the hard limits
109    fn get_hard_limits(&self) -> &[(T, Metric)];
110
111    /// Return the soft limits
112    fn get_soft_limits(&self) -> &[(T, f64)];
113}
114
115impl ToolRegressionConfig {
116    /// Return true if the configuration has fail fast set to true
117    pub fn is_fail_fast(&self) -> bool {
118        match self {
119            Self::Callgrind(regression_config) => regression_config.fail_fast,
120            Self::Cachegrind(regression_config) => regression_config.fail_fast,
121            Self::Dhat(regression_config) => regression_config.fail_fast,
122            Self::None => false,
123        }
124    }
125}
126
127impl TryFrom<api::ToolRegressionConfig> for ToolRegressionConfig {
128    type Error = String;
129
130    fn try_from(value: api::ToolRegressionConfig) -> std::result::Result<Self, Self::Error> {
131        match value {
132            api::ToolRegressionConfig::Callgrind(regression_config) => {
133                regression_config.try_into().map(Self::Callgrind)
134            }
135            api::ToolRegressionConfig::Cachegrind(regression_config) => {
136                regression_config.try_into().map(Self::Cachegrind)
137            }
138            api::ToolRegressionConfig::Dhat(regression_config) => {
139                regression_config.try_into().map(Self::Dhat)
140            }
141            api::ToolRegressionConfig::None => Ok(Self::None),
142        }
143    }
144}