// benchmark_rs/benchmarks.rs

1use std::collections::{HashMap, HashSet};
2use std::fmt::Display;
3use std::fs::{create_dir_all, File};
4use std::io::{BufWriter, Write};
5use std::path::PathBuf;
6
7use anyhow::{anyhow, Context, Error};
8
9use crate::analysis_result::AnalysisResult;
10use crate::benchmark::Benchmark;
11use crate::benchmark_comparison::BenchmarkComparison;
12use crate::run_summary::RunSummary;
13use crate::series_summary::SeriesSummary;
14use crate::stopwatch::StopWatch;
15use crate::summary::Summary;
16
/// Run and analyze a suite of benchmarks.
///
/// Type parameters:
/// * `C` - configuration value passed to every benchmark function
/// * `W` - workload unit; one element of the workload vector is passed to
///   the benchmark function per iteration
/// * `E` - error type returned by benchmark functions; must be convertible
///   into [anyhow::Error]
///
pub struct Benchmarks<C, W, E>
where
    C: Clone + Display,
    W: Clone + Display,
    Error: From<E>,
{
    // Name of the whole benchmark session; also used as the JSON file stem
    // by `save_to_json`.
    name: String,
    // Benchmark names registered so far; guards against duplicates in `add`.
    names: HashSet<String>,
    // Registered benchmarks, executed in insertion order by `run`.
    benchmarks: Vec<Benchmark<C, W, E>>,
    // Per-series results keyed by benchmark name, populated by `run`.
    summaries: HashMap<String, SeriesSummary>,
}
34
35impl<C, W, E> Benchmarks<C, W, E>
36where
37    C: Clone + Display,
38    W: Clone + Display,
39    Error: From<E>,
40{
41    /// Create a new [Benchmarks]
42    ///
43    /// * `name` - the name of the benchmark session
44    pub fn new(name: &str) -> Benchmarks<C, W, E> {
45        Benchmarks {
46            name: name.to_string(),
47            names: Default::default(),
48            benchmarks: vec![],
49            summaries: Default::default(),
50        }
51    }
52
53    /// Run all benchmarks
54    pub fn run(&mut self) -> Result<(), Error> {
55        for benchmark in &self.benchmarks {
56            let summary = benchmark.run()?;
57            self.summaries.insert(benchmark.name().clone(), summary);
58        }
59        Ok(())
60    }
61
62    /// Create and add a Benchmark
63    ///
64    /// * `name` - the name of the benchmark series. The result will be accessible by the name as
65    /// a key from the summary.
66    /// * `f` - the function that runs the benchmark.
67    /// * `config` - the configuration value for this benchmark series
68    /// * `work` - workload points vector for this benchmark series. Elements of this vector are
69    /// passed to `f` in each iteration
70    /// * `repeat` - number of times the benchmark will be repeated
71    /// * `ramp_up` - number of times the benchmark will be performed before the measurement is
72    /// taken
73    ///
74    pub fn add(
75        &mut self,
76        name: &str,
77        f: fn(stop_watch: &mut StopWatch, config: C, workload_point: W) -> Result<(), E>,
78        config: C,
79        work: Vec<W>,
80        repeat: usize,
81        ramp_up: usize,
82    ) -> Result<(), Error> {
83        let exists = !self.names.insert(name.to_string());
84        if exists {
85            Err(anyhow!(
86                "Benchmark with identical name exists: {}",
87                name.to_string()
88            ))
89        } else if repeat == 0 {
90            Err(anyhow!("Cannot benchmark 0 runs"))
91        } else {
92            self.benchmarks.push(Benchmark::new(
93                name.to_string(),
94                f,
95                config,
96                work,
97                repeat,
98                ramp_up,
99            ));
100            Ok(())
101        }
102    }
103
104    /// Produce [Summary] for all series
105    pub fn summary(&self) -> Summary {
106        let mut summary = Summary::new(self.name().clone());
107        for (name, series_summary) in &self.summaries {
108            summary.add(name.clone(), series_summary.clone());
109        }
110        summary
111    }
112
113    /// Produce [Summary] for all series as JSON string.
114    pub fn summary_as_json(&self) -> String {
115        let summary = self.summary();
116        serde_json::to_string_pretty(&summary).unwrap()
117    }
118
119    /// Produce summary for each series as vector of CSV lines. The key for series data is the
120    /// series name used in [Self::add] method, values are placed as the headers returned by [Self::csv_headers]
121    pub fn summary_as_csv(
122        &self,
123        with_headers: bool,
124        with_config: bool,
125    ) -> HashMap<String, Vec<String>> {
126        let mut result = HashMap::new();
127        for (name, summary) in &self.summaries {
128            result.insert(name.clone(), summary.as_csv(with_headers, with_config));
129        }
130        result
131    }
132
133    /// Save each benchmark summary to its own CSV file
134    ///
135    /// The name of each CSV file is the name of the benchmark.
136    /// * `dir` - directory to store results. If doesn't exist - create it.
137    /// * `with_headers` - add column headers on the first line
138    /// * `with_config` - add the configuration string in the headers row
139    ///
140    /// ```
141    /// use std::path::PathBuf;
142    /// use benchmark_rs::benchmarks::Benchmarks;
143    /// let benchmarks = Benchmarks::<usize, usize, anyhow::Error>::new("example");
144    /// benchmarks.save_to_csv(PathBuf::from("./target/benchmarks"), true, true).expect("failed to save to csv");
145    /// ```
146    pub fn save_to_csv(
147        &self,
148        dir: PathBuf,
149        with_headers: bool,
150        with_config: bool,
151    ) -> Result<(), anyhow::Error> {
152        if !dir.exists() {
153            create_dir_all(&dir)?;
154        }
155        let series_csv = self.summary_as_csv(with_headers, with_config);
156        for (name, series) in series_csv {
157            let mut results_path = dir.join(name.clone());
158            results_path.set_extension("csv");
159            let mut results_writer = BufWriter::new(
160                File::create(&results_path)
161                    .with_context(|| anyhow!("path: {}", results_path.to_string_lossy()))?,
162            );
163            for record in series {
164                writeln!(results_writer, "{}", record)?;
165            }
166        }
167        Ok(())
168    }
169
170    /// Save the summary to a json file.
171    ///
172    /// The name of the JSON file is the name of the suite of benchmarks.
173    /// If dir doesn't exist - create it.
174    /// ```
175    /// use std::path::PathBuf;
176    /// use anyhow::anyhow;
177    /// use benchmark_rs::benchmarks::Benchmarks;
178    /// let benchmarks = Benchmarks::<usize, usize, anyhow::Error>::new("example");
179    /// benchmarks.save_to_json(PathBuf::from("./target/benchmarks")).expect("failed to save to json");
180    /// ```
181    pub fn save_to_json(&self, dir: PathBuf) -> Result<(), anyhow::Error> {
182        if !dir.exists() {
183            create_dir_all(&dir)?;
184        }
185        let mut results_path = dir.join(self.name());
186        results_path.set_extension("json");
187        let mut writer = BufWriter::new(
188            File::create(&results_path)
189                .with_context(|| anyhow!("path: {}", results_path.to_string_lossy()))?,
190        );
191        writer.write_all(self.summary_as_json().as_bytes())?;
192        Ok(())
193    }
194
195    /// Produce description of configurations for each series. The key for series config is the
196    /// series name used in [Self::add] method.
197    pub fn configs(&self) -> HashMap<String, String> {
198        let mut result = HashMap::new();
199        for (name, summary) in &self.summaries {
200            result.insert(name.clone(), summary.config());
201        }
202        result
203    }
204
    /// The benchmarks suite name, as given to [Self::new].
    pub fn name(&self) -> &String {
        &self.name
    }
209
210    fn compare_median(
211        point: &str,
212        current: u64,
213        previous: u64,
214        threshold: f64,
215    ) -> BenchmarkComparison {
216        let change = (current as f64 / (previous as f64 / 100.0)) - 100.0;
217        let point = point.to_owned();
218        if (current == previous) || (change.abs() <= threshold.abs()) {
219            BenchmarkComparison::Equal {
220                point,
221                previous,
222                current,
223                change,
224            }
225        } else if change < 0.0 {
226            BenchmarkComparison::Less {
227                point,
228                previous,
229                current,
230                change,
231            }
232        } else {
233            BenchmarkComparison::Greater {
234                point,
235                previous,
236                current,
237                change,
238            }
239        }
240    }
241
242    fn compare_series(
243        current_series: &[(String, RunSummary)],
244        previous_series: &[(String, RunSummary)],
245        threshold: f64,
246    ) -> Result<HashMap<String, BenchmarkComparison>, Error> {
247        let current_points: Vec<String> = current_series
248            .iter()
249            .map(|(point, _run_summary)| point.clone())
250            .collect();
251        let previous_points: Vec<String> = previous_series
252            .iter()
253            .map(|(point, _run_summary)| point.clone())
254            .collect();
255
256        if current_points.is_empty() || previous_points.is_empty() {
257            Err(anyhow!("Can compare only non empty series"))
258        } else if current_points != previous_points {
259            Err(anyhow!(
260                "Can compare series with identical workload points only"
261            ))
262        } else {
263            let mut comparisons = HashMap::new();
264            for i in 0..current_series.len() {
265                let point = current_series[i].0.clone();
266                let comparison = Self::compare_median(
267                    point.as_str(),
268                    current_series[i].1.median_nanos(),
269                    previous_series[i].1.median_nanos(),
270                    threshold,
271                );
272
273                comparisons.insert(point, comparison);
274            }
275            Ok(comparisons)
276        }
277    }
278
279    /// Compare the current result against a previous result.
280    ///
281    /// * `prev_result_string_opt` - a JSON string of the [Summary] of previous run
282    /// * `threshold` - threshold used to determine equality.
283    pub fn analyze(
284        &self,
285        prev_result_string_opt: Option<String>,
286        threshold: f64,
287    ) -> Result<AnalysisResult, Error> {
288        let current_summary = self.summary();
289        let prev_summary = match prev_result_string_opt {
290            None => Summary::new(self.name().clone()),
291            Some(prev_result_string) => {
292                serde_json::from_str::<Summary>(prev_result_string.as_str())?
293            }
294        };
295        if current_summary.name() != prev_summary.name() {
296            Err(anyhow!(
297                "Comparing differently named benchmarks.rs: {} <=> {}",
298                current_summary.name(),
299                prev_summary.name()
300            ))
301        } else {
302            let mut analysis_result = AnalysisResult::new(current_summary.name().clone());
303            for (name, current_series_summary) in current_summary.series() {
304                match prev_summary.series().get(name) {
305                    None => {
306                        analysis_result.add_new(name.clone());
307                    }
308                    Some(prev_series_summary) => {
309                        let comparisons = Self::compare_series(
310                            current_series_summary.runs(),
311                            prev_series_summary.runs(),
312                            threshold,
313                        )?;
314                        analysis_result.add(name.clone(), comparisons);
315                    }
316                }
317            }
318            Ok(analysis_result)
319        }
320    }
321}