iai_callgrind_runner/runner/bin_bench.rs

//! The module responsible for running a binary benchmark

mod defaults {
    use crate::api::Stdin;

    pub const COMPARE_BY_ID: bool = false;
    pub const ENV_CLEAR: bool = true;
    pub const STDIN: Stdin = Stdin::Pipe;
    pub const WORKSPACE_ROOT_ENV: &str = "_WORKSPACE_ROOT";
}

use std::collections::HashMap;
use std::ffi::OsString;
use std::io::ErrorKind::WouldBlock;
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, TcpStream, UdpSocket};
use std::path::{Path, PathBuf};
use std::sync::mpsc::{self, RecvTimeoutError};
use std::time::{Duration, Instant};
use std::{panic, thread};

use anyhow::{anyhow, Context, Result};
use log::{debug, warn};

use super::common::{Assistant, AssistantKind, Baselines, BenchmarkSummaries, Config, ModulePath};
use super::format::{BinaryBenchmarkHeader, OutputFormat};
use super::meta::Metadata;
use super::summary::{BaselineKind, BaselineName, BenchmarkKind, BenchmarkSummary, SummaryOutput};
use super::tool::config::ToolConfigs;
use super::tool::path::{ToolOutputPath, ToolOutputPathKind};
use super::tool::run::RunOptions;
use crate::api::{
    self, BinaryBenchmarkBench, BinaryBenchmarkConfig, BinaryBenchmarkGroups, DelayKind,
    EntryPoint, Stdin, ValgrindTool,
};
use crate::error::Error;
use crate::runner::format;

#[derive(Debug)]
struct BaselineBenchmark {
    baseline_kind: BaselineKind,
}

/// A `BinBench` represents a single benchmark under the `#[binary_benchmark]` macro
#[derive(Debug)]
pub struct BinBench {
    /// The arguments of the `args` attribute as a single string
    pub args: Option<String>,
    /// The [`Command`] to execute under valgrind
    pub command: Command,
    /// The default [`ValgrindTool`]. If not changed, it is `Callgrind`.
    pub default_tool: ValgrindTool,
    /// The name of the annotated function
    pub function_name: String,
    /// The id of the benchmark as in `#[bench::id]`
    pub id: Option<String>,
    /// The [`ModulePath`]
    pub module_path: ModulePath,
    /// The [`OutputFormat`]
    pub output_format: OutputFormat,
    /// The [`RunOptions`]
    pub run_options: RunOptions,
    /// The tool configurations for this benchmark run
    pub tools: ToolConfigs,
}

/// The Command derived from the `api::Command`
///
/// If the path is relative, we convert it to an absolute path relative to the workspace root.
/// `stdin`, `stdout` and `stderr` of the `api::Command` are part of the `RunOptions`, not part of
/// this `Command`.
#[derive(Debug, Clone)]
pub struct Command {
    /// The arguments to pass to the executable
    pub args: Vec<OsString>,
    /// The path to the executable
    pub path: PathBuf,
}

/// The `Delay` which should be applied to the [`Command`]
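///
/// # Example
///
/// A minimal sketch of how a `Delay` can be constructed and applied (the address is a
/// placeholder, not taken from a real benchmark):
///
/// ```ignore
/// use std::time::Duration;
///
/// // Wait until a local TCP server accepts connections, polling every 20ms and
/// // giving up after 1 second.
/// let delay = Delay::new(
///     Duration::from_millis(20),
///     Duration::from_secs(1),
///     DelayKind::TcpConnect("127.0.0.1:3000".parse().unwrap()),
/// );
/// delay.run()?;
/// ```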
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Delay {
    /// The kind of delay
    pub kind: DelayKind,
    /// The polling time to check the delay condition
    pub poll: Duration,
    /// The timeout for the delay
    pub timeout: Duration,
}

#[derive(Debug)]
struct Group {
    benches: Vec<BinBench>,
    compare_by_id: bool,
    /// The module path so far, which should be `file_name::group_name`
    module_path: ModulePath,
    /// This name is the name from the `binary_benchmark_group!` macro
    ///
    /// Due to the way we expand the `binary_benchmark_group!` macro, we can safely assume that
    /// this name is unique.
    name: String,
    setup: Option<Assistant>,
    teardown: Option<Assistant>,
}

#[derive(Debug)]
struct Groups(Vec<Group>);

#[derive(Debug)]
struct LoadBaselineBenchmark {
    baseline: BaselineName,
    loaded_baseline: BaselineName,
}

#[derive(Debug)]
struct Runner {
    benchmark: Box<dyn Benchmark>,
    config: Config,
    groups: Groups,
    setup: Option<Assistant>,
    teardown: Option<Assistant>,
}

#[derive(Debug)]
struct SaveBaselineBenchmark {
    baseline: BaselineName,
}

trait Benchmark: std::fmt::Debug {
    fn baselines(&self) -> Baselines;
    fn output_path(&self, bin_bench: &BinBench, config: &Config, group: &Group) -> ToolOutputPath;
    fn run(&self, bin_bench: &BinBench, config: &Config, group: &Group)
        -> Result<BenchmarkSummary>;
}
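// There are three `Benchmark` implementations, chosen in `Runner::new` based on
// `config.meta.args`:
//
// * `BaselineBenchmark`: the regular run, comparing against the old outputs or a
//   named baseline if `args.baseline` is set.
// * `SaveBaselineBenchmark`: runs the benchmarks and stores the results under the
//   baseline named in `args.save_baseline`.
// * `LoadBaselineBenchmark`: compares two previously recorded baselines
//   (`args.load_baseline` against `args.baseline`) without running the command
//   again.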

impl Benchmark for BaselineBenchmark {
    fn output_path(&self, bin_bench: &BinBench, config: &Config, group: &Group) -> ToolOutputPath {
        let kind = if bin_bench.default_tool.has_output_file() {
            ToolOutputPathKind::Out
        } else {
            ToolOutputPathKind::Log
        };
        ToolOutputPath::new(
            kind,
            bin_bench.default_tool,
            &self.baseline_kind,
            &config.meta.target_dir,
            &group.module_path,
            &bin_bench.name(),
        )
    }

    fn baselines(&self) -> Baselines {
        match &self.baseline_kind {
            BaselineKind::Old => (None, None),
            BaselineKind::Name(name) => (None, Some(name.to_string())),
        }
    }

    fn run(
        &self,
        bin_bench: &BinBench,
        config: &Config,
        group: &Group,
    ) -> Result<BenchmarkSummary> {
        let header = BinaryBenchmarkHeader::new(&config.meta, bin_bench);
        header.print();

        let out_path = self.output_path(bin_bench, config, group);
        out_path.init()?;

        for path in bin_bench.tools.output_paths(&out_path) {
            path.shift()?;
            if path.kind == ToolOutputPathKind::Out {
                path.to_log_output().shift()?;
            }
            if let Some(path) = path.to_xtree_output() {
                path.shift()?;
            }
            if let Some(path) = path.to_xleak_output() {
                path.shift()?;
            }
        }

        let benchmark_summary = bin_bench.create_benchmark_summary(
            config,
            &out_path,
            &bin_bench.function_name,
            header.description(),
            self.baselines(),
        )?;

        bin_bench.tools.run(
            &header.to_title(),
            benchmark_summary,
            &self.baselines(),
            &self.baseline_kind,
            config,
            &bin_bench.command.path,
            &bin_bench.command.args,
            &bin_bench.run_options,
            &out_path,
            false,
            &bin_bench.module_path,
            &bin_bench.output_format,
        )
    }
}

impl BinBench {
    #[allow(clippy::too_many_lines)]
    fn new(
        meta: &Metadata,
        group: &Group,
        config: BinaryBenchmarkConfig,
        group_index: usize,
        bench_index: usize,
        binary_benchmark_bench: BinaryBenchmarkBench,
        default_tool: ValgrindTool,
    ) -> Result<Self> {
        let module_path = group
            .module_path
            .join(&binary_benchmark_bench.function_name);

        let default_tool = meta
            .args
            .default_tool
            .unwrap_or_else(|| config.default_tool.unwrap_or(default_tool));

        let api::Command {
            path,
            args,
            stdin,
            stdout,
            stderr,
            delay,
            ..
        } = binary_benchmark_bench.command;

        let command = Command::new(&module_path, path, args).map_err(|error| {
            Error::ConfigurationError(
                module_path.clone(),
                binary_benchmark_bench.id.clone(),
                error.to_string(),
            )
        })?;

        let mut assistant_envs = config.collect_envs();
        assistant_envs.push((
            OsString::from(defaults::WORKSPACE_ROOT_ENV),
            meta.project_root.clone().into(),
        ));

        let command_envs = config.resolve_envs();

        let mut output_format = config
            .output_format
            .map_or_else(OutputFormat::default, Into::into);
        output_format.kind = meta.args.output_format;

        let tool_configs = ToolConfigs::new(
            &mut output_format,
            config.tools,
            &module_path,
            binary_benchmark_bench.id.as_ref(),
            meta,
            default_tool,
            &EntryPoint::None,
            &config.valgrind_args,
            &HashMap::default(),
        )
        .map_err(|error| {
            Error::ConfigurationError(
                module_path.clone(),
                binary_benchmark_bench.id.clone(),
                error.to_string(),
            )
        })?;

        let setup = binary_benchmark_bench
            .has_setup
            .then_some(Assistant::new_bench_assistant(
                AssistantKind::Setup,
                &group.name,
                (group_index, bench_index),
                stdin.as_ref().and_then(|s| {
                    if let Stdin::Setup(p) = s {
                        Some(*p)
                    } else {
                        None
                    }
                }),
                assistant_envs.clone(),
                config.setup_parallel.unwrap_or(false),
            ));
        let teardown =
            binary_benchmark_bench
                .has_teardown
                .then_some(Assistant::new_bench_assistant(
                    AssistantKind::Teardown,
                    &group.name,
                    (group_index, bench_index),
                    None,
                    assistant_envs,
                    false,
                ));

        Ok(Self {
            id: binary_benchmark_bench.id,
            args: binary_benchmark_bench.args,
            function_name: binary_benchmark_bench.function_name,
            tools: tool_configs,
            run_options: RunOptions {
                env_clear: config.env_clear.unwrap_or(defaults::ENV_CLEAR),
                envs: command_envs,
                stdin: stdin.or(Some(defaults::STDIN)),
                stdout,
                stderr,
                exit_with: config.exit_with,
                current_dir: config.current_dir,
                setup,
                teardown,
                sandbox: config.sandbox,
                delay: delay.map(Into::into),
            },
            module_path,
            command,
            output_format,
            default_tool,
        })
    }

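    // The benchmark name: for example, `#[bench::two_args]` on a function `my_bench` yields
    // "my_bench.two_args" (illustrative names); without an id it is just the function name.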
    fn name(&self) -> String {
        if let Some(bench_id) = &self.id {
            format!("{}.{}", self.function_name, bench_id)
        } else {
            self.function_name.clone()
        }
    }

    fn create_benchmark_summary(
        &self,
        config: &Config,
        output_path: &ToolOutputPath,
        function_name: &str,
        description: Option<String>,
        baselines: Baselines,
    ) -> Result<BenchmarkSummary> {
        let summary_output = if let Some(format) = config.meta.args.save_summary {
            let output = SummaryOutput::new(format, &output_path.dir);
            output.init()?;
            Some(output)
        } else {
            None
        };

        Ok(BenchmarkSummary::new(
            BenchmarkKind::BinaryBenchmark,
            config.meta.project_root.clone(),
            config.package_dir.clone(),
            config.bench_file.clone(),
            self.command.path.clone(),
            &self.module_path,
            function_name,
            self.id.clone(),
            description,
            summary_output,
            baselines,
        ))
    }
}

impl Command {
    fn new(module_path: &ModulePath, path: PathBuf, args: Vec<OsString>) -> Result<Self> {
        if path.as_os_str().is_empty() {
            return Err(anyhow!("{module_path}: Empty path in command"));
        }

        Ok(Self { args, path })
    }
}

impl Delay {
    /// Create a new `Delay`
    pub fn new(poll: Duration, timeout: Duration, kind: DelayKind) -> Self {
        Self {
            kind,
            poll,
            timeout,
        }
    }

    /// Apply the `Delay`
    ///
    /// A [`DelayKind::DurationElapse`] delay simply sleeps for the given duration. All other
    /// kinds poll their condition in a separate thread and return an error if the condition is
    /// not met within `self.timeout`.
    pub fn run(&self) -> Result<()> {
        if let DelayKind::DurationElapse(_) = self.kind {
            self.exec_delay_fn()
        } else {
            let (tx, rx) = mpsc::channel::<std::result::Result<(), anyhow::Error>>();

            let delay = self.clone();
            let handle = thread::spawn(move || {
                tx.send(delay.exec_delay_fn()).map_err(|error| {
                    anyhow!("Command::Delay MPSC channel send error. Error: {error:?}")
                })
            });

            match rx.recv_timeout(self.timeout) {
                Ok(result) => {
                    // These unwraps are safe: we received a value, so the thread neither
                    // panicked nor failed to send.
                    handle.join().unwrap().unwrap();
                    result.map(|()| debug!("Command::Delay successfully executed."))
                }
                Err(RecvTimeoutError::Timeout) => {
                    Err(anyhow!("Timeout of '{:?}' reached", self.timeout))
                }
                Err(RecvTimeoutError::Disconnected) => {
                    // The disconnect is caused by a panic in the thread, so the `unwrap_err` is
                    // safe. We propagate the panic as is.
                    panic::resume_unwind(handle.join().unwrap_err())
                }
            }
        }
    }

    fn exec_delay_fn(&self) -> Result<()> {
        match &self.kind {
            DelayKind::DurationElapse(duration) => {
                thread::sleep(*duration);
            }
            DelayKind::TcpConnect(addr) => {
                while let Err(_err) = TcpStream::connect(addr) {
                    thread::sleep(self.poll);
                }
            }
            DelayKind::UdpResponse(remote, req) => {
                let socket = match remote {
                    SocketAddr::V4(_) => {
                        UdpSocket::bind(SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0))
                            .context("Could not bind local IPv4 UDP socket.")?
                    }
                    SocketAddr::V6(_) => {
                        UdpSocket::bind(SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0))
                            .context("Could not bind local IPv6 UDP socket.")?
                    }
                };

                socket.set_read_timeout(Some(self.poll))?;
                socket.set_write_timeout(Some(self.poll))?;

                loop {
                    while let Err(_err) = socket.send_to(req.as_slice(), remote) {
                        thread::sleep(self.poll);
                    }

                    let mut buf = [0; 1];
                    match socket.recv(&mut buf) {
                        Ok(_size) => break,
                        Err(e) => {
                            if e.kind() != WouldBlock {
                                thread::sleep(self.poll);
                            }
                        }
                    }
                }
            }
            DelayKind::PathExists(path) => {
                let wait_for_path = Path::new(path);
                while !wait_for_path.exists() {
                    thread::sleep(self.poll);
                }
            }
        }

        Ok(())
    }
}

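// The conversion from `api::Delay` fills in the defaults: a poll interval of 10ms and a
// timeout of 600 seconds. For `DelayKind::DurationElapse` both settings are ignored, and a
// poll interval that is not smaller than the timeout is reduced to 5ms below the timeout.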
impl From<api::Delay> for Delay {
    fn from(value: api::Delay) -> Self {
        let (poll, timeout) = if let DelayKind::DurationElapse(_) = value.kind {
            if value.poll.is_some() {
                warn!("Ignoring poll setting. Not supported for {:?}", value.kind);
            }
            if value.timeout.is_some() {
                warn!(
                    "Ignoring timeout setting. Not supported for {:?}",
                    value.kind
                );
            }
            (Duration::ZERO, Duration::ZERO)
        } else {
            let mut poll = value.poll.unwrap_or_else(|| Duration::from_millis(10));
            let timeout = value.timeout.map_or_else(
                || Duration::from_secs(600),
                |t| {
                    if t < Duration::from_millis(10) {
                        warn!("The minimum timeout setting is 10ms");
                        Duration::from_millis(10)
                    } else {
                        t
                    }
                },
            );

            if poll >= timeout {
                warn!(
                    "Poll duration is equal to or greater than the timeout duration ({poll:?} >= \
                     {timeout:?})."
                );
                poll = timeout - Duration::from_millis(5);
                warn!("Using poll duration {poll:?} instead");
            }
            (poll, timeout)
        };

        Self {
            poll,
            timeout,
            kind: value.kind,
        }
    }
}

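// `Group::run` runs every benchmark in the group, prints and saves its summary, and checks for
// regressions (honoring fail-fast). If `compare_by_id` is enabled for the group, benchmarks
// with the same id are additionally compared with each other.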
impl Group {
    fn run(&self, benchmark: &dyn Benchmark, config: &Config) -> Result<BenchmarkSummaries> {
        let mut benchmark_summaries = BenchmarkSummaries::default();

        let mut summaries: HashMap<String, Vec<BenchmarkSummary>> =
            HashMap::with_capacity(self.benches.len());
        for bench in &self.benches {
            let fail_fast = bench
                .tools
                .0
                .iter()
                .any(|c| c.regression_config.is_fail_fast());

            let summary = benchmark.run(bench, config, self)?;
            summary.print_and_save(&config.meta.args.output_format)?;
            summary.check_regression(fail_fast)?;

            benchmark_summaries.add_summary(summary.clone());
            if self.compare_by_id && bench.output_format.is_default() {
                if let Some(id) = &summary.id {
                    if let Some(sums) = summaries.get_mut(id) {
                        for sum in sums.iter() {
                            sum.compare_and_print(id, &summary, &bench.output_format)?;
                        }
                        sums.push(summary);
                    } else {
                        summaries.insert(id.clone(), vec![summary]);
                    }
                }
            }
        }

        Ok(benchmark_summaries)
    }
}

impl Groups {
    fn from_binary_benchmark(
        module: &ModulePath,
        benchmark_groups: BinaryBenchmarkGroups,
        meta: &Metadata,
    ) -> Result<Self> {
        let global_config = benchmark_groups.config;
        let default_tool = benchmark_groups.default_tool;

        let mut groups = vec![];
        for binary_benchmark_group in benchmark_groups.groups {
            let group_module_path = module.join(&binary_benchmark_group.id);
            let group_config = global_config
                .clone()
                .update_from_all([binary_benchmark_group.config.as_ref()]);

            let setup = binary_benchmark_group
                .has_setup
                .then_some(Assistant::new_group_assistant(
                    AssistantKind::Setup,
                    &binary_benchmark_group.id,
                    group_config.collect_envs(),
                    false,
                ));
            let teardown =
                binary_benchmark_group
                    .has_teardown
                    .then_some(Assistant::new_group_assistant(
                        AssistantKind::Teardown,
                        &binary_benchmark_group.id,
                        group_config.collect_envs(),
                        false,
                    ));

            let mut group = Group {
                name: binary_benchmark_group.id,
                module_path: group_module_path,
                benches: vec![],
                setup,
                teardown,
                compare_by_id: binary_benchmark_group
                    .compare_by_id
                    .unwrap_or(defaults::COMPARE_BY_ID),
            };

            for (group_index, binary_benchmark_benches) in binary_benchmark_group
                .binary_benchmarks
                .into_iter()
                .enumerate()
            {
                for (bench_index, binary_benchmark_bench) in
                    binary_benchmark_benches.benches.into_iter().enumerate()
                {
                    let config = group_config.clone().update_from_all([
                        binary_benchmark_benches.config.as_ref(),
                        binary_benchmark_bench.config.as_ref(),
                        Some(&binary_benchmark_bench.command.config),
                    ]);

                    let bin_bench = BinBench::new(
                        meta,
                        &group,
                        config,
                        group_index,
                        bench_index,
                        binary_benchmark_bench,
                        default_tool,
                    )?;
                    group.benches.push(bin_bench);
                }
            }

            groups.push(group);
        }
        Ok(Self(groups))
    }

    /// Run all [`Group`] benchmarks
    ///
    /// # Errors
    ///
    /// Returns an [`anyhow::Error`] with sources:
    ///
    /// * [`Error::RegressionError`] if a regression occurred.
    fn run(&self, benchmark: &dyn Benchmark, config: &Config) -> Result<BenchmarkSummaries> {
        let mut benchmark_summaries = BenchmarkSummaries::default();
        for group in &self.0 {
            if let Some(setup) = &group.setup {
                setup.run(config, &group.module_path)?;
            }

            let summaries = group.run(benchmark, config)?;

            if let Some(teardown) = &group.teardown {
                teardown.run(config, &group.module_path)?;
            }

            benchmark_summaries.add_other(summaries);
        }

        Ok(benchmark_summaries)
    }
}

impl Benchmark for LoadBaselineBenchmark {
    fn output_path(&self, bin_bench: &BinBench, config: &Config, group: &Group) -> ToolOutputPath {
        let kind = if bin_bench.default_tool.has_output_file() {
            ToolOutputPathKind::BaseOut(self.loaded_baseline.to_string())
        } else {
            ToolOutputPathKind::BaseLog(self.loaded_baseline.to_string())
        };
        ToolOutputPath::new(
            kind,
            bin_bench.default_tool,
            &BaselineKind::Name(self.baseline.clone()),
            &config.meta.target_dir,
            &group.module_path,
            &bin_bench.name(),
        )
    }

    fn baselines(&self) -> Baselines {
        (
            Some(self.loaded_baseline.to_string()),
            Some(self.baseline.to_string()),
        )
    }

    fn run(
        &self,
        bin_bench: &BinBench,
        config: &Config,
        group: &Group,
    ) -> Result<BenchmarkSummary> {
        let header = BinaryBenchmarkHeader::new(&config.meta, bin_bench);
        header.print();

        let out_path = self.output_path(bin_bench, config, group);
        let benchmark_summary = bin_bench.create_benchmark_summary(
            config,
            &out_path,
            &bin_bench.function_name,
            header.description(),
            self.baselines(),
        )?;

        bin_bench.tools.run_loaded_vs_base(
            &header.to_title(),
            &self.baseline,
            &self.loaded_baseline,
            benchmark_summary,
            &self.baselines(),
            config,
            &out_path,
            &bin_bench.output_format,
        )
    }
}

impl Runner {
    fn new(benchmark_groups: BinaryBenchmarkGroups, config: Config) -> Result<Self> {
        let setup = benchmark_groups
            .has_setup
            .then_some(Assistant::new_main_assistant(
                AssistantKind::Setup,
                benchmark_groups.config.collect_envs(),
                false,
            ));
        let teardown = benchmark_groups
            .has_teardown
            .then_some(Assistant::new_main_assistant(
                AssistantKind::Teardown,
                benchmark_groups.config.collect_envs(),
                false,
            ));

        let groups =
            Groups::from_binary_benchmark(&config.module_path, benchmark_groups, &config.meta)?;

        let benchmark: Box<dyn Benchmark> =
            if let Some(baseline_name) = &config.meta.args.save_baseline {
                Box::new(SaveBaselineBenchmark {
                    baseline: baseline_name.clone(),
                })
            } else if let Some(baseline_name) = &config.meta.args.load_baseline {
                Box::new(LoadBaselineBenchmark {
                    loaded_baseline: baseline_name.clone(),
                    baseline: config
                        .meta
                        .args
                        .baseline
                        .as_ref()
                        .expect("A baseline should be present")
                        .clone(),
                })
            } else {
                Box::new(BaselineBenchmark {
                    baseline_kind: config
                        .meta
                        .args
                        .baseline
                        .as_ref()
                        .map_or(BaselineKind::Old, |name| BaselineKind::Name(name.clone())),
                })
            };

        Ok(Self {
            benchmark,
            config,
            groups,
            setup,
            teardown,
        })
    }

    fn run(&self) -> Result<BenchmarkSummaries> {
        if let Some(setup) = &self.setup {
            setup.run(&self.config, &self.config.module_path)?;
        }

        let summaries = self.groups.run(self.benchmark.as_ref(), &self.config)?;

        if let Some(teardown) = &self.teardown {
            teardown.run(&self.config, &self.config.module_path)?;
        }

        Ok(summaries)
    }
}

impl Benchmark for SaveBaselineBenchmark {
    fn output_path(&self, bin_bench: &BinBench, config: &Config, group: &Group) -> ToolOutputPath {
        let kind = if bin_bench.default_tool.has_output_file() {
            ToolOutputPathKind::BaseOut(self.baseline.to_string())
        } else {
            ToolOutputPathKind::BaseLog(self.baseline.to_string())
        };
        ToolOutputPath::new(
            kind,
            bin_bench.default_tool,
            &BaselineKind::Name(self.baseline.clone()),
            &config.meta.target_dir,
            &group.module_path,
            &bin_bench.name(),
        )
    }

    fn baselines(&self) -> Baselines {
        (
            Some(self.baseline.to_string()),
            Some(self.baseline.to_string()),
        )
    }

    fn run(
        &self,
        bin_bench: &BinBench,
        config: &Config,
        group: &Group,
    ) -> Result<BenchmarkSummary> {
        let header = BinaryBenchmarkHeader::new(&config.meta, bin_bench);
        header.print();

        let out_path = self.output_path(bin_bench, config, group);
        out_path.init()?;

        let benchmark_summary = bin_bench.create_benchmark_summary(
            config,
            &out_path,
            &bin_bench.function_name,
            header.description(),
            self.baselines(),
        )?;

        bin_bench.tools.run(
            &header.to_title(),
            benchmark_summary,
            &self.baselines(),
            &BaselineKind::Name(self.baseline.clone()),
            config,
            &bin_bench.command.path,
            &bin_bench.command.args,
            &bin_bench.run_options,
            &out_path,
            true,
            &bin_bench.module_path,
            &bin_bench.output_format,
        )
    }
}

/// Print a list of all benchmarks with a short summary
pub fn list(benchmark_groups: BinaryBenchmarkGroups, config: &Config) -> Result<()> {
    let groups =
        Groups::from_binary_benchmark(&config.module_path, benchmark_groups, &config.meta)?;

    let mut sum = 0u64;
    for group in groups.0 {
        for bench in group.benches {
            sum += 1;
            format::print_list_benchmark(&bench.module_path, bench.id.as_ref());
        }
    }

    format::print_benchmark_list_summary(sum);

    Ok(())
}

/// The top-level function which should be used to run all benchmarks
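///
/// Runs the main `setup`, then every group (group `setup`, all benchmarks of the group, group
/// `teardown`), then the main `teardown`, and records the elapsed time in the returned
/// [`BenchmarkSummaries`].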
pub fn run(benchmark_groups: BinaryBenchmarkGroups, config: Config) -> Result<BenchmarkSummaries> {
    let runner = Runner::new(benchmark_groups, config)?;

    let start = Instant::now();
    let mut summaries = runner.run()?;
    summaries.elapsed(start);

    Ok(summaries)
}

#[cfg(test)]
mod tests {
    use std::fs::File;
    use std::net::TcpListener;

    use pretty_assertions::assert_eq;
    use rstest::rstest;
    use tempfile::tempdir;

    use super::*;

    fn api_delay_fixture<T, U>(poll: T, timeout: U, kind: DelayKind) -> api::Delay
    where
        T: Into<Option<u64>>,
        U: Into<Option<u64>>,
    {
        api::Delay {
            poll: poll.into().map(Duration::from_millis),
            timeout: timeout.into().map(Duration::from_millis),
            kind,
        }
    }

    #[rstest]
    #[case::duration_elapse_when_no_poll_no_timeout(
        api_delay_fixture(None, None, DelayKind::DurationElapse(Duration::from_millis(100))),
        Duration::ZERO,
        Duration::ZERO
    )]
    #[case::duration_elapse_when_poll_no_timeout(
        api_delay_fixture(10, None, DelayKind::DurationElapse(Duration::from_millis(100))),
        Duration::ZERO,
        Duration::ZERO
    )]
    #[case::duration_elapse_when_no_poll_but_timeout(
        api_delay_fixture(None, 10, DelayKind::DurationElapse(Duration::from_millis(100))),
        Duration::ZERO,
        Duration::ZERO
    )]
    #[case::duration_elapse_when_poll_and_timeout(
        api_delay_fixture(10, 100, DelayKind::DurationElapse(Duration::from_millis(100))),
        Duration::ZERO,
        Duration::ZERO
    )]
    #[case::path_when_no_poll_no_timeout(
        api_delay_fixture(None, None, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(10),
        Duration::from_secs(600)
    )]
    #[case::path_when_poll_no_timeout(
        api_delay_fixture(20, None, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(20),
        Duration::from_secs(600)
    )]
    #[case::path_when_no_poll_but_timeout(
        api_delay_fixture(None, 200, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(10),
        Duration::from_millis(200)
    )]
    #[case::path_when_poll_and_timeout(
        api_delay_fixture(20, 200, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(20),
        Duration::from_millis(200)
    )]
    #[case::path_when_poll_equal_to_timeout(
        api_delay_fixture(200, 200, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(195),
        Duration::from_millis(200)
    )]
    #[case::path_when_poll_higher_than_timeout(
        api_delay_fixture(201, 200, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(195),
        Duration::from_millis(200)
    )]
    #[case::path_when_poll_equal_to_timeout_smaller_than_10(
        api_delay_fixture(10, 9, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(5),
        Duration::from_millis(10)
    )]
    #[case::path_when_poll_lower_than_timeout_smaller_than_10(
        api_delay_fixture(7, 9, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(7),
        Duration::from_millis(10)
    )]
    fn test_from_api_delay_for_delay(
        #[case] delay: api::Delay,
        #[case] poll: Duration,
        #[case] timeout: Duration,
    ) {
        let expected = Delay::new(poll, timeout, delay.kind.clone());
        assert_eq!(Delay::from(delay), expected);
    }

    #[test]
    fn test_delay_path() {
        let dir = tempdir().unwrap();
        let file_path = dir.path().join("file.pid");

        let delay = Delay {
            poll: Duration::from_millis(50),
            timeout: Duration::from_millis(200),
            kind: DelayKind::PathExists(file_path.clone()),
        };
        let handle = thread::spawn(move || {
            delay.run().unwrap();
        });

        thread::sleep(Duration::from_millis(100));
        File::create(file_path).unwrap();

        handle.join().unwrap();
        drop(dir);
    }

    #[test]
    fn test_delay_tcp_connect() {
        let addr = "127.0.0.1:32000".parse::<SocketAddr>().unwrap();
        let _listener = TcpListener::bind(addr).unwrap();

        let delay = Delay {
            poll: Duration::from_millis(20),
            timeout: Duration::from_secs(1),
            kind: DelayKind::TcpConnect(addr),
        };
        delay.run().unwrap();
    }

    #[test]
    fn test_delay_tcp_connect_poll() {
        let addr = "127.0.0.1:32001".parse::<SocketAddr>().unwrap();

        let check_addr = addr;
        let handle = thread::spawn(move || {
            let delay = Delay {
                poll: Duration::from_millis(20),
                timeout: Duration::from_secs(1),
                kind: DelayKind::TcpConnect(check_addr),
            };
            delay.run().unwrap();
        });

        thread::sleep(Duration::from_millis(100));
        let _listener = TcpListener::bind(addr).unwrap();

        handle.join().unwrap();
    }

    #[test]
    fn test_delay_tcp_connect_timeout() {
        let addr = "127.0.0.1:32002".parse::<SocketAddr>().unwrap();
        let delay = Delay {
            poll: Duration::from_millis(20),
            timeout: Duration::from_secs(1),
            kind: DelayKind::TcpConnect(addr),
        };

        let result = delay.run();
        assert!(result.is_err());
        assert_eq!(result.unwrap_err().to_string(), "Timeout of '1s' reached");
    }

    #[test]
    fn test_delay_udp_response() {
        let addr = "127.0.0.1:34000".parse::<SocketAddr>().unwrap();

        thread::spawn(move || -> ! {
            let server = UdpSocket::bind(addr).unwrap();
            server
                .set_read_timeout(Some(Duration::from_millis(100)))
                .unwrap();
            server
                .set_write_timeout(Some(Duration::from_millis(100)))
                .unwrap();

            loop {
                let mut buf = [0; 1];

                match server.recv_from(&mut buf) {
                    Ok((_size, from)) => {
                        server.send_to(&[2], from).unwrap();
                    }
                    Err(_e) => {}
                }
            }
        });

        let delay = Delay {
            poll: Duration::from_millis(20),
            timeout: Duration::from_millis(100),
            kind: DelayKind::UdpResponse(addr, vec![1]),
        };

        delay.run().unwrap();
    }

    #[test]
    fn test_delay_udp_response_poll() {
        let addr = "127.0.0.1:34001".parse::<SocketAddr>().unwrap();

        thread::spawn(move || {
            let delay = Delay {
                poll: Duration::from_millis(20),
                timeout: Duration::from_millis(100),
                kind: DelayKind::UdpResponse(addr, vec![1]),
            };
            delay.run().unwrap();
        });

        let server = UdpSocket::bind(addr).unwrap();
        server
            .set_read_timeout(Some(Duration::from_millis(100)))
            .unwrap();
        server
            .set_write_timeout(Some(Duration::from_millis(100)))
            .unwrap();

        loop {
            let mut buf = [0; 1];

            thread::sleep(Duration::from_millis(70));

            match server.recv_from(&mut buf) {
                Ok((_size, from)) => {
                    server.send_to(&[2], from).unwrap();
                    break;
                }
                Err(_e) => {}
            }
        }
    }

    #[test]
    fn test_delay_udp_response_timeout() {
        let addr = "127.0.0.1:34002".parse::<SocketAddr>().unwrap();
        let delay = Delay {
            poll: Duration::from_millis(20),
            timeout: Duration::from_millis(100),
            kind: DelayKind::UdpResponse(addr, vec![1]),
        };
        let result = delay.run();
        assert!(result.is_err());
        assert_eq!(
            result.unwrap_err().to_string(),
            "Timeout of '100ms' reached"
        );
    }
}