1use std::collections::HashMap;
2use std::ffi::OsString;
3use std::io::stderr;
4use std::io::ErrorKind::WouldBlock;
5use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, TcpStream, UdpSocket};
6use std::path::{Path, PathBuf};
7use std::sync::mpsc::{self, RecvTimeoutError};
8use std::time::Duration;
9use std::{panic, thread};
10
11use anyhow::{anyhow, Context, Result};
12use log::{debug, warn};
13
14use super::args::NoCapture;
15use super::callgrind::args::Args;
16use super::callgrind::flamegraph::{
17 BaselineFlamegraphGenerator, Config as FlamegraphConfig, Flamegraph, FlamegraphGenerator,
18 LoadBaselineFlamegraphGenerator, SaveBaselineFlamegraphGenerator,
19};
20use super::callgrind::parser::CallgrindParser;
21use super::callgrind::summary_parser::SummaryParser;
22use super::callgrind::{RegressionConfig, Summaries};
23use super::common::{Assistant, AssistantKind, Config, ModulePath, Sandbox};
24use super::format::{BinaryBenchmarkHeader, Formatter, OutputFormat, VerticalFormatter};
25use super::meta::Metadata;
26use super::summary::{
27 BaselineKind, BaselineName, BenchmarkKind, BenchmarkSummary, CallgrindSummary, MetricsSummary,
28 SummaryOutput, ToolRun,
29};
30use super::tool::{
31 RunOptions, ToolCommand, ToolConfig, ToolConfigs, ToolOutputPath, ToolOutputPathKind,
32 ValgrindTool,
33};
34use crate::api::{
35 self, BinaryBenchmarkBench, BinaryBenchmarkConfig, BinaryBenchmarkGroups, DelayKind, Stdin,
36};
37use crate::error::Error;
38use crate::runner::format;
39
mod defaults {
    use crate::api::Stdin;

    /// By default, benchmarks with the same id within a group are not compared.
    pub const COMPARE_BY_ID: bool = false;
    /// By default, the environment of the benchmarked command is cleared.
    pub const ENV_CLEAR: bool = true;
    /// By default, a detected regression does not abort the remaining benchmarks.
    pub const REGRESSION_FAIL_FAST: bool = false;
    /// The default `Stdin` of the benchmarked command.
    pub const STDIN: Stdin = Stdin::Pipe;
    /// Name of the environment variable which carries the workspace root to
    /// the setup/teardown assistants.
    pub const WORKSPACE_ROOT_ENV: &str = "_WORKSPACE_ROOT";
}
49
/// The default benchmark mode: run the benchmark and compare it against the
/// baseline selected by `baseline_kind` (the previous run or a named baseline).
#[derive(Debug)]
struct BaselineBenchmark {
    baseline_kind: BaselineKind,
}
54
/// A single, fully resolved binary benchmark ready to be run.
#[derive(Debug)]
pub struct BinBench {
    /// Optional id of this bench; used to build the display name and for
    /// `compare_by_id`.
    pub id: Option<String>,
    /// Optional string representation of the benchmark arguments (display only
    /// as far as visible here).
    pub args: Option<String>,
    /// Name of the benchmark function.
    pub function_name: String,
    /// The command to execute under valgrind.
    pub command: Command,
    /// Runtime options (environment, stdio, exit expectations, ...).
    pub run_options: RunOptions,
    /// The parsed callgrind arguments for this bench.
    pub callgrind_args: Args,
    /// Optional flamegraph generation configuration.
    pub flamegraph_config: Option<FlamegraphConfig>,
    /// Optional regression check configuration.
    pub regression_config: Option<RegressionConfig>,
    /// Additional valgrind tools to run besides callgrind.
    pub tools: ToolConfigs,
    /// Optional setup assistant executed before the command.
    pub setup: Option<Assistant>,
    /// Optional teardown assistant executed after the command.
    pub teardown: Option<Assistant>,
    /// Optional sandbox configuration the benchmark runs in.
    pub sandbox: Option<api::Sandbox>,
    /// Module path of this bench (group path + function name).
    pub module_path: ModulePath,
    /// The output format used when printing results.
    pub output_format: OutputFormat,
}
72
/// The benchmarked command: executable path, its arguments and an optional
/// delay executed before the command counts as started.
#[derive(Debug, Clone)]
pub struct Command {
    pub path: PathBuf,
    pub args: Vec<OsString>,
    pub delay: Option<Delay>,
}
84
/// A delay condition which is awaited before proceeding, polled every `poll`
/// up to `timeout` (both are ignored for plain duration delays, see
/// `From<api::Delay>`).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Delay {
    pub poll: Duration,
    pub timeout: Duration,
    pub kind: DelayKind,
}
91
/// A benchmark group: its resolved benches plus group-level setup/teardown.
#[derive(Debug)]
struct Group {
    /// The group id as given in the benchmark definition.
    name: String,
    /// Module path of the group (file module path + group id).
    module_path: ModulePath,
    /// All resolved benchmarks of this group.
    benches: Vec<BinBench>,
    setup: Option<Assistant>,
    teardown: Option<Assistant>,
    /// If true, summaries with equal ids within this group are compared.
    compare_by_id: bool,
}
106
/// Newtype over all benchmark groups of a single benchmark file.
#[derive(Debug)]
struct Groups(Vec<Group>);
109
/// Benchmark mode which does not run anything new: it loads `loaded_baseline`
/// and compares it against `baseline`.
#[derive(Debug)]
struct LoadBaselineBenchmark {
    loaded_baseline: BaselineName,
    baseline: BaselineName,
}
115
/// Drives the whole benchmark run: all groups, the selected benchmark mode and
/// the file-level setup/teardown assistants.
#[derive(Debug)]
struct Runner {
    groups: Groups,
    config: Config,
    /// The benchmark mode selected from the command-line arguments
    /// (save-baseline, load-baseline or the default baseline mode).
    benchmark: Box<dyn Benchmark>,
    setup: Option<Assistant>,
    teardown: Option<Assistant>,
}
124
/// Benchmark mode which runs the benchmark and stores the result under the
/// named `baseline`, overwriting a previously saved one.
#[derive(Debug)]
struct SaveBaselineBenchmark {
    baseline: BaselineName,
}
129
/// Abstraction over the benchmark modes (baseline, save-baseline, load-baseline).
trait Benchmark: std::fmt::Debug {
    /// The callgrind output path for `bin_bench` within `group`.
    fn output_path(&self, bin_bench: &BinBench, config: &Config, group: &Group) -> ToolOutputPath;
    /// The `(old, new)` baseline names used for display.
    fn baselines(&self) -> (Option<String>, Option<String>);
    /// Run (or load) the benchmark and produce its summary.
    fn run(&self, bin_bench: &BinBench, config: &Config, group: &Group)
        -> Result<BenchmarkSummary>;
}
136
137impl Benchmark for BaselineBenchmark {
138 fn output_path(&self, bin_bench: &BinBench, config: &Config, group: &Group) -> ToolOutputPath {
139 ToolOutputPath::new(
140 ToolOutputPathKind::Out,
141 ValgrindTool::Callgrind,
142 &self.baseline_kind,
143 &config.meta.target_dir,
144 &group.module_path,
145 &bin_bench.name(),
146 )
147 }
148
149 fn baselines(&self) -> (Option<String>, Option<String>) {
150 match &self.baseline_kind {
151 BaselineKind::Old => (None, None),
152 BaselineKind::Name(name) => (None, Some(name.to_string())),
153 }
154 }
155
156 fn run(
157 &self,
158 bin_bench: &BinBench,
159 config: &Config,
160 group: &Group,
161 ) -> Result<BenchmarkSummary> {
162 let header = BinaryBenchmarkHeader::new(&config.meta, bin_bench);
163 header.print();
164
165 let callgrind_command = ToolCommand::new(
166 ValgrindTool::Callgrind,
167 &config.meta,
168 config.meta.args.nocapture,
169 );
170
171 let tool_config = ToolConfig::new(
172 ValgrindTool::Callgrind,
173 true,
174 bin_bench.callgrind_args.clone(),
175 None,
176 );
177
178 let out_path = self.output_path(bin_bench, config, group);
179 out_path.init()?;
180 out_path.shift()?;
181
182 let old_path = out_path.to_base_path();
183 let log_path = out_path.to_log_output();
184 log_path.shift()?;
185
186 for path in bin_bench.tools.output_paths(&out_path) {
187 path.shift()?;
188 path.to_log_output().shift()?;
189 }
190
191 let mut benchmark_summary = bin_bench.create_benchmark_summary(
192 config,
193 &out_path,
194 &bin_bench.function_name,
195 header.description(),
196 )?;
197
198 let sandbox = bin_bench
202 .sandbox
203 .as_ref()
204 .map(|sandbox| Sandbox::setup(sandbox, &config.meta))
205 .transpose()?;
206
207 let mut child = bin_bench
208 .setup
209 .as_ref()
210 .map_or(Ok(None), |setup| setup.run(config, &bin_bench.module_path))?;
211
212 if let Some(delay) = &bin_bench.command.delay {
213 if let Err(error) = delay.run() {
214 if let Some(mut child) = child.take() {
215 child.kill()?;
217 return Err(error);
218 }
219 }
220 }
221
222 let output = callgrind_command.run(
223 tool_config,
224 &bin_bench.command.path,
225 &bin_bench.command.args,
226 bin_bench.run_options.clone(),
227 &out_path,
228 &bin_bench.module_path,
229 child,
230 )?;
231
232 if let Some(teardown) = &bin_bench.teardown {
233 teardown.run(config, &bin_bench.module_path)?;
234 }
235
236 bin_bench.print_nocapture_footer(config.meta.args.nocapture);
239
240 if let Some(sandbox) = sandbox {
241 sandbox.reset()?;
242 }
243
244 let parsed_new = SummaryParser.parse(&out_path)?;
245 let parsed_old = old_path
246 .exists()
247 .then(|| SummaryParser.parse(&old_path))
248 .transpose()?;
249
250 let summaries = Summaries::new(parsed_new, parsed_old);
251 VerticalFormatter::new(bin_bench.output_format).print(
252 config,
253 self.baselines(),
254 &ToolRun::from(&summaries),
255 )?;
256
257 output.dump_log(log::Level::Info);
258 log_path.dump_log(log::Level::Info, &mut stderr())?;
259
260 let regressions = bin_bench.check_and_print_regressions(&summaries.total);
261
262 let callgrind_summary = benchmark_summary
263 .callgrind_summary
264 .insert(CallgrindSummary::new(
265 log_path.real_paths()?,
266 out_path.real_paths()?,
267 ));
268
269 callgrind_summary.add_summaries(
270 &bin_bench.command.path,
271 &bin_bench.command.args,
272 &self.baselines(),
273 summaries,
274 regressions,
275 );
276
277 if let Some(flamegraph_config) = bin_bench.flamegraph_config.clone() {
278 callgrind_summary.flamegraphs = BaselineFlamegraphGenerator {
279 baseline_kind: self.baseline_kind.clone(),
280 }
281 .create(
282 &Flamegraph::new(header.to_title(), flamegraph_config),
283 &out_path,
284 None,
285 &config.meta.project_root,
286 )?;
287 }
288
289 benchmark_summary.tool_summaries = bin_bench.tools.run(
290 config,
291 &bin_bench.command.path,
292 &bin_bench.command.args,
293 &bin_bench.run_options,
294 &out_path,
295 false,
296 &bin_bench.module_path,
297 bin_bench.sandbox.as_ref(),
298 bin_bench.setup.as_ref(),
299 bin_bench.teardown.as_ref(),
300 bin_bench.command.delay.as_ref(),
301 &bin_bench.output_format,
302 )?;
303
304 Ok(benchmark_summary)
305 }
306}
307
impl BinBench {
    /// Resolve an api-level `BinaryBenchmarkBench` into a runnable `BinBench`.
    ///
    /// Merges config-level valgrind/callgrind args with the command-line
    /// (`meta`) callgrind args, resolves environment variables and builds the
    /// per-bench setup/teardown assistants.
    ///
    /// # Errors
    ///
    /// Returns an error if the command path is empty, the raw arguments cannot
    /// be parsed or a tool config conversion fails.
    fn new(
        meta: &Metadata,
        group: &Group,
        config: BinaryBenchmarkConfig,
        group_index: usize,
        bench_index: usize,
        meta_callgrind_args: &api::RawArgs,
        binary_benchmark_bench: BinaryBenchmarkBench,
    ) -> Result<Self> {
        let module_path = group
            .module_path
            .join(&binary_benchmark_bench.function_name);

        let api::Command {
            path,
            args,
            stdin,
            stdout,
            stderr,
            delay,
            ..
        } = binary_benchmark_bench.command;

        let command = Command::new(&module_path, path, args, delay.map(Into::into))?;

        // NOTE(review): the order here presumably encodes precedence
        // (valgrind args < callgrind args < command-line args) — confirm in
        // `Args::try_from_raw_args`.
        let callgrind_args = Args::try_from_raw_args(&[
            &config.valgrind_args,
            &config.callgrind_args,
            meta_callgrind_args,
        ])?;

        // The assistants additionally receive the workspace root via an
        // internal environment variable.
        let mut assistant_envs = config.collect_envs();
        assistant_envs.push((
            OsString::from(defaults::WORKSPACE_ROOT_ENV),
            meta.project_root.clone().into(),
        ));

        let command_envs = config.resolve_envs();
        let flamegraph_config = config.flamegraph_config.map(Into::into);
        // The output format kind from the command line overrides the config.
        let mut output_format = config
            .output_format
            .map_or_else(OutputFormat::default, Into::into);
        output_format.kind = meta.args.output_format;

        Ok(Self {
            id: binary_benchmark_bench.id,
            args: binary_benchmark_bench.args,
            function_name: binary_benchmark_bench.function_name,
            callgrind_args,
            flamegraph_config,
            regression_config: api::update_option(
                &config.regression_config,
                &meta.regression_config,
            )
            .map(Into::into),
            // Prepend the plain valgrind args to each tool's raw args so the
            // tool-specific arguments keep the last word.
            tools: ToolConfigs(
                config
                    .tools
                    .0
                    .into_iter()
                    .map(|mut t| {
                        if !config.valgrind_args.is_empty() {
                            let mut new_args = config.valgrind_args.clone();
                            new_args.extend_ignore_flag(t.raw_args.0.iter());
                            t.raw_args = new_args;
                        }
                        t.try_into()
                    })
                    .collect::<Result<Vec<_>, _>>()?,
            ),
            setup: binary_benchmark_bench
                .has_setup
                .then_some(Assistant::new_bench_assistant(
                    AssistantKind::Setup,
                    &group.name,
                    (group_index, bench_index),
                    // Only a `Stdin::Setup` pipe is forwarded to the setup
                    // assistant.
                    stdin.as_ref().and_then(|s| {
                        if let Stdin::Setup(p) = s {
                            Some(*p)
                        } else {
                            None
                        }
                    }),
                    assistant_envs.clone(),
                    config.setup_parallel.unwrap_or(false),
                )),
            teardown: binary_benchmark_bench.has_teardown.then_some(
                Assistant::new_bench_assistant(
                    AssistantKind::Teardown,
                    &group.name,
                    (group_index, bench_index),
                    None,
                    assistant_envs,
                    false,
                ),
            ),
            run_options: RunOptions {
                env_clear: config.env_clear.unwrap_or(defaults::ENV_CLEAR),
                envs: command_envs,
                stdin: stdin.or(Some(defaults::STDIN)),
                stdout,
                stderr,
                exit_with: config.exit_with,
                current_dir: config.current_dir,
            },
            sandbox: config.sandbox,
            module_path,
            command,
            output_format,
        })
    }

    /// The display name: `function_name.id` if an id is present.
    fn name(&self) -> String {
        if let Some(bench_id) = &self.id {
            format!("{}.{}", self.function_name, bench_id)
        } else {
            self.function_name.clone()
        }
    }

    /// Print the footer shown when `--nocapture` is active.
    fn print_nocapture_footer(&self, nocapture: NoCapture) {
        format::print_no_capture_footer(
            nocapture,
            self.run_options.stdout.as_ref(),
            self.run_options.stderr.as_ref(),
        );
    }

    /// Create the skeleton `BenchmarkSummary`, initializing the summary output
    /// file if `--save-summary` was given.
    ///
    /// # Errors
    ///
    /// Returns an error if the summary output file cannot be initialized.
    fn create_benchmark_summary(
        &self,
        config: &Config,
        output_path: &ToolOutputPath,
        function_name: &str,
        description: Option<String>,
    ) -> Result<BenchmarkSummary> {
        let summary_output = if let Some(format) = config.meta.args.save_summary {
            let output = SummaryOutput::new(format, &output_path.dir);
            output.init()?;
            Some(output)
        } else {
            None
        };

        Ok(BenchmarkSummary::new(
            BenchmarkKind::BinaryBenchmark,
            config.meta.project_root.clone(),
            config.package_dir.clone(),
            config.bench_file.clone(),
            self.command.path.clone(),
            &self.module_path,
            function_name,
            self.id.clone(),
            description,
            summary_output,
        ))
    }

    /// Check for regressions and print them; no regression config means no
    /// regressions.
    fn check_and_print_regressions(
        &self,
        metrics_summary: &MetricsSummary,
    ) -> Vec<super::summary::CallgrindRegression> {
        if let Some(regression_config) = &self.regression_config {
            regression_config.check_and_print(metrics_summary)
        } else {
            vec![]
        }
    }
}
477
478impl Command {
479 fn new(
480 module_path: &ModulePath,
481 path: PathBuf,
482 args: Vec<OsString>,
483 delay: Option<Delay>,
484 ) -> Result<Self> {
485 if path.as_os_str().is_empty() {
486 return Err(anyhow!("{module_path}: Empty path in command",));
487 }
488
489 Ok(Self { path, args, delay })
490 }
491}
492
493impl From<api::Delay> for Delay {
494 fn from(value: api::Delay) -> Self {
495 let (poll, timeout) = if let DelayKind::DurationElapse(_) = value.kind {
496 if value.poll.is_some() {
497 warn!("Ignoring poll setting. Not supported for {:?}", value.kind);
498 }
499 if value.timeout.is_some() {
500 warn!(
501 "Ignoring timeout setting. Not supported for {:?}",
502 value.kind
503 );
504 }
505 (Duration::ZERO, Duration::ZERO)
506 } else {
507 let mut poll = value.poll.unwrap_or_else(|| Duration::from_millis(10));
508 let timeout = value.timeout.map_or_else(
509 || Duration::from_secs(600),
510 |t| {
511 if t < Duration::from_millis(10) {
512 warn!("The minimum timeout setting is 10ms");
513 Duration::from_millis(10)
514 } else {
515 t
516 }
517 },
518 );
519
520 if poll >= timeout {
521 warn!(
522 "Poll duration is equal to or greater than the timeout duration ({:?} >= \
523 {:?}).",
524 poll, timeout
525 );
526 poll = timeout - Duration::from_millis(5);
527 warn!("Using poll duration {:?} instead", poll);
528 }
529 (poll, timeout)
530 };
531
532 Self {
533 poll,
534 timeout,
535 kind: value.kind,
536 }
537 }
538}
539
540impl Delay {
541 pub fn new(poll: Duration, timeout: Duration, kind: DelayKind) -> Self {
542 Self {
543 poll,
544 timeout,
545 kind,
546 }
547 }
548
549 pub fn run(&self) -> Result<()> {
550 if let DelayKind::DurationElapse(_) = self.kind {
551 self.exec_delay_fn()
552 } else {
553 let (tx, rx) = mpsc::channel::<std::result::Result<(), anyhow::Error>>();
554
555 let delay = self.clone();
556 let handle = thread::spawn(move || {
557 tx.send(delay.exec_delay_fn()).map_err(|error| {
558 anyhow!("Command::Delay MPSC channel send error. Error: {error:?}")
559 })
560 });
561
562 match rx.recv_timeout(self.timeout) {
563 Ok(result) => {
564 handle.join().unwrap().unwrap();
566 result.map(|()| debug!("Command::Delay successfully executed."))
567 }
568 Err(RecvTimeoutError::Timeout) => {
569 Err(anyhow!("Timeout of '{:?}' reached", self.timeout))
570 }
571 Err(RecvTimeoutError::Disconnected) => {
572 panic::resume_unwind(handle.join().unwrap_err())
575 }
576 }
577 }
578 }
579
580 fn exec_delay_fn(&self) -> Result<()> {
581 match &self.kind {
582 DelayKind::DurationElapse(duration) => {
583 thread::sleep(*duration);
584 }
585 DelayKind::TcpConnect(addr) => {
586 while let Err(_err) = TcpStream::connect(addr) {
587 thread::sleep(self.poll);
588 }
589 }
590 DelayKind::UdpResponse(remote, req) => {
591 let socket = match remote {
592 SocketAddr::V4(_) => {
593 UdpSocket::bind(SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0))
594 .context("Could not bind local IPv4 UDP socket.")?
595 }
596 SocketAddr::V6(_) => {
597 UdpSocket::bind(SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0))
598 .context("Could not bind local IPv6 UDP socket.")?
599 }
600 };
601
602 socket.set_read_timeout(Some(self.poll))?;
603 socket.set_write_timeout(Some(self.poll))?;
604
605 loop {
606 while let Err(_err) = socket.send_to(req.as_slice(), remote) {
607 thread::sleep(self.poll);
608 }
609
610 let mut buf = [0; 1];
611 match socket.recv(&mut buf) {
612 Ok(_size) => break,
613 Err(e) => {
614 if e.kind() != WouldBlock {
615 thread::sleep(self.poll);
616 }
617 }
618 }
619 }
620 }
621 DelayKind::PathExists(path) => {
622 let wait_for_path = std::path::PathBuf::from(Path::new(path));
623 while !wait_for_path.exists() {
624 thread::sleep(self.poll);
625 }
626 }
627 }
628
629 Ok(())
630 }
631}
632
633impl Group {
634 fn run(
635 &self,
636 benchmark: &dyn Benchmark,
637 is_regressed: &mut bool,
638 config: &Config,
639 ) -> Result<()> {
640 let mut summaries: HashMap<String, Vec<BenchmarkSummary>> =
641 HashMap::with_capacity(self.benches.len());
642 for bench in &self.benches {
643 let fail_fast = bench
644 .regression_config
645 .as_ref()
646 .map_or(defaults::REGRESSION_FAIL_FAST, |r| r.fail_fast);
647
648 let summary = benchmark.run(bench, config, self)?;
649 summary.print_and_save(&config.meta.args.output_format)?;
650 summary.check_regression(is_regressed, fail_fast)?;
651
652 if self.compare_by_id && bench.output_format.is_default() {
653 if let Some(id) = &summary.id {
654 if let Some(sums) = summaries.get_mut(id) {
655 for sum in sums.iter() {
656 sum.compare_and_print(id, &summary, &bench.output_format)?;
657 }
658 sums.push(summary);
659 } else {
660 summaries.insert(id.clone(), vec![summary]);
661 }
662 }
663 }
664 }
665
666 Ok(())
667 }
668}
669
impl Groups {
    /// Build all groups with their resolved benches from the api description.
    ///
    /// Configs are merged top-down: global config, group config, benches
    /// config, bench config and finally the command's own config.
    ///
    /// # Errors
    ///
    /// Returns an error if a `BinBench` cannot be resolved (see
    /// `BinBench::new`).
    fn from_binary_benchmark(
        module: &ModulePath,
        benchmark_groups: BinaryBenchmarkGroups,
        meta: &Metadata,
    ) -> Result<Self> {
        let global_config = benchmark_groups.config;
        let meta_callgrind_args = meta.args.callgrind_args.clone().unwrap_or_default();

        let mut groups = vec![];
        for binary_benchmark_group in benchmark_groups.groups {
            let group_module_path = module.join(&binary_benchmark_group.id);
            let group_config = global_config
                .clone()
                .update_from_all([binary_benchmark_group.config.as_ref()]);

            let setup = binary_benchmark_group
                .has_setup
                .then_some(Assistant::new_group_assistant(
                    AssistantKind::Setup,
                    &binary_benchmark_group.id,
                    group_config.collect_envs(),
                    false,
                ));
            let teardown =
                binary_benchmark_group
                    .has_teardown
                    .then_some(Assistant::new_group_assistant(
                        AssistantKind::Teardown,
                        &binary_benchmark_group.id,
                        group_config.collect_envs(),
                        false,
                    ));

            let mut group = Group {
                name: binary_benchmark_group.id,
                module_path: group_module_path,
                benches: vec![],
                setup,
                teardown,
                compare_by_id: binary_benchmark_group
                    .compare_by_id
                    .unwrap_or(defaults::COMPARE_BY_ID),
            };

            // `group_index`/`bench_index` identify the bench for the
            // assistants when they are re-invoked in the benchmark process.
            for (group_index, binary_benchmark_benches) in binary_benchmark_group
                .binary_benchmarks
                .into_iter()
                .enumerate()
            {
                for (bench_index, binary_benchmark_bench) in
                    binary_benchmark_benches.benches.into_iter().enumerate()
                {
                    let config = group_config.clone().update_from_all([
                        binary_benchmark_benches.config.as_ref(),
                        binary_benchmark_bench.config.as_ref(),
                        Some(&binary_benchmark_bench.command.config),
                    ]);

                    let bin_bench = BinBench::new(
                        meta,
                        &group,
                        config,
                        group_index,
                        bench_index,
                        &meta_callgrind_args,
                        binary_benchmark_bench,
                    )?;
                    group.benches.push(bin_bench);
                }
            }

            groups.push(group);
        }
        Ok(Self(groups))
    }

    /// Run all groups with their group-level setup/teardown assistants.
    ///
    /// # Errors
    ///
    /// Returns an error if a group fails or, after all groups ran, a
    /// non-fail-fast regression was detected.
    fn run(&self, benchmark: &dyn Benchmark, config: &Config) -> Result<()> {
        let mut is_regressed = false;
        for group in &self.0 {
            if let Some(setup) = &group.setup {
                setup.run(config, &group.module_path)?;
            }

            group.run(benchmark, &mut is_regressed, config)?;

            if let Some(teardown) = &group.teardown {
                teardown.run(config, &group.module_path)?;
            }
        }

        if is_regressed {
            Err(Error::RegressionError(false).into())
        } else {
            Ok(())
        }
    }
}
775
impl Benchmark for LoadBaselineBenchmark {
    /// The output path of the loaded baseline run.
    fn output_path(&self, bin_bench: &BinBench, config: &Config, group: &Group) -> ToolOutputPath {
        ToolOutputPath::new(
            ToolOutputPathKind::Base(self.loaded_baseline.to_string()),
            ValgrindTool::Callgrind,
            &BaselineKind::Name(self.baseline.clone()),
            &config.meta.target_dir,
            &group.module_path,
            &bin_bench.name(),
        )
    }

    fn baselines(&self) -> (Option<String>, Option<String>) {
        (
            Some(self.loaded_baseline.to_string()),
            Some(self.baseline.to_string()),
        )
    }

    /// Compare two already existing baselines without running anything new.
    ///
    /// # Errors
    ///
    /// Returns an error if either baseline cannot be parsed or printing a
    /// summary fails.
    fn run(
        &self,
        bin_bench: &BinBench,
        config: &Config,
        group: &Group,
    ) -> Result<BenchmarkSummary> {
        let header = BinaryBenchmarkHeader::new(&config.meta, bin_bench);
        header.print();

        let out_path = self.output_path(bin_bench, config, group);
        let old_path = out_path.to_base_path();
        let log_path = out_path.to_log_output();

        let mut benchmark_summary = bin_bench.create_benchmark_summary(
            config,
            &out_path,
            &bin_bench.function_name,
            header.description(),
        )?;

        // In load-baseline mode both runs must already exist, hence the old
        // parse is not optional here.
        let parsed_new = SummaryParser.parse(&out_path)?;
        let parsed_old = Some(SummaryParser.parse(&old_path)?);
        let summaries = Summaries::new(parsed_new, parsed_old);

        VerticalFormatter::new(bin_bench.output_format).print(
            config,
            self.baselines(),
            &ToolRun::from(&summaries),
        )?;

        let regressions = bin_bench.check_and_print_regressions(&summaries.total);

        let callgrind_summary = benchmark_summary
            .callgrind_summary
            .insert(CallgrindSummary::new(
                log_path.real_paths()?,
                out_path.real_paths()?,
            ));

        callgrind_summary.add_summaries(
            &bin_bench.command.path,
            &bin_bench.command.args,
            &self.baselines(),
            summaries,
            regressions,
        );

        if let Some(flamegraph_config) = bin_bench.flamegraph_config.clone() {
            callgrind_summary.flamegraphs = LoadBaselineFlamegraphGenerator {
                loaded_baseline: self.loaded_baseline.clone(),
                baseline: self.baseline.clone(),
            }
            .create(
                &Flamegraph::new(header.to_title(), flamegraph_config),
                &out_path,
                None,
                &config.meta.project_root,
            )?;
        }

        benchmark_summary.tool_summaries =
            bin_bench
                .tools
                .run_loaded_vs_base(config, &out_path, &bin_bench.output_format)?;

        Ok(benchmark_summary)
    }
}
863
impl Runner {
    /// Build the runner: resolve all groups, the file-level setup/teardown and
    /// select the benchmark mode from the command-line arguments.
    ///
    /// # Errors
    ///
    /// Returns an error if the groups cannot be resolved.
    fn new(benchmark_groups: BinaryBenchmarkGroups, config: Config) -> Result<Self> {
        let setup = benchmark_groups
            .has_setup
            .then_some(Assistant::new_main_assistant(
                AssistantKind::Setup,
                benchmark_groups.config.collect_envs(),
                false,
            ));
        let teardown = benchmark_groups
            .has_teardown
            .then_some(Assistant::new_main_assistant(
                AssistantKind::Teardown,
                benchmark_groups.config.collect_envs(),
                false,
            ));

        let groups =
            Groups::from_binary_benchmark(&config.module_path, benchmark_groups, &config.meta)?;

        // Mode selection precedence: --save-baseline > --load-baseline >
        // default baseline mode.
        let benchmark: Box<dyn Benchmark> =
            if let Some(baseline_name) = &config.meta.args.save_baseline {
                Box::new(SaveBaselineBenchmark {
                    baseline: baseline_name.clone(),
                })
            } else if let Some(baseline_name) = &config.meta.args.load_baseline {
                Box::new(LoadBaselineBenchmark {
                    loaded_baseline: baseline_name.clone(),
                    // NOTE(review): relies on argument parsing guaranteeing
                    // `--baseline` together with `--load-baseline`.
                    baseline: config
                        .meta
                        .args
                        .baseline
                        .as_ref()
                        .expect("A baseline should be present")
                        .clone(),
                })
            } else {
                Box::new(BaselineBenchmark {
                    baseline_kind: config
                        .meta
                        .args
                        .baseline
                        .as_ref()
                        .map_or(BaselineKind::Old, |name| BaselineKind::Name(name.clone())),
                })
            };

        Ok(Self {
            groups,
            config,
            benchmark,
            setup,
            teardown,
        })
    }

    /// Run the file-level setup, then all groups, then the file-level teardown.
    ///
    /// # Errors
    ///
    /// Returns an error if an assistant or any group fails.
    fn run(&self) -> Result<()> {
        if let Some(setup) = &self.setup {
            setup.run(&self.config, &self.config.module_path)?;
        }

        self.groups.run(self.benchmark.as_ref(), &self.config)?;

        if let Some(teardown) = &self.teardown {
            teardown.run(&self.config, &self.config.module_path)?;
        }
        Ok(())
    }
}
933
934impl Benchmark for SaveBaselineBenchmark {
935 fn output_path(&self, bin_bench: &BinBench, config: &Config, group: &Group) -> ToolOutputPath {
936 ToolOutputPath::new(
937 ToolOutputPathKind::Base(self.baseline.to_string()),
938 ValgrindTool::Callgrind,
939 &BaselineKind::Name(self.baseline.clone()),
940 &config.meta.target_dir,
941 &group.module_path,
942 &bin_bench.name(),
943 )
944 }
945
946 fn baselines(&self) -> (Option<String>, Option<String>) {
947 (
948 Some(self.baseline.to_string()),
949 Some(self.baseline.to_string()),
950 )
951 }
952
953 fn run(
954 &self,
955 bin_bench: &BinBench,
956 config: &Config,
957 group: &Group,
958 ) -> Result<BenchmarkSummary> {
959 let header = BinaryBenchmarkHeader::new(&config.meta, bin_bench);
960 header.print();
961
962 let callgrind_command = ToolCommand::new(
963 ValgrindTool::Callgrind,
964 &config.meta,
965 config.meta.args.nocapture,
966 );
967
968 let tool_config = ToolConfig::new(
969 ValgrindTool::Callgrind,
970 true,
971 bin_bench.callgrind_args.clone(),
972 None,
973 );
974
975 let out_path = self.output_path(bin_bench, config, group);
976 out_path.init()?;
977
978 let parsed_old = out_path
979 .exists()
980 .then(|| {
981 SummaryParser
982 .parse(&out_path)
983 .and_then(|parsed| out_path.clear().map(|()| parsed))
984 })
985 .transpose()?;
986
987 let log_path = out_path.to_log_output();
988 log_path.clear()?;
989
990 let mut benchmark_summary = bin_bench.create_benchmark_summary(
991 config,
992 &out_path,
993 &bin_bench.function_name,
994 header.description(),
995 )?;
996
997 let sandbox = bin_bench
998 .sandbox
999 .as_ref()
1000 .map(|sandbox| Sandbox::setup(sandbox, &config.meta))
1001 .transpose()?;
1002
1003 let mut child = bin_bench
1004 .setup
1005 .as_ref()
1006 .map_or(Ok(None), |setup| setup.run(config, &bin_bench.module_path))?;
1007
1008 if let Some(delay) = &bin_bench.command.delay {
1009 if let Err(error) = delay.run() {
1010 if let Some(mut child) = child.take() {
1011 child.kill()?;
1013 return Err(error);
1014 }
1015 }
1016 }
1017
1018 let output = callgrind_command.run(
1019 tool_config,
1020 &bin_bench.command.path,
1021 &bin_bench.command.args,
1022 bin_bench.run_options.clone(),
1023 &out_path,
1024 &bin_bench.module_path,
1025 child,
1026 )?;
1027
1028 if let Some(teardown) = &bin_bench.teardown {
1029 teardown.run(config, &bin_bench.module_path)?;
1030 }
1031
1032 bin_bench.print_nocapture_footer(config.meta.args.nocapture);
1033
1034 if let Some(sandbox) = sandbox {
1035 sandbox.reset()?;
1036 }
1037
1038 let parsed_new = SummaryParser.parse(&out_path)?;
1039 let summaries = Summaries::new(parsed_new, parsed_old);
1040 VerticalFormatter::new(bin_bench.output_format).print(
1041 config,
1042 self.baselines(),
1043 &ToolRun::from(&summaries),
1044 )?;
1045
1046 output.dump_log(log::Level::Info);
1047 log_path.dump_log(log::Level::Info, &mut stderr())?;
1048
1049 let regressions = bin_bench.check_and_print_regressions(&summaries.total);
1050
1051 let callgrind_summary = benchmark_summary
1052 .callgrind_summary
1053 .insert(CallgrindSummary::new(
1054 log_path.real_paths()?,
1055 out_path.real_paths()?,
1056 ));
1057
1058 callgrind_summary.add_summaries(
1059 &bin_bench.command.path,
1060 &bin_bench.command.args,
1061 &self.baselines(),
1062 summaries,
1063 regressions,
1064 );
1065
1066 if let Some(flamegraph_config) = bin_bench.flamegraph_config.clone() {
1067 callgrind_summary.flamegraphs = SaveBaselineFlamegraphGenerator {
1068 baseline: self.baseline.clone(),
1069 }
1070 .create(
1071 &Flamegraph::new(header.to_title(), flamegraph_config),
1072 &out_path,
1073 None,
1074 &config.meta.project_root,
1075 )?;
1076 }
1077
1078 benchmark_summary.tool_summaries = bin_bench.tools.run(
1079 config,
1080 &bin_bench.command.path,
1081 &bin_bench.command.args,
1082 &bin_bench.run_options,
1083 &out_path,
1084 true,
1085 &bin_bench.module_path,
1086 bin_bench.sandbox.as_ref(),
1087 bin_bench.setup.as_ref(),
1088 bin_bench.teardown.as_ref(),
1089 bin_bench.command.delay.as_ref(),
1090 &bin_bench.output_format,
1091 )?;
1092
1093 Ok(benchmark_summary)
1094 }
1095}
1096
1097pub fn run(benchmark_groups: BinaryBenchmarkGroups, config: Config) -> Result<()> {
1098 Runner::new(benchmark_groups, config)?.run()
1099}
1100
#[cfg(test)]
mod tests {
    use std::fs::File;
    use std::net::TcpListener;

    use pretty_assertions::assert_eq;
    use rstest::rstest;
    use tempfile::tempdir;

    use super::*;

    // NOTE(review): the network tests below use fixed ports (32000-32002,
    // 34000-34002); they can collide if multiple test processes run in
    // parallel on the same host.

    /// Build an `api::Delay` with poll/timeout given in milliseconds.
    fn api_delay_fixture<T, U>(poll: T, timeout: U, kind: DelayKind) -> api::Delay
    where
        T: Into<Option<u64>>,
        U: Into<Option<u64>>,
    {
        api::Delay {
            poll: poll.into().map(Duration::from_millis),
            timeout: timeout.into().map(Duration::from_millis),
            kind,
        }
    }

    // Pin the default/clamping behavior of `From<api::Delay> for Delay`.
    #[rstest]
    #[case::duration_elapse_when_no_poll_no_timeout(
        api_delay_fixture(None, None, DelayKind::DurationElapse(Duration::from_millis(100))),
        Duration::ZERO,
        Duration::ZERO
    )]
    #[case::duration_elapse_when_poll_no_timeout(
        api_delay_fixture(10, None, DelayKind::DurationElapse(Duration::from_millis(100))),
        Duration::ZERO,
        Duration::ZERO
    )]
    #[case::duration_elapse_when_no_poll_but_timeout(
        api_delay_fixture(None, 10, DelayKind::DurationElapse(Duration::from_millis(100))),
        Duration::ZERO,
        Duration::ZERO
    )]
    #[case::duration_elapse_when_poll_and_timeout(
        api_delay_fixture(10, 100, DelayKind::DurationElapse(Duration::from_millis(100))),
        Duration::ZERO,
        Duration::ZERO
    )]
    #[case::path_when_no_poll_no_timeout(
        api_delay_fixture(None, None, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(10),
        Duration::from_secs(600)
    )]
    #[case::path_when_poll_no_timeout(
        api_delay_fixture(20, None, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(20),
        Duration::from_secs(600)
    )]
    #[case::path_when_no_poll_but_timeout(
        api_delay_fixture(None, 200, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(10),
        Duration::from_millis(200)
    )]
    #[case::path_when_poll_and_timeout(
        api_delay_fixture(20, 200, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(20),
        Duration::from_millis(200)
    )]
    #[case::path_when_poll_equal_to_timeout(
        api_delay_fixture(200, 200, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(195),
        Duration::from_millis(200)
    )]
    #[case::path_when_poll_higher_than_timeout(
        api_delay_fixture(201, 200, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(195),
        Duration::from_millis(200)
    )]
    #[case::path_when_poll_equal_to_timeout_smaller_than_10(
        api_delay_fixture(10, 9, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(5),
        Duration::from_millis(10)
    )]
    #[case::path_when_poll_lower_than_timeout_smaller_than_10(
        api_delay_fixture(7, 9, DelayKind::PathExists(PathBuf::from("/some/path"))),
        Duration::from_millis(7),
        Duration::from_millis(10)
    )]
    fn test_from_api_delay_for_delay(
        #[case] delay: api::Delay,
        #[case] poll: Duration,
        #[case] timeout: Duration,
    ) {
        let expected = Delay::new(poll, timeout, delay.kind.clone());
        assert_eq!(Delay::from(delay), expected);
    }

    // The delay resolves once the awaited file is created by another thread.
    #[test]
    fn test_delay_path() {
        let dir = tempdir().unwrap();
        let file_path = dir.path().join("file.pid");

        let delay = Delay {
            poll: Duration::from_millis(50),
            timeout: Duration::from_millis(200),
            kind: DelayKind::PathExists(file_path.clone()),
        };
        let handle = thread::spawn(move || {
            delay.run().unwrap();
        });

        thread::sleep(Duration::from_millis(100));
        File::create(file_path).unwrap();

        handle.join().unwrap();
        drop(dir);
    }

    // The delay resolves immediately when the listener already exists.
    #[test]
    fn test_delay_tcp_connect() {
        let addr = "127.0.0.1:32000".parse::<SocketAddr>().unwrap();
        let _listener = TcpListener::bind(addr).unwrap();

        let delay = Delay {
            poll: Duration::from_millis(20),
            timeout: Duration::from_secs(1),
            kind: DelayKind::TcpConnect(addr),
        };
        delay.run().unwrap();
    }

    // The delay keeps polling until the listener appears.
    #[test]
    fn test_delay_tcp_connect_poll() {
        let addr = "127.0.0.1:32001".parse::<SocketAddr>().unwrap();

        let check_addr = addr;
        let handle = thread::spawn(move || {
            let delay = Delay {
                poll: Duration::from_millis(20),
                timeout: Duration::from_secs(1),
                kind: DelayKind::TcpConnect(check_addr),
            };
            delay.run().unwrap();
        });

        thread::sleep(Duration::from_millis(100));
        let _listener = TcpListener::bind(addr).unwrap();

        handle.join().unwrap();
    }

    // Without a listener the delay must fail with a timeout error.
    #[test]
    fn test_delay_tcp_connect_timeout() {
        let addr = "127.0.0.1:32002".parse::<SocketAddr>().unwrap();
        let delay = Delay {
            poll: Duration::from_millis(20),
            timeout: Duration::from_secs(1),
            kind: DelayKind::TcpConnect(addr),
        };

        let result = delay.run();
        assert!(result.is_err());
        assert_eq!(result.unwrap_err().to_string(), "Timeout of '1s' reached");
    }

    // A responding UDP echo server resolves the delay.
    #[test]
    fn test_delay_udp_response() {
        let addr = "127.0.0.1:34000".parse::<SocketAddr>().unwrap();

        thread::spawn(move || {
            let server = UdpSocket::bind(addr).unwrap();
            server
                .set_read_timeout(Some(Duration::from_millis(100)))
                .unwrap();
            server
                .set_write_timeout(Some(Duration::from_millis(100)))
                .unwrap();

            loop {
                let mut buf = [0; 1];

                match server.recv_from(&mut buf) {
                    Ok((_size, from)) => {
                        server.send_to(&[2], from).unwrap();
                    }
                    Err(_e) => {}
                }
            }
        });

        let delay = Delay {
            poll: Duration::from_millis(20),
            timeout: Duration::from_millis(100),
            kind: DelayKind::UdpResponse(addr, vec![1]),
        };

        delay.run().unwrap();
    }

    // The delay keeps sending requests until the late-started server answers.
    #[test]
    fn test_delay_udp_response_poll() {
        let addr = "127.0.0.1:34001".parse::<SocketAddr>().unwrap();

        thread::spawn(move || {
            let delay = Delay {
                poll: Duration::from_millis(20),
                timeout: Duration::from_millis(100),
                kind: DelayKind::UdpResponse(addr, vec![1]),
            };
            delay.run().unwrap();
        });

        let server = UdpSocket::bind(addr).unwrap();
        server
            .set_read_timeout(Some(Duration::from_millis(100)))
            .unwrap();
        server
            .set_write_timeout(Some(Duration::from_millis(100)))
            .unwrap();

        loop {
            let mut buf = [0; 1];

            thread::sleep(Duration::from_millis(70));

            match server.recv_from(&mut buf) {
                Ok((_size, from)) => {
                    server.send_to(&[2], from).unwrap();
                    break;
                }
                Err(_e) => {}
            }
        }
    }

    // Without any server the delay must fail with a timeout error.
    #[test]
    fn test_delay_udp_response_timeout() {
        let addr = "127.0.0.1:34002".parse::<SocketAddr>().unwrap();
        let delay = Delay {
            poll: Duration::from_millis(20),
            timeout: Duration::from_millis(100),
            kind: DelayKind::UdpResponse(addr, vec![1]),
        };
        let result = delay.run();
        assert!(result.is_err());
        assert_eq!(
            result.unwrap_err().to_string(),
            "Timeout of '100ms' reached"
        );
    }
}