#![allow(clippy::all, unexpected_cfgs, dead_code)]
#![warn(missing_docs)]
#![warn(bare_trait_objects)]
#![cfg_attr(
    feature = "cargo-clippy",
    allow(
        clippy::just_underscores_and_digits,
        clippy::transmute_ptr_to_ptr,
        clippy::manual_non_exhaustive,
    )
)]

#[cfg(all(feature = "rayon", target_arch = "wasm32"))]
compile_error!("Rayon cannot be used when targeting wasm32. Try disabling default features.");

#[cfg(test)]
extern crate approx;

#[cfg(test)]
extern crate quickcheck;

use is_terminal::IsTerminal;
use regex::Regex;

#[macro_use]
extern crate serde_derive;

// Needs to be declared before the other modules
// so its macros are usable there.
#[macro_use]
mod macros_private;
#[macro_use]
mod analysis;
mod benchmark;
#[macro_use]
mod benchmark_group;
pub mod async_executor;
mod bencher;
mod connection;
#[cfg(feature = "csv_output")]
mod csv_report;
mod error;
mod estimate;
mod format;
mod fs;
mod html;
mod kde;
mod macros;
pub mod measurement;
mod plot;
pub mod profiler;
mod report;
mod routine;
mod stats;

use std::cell::RefCell;
use std::collections::HashSet;
use std::default::Default;
use std::env;
use std::io::stdout;
use std::net::TcpStream;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::sync::{Mutex, MutexGuard};
use std::time::Duration;

use criterion_plot::{Version, VersionError};
use once_cell::sync::Lazy;

use crate::benchmark::BenchmarkConfig;
use crate::connection::Connection;
use crate::connection::OutgoingMessage;
use crate::html::Html;
use crate::measurement::{Measurement, WallTime};
#[cfg(feature = "plotters")]
use crate::plot::PlottersBackend;
use crate::plot::{Gnuplot, Plotter};
use crate::profiler::{ExternalProfiler, Profiler};
use crate::report::{BencherReport, CliReport, CliVerbosity, Report, ReportContext, Reports};

#[cfg(feature = "async")]
pub use crate::bencher::AsyncBencher;
pub use crate::bencher::Bencher;
pub use crate::benchmark_group::{BenchmarkGroup, BenchmarkId};

static DEBUG_ENABLED: Lazy<bool> = Lazy::new(|| std::env::var_os("CRITERION_DEBUG").is_some());
static GNUPLOT_VERSION: Lazy<Result<Version, VersionError>> = Lazy::new(criterion_plot::version);
static DEFAULT_PLOTTING_BACKEND: Lazy<PlottingBackend> = Lazy::new(|| match &*GNUPLOT_VERSION {
    Ok(_) => PlottingBackend::Gnuplot,
    #[cfg(feature = "plotters")]
    Err(e) => {
        match e {
            VersionError::Exec(_) => eprintln!("Gnuplot not found, using plotters backend"),
            e => eprintln!(
                "Gnuplot not found or not usable, using plotters backend\n{}",
                e
            ),
        };
        PlottingBackend::Plotters
    }
    #[cfg(not(feature = "plotters"))]
    Err(_) => PlottingBackend::None,
});
static CARGO_CRITERION_CONNECTION: Lazy<Option<Mutex<Connection>>> =
    Lazy::new(|| match std::env::var("CARGO_CRITERION_PORT") {
        Ok(port_str) => {
            let port: u16 = port_str.parse().ok()?;
            let stream = TcpStream::connect(("localhost", port)).ok()?;
            Some(Mutex::new(Connection::new(stream).ok()?))
        }
        Err(_) => None,
    });
static DEFAULT_OUTPUT_DIRECTORY: Lazy<PathBuf> = Lazy::new(|| {
    // Set the output directory to (in descending order of preference):
    // - $CRITERION_HOME
    // - ${cargo target directory}/criterion
    // - ./target/criterion
    if let Some(value) = env::var_os("CRITERION_HOME") {
        PathBuf::from(value)
    } else if let Some(path) = cargo_target_directory() {
        path.join("criterion")
    } else {
        PathBuf::from("target/criterion")
    }
});

fn debug_enabled() -> bool {
    *DEBUG_ENABLED
}

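/// A function that is opaque to the optimizer, used to prevent the compiler from
/// optimizing away computations in a benchmark. When the `real_blackbox` feature is
/// enabled, this simply forwards to `core::hint::black_box`.
///
/// A minimal usage sketch (the computation is illustrative):
///
/// ```rust
/// use criterion::black_box;
///
/// // Without black_box, the compiler could const-fold the whole expression away.
/// let sum = black_box((0..100u64).sum::<u64>());
/// assert_eq!(sum, 4950);
/// ```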
#[cfg(feature = "real_blackbox")]
pub fn black_box<T>(dummy: T) -> T {
    core::hint::black_box(dummy)
}

/// Fallback implementation of `black_box` for builds without the `real_blackbox`
/// feature: a volatile read prevents the compiler from eliding `dummy`.
#[cfg(not(feature = "real_blackbox"))]
pub fn black_box<T>(dummy: T) -> T {
    unsafe {
        // Read the value volatilely, then forget the original to avoid a double drop.
        let ret = std::ptr::read_volatile(&dummy);
        std::mem::forget(dummy);
        ret
    }
}

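/// Argument to [`Bencher::iter_batched`] and [`Bencher::iter_batched_ref`] which
/// controls the batch size.
///
/// Generally speaking, almost all benchmarks should use `SmallInput`. If the input or
/// the output of the benchmark routine occupies a noticeable amount of memory, consider
/// `LargeInput` or `PerIteration` instead.
///
/// A minimal sketch of passing a batch size to `iter_batched` (the closures are
/// illustrative):
///
/// ```rust
/// # use criterion::{BatchSize, Criterion};
/// # fn bench(c: &mut Criterion) {
/// c.bench_function("sort", |b| {
///     b.iter_batched(
///         || vec![3, 1, 2],        // setup: build a fresh input for each batch
///         |mut v| { v.sort(); v }, // routine under measurement
///         BatchSize::SmallInput,
///     )
/// });
/// # }
/// ```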
#[derive(Debug, Eq, PartialEq, Copy, Hash, Clone)]
pub enum BatchSize {
    /// `SmallInput` indicates that the input to the benchmark routine (the value returned
    /// from the setup routine) is small enough that millions of values can be safely held
    /// in memory.
    SmallInput,

    /// `LargeInput` indicates that the input is large enough that only thousands of
    /// values can be safely held in memory.
    LargeInput,

    /// `PerIteration` indicates that the input can only be held in memory a few values at
    /// a time, so a new value is generated for every iteration. This degrades measurement
    /// precision and should be avoided where possible.
    PerIteration,

    /// `NumBatches` runs the benchmark with exactly this many batches.
    NumBatches(u64),

    /// `NumIterations` fixes the number of iterations per batch.
    NumIterations(u64),

    #[doc(hidden)]
    __NonExhaustive,
}
impl BatchSize {
    /// Convert to a number of iterations per batch.
    ///
    /// `SmallInput` and `LargeInput` aim for a constant number of batches (10 and 1000
    /// respectively) regardless of the iteration count of the sample.
    fn iters_per_batch(self, iters: u64) -> u64 {
        match self {
            BatchSize::SmallInput => (iters + 10 - 1) / 10,
            BatchSize::LargeInput => (iters + 1000 - 1) / 1000,
            BatchSize::PerIteration => 1,
            BatchSize::NumBatches(batches) => (iters + batches - 1) / batches,
            BatchSize::NumIterations(size) => size,
            BatchSize::__NonExhaustive => panic!("__NonExhaustive is not a valid BatchSize."),
        }
    }
}

/// Baseline describes how the baseline directory is handled.
#[derive(Debug, Clone, Copy)]
pub enum Baseline {
    /// `CompareLenient` compares against a previously saved baseline. If that baseline
    /// does not exist, the benchmark is run as normal but no comparison occurs.
    CompareLenient,
    /// `CompareStrict` compares against a previously saved baseline. If that baseline
    /// does not exist, a panic occurs.
    CompareStrict,
    /// `Save` writes the benchmark results to the baseline directory, overwriting any
    /// results that were previously there.
    Save,
    /// `Discard` discards the benchmark results.
    Discard,
}

/// Enum used to select the plotting backend.
#[derive(Debug, Clone, Copy)]
pub enum PlottingBackend {
    /// Plotting backend which uses the external `gnuplot` command to render plots. This
    /// is the default if the `gnuplot` command is installed.
    Gnuplot,
    /// Plotting backend which uses the Rust `plotters` crate. This is the default if
    /// `gnuplot` is not installed.
    Plotters,
    /// Null plotting backend which outputs nothing.
    None,
}
impl PlottingBackend {
    fn create_plotter(&self) -> Option<Box<dyn Plotter>> {
        match self {
            PlottingBackend::Gnuplot => Some(Box::<Gnuplot>::default()),
            #[cfg(feature = "plotters")]
            PlottingBackend::Plotters => Some(Box::<PlottersBackend>::default()),
            #[cfg(not(feature = "plotters"))]
            PlottingBackend::Plotters => panic!("Criterion was built without plotters support."),
            PlottingBackend::None => None,
        }
    }
}

/// Enum representing the execution mode.
#[derive(Debug, Clone)]
pub(crate) enum Mode {
    /// Run benchmarks normally.
    Benchmark,
    /// List all benchmarks but do not run them.
    List(ListFormat),
    /// Run benchmarks once to verify that they work, but do not take measurements.
    Test,
    /// Iterate benchmarks for a given length of time but do not analyze or report on them.
    Profile(Duration),
}
impl Mode {
    pub fn is_benchmark(&self) -> bool {
        matches!(self, Mode::Benchmark)
    }

    pub fn is_terse(&self) -> bool {
        matches!(self, Mode::List(ListFormat::Terse))
    }
}

/// Enum representing the --list output format.
#[derive(Debug, Clone)]
pub(crate) enum ListFormat {
    /// The regular, default format.
    Pretty,
    /// The terse format: nothing but the name of the benchmark followed by ": benchmark".
    Terse,
}

impl Default for ListFormat {
    fn default() -> Self {
        Self::Pretty
    }
}

/// Benchmark filtering support.
#[derive(Clone, Debug)]
pub enum BenchmarkFilter {
    /// Run all benchmarks.
    AcceptAll,
    /// Run benchmarks matching this regex.
    Regex(Regex),
    /// Run the benchmark matching this string exactly.
    Exact(String),
    /// Do not run any benchmarks.
    RejectAll,
}

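/// The benchmark manager.
///
/// `Criterion` lets you configure and execute benchmarks, and performs the statistical
/// analysis and reporting. Each benchmark consists of four phases:
///
/// - **Warm-up**: The routine is executed repeatedly to let the OS and CPU adapt to the
///   new load.
/// - **Measurement**: The routine is executed repeatedly and execution times are recorded.
/// - **Analysis**: The samples are analyzed and distilled into meaningful statistics.
/// - **Comparison**: The current sample is compared with a previously saved baseline, if any.
///
/// A minimal benchmark using the usual entry-point macros (the Fibonacci routine is
/// illustrative):
///
/// ```rust,no_run
/// use criterion::{criterion_group, criterion_main, Criterion};
///
/// fn fibonacci(n: u64) -> u64 {
///     match n {
///         0 | 1 => 1,
///         n => fibonacci(n - 1) + fibonacci(n - 2),
///     }
/// }
///
/// fn bench_fib(c: &mut Criterion) {
///     c.bench_function("fib 10", |b| b.iter(|| fibonacci(10)));
/// }
///
/// criterion_group!(benches, bench_fib);
/// criterion_main!(benches);
/// ```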
pub struct Criterion<M: Measurement = WallTime> {
    config: BenchmarkConfig,
    filter: BenchmarkFilter,
    report: Reports,
    output_directory: PathBuf,
    baseline_directory: String,
    baseline: Baseline,
    load_baseline: Option<String>,
    all_directories: HashSet<String>,
    all_titles: HashSet<String>,
    measurement: M,
    profiler: Box<RefCell<dyn Profiler>>,
    connection: Option<MutexGuard<'static, Connection>>,
    mode: Mode,

    // Set through the setters in the `codspeed` module below.
    current_file: String,
    macro_group: String,
}

pub use ::codspeed::abs_file;

mod codspeed {
    use crate::{measurement::Measurement, Criterion};

    impl<M: Measurement> Criterion<M> {
        #[doc(hidden)]
        pub fn set_current_file(&mut self, file: impl Into<String>) {
            self.current_file = file.into();
        }

        #[doc(hidden)]
        pub fn set_macro_group(&mut self, macro_group: impl Into<String>) {
            self.macro_group = macro_group.into();
        }
    }
}

/// Returns the Cargo target directory, possibly calling `cargo metadata` to figure it out.
fn cargo_target_directory() -> Option<PathBuf> {
    #[derive(Deserialize)]
    struct Metadata {
        target_directory: PathBuf,
    }

    env::var_os("CARGO_TARGET_DIR")
        .map(PathBuf::from)
        .or_else(|| {
            let output = Command::new(env::var_os("CARGO")?)
                .args(["metadata", "--format-version", "1"])
                .output()
                .ok()?;
            let metadata: Metadata = serde_json::from_slice(&output.stdout).ok()?;
            Some(metadata.target_directory)
        })
}

impl Default for Criterion {
    /// Creates a benchmark manager with the following default settings:
    ///
    /// - Sample size: 100 measurements
    /// - Warm-up time: 3 s
    /// - Measurement time: 5 s
    /// - Bootstrap size: 100 000 resamples
    /// - Noise threshold: 0.01 (1%)
    /// - Confidence level: 0.95
    /// - Significance level: 0.05
    /// - Plotting: enabled, using the default plotting backend
    /// - No filter
    fn default() -> Criterion {
        let reports = Reports {
            cli_enabled: true,
            cli: CliReport::new(false, false, CliVerbosity::Normal),
            bencher_enabled: false,
            bencher: BencherReport,
            html: DEFAULT_PLOTTING_BACKEND.create_plotter().map(Html::new),
            csv_enabled: cfg!(feature = "csv_output"),
        };

        let mut criterion = Criterion {
            config: BenchmarkConfig {
                confidence_level: 0.95,
                measurement_time: Duration::from_secs(5),
                noise_threshold: 0.01,
                nresamples: 100_000,
                sample_size: 100,
                significance_level: 0.05,
                warm_up_time: Duration::from_secs(3),
                sampling_mode: SamplingMode::Auto,
                quick_mode: false,
            },
            filter: BenchmarkFilter::AcceptAll,
            report: reports,
            baseline_directory: "base".to_owned(),
            baseline: Baseline::Save,
            load_baseline: None,
            output_directory: DEFAULT_OUTPUT_DIRECTORY.clone(),
            all_directories: HashSet::new(),
            all_titles: HashSet::new(),
            measurement: WallTime,
            profiler: Box::new(RefCell::new(ExternalProfiler)),
            connection: CARGO_CRITERION_CONNECTION
                .as_ref()
                .map(|mtx| mtx.lock().unwrap()),
            mode: Mode::Benchmark,
            current_file: String::new(),
            macro_group: String::new(),
        };

        if criterion.connection.is_some() {
            // Disable all reports when connected to cargo-criterion; it does the reporting.
            criterion.report.cli_enabled = false;
            criterion.report.bencher_enabled = false;
            criterion.report.csv_enabled = false;
            criterion.report.html = None;
        }
        criterion
    }
}

impl<M: Measurement> Criterion<M> {
    /// Changes the measurement for the benchmarks run with this runner. See the
    /// [`Measurement`] trait for more details.
    pub fn with_measurement<M2: Measurement>(self, m: M2) -> Criterion<M2> {
        // Can't use struct update syntax here because the two types are technically different.
        Criterion {
            config: self.config,
            filter: self.filter,
            report: self.report,
            baseline_directory: self.baseline_directory,
            baseline: self.baseline,
            load_baseline: self.load_baseline,
            output_directory: self.output_directory,
            all_directories: self.all_directories,
            all_titles: self.all_titles,
            measurement: m,
            profiler: self.profiler,
            connection: self.connection,
            mode: self.mode,
            current_file: self.current_file,
            macro_group: self.macro_group,
        }
    }

    /// Changes the internal profiler for benchmarks run with this runner. See the
    /// [`Profiler`] trait for more details.
    #[must_use]
    pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
        Criterion {
            profiler: Box::new(RefCell::new(p)),
            ..self
        }
    }

    /// Set the [`PlottingBackend`] to use when generating the plots. Panics if
    /// `PlottingBackend::Gnuplot` is requested and gnuplot is not available.
    #[must_use]
    pub fn plotting_backend(mut self, backend: PlottingBackend) -> Criterion<M> {
        if let PlottingBackend::Gnuplot = backend {
            assert!(
                !GNUPLOT_VERSION.is_err(),
                "Gnuplot plotting backend was requested, but gnuplot is not available. \
                 To continue, either install Gnuplot or allow Criterion.rs to fall back \
                 to using plotters."
            );
        }

        self.report.html = backend.create_plotter().map(Html::new);
        self
    }

    /// Changes the default size of the sample for benchmarks run with this runner. A
    /// bigger sample should yield more accurate results if paired with a sufficiently
    /// large measurement time. Panics if `n` is less than 10.
    #[must_use]
    pub fn sample_size(mut self, n: usize) -> Criterion<M> {
        assert!(n >= 10);

        self.config.sample_size = n;
        self
    }

    /// Changes the default warm up time for benchmarks run with this runner. Panics if
    /// the given duration is zero.
    #[must_use]
    pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
        assert!(dur.as_nanos() > 0);

        self.config.warm_up_time = dur;
        self
    }

    /// Changes the default measurement time for benchmarks run with this runner. With a
    /// longer time, the measurement becomes more resilient to transitory peak loads
    /// caused by external programs, at the cost of a longer benchmark run. Panics if the
    /// given duration is zero.
    #[must_use]
    pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
        assert!(dur.as_nanos() > 0);

        self.config.measurement_time = dur;
        self
    }

    /// Changes the default number of resamples used by the bootstrap analysis for
    /// benchmarks run with this runner. A larger number of resamples reduces the random
    /// sampling errors inherent to the bootstrap, at the cost of a longer analysis time.
    /// Panics if set to zero.
    #[must_use]
    pub fn nresamples(mut self, n: usize) -> Criterion<M> {
        assert!(n > 0);
        if n <= 1000 {
            eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
        }

        self.config.nresamples = n;
        self
    }

    /// Changes the default noise threshold for benchmarks run with this runner. Only
    /// changes larger than this threshold are reported as performance changes. Panics if
    /// the threshold is negative.
    #[must_use]
    pub fn noise_threshold(mut self, threshold: f64) -> Criterion<M> {
        assert!(threshold >= 0.0);

        self.config.noise_threshold = threshold;
        self
    }

    /// Changes the default confidence level for benchmarks run with this runner: the
    /// desired probability that the true runtime lies within the reported confidence
    /// interval. Panics unless the value is strictly between 0 and 1.
    #[must_use]
    pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
        assert!(cl > 0.0 && cl < 1.0);
        if cl < 0.5 {
            eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
        }

        self.config.confidence_level = cl;
        self
    }

    /// Changes the default significance level for benchmarks run with this runner:
    /// changes are only reported if the null hypothesis (no performance change) can be
    /// rejected at this level. Panics unless the value is strictly between 0 and 1.
    #[must_use]
    pub fn significance_level(mut self, sl: f64) -> Criterion<M> {
        assert!(sl > 0.0 && sl < 1.0);

        self.config.significance_level = sl;
        self
    }

    /// Enables plotting.
    #[must_use]
    pub fn with_plots(mut self) -> Criterion<M> {
        // If running under cargo-criterion, don't set a plotter; it does the plotting.
        if self.connection.is_none() && self.report.html.is_none() {
            let default_backend = DEFAULT_PLOTTING_BACKEND.create_plotter();
            if let Some(backend) = default_backend {
                self.report.html = Some(Html::new(backend));
            } else {
                panic!("Cannot find a default plotting backend!");
            }
        }
        self
    }

    /// Disables plotting.
    #[must_use]
    pub fn without_plots(mut self) -> Criterion<M> {
        self.report.html = None;
        self
    }

    /// Names an explicit baseline and enables overwriting the previous results.
    #[must_use]
    pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
        self.baseline_directory = baseline;
        self.baseline = Baseline::Save;
        self
    }

    /// Names an explicit baseline and disables overwriting the previous results.
    #[must_use]
    pub fn retain_baseline(mut self, baseline: String, strict: bool) -> Criterion<M> {
        self.baseline_directory = baseline;
        self.baseline = if strict {
            Baseline::CompareStrict
        } else {
            Baseline::CompareLenient
        };
        self
    }

    /// Filters the benchmarks. Only benchmarks whose names contain a match for the given
    /// regular expression will be executed. Panics if the string is not a valid regular
    /// expression.
    #[must_use]
    pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
        let filter_text = filter.into();
        let filter = Regex::new(&filter_text).unwrap_or_else(|err| {
            panic!(
                "Unable to parse '{}' as a regular expression: {}",
                filter_text, err
            )
        });
        self.filter = BenchmarkFilter::Regex(filter);

        self
    }

    /// Only run benchmarks specified by the given filter. This overwrites
    /// [`Self::with_filter`].
    pub fn with_benchmark_filter(mut self, filter: BenchmarkFilter) -> Criterion<M> {
        self.filter = filter;

        self
    }

    /// Overrides whether the CLI output will be colored or not. Usually you would use the
    /// `--color` CLI argument, but this is available for programmatic use as well.
    #[must_use]
    pub fn with_output_color(mut self, enabled: bool) -> Criterion<M> {
        self.report.cli.enable_text_coloring = enabled;
        self
    }

    /// Sets the output directory (currently for testing only).
    #[must_use]
    #[doc(hidden)]
    pub fn output_directory(mut self, path: &Path) -> Criterion<M> {
        self.output_directory = path.to_owned();

        self
    }

    /// Sets the profile time (currently for testing only).
    #[must_use]
    #[doc(hidden)]
    pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
        match profile_time {
            Some(time) => self.mode = Mode::Profile(time),
            None => self.mode = Mode::Benchmark,
        }

        self
    }

    /// Generates the final summary at the end of a run.
    #[doc(hidden)]
    pub fn final_summary(&self) {
        if !self.mode.is_benchmark() {
            return;
        }

        let report_context = ReportContext {
            output_directory: self.output_directory.clone(),
            plot_config: PlotConfiguration::default(),
        };

        self.report.final_summary(&report_context);
    }

    /// Configures this criterion struct based on the command-line arguments to this
    /// process.
    #[must_use]
    #[cfg_attr(feature = "cargo-clippy", allow(clippy::cognitive_complexity))]
    pub fn configure_from_args(mut self) -> Criterion<M> {
        use clap::{value_parser, Arg, Command};
        let matches = Command::new("Criterion Benchmark")
            .arg(Arg::new("FILTER")
                .help("Skip benchmarks whose names do not contain FILTER.")
                .index(1))
            .arg(Arg::new("color")
                .short('c')
                .long("color")
                .alias("colour")
                .value_parser(["auto", "always", "never"])
                .default_value("auto")
                .help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
            .arg(Arg::new("verbose")
                .short('v')
                .long("verbose")
                .num_args(0)
                .help("Print additional statistical information."))
            .arg(Arg::new("quiet")
                .long("quiet")
                .num_args(0)
                .conflicts_with("verbose")
                .help("Print only the benchmark results."))
            .arg(Arg::new("noplot")
                .short('n')
                .long("noplot")
                .num_args(0)
                .help("Disable plot and HTML generation."))
            .arg(Arg::new("save-baseline")
                .short('s')
                .long("save-baseline")
                .default_value("base")
                .help("Save results under a named baseline."))
            .arg(Arg::new("discard-baseline")
                .long("discard-baseline")
                .num_args(0)
                .conflicts_with_all(["save-baseline", "baseline", "baseline-lenient"])
                .help("Discard benchmark results."))
            .arg(Arg::new("baseline")
                .short('b')
                .long("baseline")
                .conflicts_with_all(["save-baseline", "baseline-lenient"])
                .help("Compare to a named baseline. If any benchmarks do not have the specified baseline this command fails."))
            .arg(Arg::new("baseline-lenient")
                .long("baseline-lenient")
                .conflicts_with_all(["save-baseline", "baseline"])
                .help("Compare to a named baseline. If any benchmarks do not have the specified baseline then just those benchmarks are not compared against the baseline while every other benchmark is compared against the baseline."))
            .arg(Arg::new("list")
                .long("list")
                .num_args(0)
                .help("List all benchmarks")
                .conflicts_with_all(["test", "profile-time"]))
            .arg(Arg::new("format")
                .long("format")
                .value_parser(["pretty", "terse"])
                .default_value("pretty")
                // This value is only consulted for the --list output.
                .help("Output formatting"))
            .arg(Arg::new("ignored")
                .long("ignored")
                .num_args(0)
                .help("List or run ignored benchmarks (currently means skip all benchmarks)"))
            .arg(Arg::new("exact")
                .long("exact")
                .num_args(0)
                .help("Run benchmarks that exactly match the provided filter"))
            .arg(Arg::new("profile-time")
                .long("profile-time")
                .value_parser(value_parser!(f64))
                .help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
                .conflicts_with_all(["test", "list"]))
            .arg(Arg::new("load-baseline")
                .long("load-baseline")
                .conflicts_with("profile-time")
                .requires("baseline")
                .help("Load a previous baseline instead of sampling new data."))
            .arg(Arg::new("sample-size")
                .long("sample-size")
                .value_parser(value_parser!(usize))
                .help(format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
            .arg(Arg::new("warm-up-time")
                .long("warm-up-time")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
            .arg(Arg::new("measurement-time")
                .long("measurement-time")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
            .arg(Arg::new("nresamples")
                .long("nresamples")
                .value_parser(value_parser!(usize))
                .help(format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
            .arg(Arg::new("noise-threshold")
                .long("noise-threshold")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
            .arg(Arg::new("confidence-level")
                .long("confidence-level")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
            .arg(Arg::new("significance-level")
                .long("significance-level")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
            .arg(Arg::new("quick")
                .long("quick")
                .num_args(0)
                .conflicts_with("sample-size")
                .help(format!("Benchmark only until the significance level has been reached [default: {}]", self.config.quick_mode)))
            .arg(Arg::new("test")
                .hide(true)
                .long("test")
                .num_args(0)
                .help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.")
                .conflicts_with_all(["list", "profile-time"]))
            .arg(Arg::new("bench")
                .hide(true)
                .long("bench")
                .num_args(0))
            .arg(Arg::new("plotting-backend")
                .long("plotting-backend")
                .value_parser(["gnuplot", "plotters"])
                .help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
            .arg(Arg::new("output-format")
                .long("output-format")
                .value_parser(["criterion", "bencher"])
                .default_value("criterion")
                .help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate."))
            .arg(Arg::new("nocapture")
                .long("nocapture")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("show-output")
                .long("show-output")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("version")
                .hide(true)
                .short('V')
                .long("version")
                .num_args(0))
            .after_help("
This executable is a Criterion.rs benchmark.
See https://github.com/bheisler/criterion.rs for more details.

To enable debug output, define the environment variable CRITERION_DEBUG.
Criterion.rs will output more debug information and will save the gnuplot
scripts alongside the generated plots.

To test that the benchmarks work, run `cargo test --benches`

NOTE: If you see an 'unrecognized option' error using any of the options above, see:
https://bheisler.github.io/criterion.rs/book/faq.html
")
            .get_matches();

        if self.connection.is_some() {
            if let Some(color) = matches.get_one::<String>("color") {
                if color != "auto" {
                    eprintln!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
                }
            }
            if matches.get_flag("verbose") {
                eprintln!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
            }
            if matches.get_flag("noplot") {
                eprintln!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
            }
            if let Some(backend) = matches.get_one::<String>("plotting-backend") {
                eprintln!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
            }
            if let Some(format) = matches.get_one::<String>("output-format") {
                if format != "criterion" {
                    eprintln!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
                }
            }

            if matches.contains_id("baseline")
                || matches
                    .get_one::<String>("save-baseline")
                    .map_or(false, |base| base != "base")
                || matches.contains_id("load-baseline")
            {
                eprintln!("Error: baselines are not supported when running with cargo-criterion.");
                std::process::exit(1);
            }
        }

        let bench = matches.get_flag("bench");
        let test = matches.get_flag("test");
        let test_mode = match (bench, test) {
            (true, true) => true,   // cargo bench -- --test should run tests
            (true, false) => false, // cargo bench should run benchmarks
            (false, _) => true,     // cargo test --benches should run tests
        };

        self.mode = if matches.get_flag("list") {
            let list_format = match matches
                .get_one::<String>("format")
                .expect("a default value was provided for this")
                .as_str()
            {
                "pretty" => ListFormat::Pretty,
                "terse" => ListFormat::Terse,
                other => unreachable!(
                    "unrecognized value for --format that isn't part of possible-values: {}",
                    other
                ),
            };
            Mode::List(list_format)
        } else if test_mode {
            Mode::Test
        } else if let Some(&num_seconds) = matches.get_one("profile-time") {
            if num_seconds < 1.0 {
                eprintln!("Profile time must be at least one second.");
                std::process::exit(1);
            }

            Mode::Profile(Duration::from_secs_f64(num_seconds))
        } else {
            Mode::Benchmark
        };

        // If benchmarks aren't actually being run, drop the cargo-criterion connection.
        if !self.mode.is_benchmark() {
            self.connection = None;
        }

        let filter = if matches.get_flag("ignored") {
            // --ignored overwrites any name-based filters passed in.
            BenchmarkFilter::RejectAll
        } else if let Some(filter) = matches.get_one::<String>("FILTER") {
            if matches.get_flag("exact") {
                BenchmarkFilter::Exact(filter.to_owned())
            } else {
                let regex = Regex::new(filter).unwrap_or_else(|err| {
                    panic!(
                        "Unable to parse '{}' as a regular expression: {}",
                        filter, err
                    )
                });
                BenchmarkFilter::Regex(regex)
            }
        } else {
            BenchmarkFilter::AcceptAll
        };
        self = self.with_benchmark_filter(filter);

        match matches.get_one("plotting-backend").map(String::as_str) {
            // Use plotting_backend() here to re-use its panic behavior if gnuplot is
            // requested but not available.
            Some("gnuplot") => self = self.plotting_backend(PlottingBackend::Gnuplot),
            Some("plotters") => self = self.plotting_backend(PlottingBackend::Plotters),
            Some(val) => panic!("Unexpected plotting backend '{}'", val),
            None => {}
        }

        if matches.get_flag("noplot") {
            self = self.without_plots();
        }

        if let Some(dir) = matches.get_one::<String>("save-baseline") {
            self.baseline = Baseline::Save;
            self.baseline_directory = dir.to_owned()
        }
        if matches.get_flag("discard-baseline") {
            self.baseline = Baseline::Discard;
        }
        if let Some(dir) = matches.get_one::<String>("baseline") {
            self.baseline = Baseline::CompareStrict;
            self.baseline_directory = dir.to_owned();
        }
        if let Some(dir) = matches.get_one::<String>("baseline-lenient") {
            self.baseline = Baseline::CompareLenient;
            self.baseline_directory = dir.to_owned();
        }

        if self.connection.is_some() {
            // Disable all reports when connected to cargo-criterion; it does the reporting.
            self.report.cli_enabled = false;
            self.report.bencher_enabled = false;
            self.report.csv_enabled = false;
            self.report.html = None;
        } else {
            match matches.get_one("output-format").map(String::as_str) {
                Some("bencher") => {
                    self.report.bencher_enabled = true;
                    self.report.cli_enabled = false;
                }
                _ => {
                    let verbose = matches.get_flag("verbose");
                    let verbosity = if verbose {
                        CliVerbosity::Verbose
                    } else if matches.get_flag("quiet") {
                        CliVerbosity::Quiet
                    } else {
                        CliVerbosity::Normal
                    };
                    let stdout_isatty = stdout().is_terminal();
                    let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
                    let enable_text_coloring;
                    match matches.get_one("color").map(String::as_str) {
                        Some("always") => {
                            enable_text_coloring = true;
                        }
                        Some("never") => {
                            enable_text_coloring = false;
                            enable_text_overwrite = false;
                        }
                        _ => enable_text_coloring = stdout_isatty,
                    };
                    self.report.bencher_enabled = false;
                    self.report.cli_enabled = true;
                    self.report.cli =
                        CliReport::new(enable_text_overwrite, enable_text_coloring, verbosity);
                }
            };
        }

        if let Some(dir) = matches.get_one::<String>("load-baseline") {
            self.load_baseline = Some(dir.to_owned());
        }

        if let Some(&num_size) = matches.get_one("sample-size") {
            assert!(num_size >= 10);
            self.config.sample_size = num_size;
        }
        if let Some(&num_seconds) = matches.get_one("warm-up-time") {
            let dur = std::time::Duration::from_secs_f64(num_seconds);
            assert!(dur.as_nanos() > 0);

            self.config.warm_up_time = dur;
        }
        if let Some(&num_seconds) = matches.get_one("measurement-time") {
            let dur = std::time::Duration::from_secs_f64(num_seconds);
            assert!(dur.as_nanos() > 0);

            self.config.measurement_time = dur;
        }
        if let Some(&num_resamples) = matches.get_one("nresamples") {
            assert!(num_resamples > 0);

            self.config.nresamples = num_resamples;
        }
        if let Some(&num_noise_threshold) = matches.get_one("noise-threshold") {
            assert!(num_noise_threshold > 0.0);

            self.config.noise_threshold = num_noise_threshold;
        }
        if let Some(&num_confidence_level) = matches.get_one("confidence-level") {
            assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);

            self.config.confidence_level = num_confidence_level;
        }
        if let Some(&num_significance_level) = matches.get_one("significance-level") {
            assert!(num_significance_level > 0.0 && num_significance_level < 1.0);

            self.config.significance_level = num_significance_level;
        }

        if matches.get_flag("quick") {
            self.config.quick_mode = true;
        }

        self
    }

    fn filter_matches(&self, id: &str) -> bool {
        match &self.filter {
            BenchmarkFilter::AcceptAll => true,
            BenchmarkFilter::Regex(regex) => regex.is_match(id),
            BenchmarkFilter::Exact(exact) => id == exact,
            BenchmarkFilter::RejectAll => false,
        }
    }

    /// Returns true iff the benchmark results should be saved to disk as a baseline.
    fn should_save_baseline(&self) -> bool {
        self.connection.is_none()
            && self.load_baseline.is_none()
            && !matches!(self.baseline, Baseline::Discard)
    }

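    /// Returns a benchmark group with the given name. All benchmarks in a group are
    /// analyzed and plotted together; this is the preferred way to compare several
    /// related routines. Panics if the group name is empty.
    ///
    /// A sketch of grouping two routines (the routine bodies are illustrative):
    ///
    /// ```rust,no_run
    /// use criterion::{criterion_group, criterion_main, Criterion};
    ///
    /// fn bench_group(c: &mut Criterion) {
    ///     let mut group = c.benchmark_group("compare");
    ///     group.bench_function("sum", |b| b.iter(|| (0..1000u64).sum::<u64>()));
    ///     group.bench_function("product", |b| b.iter(|| (1..20u64).product::<u64>()));
    ///     group.finish();
    /// }
    ///
    /// criterion_group!(benches, bench_group);
    /// criterion_main!(benches);
    /// ```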
    pub fn benchmark_group<S: Into<String>>(&mut self, group_name: S) -> BenchmarkGroup<'_, M> {
        let group_name = group_name.into();
        assert!(!group_name.is_empty(), "Group name must not be empty.");

        if let Some(conn) = &self.connection {
            conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: &group_name })
                .unwrap();
        }

        BenchmarkGroup::new(self, group_name)
    }
}
impl<M> Criterion<M>
where
    M: Measurement + 'static,
{
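    /// Benchmarks a function. For comparing multiple functions, see
    /// [`benchmark_group`](Self::benchmark_group).
    ///
    /// A minimal usage sketch (the routine body is illustrative):
    ///
    /// ```rust,no_run
    /// use criterion::{criterion_group, criterion_main, Criterion};
    ///
    /// fn bench(c: &mut Criterion) {
    ///     // The closure receives a `Bencher`; `iter` times the routine it is given.
    ///     c.bench_function("function_name", |b| {
    ///         b.iter(|| {
    ///             // Code to benchmark goes here.
    ///         })
    ///     });
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```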
    pub fn bench_function<F>(&mut self, id: &str, f: F) -> &mut Criterion<M>
    where
        F: FnMut(&mut Bencher<'_, M>),
    {
        self.benchmark_group(id)
            .bench_function(BenchmarkId::no_function(), f);
        self
    }

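    /// Benchmarks a function under various inputs. This is a convenience method to
    /// execute several related benchmarks; each one receives the id `${id}/${input}`.
    ///
    /// A sketch of benchmarking one routine over several sizes (the routine is
    /// illustrative):
    ///
    /// ```rust,no_run
    /// use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
    ///
    /// fn bench(c: &mut Criterion) {
    ///     for size in [1024usize, 2048, 4096] {
    ///         c.bench_with_input(BenchmarkId::new("alloc", size), &size, |b, &s| {
    ///             b.iter(|| vec![0u8; s]);
    ///         });
    ///     }
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```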
    pub fn bench_with_input<F, I>(&mut self, id: BenchmarkId, input: &I, f: F) -> &mut Criterion<M>
    where
        F: FnMut(&mut Bencher<'_, M>, &I),
    {
        // It's possible to use BenchmarkId::from_parameter to create a benchmark ID with
        // no function name. That's intended for use with BenchmarkGroups, where the
        // function name isn't necessary; here it is required.
        let group_name = id.function_name.expect(
            "Cannot use BenchmarkId::from_parameter with Criterion::bench_with_input. \
             Consider using a BenchmarkGroup or BenchmarkId::new instead.",
        );
        // Guaranteed safe because external callers can't create benchmark IDs without a
        // parameter.
        let parameter = id.parameter.unwrap();
        self.benchmark_group(group_name).bench_with_input(
            BenchmarkId::no_function_with_input(parameter),
            input,
            f,
        );
        self
    }
}

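/// Enum representing different ways of measuring the throughput of benchmarked code.
/// If a throughput is configured for a benchmark, the estimated throughput is reported
/// alongside the time per iteration.
///
/// A sketch of attaching a throughput to a benchmark group (the checksum routine is
/// illustrative):
///
/// ```rust,no_run
/// use criterion::{criterion_group, criterion_main, Criterion, Throughput};
///
/// fn bench(c: &mut Criterion) {
///     let bytes = vec![1u8; 1024];
///     let mut group = c.benchmark_group("checksum");
///     // Report results in bytes per second, based on the input length.
///     group.throughput(Throughput::Bytes(bytes.len() as u64));
///     group.bench_function("sum", |b| {
///         b.iter(|| bytes.iter().map(|&x| x as u64).sum::<u64>())
///     });
///     group.finish();
/// }
///
/// criterion_group!(benches, bench);
/// criterion_main!(benches);
/// ```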
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum Throughput {
    /// Measure throughput in terms of bytes/second. The value should be the number of
    /// bytes processed by one iteration of the benchmarked code. Typically, this would
    /// be the length of an input string or `&[u8]`.
    Bytes(u64),

    /// Equivalent to `Bytes`, but reported in decimal units: kilobytes (1000 bytes) per
    /// second instead of kibibytes (1024 bytes) per second, and so on for megabytes and
    /// gigabytes.
    BytesDecimal(u64),

    /// Measure throughput in terms of elements/second. The value should be the number of
    /// elements processed by one iteration of the benchmarked code. Typically, this would
    /// be the size of a collection, the number of lines of input text, or the number of
    /// values to parse.
    Elements(u64),
}

/// Axis scaling type: linear or logarithmic.
#[derive(Debug, Clone, Copy)]
pub enum AxisScale {
    /// Axes scale linearly.
    Linear,

    /// Axes scale logarithmically.
    Logarithmic,
}

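/// Contains the configuration options for the plots generated by a particular benchmark
/// or benchmark group.
///
/// A sketch of switching a group's summary plots to a logarithmic scale:
///
/// ```rust,no_run
/// use criterion::{criterion_group, criterion_main, AxisScale, Criterion, PlotConfiguration};
///
/// fn bench(c: &mut Criterion) {
///     let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
///     let mut group = c.benchmark_group("log_scale");
///     // Useful when benchmark parameters span several orders of magnitude.
///     group.plot_config(plot_config);
///     group.bench_function("noop", |b| b.iter(|| ()));
///     group.finish();
/// }
///
/// criterion_group!(benches, bench);
/// criterion_main!(benches);
/// ```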
#[derive(Debug, Clone)]
pub struct PlotConfiguration {
    summary_scale: AxisScale,
}

impl Default for PlotConfiguration {
    fn default() -> PlotConfiguration {
        PlotConfiguration {
            summary_scale: AxisScale::Linear,
        }
    }
}

impl PlotConfiguration {
    /// Set the axis scale (linear or logarithmic) for the summary plots. Typically, you
    /// would set this to logarithmic if benchmarking over a range of inputs which scale
    /// exponentially. Defaults to linear.
    #[must_use]
    pub fn summary_scale(mut self, new_scale: AxisScale) -> PlotConfiguration {
        self.summary_scale = new_scale;
        self
    }
}

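/// This enum allows the user to control how Criterion.rs chooses the iteration counts
/// when sampling. The default is `Auto`, which chooses a method automatically based on
/// the iteration time measured during the warm-up phase.
///
/// A sketch of opting a long-running group into flat sampling:
///
/// ```rust,no_run
/// use criterion::{Criterion, SamplingMode};
///
/// fn bench(c: &mut Criterion) {
///     let mut group = c.benchmark_group("slow_routines");
///     // Flat sampling keeps the iteration count constant across samples, which suits
///     // routines that take milliseconds or more per iteration.
///     group.sampling_mode(SamplingMode::Flat);
/// }
/// ```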
#[derive(Debug, Clone, Copy)]
pub enum SamplingMode {
    /// Criterion.rs should choose a sampling mode automatically. This is the default, and
    /// is recommended for most users and most benchmarks.
    Auto,

    /// Scale the iteration count in each sample linearly. This is suitable for most
    /// benchmarks, but it tends to require many iterations, which can make it very slow
    /// for benchmarks with long-running routines.
    Linear,

    /// Keep the iteration count the same for all samples. This is not recommended, as it
    /// affects the statistics that Criterion.rs can compute. However, it requires fewer
    /// iterations than the linear method, making it suitable for very long-running
    /// benchmarks where statistical precision matters less than total run time.
    Flat,
}
impl SamplingMode {
    pub(crate) fn choose_sampling_mode(
        &self,
        warmup_mean_execution_time: f64,
        sample_count: u64,
        target_time: f64,
    ) -> ActualSamplingMode {
        match self {
            SamplingMode::Linear => ActualSamplingMode::Linear,
            SamplingMode::Flat => ActualSamplingMode::Flat,
            SamplingMode::Auto => {
                // Estimate the total run time under linear sampling...
                let total_runs = sample_count * (sample_count + 1) / 2;
                let d =
                    (target_time / warmup_mean_execution_time / total_runs as f64).ceil() as u64;
                let expected_ns = total_runs as f64 * d as f64 * warmup_mean_execution_time;

                // ...and fall back to flat sampling if it would overshoot the target
                // time by more than 2x.
                if expected_ns > (2.0 * target_time) {
                    ActualSamplingMode::Flat
                } else {
                    ActualSamplingMode::Linear
                }
            }
        }
    }
}

#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub(crate) enum ActualSamplingMode {
    Linear,
    Flat,
}
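
// How the two modes assign per-sample iteration counts, with hypothetical numbers:
// for n = 100 samples, a warm-up mean execution time of 10 ns, and a 5 s target time,
// linear sampling runs d, 2*d, ..., 100*d iterations per sample, where
// d = ceil(5e9 / 10 / (100 * 101 / 2)) = 99_010, while flat sampling runs the same
// count, ceil(5e9 / 100 / 10) = 5_000_000 iterations, in every sample.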
impl ActualSamplingMode {
    pub(crate) fn iteration_counts(
        &self,
        warmup_mean_execution_time: f64,
        sample_count: u64,
        target_time: &Duration,
    ) -> Vec<u64> {
        match self {
            ActualSamplingMode::Linear => {
                let n = sample_count;
                let met = warmup_mean_execution_time;
                let m_ns = target_time.as_nanos();
                // Solve: [d + 2*d + 3*d + ... + n*d] * met = m_ns for d.
                let total_runs = n * (n + 1) / 2;
                let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1);
                let expected_ns = total_runs as f64 * d as f64 * met;

                if d == 1 {
                    let recommended_sample_size =
                        ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met);
                    let actual_time = Duration::from_nanos(expected_ns as u64);
                    eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
                            n, target_time, actual_time);

                    if recommended_sample_size != n {
                        eprintln!(
                            ", enable flat sampling, or reduce sample count to {}.",
                            recommended_sample_size
                        );
                    } else {
                        eprintln!(" or enable flat sampling.");
                    }
                }

                (1..(n + 1)).map(|a| a * d).collect::<Vec<u64>>()
            }
            ActualSamplingMode::Flat => {
                let n = sample_count;
                let met = warmup_mean_execution_time;
                let m_ns = target_time.as_nanos() as f64;
                let time_per_sample = m_ns / (n as f64);
                // This is pretty simplistic; we could do something smarter to fit into
                // the allotted time.
                let iterations_per_sample = ((time_per_sample / met).ceil() as u64).max(1);

                let expected_ns = met * (iterations_per_sample * n) as f64;

                if iterations_per_sample == 1 {
                    let recommended_sample_size =
                        ActualSamplingMode::recommend_flat_sample_size(m_ns, met);
                    let actual_time = Duration::from_nanos(expected_ns as u64);
                    eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
                            n, target_time, actual_time);

                    if recommended_sample_size != n {
                        eprintln!(", or reduce sample count to {}.", recommended_sample_size);
                    } else {
                        eprintln!(".");
                    }
                }

                vec![iterations_per_sample; n as usize]
            }
        }
    }

    fn is_linear(&self) -> bool {
        matches!(self, ActualSamplingMode::Linear)
    }

    fn recommend_linear_sample_size(target_time: f64, met: f64) -> u64 {
        // Linear sampling with d = 1 needs n(n+1)/2 * met ns in total, so the largest
        // workable n solves the quadratic n^2 + n - 2c = 0 where c = target_time / met.
        // The simplified estimate below drops the constant terms from the quadratic
        // formula, which errs on the side of recommending fewer samples.
        let c = target_time / met;
        let sample_size = (-1.0 + (4.0 * c).sqrt()) / 2.0;
        let sample_size = sample_size as u64;

        // Round down to the nearest 10 to give a margin of safety.
        let sample_size = (sample_size / 10) * 10;

        // Clamp to at least 10, the minimum sample size allowed by Criterion.rs.
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }

    fn recommend_flat_sample_size(target_time: f64, met: f64) -> u64 {
        let sample_size = (target_time / met) as u64;

        // Round down to the nearest 10 to give a margin of safety.
        let sample_size = (sample_size / 10) * 10;

        // Clamp to at least 10, the minimum sample size allowed by Criterion.rs.
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct SavedSample {
    sampling_mode: ActualSamplingMode,
    iters: Vec<f64>,
    times: Vec<f64>,
}

/// Runs each of the given benchmark functions, then prints the final summary.
#[doc(hidden)]
pub fn runner(benches: &[&dyn Fn()]) {
    for bench in benches {
        bench();
    }
    Criterion::default().configure_from_args().final_summary();
}