#![warn(missing_docs)]
#![warn(bare_trait_objects)]
#![cfg_attr(feature = "real_blackbox", feature(test))]
#![cfg_attr(
    feature = "cargo-clippy",
    allow(
        clippy::just_underscores_and_digits,
        clippy::transmute_ptr_to_ptr,
        clippy::manual_non_exhaustive,
    )
)]

#[cfg(all(feature = "rayon", target_arch = "wasm32"))]
compile_error!("Rayon cannot be used when targeting wasm32. Try disabling default features.");

#[cfg(test)]
extern crate approx;

#[cfg(test)]
extern crate quickcheck;

use is_terminal::IsTerminal;
use regex::Regex;

#[cfg(feature = "real_blackbox")]
extern crate test;

#[macro_use]
extern crate serde_derive;

#[macro_use]
// Needs to be declared before the other modules in order to be usable there.
mod macros_private;
#[macro_use]
mod analysis;
mod benchmark;
#[macro_use]
mod benchmark_group;
pub mod async_executor;
mod bencher;
mod connection;
#[cfg(feature = "csv_output")]
mod csv_report;
mod error;
mod estimate;
mod format;
mod fs;
mod html;
mod kde;
mod macros;
pub mod measurement;
mod plot;
pub mod profiler;
mod report;
mod routine;
mod stats;

use std::cell::RefCell;
use std::collections::HashSet;
use std::default::Default;
use std::env;
use std::io::stdout;
use std::net::TcpStream;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::sync::{Mutex, MutexGuard};
use std::time::Duration;

use criterion_plot::{Version, VersionError};
use once_cell::sync::Lazy;

use crate::benchmark::BenchmarkConfig;
use crate::connection::Connection;
use crate::connection::OutgoingMessage;
use crate::html::Html;
use crate::measurement::{Measurement, WallTime};
#[cfg(feature = "plotters")]
use crate::plot::PlottersBackend;
use crate::plot::{Gnuplot, Plotter};
use crate::profiler::{ExternalProfiler, Profiler};
use crate::report::{BencherReport, CliReport, CliVerbosity, Report, ReportContext, Reports};

#[cfg(feature = "async")]
pub use crate::bencher::AsyncBencher;
pub use crate::bencher::Bencher;
pub use crate::benchmark_group::{BenchmarkGroup, BenchmarkId};

static DEBUG_ENABLED: Lazy<bool> = Lazy::new(|| std::env::var_os("CRITERION_DEBUG").is_some());
static GNUPLOT_VERSION: Lazy<Result<Version, VersionError>> = Lazy::new(criterion_plot::version);
static DEFAULT_PLOTTING_BACKEND: Lazy<PlottingBackend> = Lazy::new(|| match &*GNUPLOT_VERSION {
    Ok(_) => PlottingBackend::Gnuplot,
    #[cfg(feature = "plotters")]
    Err(e) => {
        match e {
            VersionError::Exec(_) => eprintln!("Gnuplot not found, using plotters backend"),
            e => eprintln!(
                "Gnuplot not found or not usable, using plotters backend\n{}",
                e
            ),
        };
        PlottersBackend::Plotters
    }
    #[cfg(not(feature = "plotters"))]
    Err(_) => PlottingBackend::None,
});
static CARGO_CRITERION_CONNECTION: Lazy<Option<Mutex<Connection>>> =
    Lazy::new(|| match std::env::var("CARGO_CRITERION_PORT") {
        Ok(port_str) => {
            let port: u16 = port_str.parse().ok()?;
            let stream = TcpStream::connect(("localhost", port)).ok()?;
            Some(Mutex::new(Connection::new(stream).ok()?))
        }
        Err(_) => None,
    });
static DEFAULT_OUTPUT_DIRECTORY: Lazy<PathBuf> = Lazy::new(|| {
    // Set the output directory to (in descending order of preference):
    // - $CRITERION_HOME
    // - $CARGO_TARGET_DIR/criterion
    // - the cargo target dir from `cargo metadata`
    // - ./target/criterion
    if let Some(value) = env::var_os("CRITERION_HOME") {
        PathBuf::from(value)
    } else if let Some(path) = cargo_target_directory() {
        path.join("criterion")
    } else {
        PathBuf::from("target/criterion")
    }
});

fn debug_enabled() -> bool {
    *DEBUG_ENABLED
}
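
/// A function that is opaque to the optimizer, used to prevent the compiler
/// from optimizing away computations in a benchmark.
///
/// When the `real_blackbox` feature is enabled, this is an alias for
/// `test::black_box`.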
#[cfg(feature = "real_blackbox")]
pub fn black_box<T>(dummy: T) -> T {
    test::black_box(dummy)
}
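
/// A function that is opaque to the optimizer, used to prevent the compiler
/// from optimizing away computations in a benchmark.
///
/// Without the `real_blackbox` feature, this falls back to reading the value
/// through a volatile pointer, which forces the value to be materialized but
/// may add a small amount of overhead.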
#[cfg(not(feature = "real_blackbox"))]
pub fn black_box<T>(dummy: T) -> T {
    unsafe {
        // A volatile read forces the compiler to assume `dummy` is used, so the
        // computation that produced it cannot be optimized away.
        let ret = std::ptr::read_volatile(&dummy);
        // `read_volatile` made a bitwise copy; forget the original so its
        // destructor does not run twice.
        std::mem::forget(dummy);
        ret
    }
}
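
/// Argument to [`Bencher::iter_batched`] and [`Bencher::iter_batched_ref`]
/// which controls the batch size.
///
/// Larger batches amortize the measurement overhead across more iterations,
/// but require keeping more inputs in memory at once; see the variants below
/// for the exact batch-size rules.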
#[derive(Debug, Eq, PartialEq, Copy, Hash, Clone)]
pub enum BatchSize {
    /// Use a batch size of roughly one tenth of the sample's iteration count.
    SmallInput,

    /// Use a batch size of roughly one thousandth of the sample's iteration count.
    LargeInput,

    /// Use exactly one iteration per batch, so the setup routine runs before
    /// every iteration.
    PerIteration,

    /// Divide the iterations into the given number of batches.
    NumBatches(u64),

    /// Use batches of exactly the given number of iterations.
    NumIterations(u64),

    #[doc(hidden)]
    __NonExhaustive,
}
impl BatchSize {
    /// Convert to a number of iterations per batch.
    fn iters_per_batch(self, iters: u64) -> u64 {
        match self {
            // `(x + n - 1) / n` is ceiling division, so no iterations are lost.
            BatchSize::SmallInput => (iters + 10 - 1) / 10,
            BatchSize::LargeInput => (iters + 1000 - 1) / 1000,
            BatchSize::PerIteration => 1,
            BatchSize::NumBatches(batches) => (iters + batches - 1) / batches,
            BatchSize::NumIterations(size) => size,
            BatchSize::__NonExhaustive => panic!("__NonExhaustive is not a valid BatchSize."),
        }
    }
}
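
/// Baseline describes how the baseline directory is handled.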
#[derive(Debug, Clone, Copy)]
pub enum Baseline {
    /// Compare against a previously saved baseline if one exists; benchmarks
    /// without a saved baseline are simply not compared.
    CompareLenient,
    /// Compare against a previously saved baseline, failing if it does not exist.
    CompareStrict,
    /// Save the new results, overwriting any previous baseline of the same name.
    Save,
    /// Discard the results entirely.
    Discard,
}
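
/// Enum used to select the plotting backend.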
#[derive(Debug, Clone, Copy)]
pub enum PlottingBackend {
    /// Plot using the gnuplot command-line tool.
    Gnuplot,
    /// Plot using the pure-Rust plotters library.
    Plotters,
    /// Do not generate any plots.
    None,
}
impl PlottingBackend {
    fn create_plotter(&self) -> Option<Box<dyn Plotter>> {
        match self {
            PlottingBackend::Gnuplot => Some(Box::<Gnuplot>::default()),
            #[cfg(feature = "plotters")]
            PlottingBackend::Plotters => Some(Box::<PlottersBackend>::default()),
            #[cfg(not(feature = "plotters"))]
            PlottingBackend::Plotters => panic!("Criterion was built without plotters support."),
            PlottingBackend::None => None,
        }
    }
}

#[derive(Debug, Clone)]
pub(crate) enum Mode {
    /// Run benchmarks and analyze the results.
    Benchmark,
    /// List the benchmarks in the given format instead of running them.
    List(ListFormat),
    /// Run each benchmark once to verify that it executes, without measuring.
    Test,
    /// Iterate each benchmark for the given duration without analysis, for profiling.
    Profile(Duration),
}
impl Mode {
    pub fn is_benchmark(&self) -> bool {
        matches!(self, Mode::Benchmark)
    }

    pub fn is_terse(&self) -> bool {
        matches!(self, Mode::List(ListFormat::Terse))
    }
}

#[derive(Debug, Clone)]
pub(crate) enum ListFormat {
    /// The default, human-readable format.
    Pretty,
    /// The terse format, which prints nothing but the benchmark names.
    Terse,
}

impl Default for ListFormat {
    fn default() -> Self {
        Self::Pretty
    }
}
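
/// Benchmark filtering support.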
#[derive(Clone, Debug)]
pub enum BenchmarkFilter {
    /// Run all benchmarks.
    AcceptAll,
    /// Run benchmarks matching this regex.
    Regex(Regex),
    /// Run the benchmark matching this string exactly.
    Exact(String),
    /// Do not run any benchmarks.
    RejectAll,
}
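
/// The benchmark manager.
///
/// `Criterion` lets you configure and execute benchmarks. Each benchmark
/// executable typically constructs one (usually via the `criterion_group!`
/// macro), optionally customizes it with the builder-style methods below, and
/// then uses it to run benchmarks.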
pub struct Criterion<M: Measurement = WallTime> {
    config: BenchmarkConfig,
    filter: BenchmarkFilter,
    report: Reports,
    output_directory: PathBuf,
    baseline_directory: String,
    baseline: Baseline,
    load_baseline: Option<String>,
    all_directories: HashSet<String>,
    all_titles: HashSet<String>,
    measurement: M,
    profiler: Box<RefCell<dyn Profiler>>,
    connection: Option<MutexGuard<'static, Connection>>,
    mode: Mode,
}

/// Returns the Cargo target directory, possibly calling `cargo metadata` to
/// figure it out.
fn cargo_target_directory() -> Option<PathBuf> {
    #[derive(Deserialize)]
    struct Metadata {
        target_directory: PathBuf,
    }

    env::var_os("CARGO_TARGET_DIR")
        .map(PathBuf::from)
        .or_else(|| {
            let output = Command::new(env::var_os("CARGO")?)
                .args(["metadata", "--format-version", "1"])
                .output()
                .ok()?;
            let metadata: Metadata = serde_json::from_slice(&output.stdout).ok()?;
            Some(metadata.target_directory)
        })
}

impl Default for Criterion {
    /// Creates a benchmark manager with the following default settings:
    ///
    /// - Sample size: 100 measurements
    /// - Warm-up time: 3 s
    /// - Measurement time: 5 s
    /// - Bootstrap size: 100 000 resamples
    /// - Noise threshold: 0.01 (1%)
    /// - Confidence level: 0.95
    /// - Significance level: 0.05
    /// - Plotting: enabled, if a plotting backend is available
    /// - No filter
    fn default() -> Criterion {
        let reports = Reports {
            cli_enabled: true,
            cli: CliReport::new(false, false, CliVerbosity::Normal),
            bencher_enabled: false,
            bencher: BencherReport,
            html: DEFAULT_PLOTTING_BACKEND.create_plotter().map(Html::new),
            csv_enabled: cfg!(feature = "csv_output"),
        };

        let mut criterion = Criterion {
            config: BenchmarkConfig {
                confidence_level: 0.95,
                measurement_time: Duration::from_secs(5),
                noise_threshold: 0.01,
                nresamples: 100_000,
                sample_size: 100,
                significance_level: 0.05,
                warm_up_time: Duration::from_secs(3),
                sampling_mode: SamplingMode::Auto,
                quick_mode: false,
            },
            filter: BenchmarkFilter::AcceptAll,
            report: reports,
            baseline_directory: "base".to_owned(),
            baseline: Baseline::Save,
            load_baseline: None,
            output_directory: DEFAULT_OUTPUT_DIRECTORY.clone(),
            all_directories: HashSet::new(),
            all_titles: HashSet::new(),
            measurement: WallTime,
            profiler: Box::new(RefCell::new(ExternalProfiler)),
            connection: CARGO_CRITERION_CONNECTION
                .as_ref()
                .map(|mtx| mtx.lock().unwrap()),
            mode: Mode::Benchmark,
        };

        if criterion.connection.is_some() {
            // Disable all reports when connected to cargo-criterion; it does the reporting.
            criterion.report.cli_enabled = false;
            criterion.report.bencher_enabled = false;
            criterion.report.csv_enabled = false;
            criterion.report.html = None;
        }
        criterion
    }
}

impl<M: Measurement> Criterion<M> {
    /// Changes the measurement for the benchmarks run with this runner. See the
    /// [`Measurement`] trait for more details.
    pub fn with_measurement<M2: Measurement>(self, m: M2) -> Criterion<M2> {
        // Can't use struct-update syntax here because the measurement type changes.
        Criterion {
            config: self.config,
            filter: self.filter,
            report: self.report,
            baseline_directory: self.baseline_directory,
            baseline: self.baseline,
            load_baseline: self.load_baseline,
            output_directory: self.output_directory,
            all_directories: self.all_directories,
            all_titles: self.all_titles,
            measurement: m,
            profiler: self.profiler,
            connection: self.connection,
            mode: self.mode,
        }
    }

    /// Changes the internal profiler for the benchmarks run with this runner.
    /// See the [`Profiler`] trait for more details.
    #[must_use]
    pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
        Criterion {
            profiler: Box::new(RefCell::new(p)),
            ..self
        }
    }

    /// Set the [`PlottingBackend`] used to render plots.
    ///
    /// # Panics
    ///
    /// Panics if the gnuplot backend is requested but gnuplot is not available.
    #[must_use]
    pub fn plotting_backend(mut self, backend: PlottingBackend) -> Criterion<M> {
        if let PlottingBackend::Gnuplot = backend {
            assert!(
                GNUPLOT_VERSION.is_ok(),
                "Gnuplot plotting backend was requested, but gnuplot is not available. \
                 To continue, either install Gnuplot or allow Criterion.rs to fall back \
                 to using plotters."
            );
        }

        self.report.html = backend.create_plotter().map(Html::new);
        self
    }

    /// Changes the default sample size (number of measurements) for benchmarks
    /// run with this runner.
    ///
    /// # Panics
    ///
    /// Panics if `n` is less than 10.
    #[must_use]
    pub fn sample_size(mut self, n: usize) -> Criterion<M> {
        assert!(n >= 10);

        self.config.sample_size = n;
        self
    }

    /// Changes the default warm-up time for benchmarks run with this runner.
    ///
    /// # Panics
    ///
    /// Panics if the given duration is zero.
    #[must_use]
    pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
        assert!(dur.as_nanos() > 0);

        self.config.warm_up_time = dur;
        self
    }

    /// Changes the default measurement time for benchmarks run with this runner.
    ///
    /// # Panics
    ///
    /// Panics if the given duration is zero.
    #[must_use]
    pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
        assert!(dur.as_nanos() > 0);

        self.config.measurement_time = dur;
        self
    }

    /// Changes the default number of bootstrap resamples for benchmarks run with
    /// this runner.
    ///
    /// # Panics
    ///
    /// Panics if `n` is zero.
    #[must_use]
    pub fn nresamples(mut self, n: usize) -> Criterion<M> {
        assert!(n > 0);
        if n <= 1000 {
            eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
        }

        self.config.nresamples = n;
        self
    }

    /// Changes the default noise threshold for benchmarks run with this runner.
    /// Changes smaller than this threshold are reported as noise rather than as
    /// a regression or improvement.
    ///
    /// # Panics
    ///
    /// Panics if the threshold is negative.
    #[must_use]
    pub fn noise_threshold(mut self, threshold: f64) -> Criterion<M> {
        assert!(threshold >= 0.0);

        self.config.noise_threshold = threshold;
        self
    }

    /// Changes the default confidence level for benchmarks run with this runner.
    /// The confidence level is the desired probability that the true runtime lies
    /// within the estimated confidence interval.
    ///
    /// # Panics
    ///
    /// Panics if the confidence level is not between 0 and 1 (exclusive).
    #[must_use]
    pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
        assert!(cl > 0.0 && cl < 1.0);
        if cl < 0.5 {
            eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
        }

        self.config.confidence_level = cl;
        self
    }

    /// Changes the default significance level for benchmarks run with this
    /// runner. Lower values reduce false positives (changes reported where none
    /// exists) at the cost of more false negatives.
    ///
    /// # Panics
    ///
    /// Panics if the significance level is not between 0 and 1 (exclusive).
    #[must_use]
    pub fn significance_level(mut self, sl: f64) -> Criterion<M> {
        assert!(sl > 0.0 && sl < 1.0);

        self.config.significance_level = sl;
        self
    }

    /// Enables plotting.
    ///
    /// # Panics
    ///
    /// Panics if no default plotting backend is available.
    #[must_use]
    pub fn with_plots(mut self) -> Criterion<M> {
        // If running under cargo-criterion, don't set the plotting backend; it does the plotting.
        if self.connection.is_none() && self.report.html.is_none() {
            let default_backend = DEFAULT_PLOTTING_BACKEND.create_plotter();
            if let Some(backend) = default_backend {
                self.report.html = Some(Html::new(backend));
            } else {
                panic!("Cannot find a default plotting backend!");
            }
        }
        self
    }

    /// Disables plotting.
    #[must_use]
    pub fn without_plots(mut self) -> Criterion<M> {
        self.report.html = None;
        self
    }

    /// Names an explicit baseline and enables overwriting the previous results.
    #[must_use]
    pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
        self.baseline_directory = baseline;
        self.baseline = Baseline::Save;
        self
    }

    /// Names an explicit baseline and disables overwriting the previous results.
    #[must_use]
    pub fn retain_baseline(mut self, baseline: String, strict: bool) -> Criterion<M> {
        self.baseline_directory = baseline;
        self.baseline = if strict {
            Baseline::CompareStrict
        } else {
            Baseline::CompareLenient
        };
        self
    }

    /// Filters the benchmarks. Only benchmarks whose names contain a match for
    /// the given regular expression will be executed.
    ///
    /// # Panics
    ///
    /// Panics if the given string is not a valid regular expression.
    #[must_use]
    pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
        let filter_text = filter.into();
        let filter = Regex::new(&filter_text).unwrap_or_else(|err| {
            panic!(
                "Unable to parse '{}' as a regular expression: {}",
                filter_text, err
            )
        });
        self.filter = BenchmarkFilter::Regex(filter);

        self
    }

    /// Only run benchmarks that match the given [`BenchmarkFilter`].
    pub fn with_benchmark_filter(mut self, filter: BenchmarkFilter) -> Criterion<M> {
        self.filter = filter;

        self
    }

    /// Overrides the automatic detection and explicitly enables or disables
    /// colored CLI output.
    #[must_use]
    pub fn with_output_color(mut self, enabled: bool) -> Criterion<M> {
        self.report.cli.enable_text_coloring = enabled;
        self
    }

    /// Set the output directory (currently for testing only).
    #[must_use]
    #[doc(hidden)]
    pub fn output_directory(mut self, path: &Path) -> Criterion<M> {
        self.output_directory = path.to_owned();

        self
    }

    /// Set the profile time (currently for testing only).
    #[must_use]
    #[doc(hidden)]
    pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
        match profile_time {
            Some(time) => self.mode = Mode::Profile(time),
            None => self.mode = Mode::Benchmark,
        }

        self
    }

    /// Generate the final summary at the end of a run.
    #[doc(hidden)]
    pub fn final_summary(&self) {
        if !self.mode.is_benchmark() {
            return;
        }

        let report_context = ReportContext {
            output_directory: self.output_directory.clone(),
            plot_config: PlotConfiguration::default(),
        };

        self.report.final_summary(&report_context);
    }

    /// Configures this `Criterion` struct based on the command-line arguments
    /// passed to this process.
    #[must_use]
    #[cfg_attr(feature = "cargo-clippy", allow(clippy::cognitive_complexity))]
    pub fn configure_from_args(mut self) -> Criterion<M> {
        use clap::{value_parser, Arg, Command};
        let matches = Command::new("Criterion Benchmark")
            .arg(Arg::new("FILTER")
                .help("Skip benchmarks whose names do not contain FILTER.")
                .index(1))
            .arg(Arg::new("color")
                .short('c')
                .long("color")
                .alias("colour")
                .value_parser(["auto", "always", "never"])
                .default_value("auto")
                .help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
            .arg(Arg::new("verbose")
                .short('v')
                .long("verbose")
                .num_args(0)
                .help("Print additional statistical information."))
            .arg(Arg::new("quiet")
                .long("quiet")
                .num_args(0)
                .conflicts_with("verbose")
                .help("Print only the benchmark results."))
            .arg(Arg::new("noplot")
                .short('n')
                .long("noplot")
                .num_args(0)
                .help("Disable plot and HTML generation."))
            .arg(Arg::new("save-baseline")
                .short('s')
                .long("save-baseline")
                .default_value("base")
                .help("Save results under a named baseline."))
            .arg(Arg::new("discard-baseline")
                .long("discard-baseline")
                .num_args(0)
                .conflicts_with_all(["save-baseline", "baseline", "baseline-lenient"])
                .help("Discard benchmark results."))
            .arg(Arg::new("baseline")
                .short('b')
                .long("baseline")
                .conflicts_with_all(["save-baseline", "baseline-lenient"])
                .help("Compare to a named baseline. If any benchmarks do not have the specified baseline this command fails."))
            .arg(Arg::new("baseline-lenient")
                .long("baseline-lenient")
                .conflicts_with_all(["save-baseline", "baseline"])
                .help("Compare to a named baseline. If any benchmarks do not have the specified baseline then just those benchmarks are not compared against the baseline while every other benchmark is compared against the baseline."))
            .arg(Arg::new("list")
                .long("list")
                .num_args(0)
                .help("List all benchmarks")
                .conflicts_with_all(["test", "profile-time"]))
            .arg(Arg::new("format")
                .long("format")
                .value_parser(["pretty", "terse"])
                .default_value("pretty")
                // Note that libtest's --format also works during test execution, but
                // Criterion.rs does not support that at the moment.
                .help("Output formatting"))
            .arg(Arg::new("ignored")
                .long("ignored")
                .num_args(0)
                .help("List or run ignored benchmarks (currently means skip all benchmarks)"))
            .arg(Arg::new("exact")
                .long("exact")
                .num_args(0)
                .help("Run benchmarks that exactly match the provided filter"))
            .arg(Arg::new("profile-time")
                .long("profile-time")
                .value_parser(value_parser!(f64))
                .help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
                .conflicts_with_all(["test", "list"]))
            .arg(Arg::new("load-baseline")
                .long("load-baseline")
                .conflicts_with("profile-time")
                .requires("baseline")
                .help("Load a previous baseline instead of sampling new data."))
            .arg(Arg::new("sample-size")
                .long("sample-size")
                .value_parser(value_parser!(usize))
                .help(format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
            .arg(Arg::new("warm-up-time")
                .long("warm-up-time")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
            .arg(Arg::new("measurement-time")
                .long("measurement-time")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
            .arg(Arg::new("nresamples")
                .long("nresamples")
                .value_parser(value_parser!(usize))
                .help(format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
            .arg(Arg::new("noise-threshold")
                .long("noise-threshold")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
            .arg(Arg::new("confidence-level")
                .long("confidence-level")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
            .arg(Arg::new("significance-level")
                .long("significance-level")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
            .arg(Arg::new("quick")
                .long("quick")
                .num_args(0)
                .conflicts_with("sample-size")
                .help(format!("Benchmark only until the significance level has been reached [default: {}]", self.config.quick_mode)))
            .arg(Arg::new("test")
                .hide(true)
                .long("test")
                .num_args(0)
                .help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.")
                .conflicts_with_all(["list", "profile-time"]))
            .arg(Arg::new("bench")
                .hide(true)
                .long("bench")
                .num_args(0))
            .arg(Arg::new("plotting-backend")
                .long("plotting-backend")
                .value_parser(["gnuplot", "plotters"])
                .help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
            .arg(Arg::new("output-format")
                .long("output-format")
                .value_parser(["criterion", "bencher"])
                .default_value("criterion")
                .help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate."))
            .arg(Arg::new("nocapture")
                .long("nocapture")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("show-output")
                .long("show-output")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("version")
                .hide(true)
                .short('V')
                .long("version")
                .num_args(0))
            .after_help("
This executable is a Criterion.rs benchmark.
See https://github.com/bheisler/criterion.rs for more details.

To enable debug output, define the environment variable CRITERION_DEBUG.
Criterion.rs will output more debug information and will save the gnuplot
scripts alongside the generated plots.

To test that the benchmarks work, run `cargo test --benches`

NOTE: If you see an 'unrecognized option' error using any of the options above, see:
https://bheisler.github.io/criterion.rs/book/faq.html
")
            .get_matches();

        if self.connection.is_some() {
            if let Some(color) = matches.get_one::<String>("color") {
                if color != "auto" {
                    eprintln!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
                }
            }
            if matches.get_flag("verbose") {
                eprintln!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
            }
            if matches.get_flag("noplot") {
                eprintln!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
            }
            if let Some(backend) = matches.get_one::<String>("plotting-backend") {
                eprintln!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
            }
            if let Some(format) = matches.get_one::<String>("output-format") {
                if format != "criterion" {
                    eprintln!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
                }
            }

            if matches.contains_id("baseline")
                || matches
                    .get_one::<String>("save-baseline")
                    .map_or(false, |base| base != "base")
                || matches.contains_id("load-baseline")
            {
                eprintln!("Error: baselines are not supported when running with cargo-criterion.");
                std::process::exit(1);
            }
        }

        let bench = matches.get_flag("bench");
        let test = matches.get_flag("test");
        let test_mode = match (bench, test) {
            (true, true) => true,   // cargo bench -- --test should run tests
            (true, false) => false, // cargo bench should run benchmarks
            (false, _) => true,     // cargo test --benches should run tests
        };

        self.mode = if matches.get_flag("list") {
            let list_format = match matches
                .get_one::<String>("format")
                .expect("a default value was provided for this")
                .as_str()
            {
                "pretty" => ListFormat::Pretty,
                "terse" => ListFormat::Terse,
                other => unreachable!(
                    "unrecognized value for --format that isn't part of possible-values: {}",
                    other
                ),
            };
            Mode::List(list_format)
        } else if test_mode {
            Mode::Test
        } else if let Some(&num_seconds) = matches.get_one("profile-time") {
            if num_seconds < 1.0 {
                eprintln!("Profile time must be at least one second.");
                std::process::exit(1);
            }

            Mode::Profile(Duration::from_secs_f64(num_seconds))
        } else {
            Mode::Benchmark
        };

        // Drop the connection to cargo-criterion if benchmarks aren't actually going to run.
        if !self.mode.is_benchmark() {
            self.connection = None;
        }

        let filter = if matches.get_flag("ignored") {
            // --ignored overwrites any name-based filters passed in.
            BenchmarkFilter::RejectAll
        } else if let Some(filter) = matches.get_one::<String>("FILTER") {
            if matches.get_flag("exact") {
                BenchmarkFilter::Exact(filter.to_owned())
            } else {
                let regex = Regex::new(filter).unwrap_or_else(|err| {
                    panic!(
                        "Unable to parse '{}' as a regular expression: {}",
                        filter, err
                    )
                });
                BenchmarkFilter::Regex(regex)
            }
        } else {
            BenchmarkFilter::AcceptAll
        };
        self = self.with_benchmark_filter(filter);

        match matches.get_one("plotting-backend").map(String::as_str) {
            // Use plotting_backend() here to re-use the panic behavior if gnuplot is not available.
            Some("gnuplot") => self = self.plotting_backend(PlottingBackend::Gnuplot),
            Some("plotters") => self = self.plotting_backend(PlottingBackend::Plotters),
            Some(val) => panic!("Unexpected plotting backend '{}'", val),
            None => {}
        }

        if matches.get_flag("noplot") {
            self = self.without_plots();
        }

        if let Some(dir) = matches.get_one::<String>("save-baseline") {
            self.baseline = Baseline::Save;
            self.baseline_directory = dir.to_owned()
        }
        if matches.get_flag("discard-baseline") {
            self.baseline = Baseline::Discard;
        }
        if let Some(dir) = matches.get_one::<String>("baseline") {
            self.baseline = Baseline::CompareStrict;
            self.baseline_directory = dir.to_owned();
        }
        if let Some(dir) = matches.get_one::<String>("baseline-lenient") {
            self.baseline = Baseline::CompareLenient;
            self.baseline_directory = dir.to_owned();
        }

        if self.connection.is_some() {
            // Disable all reports when connected to cargo-criterion; it does the reporting.
            self.report.cli_enabled = false;
            self.report.bencher_enabled = false;
            self.report.csv_enabled = false;
            self.report.html = None;
        } else {
            match matches.get_one("output-format").map(String::as_str) {
                Some("bencher") => {
                    self.report.bencher_enabled = true;
                    self.report.cli_enabled = false;
                }
                _ => {
                    let verbose = matches.get_flag("verbose");
                    let verbosity = if verbose {
                        CliVerbosity::Verbose
                    } else if matches.get_flag("quiet") {
                        CliVerbosity::Quiet
                    } else {
                        CliVerbosity::Normal
                    };
                    let stdout_isatty = stdout().is_terminal();
                    let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
                    let enable_text_coloring;
                    match matches.get_one("color").map(String::as_str) {
                        Some("always") => {
                            enable_text_coloring = true;
                        }
                        Some("never") => {
                            enable_text_coloring = false;
                            enable_text_overwrite = false;
                        }
                        _ => enable_text_coloring = stdout_isatty,
                    };
                    self.report.bencher_enabled = false;
                    self.report.cli_enabled = true;
                    self.report.cli =
                        CliReport::new(enable_text_overwrite, enable_text_coloring, verbosity);
                }
            };
        }

        if let Some(dir) = matches.get_one::<String>("load-baseline") {
            self.load_baseline = Some(dir.to_owned());
        }

        if let Some(&num_size) = matches.get_one("sample-size") {
            assert!(num_size >= 10);
            self.config.sample_size = num_size;
        }
        if let Some(&num_seconds) = matches.get_one("warm-up-time") {
            let dur = std::time::Duration::from_secs_f64(num_seconds);
            assert!(dur.as_nanos() > 0);

            self.config.warm_up_time = dur;
        }
        if let Some(&num_seconds) = matches.get_one("measurement-time") {
            let dur = std::time::Duration::from_secs_f64(num_seconds);
            assert!(dur.as_nanos() > 0);

            self.config.measurement_time = dur;
        }
        if let Some(&num_resamples) = matches.get_one("nresamples") {
            assert!(num_resamples > 0);

            self.config.nresamples = num_resamples;
        }
        if let Some(&num_noise_threshold) = matches.get_one("noise-threshold") {
            assert!(num_noise_threshold > 0.0);

            self.config.noise_threshold = num_noise_threshold;
        }
        if let Some(&num_confidence_level) = matches.get_one("confidence-level") {
            assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);

            self.config.confidence_level = num_confidence_level;
        }
        if let Some(&num_significance_level) = matches.get_one("significance-level") {
            assert!(num_significance_level > 0.0 && num_significance_level < 1.0);

            self.config.significance_level = num_significance_level;
        }

        if matches.get_flag("quick") {
            self.config.quick_mode = true;
        }

        self
    }

    fn filter_matches(&self, id: &str) -> bool {
        match &self.filter {
            BenchmarkFilter::AcceptAll => true,
            BenchmarkFilter::Regex(regex) => regex.is_match(id),
            BenchmarkFilter::Exact(exact) => id == exact,
            BenchmarkFilter::RejectAll => false,
        }
    }

    /// Returns true iff the benchmark results should be saved to disk as a new baseline.
    fn should_save_baseline(&self) -> bool {
        self.connection.is_none()
            && self.load_baseline.is_none()
            && !matches!(self.baseline, Baseline::Discard)
    }
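
    /// Returns a benchmark group. All benchmarks performed using a benchmark
    /// group will be grouped together in the final report.
    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```no_run
    /// use criterion::{criterion_group, criterion_main, Criterion};
    ///
    /// fn bench_simple(c: &mut Criterion) {
    ///     let mut group = c.benchmark_group("My Group");
    ///
    ///     // Now we can perform benchmarks with this group
    ///     group.bench_function("Bench 1", |b| b.iter(|| 1));
    ///     group.bench_function("Bench 2", |b| b.iter(|| 2));
    ///
    ///     group.finish();
    /// }
    /// criterion_group!(benches, bench_simple);
    /// criterion_main!(benches);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the group name is empty.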
    pub fn benchmark_group<S: Into<String>>(&mut self, group_name: S) -> BenchmarkGroup<'_, M> {
        let group_name = group_name.into();
        assert!(!group_name.is_empty(), "Group name must not be empty.");

        if let Some(conn) = &self.connection {
            conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: &group_name })
                .unwrap();
        }

        BenchmarkGroup::new(self, group_name)
    }
}
impl<M> Criterion<M>
where
    M: Measurement + 'static,
{
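    /// Benchmarks a function. For comparing multiple functions, see
    /// [`benchmark_group`](Self::benchmark_group).
    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```no_run
    /// use criterion::{criterion_group, criterion_main, Criterion};
    ///
    /// fn bench(c: &mut Criterion) {
    ///     // The closure receives a `Bencher`, which runs the measured code
    ///     // inside its timing loop via `iter`.
    ///     c.bench_function("function_name", |b| {
    ///         b.iter(|| {
    ///             // Code to benchmark goes here
    ///         })
    ///     });
    /// }
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```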
1195 pub fn bench_function<F>(&mut self, id: &str, f: F) -> &mut Criterion<M>
1217 where
1218 F: FnMut(&mut Bencher<'_, M>),
1219 {
1220 self.benchmark_group(id)
1221 .bench_function(BenchmarkId::no_function(), f);
1222 self
1223 }
1224
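
    /// Benchmarks a function with an input. For comparing multiple functions
    /// or inputs, see [`benchmark_group`](Self::benchmark_group).
    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```no_run
    /// use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
    ///
    /// fn bench(c: &mut Criterion) {
    ///     // Setup (construct data, allocate memory, etc.) happens here,
    ///     // outside of the measured closure.
    ///     let input = 5u64;
    ///     c.bench_with_input(BenchmarkId::new("function_name", input), &input, |b, &i| {
    ///         b.iter(|| i + 10)
    ///     });
    /// }
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```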
    pub fn bench_with_input<F, I>(&mut self, id: BenchmarkId, input: &I, f: F) -> &mut Criterion<M>
    where
        F: FnMut(&mut Bencher<'_, M>, &I),
    {
        // It's possible to use BenchmarkId::from_parameter to create a benchmark ID with no
        // function name. That's intended for use with BenchmarkGroups where the function name
        // isn't necessary; it can't be used here, so error out.
        let group_name = id.function_name.expect(
            "Cannot use BenchmarkId::from_parameter with Criterion::bench_with_input. \
             Consider using a BenchmarkGroup or BenchmarkId::new instead.",
        );
        // Guaranteed safe because external callers can't create benchmark IDs without a parameter.
        let parameter = id.parameter.unwrap();
        self.benchmark_group(group_name).bench_with_input(
            BenchmarkId::no_function_with_input(parameter),
            input,
            f,
        );
        self
    }
}
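
/// Enum representing different ways of measuring the throughput of benchmarked
/// code. If the throughput setting is configured for a benchmark then the
/// estimated throughput will be reported, as well as the time per iteration.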
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum Throughput {
    /// Measure throughput in terms of bytes per second. The value should be the
    /// number of bytes processed by one iteration of the benchmarked code.
    /// Typically this would be the length of an input string or `&[u8]`.
    Bytes(u64),

    /// Equivalent to `Bytes`, but reported using decimal (KB, MB, ...) rather
    /// than binary (KiB, MiB, ...) unit multiples.
    BytesDecimal(u64),

    /// Measure throughput in terms of elements per second. The value should be
    /// the number of elements processed by one iteration of the benchmarked code.
    Elements(u64),
}
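
/// Axis scaling type.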
#[derive(Debug, Clone, Copy)]
pub enum AxisScale {
    /// Axes scale linearly.
    Linear,

    /// Axes scale logarithmically.
    Logarithmic,
}
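
/// Contains the configuration options for the plots generated by a particular
/// benchmark or benchmark group.
///
/// A minimal sketch of configuring a group's plots:
///
/// ```no_run
/// use criterion::{AxisScale, Criterion, PlotConfiguration};
///
/// fn bench(c: &mut Criterion) {
///     let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
///
///     let mut group = c.benchmark_group("Group name");
///     group.plot_config(plot_config);
///     // Continue to benchmark as usual.
///     group.finish();
/// }
/// ```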
#[derive(Debug, Clone)]
pub struct PlotConfiguration {
    summary_scale: AxisScale,
}

impl Default for PlotConfiguration {
    fn default() -> PlotConfiguration {
        PlotConfiguration {
            summary_scale: AxisScale::Linear,
        }
    }
}

impl PlotConfiguration {
    /// Set the axis scale (linear or logarithmic) to use for the summary plots.
    /// Defaults to linear.
    #[must_use]
    pub fn summary_scale(mut self, new_scale: AxisScale) -> PlotConfiguration {
        self.summary_scale = new_scale;
        self
    }
}
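
/// This enum allows the user to control how Criterion.rs chooses the iteration
/// counts for each sample.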
#[derive(Debug, Clone, Copy)]
pub enum SamplingMode {
    /// Criterion.rs should choose a sampling mode automatically. This is the
    /// default and is recommended for most users and most benchmarks.
    Auto,

    /// Scale the iteration count in each sample linearly. This is suitable for
    /// most benchmarks, but it tends to require many iterations, which can make
    /// it very slow for benchmarks with long-running iterations.
    Linear,

    /// Keep the iteration count the same for all samples. This is not
    /// recommended, as it affects the statistics that Criterion.rs can compute,
    /// but it requires fewer iterations and so suits very slow benchmarks.
    Flat,
}
impl SamplingMode {
    pub(crate) fn choose_sampling_mode(
        &self,
        warmup_mean_execution_time: f64,
        sample_count: u64,
        target_time: f64,
    ) -> ActualSamplingMode {
        match self {
            SamplingMode::Linear => ActualSamplingMode::Linear,
            SamplingMode::Flat => ActualSamplingMode::Flat,
            SamplingMode::Auto => {
                // Estimate the cost of a linear sampling plan, which runs
                // 1 + 2 + ... + n = n(n+1)/2 iterations across n samples.
                let total_runs = sample_count * (sample_count + 1) / 2;
                let d =
                    (target_time / warmup_mean_execution_time / total_runs as f64).ceil() as u64;
                let expected_ns = total_runs as f64 * d as f64 * warmup_mean_execution_time;

                // Use the flat sampling mode if the expected execution time of a
                // linear plan would overshoot the target time by more than 2x.
                if expected_ns > (2.0 * target_time) {
                    ActualSamplingMode::Flat
                } else {
                    ActualSamplingMode::Linear
                }
            }
        }
    }
}

#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub(crate) enum ActualSamplingMode {
    Linear,
    Flat,
}
impl ActualSamplingMode {
    pub(crate) fn iteration_counts(
        &self,
        warmup_mean_execution_time: f64,
        sample_count: u64,
        target_time: &Duration,
    ) -> Vec<u64> {
        match self {
            ActualSamplingMode::Linear => {
                let n = sample_count;
                let met = warmup_mean_execution_time;
                let m_ns = target_time.as_nanos();
                // Solve [d + 2d + 3d + ... + nd] * met = m_ns for d, rounding up.
                let total_runs = n * (n + 1) / 2;
                let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1);
                let expected_ns = total_runs as f64 * d as f64 * met;

                if d == 1 {
                    let recommended_sample_size =
                        ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met);
                    let actual_time = Duration::from_nanos(expected_ns as u64);
                    eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
                            n, target_time, actual_time);

                    if recommended_sample_size != n {
                        eprintln!(
                            ", enable flat sampling, or reduce sample count to {}.",
                            recommended_sample_size
                        );
                    } else {
                        eprintln!(" or enable flat sampling.");
                    }
                }

                (1..(n + 1)).map(|a| a * d).collect::<Vec<u64>>()
            }
            ActualSamplingMode::Flat => {
                let n = sample_count;
                let met = warmup_mean_execution_time;
                let m_ns = target_time.as_nanos() as f64;
                let time_per_sample = m_ns / (n as f64);
                // Divide the target time evenly between the samples, rounding the
                // iteration count up so that each sample runs at least once.
                let iterations_per_sample = ((time_per_sample / met).ceil() as u64).max(1);

                let expected_ns = met * (iterations_per_sample * n) as f64;

                if iterations_per_sample == 1 {
                    let recommended_sample_size =
                        ActualSamplingMode::recommend_flat_sample_size(m_ns, met);
                    let actual_time = Duration::from_nanos(expected_ns as u64);
                    eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
                            n, target_time, actual_time);

                    if recommended_sample_size != n {
                        eprintln!(", or reduce sample count to {}.", recommended_sample_size);
                    } else {
                        eprintln!(".");
                    }
                }

                vec![iterations_per_sample; n as usize]
            }
        }
    }

    fn is_linear(&self) -> bool {
        matches!(self, ActualSamplingMode::Linear)
    }

    fn recommend_linear_sample_size(target_time: f64, met: f64) -> u64 {
        // A linear plan with d = 1 runs 1 + 2 + ... + n = n(n+1)/2 iterations, so
        // we want the largest n with n(n+1)/2 * met <= target_time. With
        // c = target_time / met, this solves the quadratic as roughly
        // n = (-1 + sqrt(4c)) / 2, which is conservative: the recommended plan
        // takes about half the target time.
        let c = target_time / met;
        let sample_size = (-1.0 + (4.0 * c).sqrt()) / 2.0;
        let sample_size = sample_size as u64;

        // Round down to the nearest ten to make the recommendation more human-friendly.
        let sample_size = (sample_size / 10) * 10;

        // Clamp to at least 10, the minimum sample size Criterion.rs allows.
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }

    fn recommend_flat_sample_size(target_time: f64, met: f64) -> u64 {
        let sample_size = (target_time / met) as u64;

        // Round down to the nearest ten to make the recommendation more human-friendly.
        let sample_size = (sample_size / 10) * 10;

        // Clamp to at least 10, the minimum sample size Criterion.rs allows.
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct SavedSample {
    sampling_mode: ActualSamplingMode,
    iters: Vec<f64>,
    times: Vec<f64>,
}

/// Runs each of the given benchmark functions and then prints the final
/// summary. Used by the `criterion_main!` macro; not intended to be called
/// directly.
#[doc(hidden)]
pub fn runner(benches: &[&dyn Fn()]) {
    for bench in benches {
        bench();
    }
    Criterion::default().configure_from_args().final_summary();
}