#![cfg_attr(feature = "asm_black_box", feature(asm))]
#![cfg_attr(feature = "capture", feature(internal_output_capture))]
extern crate getopts;
extern crate rustc_serialize;
extern crate term;
extern crate libc;
extern crate time;
pub use self::TestFn::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;
use time::{PreciseTime, Duration};
use std::any::Any;
use std::cmp;
use std::collections::BTreeMap;
use std::env;
use std::fmt;
use std::fs::File;
use std::io::prelude::*;
use std::io;
use std::iter::repeat;
use std::path::PathBuf;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex};
use std::thread;
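// Re-export the public API under a nested `test` module, so that code
// written against `test::...` paths (for example, compiler-generated test
// harnesses) resolves against this crate.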
pub mod test {
pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName, DynTestName,
DynTestFn, run_test, test_main, test_main_static, filter_tests, parse_opts,
StaticBenchFn, ShouldPanic};
}
pub mod stats;
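/// The name of a test. By convention this follows the rules for Rust
/// paths, i.e. it should be a series of identifiers separated by double
/// colons. This way, if some test runner wants to arrange the tests
/// hierarchically, it may.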
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum TestName {
StaticTestName(&'static str),
DynTestName(String),
}
impl TestName {
fn as_slice(&self) -> &str {
match *self {
StaticTestName(s) => s,
DynTestName(ref s) => s,
}
}
}
impl fmt::Display for TestName {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self.as_slice(), f)
}
}
#[derive(Clone, Copy, PartialEq, Eq)]
enum NamePadding {
PadNone,
PadOnRight,
}
impl TestDesc {
#[cfg(not(rustc_emits_allow_fail))]
pub fn new(name: TestName) -> Self {
TestDesc {
name: name,
ignore: false,
should_panic: ShouldPanic::No,
}
}
#[cfg(rustc_emits_allow_fail)]
pub fn new(name: TestName) -> Self {
TestDesc {
name: name,
ignore: false,
should_panic: ShouldPanic::No,
allow_fail: false,
}
}
fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
let mut name = String::from(self.name.as_slice());
let fill = column_count.saturating_sub(name.len());
let pad = repeat(" ").take(fill).collect::<String>();
match align {
PadNone => name,
PadOnRight => {
name.push_str(&pad);
name
}
}
}
}
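/// A benchmark implemented as a trait object, which allows it to carry
/// captured state (unlike the plain `fn(&mut Bencher)` variant).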
pub trait TDynBenchFn: Send {
fn run(&self, harness: &mut Bencher);
}
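// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics, the test fails.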
pub enum TestFn {
StaticTestFn(fn()),
StaticBenchFn(fn(&mut Bencher)),
StaticMetricFn(fn(&mut MetricMap)),
DynTestFn(Box<FnMut() + Send>),
DynMetricFn(Box<FnMut(&mut MetricMap) + Send>),
DynBenchFn(Box<TDynBenchFn + 'static>),
}
impl TestFn {
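    // Adapt a `FnOnce` closure to the boxed `FnMut` stored in the enum:
    // the closure is stashed in an `Option` and taken on the first call,
    // so calling the adapter twice would panic.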
pub fn dyn_test_fn<F: FnOnce() + Send + 'static>(f: F) -> Self {
let mut f = Some(f);
TestFn::DynTestFn(Box::new(move || f.take().unwrap()()))
}
pub fn dyn_metric_fn<F: FnOnce(&mut MetricMap) + Send + 'static>(f: F) -> Self {
let mut f = Some(f);
TestFn::DynMetricFn(Box::new(move |map| f.take().unwrap()(map)))
}
fn padding(&self) -> NamePadding {
match *self {
StaticTestFn(..) => PadNone,
StaticBenchFn(..) => PadOnRight,
StaticMetricFn(..) => PadOnRight,
DynTestFn(..) => PadNone,
DynMetricFn(..) => PadOnRight,
DynBenchFn(..) => PadOnRight,
}
}
}
impl fmt::Debug for TestFn {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match *self {
StaticTestFn(..) => "StaticTestFn(..)",
StaticBenchFn(..) => "StaticBenchFn(..)",
StaticMetricFn(..) => "StaticMetricFn(..)",
DynTestFn(..) => "DynTestFn(..)",
DynMetricFn(..) => "DynMetricFn(..)",
DynBenchFn(..) => "DynBenchFn(..)",
})
}
}
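/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up and tear-down before running a piece of code repeatedly via a
/// call to `iter`.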
#[derive(Copy, Clone)]
pub struct Bencher {
iterations: u64,
dur: Duration,
pub bytes: u64,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldPanic {
No,
Yes,
YesWithMessage(&'static str),
}
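// The definition of a single test. A test runner will run a list of
// these.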
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestDesc {
pub name: TestName,
pub ignore: bool,
pub should_panic: ShouldPanic,
#[cfg(rustc_emits_allow_fail)]
pub allow_fail: bool,
}
unsafe impl Send for TestDesc {}
#[derive(Debug)]
pub struct TestDescAndFn {
pub desc: TestDesc,
pub testfn: TestFn,
}
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
pub struct Metric {
value: f64,
noise: f64,
}
impl Metric {
pub fn new(value: f64, noise: f64) -> Metric {
Metric {
value: value,
noise: noise,
}
}
}
#[derive(PartialEq)]
pub struct MetricMap(BTreeMap<String, Metric>);
impl Clone for MetricMap {
fn clone(&self) -> MetricMap {
let MetricMap(ref map) = *self;
MetricMap(map.clone())
}
}
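// The default console test runner. It accepts the command line
// arguments and a vector of tests to run.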
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
let opts = match parse_opts(args) {
Some(Ok(o)) => o,
Some(Err(msg)) => panic!("{:?}", msg),
None => return,
};
match run_tests_console(&opts, tests) {
Ok(true) => {}
Ok(false) => std::process::exit(101),
Err(e) => panic!("io error when running tests: {:?}", e),
}
}
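// A variant optimized for invocation with a static test vector.
// It panics (intentionally) when fed any dynamic tests, because it
// copies the static values out into an owned vector for the parallel
// test runners.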
pub fn test_main_static(tests: &[TestDescAndFn]) {
let args = env::args().collect::<Vec<_>>();
let owned_tests = tests.iter()
.map(|t| {
match t.testfn {
StaticTestFn(f) => {
TestDescAndFn {
testfn: StaticTestFn(f),
desc: t.desc.clone(),
}
}
StaticBenchFn(f) => {
TestDescAndFn {
testfn: StaticBenchFn(f),
desc: t.desc.clone(),
}
}
_ => panic!("non-static tests passed to test::test_main_static"),
}
})
.collect();
test_main(&args, owned_tests)
}
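// Whether console output should be colored or not.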
#[derive(Copy, Clone)]
pub enum ColorConfig {
AutoColor,
AlwaysColor,
NeverColor,
}
pub struct TestOpts {
pub filter: Option<String>,
pub run_ignored: bool,
pub run_tests: bool,
pub bench_benchmarks: bool,
pub logfile: Option<PathBuf>,
pub nocapture: bool,
pub color: ColorConfig,
pub verbose: bool,
}
impl TestOpts {
#[cfg(test)]
fn new() -> TestOpts {
TestOpts {
filter: None,
run_ignored: false,
run_tests: false,
bench_benchmarks: false,
logfile: None,
nocapture: false,
color: AutoColor,
verbose: false,
}
}
}
pub type OptRes = Result<TestOpts, String>;
#[cfg_attr(rustfmt, rustfmt_skip)]
fn options() -> getopts::Options {
let mut opts = getopts::Options::new();
opts.optflag("", "ignored", "Run ignored tests")
.optflag("", "test", "Run tests and not benchmarks")
.optflag("", "bench", "Run benchmarks instead of tests")
.optflag("h", "help", "Display this message (longer with --help)")
.optopt("", "logfile", "Write logs to the specified file instead \
of stdout", "PATH");
if cfg!(feature = "capture") {
opts.optflag("", "nocapture", "don't capture stdout/stderr of each \
task, allow printing directly");
}
opts.optopt("", "color", "Configure coloring of output:
auto = colorize if stdout is a tty and tests are run on serially (default);
always = always colorize output;
never = never colorize output;", "auto|always|never")
.optflag("v", "verbose", "Display the name of each test when it starts");
opts
}
fn usage(binary: &str) {
let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
println!(r#"{usage}
The FILTER string is tested against the name of all tests, and only those
tests whose names contain the filter are run.
By default, all tests are run in parallel. This can be altered with the
RUST_TEST_THREADS environment variable when running tests (set it to 1).
"#, usage = options().usage(&message));
if cfg!(feature = "capture") {
println!(r#"
All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.
"#);
}
println!(r#"
Test Attributes:
#[test] - Indicates a function is a test to be run. This function
takes no arguments.
#[bench] - Indicates a function is a benchmark to be run. This
function takes one argument (test::Bencher).
#[should_panic] - This function (also labeled with #[test]) will only pass if
the code causes a panic (an assertion failure or panic!)
A message may be provided, which the failure string must
contain: #[should_panic(expected = "foo")].
#[ignore] - When applied to a function which is already attributed as a
test, then the test runner will ignore these tests during
normal test runs. Running with --ignored will run these
tests."#);
}
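// Parses command line arguments into test options.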
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
let args_ = &args[1..];
let matches = match options().parse(args_) {
Ok(m) => m,
Err(f) => return Some(Err(f.to_string())),
};
if matches.opt_present("h") {
usage(&args[0]);
return None;
}
let filter = if !matches.free.is_empty() {
Some(matches.free[0].clone())
} else {
None
};
let run_ignored = matches.opt_present("ignored");
let verbose = matches.opt_present("verbose");
let logfile = matches.opt_str("logfile");
let logfile = logfile.map(|s| PathBuf::from(&s));
let bench_benchmarks = matches.opt_present("bench");
let run_tests = !bench_benchmarks || matches.opt_present("test");
let mut nocapture =
if cfg!(feature = "capture") { matches.opt_present("nocapture") } else { false };
if !nocapture {
nocapture = env::var("RUST_TEST_NOCAPTURE").is_ok();
}
let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
Some("auto") | None => AutoColor,
Some("always") => AlwaysColor,
Some("never") => NeverColor,
Some(v) => {
return Some(Err(format!("argument for --color must be auto, always, or never (was \
{})",
v)))
}
};
let test_opts = TestOpts {
filter: filter,
run_ignored: run_ignored,
run_tests: run_tests,
bench_benchmarks: bench_benchmarks,
logfile: logfile,
nocapture: nocapture,
color: color,
verbose: verbose,
};
Some(Ok(test_opts))
}
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
ns_iter_summ: stats::Summary,
mb_s: usize,
}
#[derive(Clone, PartialEq)]
pub enum TestResult {
TrOk,
TrFailed,
TrIgnored,
TrMetrics(MetricMap),
TrBench(BenchSamples),
}
unsafe impl Send for TestResult {}
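// Where test output goes: a color-capable terminal or a raw writer.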
enum OutputLocation<T> {
Pretty(Box<term::StdoutTerminal>),
Raw(T),
}
struct ConsoleTestState<T> {
log_out: Option<File>,
out: OutputLocation<T>,
use_color: bool,
verbose: bool,
total: usize,
passed: usize,
failed: usize,
ignored: usize,
measured: usize,
metrics: MetricMap,
failures: Vec<(TestDesc, Vec<u8>)>,
    max_name_len: usize,
}
impl<T: Write> ConsoleTestState<T> {
pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
let log_out = match opts.logfile {
Some(ref path) => Some(try!(File::create(path))),
None => None,
};
let out = match term::stdout() {
None => Raw(io::stdout()),
Some(t) => Pretty(t),
};
Ok(ConsoleTestState {
out: out,
log_out: log_out,
use_color: use_color(opts),
verbose: opts.verbose,
total: 0,
passed: 0,
failed: 0,
ignored: 0,
measured: 0,
metrics: MetricMap::new(),
failures: Vec::new(),
max_name_len: 0,
})
}
pub fn write_ok(&mut self) -> io::Result<()> {
self.write_short_result("ok", ".", term::color::GREEN)
}
pub fn write_failed(&mut self) -> io::Result<()> {
self.write_short_result("FAILED", "F", term::color::RED)
}
pub fn write_ignored(&mut self) -> io::Result<()> {
self.write_short_result("ignored", "i", term::color::YELLOW)
}
pub fn write_metric(&mut self) -> io::Result<()> {
self.write_pretty("metric", term::color::CYAN)
}
pub fn write_bench(&mut self) -> io::Result<()> {
self.write_pretty("bench", term::color::CYAN)
}
pub fn write_short_result(&mut self, verbose: &str, quiet: &str, color: term::color::Color)
-> io::Result<()> {
if self.verbose {
try!(self.write_pretty(verbose, color));
self.write_plain("\n")
} else {
self.write_pretty(quiet, color)
}
}
pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
match self.out {
Pretty(ref mut term) => {
if self.use_color {
try!(term.fg(color));
}
try!(term.write_all(word.as_bytes()));
if self.use_color {
try!(term.reset());
}
term.flush()
}
Raw(ref mut stdout) => {
try!(stdout.write_all(word.as_bytes()));
stdout.flush()
}
}
}
pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
match self.out {
Pretty(ref mut term) => {
try!(term.write_all(s.as_bytes()));
term.flush()
}
Raw(ref mut stdout) => {
try!(stdout.write_all(s.as_bytes()));
stdout.flush()
}
}
}
pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
self.total = len;
let noun = if len != 1 {
"tests"
} else {
"test"
};
self.write_plain(&format!("\nrunning {} {}\n", len, noun))
}
pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
if self.verbose || align == PadOnRight {
let name = test.padded_name(self.max_name_len, align);
self.write_plain(&format!("test {} ... ", name))
} else {
Ok(())
}
}
pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
match *result {
TrOk => self.write_ok(),
TrFailed => self.write_failed(),
TrIgnored => self.write_ignored(),
TrMetrics(ref mm) => {
try!(self.write_metric());
self.write_plain(&format!(": {}\n", mm.fmt_metrics()))
}
TrBench(ref bs) => {
try!(self.write_bench());
self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
}
}
}
pub fn write_log(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
match self.log_out {
None => Ok(()),
Some(ref mut o) => {
let s = format!("{} {}\n",
match *result {
TrOk => "ok".to_owned(),
TrFailed => "failed".to_owned(),
TrIgnored => "ignored".to_owned(),
TrMetrics(ref mm) => mm.fmt_metrics(),
TrBench(ref bs) => fmt_bench_samples(bs),
},
test.name);
o.write_all(s.as_bytes())
}
}
}
pub fn write_failures(&mut self) -> io::Result<()> {
try!(self.write_plain("\nfailures:\n"));
let mut failures = Vec::new();
let mut fail_out = String::new();
for &(ref f, ref stdout) in &self.failures {
failures.push(f.name.to_string());
if !stdout.is_empty() {
fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
let output = String::from_utf8_lossy(stdout);
fail_out.push_str(&output);
fail_out.push_str("\n");
}
}
if !fail_out.is_empty() {
try!(self.write_plain("\n"));
try!(self.write_plain(&fail_out));
}
try!(self.write_plain("\nfailures:\n"));
failures.sort();
for name in &failures {
try!(self.write_plain(&format!(" {}\n", name)));
}
Ok(())
}
pub fn write_run_finish(&mut self) -> io::Result<bool> {
assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
let success = self.failed == 0;
if !success {
try!(self.write_failures());
}
try!(self.write_plain("\ntest result: "));
if success {
try!(self.write_pretty("ok", term::color::GREEN));
} else {
try!(self.write_pretty("FAILED", term::color::RED));
}
let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
self.passed,
self.failed,
self.ignored,
self.measured);
try!(self.write_plain(&s));
        Ok(success)
}
}
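// Format a number with thousands separators, e.g. 1234567 -> "1,234,567".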
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
use std::fmt::Write;
let mut output = String::new();
let mut trailing = false;
for &pow in &[9, 6, 3, 0] {
let base = 10_usize.pow(pow);
if pow == 0 || trailing || n / base != 0 {
if !trailing {
output.write_fmt(format_args!("{}", n / base)).unwrap();
} else {
output.write_fmt(format_args!("{:03}", n / base)).unwrap();
}
if pow != 0 {
output.push(sep);
}
trailing = true;
}
n %= base;
}
output
}
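// Render benchmark samples as "N ns/iter (+/- D)", with an optional
// throughput figure when `bytes` was set.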
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
use std::fmt::Write;
let mut output = String::new();
let median = bs.ns_iter_summ.median as usize;
let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
fmt_thousands_sep(median, ','),
fmt_thousands_sep(deviation, ',')))
.unwrap();
if bs.mb_s != 0 {
output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
}
output
}
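// A simple console test runner.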
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
fn callback<T: Write>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::Result<()> {
match (*event).clone() {
TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
TeWait(ref test, padding) => st.write_test_start(test, padding),
TeResult(test, result, stdout) => {
try!(st.write_log(&test, &result));
try!(st.write_result(&result));
match result {
TrOk => st.passed += 1,
TrIgnored => st.ignored += 1,
TrMetrics(mm) => {
let tname = test.name;
let MetricMap(mm) = mm;
for (k, v) in &mm {
st.metrics
.insert_metric(&format!("{}.{}", tname, k), v.value, v.noise);
}
st.measured += 1
}
TrBench(bs) => {
st.metrics.insert_metric(test.name.as_slice(),
bs.ns_iter_summ.median,
bs.ns_iter_summ.max - bs.ns_iter_summ.min);
st.measured += 1
}
TrFailed => {
st.failed += 1;
st.failures.push((test, stdout));
}
}
Ok(())
}
}
}
let mut st = try!(ConsoleTestState::new(opts, None::<io::Stdout>));
fn len_if_padded(t: &TestDescAndFn) -> usize {
match t.testfn.padding() {
PadNone => 0,
PadOnRight => t.desc.name.as_slice().len(),
}
}
    if let Some(t) = tests.iter().max_by_key(|t| len_if_padded(*t)) {
        st.max_name_len = t.desc.name.as_slice().len();
    }
try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    st.write_run_finish()
}
#[cfg(test)]
impl TestDesc {
#[cfg(not(rustc_emits_allow_fail))]
fn default() -> Self {
TestDesc {
name: StaticTestName(""),
ignore: false,
should_panic: ShouldPanic::No,
}
}
#[cfg(rustc_emits_allow_fail)]
fn default() -> Self {
TestDesc {
name: StaticTestName(""),
ignore: false,
should_panic: ShouldPanic::No,
allow_fail: false,
}
}
}
#[test]
fn should_sort_failures_before_printing_them() {
let test_a = TestDesc {
name: StaticTestName("a"),
ignore: false,
should_panic: ShouldPanic::No,
.. TestDesc::default()
};
let test_b = TestDesc {
name: StaticTestName("b"),
ignore: false,
should_panic: ShouldPanic::No,
.. TestDesc::default()
};
let mut st = ConsoleTestState {
log_out: None,
out: Raw(Vec::new()),
use_color: false,
verbose: false,
total: 0,
passed: 0,
failed: 0,
ignored: 0,
measured: 0,
max_name_len: 10,
metrics: MetricMap::new(),
failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
};
st.write_failures().unwrap();
let s = match st.out {
Raw(ref m) => String::from_utf8_lossy(&m[..]),
Pretty(_) => unreachable!(),
};
let apos = s.find("a").unwrap();
let bpos = s.find("b").unwrap();
assert!(apos < bpos);
}
fn use_color(opts: &TestOpts) -> bool {
match opts.color {
AutoColor => !opts.nocapture && stdout_isatty(),
AlwaysColor => true,
NeverColor => false,
}
}
#[cfg(unix)]
fn stdout_isatty() -> bool {
unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
}
#[cfg(windows)]
fn stdout_isatty() -> bool {
type DWORD = u32;
type BOOL = i32;
type HANDLE = *mut u8;
type LPDWORD = *mut u32;
const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
extern "system" {
fn GetStdHandle(which: DWORD) -> HANDLE;
fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
}
unsafe {
let handle = GetStdHandle(STD_OUTPUT_HANDLE);
let mut out = 0;
GetConsoleMode(handle, &mut out) != 0
}
}
#[derive(Clone)]
enum TestEvent {
TeFiltered(Vec<TestDesc>),
TeWait(TestDesc, NamePadding),
TeResult(TestDesc, TestResult, Vec<u8>),
}
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
where F: FnMut(TestEvent) -> io::Result<()>
{
let mut filtered_tests = filter_tests(opts, tests);
if !opts.bench_benchmarks {
filtered_tests = convert_benchmarks_to_tests(filtered_tests);
}
let filtered_descs = filtered_tests.iter()
.map(|t| t.desc.clone())
.collect();
try!(callback(TeFiltered(filtered_descs)));
let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
filtered_tests.into_iter().partition(|e| {
match e.testfn {
StaticTestFn(_) | DynTestFn(_) => true,
_ => false,
}
});
let concurrency = get_concurrency();
let mut remaining = filtered_tests;
remaining.reverse();
let mut pending = 0;
let (tx, rx) = channel::<MonitorMsg>();
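    // Keep up to `concurrency` tests in flight at a time. With a
    // concurrency of 1 the "test foo ..." line is printed before the test
    // runs; otherwise it is printed as each result arrives.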
while pending > 0 || !remaining.is_empty() {
while pending < concurrency && !remaining.is_empty() {
let test = remaining.pop().unwrap();
if concurrency == 1 {
try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
}
run_test(opts, !opts.run_tests, test, tx.clone());
pending += 1;
}
let (desc, result, stdout) = rx.recv().unwrap();
if concurrency != 1 {
try!(callback(TeWait(desc.clone(), PadNone)));
}
try!(callback(TeResult(desc, result, stdout)));
pending -= 1;
}
if opts.bench_benchmarks {
for b in filtered_benchs_and_metrics {
try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
run_test(opts, false, b, tx.clone());
let (test, result, stdout) = rx.recv().unwrap();
try!(callback(TeResult(test, result, stdout)));
}
}
Ok(())
}
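// Determines how many tests run in parallel: the RUST_TEST_THREADS
// environment variable if set, otherwise the number of logical CPUs.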
#[allow(deprecated)]
fn get_concurrency() -> usize {
return match env::var("RUST_TEST_THREADS") {
Ok(s) => {
let opt_n: Option<usize> = s.parse().ok();
match opt_n {
Some(n) if n > 0 => n,
_ => {
panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
s)
}
}
}
Err(..) => num_cpus(),
};
#[cfg(windows)]
#[allow(bad_style)]
fn num_cpus() -> usize {
#[repr(C)]
struct SYSTEM_INFO {
wProcessorArchitecture: u16,
wReserved: u16,
dwPageSize: u32,
lpMinimumApplicationAddress: *mut u8,
lpMaximumApplicationAddress: *mut u8,
dwActiveProcessorMask: *mut u8,
dwNumberOfProcessors: u32,
dwProcessorType: u32,
dwAllocationGranularity: u32,
wProcessorLevel: u16,
wProcessorRevision: u16,
}
extern "system" {
fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
}
unsafe {
let mut sysinfo = std::mem::zeroed();
GetSystemInfo(&mut sysinfo);
sysinfo.dwNumberOfProcessors as usize
}
}
#[cfg(any(target_os = "linux",
target_os = "macos",
target_os = "ios",
target_os = "android"))]
fn num_cpus() -> usize {
unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
}
#[cfg(any(target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
target_os = "netbsd"))]
fn num_cpus() -> usize {
let mut cpus: libc::c_uint = 0;
let mut cpus_size = std::mem::size_of_val(&cpus);
let mut mib = [libc::CTL_HW, libc::HW_AVAILCPU, 0, 0];
unsafe {
libc::sysctl(mib.as_mut_ptr(),
2,
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
0 as *mut _,
0);
}
if cpus < 1 {
mib[1] = libc::HW_NCPU;
unsafe {
libc::sysctl(mib.as_mut_ptr(),
2,
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
0 as *mut _,
0);
}
if cpus < 1 {
cpus = 1;
}
}
cpus as usize
}
#[cfg(target_os = "openbsd")]
fn num_cpus() -> usize {
let mut cpus: libc::c_uint = 0;
let mut cpus_size = std::mem::size_of_val(&cpus);
let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
unsafe {
libc::sysctl(mib.as_mut_ptr(),
2,
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
0 as *mut _,
0);
}
if cpus < 1 {
cpus = 1;
}
cpus as usize
}
}
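// Applies the name filter and the --ignored flag, then sorts the
// remaining tests by name.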
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
let mut filtered = tests;
filtered = match opts.filter {
None => filtered,
Some(ref filter) => {
filtered.into_iter()
.filter(|test| test.desc.name.as_slice().contains(&filter[..]))
.collect()
}
};
filtered = if !opts.run_ignored {
filtered
} else {
fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
if test.desc.ignore {
let TestDescAndFn {desc, testfn} = test;
Some(TestDescAndFn {
desc: TestDesc { ignore: false, ..desc },
testfn: testfn,
})
} else {
None
}
}
filtered.into_iter().filter_map(filter).collect()
};
filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
filtered
}
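// When benchmarks are not being measured, downgrade each benchmark to a
// plain test that runs the benched function exactly once.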
pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
tests.into_iter()
.map(|x| {
let testfn = match x.testfn {
DynBenchFn(bench) => {
DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
}
StaticBenchFn(benchfn) => {
DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
}
f => f,
};
TestDescAndFn {
desc: x.desc,
testfn: testfn,
}
})
.collect()
}
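// Runs a single test or benchmark, reporting the outcome over
// `monitor_ch`. Ignored tests (or any test when `force_ignore` is set)
// are reported as TrIgnored without being run.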
pub fn run_test(opts: &TestOpts,
force_ignore: bool,
test: TestDescAndFn,
monitor_ch: Sender<MonitorMsg>) {
let TestDescAndFn {desc, testfn} = test;
if force_ignore || desc.ignore {
monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
return;
}
fn run_test_inner(desc: TestDesc,
monitor_ch: Sender<MonitorMsg>,
nocapture: bool,
mut testfn: Box<FnMut() + Send>) {
thread::spawn(move || {
let data = Arc::new(Mutex::new(Vec::new()));
let data2 = data.clone();
let cfg = thread::Builder::new().name(match desc.name {
DynTestName(ref name) => name.clone(),
StaticTestName(name) => name.to_owned(),
});
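            // With the `capture` feature, redirect the child thread's
            // output into the shared buffer via the unstable
            // `internal_output_capture` API; otherwise this is a no-op.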
#[cfg(feature = "capture")]
fn capture(data2: Arc<Mutex<Vec<u8>>>) {
io::set_output_capture(Some(data2));
}
#[cfg(not(feature = "capture"))]
fn capture(_data2: Arc<Mutex<Vec<u8>>>) {}
let result_guard = cfg.spawn(move || {
if !nocapture {
capture(data2)
}
testfn()
})
.unwrap();
let test_result = calc_result(&desc, result_guard.join());
let stdout = data.lock().unwrap().to_vec();
monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
});
}
match testfn {
DynBenchFn(bencher) => {
let bs = ::bench::benchmark(|harness| bencher.run(harness));
monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
return;
}
StaticBenchFn(benchfn) => {
let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
return;
}
DynMetricFn(mut f) => {
let mut mm = MetricMap::new();
f(&mut mm);
monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
return;
}
StaticMetricFn(f) => {
let mut mm = MetricMap::new();
f(&mut mm);
monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
return;
}
DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(f)),
}
}
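// Decides a test's outcome from its panic status and its `should_panic`
// expectation; for `YesWithMessage`, the panic payload (a `String` or
// `&'static str`) must contain the expected substring.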
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
match (&desc.should_panic, task_result) {
(&ShouldPanic::No, Ok(())) |
(&ShouldPanic::Yes, Err(_)) => TrOk,
(&ShouldPanic::YesWithMessage(msg), Err(ref err))
if err.downcast_ref::<String>()
.map(|e| &**e)
.or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
.map(|e| e.contains(msg))
.unwrap_or(false) => TrOk,
_ => TrFailed,
}
}
impl MetricMap {
pub fn new() -> MetricMap {
MetricMap(BTreeMap::new())
}
pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
let m = Metric {
value: value,
noise: noise,
};
let MetricMap(ref mut map) = *self;
map.insert(name.to_owned(), m);
}
pub fn fmt_metrics(&self) -> String {
let MetricMap(ref mm) = *self;
let v: Vec<String> = mm.iter()
.map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
.collect();
v.join(", ")
}
}
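/// A function that is opaque to the optimizer, used to prevent benchmark
/// loops from being optimized away. The `asm` variant pins the value with
/// an empty inline-assembly block; the fallback relies on the weaker
/// `#[inline(never)]` barrier.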
#[cfg(all(feature = "asm_black_box", not(all(target_os = "nacl", target_arch = "le32"))))]
pub fn black_box<T>(dummy: T) -> T {
unsafe { asm!("" : : "r"(&dummy)) }
dummy
}
#[cfg(not(all(feature = "asm_black_box", not(all(target_os = "nacl", target_arch = "le32")))))]
#[inline(never)]
pub fn black_box<T>(dummy: T) -> T {
dummy
}
impl Bencher {
pub fn iter<T, F>(&mut self, mut inner: F)
where F: FnMut() -> T
{
let start = PreciseTime::now();
let k = self.iterations;
for _ in 0..k {
black_box(inner());
}
self.dur = start.to(PreciseTime::now());
}
pub fn ns_elapsed(&mut self) -> u64 {
self.dur.num_nanoseconds().unwrap() as u64
}
    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            0
        } else {
            self.ns_elapsed() / self.iterations
        }
    }
pub fn bench_n<F>(&mut self, n: u64, f: F)
where F: FnOnce(&mut Bencher)
{
self.iterations = n;
f(self);
}
pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary
where F: FnMut(&mut Bencher)
{
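        // Initial bench run to get a ballpark figure.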
let mut n = 1;
self.bench_n(n, |x| f(x));
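        // Estimate the iteration count needed for roughly 1 ms of work,
        // falling back to one million iterations if the first run took
        // under 1 ns per iteration.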
if self.ns_per_iter() == 0 {
n = 1_000_000;
} else {
n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
}
if n == 0 {
n = 1;
}
let mut total_run = Duration::seconds(0);
let samples: &mut [f64] = &mut [0.0_f64; 50];
loop {
let loop_start = PreciseTime::now();
for p in &mut *samples {
self.bench_n(n, |x| f(x));
*p = self.ns_per_iter() as f64;
}
stats::winsorize(samples, 5.0);
let summ = stats::Summary::new(samples);
for p in &mut *samples {
self.bench_n(5 * n, |x| f(x));
*p = self.ns_per_iter() as f64;
}
stats::winsorize(samples, 5.0);
let summ5 = stats::Summary::new(samples);
let loop_run = loop_start.to(PreciseTime::now());
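            // Accept the measurement once the batch has run for at least
            // 100 ms, the median is stable (MAD below 1%), and the larger
            // 5*n batch agrees with it.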
if loop_run > Duration::milliseconds(100) && summ.median_abs_dev_pct < 1.0 &&
summ.median - summ5.median < summ5.median_abs_dev {
return summ5;
}
total_run = total_run + loop_run;
if total_run > Duration::seconds(3) {
return summ5;
}
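            // Otherwise double the iteration count, bailing out with the
            // current summary if a ten-fold increase would overflow.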
n = match n.checked_mul(10) {
Some(_) => n * 2,
None => return summ5,
};
}
}
}
pub mod bench {
use std::cmp;
use time::Duration;
use super::{Bencher, BenchSamples};
pub fn benchmark<F>(f: F) -> BenchSamples
where F: FnMut(&mut Bencher)
{
let mut bs = Bencher {
iterations: 0,
dur: Duration::seconds(0),
bytes: 0,
};
let ns_iter_summ = bs.auto_bench(f);
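        // Throughput: bytes per iteration * 1000 / ns per iteration = MB/s
        // (10^9 ns per second, 10^6 bytes per MB).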
let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
let mb_s = bs.bytes * 1000 / ns_iter;
BenchSamples {
ns_iter_summ: ns_iter_summ,
mb_s: mb_s as usize,
}
}
pub fn run_once<F>(f: F)
where F: FnOnce(&mut Bencher)
{
let mut bs = Bencher {
iterations: 0,
dur: Duration::seconds(0),
bytes: 0,
};
bs.bench_n(1, f);
}
}
#[cfg(test)]
mod tests {
use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc, TestDescAndFn,
TestOpts, run_test, MetricMap, StaticTestName, DynTestName, DynTestFn, ShouldPanic};
use std::sync::mpsc::channel;
#[test]
pub fn do_not_run_ignored_tests() {
fn f() {
panic!();
}
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: true,
should_panic: ShouldPanic::No,
.. TestDesc::default()
},
testfn: DynTestFn(Box::new(move || f())),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res != TrOk);
}
#[test]
pub fn ignored_tests_result_in_ignored() {
fn f() {}
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: true,
should_panic: ShouldPanic::No,
.. TestDesc::default()
},
testfn: DynTestFn(Box::new(move || f())),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrIgnored);
}
#[test]
fn test_should_panic() {
fn f() {
panic!();
}
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_panic: ShouldPanic::Yes,
.. TestDesc::default()
},
testfn: DynTestFn(Box::new(move || f())),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrOk);
}
#[test]
fn test_should_panic_good_message() {
fn f() {
panic!("an error message");
}
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_panic: ShouldPanic::YesWithMessage("error message"),
.. TestDesc::default()
},
testfn: DynTestFn(Box::new(move || f())),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrOk);
}
#[test]
fn test_should_panic_bad_message() {
fn f() {
panic!("an error message");
}
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_panic: ShouldPanic::YesWithMessage("foobar"),
.. TestDesc::default()
},
testfn: DynTestFn(Box::new(move || f())),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrFailed);
}
#[test]
fn test_should_panic_but_succeeds() {
fn f() {}
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_panic: ShouldPanic::Yes,
.. TestDesc::default()
},
testfn: DynTestFn(Box::new(move || f())),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrFailed);
}
#[test]
fn parse_ignored_flag() {
let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
let opts = match parse_opts(&args) {
Some(Ok(o)) => o,
_ => panic!("Malformed arg in parse_ignored_flag"),
};
        assert!(opts.run_ignored);
}
#[test]
pub fn filter_for_ignored_option() {
let mut opts = TestOpts::new();
opts.run_tests = true;
opts.run_ignored = true;
let tests = vec![TestDescAndFn {
desc: TestDesc {
name: StaticTestName("1"),
ignore: true,
should_panic: ShouldPanic::No,
.. TestDesc::default()
},
testfn: DynTestFn(Box::new(move || {})),
},
TestDescAndFn {
desc: TestDesc {
name: StaticTestName("2"),
ignore: false,
should_panic: ShouldPanic::No,
.. TestDesc::default()
},
testfn: DynTestFn(Box::new(move || {})),
}];
let filtered = filter_tests(&opts, tests);
assert_eq!(filtered.len(), 1);
assert_eq!(filtered[0].desc.name.to_string(), "1");
        assert!(!filtered[0].desc.ignore);
}
#[test]
pub fn sort_tests() {
let mut opts = TestOpts::new();
opts.run_tests = true;
let names = vec!["sha1::test".to_string(),
"isize::test_to_str".to_string(),
"isize::test_pow".to_string(),
"test::do_not_run_ignored_tests".to_string(),
"test::ignored_tests_result_in_ignored".to_string(),
"test::first_free_arg_should_be_a_filter".to_string(),
"test::parse_ignored_flag".to_string(),
"test::filter_for_ignored_option".to_string(),
"test::sort_tests".to_string()];
let tests = {
fn testfn() {}
let mut tests = Vec::new();
for name in &names {
let test = TestDescAndFn {
desc: TestDesc {
name: DynTestName((*name).clone()),
ignore: false,
should_panic: ShouldPanic::No,
.. TestDesc::default()
},
testfn: DynTestFn(Box::new(testfn)),
};
tests.push(test);
}
tests
};
let filtered = filter_tests(&opts, tests);
let expected = vec!["isize::test_pow".to_string(),
"isize::test_to_str".to_string(),
"sha1::test".to_string(),
"test::do_not_run_ignored_tests".to_string(),
"test::filter_for_ignored_option".to_string(),
"test::first_free_arg_should_be_a_filter".to_string(),
"test::ignored_tests_result_in_ignored".to_string(),
"test::parse_ignored_flag".to_string(),
"test::sort_tests".to_string()];
        for (a, b) in expected.iter().zip(filtered) {
            assert_eq!(*a, b.desc.name.to_string());
        }
}
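    // Smoke test: exercises insert_metric with positive and negative noise
    // values; it asserts nothing beyond not panicking.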
#[test]
pub fn test_metricmap_compare() {
let mut m1 = MetricMap::new();
let mut m2 = MetricMap::new();
m1.insert_metric("in-both-noise", 1000.0, 200.0);
m2.insert_metric("in-both-noise", 1100.0, 200.0);
m1.insert_metric("in-first-noise", 1000.0, 2.0);
m2.insert_metric("in-second-noise", 1000.0, 2.0);
m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
}
}