Auto merge of #25048 - huonw:test-benches, r=alexcrichton
For example, if `foo.rs` looks like

    #![feature(test)]
    extern crate test;

    #[bench]
    fn bar(b: &mut test::Bencher) {
        b.iter(|| { 1 })
    }

    #[test]
    fn baz() {}

    #[bench]
    fn qux(b: &mut test::Bencher) {
        b.iter(|| { panic!() })
    }

then

    $ rustc --test foo.rs
    $ ./foo

    running 3 tests
    test baz ... ok
    test qux ... FAILED
    test bar ... ok

    failures:

    ---- qux stdout ----
    thread 'qux' panicked at 'explicit panic', bench.rs:17

    failures:
        qux

    test result: FAILED. 2 passed; 1 failed; 0 ignored; 0 measured

    $ ./foo --bench ba

    running 2 tests
    test baz ... ignored
    test bar ... bench: 97 ns/iter (+/- 74)

    test result: ok. 0 passed; 0 failed; 1 ignored; 1 measured

In particular, the two benchmarks are run as tests in the default mode. This helps the main distribution, since benchmarks are only run with `PLEASE_BENCH=1`, which is rarely set (and never set on the test bots), and it helps code-coverage tools: benchmarks are executed and so no longer count as dead code.

Fixes #15842.
commit 26933a638c
@@ -269,7 +269,7 @@ pub fn test_opts(config: &Config) -> test::TestOpts {
         run_ignored: config.run_ignored,
         logfile: config.logfile.clone(),
         run_tests: true,
-        run_benchmarks: true,
+        bench_benchmarks: true,
         nocapture: env::var("RUST_TEST_NOCAPTURE").is_ok(),
         color: test::AutoColor,
     }
@@ -139,7 +139,7 @@ impl TestDesc {
 }
 
 /// Represents a benchmark function.
-pub trait TDynBenchFn {
+pub trait TDynBenchFn: Send {
     fn run(&self, harness: &mut Bencher);
 }
 
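A note on the `Send` bound added above: once benchmarks can be converted into ordinary tests, the boxed benchmark object may be handed to the thread that runs tests, so the trait object must be sendable. A minimal standalone sketch of why this matters (stand-in types, not the real libtest):

    struct Bencher; // stand-in for test::Bencher

    trait TDynBenchFn: Send {
        fn run(&self, harness: &mut Bencher);
    }

    struct MyBench; // hypothetical benchmark object
    impl TDynBenchFn for MyBench {
        fn run(&self, _harness: &mut Bencher) {}
    }

    fn main() {
        let bench: Box<dyn TDynBenchFn> = Box::new(MyBench);
        // Without the `Send` supertrait, moving the trait object into a
        // spawned thread would not compile.
        std::thread::spawn(move || {
            let mut b = Bencher;
            bench.run(&mut b);
        }).join().unwrap();
    }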
@@ -285,7 +285,7 @@ pub struct TestOpts {
     pub filter: Option<String>,
     pub run_ignored: bool,
     pub run_tests: bool,
-    pub run_benchmarks: bool,
+    pub bench_benchmarks: bool,
     pub logfile: Option<PathBuf>,
     pub nocapture: bool,
     pub color: ColorConfig,
@@ -298,7 +298,7 @@ impl TestOpts {
             filter: None,
             run_ignored: false,
             run_tests: false,
-            run_benchmarks: false,
+            bench_benchmarks: false,
             logfile: None,
             nocapture: false,
             color: AutoColor,
@@ -377,8 +377,8 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
     let logfile = matches.opt_str("logfile");
     let logfile = logfile.map(|s| PathBuf::from(&s));
 
-    let run_benchmarks = matches.opt_present("bench");
-    let run_tests = ! run_benchmarks ||
+    let bench_benchmarks = matches.opt_present("bench");
+    let run_tests = ! bench_benchmarks ||
         matches.opt_present("test");
 
     let mut nocapture = matches.opt_present("nocapture");
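The intent of the two changed lines: `--bench` selects benchmark mode, and tests still run unless `--bench` was given without `--test`. A standalone sketch of that truth table (hypothetical helper, not part of the patch):

    // Returns (run_tests, bench_benchmarks) from the two flags.
    fn modes(bench_flag: bool, test_flag: bool) -> (bool, bool) {
        let bench_benchmarks = bench_flag;
        let run_tests = !bench_benchmarks || test_flag;
        (run_tests, bench_benchmarks)
    }

    fn main() {
        assert_eq!(modes(false, false), (true, false)); // default: everything runs as tests
        assert_eq!(modes(true, false), (false, true));  // --bench: benchmarks only
        assert_eq!(modes(true, true), (true, true));    // --bench --test: both
    }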
@@ -400,7 +400,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
         filter: filter,
         run_ignored: run_ignored,
         run_tests: run_tests,
-        run_benchmarks: run_benchmarks,
+        bench_benchmarks: bench_benchmarks,
         logfile: logfile,
         nocapture: nocapture,
         color: color,
@@ -778,7 +778,11 @@ fn run_tests<F>(opts: &TestOpts,
                 mut callback: F) -> io::Result<()> where
     F: FnMut(TestEvent) -> io::Result<()>,
 {
-    let filtered_tests = filter_tests(opts, tests);
+    let mut filtered_tests = filter_tests(opts, tests);
+    if !opts.bench_benchmarks {
+        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
+    }
 
     let filtered_descs = filtered_tests.iter()
                                        .map(|t| t.desc.clone())
                                        .collect();
@@ -824,13 +828,15 @@ fn run_tests<F>(opts: &TestOpts,
         pending -= 1;
     }
 
-    // All benchmarks run at the end, in serial.
-    // (this includes metric fns)
-    for b in filtered_benchs_and_metrics {
-        try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
-        run_test(opts, !opts.run_benchmarks, b, tx.clone());
-        let (test, result, stdout) = rx.recv().unwrap();
-        try!(callback(TeResult(test, result, stdout)));
+    if opts.bench_benchmarks {
+        // All benchmarks run at the end, in serial.
+        // (this includes metric fns)
+        for b in filtered_benchs_and_metrics {
+            try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
+            run_test(opts, false, b, tx.clone());
+            let (test, result, stdout) = rx.recv().unwrap();
+            try!(callback(TeResult(test, result, stdout)));
+        }
     }
     Ok(())
 }
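Taken together with the conversion step in the previous hunk, the flow is: when benchmarking is off, benchmarks are first turned into ordinary tests and run with everything else; the serial benchmark phase at the end only executes under `--bench`. A compact standalone model of that control flow (assumed names, not the real libtest):

    #[derive(Clone, Copy, PartialEq)]
    enum Kind { Test, Bench }

    fn run_all(mut items: Vec<(&'static str, Kind)>, bench_benchmarks: bool) {
        if !bench_benchmarks {
            // mirrors convert_benchmarks_to_tests: each bench becomes a
            // test whose body runs exactly once
            for item in items.iter_mut() {
                item.1 = Kind::Test;
            }
        }
        // tests (including any converted benchmarks) always run
        for &(name, _) in items.iter().filter(|&&(_, k)| k == Kind::Test) {
            println!("test {} ... ok", name);
        }
        if bench_benchmarks {
            // all benchmarks run at the end, in serial
            for &(name, _) in items.iter().filter(|&&(_, k)| k == Kind::Bench) {
                println!("test {} ... bench: (measured)", name);
            }
        }
    }

    fn main() {
        let items = vec![("baz", Kind::Test), ("bar", Kind::Bench)];
        run_all(items.clone(), false); // default mode: bar runs once, as a test
        run_all(items, true);          // --bench: bar is measured
    }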
@@ -893,6 +899,22 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
     filtered
 }
 
+pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
+    // convert benchmarks to tests, if we're not benchmarking them
+    tests.into_iter().map(|x| {
+        let testfn = match x.testfn {
+            DynBenchFn(bench) => {
+                DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
+            }
+            StaticBenchFn(benchfn) => {
+                DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
+            }
+            f => f
+        };
+        TestDescAndFn { desc: x.desc, testfn: testfn }
+    }).collect()
+}
+
 pub fn run_test(opts: &TestOpts,
                 force_ignore: bool,
                 test: TestDescAndFn,
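The conversion wraps each benchmark in a boxed test closure that drives the benchmark body through a single pass rather than a timed loop, so a panicking benchmark fails like any other test. A self-contained sketch of the pattern, with simplified stand-ins for libtest's types:

    struct Bencher; // stand-in for test::Bencher

    impl Bencher {
        fn iter<T, F: FnMut() -> T>(&mut self, mut inner: F) {
            inner(); // run-once mode: a single pass, no timing loop
        }
    }

    enum TestFn {
        StaticBenchFn(fn(&mut Bencher)),
        DynTestFn(Box<dyn FnOnce() + Send>),
    }

    fn convert(f: TestFn) -> TestFn {
        match f {
            TestFn::StaticBenchFn(benchfn) => TestFn::DynTestFn(Box::new(move || {
                let mut b = Bencher;
                benchfn(&mut b); // bench body runs once; a panic fails the test
            })),
            other => other,
        }
    }

    fn my_bench(b: &mut Bencher) {
        b.iter(|| 1 + 1);
    }

    fn main() {
        if let TestFn::DynTestFn(t) = convert(TestFn::StaticBenchFn(my_bench)) {
            t(); // executed like any ordinary test closure
        }
    }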
@@ -1159,6 +1181,15 @@ pub mod bench {
             mb_s: mb_s as usize
         }
     }
+
+    pub fn run_once<F>(f: F) where F: FnOnce(&mut Bencher) {
+        let mut bs = Bencher {
+            iterations: 0,
+            dur: Duration::nanoseconds(0),
+            bytes: 0
+        };
+        bs.bench_n(1, f);
+    }
 }
 
 #[cfg(test)]
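`run_once` builds a fresh `Bencher` and asks it for exactly one iteration via `bench_n(1, f)`. A minimal standalone illustration of that mechanism (assumed types; the real `Bencher` also tracks duration and bytes):

    struct Bencher {
        iterations: u64,
    }

    impl Bencher {
        fn bench_n<F: FnOnce(&mut Bencher)>(&mut self, n: u64, f: F) {
            self.iterations = n;
            f(self);
        }
        fn iter<T, F: FnMut() -> T>(&mut self, mut inner: F) {
            for _ in 0..self.iterations {
                inner();
            }
        }
    }

    fn run_once<F: FnOnce(&mut Bencher)>(f: F) {
        let mut bs = Bencher { iterations: 0 };
        bs.bench_n(1, f);
    }

    fn main() {
        run_once(|b| b.iter(|| 1 + 1)); // the closure body executes exactly once
    }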