Auto merge of #56243 - RalfJung:test-deterministic, r=alexcrichton
libtest: Use deterministic HashMap, avoid spawning thread if there is no concurrency

It seems desirable to make the test and bench runner deterministic, which this achieves by using a deterministic hasher. Also, when we only have 1 thread, we don't bother spawning one and just use the main thread. The motivation for this is to be able to run the test harness in miri, where we can neither access the OS RNG nor spawn threads.
commit 3499575282
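For context, a minimal self-contained sketch of the deterministic-map technique this patch applies (the alias name `DeterministicMap` and the example data are illustrative, not part of the patch): parameterizing HashMap with BuildHasherDefault<DefaultHasher> makes hashing start from a fixed default state instead of RandomState's per-process random keys, so no OS RNG is needed and iteration order is reproducible for the same insertions.

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;
use std::time::Instant;

// A HashMap whose hasher is built from a fixed default state rather than
// the randomly seeded RandomState.
type DeterministicMap<K, V> = HashMap<K, V, BuildHasherDefault<DefaultHasher>>;

fn main() {
    // `HashMap::new()` is only provided for the RandomState hasher, so a map
    // with a custom hasher is created via `default()` instead, which is the
    // switch from `HashMap::new()` to `HashMap::default()` seen in the diff below.
    let mut running_tests: DeterministicMap<String, Instant> = HashMap::default();
    running_tests.insert("example_test".to_string(), Instant::now());
    println!("tracking {} running test(s)", running_tests.len());
}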
@@ -99,6 +99,10 @@ mod formatters;
 
 use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
 
+/// Whether to execute tests concurrently or not
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Concurrent { Yes, No }
+
 // The name of a test. By convention this follows the rules for rust
 // paths; i.e., it should be a series of identifiers separated by double
 // colons. This way if some test runner wants to arrange the tests
@@ -1073,8 +1077,12 @@ pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F)
 where
     F: FnMut(TestEvent) -> io::Result<()>,
 {
-    use std::collections::HashMap;
+    use std::collections::{self, HashMap};
+    use std::hash::BuildHasherDefault;
     use std::sync::mpsc::RecvTimeoutError;
+    // Use a deterministic hasher
+    type TestMap =
+        HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
 
     let tests_len = tests.len();
 
@@ -1113,9 +1121,9 @@ where
 
     let (tx, rx) = channel::<MonitorMsg>();
 
-    let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
+    let mut running_tests: TestMap = HashMap::default();
 
-    fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
+    fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
         let now = Instant::now();
         let timed_out = running_tests
             .iter()
@@ -1133,7 +1141,7 @@ where
         timed_out
     };
 
-    fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
+    fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
         running_tests.values().min().map(|next_timeout| {
             let now = Instant::now();
             if *next_timeout >= now {
@@ -1148,7 +1156,7 @@ where
         while !remaining.is_empty() {
             let test = remaining.pop().unwrap();
             callback(TeWait(test.desc.clone()))?;
-            run_test(opts, !opts.run_tests, test, tx.clone());
+            run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
             let (test, result, stdout) = rx.recv().unwrap();
             callback(TeResult(test, result, stdout))?;
         }
@@ -1159,7 +1167,7 @@ where
                 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
                 running_tests.insert(test.desc.clone(), timeout);
                 callback(TeWait(test.desc.clone()))?; //here no pad
-                run_test(opts, !opts.run_tests, test, tx.clone());
+                run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::Yes);
                 pending += 1;
             }
 
@@ -1191,7 +1199,7 @@ where
     // All benchmarks run at the end, in serial.
     for b in filtered_benchs {
         callback(TeWait(b.desc.clone()))?;
-        run_test(opts, false, b, tx.clone());
+        run_test(opts, false, b, tx.clone(), Concurrent::No);
         let (test, result, stdout) = rx.recv().unwrap();
         callback(TeResult(test, result, stdout))?;
     }
@@ -1393,6 +1401,7 @@ pub fn run_test(
     force_ignore: bool,
     test: TestDescAndFn,
     monitor_ch: Sender<MonitorMsg>,
+    concurrency: Concurrent,
 ) {
     let TestDescAndFn { desc, testfn } = test;
 
@@ -1409,6 +1418,7 @@ pub fn run_test(
         monitor_ch: Sender<MonitorMsg>,
         nocapture: bool,
         testfn: Box<dyn FnBox() + Send>,
+        concurrency: Concurrent,
     ) {
         // Buffer for capturing standard I/O
         let data = Arc::new(Mutex::new(Vec::new()));
@@ -1443,7 +1453,7 @@ pub fn run_test(
         // the test synchronously, regardless of the concurrency
        // level.
         let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
-        if supports_threads {
+        if concurrency == Concurrent::Yes && supports_threads {
             let cfg = thread::Builder::new().name(name.as_slice().to_owned());
             cfg.spawn(runtest).unwrap();
         } else {
@@ -1464,13 +1474,14 @@ pub fn run_test(
         }
         DynTestFn(f) => {
             let cb = move || __rust_begin_short_backtrace(f);
-            run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
+            run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb), concurrency)
         }
         StaticTestFn(f) => run_test_inner(
             desc,
             monitor_ch,
             opts.nocapture,
             Box::new(move || __rust_begin_short_backtrace(f)),
+            concurrency,
         ),
     }
 }
@@ -1753,6 +1764,7 @@ mod tests {
     use std::sync::mpsc::channel;
     use bench;
     use Bencher;
+    use Concurrent;
 
 
     fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
@@ -1793,7 +1805,7 @@ mod tests {
            testfn: DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
-        run_test(&TestOpts::new(), false, desc, tx);
+        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res != TrOk);
    }
@@ -1811,7 +1823,7 @@ mod tests {
            testfn: DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
-        run_test(&TestOpts::new(), false, desc, tx);
+        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrIgnored);
    }
@@ -1831,7 +1843,7 @@ mod tests {
            testfn: DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
-        run_test(&TestOpts::new(), false, desc, tx);
+        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }
@@ -1851,7 +1863,7 @@ mod tests {
            testfn: DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
-        run_test(&TestOpts::new(), false, desc, tx);
+        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }
@@ -1873,7 +1885,7 @@ mod tests {
            testfn: DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
-        run_test(&TestOpts::new(), false, desc, tx);
+        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
    }
@@ -1891,7 +1903,7 @@ mod tests {
            testfn: DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
-        run_test(&TestOpts::new(), false, desc, tx);
+        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }
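To illustrate the second half of the change, here is a small self-contained sketch (not the libtest code itself; the function name `run_maybe_on_thread` and the join at the end are inventions of this example) of the dispatch decision `run_test_inner` now makes in the diff above: a test closure is handed to a new thread only when the target supports threads and the caller passed Concurrent::Yes; otherwise it runs directly on the calling thread, so no thread needs to be spawned at all.

use std::thread;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Concurrent { Yes, No }

// Hypothetical stand-in for run_test_inner's dispatch: spawn a named thread
// only when concurrency was requested and the platform can spawn threads.
fn run_maybe_on_thread<F>(name: String, concurrency: Concurrent, runtest: F)
where
    F: FnOnce() + Send + 'static,
{
    let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
    if concurrency == Concurrent::Yes && supports_threads {
        let cfg = thread::Builder::new().name(name);
        // Joined here only to keep the sketch self-contained; libtest instead
        // lets the spawned thread report its result over a channel.
        cfg.spawn(runtest).unwrap().join().unwrap();
    } else {
        // No thread is spawned: the test body runs on the current (main) thread,
        // which is what lets the harness work where threads are unavailable.
        runtest();
    }
}

fn main() {
    run_maybe_on_thread("demo".to_string(), Concurrent::No, || println!("ran inline"));
}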
@@ -2,7 +2,7 @@
 { "type": "test", "event": "started", "name": "a" }
 { "type": "test", "name": "a", "event": "ok" }
 { "type": "test", "event": "started", "name": "b" }
-{ "type": "test", "name": "b", "event": "failed", "stdout": "thread 'b' panicked at 'assertion failed: false', f.rs:18:5\nnote: Run with `RUST_BACKTRACE=1` for a backtrace.\n" }
+{ "type": "test", "name": "b", "event": "failed", "stdout": "thread 'main' panicked at 'assertion failed: false', f.rs:18:5\nnote: Run with `RUST_BACKTRACE=1` for a backtrace.\n" }
 { "type": "test", "event": "started", "name": "c" }
 { "type": "test", "name": "c", "event": "ok" }
 { "type": "test", "event": "started", "name": "d" }