auto merge of #7623 : graydon/rust/codegen-compiletests, r=pcwalton
This is some initial sketch-work for #7461, though it will depend on #7459 to be useful for anything. For the time being, just infrastructure.
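Concretely, the new `codegen` compiletest mode compiles each src/test/codegen/*.rs test and its matching *.cc sibling (the latter via clang), extracts the `test` function from both bitcode files with llvm-extract, and disassembles the results with llvm-dis; actually diffing the two IR listings is left for the follow-up work tracked in the issues above.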
This commit is contained in:
commit 4478ded57c

configure (vendored, 1 change)
@@ -731,6 +731,7 @@ do
  make_dir $h/test/perf
  make_dir $h/test/pretty
  make_dir $h/test/debug-info
+  make_dir $h/test/codegen
  make_dir $h/test/doc-tutorial
  make_dir $h/test/doc-tutorial-ffi
  make_dir $h/test/doc-tutorial-macros
mk/tests.mk (19 changes)
@@ -246,6 +246,7 @@ check-stage$(1)-T-$(2)-H-$(3)-exec: \
        check-stage$(1)-T-$(2)-H-$(3)-crates-exec \
        check-stage$(1)-T-$(2)-H-$(3)-bench-exec \
        check-stage$(1)-T-$(2)-H-$(3)-debuginfo-exec \
+       check-stage$(1)-T-$(2)-H-$(3)-codegen-exec \
        check-stage$(1)-T-$(2)-H-$(3)-doc-exec \
        check-stage$(1)-T-$(2)-H-$(3)-pretty-exec
@@ -430,6 +431,8 @@ CFAIL_RS := $(wildcard $(S)src/test/compile-fail/*.rs)
BENCH_RS := $(wildcard $(S)src/test/bench/*.rs)
PRETTY_RS := $(wildcard $(S)src/test/pretty/*.rs)
DEBUGINFO_RS := $(wildcard $(S)src/test/debug-info/*.rs)
+CODEGEN_RS := $(wildcard $(S)src/test/codegen/*.rs)
+CODEGEN_CC := $(wildcard $(S)src/test/codegen/*.cc)

# perf tests are the same as bench tests only they run under
# a performance monitor.
@@ -443,6 +446,7 @@ BENCH_TESTS := $(BENCH_RS)
PERF_TESTS := $(PERF_RS)
PRETTY_TESTS := $(PRETTY_RS)
DEBUGINFO_TESTS := $(DEBUGINFO_RS)
+CODEGEN_TESTS := $(CODEGEN_RS) $(CODEGEN_CC)

CTEST_SRC_BASE_rpass = run-pass
CTEST_BUILD_BASE_rpass = run-pass
@@ -479,10 +483,19 @@ CTEST_BUILD_BASE_debuginfo = debug-info
CTEST_MODE_debuginfo = debug-info
CTEST_RUNTOOL_debuginfo = $(CTEST_RUNTOOL)

+CTEST_SRC_BASE_codegen = codegen
+CTEST_BUILD_BASE_codegen = codegen
+CTEST_MODE_codegen = codegen
+CTEST_RUNTOOL_codegen = $(CTEST_RUNTOOL)
+
ifeq ($(CFG_GDB),)
CTEST_DISABLE_debuginfo = "no gdb found"
endif

+ifeq ($(CFG_CLANG),)
+CTEST_DISABLE_codegen = "no clang found"
+endif
+
ifeq ($(CFG_OSTYPE),apple-darwin)
CTEST_DISABLE_debuginfo = "gdb on darwing needs root"
endif
@@ -507,6 +520,8 @@ CTEST_COMMON_ARGS$(1)-T-$(2)-H-$(3) := \
        --compile-lib-path $$(HLIB$(1)_H_$(3)) \
        --run-lib-path $$(TLIB$(1)_T_$(2)_H_$(3)) \
        --rustc-path $$(HBIN$(1)_H_$(3))/rustc$$(X_$(3)) \
+       --clang-path $(if $(CFG_CLANG),$(CFG_CLANG),clang) \
+       --llvm-bin-path $(CFG_LLVM_INST_DIR_$(CFG_BUILD_TRIPLE))/bin \
        --aux-base $$(S)src/test/auxiliary/ \
        --stage-id stage$(1)-$(2) \
        --target $(2) \
@@ -522,6 +537,7 @@ CTEST_DEPS_cfail_$(1)-T-$(2)-H-$(3) = $$(CFAIL_TESTS)
CTEST_DEPS_bench_$(1)-T-$(2)-H-$(3) = $$(BENCH_TESTS)
CTEST_DEPS_perf_$(1)-T-$(2)-H-$(3) = $$(PERF_TESTS)
CTEST_DEPS_debuginfo_$(1)-T-$(2)-H-$(3) = $$(DEBUGINFO_TESTS)
+CTEST_DEPS_codegen_$(1)-T-$(2)-H-$(3) = $$(CODEGEN_TESTS)

endef
@@ -565,7 +581,7 @@ endif

endef

-CTEST_NAMES = rpass rpass-full rfail cfail bench perf debuginfo
+CTEST_NAMES = rpass rpass-full rfail cfail bench perf debuginfo codegen

$(foreach host,$(CFG_HOST_TRIPLES), \
 $(eval $(foreach target,$(CFG_TARGET_TRIPLES), \
@@ -674,6 +690,7 @@ TEST_GROUPS = \
        bench \
        perf \
        debuginfo \
+       codegen \
        doc \
        $(foreach docname,$(DOC_TEST_NAMES),doc-$(docname)) \
        pretty \
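With `codegen` appended to both CTEST_NAMES and TEST_GROUPS, the new suite should be reachable through the same generated targets as the existing groups, e.g. a hypothetical `make check-stage2-codegen` or the per-triple check-stage$(1)-T-$(2)-H-$(3)-codegen-exec targets, and it is skipped with a "no clang found" message when CFG_CLANG is unset.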
src/compiletest/common.rs
@@ -15,6 +15,7 @@ pub enum mode {
    mode_run_pass,
    mode_pretty,
    mode_debug_info,
+    mode_codegen
}

pub struct config {
@@ -27,6 +28,12 @@ pub struct config {
    // The rustc executable
    rustc_path: Path,

+    // The clang executable
+    clang_path: Option<Path>,
+
+    // The llvm binaries path
+    llvm_bin_path: Option<Path>,
+
    // The directory containing the tests to run
    src_base: Path,
src/compiletest/compiletest.rs
@@ -19,6 +19,7 @@ extern mod extra;
use std::os;

use extra::getopts;
+use extra::getopts::groups::{optopt, optflag, reqopt};
use extra::test;

use common::config;
@@ -27,6 +28,7 @@ use common::mode_run_fail;
use common::mode_compile_fail;
use common::mode_pretty;
use common::mode_debug_info;
+use common::mode_codegen;
use common::mode;
use util::logv;
@@ -45,31 +47,54 @@ pub fn main() {
}

pub fn parse_config(args: ~[~str]) -> config {
-    let opts =
-        ~[getopts::reqopt("compile-lib-path"),
-          getopts::reqopt("run-lib-path"),
-          getopts::reqopt("rustc-path"), getopts::reqopt("src-base"),
-          getopts::reqopt("build-base"), getopts::reqopt("aux-base"),
-          getopts::reqopt("stage-id"),
-          getopts::reqopt("mode"), getopts::optflag("ignored"),
-          getopts::optopt("runtool"), getopts::optopt("rustcflags"),
-          getopts::optflag("verbose"),
-          getopts::optopt("logfile"),
-          getopts::optflag("jit"),
-          getopts::optflag("newrt"),
-          getopts::optopt("target"),
-          getopts::optopt("adb-path"),
-          getopts::optopt("adb-test-dir")];
+    let groups : ~[getopts::groups::OptGroup] =
+        ~[reqopt("", "compile-lib-path", "path to host shared libraries", "PATH"),
+          reqopt("", "run-lib-path", "path to target shared libraries", "PATH"),
+          reqopt("", "rustc-path", "path to rustc to use for compiling", "PATH"),
+          optopt("", "clang-path", "path to executable for codegen tests", "PATH"),
+          optopt("", "llvm-bin-path", "path to directory holding llvm binaries", "DIR"),
+          reqopt("", "src-base", "directory to scan for test files", "PATH"),
+          reqopt("", "build-base", "directory to deposit test outputs", "PATH"),
+          reqopt("", "aux-base", "directory to find auxiliary test files", "PATH"),
+          reqopt("", "stage-id", "the target-stage identifier", "stageN-TARGET"),
+          reqopt("", "mode", "which sort of compile tests to run",
+                 "(compile-fail|run-fail|run-pass|pretty|debug-info)"),
+          optflag("", "ignored", "run tests marked as ignored / xfailed"),
+          optopt("", "runtool", "supervisor program to run tests under \
+                                 (eg. emulator, valgrind)", "PROGRAM"),
+          optopt("", "rustcflags", "flags to pass to rustc", "FLAGS"),
+          optflag("", "verbose", "run tests verbosely, showing all output"),
+          optopt("", "logfile", "file to log test execution to", "FILE"),
+          optflag("", "jit", "run tests under the JIT"),
+          optflag("", "newrt", "run tests on the new runtime / scheduler"),
+          optopt("", "target", "the target to build for", "TARGET"),
+          optopt("", "adb-path", "path to the android debugger", "PATH"),
+          optopt("", "adb-test-dir", "path to tests for the android debugger", "PATH"),
+          optflag("h", "help", "show this message"),
+         ];

+    assert!(!args.is_empty());
+    let argv0 = copy args[0];
    let args_ = args.tail();
+    if args[1] == ~"-h" || args[1] == ~"--help" {
+        let message = fmt!("Usage: %s [OPTIONS] [TESTNAME...]", argv0);
+        println(getopts::groups::usage(message, groups));
+        fail!()
+    }

    let matches =
-        &match getopts::getopts(args_, opts) {
+        &match getopts::groups::getopts(args_, groups) {
            Ok(m) => m,
            Err(f) => fail!(getopts::fail_str(f))
        };

+    if getopts::opt_present(matches, "h") || getopts::opt_present(matches, "help") {
+        let message = fmt!("Usage: %s [OPTIONS] [TESTNAME...]", argv0);
+        println(getopts::groups::usage(message, groups));
+        fail!()
+    }
+
    fn opt_path(m: &getopts::Matches, nm: &str) -> Path {
        Path(getopts::opt_str(m, nm))
    }
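The point of moving from the bare getopts list to the groups API is the free --help text generated from the option descriptions. A minimal sketch of the same pattern with today's standalone getopts crate (only one option shown; everything here is illustrative, not part of the patch):

use getopts::Options;

fn main() {
    let args: Vec<String> = std::env::args().collect();
    let mut opts = Options::new();
    opts.reqopt("", "rustc-path", "path to rustc to use for compiling", "PATH");
    opts.optflag("h", "help", "show this message");
    // Mirrors the patch's early `-h` check: a required option would make
    // parse() fail before we ever got a chance to print the help text.
    if args.iter().any(|a| a == "-h" || a == "--help") {
        let brief = format!("Usage: {} [OPTIONS] [TESTNAME...]", args[0]);
        print!("{}", opts.usage(&brief)); // help text comes from the descriptions
        return;
    }
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(f) => panic!("{}", f),
    };
    let _rustc_path = matches.opt_str("rustc-path"); // feed into config, as parse_config does
}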
@@ -78,6 +103,8 @@ pub fn parse_config(args: ~[~str]) -> config {
        compile_lib_path: getopts::opt_str(matches, "compile-lib-path"),
        run_lib_path: getopts::opt_str(matches, "run-lib-path"),
        rustc_path: opt_path(matches, "rustc-path"),
+        clang_path: getopts::opt_maybe_str(matches, "clang-path").map(|s| Path(*s)),
+        llvm_bin_path: getopts::opt_maybe_str(matches, "llvm-bin-path").map(|s| Path(*s)),
        src_base: opt_path(matches, "src-base"),
        build_base: opt_path(matches, "build-base"),
        aux_base: opt_path(matches, "aux-base"),
@@ -159,6 +186,7 @@ pub fn str_mode(s: ~str) -> mode {
      ~"run-pass" => mode_run_pass,
      ~"pretty" => mode_pretty,
      ~"debug-info" => mode_debug_info,
+      ~"codegen" => mode_codegen,
      _ => fail!("invalid mode")
    }
}
@@ -170,6 +198,7 @@ pub fn mode_str(mode: mode) -> ~str {
      mode_run_pass => ~"run-pass",
      mode_pretty => ~"pretty",
      mode_debug_info => ~"debug-info",
+      mode_codegen => ~"codegen",
    }
}
@@ -187,8 +216,9 @@ pub fn test_opts(config: &config) -> test::TestOpts {
        logfile: copy config.logfile,
        run_tests: true,
        run_benchmarks: false,
-        save_results: None,
-        compare_results: None
+        ratchet_metrics: None,
+        ratchet_noise_percent: None,
+        save_metrics: None,
    }
}
|
@ -39,7 +39,8 @@ pub fn run(config: config, testfile: ~str) {
|
||||
mode_run_fail => run_rfail_test(&config, &props, &testfile),
|
||||
mode_run_pass => run_rpass_test(&config, &props, &testfile),
|
||||
mode_pretty => run_pretty_test(&config, &props, &testfile),
|
||||
mode_debug_info => run_debuginfo_test(&config, &props, &testfile)
|
||||
mode_debug_info => run_debuginfo_test(&config, &props, &testfile),
|
||||
mode_codegen => run_codegen_test(&config, &props, &testfile)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -835,3 +836,118 @@ fn _arm_push_aux_shared_library(config: &config, testfile: &Path) {
            }
        }
    }
+
+// codegen tests (vs. clang)
+
+fn make_o_name(config: &config, testfile: &Path) -> Path {
+    output_base_name(config, testfile).with_filetype("o")
+}
+
+fn append_suffix_to_stem(p: &Path, suffix: &str) -> Path {
+    if suffix.len() == 0 {
+        copy *p
+    } else {
+        let stem = p.filestem().get();
+        p.with_filestem(stem + "-" + suffix)
+    }
+}
+
+fn compile_test_and_save_bitcode(config: &config, props: &TestProps,
+                                 testfile: &Path) -> ProcRes {
+    let link_args = ~[~"-L", aux_output_dir_name(config, testfile).to_str()];
+    let llvm_args = ~[~"-c", ~"--lib", ~"--save-temps"];
+    let args = make_compile_args(config, props,
+                                 link_args + llvm_args,
+                                 make_o_name, testfile);
+    compose_and_run_compiler(config, props, testfile, args, None)
+}
+
+fn compile_cc_with_clang_and_save_bitcode(config: &config, _props: &TestProps,
+                                          testfile: &Path) -> ProcRes {
+    let bitcodefile = output_base_name(config, testfile).with_filetype("bc");
+    let bitcodefile = append_suffix_to_stem(&bitcodefile, "clang");
+    let ProcArgs = ProcArgs {
+        prog: config.clang_path.get_ref().to_str(),
+        args: ~[~"-c",
+                ~"-emit-llvm",
+                ~"-o", bitcodefile.to_str(),
+                testfile.with_filetype("cc").to_str() ]
+    };
+    compose_and_run(config, testfile, ProcArgs, ~[], "", None)
+}
+
+fn extract_function_from_bitcode(config: &config, _props: &TestProps,
+                                 fname: &str, testfile: &Path,
+                                 suffix: &str) -> ProcRes {
+    let bitcodefile = output_base_name(config, testfile).with_filetype("bc");
+    let bitcodefile = append_suffix_to_stem(&bitcodefile, suffix);
+    let extracted_bc = append_suffix_to_stem(&bitcodefile, "extract");
+    let ProcArgs = ProcArgs {
+        prog: config.llvm_bin_path.get_ref().push("llvm-extract").to_str(),
+        args: ~[~"-func=" + fname,
+                ~"-o=" + extracted_bc.to_str(),
+                bitcodefile.to_str() ]
+    };
+    compose_and_run(config, testfile, ProcArgs, ~[], "", None)
+}
+
+fn disassemble_extract(config: &config, _props: &TestProps,
+                       testfile: &Path, suffix: &str) -> ProcRes {
+    let bitcodefile = output_base_name(config, testfile).with_filetype("bc");
+    let bitcodefile = append_suffix_to_stem(&bitcodefile, suffix);
+    let extracted_bc = append_suffix_to_stem(&bitcodefile, "extract");
+    let extracted_ll = extracted_bc.with_filetype("ll");
+    let ProcArgs = ProcArgs {
+        prog: config.llvm_bin_path.get_ref().push("llvm-dis").to_str(),
+        args: ~[~"-o=" + extracted_ll.to_str(),
+                extracted_bc.to_str() ]
+    };
+    compose_and_run(config, testfile, ProcArgs, ~[], "", None)
+}
+
+
+fn run_codegen_test(config: &config, props: &TestProps, testfile: &Path) {
+
+    if config.llvm_bin_path.is_none() {
+        fatal(~"missing --llvm-bin-path");
+    }
+
+    if config.clang_path.is_none() {
+        fatal(~"missing --clang-path");
+    }
+
+    let mut ProcRes = compile_test_and_save_bitcode(config, props, testfile);
+    if ProcRes.status != 0 {
+        fatal_ProcRes(~"compilation failed!", &ProcRes);
+    }
+
+    ProcRes = extract_function_from_bitcode(config, props, "test", testfile, "");
+    if ProcRes.status != 0 {
+        fatal_ProcRes(~"extracting 'test' function failed", &ProcRes);
+    }
+
+    ProcRes = disassemble_extract(config, props, testfile, "");
+    if ProcRes.status != 0 {
+        fatal_ProcRes(~"disassembling extract failed", &ProcRes);
+    }
+
+
+    let mut ProcRes = compile_cc_with_clang_and_save_bitcode(config, props, testfile);
+    if ProcRes.status != 0 {
+        fatal_ProcRes(~"compilation failed!", &ProcRes);
+    }
+
+    ProcRes = extract_function_from_bitcode(config, props, "test", testfile, "clang");
+    if ProcRes.status != 0 {
+        fatal_ProcRes(~"extracting 'test' function failed", &ProcRes);
+    }
+
+    ProcRes = disassemble_extract(config, props, testfile, "clang");
+    if ProcRes.status != 0 {
+        fatal_ProcRes(~"disassembling extract failed", &ProcRes);
+    }
+
+}
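Taken together, run_codegen_test drives a four-step pipeline per test: rustc compiles foo.rs keeping its bitcode, clang compiles foo.cc to bitcode, llvm-extract pulls the `test` function out of each module, and llvm-dis turns both extracts into readable IR. A standalone sketch of that flow, written in today's Rust with modern flag spellings and hypothetical file names:

use std::process::Command;

// Run one pipeline step, aborting loudly like fatal_ProcRes does.
fn run(desc: &str, cmd: &mut Command) {
    let status = cmd.status().unwrap_or_else(|e| panic!("{}: {}", desc, e));
    assert!(status.success(), "{} failed", desc);
}

fn main() {
    // 1. rustc: hello.rs -> hello.bc (the patch passes -c --lib --save-temps;
    //    --emit=llvm-bc is the modern equivalent).
    run("compile rust", Command::new("rustc")
        .args(["--emit=llvm-bc", "hello.rs", "-o", "hello.bc"]));
    // 2. clang: hello.cc -> hello-clang.bc, exactly as in
    //    compile_cc_with_clang_and_save_bitcode.
    run("compile c++", Command::new("clang")
        .args(["-c", "-emit-llvm", "-o", "hello-clang.bc", "hello.cc"]));
    // 3 + 4. Extract the `test` function from each module, then disassemble.
    for stem in ["hello", "hello-clang"] {
        let bc = format!("{}.bc", stem);
        let ext_bc = format!("{}-extract.bc", stem);
        let ext_ll = format!("{}-extract.ll", stem);
        run("llvm-extract", Command::new("llvm-extract")
            .arg("-func=test")
            .arg(format!("-o={}", ext_bc))
            .arg(&bc));
        run("llvm-dis", Command::new("llvm-dis")
            .arg(format!("-o={}", ext_ll))
            .arg(&ext_bc));
    }
    // The harness stops here for now; comparing the two .ll files is the
    // follow-up the PR description alludes to.
}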
src/libextra/json.rs
@@ -27,6 +27,7 @@ use std::to_str;
use serialize::Encodable;
use serialize;
use sort::Sort;
+use treemap::TreeMap;

/// Represents a json value
pub enum Json {
@@ -1225,7 +1226,7 @@ impl Ord for Json {
}

/// A trait for converting values to JSON
-trait ToJson {
+pub trait ToJson {
    /// Converts the value of `self` to an instance of JSON
    fn to_json(&self) -> Json;
}
@@ -1330,7 +1331,17 @@ impl<A:ToJson> ToJson for ~[A] {
    fn to_json(&self) -> Json { List(self.map(|elt| elt.to_json())) }
}

-impl<A:ToJson + Copy> ToJson for HashMap<~str, A> {
+impl<A:ToJson> ToJson for HashMap<~str, A> {
    fn to_json(&self) -> Json {
        let mut d = HashMap::new();
        for self.iter().advance |(key, value)| {
            d.insert(copy *key, value.to_json());
        }
        Object(~d)
    }
}

+impl<A:ToJson> ToJson for TreeMap<~str, A> {
+    fn to_json(&self) -> Json {
+        let mut d = HashMap::new();
+        for self.iter().advance |(key, value)| {
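The TreeMap impl (and the new pub on ToJson) exist so that test.rs can serialize a MetricMap, which is a TreeMap<~str, Metric>, through to_json. A minimal modern analogue of the same idea, using serde_json in place of extra::json and BTreeMap in place of TreeMap (illustrative only):

use std::collections::BTreeMap;
use serde::Serialize;

#[derive(Serialize)]
struct Metric {
    value: f64,
    noise: f64,
}

fn main() {
    let mut m: BTreeMap<String, Metric> = BTreeMap::new();
    m.insert("bench::sort".to_string(), Metric { value: 1250.0, noise: 50.0 });
    // An ordered map keeps keys sorted, which makes the saved file stable
    // from run to run (a likely motivation for TreeMap over HashMap here).
    println!("{}", serde_json::to_string_pretty(&m).unwrap());
}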
src/libextra/stats.rs
@@ -100,6 +100,7 @@ pub trait Stats {
}

/// Extracted collection of all the summary statistics of a sample set.
+#[deriving(Eq)]
struct Summary {
    sum: f64,
    min: f64,
@@ -116,7 +117,9 @@ struct Summary {
}

impl Summary {
-    fn new(samples: &[f64]) -> Summary {
+
+    /// Construct a new summary of a sample set.
+    pub fn new(samples: &[f64]) -> Summary {
        Summary {
            sum: samples.sum(),
            min: samples.min(),
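Summary needs #[deriving(Eq)] because BenchSamples now embeds a Summary and itself derives Eq; derived comparisons only work when every field supports them. The same constraint in today's Rust (illustrative sketch):

// Without the derive on Summary, the derive on BenchSamples fails to compile.
#[derive(PartialEq)]
struct Summary {
    sum: f64,
    min: f64,
}

#[derive(PartialEq)]
struct BenchSamples {
    ns_iter_summ: Summary,
    mb_s: usize,
}

fn main() {
    let a = BenchSamples { ns_iter_summ: Summary { sum: 1.0, min: 0.5 }, mb_s: 3 };
    let b = BenchSamples { ns_iter_summ: Summary { sum: 1.0, min: 0.5 }, mb_s: 3 };
    assert!(a == b);
}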
src/libextra/test.rs
@@ -17,24 +17,26 @@


use getopts;
use json::ToJson;
use json;
use serialize::Decodable;
use sort;
use stats::Stats;
use stats;
use term;
use time::precise_time_ns;
use treemap::TreeMap;

use std::comm::{stream, SharedChan};
use std::either;
use std::io;
use std::num;
use std::option;
use std::rand::RngUtil;
use std::rand;
use std::result;
use std::task;
use std::to_str::ToStr;
use std::u64;
use std::uint;
use std::vec;
use std::f64;
use std::hashmap::HashMap;
use std::os;


// The name of a test. By convention this follows the rules for rust
@@ -87,6 +89,25 @@ pub struct TestDescAndFn {
    testfn: TestFn,
}

+#[deriving(Encodable,Decodable,Eq)]
+pub struct Metric {
+    value: f64,
+    noise: f64
+}
+
+pub struct MetricMap(TreeMap<~str,Metric>);
+
+/// Analysis of a single change in metric
+pub enum MetricChange {
+    LikelyNoise,
+    MetricAdded,
+    MetricRemoved,
+    Improvement(f64),
+    Regression(f64)
+}
+
+pub type MetricDiff = TreeMap<~str,MetricChange>;
+
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[~str], tests: ~[TestDescAndFn]) {
@@ -127,8 +148,9 @@ pub struct TestOpts {
    run_ignored: bool,
    run_tests: bool,
    run_benchmarks: bool,
-    save_results: Option<Path>,
-    compare_results: Option<Path>,
+    ratchet_metrics: Option<Path>,
+    ratchet_noise_percent: Option<f64>,
+    save_metrics: Option<Path>,
    logfile: Option<Path>
}
@@ -140,8 +162,9 @@ pub fn parse_opts(args: &[~str]) -> OptRes {
    let opts = ~[getopts::optflag("ignored"),
                 getopts::optflag("test"),
                 getopts::optflag("bench"),
-                getopts::optopt("save"),
-                getopts::optopt("diff"),
+                getopts::optopt("save-metrics"),
+                getopts::optopt("ratchet-metrics"),
+                getopts::optopt("ratchet-noise-percent"),
                 getopts::optopt("logfile")];
    let matches =
        match getopts::getopts(args_, opts) {
@@ -151,8 +174,8 @@ pub fn parse_opts(args: &[~str]) -> OptRes {

    let filter =
        if matches.free.len() > 0 {
-            option::Some(copy (matches).free[0])
-        } else { option::None };
+            Some(copy (matches).free[0])
+        } else { None };

    let run_ignored = getopts::opt_present(&matches, "ignored");
|
||||
@ -163,19 +186,24 @@ pub fn parse_opts(args: &[~str]) -> OptRes {
|
||||
let run_tests = ! run_benchmarks ||
|
||||
getopts::opt_present(&matches, "test");
|
||||
|
||||
let save_results = getopts::opt_maybe_str(&matches, "save");
|
||||
let save_results = save_results.map(|s| Path(*s));
|
||||
let ratchet_metrics = getopts::opt_maybe_str(&matches, "ratchet-metrics");
|
||||
let ratchet_metrics = ratchet_metrics.map(|s| Path(*s));
|
||||
|
||||
let compare_results = getopts::opt_maybe_str(&matches, "diff");
|
||||
let compare_results = compare_results.map(|s| Path(*s));
|
||||
let ratchet_noise_percent =
|
||||
getopts::opt_maybe_str(&matches, "ratchet-noise-percent");
|
||||
let ratchet_noise_percent = ratchet_noise_percent.map(|s| f64::from_str(*s).get());
|
||||
|
||||
let save_metrics = getopts::opt_maybe_str(&matches, "save-metrics");
|
||||
let save_metrics = save_metrics.map(|s| Path(*s));
|
||||
|
||||
let test_opts = TestOpts {
|
||||
filter: filter,
|
||||
run_ignored: run_ignored,
|
||||
run_tests: run_tests,
|
||||
run_benchmarks: run_benchmarks,
|
||||
save_results: save_results,
|
||||
compare_results: compare_results,
|
||||
ratchet_metrics: ratchet_metrics,
|
||||
ratchet_noise_percent: ratchet_noise_percent,
|
||||
save_metrics: save_metrics,
|
||||
logfile: logfile
|
||||
};
|
||||
|
||||
@@ -184,7 +212,7 @@ pub fn parse_opts(args: &[~str]) -> OptRes {

#[deriving(Eq)]
pub struct BenchSamples {
-    ns_iter_samples: ~[f64],
+    ns_iter_summ: stats::Summary,
    mb_s: uint
}
@@ -194,181 +222,288 @@ pub enum TestResult { TrOk, TrFailed, TrIgnored, TrBench(BenchSamples) }
struct ConsoleTestState {
    out: @io::Writer,
    log_out: Option<@io::Writer>,
+    term: Option<term::Terminal>,
    use_color: bool,
    total: uint,
    passed: uint,
    failed: uint,
    ignored: uint,
    benchmarked: uint,
+    metrics: MetricMap,
    failures: ~[TestDesc]
}
+impl ConsoleTestState {
+    pub fn new(opts: &TestOpts) -> ConsoleTestState {
+        let log_out = match opts.logfile {
+            Some(ref path) => match io::file_writer(path,
+                                                    [io::Create,
+                                                     io::Truncate]) {
+                result::Ok(w) => Some(w),
+                result::Err(ref s) => {
+                    fail!("can't open output file: %s", *s)
+                }
+            },
+            None => None
+        };
+        let out = io::stdout();
+        let term = match term::Terminal::new(out) {
+            Err(_) => None,
+            Ok(t) => Some(t)
+        };
+        ConsoleTestState {
+            out: out,
+            log_out: log_out,
+            use_color: use_color(),
+            term: term,
+            total: 0u,
+            passed: 0u,
+            failed: 0u,
+            ignored: 0u,
+            benchmarked: 0u,
+            metrics: MetricMap::new(),
+            failures: ~[]
+        }
+    }
+
+    pub fn write_ok(&self) {
+        self.write_pretty("ok", term::color::GREEN);
+    }
+
+    pub fn write_failed(&self) {
+        self.write_pretty("FAILED", term::color::RED);
+    }
+
+    pub fn write_ignored(&self) {
+        self.write_pretty("ignored", term::color::YELLOW);
+    }
+
+    pub fn write_bench(&self) {
+        self.write_pretty("bench", term::color::CYAN);
+    }
+
+    pub fn write_added(&self) {
+        self.write_pretty("added", term::color::GREEN);
+    }
+
+    pub fn write_improved(&self) {
+        self.write_pretty("improved", term::color::GREEN);
+    }
+
+    pub fn write_removed(&self) {
+        self.write_pretty("removed", term::color::YELLOW);
+    }
+
+    pub fn write_regressed(&self) {
+        self.write_pretty("regressed", term::color::RED);
+    }
+
+    pub fn write_pretty(&self,
+                        word: &str,
+                        color: term::color::Color) {
+        match self.term {
+            None => self.out.write_str(word),
+            Some(ref t) => {
+                if self.use_color {
+                    t.fg(color);
+                }
+                self.out.write_str(word);
+                if self.use_color {
+                    t.reset();
+                }
+            }
+        }
+    }
+
+    pub fn write_run_start(&mut self, len: uint) {
+        self.total = len;
+        let noun = if len != 1 { &"tests" } else { &"test" };
+        self.out.write_line(fmt!("\nrunning %u %s", len, noun));
+    }
+
+    pub fn write_test_start(&self, test: &TestDesc) {
+        self.out.write_str(fmt!("test %s ... ", test.name.to_str()));
+    }
+
+    pub fn write_result(&self, result: &TestResult) {
+        match *result {
+            TrOk => self.write_ok(),
+            TrFailed => self.write_failed(),
+            TrIgnored => self.write_ignored(),
+            TrBench(ref bs) => {
+                self.write_bench();
+                self.out.write_str(": " + fmt_bench_samples(bs))
+            }
+        }
+        self.out.write_str(&"\n");
+    }
+
+    pub fn write_log(&self, test: &TestDesc, result: &TestResult) {
+        match self.log_out {
+            None => (),
+            Some(out) => {
+                out.write_line(fmt!("%s %s",
+                                    match *result {
+                                        TrOk => ~"ok",
+                                        TrFailed => ~"failed",
+                                        TrIgnored => ~"ignored",
+                                        TrBench(ref bs) => fmt_bench_samples(bs)
+                                    }, test.name.to_str()));
+            }
+        }
+    }
+
+    pub fn write_failures(&self) {
+        self.out.write_line("\nfailures:");
+        let mut failures = ~[];
+        for self.failures.iter().advance() |f| {
+            failures.push(f.name.to_str());
+        }
+        sort::tim_sort(failures);
+        for failures.iter().advance |name| {
+            self.out.write_line(fmt!("    %s", name.to_str()));
+        }
+    }
+
+    pub fn write_metric_diff(&self, diff: &MetricDiff) {
+        let mut noise = 0;
+        let mut improved = 0;
+        let mut regressed = 0;
+        let mut added = 0;
+        let mut removed = 0;
+
+        for diff.iter().advance() |(k, v)| {
+            match *v {
+                LikelyNoise => noise += 1,
+                MetricAdded => {
+                    added += 1;
+                    self.write_added();
+                    self.out.write_line(fmt!(": %s", *k));
+                }
+                MetricRemoved => {
+                    removed += 1;
+                    self.write_removed();
+                    self.out.write_line(fmt!(": %s", *k));
+                }
+                Improvement(pct) => {
+                    improved += 1;
+                    self.out.write_str(*k);
+                    self.out.write_str(": ");
+                    self.write_improved();
+                    self.out.write_line(fmt!(" by %.2f%%", pct as float))
+                }
+                Regression(pct) => {
+                    regressed += 1;
+                    self.out.write_str(*k);
+                    self.out.write_str(": ");
+                    self.write_regressed();
+                    self.out.write_line(fmt!(" by %.2f%%", pct as float))
+                }
+            }
+        }
+        self.out.write_line(fmt!("result of ratchet: %u metrics added, %u removed, \
+                                  %u improved, %u regressed, %u noise",
+                                 added, removed, improved, regressed, noise));
+        if regressed == 0 {
+            self.out.write_line("updated ratchet file")
+        } else {
+            self.out.write_line("left ratchet file untouched")
+        }
+    }
+
+    pub fn write_run_finish(&self,
+                            ratchet_metrics: &Option<Path>,
+                            ratchet_pct: Option<f64>) -> bool {
+        assert!(self.passed + self.failed + self.ignored + self.benchmarked == self.total);
+
+        let ratchet_success = match *ratchet_metrics {
+            None => true,
+            Some(ref pth) => {
+                self.out.write_str(fmt!("\nusing metrics ratchet: %s\n", pth.to_str()));
+                match ratchet_pct {
+                    None => (),
+                    Some(pct) =>
+                        self.out.write_str(fmt!("with noise-tolerance forced to: %f%%\n",
+                                                pct as float))
+                }
+                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
+                self.write_metric_diff(&diff);
+                ok
+            }
+        };
+
+        let test_success = self.failed == 0u;
+        if !test_success {
+            self.write_failures();
+        }
+
+        let success = ratchet_success && test_success;
+
+        self.out.write_str("\ntest result: ");
+        if success {
+            // There's no parallelism at this point so it's safe to use color
+            self.write_ok();
+        } else {
+            self.write_failed();
+        }
+        self.out.write_str(fmt!(". %u passed; %u failed; %u ignored, %u benchmarked\n\n",
+                                self.passed, self.failed, self.ignored, self.benchmarked));
+        return success;
+    }
+}
+pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
+    if bs.mb_s != 0 {
+        fmt!("%u ns/iter (+/- %u) = %u MB/s",
+             bs.ns_iter_summ.median as uint,
+             (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
+             bs.mb_s)
+    } else {
+        fmt!("%u ns/iter (+/- %u)",
+             bs.ns_iter_summ.median as uint,
+             (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
+    }
+}
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts,
                         tests: ~[TestDescAndFn]) -> bool {
    fn callback(event: &TestEvent, st: &mut ConsoleTestState) {
        debug!("callback(event=%?)", event);
        match copy *event {
-            TeFiltered(ref filtered_tests) => {
-                st.total = filtered_tests.len();
-                let noun = if st.total != 1 { ~"tests" } else { ~"test" };
-                st.out.write_line(fmt!("\nrunning %u %s", st.total, noun));
-            }
-            TeWait(ref test) => st.out.write_str(
-                fmt!("test %s ... ", test.name.to_str())),
-            TeResult(test, result) => {
-                match st.log_out {
-                    Some(f) => write_log(f, copy result, &test),
-                    None => ()
-                }
-                match result {
-                    TrOk => {
-                        st.passed += 1;
-                        write_ok(st.out, st.use_color);
-                        st.out.write_line("");
-                    }
-                    TrFailed => {
-                        st.failed += 1;
-                        write_failed(st.out, st.use_color);
-                        st.out.write_line("");
-                        st.failures.push(test);
-                    }
-                    TrIgnored => {
-                        st.ignored += 1;
-                        write_ignored(st.out, st.use_color);
-                        st.out.write_line("");
-                    }
-                    TrBench(bs) => {
-                        st.benchmarked += 1u;
-                        write_bench(st.out, st.use_color);
-                        st.out.write_line(fmt!(": %s",
-                                               fmt_bench_samples(&bs)));
-                    }
-                }
-            }
-        }
-    }
+            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
+            TeWait(ref test) => st.write_test_start(test),
+            TeResult(test, result) => {
+                st.write_log(&test, &result);
+                st.write_result(&result);
+                match result {
+                    TrOk => st.passed += 1,
+                    TrIgnored => st.ignored += 1,
+                    TrBench(bs) => {
+                        st.metrics.insert_metric(test.name.to_str(),
+                                                 bs.ns_iter_summ.median,
+                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
+                        st.benchmarked += 1
+                    }
+                    TrFailed => {
+                        st.failed += 1;
+                        st.failures.push(test);
+                    }
+                }
+            }
+        }
+    }
-    let log_out = match opts.logfile {
-        Some(ref path) => match io::file_writer(path,
-                                                [io::Create,
-                                                 io::Truncate]) {
-            result::Ok(w) => Some(w),
-            result::Err(ref s) => {
-                fail!("can't open output file: %s", *s)
-            }
-        },
-        None => None
-    };
-
-    let st = @mut ConsoleTestState {
-        out: io::stdout(),
-        log_out: log_out,
-        use_color: use_color(),
-        total: 0u,
-        passed: 0u,
-        failed: 0u,
-        ignored: 0u,
-        benchmarked: 0u,
-        failures: ~[]
-    };
-
+    let st = @mut ConsoleTestState::new(opts);
    run_tests(opts, tests, |x| callback(&x, st));
-    assert!(st.passed + st.failed +
-            st.ignored + st.benchmarked == st.total);
-    let success = st.failed == 0u;
-
-    if !success {
-        print_failures(st);
-    }
-
-    {
-        let st: &mut ConsoleTestState = st;
-        st.out.write_str(fmt!("\nresult: "));
-        if success {
-            // There's no parallelism at this point so it's safe to use color
-            write_ok(st.out, true);
-        } else {
-            write_failed(st.out, true);
-        }
-        st.out.write_str(fmt!(". %u passed; %u failed; %u ignored\n\n",
-                              st.passed, st.failed, st.ignored));
-    }
-
-    return success;
-
-    fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
-        use stats::Stats;
-        if bs.mb_s != 0 {
-            fmt!("%u ns/iter (+/- %u) = %u MB/s",
-                 bs.ns_iter_samples.median() as uint,
-                 3 * (bs.ns_iter_samples.median_abs_dev() as uint),
-                 bs.mb_s)
-        } else {
-            fmt!("%u ns/iter (+/- %u)",
-                 bs.ns_iter_samples.median() as uint,
-                 3 * (bs.ns_iter_samples.median_abs_dev() as uint))
-        }
-    }
-
-    fn write_log(out: @io::Writer, result: TestResult, test: &TestDesc) {
-        out.write_line(fmt!("%s %s",
-                            match result {
-                                TrOk => ~"ok",
-                                TrFailed => ~"failed",
-                                TrIgnored => ~"ignored",
-                                TrBench(ref bs) => fmt_bench_samples(bs)
-                            }, test.name.to_str()));
-    }
-
-    fn write_ok(out: @io::Writer, use_color: bool) {
-        write_pretty(out, "ok", term::color::GREEN, use_color);
-    }
-
-    fn write_failed(out: @io::Writer, use_color: bool) {
-        write_pretty(out, "FAILED", term::color::RED, use_color);
-    }
-
-    fn write_ignored(out: @io::Writer, use_color: bool) {
-        write_pretty(out, "ignored", term::color::YELLOW, use_color);
-    }
-
-    fn write_bench(out: @io::Writer, use_color: bool) {
-        write_pretty(out, "bench", term::color::CYAN, use_color);
-    }
-
-    fn write_pretty(out: @io::Writer,
-                    word: &str,
-                    color: term::color::Color,
-                    use_color: bool) {
-        let t = term::Terminal::new(out);
-        match t {
-            Ok(term) => {
-                if use_color {
-                    term.fg(color);
-                }
-                out.write_str(word);
-                if use_color {
-                    term.reset();
-                }
-            },
-            Err(_) => out.write_str(word)
-        }
-    }
-}
-
-fn print_failures(st: &ConsoleTestState) {
-    st.out.write_line("\nfailures:");
-    let mut failures = ~[];
-    for uint::range(0, st.failures.len()) |i| {
-        let name = copy st.failures[i].name;
-        failures.push(name.to_str());
-    }
-    sort::tim_sort(failures);
-    for failures.iter().advance |name| {
-        st.out.write_line(fmt!("    %s", name.to_str()));
-    }
-}
+    match opts.save_metrics {
+        None => (),
+        Some(ref pth) => {
+            st.metrics.save(pth);
+            st.out.write_str(fmt!("\nmetrics saved to: %s", pth.to_str()));
+        }
+    }
+
+    return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
+}
#[test]
@@ -390,17 +525,19 @@ fn should_sort_failures_before_printing_them() {

    let st = @ConsoleTestState {
        out: wr,
-        log_out: option::None,
+        log_out: None,
+        term: None,
        use_color: false,
        total: 0u,
        passed: 0u,
        failed: 0u,
        ignored: 0u,
        benchmarked: 0u,
+        metrics: MetricMap::new(),
        failures: ~[test_b, test_a]
    };

-    print_failures(st);
+    st.write_failures();
    };

    let apos = s.find_str("a").get();
@@ -503,15 +640,17 @@ pub fn filter_tests(
        filtered
    } else {
        let filter_str = match opts.filter {
-          option::Some(ref f) => copy *f,
-          option::None => ~""
+          Some(ref f) => copy *f,
+          None => ~""
        };

        fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
            Option<TestDescAndFn> {
            if test.desc.name.to_str().contains(filter_str) {
-                return option::Some(test);
-            } else { return option::None; }
+                return Some(test);
+            } else {
+                return None;
+            }
        }

        filtered.consume_iter().filter_map(|x| filter_fn(x, filter_str)).collect()
@@ -605,6 +744,138 @@ fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
    }
}

+impl ToJson for Metric {
+    fn to_json(&self) -> json::Json {
+        let mut map = ~HashMap::new();
+        map.insert(~"value", json::Number(self.value as float));
+        map.insert(~"noise", json::Number(self.noise as float));
+        json::Object(map)
+    }
+}
+
+impl MetricMap {
+
+    fn new() -> MetricMap {
+        MetricMap(TreeMap::new())
+    }
+
+    /// Load a MetricMap from a file.
+    fn load(p: &Path) -> MetricMap {
+        assert!(os::path_exists(p));
+        let f = io::file_reader(p).get();
+        let mut decoder = json::Decoder(json::from_reader(f).get());
+        MetricMap(Decodable::decode(&mut decoder))
+    }
+
+    /// Write a MetricMap to a file.
+    pub fn save(&self, p: &Path) {
+        let f = io::file_writer(p, [io::Create, io::Truncate]).get();
+        json::to_pretty_writer(f, &self.to_json());
+    }
+
+    /// Compare against another MetricMap
+    pub fn compare_to_old(&self, old: MetricMap,
+                          noise_pct: Option<f64>) -> MetricDiff {
+        let mut diff : MetricDiff = TreeMap::new();
+        for old.iter().advance |(k, vold)| {
+            let r = match self.find(k) {
+                None => MetricRemoved,
+                Some(v) => {
+                    let delta = (v.value - vold.value);
+                    let noise = match noise_pct {
+                        None => f64::max(vold.noise.abs(), v.noise.abs()),
+                        Some(pct) => vold.value * pct / 100.0
+                    };
+                    if delta.abs() < noise {
+                        LikelyNoise
+                    } else {
+                        let pct = delta.abs() / v.value * 100.0;
+                        if vold.noise < 0.0 {
+                            // When 'noise' is negative, it means we want
+                            // to see deltas that go up over time, and can
+                            // only tolerate slight negative movement.
+                            if delta < 0.0 {
+                                Regression(pct)
+                            } else {
+                                Improvement(pct)
+                            }
+                        } else {
+                            // When 'noise' is positive, it means we want
+                            // to see deltas that go down over time, and
+                            // can only tolerate slight positive movements.
+                            if delta < 0.0 {
+                                Improvement(pct)
+                            } else {
+                                Regression(pct)
+                            }
+                        }
+                    }
+                }
+            };
+            diff.insert(copy *k, r);
+        }
+        for self.iter().advance |(k, _)| {
+            if !diff.contains_key(k) {
+                diff.insert(copy *k, MetricAdded);
+            }
+        }
+        diff
+    }
+
+    /// Insert a named `value` (+/- `noise`) metric into the map. The value
+    /// must be non-negative. The `noise` indicates the uncertainty of the
+    /// metric, which doubles as the "noise range" of acceptable
+    /// pairwise-regressions on this named value, when comparing from one
+    /// metric to the next using `compare_to_old`.
+    ///
+    /// If `noise` is positive, then it means this metric is of a value
+    /// you want to see grow smaller, so a change larger than `noise` in the
+    /// positive direction represents a regression.
+    ///
+    /// If `noise` is negative, then it means this metric is of a value
+    /// you want to see grow larger, so a change larger than `noise` in the
+    /// negative direction represents a regression.
+    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
+        let m = Metric {
+            value: value,
+            noise: noise
+        };
+        self.insert(name.to_owned(), m);
+    }
+
+    /// Attempt to "ratchet" an external metric file. This involves loading
+    /// metrics from a metric file (if it exists), comparing against
+    /// the metrics in `self` using `compare_to_old`, and rewriting the
+    /// file to contain the metrics in `self` if none of the
+    /// `MetricChange`s are `Regression`. Returns the diff as well
+    /// as a boolean indicating whether the ratchet succeeded.
+    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
+        let old = if os::path_exists(p) {
+            MetricMap::load(p)
+        } else {
+            MetricMap::new()
+        };
+
+        let diff : MetricDiff = self.compare_to_old(old, pct);
+        let ok = do diff.iter().all() |(_, v)| {
+            match *v {
+                Regression(_) => false,
+                _ => true
+            }
+        };
+
+        if ok {
+            debug!("rewriting file '%s' with updated metrics", p.to_str());
+            self.save(p);
+        }
+        return (diff, ok)
+    }
+}
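The heart of compare_to_old is a three-way decision: inside the noise band means LikelyNoise, and outside it the sign of the recorded noise decides which direction counts as a regression. A self-contained restatement of that rule in today's Rust, with illustrative names and a simplified noise calculation (the real code folds in the new sample's noise as well):

#[derive(Debug, PartialEq)]
enum MetricChange {
    LikelyNoise,
    Improvement(f64),
    Regression(f64),
}

fn classify(old_value: f64, old_noise: f64, new_value: f64,
            noise_pct: Option<f64>) -> MetricChange {
    let delta = new_value - old_value;
    // Either a forced percentage band, or the recorded noise range.
    let noise = match noise_pct {
        Some(pct) => old_value * pct / 100.0,
        None => old_noise.abs(), // real code: max of old and new noise
    };
    if delta.abs() < noise {
        MetricChange::LikelyNoise
    } else {
        let pct = delta.abs() / new_value * 100.0;
        // Positive recorded noise means "smaller is better"; negative flips it.
        let shrinking_is_good = old_noise >= 0.0;
        if (delta < 0.0) == shrinking_is_good {
            MetricChange::Improvement(pct)
        } else {
            MetricChange::Regression(pct)
        }
    }
}

fn main() {
    // 100 ns/iter with +/- 4 ns recorded noise: 92 ns is a real improvement...
    assert_eq!(classify(100.0, 4.0, 92.0, None),
               MetricChange::Improvement(8.0 / 92.0 * 100.0));
    // ...103 ns is inside the band and counts as noise...
    assert_eq!(classify(100.0, 4.0, 103.0, None), MetricChange::LikelyNoise);
    // ...and with negative noise (growth wanted), dropping to 92 is a regression.
    assert_eq!(classify(100.0, -4.0, 92.0, None),
               MetricChange::Regression(8.0 / 92.0 * 100.0));
}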

// Benchmarking

impl BenchHarness {
    /// Callback for benchmark functions to run in their body.
    pub fn iter(&mut self, inner:&fn()) {
@@ -639,105 +910,72 @@ impl BenchHarness {
        f(self);
    }

-    // This is a more statistics-driven benchmark algorithm
-    pub fn auto_bench(&mut self, f: &fn(&mut BenchHarness)) -> ~[f64] {
-
-        let mut rng = rand::rng();
-        let mut magnitude = 10;
-        let mut prev_madp = 0.0;
-
-        loop {
-            let n_samples = rng.gen_uint_range(50, 60);
-            let n_iter = rng.gen_uint_range(magnitude,
-                                            magnitude * 2);
-
-            let samples = do vec::from_fn(n_samples) |_| {
-                self.bench_n(n_iter as u64, |x| f(x));
-                self.ns_per_iter() as f64
-            };
-
-            // Eliminate outliers
-            let med = samples.median();
-            let mad = samples.median_abs_dev();
-            let samples = do samples.consume_iter().filter |f| {
-                num::abs(*f - med) <= 3.0 * mad
-            }.collect::<~[f64]>();
-
-            debug!("%u samples, median %f, MAD=%f, %u survived filter",
-                   n_samples, med as float, mad as float,
-                   samples.len());
-
-            if samples.len() != 0 {
-                // If we have _any_ cluster of signal...
-                let curr_madp = samples.median_abs_dev_pct();
-                if self.ns_elapsed() > 1_000_000 &&
-                    (curr_madp < 1.0 ||
-                     num::abs(curr_madp - prev_madp) < 0.1) {
-                    return samples;
-                }
-                prev_madp = curr_madp;
-
-                if n_iter > 20_000_000 ||
-                    self.ns_elapsed() > 20_000_000 {
-                    return samples;
-                }
-            }
-
-            magnitude *= 2;
-        }
-    }
+    // This is the Go benchmark algorithm. It produces a single
+    // datapoint and always tries to run for 1s.
+    pub fn go_bench(&mut self, f: &fn(&mut BenchHarness)) {
+
+        // Rounds a number down to the nearest power of 10.
+        fn round_down_10(n: u64) -> u64 {
+            let mut n = n;
+            let mut res = 1;
+            while n > 10 {
+                n = n / 10;
+                res *= 10;
+            }
+            res
+        }
+
+        // Rounds x up to a number of the form [1eX, 2eX, 5eX].
+        fn round_up(n: u64) -> u64 {
+            let base = round_down_10(n);
+            if n < (2 * base) {
+                2 * base
+            } else if n < (5 * base) {
+                5 * base
+            } else {
+                10 * base
+            }
+        }
+
+        // Initial bench run to get ballpark figure.
+        let mut n = 1_u64;
+        self.bench_n(n, |x| f(x));
+
+        while n < 1_000_000_000 &&
+            self.ns_elapsed() < 1_000_000_000 {
+            let last = n;
+
+            // Try to estimate iter count for 1s falling back to 1bn
+            // iterations if first run took < 1ns.
+            if self.ns_per_iter() == 0 {
+                n = 1_000_000_000;
+            } else {
+                n = 1_000_000_000 / self.ns_per_iter();
+            }
+
+            n = u64::max(u64::min(n+n/2, 100*last), last+1);
+            n = round_up(n);
+            self.bench_n(n, |x| f(x));
+        }
+    }
+
+    // This is a more statistics-driven benchmark algorithm.
+    // It stops as quickly as 50ms, so long as the statistical
+    // properties are satisfactory. If those properties are
+    // not met, it may run as long as the Go algorithm.
+    pub fn auto_bench(&mut self, f: &fn(&mut BenchHarness)) -> stats::Summary {
+
+        // Initial bench run to get ballpark figure.
+        let mut n = 1_u64;
+        self.bench_n(n, |x| f(x));
+
+        // Try to estimate iter count for 1ms falling back to 1m
+        // iterations if first run took < 1ns.
+        if self.ns_per_iter() == 0 {
+            n = 1_000_000;
+        } else {
+            n = 1_000_000 / self.ns_per_iter();
+        }
+
+        let mut total_run = 0;
+        let samples : &mut [f64] = [0.0_f64, ..50];
+        loop {
+            let loop_start = precise_time_ns();
+
+            for samples.mut_iter().advance() |p| {
+                self.bench_n(n as u64, |x| f(x));
+                *p = self.ns_per_iter() as f64;
+            };
+
+            stats::winsorize(samples, 5.0);
+            let summ = stats::Summary::new(samples);
+
+            for samples.mut_iter().advance() |p| {
+                self.bench_n(5 * n as u64, |x| f(x));
+                *p = self.ns_per_iter() as f64;
+            };
+
+            stats::winsorize(samples, 5.0);
+            let summ5 = stats::Summary::new(samples);
+
+            debug!("%u samples, median %f, MAD=%f, MADP=%f",
+                   samples.len(),
+                   summ.median as float,
+                   summ.median_abs_dev as float,
+                   summ.median_abs_dev_pct as float);
+
+            let now = precise_time_ns();
+            let loop_run = now - loop_start;
+
+            // If we've run for 100ms and seem to have converged to a
+            // stable median.
+            if loop_run > 100_000_000 &&
+                summ.median_abs_dev_pct < 1.0 &&
+                summ.median - summ5.median < summ5.median_abs_dev {
+                return summ5;
+            }
+
+            total_run += loop_run;
+            // Longest we ever run for is 10s.
+            if total_run > 10_000_000_000 {
+                return summ5;
+            }
+
+            n *= 2;
+        }
+    }
}
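The rounding helpers in go_bench snap iteration counts onto a 1/2/5 ladder, the same scheme Go's testing package uses, so successive runs land on round numbers. A direct transcription in today's Rust with a few worked values:

// Rounds a number down to the nearest power of 10.
fn round_down_10(mut n: u64) -> u64 {
    let mut res = 1;
    while n > 10 {
        n /= 10;
        res *= 10;
    }
    res
}

// Rounds n up to the next number of the form 1eX, 2eX or 5eX.
fn round_up(n: u64) -> u64 {
    let base = round_down_10(n);
    if n < 2 * base {
        2 * base
    } else if n < 5 * base {
        5 * base
    } else {
        10 * base
    }
}

fn main() {
    assert_eq!(round_up(137), 200);      // base 100, next rung is 2e2
    assert_eq!(round_up(4_200), 5_000);  // base 1000, next rung is 5e3
    assert_eq!(round_up(7_000), 10_000); // base 1000, past 5e3, so 1e4
}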
pub mod bench {
@@ -752,13 +990,13 @@ pub mod bench {
            bytes: 0
        };

-        let ns_iter_samples = bs.auto_bench(f);
+        let ns_iter_summ = bs.auto_bench(f);

-        let iter_s = 1_000_000_000 / (ns_iter_samples.median() as u64);
+        let iter_s = 1_000_000_000 / (ns_iter_summ.median as u64);
        let mb_s = (bs.bytes * iter_s) / 1_000_000;

        BenchSamples {
-            ns_iter_samples: ns_iter_samples,
+            ns_iter_summ: ns_iter_summ,
            mb_s: mb_s as uint
        }
    }
@@ -877,13 +1115,14 @@ mod tests {
    // unignored tests and flip the ignore flag on the rest to false

        let opts = TestOpts {
-            filter: option::None,
+            filter: None,
            run_ignored: true,
-            logfile: option::None,
+            logfile: None,
            run_tests: true,
            run_benchmarks: false,
-            save_results: option::None,
-            compare_results: option::None
+            ratchet_noise_percent: None,
+            ratchet_metrics: None,
+            save_metrics: None,
        };

        let tests = ~[
@@ -914,13 +1153,14 @@ mod tests {
    #[test]
    pub fn sort_tests() {
        let opts = TestOpts {
-            filter: option::None,
+            filter: None,
            run_ignored: false,
-            logfile: option::None,
+            logfile: None,
            run_tests: true,
            run_benchmarks: false,
-            save_results: option::None,
-            compare_results: option::None
+            ratchet_noise_percent: None,
+            ratchet_metrics: None,
+            save_metrics: None,
        };

        let names =
src/test/codegen/hello.cc (new file, 12 lines)
@@ -0,0 +1,12 @@
+#include <stddef.h>
+
+struct slice {
+    char const *p;
+    size_t len;
+};
+
+extern "C"
+void test() {
+    struct slice s = { .p = "hello",
+                       .len = 5 };
+}
src/test/codegen/hello.rs (new file, 4 lines)
@@ -0,0 +1,4 @@
+#[no_mangle]
+fn test() {
+    let _x = "hello";
+}
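One detail worth noting in the Rust side of the pair: llvm-extract is invoked with -func=test, so the function's symbol must be literally `test` in the bitcode, which is exactly what #[no_mangle] guarantees; without it the mangled symbol would make the extract step come up empty. A modern spelling of the same test (illustrative):

// Keep the symbol name `test` unmangled so llvm-extract -func=test finds it.
#[no_mangle]
pub extern "C" fn test() {
    let _x = "hello";
}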