auto merge of #7829 : graydon/rust/codegen-compiletests, r=cmr

This should get us over the hump of activating basic ratcheting on codegen tests, at least. It also puts in place optional (disabled by default) ratcheting on all #[bench] tests, and records all metrics from them to harvestable .json files in any case.
bors 2013-07-17 13:07:24 -07:00
commit 8c082658be
6 changed files with 270 additions and 34 deletions
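
For orientation before the diffs: a minimal sketch of the MetricMap workflow this change is built around, using only API that appears in the diffs below (insert_metric, save, ratchet); the file name, the values, and the surrounding fn main are illustrative assumptions, not code from this commit.

extern mod extra;
use extra::test::MetricMap;

fn main() {
    let mut mm = MetricMap::new();
    // Record a measurement: a name, a value, and an acceptable noise window.
    mm.insert_metric("runtime", 1000.0, 2.0);
    // Either write the metrics out to a harvestable .json file (--save-metrics)...
    mm.save(&Path("metrics.json"));
    // ...or compare-and-advance against the previous run (--ratchet-metrics),
    // treating changes within 10% as noise (--ratchet-noise-percent 10.0).
    let (_diff, ok) = mm.ratchet(&Path("metrics.json"), Some(10.0));
    assert!(ok);
}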

configure

@@ -372,6 +372,7 @@ opt optimize 1 "build optimized rust code"
opt optimize-cxx 1 "build optimized C++ code"
opt optimize-llvm 1 "build optimized LLVM"
opt debug 0 "build with extra debug fun"
opt ratchet-bench 0 "ratchet benchmarks"
opt fast-make 0 "use .gitmodules as timestamp for submodule deps"
opt manage-submodules 1 "let the build manage the git submodules"
opt mingw-cross 0 "cross-compile for win32 using mingw"


@@ -34,9 +34,12 @@ ifdef CHECK_XFAILS
TESTARGS += --ignored
endif
CTEST_BENCH = --bench
# Arguments to the cfail/rfail/rpass/bench tests
ifdef CFG_VALGRIND
CTEST_RUNTOOL = --runtool "$(CFG_VALGRIND)"
CTEST_BENCH =
endif
# Arguments to the perf tests
@@ -60,6 +63,21 @@ endif
TEST_LOG_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).log
TEST_OK_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).ok
TEST_RATCHET_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4)-metrics.json
TEST_RATCHET_NOISE_PERCENT=10.0
# Whether to ratchet or merely save benchmarks
ifdef CFG_RATCHET_BENCH
CRATE_TEST_BENCH_ARGS=\
--test $(CTEST_BENCH) \
--ratchet-metrics $(call TEST_RATCHET_FILE,$(1),$(2),$(3),$(4)) \
--ratchet-noise-percent $(TEST_RATCHET_NOISE_PERCENT)
else
CRATE_TEST_BENCH_ARGS=\
--test $(CTEST_BENCH) \
--save-metrics $(call TEST_RATCHET_FILE,$(1),$(2),$(3),$(4))
endif
define DEF_TARGET_COMMANDS
ifdef CFG_UNIXY_$(1)
@@ -359,11 +377,14 @@ $(foreach host,$(CFG_HOST_TRIPLES), \
define DEF_TEST_CRATE_RULES
check-stage$(1)-T-$(2)-H-$(3)-$(4)-exec: $$(call TEST_OK_FILE,$(1),$(2),$(3),$(4))
$$(call TEST_OK_FILE,$(1),$(2),$(3),$(4)): \
$(3)/stage$(1)/test/$(4)test-$(2)$$(X_$(2))
@$$(call E, run: $$<)
$$(Q)$$(call CFG_RUN_TEST_$(2),$$<,$(2),$(3)) $$(TESTARGS) \
--logfile $$(call TEST_LOG_FILE,$(1),$(2),$(3),$(4)) \
$$(call CRATE_TEST_BENCH_ARGS,$(1),$(2),$(3),$(4)) \
&& touch $$@
endef
@@ -552,6 +573,7 @@ CTEST_ARGS$(1)-T-$(2)-H-$(3)-$(4) := \
$$(CTEST_COMMON_ARGS$(1)-T-$(2)-H-$(3)) \
--src-base $$(S)src/test/$$(CTEST_SRC_BASE_$(4))/ \
--build-base $(3)/test/$$(CTEST_BUILD_BASE_$(4))/ \
--ratchet-metrics $(call TEST_RATCHET_FILE,$(1),$(2),$(3),$(4)) \
--mode $$(CTEST_MODE_$(4)) \
$$(CTEST_RUNTOOL_$(4))


@@ -58,6 +58,15 @@ pub struct config {
// Write out a parseable log of tests that were run
logfile: Option<Path>,
// Write out a json file containing any metrics of the run
save_metrics: Option<Path>,
// Write and ratchet a metrics file
ratchet_metrics: Option<Path>,
// Percent change in metrics to consider noise
ratchet_noise_percent: Option<f64>,
// A command line to prefix program execution with,
// for running under valgrind
runtool: Option<~str>,


@@ -17,6 +17,7 @@
extern mod extra;
use std::os;
use std::f64;
use extra::getopts;
use extra::getopts::groups::{optopt, optflag, reqopt};
@@ -66,6 +67,10 @@ pub fn parse_config(args: ~[~str]) -> config {
optopt("", "rustcflags", "flags to pass to rustc", "FLAGS"),
optflag("", "verbose", "run tests verbosely, showing all output"),
optopt("", "logfile", "file to log test execution to", "FILE"),
optopt("", "save-metrics", "file to save metrics to", "FILE"),
optopt("", "ratchet-metrics", "file to ratchet metrics against", "FILE"),
optopt("", "ratchet-noise-percent",
"percent change in metrics to consider noise", "N"),
optflag("", "jit", "run tests under the JIT"),
optflag("", "newrt", "run tests on the new runtime / scheduler"),
optopt("", "target", "the target to build for", "TARGET"),
@@ -116,6 +121,13 @@ pub fn parse_config(args: ~[~str]) -> config {
Some(copy matches.free[0])
} else { None },
logfile: getopts::opt_maybe_str(matches, "logfile").map(|s| Path(*s)),
save_metrics: getopts::opt_maybe_str(matches, "save-metrics").map(|s| Path(*s)),
ratchet_metrics:
getopts::opt_maybe_str(matches, "ratchet-metrics").map(|s| Path(*s)),
ratchet_noise_percent:
getopts::opt_maybe_str(matches,
"ratchet-noise-percent").map(|s|
f64::from_str(*s).get()),
runtool: getopts::opt_maybe_str(matches, "runtool"),
rustcflags: getopts::opt_maybe_str(matches, "rustcflags"),
jit: getopts::opt_present(matches, "jit"),
@@ -215,10 +227,10 @@ pub fn test_opts(config: &config) -> test::TestOpts {
run_ignored: config.run_ignored,
logfile: copy config.logfile,
run_tests: true,
run_benchmarks: false,
ratchet_metrics: None,
ratchet_noise_percent: None,
save_metrics: None,
run_benchmarks: true,
ratchet_metrics: copy config.ratchet_metrics,
ratchet_noise_percent: copy config.ratchet_noise_percent,
save_metrics: copy config.save_metrics,
}
}
@@ -231,7 +243,13 @@ pub fn make_tests(config: &config) -> ~[test::TestDescAndFn] {
let file = copy *file;
debug!("inspecting file %s", file.to_str());
if is_test(config, file) {
tests.push(make_test(config, file))
let t = do make_test(config, file) {
match config.mode {
mode_codegen => make_metrics_test_closure(config, file),
_ => make_test_closure(config, file)
}
};
tests.push(t)
}
}
tests
@@ -260,14 +278,15 @@ pub fn is_test(config: &config, testfile: &Path) -> bool {
return valid;
}
pub fn make_test(config: &config, testfile: &Path) -> test::TestDescAndFn {
pub fn make_test(config: &config, testfile: &Path,
f: &fn()->test::TestFn) -> test::TestDescAndFn {
test::TestDescAndFn {
desc: test::TestDesc {
name: make_test_name(config, testfile),
ignore: header::is_test_ignored(config, testfile),
should_fail: false
},
testfn: make_test_closure(config, testfile),
testfn: f(),
}
}
@@ -291,3 +310,10 @@ pub fn make_test_closure(config: &config, testfile: &Path) -> test::TestFn {
let testfile = Cell::new(testfile.to_str());
test::DynTestFn(|| { runtest::run(config.take(), testfile.take()) })
}
pub fn make_metrics_test_closure(config: &config, testfile: &Path) -> test::TestFn {
use std::cell::Cell;
let config = Cell::new(copy *config);
let testfile = Cell::new(testfile.to_str());
test::DynMetricFn(|mm| { runtest::run_metrics(config.take(), testfile.take(), mm) })
}


@@ -25,7 +25,14 @@ use std::os;
use std::uint;
use std::vec;
use extra::test::MetricMap;
pub fn run(config: config, testfile: ~str) {
let mut _mm = MetricMap::new();
run_metrics(config, testfile, &mut _mm);
}
pub fn run_metrics(config: config, testfile: ~str, mm: &mut MetricMap) {
if config.verbose {
// We're going to be dumping a lot of info. Start on a new line.
io::stdout().write_str("\n\n");
@@ -40,7 +47,7 @@ pub fn run(config: config, testfile: ~str) {
mode_run_pass => run_rpass_test(&config, &props, &testfile),
mode_pretty => run_pretty_test(&config, &props, &testfile),
mode_debug_info => run_debuginfo_test(&config, &props, &testfile),
mode_codegen => run_codegen_test(&config, &props, &testfile)
mode_codegen => run_codegen_test(&config, &props, &testfile, mm)
}
}
@@ -906,7 +913,14 @@ fn disassemble_extract(config: &config, _props: &TestProps,
}
fn run_codegen_test(config: &config, props: &TestProps, testfile: &Path) {
fn count_extracted_lines(p: &Path) -> uint {
let x = io::read_whole_file_str(&p.with_filetype("ll")).get();
x.line_iter().len_()
}
fn run_codegen_test(config: &config, props: &TestProps,
testfile: &Path, mm: &mut MetricMap) {
if config.llvm_bin_path.is_none() {
fatal(~"missing --llvm-bin-path");
@@ -947,7 +961,17 @@ fn run_codegen_test(config: &config, props: &TestProps, testfile: &Path) {
fatal_ProcRes(~"disassembling extract failed", &ProcRes);
}
let base = output_base_name(config, testfile);
let base_extract = append_suffix_to_stem(&base, "extract");
let base_clang = append_suffix_to_stem(&base, "clang");
let base_clang_extract = append_suffix_to_stem(&base_clang, "extract");
let base_lines = count_extracted_lines(&base_extract);
let clang_lines = count_extracted_lines(&base_clang_extract);
mm.insert_metric("clang-codegen-ratio",
(base_lines as f64) / (clang_lines as f64),
0.001);
}
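(Reading of the sign convention, not a comment from the commit: because the noise passed here is positive, the compare_to_old logic added further down treats downward movement as improvement, so the ratchet expects the ratio of Rust-emitted to clang-emitted IR lines to shrink or hold steady over time.)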


@@ -64,7 +64,9 @@ impl ToStr for TestName {
pub enum TestFn {
StaticTestFn(extern fn()),
StaticBenchFn(extern fn(&mut BenchHarness)),
StaticMetricFn(~fn(&mut MetricMap)),
DynTestFn(~fn()),
DynMetricFn(~fn(&mut MetricMap)),
DynBenchFn(~fn(&mut BenchHarness))
}
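
As a rough illustration of how the new metric variants slot in (the test name, ratio value, and noise below are invented; the struct fields and constructors mirror ones used elsewhere in this diff), a metric-producing test can be built like this:

use extra::test::{TestDescAndFn, TestDesc, DynTestName, DynMetricFn};

fn metrics_test() -> TestDescAndFn {
    TestDescAndFn {
        desc: TestDesc {
            name: DynTestName(~"codegen-ratio"),
            ignore: false,
            should_fail: false
        },
        // Rather than passing or failing, the closure fills in a MetricMap,
        // which the harness reports as TrMetrics and can save or ratchet.
        testfn: DynMetricFn(|mm| mm.insert_metric("clang-codegen-ratio", 1.05, 0.001))
    }
}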
@@ -95,9 +97,11 @@ pub struct Metric {
noise: f64
}
#[deriving(Eq)]
pub struct MetricMap(TreeMap<~str,Metric>);
/// Analysis of a single change in metric
#[deriving(Eq)]
pub enum MetricChange {
LikelyNoise,
MetricAdded,
@@ -217,7 +221,13 @@ pub struct BenchSamples {
}
#[deriving(Eq)]
pub enum TestResult { TrOk, TrFailed, TrIgnored, TrBench(BenchSamples) }
pub enum TestResult {
TrOk,
TrFailed,
TrIgnored,
TrMetrics(MetricMap),
TrBench(BenchSamples)
}
struct ConsoleTestState {
out: @io::Writer,
@@ -228,7 +238,7 @@ struct ConsoleTestState {
passed: uint,
failed: uint,
ignored: uint,
benchmarked: uint,
measured: uint,
metrics: MetricMap,
failures: ~[TestDesc]
}
@@ -260,7 +270,7 @@ impl ConsoleTestState {
passed: 0u,
failed: 0u,
ignored: 0u,
benchmarked: 0u,
measured: 0u,
metrics: MetricMap::new(),
failures: ~[]
}
@@ -278,11 +288,14 @@ impl ConsoleTestState {
self.write_pretty("ignored", term::color::YELLOW);
}
pub fn write_metric(&self) {
self.write_pretty("metric", term::color::CYAN);
}
pub fn write_bench(&self) {
self.write_pretty("bench", term::color::CYAN);
}
pub fn write_added(&self) {
self.write_pretty("added", term::color::GREEN);
}
@@ -331,6 +344,10 @@ impl ConsoleTestState {
TrOk => self.write_ok(),
TrFailed => self.write_failed(),
TrIgnored => self.write_ignored(),
TrMetrics(ref mm) => {
self.write_metric();
self.out.write_str(": " + fmt_metrics(mm));
}
TrBench(ref bs) => {
self.write_bench();
self.out.write_str(": " + fmt_bench_samples(bs))
@@ -348,6 +365,7 @@ impl ConsoleTestState {
TrOk => ~"ok",
TrFailed => ~"failed",
TrIgnored => ~"ignored",
TrMetrics(ref mm) => fmt_metrics(mm),
TrBench(ref bs) => fmt_bench_samples(bs)
}, test.name.to_str()));
}
@@ -415,7 +433,7 @@ impl ConsoleTestState {
pub fn write_run_finish(&self,
ratchet_metrics: &Option<Path>,
ratchet_pct: Option<f64>) -> bool {
assert!(self.passed + self.failed + self.ignored + self.benchmarked == self.total);
assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
let ratchet_success = match *ratchet_metrics {
None => true,
@@ -447,12 +465,23 @@ impl ConsoleTestState {
} else {
self.write_failed();
}
self.out.write_str(fmt!(". %u passed; %u failed; %u ignored, %u benchmarked\n\n",
self.passed, self.failed, self.ignored, self.benchmarked));
self.out.write_str(fmt!(". %u passed; %u failed; %u ignored; %u measured\n\n",
self.passed, self.failed, self.ignored, self.measured));
return success;
}
}
pub fn fmt_metrics(mm: &MetricMap) -> ~str {
use std::iterator::IteratorUtil;
let v : ~[~str] = mm.iter()
.transform(|(k,v)| fmt!("%s: %f (+/- %f)",
*k,
v.value as float,
v.noise as float))
.collect();
v.connect(", ")
}
pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
if bs.mb_s != 0 {
fmt!("%u ns/iter (+/- %u) = %u MB/s",
@@ -480,11 +509,19 @@ pub fn run_tests_console(opts: &TestOpts,
match result {
TrOk => st.passed += 1,
TrIgnored => st.ignored += 1,
TrMetrics(mm) => {
let tname = test.name.to_str();
for mm.iter().advance |(k,v)| {
st.metrics.insert_metric(tname + "." + *k,
v.value, v.noise);
}
st.measured += 1
}
TrBench(bs) => {
st.metrics.insert_metric(test.name.to_str(),
bs.ns_iter_summ.median,
bs.ns_iter_summ.max - bs.ns_iter_summ.min);
st.benchmarked += 1
st.measured += 1
}
TrFailed => {
st.failed += 1;
@@ -532,7 +569,7 @@ fn should_sort_failures_before_printing_them() {
passed: 0u,
failed: 0u,
ignored: 0u,
benchmarked: 0u,
measured: 0u,
metrics: MetricMap::new(),
failures: ~[test_b, test_a]
};
@@ -564,11 +601,11 @@ fn run_tests(opts: &TestOpts,
callback(TeFiltered(filtered_descs));
let (filtered_tests, filtered_benchs) =
let (filtered_tests, filtered_benchs_and_metrics) =
do filtered_tests.partition |e| {
match e.testfn {
StaticTestFn(_) | DynTestFn(_) => true,
StaticBenchFn(_) | DynBenchFn(_) => false
_ => false
}
};
@@ -606,7 +643,8 @@ }
}
// All benchmarks run at the end, in serial.
for filtered_benchs.consume_iter().advance |b| {
// (this includes metric fns)
for filtered_benchs_and_metrics.consume_iter().advance |b| {
callback(TeWait(copy b.desc));
run_test(!opts.run_benchmarks, b, ch.clone());
let (test, result) = p.recv();
@@ -729,6 +767,18 @@ pub fn run_test(force_ignore: bool,
monitor_ch.send((desc, TrBench(bs)));
return;
}
DynMetricFn(f) => {
let mut mm = MetricMap::new();
f(&mut mm);
monitor_ch.send((desc, TrMetrics(mm)));
return;
}
StaticMetricFn(f) => {
let mut mm = MetricMap::new();
f(&mut mm);
monitor_ch.send((desc, TrMetrics(mm)));
return;
}
DynTestFn(f) => run_test_inner(desc, monitor_ch, f),
StaticTestFn(f) => run_test_inner(desc, monitor_ch, || f())
}
@@ -756,12 +806,12 @@ impl ToJson for Metric {
impl MetricMap {
fn new() -> MetricMap {
pub fn new() -> MetricMap {
MetricMap(TreeMap::new())
}
/// Load a MetricMap from a file.
fn load(p: &Path) -> MetricMap {
pub fn load(p: &Path) -> MetricMap {
assert!(os::path_exists(p));
let f = io::file_reader(p).get();
let mut decoder = json::Decoder(json::from_reader(f).get());
@@ -774,8 +824,13 @@ impl MetricMap {
json::to_pretty_writer(f, &self.to_json());
}
/// Compare against another MetricMap
pub fn compare_to_old(&self, old: MetricMap,
/// Compare against another MetricMap. Optionally compare all
/// measurements in the maps using the provided `noise_pct` as a
/// percentage of each value to consider noise. If `None`, each
/// measurement's noise threshold is independently chosen as the
/// maximum of that measurement's recorded noise quantity in either
/// map.
pub fn compare_to_old(&self, old: &MetricMap,
noise_pct: Option<f64>) -> MetricDiff {
let mut diff : MetricDiff = TreeMap::new();
for old.iter().advance |(k, vold)| {
@@ -787,10 +842,10 @@ impl MetricMap {
None => f64::max(vold.noise.abs(), v.noise.abs()),
Some(pct) => vold.value * pct / 100.0
};
if delta.abs() < noise {
if delta.abs() <= noise {
LikelyNoise
} else {
let pct = delta.abs() / v.value * 100.0;
let pct = delta.abs() / (vold.value).max(&f64::epsilon) * 100.0;
if vold.noise < 0.0 {
// When 'noise' is negative, it means we want
// to see deltas that go up over time, and can
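To make the thresholding concrete with the numbers ratchet_test uses below: old runtime = 1000.0 with noise 2.0, new runtime = 1100.0. With no noise percentage the threshold is max(2.0, 2.0) = 2.0; the delta of 100.0 exceeds it, and since the recorded noise is positive (lower is better) the result is Regression(100.0 / 1000.0 * 100.0) = Regression(10.0), so the ratchet refuses to advance. With an explicit 10.0% override the threshold becomes 1000.0 * 10.0 / 100.0 = 100.0; the delta no longer exceeds it, the change is classified as LikelyNoise, and the ratchet advances.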
@@ -857,7 +912,7 @@ impl MetricMap {
MetricMap::new()
};
let diff : MetricDiff = self.compare_to_old(old, pct);
let diff : MetricDiff = self.compare_to_old(&old, pct);
let ok = do diff.iter().all() |(_, v)| {
match *v {
Regression(_) => false,
@@ -899,7 +954,7 @@ impl BenchHarness {
if self.iterations == 0 {
0
} else {
self.ns_elapsed() / self.iterations
self.ns_elapsed() / self.iterations.max(&1)
}
}
@@ -922,7 +977,7 @@ impl BenchHarness {
if self.ns_per_iter() == 0 {
n = 1_000_000;
} else {
n = 1_000_000 / self.ns_per_iter();
n = 1_000_000 / self.ns_per_iter().max(&1);
}
let mut total_run = 0;
@@ -964,8 +1019,8 @@ }
}
total_run += loop_run;
// Longest we ever run for is 10s.
if total_run > 10_000_000_000 {
// Longest we ever run for is 3s.
if total_run > 3_000_000_000 {
return summ5;
}
@@ -992,7 +1047,8 @@ pub mod bench {
let ns_iter_summ = bs.auto_bench(f);
let iter_s = 1_000_000_000 / (ns_iter_summ.median as u64);
let ns_iter = (ns_iter_summ.median as u64).max(&1);
let iter_s = 1_000_000_000 / ns_iter;
let mb_s = (bs.bytes * iter_s) / 1_000_000;
BenchSamples {
@@ -1006,13 +1062,16 @@ mod tests {
mod tests {
use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
TestDesc, TestDescAndFn,
Metric, MetricMap, MetricAdded, MetricRemoved,
Improvement, Regression, LikelyNoise,
StaticTestName, DynTestName, DynTestFn};
use test::{TestOpts, run_test};
use std::either;
use std::comm::{stream, SharedChan};
use std::option;
use std::vec;
use tempfile;
use std::os;
#[test]
pub fn do_not_run_ignored_tests() {
@@ -1208,4 +1267,99 @@ mod tests {
}
}
}
#[test]
pub fn test_metricmap_compare() {
let mut m1 = MetricMap::new();
let mut m2 = MetricMap::new();
m1.insert_metric("in-both-noise", 1000.0, 200.0);
m2.insert_metric("in-both-noise", 1100.0, 200.0);
m1.insert_metric("in-first-noise", 1000.0, 2.0);
m2.insert_metric("in-second-noise", 1000.0, 2.0);
m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
let diff1 = m2.compare_to_old(&m1, None);
assert_eq!(*(diff1.find(&~"in-both-noise").get()), LikelyNoise);
assert_eq!(*(diff1.find(&~"in-first-noise").get()), MetricRemoved);
assert_eq!(*(diff1.find(&~"in-second-noise").get()), MetricAdded);
assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").get()),
Regression(100.0));
assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").get()),
Improvement(50.0));
assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").get()),
Regression(50.0));
assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").get()),
Improvement(100.0));
assert_eq!(diff1.len(), 7);
let diff2 = m2.compare_to_old(&m1, Some(200.0));
assert_eq!(*(diff2.find(&~"in-both-noise").get()), LikelyNoise);
assert_eq!(*(diff2.find(&~"in-first-noise").get()), MetricRemoved);
assert_eq!(*(diff2.find(&~"in-second-noise").get()), MetricAdded);
assert_eq!(*(diff2.find(&~"in-both-want-downwards-but-regressed").get()), LikelyNoise);
assert_eq!(*(diff2.find(&~"in-both-want-downwards-and-improved").get()), LikelyNoise);
assert_eq!(*(diff2.find(&~"in-both-want-upwards-but-regressed").get()), LikelyNoise);
assert_eq!(*(diff2.find(&~"in-both-want-upwards-and-improved").get()), LikelyNoise);
assert_eq!(diff2.len(), 7);
}
#[test]
pub fn ratchet_test() {
let dpth = tempfile::mkdtemp(&os::tmpdir(),
"test-ratchet").expect("missing test for ratchet");
let pth = dpth.push("ratchet.json");
let mut m1 = MetricMap::new();
m1.insert_metric("runtime", 1000.0, 2.0);
m1.insert_metric("throughput", 50.0, 2.0);
let mut m2 = MetricMap::new();
m2.insert_metric("runtime", 1100.0, 2.0);
m2.insert_metric("throughput", 50.0, 2.0);
m1.save(&pth);
// Ask for a ratchet that should fail to advance.
let (diff1, ok1) = m2.ratchet(&pth, None);
assert_eq!(ok1, false);
assert_eq!(diff1.len(), 2);
assert_eq!(*(diff1.find(&~"runtime").get()), Regression(10.0));
assert_eq!(*(diff1.find(&~"throughput").get()), LikelyNoise);
// Check that it was not rewritten.
let m3 = MetricMap::load(&pth);
assert_eq!(m3.len(), 2);
assert_eq!(*(m3.find(&~"runtime").get()), Metric { value: 1000.0, noise: 2.0 });
assert_eq!(*(m3.find(&~"throughput").get()), Metric { value: 50.0, noise: 2.0 });
// Ask for a ratchet with an explicit noise-percentage override,
// that should advance.
let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
assert_eq!(ok2, true);
assert_eq!(diff2.len(), 2);
assert_eq!(*(diff2.find(&~"runtime").get()), LikelyNoise);
assert_eq!(*(diff2.find(&~"throughput").get()), LikelyNoise);
// Check that it was rewritten.
let m4 = MetricMap::load(&pth);
assert_eq!(m4.len(), 2);
assert_eq!(*(m4.find(&~"runtime").get()), Metric { value: 1100.0, noise: 2.0 });
assert_eq!(*(m4.find(&~"throughput").get()), Metric { value: 50.0, noise: 2.0 });
os::remove_dir_recursive(&dpth);
}
}