diff --git a/scripts/simplebench/bench-example.py b/scripts/simplebench/bench-example.py
index f24cf22fe9..d9c7f7bc17 100644
--- a/scripts/simplebench/bench-example.py
+++ b/scripts/simplebench/bench-example.py
@@ -19,6 +19,7 @@
 #
 
 import simplebench
+from results_to_text import results_to_text
 from bench_block_job import bench_block_copy, drv_file, drv_nbd
 
 
@@ -77,4 +78,4 @@ test_envs = [
 ]
 
 result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
-print(simplebench.results_to_text(result))
+print(results_to_text(result))
diff --git a/scripts/simplebench/bench_write_req.py b/scripts/simplebench/bench_write_req.py
index e175bcd7a4..da601ea2fe 100755
--- a/scripts/simplebench/bench_write_req.py
+++ b/scripts/simplebench/bench_write_req.py
@@ -26,6 +26,7 @@ import sys
 import os
 import subprocess
 import simplebench
+from results_to_text import results_to_text
 
 
 def bench_func(env, case):
@@ -167,4 +168,4 @@ if __name__ == '__main__':
 
     result = simplebench.bench(bench_func, test_envs, test_cases, count=3,
                                initial_run=False)
-    print(simplebench.results_to_text(result))
+    print(results_to_text(result))
diff --git a/scripts/simplebench/results_to_text.py b/scripts/simplebench/results_to_text.py
new file mode 100644
index 0000000000..58d909ffd9
--- /dev/null
+++ b/scripts/simplebench/results_to_text.py
@@ -0,0 +1,48 @@
+# Simple benchmarking framework
+#
+# Copyright (c) 2019 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+def result_to_text(result):
+    """Return text representation of bench_one() returned dict."""
+    if 'average' in result:
+        s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
+        if 'n-failed' in result:
+            s += '\n({} failed)'.format(result['n-failed'])
+        return s
+    else:
+        return 'FAILED'
+
+
+def results_to_text(results):
+    """Return text representation of bench() returned dict."""
+    from tabulate import tabulate
+
+    dim = None
+    tab = [[""] + [c['id'] for c in results['envs']]]
+    for case in results['cases']:
+        row = [case['id']]
+        for env in results['envs']:
+            res = results['tab'][case['id']][env['id']]
+            if dim is None:
+                dim = res['dimension']
+            else:
+                assert dim == res['dimension']
+            row.append(result_to_text(res))
+        tab.append(row)
+
+    return f'All results are in {dim}\n\n' + tabulate(tab)
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
index aa74b78a04..f61513af90 100644
--- a/scripts/simplebench/simplebench.py
+++ b/scripts/simplebench/simplebench.py
@@ -79,17 +79,6 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
     return result
 
 
-def result_to_text(result):
-    """Return text representation of bench_one() returned dict."""
-    if 'average' in result:
-        s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
-        if 'n-failed' in result:
-            s += '\n({} failed)'.format(result['n-failed'])
-        return s
-    else:
-        return 'FAILED'
-
-
 def bench(test_func, test_envs, test_cases, *args, **vargs):
     """Fill benchmark table
 
@@ -125,23 +114,3 @@ def bench(test_func, test_envs, test_cases, *args, **vargs):
 
     print('Done')
     return results
-
-
-def results_to_text(results):
-    """Return text representation of bench() returned dict."""
-    from tabulate import tabulate
-
-    dim = None
-    tab = [[""] + [c['id'] for c in results['envs']]]
-    for case in results['cases']:
-        row = [case['id']]
-        for env in results['envs']:
-            res = results['tab'][case['id']][env['id']]
-            if dim is None:
-                dim = res['dimension']
-            else:
-                assert dim == res['dimension']
-            row.append(result_to_text(res))
-        tab.append(row)
-
-    return f'All results are in {dim}\n\n' + tabulate(tab)
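As a quick sanity check of the relocated helper, the sketch below (not part of the patch) builds a hand-written dict shaped like the output of simplebench.bench() and feeds it to results_to_text(). The environment/case ids, the numbers, and the 'seconds' dimension are made-up illustration values; the snippet assumes it is run from scripts/simplebench/ with the third-party tabulate module installed, the same conditions the patched scripts rely on.

# Hypothetical usage sketch, not part of the patch: exercise results_to_text()
# on a hand-built results dict shaped like simplebench.bench() output.
from results_to_text import results_to_text

fake_results = {
    'envs': [{'id': 'env-a'}, {'id': 'env-b'}],   # made-up environment ids
    'cases': [{'id': 'case-1'}],                  # made-up test case id
    'tab': {
        'case-1': {
            # successful run: has 'average'/'stdev', rendered as "1.23 +- 0.04"
            'env-a': {'dimension': 'seconds', 'average': 1.23, 'stdev': 0.04},
            # failed run: no 'average' key, rendered as "FAILED"
            'env-b': {'dimension': 'seconds'},
        },
    },
}

print(results_to_text(fake_results))

A side effect of the move, presumably intentional, is that the tabulate dependency now lives entirely in results_to_text.py, so scripts that only run benchmarks without formatting a table never need it.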