Commit 6236292b authored by Zhizhou Yang

Reindent python files in benchmark suite

Reindented python files in the suite.

Test: None.
Change-Id: I573272982e3788fd5b6b8e909546f2601a5a919b
parent 8c9803af
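The commit message does not say how the reindentation was produced. As a purely illustrative sketch, a pass like the following, assuming a formatter such as yapf is installed and that the suite's scripts live in one directory (the path below is a placeholder), yields the kind of line-wrapping changes shown in the hunks that follow:

    # Illustrative only: neither the formatting tool nor the directory name
    # is stated in the commit message.
    import glob
    import subprocess

    for path in glob.glob('android_bench_suite/*.py'):
      # 'yapf -i' rewrites each file in place, reflowing lines that exceed
      # the configured column limit, the same kind of change this commit
      # contains.
      subprocess.check_call(['yapf', '-i', path])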
@@ -26,7 +26,8 @@ def try_patch_skia():
 skia_patch = os.path.join(
 os.path.dirname(os.path.realpath(__file__)), 'skia_aosp.diff')
 else:
-raise ValueError('Adnroid source type should be either aosp or internal.')
+raise ValueError('Adnroid source type should be either aosp or '
+'internal.')
 # FIXME: A quick hack, need to handle errors and check whether has been
 # applied in the future.
 try:
@@ -37,18 +38,20 @@ def try_patch_skia():
 def try_patch_autotest():
-# Patch autotest, which includes all the testcases on device, setting device,
-# and running the benchmarks
+# Patch autotest, which includes all the testcases on device,
+# setting device, and running the benchmarks
 autotest_dir = os.path.join(config.android_home, config.autotest_dir)
 autotest_patch = os.path.join(
 os.path.dirname(os.path.realpath(__file__)), 'autotest.diff')
-dex2oat_dir = os.path.join(autotest_dir, 'server/site_tests/android_Dex2oat')
+dex2oat_dir = os.path.join(autotest_dir,
+'server/site_tests/android_Dex2oat')
 panorama_dir = os.path.join(autotest_dir,
 'server/site_tests/android_Panorama')
 # FIXME: A quick hack, need to handle errors and check whether has been
 # applied in the future.
 try:
-subprocess.check_call(['git', '-C', autotest_dir, 'apply', autotest_patch])
+subprocess.check_call(['git', '-C', autotest_dir,
+'apply', autotest_patch])
 subprocess.check_call(['cp', '-rf', 'dex2oat_input', dex2oat_dir])
 subprocess.check_call(['cp', '-rf', 'panorama_input', panorama_dir])
 print('Autotest patched successfully!')
@@ -65,7 +68,8 @@ def try_patch_panorama():
 # applied in the future.
 try:
 subprocess.check_call(['mkdir', '-p', panorama_dir])
-subprocess.check_call(['git', '-C', panorama_dir, 'apply', panorama_patch])
+subprocess.check_call(['git', '-C', panorama_dir,
+'apply', panorama_patch])
 print('Panorama patched successfully!')
 except subprocess.CalledProcessError:
 print('Panorama patch not applied, error or already patched.')
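The three try_patch_* helpers in this file share one pattern: run git apply on a checked-out directory via subprocess.check_call and, per the FIXME above, treat a CalledProcessError as "failed or already patched" rather than distinguishing the two. A minimal standalone sketch of that pattern (the function name and arguments are illustrative, not taken from the file):

    import subprocess

    def try_apply_patch(repo_dir, patch_path, name):
      # Same shape as try_patch_skia/autotest/panorama above: 'git apply'
      # exits non-zero both on a real failure and when the patch is already
      # applied, and the two cases are not yet told apart (see the FIXME).
      try:
        subprocess.check_call(['git', '-C', repo_dir, 'apply', patch_path])
        print('%s patched successfully!' % name)
      except subprocess.CalledProcessError:
        print('%s patch not applied, error or already patched.' % name)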
......
@@ -26,7 +26,10 @@ def _parse_arguments_internal(argv):
 'specified toolchain settings')
 parser.add_argument(
-'-b', '--bench', required=True, help='Select the benchmark to be built.')
+'-b',
+'--bench',
+required=True,
+help='Select the benchmark to be built.')
 parser.add_argument(
 '-c',
@@ -36,7 +39,9 @@ def _parse_arguments_internal(argv):
 'directory.')
 parser.add_argument(
-'-o', '--build_os', help='Specify the host OS to build benchmark.')
+'-o',
+'--build_os',
+help='Specify the host OS to build benchmark.')
 parser.add_argument(
 '-l',
@@ -46,11 +51,11 @@ def _parse_arguments_internal(argv):
 parser.add_argument(
 '-f',
 '--cflags',
-help='Specify the optimization cflags for '
-'the toolchain.')
+help='Specify the optimization cflags for the toolchain.')
 parser.add_argument(
-'--ldflags', help='Specify linker flags for the toolchain.')
+'--ldflags',
+help='Specify linker flags for the toolchain.')
 return parser.parse_args(argv)
@@ -91,7 +96,8 @@ def set_llvm_prebuilts_version(llvm_prebuilts_version):
 logging.info('LLVM_PREBUILTS_VERSION set to "%s"...',
 llvm_prebuilts_version)
 else:
-logging.info('No LLVM_PREBUILTS_VERSION specified, using default one...')
+logging.info('No LLVM_PREBUILTS_VERSION specified, '
+'using default one...')
 def set_compiler(compiler):
@@ -144,7 +150,8 @@ def set_compiler_env(bench, compiler, build_os, llvm_prebuilts_version, cflags,
 def remove_tmp_dir():
-tmp_dir = os.path.join(config.android_home, 'prebuilts/clang/host/linux-x86',
+tmp_dir = os.path.join(config.android_home,
+'prebuilts/clang/host/linux-x86',
 'clang-tmp')
 try:
@@ -189,8 +196,8 @@ def build_bench(bench, source_dir):
 restore_makefile(bench)
 raise
-logging.info('Logs for building benchmark %s are written to %s.', bench,
-log_file)
+logging.info('Logs for building benchmark %s are written to %s.',
+bench, log_file)
 logging.info('Benchmark built successfully!')
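To make the reflowed option declarations above concrete, here is a cut-down reconstruction of _parse_arguments_internal containing only the options that are fully visible in the hunks ('-c', '-l', and the ArgumentParser description are truncated in the diff and therefore omitted), plus an illustrative call:

    import argparse

    def _parse_arguments_internal(argv):
      parser = argparse.ArgumentParser()
      parser.add_argument(
          '-b',
          '--bench',
          required=True,
          help='Select the benchmark to be built.')
      parser.add_argument(
          '-o',
          '--build_os',
          help='Specify the host OS to build benchmark.')
      parser.add_argument(
          '-f',
          '--cflags',
          help='Specify the optimization cflags for the toolchain.')
      parser.add_argument(
          '--ldflags',
          help='Specify linker flags for the toolchain.')
      return parser.parse_args(argv)

    # 'panorama' is a placeholder benchmark name; the '=' form keeps the
    # flag value from being mistaken for another option.
    args = _parse_arguments_internal(['-b', 'panorama', '--cflags=-O2'])
    assert args.bench == 'panorama' and args.cflags == '-O2'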
......
@@ -48,7 +48,8 @@ def _GetTimeMultiplier(label_name):
 This expects the time unit to be separated from anything else by '_'.
 """
 ms_mul = 1000 * 1000.
-endings = [('_ns', 1), ('_us', 1000), ('_ms', ms_mul), ('_s', ms_mul * 1000)]
+endings = [('_ns', 1), ('_us', 1000),
+('_ms', ms_mul), ('_s', ms_mul * 1000)]
 for end, mul in endings:
 if label_name.endswith(end):
 return ms_mul / mul
@@ -97,8 +98,8 @@ def _TransformBenchmarks(raw_benchmarks):
 for bench_name, bench_result in benchmarks.iteritems():
 try:
 for cfg_name, keyvals in bench_result.iteritems():
-# Some benchmarks won't have timing data (either it won't exist at all,
-# or it'll be empty); skip them.
+# Some benchmarks won't have timing data (either it won't exist
+# at all, or it'll be empty); skip them.
 samples = keyvals.get('samples')
 if not samples:
 continue
@@ -122,9 +123,9 @@ def _TransformBenchmarks(raw_benchmarks):
 bench_result, bench_name, e.message)
 raise
-# Realistically, [results] should be multiple results, where each entry in the
-# list is the result for a different label. Because we only deal with one
-# label at the moment, we need to wrap it in its own list.
+# Realistically, [results] should be multiple results, where each entry in
+# the list is the result for a different label. Because we only deal with
+# one label at the moment, we need to wrap it in its own list.
 return results
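In the _GetTimeMultiplier hunk above, the endings table maps a label's time-unit suffix to ms_mul / mul, which works out to the number of that unit in one millisecond. A self-contained restatement with example values (the function body is copied from the hunk; the label names in the checks are made up):

    def _GetTimeMultiplier(label_name):
      # Body as shown in the hunk; per its docstring, "This expects the time
      # unit to be separated from anything else by '_'."
      ms_mul = 1000 * 1000.
      endings = [('_ns', 1), ('_us', 1000),
                 ('_ms', ms_mul), ('_s', ms_mul * 1000)]
      for end, mul in endings:
        if label_name.endswith(end):
          return ms_mul / mul

    # ms_mul / mul is the count of the labelled unit per millisecond:
    assert _GetTimeMultiplier('draw_time_ns') == 1000000.0  # 1e6 ns per ms
    assert _GetTimeMultiplier('draw_time_us') == 1000.0     # 1e3 us per ms
    assert _GetTimeMultiplier('draw_time_ms') == 1.0
    assert _GetTimeMultiplier('draw_time_s') == 0.001       # 1e-3 s per ms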
......
@@ -182,8 +182,8 @@ def check_call_with_log(cmd, log_file):
 try:
 subprocess.check_call(cmd, stdout=logfile)
 except subprocess.CalledProcessError:
-logging.error('Error running %s, please check %s for more info.', cmd,
-log_file)
+logging.error('Error running %s, please check %s for more info.',
+cmd, log_file)
 raise
 logging.info('Logs for %s are written to %s.', cmd, log_file)
@@ -260,7 +260,8 @@ def build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
 if ldflags:
 build_cmd.append('--ldflags=' + ldflags[setting_no])
-logging.info('Building benchmark for toolchain setting No.%d...', setting_no)
+logging.info('Building benchmark for toolchain setting No.%d...',
+setting_no)
 logging.info('Command: %s', build_cmd)
 try:
@@ -285,13 +286,15 @@ def run_and_collect_result(test_cmd, setting_no, i, bench, serial='default'):
 'please check test_log for details.', bench_result)
 raise OSError('Result file %s not found.' % bench_result)
-new_bench_result = 'bench_result_%s_%s_%d_%d' % (bench, serial, setting_no, i)
-new_bench_result_path = os.path.join(config.bench_suite_dir, new_bench_result)
+new_bench_result = 'bench_result_%s_%s_%d_%d' % (bench, serial,
+setting_no, i)
+new_bench_result_path = os.path.join(config.bench_suite_dir,
+new_bench_result)
 try:
 os.rename(bench_result, new_bench_result_path)
 except OSError:
-logging.error('Error while renaming raw result %s to %s', bench_result,
-new_bench_result_path)
+logging.error('Error while renaming raw result %s to %s',
+bench_result, new_bench_result_path)
 raise
 logging.info('Benchmark result saved at %s.', new_bench_result_path)
@@ -312,9 +315,9 @@ def test_bench(bench, setting_no, iterations, serials, remote, mode):
 test_cmd.append('-r=' + remote)
 test_cmd.append('-m=' + mode)
-# Deal with serials.
-# If there is no serails specified, try to run test on the only device.
-# If specified, split the serials into a list and run test on each device.
+# Deal with serials. If there is no serails specified, try to run test
+# on the only device. If specified, split the serials into a list and
+# run test on each device.
 if serials:
 for serial in serials.split(','):
 test_cmd.append('-s=' + serial)
@@ -340,7 +343,8 @@ def gen_json(bench, setting_no, iterations, serials):
 experiment = config.product_combo
 # Input format: bench_result_{bench}_{serial}_{setting_no}_
-input_file = '_'.join([bench_result, bench, serial, str(setting_no), ''])
+input_file = '_'.join([bench_result, bench,
+serial, str(setting_no), ''])
 gen_json_cmd = [
 './gen_json.py', '--input=' + input_file,
 '--output=%s.json' % os.path.join(config.bench_suite_dir, bench),
@@ -350,8 +354,8 @@ def gen_json(bench, setting_no, iterations, serials):
 logging.info('Command: %s', gen_json_cmd)
 if subprocess.call(gen_json_cmd):
-logging.error('Error while generating JSON file, please check raw data'
-'of the results at %s.', input_file)
+logging.error('Error while generating JSON file, please check raw'
+' data of the results at %s.', input_file)
 def gen_crosperf(infile, outfile):
@@ -423,7 +427,8 @@ def main(argv):
 if not test_config.read(test):
 logging.error('Error while reading from building '
 'configuration file %s.', test)
-raise RuntimeError('Error while reading configuration file %s.' % test)
+raise RuntimeError('Error while reading configuration file %s.'
+% test)
 for setting_no, section in enumerate(test_config.sections()):
 bench = test_config.get(section, 'bench')
@@ -433,15 +438,16 @@ def main(argv):
 cflags = [test_config.get(section, 'cflags')]
 ldflags = [test_config.get(section, 'ldflags')]
-# Set iterations from test_config file, if not exist, use the one from
-# command line.
+# Set iterations from test_config file, if not exist, use the one
+# from command line.
 it = test_config.get(section, 'iterations')
 if not it:
 it = iterations
 it = int(it)
 # Build benchmark for each single test configuration
-build_bench(0, bench, compiler, llvm_version, build_os, cflags, ldflags)
+build_bench(0, bench, compiler, llvm_version,
+build_os, cflags, ldflags)
 test_bench(bench, setting_no, it, serials, remote, mode)
@@ -450,7 +456,8 @@ def main(argv):
 for bench in config.bench_list:
 infile = os.path.join(config.bench_suite_dir, bench + '.json')
 if os.path.exists(infile):
-outfile = os.path.join(config.bench_suite_dir, bench + '_report')
+outfile = os.path.join(config.bench_suite_dir,
+bench + '_report')
 gen_crosperf(infile, outfile)
 # Stop script if there is only config file provided
@@ -458,14 +465,15 @@ def main(argv):
 # If no configuration file specified, continue running.
 # Check if the count of the setting arguments are log_ambiguous.
-setting_count = check_count(compiler, llvm_version, build_os, cflags, ldflags)
+setting_count = check_count(compiler, llvm_version, build_os,
+cflags, ldflags)
 for bench in bench_list:
 logging.info('Start building and running benchmark: [%s]', bench)
 # Run script for each toolchain settings
 for setting_no in xrange(setting_count):
-build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
-ldflags)
+build_bench(setting_no, bench, compiler, llvm_version,
+build_os, cflags, ldflags)
 # Run autotest script for benchmark test on device
 test_bench(bench, setting_no, iterations, serials, remote, mode)
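The check_call_with_log hunk at the top of this file shows the script's standard way of running a subcommand: stream its stdout into a per-command log file and, on failure, log a pointer to that file before re-raising. A minimal sketch of the same pattern (the hunk does not show where logfile is opened, so the open() call here is an assumption):

    import logging
    import subprocess

    def check_call_with_log(cmd, log_file):
      # Keep build/test output out of the console; on error, tell the user
      # which log to inspect and re-raise so the caller still sees the
      # failure.
      with open(log_file, 'w') as logfile:
        try:
          subprocess.check_call(cmd, stdout=logfile)
        except subprocess.CalledProcessError:
          logging.error('Error running %s, please check %s for more info.',
                        cmd, log_file)
          raise
      logging.info('Logs for %s are written to %s.', cmd, log_file)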
......
@@ -24,7 +24,8 @@ def backup_file(bench, file_type):
 'tmp_makefile')
 ])
 except subprocess.CalledProcessError():
-raise OSError('Cannot backup Android.%s file for %s' % (file_type, bench))
+raise OSError('Cannot backup Android.%s file for %s' % (file_type,
+bench))
 # Insert lines to add LOCAL_CFLAGS/LOCAL_LDFLAGS to the benchmarks
@@ -72,7 +73,7 @@ def apply_patches(bench):
 try:
 subprocess.check_call(['git', '-C', bench_dir, 'apply', flags_patch])
 except subprocess.CalledProcessError:
-raise OSError('Patch for adding flags for %s does not succeed.' % (bench))
+raise OSError('Patch for adding flags for %s does not succeed.' % bench)
 def replace_flags_in_dir(bench, cflags, ldflags):
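One detail worth noting in the backup_file hunk above: the handler is written as except subprocess.CalledProcessError(): with parentheses, which calls the exception class (with no arguments) while the exception is being matched, so a failed backup would surface as a TypeError rather than the intended OSError. A hedged sketch of the intended shape (the copy command is mostly outside the visible hunk, so the paths and arguments here are illustrative):

    import subprocess

    def backup_makefile(makefile_path, backup_path, bench, file_type):
      # The except clause must name the class, without parentheses, for the
      # OSError below to be reachable.
      try:
        subprocess.check_call(['cp', makefile_path, backup_path])
      except subprocess.CalledProcessError:
        raise OSError('Cannot backup Android.%s file for %s' % (file_type,
                                                                bench))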
......