Diffstat (limited to 'third-party/benchmark/tools/gbench/util.py')
-rw-r--r--  third-party/benchmark/tools/gbench/util.py  181
1 file changed, 0 insertions(+), 181 deletions(-)
diff --git a/third-party/benchmark/tools/gbench/util.py b/third-party/benchmark/tools/gbench/util.py
deleted file mode 100644
index 5d0012c0cb1c..000000000000
--- a/third-party/benchmark/tools/gbench/util.py
+++ /dev/null
@@ -1,181 +0,0 @@
-"""util.py - General utilities for running, loading, and processing benchmarks
-"""
-import json
-import os
-import tempfile
-import subprocess
-import sys
-import functools
-
-# Input file type enumeration
-IT_Invalid = 0
-IT_JSON = 1
-IT_Executable = 2
-
-_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
-
-
-def is_executable_file(filename):
- """
-    Return 'True' if 'filename' names an existing file that is likely
-    an executable. A file is considered an executable if it starts with
-    the magic bytes for an EXE, Mach-O, or ELF file.
- """
- if not os.path.isfile(filename):
- return False
- with open(filename, mode='rb') as f:
- magic_bytes = f.read(_num_magic_bytes)
- if sys.platform == 'darwin':
- return magic_bytes in [
- b'\xfe\xed\xfa\xce', # MH_MAGIC
- b'\xce\xfa\xed\xfe', # MH_CIGAM
- b'\xfe\xed\xfa\xcf', # MH_MAGIC_64
- b'\xcf\xfa\xed\xfe', # MH_CIGAM_64
- b'\xca\xfe\xba\xbe', # FAT_MAGIC
- b'\xbe\xba\xfe\xca' # FAT_CIGAM
- ]
- elif sys.platform.startswith('win'):
- return magic_bytes == b'MZ'
- else:
- return magic_bytes == b'\x7FELF'
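
The magic-byte check above only sniffs the file header, so it is cheap to sanity-check. A minimal sketch, assuming this module is importable as gbench.util and a typical Linux host where /bin/ls is an ELF binary:

# Hypothetical usage; the import path and file paths are assumptions.
from gbench.util import is_executable_file

print(is_executable_file('/bin/ls'))        # True on a typical Linux host
print(is_executable_file('/etc/hostname'))  # False: text files lack ELF magic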
-
-
-def is_json_file(filename):
- """
-    Return 'True' if 'filename' names a valid JSON output file and
-    'False' otherwise.
- """
- try:
- with open(filename, 'r') as f:
- json.load(f)
- return True
-    except (OSError, ValueError):  # unreadable file or invalid JSON
- pass
- return False
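
Since the probe simply attempts a full json.load(), any well-formed JSON file passes. A minimal sketch under the same import assumption:

# Hypothetical usage sketch; the import path is an assumption.
import tempfile
from gbench.util import is_json_file

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    f.write('{"benchmarks": []}')
print(is_json_file(f.name))  # True: the file parses as JSON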
-
-
-def classify_input_file(filename):
- """
- Return a tuple (type, msg) where 'type' specifies the classified type
-    of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human-readable
-    string representing the error.
- """
- ftype = IT_Invalid
- err_msg = None
- if not os.path.exists(filename):
- err_msg = "'%s' does not exist" % filename
- elif not os.path.isfile(filename):
- err_msg = "'%s' does not name a file" % filename
- elif is_executable_file(filename):
- ftype = IT_Executable
- elif is_json_file(filename):
- ftype = IT_JSON
- else:
- err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
- return ftype, err_msg
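
A minimal sketch of the (type, msg) contract, again assuming the gbench.util import; the filename is invented:

# Hypothetical sketch; 'no-such-file' is an invented path.
from gbench.util import classify_input_file, IT_Invalid

ftype, err = classify_input_file('no-such-file')
assert ftype == IT_Invalid
print(err)  # "'no-such-file' does not exist"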
-
-
-def check_input_file(filename):
- """
- Classify the file named by 'filename' and return the classification.
- If the file is classified as 'IT_Invalid' print an error message and exit
- the program.
- """
- ftype, msg = classify_input_file(filename)
- if ftype == IT_Invalid:
- print("Invalid input file: %s" % msg)
- sys.exit(1)
- return ftype
-
-
-def find_benchmark_flag(prefix, benchmark_flags):
- """
-    Search the specified list of flags for a flag matching `<prefix><arg>`
-    and, if found, return its 'arg' value. If the flag occurs more than once,
-    the last value is returned. If the flag is not found, None is returned.
- """
- assert prefix.startswith('--') and prefix.endswith('=')
- result = None
- for f in benchmark_flags:
- if f.startswith(prefix):
- result = f[len(prefix):]
- return result
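
Because the loop keeps overwriting 'result', the last occurrence of a repeated flag wins. A minimal sketch, import path assumed as before:

# Hypothetical sketch: the last occurrence of a repeated flag wins.
from gbench.util import find_benchmark_flag

flags = ['--benchmark_out=a.json', '--benchmark_out=b.json']
assert find_benchmark_flag('--benchmark_out=', flags) == 'b.json'
assert find_benchmark_flag('--benchmark_filter=', flags) is None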
-
-
-def remove_benchmark_flags(prefix, benchmark_flags):
- """
- Return a new list containing the specified benchmark_flags except those
- with the specified prefix.
- """
- assert prefix.startswith('--') and prefix.endswith('=')
- return [f for f in benchmark_flags if not f.startswith(prefix)]
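
Filtering is purely by prefix, so unrelated flags pass through untouched. A minimal sketch under the same assumptions:

# Hypothetical sketch; flag values are invented.
from gbench.util import remove_benchmark_flags

flags = ['--benchmark_out=a.json', '--benchmark_filter=BM_foo']
assert remove_benchmark_flags('--benchmark_out=', flags) == ['--benchmark_filter=BM_foo']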
-
-
-def load_benchmark_results(fname):
- """
- Read benchmark output from a file and return the JSON object.
- REQUIRES: 'fname' names a file containing JSON benchmark output.
- """
- with open(fname, 'r') as f:
- return json.load(f)
-
-
-def sort_benchmark_results(result):
- benchmarks = result['benchmarks']
-
-    # Sort stably from the innermost (least-significant) key to the
-    # outermost, so each earlier ordering survives within later groupings.
-    benchmarks = sorted(
-        benchmarks, key=lambda benchmark: benchmark.get('repetition_index', -1))
-    benchmarks = sorted(
-        benchmarks, key=lambda benchmark: 1 if benchmark.get('run_type') == "aggregate" else 0)
-    benchmarks = sorted(
-        benchmarks, key=lambda benchmark: benchmark.get('per_family_instance_index', -1))
-    benchmarks = sorted(
-        benchmarks, key=lambda benchmark: benchmark.get('family_index', -1))
-
- result['benchmarks'] = benchmarks
- return result
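
The net effect is grouping by family, then instance, then iterations before aggregates, then repetition. A minimal sketch; the keys mirror Google Benchmark's JSON output but the values are invented:

# Hypothetical sketch; entries are hand-written, not real benchmark output.
from gbench.util import sort_benchmark_results

result = {'benchmarks': [
    {'name': 'BM_b', 'family_index': 1, 'per_family_instance_index': 0,
     'run_type': 'iteration', 'repetition_index': 0},
    {'name': 'BM_a_mean', 'family_index': 0, 'per_family_instance_index': 0,
     'run_type': 'aggregate'},
    {'name': 'BM_a', 'family_index': 0, 'per_family_instance_index': 0,
     'run_type': 'iteration', 'repetition_index': 0},
]}
ordered = [b['name'] for b in sort_benchmark_results(result)['benchmarks']]
assert ordered == ['BM_a', 'BM_a_mean', 'BM_b']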
-
-
-def run_benchmark(exe_name, benchmark_flags):
- """
- Run a benchmark specified by 'exe_name' with the specified
-    'benchmark_flags'. The benchmark is run directly as a subprocess to
-    preserve real-time console output.
- RETURNS: A JSON object representing the benchmark output
- """
-    output_name = find_benchmark_flag('--benchmark_out=', benchmark_flags)
- is_temp_output = False
- if output_name is None:
- is_temp_output = True
- thandle, output_name = tempfile.mkstemp()
- os.close(thandle)
-    benchmark_flags = list(benchmark_flags) + [
-        '--benchmark_out=%s' % output_name]
-
- cmd = [exe_name] + benchmark_flags
- print("RUNNING: %s" % ' '.join(cmd))
-    exit_code = subprocess.call(cmd)
-    if exit_code != 0:
-        print('TEST FAILED...')
-        sys.exit(exit_code)
- json_res = load_benchmark_results(output_name)
- if is_temp_output:
- os.unlink(output_name)
- return json_res
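
When the caller passes no --benchmark_out= flag, results round-trip through a temp file that is deleted afterward. A minimal sketch; './bm_basic' is an assumed, pre-built benchmark binary:

# Hypothetical invocation; './bm_basic' is an assumption, not a real target.
from gbench.util import run_benchmark

res = run_benchmark('./bm_basic', ['--benchmark_filter=BM_memcpy'])
print(len(res['benchmarks']))  # number of runs reported in the JSON output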
-
-
-def run_or_load_benchmark(filename, benchmark_flags):
- """
- Get the results for a specified benchmark. If 'filename' specifies
- an executable benchmark then the results are generated by running the
- benchmark. Otherwise 'filename' must name a valid JSON output file,
- which is loaded and the result returned.
- """
- ftype = check_input_file(filename)
- if ftype == IT_JSON:
- return load_benchmark_results(filename)
- if ftype == IT_Executable:
- return run_benchmark(filename, benchmark_flags)
- raise ValueError('Unknown file type %s' % ftype)
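
The dispatcher lets callers treat a live binary and a saved JSON file interchangeably. A minimal sketch; both paths are invented:

# Hypothetical sketch; './bm_basic' and 'results.json' are invented paths.
from gbench.util import run_or_load_benchmark

live = run_or_load_benchmark('./bm_basic', ['--benchmark_repetitions=3'])
saved = run_or_load_benchmark('results.json', [])  # flags ignored for JSON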