author    Billy Donahue <billy.donahue@mongodb.com>  2020-05-04 04:52:56 -0400
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-05-09 17:27:43 +0000
commit    cbfc03979e1b567eb58a5d4bb6d8bbcd65988775 (patch)
tree      d7933d5a2bf6f04e15f4cf21d3e772680e84b8b2 /src/third_party/benchmark
parent    c4cebb3de7d11aa8f73df45f417e3be712bcf215 (diff)
download  mongo-cbfc03979e1b567eb58a5d4bb6d8bbcd65988775.tar.gz
SERVER-41567 Upgrade google-benchmark to tag mongo-v1.5.0
Diffstat (limited to 'src/third_party/benchmark')
-rw-r--r--  src/third_party/benchmark/SConscript                          |   40
-rw-r--r--  src/third_party/benchmark/dist/LICENSE                        |  202
-rw-r--r--  src/third_party/benchmark/dist/README.md                      | 1179
-rw-r--r--  src/third_party/benchmark/dist/include/benchmark/benchmark.h  | 1586
-rw-r--r--  src/third_party/benchmark/dist/src/arraysize.h                |   33
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark.cc               |  494
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_api_internal.cc  |   15
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_api_internal.h   |   53
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_main.cc          |   17
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_name.cc          |   58
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_register.cc      |  504
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_register.h       |  107
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_runner.cc        |  361
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_runner.h         |   51
-rw-r--r--  src/third_party/benchmark/dist/src/check.h                    |   82
-rw-r--r--  src/third_party/benchmark/dist/src/colorprint.cc              |  188
-rw-r--r--  src/third_party/benchmark/dist/src/colorprint.h               |   33
-rw-r--r--  src/third_party/benchmark/dist/src/commandlineflags.cc        |  222
-rw-r--r--  src/third_party/benchmark/dist/src/commandlineflags.h         |   73
-rw-r--r--  src/third_party/benchmark/dist/src/complexity.cc              |  238
-rw-r--r--  src/third_party/benchmark/dist/src/complexity.h               |   55
-rw-r--r--  src/third_party/benchmark/dist/src/console_reporter.cc        |  179
-rw-r--r--  src/third_party/benchmark/dist/src/counter.cc                 |   76
-rw-r--r--  src/third_party/benchmark/dist/src/counter.h                  |   27
-rw-r--r--  src/third_party/benchmark/dist/src/csv_reporter.cc            |  154
-rw-r--r--  src/third_party/benchmark/dist/src/cycleclock.h               |  177
-rw-r--r--  src/third_party/benchmark/dist/src/internal_macros.h          |   94
-rw-r--r--  src/third_party/benchmark/dist/src/json_reporter.cc           |  265
-rw-r--r--  src/third_party/benchmark/dist/src/log.h                      |   74
-rw-r--r--  src/third_party/benchmark/dist/src/mutex.h                    |  155
-rw-r--r--  src/third_party/benchmark/dist/src/re.h                       |  158
-rw-r--r--  src/third_party/benchmark/dist/src/reporter.cc                |  105
-rw-r--r--  src/third_party/benchmark/dist/src/sleep.cc                   |   51
-rw-r--r--  src/third_party/benchmark/dist/src/sleep.h                    |   15
-rw-r--r--  src/third_party/benchmark/dist/src/statistics.cc              |  193
-rw-r--r--  src/third_party/benchmark/dist/src/statistics.h               |   37
-rw-r--r--  src/third_party/benchmark/dist/src/string_util.cc             |  252
-rw-r--r--  src/third_party/benchmark/dist/src/string_util.h              |   59
-rw-r--r--  src/third_party/benchmark/dist/src/sysinfo.cc                 |  699
-rw-r--r--  src/third_party/benchmark/dist/src/thread_manager.h           |   64
-rw-r--r--  src/third_party/benchmark/dist/src/thread_timer.h             |   86
-rw-r--r--  src/third_party/benchmark/dist/src/timers.cc                  |  217
-rw-r--r--  src/third_party/benchmark/dist/src/timers.h                   |   48
-rwxr-xr-x  src/third_party/benchmark/scripts/import.sh                   |   34
44 files changed, 8810 insertions, 0 deletions
diff --git a/src/third_party/benchmark/SConscript b/src/third_party/benchmark/SConscript
new file mode 100644
index 00000000000..fc8549e0c0f
--- /dev/null
+++ b/src/third_party/benchmark/SConscript
@@ -0,0 +1,40 @@
+# -*- mode: python -*-
+
+Import("env")
+
+env = env.Clone()
+
+if env.TargetOSIs('windows'):
+    env.Prepend(CCFLAGS=[
+        # 'function' : destructor never returns, potential memory leak
+        '/wd4722',
+    ])
+
+    env.Append(LIBS=["ShLwApi.lib"])
+
+env.Append(CPPDEFINES=["HAVE_STD_REGEX"])
+
+src_dir = env.Dir('dist/src')
+
+env.Library(
+    target="benchmark",
+    source=env.File([
+        'benchmark_api_internal.cc',
+        'benchmark.cc',
+        'benchmark_name.cc',
+        'benchmark_register.cc',
+        'benchmark_runner.cc',
+        'colorprint.cc',
+        'commandlineflags.cc',
+        'complexity.cc',
+        'console_reporter.cc',
+        'counter.cc',
+        'csv_reporter.cc',
+        'json_reporter.cc',
+        'reporter.cc',
+        'sleep.cc',
+        'statistics.cc',
+        'string_util.cc',
+        'sysinfo.cc',
+        'timers.cc',
+    ], src_dir))
diff --git a/src/third_party/benchmark/dist/LICENSE b/src/third_party/benchmark/dist/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/src/third_party/benchmark/dist/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/third_party/benchmark/dist/README.md b/src/third_party/benchmark/dist/README.md
new file mode 100644
index 00000000000..45e41588438
--- /dev/null
+++ b/src/third_party/benchmark/dist/README.md
@@ -0,0 +1,1179 @@
+# Benchmark
+[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
+[![Build status](https://ci.appveyor.com/api/projects/status/u0qsyp7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master)
+[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
+[![slackin](https://slackin-iqtfqnpzxd.now.sh/badge.svg)](https://slackin-iqtfqnpzxd.now.sh/)
+
+
+A library to benchmark code snippets, similar to unit tests. Example:
+
+```c++
+#include <benchmark/benchmark.h>
+
+static void BM_SomeFunction(benchmark::State& state) {
+  // Perform setup here
+  for (auto _ : state) {
+    // This code gets timed
+    SomeFunction();
+  }
+}
+// Register the function as a benchmark
+BENCHMARK(BM_SomeFunction);
+// Run the benchmark
+BENCHMARK_MAIN();
+```
+
+To get started, see [Requirements](#requirements) and
+[Installation](#installation). See [Usage](#usage) for a full example and the
+[User Guide](#user-guide) for a more comprehensive feature overview.
+
+It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/googletest/docs/primer.md)
+as some of the structural aspects of the APIs are similar.
+
+### Resources
+
+[Discussion group](https://groups.google.com/d/forum/benchmark-discuss)
+
+IRC channel: [freenode](https://freenode.net) #googlebenchmark
+
+[Additional Tooling Documentation](docs/tools.md)
+
+[Assembly Testing Documentation](docs/AssemblyTests.md)
+
+## Requirements
+
+The library can be used with C++03. However, it requires C++11 to build,
+including compiler and standard library support.
+
+The following minimum versions are required to build the library:
+
+* GCC 4.8
+* Clang 3.4
+* Visual Studio 2013
+* Intel 2015 Update 1
+
+## Installation
+
+This describes the installation process using cmake. As prerequisites, you'll
+need git and cmake installed.
+
+_See [dependencies.md](dependencies.md) for more details regarding supported
+versions of build tools._
+
+```bash
+# Check out the library.
+$ git clone https://github.com/google/benchmark.git
+# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory.
+$ git clone https://github.com/google/googletest.git benchmark/googletest
+# Make a build directory to place the build output.
+$ mkdir build && cd build
+# Generate a Makefile with cmake.
+# Use cmake -G <generator> to generate a different file type.
+$ cmake ../benchmark
+# Build the library.
+$ make
+```
+This builds the `benchmark` and `benchmark_main` libraries and tests.
+On a Unix system, the build directory should now look something like this:
+
+```
+/benchmark
+/build
+ /src
+ /libbenchmark.a
+ /libbenchmark_main.a
+ /test
+ ...
+```
+
+Next, you can run the tests to check the build.
+
+```bash
+$ make test
+```
+
+If you want to install the library globally, also run:
+
+```
+sudo make install
+```
+
+Note that Google Benchmark requires Google Test to build and run the tests. This
+dependency can be provided in two ways:
+
+* Check out the Google Test sources into `benchmark/googletest` as above.
+* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
+ configuration, the library will automatically download and build any required
+ dependencies.
+
+If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
+to `CMAKE_ARGS`.
+
+### Debug vs Release
+
+By default, benchmark builds as a debug library. You will see a warning in the
+output when this is the case. To build it as a release library instead, use:
+
+```
+cmake -DCMAKE_BUILD_TYPE=Release
+```
+
+To enable link-time optimisation, use
+
+```
+cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
+```
+
+If you are using gcc, you might need to set the `GCC_AR` and `GCC_RANLIB` cmake
+cache variables if autodetection fails.
+
+If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
+`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
+
+
+### Stable and Experimental Library Versions
+
+The main branch contains the latest stable version of the benchmarking library;
+its API can be considered largely stable, with source-breaking changes made
+only upon the release of a new major version.
+
+Newer, experimental, features are implemented and tested on the
+[`v2` branch](https://github.com/google/benchmark/tree/v2). Users who wish
+to use, test, and provide feedback on the new features are encouraged to try
+this branch. However, this branch provides no stability guarantees and reserves
+the right to change and break the API at any time.
+
+## Usage
+### Basic usage
+Define a function that executes the code to measure, register it as a benchmark
+function using the `BENCHMARK` macro, and ensure an appropriate `main` function
+is available:
+
+```c++
+#include <benchmark/benchmark.h>
+
+static void BM_StringCreation(benchmark::State& state) {
+  for (auto _ : state)
+    std::string empty_string;
+}
+// Register the function as a benchmark
+BENCHMARK(BM_StringCreation);
+
+// Define another benchmark
+static void BM_StringCopy(benchmark::State& state) {
+  std::string x = "hello";
+  for (auto _ : state)
+    std::string copy(x);
+}
+BENCHMARK(BM_StringCopy);
+
+BENCHMARK_MAIN();
+```
+
+To run the benchmark, compile and link against the `benchmark` library
+(libbenchmark.a/.so). If you followed the build steps above, this
+library will be under the build directory you created.
+
+```bash
+# Example on linux after running the build steps above. Assumes the
+# `benchmark` and `build` directories are under the current directory.
+$ g++ mybenchmark.cc -std=c++11 -isystem benchmark/include \
+    -Lbuild/src -lbenchmark -lpthread -o mybenchmark
+```
+
+Alternatively, link against the `benchmark_main` library and remove
+`BENCHMARK_MAIN();` above to get the same behavior.
+
+The compiled executable will run all benchmarks by default. Pass the `--help`
+flag for option information or see the guide below.
+
+### Platform-specific instructions
+
+When the library is built using GCC it is necessary to link with the pthread
+library due to how GCC implements `std::thread`. Failing to link to pthread will
+lead to runtime exceptions (unless you're using libc++), not linker errors. See
+[issue #67](https://github.com/google/benchmark/issues/67) for more details. You
+can link to pthread by adding `-pthread` to your linker command. Note, you can
+also use `-lpthread`, but there are potential issues with ordering of command
+line parameters if you use that.
+
+If you're running benchmarks on Windows, the shlwapi library (`-lshlwapi`) is
+also required.
+
+If you're running benchmarks on Solaris, you'll want the kstat library linked in
+too (`-lkstat`).
+
+## User Guide
+
+### Command Line
+[Output Formats](#output-formats)
+
+[Output Files](#output-files)
+
+[Running a Subset of Benchmarks](#running-a-subset-of-benchmarks)
+
+[Result Comparison](#result-comparison)
+
+### Library
+[Runtime and Reporting Considerations](#runtime-and-reporting-considerations)
+
+[Passing Arguments](#passing-arguments)
+
+[Calculating Asymptotic Complexity](#asymptotic-complexity)
+
+[Templated Benchmarks](#templated-benchmarks)
+
+[Fixtures](#fixtures)
+
+[Custom Counters](#custom-counters)
+
+[Multithreaded Benchmarks](#multithreaded-benchmarks)
+
+[CPU Timers](#cpu-timers)
+
+[Manual Timing](#manual-timing)
+
+[Setting the Time Unit](#setting-the-time-unit)
+
+[Preventing Optimization](#preventing-optimization)
+
+[Reporting Statistics](#reporting-statistics)
+
+[Custom Statistics](#custom-statistics)
+
+[Using RegisterBenchmark](#using-register-benchmark)
+
+[Exiting with an Error](#exiting-with-an-error)
+
+[A Faster KeepRunning Loop](#a-faster-keep-running-loop)
+
+[Disabling CPU Frequency Scaling](#disabling-cpu-frequency-scaling)
+
+<a name="output-formats" />
+
+### Output Formats
+
+The library supports multiple output formats. Use the
+`--benchmark_format=<console|json|csv>` flag to set the format type. `console`
+is the default format.
+
+The Console format is intended to be human readable. By default
+it generates color output. Context is output on stderr and the
+tabular data on stdout. Example tabular output looks like:
+```
+Benchmark Time(ns) CPU(ns) Iterations
+----------------------------------------------------------------------
+BM_SetInsert/1024/1 28928 29349 23853 133.097kB/s 33.2742k items/s
+BM_SetInsert/1024/8 32065 32913 21375 949.487kB/s 237.372k items/s
+BM_SetInsert/1024/10 33157 33648 21431 1.13369MB/s 290.225k items/s
+```
+
+The JSON format outputs human-readable JSON split into two top-level attributes.
+The `context` attribute contains information about the run in general, including
+information about the CPU and the date.
+The `benchmarks` attribute contains a list of every benchmark run. Example JSON
+output looks like:
+```json
+{
+ "context": {
+ "date": "2015/03/17-18:40:25",
+ "num_cpus": 40,
+ "mhz_per_cpu": 2801,
+ "cpu_scaling_enabled": false,
+ "build_type": "debug"
+ },
+ "benchmarks": [
+ {
+ "name": "BM_SetInsert/1024/1",
+ "iterations": 94877,
+ "real_time": 29275,
+ "cpu_time": 29836,
+ "bytes_per_second": 134066,
+ "items_per_second": 33516
+ },
+ {
+ "name": "BM_SetInsert/1024/8",
+ "iterations": 21609,
+ "real_time": 32317,
+ "cpu_time": 32429,
+ "bytes_per_second": 986770,
+ "items_per_second": 246693
+ },
+ {
+ "name": "BM_SetInsert/1024/10",
+ "iterations": 21393,
+ "real_time": 32724,
+ "cpu_time": 33355,
+ "bytes_per_second": 1199226,
+ "items_per_second": 299807
+ }
+ ]
+}
+```
+
+The CSV format outputs comma-separated values. The `context` is output on stderr
+and the CSV itself on stdout. Example CSV output looks like:
+```
+name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
+"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
+"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
+"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
+```
+
+<a name="output-files" />
+
+### Output Files
+
+Write benchmark results to a file with the `--benchmark_out=<filename>` option.
+Specify the output format with `--benchmark_out_format={json|console|csv}`. Note
+that specifying `--benchmark_out` does not suppress the console output.
+
+<a name="running-a-subset-of-benchmarks" />
+
+### Running a Subset of Benchmarks
+
+The `--benchmark_filter=<regex>` option can be used to run only the benchmarks
+that match the specified `<regex>`. For example:
+
+```bash
+$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
+Run on (1 X 2300 MHz CPU )
+2016-06-25 19:34:24
+Benchmark Time CPU Iterations
+----------------------------------------------------
+BM_memcpy/32 11 ns 11 ns 79545455
+BM_memcpy/32k 2181 ns 2185 ns 324074
+BM_memcpy/32 12 ns 12 ns 54687500
+BM_memcpy/32k 1834 ns 1837 ns 357143
+```
+
+<a name="result-comparison" />
+
+### Result comparison
+
+It is possible to compare the benchmarking results. See the
+[Additional Tooling Documentation](docs/tools.md).
+
+<a name="runtime-and-reporting-considerations" />
+
+### Runtime and Reporting Considerations
+
+When the benchmark binary is executed, each benchmark function is run serially.
+The number of iterations to run is determined dynamically by running the
+benchmark a few times, measuring the time taken, and ensuring that the ultimate
+result is statistically stable. As such, faster benchmark functions will be run
+for more iterations than slower benchmark functions, and the number of
+iterations is thus reported.
+
+In all cases, the number of iterations for which the benchmark is run is
+governed by the amount of time the benchmark takes. Concretely, the benchmark
+runs at least one iteration and at most 1e9, stopping once the CPU time exceeds
+the minimum time, or the wallclock time exceeds 5x the minimum time. The
+minimum time is set per benchmark by calling `MinTime` on the registered
+benchmark object.
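+
+For example, a minimal sketch (`BM_test` here is a placeholder benchmark name,
+and the 2.0-second minimum is an arbitrary illustration):
+
+```c++
+// Keep iterating until at least 2 seconds of CPU time have accumulated,
+// instead of the default minimum time.
+BENCHMARK(BM_test)->MinTime(2.0);
+```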
+
+Average timings are then reported over the iterations run. If multiple
+repetitions are requested using the `--benchmark_repetitions` command-line
+option, or at registration time, the benchmark function will be run several
+times and statistical results across these repetitions will also be reported.
+
+As well as the per-benchmark entries, a preamble in the report will include
+information about the machine on which the benchmarks are run.
+
+<a name="passing-arguments" />
+
+### Passing Arguments
+
+Sometimes a family of benchmarks can be implemented with just one routine that
+takes an extra argument to specify which one of the family of benchmarks to
+run. For example, the following code defines a family of benchmarks for
+measuring the speed of `memcpy()` calls of different lengths:
+
+```c++
+static void BM_memcpy(benchmark::State& state) {
+  char* src = new char[state.range(0)];
+  char* dst = new char[state.range(0)];
+  memset(src, 'x', state.range(0));
+  for (auto _ : state)
+    memcpy(dst, src, state.range(0));
+  state.SetBytesProcessed(int64_t(state.iterations()) *
+                          int64_t(state.range(0)));
+  delete[] src;
+  delete[] dst;
+}
+BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
+```
+
+The preceding code is quite repetitive, and can be replaced with the following
+short-hand. The following invocation will pick a few appropriate arguments in
+the specified range and will generate a benchmark for each such argument.
+
+```c++
+BENCHMARK(BM_memcpy)->Range(8, 8<<10);
+```
+
+By default the arguments in the range are generated in multiples of eight and
+the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the
+range multiplier is changed to multiples of two.
+
+```c++
+BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10);
+```
+Now arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ].
+
+You might have a benchmark that depends on two or more inputs. For example, the
+following code defines a family of benchmarks for measuring the speed of set
+insertion.
+
+```c++
+static void BM_SetInsert(benchmark::State& state) {
+  std::set<int> data;
+  for (auto _ : state) {
+    state.PauseTiming();
+    data = ConstructRandomSet(state.range(0));
+    state.ResumeTiming();
+    for (int j = 0; j < state.range(1); ++j)
+      data.insert(RandomNumber());
+  }
+}
+BENCHMARK(BM_SetInsert)
+    ->Args({1<<10, 128})
+    ->Args({2<<10, 128})
+    ->Args({4<<10, 128})
+    ->Args({8<<10, 128})
+    ->Args({1<<10, 512})
+    ->Args({2<<10, 512})
+    ->Args({4<<10, 512})
+    ->Args({8<<10, 512});
+```
+
+The preceding code is quite repetitive, and can be replaced with the following
+short-hand. The following macro will pick a few appropriate arguments in the
+product of the two specified ranges and will generate a benchmark for each such
+pair.
+
+```c++
+BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
+```
+
+For more complex patterns of inputs, passing a custom function to `Apply` allows
+programmatic specification of an arbitrary set of arguments on which to run the
+benchmark. The following example enumerates a dense range on one parameter,
+and a sparse range on the second.
+
+```c++
+static void CustomArguments(benchmark::internal::Benchmark* b) {
+  for (int i = 0; i <= 10; ++i)
+    for (int j = 32; j <= 1024*1024; j *= 8)
+      b->Args({i, j});
+}
+BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
+```
+
+#### Passing Arbitrary Arguments to a Benchmark
+
+In C++11 it is possible to define a benchmark that takes an arbitrary number
+of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
+macro creates a benchmark that invokes `func` with the `benchmark::State` as
+the first argument followed by the specified `args...`.
+The `test_case_name` is appended to the name of the benchmark and
+should describe the values passed.
+
+```c++
+template <class ...ExtraArgs>
+void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
+  [...]
+}
+// Registers a benchmark named "BM_takes_args/int_string_test" that passes
+// the specified values to `extra_args`.
+BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
+```
+Note that elements of `...args` may refer to global variables. Users should
+avoid modifying global state inside of a benchmark.
+
+<a name="asymptotic-complexity" />
+
+### Calculating Asymptotic Complexity (Big O)
+
+Asymptotic complexity might be calculated for a family of benchmarks. The
+following code will calculate the coefficient for the high-order term in the
+running time and the normalized root-mean square error of string comparison.
+
+```c++
+static void BM_StringCompare(benchmark::State& state) {
+  std::string s1(state.range(0), '-');
+  std::string s2(state.range(0), '-');
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(s1.compare(s2));
+  }
+  state.SetComplexityN(state.range(0));
+}
+BENCHMARK(BM_StringCompare)
+    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN);
+```
+
+As shown in the following invocation, asymptotic complexity might also be
+calculated automatically.
+
+```c++
+BENCHMARK(BM_StringCompare)
+    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity();
+```
+
+The following code will specify asymptotic complexity with a lambda function
+that can be used to customize the high-order term calculation.
+
+```c++
+BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
+    ->Range(1<<10, 1<<18)->Complexity([](int64_t n)->double{return n; });
+```
+
+<a name="templated-benchmarks" />
+
+### Templated Benchmarks
+
+This example produces and consumes messages of size `sizeof(v)` `state.range(0)`
+times. It also outputs throughput in the absence of multiprogramming.
+
+```c++
+template <class Q> void BM_Sequential(benchmark::State& state) {
+  Q q;
+  typename Q::value_type v;
+  for (auto _ : state) {
+    for (int i = state.range(0); i--; )
+      q.push(v);
+    for (int e = state.range(0); e--; )
+      q.Wait(&v);
+  }
+  // actually messages, not bytes:
+  state.SetBytesProcessed(
+      static_cast<int64_t>(state.iterations())*state.range(0));
+}
+BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
+```
+
+Three macros are provided for adding benchmark templates.
+
+```c++
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters.
+#else // C++ < C++11
+#define BENCHMARK_TEMPLATE(func, arg1)
+#endif
+#define BENCHMARK_TEMPLATE1(func, arg1)
+#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
+```
+
+<a name="fixtures" />
+
+### Fixtures
+
+Fixture tests are created by first defining a type that derives from
+`::benchmark::Fixture` and then creating/registering the tests using the
+following macros:
+
+* `BENCHMARK_F(ClassName, Method)`
+* `BENCHMARK_DEFINE_F(ClassName, Method)`
+* `BENCHMARK_REGISTER_F(ClassName, Method)`
+
+For example:
+
+```c++
+class MyFixture : public benchmark::Fixture {
+public:
+  void SetUp(const ::benchmark::State& state) {
+  }
+
+  void TearDown(const ::benchmark::State& state) {
+  }
+};
+
+BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
+  for (auto _ : st) {
+    ...
+  }
+}
+
+BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
+  for (auto _ : st) {
+    ...
+  }
+}
+/* BarTest is NOT registered */
+BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
+/* BarTest is now registered */
+```
+
+#### Templated Fixtures
+
+You can also create templated fixtures by using the following macros:
+
+* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
+* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
+
+For example:
+```c++
+template<typename T>
+class MyFixture : public benchmark::Fixture {};
+
+BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
+  for (auto _ : st) {
+    ...
+  }
+}
+
+BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
+  for (auto _ : st) {
+    ...
+  }
+}
+
+BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
+```
+
+<a name="custom-counters" />
+
+### Custom Counters
+
+You can add your own counters with user-defined names. The example below
+will add columns "Foo", "Bar" and "Baz" to its output:
+
+```c++
+static void UserCountersExample1(benchmark::State& state) {
+  double numFoos = 0, numBars = 0, numBazs = 0;
+  for (auto _ : state) {
+    // ... count Foo,Bar,Baz events
+  }
+  state.counters["Foo"] = numFoos;
+  state.counters["Bar"] = numBars;
+  state.counters["Baz"] = numBazs;
+}
+```
+
+The `state.counters` object is a `std::map` with `std::string` keys
+and `Counter` values. The latter is a `double`-like class, via an implicit
+conversion to `double&`. Thus you can use all of the standard arithmetic
+assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
+
+In multithreaded benchmarks, each counter is set on the calling thread only.
+When the benchmark finishes, the counters from each thread will be summed;
+the resulting sum is the value which will be shown for the benchmark.
+
+The `Counter` constructor accepts three parameters: the value as a `double`;
+a bit flag which allows you to show counters as rates, and/or as per-thread
+iteration, and/or as per-thread averages, and/or iteration invariants;
+and a flag specifying the 'unit', i.e. whether 1k is 1000 (default,
+`benchmark::Counter::OneK::kIs1000`) or 1024
+(`benchmark::Counter::OneK::kIs1024`).
+
+```c++
+  // sets a simple counter
+  state.counters["Foo"] = numFoos;
+
+  // Set the counter as a rate. It will be presented divided
+  // by the duration of the benchmark.
+  state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate);
+
+  // Set the counter as a thread-average quantity. It will
+  // be presented divided by the number of threads.
+  state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads);
+
+  // There's also a combined flag:
+  state.counters["FooAvgRate"] = Counter(numFoos, benchmark::Counter::kAvgThreadsRate);
+
+  // This says that we process with the rate of state.range(0) bytes every iteration:
+  state.counters["BytesProcessed"] = Counter(state.range(0),
+      benchmark::Counter::kIsIterationInvariantRate,
+      benchmark::Counter::OneK::kIs1024);
+```
+
+When you're compiling in C++11 mode or later you can use `insert()` with
+`std::initializer_list`:
+
+```c++
+  // With C++11, this can be done:
+  state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}});
+  // ... instead of:
+  state.counters["Foo"] = numFoos;
+  state.counters["Bar"] = numBars;
+  state.counters["Baz"] = numBazs;
+```
+
+#### Counter Reporting
+
+When using the console reporter, by default, user counters are printed at
+the end after the table, the same way as ``bytes_processed`` and
+``items_processed``. This is best for cases in which there are few counters,
+or where there are only a couple of lines per benchmark. Here's an example of
+the default output:
+
+```
+------------------------------------------------------------------------------
+Benchmark Time CPU Iterations UserCounters...
+------------------------------------------------------------------------------
+BM_UserCounter/threads:8 2248 ns 10277 ns 68808 Bar=16 Bat=40 Baz=24 Foo=8
+BM_UserCounter/threads:1 9797 ns 9788 ns 71523 Bar=2 Bat=5 Baz=3 Foo=1
+BM_UserCounter/threads:2 4924 ns 9842 ns 71036 Bar=4 Bat=10 Baz=6 Foo=2
+BM_UserCounter/threads:4 2589 ns 10284 ns 68012 Bar=8 Bat=20 Baz=12 Foo=4
+BM_UserCounter/threads:8 2212 ns 10287 ns 68040 Bar=16 Bat=40 Baz=24 Foo=8
+BM_UserCounter/threads:16 1782 ns 10278 ns 68144 Bar=32 Bat=80 Baz=48 Foo=16
+BM_UserCounter/threads:32 1291 ns 10296 ns 68256 Bar=64 Bat=160 Baz=96 Foo=32
+BM_UserCounter/threads:4 2615 ns 10307 ns 68040 Bar=8 Bat=20 Baz=12 Foo=4
+BM_Factorial 26 ns 26 ns 26608979 40320
+BM_Factorial/real_time 26 ns 26 ns 26587936 40320
+BM_CalculatePiRange/1 16 ns 16 ns 45704255 0
+BM_CalculatePiRange/8 73 ns 73 ns 9520927 3.28374
+BM_CalculatePiRange/64 609 ns 609 ns 1140647 3.15746
+BM_CalculatePiRange/512 4900 ns 4901 ns 142696 3.14355
+```
+
+If this doesn't suit you, you can print each counter as a table column by
+passing the flag `--benchmark_counters_tabular=true` to the benchmark
+application. This is best for cases in which there are a lot of counters, or
+a lot of lines per individual benchmark. Note that this will trigger a
+reprinting of the table header any time the counter set changes between
+individual benchmarks. Here's an example of corresponding output when
+`--benchmark_counters_tabular=true` is passed:
+
+```
+---------------------------------------------------------------------------------------
+Benchmark Time CPU Iterations Bar Bat Baz Foo
+---------------------------------------------------------------------------------------
+BM_UserCounter/threads:8 2198 ns 9953 ns 70688 16 40 24 8
+BM_UserCounter/threads:1 9504 ns 9504 ns 73787 2 5 3 1
+BM_UserCounter/threads:2 4775 ns 9550 ns 72606 4 10 6 2
+BM_UserCounter/threads:4 2508 ns 9951 ns 70332 8 20 12 4
+BM_UserCounter/threads:8 2055 ns 9933 ns 70344 16 40 24 8
+BM_UserCounter/threads:16 1610 ns 9946 ns 70720 32 80 48 16
+BM_UserCounter/threads:32 1192 ns 9948 ns 70496 64 160 96 32
+BM_UserCounter/threads:4 2506 ns 9949 ns 70332 8 20 12 4
+--------------------------------------------------------------
+Benchmark Time CPU Iterations
+--------------------------------------------------------------
+BM_Factorial 26 ns 26 ns 26392245 40320
+BM_Factorial/real_time 26 ns 26 ns 26494107 40320
+BM_CalculatePiRange/1 15 ns 15 ns 45571597 0
+BM_CalculatePiRange/8 74 ns 74 ns 9450212 3.28374
+BM_CalculatePiRange/64 595 ns 595 ns 1173901 3.15746
+BM_CalculatePiRange/512 4752 ns 4752 ns 147380 3.14355
+BM_CalculatePiRange/4k 37970 ns 37972 ns 18453 3.14184
+BM_CalculatePiRange/32k 303733 ns 303744 ns 2305 3.14162
+BM_CalculatePiRange/256k 2434095 ns 2434186 ns 288 3.1416
+BM_CalculatePiRange/1024k 9721140 ns 9721413 ns 71 3.14159
+BM_CalculatePi/threads:8 2255 ns 9943 ns 70936
+```
+Note above the additional header printed when the benchmark changes from
+``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does
+not have the same counter set as ``BM_UserCounter``.
+
+<a name="multithreaded-benchmarks" />
+
+### Multithreaded Benchmarks
+
+In a multithreaded test (benchmark invoked by multiple threads simultaneously),
+it is guaranteed that none of the threads will start until all have reached
+the start of the benchmark loop, and all will have finished before any thread
+exits the benchmark loop. (This behavior is also provided by the `KeepRunning()`
+API.) As such, any global setup or teardown can be wrapped in a check against
+the thread index:
+
+```c++
+static void BM_MultiThreaded(benchmark::State& state) {
+  if (state.thread_index == 0) {
+    // Setup code here.
+  }
+  for (auto _ : state) {
+    // Run the test as normal.
+  }
+  if (state.thread_index == 0) {
+    // Teardown code here.
+  }
+}
+BENCHMARK(BM_MultiThreaded)->Threads(2);
+```
+
+If the benchmarked code itself uses threads and you want to compare it to
+single-threaded code, you may want to use real-time ("wallclock") measurements
+for latency comparisons:
+
+```c++
+BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime();
+```
+
+Without `UseRealTime`, CPU time is used by default.
+
+<a name="cpu-timers" />
+
+### CPU Timers
+
+By default, the CPU timer only measures the time spent by the main thread.
+If the benchmark itself uses threads internally, this measurement may not
+be what you are looking for. Instead, there is a way to measure the total
+CPU usage of the process, by all the threads.
+
+```c++
+void callee(int i);
+
+static void MyMain(int size) {
+#pragma omp parallel for
+  for (int i = 0; i < size; i++)
+    callee(i);
+}
+
+static void BM_OpenMP(benchmark::State& state) {
+  for (auto _ : state)
+    MyMain(state.range(0));
+}
+
+// Measure the time spent by the main thread, use it to decide for how long to
+// run the benchmark loop. Depending on the internal implementation, this may
+// measure anywhere from near-zero (the overhead spent before/after work
+// handoff to worker thread[s]) to the whole single-thread time.
+BENCHMARK(BM_OpenMP)->Range(8, 8<<10);
+
+// Measure the user-visible time, the wall clock (literally, the time that
+// has passed on the clock on the wall), use it to decide for how long to
+// run the benchmark loop. This will always be meaningful, and will match the
+// time spent by the main thread in the single-threaded case, in general
+// decreasing with the number of internal threads doing the work.
+BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->UseRealTime();
+
+// Measure the total CPU consumption, use it to decide for how long to
+// run the benchmark loop. This will always measure to no less than the
+// time spent by the main thread in the single-threaded case.
+BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime();
+
+// A mixture of the last two. Measure the total CPU consumption, but use the
+// wall clock to decide for how long to run the benchmark loop.
+BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime()->UseRealTime();
+```
+
+#### Controlling Timers
+
+Normally, the entire duration of the work loop (`for (auto _ : state) {}`)
+is measured. But sometimes, it is necessary to do some work inside of
+that loop, every iteration, without counting that time towards the benchmark
+time. That is possible, although it is not recommended, since it has high
+overhead.
+
+```c++
+static void BM_SetInsert_With_Timer_Control(benchmark::State& state) {
+  std::set<int> data;
+  for (auto _ : state) {
+    state.PauseTiming(); // Stop timers. They will not count until they are resumed.
+    data = ConstructRandomSet(state.range(0)); // Do something that should not be measured
+    state.ResumeTiming(); // And resume timers. They are now counting again.
+    // The rest will be measured.
+    for (int j = 0; j < state.range(1); ++j)
+      data.insert(RandomNumber());
+  }
+}
+BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}});
+```
+
+<a name="manual-timing" />
+
+### Manual Timing
+
+For benchmarking something for which neither CPU time nor real time is
+accurate enough, completely manual timing is supported using
+the `UseManualTime` function.
+
+When `UseManualTime` is used, the benchmarked code must call
+`SetIterationTime` once per iteration of the benchmark loop to
+report the manually measured time.
+
+An example use case for this is benchmarking GPU execution (e.g. OpenCL
+or CUDA kernels, OpenGL or Vulkan or Direct3D draw calls), which cannot
+be accurately measured using CPU time or real-time. Instead, they can be
+measured accurately using a dedicated API, and these measurement results
+can be reported back with `SetIterationTime`.
+
+```c++
+static void BM_ManualTiming(benchmark::State& state) {
+  int microseconds = state.range(0);
+  std::chrono::duration<double, std::micro> sleep_duration {
+    static_cast<double>(microseconds)
+  };
+
+  for (auto _ : state) {
+    auto start = std::chrono::high_resolution_clock::now();
+    // Simulate some useful workload with a sleep
+    std::this_thread::sleep_for(sleep_duration);
+    auto end = std::chrono::high_resolution_clock::now();
+
+    auto elapsed_seconds =
+        std::chrono::duration_cast<std::chrono::duration<double>>(
+            end - start);
+
+    state.SetIterationTime(elapsed_seconds.count());
+  }
+}
+BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime();
+```
+
+<a name="setting-the-time-unit" />
+
+### Setting the Time Unit
+
+If a benchmark runs for a few milliseconds it may be hard to visually compare
+the measured times, since the output data is given in nanoseconds by default.
+To set the time unit manually, specify it on the benchmark:
+
+```c++
+BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
+```
+
+<a name="preventing-optimization" />
+
+### Preventing Optimization
+
+To prevent a value or expression from being optimized away by the compiler,
+the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
+functions can be used.
+
+```c++
+static void BM_test(benchmark::State& state) {
+  for (auto _ : state) {
+    int x = 0;
+    for (int i = 0; i < 64; ++i) {
+      benchmark::DoNotOptimize(x += i);
+    }
+  }
+}
+```
+
+`DoNotOptimize(<expr>)` forces the *result* of `<expr>` to be stored in either
+memory or a register. For GNU based compilers it acts as a read/write barrier
+for global memory. More specifically it forces the compiler to flush pending
+writes to memory and reload any other values as necessary.
+
+Note that `DoNotOptimize(<expr>)` does not prevent optimizations on `<expr>`
+in any way. `<expr>` may even be removed entirely when the result is already
+known. For example:
+
+```c++
+ /* Example 1: `<expr>` is removed entirely. */
+ int foo(int x) { return x + 42; }
+ while (...) DoNotOptimize(foo(0)); // Optimized to DoNotOptimize(42);
+
+ /* Example 2: Result of '<expr>' is only reused */
+ int bar(int) __attribute__((const));
+ while (...) DoNotOptimize(bar(0)); // Optimized to:
+ // int __result__ = bar(0);
+ // while (...) DoNotOptimize(__result__);
+```
+
+The second tool for preventing optimizations is `ClobberMemory()`. In essence
+`ClobberMemory()` forces the compiler to perform all pending writes to global
+memory. Memory managed by block scope objects must be "escaped" using
+`DoNotOptimize(...)` before it can be clobbered. In the below example
+`ClobberMemory()` prevents the call to `v.push_back(42)` from being optimized
+away.
+
+```c++
+static void BM_vector_push_back(benchmark::State& state) {
+  for (auto _ : state) {
+    std::vector<int> v;
+    v.reserve(1);
+    benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
+    v.push_back(42);
+    benchmark::ClobberMemory(); // Force 42 to be written to memory.
+  }
+}
+```
+
+Note that `ClobberMemory()` is only available for GNU or MSVC based compilers.
+
+<a name="reporting-statistics" />
+
+### Statistics: Reporting the Mean, Median and Standard Deviation of Repeated Benchmarks
+
+By default each benchmark is run once and that single result is reported.
+However benchmarks are often noisy and a single result may not be representative
+of the overall behavior. For this reason it's possible to repeatedly rerun the
+benchmark.
+
+The number of runs of each benchmark is specified globally by the
+`--benchmark_repetitions` flag or on a per benchmark basis by calling
+`Repetitions` on the registered benchmark object. When a benchmark is run more
+than once the mean, median and standard deviation of the runs will be reported.
+
+Additionally the `--benchmark_report_aggregates_only={true|false}` and
+`--benchmark_display_aggregates_only={true|false}` flags, or the
+`ReportAggregatesOnly(bool)` and `DisplayAggregatesOnly(bool)` functions, can be
+used to change how repeated tests are reported. By default the result of each
+repeated run is reported. When the `report aggregates only` option is `true`,
+only the aggregates (i.e. mean, median and standard deviation, plus complexity
+measurements if they were requested) of the runs are reported, to both
+reporters: standard output (console) and the file.
+When only the `display aggregates only` option is `true`,
+only the aggregates are displayed on standard output, while the file
+output still contains everything.
+Calling `ReportAggregatesOnly(bool)` / `DisplayAggregatesOnly(bool)` on a
+registered benchmark object overrides the value of the appropriate flag for that
+benchmark.
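+
+For example, a minimal sketch combining repetitions with aggregate-only
+reporting (`BM_test` here is a placeholder benchmark name):
+
+```c++
+// Run the benchmark 10 times; report only the mean, median, and standard
+// deviation, rather than each individual repetition.
+BENCHMARK(BM_test)->Repetitions(10)->ReportAggregatesOnly(true);
+```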
+
+<a name="custom-statistics" />
+
+### Custom Statistics
+
+While having mean, median and standard deviation is nice, this may not be
+enough for everyone. For example you may want to know what the largest
+observation is, e.g. because you have some real-time constraints. This is easy.
+The following code will specify a custom statistic to be calculated, defined
+by a lambda function.
+
+```c++
+void BM_spin_empty(benchmark::State& state) {
+  for (auto _ : state) {
+    for (int x = 0; x < state.range(0); ++x) {
+      benchmark::DoNotOptimize(x);
+    }
+  }
+}
+
+BENCHMARK(BM_spin_empty)
+    ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
+      return *(std::max_element(std::begin(v), std::end(v)));
+    })
+    ->Arg(512);
+```
+
+<a name="using-register-benchmark" />
+
+### Using RegisterBenchmark(name, fn, args...)
+
+The `RegisterBenchmark(name, func, args...)` function provides an alternative
+way to create and register benchmarks.
+`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
+pointer to a new benchmark with the specified `name` that invokes
+`func(st, args...)` where `st` is a `benchmark::State` object.
+
+Unlike the `BENCHMARK` registration macros, which can only be used at global
+scope, `RegisterBenchmark` can be called anywhere. This allows for
+benchmark tests to be registered programmatically.
+
+Additionally `RegisterBenchmark` allows any callable object to be registered
+as a benchmark, including capturing lambdas and function objects.
+
+For example:
+```c++
+auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
+
+int main(int argc, char** argv) {
+  for (auto& test_input : { /* ... */ })
+    benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
+  benchmark::Initialize(&argc, argv);
+  benchmark::RunSpecifiedBenchmarks();
+}
+```
+
+<a name="exiting-with-an-error" />
+
+### Exiting with an Error
+
+When errors caused by external influences, such as file I/O and network
+communication, occur within a benchmark, the
+`State::SkipWithError(const char* msg)` function can be used to skip that run
+of the benchmark and report the error. Note that only future iterations of
+`KeepRunning()` are skipped. For the ranged-for version of the benchmark loop,
+users must explicitly exit the loop, otherwise all iterations will be performed.
+Users may explicitly return to exit the benchmark immediately.
+
+The `SkipWithError(...)` function may be used at any point within the benchmark,
+including before and after the benchmark loop.
+
+For example:
+
+```c++
+static void BM_test(benchmark::State& state) {
+  auto resource = GetResource();
+  if (!resource.good()) {
+    state.SkipWithError("Resource is not good!");
+    // KeepRunning() loop will not be entered.
+  }
+  for (state.KeepRunning()) {
+    auto data = resource.read_data();
+    if (!resource.good()) {
+      state.SkipWithError("Failed to read data!");
+      break; // Needed to skip the rest of the iteration.
+    }
+    do_stuff(data);
+  }
+}
+
+static void BM_test_ranged_for(benchmark::State& state) {
+  state.SkipWithError("test will not be entered");
+  for (auto _ : state) {
+    state.SkipWithError("Failed!");
+    break; // REQUIRED to prevent all further iterations.
+  }
+}
+```
+<a name="a-faster-keep-running-loop" />
+
+### A Faster KeepRunning Loop
+
+In C++11 mode, a range-based for loop should be used in preference to
+the `KeepRunning` loop for running the benchmarks. For example:
+
+```c++
+static void BM_Fast(benchmark::State &state) {
+ for (auto _ : state) {
+ FastOperation();
+ }
+}
+BENCHMARK(BM_Fast);
+```
+
+The ranged-for loop is faster than using `KeepRunning` because `KeepRunning`
+requires a memory load and store of the iteration count every iteration,
+whereas the ranged-for variant is able to keep the iteration count in a
+register.
+
+For example, an empty inner loop using the range-based for method looks like:
+
+```asm
+# Loop Init
+  mov rbx, qword ptr [r14 + 104]
+  call benchmark::State::StartKeepRunning()
+  test rbx, rbx
+  je .LoopEnd
+.LoopHeader: # =>This Inner Loop Header: Depth=1
+  add rbx, -1
+  jne .LoopHeader
+.LoopEnd:
+```
+
+Compared to an empty `KeepRunning` loop, which looks like:
+
+```asm
+.LoopHeader: # in Loop: Header=BB0_3 Depth=1
+  cmp byte ptr [rbx], 1
+  jne .LoopInit
+.LoopBody: # =>This Inner Loop Header: Depth=1
+  mov rax, qword ptr [rbx + 8]
+  lea rcx, [rax + 1]
+  mov qword ptr [rbx + 8], rcx
+  cmp rax, qword ptr [rbx + 104]
+  jb .LoopHeader
+  jmp .LoopEnd
+.LoopInit:
+  mov rdi, rbx
+  call benchmark::State::StartKeepRunning()
+  jmp .LoopBody
+.LoopEnd:
+```
+
+Unless C++03 compatibility is required, the ranged-for variant of writing
+the benchmark loop should be preferred.
+
+<a name="disabling-cpu-frequency-scaling" />
+
+### Disabling CPU Frequency Scaling
+If you see this error:
+```
+***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
+```
+you might want to disable the CPU frequency scaling while running the benchmark:
+```bash
+sudo cpupower frequency-set --governor performance
+./mybench
+sudo cpupower frequency-set --governor powersave
+```
diff --git a/src/third_party/benchmark/dist/include/benchmark/benchmark.h b/src/third_party/benchmark/dist/include/benchmark/benchmark.h
new file mode 100644
index 00000000000..4f40501596e
--- /dev/null
+++ b/src/third_party/benchmark/dist/include/benchmark/benchmark.h
@@ -0,0 +1,1586 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Support for registering benchmarks for functions.
+
+/* Example usage:
+// Define a function that executes the code to be measured a
+// specified number of times:
+static void BM_StringCreation(benchmark::State& state) {
+ for (auto _ : state)
+ std::string empty_string;
+}
+
+// Register the function as a benchmark
+BENCHMARK(BM_StringCreation);
+
+// Define another benchmark
+static void BM_StringCopy(benchmark::State& state) {
+ std::string x = "hello";
+ for (auto _ : state)
+ std::string copy(x);
+}
+BENCHMARK(BM_StringCopy);
+
+// Augment the main() program to invoke benchmarks if specified
+// via the --benchmarks command line flag. E.g.,
+// my_unittest --benchmark_filter=all
+// my_unittest --benchmark_filter=BM_StringCreation
+// my_unittest --benchmark_filter=String
+// my_unittest --benchmark_filter='Copy|Creation'
+int main(int argc, char** argv) {
+ benchmark::Initialize(&argc, argv);
+ benchmark::RunSpecifiedBenchmarks();
+ return 0;
+}
+
+// Sometimes a family of microbenchmarks can be implemented with
+// just one routine that takes an extra argument to specify which
+// one of the family of benchmarks to run. For example, the following
+// code defines a family of microbenchmarks for measuring the speed
+// of memcpy() calls of different lengths:
+
+static void BM_memcpy(benchmark::State& state) {
+ char* src = new char[state.range(0)]; char* dst = new char[state.range(0)];
+ memset(src, 'x', state.range(0));
+ for (auto _ : state)
+ memcpy(dst, src, state.range(0));
+ state.SetBytesProcessed(state.iterations() * state.range(0));
+ delete[] src; delete[] dst;
+}
+BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
+
+// The preceding code is quite repetitive, and can be replaced with the
+// following short-hand. The following invocation will pick a few
+// appropriate arguments in the specified range and will generate a
+// microbenchmark for each such argument.
+BENCHMARK(BM_memcpy)->Range(8, 8<<10);
+
+// You might have a microbenchmark that depends on two inputs. For
+// example, the following code defines a family of microbenchmarks for
+// measuring the speed of set insertion.
+static void BM_SetInsert(benchmark::State& state) {
+ set<int> data;
+ for (auto _ : state) {
+ state.PauseTiming();
+ data = ConstructRandomSet(state.range(0));
+ state.ResumeTiming();
+ for (int j = 0; j < state.range(1); ++j)
+ data.insert(RandomNumber());
+ }
+}
+BENCHMARK(BM_SetInsert)
+ ->Args({1<<10, 128})
+ ->Args({2<<10, 128})
+ ->Args({4<<10, 128})
+ ->Args({8<<10, 128})
+ ->Args({1<<10, 512})
+ ->Args({2<<10, 512})
+ ->Args({4<<10, 512})
+ ->Args({8<<10, 512});
+
+// The preceding code is quite repetitive, and can be replaced with
+// the following short-hand. The following macro will pick a few
+// appropriate arguments in the product of the two specified ranges
+// and will generate a microbenchmark for each such pair.
+BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
+
+// For more complex patterns of inputs, passing a custom function
+// to Apply allows programmatic specification of an
+// arbitrary set of arguments to run the microbenchmark on.
+// The following example enumerates a dense range on
+// one parameter, and a sparse range on the second.
+static void CustomArguments(benchmark::internal::Benchmark* b) {
+ for (int i = 0; i <= 10; ++i)
+ for (int j = 32; j <= 1024*1024; j *= 8)
+ b->Args({i, j});
+}
+BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
+
+// Templated microbenchmarks work the same way:
+// Produce then consume 'size' messages 'iters' times
+// Measures throughput in the absence of multiprogramming.
+template <class Q> void BM_Sequential(benchmark::State& state) {
+ Q q;
+ typename Q::value_type v;
+ for (auto _ : state) {
+ for (int i = state.range(0); i--; )
+ q.push(v);
+ for (int e = state.range(0); e--; )
+ q.Wait(&v);
+ }
+ // actually messages, not bytes:
+ state.SetBytesProcessed(state.iterations() * state.range(0));
+}
+BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
+
+Use `Benchmark::MinTime(double t)` to set the minimum time used to run the
+benchmark. This option overrides the `benchmark_min_time` flag.
+
+void BM_test(benchmark::State& state) {
+ ... body ...
+}
+BENCHMARK(BM_test)->MinTime(2.0); // Run for at least 2 seconds.
+
+In a multithreaded test, it is guaranteed that none of the threads will start
+until all have reached the loop start, and all will have finished before any
+thread exits the loop body. As such, any global setup or teardown you want to
+do can be wrapped in a check against the thread index:
+
+static void BM_MultiThreaded(benchmark::State& state) {
+ if (state.thread_index == 0) {
+ // Setup code here.
+ }
+ for (auto _ : state) {
+ // Run the test as normal.
+ }
+ if (state.thread_index == 0) {
+ // Teardown code here.
+ }
+}
+BENCHMARK(BM_MultiThreaded)->Threads(4);
+
+
+If a benchmark runs for only a few milliseconds it may be hard to visually
+compare the measured times, since the output data is given in nanoseconds by
+default. In order to make the output easier to read, set the time unit
+explicitly:
+
+BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
+*/
+
+#ifndef BENCHMARK_BENCHMARK_H_
+#define BENCHMARK_BENCHMARK_H_
+
+// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer.
+#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
+#define BENCHMARK_HAS_CXX11
+#endif
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iosfwd>
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#if defined(BENCHMARK_HAS_CXX11)
+#include <initializer_list>
+#include <type_traits>
+#include <utility>
+#endif
+
+#if defined(_MSC_VER)
+#include <intrin.h> // for _ReadWriteBarrier
+#endif
+
+#ifndef BENCHMARK_HAS_CXX11
+#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ TypeName& operator=(const TypeName&)
+#else
+#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&) = delete; \
+ TypeName& operator=(const TypeName&) = delete
+#endif
+
+#if defined(__GNUC__)
+#define BENCHMARK_UNUSED __attribute__((unused))
+#define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline))
+#define BENCHMARK_NOEXCEPT noexcept
+#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
+#elif defined(_MSC_VER) && !defined(__clang__)
+#define BENCHMARK_UNUSED
+#define BENCHMARK_ALWAYS_INLINE __forceinline
+#if _MSC_VER >= 1900
+#define BENCHMARK_NOEXCEPT noexcept
+#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
+#else
+#define BENCHMARK_NOEXCEPT
+#define BENCHMARK_NOEXCEPT_OP(x)
+#endif
+#define __func__ __FUNCTION__
+#else
+#define BENCHMARK_UNUSED
+#define BENCHMARK_ALWAYS_INLINE
+#define BENCHMARK_NOEXCEPT
+#define BENCHMARK_NOEXCEPT_OP(x)
+#endif
+
+#define BENCHMARK_INTERNAL_TOSTRING2(x) #x
+#define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x)
+
+#if defined(__GNUC__) || defined(__clang__)
+#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y)
+#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg)))
+#else
+#define BENCHMARK_BUILTIN_EXPECT(x, y) x
+#define BENCHMARK_DEPRECATED_MSG(msg)
+#define BENCHMARK_WARNING_MSG(msg) \
+ __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \
+ __LINE__) ") : warning note: " msg))
+#endif
+
+#if defined(__GNUC__) && !defined(__clang__)
+#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+#endif
+
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+#if defined(__GNUC__) || __has_builtin(__builtin_unreachable)
+#define BENCHMARK_UNREACHABLE() __builtin_unreachable()
+#elif defined(_MSC_VER)
+#define BENCHMARK_UNREACHABLE() __assume(false)
+#else
+#define BENCHMARK_UNREACHABLE() ((void)0)
+#endif
+
+namespace benchmark {
+class BenchmarkReporter;
+class MemoryManager;
+
+void Initialize(int* argc, char** argv);
+
+// Report to stdout all arguments in 'argv' as unrecognized except the first.
+// Returns true there is at least on unrecognized argument (i.e. 'argc' > 1).
+bool ReportUnrecognizedArguments(int argc, char** argv);
+
+// Generate a list of benchmarks matching the specified --benchmark_filter flag
+// and if --benchmark_list_tests is specified return after printing the name
+// of each matching benchmark. Otherwise run each matching benchmark and
+// report the results.
+//
+// The second and third overload use the specified 'display_reporter' and
+// 'file_reporter' respectively. 'file_reporter' will write to the file
+// specified by '--benchmark_output'. If '--benchmark_output' is not given
+// the 'file_reporter' is ignored.
+//
+// RETURNS: The number of matching benchmarks.
+size_t RunSpecifiedBenchmarks();
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter);
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
+ BenchmarkReporter* file_reporter);
+
+// Register a MemoryManager instance that will be used to collect and report
+// allocation measurements for benchmark runs.
+void RegisterMemoryManager(MemoryManager* memory_manager);
+
+namespace internal {
+class Benchmark;
+class BenchmarkImp;
+class BenchmarkFamilies;
+
+void UseCharPointer(char const volatile*);
+
+// Take ownership of the pointer and register the benchmark. Return the
+// registered benchmark.
+Benchmark* RegisterBenchmarkInternal(Benchmark*);
+
+// Ensure that the standard streams are properly initialized in every TU.
+int InitializeStreams();
+BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
+
+} // namespace internal
+
+#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
+ defined(__EMSCRIPTEN__)
+#define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
+#endif
+
+// The DoNotOptimize(...) function can be used to prevent a value or
+// expression from being optimized away by the compiler. This function is
+// intended to add little to no overhead.
+// See: https://youtu.be/nXaxk27zwlk?t=2441
+#ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY
+template <class Tp>
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
+ asm volatile("" : : "r,m"(value) : "memory");
+}
+
+template <class Tp>
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
+#if defined(__clang__)
+ asm volatile("" : "+r,m"(value) : : "memory");
+#else
+ asm volatile("" : "+m,r"(value) : : "memory");
+#endif
+}
+
+// Force the compiler to flush pending writes to global memory. Acts as an
+// effective read/write barrier.
+inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
+ asm volatile("" : : : "memory");
+}
+#elif defined(_MSC_VER)
+template <class Tp>
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
+ internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
+ _ReadWriteBarrier();
+}
+
+inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); }
+#else
+template <class Tp>
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
+ internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
+}
+// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers
+#endif
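+
+// A usage sketch (BM_vector_push_back is illustrative, not part of this
+// header). DoNotOptimize() forces the vector's data pointer to be
+// materialized, and ClobberMemory() forces the write to be flushed:
+//
+//   static void BM_vector_push_back(benchmark::State& state) {
+//     for (auto _ : state) {
+//       std::vector<int> v;
+//       v.reserve(1);
+//       benchmark::DoNotOptimize(v.data());  // Allow v.data() to be clobbered.
+//       v.push_back(42);
+//       benchmark::ClobberMemory();  // Force 42 to be written to memory.
+//     }
+//   }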
+
+// This class is used for user-defined counters.
+class Counter {
+ public:
+ enum Flags {
+ kDefaults = 0,
+ // Mark the counter as a rate. It will be presented divided
+ // by the duration of the benchmark.
+ kIsRate = 1U << 0U,
+ // Mark the counter as a thread-average quantity. It will be
+ // presented divided by the number of threads.
+ kAvgThreads = 1U << 1U,
+ // Mark the counter as a thread-average rate. See above.
+ kAvgThreadsRate = kIsRate | kAvgThreads,
+ // Mark the counter as a constant value, valid/same for *every* iteration.
+ // When reporting, it will be *multiplied* by the iteration count.
+ kIsIterationInvariant = 1U << 2U,
+ // Mark the counter as a constant rate.
+ // When reporting, it will be *multiplied* by the iteration count
+ // and then divided by the duration of the benchmark.
+ kIsIterationInvariantRate = kIsRate | kIsIterationInvariant,
+    // Mark the counter as an iteration-average quantity.
+    // It will be presented divided by the number of iterations.
+    kAvgIterations = 1U << 3U,
+    // Mark the counter as an iteration-average rate. See above.
+ kAvgIterationsRate = kIsRate | kAvgIterations
+ };
+
+ enum OneK {
+ // 1'000 items per 1k
+ kIs1000 = 1000,
+ // 1'024 items per 1k
+ kIs1024 = 1024
+ };
+
+ double value;
+ Flags flags;
+ OneK oneK;
+
+ BENCHMARK_ALWAYS_INLINE
+ Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000)
+ : value(v), flags(f), oneK(k) {}
+
+ BENCHMARK_ALWAYS_INLINE operator double const&() const { return value; }
+ BENCHMARK_ALWAYS_INLINE operator double&() { return value; }
+};
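+
+// A sketch of reporting a user-defined counter from a benchmark body
+// (ProcessOne() is a hypothetical workload function):
+//
+//   static void BM_Process(benchmark::State& state) {
+//     for (auto _ : state) ProcessOne();
+//     state.counters["items"] = benchmark::Counter(
+//         static_cast<double>(state.iterations()), benchmark::Counter::kIsRate);
+//   }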
+
+// A helper for user code to create combinations of Flags without having to
+// do this cast manually each time, or provide this operator themselves.
+Counter::Flags inline operator|(const Counter::Flags& LHS,
+ const Counter::Flags& RHS) {
+ return static_cast<Counter::Flags>(static_cast<int>(LHS) |
+ static_cast<int>(RHS));
+}
+
+// This is the container for the user-defined counters.
+typedef std::map<std::string, Counter> UserCounters;
+
+// TimeUnit is passed to a benchmark in order to specify the order of magnitude
+// for the measured time.
+enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond };
+
+// BigO is passed to a benchmark in order to specify the asymptotic
+// computational complexity for the benchmark. If oAuto is selected,
+// the complexity will be calculated automatically to the best fit.
+enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
+
+typedef uint64_t IterationCount;
+
+// BigOFunc is passed to a benchmark in order to specify the asymptotic
+// computational complexity for the benchmark.
+typedef double(BigOFunc)(IterationCount);
+
+// StatisticsFunc is passed to a benchmark in order to compute some descriptive
+// statistics over all the measurements of some type
+typedef double(StatisticsFunc)(const std::vector<double>&);
+
+namespace internal {
+struct Statistics {
+ std::string name_;
+ StatisticsFunc* compute_;
+
+ Statistics(const std::string& name, StatisticsFunc* compute)
+ : name_(name), compute_(compute) {}
+};
+
+struct BenchmarkInstance;
+class ThreadTimer;
+class ThreadManager;
+
+enum AggregationReportMode
+#if defined(BENCHMARK_HAS_CXX11)
+ : unsigned
+#else
+#endif
+{
+ // The mode has not been manually specified
+ ARM_Unspecified = 0,
+ // The mode is user-specified.
+ // This may or may not be set when the following bit-flags are set.
+ ARM_Default = 1U << 0U,
+ // File reporter should only output aggregates.
+ ARM_FileReportAggregatesOnly = 1U << 1U,
+ // Display reporter should only output aggregates
+ ARM_DisplayReportAggregatesOnly = 1U << 2U,
+ // Both reporters should only display aggregates.
+ ARM_ReportAggregatesOnly =
+ ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly
+};
+
+} // namespace internal
+
+// State is passed to a running Benchmark and contains state for the
+// benchmark to use.
+class State {
+ public:
+ struct StateIterator;
+ friend struct StateIterator;
+
+ // Returns iterators used to run each iteration of a benchmark using a
+  // C++11 range-based for loop. These functions should not be called directly.
+ //
+ // REQUIRES: The benchmark has not started running yet. Neither begin nor end
+ // have been called previously.
+ //
+ // NOTE: KeepRunning may not be used after calling either of these functions.
+ BENCHMARK_ALWAYS_INLINE StateIterator begin();
+ BENCHMARK_ALWAYS_INLINE StateIterator end();
+
+ // Returns true if the benchmark should continue through another iteration.
+  // NOTE: A benchmark must not return from the test until KeepRunning() has
+  // returned false.
+ bool KeepRunning();
+
+ // Returns true iff the benchmark should run n more iterations.
+ // REQUIRES: 'n' > 0.
+ // NOTE: A benchmark must not return from the test until KeepRunningBatch()
+ // has returned false.
+ // NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations.
+ //
+ // Intended usage:
+ // while (state.KeepRunningBatch(1000)) {
+ // // process 1000 elements
+ // }
+ bool KeepRunningBatch(IterationCount n);
+
+ // REQUIRES: timer is running and 'SkipWithError(...)' has not been called
+ // by the current thread.
+ // Stop the benchmark timer. If not called, the timer will be
+ // automatically stopped after the last iteration of the benchmark loop.
+ //
+ // For threaded benchmarks the PauseTiming() function only pauses the timing
+ // for the current thread.
+ //
+ // NOTE: The "real time" measurement is per-thread. If different threads
+ // report different measurements the largest one is reported.
+ //
+ // NOTE: PauseTiming()/ResumeTiming() are relatively
+ // heavyweight, and so their use should generally be avoided
+ // within each benchmark iteration, if possible.
+ void PauseTiming();
+
+ // REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
+ // by the current thread.
+ // Start the benchmark timer. The timer is NOT running on entrance to the
+ // benchmark function. It begins running after control flow enters the
+ // benchmark loop.
+ //
+ // NOTE: PauseTiming()/ResumeTiming() are relatively
+ // heavyweight, and so their use should generally be avoided
+ // within each benchmark iteration, if possible.
+ void ResumeTiming();
+
+ // REQUIRES: 'SkipWithError(...)' has not been called previously by the
+ // current thread.
+ // Report the benchmark as resulting in an error with the specified 'msg'.
+ // After this call the user may explicitly 'return' from the benchmark.
+ //
+ // If the ranged-for style of benchmark loop is used, the user must explicitly
+ // break from the loop, otherwise all future iterations will be run.
+ // If the 'KeepRunning()' loop is used the current thread will automatically
+ // exit the loop at the end of the current iteration.
+ //
+ // For threaded benchmarks only the current thread stops executing and future
+ // calls to `KeepRunning()` will block until all threads have completed
+ // the `KeepRunning()` loop. If multiple threads report an error only the
+ // first error message is used.
+ //
+ // NOTE: Calling 'SkipWithError(...)' does not cause the benchmark to exit
+ // the current scope immediately. If the function is called from within
+  // the 'KeepRunning()' loop the current iteration will finish. It is the
+  // user's responsibility to exit the scope as needed.
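+  //
+  // A minimal sketch (ReadChunk() is a hypothetical helper):
+  //   for (auto _ : state) {
+  //     if (!ReadChunk()) {
+  //       state.SkipWithError("ReadChunk failed!");
+  //       break;  // Required to end the ranged-for loop early.
+  //     }
+  //   }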
+ void SkipWithError(const char* msg);
+
+ // REQUIRES: called exactly once per iteration of the benchmarking loop.
+ // Set the manually measured time for this benchmark iteration, which
+ // is used instead of automatically measured time if UseManualTime() was
+ // specified.
+ //
+ // For threaded benchmarks the final value will be set to the largest
+ // reported values.
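+  //
+  // A sketch of manual timing with a wall clock; the benchmark must also be
+  // registered with UseManualTime(), and DoGpuWork() is hypothetical:
+  //   for (auto _ : state) {
+  //     auto start = std::chrono::high_resolution_clock::now();
+  //     DoGpuWork();
+  //     auto end = std::chrono::high_resolution_clock::now();
+  //     state.SetIterationTime(
+  //         std::chrono::duration<double>(end - start).count());
+  //   }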
+ void SetIterationTime(double seconds);
+
+ // Set the number of bytes processed by the current benchmark
+ // execution. This routine is typically called once at the end of a
+ // throughput oriented benchmark.
+ //
+ // REQUIRES: a benchmark has exited its benchmarking loop.
+ BENCHMARK_ALWAYS_INLINE
+ void SetBytesProcessed(int64_t bytes) {
+ counters["bytes_per_second"] =
+ Counter(static_cast<double>(bytes), Counter::kIsRate, Counter::kIs1024);
+ }
+
+ BENCHMARK_ALWAYS_INLINE
+ int64_t bytes_processed() const {
+ if (counters.find("bytes_per_second") != counters.end())
+ return static_cast<int64_t>(counters.at("bytes_per_second"));
+ return 0;
+ }
+
+  // If this routine is called with complexity_n > 0 and a complexity report
+  // is requested for the family benchmark, then the current benchmark will be
+  // part of the computation and complexity_n will represent the length of N.
+ BENCHMARK_ALWAYS_INLINE
+ void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }
+
+ BENCHMARK_ALWAYS_INLINE
+ int64_t complexity_length_n() { return complexity_n_; }
+
+ // If this routine is called with items > 0, then an items/s
+ // label is printed on the benchmark report line for the currently
+ // executing benchmark. It is typically called at the end of a processing
+ // benchmark where a processing items/second output is desired.
+ //
+ // REQUIRES: a benchmark has exited its benchmarking loop.
+ BENCHMARK_ALWAYS_INLINE
+ void SetItemsProcessed(int64_t items) {
+ counters["items_per_second"] =
+ Counter(static_cast<double>(items), benchmark::Counter::kIsRate);
+ }
+
+ BENCHMARK_ALWAYS_INLINE
+ int64_t items_processed() const {
+ if (counters.find("items_per_second") != counters.end())
+ return static_cast<int64_t>(counters.at("items_per_second"));
+ return 0;
+ }
+
+ // If this routine is called, the specified label is printed at the
+ // end of the benchmark report line for the currently executing
+ // benchmark. Example:
+ // static void BM_Compress(benchmark::State& state) {
+ // ...
+  //     double compression = input_size / output_size;
+  //     state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compression));
+ // }
+ // Produces output that looks like:
+ // BM_Compress 50 50 14115038 compress:27.3%
+ //
+ // REQUIRES: a benchmark has exited its benchmarking loop.
+ void SetLabel(const char* label);
+
+ void BENCHMARK_ALWAYS_INLINE SetLabel(const std::string& str) {
+ this->SetLabel(str.c_str());
+ }
+
+ // Range arguments for this run. CHECKs if the argument has been set.
+ BENCHMARK_ALWAYS_INLINE
+ int64_t range(std::size_t pos = 0) const {
+ assert(range_.size() > pos);
+ return range_[pos];
+ }
+
+ BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead")
+ int64_t range_x() const { return range(0); }
+
+ BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead")
+ int64_t range_y() const { return range(1); }
+
+ BENCHMARK_ALWAYS_INLINE
+ IterationCount iterations() const {
+ if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
+ return 0;
+ }
+ return max_iterations - total_iterations_ + batch_leftover_;
+ }
+
+ private:
+  // Items we expect on the first cache line (i.e. 64 bytes of the struct).
+ // When total_iterations_ is 0, KeepRunning() and friends will return false.
+ // May be larger than max_iterations.
+ IterationCount total_iterations_;
+
+ // When using KeepRunningBatch(), batch_leftover_ holds the number of
+ // iterations beyond max_iters that were run. Used to track
+ // completed_iterations_ accurately.
+ IterationCount batch_leftover_;
+
+ public:
+ const IterationCount max_iterations;
+
+ private:
+ bool started_;
+ bool finished_;
+ bool error_occurred_;
+
+ private: // items we don't need on the first cache line
+ std::vector<int64_t> range_;
+
+ int64_t complexity_n_;
+
+ public:
+ // Container for user-defined counters.
+ UserCounters counters;
+ // Index of the executing thread. Values from [0, threads).
+ const int thread_index;
+ // Number of threads concurrently executing the benchmark.
+ const int threads;
+
+ private:
+ State(IterationCount max_iters, const std::vector<int64_t>& ranges,
+ int thread_i, int n_threads, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager);
+
+ void StartKeepRunning();
+ // Implementation of KeepRunning() and KeepRunningBatch().
+ // is_batch must be true unless n is 1.
+ bool KeepRunningInternal(IterationCount n, bool is_batch);
+ void FinishKeepRunning();
+ internal::ThreadTimer* timer_;
+ internal::ThreadManager* manager_;
+
+ friend struct internal::BenchmarkInstance;
+};
+
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() {
+ return KeepRunningInternal(1, /*is_batch=*/false);
+}
+
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(IterationCount n) {
+ return KeepRunningInternal(n, /*is_batch=*/true);
+}
+
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(IterationCount n,
+ bool is_batch) {
+  // total_iterations_ is set to 0 by the constructor, and always set to a
+  // nonzero value by StartKeepRunning().
+ assert(n > 0);
+ // n must be 1 unless is_batch is true.
+ assert(is_batch || n == 1);
+ if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) {
+ total_iterations_ -= n;
+ return true;
+ }
+ if (!started_) {
+ StartKeepRunning();
+ if (!error_occurred_ && total_iterations_ >= n) {
+ total_iterations_ -= n;
+ return true;
+ }
+ }
+ // For non-batch runs, total_iterations_ must be 0 by now.
+ if (is_batch && total_iterations_ != 0) {
+ batch_leftover_ = n - total_iterations_;
+ total_iterations_ = 0;
+ return true;
+ }
+ FinishKeepRunning();
+ return false;
+}
+
+struct State::StateIterator {
+ struct BENCHMARK_UNUSED Value {};
+ typedef std::forward_iterator_tag iterator_category;
+ typedef Value value_type;
+ typedef Value reference;
+ typedef Value pointer;
+ typedef std::ptrdiff_t difference_type;
+
+ private:
+ friend class State;
+ BENCHMARK_ALWAYS_INLINE
+ StateIterator() : cached_(0), parent_() {}
+
+ BENCHMARK_ALWAYS_INLINE
+ explicit StateIterator(State* st)
+ : cached_(st->error_occurred_ ? 0 : st->max_iterations), parent_(st) {}
+
+ public:
+ BENCHMARK_ALWAYS_INLINE
+ Value operator*() const { return Value(); }
+
+ BENCHMARK_ALWAYS_INLINE
+ StateIterator& operator++() {
+ assert(cached_ > 0);
+ --cached_;
+ return *this;
+ }
+
+ BENCHMARK_ALWAYS_INLINE
+ bool operator!=(StateIterator const&) const {
+ if (BENCHMARK_BUILTIN_EXPECT(cached_ != 0, true)) return true;
+ parent_->FinishKeepRunning();
+ return false;
+ }
+
+ private:
+ IterationCount cached_;
+ State* const parent_;
+};
+
+inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::begin() {
+ return StateIterator(this);
+}
+inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::end() {
+ StartKeepRunning();
+ return StateIterator();
+}
+
+namespace internal {
+
+typedef void(Function)(State&);
+
+// ------------------------------------------------------
+// Benchmark registration object. The BENCHMARK() macro expands
+// into an internal::Benchmark* object. Various methods can
+// be called on this object to change the properties of the benchmark.
+// Each method returns "this" so that multiple method calls can be
+// chained into one expression.
+class Benchmark {
+ public:
+ virtual ~Benchmark();
+
+ // Note: the following methods all return "this" so that multiple
+ // method calls can be chained together in one expression.
+
+ // Run this benchmark once with "x" as the extra argument passed
+ // to the function.
+ // REQUIRES: The function passed to the constructor must accept an arg1.
+ Benchmark* Arg(int64_t x);
+
+ // Run this benchmark with the given time unit for the generated output report
+ Benchmark* Unit(TimeUnit unit);
+
+ // Run this benchmark once for a number of values picked from the
+ // range [start..limit]. (start and limit are always picked.)
+ // REQUIRES: The function passed to the constructor must accept an arg1.
+ Benchmark* Range(int64_t start, int64_t limit);
+
+  // Run this benchmark once for all values in the range [start..limit] with
+  // a specific step.
+ // REQUIRES: The function passed to the constructor must accept an arg1.
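+  // For example, DenseRange(0, 8, 2) runs the benchmark with 0, 2, 4, 6 and 8.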
+ Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1);
+
+ // Run this benchmark once with "args" as the extra arguments passed
+ // to the function.
+ // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
+ Benchmark* Args(const std::vector<int64_t>& args);
+
+ // Equivalent to Args({x, y})
+ // NOTE: This is a legacy C++03 interface provided for compatibility only.
+ // New code should use 'Args'.
+ Benchmark* ArgPair(int64_t x, int64_t y) {
+ std::vector<int64_t> args;
+ args.push_back(x);
+ args.push_back(y);
+ return Args(args);
+ }
+
+ // Run this benchmark once for a number of values picked from the
+ // ranges [start..limit]. (starts and limits are always picked.)
+ // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
+ Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);
+
+ // Equivalent to ArgNames({name})
+ Benchmark* ArgName(const std::string& name);
+
+ // Set the argument names to display in the benchmark name. If not called,
+ // only argument values will be shown.
+ Benchmark* ArgNames(const std::vector<std::string>& names);
+
+ // Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}).
+ // NOTE: This is a legacy C++03 interface provided for compatibility only.
+ // New code should use 'Ranges'.
+ Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
+ std::vector<std::pair<int64_t, int64_t> > ranges;
+ ranges.push_back(std::make_pair(lo1, hi1));
+ ranges.push_back(std::make_pair(lo2, hi2));
+ return Ranges(ranges);
+ }
+
+ // Pass this benchmark object to *func, which can customize
+ // the benchmark by calling various methods like Arg, Args,
+ // Threads, etc.
+ Benchmark* Apply(void (*func)(Benchmark* benchmark));
+
+ // Set the range multiplier for non-dense range. If not called, the range
+ // multiplier kRangeMultiplier will be used.
+ Benchmark* RangeMultiplier(int multiplier);
+
+ // Set the minimum amount of time to use when running this benchmark. This
+ // option overrides the `benchmark_min_time` flag.
+ // REQUIRES: `t > 0` and `Iterations` has not been called on this benchmark.
+ Benchmark* MinTime(double t);
+
+  // Specify the number of iterations that should be run by this benchmark.
+ // REQUIRES: 'n > 0' and `MinTime` has not been called on this benchmark.
+ //
+ // NOTE: This function should only be used when *exact* iteration control is
+ // needed and never to control or limit how long a benchmark runs, where
+ // `--benchmark_min_time=N` or `MinTime(...)` should be used instead.
+ Benchmark* Iterations(IterationCount n);
+
+  // Specify the number of times to repeat this benchmark. This option
+  // overrides the `benchmark_repetitions` flag.
+ // REQUIRES: `n > 0`
+ Benchmark* Repetitions(int n);
+
+ // Specify if each repetition of the benchmark should be reported separately
+ // or if only the final statistics should be reported. If the benchmark
+ // is not repeated then the single result is always reported.
+ // Applies to *ALL* reporters (display and file).
+ Benchmark* ReportAggregatesOnly(bool value = true);
+
+ // Same as ReportAggregatesOnly(), but applies to display reporter only.
+ Benchmark* DisplayAggregatesOnly(bool value = true);
+
+  // By default, the CPU time is measured only for the main thread, which may
+  // be unrepresentative if the benchmark uses threads internally. If called,
+  // the total CPU time spent by all the threads will be measured instead.
+ Benchmark* MeasureProcessCPUTime();
+
+ // If a particular benchmark should use the Wall clock instead of the CPU time
+ // (be it either the CPU time of the main thread only (default), or the
+ // total CPU usage of the benchmark), call this method. If called, the elapsed
+ // (wall) time will be used to control how many iterations are run, and in the
+ // printing of items/second or MB/seconds values.
+ // If not called, the CPU time used by the benchmark will be used.
+ Benchmark* UseRealTime();
+
+  // If a benchmark must measure time manually (e.g. if GPU execution time is
+  // being measured), call this method. If called, each benchmark iteration
+  // should call SetIterationTime(seconds) to report the measured time, which
+  // will be used to control how many iterations are run, and in the printing
+  // of items/second or MB/second values.
+ Benchmark* UseManualTime();
+
+ // Set the asymptotic computational complexity for the benchmark. If called
+ // the asymptotic computational complexity will be shown on the output.
+ Benchmark* Complexity(BigO complexity = benchmark::oAuto);
+
+ // Set the asymptotic computational complexity for the benchmark. If called
+ // the asymptotic computational complexity will be shown on the output.
+ Benchmark* Complexity(BigOFunc* complexity);
+
+  // Add a statistic to be computed over all the measurements of the
+  // benchmark run.
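+  //
+  // A sketch (any function or captureless lambda matching StatisticsFunc
+  // works; BM_spin is illustrative):
+  //   BENCHMARK(BM_spin)->ComputeStatistics("max",
+  //       [](const std::vector<double>& v) {
+  //         return *std::max_element(v.begin(), v.end());
+  //       });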
+ Benchmark* ComputeStatistics(std::string name, StatisticsFunc* statistics);
+
+ // Support for running multiple copies of the same benchmark concurrently
+ // in multiple threads. This may be useful when measuring the scaling
+ // of some piece of code.
+
+ // Run one instance of this benchmark concurrently in t threads.
+ Benchmark* Threads(int t);
+
+ // Pick a set of values T from [min_threads,max_threads].
+ // min_threads and max_threads are always included in T. Run this
+ // benchmark once for each value in T. The benchmark run for a
+ // particular value t consists of t threads running the benchmark
+ // function concurrently. For example, consider:
+ // BENCHMARK(Foo)->ThreadRange(1,16);
+ // This will run the following benchmarks:
+ // Foo in 1 thread
+ // Foo in 2 threads
+ // Foo in 4 threads
+ // Foo in 8 threads
+ // Foo in 16 threads
+ Benchmark* ThreadRange(int min_threads, int max_threads);
+
+ // For each value n in the range, run this benchmark once using n threads.
+ // min_threads and max_threads are always included in the range.
+ // stride specifies the increment. E.g. DenseThreadRange(1, 8, 3) starts
+ // a benchmark with 1, 4, 7 and 8 threads.
+ Benchmark* DenseThreadRange(int min_threads, int max_threads, int stride = 1);
+
+ // Equivalent to ThreadRange(NumCPUs(), NumCPUs())
+ Benchmark* ThreadPerCpu();
+
+ virtual void Run(State& state) = 0;
+
+ protected:
+ explicit Benchmark(const char* name);
+ Benchmark(Benchmark const&);
+ void SetName(const char* name);
+
+ int ArgsCnt() const;
+
+ private:
+ friend class BenchmarkFamilies;
+
+ std::string name_;
+ AggregationReportMode aggregation_report_mode_;
+ std::vector<std::string> arg_names_; // Args for all benchmark runs
+ std::vector<std::vector<int64_t> > args_; // Args for all benchmark runs
+ TimeUnit time_unit_;
+ int range_multiplier_;
+ double min_time_;
+ IterationCount iterations_;
+ int repetitions_;
+ bool measure_process_cpu_time_;
+ bool use_real_time_;
+ bool use_manual_time_;
+ BigO complexity_;
+ BigOFunc* complexity_lambda_;
+ std::vector<Statistics> statistics_;
+ std::vector<int> thread_counts_;
+
+ Benchmark& operator=(Benchmark const&);
+};
+
+} // namespace internal
+
+// Create and register a benchmark with the specified 'name' that invokes
+// the specified functor 'fn'.
+//
+// RETURNS: A pointer to the registered benchmark.
+internal::Benchmark* RegisterBenchmark(const char* name,
+ internal::Function* fn);
+
+#if defined(BENCHMARK_HAS_CXX11)
+template <class Lambda>
+internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn);
+#endif
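+
+// A sketch of runtime registration using the C++11 lambda overload; the
+// benchmark name and body are illustrative:
+//
+//   int main(int argc, char** argv) {
+//     benchmark::RegisterBenchmark("BM_empty", [](benchmark::State& st) {
+//       for (auto _ : st) {
+//       }
+//     });
+//     benchmark::Initialize(&argc, argv);
+//     benchmark::RunSpecifiedBenchmarks();
+//   }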
+
+// Remove all registered benchmarks. All pointers to previously registered
+// benchmarks are invalidated.
+void ClearRegisteredBenchmarks();
+
+namespace internal {
+// The class used to hold all Benchmarks created from static functions
+// (i.e. those created using the BENCHMARK(...) macros).
+class FunctionBenchmark : public Benchmark {
+ public:
+ FunctionBenchmark(const char* name, Function* func)
+ : Benchmark(name), func_(func) {}
+
+ virtual void Run(State& st);
+
+ private:
+ Function* func_;
+};
+
+#ifdef BENCHMARK_HAS_CXX11
+template <class Lambda>
+class LambdaBenchmark : public Benchmark {
+ public:
+ virtual void Run(State& st) { lambda_(st); }
+
+ private:
+ template <class OLambda>
+ LambdaBenchmark(const char* name, OLambda&& lam)
+ : Benchmark(name), lambda_(std::forward<OLambda>(lam)) {}
+
+ LambdaBenchmark(LambdaBenchmark const&) = delete;
+
+ private:
+ template <class Lam>
+ friend Benchmark* ::benchmark::RegisterBenchmark(const char*, Lam&&);
+
+ Lambda lambda_;
+};
+#endif
+
+} // namespace internal
+
+inline internal::Benchmark* RegisterBenchmark(const char* name,
+ internal::Function* fn) {
+ return internal::RegisterBenchmarkInternal(
+ ::new internal::FunctionBenchmark(name, fn));
+}
+
+#ifdef BENCHMARK_HAS_CXX11
+template <class Lambda>
+internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn) {
+ using BenchType =
+ internal::LambdaBenchmark<typename std::decay<Lambda>::type>;
+ return internal::RegisterBenchmarkInternal(
+ ::new BenchType(name, std::forward<Lambda>(fn)));
+}
+#endif
+
+#if defined(BENCHMARK_HAS_CXX11) && \
+ (!defined(BENCHMARK_GCC_VERSION) || BENCHMARK_GCC_VERSION >= 409)
+template <class Lambda, class... Args>
+internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn,
+ Args&&... args) {
+ return benchmark::RegisterBenchmark(
+ name, [=](benchmark::State& st) { fn(st, args...); });
+}
+#else
+#define BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
+#endif
+
+// The base class for all fixture tests.
+class Fixture : public internal::Benchmark {
+ public:
+ Fixture() : internal::Benchmark("") {}
+
+ virtual void Run(State& st) {
+ this->SetUp(st);
+ this->BenchmarkCase(st);
+ this->TearDown(st);
+ }
+
+ // MONGODB MODIFICATION: Remove the deprecated version of SetUp() and TearDown() that
+ // require `const State&` as an argument.
+ virtual void SetUp(State&) {}
+ virtual void TearDown(State&) {}
+
+ protected:
+ virtual void BenchmarkCase(State&) = 0;
+};
+
+} // namespace benchmark
+
+// ------------------------------------------------------
+// Macro to register benchmarks
+
+// Check that __COUNTER__ is defined and that __COUNTER__ increases by 1
+// every time it is expanded. X + 1 == X + 0 is used in case X is defined to be
+// empty. If X is empty the expression becomes (+1 == +0).
+//
+// MONGODB MODIFICATION: all of our supported compilers support __COUNTER__ so we don't need to test
+// for it here. This test interferes with -E -fdirectives-only since it is illegal to use
+// __COUNTER__ in an #if clause with that flag because its value could change between the partial
+// preprocessing and the compile phases.
+#if true // defined(__COUNTER__) && (__COUNTER__ + 1 == __COUNTER__ + 0)
+#define BENCHMARK_PRIVATE_UNIQUE_ID __COUNTER__
+#else
+#define BENCHMARK_PRIVATE_UNIQUE_ID __LINE__
+#endif
+
+// Helpers for generating unique variable names
+#define BENCHMARK_PRIVATE_NAME(n) \
+ BENCHMARK_PRIVATE_CONCAT(_benchmark_, BENCHMARK_PRIVATE_UNIQUE_ID, n)
+#define BENCHMARK_PRIVATE_CONCAT(a, b, c) BENCHMARK_PRIVATE_CONCAT2(a, b, c)
+#define BENCHMARK_PRIVATE_CONCAT2(a, b, c) a##b##c
+
+#define BENCHMARK_PRIVATE_DECLARE(n) \
+ static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \
+ BENCHMARK_UNUSED
+
+#define BENCHMARK(n) \
+ BENCHMARK_PRIVATE_DECLARE(n) = \
+ (::benchmark::internal::RegisterBenchmarkInternal( \
+ new ::benchmark::internal::FunctionBenchmark(#n, n)))
+
+// Old-style macros
+#define BENCHMARK_WITH_ARG(n, a) BENCHMARK(n)->Arg((a))
+#define BENCHMARK_WITH_ARG2(n, a1, a2) BENCHMARK(n)->Args({(a1), (a2)})
+#define BENCHMARK_WITH_UNIT(n, t) BENCHMARK(n)->Unit((t))
+#define BENCHMARK_RANGE(n, lo, hi) BENCHMARK(n)->Range((lo), (hi))
+#define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \
+ BENCHMARK(n)->RangePair({{(l1), (h1)}, {(l2), (h2)}})
+
+#ifdef BENCHMARK_HAS_CXX11
+
+// Register a benchmark which invokes the function specified by `func`
+// with the additional arguments specified by `...`.
+//
+// For example:
+//
+// template <class ...ExtraArgs>
+// void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
+//   [...]
+// }
+// /* Registers a benchmark named "BM_takes_args/int_string_test" */
+// BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
+#define BENCHMARK_CAPTURE(func, test_case_name, ...) \
+ BENCHMARK_PRIVATE_DECLARE(func) = \
+ (::benchmark::internal::RegisterBenchmarkInternal( \
+ new ::benchmark::internal::FunctionBenchmark( \
+ #func "/" #test_case_name, \
+ [](::benchmark::State& st) { func(st, __VA_ARGS__); })))
+
+#endif // BENCHMARK_HAS_CXX11
+
+// This will register a benchmark for a templatized function. For example:
+//
+// template<int arg>
+// void BM_Foo(benchmark::State& state);
+//
+// BENCHMARK_TEMPLATE(BM_Foo, 1);
+//
+// will register BM_Foo<1> as a benchmark.
+#define BENCHMARK_TEMPLATE1(n, a) \
+ BENCHMARK_PRIVATE_DECLARE(n) = \
+ (::benchmark::internal::RegisterBenchmarkInternal( \
+ new ::benchmark::internal::FunctionBenchmark(#n "<" #a ">", n<a>)))
+
+#define BENCHMARK_TEMPLATE2(n, a, b) \
+ BENCHMARK_PRIVATE_DECLARE(n) = \
+ (::benchmark::internal::RegisterBenchmarkInternal( \
+ new ::benchmark::internal::FunctionBenchmark(#n "<" #a "," #b ">", \
+ n<a, b>)))
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE(n, ...) \
+ BENCHMARK_PRIVATE_DECLARE(n) = \
+ (::benchmark::internal::RegisterBenchmarkInternal( \
+ new ::benchmark::internal::FunctionBenchmark( \
+ #n "<" #__VA_ARGS__ ">", n<__VA_ARGS__>)))
+#else
+#define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a)
+#endif
+
+#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass { \
+ public: \
+ BaseClass##_##Method##_Benchmark() : BaseClass() { \
+ this->SetName(#BaseClass "/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&); \
+ };
+
+#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass<a> { \
+ public: \
+ BaseClass##_##Method##_Benchmark() : BaseClass<a>() { \
+ this->SetName(#BaseClass "<" #a ">/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&); \
+ };
+
+#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass<a, b> { \
+ public: \
+ BaseClass##_##Method##_Benchmark() : BaseClass<a, b>() { \
+ this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&); \
+ };
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \
+ public: \
+ BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \
+ this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&); \
+ };
+#else
+#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \
+ BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
+#endif
+
+#define BENCHMARK_DEFINE_F(BaseClass, Method) \
+ BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \
+ BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \
+ BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) \
+ BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+#else
+#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \
+ BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
+#endif
+
+#define BENCHMARK_REGISTER_F(BaseClass, Method) \
+ BENCHMARK_PRIVATE_REGISTER_F(BaseClass##_##Method##_Benchmark)
+
+#define BENCHMARK_PRIVATE_REGISTER_F(TestName) \
+ BENCHMARK_PRIVATE_DECLARE(TestName) = \
+ (::benchmark::internal::RegisterBenchmarkInternal(new TestName()))
+
+// This macro will define and register a benchmark within a fixture class.
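+//
+// For example (a sketch; MyFixture and FooTest are illustrative):
+//   class MyFixture : public benchmark::Fixture {
+//    public:
+//     void SetUp(::benchmark::State&) override { /* acquire resources */ }
+//     void TearDown(::benchmark::State&) override { /* release them */ }
+//   };
+//   BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
+//     for (auto _ : st) {
+//       // Run the test as normal.
+//     }
+//   }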
+#define BENCHMARK_F(BaseClass, Method) \
+ BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) \
+ BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b) \
+ BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
+ BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+#else
+#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \
+ BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
+#endif
+
+// Helper macro to create a main routine in a test that runs the benchmarks
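+//
+// For example, a complete benchmark binary (BM_Noop is illustrative):
+//   static void BM_Noop(benchmark::State& state) {
+//     for (auto _ : state) {
+//     }
+//   }
+//   BENCHMARK(BM_Noop);
+//   BENCHMARK_MAIN();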
+#define BENCHMARK_MAIN() \
+ int main(int argc, char** argv) { \
+ ::benchmark::Initialize(&argc, argv); \
+ if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \
+ ::benchmark::RunSpecifiedBenchmarks(); \
+ } \
+ int main(int, char**)
+
+// ------------------------------------------------------
+// Benchmark Reporters
+
+namespace benchmark {
+
+struct CPUInfo {
+ struct CacheInfo {
+ std::string type;
+ int level;
+ int size;
+ int num_sharing;
+ };
+
+ int num_cpus;
+ double cycles_per_second;
+ std::vector<CacheInfo> caches;
+ bool scaling_enabled;
+ std::vector<double> load_avg;
+
+ static const CPUInfo& Get();
+
+ private:
+ CPUInfo();
+ BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo);
+};
+
+// A struct holding system information.
+struct SystemInfo {
+ std::string name;
+ static const SystemInfo& Get();
+
+ private:
+ SystemInfo();
+ BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo);
+};
+
+// BenchmarkName contains the components of the Benchmark's name
+// which allows individual fields to be modified or cleared before
+// building the final name using 'str()'.
+struct BenchmarkName {
+ std::string function_name;
+ std::string args;
+ std::string min_time;
+ std::string iterations;
+ std::string repetitions;
+ std::string time_type;
+ std::string threads;
+
+ // Return the full name of the benchmark with each non-empty
+ // field separated by a '/'
+ std::string str() const;
+};
+
+// Interface for custom benchmark result printers.
+// By default, benchmark reports are printed to stdout. However, an application
+// can control the destination of the reports by calling
+// RunSpecifiedBenchmarks and passing it a custom reporter object.
+// The reporter object must implement the following interface.
+class BenchmarkReporter {
+ public:
+ struct Context {
+ CPUInfo const& cpu_info;
+ SystemInfo const& sys_info;
+ // The number of chars in the longest benchmark name.
+ size_t name_field_width;
+ static const char* executable_name;
+ Context();
+ };
+
+ struct Run {
+ static const int64_t no_repetition_index = -1;
+ enum RunType { RT_Iteration, RT_Aggregate };
+
+ Run()
+ : run_type(RT_Iteration),
+ error_occurred(false),
+ iterations(1),
+ threads(1),
+ time_unit(kNanosecond),
+ real_accumulated_time(0),
+ cpu_accumulated_time(0),
+ max_heapbytes_used(0),
+ complexity(oNone),
+ complexity_lambda(),
+ complexity_n(0),
+ report_big_o(false),
+ report_rms(false),
+ counters(),
+ has_memory_result(false),
+ allocs_per_iter(0.0),
+ max_bytes_used(0) {}
+
+ std::string benchmark_name() const;
+ BenchmarkName run_name;
+ RunType run_type;
+ std::string aggregate_name;
+ std::string report_label; // Empty if not set by benchmark.
+ bool error_occurred;
+ std::string error_message;
+
+ IterationCount iterations;
+ int64_t threads;
+ int64_t repetition_index;
+ int64_t repetitions;
+ TimeUnit time_unit;
+ double real_accumulated_time;
+ double cpu_accumulated_time;
+
+ // Return a value representing the real time per iteration in the unit
+ // specified by 'time_unit'.
+ // NOTE: If 'iterations' is zero the returned value represents the
+ // accumulated time.
+ double GetAdjustedRealTime() const;
+
+ // Return a value representing the cpu time per iteration in the unit
+ // specified by 'time_unit'.
+ // NOTE: If 'iterations' is zero the returned value represents the
+ // accumulated time.
+ double GetAdjustedCPUTime() const;
+
+ // This is set to 0.0 if memory tracing is not enabled.
+ double max_heapbytes_used;
+
+ // Keep track of arguments to compute asymptotic complexity
+ BigO complexity;
+ BigOFunc* complexity_lambda;
+ int64_t complexity_n;
+
+ // what statistics to compute from the measurements
+ const std::vector<internal::Statistics>* statistics;
+
+ // Inform print function whether the current run is a complexity report
+ bool report_big_o;
+ bool report_rms;
+
+ UserCounters counters;
+
+ // Memory metrics.
+ bool has_memory_result;
+ double allocs_per_iter;
+ int64_t max_bytes_used;
+ };
+
+ // Construct a BenchmarkReporter with the output stream set to 'std::cout'
+ // and the error stream set to 'std::cerr'
+ BenchmarkReporter();
+
+ // Called once for every suite of benchmarks run.
+ // The parameter "context" contains information that the
+ // reporter may wish to use when generating its report, for example the
+ // platform under which the benchmarks are running. The benchmark run is
+ // never started if this function returns false, allowing the reporter
+ // to skip runs based on the context information.
+ virtual bool ReportContext(const Context& context) = 0;
+
+  // Called once for each group of benchmark runs, giving information about
+  // the cpu-time and heap memory usage during the benchmark run. If the group
+  // of runs contained more than two entries then 'report' contains additional
+  // elements representing the mean and standard deviation of those runs.
+  // Additionally, if this group of runs was the last in a family of
+  // benchmarks, 'report' contains additional entries representing the
+  // asymptotic complexity and RMS of that benchmark family.
+ virtual void ReportRuns(const std::vector<Run>& report) = 0;
+
+  // Called once and only once after every group of benchmarks is run and
+  // reported.
+ virtual void Finalize() {}
+
+ // REQUIRES: The object referenced by 'out' is valid for the lifetime
+ // of the reporter.
+ void SetOutputStream(std::ostream* out) {
+ assert(out);
+ output_stream_ = out;
+ }
+
+ // REQUIRES: The object referenced by 'err' is valid for the lifetime
+ // of the reporter.
+ void SetErrorStream(std::ostream* err) {
+ assert(err);
+ error_stream_ = err;
+ }
+
+ std::ostream& GetOutputStream() const { return *output_stream_; }
+
+ std::ostream& GetErrorStream() const { return *error_stream_; }
+
+ virtual ~BenchmarkReporter();
+
+ // Write a human readable string to 'out' representing the specified
+ // 'context'.
+ // REQUIRES: 'out' is non-null.
+ static void PrintBasicContext(std::ostream* out, Context const& context);
+
+ private:
+ std::ostream* output_stream_;
+ std::ostream* error_stream_;
+};
+
+// Simple reporter that outputs benchmark data to the console. This is the
+// default reporter used by RunSpecifiedBenchmarks().
+class ConsoleReporter : public BenchmarkReporter {
+ public:
+ enum OutputOptions {
+ OO_None = 0,
+ OO_Color = 1,
+ OO_Tabular = 2,
+ OO_ColorTabular = OO_Color | OO_Tabular,
+ OO_Defaults = OO_ColorTabular
+ };
+ explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults)
+ : output_options_(opts_),
+ name_field_width_(0),
+ prev_counters_(),
+ printed_header_(false) {}
+
+ virtual bool ReportContext(const Context& context);
+ virtual void ReportRuns(const std::vector<Run>& reports);
+
+ protected:
+ virtual void PrintRunData(const Run& report);
+ virtual void PrintHeader(const Run& report);
+
+ OutputOptions output_options_;
+ size_t name_field_width_;
+ UserCounters prev_counters_;
+ bool printed_header_;
+};
+
+class JSONReporter : public BenchmarkReporter {
+ public:
+ JSONReporter() : first_report_(true) {}
+ virtual bool ReportContext(const Context& context);
+ virtual void ReportRuns(const std::vector<Run>& reports);
+ virtual void Finalize();
+
+ private:
+ void PrintRunData(const Run& report);
+
+ bool first_report_;
+};
+
+class BENCHMARK_DEPRECATED_MSG(
+ "The CSV Reporter will be removed in a future release") CSVReporter
+ : public BenchmarkReporter {
+ public:
+ CSVReporter() : printed_header_(false) {}
+ virtual bool ReportContext(const Context& context);
+ virtual void ReportRuns(const std::vector<Run>& reports);
+
+ private:
+ void PrintRunData(const Run& report);
+
+ bool printed_header_;
+ std::set<std::string> user_counter_names_;
+};
+
+// If a MemoryManager is registered, it can be used to collect and report
+// allocation metrics for a run of the benchmark.
+class MemoryManager {
+ public:
+ struct Result {
+ Result() : num_allocs(0), max_bytes_used(0) {}
+
+ // The number of allocations made in total between Start and Stop.
+ int64_t num_allocs;
+
+ // The peak memory use between Start and Stop.
+ int64_t max_bytes_used;
+ };
+
+ virtual ~MemoryManager() {}
+
+ // Implement this to start recording allocation information.
+ virtual void Start() = 0;
+
+ // Implement this to stop recording and fill out the given Result structure.
+ virtual void Stop(Result* result) = 0;
+};
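+
+// A sketch of a custom MemoryManager; g_allocs and g_peak_bytes stand in for
+// counters maintained by the application's own allocator hooks:
+//
+//   class SimpleMemoryManager : public benchmark::MemoryManager {
+//    public:
+//     void Start() override { g_allocs = 0; }
+//     void Stop(Result* result) override {
+//       result->num_allocs = g_allocs;
+//       result->max_bytes_used = g_peak_bytes;
+//     }
+//   };
+//
+// Install an instance with benchmark::RegisterMemoryManager(...) before
+// calling RunSpecifiedBenchmarks().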
+
+inline const char* GetTimeUnitString(TimeUnit unit) {
+ switch (unit) {
+ case kMillisecond:
+ return "ms";
+ case kMicrosecond:
+ return "us";
+ case kNanosecond:
+ return "ns";
+ }
+ BENCHMARK_UNREACHABLE();
+}
+
+inline double GetTimeUnitMultiplier(TimeUnit unit) {
+ switch (unit) {
+ case kMillisecond:
+ return 1e3;
+ case kMicrosecond:
+ return 1e6;
+ case kNanosecond:
+ return 1e9;
+ }
+ BENCHMARK_UNREACHABLE();
+}
+
+} // namespace benchmark
+
+#endif // BENCHMARK_BENCHMARK_H_
diff --git a/src/third_party/benchmark/dist/src/arraysize.h b/src/third_party/benchmark/dist/src/arraysize.h
new file mode 100644
index 00000000000..51a50f2dff2
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/arraysize.h
@@ -0,0 +1,33 @@
+#ifndef BENCHMARK_ARRAYSIZE_H_
+#define BENCHMARK_ARRAYSIZE_H_
+
+#include "internal_macros.h"
+
+namespace benchmark {
+namespace internal {
+// The arraysize(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example. If you use arraysize on
+// a pointer by mistake, you will get a compile-time error.
+//
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+
+// That gcc wants both of these prototypes seems mysterious. VC, for
+// its part, can't decide which to use (another mystery). Matching of
+// template overloads: the final frontier.
+#ifndef COMPILER_MSVC
+template <typename T, size_t N>
+char (&ArraySizeHelper(const T (&array)[N]))[N];
+#endif
+
+#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))
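+
+// For example (a sketch):
+//   static const int kPrimes[] = {2, 3, 5, 7};
+//   char buffer[arraysize(kPrimes)];  // arraysize(kPrimes) == 4 at compile time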
+
+} // end namespace internal
+} // end namespace benchmark
+
+#endif // BENCHMARK_ARRAYSIZE_H_
diff --git a/src/third_party/benchmark/dist/src/benchmark.cc b/src/third_party/benchmark/dist/src/benchmark.cc
new file mode 100644
index 00000000000..29bfa3512f9
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark.cc
@@ -0,0 +1,494 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
+#include "benchmark_runner.h"
+#include "internal_macros.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <condition_variable>
+#include <cstdio>
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <thread>
+#include <utility>
+
+#include "check.h"
+#include "colorprint.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "counter.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "mutex.h"
+#include "re.h"
+#include "statistics.h"
+#include "string_util.h"
+#include "thread_manager.h"
+#include "thread_timer.h"
+
+DEFINE_bool(benchmark_list_tests, false,
+ "Print a list of benchmarks. This option overrides all other "
+ "options.");
+
+DEFINE_string(benchmark_filter, ".",
+ "A regular expression that specifies the set of benchmarks "
+ "to execute. If this flag is empty, or if this flag is the "
+ "string \"all\", all benchmarks linked into the binary are "
+ "run.");
+
+DEFINE_double(benchmark_min_time, 0.5,
+ "Minimum number of seconds we should run benchmark before "
+ "results are considered significant. For cpu-time based "
+ "tests, this is the lower bound on the total cpu time "
+ "used by all threads that make up the test. For real-time "
+ "based tests, this is the lower bound on the elapsed time "
+ "of the benchmark execution, regardless of number of "
+ "threads.");
+
+DEFINE_int32(benchmark_repetitions, 1,
+ "The number of runs of each benchmark. If greater than 1, the "
+ "mean and standard deviation of the runs will be reported.");
+
+DEFINE_bool(
+ benchmark_report_aggregates_only, false,
+    "Report the result of each benchmark repetition. When 'true' is "
+    "specified, only the mean, standard deviation, and other statistics are "
+    "reported for repeated benchmarks. Affects all reporters.");
+
+DEFINE_bool(
+ benchmark_display_aggregates_only, false,
+    "Display the result of each benchmark repetition. When 'true' is "
+    "specified, only the mean, standard deviation, and other statistics are "
+    "displayed for repeated benchmarks. Unlike "
+    "benchmark_report_aggregates_only, this only affects the display "
+    "reporter, *NOT* the file reporter, which will still contain all the "
+    "output.");
+
+DEFINE_string(benchmark_format, "console",
+ "The format to use for console output. Valid values are "
+ "'console', 'json', or 'csv'.");
+
+DEFINE_string(benchmark_out_format, "json",
+ "The format to use for file output. Valid values are "
+ "'console', 'json', or 'csv'.");
+
+DEFINE_string(benchmark_out, "", "The file to write additional output to");
+
+DEFINE_string(benchmark_color, "auto",
+ "Whether to use colors in the output. Valid values: "
+ "'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use "
+ "colors if the output is being sent to a terminal and the TERM "
+ "environment variable is set to a terminal type that supports "
+ "colors.");
+
+DEFINE_bool(benchmark_counters_tabular, false,
+ "Whether to use tabular format when printing user counters to "
+            "the console. Valid values: 'true'/'yes'/1, 'false'/'no'/0. "
+            "Defaults to false.");
+
+DEFINE_int32(v, 0, "The level of verbose logging to output");
+
+namespace benchmark {
+
+namespace internal {
+
+// FIXME: wouldn't LTO mess this up?
+void UseCharPointer(char const volatile*) {}
+
+} // namespace internal
+
+State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
+ int thread_i, int n_threads, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager)
+ : total_iterations_(0),
+ batch_leftover_(0),
+ max_iterations(max_iters),
+ started_(false),
+ finished_(false),
+ error_occurred_(false),
+ range_(ranges),
+ complexity_n_(0),
+ counters(),
+ thread_index(thread_i),
+ threads(n_threads),
+ timer_(timer),
+ manager_(manager) {
+ CHECK(max_iterations != 0) << "At least one iteration must be run";
+ CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
+
+ // Note: The use of offsetof below is technically undefined until C++17
+ // because State is not a standard layout type. However, all compilers
+ // currently provide well-defined behavior as an extension (which is
+ // demonstrated since constexpr evaluation must diagnose all undefined
+  // behavior). That said, GCC and Clang also warn about this use of
+  // offsetof, which must be suppressed.
+#if defined(__INTEL_COMPILER)
+#pragma warning push
+#pragma warning(disable:1875)
+#elif defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+#endif
+ // Offset tests to ensure commonly accessed data is on the first cache line.
+ const int cache_line_size = 64;
+ static_assert(offsetof(State, error_occurred_) <=
+ (cache_line_size - sizeof(error_occurred_)),
+ "");
+#if defined(__INTEL_COMPILER)
+#pragma warning pop
+#elif defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+void State::PauseTiming() {
+ // Add in time accumulated so far
+ CHECK(started_ && !finished_ && !error_occurred_);
+ timer_->StopTimer();
+}
+
+void State::ResumeTiming() {
+ CHECK(started_ && !finished_ && !error_occurred_);
+ timer_->StartTimer();
+}
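+// A usage sketch for the pair above (illustrative only; SetUp, Work, and
+// data are hypothetical names):
+//   for (auto _ : state) {
+//     state.PauseTiming();   // exclude per-iteration setup from the timing
+//     SetUp(data);
+//     state.ResumeTiming();
+//     Work(data);
+//   }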
+
+void State::SkipWithError(const char* msg) {
+ CHECK(msg);
+ error_occurred_ = true;
+ {
+ MutexLock l(manager_->GetBenchmarkMutex());
+ if (manager_->results.has_error_ == false) {
+ manager_->results.error_message_ = msg;
+ manager_->results.has_error_ = true;
+ }
+ }
+ total_iterations_ = 0;
+ if (timer_->running()) timer_->StopTimer();
+}
+
+void State::SetIterationTime(double seconds) {
+ timer_->SetIterationTime(seconds);
+}
+
+void State::SetLabel(const char* label) {
+ MutexLock l(manager_->GetBenchmarkMutex());
+ manager_->results.report_label_ = label;
+}
+
+void State::StartKeepRunning() {
+ CHECK(!started_ && !finished_);
+ started_ = true;
+ total_iterations_ = error_occurred_ ? 0 : max_iterations;
+ manager_->StartStopBarrier();
+ if (!error_occurred_) ResumeTiming();
+}
+
+void State::FinishKeepRunning() {
+ CHECK(started_ && (!finished_ || error_occurred_));
+ if (!error_occurred_) {
+ PauseTiming();
+ }
+ // Total iterations has now wrapped around past 0. Fix this.
+ total_iterations_ = 0;
+ finished_ = true;
+ manager_->StartStopBarrier();
+}
+
+namespace internal {
+namespace {
+
+void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
+ BenchmarkReporter* display_reporter,
+ BenchmarkReporter* file_reporter) {
+ // Note the file_reporter can be null.
+ CHECK(display_reporter != nullptr);
+
+ // Determine the width of the name field using a minimum width of 10.
+ bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
+ size_t name_field_width = 10;
+ size_t stat_field_width = 0;
+ for (const BenchmarkInstance& benchmark : benchmarks) {
+ name_field_width =
+ std::max<size_t>(name_field_width, benchmark.name.str().size());
+ might_have_aggregates |= benchmark.repetitions > 1;
+
+ for (const auto& Stat : *benchmark.statistics)
+ stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
+ }
+ if (might_have_aggregates) name_field_width += 1 + stat_field_width;
+
+ // Print header here
+ BenchmarkReporter::Context context;
+ context.name_field_width = name_field_width;
+
+ // Keep track of running times of all instances of current benchmark
+ std::vector<BenchmarkReporter::Run> complexity_reports;
+
+ // We flush streams after invoking reporter methods that write to them. This
+ // ensures users get timely updates even when streams are not line-buffered.
+ auto flushStreams = [](BenchmarkReporter* reporter) {
+ if (!reporter) return;
+ std::flush(reporter->GetOutputStream());
+ std::flush(reporter->GetErrorStream());
+ };
+
+ if (display_reporter->ReportContext(context) &&
+ (!file_reporter || file_reporter->ReportContext(context))) {
+ flushStreams(display_reporter);
+ flushStreams(file_reporter);
+
+ for (const auto& benchmark : benchmarks) {
+ RunResults run_results = RunBenchmark(benchmark, &complexity_reports);
+
+ auto report = [&run_results](BenchmarkReporter* reporter,
+ bool report_aggregates_only) {
+ assert(reporter);
+ // If there are no aggregates, do output non-aggregates.
+ report_aggregates_only &= !run_results.aggregates_only.empty();
+ if (!report_aggregates_only)
+ reporter->ReportRuns(run_results.non_aggregates);
+ if (!run_results.aggregates_only.empty())
+ reporter->ReportRuns(run_results.aggregates_only);
+ };
+
+ report(display_reporter, run_results.display_report_aggregates_only);
+ if (file_reporter)
+ report(file_reporter, run_results.file_report_aggregates_only);
+
+ flushStreams(display_reporter);
+ flushStreams(file_reporter);
+ }
+ }
+ display_reporter->Finalize();
+ if (file_reporter) file_reporter->Finalize();
+ flushStreams(display_reporter);
+ flushStreams(file_reporter);
+}
+
+std::unique_ptr<BenchmarkReporter> CreateReporter(
+ std::string const& name, ConsoleReporter::OutputOptions output_opts) {
+ typedef std::unique_ptr<BenchmarkReporter> PtrType;
+ if (name == "console") {
+ return PtrType(new ConsoleReporter(output_opts));
+ } else if (name == "json") {
+ return PtrType(new JSONReporter);
+ } else if (name == "csv") {
+ return PtrType(new CSVReporter);
+ } else {
+ std::cerr << "Unexpected format: '" << name << "'\n";
+ std::exit(1);
+ }
+}
+
+} // end namespace
+
+bool IsZero(double n) {
+ return std::abs(n) < std::numeric_limits<double>::epsilon();
+}
+
+ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
+ int output_opts = ConsoleReporter::OO_Defaults;
+ auto is_benchmark_color = [force_no_color] () -> bool {
+ if (force_no_color) {
+ return false;
+ }
+ if (FLAGS_benchmark_color == "auto") {
+ return IsColorTerminal();
+ }
+ return IsTruthyFlagValue(FLAGS_benchmark_color);
+ };
+ if (is_benchmark_color()) {
+ output_opts |= ConsoleReporter::OO_Color;
+ } else {
+ output_opts &= ~ConsoleReporter::OO_Color;
+ }
+ if (FLAGS_benchmark_counters_tabular) {
+ output_opts |= ConsoleReporter::OO_Tabular;
+ } else {
+ output_opts &= ~ConsoleReporter::OO_Tabular;
+ }
+ return static_cast<ConsoleReporter::OutputOptions>(output_opts);
+}
+
+} // end namespace internal
+
+size_t RunSpecifiedBenchmarks() {
+ return RunSpecifiedBenchmarks(nullptr, nullptr);
+}
+
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) {
+ return RunSpecifiedBenchmarks(display_reporter, nullptr);
+}
+
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
+ BenchmarkReporter* file_reporter) {
+ std::string spec = FLAGS_benchmark_filter;
+ if (spec.empty() || spec == "all")
+ spec = "."; // Regexp that matches all benchmarks
+
+ // Setup the reporters
+ std::ofstream output_file;
+ std::unique_ptr<BenchmarkReporter> default_display_reporter;
+ std::unique_ptr<BenchmarkReporter> default_file_reporter;
+ if (!display_reporter) {
+ default_display_reporter = internal::CreateReporter(
+ FLAGS_benchmark_format, internal::GetOutputOptions());
+ display_reporter = default_display_reporter.get();
+ }
+ auto& Out = display_reporter->GetOutputStream();
+ auto& Err = display_reporter->GetErrorStream();
+
+ std::string const& fname = FLAGS_benchmark_out;
+ if (fname.empty() && file_reporter) {
+ Err << "A custom file reporter was provided but "
+ "--benchmark_out=<file> was not specified."
+ << std::endl;
+ std::exit(1);
+ }
+ if (!fname.empty()) {
+ output_file.open(fname);
+ if (!output_file.is_open()) {
+      Err << "invalid file name: '" << fname << "'" << std::endl;
+ std::exit(1);
+ }
+ if (!file_reporter) {
+ default_file_reporter = internal::CreateReporter(
+ FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
+ file_reporter = default_file_reporter.get();
+ }
+ file_reporter->SetOutputStream(&output_file);
+ file_reporter->SetErrorStream(&output_file);
+ }
+
+ std::vector<internal::BenchmarkInstance> benchmarks;
+ if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
+
+ if (benchmarks.empty()) {
+ Err << "Failed to match any benchmarks against regex: " << spec << "\n";
+ return 0;
+ }
+
+ if (FLAGS_benchmark_list_tests) {
+ for (auto const& benchmark : benchmarks)
+ Out << benchmark.name.str() << "\n";
+ } else {
+ internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
+ }
+
+ return benchmarks.size();
+}
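+// Typical usage (a sketch mirroring what BENCHMARK_MAIN() expands to):
+//   int main(int argc, char** argv) {
+//     benchmark::Initialize(&argc, argv);
+//     if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
+//     benchmark::RunSpecifiedBenchmarks();
+//   }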
+
+void RegisterMemoryManager(MemoryManager* manager) {
+ internal::memory_manager = manager;
+}
+
+namespace internal {
+
+void PrintUsageAndExit() {
+ fprintf(stdout,
+ "benchmark"
+ " [--benchmark_list_tests={true|false}]\n"
+ " [--benchmark_filter=<regex>]\n"
+ " [--benchmark_min_time=<min_time>]\n"
+ " [--benchmark_repetitions=<num_repetitions>]\n"
+ " [--benchmark_report_aggregates_only={true|false}]\n"
+ " [--benchmark_display_aggregates_only={true|false}]\n"
+ " [--benchmark_format=<console|json|csv>]\n"
+ " [--benchmark_out=<filename>]\n"
+ " [--benchmark_out_format=<json|console|csv>]\n"
+ " [--benchmark_color={auto|true|false}]\n"
+ " [--benchmark_counters_tabular={true|false}]\n"
+ " [--v=<verbosity>]\n");
+ exit(0);
+}
+
+void ParseCommandLineFlags(int* argc, char** argv) {
+ using namespace benchmark;
+ BenchmarkReporter::Context::executable_name =
+ (argc && *argc > 0) ? argv[0] : "unknown";
+ for (int i = 1; i < *argc; ++i) {
+ if (ParseBoolFlag(argv[i], "benchmark_list_tests",
+ &FLAGS_benchmark_list_tests) ||
+ ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
+ ParseDoubleFlag(argv[i], "benchmark_min_time",
+ &FLAGS_benchmark_min_time) ||
+ ParseInt32Flag(argv[i], "benchmark_repetitions",
+ &FLAGS_benchmark_repetitions) ||
+ ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
+ &FLAGS_benchmark_report_aggregates_only) ||
+ ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
+ &FLAGS_benchmark_display_aggregates_only) ||
+ ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
+ ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
+ ParseStringFlag(argv[i], "benchmark_out_format",
+ &FLAGS_benchmark_out_format) ||
+ ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
+ // "color_print" is the deprecated name for "benchmark_color".
+ // TODO: Remove this.
+ ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
+ ParseBoolFlag(argv[i], "benchmark_counters_tabular",
+ &FLAGS_benchmark_counters_tabular) ||
+ ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
+ for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
+
+ --(*argc);
+ --i;
+ } else if (IsFlag(argv[i], "help")) {
+ PrintUsageAndExit();
+ }
+ }
+ for (auto const* flag :
+ {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
+ if (*flag != "console" && *flag != "json" && *flag != "csv") {
+ PrintUsageAndExit();
+ }
+ if (FLAGS_benchmark_color.empty()) {
+ PrintUsageAndExit();
+ }
+}
+
+int InitializeStreams() {
+ static std::ios_base::Init init;
+ return 0;
+}
+
+} // end namespace internal
+
+void Initialize(int* argc, char** argv) {
+ internal::ParseCommandLineFlags(argc, argv);
+ internal::LogLevel() = FLAGS_v;
+}
+
+bool ReportUnrecognizedArguments(int argc, char** argv) {
+ for (int i = 1; i < argc; ++i) {
+ fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
+ argv[i]);
+ }
+ return argc > 1;
+}
+
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/benchmark_api_internal.cc b/src/third_party/benchmark/dist/src/benchmark_api_internal.cc
new file mode 100644
index 00000000000..d468a257e39
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_api_internal.cc
@@ -0,0 +1,15 @@
+#include "benchmark_api_internal.h"
+
+namespace benchmark {
+namespace internal {
+
+State BenchmarkInstance::Run(IterationCount iters, int thread_id,
+ internal::ThreadTimer* timer,
+ internal::ThreadManager* manager) const {
+ State st(iters, arg, thread_id, threads, timer, manager);
+ benchmark->Run(st);
+ return st;
+}
+
+}  // namespace internal
+}  // namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/benchmark_api_internal.h b/src/third_party/benchmark/dist/src/benchmark_api_internal.h
new file mode 100644
index 00000000000..264eff95c5c
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_api_internal.h
@@ -0,0 +1,53 @@
+#ifndef BENCHMARK_API_INTERNAL_H
+#define BENCHMARK_API_INTERNAL_H
+
+#include "benchmark/benchmark.h"
+#include "commandlineflags.h"
+
+#include <cmath>
+#include <iosfwd>
+#include <limits>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace benchmark {
+namespace internal {
+
+// Information kept per benchmark we may want to run
+struct BenchmarkInstance {
+ BenchmarkName name;
+ Benchmark* benchmark;
+ AggregationReportMode aggregation_report_mode;
+ std::vector<int64_t> arg;
+ TimeUnit time_unit;
+ int range_multiplier;
+ bool measure_process_cpu_time;
+ bool use_real_time;
+ bool use_manual_time;
+ BigO complexity;
+ BigOFunc* complexity_lambda;
+ UserCounters counters;
+ const std::vector<Statistics>* statistics;
+ bool last_benchmark_instance;
+ int repetitions;
+ double min_time;
+ IterationCount iterations;
+  int threads;  // Number of concurrent threads to use
+
+ State Run(IterationCount iters, int thread_id, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager) const;
+};
+
+bool FindBenchmarksInternal(const std::string& re,
+ std::vector<BenchmarkInstance>* benchmarks,
+ std::ostream* Err);
+
+bool IsZero(double n);
+
+ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false);
+
+} // end namespace internal
+} // end namespace benchmark
+
+#endif // BENCHMARK_API_INTERNAL_H
diff --git a/src/third_party/benchmark/dist/src/benchmark_main.cc b/src/third_party/benchmark/dist/src/benchmark_main.cc
new file mode 100644
index 00000000000..b3b24783149
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_main.cc
@@ -0,0 +1,17 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+
+BENCHMARK_MAIN();
diff --git a/src/third_party/benchmark/dist/src/benchmark_name.cc b/src/third_party/benchmark/dist/src/benchmark_name.cc
new file mode 100644
index 00000000000..2a17ebce277
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_name.cc
@@ -0,0 +1,58 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <benchmark/benchmark.h>
+
+namespace benchmark {
+
+namespace {
+
+// Compute the total size of a pack of std::strings
+size_t size_impl() { return 0; }
+
+template <typename Head, typename... Tail>
+size_t size_impl(const Head& head, const Tail&... tail) {
+ return head.size() + size_impl(tail...);
+}
+
+// Join a pack of std::strings using a delimiter
+// TODO: use absl::StrJoin
+void join_impl(std::string&, char) {}
+
+template <typename Head, typename... Tail>
+void join_impl(std::string& s, const char delimiter, const Head& head,
+ const Tail&... tail) {
+ if (!s.empty() && !head.empty()) {
+ s += delimiter;
+ }
+
+ s += head;
+
+ join_impl(s, delimiter, tail...);
+}
+
+template <typename... Ts>
+std::string join(char delimiter, const Ts&... ts) {
+ std::string s;
+ s.reserve(sizeof...(Ts) + size_impl(ts...));
+ join_impl(s, delimiter, ts...);
+ return s;
+}
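+
+// A worked sketch of join (illustrative only):
+//   join('/', std::string("BM_Foo"), std::string("42"), std::string(""),
+//        std::string("threads:2"))
+// skips the delimiter in front of empty components and yields
+// "BM_Foo/42/threads:2".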
+} // namespace
+
+std::string BenchmarkName::str() const {
+ return join('/', function_name, args, min_time, iterations, repetitions,
+ time_type, threads);
+}
+} // namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/benchmark_register.cc b/src/third_party/benchmark/dist/src/benchmark_register.cc
new file mode 100644
index 00000000000..6696c382b80
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_register.cc
@@ -0,0 +1,504 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark_register.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <condition_variable>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <sstream>
+#include <thread>
+
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
+#include "check.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "mutex.h"
+#include "re.h"
+#include "statistics.h"
+#include "string_util.h"
+#include "timers.h"
+
+namespace benchmark {
+
+namespace {
+// For non-dense Range, intermediate values are powers of kRangeMultiplier.
+static const int kRangeMultiplier = 8;
+// The size of a benchmark family is the number of inputs on which the
+// benchmark is repeated. If this is "large", warn the user during
+// configuration.
+static const size_t kMaxFamilySize = 100;
+} // end namespace
+
+namespace internal {
+
+//=============================================================================//
+// BenchmarkFamilies
+//=============================================================================//
+
+// Class for managing registered benchmarks. Note that each registered
+// benchmark identifies a family of related benchmarks to run.
+class BenchmarkFamilies {
+ public:
+ static BenchmarkFamilies* GetInstance();
+
+ // Registers a benchmark family and returns the index assigned to it.
+ size_t AddBenchmark(std::unique_ptr<Benchmark> family);
+
+ // Clear all registered benchmark families.
+ void ClearBenchmarks();
+
+ // Extract the list of benchmark instances that match the specified
+ // regular expression.
+ bool FindBenchmarks(std::string re,
+ std::vector<BenchmarkInstance>* benchmarks,
+ std::ostream* Err);
+
+ private:
+ BenchmarkFamilies() {}
+
+ std::vector<std::unique_ptr<Benchmark>> families_;
+ Mutex mutex_;
+};
+
+BenchmarkFamilies* BenchmarkFamilies::GetInstance() {
+ static BenchmarkFamilies instance;
+ return &instance;
+}
+
+size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family) {
+ MutexLock l(mutex_);
+ size_t index = families_.size();
+ families_.push_back(std::move(family));
+ return index;
+}
+
+void BenchmarkFamilies::ClearBenchmarks() {
+ MutexLock l(mutex_);
+ families_.clear();
+ families_.shrink_to_fit();
+}
+
+bool BenchmarkFamilies::FindBenchmarks(
+ std::string spec, std::vector<BenchmarkInstance>* benchmarks,
+ std::ostream* ErrStream) {
+ CHECK(ErrStream);
+ auto& Err = *ErrStream;
+ // Make regular expression out of command-line flag
+ std::string error_msg;
+ Regex re;
+ bool isNegativeFilter = false;
+ if (spec[0] == '-') {
+ spec.replace(0, 1, "");
+ isNegativeFilter = true;
+ }
+ if (!re.Init(spec, &error_msg)) {
+ Err << "Could not compile benchmark re: " << error_msg << std::endl;
+ return false;
+ }
+
+ // Special list of thread counts to use when none are specified
+ const std::vector<int> one_thread = {1};
+
+ MutexLock l(mutex_);
+ for (std::unique_ptr<Benchmark>& family : families_) {
+ // Family was deleted or benchmark doesn't match
+ if (!family) continue;
+
+ if (family->ArgsCnt() == -1) {
+ family->Args({});
+ }
+ const std::vector<int>* thread_counts =
+ (family->thread_counts_.empty()
+ ? &one_thread
+ : &static_cast<const std::vector<int>&>(family->thread_counts_));
+ const size_t family_size = family->args_.size() * thread_counts->size();
+    // The benchmark will be run on at least 'family_size' different inputs.
+    // If 'family_size' is very large, warn the user.
+ if (family_size > kMaxFamilySize) {
+ Err << "The number of inputs is very large. " << family->name_
+ << " will be repeated at least " << family_size << " times.\n";
+ }
+    // Reserve space in the special case of the regex ".", since we know the
+    // final family size.
+ if (spec == ".") benchmarks->reserve(family_size);
+
+ for (auto const& args : family->args_) {
+ for (int num_threads : *thread_counts) {
+ BenchmarkInstance instance;
+ instance.name.function_name = family->name_;
+ instance.benchmark = family.get();
+ instance.aggregation_report_mode = family->aggregation_report_mode_;
+ instance.arg = args;
+ instance.time_unit = family->time_unit_;
+ instance.range_multiplier = family->range_multiplier_;
+ instance.min_time = family->min_time_;
+ instance.iterations = family->iterations_;
+ instance.repetitions = family->repetitions_;
+ instance.measure_process_cpu_time = family->measure_process_cpu_time_;
+ instance.use_real_time = family->use_real_time_;
+ instance.use_manual_time = family->use_manual_time_;
+ instance.complexity = family->complexity_;
+ instance.complexity_lambda = family->complexity_lambda_;
+ instance.statistics = &family->statistics_;
+ instance.threads = num_threads;
+
+ // Add arguments to instance name
+ size_t arg_i = 0;
+ for (auto const& arg : args) {
+ if (!instance.name.args.empty()) {
+ instance.name.args += '/';
+ }
+
+ if (arg_i < family->arg_names_.size()) {
+ const auto& arg_name = family->arg_names_[arg_i];
+ if (!arg_name.empty()) {
+ instance.name.args += StrFormat("%s:", arg_name.c_str());
+ }
+ }
+
+ instance.name.args += StrFormat("%" PRId64, arg);
+ ++arg_i;
+ }
+
+ if (!IsZero(family->min_time_))
+ instance.name.min_time =
+ StrFormat("min_time:%0.3f", family->min_time_);
+ if (family->iterations_ != 0) {
+ instance.name.iterations =
+ StrFormat("iterations:%lu",
+ static_cast<unsigned long>(family->iterations_));
+ }
+ if (family->repetitions_ != 0)
+ instance.name.repetitions =
+ StrFormat("repeats:%d", family->repetitions_);
+
+ if (family->measure_process_cpu_time_) {
+ instance.name.time_type = "process_time";
+ }
+
+ if (family->use_manual_time_) {
+ if (!instance.name.time_type.empty()) {
+ instance.name.time_type += '/';
+ }
+ instance.name.time_type += "manual_time";
+ } else if (family->use_real_time_) {
+ if (!instance.name.time_type.empty()) {
+ instance.name.time_type += '/';
+ }
+ instance.name.time_type += "real_time";
+ }
+
+ // Add the number of threads used to the name
+ if (!family->thread_counts_.empty()) {
+ instance.name.threads = StrFormat("threads:%d", instance.threads);
+ }
+
+ const auto full_name = instance.name.str();
+ if ((re.Match(full_name) && !isNegativeFilter) ||
+ (!re.Match(full_name) && isNegativeFilter)) {
+ instance.last_benchmark_instance = (&args == &family->args_.back());
+ benchmarks->push_back(std::move(instance));
+ }
+ }
+ }
+ }
+ return true;
+}
+
+Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
+ std::unique_ptr<Benchmark> bench_ptr(bench);
+ BenchmarkFamilies* families = BenchmarkFamilies::GetInstance();
+ families->AddBenchmark(std::move(bench_ptr));
+ return bench;
+}
+
+// FIXME: This function is a hack so that benchmark.cc can access
+// `BenchmarkFamilies`
+bool FindBenchmarksInternal(const std::string& re,
+ std::vector<BenchmarkInstance>* benchmarks,
+ std::ostream* Err) {
+ return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
+}
+
+//=============================================================================//
+// Benchmark
+//=============================================================================//
+
+Benchmark::Benchmark(const char* name)
+ : name_(name),
+ aggregation_report_mode_(ARM_Unspecified),
+ time_unit_(kNanosecond),
+ range_multiplier_(kRangeMultiplier),
+ min_time_(0),
+ iterations_(0),
+ repetitions_(0),
+ measure_process_cpu_time_(false),
+ use_real_time_(false),
+ use_manual_time_(false),
+ complexity_(oNone),
+ complexity_lambda_(nullptr) {
+ ComputeStatistics("mean", StatisticsMean);
+ ComputeStatistics("median", StatisticsMedian);
+ ComputeStatistics("stddev", StatisticsStdDev);
+}
+
+Benchmark::~Benchmark() {}
+
+Benchmark* Benchmark::Arg(int64_t x) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ args_.push_back({x});
+ return this;
+}
+
+Benchmark* Benchmark::Unit(TimeUnit unit) {
+ time_unit_ = unit;
+ return this;
+}
+
+Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ std::vector<int64_t> arglist;
+ AddRange(&arglist, start, limit, range_multiplier_);
+
+ for (int64_t i : arglist) {
+ args_.push_back({i});
+ }
+ return this;
+}
+
+Benchmark* Benchmark::Ranges(
+ const std::vector<std::pair<int64_t, int64_t>>& ranges) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
+ std::vector<std::vector<int64_t>> arglists(ranges.size());
+ std::size_t total = 1;
+ for (std::size_t i = 0; i < ranges.size(); i++) {
+ AddRange(&arglists[i], ranges[i].first, ranges[i].second,
+ range_multiplier_);
+ total *= arglists[i].size();
+ }
+
+ std::vector<std::size_t> ctr(arglists.size(), 0);
+
+ for (std::size_t i = 0; i < total; i++) {
+ std::vector<int64_t> tmp;
+ tmp.reserve(arglists.size());
+
+ for (std::size_t j = 0; j < arglists.size(); j++) {
+ tmp.push_back(arglists[j].at(ctr[j]));
+ }
+
+ args_.push_back(std::move(tmp));
+
+ for (std::size_t j = 0; j < arglists.size(); j++) {
+ if (ctr[j] + 1 < arglists[j].size()) {
+ ++ctr[j];
+ break;
+ }
+ ctr[j] = 0;
+ }
+ }
+ return this;
+}
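+
+// An illustrative sketch (not part of the library): with the default range
+// multiplier of 8,
+//   b->Ranges({{1, 8}, {32, 64}});
+// expands to the cartesian product of {1, 8} and {32, 64}, i.e. the argument
+// lists {1,32}, {8,32}, {1,64}, {8,64}, generated in that order by the
+// odometer-style counter above.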
+
+Benchmark* Benchmark::ArgName(const std::string& name) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ arg_names_ = {name};
+ return this;
+}
+
+Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
+ arg_names_ = names;
+ return this;
+}
+
+Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ CHECK_LE(start, limit);
+ for (int64_t arg = start; arg <= limit; arg += step) {
+ args_.push_back({arg});
+ }
+ return this;
+}
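+
+// For example (illustrative only), DenseRange(0, 10, 2) appends the argument
+// values 0, 2, 4, 6, 8, 10.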
+
+Benchmark* Benchmark::Args(const std::vector<int64_t>& args) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
+ args_.push_back(args);
+ return this;
+}
+
+Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) {
+ custom_arguments(this);
+ return this;
+}
+
+Benchmark* Benchmark::RangeMultiplier(int multiplier) {
+ CHECK(multiplier > 1);
+ range_multiplier_ = multiplier;
+ return this;
+}
+
+Benchmark* Benchmark::MinTime(double t) {
+ CHECK(t > 0.0);
+ CHECK(iterations_ == 0);
+ min_time_ = t;
+ return this;
+}
+
+Benchmark* Benchmark::Iterations(IterationCount n) {
+ CHECK(n > 0);
+ CHECK(IsZero(min_time_));
+ iterations_ = n;
+ return this;
+}
+
+Benchmark* Benchmark::Repetitions(int n) {
+ CHECK(n > 0);
+ repetitions_ = n;
+ return this;
+}
+
+Benchmark* Benchmark::ReportAggregatesOnly(bool value) {
+ aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default;
+ return this;
+}
+
+Benchmark* Benchmark::DisplayAggregatesOnly(bool value) {
+ // If we were called, the report mode is no longer 'unspecified', in any case.
+ aggregation_report_mode_ = static_cast<AggregationReportMode>(
+ aggregation_report_mode_ | ARM_Default);
+
+ if (value) {
+ aggregation_report_mode_ = static_cast<AggregationReportMode>(
+ aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly);
+ } else {
+ aggregation_report_mode_ = static_cast<AggregationReportMode>(
+ aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly);
+ }
+
+ return this;
+}
+
+Benchmark* Benchmark::MeasureProcessCPUTime() {
+ // Can be used together with UseRealTime() / UseManualTime().
+ measure_process_cpu_time_ = true;
+ return this;
+}
+
+Benchmark* Benchmark::UseRealTime() {
+ CHECK(!use_manual_time_)
+ << "Cannot set UseRealTime and UseManualTime simultaneously.";
+ use_real_time_ = true;
+ return this;
+}
+
+Benchmark* Benchmark::UseManualTime() {
+ CHECK(!use_real_time_)
+ << "Cannot set UseRealTime and UseManualTime simultaneously.";
+ use_manual_time_ = true;
+ return this;
+}
+
+Benchmark* Benchmark::Complexity(BigO complexity) {
+ complexity_ = complexity;
+ return this;
+}
+
+Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
+ complexity_lambda_ = complexity;
+ complexity_ = oLambda;
+ return this;
+}
+
+Benchmark* Benchmark::ComputeStatistics(std::string name,
+ StatisticsFunc* statistics) {
+ statistics_.emplace_back(name, statistics);
+ return this;
+}
+
+Benchmark* Benchmark::Threads(int t) {
+ CHECK_GT(t, 0);
+ thread_counts_.push_back(t);
+ return this;
+}
+
+Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {
+ CHECK_GT(min_threads, 0);
+ CHECK_GE(max_threads, min_threads);
+
+ AddRange(&thread_counts_, min_threads, max_threads, 2);
+ return this;
+}
+
+Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,
+ int stride) {
+ CHECK_GT(min_threads, 0);
+ CHECK_GE(max_threads, min_threads);
+ CHECK_GE(stride, 1);
+
+ for (auto i = min_threads; i < max_threads; i += stride) {
+ thread_counts_.push_back(i);
+ }
+ thread_counts_.push_back(max_threads);
+ return this;
+}
+
+Benchmark* Benchmark::ThreadPerCpu() {
+ thread_counts_.push_back(CPUInfo::Get().num_cpus);
+ return this;
+}
+
+void Benchmark::SetName(const char* name) { name_ = name; }
+
+int Benchmark::ArgsCnt() const {
+ if (args_.empty()) {
+ if (arg_names_.empty()) return -1;
+ return static_cast<int>(arg_names_.size());
+ }
+ return static_cast<int>(args_.front().size());
+}
+
+//=============================================================================//
+// FunctionBenchmark
+//=============================================================================//
+
+void FunctionBenchmark::Run(State& st) { func_(st); }
+
+} // end namespace internal
+
+void ClearRegisteredBenchmarks() {
+ internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks();
+}
+
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/benchmark_register.h b/src/third_party/benchmark/dist/src/benchmark_register.h
new file mode 100644
index 00000000000..61377d74230
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_register.h
@@ -0,0 +1,107 @@
+#ifndef BENCHMARK_REGISTER_H
+#define BENCHMARK_REGISTER_H
+
+#include <vector>
+
+#include "check.h"
+
+namespace benchmark {
+namespace internal {
+
+// Append the powers of 'mult' in the closed interval [lo, hi].
+// Returns iterator to the start of the inserted range.
+template <typename T>
+typename std::vector<T>::iterator
+AddPowers(std::vector<T>* dst, T lo, T hi, int mult) {
+ CHECK_GE(lo, 0);
+ CHECK_GE(hi, lo);
+ CHECK_GE(mult, 2);
+
+ const size_t start_offset = dst->size();
+
+ static const T kmax = std::numeric_limits<T>::max();
+
+ // Space out the values in multiples of "mult"
+ for (T i = 1; i <= hi; i *= mult) {
+ if (i >= lo) {
+ dst->push_back(i);
+ }
+ // Break the loop here since multiplying by
+ // 'mult' would move outside of the range of T
+ if (i > kmax / mult) break;
+ }
+
+ return dst->begin() + start_offset;
+}
+
+template <typename T>
+void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
+ // We negate lo and hi so we require that they cannot be equal to 'min'.
+ CHECK_GT(lo, std::numeric_limits<T>::min());
+ CHECK_GT(hi, std::numeric_limits<T>::min());
+ CHECK_GE(hi, lo);
+ CHECK_LE(hi, 0);
+
+ // Add positive powers, then negate and reverse.
+ // Casts necessary since small integers get promoted
+ // to 'int' when negating.
+ const auto lo_complement = static_cast<T>(-lo);
+ const auto hi_complement = static_cast<T>(-hi);
+
+ const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
+
+ std::for_each(it, dst->end(), [](T& t) { t *= -1; });
+ std::reverse(it, dst->end());
+}
+
+template <typename T>
+void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
+ static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
+ "Args type must be a signed integer");
+
+ CHECK_GE(hi, lo);
+ CHECK_GE(mult, 2);
+
+ // Add "lo"
+ dst->push_back(lo);
+
+ // Handle lo == hi as a special case, so we then know
+ // lo < hi and so it is safe to add 1 to lo and subtract 1
+ // from hi without falling outside of the range of T.
+ if (lo == hi) return;
+
+ // Ensure that lo_inner <= hi_inner below.
+ if (lo + 1 == hi) {
+ dst->push_back(hi);
+ return;
+ }
+
+ // Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive).
+ const auto lo_inner = static_cast<T>(lo + 1);
+ const auto hi_inner = static_cast<T>(hi - 1);
+
+ // Insert negative values
+ if (lo_inner < 0) {
+ AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult);
+ }
+
+ // Treat 0 as a special case (see discussion on #762).
+ if (lo <= 0 && hi >= 0) {
+ dst->push_back(0);
+ }
+
+ // Insert positive values
+ if (hi_inner > 0) {
+ AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult);
+ }
+
+ // Add "hi" (if different from last value).
+ if (hi != dst->back()) {
+ dst->push_back(hi);
+ }
+}
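+
+// Worked examples (illustrative only):
+//   AddRange(&v, T{1}, T{1000}, 10) appends 1, 10, 100, 1000;
+//   AddRange(&v, T{-8}, T{8}, 2)    appends -8, -4, -2, -1, 0, 1, 2, 4, 8;
+// i.e. the endpoints, the interior powers of 'mult' on each side of zero,
+// and zero itself whenever the range straddles it.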
+
+} // namespace internal
+} // namespace benchmark
+
+#endif // BENCHMARK_REGISTER_H
diff --git a/src/third_party/benchmark/dist/src/benchmark_runner.cc b/src/third_party/benchmark/dist/src/benchmark_runner.cc
new file mode 100644
index 00000000000..0bae6a545ef
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_runner.cc
@@ -0,0 +1,361 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark_runner.h"
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
+#include "internal_macros.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <condition_variable>
+#include <cstdio>
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <thread>
+#include <utility>
+
+#include "check.h"
+#include "colorprint.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "counter.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "mutex.h"
+#include "re.h"
+#include "statistics.h"
+#include "string_util.h"
+#include "thread_manager.h"
+#include "thread_timer.h"
+
+namespace benchmark {
+
+namespace internal {
+
+MemoryManager* memory_manager = nullptr;
+
+namespace {
+
+static constexpr IterationCount kMaxIterations = 1000000000;
+
+BenchmarkReporter::Run CreateRunReport(
+ const benchmark::internal::BenchmarkInstance& b,
+ const internal::ThreadManager::Result& results,
+ IterationCount memory_iterations,
+ const MemoryManager::Result& memory_result, double seconds,
+ int64_t repetition_index) {
+ // Create report about this benchmark run.
+ BenchmarkReporter::Run report;
+
+ report.run_name = b.name;
+ report.error_occurred = results.has_error_;
+ report.error_message = results.error_message_;
+ report.report_label = results.report_label_;
+ // This is the total iterations across all threads.
+ report.iterations = results.iterations;
+ report.time_unit = b.time_unit;
+ report.threads = b.threads;
+ report.repetition_index = repetition_index;
+ report.repetitions = b.repetitions;
+
+ if (!report.error_occurred) {
+ if (b.use_manual_time) {
+ report.real_accumulated_time = results.manual_time_used;
+ } else {
+ report.real_accumulated_time = results.real_time_used;
+ }
+ report.cpu_accumulated_time = results.cpu_time_used;
+ report.complexity_n = results.complexity_n;
+ report.complexity = b.complexity;
+ report.complexity_lambda = b.complexity_lambda;
+ report.statistics = b.statistics;
+ report.counters = results.counters;
+
+ if (memory_iterations > 0) {
+ report.has_memory_result = true;
+ report.allocs_per_iter =
+ memory_iterations ? static_cast<double>(memory_result.num_allocs) /
+ memory_iterations
+ : 0;
+ report.max_bytes_used = memory_result.max_bytes_used;
+ }
+
+ internal::Finish(&report.counters, results.iterations, seconds, b.threads);
+ }
+ return report;
+}
+
+// Execute one thread of benchmark b for the specified number of iterations.
+// Adds the stats collected for the thread into *total.
+void RunInThread(const BenchmarkInstance* b, IterationCount iters,
+ int thread_id, ThreadManager* manager) {
+ internal::ThreadTimer timer(
+ b->measure_process_cpu_time
+ ? internal::ThreadTimer::CreateProcessCpuTime()
+ : internal::ThreadTimer::Create());
+ State st = b->Run(iters, thread_id, &timer, manager);
+ CHECK(st.iterations() >= st.max_iterations)
+ << "Benchmark returned before State::KeepRunning() returned false!";
+ {
+ MutexLock l(manager->GetBenchmarkMutex());
+ internal::ThreadManager::Result& results = manager->results;
+ results.iterations += st.iterations();
+ results.cpu_time_used += timer.cpu_time_used();
+ results.real_time_used += timer.real_time_used();
+ results.manual_time_used += timer.manual_time_used();
+ results.complexity_n += st.complexity_length_n();
+ internal::Increment(&results.counters, st.counters);
+ }
+ manager->NotifyThreadComplete();
+}
+
+class BenchmarkRunner {
+ public:
+ BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
+ std::vector<BenchmarkReporter::Run>* complexity_reports_)
+ : b(b_),
+ complexity_reports(*complexity_reports_),
+ min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time),
+ repeats(b.repetitions != 0 ? b.repetitions
+ : FLAGS_benchmark_repetitions),
+ has_explicit_iteration_count(b.iterations != 0),
+ pool(b.threads - 1),
+ iters(has_explicit_iteration_count ? b.iterations : 1) {
+ run_results.display_report_aggregates_only =
+ (FLAGS_benchmark_report_aggregates_only ||
+ FLAGS_benchmark_display_aggregates_only);
+ run_results.file_report_aggregates_only =
+ FLAGS_benchmark_report_aggregates_only;
+ if (b.aggregation_report_mode != internal::ARM_Unspecified) {
+ run_results.display_report_aggregates_only =
+ (b.aggregation_report_mode &
+ internal::ARM_DisplayReportAggregatesOnly);
+ run_results.file_report_aggregates_only =
+ (b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
+ }
+
+ for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
+ DoOneRepetition(repetition_num);
+ }
+
+ // Calculate additional statistics
+ run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
+
+ // Maybe calculate complexity report
+ if ((b.complexity != oNone) && b.last_benchmark_instance) {
+ auto additional_run_stats = ComputeBigO(complexity_reports);
+ run_results.aggregates_only.insert(run_results.aggregates_only.end(),
+ additional_run_stats.begin(),
+ additional_run_stats.end());
+ complexity_reports.clear();
+ }
+ }
+
+ RunResults&& get_results() { return std::move(run_results); }
+
+ private:
+ RunResults run_results;
+
+ const benchmark::internal::BenchmarkInstance& b;
+ std::vector<BenchmarkReporter::Run>& complexity_reports;
+
+ const double min_time;
+ const int repeats;
+ const bool has_explicit_iteration_count;
+
+ std::vector<std::thread> pool;
+
+ IterationCount iters; // preserved between repetitions!
+ // So only the first repetition has to find/calculate it,
+ // the other repetitions will just use that precomputed iteration count.
+
+ struct IterationResults {
+ internal::ThreadManager::Result results;
+ IterationCount iters;
+ double seconds;
+ };
+ IterationResults DoNIterations() {
+ VLOG(2) << "Running " << b.name.str() << " for " << iters << "\n";
+
+ std::unique_ptr<internal::ThreadManager> manager;
+ manager.reset(new internal::ThreadManager(b.threads));
+
+ // Run all but one thread in separate threads
+ for (std::size_t ti = 0; ti < pool.size(); ++ti) {
+ pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
+ manager.get());
+ }
+ // And run one thread here directly.
+ // (If we were asked to run just one thread, we don't create new threads.)
+ // Yes, we need to do this here *after* we start the separate threads.
+ RunInThread(&b, iters, 0, manager.get());
+
+ // The main thread has finished. Now let's wait for the other threads.
+ manager->WaitForAllThreads();
+ for (std::thread& thread : pool) thread.join();
+
+ IterationResults i;
+ // Acquire the measurements/counters from the manager, UNDER THE LOCK!
+ {
+ MutexLock l(manager->GetBenchmarkMutex());
+ i.results = manager->results;
+ }
+
+ // And get rid of the manager.
+ manager.reset();
+
+ // Adjust real/manual time stats since they were reported per thread.
+ i.results.real_time_used /= b.threads;
+ i.results.manual_time_used /= b.threads;
+ // If we were measuring whole-process CPU usage, adjust the CPU time too.
+ if (b.measure_process_cpu_time) i.results.cpu_time_used /= b.threads;
+
+ VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
+ << i.results.real_time_used << "\n";
+
+ // So for how long were we running?
+ i.iters = iters;
+ // Base decisions off of real time if requested by this benchmark.
+ i.seconds = i.results.cpu_time_used;
+ if (b.use_manual_time) {
+ i.seconds = i.results.manual_time_used;
+ } else if (b.use_real_time) {
+ i.seconds = i.results.real_time_used;
+ }
+
+ return i;
+ }
+
+ IterationCount PredictNumItersNeeded(const IterationResults& i) const {
+    // See by how much the iteration count should be increased.
+ // Note: Avoid division by zero with max(seconds, 1ns).
+ double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
+ // If our last run was at least 10% of FLAGS_benchmark_min_time then we
+ // use the multiplier directly.
+ // Otherwise we use at most 10 times expansion.
+ // NOTE: When the last run was at least 10% of the min time the max
+ // expansion should be 14x.
+ bool is_significant = (i.seconds / min_time) > 0.1;
+ multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
+ if (multiplier <= 1.0) multiplier = 2.0;
+
+ // So what seems to be the sufficiently-large iteration count? Round up.
+ const IterationCount max_next_iters =
+ 0.5 + std::max(multiplier * i.iters, i.iters + 1.0);
+    // But we do have *some* sanity limits.
+ const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
+
+ VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
+    return next_iters;  // rounded up above, before conversion to integer.
+ }
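+
+  // A numeric sketch (illustrative only): with min_time = 0.5s, a last run of
+  // 1000 iterations in 0.01s gives multiplier = 0.5 * 1.4 / 0.01 = 70; since
+  // 0.01 / 0.5 = 0.02 is below the 10% significance threshold, the expansion
+  // is capped at 10x, predicting 10000 iterations for the next attempt.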
+
+ bool ShouldReportIterationResults(const IterationResults& i) const {
+    // Determine if this run should be reported:
+    // either it has run for a sufficient amount of time,
+    // or an error was reported.
+ return i.results.has_error_ ||
+ i.iters >= kMaxIterations || // Too many iterations already.
+ i.seconds >= min_time || // The elapsed time is large enough.
+ // CPU time is specified but the elapsed real time greatly exceeds
+ // the minimum time.
+           // Note that user-provided timers are exempt from this sanity check.
+ ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
+ }
+
+ void DoOneRepetition(int64_t repetition_index) {
+ const bool is_the_first_repetition = repetition_index == 0;
+ IterationResults i;
+
+ // We *may* be gradually increasing the length (iteration count)
+ // of the benchmark until we decide the results are significant.
+ // And once we do, we report those last results and exit.
+    // Please do note that if there are repetitions, the iteration count is
+    // *only* calculated for the *first* repetition; the other repetitions
+    // simply reuse that precomputed iteration count.
+ for (;;) {
+ i = DoNIterations();
+
+ // Do we consider the results to be significant?
+ // If we are doing repetitions, and the first repetition was already done,
+      // it has calculated the correct iteration count, so we have run that very
+ // iteration count just now. No need to calculate anything. Just report.
+ // Else, the normal rules apply.
+ const bool results_are_significant = !is_the_first_repetition ||
+ has_explicit_iteration_count ||
+ ShouldReportIterationResults(i);
+
+ if (results_are_significant) break; // Good, let's report them!
+
+ // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
+ // iteration count, and run the benchmark again...
+
+ iters = PredictNumItersNeeded(i);
+ assert(iters > i.iters &&
+ "if we did more iterations than we want to do the next time, "
+ "then we should have accepted the current iteration run.");
+ }
+
+    // Oh, one last thing: we also need to produce the 'memory measurements'.
+ MemoryManager::Result memory_result;
+ IterationCount memory_iterations = 0;
+ if (memory_manager != nullptr) {
+ // Only run a few iterations to reduce the impact of one-time
+ // allocations in benchmarks that are not properly managed.
+ memory_iterations = std::min<IterationCount>(16, iters);
+ memory_manager->Start();
+ std::unique_ptr<internal::ThreadManager> manager;
+ manager.reset(new internal::ThreadManager(1));
+ RunInThread(&b, memory_iterations, 0, manager.get());
+ manager->WaitForAllThreads();
+ manager.reset();
+
+ memory_manager->Stop(&memory_result);
+ }
+
+    // Ok, now actually report.
+ BenchmarkReporter::Run report =
+ CreateRunReport(b, i.results, memory_iterations, memory_result,
+ i.seconds, repetition_index);
+
+ if (!report.error_occurred && b.complexity != oNone)
+ complexity_reports.push_back(report);
+
+ run_results.non_aggregates.push_back(report);
+ }
+};
+
+} // end namespace
+
+RunResults RunBenchmark(
+ const benchmark::internal::BenchmarkInstance& b,
+ std::vector<BenchmarkReporter::Run>* complexity_reports) {
+ internal::BenchmarkRunner r(b, complexity_reports);
+ return r.get_results();
+}
+
+} // end namespace internal
+
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/benchmark_runner.h b/src/third_party/benchmark/dist/src/benchmark_runner.h
new file mode 100644
index 00000000000..96e8282a11a
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_runner.h
@@ -0,0 +1,51 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef BENCHMARK_RUNNER_H_
+#define BENCHMARK_RUNNER_H_
+
+#include "benchmark_api_internal.h"
+#include "internal_macros.h"
+
+DECLARE_double(benchmark_min_time);
+
+DECLARE_int32(benchmark_repetitions);
+
+DECLARE_bool(benchmark_report_aggregates_only);
+
+DECLARE_bool(benchmark_display_aggregates_only);
+
+namespace benchmark {
+
+namespace internal {
+
+extern MemoryManager* memory_manager;
+
+struct RunResults {
+ std::vector<BenchmarkReporter::Run> non_aggregates;
+ std::vector<BenchmarkReporter::Run> aggregates_only;
+
+ bool display_report_aggregates_only = false;
+ bool file_report_aggregates_only = false;
+};
+
+RunResults RunBenchmark(
+ const benchmark::internal::BenchmarkInstance& b,
+ std::vector<BenchmarkReporter::Run>* complexity_reports);
+
+} // namespace internal
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_RUNNER_H_
diff --git a/src/third_party/benchmark/dist/src/check.h b/src/third_party/benchmark/dist/src/check.h
new file mode 100644
index 00000000000..f5f8253f804
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/check.h
@@ -0,0 +1,82 @@
+#ifndef CHECK_H_
+#define CHECK_H_
+
+#include <cmath>
+#include <cstdlib>
+#include <ostream>
+
+#include "internal_macros.h"
+#include "log.h"
+
+namespace benchmark {
+namespace internal {
+
+typedef void(AbortHandlerT)();
+
+inline AbortHandlerT*& GetAbortHandler() {
+ static AbortHandlerT* handler = &std::abort;
+ return handler;
+}
+
+BENCHMARK_NORETURN inline void CallAbortHandler() {
+ GetAbortHandler()();
+ std::abort(); // fallback to enforce noreturn
+}
+
+// CheckHandler is the class constructed by failing CHECK macros. CheckHandler
+// will log information about the failures and abort when it is destructed.
+class CheckHandler {
+ public:
+ CheckHandler(const char* check, const char* file, const char* func, int line)
+ : log_(GetErrorLogInstance()) {
+ log_ << file << ":" << line << ": " << func << ": Check `" << check
+ << "' failed. ";
+ }
+
+ LogType& GetLog() { return log_; }
+
+ BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) {
+ log_ << std::endl;
+ CallAbortHandler();
+ }
+
+ CheckHandler& operator=(const CheckHandler&) = delete;
+ CheckHandler(const CheckHandler&) = delete;
+ CheckHandler() = delete;
+
+ private:
+ LogType& log_;
+};
+
+} // end namespace internal
+} // end namespace benchmark
+
+// The CHECK macro returns a std::ostream object that can have extra information
+// written to it.
+#ifndef NDEBUG
+#define CHECK(b) \
+ (b ? ::benchmark::internal::GetNullLogInstance() \
+ : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \
+ .GetLog())
+#else
+#define CHECK(b) ::benchmark::internal::GetNullLogInstance()
+#endif
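+
+// A usage sketch (illustrative only): the returned stream lets callers append
+// detail, e.g.
+//   CHECK_GE(argc, 1) << "argv must contain at least the program name";
+// In NDEBUG builds the null log is returned and the condition is not
+// evaluated at all.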
+
+// clang-format off
+// preserve whitespacing between operators for alignment
+#define CHECK_EQ(a, b) CHECK((a) == (b))
+#define CHECK_NE(a, b) CHECK((a) != (b))
+#define CHECK_GE(a, b) CHECK((a) >= (b))
+#define CHECK_LE(a, b) CHECK((a) <= (b))
+#define CHECK_GT(a, b) CHECK((a) > (b))
+#define CHECK_LT(a, b) CHECK((a) < (b))
+
+#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps))
+#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps))
+#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps))
+#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps))
+#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps))
+#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps))
+// clang-format on
+
+#endif // CHECK_H_
diff --git a/src/third_party/benchmark/dist/src/colorprint.cc b/src/third_party/benchmark/dist/src/colorprint.cc
new file mode 100644
index 00000000000..fff6a98818b
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/colorprint.cc
@@ -0,0 +1,188 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "colorprint.h"
+
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <memory>
+#include <string>
+
+#include "check.h"
+#include "internal_macros.h"
+
+#ifdef BENCHMARK_OS_WINDOWS
+#include <windows.h>
+#include <io.h>
+#else
+#include <unistd.h>
+#endif // BENCHMARK_OS_WINDOWS
+
+namespace benchmark {
+namespace {
+#ifdef BENCHMARK_OS_WINDOWS
+typedef WORD PlatformColorCode;
+#else
+typedef const char* PlatformColorCode;
+#endif
+
+PlatformColorCode GetPlatformColorCode(LogColor color) {
+#ifdef BENCHMARK_OS_WINDOWS
+ switch (color) {
+ case COLOR_RED:
+ return FOREGROUND_RED;
+ case COLOR_GREEN:
+ return FOREGROUND_GREEN;
+ case COLOR_YELLOW:
+ return FOREGROUND_RED | FOREGROUND_GREEN;
+ case COLOR_BLUE:
+ return FOREGROUND_BLUE;
+ case COLOR_MAGENTA:
+ return FOREGROUND_BLUE | FOREGROUND_RED;
+ case COLOR_CYAN:
+ return FOREGROUND_BLUE | FOREGROUND_GREEN;
+ case COLOR_WHITE: // fall through to default
+ default:
+ return 0;
+ }
+#else
+ switch (color) {
+ case COLOR_RED:
+ return "1";
+ case COLOR_GREEN:
+ return "2";
+ case COLOR_YELLOW:
+ return "3";
+ case COLOR_BLUE:
+ return "4";
+ case COLOR_MAGENTA:
+ return "5";
+ case COLOR_CYAN:
+ return "6";
+ case COLOR_WHITE:
+ return "7";
+ default:
+ return nullptr;
+ };
+#endif
+}
+
+} // end namespace
+
+std::string FormatString(const char* msg, va_list args) {
+  // We might need a second shot at this, so pre-emptively make a copy.
+ va_list args_cp;
+ va_copy(args_cp, args);
+
+ std::size_t size = 256;
+ char local_buff[256];
+ auto ret = vsnprintf(local_buff, size, msg, args_cp);
+
+ va_end(args_cp);
+
+  // Currently there is no error handling for failure, so this is a hack.
+ CHECK(ret >= 0);
+
+ if (ret == 0) // handle empty expansion
+ return {};
+ else if (static_cast<size_t>(ret) < size)
+ return local_buff;
+ else {
+ // we did not provide a long enough buffer on our first attempt.
+ size = (size_t)ret + 1; // + 1 for the null byte
+ std::unique_ptr<char[]> buff(new char[size]);
+ ret = vsnprintf(buff.get(), size, msg, args);
+ CHECK(ret > 0 && ((size_t)ret) < size);
+ return buff.get();
+ }
+}
+
+std::string FormatString(const char* msg, ...) {
+ va_list args;
+ va_start(args, msg);
+ auto tmp = FormatString(msg, args);
+ va_end(args);
+ return tmp;
+}
+
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ ColorPrintf(out, color, fmt, args);
+ va_end(args);
+}
+
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
+ va_list args) {
+#ifdef BENCHMARK_OS_WINDOWS
+ ((void)out); // suppress unused warning
+
+ const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
+
+ // Gets the current text color.
+ CONSOLE_SCREEN_BUFFER_INFO buffer_info;
+ GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
+ const WORD old_color_attrs = buffer_info.wAttributes;
+
+ // We need to flush the stream buffers into the console before each
+ // SetConsoleTextAttribute call lest it affect the text that is already
+ // printed but has not yet reached the console.
+ fflush(stdout);
+ SetConsoleTextAttribute(stdout_handle,
+ GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
+ vprintf(fmt, args);
+
+ fflush(stdout);
+ // Restores the text color.
+ SetConsoleTextAttribute(stdout_handle, old_color_attrs);
+#else
+ const char* color_code = GetPlatformColorCode(color);
+ if (color_code) out << FormatString("\033[0;3%sm", color_code);
+ out << FormatString(fmt, args) << "\033[m";
+#endif
+}
+
+bool IsColorTerminal() {
+#if BENCHMARK_OS_WINDOWS
+ // On Windows the TERM variable is usually not set, but the
+ // console there does support colors.
+ return 0 != _isatty(_fileno(stdout));
+#else
+ // On non-Windows platforms, we rely on the TERM variable. This list of
+ // supported TERM values is copied from Google Test:
+ // <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
+ const char* const SUPPORTED_TERM_VALUES[] = {
+ "xterm", "xterm-color", "xterm-256color",
+ "screen", "screen-256color", "tmux",
+ "tmux-256color", "rxvt-unicode", "rxvt-unicode-256color",
+ "linux", "cygwin",
+ };
+
+ const char* const term = getenv("TERM");
+
+ bool term_supports_color = false;
+ for (const char* candidate : SUPPORTED_TERM_VALUES) {
+ if (term && 0 == strcmp(term, candidate)) {
+ term_supports_color = true;
+ break;
+ }
+ }
+
+ return 0 != isatty(fileno(stdout)) && term_supports_color;
+#endif // BENCHMARK_OS_WINDOWS
+}
+
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/colorprint.h b/src/third_party/benchmark/dist/src/colorprint.h
new file mode 100644
index 00000000000..9f6fab9b342
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/colorprint.h
@@ -0,0 +1,33 @@
+#ifndef BENCHMARK_COLORPRINT_H_
+#define BENCHMARK_COLORPRINT_H_
+
+#include <cstdarg>
+#include <iostream>
+#include <string>
+
+namespace benchmark {
+enum LogColor {
+ COLOR_DEFAULT,
+ COLOR_RED,
+ COLOR_GREEN,
+ COLOR_YELLOW,
+ COLOR_BLUE,
+ COLOR_MAGENTA,
+ COLOR_CYAN,
+ COLOR_WHITE
+};
+
+std::string FormatString(const char* msg, va_list args);
+std::string FormatString(const char* msg, ...);
+
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
+ va_list args);
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...);
+
+// Returns true if stdout appears to be a terminal that supports colored
+// output, false otherwise.
+bool IsColorTerminal();
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_COLORPRINT_H_
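A usage sketch for this interface (PrintStatus is a hypothetical helper; console_reporter.cc below drives the same API for real):

    #include "colorprint.h"

    void PrintStatus(std::ostream& out, bool ok) {
      // Fall back to plain text when stdout is not a color-capable terminal.
      if (benchmark::IsColorTerminal()) {
        benchmark::ColorPrintf(out,
                               ok ? benchmark::COLOR_GREEN : benchmark::COLOR_RED,
                               "%-8s\n", ok ? "PASS" : "FAIL");
      } else {
        out << (ok ? "PASS\n" : "FAIL\n");
      }
    }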
diff --git a/src/third_party/benchmark/dist/src/commandlineflags.cc b/src/third_party/benchmark/dist/src/commandlineflags.cc
new file mode 100644
index 00000000000..6bd65c5ae70
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/commandlineflags.cc
@@ -0,0 +1,222 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "commandlineflags.h"
+
+#include <cctype>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <limits>
+
+namespace benchmark {
+namespace {
+
+// Parses 'str' for a 32-bit signed integer. If successful, writes
+// the result to *value and returns true; otherwise leaves *value
+// unchanged and returns false.
+bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
+  // Parses the string as a decimal integer.
+ char* end = nullptr;
+ const long long_value = strtol(str, &end, 10); // NOLINT
+
+ // Has strtol() consumed all characters in the string?
+ if (*end != '\0') {
+ // No - an invalid character was encountered.
+ std::cerr << src_text << " is expected to be a 32-bit integer, "
+ << "but actually has value \"" << str << "\".\n";
+ return false;
+ }
+
+ // Is the parsed value in the range of an Int32?
+ const int32_t result = static_cast<int32_t>(long_value);
+ if (long_value == std::numeric_limits<long>::max() ||
+ long_value == std::numeric_limits<long>::min() ||
+ // The parsed value overflows as a long. (strtol() returns
+ // LONG_MAX or LONG_MIN when the input overflows.)
+ result != long_value
+ // The parsed value overflows as an Int32.
+ ) {
+ std::cerr << src_text << " is expected to be a 32-bit integer, "
+ << "but actually has value \"" << str << "\", "
+ << "which overflows.\n";
+ return false;
+ }
+
+ *value = result;
+ return true;
+}
+
+// Parses 'str' for a double. If successful, writes the result to *value and
+// returns true; otherwise leaves *value unchanged and returns false.
+bool ParseDouble(const std::string& src_text, const char* str, double* value) {
+  // Parses the string as a double.
+ char* end = nullptr;
+ const double double_value = strtod(str, &end); // NOLINT
+
+  // Has strtod() consumed all characters in the string?
+ if (*end != '\0') {
+ // No - an invalid character was encountered.
+ std::cerr << src_text << " is expected to be a double, "
+ << "but actually has value \"" << str << "\".\n";
+ return false;
+ }
+
+ *value = double_value;
+ return true;
+}
+
+// Returns the name of the environment variable corresponding to the
+// given flag. For example, FlagToEnvVar("foo") will return
+// "BENCHMARK_FOO" in the open-source version.
+static std::string FlagToEnvVar(const char* flag) {
+ const std::string flag_str(flag);
+
+ std::string env_var;
+ for (size_t i = 0; i != flag_str.length(); ++i)
+ env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));
+
+ return "BENCHMARK_" + env_var;
+}
+
+} // namespace
+
+// Reads and returns the Boolean environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+//
+// The value is considered true iff it's not "0".
+bool BoolFromEnv(const char* flag, bool default_value) {
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const string_value = getenv(env_var.c_str());
+ return string_value == nullptr ? default_value
+ : strcmp(string_value, "0") != 0;
+}
+
+// Reads and returns a 32-bit integer stored in the environment
+// variable corresponding to the given flag; if it isn't set or
+// doesn't represent a valid 32-bit integer, returns default_value.
+int32_t Int32FromEnv(const char* flag, int32_t default_value) {
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const string_value = getenv(env_var.c_str());
+ if (string_value == nullptr) {
+ // The environment variable is not set.
+ return default_value;
+ }
+
+ int32_t result = default_value;
+ if (!ParseInt32(std::string("Environment variable ") + env_var, string_value,
+ &result)) {
+ std::cout << "The default value " << default_value << " is used.\n";
+ return default_value;
+ }
+
+ return result;
+}
+
+// Reads and returns the string environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+const char* StringFromEnv(const char* flag, const char* default_value) {
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const value = getenv(env_var.c_str());
+ return value == nullptr ? default_value : value;
+}
+
+// Parses a string as a command line flag. The string should have
+// the format "--flag=value". When def_optional is true, the "=value"
+// part can be omitted.
+//
+// Returns the value of the flag, or nullptr if the parsing failed.
+const char* ParseFlagValue(const char* str, const char* flag,
+ bool def_optional) {
+ // str and flag must not be nullptr.
+ if (str == nullptr || flag == nullptr) return nullptr;
+
+ // The flag must start with "--".
+ const std::string flag_str = std::string("--") + std::string(flag);
+ const size_t flag_len = flag_str.length();
+ if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr;
+
+ // Skips the flag name.
+ const char* flag_end = str + flag_len;
+
+ // When def_optional is true, it's OK to not have a "=value" part.
+ if (def_optional && (flag_end[0] == '\0')) return flag_end;
+
+ // If def_optional is true and there are more characters after the
+ // flag name, or if def_optional is false, there must be a '=' after
+ // the flag name.
+ if (flag_end[0] != '=') return nullptr;
+
+ // Returns the string after "=".
+ return flag_end + 1;
+}
+
+bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, true);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ // Converts the string value to a bool.
+ *value = IsTruthyFlagValue(value_str);
+ return true;
+}
+
+bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ // Sets *value to the value of the flag.
+ return ParseInt32(std::string("The value of flag --") + flag, value_str,
+ value);
+}
+
+bool ParseDoubleFlag(const char* str, const char* flag, double* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ // Sets *value to the value of the flag.
+ return ParseDouble(std::string("The value of flag --") + flag, value_str,
+ value);
+}
+
+bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ *value = value_str;
+ return true;
+}
+
+bool IsFlag(const char* str, const char* flag) {
+ return (ParseFlagValue(str, flag, true) != nullptr);
+}
+
+bool IsTruthyFlagValue(const std::string& value) {
+ if (value.empty()) return true;
+ char ch = value[0];
+ return isalnum(ch) &&
+ !(ch == '0' || ch == 'f' || ch == 'F' || ch == 'n' || ch == 'N');
+}
+} // end namespace benchmark
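The environment lookups compose FlagToEnvVar with the parsers above, so a flag named "color" is controlled by BENCHMARK_COLOR. An illustrative sketch (the flag names here are assumptions, not the library's real flags):

    #include "commandlineflags.h"

    void ReadEnvDefaults() {
      // BENCHMARK_COLOR=0 -> false; any other value -> true; unset -> default.
      bool color = benchmark::BoolFromEnv("color", true);
      // BENCHMARK_REPETITIONS=5 -> 5; unset or unparsable -> the default, 3.
      int32_t reps = benchmark::Int32FromEnv("repetitions", 3);
      (void)color;
      (void)reps;
    }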
diff --git a/src/third_party/benchmark/dist/src/commandlineflags.h b/src/third_party/benchmark/dist/src/commandlineflags.h
new file mode 100644
index 00000000000..5eaea82a59b
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/commandlineflags.h
@@ -0,0 +1,73 @@
+#ifndef BENCHMARK_COMMANDLINEFLAGS_H_
+#define BENCHMARK_COMMANDLINEFLAGS_H_
+
+#include <cstdint>
+#include <string>
+
+// Macro for referencing flags.
+#define FLAG(name) FLAGS_##name
+
+// Macros for declaring flags.
+#define DECLARE_bool(name) extern bool FLAG(name)
+#define DECLARE_int32(name) extern int32_t FLAG(name)
+#define DECLARE_int64(name) extern int64_t FLAG(name)
+#define DECLARE_double(name) extern double FLAG(name)
+#define DECLARE_string(name) extern std::string FLAG(name)
+
+// Macros for defining flags.
+#define DEFINE_bool(name, default_val, doc) bool FLAG(name) = (default_val)
+#define DEFINE_int32(name, default_val, doc) int32_t FLAG(name) = (default_val)
+#define DEFINE_int64(name, default_val, doc) int64_t FLAG(name) = (default_val)
+#define DEFINE_double(name, default_val, doc) double FLAG(name) = (default_val)
+#define DEFINE_string(name, default_val, doc) \
+ std::string FLAG(name) = (default_val)
+
+namespace benchmark {
+// Parses a bool/Int32/string from the environment variable
+// corresponding to the given Google Test flag.
+bool BoolFromEnv(const char* flag, bool default_val);
+int32_t Int32FromEnv(const char* flag, int32_t default_val);
+const char* StringFromEnv(const char* flag, const char* default_val);
+
+// Parses a string for a bool flag, in the form of either
+// "--flag=value" or "--flag".
+//
+// In the former case, the value is taken as true if it passes
+// IsTruthyFlagValue().
+//
+// In the latter case, the value is taken as true.
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseBoolFlag(const char* str, const char* flag, bool* value);
+
+// Parses a string for an Int32 flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);
+
+// Parses a string for a Double flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseDoubleFlag(const char* str, const char* flag, double* value);
+
+// Parses a string for a string flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseStringFlag(const char* str, const char* flag, std::string* value);
+
+// Returns true if the string matches the flag.
+bool IsFlag(const char* str, const char* flag);
+
+// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
+// some non-alphanumeric character. As a special case, also returns true if
+// value is the empty string.
+bool IsTruthyFlagValue(const std::string& value);
+} // end namespace benchmark
+
+#endif // BENCHMARK_COMMANDLINEFLAGS_H_
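Putting the header together, a hedged sketch of the define-then-parse flow it supports (the flag name and argv loop are illustrative; the real wiring lives in benchmark.cc):

    #include "commandlineflags.h"

    DEFINE_int32(repetitions, 1, "repetitions per benchmark");

    void ParseArgs(int argc, char** argv) {
      for (int i = 1; i < argc; ++i) {
        // Accepts "--repetitions=5"; returns false and leaves the flag
        // untouched for any argument that does not match.
        benchmark::ParseInt32Flag(argv[i], "repetitions", &FLAG(repetitions));
      }
    }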
diff --git a/src/third_party/benchmark/dist/src/complexity.cc b/src/third_party/benchmark/dist/src/complexity.cc
new file mode 100644
index 00000000000..aeed67f0c70
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/complexity.cc
@@ -0,0 +1,238 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Source project : https://github.com/ismaelJimenez/cpp.leastsq
+// Adapted to be used with google benchmark
+
+#include "benchmark/benchmark.h"
+
+#include <algorithm>
+#include <cmath>
+#include "check.h"
+#include "complexity.h"
+
+namespace benchmark {
+
+// Internal function to calculate the different scalability forms
+BigOFunc* FittingCurve(BigO complexity) {
+ static const double kLog2E = 1.44269504088896340736;
+ switch (complexity) {
+ case oN:
+ return [](IterationCount n) -> double { return static_cast<double>(n); };
+ case oNSquared:
+ return [](IterationCount n) -> double { return std::pow(n, 2); };
+ case oNCubed:
+ return [](IterationCount n) -> double { return std::pow(n, 3); };
+ case oLogN:
+ /* Note: can't use log2 because Android's GNU STL lacks it */
+ return
+ [](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
+ case oNLogN:
+ /* Note: can't use log2 because Android's GNU STL lacks it */
+ return [](IterationCount n) {
+ return kLog2E * n * log(static_cast<double>(n));
+ };
+ case o1:
+ default:
+ return [](IterationCount) { return 1.0; };
+ }
+}
+
+// Returns a string for the calculated complexity
+std::string GetBigOString(BigO complexity) {
+ switch (complexity) {
+ case oN:
+ return "N";
+ case oNSquared:
+ return "N^2";
+ case oNCubed:
+ return "N^3";
+ case oLogN:
+ return "lgN";
+ case oNLogN:
+ return "NlgN";
+ case o1:
+ return "(1)";
+ default:
+ return "f(N)";
+ }
+}
+
+// Find the coefficient for the high-order term in the running time, by
+// minimizing the sum of squares of relative error, for the fitting curve
+// given by the lambda expression.
+// - n : Vector containing the size of the benchmark tests.
+// - time : Vector containing the times for the benchmark tests.
+// - fitting_curve : lambda expression (e.g. [](int64_t n) { return n; }).
+
+// For a deeper explanation on the algorithm logic, please refer to
+// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
+
+LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
+ const std::vector<double>& time,
+ BigOFunc* fitting_curve) {
+ double sigma_gn = 0.0;
+ double sigma_gn_squared = 0.0;
+ double sigma_time = 0.0;
+ double sigma_time_gn = 0.0;
+
+ // Calculate least square fitting parameter
+ for (size_t i = 0; i < n.size(); ++i) {
+ double gn_i = fitting_curve(n[i]);
+ sigma_gn += gn_i;
+ sigma_gn_squared += gn_i * gn_i;
+ sigma_time += time[i];
+ sigma_time_gn += time[i] * gn_i;
+ }
+
+ LeastSq result;
+ result.complexity = oLambda;
+
+ // Calculate complexity.
+ result.coef = sigma_time_gn / sigma_gn_squared;
+
+ // Calculate RMS
+ double rms = 0.0;
+ for (size_t i = 0; i < n.size(); ++i) {
+ double fit = result.coef * fitting_curve(n[i]);
+ rms += pow((time[i] - fit), 2);
+ }
+
+ // Normalized RMS by the mean of the observed values
+ double mean = sigma_time / n.size();
+ result.rms = sqrt(rms / n.size()) / mean;
+
+ return result;
+}
+
+// Find the coefficient for the high-order term in the running time, by
+// minimizing the sum of squares of relative error.
+// - n : Vector containing the size of the benchmark tests.
+// - time : Vector containing the times for the benchmark tests.
+// - complexity : If different from oAuto, the fitting curve will stick to
+//                this one. If it is oAuto, the best-fitting curve will be
+//                calculated.
+LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
+ const std::vector<double>& time, const BigO complexity) {
+ CHECK_EQ(n.size(), time.size());
+  CHECK_GE(n.size(), 2); // Do not compute a fitting curve if fewer than
+                         // two benchmark runs are given
+ CHECK_NE(complexity, oNone);
+
+ LeastSq best_fit;
+
+ if (complexity == oAuto) {
+ std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
+
+ // Take o1 as default best fitting curve
+ best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
+ best_fit.complexity = o1;
+
+ // Compute all possible fitting curves and stick to the best one
+ for (const auto& fit : fit_curves) {
+ LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
+ if (current_fit.rms < best_fit.rms) {
+ best_fit = current_fit;
+ best_fit.complexity = fit;
+ }
+ }
+ } else {
+ best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
+ best_fit.complexity = complexity;
+ }
+
+ return best_fit;
+}
+
+std::vector<BenchmarkReporter::Run> ComputeBigO(
+ const std::vector<BenchmarkReporter::Run>& reports) {
+ typedef BenchmarkReporter::Run Run;
+ std::vector<Run> results;
+
+ if (reports.size() < 2) return results;
+
+ // Accumulators.
+ std::vector<int64_t> n;
+ std::vector<double> real_time;
+ std::vector<double> cpu_time;
+
+ // Populate the accumulators.
+ for (const Run& run : reports) {
+ CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
+ n.push_back(run.complexity_n);
+ real_time.push_back(run.real_accumulated_time / run.iterations);
+ cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
+ }
+
+ LeastSq result_cpu;
+ LeastSq result_real;
+
+ if (reports[0].complexity == oLambda) {
+ result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
+ result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
+ } else {
+ result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
+ result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
+ }
+
+ // Drop the 'args' when reporting complexity.
+ auto run_name = reports[0].run_name;
+ run_name.args.clear();
+
+ // Get the data from the accumulator to BenchmarkReporter::Run's.
+ Run big_o;
+ big_o.run_name = run_name;
+ big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
+ big_o.repetitions = reports[0].repetitions;
+ big_o.repetition_index = Run::no_repetition_index;
+ big_o.threads = reports[0].threads;
+ big_o.aggregate_name = "BigO";
+ big_o.report_label = reports[0].report_label;
+ big_o.iterations = 0;
+ big_o.real_accumulated_time = result_real.coef;
+ big_o.cpu_accumulated_time = result_cpu.coef;
+ big_o.report_big_o = true;
+ big_o.complexity = result_cpu.complexity;
+
+ // All the time results are reported after being multiplied by the
+ // time unit multiplier. But since RMS is a relative quantity it
+ // should not be multiplied at all. So, here, we _divide_ it by the
+ // multiplier so that when it is multiplied later the result is the
+ // correct one.
+ double multiplier = GetTimeUnitMultiplier(reports[0].time_unit);
+
+  // Only add label to mean/stddev if it is the same for all runs
+ Run rms;
+ rms.run_name = run_name;
+ rms.run_type = BenchmarkReporter::Run::RT_Aggregate;
+ rms.aggregate_name = "RMS";
+ rms.report_label = big_o.report_label;
+ rms.iterations = 0;
+ rms.repetition_index = Run::no_repetition_index;
+ rms.repetitions = reports[0].repetitions;
+ rms.threads = reports[0].threads;
+ rms.real_accumulated_time = result_real.rms / multiplier;
+ rms.cpu_accumulated_time = result_cpu.rms / multiplier;
+ rms.report_rms = true;
+ rms.complexity = result_cpu.complexity;
+ // don't forget to keep the time unit, or we won't be able to
+ // recover the correct value.
+ rms.time_unit = reports[0].time_unit;
+
+ results.push_back(big_o);
+ results.push_back(rms);
+ return results;
+}
+
+} // end namespace benchmark
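For reference, MinimalLeastSq above implements the closed-form minimizer of sum_i (time_i - coef * g(n_i))^2, namely coef = sum_i time_i * g(n_i) / sum_i g(n_i)^2. A sketch of the user-facing code that feeds this machinery (standard benchmark API; the workload is illustrative):

    #include <vector>
    #include "benchmark/benchmark.h"

    static void BM_PushBack(benchmark::State& state) {
      for (auto _ : state) {
        std::vector<int> v;
        for (int64_t i = 0; i < state.range(0); ++i) v.push_back(0);
      }
      // Feeds Run::complexity_n, which ComputeBigO checks with CHECK_GT above.
      state.SetComplexityN(state.range(0));
    }
    // oAuto makes MinimalLeastSq try each candidate curve and keep the best RMS.
    BENCHMARK(BM_PushBack)->Range(8, 8 << 10)->Complexity(benchmark::oAuto);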
diff --git a/src/third_party/benchmark/dist/src/complexity.h b/src/third_party/benchmark/dist/src/complexity.h
new file mode 100644
index 00000000000..df29b48d29b
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/complexity.h
@@ -0,0 +1,55 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Source project : https://github.com/ismaelJimenez/cpp.leastsq
+// Adapted to be used with google benchmark
+
+#ifndef COMPLEXITY_H_
+#define COMPLEXITY_H_
+
+#include <string>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+
+// Return a vector containing the bigO and RMS information for the specified
+// list of reports. If 'reports.size() < 2' an empty vector is returned.
+std::vector<BenchmarkReporter::Run> ComputeBigO(
+ const std::vector<BenchmarkReporter::Run>& reports);
+
+// This data structure will contain the result returned by MinimalLeastSq
+// - coef : Estimated coefficient for the high-order term as
+// interpolated from data.
+// - rms : Normalized Root Mean Squared Error.
+// - complexity : Scalability form (e.g. oN, oNLogN). In case a scalability
+// form has been provided to MinimalLeastSq this will return
+// the same value. In case BigO::oAuto has been selected, this
+// parameter will return the best fitting curve detected.
+
+struct LeastSq {
+ LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {}
+
+ double coef;
+ double rms;
+ BigO complexity;
+};
+
+// Returns a string for the calculated complexity
+std::string GetBigOString(BigO complexity);
+
+} // end namespace benchmark
+
+#endif // COMPLEXITY_H_
diff --git a/src/third_party/benchmark/dist/src/console_reporter.cc b/src/third_party/benchmark/dist/src/console_reporter.cc
new file mode 100644
index 00000000000..cc8ae276f6b
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/console_reporter.cc
@@ -0,0 +1,179 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "complexity.h"
+#include "counter.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstdio>
+#include <iostream>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "check.h"
+#include "colorprint.h"
+#include "commandlineflags.h"
+#include "internal_macros.h"
+#include "string_util.h"
+#include "timers.h"
+
+namespace benchmark {
+
+bool ConsoleReporter::ReportContext(const Context& context) {
+ name_field_width_ = context.name_field_width;
+ printed_header_ = false;
+ prev_counters_.clear();
+
+ PrintBasicContext(&GetErrorStream(), context);
+
+#ifdef BENCHMARK_OS_WINDOWS
+ if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) {
+ GetErrorStream()
+ << "Color printing is only supported for stdout on windows."
+ " Disabling color printing\n";
+ output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color);
+ }
+#endif
+
+ return true;
+}
+
+void ConsoleReporter::PrintHeader(const Run& run) {
+ std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_),
+ "Benchmark", "Time", "CPU", "Iterations");
+ if(!run.counters.empty()) {
+ if(output_options_ & OO_Tabular) {
+ for(auto const& c : run.counters) {
+ str += FormatString(" %10s", c.first.c_str());
+ }
+ } else {
+ str += " UserCounters...";
+ }
+ }
+ std::string line = std::string(str.length(), '-');
+ GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
+}
+
+void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
+ for (const auto& run : reports) {
+ // print the header:
+ // --- if none was printed yet
+ bool print_header = !printed_header_;
+ // --- or if the format is tabular and this run
+ // has different fields from the prev header
+ print_header |= (output_options_ & OO_Tabular) &&
+ (!internal::SameNames(run.counters, prev_counters_));
+ if (print_header) {
+ printed_header_ = true;
+ prev_counters_ = run.counters;
+ PrintHeader(run);
+ }
+ // As an alternative to printing the headers like this, we could sort
+ // the benchmarks by header and then print. But this would require
+ // waiting for the full results before printing, or printing twice.
+ PrintRunData(run);
+ }
+}
+
+static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt,
+ ...) {
+ va_list args;
+ va_start(args, fmt);
+ out << FormatString(fmt, args);
+ va_end(args);
+}
+
+
+static std::string FormatTime(double time) {
+ // Align decimal places...
+ if (time < 1.0) {
+ return FormatString("%10.3f", time);
+ }
+ if (time < 10.0) {
+ return FormatString("%10.2f", time);
+ }
+ if (time < 100.0) {
+ return FormatString("%10.1f", time);
+ }
+ return FormatString("%10.0f", time);
+}
+
+void ConsoleReporter::PrintRunData(const Run& result) {
+ typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
+ auto& Out = GetOutputStream();
+ PrinterFn* printer = (output_options_ & OO_Color) ?
+ (PrinterFn*)ColorPrintf : IgnoreColorPrint;
+ auto name_color =
+ (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
+ printer(Out, name_color, "%-*s ", name_field_width_,
+ result.benchmark_name().c_str());
+
+ if (result.error_occurred) {
+ printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
+ result.error_message.c_str());
+ printer(Out, COLOR_DEFAULT, "\n");
+ return;
+ }
+
+ const double real_time = result.GetAdjustedRealTime();
+ const double cpu_time = result.GetAdjustedCPUTime();
+ const std::string real_time_str = FormatTime(real_time);
+ const std::string cpu_time_str = FormatTime(cpu_time);
+
+
+ if (result.report_big_o) {
+ std::string big_o = GetBigOString(result.complexity);
+ printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(),
+ cpu_time, big_o.c_str());
+ } else if (result.report_rms) {
+ printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%",
+ cpu_time * 100, "%");
+ } else {
+ const char* timeLabel = GetTimeUnitString(result.time_unit);
+ printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel,
+ cpu_time_str.c_str(), timeLabel);
+ }
+
+ if (!result.report_big_o && !result.report_rms) {
+ printer(Out, COLOR_CYAN, "%10lld", result.iterations);
+ }
+
+ for (auto& c : result.counters) {
+ const std::size_t cNameLen = std::max(std::string::size_type(10),
+ c.first.length());
+ auto const& s = HumanReadableNumber(c.second.value, c.second.oneK);
+ if (output_options_ & OO_Tabular) {
+ if (c.second.flags & Counter::kIsRate) {
+ printer(Out, COLOR_DEFAULT, " %*s/s", cNameLen - 2, s.c_str());
+ } else {
+ printer(Out, COLOR_DEFAULT, " %*s", cNameLen, s.c_str());
+ }
+ } else {
+ const char* unit = (c.second.flags & Counter::kIsRate) ? "/s" : "";
+ printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(),
+ unit);
+ }
+ }
+
+ if (!result.report_label.empty()) {
+ printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
+ }
+
+ printer(Out, COLOR_DEFAULT, "\n");
+}
+
+} // end namespace benchmark
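FormatTime trades fractional precision for magnitude so the decimal points line up in a ten-character field; for illustration:

    // FormatTime(0.123)  -> "     0.123"   (%10.3f)
    // FormatTime(9.87)   -> "      9.87"   (%10.2f)
    // FormatTime(54.3)   -> "      54.3"   (%10.1f)
    // FormatTime(1234.0) -> "      1234"   (%10.0f)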
diff --git a/src/third_party/benchmark/dist/src/counter.cc b/src/third_party/benchmark/dist/src/counter.cc
new file mode 100644
index 00000000000..c248ea110bc
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/counter.cc
@@ -0,0 +1,76 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "counter.h"
+
+namespace benchmark {
+namespace internal {
+
+double Finish(Counter const& c, IterationCount iterations, double cpu_time,
+ double num_threads) {
+ double v = c.value;
+ if (c.flags & Counter::kIsRate) {
+ v /= cpu_time;
+ }
+ if (c.flags & Counter::kAvgThreads) {
+ v /= num_threads;
+ }
+ if (c.flags & Counter::kIsIterationInvariant) {
+ v *= iterations;
+ }
+ if (c.flags & Counter::kAvgIterations) {
+ v /= iterations;
+ }
+ return v;
+}
+
+void Finish(UserCounters* l, IterationCount iterations, double cpu_time,
+ double num_threads) {
+ for (auto& c : *l) {
+ c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
+ }
+}
+
+void Increment(UserCounters* l, UserCounters const& r) {
+ // add counters present in both or just in *l
+ for (auto& c : *l) {
+ auto it = r.find(c.first);
+ if (it != r.end()) {
+ c.second.value = c.second + it->second;
+ }
+ }
+ // add counters present in r, but not in *l
+ for (auto const& tc : r) {
+ auto it = l->find(tc.first);
+ if (it == l->end()) {
+ (*l)[tc.first] = tc.second;
+ }
+ }
+}
+
+bool SameNames(UserCounters const& l, UserCounters const& r) {
+ if (&l == &r) return true;
+ if (l.size() != r.size()) {
+ return false;
+ }
+ for (auto const& c : l) {
+ if (r.find(c.first) == r.end()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // end namespace internal
+} // end namespace benchmark
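A worked sketch of Finish()'s flag algebra, with the numbers assumed for illustration (value = 64, cpu_time = 2.0 s, iterations = 1000, num_threads = 4):

    benchmark::Counter c(64, benchmark::Counter::kIsRate);
    // Finish(c, 1000, 2.0, 4) -> 64 / 2.0 = 32 (a per-second rate).
    // Adding Counter::kAvgThreads would further divide by 4 -> 8;
    // Counter::kAvgIterations would divide by 1000 instead.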
diff --git a/src/third_party/benchmark/dist/src/counter.h b/src/third_party/benchmark/dist/src/counter.h
new file mode 100644
index 00000000000..1ad46d4940e
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/counter.h
@@ -0,0 +1,27 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+
+// these counter-related functions are hidden to reduce API surface.
+namespace internal {
+void Finish(UserCounters* l, IterationCount iterations, double time,
+ double num_threads);
+void Increment(UserCounters* l, UserCounters const& r);
+bool SameNames(UserCounters const& l, UserCounters const& r);
+} // end namespace internal
+
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/csv_reporter.cc b/src/third_party/benchmark/dist/src/csv_reporter.cc
new file mode 100644
index 00000000000..af2c18fc8a6
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/csv_reporter.cc
@@ -0,0 +1,154 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "complexity.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <iostream>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "check.h"
+#include "string_util.h"
+#include "timers.h"
+
+// File format reference: http://edoceo.com/utilitas/csv-file-format.
+
+namespace benchmark {
+
+namespace {
+std::vector<std::string> elements = {
+ "name", "iterations", "real_time", "cpu_time",
+ "time_unit", "bytes_per_second", "items_per_second", "label",
+ "error_occurred", "error_message"};
+} // namespace
+
+std::string CsvEscape(const std::string & s) {
+ std::string tmp;
+ tmp.reserve(s.size() + 2);
+ for (char c : s) {
+ switch (c) {
+ case '"' : tmp += "\"\""; break;
+ default : tmp += c; break;
+ }
+ }
+ return '"' + tmp + '"';
+}
+
+bool CSVReporter::ReportContext(const Context& context) {
+ PrintBasicContext(&GetErrorStream(), context);
+ return true;
+}
+
+void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
+ std::ostream& Out = GetOutputStream();
+
+ if (!printed_header_) {
+ // save the names of all the user counters
+ for (const auto& run : reports) {
+ for (const auto& cnt : run.counters) {
+ if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
+ continue;
+ user_counter_names_.insert(cnt.first);
+ }
+ }
+
+ // print the header
+ for (auto B = elements.begin(); B != elements.end();) {
+ Out << *B++;
+ if (B != elements.end()) Out << ",";
+ }
+ for (auto B = user_counter_names_.begin();
+ B != user_counter_names_.end();) {
+ Out << ",\"" << *B++ << "\"";
+ }
+ Out << "\n";
+
+ printed_header_ = true;
+ } else {
+ // check that all the current counters are saved in the name set
+ for (const auto& run : reports) {
+ for (const auto& cnt : run.counters) {
+ if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
+ continue;
+ CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
+ << "All counters must be present in each run. "
+ << "Counter named \"" << cnt.first
+ << "\" was not in a run after being added to the header";
+ }
+ }
+ }
+
+ // print results for each run
+ for (const auto& run : reports) {
+ PrintRunData(run);
+ }
+}
+
+void CSVReporter::PrintRunData(const Run& run) {
+ std::ostream& Out = GetOutputStream();
+ Out << CsvEscape(run.benchmark_name()) << ",";
+ if (run.error_occurred) {
+ Out << std::string(elements.size() - 3, ',');
+ Out << "true,";
+ Out << CsvEscape(run.error_message) << "\n";
+ return;
+ }
+
+  // Do not print iterations on bigO and RMS reports
+ if (!run.report_big_o && !run.report_rms) {
+ Out << run.iterations;
+ }
+ Out << ",";
+
+ Out << run.GetAdjustedRealTime() << ",";
+ Out << run.GetAdjustedCPUTime() << ",";
+
+  // Do not print the time label on bigO and RMS reports
+ if (run.report_big_o) {
+ Out << GetBigOString(run.complexity);
+ } else if (!run.report_rms) {
+ Out << GetTimeUnitString(run.time_unit);
+ }
+ Out << ",";
+
+ if (run.counters.find("bytes_per_second") != run.counters.end()) {
+ Out << run.counters.at("bytes_per_second");
+ }
+ Out << ",";
+ if (run.counters.find("items_per_second") != run.counters.end()) {
+ Out << run.counters.at("items_per_second");
+ }
+ Out << ",";
+ if (!run.report_label.empty()) {
+ Out << CsvEscape(run.report_label);
+ }
+ Out << ",,"; // for error_occurred and error_message
+
+ // Print user counters
+ for (const auto& ucn : user_counter_names_) {
+ auto it = run.counters.find(ucn);
+ if (it == run.counters.end()) {
+ Out << ",";
+ } else {
+ Out << "," << it->second;
+ }
+ }
+ Out << '\n';
+}
+
+} // end namespace benchmark
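CsvEscape always quotes the field and doubles any embedded quotes, per the usual CSV convention; for illustration (input shown unquoted, output as written to the stream):

    // CsvEscape(plain)    -> "plain"
    // CsvEscape(say "hi") -> "say ""hi"""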
diff --git a/src/third_party/benchmark/dist/src/cycleclock.h b/src/third_party/benchmark/dist/src/cycleclock.h
new file mode 100644
index 00000000000..f5e37b011b9
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/cycleclock.h
@@ -0,0 +1,177 @@
+// ----------------------------------------------------------------------
+// CycleClock
+// A CycleClock tells you the current time in Cycles. The "time"
+// is actually time since power-on. This is like time() but doesn't
+// involve a system call and is much more precise.
+//
+// NOTE: Not all cpu/platform/kernel combinations guarantee that this
+// clock increments at a constant rate or is synchronized across all logical
+// cpus in a system.
+//
+// If you need the above guarantees, please consider using a different
+// API. There are efforts to provide an interface with millisecond
+// granularity, implemented as a memory read. A memory read is generally
+// cheaper than the CycleClock for many architectures.
+//
+// Also, in some out of order CPU implementations, the CycleClock is not
+// serializing. So if you're trying to count at cycles granularity, your
+// data might be inaccurate due to out of order instruction execution.
+// ----------------------------------------------------------------------
+
+#ifndef BENCHMARK_CYCLECLOCK_H_
+#define BENCHMARK_CYCLECLOCK_H_
+
+#include <cstdint>
+
+#include "benchmark/benchmark.h"
+#include "internal_macros.h"
+
+#if defined(BENCHMARK_OS_MACOSX)
+#include <mach/mach_time.h>
+#endif
+// For MSVC, we want to use '_asm rdtsc' when possible (since it works
+// with even ancient MSVC compilers), and when not possible the
+// __rdtsc intrinsic, declared in <intrin.h>. Unfortunately, in some
+// environments, <windows.h> and <intrin.h> have conflicting
+// declarations of some other intrinsics, breaking compilation.
+// Therefore, we simply declare __rdtsc ourselves. See also
+// http://connect.microsoft.com/VisualStudio/feedback/details/262047
+#if defined(COMPILER_MSVC) && !defined(_M_IX86)
+extern "C" uint64_t __rdtsc();
+#pragma intrinsic(__rdtsc)
+#endif
+
+#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
+#include <sys/time.h>
+#include <time.h>
+#endif
+
+#ifdef BENCHMARK_OS_EMSCRIPTEN
+#include <emscripten.h>
+#endif
+
+namespace benchmark {
+// NOTE: only i386 and x86_64 have been well tested.
+// PPC, sparc, alpha, and ia64 are based on
+// http://peter.kuscsik.com/wordpress/?p=14
+// with modifications by m3b. See also
+// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
+namespace cycleclock {
+// This should return the number of cycles since power-on. Thread-safe.
+inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
+#if defined(BENCHMARK_OS_MACOSX)
+ // this goes at the top because we need ALL Macs, regardless of
+ // architecture, to return the number of "mach time units" that
+ // have passed since startup. See sysinfo.cc where
+ // InitializeSystemInfo() sets the supposed cpu clock frequency of
+ // macs to the number of mach time units per second, not actual
+ // CPU clock frequency (which can change in the face of CPU
+ // frequency scaling). Also note that when the Mac sleeps, this
+ // counter pauses; it does not continue counting, nor does it
+ // reset to zero.
+ return mach_absolute_time();
+#elif defined(BENCHMARK_OS_EMSCRIPTEN)
+ // this goes above x86-specific code because old versions of Emscripten
+ // define __x86_64__, although they have nothing to do with it.
+ return static_cast<int64_t>(emscripten_get_now() * 1e+6);
+#elif defined(__i386__)
+ int64_t ret;
+ __asm__ volatile("rdtsc" : "=A"(ret));
+ return ret;
+#elif defined(__x86_64__) || defined(__amd64__)
+ uint64_t low, high;
+ __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+ return (high << 32) | low;
+#elif defined(__powerpc__) || defined(__ppc__)
+ // This returns a time-base, which is not always precisely a cycle-count.
+ int64_t tbl, tbu0, tbu1;
+ asm("mftbu %0" : "=r"(tbu0));
+ asm("mftb %0" : "=r"(tbl));
+ asm("mftbu %0" : "=r"(tbu1));
+ tbl &= -static_cast<int64_t>(tbu0 == tbu1);
+ // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is garbage)
+ return (tbu1 << 32) | tbl;
+#elif defined(__sparc__)
+ int64_t tick;
+ asm(".byte 0x83, 0x41, 0x00, 0x00");
+ asm("mov %%g1, %0" : "=r"(tick));
+ return tick;
+#elif defined(__ia64__)
+ int64_t itc;
+ asm("mov %0 = ar.itc" : "=r"(itc));
+ return itc;
+#elif defined(COMPILER_MSVC) && defined(_M_IX86)
+ // Older MSVC compilers (like 7.x) don't seem to support the
+ // __rdtsc intrinsic properly, so I prefer to use _asm instead
+ // when I know it will work. Otherwise, I'll use __rdtsc and hope
+ // the code is being compiled with a non-ancient compiler.
+ _asm rdtsc
+#elif defined(COMPILER_MSVC)
+ return __rdtsc();
+#elif defined(BENCHMARK_OS_NACL)
+ // Native Client validator on x86/x86-64 allows RDTSC instructions,
+ // and this case is handled above. Native Client validator on ARM
+ // rejects MRC instructions (used in the ARM-specific sequence below),
+ // so we handle it here. Portable Native Client compiles to
+ // architecture-agnostic bytecode, which doesn't provide any
+ // cycle counter access mnemonics.
+
+ // Native Client does not provide any API to access cycle counter.
+ // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
+  // because it provides nanosecond resolution (which is noticeable at
+ // least for PNaCl modules running on x86 Mac & Linux).
+ // Initialize to always return 0 if clock_gettime fails.
+ struct timespec ts = {0, 0};
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
+#elif defined(__aarch64__)
+ // System timer of ARMv8 runs at a different frequency than the CPU's.
+ // The frequency is fixed, typically in the range 1-50MHz. It can be
+ // read at CNTFRQ special register. We assume the OS has set up
+ // the virtual timer properly.
+ int64_t virtual_timer_value;
+ asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
+ return virtual_timer_value;
+#elif defined(__ARM_ARCH)
+ // V6 is the earliest arch that has a standard cyclecount
+ // Native Client validator doesn't allow MRC instructions.
+#if (__ARM_ARCH >= 6)
+ uint32_t pmccntr;
+ uint32_t pmuseren;
+ uint32_t pmcntenset;
+ // Read the user mode perf monitor counter access permissions.
+ asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
+ if (pmuseren & 1) { // Allows reading perfmon counters for user mode code.
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
+ if (pmcntenset & 0x80000000ul) { // Is it counting?
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
+ // The counter is set up to count every 64th cycle
+ return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6
+ }
+ }
+#endif
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+#elif defined(__mips__)
+ // mips apparently only allows rdtsc for superusers, so we fall
+ // back to gettimeofday. It's possible clock_gettime would be better.
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+#elif defined(__s390__) // Covers both s390 and s390x.
+ // Return the CPU clock.
+ uint64_t tsc;
+ asm("stck %0" : "=Q"(tsc) : : "cc");
+ return tsc;
+#else
+// The soft failover to a generic implementation is automatic only for ARM.
+// For other platforms the developer is expected to make an attempt to create
+// a fast implementation and use a generic version if nothing better is available.
+#error You need to define CycleTimer for your OS and CPU
+#endif
+}
+} // end namespace cycleclock
+} // end namespace benchmark
+
+#endif // BENCHMARK_CYCLECLOCK_H_
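A usage sketch (hypothetical caller): per the note at the top of this header, deltas from Now() are only meaningful on the same logical CPU and at a fixed clock frequency.

    #include "cycleclock.h"

    int64_t TicksFor(void (*fn)()) {
      const int64_t start = benchmark::cycleclock::Now();
      fn();  // workload under measurement
      return benchmark::cycleclock::Now() - start;  // cycles or time-base ticks
    }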
diff --git a/src/third_party/benchmark/dist/src/internal_macros.h b/src/third_party/benchmark/dist/src/internal_macros.h
new file mode 100644
index 00000000000..6adf00d0569
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/internal_macros.h
@@ -0,0 +1,94 @@
+#ifndef BENCHMARK_INTERNAL_MACROS_H_
+#define BENCHMARK_INTERNAL_MACROS_H_
+
+#include "benchmark/benchmark.h"
+
+/* Needed to detect STL */
+#include <cstdlib>
+
+// clang-format off
+
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+#if defined(__clang__)
+ #if !defined(COMPILER_CLANG)
+ #define COMPILER_CLANG
+ #endif
+#elif defined(_MSC_VER)
+ #if !defined(COMPILER_MSVC)
+ #define COMPILER_MSVC
+ #endif
+#elif defined(__GNUC__)
+ #if !defined(COMPILER_GCC)
+ #define COMPILER_GCC
+ #endif
+#endif
+
+#if __has_feature(cxx_attributes)
+ #define BENCHMARK_NORETURN [[noreturn]]
+#elif defined(__GNUC__)
+ #define BENCHMARK_NORETURN __attribute__((noreturn))
+#elif defined(COMPILER_MSVC)
+ #define BENCHMARK_NORETURN __declspec(noreturn)
+#else
+ #define BENCHMARK_NORETURN
+#endif
+
+#if defined(__CYGWIN__)
+ #define BENCHMARK_OS_CYGWIN 1
+#elif defined(_WIN32)
+ #define BENCHMARK_OS_WINDOWS 1
+ #if defined(__MINGW32__)
+ #define BENCHMARK_OS_MINGW 1
+ #endif
+#elif defined(__APPLE__)
+ #define BENCHMARK_OS_APPLE 1
+ #include "TargetConditionals.h"
+ #if defined(TARGET_OS_MAC)
+ #define BENCHMARK_OS_MACOSX 1
+ #if defined(TARGET_OS_IPHONE)
+ #define BENCHMARK_OS_IOS 1
+ #endif
+ #endif
+#elif defined(__FreeBSD__)
+ #define BENCHMARK_OS_FREEBSD 1
+#elif defined(__NetBSD__)
+ #define BENCHMARK_OS_NETBSD 1
+#elif defined(__OpenBSD__)
+ #define BENCHMARK_OS_OPENBSD 1
+#elif defined(__linux__)
+ #define BENCHMARK_OS_LINUX 1
+#elif defined(__native_client__)
+ #define BENCHMARK_OS_NACL 1
+#elif defined(__EMSCRIPTEN__)
+ #define BENCHMARK_OS_EMSCRIPTEN 1
+#elif defined(__rtems__)
+ #define BENCHMARK_OS_RTEMS 1
+#elif defined(__Fuchsia__)
+#define BENCHMARK_OS_FUCHSIA 1
+#elif defined (__SVR4) && defined (__sun)
+#define BENCHMARK_OS_SOLARIS 1
+#elif defined(__QNX__)
+#define BENCHMARK_OS_QNX 1
+#endif
+
+#if defined(__ANDROID__) && defined(__GLIBCXX__)
+#define BENCHMARK_STL_ANDROID_GNUSTL 1
+#endif
+
+#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \
+ && !defined(__EXCEPTIONS)
+ #define BENCHMARK_HAS_NO_EXCEPTIONS
+#endif
+
+#if defined(COMPILER_CLANG) || defined(COMPILER_GCC)
+ #define BENCHMARK_MAYBE_UNUSED __attribute__((unused))
+#else
+ #define BENCHMARK_MAYBE_UNUSED
+#endif
+
+// clang-format on
+
+#endif // BENCHMARK_INTERNAL_MACROS_H_
diff --git a/src/third_party/benchmark/dist/src/json_reporter.cc b/src/third_party/benchmark/dist/src/json_reporter.cc
new file mode 100644
index 00000000000..0495d96688c
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/json_reporter.cc
@@ -0,0 +1,265 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "complexity.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <iomanip> // for setprecision
+#include <iostream>
+#include <limits>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "string_util.h"
+#include "timers.h"
+
+namespace benchmark {
+
+namespace {
+
+std::string StrEscape(const std::string & s) {
+ std::string tmp;
+ tmp.reserve(s.size());
+ for (char c : s) {
+ switch (c) {
+ case '\b': tmp += "\\b"; break;
+ case '\f': tmp += "\\f"; break;
+ case '\n': tmp += "\\n"; break;
+ case '\r': tmp += "\\r"; break;
+ case '\t': tmp += "\\t"; break;
+ case '\\': tmp += "\\\\"; break;
+ case '"' : tmp += "\\\""; break;
+ default : tmp += c; break;
+ }
+ }
+ return tmp;
+}
+
+std::string FormatKV(std::string const& key, std::string const& value) {
+ return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
+}
+
+std::string FormatKV(std::string const& key, const char* value) {
+ return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
+}
+
+std::string FormatKV(std::string const& key, bool value) {
+ return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false");
+}
+
+std::string FormatKV(std::string const& key, int64_t value) {
+ std::stringstream ss;
+ ss << '"' << StrEscape(key) << "\": " << value;
+ return ss.str();
+}
+
+std::string FormatKV(std::string const& key, IterationCount value) {
+ std::stringstream ss;
+ ss << '"' << StrEscape(key) << "\": " << value;
+ return ss.str();
+}
+
+std::string FormatKV(std::string const& key, double value) {
+ std::stringstream ss;
+ ss << '"' << StrEscape(key) << "\": ";
+
+ if (std::isnan(value))
+ ss << (value < 0 ? "-" : "") << "NaN";
+ else if (std::isinf(value))
+ ss << (value < 0 ? "-" : "") << "Infinity";
+ else {
+ const auto max_digits10 =
+ std::numeric_limits<decltype(value)>::max_digits10;
+ const auto max_fractional_digits10 = max_digits10 - 1;
+ ss << std::scientific << std::setprecision(max_fractional_digits10)
+ << value;
+ }
+ return ss.str();
+}
+
+int64_t RoundDouble(double v) { return static_cast<int64_t>(v + 0.5); }
+
+} // end namespace
+
+bool JSONReporter::ReportContext(const Context& context) {
+ std::ostream& out = GetOutputStream();
+
+ out << "{\n";
+ std::string inner_indent(2, ' ');
+
+ // Open context block and print context information.
+ out << inner_indent << "\"context\": {\n";
+ std::string indent(4, ' ');
+
+ std::string walltime_value = LocalDateTimeString();
+ out << indent << FormatKV("date", walltime_value) << ",\n";
+
+ out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
+
+ if (Context::executable_name) {
+      // Windows uses backslash as its path separator, which must be escaped
+      // in JSON or it will break conforming JSON decoders.
+ std::string executable_name = Context::executable_name;
+ auto ReplaceAll = [](std::string* str, const std::string& from, const std::string& to) {
+ std::size_t start = 0;
+ while ((start = str->find(from, start)) != std::string::npos) {
+ str->replace(start, from.length(), to);
+ start += to.length();
+ }
+ };
+ ReplaceAll(&executable_name, "\\", "\\\\");
+ out << indent << FormatKV("executable", executable_name) << ",\n";
+ }
+
+ CPUInfo const& info = context.cpu_info;
+ out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus))
+ << ",\n";
+ out << indent
+ << FormatKV("mhz_per_cpu",
+ RoundDouble(info.cycles_per_second / 1000000.0))
+ << ",\n";
+ out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled)
+ << ",\n";
+
+ out << indent << "\"caches\": [\n";
+ indent = std::string(6, ' ');
+ std::string cache_indent(8, ' ');
+ for (size_t i = 0; i < info.caches.size(); ++i) {
+ auto& CI = info.caches[i];
+ out << indent << "{\n";
+ out << cache_indent << FormatKV("type", CI.type) << ",\n";
+ out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level))
+ << ",\n";
+ out << cache_indent
+ << FormatKV("size", static_cast<int64_t>(CI.size) * 1000u) << ",\n";
+ out << cache_indent
+ << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
+ << "\n";
+ out << indent << "}";
+ if (i != info.caches.size() - 1) out << ",";
+ out << "\n";
+ }
+ indent = std::string(4, ' ');
+ out << indent << "],\n";
+ out << indent << "\"load_avg\": [";
+ for (auto it = info.load_avg.begin(); it != info.load_avg.end();) {
+ out << *it++;
+ if (it != info.load_avg.end()) out << ",";
+ }
+ out << "],\n";
+
+#if defined(NDEBUG)
+ const char build_type[] = "release";
+#else
+ const char build_type[] = "debug";
+#endif
+ out << indent << FormatKV("library_build_type", build_type) << "\n";
+ // Close context block and open the list of benchmarks.
+ out << inner_indent << "},\n";
+ out << inner_indent << "\"benchmarks\": [\n";
+ return true;
+}
+
+void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
+ if (reports.empty()) {
+ return;
+ }
+ std::string indent(4, ' ');
+ std::ostream& out = GetOutputStream();
+ if (!first_report_) {
+ out << ",\n";
+ }
+ first_report_ = false;
+
+ for (auto it = reports.begin(); it != reports.end(); ++it) {
+ out << indent << "{\n";
+ PrintRunData(*it);
+ out << indent << '}';
+ auto it_cp = it;
+ if (++it_cp != reports.end()) {
+ out << ",\n";
+ }
+ }
+}
+
+void JSONReporter::Finalize() {
+ // Close the list of benchmarks and the top level object.
+ GetOutputStream() << "\n ]\n}\n";
+}
+
+void JSONReporter::PrintRunData(Run const& run) {
+ std::string indent(6, ' ');
+ std::ostream& out = GetOutputStream();
+ out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
+ out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
+ out << indent << FormatKV("run_type", [&run]() -> const char* {
+ switch (run.run_type) {
+ case BenchmarkReporter::Run::RT_Iteration:
+ return "iteration";
+ case BenchmarkReporter::Run::RT_Aggregate:
+ return "aggregate";
+ }
+ BENCHMARK_UNREACHABLE();
+ }()) << ",\n";
+ out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
+ if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) {
+ out << indent << FormatKV("repetition_index", run.repetition_index)
+ << ",\n";
+ }
+ out << indent << FormatKV("threads", run.threads) << ",\n";
+ if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
+ out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
+ }
+ if (run.error_occurred) {
+ out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
+ out << indent << FormatKV("error_message", run.error_message) << ",\n";
+ }
+ if (!run.report_big_o && !run.report_rms) {
+ out << indent << FormatKV("iterations", run.iterations) << ",\n";
+ out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n";
+ out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
+ out << ",\n"
+ << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
+ } else if (run.report_big_o) {
+ out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
+ << ",\n";
+ out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime())
+ << ",\n";
+ out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
+ out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
+ } else if (run.report_rms) {
+ out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
+ }
+
+ for (auto& c : run.counters) {
+ out << ",\n" << indent << FormatKV(c.first, c.second);
+ }
+
+ if (run.has_memory_result) {
+ out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
+ out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used);
+ }
+
+ if (!run.report_label.empty()) {
+ out << ",\n" << indent << FormatKV("label", run.report_label);
+ }
+ out << '\n';
+}
+
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/log.h b/src/third_party/benchmark/dist/src/log.h
new file mode 100644
index 00000000000..47d0c35c018
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/log.h
@@ -0,0 +1,74 @@
+#ifndef BENCHMARK_LOG_H_
+#define BENCHMARK_LOG_H_
+
+#include <iostream>
+#include <ostream>
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+namespace internal {
+
+typedef std::basic_ostream<char>&(EndLType)(std::basic_ostream<char>&);
+
+class LogType {
+ friend LogType& GetNullLogInstance();
+ friend LogType& GetErrorLogInstance();
+
+ // FIXME: Add locking to output.
+ template <class Tp>
+ friend LogType& operator<<(LogType&, Tp const&);
+ friend LogType& operator<<(LogType&, EndLType*);
+
+ private:
+ LogType(std::ostream* out) : out_(out) {}
+ std::ostream* out_;
+ BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
+};
+
+template <class Tp>
+LogType& operator<<(LogType& log, Tp const& value) {
+ if (log.out_) {
+ *log.out_ << value;
+ }
+ return log;
+}
+
+inline LogType& operator<<(LogType& log, EndLType* m) {
+ if (log.out_) {
+ *log.out_ << m;
+ }
+ return log;
+}
+
+inline int& LogLevel() {
+ static int log_level = 0;
+ return log_level;
+}
+
+inline LogType& GetNullLogInstance() {
+ static LogType log(nullptr);
+ return log;
+}
+
+inline LogType& GetErrorLogInstance() {
+ static LogType log(&std::clog);
+ return log;
+}
+
+inline LogType& GetLogInstanceForLevel(int level) {
+ if (level <= LogLevel()) {
+ return GetErrorLogInstance();
+ }
+ return GetNullLogInstance();
+}
+
+} // end namespace internal
+} // end namespace benchmark
+
+// clang-format off
+#define VLOG(x) \
+ (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \
+ " ")
+// clang-format on
+#endif
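+
+// A minimal usage sketch (illustrative only; in the library the level is
+// typically set from the --v flag). Messages above LogLevel() go to the
+// null log and are discarded:
+//
+//   benchmark::internal::LogLevel() = 2;
+//   VLOG(1) << "coarse progress\n";       // emitted to std::clog
+//   VLOG(3) << "per-iteration detail\n";  // discarded (3 > 2)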
diff --git a/src/third_party/benchmark/dist/src/mutex.h b/src/third_party/benchmark/dist/src/mutex.h
new file mode 100644
index 00000000000..5f461d05a0c
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/mutex.h
@@ -0,0 +1,155 @@
+#ifndef BENCHMARK_MUTEX_H_
+#define BENCHMARK_MUTEX_H_
+
+#include <condition_variable>
+#include <mutex>
+
+#include "check.h"
+
+// Enable thread safety attributes only with clang.
+// The attributes can be safely erased when compiling with other compilers.
+#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+#endif
+
+#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
+
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define REQUIRES(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
+
+#define REQUIRES_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
+
+#define ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
+
+#define ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
+
+#define RELEASE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
+
+#define RELEASE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
+
+#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
+
+#define ASSERT_SHARED_CAPABILITY(x) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
+
+#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+namespace benchmark {
+
+typedef std::condition_variable Condition;
+
+// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that
+// we can annotate them with thread safety attributes and use the
+// -Wthread-safety warning with clang. The standard library types cannot be
+// used directly because they do not provide the required annotations.
+class CAPABILITY("mutex") Mutex {
+ public:
+ Mutex() {}
+
+ void lock() ACQUIRE() { mut_.lock(); }
+ void unlock() RELEASE() { mut_.unlock(); }
+ std::mutex& native_handle() { return mut_; }
+
+ private:
+ std::mutex mut_;
+};
+
+class SCOPED_CAPABILITY MutexLock {
+ typedef std::unique_lock<std::mutex> MutexLockImp;
+
+ public:
+ MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {}
+ ~MutexLock() RELEASE() {}
+ MutexLockImp& native_handle() { return ml_; }
+
+ private:
+ MutexLockImp ml_;
+};
+
+class Barrier {
+ public:
+ Barrier(int num_threads) : running_threads_(num_threads) {}
+
+ // Called by each thread
+ bool wait() EXCLUDES(lock_) {
+ bool last_thread = false;
+ {
+ MutexLock ml(lock_);
+ last_thread = createBarrier(ml);
+ }
+ if (last_thread) phase_condition_.notify_all();
+ return last_thread;
+ }
+
+ void removeThread() EXCLUDES(lock_) {
+ MutexLock ml(lock_);
+ --running_threads_;
+ if (entered_ != 0) phase_condition_.notify_all();
+ }
+
+ private:
+ Mutex lock_;
+ Condition phase_condition_;
+ int running_threads_;
+
+ // State for barrier management
+ int phase_number_ = 0;
+ int entered_ = 0; // Number of threads that have entered this barrier
+
+ // Enter the barrier and wait until all other threads have also
+  // entered the barrier. Returns true iff this is the last thread to
+ // enter the barrier.
+ bool createBarrier(MutexLock& ml) REQUIRES(lock_) {
+ CHECK_LT(entered_, running_threads_);
+ entered_++;
+ if (entered_ < running_threads_) {
+ // Wait for all threads to enter
+ int phase_number_cp = phase_number_;
+ auto cb = [this, phase_number_cp]() {
+ return this->phase_number_ > phase_number_cp ||
+ entered_ == running_threads_; // A thread has aborted in error
+ };
+ phase_condition_.wait(ml.native_handle(), cb);
+ if (phase_number_ > phase_number_cp) return false;
+ // else (running_threads_ == entered_) and we are the last thread.
+ }
+ // Last thread has reached the barrier
+ phase_number_++;
+ entered_ = 0;
+ return true;
+ }
+};
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_MUTEX_H_
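+
+// A minimal usage sketch for Barrier (illustrative only; assumes <thread>
+// and a two-participant rendezvous). wait() blocks until all registered
+// threads have arrived and returns true in exactly one of them:
+//
+//   benchmark::Barrier barrier(2);
+//   std::thread worker([&] {
+//     barrier.wait();              // released together with the main thread
+//   });
+//   bool last = barrier.wait();    // true in whichever thread arrives last
+//   worker.join();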
diff --git a/src/third_party/benchmark/dist/src/re.h b/src/third_party/benchmark/dist/src/re.h
new file mode 100644
index 00000000000..fbe25037b46
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/re.h
@@ -0,0 +1,158 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef BENCHMARK_RE_H_
+#define BENCHMARK_RE_H_
+
+#include "internal_macros.h"
+
+// clang-format off
+
+#if !defined(HAVE_STD_REGEX) && \
+ !defined(HAVE_GNU_POSIX_REGEX) && \
+ !defined(HAVE_POSIX_REGEX)
+ // No explicit regex selection; detect based on builtin hints.
+ #if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE)
+ #define HAVE_POSIX_REGEX 1
+ #elif __cplusplus >= 199711L
+ #define HAVE_STD_REGEX 1
+ #endif
+#endif
+
+// Prefer C regex libraries when compiling w/o exceptions so that we can
+// correctly report errors.
+#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \
+    defined(HAVE_STD_REGEX) && \
+ (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX))
+ #undef HAVE_STD_REGEX
+#endif
+
+#if defined(HAVE_STD_REGEX)
+ #include <regex>
+#elif defined(HAVE_GNU_POSIX_REGEX)
+ #include <gnuregex.h>
+#elif defined(HAVE_POSIX_REGEX)
+ #include <regex.h>
+#else
+#error No regular expression backend was found!
+#endif
+
+// clang-format on
+
+#include <string>
+
+#include "check.h"
+
+namespace benchmark {
+
+// A wrapper around the POSIX regular expression API that provides automatic
+// cleanup
+class Regex {
+ public:
+ Regex() : init_(false) {}
+
+ ~Regex();
+
+ // Compile a regular expression matcher from spec. Returns true on success.
+ //
+  // On failure, if error is not nullptr, it is populated with a
+  // human-readable error message.
+ bool Init(const std::string& spec, std::string* error);
+
+ // Returns whether str matches the compiled regular expression.
+ bool Match(const std::string& str);
+
+ private:
+ bool init_;
+// Underlying regular expression object
+#if defined(HAVE_STD_REGEX)
+ std::regex re_;
+#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX)
+ regex_t re_;
+#else
+#error No regular expression backend implementation available
+#endif
+};
+
+#if defined(HAVE_STD_REGEX)
+
+inline bool Regex::Init(const std::string& spec, std::string* error) {
+#ifdef BENCHMARK_HAS_NO_EXCEPTIONS
+ ((void)error); // suppress unused warning
+#else
+ try {
+#endif
+ re_ = std::regex(spec, std::regex_constants::extended);
+ init_ = true;
+#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
+}
+catch (const std::regex_error& e) {
+ if (error) {
+ *error = e.what();
+ }
+}
+#endif
+return init_;
+}
+
+inline Regex::~Regex() {}
+
+inline bool Regex::Match(const std::string& str) {
+ if (!init_) {
+ return false;
+ }
+ return std::regex_search(str, re_);
+}
+
+#else
+inline bool Regex::Init(const std::string& spec, std::string* error) {
+ int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB);
+ if (ec != 0) {
+ if (error) {
+ size_t needed = regerror(ec, &re_, nullptr, 0);
+ char* errbuf = new char[needed];
+ regerror(ec, &re_, errbuf, needed);
+
+      // regerror returns the number of bytes necessary to null terminate
+      // the string, so we drop the terminator when assigning to error.
+ CHECK_NE(needed, 0);
+ error->assign(errbuf, needed - 1);
+
+ delete[] errbuf;
+ }
+
+ return false;
+ }
+
+ init_ = true;
+ return true;
+}
+
+inline Regex::~Regex() {
+ if (init_) {
+ regfree(&re_);
+ }
+}
+
+inline bool Regex::Match(const std::string& str) {
+ if (!init_) {
+ return false;
+ }
+ return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0;
+}
+#endif
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_RE_H_
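+
+// A minimal usage sketch (illustrative only). Init() compiles an extended
+// (POSIX ERE) pattern and reports failure through the out-parameter rather
+// than throwing when exceptions are disabled:
+//
+//   benchmark::Regex re;
+//   std::string err;
+//   if (re.Init("^BM_.*", &err)) {
+//     bool hit = re.Match("BM_StringCopy");  // true
+//   } else {
+//     std::cerr << "bad filter: " << err << "\n";
+//   }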
diff --git a/src/third_party/benchmark/dist/src/reporter.cc b/src/third_party/benchmark/dist/src/reporter.cc
new file mode 100644
index 00000000000..4d3e477d44a
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/reporter.cc
@@ -0,0 +1,105 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "timers.h"
+
+#include <cstdlib>
+
+#include <iostream>
+#include <tuple>
+#include <vector>
+
+#include "check.h"
+#include "string_util.h"
+
+namespace benchmark {
+
+BenchmarkReporter::BenchmarkReporter()
+ : output_stream_(&std::cout), error_stream_(&std::cerr) {}
+
+BenchmarkReporter::~BenchmarkReporter() {}
+
+void BenchmarkReporter::PrintBasicContext(std::ostream *out,
+ Context const &context) {
+ CHECK(out) << "cannot be null";
+ auto &Out = *out;
+
+ Out << LocalDateTimeString() << "\n";
+
+ if (context.executable_name)
+ Out << "Running " << context.executable_name << "\n";
+
+ const CPUInfo &info = context.cpu_info;
+ Out << "Run on (" << info.num_cpus << " X "
+ << (info.cycles_per_second / 1000000.0) << " MHz CPU "
+ << ((info.num_cpus > 1) ? "s" : "") << ")\n";
+ if (info.caches.size() != 0) {
+ Out << "CPU Caches:\n";
+ for (auto &CInfo : info.caches) {
+ Out << " L" << CInfo.level << " " << CInfo.type << " "
+ << (CInfo.size / 1000) << "K";
+ if (CInfo.num_sharing != 0)
+ Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
+ Out << "\n";
+ }
+ }
+ if (!info.load_avg.empty()) {
+ Out << "Load Average: ";
+ for (auto It = info.load_avg.begin(); It != info.load_avg.end();) {
+ Out << StrFormat("%.2f", *It++);
+ if (It != info.load_avg.end()) Out << ", ";
+ }
+ Out << "\n";
+ }
+
+ if (info.scaling_enabled) {
+ Out << "***WARNING*** CPU scaling is enabled, the benchmark "
+ "real time measurements may be noisy and will incur extra "
+ "overhead.\n";
+ }
+
+#ifndef NDEBUG
+ Out << "***WARNING*** Library was built as DEBUG. Timings may be "
+ "affected.\n";
+#endif
+}
+
+// No initializer because it's already initialized to NULL.
+const char *BenchmarkReporter::Context::executable_name;
+
+BenchmarkReporter::Context::Context()
+ : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {}
+
+std::string BenchmarkReporter::Run::benchmark_name() const {
+ std::string name = run_name.str();
+ if (run_type == RT_Aggregate) {
+ name += "_" + aggregate_name;
+ }
+ return name;
+}
+
+double BenchmarkReporter::Run::GetAdjustedRealTime() const {
+ double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
+ if (iterations != 0) new_time /= static_cast<double>(iterations);
+ return new_time;
+}
+
+double BenchmarkReporter::Run::GetAdjustedCPUTime() const {
+ double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
+ if (iterations != 0) new_time /= static_cast<double>(iterations);
+ return new_time;
+}
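+
+// Worked example (illustrative): a run that accumulated 0.5 s of CPU time
+// over 1000 iterations with time_unit == kMicrosecond reports
+// 0.5 * 1e6 / 1000 = 500 microseconds per iteration.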
+
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/sleep.cc b/src/third_party/benchmark/dist/src/sleep.cc
new file mode 100644
index 00000000000..1512ac90f7e
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/sleep.cc
@@ -0,0 +1,51 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "sleep.h"
+
+#include <cerrno>
+#include <cstdlib>
+#include <ctime>
+
+#include "internal_macros.h"
+
+#ifdef BENCHMARK_OS_WINDOWS
+#include <windows.h>
+#endif
+
+namespace benchmark {
+#ifdef BENCHMARK_OS_WINDOWS
+// Windows' Sleep takes a milliseconds argument.
+void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); }
+void SleepForSeconds(double seconds) {
+ SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
+}
+#else // BENCHMARK_OS_WINDOWS
+void SleepForMicroseconds(int microseconds) {
+ struct timespec sleep_time;
+ sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
+ sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
+ while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
+ ; // Ignore signals and wait for the full interval to elapse.
+}
+
+void SleepForMilliseconds(int milliseconds) {
+ SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
+}
+
+void SleepForSeconds(double seconds) {
+ SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
+}
+#endif // BENCHMARK_OS_WINDOWS
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/sleep.h b/src/third_party/benchmark/dist/src/sleep.h
new file mode 100644
index 00000000000..f98551afe28
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/sleep.h
@@ -0,0 +1,15 @@
+#ifndef BENCHMARK_SLEEP_H_
+#define BENCHMARK_SLEEP_H_
+
+namespace benchmark {
+const int kNumMillisPerSecond = 1000;
+const int kNumMicrosPerMilli = 1000;
+const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;
+const int kNumNanosPerMicro = 1000;
+const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond;
+
+void SleepForMilliseconds(int milliseconds);
+void SleepForSeconds(double seconds);
+} // end namespace benchmark
+
+#endif // BENCHMARK_SLEEP_H_
diff --git a/src/third_party/benchmark/dist/src/statistics.cc b/src/third_party/benchmark/dist/src/statistics.cc
new file mode 100644
index 00000000000..bd5a3d65972
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/statistics.cc
@@ -0,0 +1,193 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+// Copyright 2017 Roman Lebedev. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+#include <string>
+#include <vector>
+#include "check.h"
+#include "statistics.h"
+
+namespace benchmark {
+
+auto StatisticsSum = [](const std::vector<double>& v) {
+ return std::accumulate(v.begin(), v.end(), 0.0);
+};
+
+double StatisticsMean(const std::vector<double>& v) {
+ if (v.empty()) return 0.0;
+ return StatisticsSum(v) * (1.0 / v.size());
+}
+
+double StatisticsMedian(const std::vector<double>& v) {
+ if (v.size() < 3) return StatisticsMean(v);
+ std::vector<double> copy(v);
+
+ auto center = copy.begin() + v.size() / 2;
+ std::nth_element(copy.begin(), center, copy.end());
+
+  // Did we have an odd number of samples? If yes, then center is the median.
+  // If not, we need the average of center and the value before it.
+ if (v.size() % 2 == 1) return *center;
+ auto center2 = copy.begin() + v.size() / 2 - 1;
+ std::nth_element(copy.begin(), center2, copy.end());
+ return (*center + *center2) / 2.0;
+}
+
+// Return the sum of the squares of this sample set
+auto SumSquares = [](const std::vector<double>& v) {
+ return std::inner_product(v.begin(), v.end(), v.begin(), 0.0);
+};
+
+auto Sqr = [](const double dat) { return dat * dat; };
+auto Sqrt = [](const double dat) {
+ // Avoid NaN due to imprecision in the calculations
+ if (dat < 0.0) return 0.0;
+ return std::sqrt(dat);
+};
+
+double StatisticsStdDev(const std::vector<double>& v) {
+ const auto mean = StatisticsMean(v);
+ if (v.empty()) return mean;
+
+ // Sample standard deviation is undefined for n = 1
+ if (v.size() == 1) return 0.0;
+
+ const double avg_squares = SumSquares(v) * (1.0 / v.size());
+ return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
+}
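+
+// Worked example (illustrative): for v = {1, 2, 3, 4},
+//   StatisticsMean(v)   == 2.5
+//   StatisticsMedian(v) == 2.5   (average of the two middle elements)
+//   StatisticsStdDev(v) == sqrt(4/3 * (30/4 - 2.5^2)) ~= 1.29 (sample stddev)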
+
+std::vector<BenchmarkReporter::Run> ComputeStats(
+ const std::vector<BenchmarkReporter::Run>& reports) {
+ typedef BenchmarkReporter::Run Run;
+ std::vector<Run> results;
+
+ auto error_count =
+ std::count_if(reports.begin(), reports.end(),
+ [](Run const& run) { return run.error_occurred; });
+
+ if (reports.size() - error_count < 2) {
+ // We don't report aggregated data if there was a single run.
+ return results;
+ }
+
+ // Accumulators.
+ std::vector<double> real_accumulated_time_stat;
+ std::vector<double> cpu_accumulated_time_stat;
+
+ real_accumulated_time_stat.reserve(reports.size());
+ cpu_accumulated_time_stat.reserve(reports.size());
+
+ // All repetitions should be run with the same number of iterations so we
+ // can take this information from the first benchmark.
+ const IterationCount run_iterations = reports.front().iterations;
+ // create stats for user counters
+ struct CounterStat {
+ Counter c;
+ std::vector<double> s;
+ };
+ std::map<std::string, CounterStat> counter_stats;
+ for (Run const& r : reports) {
+ for (auto const& cnt : r.counters) {
+ auto it = counter_stats.find(cnt.first);
+ if (it == counter_stats.end()) {
+ counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
+ it = counter_stats.find(cnt.first);
+ it->second.s.reserve(reports.size());
+ } else {
+ CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
+ }
+ }
+ }
+
+ // Populate the accumulators.
+ for (Run const& run : reports) {
+ CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
+ CHECK_EQ(run_iterations, run.iterations);
+ if (run.error_occurred) continue;
+ real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
+ cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
+ // user counters
+ for (auto const& cnt : run.counters) {
+ auto it = counter_stats.find(cnt.first);
+ CHECK_NE(it, counter_stats.end());
+ it->second.s.emplace_back(cnt.second);
+ }
+ }
+
+ // Only add label if it is same for all runs
+ std::string report_label = reports[0].report_label;
+ for (std::size_t i = 1; i < reports.size(); i++) {
+ if (reports[i].report_label != report_label) {
+ report_label = "";
+ break;
+ }
+ }
+
+ const double iteration_rescale_factor =
+ double(reports.size()) / double(run_iterations);
+
+ for (const auto& Stat : *reports[0].statistics) {
+ // Get the data from the accumulator to BenchmarkReporter::Run's.
+ Run data;
+ data.run_name = reports[0].run_name;
+ data.run_type = BenchmarkReporter::Run::RT_Aggregate;
+ data.threads = reports[0].threads;
+ data.repetitions = reports[0].repetitions;
+ data.repetition_index = Run::no_repetition_index;
+ data.aggregate_name = Stat.name_;
+ data.report_label = report_label;
+
+ // It is incorrect to say that an aggregate is computed over
+ // run's iterations, because those iterations already got averaged.
+    // Similarly, if there are N repetitions with 1 iteration each,
+ // an aggregate will be computed over N measurements, not 1.
+ // Thus it is best to simply use the count of separate reports.
+ data.iterations = reports.size();
+
+ data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
+ data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
+
+ // We will divide these times by data.iterations when reporting, but the
+    // data.iterations is not necessarily the scale of these measurements,
+    // because in each repetition these timers are summed over all iterations.
+ // And if we want to say that the stats are over N repetitions and not
+ // M iterations, we need to multiply these by (N/M).
+ data.real_accumulated_time *= iteration_rescale_factor;
+ data.cpu_accumulated_time *= iteration_rescale_factor;
+
+ data.time_unit = reports[0].time_unit;
+
+ // user counters
+ for (auto const& kv : counter_stats) {
+ // Do *NOT* rescale the custom counters. They are already properly scaled.
+ const auto uc_stat = Stat.compute_(kv.second.s);
+ auto c = Counter(uc_stat, counter_stats[kv.first].c.flags,
+ counter_stats[kv.first].c.oneK);
+ data.counters[kv.first] = c;
+ }
+
+ results.push_back(data);
+ }
+
+ return results;
+}
+
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/statistics.h b/src/third_party/benchmark/dist/src/statistics.h
new file mode 100644
index 00000000000..7eccc85536a
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/statistics.h
@@ -0,0 +1,37 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+// Copyright 2017 Roman Lebedev. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef STATISTICS_H_
+#define STATISTICS_H_
+
+#include <vector>
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+
+// Return a vector containing the mean, median and standard deviation
+// (and any user-specified statistics) for the given list of reports. If
+// 'reports' contains fewer than two non-errored runs, an empty vector is
+// returned.
+std::vector<BenchmarkReporter::Run> ComputeStats(
+ const std::vector<BenchmarkReporter::Run>& reports);
+
+double StatisticsMean(const std::vector<double>& v);
+double StatisticsMedian(const std::vector<double>& v);
+double StatisticsStdDev(const std::vector<double>& v);
+
+} // end namespace benchmark
+
+#endif // STATISTICS_H_
diff --git a/src/third_party/benchmark/dist/src/string_util.cc b/src/third_party/benchmark/dist/src/string_util.cc
new file mode 100644
index 00000000000..39b01a1719a
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/string_util.cc
@@ -0,0 +1,252 @@
+#include "string_util.h"
+
+#include <array>
+#include <cmath>
+#include <cstdarg>
+#include <cstdio>
+#include <memory>
+#include <sstream>
+
+#include "arraysize.h"
+
+namespace benchmark {
+namespace {
+
+// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
+const char kBigSIUnits[] = "kMGTPEZY";
+// Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi.
+const char kBigIECUnits[] = "KMGTPEZY";
+// milli, micro, nano, pico, femto, atto, zepto, yocto.
+const char kSmallSIUnits[] = "munpfazy";
+
+// We require that all three arrays have the same size.
+static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits),
+ "SI and IEC unit arrays must be the same size");
+static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
+ "Small SI and Big SI unit arrays must be the same size");
+
+static const int64_t kUnitsSize = arraysize(kBigSIUnits);
+
+void ToExponentAndMantissa(double val, double thresh, int precision,
+ double one_k, std::string* mantissa,
+ int64_t* exponent) {
+ std::stringstream mantissa_stream;
+
+ if (val < 0) {
+ mantissa_stream << "-";
+ val = -val;
+ }
+
+ // Adjust threshold so that it never excludes things which can't be rendered
+ // in 'precision' digits.
+ const double adjusted_threshold =
+ std::max(thresh, 1.0 / std::pow(10.0, precision));
+ const double big_threshold = adjusted_threshold * one_k;
+ const double small_threshold = adjusted_threshold;
+  // Values in (simple_threshold, small_threshold) will be printed as-is.
+ const double simple_threshold = 0.01;
+
+ if (val > big_threshold) {
+ // Positive powers
+ double scaled = val;
+ for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) {
+ scaled /= one_k;
+ if (scaled <= big_threshold) {
+ mantissa_stream << scaled;
+ *exponent = i + 1;
+ *mantissa = mantissa_stream.str();
+ return;
+ }
+ }
+ mantissa_stream << val;
+ *exponent = 0;
+ } else if (val < small_threshold) {
+ // Negative powers
+ if (val < simple_threshold) {
+ double scaled = val;
+ for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) {
+ scaled *= one_k;
+ if (scaled >= small_threshold) {
+ mantissa_stream << scaled;
+ *exponent = -static_cast<int64_t>(i + 1);
+ *mantissa = mantissa_stream.str();
+ return;
+ }
+ }
+ }
+ mantissa_stream << val;
+ *exponent = 0;
+ } else {
+ mantissa_stream << val;
+ *exponent = 0;
+ }
+ *mantissa = mantissa_stream.str();
+}
+
+std::string ExponentToPrefix(int64_t exponent, bool iec) {
+ if (exponent == 0) return "";
+
+ const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
+ if (index >= kUnitsSize) return "";
+
+ const char* array =
+ (exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
+ if (iec)
+ return array[index] + std::string("i");
+ else
+ return std::string(1, array[index]);
+}
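+
+// For example (illustrative): ExponentToPrefix(2, false) == "M",
+// ExponentToPrefix(-1, false) == "m", and ExponentToPrefix(1, true) == "Ki".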
+
+std::string ToBinaryStringFullySpecified(double value, double threshold,
+ int precision, double one_k = 1024.0) {
+ std::string mantissa;
+ int64_t exponent;
+ ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa,
+ &exponent);
+ return mantissa + ExponentToPrefix(exponent, false);
+}
+
+} // end namespace
+
+void AppendHumanReadable(int n, std::string* str) {
+ std::stringstream ss;
+ // Round down to the nearest SI prefix.
+ ss << ToBinaryStringFullySpecified(n, 1.0, 0);
+ *str += ss.str();
+}
+
+std::string HumanReadableNumber(double n, double one_k) {
+ // 1.1 means that figures up to 1.1k should be shown with the next unit down;
+ // this softens edge effects.
+ // 1 means that we should show one decimal place of precision.
+ return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
+}
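+
+// For example (illustrative): HumanReadableNumber(2048, 1024) yields "2k"
+// (scaled by powers of 1024 but printed with SI letters), while
+// HumanReadableNumber(2500, 1000) yields "2.5k".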
+
+std::string StrFormatImp(const char* msg, va_list args) {
+  // We might need a second shot at this, so pre-emptively make a copy.
+ va_list args_cp;
+ va_copy(args_cp, args);
+
+  // TODO(ericwf): use std::array for the first attempt to avoid one memory
+  // allocation; guess what the size might be.
+ std::array<char, 256> local_buff;
+ std::size_t size = local_buff.size();
+  // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a
+  // limitation in the android-ndk
+ auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
+
+ va_end(args_cp);
+
+ // handle empty expansion
+ if (ret == 0) return std::string{};
+ if (static_cast<std::size_t>(ret) < size)
+ return std::string(local_buff.data());
+
+  // We did not provide a long enough buffer on our first attempt; add 1 to
+  // the reported size to account for the null byte.
+ auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
+  // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a
+  // limitation in the android-ndk
+ ret = vsnprintf(buff_ptr.get(), size, msg, args);
+ return std::string(buff_ptr.get());
+}
+
+std::string StrFormat(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ std::string tmp = StrFormatImp(format, args);
+ va_end(args);
+ return tmp;
+}
+
+#ifdef BENCHMARK_STL_ANDROID_GNUSTL
+/*
+ * GNU STL in Android NDK lacks support for some C++11 functions, including
+ * stoul, stoi, stod. We reimplement them here using C functions strtoul,
+ * strtol, strtod. Note that reimplemented functions are in benchmark::
+ * namespace, not std:: namespace.
+ */
+unsigned long stoul(const std::string& str, size_t* pos, int base) {
+ /* Record previous errno */
+ const int oldErrno = errno;
+ errno = 0;
+
+ const char* strStart = str.c_str();
+ char* strEnd = const_cast<char*>(strStart);
+ const unsigned long result = strtoul(strStart, &strEnd, base);
+
+ const int strtoulErrno = errno;
+ /* Restore previous errno */
+ errno = oldErrno;
+
+ /* Check for errors and return */
+ if (strtoulErrno == ERANGE) {
+ throw std::out_of_range(
+ "stoul failed: " + str + " is outside of range of unsigned long");
+ } else if (strEnd == strStart || strtoulErrno != 0) {
+ throw std::invalid_argument(
+ "stoul failed: " + str + " is not an integer");
+ }
+ if (pos != nullptr) {
+ *pos = static_cast<size_t>(strEnd - strStart);
+ }
+ return result;
+}
+
+int stoi(const std::string& str, size_t* pos, int base) {
+ /* Record previous errno */
+ const int oldErrno = errno;
+ errno = 0;
+
+ const char* strStart = str.c_str();
+ char* strEnd = const_cast<char*>(strStart);
+ const long result = strtol(strStart, &strEnd, base);
+
+ const int strtolErrno = errno;
+ /* Restore previous errno */
+ errno = oldErrno;
+
+ /* Check for errors and return */
+  if (strtolErrno == ERANGE || long(int(result)) != result) {
+    throw std::out_of_range(
+        "stoi failed: " + str + " is outside of range of int");
+  } else if (strEnd == strStart || strtolErrno != 0) {
+    throw std::invalid_argument(
+        "stoi failed: " + str + " is not an integer");
+ }
+ if (pos != nullptr) {
+ *pos = static_cast<size_t>(strEnd - strStart);
+ }
+ return int(result);
+}
+
+double stod(const std::string& str, size_t* pos) {
+ /* Record previous errno */
+ const int oldErrno = errno;
+ errno = 0;
+
+ const char* strStart = str.c_str();
+ char* strEnd = const_cast<char*>(strStart);
+ const double result = strtod(strStart, &strEnd);
+
+ /* Restore previous errno */
+ const int strtodErrno = errno;
+ errno = oldErrno;
+
+ /* Check for errors and return */
+  if (strtodErrno == ERANGE) {
+    throw std::out_of_range(
+        "stod failed: " + str + " is outside of range of double");
+  } else if (strEnd == strStart || strtodErrno != 0) {
+    throw std::invalid_argument(
+        "stod failed: " + str + " is not a number");
+ }
+ if (pos != nullptr) {
+ *pos = static_cast<size_t>(strEnd - strStart);
+ }
+ return result;
+}
+#endif
+
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/string_util.h b/src/third_party/benchmark/dist/src/string_util.h
new file mode 100644
index 00000000000..09d7b4bd2a9
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/string_util.h
@@ -0,0 +1,59 @@
+#ifndef BENCHMARK_STRING_UTIL_H_
+#define BENCHMARK_STRING_UTIL_H_
+
+#include <sstream>
+#include <string>
+#include <utility>
+#include "internal_macros.h"
+
+namespace benchmark {
+
+void AppendHumanReadable(int n, std::string* str);
+
+std::string HumanReadableNumber(double n, double one_k = 1024.0);
+
+#if defined(__MINGW32__)
+__attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2)))
+#elif defined(__GNUC__)
+__attribute__((format(printf, 1, 2)))
+#endif
+std::string
+StrFormat(const char* format, ...);
+
+inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
+ return out;
+}
+
+template <class First, class... Rest>
+inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) {
+ out << std::forward<First>(f);
+ return StrCatImp(out, std::forward<Rest>(rest)...);
+}
+
+template <class... Args>
+inline std::string StrCat(Args&&... args) {
+ std::ostringstream ss;
+ StrCatImp(ss, std::forward<Args>(args)...);
+ return ss.str();
+}
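+
+// For example (illustrative): StrCat("cpu", 0, "/cpufreq") == "cpu0/cpufreq",
+// built by streaming each argument into a single std::ostringstream.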
+
+#ifdef BENCHMARK_STL_ANDROID_GNUSTL
+/*
+ * GNU STL in Android NDK lacks support for some C++11 functions, including
+ * stoul, stoi, stod. We reimplement them here using C functions strtoul,
+ * strtol, strtod. Note that reimplemented functions are in benchmark::
+ * namespace, not std:: namespace.
+ */
+unsigned long stoul(const std::string& str, size_t* pos = nullptr,
+ int base = 10);
+int stoi(const std::string& str, size_t* pos = nullptr, int base = 10);
+double stod(const std::string& str, size_t* pos = nullptr);
+#else
+using std::stoul;
+using std::stoi;
+using std::stod;
+#endif
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_STRING_UTIL_H_
diff --git a/src/third_party/benchmark/dist/src/sysinfo.cc b/src/third_party/benchmark/dist/src/sysinfo.cc
new file mode 100644
index 00000000000..28126470bad
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/sysinfo.cc
@@ -0,0 +1,699 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "internal_macros.h"
+
+#ifdef BENCHMARK_OS_WINDOWS
+#include <shlwapi.h>
+#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
+#include <versionhelpers.h>
+#include <windows.h>
+#include <codecvt>
+#else
+#include <fcntl.h>
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
+#include <unistd.h>
+#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \
+ defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD
+#define BENCHMARK_HAS_SYSCTL
+#include <sys/sysctl.h>
+#endif
+#endif
+#if defined(BENCHMARK_OS_SOLARIS)
+#include <kstat.h>
+#endif
+#if defined(BENCHMARK_OS_QNX)
+#include <sys/syspage.h>
+#endif
+
+#include <algorithm>
+#include <array>
+#include <bitset>
+#include <cerrno>
+#include <climits>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <sstream>
+#include <locale>
+
+#include "check.h"
+#include "cycleclock.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "sleep.h"
+#include "string_util.h"
+
+namespace benchmark {
+namespace {
+
+void PrintImp(std::ostream& out) { out << std::endl; }
+
+template <class First, class... Rest>
+void PrintImp(std::ostream& out, First&& f, Rest&&... rest) {
+ out << std::forward<First>(f);
+ PrintImp(out, std::forward<Rest>(rest)...);
+}
+
+template <class... Args>
+BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) {
+ PrintImp(std::cerr, std::forward<Args>(args)...);
+ std::exit(EXIT_FAILURE);
+}
+
+#ifdef BENCHMARK_HAS_SYSCTL
+
+/// ValueUnion - A type used to correctly alias the byte-for-byte output of
+/// `sysctl` with the result type it's to be interpreted as.
+struct ValueUnion {
+ union DataT {
+ uint32_t uint32_value;
+ uint64_t uint64_value;
+ // For correct aliasing of union members from bytes.
+ char bytes[8];
+ };
+ using DataPtr = std::unique_ptr<DataT, decltype(&std::free)>;
+
+ // The size of the data union member + its trailing array size.
+ size_t Size;
+ DataPtr Buff;
+
+ public:
+ ValueUnion() : Size(0), Buff(nullptr, &std::free) {}
+
+ explicit ValueUnion(size_t BuffSize)
+ : Size(sizeof(DataT) + BuffSize),
+ Buff(::new (std::malloc(Size)) DataT(), &std::free) {}
+
+ ValueUnion(ValueUnion&& other) = default;
+
+ explicit operator bool() const { return bool(Buff); }
+
+ char* data() const { return Buff->bytes; }
+
+ std::string GetAsString() const { return std::string(data()); }
+
+ int64_t GetAsInteger() const {
+ if (Size == sizeof(Buff->uint32_value))
+ return static_cast<int32_t>(Buff->uint32_value);
+ else if (Size == sizeof(Buff->uint64_value))
+ return static_cast<int64_t>(Buff->uint64_value);
+ BENCHMARK_UNREACHABLE();
+ }
+
+ uint64_t GetAsUnsigned() const {
+ if (Size == sizeof(Buff->uint32_value))
+ return Buff->uint32_value;
+ else if (Size == sizeof(Buff->uint64_value))
+ return Buff->uint64_value;
+ BENCHMARK_UNREACHABLE();
+ }
+
+ template <class T, int N>
+ std::array<T, N> GetAsArray() {
+ const int ArrSize = sizeof(T) * N;
+ CHECK_LE(ArrSize, Size);
+ std::array<T, N> Arr;
+ std::memcpy(Arr.data(), data(), ArrSize);
+ return Arr;
+ }
+};
+
+ValueUnion GetSysctlImp(std::string const& Name) {
+#if defined BENCHMARK_OS_OPENBSD
+ int mib[2];
+
+ mib[0] = CTL_HW;
+ if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){
+ ValueUnion buff(sizeof(int));
+
+ if (Name == "hw.ncpu") {
+ mib[1] = HW_NCPU;
+ } else {
+ mib[1] = HW_CPUSPEED;
+ }
+
+ if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) {
+ return ValueUnion();
+ }
+ return buff;
+ }
+ return ValueUnion();
+#else
+ size_t CurBuffSize = 0;
+ if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1)
+ return ValueUnion();
+
+ ValueUnion buff(CurBuffSize);
+ if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0)
+ return buff;
+ return ValueUnion();
+#endif
+}
+
+BENCHMARK_MAYBE_UNUSED
+bool GetSysctl(std::string const& Name, std::string* Out) {
+ Out->clear();
+ auto Buff = GetSysctlImp(Name);
+ if (!Buff) return false;
+ Out->assign(Buff.data());
+ return true;
+}
+
+template <class Tp,
+ class = typename std::enable_if<std::is_integral<Tp>::value>::type>
+bool GetSysctl(std::string const& Name, Tp* Out) {
+ *Out = 0;
+ auto Buff = GetSysctlImp(Name);
+ if (!Buff) return false;
+ *Out = static_cast<Tp>(Buff.GetAsUnsigned());
+ return true;
+}
+
+template <class Tp, size_t N>
+bool GetSysctl(std::string const& Name, std::array<Tp, N>* Out) {
+ auto Buff = GetSysctlImp(Name);
+ if (!Buff) return false;
+ *Out = Buff.GetAsArray<Tp, N>();
+ return true;
+}
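+
+// A minimal usage sketch (illustrative; only meaningful on BSD/macOS builds
+// where BENCHMARK_HAS_SYSCTL is defined):
+//
+//   int ncpu = 0;
+//   if (GetSysctl("hw.ncpu", &ncpu)) { /* ncpu holds the CPU count */ }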
+#endif
+
+template <class ArgT>
+bool ReadFromFile(std::string const& fname, ArgT* arg) {
+ *arg = ArgT();
+ std::ifstream f(fname.c_str());
+ if (!f.is_open()) return false;
+ f >> *arg;
+ return f.good();
+}
+
+bool CpuScalingEnabled(int num_cpus) {
+ // We don't have a valid CPU count, so don't even bother.
+ if (num_cpus <= 0) return false;
+#ifdef BENCHMARK_OS_QNX
+ return false;
+#endif
+#ifndef BENCHMARK_OS_WINDOWS
+ // On Linux, the CPUfreq subsystem exposes CPU information as files on the
+ // local file system. If reading the exported files fails, then we may not be
+ // running on Linux, so we silently ignore all the read errors.
+ std::string res;
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ std::string governor_file =
+ StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
+ if (ReadFromFile(governor_file, &res) && res != "performance") return true;
+ }
+#endif
+ return false;
+}
+
+int CountSetBitsInCPUMap(std::string Val) {
+ auto CountBits = [](std::string Part) {
+ using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
+ Part = "0x" + Part;
+ CPUMask Mask(benchmark::stoul(Part, nullptr, 16));
+ return static_cast<int>(Mask.count());
+ };
+ size_t Pos;
+ int total = 0;
+ while ((Pos = Val.find(',')) != std::string::npos) {
+ total += CountBits(Val.substr(0, Pos));
+ Val = Val.substr(Pos + 1);
+ }
+ if (!Val.empty()) {
+ total += CountBits(Val);
+ }
+ return total;
+}
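+
+// Worked example (illustrative): a shared_cpu_map value of "ff,00000001"
+// counts 8 bits in "ff" plus 1 bit in "00000001", giving 9 sharing CPUs.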
+
+BENCHMARK_MAYBE_UNUSED
+std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() {
+ std::vector<CPUInfo::CacheInfo> res;
+ std::string dir = "/sys/devices/system/cpu/cpu0/cache/";
+ int Idx = 0;
+ while (true) {
+ CPUInfo::CacheInfo info;
+ std::string FPath = StrCat(dir, "index", Idx++, "/");
+ std::ifstream f(StrCat(FPath, "size").c_str());
+ if (!f.is_open()) break;
+ std::string suffix;
+ f >> info.size;
+ if (f.fail())
+ PrintErrorAndDie("Failed while reading file '", FPath, "size'");
+ if (f.good()) {
+ f >> suffix;
+ if (f.bad())
+ PrintErrorAndDie(
+ "Invalid cache size format: failed to read size suffix");
+ else if (f && suffix != "K")
+ PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix);
+ else if (suffix == "K")
+ info.size *= 1000;
+ }
+ if (!ReadFromFile(StrCat(FPath, "type"), &info.type))
+ PrintErrorAndDie("Failed to read from file ", FPath, "type");
+ if (!ReadFromFile(StrCat(FPath, "level"), &info.level))
+ PrintErrorAndDie("Failed to read from file ", FPath, "level");
+ std::string map_str;
+ if (!ReadFromFile(StrCat(FPath, "shared_cpu_map"), &map_str))
+ PrintErrorAndDie("Failed to read from file ", FPath, "shared_cpu_map");
+ info.num_sharing = CountSetBitsInCPUMap(map_str);
+ res.push_back(info);
+ }
+
+ return res;
+}
+
+#ifdef BENCHMARK_OS_MACOSX
+std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
+ std::vector<CPUInfo::CacheInfo> res;
+ std::array<uint64_t, 4> CacheCounts{{0, 0, 0, 0}};
+ GetSysctl("hw.cacheconfig", &CacheCounts);
+
+ struct {
+ std::string name;
+ std::string type;
+ int level;
+ uint64_t num_sharing;
+ } Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]},
+ {"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
+ {"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
+ {"hw.l3cachesize", "Unified", 3, CacheCounts[3]}};
+ for (auto& C : Cases) {
+ int val;
+ if (!GetSysctl(C.name, &val)) continue;
+ CPUInfo::CacheInfo info;
+ info.type = C.type;
+ info.level = C.level;
+ info.size = val;
+ info.num_sharing = static_cast<int>(C.num_sharing);
+ res.push_back(std::move(info));
+ }
+ return res;
+}
+#elif defined(BENCHMARK_OS_WINDOWS)
+std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
+ std::vector<CPUInfo::CacheInfo> res;
+ DWORD buffer_size = 0;
+ using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
+ using CInfo = CACHE_DESCRIPTOR;
+
+ using UPtr = std::unique_ptr<PInfo, decltype(&std::free)>;
+ GetLogicalProcessorInformation(nullptr, &buffer_size);
+ UPtr buff((PInfo*)malloc(buffer_size), &std::free);
+ if (!GetLogicalProcessorInformation(buff.get(), &buffer_size))
+ PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ",
+ GetLastError());
+
+ PInfo* it = buff.get();
+ PInfo* end = buff.get() + (buffer_size / sizeof(PInfo));
+
+ for (; it != end; ++it) {
+ if (it->Relationship != RelationCache) continue;
+ using BitSet = std::bitset<sizeof(ULONG_PTR) * CHAR_BIT>;
+ BitSet B(it->ProcessorMask);
+ // To prevent duplicates, only consider caches where CPU 0 is specified
+ if (!B.test(0)) continue;
+ CInfo* Cache = &it->Cache;
+ CPUInfo::CacheInfo C;
+ C.num_sharing = static_cast<int>(B.count());
+ C.level = Cache->Level;
+ C.size = Cache->Size;
+ switch (Cache->Type) {
+ case CacheUnified:
+ C.type = "Unified";
+ break;
+ case CacheInstruction:
+ C.type = "Instruction";
+ break;
+ case CacheData:
+ C.type = "Data";
+ break;
+ case CacheTrace:
+ C.type = "Trace";
+ break;
+ default:
+ C.type = "Unknown";
+ break;
+ }
+ res.push_back(C);
+ }
+ return res;
+}
+#elif BENCHMARK_OS_QNX
+std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX() {
+ std::vector<CPUInfo::CacheInfo> res;
+ struct cacheattr_entry *cache = SYSPAGE_ENTRY(cacheattr);
+ uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr);
+ int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize ;
+ for(int i = 0; i < num; ++i ) {
+ CPUInfo::CacheInfo info;
+ switch (cache->flags){
+ case CACHE_FLAG_INSTR :
+ info.type = "Instruction";
+ info.level = 1;
+ break;
+ case CACHE_FLAG_DATA :
+ info.type = "Data";
+ info.level = 1;
+ break;
+      case CACHE_FLAG_UNIFIED :
+        info.type = "Unified";
+        info.level = 2;
+        break;
+      case CACHE_FLAG_SHARED :
+        info.type = "Shared";
+        info.level = 3;
+        break;
+      default :
+        continue;
+ }
+ info.size = cache->line_size * cache->num_lines;
+ info.num_sharing = 0;
+ res.push_back(std::move(info));
+ cache = SYSPAGE_ARRAY_ADJ_OFFSET(cacheattr, cache, elsize);
+ }
+ return res;
+}
+#endif
+
+std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
+#ifdef BENCHMARK_OS_MACOSX
+ return GetCacheSizesMacOSX();
+#elif defined(BENCHMARK_OS_WINDOWS)
+ return GetCacheSizesWindows();
+#elif defined(BENCHMARK_OS_QNX)
+ return GetCacheSizesQNX();
+#else
+ return GetCacheSizesFromKVFS();
+#endif
+}
+
+std::string GetSystemName() {
+#if defined(BENCHMARK_OS_WINDOWS)
+ std::string str;
+ const unsigned COUNT = MAX_COMPUTERNAME_LENGTH+1;
+ TCHAR hostname[COUNT] = {'\0'};
+ DWORD DWCOUNT = COUNT;
+ if (!GetComputerName(hostname, &DWCOUNT))
+ return std::string("");
+#ifndef UNICODE
+ str = std::string(hostname, DWCOUNT);
+#else
+  // Using wstring_convert, which is deprecated in C++17.
+ using convert_type = std::codecvt_utf8<wchar_t>;
+ std::wstring_convert<convert_type, wchar_t> converter;
+ std::wstring wStr(hostname, DWCOUNT);
+ str = converter.to_bytes(wStr);
+#endif
+ return str;
+#else // defined(BENCHMARK_OS_WINDOWS)
+#ifdef BENCHMARK_HAS_SYSCTL  // BSD/Mac doesn't have HOST_NAME_MAX defined
+#define HOST_NAME_MAX 64
+#elif defined(BENCHMARK_OS_QNX)
+#define HOST_NAME_MAX 154
+#endif
+ char hostname[HOST_NAME_MAX];
+ int retVal = gethostname(hostname, HOST_NAME_MAX);
+ if (retVal != 0) return std::string("");
+ return std::string(hostname);
+#endif // Catch-all POSIX block.
+}
+
+int GetNumCPUs() {
+#ifdef BENCHMARK_HAS_SYSCTL
+ int NumCPU = -1;
+ if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU;
+ fprintf(stderr, "Err: %s\n", strerror(errno));
+ std::exit(EXIT_FAILURE);
+#elif defined(BENCHMARK_OS_WINDOWS)
+ SYSTEM_INFO sysinfo;
+ // Use memset as opposed to = {} to avoid GCC missing initializer false
+ // positives.
+ std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
+ GetSystemInfo(&sysinfo);
+ return sysinfo.dwNumberOfProcessors; // number of logical
+ // processors in the current
+ // group
+#elif defined(BENCHMARK_OS_SOLARIS)
+ // Returns -1 in case of a failure.
+ int NumCPU = sysconf(_SC_NPROCESSORS_ONLN);
+ if (NumCPU < 0) {
+ fprintf(stderr,
+ "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n",
+ strerror(errno));
+ }
+ return NumCPU;
+#elif defined(BENCHMARK_OS_QNX)
+ return static_cast<int>(_syspage_ptr->num_cpu);
+#else
+ int NumCPUs = 0;
+ int MaxID = -1;
+ std::ifstream f("/proc/cpuinfo");
+ if (!f.is_open()) {
+ std::cerr << "failed to open /proc/cpuinfo\n";
+ return -1;
+ }
+ const std::string Key = "processor";
+ std::string ln;
+ while (std::getline(f, ln)) {
+ if (ln.empty()) continue;
+ size_t SplitIdx = ln.find(':');
+ std::string value;
+#if defined(__s390__)
+ // s390 has another format in /proc/cpuinfo
+ // it needs to be parsed differently
+ if (SplitIdx != std::string::npos) value = ln.substr(Key.size()+1,SplitIdx-Key.size()-1);
+#else
+ if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
+#endif
+ if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) {
+ NumCPUs++;
+ if (!value.empty()) {
+ int CurID = benchmark::stoi(value);
+ MaxID = std::max(CurID, MaxID);
+ }
+ }
+ }
+ if (f.bad()) {
+ std::cerr << "Failure reading /proc/cpuinfo\n";
+ return -1;
+ }
+ if (!f.eof()) {
+ std::cerr << "Failed to read to end of /proc/cpuinfo\n";
+ return -1;
+ }
+ f.close();
+
+ if ((MaxID + 1) != NumCPUs) {
+ fprintf(stderr,
+ "CPU ID assignments in /proc/cpuinfo seem messed up."
+ " This is usually caused by a bad BIOS.\n");
+ }
+ return NumCPUs;
+#endif
+ BENCHMARK_UNREACHABLE();
+}
+
+double GetCPUCyclesPerSecond() {
+#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
+ long freq;
+
+ // If the kernel is exporting the tsc frequency use that. There are issues
+ // where cpuinfo_max_freq cannot be relied on because the BIOS may be
+  // exporting an invalid p-state (on x86) or p-states may be used to put the
+ // processor in a new mode (turbo mode). Essentially, those frequencies
+ // cannot always be relied upon. The same reasons apply to /proc/cpuinfo as
+ // well.
+ if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)
+ // If CPU scaling is in effect, we want to use the *maximum* frequency,
+ // not whatever CPU speed some random processor happens to be using now.
+ || ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+ &freq)) {
+ // The value is in kHz (as the file name suggests). For example, on a
+ // 2GHz warpstation, the file contains the value "2000000".
+ return freq * 1000.0;
+ }
+
+ const double error_value = -1;
+ double bogo_clock = error_value;
+
+ std::ifstream f("/proc/cpuinfo");
+ if (!f.is_open()) {
+ std::cerr << "failed to open /proc/cpuinfo\n";
+ return error_value;
+ }
+
+ auto startsWithKey = [](std::string const& Value, std::string const& Key) {
+ if (Key.size() > Value.size()) return false;
+ auto Cmp = [&](char X, char Y) {
+ return std::tolower(X) == std::tolower(Y);
+ };
+ return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp);
+ };
+
+ std::string ln;
+ while (std::getline(f, ln)) {
+ if (ln.empty()) continue;
+ size_t SplitIdx = ln.find(':');
+ std::string value;
+ if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
+ // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
+ // accept positive values. Some environments (virtual machines) report zero,
+ // which would cause infinite looping in WallTime_Init.
+ if (startsWithKey(ln, "cpu MHz")) {
+ if (!value.empty()) {
+ double cycles_per_second = benchmark::stod(value) * 1000000.0;
+ if (cycles_per_second > 0) return cycles_per_second;
+ }
+ } else if (startsWithKey(ln, "bogomips")) {
+ if (!value.empty()) {
+ bogo_clock = benchmark::stod(value) * 1000000.0;
+ if (bogo_clock < 0.0) bogo_clock = error_value;
+ }
+ }
+ }
+ if (f.bad()) {
+ std::cerr << "Failure reading /proc/cpuinfo\n";
+ return error_value;
+ }
+ if (!f.eof()) {
+ std::cerr << "Failed to read to end of /proc/cpuinfo\n";
+ return error_value;
+ }
+ f.close();
+ // If we found the bogomips clock, but nothing better, we'll use it (but
+  // we're not happy about it); otherwise, fall back to the rough estimation
+ // below.
+ if (bogo_clock >= 0.0) return bogo_clock;
+
+#elif defined BENCHMARK_HAS_SYSCTL
+ constexpr auto* FreqStr =
+#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD)
+ "machdep.tsc_freq";
+#elif defined BENCHMARK_OS_OPENBSD
+ "hw.cpuspeed";
+#else
+ "hw.cpufrequency";
+#endif
+ unsigned long long hz = 0;
+#if defined BENCHMARK_OS_OPENBSD
+ if (GetSysctl(FreqStr, &hz)) return hz * 1000000;
+#else
+ if (GetSysctl(FreqStr, &hz)) return hz;
+#endif
+ fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
+ FreqStr, strerror(errno));
+
+#elif defined BENCHMARK_OS_WINDOWS
+ // In NT, read MHz from the registry. If we fail to do so or we're in win9x
+ // then make a crude estimate.
+ DWORD data, data_size = sizeof(data);
+ if (IsWindowsXPOrGreater() &&
+ SUCCEEDED(
+ SHGetValueA(HKEY_LOCAL_MACHINE,
+ "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
+ "~MHz", nullptr, &data, &data_size)))
+ return static_cast<double>((int64_t)data *
+ (int64_t)(1000 * 1000)); // was mhz
+#elif defined (BENCHMARK_OS_SOLARIS)
+ kstat_ctl_t *kc = kstat_open();
+ if (!kc) {
+ std::cerr << "failed to open /dev/kstat\n";
+ return -1;
+ }
+ kstat_t *ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0");
+ if (!ksp) {
+ std::cerr << "failed to lookup in /dev/kstat\n";
+ return -1;
+ }
+ if (kstat_read(kc, ksp, NULL) < 0) {
+ std::cerr << "failed to read from /dev/kstat\n";
+ return -1;
+ }
+ kstat_named_t *knp =
+ (kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz");
+ if (!knp) {
+ std::cerr << "failed to lookup data in /dev/kstat\n";
+ return -1;
+ }
+ if (knp->data_type != KSTAT_DATA_UINT64) {
+ std::cerr << "current_clock_Hz is of unexpected data type: "
+ << knp->data_type << "\n";
+ return -1;
+ }
+ double clock_hz = knp->value.ui64;
+ kstat_close(kc);
+ return clock_hz;
+#elif defined (BENCHMARK_OS_QNX)
+ return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) *
+ (int64_t)(1000 * 1000));
+#endif
+ // If we've fallen through, attempt to roughly estimate the CPU clock rate.
+ const int estimate_time_ms = 1000;
+ const auto start_ticks = cycleclock::Now();
+ SleepForMilliseconds(estimate_time_ms);
+ return static_cast<double>(cycleclock::Now() - start_ticks);
+}
+
+std::vector<double> GetLoadAvg() {
+#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
+ defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
+ defined BENCHMARK_OS_OPENBSD) && !defined(__ANDROID__)
+ constexpr int kMaxSamples = 3;
+ std::vector<double> res(kMaxSamples, 0.0);
+ const int nelem = getloadavg(res.data(), kMaxSamples);
+ if (nelem < 1) {
+ res.clear();
+ } else {
+ res.resize(nelem);
+ }
+ return res;
+#else
+ return {};
+#endif
+}
+
+} // end namespace
+
+const CPUInfo& CPUInfo::Get() {
+ static const CPUInfo* info = new CPUInfo();
+ return *info;
+}
+
+CPUInfo::CPUInfo()
+ : num_cpus(GetNumCPUs()),
+ cycles_per_second(GetCPUCyclesPerSecond()),
+ caches(GetCacheSizes()),
+ scaling_enabled(CpuScalingEnabled(num_cpus)),
+ load_avg(GetLoadAvg()) {}
+
+
+const SystemInfo& SystemInfo::Get() {
+ static const SystemInfo* info = new SystemInfo();
+ return *info;
+}
+
+SystemInfo::SystemInfo() : name(GetSystemName()) {}
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/thread_manager.h b/src/third_party/benchmark/dist/src/thread_manager.h
new file mode 100644
index 00000000000..1720281f0a1
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/thread_manager.h
@@ -0,0 +1,64 @@
+#ifndef BENCHMARK_THREAD_MANAGER_H
+#define BENCHMARK_THREAD_MANAGER_H
+
+#include <atomic>
+
+#include "benchmark/benchmark.h"
+#include "mutex.h"
+
+namespace benchmark {
+namespace internal {
+
+class ThreadManager {
+ public:
+ ThreadManager(int num_threads)
+ : alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
+
+ Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
+ return benchmark_mutex_;
+ }
+
+ bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
+ return start_stop_barrier_.wait();
+ }
+
+ void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
+ start_stop_barrier_.removeThread();
+ if (--alive_threads_ == 0) {
+ MutexLock lock(end_cond_mutex_);
+ end_condition_.notify_all();
+ }
+ }
+
+ void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
+ MutexLock lock(end_cond_mutex_);
+ end_condition_.wait(lock.native_handle(),
+ [this]() { return alive_threads_ == 0; });
+ }
+
+ public:
+ struct Result {
+ IterationCount iterations = 0;
+ double real_time_used = 0;
+ double cpu_time_used = 0;
+ double manual_time_used = 0;
+ int64_t complexity_n = 0;
+ std::string report_label_;
+ std::string error_message_;
+ bool has_error_ = false;
+ UserCounters counters;
+ };
+ GUARDED_BY(GetBenchmarkMutex()) Result results;
+
+ private:
+ mutable Mutex benchmark_mutex_;
+ std::atomic<int> alive_threads_;
+ Barrier start_stop_barrier_;
+ Mutex end_cond_mutex_;
+ Condition end_condition_;
+};
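+
+// Illustrative sketch (editor's addition, not upstream code) of the intended
+// usage; RunOneThread is a hypothetical worker function:
+//
+//   ThreadManager manager(num_threads);
+//   // in each worker thread:
+//   manager.StartStopBarrier();      // rendezvous before the timed region
+//   RunOneThread();
+//   manager.NotifyThreadComplete();  // signal that this thread is finished
+//   // in the coordinating thread:
+//   manager.WaitForAllThreads();     // returns once alive_threads_ reaches 0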
+
+} // namespace internal
+} // namespace benchmark
+
+#endif // BENCHMARK_THREAD_MANAGER_H
diff --git a/src/third_party/benchmark/dist/src/thread_timer.h b/src/third_party/benchmark/dist/src/thread_timer.h
new file mode 100644
index 00000000000..fbd298d3bd4
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/thread_timer.h
@@ -0,0 +1,86 @@
+#ifndef BENCHMARK_THREAD_TIMER_H
+#define BENCHMARK_THREAD_TIMER_H
+
+#include "check.h"
+#include "timers.h"
+
+namespace benchmark {
+namespace internal {
+
+class ThreadTimer {
+ explicit ThreadTimer(bool measure_process_cpu_time_)
+ : measure_process_cpu_time(measure_process_cpu_time_) {}
+
+ public:
+ static ThreadTimer Create() {
+ return ThreadTimer(/*measure_process_cpu_time_=*/false);
+ }
+ static ThreadTimer CreateProcessCpuTime() {
+ return ThreadTimer(/*measure_process_cpu_time_=*/true);
+ }
+
+ // Called by each thread
+ void StartTimer() {
+ running_ = true;
+ start_real_time_ = ChronoClockNow();
+ start_cpu_time_ = ReadCpuTimerOfChoice();
+ }
+
+ // Called by each thread
+ void StopTimer() {
+ CHECK(running_);
+ running_ = false;
+ real_time_used_ += ChronoClockNow() - start_real_time_;
+ // Floating point error can result in the subtraction producing a negative
+ // time. Guard against that.
+ cpu_time_used_ +=
+ std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
+ }
+
+ // Called by each thread
+ void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
+
+ bool running() const { return running_; }
+
+ // REQUIRES: timer is not running
+ double real_time_used() {
+ CHECK(!running_);
+ return real_time_used_;
+ }
+
+ // REQUIRES: timer is not running
+ double cpu_time_used() {
+ CHECK(!running_);
+ return cpu_time_used_;
+ }
+
+ // REQUIRES: timer is not running
+ double manual_time_used() {
+ CHECK(!running_);
+ return manual_time_used_;
+ }
+
+ private:
+ double ReadCpuTimerOfChoice() const {
+ if (measure_process_cpu_time) return ProcessCPUUsage();
+ return ThreadCPUUsage();
+ }
+
+ // should the thread, or the process, time be measured?
+ const bool measure_process_cpu_time;
+
+ bool running_ = false; // Is the timer running
+ double start_real_time_ = 0; // If running_
+ double start_cpu_time_ = 0; // If running_
+
+ // Accumulated time so far (does not contain current slice if running_)
+ double real_time_used_ = 0;
+ double cpu_time_used_ = 0;
+ // Manually set iteration time. User sets this with SetIterationTime(seconds).
+ double manual_time_used_ = 0;
+};
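+
+// Illustrative sketch (editor's addition, not upstream code) of a typical
+// ThreadTimer lifecycle inside a benchmark thread:
+//
+//   ThreadTimer timer = ThreadTimer::Create();
+//   timer.StartTimer();
+//   // ... run benchmark iterations ...
+//   timer.StopTimer();
+//   double wall = timer.real_time_used();  // accumulated seconds of real time
+//   double cpu  = timer.cpu_time_used();   // accumulated seconds of CPU time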
+
+} // namespace internal
+} // namespace benchmark
+
+#endif // BENCHMARK_THREAD_TIMER_H
diff --git a/src/third_party/benchmark/dist/src/timers.cc b/src/third_party/benchmark/dist/src/timers.cc
new file mode 100644
index 00000000000..7613ff92c6e
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/timers.cc
@@ -0,0 +1,217 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "timers.h"
+#include "internal_macros.h"
+
+#ifdef BENCHMARK_OS_WINDOWS
+#include <shlwapi.h>
+#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
+#include <versionhelpers.h>
+#include <windows.h>
+#else
+#include <fcntl.h>
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
+#include <unistd.h>
+#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
+#include <sys/sysctl.h>
+#endif
+#if defined(BENCHMARK_OS_MACOSX)
+#include <mach/mach_init.h>
+#include <mach/mach_port.h>
+#include <mach/thread_act.h>
+#endif
+#endif
+
+#ifdef BENCHMARK_OS_EMSCRIPTEN
+#include <emscripten.h>
+#endif
+
+#include <cerrno>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <iostream>
+#include <limits>
+#include <mutex>
+
+#include "check.h"
+#include "log.h"
+#include "sleep.h"
+#include "string_util.h"
+
+namespace benchmark {
+
+// Suppress unused warnings on helper functions.
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wunused-function"
+#endif
+
+namespace {
+#if defined(BENCHMARK_OS_WINDOWS)
+double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) {
+ ULARGE_INTEGER kernel;
+ ULARGE_INTEGER user;
+ kernel.HighPart = kernel_time.dwHighDateTime;
+ kernel.LowPart = kernel_time.dwLowDateTime;
+ user.HighPart = user_time.dwHighDateTime;
+ user.LowPart = user_time.dwLowDateTime;
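+ // FILETIME counts 100-nanosecond ticks, hence the 1e-7 scaling from ticks
+ // to seconds below.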
+ return (static_cast<double>(kernel.QuadPart) +
+ static_cast<double>(user.QuadPart)) *
+ 1e-7;
+}
+#elif !defined(BENCHMARK_OS_FUCHSIA)
+double MakeTime(struct rusage const& ru) {
+ return (static_cast<double>(ru.ru_utime.tv_sec) +
+ static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
+ static_cast<double>(ru.ru_stime.tv_sec) +
+ static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
+}
+#endif
+#if defined(BENCHMARK_OS_MACOSX)
+double MakeTime(thread_basic_info_data_t const& info) {
+ return (static_cast<double>(info.user_time.seconds) +
+ static_cast<double>(info.user_time.microseconds) * 1e-6 +
+ static_cast<double>(info.system_time.seconds) +
+ static_cast<double>(info.system_time.microseconds) * 1e-6);
+}
+#endif
+#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
+double MakeTime(struct timespec const& ts) {
+ return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
+}
+#endif
+
+BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) {
+ std::cerr << "ERROR: " << msg << std::endl;
+ std::exit(EXIT_FAILURE);
+}
+
+} // end namespace
+
+double ProcessCPUUsage() {
+#if defined(BENCHMARK_OS_WINDOWS)
+ HANDLE proc = GetCurrentProcess();
+ FILETIME creation_time;
+ FILETIME exit_time;
+ FILETIME kernel_time;
+ FILETIME user_time;
+ if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time,
+ &user_time))
+ return MakeTime(kernel_time, user_time);
+ DiagnoseAndExit("GetProccessTimes() failed");
+#elif defined(BENCHMARK_OS_EMSCRIPTEN)
+ // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten.
+ // Use the Emscripten-specific API instead. The reported CPU time will be
+ // exactly the same as the total time, but this is OK because there are no
+ // long-latency synchronous system calls in Emscripten.
+ return emscripten_get_now() * 1e-3;
+#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX)
+ // FIXME We want to use clock_gettime, but it's not available on macOS 10.11.
+ // See https://github.com/google/benchmark/pull/292
+ struct timespec spec;
+ if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
+ return MakeTime(spec);
+ DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
+#else
+ struct rusage ru;
+ if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru);
+ DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
+#endif
+}
+
+double ThreadCPUUsage() {
+#if defined(BENCHMARK_OS_WINDOWS)
+ HANDLE this_thread = GetCurrentThread();
+ FILETIME creation_time;
+ FILETIME exit_time;
+ FILETIME kernel_time;
+ FILETIME user_time;
+ GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time,
+ &user_time);
+ return MakeTime(kernel_time, user_time);
+#elif defined(BENCHMARK_OS_MACOSX)
+ // FIXME We want to use clock_gettime, but it's not available on macOS 10.11.
+ // See https://github.com/google/benchmark/pull/292
+ mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
+ thread_basic_info_data_t info;
+ mach_port_t thread = pthread_mach_thread_np(pthread_self());
+ if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) ==
+ KERN_SUCCESS) {
+ return MakeTime(info);
+ }
+ DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
+#elif defined(BENCHMARK_OS_EMSCRIPTEN)
+ // Emscripten doesn't support traditional threads
+ return ProcessCPUUsage();
+#elif defined(BENCHMARK_OS_RTEMS)
+ // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
+ // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
+ return ProcessCPUUsage();
+#elif defined(BENCHMARK_OS_SOLARIS)
+ struct rusage ru;
+ if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru);
+ DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
+#elif defined(CLOCK_THREAD_CPUTIME_ID)
+ struct timespec ts;
+ if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
+ DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
+#else
+#error Per-thread timing is not available on your system.
+#endif
+}
+
+namespace {
+
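+// Formats the current time as, e.g., "2020-05-09 17:27:43" ("%F %T") on
+// POSIX systems, or in the locale-dependent "%x %X" form on Windows.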
+std::string DateTimeString(bool local) {
+ typedef std::chrono::system_clock Clock;
+ std::time_t now = Clock::to_time_t(Clock::now());
+ const std::size_t kStorageSize = 128;
+ char storage[kStorageSize];
+ std::size_t written;
+
+ if (local) {
+#if defined(BENCHMARK_OS_WINDOWS)
+ written =
+ std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
+#else
+ std::tm timeinfo;
+ ::localtime_r(&now, &timeinfo);
+ written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
+#endif
+ } else {
+#if defined(BENCHMARK_OS_WINDOWS)
+ written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
+#else
+ std::tm timeinfo;
+ ::gmtime_r(&now, &timeinfo);
+ written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
+#endif
+ }
+ CHECK(written < kStorageSize);
+ ((void)written); // prevent unused-variable warnings in optimized builds.
+ return std::string(storage);
+}
+
+} // end namespace
+
+std::string LocalDateTimeString() { return DateTimeString(true); }
+
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/timers.h b/src/third_party/benchmark/dist/src/timers.h
new file mode 100644
index 00000000000..65606ccd93d
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/timers.h
@@ -0,0 +1,48 @@
+#ifndef BENCHMARK_TIMERS_H
+#define BENCHMARK_TIMERS_H
+
+#include <chrono>
+#include <string>
+
+namespace benchmark {
+
+// Return the CPU usage of the current process
+double ProcessCPUUsage();
+
+// Return the CPU usage of the children of the current process
+double ChildrenCPUUsage();
+
+// Return the CPU usage of the current thread
+double ThreadCPUUsage();
+
+#if defined(HAVE_STEADY_CLOCK)
+template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady>
+struct ChooseSteadyClock {
+ typedef std::chrono::high_resolution_clock type;
+};
+
+template <>
+struct ChooseSteadyClock<false> {
+ typedef std::chrono::steady_clock type;
+};
+#endif
+
+struct ChooseClockType {
+#if defined(HAVE_STEADY_CLOCK)
+ typedef ChooseSteadyClock<>::type type;
+#else
+ typedef std::chrono::high_resolution_clock type;
+#endif
+};
+
+inline double ChronoClockNow() {
+ typedef ChooseClockType::type ClockType;
+ using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
+ return FpSeconds(ClockType::now().time_since_epoch()).count();
+}
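+
+// Illustrative sketch (editor's addition, not upstream code): timing an
+// elapsed interval with ChronoClockNow(); DoWork is a hypothetical workload.
+//
+//   double start = ChronoClockNow();
+//   DoWork();
+//   double elapsed_seconds = ChronoClockNow() - start;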
+
+std::string LocalDateTimeString();
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_TIMERS_H
diff --git a/src/third_party/benchmark/scripts/import.sh b/src/third_party/benchmark/scripts/import.sh
new file mode 100755
index 00000000000..627c496cdbf
--- /dev/null
+++ b/src/third_party/benchmark/scripts/import.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# This script downloads and imports Google Benchmark.
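+#
+# Usage (assumed): invoke from anywhere inside a checkout of the mongo
+# repository, e.g.
+#
+#   src/third_party/benchmark/scripts/import.sh
+#
+# The script locates the repository root with `git rev-parse --show-toplevel`,
+# so it must run from within the repository.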
+#
+# Turn on strict error checking, like Perl's 'use strict'.
+set -xeuo pipefail
+IFS=$'\n\t'
+
+NAME="benchmark"
+
+LIB_GIT_REV="mongo/v1.5.0"
+
+LIB_GIT_URL="https://github.com/mongodb-forks/benchmark.git"
+LIB_GIT_DIR=$(mktemp -d /tmp/import-benchmark.XXXXXX)
+
+trap "rm -rf $LIB_GIT_DIR" EXIT
+
+DIST=$(git rev-parse --show-toplevel)/src/third_party/$NAME/dist
+git clone "$LIB_GIT_URL" $LIB_GIT_DIR
+git -C $LIB_GIT_DIR checkout $LIB_GIT_REV
+
+DEST_DIR=$(git rev-parse --show-toplevel)/src/third_party/$NAME
+
+SUBDIR_WHITELIST=(
+ src
+ include/benchmark
+ LICENSE
+ README.md
+)
+
+for subdir in "${SUBDIR_WHITELIST[@]}"
+do
+ [[ -d $LIB_GIT_DIR/$subdir ]] && mkdir -p "$DIST/$subdir"
+ cp -Trp "$LIB_GIT_DIR/$subdir" "$DIST/$subdir"
+done