author     Billy Donahue <billy.donahue@mongodb.com>  2020-05-04 04:52:56 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-05-09 17:27:43 +0000
commit     cbfc03979e1b567eb58a5d4bb6d8bbcd65988775
tree       d7933d5a2bf6f04e15f4cf21d3e772680e84b8b2
parent     c4cebb3de7d11aa8f73df45f417e3be712bcf215
download   mongo-cbfc03979e1b567eb58a5d4bb6d8bbcd65988775.tar.gz
SERVER-41567 Upgrade google-benchmark to tag mongo-v1.5.0
-rw-r--r--  README.third_party.md | 2
-rw-r--r--  src/third_party/SConscript | 7
-rw-r--r--  src/third_party/benchmark-1.4.1/SConscript | 36
-rw-r--r--  src/third_party/benchmark-1.4.1/benchmark/.gitignore | 57
-rw-r--r--  src/third_party/benchmark-1.4.1/benchmark/src/benchmark_register.h | 33
-rw-r--r--  src/third_party/benchmark-1.4.1/benchmark/src/string_util.h | 40
-rw-r--r--  src/third_party/benchmark-1.4.1/patches/0001-Remove-deprecated-benchmark-fixture-function-decl.patch | 34
-rw-r--r--  src/third_party/benchmark-1.4.1/patches/0001-properly-escape-json-names-652.patch | 31
-rw-r--r--  src/third_party/benchmark-1.4.1/patches/0002-SERVER-33491-Fix-benchmark.h-compile-with-fdirective.patch | 30
-rw-r--r--  src/third_party/benchmark/SConscript | 40
-rw-r--r--  src/third_party/benchmark/dist/LICENSE (renamed from src/third_party/benchmark-1.4.1/benchmark/LICENSE) | 0
-rw-r--r--  src/third_party/benchmark/dist/README.md (renamed from src/third_party/benchmark-1.4.1/benchmark/README.md) | 1145
-rw-r--r--  src/third_party/benchmark/dist/include/benchmark/benchmark.h (renamed from src/third_party/benchmark-1.4.1/benchmark/include/benchmark/benchmark.h) | 405
-rw-r--r--  src/third_party/benchmark/dist/src/arraysize.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/arraysize.h) | 0
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/benchmark.cc) | 340
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_api_internal.cc | 15
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_api_internal.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/benchmark_api_internal.h) | 16
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_main.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/benchmark_main.cc) | 0
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_name.cc | 58
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_register.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/benchmark_register.cc) | 95
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_register.h | 107
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_runner.cc | 361
-rw-r--r--  src/third_party/benchmark/dist/src/benchmark_runner.h | 51
-rw-r--r--  src/third_party/benchmark/dist/src/check.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/check.h) | 5
-rw-r--r--  src/third_party/benchmark/dist/src/colorprint.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/colorprint.cc) | 2
-rw-r--r--  src/third_party/benchmark/dist/src/colorprint.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/colorprint.h) | 0
-rw-r--r--  src/third_party/benchmark/dist/src/commandlineflags.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/commandlineflags.cc) | 6
-rw-r--r--  src/third_party/benchmark/dist/src/commandlineflags.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/commandlineflags.h) | 6
-rw-r--r--  src/third_party/benchmark/dist/src/complexity.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/complexity.cc) | 44
-rw-r--r--  src/third_party/benchmark/dist/src/complexity.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/complexity.h) | 0
-rw-r--r--  src/third_party/benchmark/dist/src/console_reporter.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/console_reporter.cc) | 57
-rw-r--r--  src/third_party/benchmark/dist/src/counter.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/counter.cc) | 26
-rw-r--r--  src/third_party/benchmark/dist/src/counter.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/counter.h) | 9
-rw-r--r--  src/third_party/benchmark/dist/src/csv_reporter.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/csv_reporter.cc) | 61
-rw-r--r--  src/third_party/benchmark/dist/src/cycleclock.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/cycleclock.h) | 8
-rw-r--r--  src/third_party/benchmark/dist/src/internal_macros.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/internal_macros.h) | 25
-rw-r--r--  src/third_party/benchmark/dist/src/json_reporter.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/json_reporter.cc) | 129
-rw-r--r--  src/third_party/benchmark/dist/src/log.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/log.h) | 3
-rw-r--r--  src/third_party/benchmark/dist/src/mutex.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/mutex.h) | 0
-rw-r--r--  src/third_party/benchmark/dist/src/re.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/re.h) | 24
-rw-r--r--  src/third_party/benchmark/dist/src/reporter.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/reporter.cc) | 22
-rw-r--r--  src/third_party/benchmark/dist/src/sleep.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/sleep.cc) | 2
-rw-r--r--  src/third_party/benchmark/dist/src/sleep.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/sleep.h) | 0
-rw-r--r--  src/third_party/benchmark/dist/src/statistics.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/statistics.cc) | 67
-rw-r--r--  src/third_party/benchmark/dist/src/statistics.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/statistics.h) | 0
-rw-r--r--  src/third_party/benchmark/dist/src/string_util.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/string_util.cc) | 92
-rw-r--r--  src/third_party/benchmark/dist/src/string_util.h | 59
-rw-r--r--  src/third_party/benchmark/dist/src/sysinfo.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/sysinfo.cc) | 130
-rw-r--r--  src/third_party/benchmark/dist/src/thread_manager.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/thread_manager.h) | 4
-rw-r--r--  src/third_party/benchmark/dist/src/thread_timer.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/thread_timer.h) | 23
-rw-r--r--  src/third_party/benchmark/dist/src/timers.cc (renamed from src/third_party/benchmark-1.4.1/benchmark/src/timers.cc) | 6
-rw-r--r--  src/third_party/benchmark/dist/src/timers.h (renamed from src/third_party/benchmark-1.4.1/benchmark/src/timers.h) | 0
-rwxr-xr-x  src/third_party/benchmark/scripts/import.sh | 34
-rwxr-xr-x  src/third_party/scripts/benchmark_get_sources.sh | 63
54 files changed, 2408 insertions(+), 1402 deletions(-)
diff --git a/README.third_party.md b/README.third_party.md
index ed09d036c9a..b1ac497a373 100644
--- a/README.third_party.md
+++ b/README.third_party.md
@@ -24,7 +24,7 @@ a notice will be included in
| [abseil-cpp] | Apache-2.0 | | 070f6e47b3 | | ✗ |
| Aladdin MD5 | Zlib | | Unknown | ✗ | ✗ |
| [ASIO] | BSL-1.0 | 1.14.0 | b0926b61b0 | | ✗ |
-| [benchmark] | Apache-2.0 | 1.5.0 | 1.4.1 | | |
+| [benchmark] | Apache-2.0 | 1.5.0 | 1.5.0 | | |
| [Boost] | BSL-1.0 | 1.72.0 | 1.70.0 | | ✗ |
| [fmt] | BSD-2-Clause | 6.1.2 | 6.1.1 | | ✗ |
| [GPerfTools] | BSD-3-Clause | 2.7 | 2.7 | | ✗ |
diff --git a/src/third_party/SConscript b/src/third_party/SConscript
index 9e80376fb15..86a36df7670 100644
--- a/src/third_party/SConscript
+++ b/src/third_party/SConscript
@@ -22,7 +22,6 @@ icuSuffix = '-57.1'
gperftoolsSuffix = '-2.7'
timelibSuffix = '-2018.01'
tomcryptSuffix = '-1.18.2'
-benchmarkSuffix = '-1.4.1'
thirdPartyEnvironmentModifications = {
'fmt' : {
@@ -123,7 +122,7 @@ if not use_system_version_of_library('zstd'):
if not use_system_version_of_library('google-benchmark'):
thirdPartyEnvironmentModifications['benchmark'] = {
- 'CPPPATH' : ['#/src/third_party/benchmark' + benchmarkSuffix + '/benchmark/include'],
+ 'CPPPATH' : ['#/src/third_party/benchmark/dist/include'],
}
# TODO: figure out if we want to offer system versions of mozjs. Mozilla
@@ -466,11 +465,11 @@ else:
benchmarkEnv = env.Clone()
benchmarkEnv.InjectThirdParty(libraries=['benchmark'])
benchmarkEnv.SConscript(
- 'benchmark' + benchmarkSuffix + '/SConscript',
+ 'benchmark/SConscript',
exports={ 'env' : benchmarkEnv })
benchmarkEnv = benchmarkEnv.Clone(
LIBDEPS_INTERFACE=[
- 'benchmark' + benchmarkSuffix + '/benchmark',
+ 'benchmark/benchmark',
])
benchmarkEnv.Library(
diff --git a/src/third_party/benchmark-1.4.1/SConscript b/src/third_party/benchmark-1.4.1/SConscript
deleted file mode 100644
index 7e3b6e7487f..00000000000
--- a/src/third_party/benchmark-1.4.1/SConscript
+++ /dev/null
@@ -1,36 +0,0 @@
-# -*- mode: python -*-
-
-Import("env")
-
-env = env.Clone()
-
-if env.TargetOSIs('windows'):
- env.Prepend(CCFLAGS=[
- # 'function' : destructor never returns, potential memory leak
- '/wd4722',
- ])
-
- env.Append(LIBS=["ShLwApi.lib"])
-
-env.Append(CPPDEFINES=["HAVE_STD_REGEX"])
-
-env.Library(
- target="benchmark",
- source=[
- "benchmark/src/benchmark.cc",
- "benchmark/src/benchmark_register.cc",
- "benchmark/src/colorprint.cc",
- "benchmark/src/commandlineflags.cc",
- "benchmark/src/complexity.cc",
- "benchmark/src/console_reporter.cc",
- "benchmark/src/counter.cc",
- "benchmark/src/csv_reporter.cc",
- "benchmark/src/json_reporter.cc",
- "benchmark/src/reporter.cc",
- "benchmark/src/sleep.cc",
- "benchmark/src/statistics.cc",
- "benchmark/src/string_util.cc",
- "benchmark/src/sysinfo.cc",
- "benchmark/src/timers.cc",
- ]
-)
diff --git a/src/third_party/benchmark-1.4.1/benchmark/.gitignore b/src/third_party/benchmark-1.4.1/benchmark/.gitignore
deleted file mode 100644
index 050e46987fa..00000000000
--- a/src/third_party/benchmark-1.4.1/benchmark/.gitignore
+++ /dev/null
@@ -1,57 +0,0 @@
-*.a
-*.so
-*.so.?*
-*.dll
-*.exe
-*.dylib
-*.cmake
-!/cmake/*.cmake
-!/test/AssemblyTests.cmake
-*~
-*.pyc
-__pycache__
-
-# lcov
-*.lcov
-/lcov
-
-# cmake files.
-/Testing
-CMakeCache.txt
-CMakeFiles/
-cmake_install.cmake
-
-# makefiles.
-Makefile
-
-# in-source build.
-bin/
-lib/
-/test/*_test
-
-# exuberant ctags.
-tags
-
-# YouCompleteMe configuration.
-.ycm_extra_conf.pyc
-
-# ninja generated files.
-.ninja_deps
-.ninja_log
-build.ninja
-install_manifest.txt
-rules.ninja
-
-# bazel output symlinks.
-bazel-*
-
-# out-of-source build top-level folders.
-build/
-_build/
-
-# in-source dependencies
-/googletest/
-
-# Visual Studio 2015/2017 cache/options directory
-.vs/
-CMakeSettings.json
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/benchmark_register.h b/src/third_party/benchmark-1.4.1/benchmark/src/benchmark_register.h
deleted file mode 100644
index 0705e219f2f..00000000000
--- a/src/third_party/benchmark-1.4.1/benchmark/src/benchmark_register.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef BENCHMARK_REGISTER_H
-#define BENCHMARK_REGISTER_H
-
-#include <vector>
-
-#include "check.h"
-
-template <typename T>
-void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
- CHECK_GE(lo, 0);
- CHECK_GE(hi, lo);
- CHECK_GE(mult, 2);
-
- // Add "lo"
- dst->push_back(lo);
-
- static const T kmax = std::numeric_limits<T>::max();
-
- // Now space out the benchmarks in multiples of "mult"
- for (T i = 1; i < kmax / mult; i *= mult) {
- if (i >= hi) break;
- if (i > lo) {
- dst->push_back(i);
- }
- }
-
- // Add "hi" (if different from "lo")
- if (hi != lo) {
- dst->push_back(hi);
- }
-}
-
-#endif // BENCHMARK_REGISTER_H
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/string_util.h b/src/third_party/benchmark-1.4.1/benchmark/src/string_util.h
deleted file mode 100644
index e70e7698724..00000000000
--- a/src/third_party/benchmark-1.4.1/benchmark/src/string_util.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef BENCHMARK_STRING_UTIL_H_
-#define BENCHMARK_STRING_UTIL_H_
-
-#include <sstream>
-#include <string>
-#include <utility>
-#include "internal_macros.h"
-
-namespace benchmark {
-
-void AppendHumanReadable(int n, std::string* str);
-
-std::string HumanReadableNumber(double n, double one_k = 1024.0);
-
-std::string StrFormat(const char* format, ...);
-
-inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
- return out;
-}
-
-template <class First, class... Rest>
-inline std::ostream& StrCatImp(std::ostream& out, First&& f,
- Rest&&... rest) {
- out << std::forward<First>(f);
- return StrCatImp(out, std::forward<Rest>(rest)...);
-}
-
-template <class... Args>
-inline std::string StrCat(Args&&... args) {
- std::ostringstream ss;
- StrCatImp(ss, std::forward<Args>(args)...);
- return ss.str();
-}
-
-void ReplaceAll(std::string* str, const std::string& from,
- const std::string& to);
-
-} // end namespace benchmark
-
-#endif // BENCHMARK_STRING_UTIL_H_
diff --git a/src/third_party/benchmark-1.4.1/patches/0001-Remove-deprecated-benchmark-fixture-function-decl.patch b/src/third_party/benchmark-1.4.1/patches/0001-Remove-deprecated-benchmark-fixture-function-decl.patch
deleted file mode 100644
index 21430e250ca..00000000000
--- a/src/third_party/benchmark-1.4.1/patches/0001-Remove-deprecated-benchmark-fixture-function-decl.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From 9db968c3544432d17191a1b3c8873873dcd672fe Mon Sep 17 00:00:00 2001
-From: Robert Guo <robert.guo@10gen.com>
-Date: Wed, 28 Feb 2018 17:06:34 -0500
-Subject: [PATCH] SERVER-33560 Remove deprecated Fixture function declarations
- from Google Benchmark
-
----
- .../benchmark-1.4.1/benchmark/include/benchmark/benchmark.h | 10 ++++------
- 1 file changed, 4 insertions(+), 6 deletions(-)
-
-diff --git a/src/third_party/benchmark-1.4.1/benchmark/include/benchmark/benchmark.h b/src/third_party/benchmark-1.4.1/benchmark/include/benchmark/benchmark.h
-index 15ed02e538..2183c8a1d2 100644
---- a/src/third_party/benchmark-1.4.1/benchmark/include/benchmark/benchmark.h
-+++ b/src/third_party/benchmark-1.4.1/benchmark/include/benchmark/benchmark.h
-@@ -943,12 +943,10 @@ class Fixture : public internal::Benchmark {
- this->TearDown(st);
- }
-
-- // These will be deprecated ...
-- virtual void SetUp(const State&) {}
-- virtual void TearDown(const State&) {}
-- // ... In favor of these.
-- virtual void SetUp(State& st) { SetUp(const_cast<const State&>(st)); }
-- virtual void TearDown(State& st) { TearDown(const_cast<const State&>(st)); }
-+ // MONGODB MODIFICATION: Remove the deprecated version of SetUp() and TearDown() that
-+ // require `const State&` as an argument.
-+ virtual void SetUp(State&) {}
-+ virtual void TearDown(State&) {}
-
- protected:
- virtual void BenchmarkCase(State&) = 0;
---
-2.14.3 (Apple Git-98)
-
diff --git a/src/third_party/benchmark-1.4.1/patches/0001-properly-escape-json-names-652.patch b/src/third_party/benchmark-1.4.1/patches/0001-properly-escape-json-names-652.patch
deleted file mode 100644
index 77306ecf2fc..00000000000
--- a/src/third_party/benchmark-1.4.1/patches/0001-properly-escape-json-names-652.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From af441fc1143e33e539ceec4df67c2d95ac2bf5f8 Mon Sep 17 00:00:00 2001
-From: BaaMeow <38274252+BaaMeow@users.noreply.github.com>
-Date: Thu, 16 Aug 2018 12:47:09 -0400
-Subject: [PATCH] properly escape json names (#652)
-
----
- src/json_reporter.cc | 7 ++++++-
- test/reporter_output_test.cc | 3 ++-
- 2 files changed, 8 insertions(+), 2 deletions(-)
-
-diff --git a/src/json_reporter.cc b/src/json_reporter.cc
-index 6d0706f..127a96a 100644
---- a/src/json_reporter.cc
-+++ b/src/json_reporter.cc
-@@ -78,7 +78,12 @@ bool JSONReporter::ReportContext(const Context& context) {
- out << indent << FormatKV("date", walltime_value) << ",\n";
-
- if (Context::executable_name) {
-- out << indent << FormatKV("executable", Context::executable_name) << ",\n";
-+ // windows uses backslash for its path separator,
-+ // which must be escaped in JSON otherwise it blows up conforming JSON
-+ // decoders
-+ std::string executable_name = Context::executable_name;
-+ ReplaceAll(&executable_name, "\\", "\\\\");
-+ out << indent << FormatKV("executable", executable_name) << ",\n";
- }
-
- CPUInfo const& info = context.cpu_info;
---
-2.10.1.windows.1
-
diff --git a/src/third_party/benchmark-1.4.1/patches/0002-SERVER-33491-Fix-benchmark.h-compile-with-fdirective.patch b/src/third_party/benchmark-1.4.1/patches/0002-SERVER-33491-Fix-benchmark.h-compile-with-fdirective.patch
deleted file mode 100644
index d13573118ef..00000000000
--- a/src/third_party/benchmark-1.4.1/patches/0002-SERVER-33491-Fix-benchmark.h-compile-with-fdirective.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From 8a275d29b6e17c37ac66380a7689c80e8a52fbb6 Mon Sep 17 00:00:00 2001
-From: Mathias Stearn <mathias@10gen.com>
-Date: Mon, 26 Feb 2018 12:24:33 -0500
-Subject: [PATCH] SERVER-33491 Fix benchmark.h compile with -fdirectives-only
-
----
- .../benchmark-1.4.1/benchmark/include/benchmark/benchmark.h | 7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
-
-diff --git a/src/third_party/benchmark-1.4.1/benchmark/include/benchmark/benchmark.h b/src/third_party/benchmark-1.4.1/benchmark/include/benchmark/benchmark.h
-index c8360da..9529faf 100644
---- a/src/third_party/benchmark-1.4.1/benchmark/include/benchmark/benchmark.h
-+++ b/src/third_party/benchmark-1.4.1/benchmark/include/benchmark/benchmark.h
-@@ -960,7 +960,12 @@ class Fixture : public internal::Benchmark {
- // Check that __COUNTER__ is defined and that __COUNTER__ increases by 1
- // every time it is expanded. X + 1 == X + 0 is used in case X is defined to be
- // empty. If X is empty the expression becomes (+1 == +0).
--#if defined(__COUNTER__) && (__COUNTER__ + 1 == __COUNTER__ + 0)
-+//
-+// MONGODB MODIFICATION: all of our supported compilers support __COUNTER__ so we don't need to test
-+// for it here. This test interferes with -E -fdirectives-only since it is illegal to use
-+// __COUNTER__ in an #if clause with that flag because its value could change between the partial
-+// preprocessing and the compile phases.
-+#if true // defined(__COUNTER__) && (__COUNTER__ + 1 == __COUNTER__ + 0)
- #define BENCHMARK_PRIVATE_UNIQUE_ID __COUNTER__
- #else
- #define BENCHMARK_PRIVATE_UNIQUE_ID __LINE__
---
-2.10.1.windows.1
-
diff --git a/src/third_party/benchmark/SConscript b/src/third_party/benchmark/SConscript
new file mode 100644
index 00000000000..fc8549e0c0f
--- /dev/null
+++ b/src/third_party/benchmark/SConscript
@@ -0,0 +1,40 @@
+# -*- mode: python -*-
+
+Import("env")
+
+env = env.Clone()
+
+if env.TargetOSIs('windows'):
+ env.Prepend(CCFLAGS=[
+ # 'function' : destructor never returns, potential memory leak
+ '/wd4722',
+ ])
+
+ env.Append(LIBS=["ShLwApi.lib"])
+
+env.Append(CPPDEFINES=["HAVE_STD_REGEX"])
+
+src_dir=env.Dir('dist/src')
+
+env.Library(
+ target="benchmark",
+ source=env.File([
+ 'benchmark_api_internal.cc',
+ 'benchmark.cc',
+ 'benchmark_name.cc',
+ 'benchmark_register.cc',
+ 'benchmark_runner.cc',
+ 'colorprint.cc',
+ 'commandlineflags.cc',
+ 'complexity.cc',
+ 'console_reporter.cc',
+ 'counter.cc',
+ 'csv_reporter.cc',
+ 'json_reporter.cc',
+ 'reporter.cc',
+ 'sleep.cc',
+ 'statistics.cc',
+ 'string_util.cc',
+ 'sysinfo.cc',
+ 'timers.cc',
+ ], src_dir))
diff --git a/src/third_party/benchmark-1.4.1/benchmark/LICENSE b/src/third_party/benchmark/dist/LICENSE
index d6456956733..d6456956733 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/LICENSE
+++ b/src/third_party/benchmark/dist/LICENSE
diff --git a/src/third_party/benchmark-1.4.1/benchmark/README.md b/src/third_party/benchmark/dist/README.md
index 0341c31bd74..45e41588438 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/README.md
+++ b/src/third_party/benchmark/dist/README.md
@@ -1,35 +1,102 @@
-# benchmark
+# Benchmark
[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
[![Build status](https://ci.appveyor.com/api/projects/status/u0qsyp7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master)
[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
[![slackin](https://slackin-iqtfqnpzxd.now.sh/badge.svg)](https://slackin-iqtfqnpzxd.now.sh/)
-A library to support the benchmarking of functions, similar to unit-tests.
-Discussion group: https://groups.google.com/d/forum/benchmark-discuss
+A library to benchmark code snippets, similar to unit tests. Example:
-IRC channel: https://freenode.net #googlebenchmark
+```c++
+#include <benchmark/benchmark.h>
+
+static void BM_SomeFunction(benchmark::State& state) {
+ // Perform setup here
+ for (auto _ : state) {
+ // This code gets timed
+ SomeFunction();
+ }
+}
+// Register the function as a benchmark
+BENCHMARK(BM_SomeFunction);
+// Run the benchmark
+BENCHMARK_MAIN();
+```
+
+To get started, see [Requirements](#requirements) and
+[Installation](#installation). See [Usage](#usage) for a full example and the
+[User Guide](#user-guide) for a more comprehensive feature overview.
+
+It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/googletest/docs/primer.md)
+as some of the structural aspects of the APIs are similar.
+
+### Resources
+
+[Discussion group](https://groups.google.com/d/forum/benchmark-discuss)
-[Known issues and common problems](#known-issues)
+IRC channel: [freenode](https://freenode.net) #googlebenchmark
[Additional Tooling Documentation](docs/tools.md)
[Assembly Testing Documentation](docs/AssemblyTests.md)
+## Requirements
-## Building
+The library can be used with C++03. However, it requires C++11 to build,
+including compiler and standard library support.
-The basic steps for configuring and building the library look like this:
+The following minimum versions are required to build the library:
+
+* GCC 4.8
+* Clang 3.4
+* Visual Studio 2013
+* Intel 2015 Update 1
+
+## Installation
+
+This describes the installation process using cmake. As pre-requisites, you'll
+need git and cmake installed.
+
+_See [dependencies.md](dependencies.md) for more details regarding supported
+versions of build tools._
```bash
+# Check out the library.
$ git clone https://github.com/google/benchmark.git
# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory.
$ git clone https://github.com/google/googletest.git benchmark/googletest
+# Make a build directory to place the build output.
$ mkdir build && cd build
-$ cmake -G <generator> [options] ../benchmark
-# Assuming a makefile generator was used
+# Generate a Makefile with cmake.
+# Use cmake -G <generator> to generate a different file type.
+$ cmake ../benchmark
+# Build the library.
$ make
```
+This builds the `benchmark` and `benchmark_main` libraries and tests.
+On a unix system, the build directory should now look something like this:
+
+```
+/benchmark
+/build
+ /src
+ /libbenchmark.a
+ /libbenchmark_main.a
+ /test
+ ...
+```
+
+Next, you can run the tests to check the build.
+
+```bash
+$ make test
+```
+
+If you want to install the library globally, also run:
+
+```
+sudo make install
+```
Note that Google Benchmark requires Google Test to build and run the tests. This
dependency can be provided two ways:
@@ -42,40 +109,29 @@ dependency can be provided two ways:
If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
to `CMAKE_ARGS`.
+### Debug vs Release
-## Installation Guide
-
-For Ubuntu and Debian Based System
-
-First make sure you have git and cmake installed (If not please install it)
+By default, benchmark builds as a debug library. You will see a warning in the
+output when this is the case. To build it as a release library instead, use:
```
-sudo apt-get install git
-sudo apt-get install cmake
+cmake -DCMAKE_BUILD_TYPE=Release
```
-Now, let's clone the repository and build it
+To enable link-time optimisation, use
```
-git clone https://github.com/google/benchmark.git
-cd benchmark
-git clone https://github.com/google/googletest.git
-mkdir build
-cd build
-cmake .. -DCMAKE_BUILD_TYPE=RELEASE
-make
+cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
```
-We need to install the library globally now
+If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake
+cache variables, if autodetection fails.
-```
-sudo make install
-```
+If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
+`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
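+
+For example, a clang LTO configuration might look like the following sketch
+(the tool paths are illustrative; point the cache variables at your own
+LLVM toolchain):
+
+```bash
+# Hypothetical clang + LTO configuration; adjust paths to your toolchain.
+cmake -DCMAKE_BUILD_TYPE=Release \
+      -DBENCHMARK_ENABLE_LTO=true \
+      -DLLVMAR_EXECUTABLE="$(command -v llvm-ar)" \
+      -DLLVMNM_EXECUTABLE="$(command -v llvm-nm)" \
+      -DLLVMRANLIB_EXECUTABLE="$(command -v llvm-ranlib)" \
+      ../benchmark
+```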
-Now you have google/benchmark installed in your machine
-Note: Don't forget to link to pthread library while building
-## Stable and Experimental Library Versions
+### Stable and Experimental Library Versions
The main branch contains the latest stable version of the benchmarking library;
the API of which can be considered largely stable, with source breaking changes
@@ -87,15 +143,11 @@ to use, test, and provide feedback on the new features are encouraged to try
this branch. However, this branch provides no stability guarantees and reserves
the right to change and break the API at any time.
-##Prerequisite knowledge
-
-Before attempting to understand this framework one should ideally have some familiarity with the structure and format of the Google Test framework, upon which it is based. Documentation for Google Test, including a "Getting Started" (primer) guide, is available here:
-https://github.com/google/googletest/blob/master/googletest/docs/Documentation.md
-
-
-## Example usage
+## Usage
### Basic usage
-Define a function that executes the code to be measured.
+Define a function that executes the code to measure, register it as a benchmark
+function using the `BENCHMARK` macro, and ensure an appropriate `main` function
+is available:
```c++
#include <benchmark/benchmark.h>
@@ -118,14 +170,218 @@ BENCHMARK(BM_StringCopy);
BENCHMARK_MAIN();
```
-Don't forget to inform your linker to add benchmark library e.g. through
-`-lbenchmark` compilation flag. Alternatively, you may leave out the
-`BENCHMARK_MAIN();` at the end of the source file and link against
-`-lbenchmark_main` to get the same default behavior.
+To run the benchmark, compile and link against the `benchmark` library
+(libbenchmark.a/.so). If you followed the build steps above, this
+library will be under the build directory you created.
+
+```bash
+# Example on linux after running the build steps above. Assumes the
+# `benchmark` and `build` directories are under the current directory.
+$ g++ -std=c++11 -isystem benchmark/include -Lbuild/src -lpthread \
+ -lbenchmark mybenchmark.cc -o mybenchmark
+```
+
+Alternatively, link against the `benchmark_main` library and remove
+`BENCHMARK_MAIN();` above to get the same behavior.
+
+The compiled executable will run all benchmarks by default. Pass the `--help`
+flag for option information or see the guide below.
+
+### Platform-specific instructions
+
+When the library is built using GCC it is necessary to link with the pthread
+library due to how GCC implements `std::thread`. Failing to link to pthread will
+lead to runtime exceptions (unless you're using libc++), not linker errors. See
+[issue #67](https://github.com/google/benchmark/issues/67) for more details. You
+can link to pthread by adding `-pthread` to your linker command. Note, you can
+also use `-lpthread`, but there are potential issues with ordering of command
+line parameters if you use that.
+
+If you're running benchmarks on Windows, the shlwapi library (`-lshlwapi`) is
+also required.
+
+If you're running benchmarks on solaris, you'll want the kstat library linked in
+too (`-lkstat`).
+
+## User Guide
+
+### Command Line
+[Output Formats](#output-formats)
+
+[Output Files](#output-files)
+
+[Running a Subset of Benchmarks](#running-a-subset-of-benchmarks)
+
+[Result Comparison](#result-comparison)
+
+### Library
+[Runtime and Reporting Considerations](#runtime-and-reporting-considerations)
+
+[Passing Arguments](#passing-arguments)
+
+[Calculating Asymptotic Complexity](#asymptotic-complexity)
+
+[Templated Benchmarks](#templated-benchmarks)
+
+[Fixtures](#fixtures)
+
+[Custom Counters](#custom-counters)
+
+[Multithreaded Benchmarks](#multithreaded-benchmarks)
+
+[CPU Timers](#cpu-timers)
+
+[Manual Timing](#manual-timing)
+
+[Setting the Time Unit](#setting-the-time-unit)
+
+[Preventing Optimization](#preventing-optimization)
+
+[Reporting Statistics](#reporting-statistics)
+
+[Custom Statistics](#custom-statistics)
+
+[Using RegisterBenchmark](#using-register-benchmark)
+
+[Exiting with an Error](#exiting-with-an-error)
+
+[A Faster KeepRunning Loop](#a-faster-keep-running-loop)
+
+[Disabling CPU Frequency Scaling](#disabling-cpu-frequency-scaling)
+
+<a name="output-formats" />
+
+### Output Formats
+
+The library supports multiple output formats. Use the
+`--benchmark_format=<console|json|csv>` flag to set the format type. `console`
+is the default format.
+
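+For example, to emit JSON instead of the console table (the binary name is
+illustrative, reusing the one built in the usage section above):
+
+```bash
+$ ./mybenchmark --benchmark_format=json
+```
+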
+The Console format is intended to be a human readable format. By default
+the format generates color output. Context is output on stderr and the
+tabular data on stdout. Example tabular output looks like:
+```
+Benchmark Time(ns) CPU(ns) Iterations
+----------------------------------------------------------------------
+BM_SetInsert/1024/1 28928 29349 23853 133.097kB/s 33.2742k items/s
+BM_SetInsert/1024/8 32065 32913 21375 949.487kB/s 237.372k items/s
+BM_SetInsert/1024/10 33157 33648 21431 1.13369MB/s 290.225k items/s
+```
+
+The JSON format outputs human readable json split into two top level attributes.
+The `context` attribute contains information about the run in general, including
+information about the CPU and the date.
+The `benchmarks` attribute contains a list of every benchmark run. Example json
+output looks like:
+```json
+{
+ "context": {
+ "date": "2015/03/17-18:40:25",
+ "num_cpus": 40,
+ "mhz_per_cpu": 2801,
+ "cpu_scaling_enabled": false,
+ "build_type": "debug"
+ },
+ "benchmarks": [
+ {
+ "name": "BM_SetInsert/1024/1",
+ "iterations": 94877,
+ "real_time": 29275,
+ "cpu_time": 29836,
+ "bytes_per_second": 134066,
+ "items_per_second": 33516
+ },
+ {
+ "name": "BM_SetInsert/1024/8",
+ "iterations": 21609,
+ "real_time": 32317,
+ "cpu_time": 32429,
+ "bytes_per_second": 986770,
+ "items_per_second": 246693
+ },
+ {
+ "name": "BM_SetInsert/1024/10",
+ "iterations": 21393,
+ "real_time": 32724,
+ "cpu_time": 33355,
+ "bytes_per_second": 1199226,
+ "items_per_second": 299807
+ }
+ ]
+}
+```
+
+The CSV format outputs comma-separated values. The `context` is output on stderr
+and the CSV itself on stdout. Example CSV output looks like:
+```
+name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
+"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
+"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
+"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
+```
+
+<a name="output-files" />
+
+### Output Files
+
+Write benchmark results to a file with the `--benchmark_out=<filename>` option.
+Specify the output format with `--benchmark_out_format={json|console|csv}`. Note that specifying
+`--benchmark_out` does not suppress the console output.
+
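+For example, the following invocation (binary name illustrative) writes JSON
+results to a file while still printing the console table:
+
+```bash
+$ ./mybenchmark --benchmark_out=results.json --benchmark_out_format=json
+```
+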
+<a name="running-a-subset-of-benchmarks" />
+
+### Running a Subset of Benchmarks
+
+The `--benchmark_filter=<regex>` option can be used to only run the benchmarks
+which match the specified `<regex>`. For example:
+
+```bash
+$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
+Run on (1 X 2300 MHz CPU )
+2016-06-25 19:34:24
+Benchmark Time CPU Iterations
+----------------------------------------------------
+BM_memcpy/32 11 ns 11 ns 79545455
+BM_memcpy/32k 2181 ns 2185 ns 324074
+BM_memcpy/32 12 ns 12 ns 54687500
+BM_memcpy/32k 1834 ns 1837 ns 357143
+```
+
+<a name="result-comparison" />
+
+### Result comparison
+
+It is possible to compare the benchmarking results. See [Additional Tooling Documentation](docs/tools.md)
+
+<a name="runtime-and-reporting-considerations" />
+
+### Runtime and Reporting Considerations
+
+When the benchmark binary is executed, each benchmark function is run serially.
+The number of iterations to run is determined dynamically by running the
+benchmark a few times and measuring the time taken and ensuring that the
+ultimate result will be statistically stable. As such, faster benchmark
+functions will be run for more iterations than slower benchmark functions, and
+the number of iterations is thus reported.
+
+In all cases, the number of iterations for which the benchmark is run is
+governed by the amount of time the benchmark takes. Concretely, the number of
+iterations is at least one, not more than 1e9, until CPU time is greater than
+the minimum time, or the wallclock time is 5x minimum time. The minimum time is
+set per benchmark by calling `MinTime` on the registered benchmark object.
+
+Average timings are then reported over the iterations run. If multiple
+repetitions are requested using the `--benchmark_repetitions` command-line
+option, or at registration time, the benchmark function will be run several
+times and statistical results across these repetitions will also be reported.
+
+As well as the per-benchmark entries, a preamble in the report will include
+information about the machine on which the benchmarks are run.
+
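+As a sketch, the minimum time can be raised for an especially noisy benchmark
+(`BM_MyOp` is a placeholder function name):
+
+```c++
+// Keep iterating until at least 2 seconds of measurement have accumulated.
+BENCHMARK(BM_MyOp)->MinTime(2.0);
+```
+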
+<a name="passing-arguments" />
-The benchmark library will reporting the timing for the code within the `for(...)` loop.
+### Passing Arguments
-### Passing arguments
Sometimes a family of benchmarks can be implemented with just one routine that
takes an extra argument to specify which one of the family of benchmarks to
run. For example, the following code defines a family of benchmarks for
@@ -212,7 +468,31 @@ static void CustomArguments(benchmark::internal::Benchmark* b) {
BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
```
-### Calculate asymptotic complexity (Big O)
+#### Passing Arbitrary Arguments to a Benchmark
+
+In C++11 it is possible to define a benchmark that takes an arbitrary number
+of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
+macro creates a benchmark that invokes `func` with the `benchmark::State` as
+the first argument followed by the specified `args...`.
+The `test_case_name` is appended to the name of the benchmark and
+should describe the values passed.
+
+```c++
+template <class ...ExtraArgs>
+void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
+ [...]
+}
+// Registers a benchmark named "BM_takes_args/int_string_test" that passes
+// the specified values to `extra_args`.
+BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
+```
+Note that elements of `...args` may refer to global variables. Users should
+avoid modifying global state inside of a benchmark.
+
+<a name="asymptotic-complexity" />
+
+### Calculating Asymptotic Complexity (Big O)
+
Asymptotic complexity might be calculated for a family of benchmarks. The
following code will calculate the coefficient for the high-order term in the
running time and the normalized root-mean square error of string comparison.
@@ -243,16 +523,18 @@ that might be used to customize high-order term calculation.
```c++
BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
- ->Range(1<<10, 1<<18)->Complexity([](int n)->double{return n; });
+ ->Range(1<<10, 1<<18)->Complexity([](int64_t n)->double{return n; });
```
-### Templated benchmarks
-Templated benchmarks work the same way: This example produces and consumes
-messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the
-absence of multiprogramming.
+<a name="templated-benchmarks" />
+
+### Templated Benchmarks
+
+This example produces and consumes messages of size `sizeof(v)` `range_x`
+times. It also outputs throughput in the absence of multiprogramming.
```c++
-template <class Q> int BM_Sequential(benchmark::State& state) {
+template <class Q> void BM_Sequential(benchmark::State& state) {
Q q;
typename Q::value_type v;
for (auto _ : state) {
@@ -280,110 +562,210 @@ Three macros are provided for adding benchmark templates.
#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
```
-### A Faster KeepRunning loop
+<a name="fixtures" />
-In C++11 mode, a ranged-based for loop should be used in preference to
-the `KeepRunning` loop for running the benchmarks. For example:
+### Fixtures
+
+Fixture tests are created by first defining a type that derives from
+`::benchmark::Fixture` and then creating/registering the tests using the
+following macros:
+
+* `BENCHMARK_F(ClassName, Method)`
+* `BENCHMARK_DEFINE_F(ClassName, Method)`
+* `BENCHMARK_REGISTER_F(ClassName, Method)`
+
+For example:
```c++
-static void BM_Fast(benchmark::State &state) {
- for (auto _ : state) {
- FastOperation();
+class MyFixture : public benchmark::Fixture {
+public:
+ void SetUp(const ::benchmark::State& state) {
+ }
+
+ void TearDown(const ::benchmark::State& state) {
+ }
+};
+
+BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
}
}
-BENCHMARK(BM_Fast);
+
+BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
+/* BarTest is NOT registered */
+BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
+/* BarTest is now registered */
```
-The reason the ranged-for loop is faster than using `KeepRunning`, is
-because `KeepRunning` requires a memory load and store of the iteration count
-ever iteration, whereas the ranged-for variant is able to keep the iteration count
-in a register.
+#### Templated Fixtures
-For example, an empty inner loop of using the ranged-based for method looks like:
+You can also create templated fixtures using the following macros:
-```asm
-# Loop Init
- mov rbx, qword ptr [r14 + 104]
- call benchmark::State::StartKeepRunning()
- test rbx, rbx
- je .LoopEnd
-.LoopHeader: # =>This Inner Loop Header: Depth=1
- add rbx, -1
- jne .LoopHeader
-.LoopEnd:
-```
+* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
+* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
-Compared to an empty `KeepRunning` loop, which looks like:
+For example:
+```c++
+template<typename T>
+class MyFixture : public benchmark::Fixture {};
-```asm
-.LoopHeader: # in Loop: Header=BB0_3 Depth=1
- cmp byte ptr [rbx], 1
- jne .LoopInit
-.LoopBody: # =>This Inner Loop Header: Depth=1
- mov rax, qword ptr [rbx + 8]
- lea rcx, [rax + 1]
- mov qword ptr [rbx + 8], rcx
- cmp rax, qword ptr [rbx + 104]
- jb .LoopHeader
- jmp .LoopEnd
-.LoopInit:
- mov rdi, rbx
- call benchmark::State::StartKeepRunning()
- jmp .LoopBody
-.LoopEnd:
+BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
+
+BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
+
+BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
```
-Unless C++03 compatibility is required, the ranged-for variant of writing
-the benchmark loop should be preferred.
+<a name="custom-counters" />
-## Passing arbitrary arguments to a benchmark
-In C++11 it is possible to define a benchmark that takes an arbitrary number
-of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
-macro creates a benchmark that invokes `func` with the `benchmark::State` as
-the first argument followed by the specified `args...`.
-The `test_case_name` is appended to the name of the benchmark and
-should describe the values passed.
+### Custom Counters
+
+You can add your own counters with user-defined names. The example below
+will add columns "Foo", "Bar" and "Baz" in its output:
```c++
-template <class ...ExtraArgs>
-void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
- [...]
+static void UserCountersExample1(benchmark::State& state) {
+ double numFoos = 0, numBars = 0, numBazs = 0;
+ for (auto _ : state) {
+ // ... count Foo,Bar,Baz events
+ }
+ state.counters["Foo"] = numFoos;
+ state.counters["Bar"] = numBars;
+ state.counters["Baz"] = numBazs;
}
-// Registers a benchmark named "BM_takes_args/int_string_test" that passes
-// the specified values to `extra_args`.
-BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
```
-Note that elements of `...args` may refer to global variables. Users should
-avoid modifying global state inside of a benchmark.
-## Using RegisterBenchmark(name, fn, args...)
+The `state.counters` object is a `std::map` with `std::string` keys
+and `Counter` values. The latter is a `double`-like class, via an implicit
+conversion to `double&`. Thus you can use all of the standard arithmetic
+assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
-The `RegisterBenchmark(name, func, args...)` function provides an alternative
-way to create and register benchmarks.
-`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
-pointer to a new benchmark with the specified `name` that invokes
-`func(st, args...)` where `st` is a `benchmark::State` object.
+In multithreaded benchmarks, each counter is set on the calling thread only.
+When the benchmark finishes, the counters from each thread will be summed;
+the resulting sum is the value which will be shown for the benchmark.
-Unlike the `BENCHMARK` registration macros, which can only be used at the global
-scope, the `RegisterBenchmark` can be called anywhere. This allows for
-benchmark tests to be registered programmatically.
+The `Counter` constructor accepts three parameters: the value as a `double`;
+a bit flag which allows you to show counters as rates, and/or as per-thread
+iterations, and/or as per-thread averages, and/or as iteration invariants;
+and a flag specifying the 'unit' - i.e. is 1k a 1000 (default,
+`benchmark::Counter::OneK::kIs1000`), or 1024
+(`benchmark::Counter::OneK::kIs1024`)?
-Additionally `RegisterBenchmark` allows any callable object to be registered
-as a benchmark. Including capturing lambdas and function objects.
+```c++
+ // sets a simple counter
+ state.counters["Foo"] = numFoos;
+
+ // Set the counter as a rate. It will be presented divided
+ // by the duration of the benchmark.
+ state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate);
+
+ // Set the counter as a thread-average quantity. It will
+ // be presented divided by the number of threads.
+ state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads);
+
+ // There's also a combined flag:
+ state.counters["FooAvgRate"] = Counter(numFoos,benchmark::Counter::kAvgThreadsRate);
+
+ // This says that we process with the rate of state.range(0) bytes every iteration:
+ state.counters["BytesProcessed"] = Counter(state.range(0), benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1024);
+```
+
+When you're compiling in C++11 mode or later you can use `insert()` with
+`std::initializer_list`:
-For Example:
```c++
-auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
+ // With C++11, this can be done:
+ state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}});
+ // ... instead of:
+ state.counters["Foo"] = numFoos;
+ state.counters["Bar"] = numBars;
+ state.counters["Baz"] = numBazs;
+```
-int main(int argc, char** argv) {
- for (auto& test_input : { /* ... */ })
- benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
- benchmark::Initialize(&argc, argv);
- benchmark::RunSpecifiedBenchmarks();
-}
+#### Counter Reporting
+
+When using the console reporter, by default, user counters are printed at
+the end after the table, the same way as ``bytes_processed`` and
+``items_processed``. This is best for cases in which there are few counters,
+or where there are only a couple of lines per benchmark. Here's an example of
+the default output:
+
+```
+------------------------------------------------------------------------------
+Benchmark Time CPU Iterations UserCounters...
+------------------------------------------------------------------------------
+BM_UserCounter/threads:8 2248 ns 10277 ns 68808 Bar=16 Bat=40 Baz=24 Foo=8
+BM_UserCounter/threads:1 9797 ns 9788 ns 71523 Bar=2 Bat=5 Baz=3 Foo=1024m
+BM_UserCounter/threads:2 4924 ns 9842 ns 71036 Bar=4 Bat=10 Baz=6 Foo=2
+BM_UserCounter/threads:4 2589 ns 10284 ns 68012 Bar=8 Bat=20 Baz=12 Foo=4
+BM_UserCounter/threads:8 2212 ns 10287 ns 68040 Bar=16 Bat=40 Baz=24 Foo=8
+BM_UserCounter/threads:16 1782 ns 10278 ns 68144 Bar=32 Bat=80 Baz=48 Foo=16
+BM_UserCounter/threads:32 1291 ns 10296 ns 68256 Bar=64 Bat=160 Baz=96 Foo=32
+BM_UserCounter/threads:4 2615 ns 10307 ns 68040 Bar=8 Bat=20 Baz=12 Foo=4
+BM_Factorial 26 ns 26 ns 26608979 40320
+BM_Factorial/real_time 26 ns 26 ns 26587936 40320
+BM_CalculatePiRange/1 16 ns 16 ns 45704255 0
+BM_CalculatePiRange/8 73 ns 73 ns 9520927 3.28374
+BM_CalculatePiRange/64 609 ns 609 ns 1140647 3.15746
+BM_CalculatePiRange/512 4900 ns 4901 ns 142696 3.14355
+```
+
+If this doesn't suit you, you can print each counter as a table column by
+passing the flag `--benchmark_counters_tabular=true` to the benchmark
+application. This is best for cases in which there are a lot of counters, or
+a lot of lines per individual benchmark. Note that this will trigger a
+reprinting of the table header any time the counter set changes between
+individual benchmarks. Here's an example of corresponding output when
+`--benchmark_counters_tabular=true` is passed:
+
+```
+---------------------------------------------------------------------------------------
+Benchmark Time CPU Iterations Bar Bat Baz Foo
+---------------------------------------------------------------------------------------
+BM_UserCounter/threads:8 2198 ns 9953 ns 70688 16 40 24 8
+BM_UserCounter/threads:1 9504 ns 9504 ns 73787 2 5 3 1
+BM_UserCounter/threads:2 4775 ns 9550 ns 72606 4 10 6 2
+BM_UserCounter/threads:4 2508 ns 9951 ns 70332 8 20 12 4
+BM_UserCounter/threads:8 2055 ns 9933 ns 70344 16 40 24 8
+BM_UserCounter/threads:16 1610 ns 9946 ns 70720 32 80 48 16
+BM_UserCounter/threads:32 1192 ns 9948 ns 70496 64 160 96 32
+BM_UserCounter/threads:4 2506 ns 9949 ns 70332 8 20 12 4
+--------------------------------------------------------------
+Benchmark Time CPU Iterations
+--------------------------------------------------------------
+BM_Factorial 26 ns 26 ns 26392245 40320
+BM_Factorial/real_time 26 ns 26 ns 26494107 40320
+BM_CalculatePiRange/1 15 ns 15 ns 45571597 0
+BM_CalculatePiRange/8 74 ns 74 ns 9450212 3.28374
+BM_CalculatePiRange/64 595 ns 595 ns 1173901 3.15746
+BM_CalculatePiRange/512 4752 ns 4752 ns 147380 3.14355
+BM_CalculatePiRange/4k 37970 ns 37972 ns 18453 3.14184
+BM_CalculatePiRange/32k 303733 ns 303744 ns 2305 3.14162
+BM_CalculatePiRange/256k 2434095 ns 2434186 ns 288 3.1416
+BM_CalculatePiRange/1024k 9721140 ns 9721413 ns 71 3.14159
+BM_CalculatePi/threads:8 2255 ns 9943 ns 70936
```
+Note above the additional header printed when the benchmark changes from
+``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does
+not have the same counter set as ``BM_UserCounter``.
+
+<a name="multithreaded-benchmarks"/>
+
+### Multithreaded Benchmarks
-### Multithreaded benchmarks
In a multithreaded test (benchmark invoked by multiple threads simultaneously),
it is guaranteed that none of the threads will start until all have reached
the start of the benchmark loop, and all will have finished before any thread
@@ -416,8 +798,78 @@ BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime();
Without `UseRealTime`, CPU time is used by default.
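+
+A minimal registration sketch, assuming a placeholder `BM_ParallelOp` function:
+
+```c++
+static void BM_ParallelOp(benchmark::State& state) {
+  for (auto _ : state) {
+    // Work executed concurrently by each of the registered threads; the
+    // framework synchronizes their entry to and exit from the loop.
+  }
+}
+// Run with 4 threads and report wall-clock time rather than CPU time.
+BENCHMARK(BM_ParallelOp)->Threads(4)->UseRealTime();
+```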
+<a name="cpu-timers" />
+
+### CPU Timers
+
+By default, the CPU timer only measures the time spent by the main thread.
+If the benchmark itself uses threads internally, this measurement may not
+be what you are looking for. Instead, there is a way to measure the total
+CPU usage of the process, by all the threads.
+
+```c++
+void callee(int i);
+
+static void MyMain(int size) {
+#pragma omp parallel for
+ for(int i = 0; i < size; i++)
+ callee(i);
+}
+
+static void BM_OpenMP(benchmark::State& state) {
+ for (auto _ : state)
+ MyMain(state.range(0));
+}
+
+// Measure the time spent by the main thread, use it to decide for how long to
+// run the benchmark loop. Depending on the internal implementation, this may
+// measure anywhere from near-zero (the overhead spent before/after work
+// handoff to worker thread[s]) to the whole single-thread time.
+BENCHMARK(BM_OpenMP)->Range(8, 8<<10);
+
+// Measure the user-visible time, the wall clock (literally, the time that
+// has passed on the clock on the wall), use it to decide for how long to
+// run the benchmark loop. This will always be meaningful, and will match the
+// time spent by the main thread in the single-threaded case, in general decreasing
+// with the number of internal threads doing the work.
+BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->UseRealTime();
+
+// Measure the total CPU consumption, use it to decide for how long to
+// run the benchmark loop. This will always measure to no less than the
+// time spent by the main thread in single-threaded case.
+BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime();
+
+// A mixture of the last two. Measure the total CPU consumption, but use the
+// wall clock to decide for how long to run the benchmark loop.
+BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime()->UseRealTime();
+```
+
+#### Controlling Timers
+
+Normally, the entire duration of the work loop (`for (auto _ : state) {}`)
+is measured. But sometimes it is necessary to do some work inside of
+that loop, every iteration, without counting that time toward the benchmark time.
+That is possible, although it is not recommended, since it has high overhead.
+
+```c++
+static void BM_SetInsert_With_Timer_Control(benchmark::State& state) {
+ std::set<int> data;
+ for (auto _ : state) {
+ state.PauseTiming(); // Stop timers. They will not count until they are resumed.
+ data = ConstructRandomSet(state.range(0)); // Do something that should not be measured
+ state.ResumeTiming(); // And resume timers. They are now counting again.
+ // The rest will be measured.
+ for (int j = 0; j < state.range(1); ++j)
+ data.insert(RandomNumber());
+ }
+}
+BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}});
+```
+
+<a name="manual-timing" />
+
+### Manual Timing
-## Manual timing
For benchmarking something for which neither CPU time nor real-time are
correct or accurate enough, completely manual timing is supported using
the `UseManualTime` function.
@@ -455,7 +907,22 @@ static void BM_ManualTiming(benchmark::State& state) {
BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime();
```
-### Preventing optimisation
+<a name="setting-the-time-unit" />
+
+### Setting the Time Unit
+
+If a benchmark runs a few milliseconds it may be hard to visually compare the
+measured times, since the output data is given in nanoseconds by default. In
+order to set the time unit manually, you can specify it:
+
+```c++
+BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
+```
+
+<a name="preventing-optimization" />
+
+### Preventing Optimization
+
To prevent a value or expression from being optimized away by the compiler
the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
functions can be used.
@@ -513,24 +980,10 @@ static void BM_vector_push_back(benchmark::State& state) {
Note that `ClobberMemory()` is only available for GNU or MSVC based compilers.
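+
+A minimal sketch of `DoNotOptimize`, assuming a placeholder benchmark:
+
+```c++
+static void BM_Increment(benchmark::State& state) {
+  int x = 0;
+  for (auto _ : state) {
+    // Without this guard the compiler could delete the loop body entirely.
+    benchmark::DoNotOptimize(x += 1);
+  }
+}
+BENCHMARK(BM_Increment);
+```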
-### Set time unit manually
-If a benchmark runs a few milliseconds it may be hard to visually compare the
-measured times, since the output data is given in nanoseconds per default. In
-order to manually set the time unit, you can specify it manually:
-
-```c++
-BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
-```
+<a name="reporting-statistics" />
-## Controlling number of iterations
-In all cases, the number of iterations for which the benchmark is run is
-governed by the amount of time the benchmark takes. Concretely, the number of
-iterations is at least one, not more than 1e9, until CPU time is greater than
-the minimum time, or the wallclock time is 5x minimum time. The minimum time is
-set as a flag `--benchmark_min_time` or per benchmark by calling `MinTime` on
-the registered benchmark object.
+### Statistics: Reporting the Mean, Median and Standard Deviation of Repeated Benchmarks
-## Reporting the mean, median and standard deviation by repeated benchmarks
By default each benchmark is run once and that single result is reported.
However benchmarks are often noisy and a single result may not be representative
of the overall behavior. For this reason it's possible to repeatedly rerun the
@@ -541,17 +994,28 @@ The number of runs of each benchmark is specified globally by the
`Repetitions` on the registered benchmark object. When a benchmark is run more
than once the mean, median and standard deviation of the runs will be reported.
-Additionally the `--benchmark_report_aggregates_only={true|false}` flag or
-`ReportAggregatesOnly(bool)` function can be used to change how repeated tests
-are reported. By default the result of each repeated run is reported. When this
-option is `true` only the mean, median and standard deviation of the runs is reported.
-Calling `ReportAggregatesOnly(bool)` on a registered benchmark object overrides
-the value of the flag for that benchmark.
+Additionally the `--benchmark_report_aggregates_only={true|false}`,
+`--benchmark_display_aggregates_only={true|false}` flags or
+`ReportAggregatesOnly(bool)`, `DisplayAggregatesOnly(bool)` functions can be
+used to change how repeated tests are reported. By default the result of each
+repeated run is reported. When the `report aggregates only` option is `true`,
+only the aggregates (i.e. mean, median and standard deviation, plus complexity
+measurements if they were requested) of the runs are reported, to both
+reporters: standard output (console) and the file.
+However, when only the `display aggregates only` option is `true`,
+only the aggregates are displayed in the standard output, while the file
+output still contains everything.
+Calling `ReportAggregatesOnly(bool)` / `DisplayAggregatesOnly(bool)` on a
+registered benchmark object overrides the value of the appropriate flag for that
+benchmark.
+
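+As a sketch, repetitions and aggregate-only reporting can be combined on a
+registered benchmark (`BM_Noisy` is a placeholder function name):
+
+```c++
+// Run the whole benchmark 10 times; report only the aggregates.
+BENCHMARK(BM_Noisy)->Repetitions(10)->ReportAggregatesOnly(true);
+```
+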
+<a name="custom-statistics" />
+
+### Custom Statistics
-## User-defined statistics for repeated benchmarks
While having mean, median and standard deviation is nice, this may not be
-enough for everyone. For example you may want to know what is the largest
-observation, e.g. because you have some real-time constraints. This is easy.
+enough for everyone. For example you may want to know what the largest
+observation is, e.g. because you have some real-time constraints. This is easy.
The following code will specify a custom statistic to be calculated, defined
by a lambda function.
@@ -571,188 +1035,38 @@ BENCHMARK(BM_spin_empty)
->Arg(512);
```
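+
+For instance, a "max" statistic can be registered with `ComputeStatistics`
+(a sketch; `BM_Noisy` is a placeholder, and the lambda receives one timing
+per repetition):
+
+```c++
+#include <algorithm>  // for std::max_element
+
+BENCHMARK(BM_Noisy)
+    ->Repetitions(10)
+    ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
+      return *std::max_element(v.begin(), v.end());
+    });
+```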
-## Fixtures
-Fixture tests are created by
-first defining a type that derives from `::benchmark::Fixture` and then
-creating/registering the tests using the following macros:
+<a name="using-register-benchmark" />
-* `BENCHMARK_F(ClassName, Method)`
-* `BENCHMARK_DEFINE_F(ClassName, Method)`
-* `BENCHMARK_REGISTER_F(ClassName, Method)`
+### Using RegisterBenchmark(name, fn, args...)
-For Example:
-
-```c++
-class MyFixture : public benchmark::Fixture {};
-
-BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
- for (auto _ : st) {
- ...
- }
-}
-
-BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
- for (auto _ : st) {
- ...
- }
-}
-/* BarTest is NOT registered */
-BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
-/* BarTest is now registered */
-```
+The `RegisterBenchmark(name, func, args...)` function provides an alternative
+way to create and register benchmarks.
+`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
+pointer to a new benchmark with the specified `name` that invokes
+`func(st, args...)` where `st` is a `benchmark::State` object.
-### Templated fixtures
-Also you can create templated fixture by using the following macros:
+Unlike the `BENCHMARK` registration macros, which can only be used at the global
+scope, the `RegisterBenchmark` can be called anywhere. This allows for
+benchmark tests to be registered programmatically.
-* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
-* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
+Additionally `RegisterBenchmark` allows any callable object to be registered
+as a benchmark, including capturing lambdas and function objects.
-For example:
+For example:
```c++
-template<typename T>
-class MyFixture : public benchmark::Fixture {};
-
-BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
- for (auto _ : st) {
- ...
- }
-}
-
-BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
- for (auto _ : st) {
- ...
- }
-}
-
-BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
-```
-
-## User-defined counters
-
-You can add your own counters with user-defined names. The example below
-will add columns "Foo", "Bar" and "Baz" in its output:
+auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
-```c++
-static void UserCountersExample1(benchmark::State& state) {
- double numFoos = 0, numBars = 0, numBazs = 0;
- for (auto _ : state) {
- // ... count Foo,Bar,Baz events
- }
- state.counters["Foo"] = numFoos;
- state.counters["Bar"] = numBars;
- state.counters["Baz"] = numBazs;
+int main(int argc, char** argv) {
+ for (auto& test_input : { /* ... */ })
+ benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
+ benchmark::Initialize(&argc, argv);
+ benchmark::RunSpecifiedBenchmarks();
}
```
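+
+Because any callable is accepted, a capturing lambda can also be registered
+directly and configured through the returned pointer; a sketch (the
+`RegisterMyBenchmarks` wrapper, the benchmark name and the captured value are
+illustrative):
+
+```c++
+void RegisterMyBenchmarks() {
+  int payload = 42;  // illustrative captured value
+  benchmark::RegisterBenchmark("BM_capture", [payload](benchmark::State& st) {
+    for (auto _ : st) {
+      benchmark::DoNotOptimize(payload);
+    }
+  })->Threads(2);
+}
+```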
-The `state.counters` object is a `std::map` with `std::string` keys
-and `Counter` values. The latter is a `double`-like class, via an implicit
-conversion to `double&`. Thus you can use all of the standard arithmetic
-assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
-
-In multithreaded benchmarks, each counter is set on the calling thread only.
-When the benchmark finishes, the counters from each thread will be summed;
-the resulting sum is the value which will be shown for the benchmark.
-
-The `Counter` constructor accepts two parameters: the value as a `double`
-and a bit flag which allows you to show counters as rates and/or as
-per-thread averages:
-
-```c++
- // sets a simple counter
- state.counters["Foo"] = numFoos;
-
- // Set the counter as a rate. It will be presented divided
- // by the duration of the benchmark.
- state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate);
-
- // Set the counter as a thread-average quantity. It will
- // be presented divided by the number of threads.
- state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads);
-
- // There's also a combined flag:
- state.counters["FooAvgRate"] = Counter(numFoos,benchmark::Counter::kAvgThreadsRate);
-```
-
-When you're compiling in C++11 mode or later you can use `insert()` with
-`std::initializer_list`:
+<a name="exiting-with-an-error" />
-```c++
- // With C++11, this can be done:
- state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}});
- // ... instead of:
- state.counters["Foo"] = numFoos;
- state.counters["Bar"] = numBars;
- state.counters["Baz"] = numBazs;
-```
-
-### Counter reporting
-
-When using the console reporter, by default, user counters are are printed at
-the end after the table, the same way as ``bytes_processed`` and
-``items_processed``. This is best for cases in which there are few counters,
-or where there are only a couple of lines per benchmark. Here's an example of
-the default output:
-
-```
-------------------------------------------------------------------------------
-Benchmark Time CPU Iterations UserCounters...
-------------------------------------------------------------------------------
-BM_UserCounter/threads:8 2248 ns 10277 ns 68808 Bar=16 Bat=40 Baz=24 Foo=8
-BM_UserCounter/threads:1 9797 ns 9788 ns 71523 Bar=2 Bat=5 Baz=3 Foo=1024m
-BM_UserCounter/threads:2 4924 ns 9842 ns 71036 Bar=4 Bat=10 Baz=6 Foo=2
-BM_UserCounter/threads:4 2589 ns 10284 ns 68012 Bar=8 Bat=20 Baz=12 Foo=4
-BM_UserCounter/threads:8 2212 ns 10287 ns 68040 Bar=16 Bat=40 Baz=24 Foo=8
-BM_UserCounter/threads:16 1782 ns 10278 ns 68144 Bar=32 Bat=80 Baz=48 Foo=16
-BM_UserCounter/threads:32 1291 ns 10296 ns 68256 Bar=64 Bat=160 Baz=96 Foo=32
-BM_UserCounter/threads:4 2615 ns 10307 ns 68040 Bar=8 Bat=20 Baz=12 Foo=4
-BM_Factorial 26 ns 26 ns 26608979 40320
-BM_Factorial/real_time 26 ns 26 ns 26587936 40320
-BM_CalculatePiRange/1 16 ns 16 ns 45704255 0
-BM_CalculatePiRange/8 73 ns 73 ns 9520927 3.28374
-BM_CalculatePiRange/64 609 ns 609 ns 1140647 3.15746
-BM_CalculatePiRange/512 4900 ns 4901 ns 142696 3.14355
-```
-
-If this doesn't suit you, you can print each counter as a table column by
-passing the flag `--benchmark_counters_tabular=true` to the benchmark
-application. This is best for cases in which there are a lot of counters, or
-a lot of lines per individual benchmark. Note that this will trigger a
-reprinting of the table header any time the counter set changes between
-individual benchmarks. Here's an example of corresponding output when
-`--benchmark_counters_tabular=true` is passed:
-
-```
----------------------------------------------------------------------------------------
-Benchmark Time CPU Iterations Bar Bat Baz Foo
----------------------------------------------------------------------------------------
-BM_UserCounter/threads:8 2198 ns 9953 ns 70688 16 40 24 8
-BM_UserCounter/threads:1 9504 ns 9504 ns 73787 2 5 3 1
-BM_UserCounter/threads:2 4775 ns 9550 ns 72606 4 10 6 2
-BM_UserCounter/threads:4 2508 ns 9951 ns 70332 8 20 12 4
-BM_UserCounter/threads:8 2055 ns 9933 ns 70344 16 40 24 8
-BM_UserCounter/threads:16 1610 ns 9946 ns 70720 32 80 48 16
-BM_UserCounter/threads:32 1192 ns 9948 ns 70496 64 160 96 32
-BM_UserCounter/threads:4 2506 ns 9949 ns 70332 8 20 12 4
---------------------------------------------------------------
-Benchmark Time CPU Iterations
---------------------------------------------------------------
-BM_Factorial 26 ns 26 ns 26392245 40320
-BM_Factorial/real_time 26 ns 26 ns 26494107 40320
-BM_CalculatePiRange/1 15 ns 15 ns 45571597 0
-BM_CalculatePiRange/8 74 ns 74 ns 9450212 3.28374
-BM_CalculatePiRange/64 595 ns 595 ns 1173901 3.15746
-BM_CalculatePiRange/512 4752 ns 4752 ns 147380 3.14355
-BM_CalculatePiRange/4k 37970 ns 37972 ns 18453 3.14184
-BM_CalculatePiRange/32k 303733 ns 303744 ns 2305 3.14162
-BM_CalculatePiRange/256k 2434095 ns 2434186 ns 288 3.1416
-BM_CalculatePiRange/1024k 9721140 ns 9721413 ns 71 3.14159
-BM_CalculatePi/threads:8 2255 ns 9943 ns 70936
-```
-Note above the additional header printed when the benchmark changes from
-``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does
-not have the same counter set as ``BM_UserCounter``.
-
-## Exiting Benchmarks in Error
+### Exiting with an Error
When errors caused by external influences, such as file I/O and network
 communication, occur within a benchmark, the
@@ -792,141 +1106,67 @@ static void BM_test_ranged_fo(benchmark::State & state) {
}
}
```
+<a name="a-faster-keep-running-loop" />
-## Running a subset of the benchmarks
-
-The `--benchmark_filter=<regex>` option can be used to only run the benchmarks
-which match the specified `<regex>`. For example:
-
-```bash
-$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
-Run on (1 X 2300 MHz CPU )
-2016-06-25 19:34:24
-Benchmark Time CPU Iterations
-----------------------------------------------------
-BM_memcpy/32 11 ns 11 ns 79545455
-BM_memcpy/32k 2181 ns 2185 ns 324074
-BM_memcpy/32 12 ns 12 ns 54687500
-BM_memcpy/32k 1834 ns 1837 ns 357143
-```
-
-
-## Output Formats
-The library supports multiple output formats. Use the
-`--benchmark_format=<console|json|csv>` flag to set the format type. `console`
-is the default format.
+### A Faster KeepRunning Loop
-The Console format is intended to be a human readable format. By default
-the format generates color output. Context is output on stderr and the
-tabular data on stdout. Example tabular output looks like:
-```
-Benchmark Time(ns) CPU(ns) Iterations
-----------------------------------------------------------------------
-BM_SetInsert/1024/1 28928 29349 23853 133.097kB/s 33.2742k items/s
-BM_SetInsert/1024/8 32065 32913 21375 949.487kB/s 237.372k items/s
-BM_SetInsert/1024/10 33157 33648 21431 1.13369MB/s 290.225k items/s
-```
+In C++11 mode, a range-based for loop should be used in preference to
+the `KeepRunning` loop for running the benchmarks. For example:
-The JSON format outputs human readable json split into two top level attributes.
-The `context` attribute contains information about the run in general, including
-information about the CPU and the date.
-The `benchmarks` attribute contains a list of every benchmark run. Example json
-output looks like:
-```json
-{
- "context": {
- "date": "2015/03/17-18:40:25",
- "num_cpus": 40,
- "mhz_per_cpu": 2801,
- "cpu_scaling_enabled": false,
- "build_type": "debug"
- },
- "benchmarks": [
- {
- "name": "BM_SetInsert/1024/1",
- "iterations": 94877,
- "real_time": 29275,
- "cpu_time": 29836,
- "bytes_per_second": 134066,
- "items_per_second": 33516
- },
- {
- "name": "BM_SetInsert/1024/8",
- "iterations": 21609,
- "real_time": 32317,
- "cpu_time": 32429,
- "bytes_per_second": 986770,
- "items_per_second": 246693
- },
- {
- "name": "BM_SetInsert/1024/10",
- "iterations": 21393,
- "real_time": 32724,
- "cpu_time": 33355,
- "bytes_per_second": 1199226,
- "items_per_second": 299807
- }
- ]
+```c++
+static void BM_Fast(benchmark::State &state) {
+ for (auto _ : state) {
+ FastOperation();
+ }
}
+BENCHMARK(BM_Fast);
```
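+
+For comparison, a sketch of the same benchmark written with the legacy
+`KeepRunning` loop (the `BM_FastLegacy` name is illustrative):
+
+```c++
+static void BM_FastLegacy(benchmark::State &state) {
+  while (state.KeepRunning()) {
+    FastOperation();
+  }
+}
+BENCHMARK(BM_FastLegacy);
+```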
-The CSV format outputs comma-separated values. The `context` is output on stderr
-and the CSV itself on stdout. Example CSV output looks like:
-```
-name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
-"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
-"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
-"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
-```
-
-## Output Files
-The library supports writing the output of the benchmark to a file specified
-by `--benchmark_out=<filename>`. The format of the output can be specified
-using `--benchmark_out_format={json|console|csv}`. Specifying
-`--benchmark_out` does not suppress the console output.
+The ranged-for loop is faster than the `KeepRunning` loop because
+`KeepRunning` requires a memory load and store of the iteration count on
+every iteration, whereas the ranged-for variant is able to keep the iteration
+count in a register.
-## Debug vs Release
-By default, benchmark builds as a debug library. You will see a warning in the output when this is the case. To build it as a release library instead, use:
+For example, an empty inner loop using the range-based for method looks like:
-```
-cmake -DCMAKE_BUILD_TYPE=Release
+```asm
+# Loop Init
+ mov rbx, qword ptr [r14 + 104]
+ call benchmark::State::StartKeepRunning()
+ test rbx, rbx
+ je .LoopEnd
+.LoopHeader: # =>This Inner Loop Header: Depth=1
+ add rbx, -1
+ jne .LoopHeader
+.LoopEnd:
```
-To enable link-time optimisation, use
+Compared to an empty `KeepRunning` loop, which looks like:
+```asm
+.LoopHeader: # in Loop: Header=BB0_3 Depth=1
+ cmp byte ptr [rbx], 1
+ jne .LoopInit
+.LoopBody: # =>This Inner Loop Header: Depth=1
+ mov rax, qword ptr [rbx + 8]
+ lea rcx, [rax + 1]
+ mov qword ptr [rbx + 8], rcx
+ cmp rax, qword ptr [rbx + 104]
+ jb .LoopHeader
+ jmp .LoopEnd
+.LoopInit:
+ mov rdi, rbx
+ call benchmark::State::StartKeepRunning()
+ jmp .LoopBody
+.LoopEnd:
```
-cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
-```
-
-If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake cache variables, if autodetection fails.
-If you are using clang, you may need to set `LLVMAR_EXECUTABLE`, `LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
-
-## Linking against the library
-
-When the library is built using GCC it is necessary to link with `-pthread`,
-due to how GCC implements `std::thread`.
-
-For GCC 4.x failing to link to pthreads will lead to runtime exceptions, not linker errors.
-See [issue #67](https://github.com/google/benchmark/issues/67) for more details.
-
-## Compiler Support
-
-Google Benchmark uses C++11 when building the library. As such we require
-a modern C++ toolchain, both compiler and standard library.
-The following minimum versions are strongly recommended build the library:
-
-* GCC 4.8
-* Clang 3.4
-* Visual Studio 2013
-* Intel 2015 Update 1
-
-Anything older *may* work.
+Unless C++03 compatibility is required, the ranged-for variant of the
+benchmark loop should be preferred.
-Note: Using the library and its headers in C++03 is supported. C++11 is only
-required to build the library.
+<a name="disabling-cpu-frequency-scaling" />
-## Disable CPU frequency scaling
+### Disabling CPU Frequency Scaling
If you see this error:
```
***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
@@ -937,14 +1177,3 @@ sudo cpupower frequency-set --governor performance
./mybench
sudo cpupower frequency-set --governor powersave
```
-
-# Known Issues
-
-### Windows with CMake
-
-* Users must manually link `shlwapi.lib`. Failure to do so may result
-in unresolved symbols.
-
-### Solaris
-
-* Users must explicitly link with kstat library (-lkstat compilation flag).
diff --git a/src/third_party/benchmark-1.4.1/benchmark/include/benchmark/benchmark.h b/src/third_party/benchmark/dist/include/benchmark/benchmark.h
index 46fd573f395..4f40501596e 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/include/benchmark/benchmark.h
+++ b/src/third_party/benchmark/dist/include/benchmark/benchmark.h
@@ -56,8 +56,7 @@ static void BM_memcpy(benchmark::State& state) {
memset(src, 'x', state.range(0));
for (auto _ : state)
memcpy(dst, src, state.range(0));
- state.SetBytesProcessed(int64_t(state.iterations()) *
- int64_t(state.range(0)));
+ state.SetBytesProcessed(state.iterations() * state.range(0));
delete[] src; delete[] dst;
}
BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
@@ -122,8 +121,7 @@ template <class Q> int BM_Sequential(benchmark::State& state) {
q.Wait(&v);
}
// actually messages, not bytes:
- state.SetBytesProcessed(
- static_cast<int64_t>(state.iterations())*state.range(0));
+ state.SetBytesProcessed(state.iterations() * state.range(0));
}
BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
@@ -164,7 +162,6 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#ifndef BENCHMARK_BENCHMARK_H_
#define BENCHMARK_BENCHMARK_H_
-
// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer.
#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
#define BENCHMARK_HAS_CXX11
@@ -176,19 +173,19 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#include <cassert>
#include <cstddef>
#include <iosfwd>
-#include <string>
-#include <vector>
#include <map>
#include <set>
+#include <string>
+#include <vector>
#if defined(BENCHMARK_HAS_CXX11)
-#include <type_traits>
#include <initializer_list>
+#include <type_traits>
#include <utility>
#endif
#if defined(_MSC_VER)
-#include <intrin.h> // for _ReadWriteBarrier
+#include <intrin.h> // for _ReadWriteBarrier
#endif
#ifndef BENCHMARK_HAS_CXX11
@@ -227,21 +224,36 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#define BENCHMARK_INTERNAL_TOSTRING2(x) #x
#define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x)
-#if defined(__GNUC__)
+#if defined(__GNUC__) || defined(__clang__)
#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y)
#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg)))
#else
#define BENCHMARK_BUILTIN_EXPECT(x, y) x
#define BENCHMARK_DEPRECATED_MSG(msg)
-#define BENCHMARK_WARNING_MSG(msg) __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING(__LINE__) ") : warning note: " msg))
+#define BENCHMARK_WARNING_MSG(msg) \
+ __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \
+ __LINE__) ") : warning note: " msg))
#endif
#if defined(__GNUC__) && !defined(__clang__)
#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#endif
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+#if defined(__GNUC__) || __has_builtin(__builtin_unreachable)
+#define BENCHMARK_UNREACHABLE() __builtin_unreachable()
+#elif defined(_MSC_VER)
+#define BENCHMARK_UNREACHABLE() __assume(false)
+#else
+#define BENCHMARK_UNREACHABLE() ((void)0)
+#endif
+
namespace benchmark {
class BenchmarkReporter;
+class MemoryManager;
void Initialize(int* argc, char** argv);
@@ -254,7 +266,7 @@ bool ReportUnrecognizedArguments(int argc, char** argv);
// of each matching benchmark. Otherwise run each matching benchmark and
// report the results.
//
-// The second and third overload use the specified 'console_reporter' and
+// The second and third overload use the specified 'display_reporter' and
// 'file_reporter' respectively. 'file_reporter' will write to the file
// specified
// by '--benchmark_output'. If '--benchmark_output' is not given the
@@ -262,16 +274,13 @@ bool ReportUnrecognizedArguments(int argc, char** argv);
//
// RETURNS: The number of matching benchmarks.
size_t RunSpecifiedBenchmarks();
-size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter);
-size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter);
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter);
-// If this routine is called, peak memory allocation past this point in the
-// benchmark is reported at the end of the benchmark report line. (It is
-// computed by running the benchmark once with a single iteration and a memory
-// tracer.)
-// TODO(dominic)
-// void MemoryUsage();
+// Register a MemoryManager instance that will be used to collect and report
+// allocation measurements for benchmark runs.
+void RegisterMemoryManager(MemoryManager* memory_manager);
namespace internal {
class Benchmark;
@@ -290,22 +299,19 @@ BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
} // namespace internal
-
#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
defined(__EMSCRIPTEN__)
-# define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
+#define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
#endif
-
// The DoNotOptimize(...) function can be used to prevent a value or
// expression from being optimized away by the compiler. This function is
// intended to add little to no overhead.
// See: https://youtu.be/nXaxk27zwlk?t=2441
#ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY
template <class Tp>
-inline BENCHMARK_ALWAYS_INLINE
-void DoNotOptimize(Tp const& value) {
- asm volatile("" : : "r,m"(value) : "memory");
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
+ asm volatile("" : : "r,m"(value) : "memory");
}
template <class Tp>
@@ -329,9 +335,7 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
_ReadWriteBarrier();
}
-inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
- _ReadWriteBarrier();
-}
+inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); }
#else
template <class Tp>
inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
@@ -340,39 +344,63 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers
#endif
-
-
// This class is used for user-defined counters.
class Counter {
-public:
-
+ public:
enum Flags {
- kDefaults = 0,
+ kDefaults = 0,
// Mark the counter as a rate. It will be presented divided
// by the duration of the benchmark.
- kIsRate = 1,
+ kIsRate = 1U << 0U,
// Mark the counter as a thread-average quantity. It will be
// presented divided by the number of threads.
- kAvgThreads = 2,
+ kAvgThreads = 1U << 1U,
// Mark the counter as a thread-average rate. See above.
- kAvgThreadsRate = kIsRate|kAvgThreads
+ kAvgThreadsRate = kIsRate | kAvgThreads,
+ // Mark the counter as a constant value, valid/same for *every* iteration.
+ // When reporting, it will be *multiplied* by the iteration count.
+ kIsIterationInvariant = 1U << 2U,
+ // Mark the counter as a constant rate.
+ // When reporting, it will be *multiplied* by the iteration count
+ // and then divided by the duration of the benchmark.
+ kIsIterationInvariantRate = kIsRate | kIsIterationInvariant,
+    // Mark the counter as an iteration-average quantity.
+ // It will be presented divided by the number of iterations.
+ kAvgIterations = 1U << 3U,
+    // Mark the counter as an iteration-average rate. See above.
+ kAvgIterationsRate = kIsRate | kAvgIterations
+ };
+
+ enum OneK {
+ // 1'000 items per 1k
+ kIs1000 = 1000,
+ // 1'024 items per 1k
+ kIs1024 = 1024
};
double value;
- Flags flags;
+ Flags flags;
+ OneK oneK;
BENCHMARK_ALWAYS_INLINE
- Counter(double v = 0., Flags f = kDefaults) : value(v), flags(f) {}
-
- BENCHMARK_ALWAYS_INLINE operator double const& () const { return value; }
- BENCHMARK_ALWAYS_INLINE operator double & () { return value; }
+ Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000)
+ : value(v), flags(f), oneK(k) {}
+ BENCHMARK_ALWAYS_INLINE operator double const&() const { return value; }
+ BENCHMARK_ALWAYS_INLINE operator double&() { return value; }
};
+// A helper for user code to create unforeseen combinations of Flags, without
+// having to write this cast manually each time or define the operator itself.
+Counter::Flags inline operator|(const Counter::Flags& LHS,
+ const Counter::Flags& RHS) {
+ return static_cast<Counter::Flags>(static_cast<int>(LHS) |
+ static_cast<int>(RHS));
+}
+
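+// For example (an illustrative sketch, not part of the upstream header),
+// this permits flag combinations that have no named enumerator:
+//   state.counters["FooInvariantAvg"] = benchmark::Counter(
+//       numFoos, benchmark::Counter::kIsIterationInvariant |
+//                    benchmark::Counter::kAvgThreads);
+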
// This is the container for the user-defined counters.
typedef std::map<std::string, Counter> UserCounters;
-
// TimeUnit is passed to a benchmark in order to specify the order of magnitude
// for the measured time.
enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond };
@@ -383,36 +411,49 @@ enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond };
// calculated automatically to the best fit.
enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
+typedef uint64_t IterationCount;
+
// BigOFunc is passed to a benchmark in order to specify the asymptotic
// computational complexity for the benchmark.
-typedef double(BigOFunc)(int64_t);
+typedef double(BigOFunc)(IterationCount);
// StatisticsFunc is passed to a benchmark in order to compute some descriptive
// statistics over all the measurements of some type
typedef double(StatisticsFunc)(const std::vector<double>&);
+namespace internal {
struct Statistics {
std::string name_;
StatisticsFunc* compute_;
- Statistics(std::string name, StatisticsFunc* compute)
- : name_(name), compute_(compute) {}
+ Statistics(const std::string& name, StatisticsFunc* compute)
+ : name_(name), compute_(compute) {}
};
-namespace internal {
+struct BenchmarkInstance;
class ThreadTimer;
class ThreadManager;
-enum ReportMode
+enum AggregationReportMode
#if defined(BENCHMARK_HAS_CXX11)
- : unsigned
+ : unsigned
#else
#endif
- {
- RM_Unspecified, // The mode has not been manually specified
- RM_Default, // The mode is user-specified as default.
- RM_ReportAggregatesOnly
+{
+ // The mode has not been manually specified
+ ARM_Unspecified = 0,
+ // The mode is user-specified.
+ // This may or may not be set when the following bit-flags are set.
+ ARM_Default = 1U << 0U,
+ // File reporter should only output aggregates.
+ ARM_FileReportAggregatesOnly = 1U << 1U,
+ // Display reporter should only output aggregates
+ ARM_DisplayReportAggregatesOnly = 1U << 2U,
+ // Both reporters should only display aggregates.
+ ARM_ReportAggregatesOnly =
+ ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly
};
+
} // namespace internal
// State is passed to a running Benchmark and contains state for the
@@ -447,7 +488,7 @@ class State {
// while (state.KeepRunningBatch(1000)) {
// // process 1000 elements
// }
- bool KeepRunningBatch(size_t n);
+ bool KeepRunningBatch(IterationCount n);
// REQUIRES: timer is running and 'SkipWithError(...)' has not been called
// by the current thread.
@@ -508,16 +549,21 @@ class State {
// Set the number of bytes processed by the current benchmark
// execution. This routine is typically called once at the end of a
- // throughput oriented benchmark. If this routine is called with a
- // value > 0, the report is printed in MB/sec instead of nanoseconds
- // per iteration.
+ // throughput oriented benchmark.
//
// REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
- void SetBytesProcessed(int64_t bytes) { bytes_processed_ = bytes; }
+ void SetBytesProcessed(int64_t bytes) {
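+    // Stored as a user counter: a rate with 1024-based units, so it is
+    // reported as the familiar bytes-per-second figure.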
+ counters["bytes_per_second"] =
+ Counter(static_cast<double>(bytes), Counter::kIsRate, Counter::kIs1024);
+ }
BENCHMARK_ALWAYS_INLINE
- int64_t bytes_processed() const { return bytes_processed_; }
+ int64_t bytes_processed() const {
+ if (counters.find("bytes_per_second") != counters.end())
+ return static_cast<int64_t>(counters.at("bytes_per_second"));
+ return 0;
+ }
// If this routine is called with complexity_n > 0 and complexity report is
// requested for the
@@ -537,10 +583,17 @@ class State {
//
// REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
- void SetItemsProcessed(int64_t items) { items_processed_ = items; }
+ void SetItemsProcessed(int64_t items) {
+ counters["items_per_second"] =
+ Counter(static_cast<double>(items), benchmark::Counter::kIsRate);
+ }
BENCHMARK_ALWAYS_INLINE
- int64_t items_processed() const { return items_processed_; }
+ int64_t items_processed() const {
+ if (counters.find("items_per_second") != counters.end())
+ return static_cast<int64_t>(counters.at("items_per_second"));
+ return 0;
+ }
// If this routine is called, the specified label is printed at the
// end of the benchmark report line for the currently executing
@@ -574,38 +627,35 @@ class State {
int64_t range_y() const { return range(1); }
BENCHMARK_ALWAYS_INLINE
- size_t iterations() const {
+ IterationCount iterations() const {
if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
return 0;
}
return max_iterations - total_iterations_ + batch_leftover_;
}
-private: // items we expect on the first cache line (ie 64 bytes of the struct)
-
+ private:
+  // items we expect on the first cache line (ie 64 bytes of the struct)
// When total_iterations_ is 0, KeepRunning() and friends will return false.
// May be larger than max_iterations.
- size_t total_iterations_;
+ IterationCount total_iterations_;
// When using KeepRunningBatch(), batch_leftover_ holds the number of
// iterations beyond max_iters that were run. Used to track
// completed_iterations_ accurately.
- size_t batch_leftover_;
+ IterationCount batch_leftover_;
-public:
- const size_t max_iterations;
+ public:
+ const IterationCount max_iterations;
-private:
+ private:
bool started_;
bool finished_;
bool error_occurred_;
-private: // items we don't need on the first cache line
+ private: // items we don't need on the first cache line
std::vector<int64_t> range_;
- int64_t bytes_processed_;
- int64_t items_processed_;
-
int64_t complexity_n_;
public:
@@ -616,35 +666,32 @@ private: // items we don't need on the first cache line
// Number of threads concurrently executing the benchmark.
const int threads;
-
- // TODO(EricWF) make me private
- State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
- int n_threads, internal::ThreadTimer* timer,
+ private:
+ State(IterationCount max_iters, const std::vector<int64_t>& ranges,
+ int thread_i, int n_threads, internal::ThreadTimer* timer,
internal::ThreadManager* manager);
- private:
void StartKeepRunning();
// Implementation of KeepRunning() and KeepRunningBatch().
// is_batch must be true unless n is 1.
- bool KeepRunningInternal(size_t n, bool is_batch);
+ bool KeepRunningInternal(IterationCount n, bool is_batch);
void FinishKeepRunning();
internal::ThreadTimer* timer_;
internal::ThreadManager* manager_;
- BENCHMARK_DISALLOW_COPY_AND_ASSIGN(State);
+
+ friend struct internal::BenchmarkInstance;
};
-inline BENCHMARK_ALWAYS_INLINE
-bool State::KeepRunning() {
- return KeepRunningInternal(1, /*is_batch=*/ false);
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() {
+ return KeepRunningInternal(1, /*is_batch=*/false);
}
-inline BENCHMARK_ALWAYS_INLINE
-bool State::KeepRunningBatch(size_t n) {
- return KeepRunningInternal(n, /*is_batch=*/ true);
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(IterationCount n) {
+ return KeepRunningInternal(n, /*is_batch=*/true);
}
-inline BENCHMARK_ALWAYS_INLINE
-bool State::KeepRunningInternal(size_t n, bool is_batch) {
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(IterationCount n,
+ bool is_batch) {
// total_iterations_ is set to 0 by the constructor, and always set to a
 // nonzero value by StartKeepRunning().
assert(n > 0);
@@ -657,13 +704,13 @@ bool State::KeepRunningInternal(size_t n, bool is_batch) {
if (!started_) {
StartKeepRunning();
if (!error_occurred_ && total_iterations_ >= n) {
- total_iterations_-= n;
+ total_iterations_ -= n;
return true;
}
}
// For non-batch runs, total_iterations_ must be 0 by now.
if (is_batch && total_iterations_ != 0) {
- batch_leftover_ = n - total_iterations_;
+ batch_leftover_ = n - total_iterations_;
total_iterations_ = 0;
return true;
}
@@ -707,7 +754,7 @@ struct State::StateIterator {
}
private:
- size_t cached_;
+ IterationCount cached_;
State* const parent_;
};
@@ -811,7 +858,7 @@ class Benchmark {
// NOTE: This function should only be used when *exact* iteration control is
// needed and never to control or limit how long a benchmark runs, where
// `--benchmark_min_time=N` or `MinTime(...)` should be used instead.
- Benchmark* Iterations(size_t n);
+ Benchmark* Iterations(IterationCount n);
// Specify the amount of times to repeat this benchmark. This option overrides
// the `benchmark_repetitions` flag.
@@ -821,13 +868,24 @@ class Benchmark {
// Specify if each repetition of the benchmark should be reported separately
// or if only the final statistics should be reported. If the benchmark
// is not repeated then the single result is always reported.
+ // Applies to *ALL* reporters (display and file).
Benchmark* ReportAggregatesOnly(bool value = true);
- // If a particular benchmark is I/O bound, runs multiple threads internally or
- // if for some reason CPU timings are not representative, call this method. If
- // called, the elapsed time will be used to control how many iterations are
- // run, and in the printing of items/second or MB/seconds values. If not
- // called, the cpu time used by the benchmark will be used.
+ // Same as ReportAggregatesOnly(), but applies to display reporter only.
+ Benchmark* DisplayAggregatesOnly(bool value = true);
+
+  // By default, the CPU time is measured only for the main thread, which may
+  // be unrepresentative if the benchmark uses threads internally. If this
+  // method is called, the total CPU time spent by all threads will be
+  // measured instead.
+ Benchmark* MeasureProcessCPUTime();
+
+ // If a particular benchmark should use the Wall clock instead of the CPU time
+ // (be it either the CPU time of the main thread only (default), or the
+ // total CPU usage of the benchmark), call this method. If called, the elapsed
+ // (wall) time will be used to control how many iterations are run, and in the
+  // printing of items/second or MB/second values.
+ // If not called, the CPU time used by the benchmark will be used.
Benchmark* UseRealTime();
// If a benchmark must measure time manually (e.g. if GPU execution time is
@@ -882,9 +940,6 @@ class Benchmark {
virtual void Run(State& state) = 0;
- // Used inside the benchmark implementation
- struct Instance;
-
protected:
explicit Benchmark(const char* name);
Benchmark(Benchmark const&);
@@ -896,14 +951,15 @@ class Benchmark {
friend class BenchmarkFamilies;
std::string name_;
- ReportMode report_mode_;
- std::vector<std::string> arg_names_; // Args for all benchmark runs
+ AggregationReportMode aggregation_report_mode_;
+ std::vector<std::string> arg_names_; // Args for all benchmark runs
std::vector<std::vector<int64_t> > args_; // Args for all benchmark runs
TimeUnit time_unit_;
int range_multiplier_;
double min_time_;
- size_t iterations_;
+ IterationCount iterations_;
int repetitions_;
+ bool measure_process_cpu_time_;
bool use_real_time_;
bool use_manual_time_;
BigO complexity_;
@@ -1125,7 +1181,7 @@ class Fixture : public internal::Benchmark {
class BaseClass##_##Method##_Benchmark : public BaseClass<a> { \
public: \
BaseClass##_##Method##_Benchmark() : BaseClass<a>() { \
- this->SetName(#BaseClass"<" #a ">/" #Method); \
+ this->SetName(#BaseClass "<" #a ">/" #Method); \
} \
\
protected: \
@@ -1136,7 +1192,7 @@ class Fixture : public internal::Benchmark {
class BaseClass##_##Method##_Benchmark : public BaseClass<a, b> { \
public: \
BaseClass##_##Method##_Benchmark() : BaseClass<a, b>() { \
- this->SetName(#BaseClass"<" #a "," #b ">/" #Method); \
+ this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \
} \
\
protected: \
@@ -1148,14 +1204,15 @@ class Fixture : public internal::Benchmark {
class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \
public: \
BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \
- this->SetName(#BaseClass"<" #__VA_ARGS__ ">/" #Method); \
+ this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method); \
} \
\
protected: \
virtual void BenchmarkCase(::benchmark::State&); \
};
#else
-#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
+#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \
+ BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
#endif
#define BENCHMARK_DEFINE_F(BaseClass, Method) \
@@ -1175,7 +1232,8 @@ class Fixture : public internal::Benchmark {
BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
void BaseClass##_##Method##_Benchmark::BenchmarkCase
#else
-#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
+#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \
+ BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
#endif
#define BENCHMARK_REGISTER_F(BaseClass, Method) \
@@ -1202,24 +1260,24 @@ class Fixture : public internal::Benchmark {
void BaseClass##_##Method##_Benchmark::BenchmarkCase
#ifdef BENCHMARK_HAS_CXX11
-#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
+#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
- BENCHMARK_REGISTER_F(BaseClass, Method); \
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
void BaseClass##_##Method##_Benchmark::BenchmarkCase
#else
-#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
+#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \
+ BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
#endif
// Helper macro to create a main routine in a test that runs the benchmarks
-#define BENCHMARK_MAIN() \
- int main(int argc, char** argv) { \
- ::benchmark::Initialize(&argc, argv); \
+#define BENCHMARK_MAIN() \
+ int main(int argc, char** argv) { \
+ ::benchmark::Initialize(&argc, argv); \
if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \
- ::benchmark::RunSpecifiedBenchmarks(); \
- } \
+ ::benchmark::RunSpecifiedBenchmarks(); \
+ } \
int main(int, char**)
-
// ------------------------------------------------------
// Benchmark Reporters
@@ -1237,6 +1295,7 @@ struct CPUInfo {
double cycles_per_second;
std::vector<CacheInfo> caches;
bool scaling_enabled;
+ std::vector<double> load_avg;
static const CPUInfo& Get();
@@ -1245,6 +1304,33 @@ struct CPUInfo {
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo);
};
+// Information about the host system; currently just its name.
+struct SystemInfo {
+ std::string name;
+ static const SystemInfo& Get();
+
+ private:
+ SystemInfo();
+ BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo);
+};
+
+// BenchmarkName contains the components of the Benchmark's name
+// which allows individual fields to be modified or cleared before
+// building the final name using 'str()'.
+struct BenchmarkName {
+ std::string function_name;
+ std::string args;
+ std::string min_time;
+ std::string iterations;
+ std::string repetitions;
+ std::string time_type;
+ std::string threads;
+
+ // Return the full name of the benchmark with each non-empty
+ // field separated by a '/'
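+  // (an illustrative sketch: str() yields "BM_Foo/8/threads:2" when
+  // function_name is "BM_Foo", args is "8", and threads is "threads:2").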
+ std::string str() const;
+};
+
// Interface for custom benchmark result printers.
// By default, benchmark reports are printed to stdout. However an application
// can control the destination of the reports by calling
@@ -1254,35 +1340,48 @@ class BenchmarkReporter {
public:
struct Context {
CPUInfo const& cpu_info;
+ SystemInfo const& sys_info;
// The number of chars in the longest benchmark name.
size_t name_field_width;
- static const char *executable_name;
+ static const char* executable_name;
Context();
};
struct Run {
+ static const int64_t no_repetition_index = -1;
+ enum RunType { RT_Iteration, RT_Aggregate };
+
Run()
- : error_occurred(false),
+ : run_type(RT_Iteration),
+ error_occurred(false),
iterations(1),
+ threads(1),
time_unit(kNanosecond),
real_accumulated_time(0),
cpu_accumulated_time(0),
- bytes_per_second(0),
- items_per_second(0),
max_heapbytes_used(0),
complexity(oNone),
complexity_lambda(),
complexity_n(0),
report_big_o(false),
report_rms(false),
- counters() {}
-
- std::string benchmark_name;
+ counters(),
+ has_memory_result(false),
+ allocs_per_iter(0.0),
+ max_bytes_used(0) {}
+
+ std::string benchmark_name() const;
+ BenchmarkName run_name;
+ RunType run_type;
+ std::string aggregate_name;
std::string report_label; // Empty if not set by benchmark.
bool error_occurred;
std::string error_message;
- int64_t iterations;
+ IterationCount iterations;
+ int64_t threads;
+ int64_t repetition_index;
+ int64_t repetitions;
TimeUnit time_unit;
double real_accumulated_time;
double cpu_accumulated_time;
@@ -1299,10 +1398,6 @@ class BenchmarkReporter {
// accumulated time.
double GetAdjustedCPUTime() const;
- // Zero if not set by benchmark.
- double bytes_per_second;
- double items_per_second;
-
// This is set to 0.0 if memory tracing is not enabled.
double max_heapbytes_used;
@@ -1312,13 +1407,18 @@ class BenchmarkReporter {
int64_t complexity_n;
// what statistics to compute from the measurements
- const std::vector<Statistics>* statistics;
+ const std::vector<internal::Statistics>* statistics;
// Inform print function whether the current run is a complexity report
bool report_big_o;
bool report_rms;
UserCounters counters;
+
+ // Memory metrics.
+ bool has_memory_result;
+ double allocs_per_iter;
+ int64_t max_bytes_used;
};
// Construct a BenchmarkReporter with the output stream set to 'std::cout'
@@ -1379,17 +1479,19 @@ class BenchmarkReporter {
// Simple reporter that outputs benchmark data to the console. This is the
// default reporter used by RunSpecifiedBenchmarks().
class ConsoleReporter : public BenchmarkReporter {
-public:
+ public:
enum OutputOptions {
OO_None = 0,
OO_Color = 1,
OO_Tabular = 2,
- OO_ColorTabular = OO_Color|OO_Tabular,
+ OO_ColorTabular = OO_Color | OO_Tabular,
OO_Defaults = OO_ColorTabular
};
explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults)
- : output_options_(opts_), name_field_width_(0),
- prev_counters_(), printed_header_(false) {}
+ : output_options_(opts_),
+ name_field_width_(0),
+ prev_counters_(),
+ printed_header_(false) {}
virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);
@@ -1417,7 +1519,9 @@ class JSONReporter : public BenchmarkReporter {
bool first_report_;
};
-class CSVReporter : public BenchmarkReporter {
+class BENCHMARK_DEPRECATED_MSG(
+ "The CSV Reporter will be removed in a future release") CSVReporter
+ : public BenchmarkReporter {
public:
CSVReporter() : printed_header_(false) {}
virtual bool ReportContext(const Context& context);
@@ -1427,7 +1531,30 @@ class CSVReporter : public BenchmarkReporter {
void PrintRunData(const Run& report);
bool printed_header_;
- std::set< std::string > user_counter_names_;
+ std::set<std::string> user_counter_names_;
+};
+
+// If a MemoryManager is registered, it can be used to collect and report
+// allocation metrics for a run of the benchmark.
+class MemoryManager {
+ public:
+ struct Result {
+ Result() : num_allocs(0), max_bytes_used(0) {}
+
+ // The number of allocations made in total between Start and Stop.
+ int64_t num_allocs;
+
+ // The peak memory use between Start and Stop.
+ int64_t max_bytes_used;
+ };
+
+ virtual ~MemoryManager() {}
+
+ // Implement this to start recording allocation information.
+ virtual void Start() = 0;
+
+ // Implement this to stop recording and fill out the given Result structure.
+ virtual void Stop(Result* result) = 0;
};
inline const char* GetTimeUnitString(TimeUnit unit) {
@@ -1437,9 +1564,9 @@ inline const char* GetTimeUnitString(TimeUnit unit) {
case kMicrosecond:
return "us";
case kNanosecond:
- default:
return "ns";
}
+ BENCHMARK_UNREACHABLE();
}
inline double GetTimeUnitMultiplier(TimeUnit unit) {
@@ -1449,11 +1576,11 @@ inline double GetTimeUnitMultiplier(TimeUnit unit) {
case kMicrosecond:
return 1e6;
case kNanosecond:
- default:
return 1e9;
}
+ BENCHMARK_UNREACHABLE();
}
-} // namespace benchmark
+} // namespace benchmark
#endif // BENCHMARK_BENCHMARK_H_
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/arraysize.h b/src/third_party/benchmark/dist/src/arraysize.h
index 51a50f2dff2..51a50f2dff2 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/arraysize.h
+++ b/src/third_party/benchmark/dist/src/arraysize.h
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/benchmark.cc b/src/third_party/benchmark/dist/src/benchmark.cc
index 82b15ac7090..29bfa3512f9 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/benchmark.cc
+++ b/src/third_party/benchmark/dist/src/benchmark.cc
@@ -14,6 +14,7 @@
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
+#include "benchmark_runner.h"
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
@@ -34,6 +35,7 @@
#include <memory>
#include <string>
#include <thread>
+#include <utility>
#include "check.h"
#include "colorprint.h"
@@ -55,9 +57,9 @@ DEFINE_bool(benchmark_list_tests, false,
DEFINE_string(benchmark_filter, ".",
"A regular expression that specifies the set of benchmarks "
- "to execute. If this flag is empty, no benchmarks are run. "
- "If this flag is the string \"all\", all benchmarks linked "
- "into the process are run.");
+ "to execute. If this flag is empty, or if this flag is the "
+ "string \"all\", all benchmarks linked into the binary are "
+ "run.");
DEFINE_double(benchmark_min_time, 0.5,
"Minimum number of seconds we should run benchmark before "
@@ -72,10 +74,19 @@ DEFINE_int32(benchmark_repetitions, 1,
"The number of runs of each benchmark. If greater than 1, the "
"mean and standard deviation of the runs will be reported.");
-DEFINE_bool(benchmark_report_aggregates_only, false,
- "Report the result of each benchmark repetitions. When 'true' is "
- "specified only the mean, standard deviation, and other statistics "
- "are reported for repeated benchmarks.");
+DEFINE_bool(
+ benchmark_report_aggregates_only, false,
+ "Report the result of each benchmark repetitions. When 'true' is specified "
+ "only the mean, standard deviation, and other statistics are reported for "
+ "repeated benchmarks. Affects all reporters.");
+
+DEFINE_bool(
+ benchmark_display_aggregates_only, false,
+ "Display the result of each benchmark repetitions. When 'true' is "
+ "specified only the mean, standard deviation, and other statistics are "
+ "displayed for repeated benchmarks. Unlike "
+ "benchmark_report_aggregates_only, only affects the display reporter, but "
+ "*NOT* file reporter, which will still contain all the output.");
DEFINE_string(benchmark_format, "console",
"The format to use for console output. Valid values are "
@@ -103,195 +114,15 @@ DEFINE_int32(v, 0, "The level of verbose logging to output");
namespace benchmark {
-namespace {
-static const size_t kMaxIterations = 1000000000;
-} // end namespace
-
namespace internal {
+// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile*) {}
-namespace {
-
-BenchmarkReporter::Run CreateRunReport(
- const benchmark::internal::Benchmark::Instance& b,
- const internal::ThreadManager::Result& results,
- double seconds) {
- // Create report about this benchmark run.
- BenchmarkReporter::Run report;
-
- report.benchmark_name = b.name;
- report.error_occurred = results.has_error_;
- report.error_message = results.error_message_;
- report.report_label = results.report_label_;
- // This is the total iterations across all threads.
- report.iterations = results.iterations;
- report.time_unit = b.time_unit;
-
- if (!report.error_occurred) {
- double bytes_per_second = 0;
- if (results.bytes_processed > 0 && seconds > 0.0) {
- bytes_per_second = (results.bytes_processed / seconds);
- }
- double items_per_second = 0;
- if (results.items_processed > 0 && seconds > 0.0) {
- items_per_second = (results.items_processed / seconds);
- }
-
- if (b.use_manual_time) {
- report.real_accumulated_time = results.manual_time_used;
- } else {
- report.real_accumulated_time = results.real_time_used;
- }
- report.cpu_accumulated_time = results.cpu_time_used;
- report.bytes_per_second = bytes_per_second;
- report.items_per_second = items_per_second;
- report.complexity_n = results.complexity_n;
- report.complexity = b.complexity;
- report.complexity_lambda = b.complexity_lambda;
- report.statistics = b.statistics;
- report.counters = results.counters;
- internal::Finish(&report.counters, seconds, b.threads);
- }
- return report;
-}
-
-// Execute one thread of benchmark b for the specified number of iterations.
-// Adds the stats collected for the thread into *total.
-void RunInThread(const benchmark::internal::Benchmark::Instance* b,
- size_t iters, int thread_id,
- internal::ThreadManager* manager) {
- internal::ThreadTimer timer;
- State st(iters, b->arg, thread_id, b->threads, &timer, manager);
- b->benchmark->Run(st);
- CHECK(st.iterations() >= st.max_iterations)
- << "Benchmark returned before State::KeepRunning() returned false!";
- {
- MutexLock l(manager->GetBenchmarkMutex());
- internal::ThreadManager::Result& results = manager->results;
- results.iterations += st.iterations();
- results.cpu_time_used += timer.cpu_time_used();
- results.real_time_used += timer.real_time_used();
- results.manual_time_used += timer.manual_time_used();
- results.bytes_processed += st.bytes_processed();
- results.items_processed += st.items_processed();
- results.complexity_n += st.complexity_length_n();
- internal::Increment(&results.counters, st.counters);
- }
- manager->NotifyThreadComplete();
-}
-
-std::vector<BenchmarkReporter::Run> RunBenchmark(
- const benchmark::internal::Benchmark::Instance& b,
- std::vector<BenchmarkReporter::Run>* complexity_reports) {
- std::vector<BenchmarkReporter::Run> reports; // return value
-
- const bool has_explicit_iteration_count = b.iterations != 0;
- size_t iters = has_explicit_iteration_count ? b.iterations : 1;
- std::unique_ptr<internal::ThreadManager> manager;
- std::vector<std::thread> pool(b.threads - 1);
- const int repeats =
- b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions;
- const bool report_aggregates_only =
- repeats != 1 &&
- (b.report_mode == internal::RM_Unspecified
- ? FLAGS_benchmark_report_aggregates_only
- : b.report_mode == internal::RM_ReportAggregatesOnly);
- for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
- for (;;) {
- // Try benchmark
- VLOG(2) << "Running " << b.name << " for " << iters << "\n";
-
- manager.reset(new internal::ThreadManager(b.threads));
- for (std::size_t ti = 0; ti < pool.size(); ++ti) {
- pool[ti] = std::thread(&RunInThread, &b, iters,
- static_cast<int>(ti + 1), manager.get());
- }
- RunInThread(&b, iters, 0, manager.get());
- manager->WaitForAllThreads();
- for (std::thread& thread : pool) thread.join();
- internal::ThreadManager::Result results;
- {
- MutexLock l(manager->GetBenchmarkMutex());
- results = manager->results;
- }
- manager.reset();
- // Adjust real/manual time stats since they were reported per thread.
- results.real_time_used /= b.threads;
- results.manual_time_used /= b.threads;
-
- VLOG(2) << "Ran in " << results.cpu_time_used << "/"
- << results.real_time_used << "\n";
-
- // Base decisions off of real time if requested by this benchmark.
- double seconds = results.cpu_time_used;
- if (b.use_manual_time) {
- seconds = results.manual_time_used;
- } else if (b.use_real_time) {
- seconds = results.real_time_used;
- }
-
- const double min_time =
- !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time;
-
- // Determine if this run should be reported; Either it has
- // run for a sufficient amount of time or because an error was reported.
- const bool should_report = repetition_num > 0
- || has_explicit_iteration_count // An exact iteration count was requested
- || results.has_error_
- || iters >= kMaxIterations // No chance to try again, we hit the limit.
- || seconds >= min_time // the elapsed time is large enough
- // CPU time is specified but the elapsed real time greatly exceeds the
- // minimum time. Note that user provided timers are except from this
- // sanity check.
- || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time);
-
- if (should_report) {
- BenchmarkReporter::Run report = CreateRunReport(b, results, seconds);
- if (!report.error_occurred && b.complexity != oNone)
- complexity_reports->push_back(report);
- reports.push_back(report);
- break;
- }
-
- // See how much iterations should be increased by
- // Note: Avoid division by zero with max(seconds, 1ns).
- double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
- // If our last run was at least 10% of FLAGS_benchmark_min_time then we
- // use the multiplier directly. Otherwise we use at most 10 times
- // expansion.
- // NOTE: When the last run was at least 10% of the min time the max
- // expansion should be 14x.
- bool is_significant = (seconds / min_time) > 0.1;
- multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
- if (multiplier <= 1.0) multiplier = 2.0;
- double next_iters = std::max(multiplier * iters, iters + 1.0);
- if (next_iters > kMaxIterations) {
- next_iters = kMaxIterations;
- }
- VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
- iters = static_cast<int>(next_iters + 0.5);
- }
- }
- // Calculate additional statistics
- auto stat_reports = ComputeStats(reports);
- if ((b.complexity != oNone) && b.last_benchmark_instance) {
- auto additional_run_stats = ComputeBigO(*complexity_reports);
- stat_reports.insert(stat_reports.end(), additional_run_stats.begin(),
- additional_run_stats.end());
- complexity_reports->clear();
- }
-
- if (report_aggregates_only) reports.clear();
- reports.insert(reports.end(), stat_reports.begin(), stat_reports.end());
- return reports;
-}
-
-} // namespace
} // namespace internal
-State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
- int n_threads, internal::ThreadTimer* timer,
+State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
+ int thread_i, int n_threads, internal::ThreadTimer* timer,
internal::ThreadManager* manager)
: total_iterations_(0),
batch_leftover_(0),
@@ -300,8 +131,6 @@ State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
finished_(false),
error_occurred_(false),
range_(ranges),
- bytes_processed_(0),
- items_processed_(0),
complexity_n_(0),
counters(),
thread_index(thread_i),
@@ -317,15 +146,21 @@ State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
// demonstrated since constexpr evaluation must diagnose all undefined
// behavior). However, GCC and Clang also warn about this use of offsetof,
// which must be suppressed.
-#ifdef __GNUC__
+#if defined(__INTEL_COMPILER)
+#pragma warning push
+#pragma warning(disable:1875)
+#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#endif
// Offset tests to ensure commonly accessed data is on the first cache line.
const int cache_line_size = 64;
static_assert(offsetof(State, error_occurred_) <=
- (cache_line_size - sizeof(error_occurred_)), "");
-#ifdef __GNUC__
+ (cache_line_size - sizeof(error_occurred_)),
+ "");
+#if defined(__INTEL_COMPILER)
+#pragma warning pop
+#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
}
@@ -386,25 +221,25 @@ void State::FinishKeepRunning() {
namespace internal {
namespace {
-void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
- BenchmarkReporter* console_reporter,
- BenchmarkReporter* file_reporter) {
+void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
+ BenchmarkReporter* display_reporter,
+ BenchmarkReporter* file_reporter) {
// Note the file_reporter can be null.
- CHECK(console_reporter != nullptr);
+ CHECK(display_reporter != nullptr);
// Determine the width of the name field using a minimum width of 10.
- bool has_repetitions = FLAGS_benchmark_repetitions > 1;
+ bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
size_t stat_field_width = 0;
- for (const Benchmark::Instance& benchmark : benchmarks) {
+ for (const BenchmarkInstance& benchmark : benchmarks) {
name_field_width =
- std::max<size_t>(name_field_width, benchmark.name.size());
- has_repetitions |= benchmark.repetitions > 1;
+ std::max<size_t>(name_field_width, benchmark.name.str().size());
+ might_have_aggregates |= benchmark.repetitions > 1;
- for(const auto& Stat : *benchmark.statistics)
+ for (const auto& Stat : *benchmark.statistics)
stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
}
- if (has_repetitions) name_field_width += 1 + stat_field_width;
+ if (might_have_aggregates) name_field_width += 1 + stat_field_width;
// Print header here
BenchmarkReporter::Context context;
@@ -421,22 +256,36 @@ void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
std::flush(reporter->GetErrorStream());
};
- if (console_reporter->ReportContext(context) &&
+ if (display_reporter->ReportContext(context) &&
(!file_reporter || file_reporter->ReportContext(context))) {
- flushStreams(console_reporter);
+ flushStreams(display_reporter);
flushStreams(file_reporter);
+
for (const auto& benchmark : benchmarks) {
- std::vector<BenchmarkReporter::Run> reports =
- RunBenchmark(benchmark, &complexity_reports);
- console_reporter->ReportRuns(reports);
- if (file_reporter) file_reporter->ReportRuns(reports);
- flushStreams(console_reporter);
+ RunResults run_results = RunBenchmark(benchmark, &complexity_reports);
+
+ auto report = [&run_results](BenchmarkReporter* reporter,
+ bool report_aggregates_only) {
+ assert(reporter);
+        // If there are no aggregates, output the non-aggregates.
+ report_aggregates_only &= !run_results.aggregates_only.empty();
+ if (!report_aggregates_only)
+ reporter->ReportRuns(run_results.non_aggregates);
+ if (!run_results.aggregates_only.empty())
+ reporter->ReportRuns(run_results.aggregates_only);
+ };
+
+ report(display_reporter, run_results.display_report_aggregates_only);
+ if (file_reporter)
+ report(file_reporter, run_results.file_report_aggregates_only);
+
+ flushStreams(display_reporter);
flushStreams(file_reporter);
}
}
- console_reporter->Finalize();
+ display_reporter->Finalize();
if (file_reporter) file_reporter->Finalize();
- flushStreams(console_reporter);
+ flushStreams(display_reporter);
flushStreams(file_reporter);
}
@@ -463,21 +312,26 @@ bool IsZero(double n) {
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
int output_opts = ConsoleReporter::OO_Defaults;
- if ((FLAGS_benchmark_color == "auto" && IsColorTerminal()) ||
- IsTruthyFlagValue(FLAGS_benchmark_color)) {
+ auto is_benchmark_color = [force_no_color] () -> bool {
+ if (force_no_color) {
+ return false;
+ }
+ if (FLAGS_benchmark_color == "auto") {
+ return IsColorTerminal();
+ }
+ return IsTruthyFlagValue(FLAGS_benchmark_color);
+ };
+ if (is_benchmark_color()) {
output_opts |= ConsoleReporter::OO_Color;
} else {
output_opts &= ~ConsoleReporter::OO_Color;
}
- if(force_no_color) {
- output_opts &= ~ConsoleReporter::OO_Color;
- }
- if(FLAGS_benchmark_counters_tabular) {
+ if (FLAGS_benchmark_counters_tabular) {
output_opts |= ConsoleReporter::OO_Tabular;
} else {
output_opts &= ~ConsoleReporter::OO_Tabular;
}
- return static_cast< ConsoleReporter::OutputOptions >(output_opts);
+ return static_cast<ConsoleReporter::OutputOptions>(output_opts);
}
} // end namespace internal
@@ -486,11 +340,11 @@ size_t RunSpecifiedBenchmarks() {
return RunSpecifiedBenchmarks(nullptr, nullptr);
}
-size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter) {
- return RunSpecifiedBenchmarks(console_reporter, nullptr);
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) {
+ return RunSpecifiedBenchmarks(display_reporter, nullptr);
}
-size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter) {
std::string spec = FLAGS_benchmark_filter;
if (spec.empty() || spec == "all")
@@ -498,15 +352,15 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
// Setup the reporters
std::ofstream output_file;
- std::unique_ptr<BenchmarkReporter> default_console_reporter;
+ std::unique_ptr<BenchmarkReporter> default_display_reporter;
std::unique_ptr<BenchmarkReporter> default_file_reporter;
- if (!console_reporter) {
- default_console_reporter = internal::CreateReporter(
- FLAGS_benchmark_format, internal::GetOutputOptions());
- console_reporter = default_console_reporter.get();
+ if (!display_reporter) {
+ default_display_reporter = internal::CreateReporter(
+ FLAGS_benchmark_format, internal::GetOutputOptions());
+ display_reporter = default_display_reporter.get();
}
- auto& Out = console_reporter->GetOutputStream();
- auto& Err = console_reporter->GetErrorStream();
+ auto& Out = display_reporter->GetOutputStream();
+ auto& Err = display_reporter->GetErrorStream();
std::string const& fname = FLAGS_benchmark_out;
if (fname.empty() && file_reporter) {
@@ -530,7 +384,7 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
file_reporter->SetErrorStream(&output_file);
}
- std::vector<internal::Benchmark::Instance> benchmarks;
+ std::vector<internal::BenchmarkInstance> benchmarks;
if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
if (benchmarks.empty()) {
@@ -539,14 +393,19 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
}
if (FLAGS_benchmark_list_tests) {
- for (auto const& benchmark : benchmarks) Out << benchmark.name << "\n";
+ for (auto const& benchmark : benchmarks)
+ Out << benchmark.name.str() << "\n";
} else {
- internal::RunBenchmarks(benchmarks, console_reporter, file_reporter);
+ internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
}
return benchmarks.size();
}
+void RegisterMemoryManager(MemoryManager* manager) {
+ internal::memory_manager = manager;
+}
+
namespace internal {
void PrintUsageAndExit() {
@@ -556,7 +415,8 @@ void PrintUsageAndExit() {
" [--benchmark_filter=<regex>]\n"
" [--benchmark_min_time=<min_time>]\n"
" [--benchmark_repetitions=<num_repetitions>]\n"
- " [--benchmark_report_aggregates_only={true|false}\n"
+ " [--benchmark_report_aggregates_only={true|false}]\n"
+ " [--benchmark_display_aggregates_only={true|false}]\n"
" [--benchmark_format=<console|json|csv>]\n"
" [--benchmark_out=<filename>]\n"
" [--benchmark_out_format=<json|console|csv>]\n"
@@ -568,7 +428,8 @@ void PrintUsageAndExit() {
void ParseCommandLineFlags(int* argc, char** argv) {
using namespace benchmark;
- BenchmarkReporter::Context::executable_name = argv[0];
+ BenchmarkReporter::Context::executable_name =
+ (argc && *argc > 0) ? argv[0] : "unknown";
for (int i = 1; i < *argc; ++i) {
if (ParseBoolFlag(argv[i], "benchmark_list_tests",
&FLAGS_benchmark_list_tests) ||
@@ -579,6 +440,8 @@ void ParseCommandLineFlags(int* argc, char** argv) {
&FLAGS_benchmark_repetitions) ||
ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
&FLAGS_benchmark_report_aggregates_only) ||
+ ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
+ &FLAGS_benchmark_display_aggregates_only) ||
ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
ParseStringFlag(argv[i], "benchmark_out_format",
@@ -588,7 +451,7 @@ void ParseCommandLineFlags(int* argc, char** argv) {
// TODO: Remove this.
ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
ParseBoolFlag(argv[i], "benchmark_counters_tabular",
- &FLAGS_benchmark_counters_tabular) ||
+ &FLAGS_benchmark_counters_tabular) ||
ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
@@ -622,7 +485,8 @@ void Initialize(int* argc, char** argv) {
bool ReportUnrecognizedArguments(int argc, char** argv) {
for (int i = 1; i < argc; ++i) {
- fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]);
+ fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
+ argv[i]);
}
return argc > 1;
}
diff --git a/src/third_party/benchmark/dist/src/benchmark_api_internal.cc b/src/third_party/benchmark/dist/src/benchmark_api_internal.cc
new file mode 100644
index 00000000000..d468a257e39
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_api_internal.cc
@@ -0,0 +1,15 @@
+#include "benchmark_api_internal.h"
+
+namespace benchmark {
+namespace internal {
+
+State BenchmarkInstance::Run(IterationCount iters, int thread_id,
+ internal::ThreadTimer* timer,
+ internal::ThreadManager* manager) const {
+ State st(iters, arg, thread_id, threads, timer, manager);
+ benchmark->Run(st);
+ return st;
+}
+
+} // namespace internal
+} // namespace benchmark
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/benchmark_api_internal.h b/src/third_party/benchmark/dist/src/benchmark_api_internal.h
index dd7a3ffe8cb..264eff95c5c 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/benchmark_api_internal.h
+++ b/src/third_party/benchmark/dist/src/benchmark_api_internal.h
@@ -2,10 +2,12 @@
#define BENCHMARK_API_INTERNAL_H
#include "benchmark/benchmark.h"
+#include "commandlineflags.h"
#include <cmath>
#include <iosfwd>
#include <limits>
+#include <memory>
#include <string>
#include <vector>
@@ -13,13 +15,14 @@ namespace benchmark {
namespace internal {
// Information kept per benchmark we may want to run
-struct Benchmark::Instance {
- std::string name;
+struct BenchmarkInstance {
+ BenchmarkName name;
Benchmark* benchmark;
- ReportMode report_mode;
+ AggregationReportMode aggregation_report_mode;
std::vector<int64_t> arg;
TimeUnit time_unit;
int range_multiplier;
+ bool measure_process_cpu_time;
bool use_real_time;
bool use_manual_time;
BigO complexity;
@@ -29,12 +32,15 @@ struct Benchmark::Instance {
bool last_benchmark_instance;
int repetitions;
double min_time;
- size_t iterations;
+ IterationCount iterations;
int threads; // Number of concurrent threads to use
+
+ State Run(IterationCount iters, int thread_id, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager) const;
};
bool FindBenchmarksInternal(const std::string& re,
- std::vector<Benchmark::Instance>* benchmarks,
+ std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err);
bool IsZero(double n);
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/benchmark_main.cc b/src/third_party/benchmark/dist/src/benchmark_main.cc
index b3b24783149..b3b24783149 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/benchmark_main.cc
+++ b/src/third_party/benchmark/dist/src/benchmark_main.cc
diff --git a/src/third_party/benchmark/dist/src/benchmark_name.cc b/src/third_party/benchmark/dist/src/benchmark_name.cc
new file mode 100644
index 00000000000..2a17ebce277
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_name.cc
@@ -0,0 +1,58 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <benchmark/benchmark.h>
+
+namespace benchmark {
+
+namespace {
+
+// Compute the total size of a pack of std::strings
+size_t size_impl() { return 0; }
+
+template <typename Head, typename... Tail>
+size_t size_impl(const Head& head, const Tail&... tail) {
+ return head.size() + size_impl(tail...);
+}
+
+// Join a pack of std::strings using a delimiter
+// TODO: use absl::StrJoin
+void join_impl(std::string&, char) {}
+
+template <typename Head, typename... Tail>
+void join_impl(std::string& s, const char delimiter, const Head& head,
+ const Tail&... tail) {
+ if (!s.empty() && !head.empty()) {
+ s += delimiter;
+ }
+
+ s += head;
+
+ join_impl(s, delimiter, tail...);
+}
+
+template <typename... Ts>
+std::string join(char delimiter, const Ts&... ts) {
+ std::string s;
+ s.reserve(sizeof...(Ts) + size_impl(ts...));
+ join_impl(s, delimiter, ts...);
+ return s;
+}
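+// For illustration: join('/', std::string("BM_Foo"), std::string(""),
+// std::string("8")) yields "BM_Foo/8" -- empty components contribute no
+// delimiter, so no doubled '/' appears in the name.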
+} // namespace
+
+std::string BenchmarkName::str() const {
+ return join('/', function_name, args, min_time, iterations, repetitions,
+ time_type, threads);
+}
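+// So a name with function_name "BM_Memcpy", args "32", threads "threads:2"
+// (and the remaining components empty) renders as "BM_Memcpy/32/threads:2".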
+} // namespace benchmark
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/benchmark_register.cc b/src/third_party/benchmark/dist/src/benchmark_register.cc
index dc6f9356853..6696c382b80 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/benchmark_register.cc
+++ b/src/third_party/benchmark/dist/src/benchmark_register.cc
@@ -34,6 +34,9 @@
#include <sstream>
#include <thread>
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "check.h"
@@ -78,7 +81,7 @@ class BenchmarkFamilies {
// Extract the list of benchmark instances that match the specified
// regular expression.
bool FindBenchmarks(std::string re,
- std::vector<Benchmark::Instance>* benchmarks,
+ std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err);
private:
@@ -107,7 +110,7 @@ void BenchmarkFamilies::ClearBenchmarks() {
}
bool BenchmarkFamilies::FindBenchmarks(
- std::string spec, std::vector<Benchmark::Instance>* benchmarks,
+ std::string spec, std::vector<BenchmarkInstance>* benchmarks,
std::ostream* ErrStream) {
CHECK(ErrStream);
auto& Err = *ErrStream;
@@ -115,9 +118,9 @@ bool BenchmarkFamilies::FindBenchmarks(
std::string error_msg;
Regex re;
bool isNegativeFilter = false;
- if(spec[0] == '-') {
- spec.replace(0, 1, "");
- isNegativeFilter = true;
+ if (spec[0] == '-') {
+ spec.replace(0, 1, "");
+ isNegativeFilter = true;
}
if (!re.Init(spec, &error_msg)) {
Err << "Could not compile benchmark re: " << error_msg << std::endl;
@@ -152,16 +155,17 @@ bool BenchmarkFamilies::FindBenchmarks(
for (auto const& args : family->args_) {
for (int num_threads : *thread_counts) {
- Benchmark::Instance instance;
- instance.name = family->name_;
+ BenchmarkInstance instance;
+ instance.name.function_name = family->name_;
instance.benchmark = family.get();
- instance.report_mode = family->report_mode_;
+ instance.aggregation_report_mode = family->aggregation_report_mode_;
instance.arg = args;
instance.time_unit = family->time_unit_;
instance.range_multiplier = family->range_multiplier_;
instance.min_time = family->min_time_;
instance.iterations = family->iterations_;
instance.repetitions = family->repetitions_;
+ instance.measure_process_cpu_time = family->measure_process_cpu_time_;
instance.use_real_time = family->use_real_time_;
instance.use_manual_time = family->use_manual_time_;
instance.complexity = family->complexity_;
@@ -172,40 +176,57 @@ bool BenchmarkFamilies::FindBenchmarks(
// Add arguments to instance name
size_t arg_i = 0;
for (auto const& arg : args) {
- instance.name += "/";
+ if (!instance.name.args.empty()) {
+ instance.name.args += '/';
+ }
if (arg_i < family->arg_names_.size()) {
const auto& arg_name = family->arg_names_[arg_i];
if (!arg_name.empty()) {
- instance.name +=
- StrFormat("%s:", family->arg_names_[arg_i].c_str());
+ instance.name.args += StrFormat("%s:", arg_name.c_str());
}
}
- instance.name += StrFormat("%d", arg);
+ instance.name.args += StrFormat("%" PRId64, arg);
++arg_i;
}
if (!IsZero(family->min_time_))
- instance.name += StrFormat("/min_time:%0.3f", family->min_time_);
- if (family->iterations_ != 0)
- instance.name += StrFormat("/iterations:%d", family->iterations_);
+ instance.name.min_time =
+ StrFormat("min_time:%0.3f", family->min_time_);
+ if (family->iterations_ != 0) {
+ instance.name.iterations =
+ StrFormat("iterations:%lu",
+ static_cast<unsigned long>(family->iterations_));
+ }
if (family->repetitions_ != 0)
- instance.name += StrFormat("/repeats:%d", family->repetitions_);
+ instance.name.repetitions =
+ StrFormat("repeats:%d", family->repetitions_);
+
+ if (family->measure_process_cpu_time_) {
+ instance.name.time_type = "process_time";
+ }
if (family->use_manual_time_) {
- instance.name += "/manual_time";
+ if (!instance.name.time_type.empty()) {
+ instance.name.time_type += '/';
+ }
+ instance.name.time_type += "manual_time";
} else if (family->use_real_time_) {
- instance.name += "/real_time";
+ if (!instance.name.time_type.empty()) {
+ instance.name.time_type += '/';
+ }
+ instance.name.time_type += "real_time";
}
// Add the number of threads used to the name
if (!family->thread_counts_.empty()) {
- instance.name += StrFormat("/threads:%d", instance.threads);
+ instance.name.threads = StrFormat("threads:%d", instance.threads);
}
- if ((re.Match(instance.name) && !isNegativeFilter) ||
- (!re.Match(instance.name) && isNegativeFilter)) {
+ const auto full_name = instance.name.str();
+ if ((re.Match(full_name) && !isNegativeFilter) ||
+ (!re.Match(full_name) && isNegativeFilter)) {
instance.last_benchmark_instance = (&args == &family->args_.back());
benchmarks->push_back(std::move(instance));
}
@@ -225,7 +246,7 @@ Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
// FIXME: This function is a hack so that benchmark.cc can access
// `BenchmarkFamilies`
bool FindBenchmarksInternal(const std::string& re,
- std::vector<Benchmark::Instance>* benchmarks,
+ std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err) {
return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
}
@@ -236,12 +257,13 @@ bool FindBenchmarksInternal(const std::string& re,
Benchmark::Benchmark(const char* name)
: name_(name),
- report_mode_(RM_Unspecified),
+ aggregation_report_mode_(ARM_Unspecified),
time_unit_(kNanosecond),
range_multiplier_(kRangeMultiplier),
min_time_(0),
iterations_(0),
repetitions_(0),
+ measure_process_cpu_time_(false),
use_real_time_(false),
use_manual_time_(false),
complexity_(oNone),
@@ -323,7 +345,6 @@ Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
- CHECK_GE(start, 0);
CHECK_LE(start, limit);
for (int64_t arg = start; arg <= limit; arg += step) {
args_.push_back({arg});
@@ -355,7 +376,7 @@ Benchmark* Benchmark::MinTime(double t) {
return this;
}
-Benchmark* Benchmark::Iterations(size_t n) {
+Benchmark* Benchmark::Iterations(IterationCount n) {
CHECK(n > 0);
CHECK(IsZero(min_time_));
iterations_ = n;
@@ -369,7 +390,29 @@ Benchmark* Benchmark::Repetitions(int n) {
}
Benchmark* Benchmark::ReportAggregatesOnly(bool value) {
- report_mode_ = value ? RM_ReportAggregatesOnly : RM_Default;
+ aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default;
+ return this;
+}
+
+Benchmark* Benchmark::DisplayAggregatesOnly(bool value) {
+ // Either way, once this is called the report mode is no longer 'unspecified'.
+ aggregation_report_mode_ = static_cast<AggregationReportMode>(
+ aggregation_report_mode_ | ARM_Default);
+
+ if (value) {
+ aggregation_report_mode_ = static_cast<AggregationReportMode>(
+ aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly);
+ } else {
+ aggregation_report_mode_ = static_cast<AggregationReportMode>(
+ aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly);
+ }
+
+ return this;
+}
+
+Benchmark* Benchmark::MeasureProcessCPUTime() {
+ // Can be used together with UseRealTime() / UseManualTime().
+ measure_process_cpu_time_ = true;
return this;
}
diff --git a/src/third_party/benchmark/dist/src/benchmark_register.h b/src/third_party/benchmark/dist/src/benchmark_register.h
new file mode 100644
index 00000000000..61377d74230
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_register.h
@@ -0,0 +1,107 @@
+#ifndef BENCHMARK_REGISTER_H
+#define BENCHMARK_REGISTER_H
+
+#include <vector>
+
+#include "check.h"
+
+namespace benchmark {
+namespace internal {
+
+// Append the powers of 'mult' in the closed interval [lo, hi].
+// Returns iterator to the start of the inserted range.
+template <typename T>
+typename std::vector<T>::iterator
+AddPowers(std::vector<T>* dst, T lo, T hi, int mult) {
+ CHECK_GE(lo, 0);
+ CHECK_GE(hi, lo);
+ CHECK_GE(mult, 2);
+
+ const size_t start_offset = dst->size();
+
+ static const T kmax = std::numeric_limits<T>::max();
+
+ // Space out the values in multiples of "mult"
+ for (T i = 1; i <= hi; i *= mult) {
+ if (i >= lo) {
+ dst->push_back(i);
+ }
+ // Break the loop here since multiplying by
+ // 'mult' would move outside of the range of T
+ if (i > kmax / mult) break;
+ }
+
+ return dst->begin() + start_offset;
+}
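+// For illustration: AddPowers(&dst, T{1}, T{100}, 8) appends {1, 8, 64} --
+// the powers of 8 that lie inside the closed interval [1, 100].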
+
+template <typename T>
+void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
+ // We negate lo and hi so we require that they cannot be equal to 'min'.
+ CHECK_GT(lo, std::numeric_limits<T>::min());
+ CHECK_GT(hi, std::numeric_limits<T>::min());
+ CHECK_GE(hi, lo);
+ CHECK_LE(hi, 0);
+
+ // Add positive powers, then negate and reverse.
+ // Casts necessary since small integers get promoted
+ // to 'int' when negating.
+ const auto lo_complement = static_cast<T>(-lo);
+ const auto hi_complement = static_cast<T>(-hi);
+
+ const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
+
+ std::for_each(it, dst->end(), [](T& t) { t *= -1; });
+ std::reverse(it, dst->end());
+}
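+// For illustration: AddNegatedPowers(&dst, T{-100}, T{-1}, 8) appends
+// {-64, -8, -1}: the positive powers {1, 8, 64} are computed first, then
+// negated and reversed so the output stays in increasing order.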
+
+template <typename T>
+void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
+ static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
+ "Args type must be a signed integer");
+
+ CHECK_GE(hi, lo);
+ CHECK_GE(mult, 2);
+
+ // Add "lo"
+ dst->push_back(lo);
+
+ // Handle lo == hi as a special case, so we then know
+ // lo < hi and so it is safe to add 1 to lo and subtract 1
+ // from hi without falling outside of the range of T.
+ if (lo == hi) return;
+
+ // Ensure that lo_inner <= hi_inner below.
+ if (lo + 1 == hi) {
+ dst->push_back(hi);
+ return;
+ }
+
+ // Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive).
+ const auto lo_inner = static_cast<T>(lo + 1);
+ const auto hi_inner = static_cast<T>(hi - 1);
+
+ // Insert negative values
+ if (lo_inner < 0) {
+ AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult);
+ }
+
+ // Treat 0 as a special case (see discussion on #762).
+ if (lo <= 0 && hi >= 0) {
+ dst->push_back(0);
+ }
+
+ // Insert positive values
+ if (hi_inner > 0) {
+ AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult);
+ }
+
+ // Add "hi" (if different from last value).
+ if (hi != dst->back()) {
+ dst->push_back(hi);
+ }
+}
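+// Putting it together: AddRange(&dst, T{-8}, T{8}, 2) produces
+// {-8, -4, -2, -1, 0, 1, 2, 4, 8} -- both endpoints, the negated and
+// positive powers of 2 strictly between them, and the special-cased 0.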
+
+} // namespace internal
+} // namespace benchmark
+
+#endif // BENCHMARK_REGISTER_H
diff --git a/src/third_party/benchmark/dist/src/benchmark_runner.cc b/src/third_party/benchmark/dist/src/benchmark_runner.cc
new file mode 100644
index 00000000000..0bae6a545ef
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_runner.cc
@@ -0,0 +1,361 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark_runner.h"
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
+#include "internal_macros.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <condition_variable>
+#include <cstdio>
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <thread>
+#include <utility>
+
+#include "check.h"
+#include "colorprint.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "counter.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "mutex.h"
+#include "re.h"
+#include "statistics.h"
+#include "string_util.h"
+#include "thread_manager.h"
+#include "thread_timer.h"
+
+namespace benchmark {
+
+namespace internal {
+
+MemoryManager* memory_manager = nullptr;
+
+namespace {
+
+static constexpr IterationCount kMaxIterations = 1000000000;
+
+BenchmarkReporter::Run CreateRunReport(
+ const benchmark::internal::BenchmarkInstance& b,
+ const internal::ThreadManager::Result& results,
+ IterationCount memory_iterations,
+ const MemoryManager::Result& memory_result, double seconds,
+ int64_t repetition_index) {
+ // Create report about this benchmark run.
+ BenchmarkReporter::Run report;
+
+ report.run_name = b.name;
+ report.error_occurred = results.has_error_;
+ report.error_message = results.error_message_;
+ report.report_label = results.report_label_;
+ // This is the total iterations across all threads.
+ report.iterations = results.iterations;
+ report.time_unit = b.time_unit;
+ report.threads = b.threads;
+ report.repetition_index = repetition_index;
+ report.repetitions = b.repetitions;
+
+ if (!report.error_occurred) {
+ if (b.use_manual_time) {
+ report.real_accumulated_time = results.manual_time_used;
+ } else {
+ report.real_accumulated_time = results.real_time_used;
+ }
+ report.cpu_accumulated_time = results.cpu_time_used;
+ report.complexity_n = results.complexity_n;
+ report.complexity = b.complexity;
+ report.complexity_lambda = b.complexity_lambda;
+ report.statistics = b.statistics;
+ report.counters = results.counters;
+
+ if (memory_iterations > 0) {
+ report.has_memory_result = true;
+ report.allocs_per_iter =
+ memory_iterations ? static_cast<double>(memory_result.num_allocs) /
+ memory_iterations
+ : 0;
+ report.max_bytes_used = memory_result.max_bytes_used;
+ }
+
+ internal::Finish(&report.counters, results.iterations, seconds, b.threads);
+ }
+ return report;
+}
+
+// Execute one thread of benchmark b for the specified number of iterations.
+// Adds the stats collected for the thread into *total.
+void RunInThread(const BenchmarkInstance* b, IterationCount iters,
+ int thread_id, ThreadManager* manager) {
+ internal::ThreadTimer timer(
+ b->measure_process_cpu_time
+ ? internal::ThreadTimer::CreateProcessCpuTime()
+ : internal::ThreadTimer::Create());
+ State st = b->Run(iters, thread_id, &timer, manager);
+ CHECK(st.iterations() >= st.max_iterations)
+ << "Benchmark returned before State::KeepRunning() returned false!";
+ {
+ MutexLock l(manager->GetBenchmarkMutex());
+ internal::ThreadManager::Result& results = manager->results;
+ results.iterations += st.iterations();
+ results.cpu_time_used += timer.cpu_time_used();
+ results.real_time_used += timer.real_time_used();
+ results.manual_time_used += timer.manual_time_used();
+ results.complexity_n += st.complexity_length_n();
+ internal::Increment(&results.counters, st.counters);
+ }
+ manager->NotifyThreadComplete();
+}
+
+class BenchmarkRunner {
+ public:
+ BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
+ std::vector<BenchmarkReporter::Run>* complexity_reports_)
+ : b(b_),
+ complexity_reports(*complexity_reports_),
+ min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time),
+ repeats(b.repetitions != 0 ? b.repetitions
+ : FLAGS_benchmark_repetitions),
+ has_explicit_iteration_count(b.iterations != 0),
+ pool(b.threads - 1),
+ iters(has_explicit_iteration_count ? b.iterations : 1) {
+ run_results.display_report_aggregates_only =
+ (FLAGS_benchmark_report_aggregates_only ||
+ FLAGS_benchmark_display_aggregates_only);
+ run_results.file_report_aggregates_only =
+ FLAGS_benchmark_report_aggregates_only;
+ if (b.aggregation_report_mode != internal::ARM_Unspecified) {
+ run_results.display_report_aggregates_only =
+ (b.aggregation_report_mode &
+ internal::ARM_DisplayReportAggregatesOnly);
+ run_results.file_report_aggregates_only =
+ (b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
+ }
+
+ for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
+ DoOneRepetition(repetition_num);
+ }
+
+ // Calculate additional statistics
+ run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
+
+ // Maybe calculate complexity report
+ if ((b.complexity != oNone) && b.last_benchmark_instance) {
+ auto additional_run_stats = ComputeBigO(complexity_reports);
+ run_results.aggregates_only.insert(run_results.aggregates_only.end(),
+ additional_run_stats.begin(),
+ additional_run_stats.end());
+ complexity_reports.clear();
+ }
+ }
+
+ RunResults&& get_results() { return std::move(run_results); }
+
+ private:
+ RunResults run_results;
+
+ const benchmark::internal::BenchmarkInstance& b;
+ std::vector<BenchmarkReporter::Run>& complexity_reports;
+
+ const double min_time;
+ const int repeats;
+ const bool has_explicit_iteration_count;
+
+ std::vector<std::thread> pool;
+
+ IterationCount iters; // preserved between repetitions!
+ // So only the first repetition has to find/calculate it,
+ // the other repetitions will just use that precomputed iteration count.
+
+ struct IterationResults {
+ internal::ThreadManager::Result results;
+ IterationCount iters;
+ double seconds;
+ };
+ IterationResults DoNIterations() {
+ VLOG(2) << "Running " << b.name.str() << " for " << iters << "\n";
+
+ std::unique_ptr<internal::ThreadManager> manager;
+ manager.reset(new internal::ThreadManager(b.threads));
+
+ // Run all but one thread in separate threads
+ for (std::size_t ti = 0; ti < pool.size(); ++ti) {
+ pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
+ manager.get());
+ }
+ // And run one thread here directly.
+ // (If we were asked to run just one thread, we don't create new threads.)
+ // Yes, we need to do this here *after* we start the separate threads.
+ RunInThread(&b, iters, 0, manager.get());
+
+ // The main thread has finished. Now let's wait for the other threads.
+ manager->WaitForAllThreads();
+ for (std::thread& thread : pool) thread.join();
+
+ IterationResults i;
+ // Acquire the measurements/counters from the manager, UNDER THE LOCK!
+ {
+ MutexLock l(manager->GetBenchmarkMutex());
+ i.results = manager->results;
+ }
+
+ // And get rid of the manager.
+ manager.reset();
+
+ // Adjust real/manual time stats since they were reported per thread.
+ i.results.real_time_used /= b.threads;
+ i.results.manual_time_used /= b.threads;
+ // If we were measuring whole-process CPU usage, adjust the CPU time too.
+ if (b.measure_process_cpu_time) i.results.cpu_time_used /= b.threads;
+
+ VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
+ << i.results.real_time_used << "\n";
+
+ // So for how long were we running?
+ i.iters = iters;
+ // Base decisions off of real time if requested by this benchmark.
+ i.seconds = i.results.cpu_time_used;
+ if (b.use_manual_time) {
+ i.seconds = i.results.manual_time_used;
+ } else if (b.use_real_time) {
+ i.seconds = i.results.real_time_used;
+ }
+
+ return i;
+ }
+
+ IterationCount PredictNumItersNeeded(const IterationResults& i) const {
+ // See by how much the iteration count should be increased.
+ // Note: Avoid division by zero with max(seconds, 1ns).
+ double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
+ // If our last run was at least 10% of FLAGS_benchmark_min_time then we
+ // use the multiplier directly.
+ // Otherwise we cap the expansion at 10x.
+ // NOTE: when the last run was at least 10% of the min time, the max
+ // expansion works out to 14x.
+ bool is_significant = (i.seconds / min_time) > 0.1;
+ multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
+ if (multiplier <= 1.0) multiplier = 2.0;
+
+ // So what seems to be the sufficiently-large iteration count? Round up.
+ const IterationCount max_next_iters =
+ 0.5 + std::max(multiplier * i.iters, i.iters + 1.0);
+ // But we do have *some* sanity limits, though.
+ const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
+
+ VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
+ return next_iters; // round up before conversion to integer.
+ }
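+ // Worked example (assuming the 0.5s default for benchmark_min_time): if
+ // the last run took i.seconds = 0.1s over i.iters = 1000 iterations, then
+ // multiplier = 0.5 * 1.4 / 0.1 = 7.0; the run was "significant"
+ // (0.1 / 0.5 = 0.2 > 0.1), so the multiplier is used as-is and
+ // next_iters = 7000.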
+
+ bool ShouldReportIterationResults(const IterationResults& i) const {
+ // Determine if this run should be reported:
+ // either it has run for a sufficient amount of time,
+ // or an error was reported.
+ return i.results.has_error_ ||
+ i.iters >= kMaxIterations || // Too many iterations already.
+ i.seconds >= min_time || // The elapsed time is large enough.
+ // CPU time is specified but the elapsed real time greatly exceeds
+ // the minimum time.
+ // Note that user-provided timers are exempt from this sanity check.
+ ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
+ }
+
+ void DoOneRepetition(int64_t repetition_index) {
+ const bool is_the_first_repetition = repetition_index == 0;
+ IterationResults i;
+
+ // We *may* be gradually increasing the length (iteration count)
+ // of the benchmark until we decide the results are significant.
+ // And once we do, we report those last results and exit.
+ // Please do note that if there are repetitions, the iteration count
+ // is *only* calculated for the *first* repetition, and other repetitions
+ // simply use that precomputed iteration count.
+ for (;;) {
+ i = DoNIterations();
+
+ // Do we consider the results to be significant?
+ // If we are doing repetitions, and the first repetition was already done,
+ // it has calculated the correct iteration count, so we have run that very
+ // iteration count just now. No need to calculate anything. Just report.
+ // Else, the normal rules apply.
+ const bool results_are_significant = !is_the_first_repetition ||
+ has_explicit_iteration_count ||
+ ShouldReportIterationResults(i);
+
+ if (results_are_significant) break; // Good, let's report them!
+
+ // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
+ // iteration count, and run the benchmark again...
+
+ iters = PredictNumItersNeeded(i);
+ assert(iters > i.iters &&
+ "if we did more iterations than we want to do the next time, "
+ "then we should have accepted the current iteration run.");
+ }
+
+ // Oh, one last thing: we also need to produce the 'memory measurements'.
+ MemoryManager::Result memory_result;
+ IterationCount memory_iterations = 0;
+ if (memory_manager != nullptr) {
+ // Only run a few iterations to reduce the impact of one-time
+ // allocations in benchmarks that are not properly managed.
+ memory_iterations = std::min<IterationCount>(16, iters);
+ memory_manager->Start();
+ std::unique_ptr<internal::ThreadManager> manager;
+ manager.reset(new internal::ThreadManager(1));
+ RunInThread(&b, memory_iterations, 0, manager.get());
+ manager->WaitForAllThreads();
+ manager.reset();
+
+ memory_manager->Stop(&memory_result);
+ }
+
+ // OK, now actually report.
+ BenchmarkReporter::Run report =
+ CreateRunReport(b, i.results, memory_iterations, memory_result,
+ i.seconds, repetition_index);
+
+ if (!report.error_occurred && b.complexity != oNone)
+ complexity_reports.push_back(report);
+
+ run_results.non_aggregates.push_back(report);
+ }
+};
+
+} // end namespace
+
+RunResults RunBenchmark(
+ const benchmark::internal::BenchmarkInstance& b,
+ std::vector<BenchmarkReporter::Run>* complexity_reports) {
+ internal::BenchmarkRunner r(b, complexity_reports);
+ return r.get_results();
+}
+
+} // end namespace internal
+
+} // end namespace benchmark
diff --git a/src/third_party/benchmark/dist/src/benchmark_runner.h b/src/third_party/benchmark/dist/src/benchmark_runner.h
new file mode 100644
index 00000000000..96e8282a11a
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/benchmark_runner.h
@@ -0,0 +1,51 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef BENCHMARK_RUNNER_H_
+#define BENCHMARK_RUNNER_H_
+
+#include "benchmark_api_internal.h"
+#include "internal_macros.h"
+
+DECLARE_double(benchmark_min_time);
+
+DECLARE_int32(benchmark_repetitions);
+
+DECLARE_bool(benchmark_report_aggregates_only);
+
+DECLARE_bool(benchmark_display_aggregates_only);
+
+namespace benchmark {
+
+namespace internal {
+
+extern MemoryManager* memory_manager;
+
+struct RunResults {
+ std::vector<BenchmarkReporter::Run> non_aggregates;
+ std::vector<BenchmarkReporter::Run> aggregates_only;
+
+ bool display_report_aggregates_only = false;
+ bool file_report_aggregates_only = false;
+};
+
+RunResults RunBenchmark(
+ const benchmark::internal::BenchmarkInstance& b,
+ std::vector<BenchmarkReporter::Run>* complexity_reports);
+
+} // namespace internal
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_RUNNER_H_
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/check.h b/src/third_party/benchmark/dist/src/check.h
index 73bead2fb55..f5f8253f804 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/check.h
+++ b/src/third_party/benchmark/dist/src/check.h
@@ -1,9 +1,9 @@
#ifndef CHECK_H_
#define CHECK_H_
+#include <cmath>
#include <cstdlib>
#include <ostream>
-#include <cmath>
#include "internal_macros.h"
#include "log.h"
@@ -62,6 +62,8 @@ class CheckHandler {
#define CHECK(b) ::benchmark::internal::GetNullLogInstance()
#endif
+// clang-format off
+// preserve the whitespace between operators for alignment
#define CHECK_EQ(a, b) CHECK((a) == (b))
#define CHECK_NE(a, b) CHECK((a) != (b))
#define CHECK_GE(a, b) CHECK((a) >= (b))
@@ -75,5 +77,6 @@ class CheckHandler {
#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps))
#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps))
#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps))
+// clang-format on
#endif // CHECK_H_
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/colorprint.cc b/src/third_party/benchmark/dist/src/colorprint.cc
index 2dec4a8b28b..fff6a98818b 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/colorprint.cc
+++ b/src/third_party/benchmark/dist/src/colorprint.cc
@@ -25,7 +25,7 @@
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
-#include <Windows.h>
+#include <windows.h>
#include <io.h>
#else
#include <unistd.h>
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/colorprint.h b/src/third_party/benchmark/dist/src/colorprint.h
index 9f6fab9b342..9f6fab9b342 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/colorprint.h
+++ b/src/third_party/benchmark/dist/src/colorprint.h
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/commandlineflags.cc b/src/third_party/benchmark/dist/src/commandlineflags.cc
index 2fc92517a32..6bd65c5ae70 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/commandlineflags.cc
+++ b/src/third_party/benchmark/dist/src/commandlineflags.cc
@@ -21,6 +21,8 @@
#include <limits>
namespace benchmark {
+namespace {
+
// Parses 'str' for a 32-bit signed integer. If successful, writes
// the result to *value and returns true; otherwise leaves *value
// unchanged and returns false.
@@ -45,7 +47,7 @@ bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
// LONG_MAX or LONG_MIN when the input overflows.)
result != long_value
// The parsed value overflows as an Int32.
- ) {
+ ) {
std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\", "
<< "which overflows.\n";
@@ -88,6 +90,8 @@ static std::string FlagToEnvVar(const char* flag) {
return "BENCHMARK_" + env_var;
}
+} // namespace
+
// Reads and returns the Boolean environment variable corresponding to
// the given flag; if it's not set, returns default_value.
//
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/commandlineflags.h b/src/third_party/benchmark/dist/src/commandlineflags.h
index 945c9a9fc4a..5eaea82a59b 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/commandlineflags.h
+++ b/src/third_party/benchmark/dist/src/commandlineflags.h
@@ -23,16 +23,10 @@
std::string FLAG(name) = (default_val)
namespace benchmark {
-// Parses 'str' for a 32-bit signed integer. If successful, writes the result
-// to *value and returns true; otherwise leaves *value unchanged and returns
-// false.
-bool ParseInt32(const std::string& src_text, const char* str, int32_t* value);
-
// Parses a bool/Int32/string from the environment variable
// corresponding to the given Google Test flag.
bool BoolFromEnv(const char* flag, bool default_val);
int32_t Int32FromEnv(const char* flag, int32_t default_val);
-double DoubleFromEnv(const char* flag, double default_val);
const char* StringFromEnv(const char* flag, const char* default_val);
// Parses a string for a bool flag, in the form of either
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/complexity.cc b/src/third_party/benchmark/dist/src/complexity.cc
index 97bf6e09b30..aeed67f0c70 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/complexity.cc
+++ b/src/third_party/benchmark/dist/src/complexity.cc
@@ -26,20 +26,26 @@ namespace benchmark {
// Internal function to calculate the different scalability forms
BigOFunc* FittingCurve(BigO complexity) {
+ static const double kLog2E = 1.44269504088896340736;
switch (complexity) {
case oN:
- return [](int64_t n) -> double { return static_cast<double>(n); };
+ return [](IterationCount n) -> double { return static_cast<double>(n); };
case oNSquared:
- return [](int64_t n) -> double { return std::pow(n, 2); };
+ return [](IterationCount n) -> double { return std::pow(n, 2); };
case oNCubed:
- return [](int64_t n) -> double { return std::pow(n, 3); };
+ return [](IterationCount n) -> double { return std::pow(n, 3); };
case oLogN:
- return [](int64_t n) { return log2(n); };
+ /* Note: can't use log2 because Android's GNU STL lacks it */
+ return
+ [](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
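+ // (log2(n) == log(n) / log(2) == kLog2E * log(n), since kLog2E is
+ // log2(e) == 1 / ln(2).)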
case oNLogN:
- return [](int64_t n) { return n * log2(n); };
+ /* Note: can't use log2 because Android's GNU STL lacks it */
+ return [](IterationCount n) {
+ return kLog2E * n * log(static_cast<double>(n));
+ };
case o1:
default:
- return [](int64_t) { return 1.0; };
+ return [](IterationCount) { return 1.0; };
}
}
@@ -70,8 +76,8 @@ std::string GetBigOString(BigO complexity) {
// - time : Vector containing the times for the benchmark tests.
// - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };).
-// For a deeper explanation on the algorithm logic, look the README file at
-// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
+// For a deeper explanation on the algorithm logic, please refer to
+// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
const std::vector<double>& time,
@@ -179,12 +185,20 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
}
- std::string benchmark_name =
- reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));
+
+ // Drop the 'args' when reporting complexity.
+ auto run_name = reports[0].run_name;
+ run_name.args.clear();
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run big_o;
- big_o.benchmark_name = benchmark_name + "_BigO";
+ big_o.run_name = run_name;
+ big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
+ big_o.repetitions = reports[0].repetitions;
+ big_o.repetition_index = Run::no_repetition_index;
+ big_o.threads = reports[0].threads;
+ big_o.aggregate_name = "BigO";
+ big_o.report_label = reports[0].report_label;
big_o.iterations = 0;
big_o.real_accumulated_time = result_real.coef;
big_o.cpu_accumulated_time = result_cpu.coef;
@@ -200,10 +214,14 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
// Only add the label to mean/stddev if it is the same for all runs
Run rms;
- big_o.report_label = reports[0].report_label;
- rms.benchmark_name = benchmark_name + "_RMS";
+ rms.run_name = run_name;
+ rms.run_type = BenchmarkReporter::Run::RT_Aggregate;
+ rms.aggregate_name = "RMS";
rms.report_label = big_o.report_label;
rms.iterations = 0;
+ rms.repetition_index = Run::no_repetition_index;
+ rms.repetitions = reports[0].repetitions;
+ rms.threads = reports[0].threads;
rms.real_accumulated_time = result_real.rms / multiplier;
rms.cpu_accumulated_time = result_cpu.rms / multiplier;
rms.report_rms = true;
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/complexity.h b/src/third_party/benchmark/dist/src/complexity.h
index df29b48d29b..df29b48d29b 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/complexity.h
+++ b/src/third_party/benchmark/dist/src/complexity.h
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/console_reporter.cc b/src/third_party/benchmark/dist/src/console_reporter.cc
index 48920ca7829..cc8ae276f6b 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/console_reporter.cc
+++ b/src/third_party/benchmark/dist/src/console_reporter.cc
@@ -53,7 +53,7 @@ bool ConsoleReporter::ReportContext(const Context& context) {
}
void ConsoleReporter::PrintHeader(const Run& run) {
- std::string str = FormatString("%-*s %13s %13s %10s", static_cast<int>(name_field_width_),
+ std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_),
"Benchmark", "Time", "CPU", "Iterations");
if(!run.counters.empty()) {
if(output_options_ & OO_Tabular) {
@@ -64,9 +64,8 @@ void ConsoleReporter::PrintHeader(const Run& run) {
str += " UserCounters...";
}
}
- str += "\n";
std::string line = std::string(str.length(), '-');
- GetOutputStream() << line << "\n" << str << line << "\n";
+ GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
}
void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
@@ -98,6 +97,21 @@ static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt,
va_end(args);
}
+
+static std::string FormatTime(double time) {
+ // Align decimal places...
+ if (time < 1.0) {
+ return FormatString("%10.3f", time);
+ }
+ if (time < 10.0) {
+ return FormatString("%10.2f", time);
+ }
+ if (time < 100.0) {
+ return FormatString("%10.1f", time);
+ }
+ return FormatString("%10.0f", time);
+}
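+// For illustration: FormatTime(0.123) -> "     0.123", FormatTime(3.5) ->
+// "      3.50", FormatTime(42.0) -> "      42.0", FormatTime(1234.0) ->
+// "      1234"; every result is ten characters wide, with more fractional
+// digits for smaller magnitudes.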
+
void ConsoleReporter::PrintRunData(const Run& result) {
typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
auto& Out = GetOutputStream();
@@ -106,7 +120,7 @@ void ConsoleReporter::PrintRunData(const Run& result) {
auto name_color =
(result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
printer(Out, name_color, "%-*s ", name_field_width_,
- result.benchmark_name.c_str());
+ result.benchmark_name().c_str());
if (result.error_occurred) {
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
@@ -114,33 +128,24 @@ void ConsoleReporter::PrintRunData(const Run& result) {
printer(Out, COLOR_DEFAULT, "\n");
return;
}
- // Format bytes per second
- std::string rate;
- if (result.bytes_per_second > 0) {
- rate = StrCat(" ", HumanReadableNumber(result.bytes_per_second), "B/s");
- }
-
- // Format items per second
- std::string items;
- if (result.items_per_second > 0) {
- items =
- StrCat(" ", HumanReadableNumber(result.items_per_second), " items/s");
- }
const double real_time = result.GetAdjustedRealTime();
const double cpu_time = result.GetAdjustedCPUTime();
+ const std::string real_time_str = FormatTime(real_time);
+ const std::string cpu_time_str = FormatTime(cpu_time);
+
if (result.report_big_o) {
std::string big_o = GetBigOString(result.complexity);
- printer(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time, big_o.c_str(),
+ printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(),
cpu_time, big_o.c_str());
} else if (result.report_rms) {
- printer(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100,
- cpu_time * 100);
+ printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%",
+ cpu_time * 100, "%");
} else {
const char* timeLabel = GetTimeUnitString(result.time_unit);
- printer(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel,
- cpu_time, timeLabel);
+ printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel,
+ cpu_time_str.c_str(), timeLabel);
}
if (!result.report_big_o && !result.report_rms) {
@@ -150,7 +155,7 @@ void ConsoleReporter::PrintRunData(const Run& result) {
for (auto& c : result.counters) {
const std::size_t cNameLen = std::max(std::string::size_type(10),
c.first.length());
- auto const& s = HumanReadableNumber(c.second.value, 1000);
+ auto const& s = HumanReadableNumber(c.second.value, c.second.oneK);
if (output_options_ & OO_Tabular) {
if (c.second.flags & Counter::kIsRate) {
printer(Out, COLOR_DEFAULT, " %*s/s", cNameLen - 2, s.c_str());
@@ -164,14 +169,6 @@ void ConsoleReporter::PrintRunData(const Run& result) {
}
}
- if (!rate.empty()) {
- printer(Out, COLOR_DEFAULT, " %*s", 13, rate.c_str());
- }
-
- if (!items.empty()) {
- printer(Out, COLOR_DEFAULT, " %*s", 18, items.c_str());
- }
-
if (!result.report_label.empty()) {
printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
}
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/counter.cc b/src/third_party/benchmark/dist/src/counter.cc
index ed1aa044ee7..c248ea110bc 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/counter.cc
+++ b/src/third_party/benchmark/dist/src/counter.cc
@@ -17,7 +17,8 @@
namespace benchmark {
namespace internal {
-double Finish(Counter const& c, double cpu_time, double num_threads) {
+double Finish(Counter const& c, IterationCount iterations, double cpu_time,
+ double num_threads) {
double v = c.value;
if (c.flags & Counter::kIsRate) {
v /= cpu_time;
@@ -25,25 +26,32 @@ double Finish(Counter const& c, double cpu_time, double num_threads) {
if (c.flags & Counter::kAvgThreads) {
v /= num_threads;
}
+ if (c.flags & Counter::kIsIterationInvariant) {
+ v *= iterations;
+ }
+ if (c.flags & Counter::kAvgIterations) {
+ v /= iterations;
+ }
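+ // For illustration: a counter of 64 with flags kIsRate | kAvgIterations,
+ // cpu_time = 2s and iterations = 8 reports 64 / 2 / 8 = 4, i.e. 4 per
+ // second, averaged over iterations.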
return v;
}
-void Finish(UserCounters *l, double cpu_time, double num_threads) {
- for (auto &c : *l) {
- c.second.value = Finish(c.second, cpu_time, num_threads);
+void Finish(UserCounters* l, IterationCount iterations, double cpu_time,
+ double num_threads) {
+ for (auto& c : *l) {
+ c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
}
}
-void Increment(UserCounters *l, UserCounters const& r) {
+void Increment(UserCounters* l, UserCounters const& r) {
// add counters present in both or just in *l
- for (auto &c : *l) {
+ for (auto& c : *l) {
auto it = r.find(c.first);
if (it != r.end()) {
c.second.value = c.second + it->second;
}
}
// add counters present in r, but not in *l
- for (auto const &tc : r) {
+ for (auto const& tc : r) {
auto it = l->find(tc.first);
if (it == l->end()) {
(*l)[tc.first] = tc.second;
@@ -64,5 +72,5 @@ bool SameNames(UserCounters const& l, UserCounters const& r) {
return true;
}
-} // end namespace internal
-} // end namespace benchmark
+} // end namespace internal
+} // end namespace benchmark
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/counter.h b/src/third_party/benchmark/dist/src/counter.h
index dd6865a31d7..1ad46d4940e 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/counter.h
+++ b/src/third_party/benchmark/dist/src/counter.h
@@ -18,9 +18,10 @@ namespace benchmark {
// these counter-related functions are hidden to reduce API surface.
namespace internal {
-void Finish(UserCounters *l, double time, double num_threads);
-void Increment(UserCounters *l, UserCounters const& r);
+void Finish(UserCounters* l, IterationCount iterations, double time,
+ double num_threads);
+void Increment(UserCounters* l, UserCounters const& r);
bool SameNames(UserCounters const& l, UserCounters const& r);
-} // end namespace internal
+} // end namespace internal
-} //end namespace benchmark
+} // end namespace benchmark
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/csv_reporter.cc b/src/third_party/benchmark/dist/src/csv_reporter.cc
index 35510645b08..af2c18fc8a6 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/csv_reporter.cc
+++ b/src/third_party/benchmark/dist/src/csv_reporter.cc
@@ -22,9 +22,9 @@
#include <tuple>
#include <vector>
+#include "check.h"
#include "string_util.h"
#include "timers.h"
-#include "check.h"
// File format reference: http://edoceo.com/utilitas/csv-file-format.
@@ -37,18 +37,32 @@ std::vector<std::string> elements = {
"error_occurred", "error_message"};
} // namespace
+std::string CsvEscape(const std::string & s) {
+ std::string tmp;
+ tmp.reserve(s.size() + 2);
+ for (char c : s) {
+ switch (c) {
+ case '"' : tmp += "\"\""; break;
+ default : tmp += c; break;
+ }
+ }
+ return '"' + tmp + '"';
+}
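+// For illustration: CsvEscape(R"(say "hi")") returns R"("say ""hi""")" --
+// embedded double quotes are doubled and the whole field is quoted, per the
+// CSV format reference above.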
+
bool CSVReporter::ReportContext(const Context& context) {
PrintBasicContext(&GetErrorStream(), context);
return true;
}
-void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
+void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
std::ostream& Out = GetOutputStream();
if (!printed_header_) {
// save the names of all the user counters
for (const auto& run : reports) {
for (const auto& cnt : run.counters) {
+ if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
+ continue;
user_counter_names_.insert(cnt.first);
}
}
@@ -58,7 +72,8 @@ void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
Out << *B++;
if (B != elements.end()) Out << ",";
}
- for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();) {
+ for (auto B = user_counter_names_.begin();
+ B != user_counter_names_.end();) {
Out << ",\"" << *B++ << "\"";
}
Out << "\n";
@@ -68,10 +83,12 @@ void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
// check that all the current counters are saved in the name set
for (const auto& run : reports) {
for (const auto& cnt : run.counters) {
+ if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
+ continue;
CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
- << "All counters must be present in each run. "
- << "Counter named \"" << cnt.first
- << "\" was not in a run after being added to the header";
+ << "All counters must be present in each run. "
+ << "Counter named \"" << cnt.first
+ << "\" was not in a run after being added to the header";
}
}
}
@@ -80,23 +97,15 @@ void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
for (const auto& run : reports) {
PrintRunData(run);
}
-
}
-void CSVReporter::PrintRunData(const Run & run) {
+void CSVReporter::PrintRunData(const Run& run) {
std::ostream& Out = GetOutputStream();
-
- // Field with embedded double-quote characters must be doubled and the field
- // delimited with double-quotes.
- std::string name = run.benchmark_name;
- ReplaceAll(&name, "\"", "\"\"");
- Out << '"' << name << "\",";
+ Out << CsvEscape(run.benchmark_name()) << ",";
if (run.error_occurred) {
Out << std::string(elements.size() - 3, ',');
Out << "true,";
- std::string msg = run.error_message;
- ReplaceAll(&msg, "\"", "\"\"");
- Out << '"' << msg << "\"\n";
+ Out << CsvEscape(run.error_message) << "\n";
return;
}
@@ -117,27 +126,23 @@ void CSVReporter::PrintRunData(const Run & run) {
}
Out << ",";
- if (run.bytes_per_second > 0.0) {
- Out << run.bytes_per_second;
+ if (run.counters.find("bytes_per_second") != run.counters.end()) {
+ Out << run.counters.at("bytes_per_second");
}
Out << ",";
- if (run.items_per_second > 0.0) {
- Out << run.items_per_second;
+ if (run.counters.find("items_per_second") != run.counters.end()) {
+ Out << run.counters.at("items_per_second");
}
Out << ",";
if (!run.report_label.empty()) {
- // Field with embedded double-quote characters must be doubled and the field
- // delimited with double-quotes.
- std::string label = run.report_label;
- ReplaceAll(&label, "\"", "\"\"");
- Out << "\"" << label << "\"";
+ Out << CsvEscape(run.report_label);
}
Out << ",,"; // for error_occurred and error_message
// Print user counters
- for (const auto &ucn : user_counter_names_) {
+ for (const auto& ucn : user_counter_names_) {
auto it = run.counters.find(ucn);
- if(it == run.counters.end()) {
+ if (it == run.counters.end()) {
Out << ",";
} else {
Out << "," << it->second;
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/cycleclock.h b/src/third_party/benchmark/dist/src/cycleclock.h
index 3b376ac57d5..f5e37b011b9 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/cycleclock.h
+++ b/src/third_party/benchmark/dist/src/cycleclock.h
@@ -41,7 +41,7 @@ extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif
-#ifndef BENCHMARK_OS_WINDOWS
+#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
#include <sys/time.h>
#include <time.h>
#endif
@@ -121,7 +121,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
// because it provides nanosecond resolution (which is noticeable at
// least for PNaCl modules running on x86 Mac & Linux).
// Initialize to always return 0 if clock_gettime fails.
- struct timespec ts = { 0, 0 };
+ struct timespec ts = {0, 0};
clock_gettime(CLOCK_MONOTONIC, &ts);
return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__aarch64__)
@@ -159,10 +159,10 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
-#elif defined(__s390__) // Covers both s390 and s390x.
+#elif defined(__s390__) // Covers both s390 and s390x.
// Return the CPU clock.
uint64_t tsc;
- asm("stck %0" : "=Q" (tsc) : : "cc");
+ asm("stck %0" : "=Q"(tsc) : : "cc");
return tsc;
#else
// The soft failover to a generic implementation is automatic only for ARM.
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/internal_macros.h b/src/third_party/benchmark/dist/src/internal_macros.h
index edb8a5c0a35..6adf00d0569 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/internal_macros.h
+++ b/src/third_party/benchmark/dist/src/internal_macros.h
@@ -3,12 +3,14 @@
#include "benchmark/benchmark.h"
+/* Needed to detect STL */
+#include <cstdlib>
+
+// clang-format off
+
#ifndef __has_feature
#define __has_feature(x) 0
#endif
-#ifndef __has_builtin
-#define __has_builtin(x) 0
-#endif
#if defined(__clang__)
#if !defined(COMPILER_CLANG)
@@ -38,6 +40,9 @@
#define BENCHMARK_OS_CYGWIN 1
#elif defined(_WIN32)
#define BENCHMARK_OS_WINDOWS 1
+ #if defined(__MINGW32__)
+ #define BENCHMARK_OS_MINGW 1
+ #endif
#elif defined(__APPLE__)
#define BENCHMARK_OS_APPLE 1
#include "TargetConditionals.h"
@@ -65,6 +70,12 @@
#define BENCHMARK_OS_FUCHSIA 1
#elif defined (__SVR4) && defined (__sun)
#define BENCHMARK_OS_SOLARIS 1
+#elif defined(__QNX__)
+#define BENCHMARK_OS_QNX 1
+#endif
+
+#if defined(__ANDROID__) && defined(__GLIBCXX__)
+#define BENCHMARK_STL_ANDROID_GNUSTL 1
#endif
#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \
@@ -78,12 +89,6 @@
#define BENCHMARK_MAYBE_UNUSED
#endif
-#if defined(COMPILER_GCC) || __has_builtin(__builtin_unreachable)
- #define BENCHMARK_UNREACHABLE() __builtin_unreachable()
-#elif defined(COMPILER_MSVC)
- #define BENCHMARK_UNREACHABLE() __assume(false)
-#else
- #define BENCHMARK_UNREACHABLE() ((void)0)
-#endif
+// clang-format on
#endif // BENCHMARK_INTERNAL_MACROS_H_
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/json_reporter.cc b/src/third_party/benchmark/dist/src/json_reporter.cc
index faf16ee0b72..0495d96688c 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/json_reporter.cc
+++ b/src/third_party/benchmark/dist/src/json_reporter.cc
@@ -16,13 +16,14 @@
#include "complexity.h"
#include <algorithm>
+#include <cmath>
#include <cstdint>
+#include <iomanip> // for setprecision
#include <iostream>
+#include <limits>
#include <string>
#include <tuple>
#include <vector>
-#include <iomanip> // for setprecision
-#include <limits>
#include "string_util.h"
#include "timers.h"
@@ -31,32 +32,63 @@ namespace benchmark {
namespace {
+std::string StrEscape(const std::string & s) {
+ std::string tmp;
+ tmp.reserve(s.size());
+ for (char c : s) {
+ switch (c) {
+ case '\b': tmp += "\\b"; break;
+ case '\f': tmp += "\\f"; break;
+ case '\n': tmp += "\\n"; break;
+ case '\r': tmp += "\\r"; break;
+ case '\t': tmp += "\\t"; break;
+ case '\\': tmp += "\\\\"; break;
+ case '"' : tmp += "\\\""; break;
+ default : tmp += c; break;
+ }
+ }
+ return tmp;
+}
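+// For illustration: StrEscape("C:\\tmp\n") returns "C:\\\\tmp\\n" -- the
+// backslash and the newline are escaped so the value can be embedded in a
+// JSON string literal.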
+
std::string FormatKV(std::string const& key, std::string const& value) {
- return StrFormat("\"%s\": \"%s\"", key.c_str(), value.c_str());
+ return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
}
std::string FormatKV(std::string const& key, const char* value) {
- return StrFormat("\"%s\": \"%s\"", key.c_str(), value);
+ return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
}
std::string FormatKV(std::string const& key, bool value) {
- return StrFormat("\"%s\": %s", key.c_str(), value ? "true" : "false");
+ return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false");
}
std::string FormatKV(std::string const& key, int64_t value) {
std::stringstream ss;
- ss << '"' << key << "\": " << value;
+ ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
}
-std::string FormatKV(std::string const& key, double value) {
+std::string FormatKV(std::string const& key, IterationCount value) {
std::stringstream ss;
- ss << '"' << key << "\": ";
+ ss << '"' << StrEscape(key) << "\": " << value;
+ return ss.str();
+}
- const auto max_digits10 = std::numeric_limits<decltype (value)>::max_digits10;
- const auto max_fractional_digits10 = max_digits10 - 1;
+std::string FormatKV(std::string const& key, double value) {
+ std::stringstream ss;
+ ss << '"' << StrEscape(key) << "\": ";
- ss << std::scientific << std::setprecision(max_fractional_digits10) << value;
+ if (std::isnan(value))
+ ss << (value < 0 ? "-" : "") << "NaN";
+ else if (std::isinf(value))
+ ss << (value < 0 ? "-" : "") << "Infinity";
+ else {
+ const auto max_digits10 =
+ std::numeric_limits<decltype(value)>::max_digits10;
+ const auto max_fractional_digits10 = max_digits10 - 1;
+ ss << std::scientific << std::setprecision(max_fractional_digits10)
+ << value;
+ }
return ss.str();
}
@@ -77,11 +109,20 @@ bool JSONReporter::ReportContext(const Context& context) {
std::string walltime_value = LocalDateTimeString();
out << indent << FormatKV("date", walltime_value) << ",\n";
+ out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
+
if (Context::executable_name) {
// windows uses backslash for its path separator,
// which must be escaped in JSON otherwise it blows up conforming JSON
// decoders
std::string executable_name = Context::executable_name;
+ auto ReplaceAll = [](std::string* str, const std::string& from, const std::string& to) {
+ std::size_t start = 0;
+ while ((start = str->find(from, start)) != std::string::npos) {
+ str->replace(start, from.length(), to);
+ start += to.length();
+ }
+ };
ReplaceAll(&executable_name, "\\", "\\\\");
out << indent << FormatKV("executable", executable_name) << ",\n";
}
@@ -116,6 +157,12 @@ bool JSONReporter::ReportContext(const Context& context) {
}
indent = std::string(4, ' ');
out << indent << "],\n";
+ out << indent << "\"load_avg\": [";
+ for (auto it = info.load_avg.begin(); it != info.load_avg.end();) {
+ out << *it++;
+ if (it != info.load_avg.end()) out << ",";
+ }
+ out << "],\n";
#if defined(NDEBUG)
const char build_type[] = "release";
@@ -159,52 +206,60 @@ void JSONReporter::Finalize() {
void JSONReporter::PrintRunData(Run const& run) {
std::string indent(6, ' ');
std::ostream& out = GetOutputStream();
- out << indent << FormatKV("name", run.benchmark_name) << ",\n";
+ out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
+ out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
+ out << indent << FormatKV("run_type", [&run]() -> const char* {
+ switch (run.run_type) {
+ case BenchmarkReporter::Run::RT_Iteration:
+ return "iteration";
+ case BenchmarkReporter::Run::RT_Aggregate:
+ return "aggregate";
+ }
+ BENCHMARK_UNREACHABLE();
+ }()) << ",\n";
+ out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
+ if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) {
+ out << indent << FormatKV("repetition_index", run.repetition_index)
+ << ",\n";
+ }
+ out << indent << FormatKV("threads", run.threads) << ",\n";
+ if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
+ out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
+ }
if (run.error_occurred) {
out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
out << indent << FormatKV("error_message", run.error_message) << ",\n";
}
if (!run.report_big_o && !run.report_rms) {
out << indent << FormatKV("iterations", run.iterations) << ",\n";
- out << indent
- << FormatKV("real_time", run.GetAdjustedRealTime())
- << ",\n";
- out << indent
- << FormatKV("cpu_time", run.GetAdjustedCPUTime());
+ out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n";
+ out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
out << ",\n"
<< indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_big_o) {
- out << indent
- << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
+ out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
<< ",\n";
- out << indent
- << FormatKV("real_coefficient", run.GetAdjustedRealTime())
+ out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime())
<< ",\n";
out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_rms) {
- out << indent
- << FormatKV("rms", run.GetAdjustedCPUTime());
+ out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
}
- if (run.bytes_per_second > 0.0) {
- out << ",\n"
- << indent
- << FormatKV("bytes_per_second", run.bytes_per_second);
- }
- if (run.items_per_second > 0.0) {
- out << ",\n"
- << indent
- << FormatKV("items_per_second", run.items_per_second);
+
+ for (auto& c : run.counters) {
+ out << ",\n" << indent << FormatKV(c.first, c.second);
}
- for(auto &c : run.counters) {
- out << ",\n"
- << indent
- << FormatKV(c.first, c.second);
+
+ if (run.has_memory_result) {
+ out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
+ out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used);
}
+
if (!run.report_label.empty()) {
out << ",\n" << indent << FormatKV("label", run.report_label);
}
out << '\n';
}
-} // end namespace benchmark
+} // end namespace benchmark
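
Note: the StrEscape and FormatKV(double) changes above exist so the reporter emits parseable JSON even when a key contains quotes or backslashes, or a measurement is NaN/Infinity. A standalone sketch of the same two behaviors (EscapeJson and FormatDoubleKV are illustrative names, not the library's API):

    #include <cmath>
    #include <iostream>
    #include <limits>
    #include <sstream>
    #include <string>

    // Backslash-escape the characters that would break a JSON string.
    std::string EscapeJson(const std::string& s) {
        std::string out;
        out.reserve(s.size());
        for (char c : s) {
            switch (c) {
                case '\\': out += "\\\\"; break;
                case '"':  out += "\\\""; break;
                case '\n': out += "\\n";  break;
                case '\t': out += "\\t";  break;
                default:   out += c;      break;
            }
        }
        return out;
    }

    std::string FormatDoubleKV(const std::string& key, double value) {
        std::ostringstream ss;
        ss << '"' << EscapeJson(key) << "\": ";
        if (std::isnan(value))
            ss << "NaN";                      // JSON has no literal for these;
        else if (std::isinf(value))           // downstream tools must accept them
            ss << (value < 0 ? "-" : "") << "Infinity";
        else
            ss << std::scientific << value;
        return ss.str();
    }

    int main() {
        const double inf = std::numeric_limits<double>::infinity();
        std::cout << FormatDoubleKV("path\\to\"bench\"", inf) << "\n";
        // prints: "path\\to\"bench\"": Infinity
    }
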
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/log.h b/src/third_party/benchmark/dist/src/log.h
index d06e1031db1..47d0c35c018 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/log.h
+++ b/src/third_party/benchmark/dist/src/log.h
@@ -66,8 +66,9 @@ inline LogType& GetLogInstanceForLevel(int level) {
} // end namespace internal
} // end namespace benchmark
+// clang-format off
#define VLOG(x) \
(::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \
" ")
-
+// clang-format on
#endif
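
Note: VLOG works by routing the stream either to a real sink or to one that discards everything, so disabled log statements still compile and evaluate. A minimal sketch of that gating pattern, with illustrative names (the real LogType lives in log.h):

    #include <iostream>
    #include <streambuf>

    namespace {
    int g_verbosity = 1;  // stand-in for the library's --v flag

    struct NullBuf : std::streambuf {
        int overflow(int c) override { return c; }  // swallow every character
    };

    std::ostream& LogFor(int level) {
        static NullBuf null_buf;
        static std::ostream null_stream(&null_buf);
        return level <= g_verbosity ? std::cerr : null_stream;
    }
    }  // namespace

    #define DEMO_VLOG(x) (LogFor(x) << "-- LOG(" << x << "): ")

    int main() {
        DEMO_VLOG(1) << "printed to stderr\n";
        DEMO_VLOG(2) << "silently discarded\n";
    }
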
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/mutex.h b/src/third_party/benchmark/dist/src/mutex.h
index 5f461d05a0c..5f461d05a0c 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/mutex.h
+++ b/src/third_party/benchmark/dist/src/mutex.h
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/re.h b/src/third_party/benchmark/dist/src/re.h
index 924d2f0ba7e..fbe25037b46 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/re.h
+++ b/src/third_party/benchmark/dist/src/re.h
@@ -17,6 +17,8 @@
#include "internal_macros.h"
+// clang-format off
+
#if !defined(HAVE_STD_REGEX) && \
!defined(HAVE_GNU_POSIX_REGEX) && \
!defined(HAVE_POSIX_REGEX)
@@ -45,6 +47,9 @@
#else
#error No regular expression backend was found!
#endif
+
+// clang-format on
+
#include <string>
#include "check.h"
@@ -76,7 +81,7 @@ class Regex {
#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX)
regex_t re_;
#else
- #error No regular expression backend implementation available
+#error No regular expression backend implementation available
#endif
};
@@ -84,20 +89,21 @@ class Regex {
inline bool Regex::Init(const std::string& spec, std::string* error) {
#ifdef BENCHMARK_HAS_NO_EXCEPTIONS
- ((void)error); // suppress unused warning
+ ((void)error); // suppress unused warning
#else
try {
#endif
- re_ = std::regex(spec, std::regex_constants::extended);
- init_ = true;
+ re_ = std::regex(spec, std::regex_constants::extended);
+ init_ = true;
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
- } catch (const std::regex_error& e) {
- if (error) {
- *error = e.what();
- }
+}
+catch (const std::regex_error& e) {
+ if (error) {
+ *error = e.what();
}
+}
#endif
- return init_;
+return init_;
}
inline Regex::~Regex() {}
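
Note: Regex::Init compiles two shapes of the same function, with or without a try/catch, depending on whether the toolchain supports exceptions. A minimal sketch of that preprocessor pattern (MY_HAS_NO_EXCEPTIONS and InitRegex are illustrative names):

    #include <regex>
    #include <string>

    bool InitRegex(const std::string& spec, std::string* error, std::regex* re) {
    #ifdef MY_HAS_NO_EXCEPTIONS
        ((void)error);  // unused; under -fno-exceptions a bad spec aborts instead
    #else
        try {
    #endif
            *re = std::regex(spec, std::regex_constants::extended);
            return true;
    #ifndef MY_HAS_NO_EXCEPTIONS
        } catch (const std::regex_error& e) {
            if (error) *error = e.what();
            return false;
        }
    #endif
    }

    int main() {
        std::regex re;
        std::string err;
        return InitRegex("a[b-", &err, &re) ? 1 : 0;  // bad spec -> false, err set
    }
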
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/reporter.cc b/src/third_party/benchmark/dist/src/reporter.cc
index 4b40aaec8b9..4d3e477d44a 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/reporter.cc
+++ b/src/third_party/benchmark/dist/src/reporter.cc
@@ -22,6 +22,7 @@
#include <vector>
#include "check.h"
+#include "string_util.h"
namespace benchmark {
@@ -54,6 +55,14 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Out << "\n";
}
}
+ if (!info.load_avg.empty()) {
+ Out << "Load Average: ";
+ for (auto It = info.load_avg.begin(); It != info.load_avg.end();) {
+ Out << StrFormat("%.2f", *It++);
+ if (It != info.load_avg.end()) Out << ", ";
+ }
+ Out << "\n";
+ }
if (info.scaling_enabled) {
Out << "***WARNING*** CPU scaling is enabled, the benchmark "
@@ -68,9 +77,18 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
}
// No initializer because it's already initialized to NULL.
-const char* BenchmarkReporter::Context::executable_name;
+const char *BenchmarkReporter::Context::executable_name;
+
+BenchmarkReporter::Context::Context()
+ : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {}
-BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {}
+std::string BenchmarkReporter::Run::benchmark_name() const {
+ std::string name = run_name.str();
+ if (run_type == RT_Aggregate) {
+ name += "_" + aggregate_name;
+ }
+ return name;
+}
double BenchmarkReporter::Run::GetAdjustedRealTime() const {
double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
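
Note: the new Run::benchmark_name() above reconstructs the old flat name by appending the statistic to aggregate rows only. A minimal standalone sketch of that composition (names are illustrative):

    #include <iostream>
    #include <string>

    enum RunType { RT_Iteration, RT_Aggregate };

    // Aggregate rows reuse the run name and append the statistic's name.
    std::string BenchmarkName(const std::string& run_name, RunType type,
                              const std::string& aggregate_name) {
        std::string name = run_name;
        if (type == RT_Aggregate) name += "_" + aggregate_name;
        return name;
    }

    int main() {
        std::cout << BenchmarkName("BM_Foo/64", RT_Iteration, "") << "\n";     // BM_Foo/64
        std::cout << BenchmarkName("BM_Foo/64", RT_Aggregate, "mean") << "\n"; // BM_Foo/64_mean
    }
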
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/sleep.cc b/src/third_party/benchmark/dist/src/sleep.cc
index 54aa04a4224..1512ac90f7e 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/sleep.cc
+++ b/src/third_party/benchmark/dist/src/sleep.cc
@@ -21,7 +21,7 @@
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
-#include <Windows.h>
+#include <windows.h>
#endif
namespace benchmark {
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/sleep.h b/src/third_party/benchmark/dist/src/sleep.h
index f98551afe28..f98551afe28 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/sleep.h
+++ b/src/third_party/benchmark/dist/src/sleep.h
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/statistics.cc b/src/third_party/benchmark/dist/src/statistics.cc
index 1c91e1015ab..bd5a3d65972 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/statistics.cc
+++ b/src/third_party/benchmark/dist/src/statistics.cc
@@ -17,9 +17,9 @@
#include <algorithm>
#include <cmath>
+#include <numeric>
#include <string>
#include <vector>
-#include <numeric>
#include "check.h"
#include "statistics.h"
@@ -43,9 +43,9 @@ double StatisticsMedian(const std::vector<double>& v) {
// did we have an odd number of samples?
// if yes, then center is the median
- // it no, then we are looking for the average between center and the value before
- if(v.size() % 2 == 1)
- return *center;
+ // if no, then we are looking for the average between center and the value
+ // before
+ if (v.size() % 2 == 1) return *center;
auto center2 = copy.begin() + v.size() / 2 - 1;
std::nth_element(copy.begin(), center2, copy.end());
return (*center + *center2) / 2.0;
@@ -68,8 +68,7 @@ double StatisticsStdDev(const std::vector<double>& v) {
if (v.empty()) return mean;
// Sample standard deviation is undefined for n = 1
- if (v.size() == 1)
- return 0.0;
+ if (v.size() == 1) return 0.0;
const double avg_squares = SumSquares(v) * (1.0 / v.size());
return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
@@ -92,27 +91,23 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
// Accumulators.
std::vector<double> real_accumulated_time_stat;
std::vector<double> cpu_accumulated_time_stat;
- std::vector<double> bytes_per_second_stat;
- std::vector<double> items_per_second_stat;
real_accumulated_time_stat.reserve(reports.size());
cpu_accumulated_time_stat.reserve(reports.size());
- bytes_per_second_stat.reserve(reports.size());
- items_per_second_stat.reserve(reports.size());
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
- int64_t const run_iterations = reports.front().iterations;
+ const IterationCount run_iterations = reports.front().iterations;
// create stats for user counters
struct CounterStat {
Counter c;
std::vector<double> s;
};
- std::map< std::string, CounterStat > counter_stats;
- for(Run const& r : reports) {
- for(auto const& cnt : r.counters) {
+ std::map<std::string, CounterStat> counter_stats;
+ for (Run const& r : reports) {
+ for (auto const& cnt : r.counters) {
auto it = counter_stats.find(cnt.first);
- if(it == counter_stats.end()) {
+ if (it == counter_stats.end()) {
counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
it = counter_stats.find(cnt.first);
it->second.s.reserve(reports.size());
@@ -124,15 +119,13 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
// Populate the accumulators.
for (Run const& run : reports) {
- CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
+ CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred) continue;
real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
- items_per_second_stat.emplace_back(run.items_per_second);
- bytes_per_second_stat.emplace_back(run.bytes_per_second);
// user counters
- for(auto const& cnt : run.counters) {
+ for (auto const& cnt : run.counters) {
auto it = counter_stats.find(cnt.first);
CHECK_NE(it, counter_stats.end());
it->second.s.emplace_back(cnt.second);
@@ -148,24 +141,46 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
}
}
- for(const auto& Stat : *reports[0].statistics) {
+ const double iteration_rescale_factor =
+ double(reports.size()) / double(run_iterations);
+
+ for (const auto& Stat : *reports[0].statistics) {
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run data;
- data.benchmark_name = reports[0].benchmark_name + "_" + Stat.name_;
+ data.run_name = reports[0].run_name;
+ data.run_type = BenchmarkReporter::Run::RT_Aggregate;
+ data.threads = reports[0].threads;
+ data.repetitions = reports[0].repetitions;
+ data.repetition_index = Run::no_repetition_index;
+ data.aggregate_name = Stat.name_;
data.report_label = report_label;
- data.iterations = run_iterations;
+
+ // It is incorrect to say that an aggregate is computed over
+ // a run's iterations, because those iterations already got averaged.
+ // Similarly, if there are N repetitions with 1 iteration each,
+ // an aggregate will be computed over N measurements, not 1.
+ // Thus it is best to simply use the count of separate reports.
+ data.iterations = reports.size();
data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
- data.bytes_per_second = Stat.compute_(bytes_per_second_stat);
- data.items_per_second = Stat.compute_(items_per_second_stat);
+
+ // We will divide these times by data.iterations when reporting, but the
+ // data.iterations is not necessarily the scale of these measurements,
+ // because in each repetition, these timers are summed over all the iterations.
+ // And if we want to say that the stats are over N repetitions and not
+ // M iterations, we need to multiply these by (N/M).
+ data.real_accumulated_time *= iteration_rescale_factor;
+ data.cpu_accumulated_time *= iteration_rescale_factor;
data.time_unit = reports[0].time_unit;
// user counters
- for(auto const& kv : counter_stats) {
+ for (auto const& kv : counter_stats) {
+ // Do *NOT* rescale the custom counters. They are already properly scaled.
const auto uc_stat = Stat.compute_(kv.second.s);
- auto c = Counter(uc_stat, counter_stats[kv.first].c.flags);
+ auto c = Counter(uc_stat, counter_stats[kv.first].c.flags,
+ counter_stats[kv.first].c.oneK);
data.counters[kv.first] = c;
}
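
Note: the iteration rescaling above is easiest to check with numbers. A minimal sketch, assuming the reporting rule that accumulated time is divided by data.iterations when displayed: with N = 3 repetitions of M = 1000 iterations each, the aggregate is a statistic over 3 accumulated times, and multiplying by N/M keeps the final per-iteration figure unchanged.

    #include <cassert>
    #include <cmath>
    #include <vector>

    int main() {
        const double M = 1000;                                    // iterations per repetition
        const std::vector<double> accumulated = {2.0, 2.2, 1.8};  // seconds; N = 3 repetitions
        const double N = accumulated.size();

        double mean = 0;
        for (double t : accumulated) mean += t;
        mean /= N;                              // mean accumulated time, ~2.0 s

        const double rescale = N / M;           // iteration_rescale_factor
        const double stored = mean * rescale;   // what ComputeStats stores
        const double reported = stored / N;     // reporter divides by data.iterations (= N)

        // Still the true per-iteration mean: ~2 ms.
        assert(std::fabs(reported - mean / M) < 1e-12);
        return 0;
    }
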
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/statistics.h b/src/third_party/benchmark/dist/src/statistics.h
index 7eccc85536a..7eccc85536a 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/statistics.h
+++ b/src/third_party/benchmark/dist/src/statistics.h
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/string_util.cc b/src/third_party/benchmark/dist/src/string_util.cc
index ebc3acebd2a..39b01a1719a 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/string_util.cc
+++ b/src/third_party/benchmark/dist/src/string_util.cc
@@ -160,13 +160,93 @@ std::string StrFormat(const char* format, ...) {
return tmp;
}
-void ReplaceAll(std::string* str, const std::string& from,
- const std::string& to) {
- std::size_t start = 0;
- while ((start = str->find(from, start)) != std::string::npos) {
- str->replace(start, from.length(), to);
- start += to.length();
+#ifdef BENCHMARK_STL_ANDROID_GNUSTL
+/*
+ * GNU STL in Android NDK lacks support for some C++11 functions, including
+ * stoul, stoi, stod. We reimplement them here using C functions strtoul,
+ * strtol, strtod. Note that reimplemented functions are in benchmark::
+ * namespace, not std:: namespace.
+ */
+unsigned long stoul(const std::string& str, size_t* pos, int base) {
+ /* Record previous errno */
+ const int oldErrno = errno;
+ errno = 0;
+
+ const char* strStart = str.c_str();
+ char* strEnd = const_cast<char*>(strStart);
+ const unsigned long result = strtoul(strStart, &strEnd, base);
+
+ const int strtoulErrno = errno;
+ /* Restore previous errno */
+ errno = oldErrno;
+
+ /* Check for errors and return */
+ if (strtoulErrno == ERANGE) {
+ throw std::out_of_range(
+ "stoul failed: " + str + " is outside of range of unsigned long");
+ } else if (strEnd == strStart || strtoulErrno != 0) {
+ throw std::invalid_argument(
+ "stoul failed: " + str + " is not an integer");
}
+ if (pos != nullptr) {
+ *pos = static_cast<size_t>(strEnd - strStart);
+ }
+ return result;
+}
+
+int stoi(const std::string& str, size_t* pos, int base) {
+ /* Record previous errno */
+ const int oldErrno = errno;
+ errno = 0;
+
+ const char* strStart = str.c_str();
+ char* strEnd = const_cast<char*>(strStart);
+ const long result = strtol(strStart, &strEnd, base);
+
+ const int strtolErrno = errno;
+ /* Restore previous errno */
+ errno = oldErrno;
+
+ /* Check for errors and return */
+ if (strtolErrno == ERANGE || long(int(result)) != result) {
+ throw std::out_of_range(
+ "stoul failed: " + str + " is outside of range of int");
+ } else if (strEnd == strStart || strtolErrno != 0) {
+ throw std::invalid_argument(
+ "stoul failed: " + str + " is not an integer");
+ }
+ if (pos != nullptr) {
+ *pos = static_cast<size_t>(strEnd - strStart);
+ }
+ return int(result);
+}
+
+double stod(const std::string& str, size_t* pos) {
+ /* Record previous errno */
+ const int oldErrno = errno;
+ errno = 0;
+
+ const char* strStart = str.c_str();
+ char* strEnd = const_cast<char*>(strStart);
+ const double result = strtod(strStart, &strEnd);
+
+ /* Restore previous errno */
+ const int strtodErrno = errno;
+ errno = oldErrno;
+
+ /* Check for errors and return */
+ if (strtodErrno == ERANGE) {
+ throw std::out_of_range(
+ "stoul failed: " + str + " is outside of range of int");
+ } else if (strEnd == strStart || strtodErrno != 0) {
+ throw std::invalid_argument(
+ "stoul failed: " + str + " is not an integer");
+ }
+ if (pos != nullptr) {
+ *pos = static_cast<size_t>(strEnd - strStart);
+ }
+ return result;
}
+#endif
} // end namespace benchmark
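
Note: all three reimplemented parsers above share one errno discipline: save the caller's errno, clear it, call the C strto* function, capture the parse errno, restore the original. A self-contained sketch of that pattern (DemoStoul is an illustrative name):

    #include <cerrno>
    #include <cstdlib>
    #include <stdexcept>
    #include <string>

    unsigned long DemoStoul(const std::string& str, int base = 10) {
        const int old_errno = errno;  // don't clobber the caller's errno
        errno = 0;
        const char* begin = str.c_str();
        char* end = nullptr;
        const unsigned long result = std::strtoul(begin, &end, base);
        const int parse_errno = errno;
        errno = old_errno;
        if (parse_errno == ERANGE)
            throw std::out_of_range("out of range of unsigned long: " + str);
        if (end == begin || parse_errno != 0)
            throw std::invalid_argument("not an integer: " + str);
        return result;
    }

    int main() {
        return DemoStoul("ff", 16) == 255 ? 0 : 1;
    }
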
diff --git a/src/third_party/benchmark/dist/src/string_util.h b/src/third_party/benchmark/dist/src/string_util.h
new file mode 100644
index 00000000000..09d7b4bd2a9
--- /dev/null
+++ b/src/third_party/benchmark/dist/src/string_util.h
@@ -0,0 +1,59 @@
+#ifndef BENCHMARK_STRING_UTIL_H_
+#define BENCHMARK_STRING_UTIL_H_
+
+#include <sstream>
+#include <string>
+#include <utility>
+#include "internal_macros.h"
+
+namespace benchmark {
+
+void AppendHumanReadable(int n, std::string* str);
+
+std::string HumanReadableNumber(double n, double one_k = 1024.0);
+
+#if defined(__MINGW32__)
+__attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2)))
+#elif defined(__GNUC__)
+__attribute__((format(printf, 1, 2)))
+#endif
+std::string
+StrFormat(const char* format, ...);
+
+inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
+ return out;
+}
+
+template <class First, class... Rest>
+inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) {
+ out << std::forward<First>(f);
+ return StrCatImp(out, std::forward<Rest>(rest)...);
+}
+
+template <class... Args>
+inline std::string StrCat(Args&&... args) {
+ std::ostringstream ss;
+ StrCatImp(ss, std::forward<Args>(args)...);
+ return ss.str();
+}
+
+#ifdef BENCHMARK_STL_ANDROID_GNUSTL
+/*
+ * GNU STL in Android NDK lacks support for some C++11 functions, including
+ * stoul, stoi, stod. We reimplement them here using C functions strtoul,
+ * strtol, strtod. Note that reimplemented functions are in benchmark::
+ * namespace, not std:: namespace.
+ */
+unsigned long stoul(const std::string& str, size_t* pos = nullptr,
+ int base = 10);
+int stoi(const std::string& str, size_t* pos = nullptr, int base = 10);
+double stod(const std::string& str, size_t* pos = nullptr);
+#else
+using std::stoul;
+using std::stoi;
+using std::stod;
+#endif
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_STRING_UTIL_H_
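
Note: the StrCat declared above streams an arbitrary argument list into one ostringstream by peeling one argument per recursive call. A sketch of the same pattern plus a usage line (Cat/CatImp are illustrative stand-ins):

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <utility>

    inline std::ostream& CatImp(std::ostream& out) { return out; }  // base case

    template <class First, class... Rest>
    std::ostream& CatImp(std::ostream& out, First&& f, Rest&&... rest) {
        out << std::forward<First>(f);                    // stream one argument,
        return CatImp(out, std::forward<Rest>(rest)...);  // recurse on the rest
    }

    template <class... Args>
    std::string Cat(Args&&... args) {
        std::ostringstream ss;
        CatImp(ss, std::forward<Args>(args)...);
        return ss.str();
    }

    int main() {
        // Mixed types work because each argument only needs operator<<.
        std::cout << Cat("BM_Foo/", 64, " took ", 2.5, " ms\n");
    }
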
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/sysinfo.cc b/src/third_party/benchmark/dist/src/sysinfo.cc
index d19d0ef4c1e..28126470bad 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/sysinfo.cc
+++ b/src/third_party/benchmark/dist/src/sysinfo.cc
@@ -15,10 +15,11 @@
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
-#include <Shlwapi.h>
+#include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
-#include <VersionHelpers.h>
-#include <Windows.h>
+#include <versionhelpers.h>
+#include <windows.h>
+#include <codecvt>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
@@ -36,6 +37,9 @@
#if defined(BENCHMARK_OS_SOLARIS)
#include <kstat.h>
#endif
+#if defined(BENCHMARK_OS_QNX)
+#include <sys/syspage.h>
+#endif
#include <algorithm>
#include <array>
@@ -52,6 +56,7 @@
#include <limits>
#include <memory>
#include <sstream>
+#include <locale>
#include "check.h"
#include "cycleclock.h"
@@ -207,6 +212,9 @@ bool ReadFromFile(std::string const& fname, ArgT* arg) {
bool CpuScalingEnabled(int num_cpus) {
// We don't have a valid CPU count, so don't even bother.
if (num_cpus <= 0) return false;
+#ifdef BENCHMARK_OS_QNX
+ return false;
+#endif
#ifndef BENCHMARK_OS_WINDOWS
// On Linux, the CPUfreq subsystem exposes CPU information as files on the
// local file system. If reading the exported files fails, then we may not be
@@ -225,7 +233,7 @@ int CountSetBitsInCPUMap(std::string Val) {
auto CountBits = [](std::string Part) {
using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
Part = "0x" + Part;
- CPUMask Mask(std::stoul(Part, nullptr, 16));
+ CPUMask Mask(benchmark::stoul(Part, nullptr, 16));
return static_cast<int>(Mask.count());
};
size_t Pos;
@@ -288,7 +296,7 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
std::string name;
std::string type;
int level;
- size_t num_sharing;
+ uint64_t num_sharing;
} Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]},
{"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
{"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
@@ -354,6 +362,40 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
}
return res;
}
+#elif BENCHMARK_OS_QNX
+std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX() {
+ std::vector<CPUInfo::CacheInfo> res;
+ struct cacheattr_entry *cache = SYSPAGE_ENTRY(cacheattr);
+ uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr);
+ int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize ;
+ for(int i = 0; i < num; ++i ) {
+ CPUInfo::CacheInfo info;
+ switch (cache->flags){
+ case CACHE_FLAG_INSTR :
+ info.type = "Instruction";
+ info.level = 1;
+ break;
+ case CACHE_FLAG_DATA :
+ info.type = "Data";
+ info.level = 1;
+ break;
+ case CACHE_FLAG_UNIFIED :
+ info.type = "Unified";
+ info.level = 2;
+ break;
+ case CACHE_FLAG_SHARED :
+ info.type = "Shared";
+ info.level = 3;
+ break;
+ default :
+ continue;
+ }
+ info.size = cache->line_size * cache->num_lines;
+ info.num_sharing = 0;
+ res.push_back(std::move(info));
+ cache = SYSPAGE_ARRAY_ADJ_OFFSET(cacheattr, cache, elsize);
+ }
+ return res;
+}
#endif
std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
@@ -361,11 +403,44 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
return GetCacheSizesMacOSX();
#elif defined(BENCHMARK_OS_WINDOWS)
return GetCacheSizesWindows();
+#elif defined(BENCHMARK_OS_QNX)
+ return GetCacheSizesQNX();
#else
return GetCacheSizesFromKVFS();
#endif
}
+std::string GetSystemName() {
+#if defined(BENCHMARK_OS_WINDOWS)
+ std::string str;
+ const unsigned COUNT = MAX_COMPUTERNAME_LENGTH+1;
+ TCHAR hostname[COUNT] = {'\0'};
+ DWORD DWCOUNT = COUNT;
+ if (!GetComputerName(hostname, &DWCOUNT))
+ return std::string("");
+#ifndef UNICODE
+ str = std::string(hostname, DWCOUNT);
+#else
+ // Using wstring_convert, which is deprecated in C++17
+ using convert_type = std::codecvt_utf8<wchar_t>;
+ std::wstring_convert<convert_type, wchar_t> converter;
+ std::wstring wStr(hostname, DWCOUNT);
+ str = converter.to_bytes(wStr);
+#endif
+ return str;
+#else // defined(BENCHMARK_OS_WINDOWS)
+#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac doesn't have HOST_NAME_MAX defined
+#define HOST_NAME_MAX 64
+#elif defined(BENCHMARK_OS_QNX)
+#define HOST_NAME_MAX 154
+#endif
+ char hostname[HOST_NAME_MAX];
+ int retVal = gethostname(hostname, HOST_NAME_MAX);
+ if (retVal != 0) return std::string("");
+ return std::string(hostname);
+#endif // Catch-all POSIX block.
+}
+
int GetNumCPUs() {
#ifdef BENCHMARK_HAS_SYSCTL
int NumCPU = -1;
@@ -390,6 +465,8 @@ int GetNumCPUs() {
strerror(errno));
}
return NumCPU;
+#elif defined(BENCHMARK_OS_QNX)
+ return static_cast<int>(_syspage_ptr->num_cpu);
#else
int NumCPUs = 0;
int MaxID = -1;
@@ -404,11 +481,17 @@ int GetNumCPUs() {
if (ln.empty()) continue;
size_t SplitIdx = ln.find(':');
std::string value;
+#if defined(__s390__)
+ // s390 has another format in /proc/cpuinfo
+ // it needs to be parsed differently
+ if (SplitIdx != std::string::npos) value = ln.substr(Key.size()+1,SplitIdx-Key.size()-1);
+#else
if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
+#endif
if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) {
NumCPUs++;
if (!value.empty()) {
- int CurID = std::stoi(value);
+ int CurID = benchmark::stoi(value);
MaxID = std::max(CurID, MaxID);
}
}
@@ -481,12 +564,12 @@ double GetCPUCyclesPerSecond() {
// which would cause infinite looping in WallTime_Init.
if (startsWithKey(ln, "cpu MHz")) {
if (!value.empty()) {
- double cycles_per_second = std::stod(value) * 1000000.0;
+ double cycles_per_second = benchmark::stod(value) * 1000000.0;
if (cycles_per_second > 0) return cycles_per_second;
}
} else if (startsWithKey(ln, "bogomips")) {
if (!value.empty()) {
- bogo_clock = std::stod(value) * 1000000.0;
+ bogo_clock = benchmark::stod(value) * 1000000.0;
if (bogo_clock < 0.0) bogo_clock = error_value;
}
}
@@ -563,6 +646,9 @@ double GetCPUCyclesPerSecond() {
double clock_hz = knp->value.ui64;
kstat_close(kc);
return clock_hz;
+#elif defined (BENCHMARK_OS_QNX)
+ return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) *
+ (int64_t)(1000 * 1000));
#endif
// If we've fallen through, attempt to roughly estimate the CPU clock rate.
const int estimate_time_ms = 1000;
@@ -571,6 +657,24 @@ double GetCPUCyclesPerSecond() {
return static_cast<double>(cycleclock::Now() - start_ticks);
}
+std::vector<double> GetLoadAvg() {
+#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
+ defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
+ defined BENCHMARK_OS_OPENBSD) && !defined(__ANDROID__)
+ constexpr int kMaxSamples = 3;
+ std::vector<double> res(kMaxSamples, 0.0);
+ const int nelem = getloadavg(res.data(), kMaxSamples);
+ if (nelem < 1) {
+ res.clear();
+ } else {
+ res.resize(nelem);
+ }
+ return res;
+#else
+ return {};
+#endif
+}
+
} // end namespace
const CPUInfo& CPUInfo::Get() {
@@ -582,6 +686,14 @@ CPUInfo::CPUInfo()
: num_cpus(GetNumCPUs()),
cycles_per_second(GetCPUCyclesPerSecond()),
caches(GetCacheSizes()),
- scaling_enabled(CpuScalingEnabled(num_cpus)) {}
+ scaling_enabled(CpuScalingEnabled(num_cpus)),
+ load_avg(GetLoadAvg()) {}
+
+
+const SystemInfo& SystemInfo::Get() {
+ static const SystemInfo* info = new SystemInfo();
+ return *info;
+}
+SystemInfo::SystemInfo() : name(GetSystemName()) {}
} // end namespace benchmark
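
Note: GetLoadAvg above returns up to three samples (the 1-, 5- and 15-minute averages) where getloadavg exists, and an empty vector elsewhere. A minimal usage sketch, assuming a glibc/BSD platform where getloadavg is visible via <cstdlib>:

    #include <cstdlib>   // getloadavg (POSIX extension on glibc/BSD)
    #include <iostream>
    #include <vector>

    int main() {
        std::vector<double> res(3, 0.0);
        const int n = getloadavg(res.data(), 3);  // may report fewer than 3
        if (n < 1) res.clear(); else res.resize(n);
        for (double v : res) std::cout << v << ' ';
        std::cout << '\n';
    }
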
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/thread_manager.h b/src/third_party/benchmark/dist/src/thread_manager.h
index 82b4d72b62f..1720281f0a1 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/thread_manager.h
+++ b/src/third_party/benchmark/dist/src/thread_manager.h
@@ -38,12 +38,10 @@ class ThreadManager {
public:
struct Result {
- int64_t iterations = 0;
+ IterationCount iterations = 0;
double real_time_used = 0;
double cpu_time_used = 0;
double manual_time_used = 0;
- int64_t bytes_processed = 0;
- int64_t items_processed = 0;
int64_t complexity_n = 0;
std::string report_label_;
std::string error_message_;
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/thread_timer.h b/src/third_party/benchmark/dist/src/thread_timer.h
index eaf108e017d..fbd298d3bd4 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/thread_timer.h
+++ b/src/third_party/benchmark/dist/src/thread_timer.h
@@ -8,14 +8,22 @@ namespace benchmark {
namespace internal {
class ThreadTimer {
+ explicit ThreadTimer(bool measure_process_cpu_time_)
+ : measure_process_cpu_time(measure_process_cpu_time_) {}
+
public:
- ThreadTimer() = default;
+ static ThreadTimer Create() {
+ return ThreadTimer(/*measure_process_cpu_time_=*/false);
+ }
+ static ThreadTimer CreateProcessCpuTime() {
+ return ThreadTimer(/*measure_process_cpu_time_=*/true);
+ }
// Called by each thread
void StartTimer() {
running_ = true;
start_real_time_ = ChronoClockNow();
- start_cpu_time_ = ThreadCPUUsage();
+ start_cpu_time_ = ReadCpuTimerOfChoice();
}
// Called by each thread
@@ -25,7 +33,8 @@ class ThreadTimer {
real_time_used_ += ChronoClockNow() - start_real_time_;
// Floating point error can result in the subtraction producing a negative
// time. Guard against that.
- cpu_time_used_ += std::max<double>(ThreadCPUUsage() - start_cpu_time_, 0);
+ cpu_time_used_ +=
+ std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
}
// Called by each thread
@@ -52,6 +61,14 @@ class ThreadTimer {
}
private:
+ double ReadCpuTimerOfChoice() const {
+ if (measure_process_cpu_time) return ProcessCPUUsage();
+ return ThreadCPUUsage();
+ }
+
+ // should the thread, or the process, time be measured?
+ const bool measure_process_cpu_time;
+
bool running_ = false; // Is the timer running
double start_real_time_ = 0; // If running_
double start_cpu_time_ = 0; // If running_
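
Note: ThreadTimer above hides its bool constructor behind two named factories because a bare ThreadTimer(true) would read ambiguously at call sites. A minimal sketch of that named-constructor idiom (Timer is an illustrative stand-in):

    class Timer {
        explicit Timer(bool process_wide) : process_wide_(process_wide) {}

    public:
        static Timer Create() { return Timer(/*process_wide=*/false); }
        static Timer CreateProcessCpuTime() { return Timer(/*process_wide=*/true); }
        bool process_wide() const { return process_wide_; }

    private:
        const bool process_wide_;  // which CPU clock the timer should read
    };

    int main() {
        Timer t = Timer::CreateProcessCpuTime();  // intent is visible at the call
        return t.process_wide() ? 0 : 1;
    }
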
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/timers.cc b/src/third_party/benchmark/dist/src/timers.cc
index 2010e2450b4..7613ff92c6e 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/timers.cc
+++ b/src/third_party/benchmark/dist/src/timers.cc
@@ -16,10 +16,10 @@
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
-#include <Shlwapi.h>
+#include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
-#include <VersionHelpers.h>
-#include <Windows.h>
+#include <versionhelpers.h>
+#include <windows.h>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
diff --git a/src/third_party/benchmark-1.4.1/benchmark/src/timers.h b/src/third_party/benchmark/dist/src/timers.h
index 65606ccd93d..65606ccd93d 100644
--- a/src/third_party/benchmark-1.4.1/benchmark/src/timers.h
+++ b/src/third_party/benchmark/dist/src/timers.h
diff --git a/src/third_party/benchmark/scripts/import.sh b/src/third_party/benchmark/scripts/import.sh
new file mode 100755
index 00000000000..627c496cdbf
--- /dev/null
+++ b/src/third_party/benchmark/scripts/import.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# This script downloads and imports Google Benchmark.
+#
+# Turn on strict error checking, like Perl's 'use strict'
+set -xeuo pipefail
+IFS=$'\n\t'
+
+NAME="benchmark"
+
+LIB_GIT_REV="mongo/v1.5.0"
+
+LIB_GIT_URL="https://github.com/mongodb-forks/benchmark.git"
+LIB_GIT_DIR=$(mktemp -d /tmp/import-benchmark.XXXXXX)
+
+trap "rm -rf $LIB_GIT_DIR" EXIT
+
+DIST=$(git rev-parse --show-toplevel)/src/third_party/$NAME/dist
+git clone "$LIB_GIT_URL" $LIB_GIT_DIR
+git -C $LIB_GIT_DIR checkout $LIB_GIT_REV
+
+DEST_DIR=$(git rev-parse --show-toplevel)/src/third_party/$NAME
+
+SUBDIR_WHITELIST=(
+ src
+ include/benchmark
+ LICENSE
+ README.md
+)
+
+for subdir in ${SUBDIR_WHITELIST[@]}
+do
+ [[ -d $LIB_GIT_DIR/$subdir ]] && mkdir -p $DIST/$subdir
+ cp -Trp $LIB_GIT_DIR/$subdir $DIST/$subdir
+done
diff --git a/src/third_party/scripts/benchmark_get_sources.sh b/src/third_party/scripts/benchmark_get_sources.sh
deleted file mode 100755
index 2c3256e8b32..00000000000
--- a/src/third_party/scripts/benchmark_get_sources.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-# This script downloads and imports Google Benchmark.
-# It can be run on Linux, Mac OS X or Windows WSL.
-# Actual integration into the build system is not done by this script.
-#
-# Turn on strict error checking, like perl use 'strict'
-set -xeuo pipefail
-IFS=$'\n\t'
-
-if [ "$#" -ne 0 ]; then
- echo "This script does not take any arguments"
- exit 1
-fi
-
-GIT_EXE=git
-if grep -q Microsoft /proc/version; then
- GIT_EXE=git.exe
-fi
-
-NAME=benchmark
-VERSION=1.4.1
-if grep -q Microsoft /proc/version; then
- SRC_ROOT=$(wslpath -u $(powershell.exe -Command "Get-ChildItem Env:TEMP | Get-Content | Write-Host"))
- SRC_ROOT+="$(mktemp -u /benchmark.XXXXXX)"
- mkdir -p $SRC_ROOT
-else
- SRC_ROOT=$(mktemp -d /tmp/benchmark.XXXXXX)
-fi
-
-SRC=${SRC_ROOT}/${NAME}-${VERSION}
-CLONE_DEST=$SRC
-if grep -q Microsoft /proc/version; then
- CLONE_DEST=$(wslpath -m $SRC)
-fi
-DEST_DIR=$($GIT_EXE rev-parse --show-toplevel)/src/third_party/$NAME-$VERSION
-PATCH_DIR=$($GIT_EXE rev-parse --show-toplevel)/src/third_party/$NAME-$VERSION/patches
-if grep -q Microsoft /proc/version; then
- DEST_DIR=$(wslpath -u "$DEST_DIR")
- PATCH_DIR=$(wslpath -w $(wslpath -u "$PATCH_DIR"))
-fi
-
-echo "dest: $DEST_DIR"
-echo "patch: $PATCH_DIR"
-
-if [ ! -d $SRC ]; then
- $GIT_EXE clone git@github.com:google/benchmark.git $CLONE_DEST
-
- pushd $SRC
- $GIT_EXE checkout v$VERSION
-
- $GIT_EXE am $PATCH_DIR/0001-properly-escape-json-names-652.patch
-
- popd
-fi
-
-test -d $DEST_DIR/benchmark && rm -r $DEST_DIR/benchmark
-mkdir -p $DEST_DIR/benchmark
-
-mv $SRC/.gitignore $DEST_DIR/benchmark/
-mv $SRC/include $DEST_DIR/benchmark/
-mv $SRC/src $DEST_DIR/benchmark/
-mv $SRC/LICENSE $DEST_DIR/benchmark/
-mv $SRC/README.md $DEST_DIR/benchmark/